mmu.c

/*
 * Kernel-based Virtual Machine driver for Linux
 *
 * This module enables machines with Intel VT-x extensions to run virtual
 * machines without emulation or binary translation.
 *
 * MMU support
 *
 * Copyright (C) 2006 Qumranet, Inc.
 *
 * Authors:
 *   Yaniv Kamay  <yaniv@qumranet.com>
 *   Avi Kivity   <avi@qumranet.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 */

#include "vmx.h"
#include "mmu.h"

#include <linux/kvm_host.h>
#include <linux/types.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/highmem.h>
#include <linux/module.h>
#include <linux/swap.h>

#include <asm/page.h>
#include <asm/cmpxchg.h>
#include <asm/io.h>

#undef MMU_DEBUG
#undef AUDIT

#ifdef AUDIT
static void kvm_mmu_audit(struct kvm_vcpu *vcpu, const char *msg);
#else
static void kvm_mmu_audit(struct kvm_vcpu *vcpu, const char *msg) {}
#endif

#ifdef MMU_DEBUG
#define pgprintk(x...) do { if (dbg) printk(x); } while (0)
#define rmap_printk(x...) do { if (dbg) printk(x); } while (0)
#else
#define pgprintk(x...) do { } while (0)
#define rmap_printk(x...) do { } while (0)
#endif

#if defined(MMU_DEBUG) || defined(AUDIT)
static int dbg = 1;
#endif

#ifndef MMU_DEBUG
#define ASSERT(x) do { } while (0)
#else
#define ASSERT(x)							\
	if (!(x)) {							\
		printk(KERN_WARNING "assertion failed %s:%d: %s\n",	\
		       __FILE__, __LINE__, #x);				\
	}
#endif

#define PT64_PT_BITS 9
#define PT64_ENT_PER_PAGE (1 << PT64_PT_BITS)
#define PT32_PT_BITS 10
#define PT32_ENT_PER_PAGE (1 << PT32_PT_BITS)

#define PT_WRITABLE_SHIFT 1

#define PT_PRESENT_MASK (1ULL << 0)
#define PT_WRITABLE_MASK (1ULL << PT_WRITABLE_SHIFT)
#define PT_USER_MASK (1ULL << 2)
#define PT_PWT_MASK (1ULL << 3)
#define PT_PCD_MASK (1ULL << 4)
#define PT_ACCESSED_MASK (1ULL << 5)
#define PT_DIRTY_MASK (1ULL << 6)
#define PT_PAGE_SIZE_MASK (1ULL << 7)
#define PT_PAT_MASK (1ULL << 7)
#define PT_GLOBAL_MASK (1ULL << 8)
#define PT64_NX_SHIFT 63
#define PT64_NX_MASK (1ULL << PT64_NX_SHIFT)

#define PT_PAT_SHIFT 7
#define PT_DIR_PAT_SHIFT 12
#define PT_DIR_PAT_MASK (1ULL << PT_DIR_PAT_SHIFT)

#define PT32_DIR_PSE36_SIZE 4
#define PT32_DIR_PSE36_SHIFT 13
#define PT32_DIR_PSE36_MASK \
	(((1ULL << PT32_DIR_PSE36_SIZE) - 1) << PT32_DIR_PSE36_SHIFT)

#define PT_FIRST_AVAIL_BITS_SHIFT 9
#define PT64_SECOND_AVAIL_BITS_SHIFT 52

#define PT_SHADOW_IO_MARK (1ULL << PT_FIRST_AVAIL_BITS_SHIFT)

#define VALID_PAGE(x) ((x) != INVALID_PAGE)

#define PT64_LEVEL_BITS 9

#define PT64_LEVEL_SHIFT(level) \
		(PAGE_SHIFT + (level - 1) * PT64_LEVEL_BITS)

#define PT64_LEVEL_MASK(level) \
		(((1ULL << PT64_LEVEL_BITS) - 1) << PT64_LEVEL_SHIFT(level))

#define PT64_INDEX(address, level)\
	(((address) >> PT64_LEVEL_SHIFT(level)) & ((1 << PT64_LEVEL_BITS) - 1))

#define PT32_LEVEL_BITS 10

#define PT32_LEVEL_SHIFT(level) \
		(PAGE_SHIFT + (level - 1) * PT32_LEVEL_BITS)

#define PT32_LEVEL_MASK(level) \
		(((1ULL << PT32_LEVEL_BITS) - 1) << PT32_LEVEL_SHIFT(level))

#define PT32_INDEX(address, level)\
	(((address) >> PT32_LEVEL_SHIFT(level)) & ((1 << PT32_LEVEL_BITS) - 1))

#define PT64_BASE_ADDR_MASK (((1ULL << 52) - 1) & ~(u64)(PAGE_SIZE-1))
#define PT64_DIR_BASE_ADDR_MASK \
	(PT64_BASE_ADDR_MASK & ~((1ULL << (PAGE_SHIFT + PT64_LEVEL_BITS)) - 1))

#define PT32_BASE_ADDR_MASK PAGE_MASK
#define PT32_DIR_BASE_ADDR_MASK \
	(PAGE_MASK & ~((1ULL << (PAGE_SHIFT + PT32_LEVEL_BITS)) - 1))

#define PT64_PERM_MASK (PT_PRESENT_MASK | PT_WRITABLE_MASK | PT_USER_MASK \
			| PT64_NX_MASK)

#define PFERR_PRESENT_MASK (1U << 0)
#define PFERR_WRITE_MASK (1U << 1)
#define PFERR_USER_MASK (1U << 2)
#define PFERR_FETCH_MASK (1U << 4)

#define PT64_ROOT_LEVEL 4
#define PT32_ROOT_LEVEL 2
#define PT32E_ROOT_LEVEL 3

#define PT_DIRECTORY_LEVEL 2
#define PT_PAGE_TABLE_LEVEL 1

#define RMAP_EXT 4

#define ACC_EXEC_MASK    1
#define ACC_WRITE_MASK   PT_WRITABLE_MASK
#define ACC_USER_MASK    PT_USER_MASK
#define ACC_ALL          (ACC_EXEC_MASK | ACC_WRITE_MASK | ACC_USER_MASK)

struct kvm_rmap_desc {
	u64 *shadow_ptes[RMAP_EXT];
	struct kvm_rmap_desc *more;
};

static struct kmem_cache *pte_chain_cache;
static struct kmem_cache *rmap_desc_cache;
static struct kmem_cache *mmu_page_header_cache;

static u64 __read_mostly shadow_trap_nonpresent_pte;
static u64 __read_mostly shadow_notrap_nonpresent_pte;

void kvm_mmu_set_nonpresent_ptes(u64 trap_pte, u64 notrap_pte)
{
	shadow_trap_nonpresent_pte = trap_pte;
	shadow_notrap_nonpresent_pte = notrap_pte;
}
EXPORT_SYMBOL_GPL(kvm_mmu_set_nonpresent_ptes);

static int is_write_protection(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.cr0 & X86_CR0_WP;
}

static int is_cpuid_PSE36(void)
{
	return 1;
}

static int is_nx(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.shadow_efer & EFER_NX;
}

static int is_present_pte(unsigned long pte)
{
	return pte & PT_PRESENT_MASK;
}

static int is_shadow_present_pte(u64 pte)
{
	pte &= ~PT_SHADOW_IO_MARK;
	return pte != shadow_trap_nonpresent_pte
		&& pte != shadow_notrap_nonpresent_pte;
}

static int is_writeble_pte(unsigned long pte)
{
	return pte & PT_WRITABLE_MASK;
}

static int is_dirty_pte(unsigned long pte)
{
	return pte & PT_DIRTY_MASK;
}

static int is_io_pte(unsigned long pte)
{
	return pte & PT_SHADOW_IO_MARK;
}

static int is_rmap_pte(u64 pte)
{
	return is_shadow_present_pte(pte);
}

static gfn_t pse36_gfn_delta(u32 gpte)
{
	int shift = 32 - PT32_DIR_PSE36_SHIFT - PAGE_SHIFT;

	return (gpte & PT32_DIR_PSE36_MASK) << shift;
}

static void set_shadow_pte(u64 *sptep, u64 spte)
{
#ifdef CONFIG_X86_64
	set_64bit((unsigned long *)sptep, spte);
#else
	set_64bit((unsigned long long *)sptep, spte);
#endif
}
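
/*
 * Added commentary (not in the original source): shadow ptes may be walked
 * by the hardware and inspected by other vcpus while they are being updated,
 * so the 64-bit store goes through set_64bit(). On 32-bit hosts a plain
 * assignment would be split into two 32-bit writes and could expose a
 * half-updated pte.
 */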

static int mmu_topup_memory_cache(struct kvm_mmu_memory_cache *cache,
				  struct kmem_cache *base_cache, int min)
{
	void *obj;

	if (cache->nobjs >= min)
		return 0;
	while (cache->nobjs < ARRAY_SIZE(cache->objects)) {
		obj = kmem_cache_zalloc(base_cache, GFP_KERNEL);
		if (!obj)
			return -ENOMEM;
		cache->objects[cache->nobjs++] = obj;
	}
	return 0;
}

static void mmu_free_memory_cache(struct kvm_mmu_memory_cache *mc)
{
	while (mc->nobjs)
		kfree(mc->objects[--mc->nobjs]);
}

static int mmu_topup_memory_cache_page(struct kvm_mmu_memory_cache *cache,
				       int min)
{
	struct page *page;

	if (cache->nobjs >= min)
		return 0;
	while (cache->nobjs < ARRAY_SIZE(cache->objects)) {
		page = alloc_page(GFP_KERNEL);
		if (!page)
			return -ENOMEM;
		set_page_private(page, 0);
		cache->objects[cache->nobjs++] = page_address(page);
	}
	return 0;
}

static void mmu_free_memory_cache_page(struct kvm_mmu_memory_cache *mc)
{
	while (mc->nobjs)
		free_page((unsigned long)mc->objects[--mc->nobjs]);
}

static int mmu_topup_memory_caches(struct kvm_vcpu *vcpu)
{
	int r;

	r = mmu_topup_memory_cache(&vcpu->arch.mmu_pte_chain_cache,
				   pte_chain_cache, 4);
	if (r)
		goto out;
	r = mmu_topup_memory_cache(&vcpu->arch.mmu_rmap_desc_cache,
				   rmap_desc_cache, 1);
	if (r)
		goto out;
	r = mmu_topup_memory_cache_page(&vcpu->arch.mmu_page_cache, 8);
	if (r)
		goto out;
	r = mmu_topup_memory_cache(&vcpu->arch.mmu_page_header_cache,
				   mmu_page_header_cache, 4);
out:
	return r;
}
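
/*
 * Added commentary (not in the original source): the fault paths allocate
 * pte chains, rmap descriptors and shadow pages while holding mmu_lock (a
 * spinlock), where GFP_KERNEL allocations would be illegal. Topping up these
 * per-vcpu caches beforehand lets mmu_memory_cache_alloc() below hand out
 * objects without sleeping.
 */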

static void mmu_free_memory_caches(struct kvm_vcpu *vcpu)
{
	mmu_free_memory_cache(&vcpu->arch.mmu_pte_chain_cache);
	mmu_free_memory_cache(&vcpu->arch.mmu_rmap_desc_cache);
	mmu_free_memory_cache_page(&vcpu->arch.mmu_page_cache);
	mmu_free_memory_cache(&vcpu->arch.mmu_page_header_cache);
}

static void *mmu_memory_cache_alloc(struct kvm_mmu_memory_cache *mc,
				    size_t size)
{
	void *p;

	BUG_ON(!mc->nobjs);
	p = mc->objects[--mc->nobjs];
	memset(p, 0, size);
	return p;
}

static struct kvm_pte_chain *mmu_alloc_pte_chain(struct kvm_vcpu *vcpu)
{
	return mmu_memory_cache_alloc(&vcpu->arch.mmu_pte_chain_cache,
				      sizeof(struct kvm_pte_chain));
}

static void mmu_free_pte_chain(struct kvm_pte_chain *pc)
{
	kfree(pc);
}

static struct kvm_rmap_desc *mmu_alloc_rmap_desc(struct kvm_vcpu *vcpu)
{
	return mmu_memory_cache_alloc(&vcpu->arch.mmu_rmap_desc_cache,
				      sizeof(struct kvm_rmap_desc));
}

static void mmu_free_rmap_desc(struct kvm_rmap_desc *rd)
{
	kfree(rd);
}

/*
 * Take gfn and return the reverse mapping to it.
 * Note: gfn must be unaliased before this function gets called.
 */
static unsigned long *gfn_to_rmap(struct kvm *kvm, gfn_t gfn)
{
	struct kvm_memory_slot *slot;

	slot = gfn_to_memslot(kvm, gfn);
	return &slot->rmap[gfn - slot->base_gfn];
}

/*
 * Reverse mapping data structures:
 *
 * If rmapp bit zero is zero, then rmapp points to the shadow page table entry
 * that points to page_address(page).
 *
 * If rmapp bit zero is one, then (rmapp & ~1) points to a struct kvm_rmap_desc
 * containing more mappings.
 */
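
/*
 * Added illustration (not in the original source): with a single shadow pte
 * mapping a gfn, *rmapp holds that pte's address directly, with bit 0 clear:
 *
 *	*rmapp == (unsigned long)spte;
 *
 * When a second pte maps the same gfn, rmap_add() below allocates a
 * kvm_rmap_desc, moves the old pointer into shadow_ptes[0], stores the new
 * pte in shadow_ptes[1], and tags bit 0:
 *
 *	*rmapp == (unsigned long)desc | 1;
 *
 * Each descriptor holds up to RMAP_EXT (4) ptes and chains to the next one
 * through ->more.
 */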
static void rmap_add(struct kvm_vcpu *vcpu, u64 *spte, gfn_t gfn)
{
	struct kvm_mmu_page *sp;
	struct kvm_rmap_desc *desc;
	unsigned long *rmapp;
	int i;

	if (!is_rmap_pte(*spte))
		return;
	gfn = unalias_gfn(vcpu->kvm, gfn);
	sp = page_header(__pa(spte));
	sp->gfns[spte - sp->spt] = gfn;
	rmapp = gfn_to_rmap(vcpu->kvm, gfn);
	if (!*rmapp) {
		rmap_printk("rmap_add: %p %llx 0->1\n", spte, *spte);
		*rmapp = (unsigned long)spte;
	} else if (!(*rmapp & 1)) {
		rmap_printk("rmap_add: %p %llx 1->many\n", spte, *spte);
		desc = mmu_alloc_rmap_desc(vcpu);
		desc->shadow_ptes[0] = (u64 *)*rmapp;
		desc->shadow_ptes[1] = spte;
		*rmapp = (unsigned long)desc | 1;
	} else {
		rmap_printk("rmap_add: %p %llx many->many\n", spte, *spte);
		desc = (struct kvm_rmap_desc *)(*rmapp & ~1ul);
		while (desc->shadow_ptes[RMAP_EXT-1] && desc->more)
			desc = desc->more;
		if (desc->shadow_ptes[RMAP_EXT-1]) {
			desc->more = mmu_alloc_rmap_desc(vcpu);
			desc = desc->more;
		}
		for (i = 0; desc->shadow_ptes[i]; ++i)
			;
		desc->shadow_ptes[i] = spte;
	}
}

static void rmap_desc_remove_entry(unsigned long *rmapp,
				   struct kvm_rmap_desc *desc,
				   int i,
				   struct kvm_rmap_desc *prev_desc)
{
	int j;

	for (j = RMAP_EXT - 1; !desc->shadow_ptes[j] && j > i; --j)
		;
	desc->shadow_ptes[i] = desc->shadow_ptes[j];
	desc->shadow_ptes[j] = NULL;
	if (j != 0)
		return;
	if (!prev_desc && !desc->more)
		*rmapp = (unsigned long)desc->shadow_ptes[0];
	else
		if (prev_desc)
			prev_desc->more = desc->more;
		else
			*rmapp = (unsigned long)desc->more | 1;
	mmu_free_rmap_desc(desc);
}

static void rmap_remove(struct kvm *kvm, u64 *spte)
{
	struct kvm_rmap_desc *desc;
	struct kvm_rmap_desc *prev_desc;
	struct kvm_mmu_page *sp;
	struct page *page;
	unsigned long *rmapp;
	int i;

	if (!is_rmap_pte(*spte))
		return;
	sp = page_header(__pa(spte));
	page = pfn_to_page((*spte & PT64_BASE_ADDR_MASK) >> PAGE_SHIFT);
	mark_page_accessed(page);
	if (is_writeble_pte(*spte))
		kvm_release_page_dirty(page);
	else
		kvm_release_page_clean(page);
	rmapp = gfn_to_rmap(kvm, sp->gfns[spte - sp->spt]);
	if (!*rmapp) {
		printk(KERN_ERR "rmap_remove: %p %llx 0->BUG\n", spte, *spte);
		BUG();
	} else if (!(*rmapp & 1)) {
		rmap_printk("rmap_remove: %p %llx 1->0\n", spte, *spte);
		if ((u64 *)*rmapp != spte) {
			printk(KERN_ERR "rmap_remove: %p %llx 1->BUG\n",
			       spte, *spte);
			BUG();
		}
		*rmapp = 0;
	} else {
		rmap_printk("rmap_remove: %p %llx many->many\n", spte, *spte);
		desc = (struct kvm_rmap_desc *)(*rmapp & ~1ul);
		prev_desc = NULL;
		while (desc) {
			for (i = 0; i < RMAP_EXT && desc->shadow_ptes[i]; ++i)
				if (desc->shadow_ptes[i] == spte) {
					rmap_desc_remove_entry(rmapp,
							       desc, i,
							       prev_desc);
					return;
				}
			prev_desc = desc;
			desc = desc->more;
		}
		BUG();
	}
}

static u64 *rmap_next(struct kvm *kvm, unsigned long *rmapp, u64 *spte)
{
	struct kvm_rmap_desc *desc;
	struct kvm_rmap_desc *prev_desc;
	u64 *prev_spte;
	int i;

	if (!*rmapp)
		return NULL;
	else if (!(*rmapp & 1)) {
		if (!spte)
			return (u64 *)*rmapp;
		return NULL;
	}
	desc = (struct kvm_rmap_desc *)(*rmapp & ~1ul);
	prev_desc = NULL;
	prev_spte = NULL;
	while (desc) {
		for (i = 0; i < RMAP_EXT && desc->shadow_ptes[i]; ++i) {
			if (prev_spte == spte)
				return desc->shadow_ptes[i];
			prev_spte = desc->shadow_ptes[i];
		}
		desc = desc->more;
	}
	return NULL;
}

static void rmap_write_protect(struct kvm *kvm, u64 gfn)
{
	unsigned long *rmapp;
	u64 *spte;
	int write_protected = 0;

	gfn = unalias_gfn(kvm, gfn);
	rmapp = gfn_to_rmap(kvm, gfn);

	spte = rmap_next(kvm, rmapp, NULL);
	while (spte) {
		BUG_ON(!spte);
		BUG_ON(!(*spte & PT_PRESENT_MASK));
		rmap_printk("rmap_write_protect: spte %p %llx\n", spte, *spte);
		if (is_writeble_pte(*spte)) {
			set_shadow_pte(spte, *spte & ~PT_WRITABLE_MASK);
			write_protected = 1;
		}
		spte = rmap_next(kvm, rmapp, spte);
	}
	if (write_protected)
		kvm_flush_remote_tlbs(kvm);
}

#ifdef MMU_DEBUG
static int is_empty_shadow_page(u64 *spt)
{
	u64 *pos;
	u64 *end;

	for (pos = spt, end = pos + PAGE_SIZE / sizeof(u64); pos != end; pos++)
		if ((*pos & ~PT_SHADOW_IO_MARK) != shadow_trap_nonpresent_pte) {
			printk(KERN_ERR "%s: %p %llx\n", __FUNCTION__,
			       pos, *pos);
			return 0;
		}
	return 1;
}
#endif

static void kvm_mmu_free_page(struct kvm *kvm, struct kvm_mmu_page *sp)
{
	ASSERT(is_empty_shadow_page(sp->spt));
	list_del(&sp->link);
	__free_page(virt_to_page(sp->spt));
	__free_page(virt_to_page(sp->gfns));
	kfree(sp);
	++kvm->arch.n_free_mmu_pages;
}

static unsigned kvm_page_table_hashfn(gfn_t gfn)
{
	return gfn;
}

static struct kvm_mmu_page *kvm_mmu_alloc_page(struct kvm_vcpu *vcpu,
					       u64 *parent_pte)
{
	struct kvm_mmu_page *sp;

	sp = mmu_memory_cache_alloc(&vcpu->arch.mmu_page_header_cache, sizeof *sp);
	sp->spt = mmu_memory_cache_alloc(&vcpu->arch.mmu_page_cache, PAGE_SIZE);
	sp->gfns = mmu_memory_cache_alloc(&vcpu->arch.mmu_page_cache, PAGE_SIZE);
	set_page_private(virt_to_page(sp->spt), (unsigned long)sp);
	list_add(&sp->link, &vcpu->kvm->arch.active_mmu_pages);
	ASSERT(is_empty_shadow_page(sp->spt));
	sp->slot_bitmap = 0;
	sp->multimapped = 0;
	sp->parent_pte = parent_pte;
	--vcpu->kvm->arch.n_free_mmu_pages;
	return sp;
}

static void mmu_page_add_parent_pte(struct kvm_vcpu *vcpu,
				    struct kvm_mmu_page *sp, u64 *parent_pte)
{
	struct kvm_pte_chain *pte_chain;
	struct hlist_node *node;
	int i;

	if (!parent_pte)
		return;
	if (!sp->multimapped) {
		u64 *old = sp->parent_pte;

		if (!old) {
			sp->parent_pte = parent_pte;
			return;
		}
		sp->multimapped = 1;
		pte_chain = mmu_alloc_pte_chain(vcpu);
		INIT_HLIST_HEAD(&sp->parent_ptes);
		hlist_add_head(&pte_chain->link, &sp->parent_ptes);
		pte_chain->parent_ptes[0] = old;
	}
	hlist_for_each_entry(pte_chain, node, &sp->parent_ptes, link) {
		if (pte_chain->parent_ptes[NR_PTE_CHAIN_ENTRIES-1])
			continue;
		for (i = 0; i < NR_PTE_CHAIN_ENTRIES; ++i)
			if (!pte_chain->parent_ptes[i]) {
				pte_chain->parent_ptes[i] = parent_pte;
				return;
			}
	}
	pte_chain = mmu_alloc_pte_chain(vcpu);
	BUG_ON(!pte_chain);
	hlist_add_head(&pte_chain->link, &sp->parent_ptes);
	pte_chain->parent_ptes[0] = parent_pte;
}

static void mmu_page_remove_parent_pte(struct kvm_mmu_page *sp,
				       u64 *parent_pte)
{
	struct kvm_pte_chain *pte_chain;
	struct hlist_node *node;
	int i;

	if (!sp->multimapped) {
		BUG_ON(sp->parent_pte != parent_pte);
		sp->parent_pte = NULL;
		return;
	}
	hlist_for_each_entry(pte_chain, node, &sp->parent_ptes, link)
		for (i = 0; i < NR_PTE_CHAIN_ENTRIES; ++i) {
			if (!pte_chain->parent_ptes[i])
				break;
			if (pte_chain->parent_ptes[i] != parent_pte)
				continue;
			while (i + 1 < NR_PTE_CHAIN_ENTRIES
			       && pte_chain->parent_ptes[i + 1]) {
				pte_chain->parent_ptes[i]
					= pte_chain->parent_ptes[i + 1];
				++i;
			}
			pte_chain->parent_ptes[i] = NULL;
			if (i == 0) {
				hlist_del(&pte_chain->link);
				mmu_free_pte_chain(pte_chain);
				if (hlist_empty(&sp->parent_ptes)) {
					sp->multimapped = 0;
					sp->parent_pte = NULL;
				}
			}
			return;
		}
	BUG();
}

static struct kvm_mmu_page *kvm_mmu_lookup_page(struct kvm *kvm, gfn_t gfn)
{
	unsigned index;
	struct hlist_head *bucket;
	struct kvm_mmu_page *sp;
	struct hlist_node *node;

	pgprintk("%s: looking for gfn %lx\n", __FUNCTION__, gfn);
	index = kvm_page_table_hashfn(gfn) % KVM_NUM_MMU_PAGES;
	bucket = &kvm->arch.mmu_page_hash[index];
	hlist_for_each_entry(sp, node, bucket, hash_link)
		if (sp->gfn == gfn && !sp->role.metaphysical) {
			pgprintk("%s: found role %x\n",
				 __FUNCTION__, sp->role.word);
			return sp;
		}
	return NULL;
}

static struct kvm_mmu_page *kvm_mmu_get_page(struct kvm_vcpu *vcpu,
					     gfn_t gfn,
					     gva_t gaddr,
					     unsigned level,
					     int metaphysical,
					     unsigned access,
					     u64 *parent_pte)
{
	union kvm_mmu_page_role role;
	unsigned index;
	unsigned quadrant;
	struct hlist_head *bucket;
	struct kvm_mmu_page *sp;
	struct hlist_node *node;

	role.word = 0;
	role.glevels = vcpu->arch.mmu.root_level;
	role.level = level;
	role.metaphysical = metaphysical;
	role.access = access;
	if (vcpu->arch.mmu.root_level <= PT32_ROOT_LEVEL) {
		quadrant = gaddr >> (PAGE_SHIFT + (PT64_PT_BITS * level));
		quadrant &= (1 << ((PT32_PT_BITS - PT64_PT_BITS) * level)) - 1;
		role.quadrant = quadrant;
	}
	pgprintk("%s: looking gfn %lx role %x\n", __FUNCTION__,
		 gfn, role.word);
	index = kvm_page_table_hashfn(gfn) % KVM_NUM_MMU_PAGES;
	bucket = &vcpu->kvm->arch.mmu_page_hash[index];
	hlist_for_each_entry(sp, node, bucket, hash_link)
		if (sp->gfn == gfn && sp->role.word == role.word) {
			mmu_page_add_parent_pte(vcpu, sp, parent_pte);
			pgprintk("%s: found\n", __FUNCTION__);
			return sp;
		}
	++vcpu->kvm->stat.mmu_cache_miss;
	sp = kvm_mmu_alloc_page(vcpu, parent_pte);
	if (!sp)
		return sp;
	pgprintk("%s: adding gfn %lx role %x\n", __FUNCTION__, gfn, role.word);
	sp->gfn = gfn;
	sp->role = role;
	hlist_add_head(&sp->hash_link, bucket);
	vcpu->arch.mmu.prefetch_page(vcpu, sp);
	if (!metaphysical)
		rmap_write_protect(vcpu->kvm, gfn);
	return sp;
}
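
/*
 * Added commentary (not in the original source): role.quadrant only matters
 * when the guest uses fewer paging levels than the shadow (the
 * root_level <= PT32_ROOT_LEVEL case above). A 32-bit guest page table holds
 * 1024 4-byte entries, while a shadow page holds only 512 8-byte entries, so
 * each guest table is shadowed by more than one shadow page; the quadrant
 * records which part of the guest table a given shadow page covers and is
 * folded into the role so the hash lookup can tell them apart.
 */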

static void kvm_mmu_page_unlink_children(struct kvm *kvm,
					 struct kvm_mmu_page *sp)
{
	unsigned i;
	u64 *pt;
	u64 ent;

	pt = sp->spt;

	if (sp->role.level == PT_PAGE_TABLE_LEVEL) {
		for (i = 0; i < PT64_ENT_PER_PAGE; ++i) {
			if (is_shadow_present_pte(pt[i]))
				rmap_remove(kvm, &pt[i]);
			pt[i] = shadow_trap_nonpresent_pte;
		}
		kvm_flush_remote_tlbs(kvm);
		return;
	}

	for (i = 0; i < PT64_ENT_PER_PAGE; ++i) {
		ent = pt[i];

		pt[i] = shadow_trap_nonpresent_pte;
		if (!is_shadow_present_pte(ent))
			continue;
		ent &= PT64_BASE_ADDR_MASK;
		mmu_page_remove_parent_pte(page_header(ent), &pt[i]);
	}
	kvm_flush_remote_tlbs(kvm);
}

static void kvm_mmu_put_page(struct kvm_mmu_page *sp, u64 *parent_pte)
{
	mmu_page_remove_parent_pte(sp, parent_pte);
}

static void kvm_mmu_reset_last_pte_updated(struct kvm *kvm)
{
	int i;

	for (i = 0; i < KVM_MAX_VCPUS; ++i)
		if (kvm->vcpus[i])
			kvm->vcpus[i]->arch.last_pte_updated = NULL;
}

static void kvm_mmu_zap_page(struct kvm *kvm, struct kvm_mmu_page *sp)
{
	u64 *parent_pte;

	++kvm->stat.mmu_shadow_zapped;
	while (sp->multimapped || sp->parent_pte) {
		if (!sp->multimapped)
			parent_pte = sp->parent_pte;
		else {
			struct kvm_pte_chain *chain;

			chain = container_of(sp->parent_ptes.first,
					     struct kvm_pte_chain, link);
			parent_pte = chain->parent_ptes[0];
		}
		BUG_ON(!parent_pte);
		kvm_mmu_put_page(sp, parent_pte);
		set_shadow_pte(parent_pte, shadow_trap_nonpresent_pte);
	}
	kvm_mmu_page_unlink_children(kvm, sp);
	if (!sp->root_count) {
		hlist_del(&sp->hash_link);
		kvm_mmu_free_page(kvm, sp);
	} else
		list_move(&sp->link, &kvm->arch.active_mmu_pages);
	kvm_mmu_reset_last_pte_updated(kvm);
}

/*
 * Changing the number of mmu pages allocated to the vm.
 * Note: if kvm_nr_mmu_pages is too small, you will get a deadlock.
 */
void kvm_mmu_change_mmu_pages(struct kvm *kvm, unsigned int kvm_nr_mmu_pages)
{
	/*
	 * If we set the number of mmu pages to be smaller than the number
	 * of active pages, we must free some mmu pages before we change
	 * the value.
	 */
	if ((kvm->arch.n_alloc_mmu_pages - kvm->arch.n_free_mmu_pages) >
	    kvm_nr_mmu_pages) {
		int n_used_mmu_pages = kvm->arch.n_alloc_mmu_pages
				       - kvm->arch.n_free_mmu_pages;

		while (n_used_mmu_pages > kvm_nr_mmu_pages) {
			struct kvm_mmu_page *page;

			page = container_of(kvm->arch.active_mmu_pages.prev,
					    struct kvm_mmu_page, link);
			kvm_mmu_zap_page(kvm, page);
			n_used_mmu_pages--;
		}
		kvm->arch.n_free_mmu_pages = 0;
	}
	else
		kvm->arch.n_free_mmu_pages += kvm_nr_mmu_pages
					      - kvm->arch.n_alloc_mmu_pages;

	kvm->arch.n_alloc_mmu_pages = kvm_nr_mmu_pages;
}

static int kvm_mmu_unprotect_page(struct kvm *kvm, gfn_t gfn)
{
	unsigned index;
	struct hlist_head *bucket;
	struct kvm_mmu_page *sp;
	struct hlist_node *node, *n;
	int r;

	pgprintk("%s: looking for gfn %lx\n", __FUNCTION__, gfn);
	r = 0;
	index = kvm_page_table_hashfn(gfn) % KVM_NUM_MMU_PAGES;
	bucket = &kvm->arch.mmu_page_hash[index];
	hlist_for_each_entry_safe(sp, node, n, bucket, hash_link)
		if (sp->gfn == gfn && !sp->role.metaphysical) {
			pgprintk("%s: gfn %lx role %x\n", __FUNCTION__, gfn,
				 sp->role.word);
			kvm_mmu_zap_page(kvm, sp);
			r = 1;
		}
	return r;
}

static void mmu_unshadow(struct kvm *kvm, gfn_t gfn)
{
	struct kvm_mmu_page *sp;

	while ((sp = kvm_mmu_lookup_page(kvm, gfn)) != NULL) {
		pgprintk("%s: zap %lx %x\n", __FUNCTION__, gfn, sp->role.word);
		kvm_mmu_zap_page(kvm, sp);
	}
}

static void page_header_update_slot(struct kvm *kvm, void *pte, gfn_t gfn)
{
	int slot = memslot_id(kvm, gfn_to_memslot(kvm, gfn));
	struct kvm_mmu_page *sp = page_header(__pa(pte));

	__set_bit(slot, &sp->slot_bitmap);
}

struct page *gva_to_page(struct kvm_vcpu *vcpu, gva_t gva)
{
	struct page *page;

	gpa_t gpa = vcpu->arch.mmu.gva_to_gpa(vcpu, gva);

	if (gpa == UNMAPPED_GVA)
		return NULL;

	down_read(&current->mm->mmap_sem);
	page = gfn_to_page(vcpu->kvm, gpa >> PAGE_SHIFT);
	up_read(&current->mm->mmap_sem);

	return page;
}

static void mmu_set_spte(struct kvm_vcpu *vcpu, u64 *shadow_pte,
			 unsigned pt_access, unsigned pte_access,
			 int user_fault, int write_fault, int dirty,
			 int *ptwrite, gfn_t gfn, struct page *page)
{
	u64 spte;
	int was_rmapped = 0;
	int was_writeble = is_writeble_pte(*shadow_pte);
	hfn_t host_pfn = (*shadow_pte & PT64_BASE_ADDR_MASK) >> PAGE_SHIFT;

	pgprintk("%s: spte %llx access %x write_fault %d"
		 " user_fault %d gfn %lx\n",
		 __FUNCTION__, *shadow_pte, pt_access,
		 write_fault, user_fault, gfn);

	if (is_rmap_pte(*shadow_pte)) {
		if (host_pfn != page_to_pfn(page)) {
			pgprintk("hfn old %lx new %lx\n",
				 host_pfn, page_to_pfn(page));
			rmap_remove(vcpu->kvm, shadow_pte);
		}
		else
			was_rmapped = 1;
	}

	/*
	 * We don't set the accessed bit, since we sometimes want to see
	 * whether the guest actually used the pte (in order to detect
	 * demand paging).
	 */
	spte = PT_PRESENT_MASK | PT_DIRTY_MASK;
	if (!dirty)
		pte_access &= ~ACC_WRITE_MASK;
	if (!(pte_access & ACC_EXEC_MASK))
		spte |= PT64_NX_MASK;

	spte |= PT_PRESENT_MASK;
	if (pte_access & ACC_USER_MASK)
		spte |= PT_USER_MASK;

	if (is_error_page(page)) {
		set_shadow_pte(shadow_pte,
			       shadow_trap_nonpresent_pte | PT_SHADOW_IO_MARK);
		kvm_release_page_clean(page);
		return;
	}

	spte |= page_to_phys(page);

	if ((pte_access & ACC_WRITE_MASK)
	    || (write_fault && !is_write_protection(vcpu) && !user_fault)) {
		struct kvm_mmu_page *shadow;

		spte |= PT_WRITABLE_MASK;
		if (user_fault) {
			mmu_unshadow(vcpu->kvm, gfn);
			goto unshadowed;
		}

		shadow = kvm_mmu_lookup_page(vcpu->kvm, gfn);
		if (shadow) {
			pgprintk("%s: found shadow page for %lx, marking ro\n",
				 __FUNCTION__, gfn);
			pte_access &= ~ACC_WRITE_MASK;
			if (is_writeble_pte(spte)) {
				spte &= ~PT_WRITABLE_MASK;
				kvm_x86_ops->tlb_flush(vcpu);
			}
			if (write_fault)
				*ptwrite = 1;
		}
	}

unshadowed:

	if (pte_access & ACC_WRITE_MASK)
		mark_page_dirty(vcpu->kvm, gfn);

	pgprintk("%s: setting spte %llx\n", __FUNCTION__, spte);
	set_shadow_pte(shadow_pte, spte);
	page_header_update_slot(vcpu->kvm, shadow_pte, gfn);
	if (!was_rmapped) {
		rmap_add(vcpu, shadow_pte, gfn);
		if (!is_rmap_pte(*shadow_pte))
			kvm_release_page_clean(page);
	} else {
		if (was_writeble)
			kvm_release_page_dirty(page);
		else
			kvm_release_page_clean(page);
	}
	if (!ptwrite || !*ptwrite)
		vcpu->arch.last_pte_updated = shadow_pte;
}
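
/*
 * Added commentary (not in the original source): the kvm_mmu_lookup_page()
 * call above is what implements write protection of guest page tables. If
 * the gfn being mapped writable is itself shadowed as a page table, the
 * writable bit is dropped so guest writes to it fault and are funneled
 * through kvm_mmu_pte_write(), which keeps the shadow entries in sync.
 */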

static void nonpaging_new_cr3(struct kvm_vcpu *vcpu)
{
}

static int __nonpaging_map(struct kvm_vcpu *vcpu, gva_t v, int write,
			   gfn_t gfn, struct page *page)
{
	int level = PT32E_ROOT_LEVEL;
	hpa_t table_addr = vcpu->arch.mmu.root_hpa;
	int pt_write = 0;

	for (; ; level--) {
		u32 index = PT64_INDEX(v, level);
		u64 *table;

		ASSERT(VALID_PAGE(table_addr));
		table = __va(table_addr);

		if (level == 1) {
			mmu_set_spte(vcpu, &table[index], ACC_ALL, ACC_ALL,
				     0, write, 1, &pt_write, gfn, page);
			return pt_write || is_io_pte(table[index]);
		}

		if (table[index] == shadow_trap_nonpresent_pte) {
			struct kvm_mmu_page *new_table;
			gfn_t pseudo_gfn;

			pseudo_gfn = (v & PT64_DIR_BASE_ADDR_MASK)
				>> PAGE_SHIFT;
			new_table = kvm_mmu_get_page(vcpu, pseudo_gfn,
						     v, level - 1,
						     1, ACC_ALL, &table[index]);
			if (!new_table) {
				pgprintk("nonpaging_map: ENOMEM\n");
				kvm_release_page_clean(page);
				return -ENOMEM;
			}

			table[index] = __pa(new_table->spt) | PT_PRESENT_MASK
				| PT_WRITABLE_MASK | PT_USER_MASK;
		}
		table_addr = table[index] & PT64_BASE_ADDR_MASK;
	}
}

static int nonpaging_map(struct kvm_vcpu *vcpu, gva_t v, int write, gfn_t gfn)
{
	int r;
	struct page *page;

	down_read(&vcpu->kvm->slots_lock);

	down_read(&current->mm->mmap_sem);
	page = gfn_to_page(vcpu->kvm, gfn);
	up_read(&current->mm->mmap_sem);

	spin_lock(&vcpu->kvm->mmu_lock);
	kvm_mmu_free_some_pages(vcpu);
	r = __nonpaging_map(vcpu, v, write, gfn, page);
	spin_unlock(&vcpu->kvm->mmu_lock);

	up_read(&vcpu->kvm->slots_lock);

	return r;
}

static void nonpaging_prefetch_page(struct kvm_vcpu *vcpu,
				    struct kvm_mmu_page *sp)
{
	int i;

	for (i = 0; i < PT64_ENT_PER_PAGE; ++i)
		sp->spt[i] = shadow_trap_nonpresent_pte;
}

static void mmu_free_roots(struct kvm_vcpu *vcpu)
{
	int i;
	struct kvm_mmu_page *sp;

	if (!VALID_PAGE(vcpu->arch.mmu.root_hpa))
		return;
	spin_lock(&vcpu->kvm->mmu_lock);
#ifdef CONFIG_X86_64
	if (vcpu->arch.mmu.shadow_root_level == PT64_ROOT_LEVEL) {
		hpa_t root = vcpu->arch.mmu.root_hpa;

		sp = page_header(root);
		--sp->root_count;
		vcpu->arch.mmu.root_hpa = INVALID_PAGE;
		spin_unlock(&vcpu->kvm->mmu_lock);
		return;
	}
#endif
	for (i = 0; i < 4; ++i) {
		hpa_t root = vcpu->arch.mmu.pae_root[i];

		if (root) {
			root &= PT64_BASE_ADDR_MASK;
			sp = page_header(root);
			--sp->root_count;
		}
		vcpu->arch.mmu.pae_root[i] = INVALID_PAGE;
	}
	spin_unlock(&vcpu->kvm->mmu_lock);
	vcpu->arch.mmu.root_hpa = INVALID_PAGE;
}

static void mmu_alloc_roots(struct kvm_vcpu *vcpu)
{
	int i;
	gfn_t root_gfn;
	struct kvm_mmu_page *sp;

	root_gfn = vcpu->arch.cr3 >> PAGE_SHIFT;

#ifdef CONFIG_X86_64
	if (vcpu->arch.mmu.shadow_root_level == PT64_ROOT_LEVEL) {
		hpa_t root = vcpu->arch.mmu.root_hpa;

		ASSERT(!VALID_PAGE(root));
		sp = kvm_mmu_get_page(vcpu, root_gfn, 0,
				      PT64_ROOT_LEVEL, 0, ACC_ALL, NULL);
		root = __pa(sp->spt);
		++sp->root_count;
		vcpu->arch.mmu.root_hpa = root;
		return;
	}
#endif
	for (i = 0; i < 4; ++i) {
		hpa_t root = vcpu->arch.mmu.pae_root[i];

		ASSERT(!VALID_PAGE(root));
		if (vcpu->arch.mmu.root_level == PT32E_ROOT_LEVEL) {
			if (!is_present_pte(vcpu->arch.pdptrs[i])) {
				vcpu->arch.mmu.pae_root[i] = 0;
				continue;
			}
			root_gfn = vcpu->arch.pdptrs[i] >> PAGE_SHIFT;
		} else if (vcpu->arch.mmu.root_level == 0)
			root_gfn = 0;
		sp = kvm_mmu_get_page(vcpu, root_gfn, i << 30,
				      PT32_ROOT_LEVEL, !is_paging(vcpu),
				      ACC_ALL, NULL);
		root = __pa(sp->spt);
		++sp->root_count;
		vcpu->arch.mmu.pae_root[i] = root | PT_PRESENT_MASK;
	}
	vcpu->arch.mmu.root_hpa = __pa(vcpu->arch.mmu.pae_root);
}

static gpa_t nonpaging_gva_to_gpa(struct kvm_vcpu *vcpu, gva_t vaddr)
{
	return vaddr;
}

static int nonpaging_page_fault(struct kvm_vcpu *vcpu, gva_t gva,
				u32 error_code)
{
	gfn_t gfn;
	int r;

	pgprintk("%s: gva %lx error %x\n", __FUNCTION__, gva, error_code);
	r = mmu_topup_memory_caches(vcpu);
	if (r)
		return r;

	ASSERT(vcpu);
	ASSERT(VALID_PAGE(vcpu->arch.mmu.root_hpa));

	gfn = gva >> PAGE_SHIFT;

	return nonpaging_map(vcpu, gva & PAGE_MASK,
			     error_code & PFERR_WRITE_MASK, gfn);
}

static void nonpaging_free(struct kvm_vcpu *vcpu)
{
	mmu_free_roots(vcpu);
}

static int nonpaging_init_context(struct kvm_vcpu *vcpu)
{
	struct kvm_mmu *context = &vcpu->arch.mmu;

	context->new_cr3 = nonpaging_new_cr3;
	context->page_fault = nonpaging_page_fault;
	context->gva_to_gpa = nonpaging_gva_to_gpa;
	context->free = nonpaging_free;
	context->prefetch_page = nonpaging_prefetch_page;
	context->root_level = 0;
	context->shadow_root_level = PT32E_ROOT_LEVEL;
	context->root_hpa = INVALID_PAGE;
	return 0;
}

void kvm_mmu_flush_tlb(struct kvm_vcpu *vcpu)
{
	++vcpu->stat.tlb_flush;
	kvm_x86_ops->tlb_flush(vcpu);
}

static void paging_new_cr3(struct kvm_vcpu *vcpu)
{
	pgprintk("%s: cr3 %lx\n", __FUNCTION__, vcpu->arch.cr3);
	mmu_free_roots(vcpu);
}

static void inject_page_fault(struct kvm_vcpu *vcpu,
			      u64 addr,
			      u32 err_code)
{
	kvm_inject_page_fault(vcpu, addr, err_code);
}

static void paging_free(struct kvm_vcpu *vcpu)
{
	nonpaging_free(vcpu);
}

#define PTTYPE 64
#include "paging_tmpl.h"
#undef PTTYPE

#define PTTYPE 32
#include "paging_tmpl.h"
#undef PTTYPE

static int paging64_init_context_common(struct kvm_vcpu *vcpu, int level)
{
	struct kvm_mmu *context = &vcpu->arch.mmu;

	ASSERT(is_pae(vcpu));
	context->new_cr3 = paging_new_cr3;
	context->page_fault = paging64_page_fault;
	context->gva_to_gpa = paging64_gva_to_gpa;
	context->prefetch_page = paging64_prefetch_page;
	context->free = paging_free;
	context->root_level = level;
	context->shadow_root_level = level;
	context->root_hpa = INVALID_PAGE;
	return 0;
}

static int paging64_init_context(struct kvm_vcpu *vcpu)
{
	return paging64_init_context_common(vcpu, PT64_ROOT_LEVEL);
}

static int paging32_init_context(struct kvm_vcpu *vcpu)
{
	struct kvm_mmu *context = &vcpu->arch.mmu;

	context->new_cr3 = paging_new_cr3;
	context->page_fault = paging32_page_fault;
	context->gva_to_gpa = paging32_gva_to_gpa;
	context->free = paging_free;
	context->prefetch_page = paging32_prefetch_page;
	context->root_level = PT32_ROOT_LEVEL;
	context->shadow_root_level = PT32E_ROOT_LEVEL;
	context->root_hpa = INVALID_PAGE;
	return 0;
}

static int paging32E_init_context(struct kvm_vcpu *vcpu)
{
	return paging64_init_context_common(vcpu, PT32E_ROOT_LEVEL);
}

static int init_kvm_mmu(struct kvm_vcpu *vcpu)
{
	ASSERT(vcpu);
	ASSERT(!VALID_PAGE(vcpu->arch.mmu.root_hpa));

	if (!is_paging(vcpu))
		return nonpaging_init_context(vcpu);
	else if (is_long_mode(vcpu))
		return paging64_init_context(vcpu);
	else if (is_pae(vcpu))
		return paging32E_init_context(vcpu);
	else
		return paging32_init_context(vcpu);
}
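
/*
 * Added commentary (not in the original source): the context is chosen from
 * the guest's paging mode. A non-paging guest gets the nonpaging context
 * (still shadowed with PAE-format tables, see shadow_root_level above); a
 * long-mode guest gets 4-level shadow paging; a PAE guest gets 3 levels; and
 * a plain 32-bit paging guest has its 2-level tables shadowed by PAE-format
 * tables.
 */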

static void destroy_kvm_mmu(struct kvm_vcpu *vcpu)
{
	ASSERT(vcpu);
	if (VALID_PAGE(vcpu->arch.mmu.root_hpa)) {
		vcpu->arch.mmu.free(vcpu);
		vcpu->arch.mmu.root_hpa = INVALID_PAGE;
	}
}

int kvm_mmu_reset_context(struct kvm_vcpu *vcpu)
{
	destroy_kvm_mmu(vcpu);
	return init_kvm_mmu(vcpu);
}
EXPORT_SYMBOL_GPL(kvm_mmu_reset_context);

int kvm_mmu_load(struct kvm_vcpu *vcpu)
{
	int r;

	r = mmu_topup_memory_caches(vcpu);
	if (r)
		goto out;
	spin_lock(&vcpu->kvm->mmu_lock);
	kvm_mmu_free_some_pages(vcpu);
	mmu_alloc_roots(vcpu);
	spin_unlock(&vcpu->kvm->mmu_lock);
	kvm_x86_ops->set_cr3(vcpu, vcpu->arch.mmu.root_hpa);
	kvm_mmu_flush_tlb(vcpu);
out:
	return r;
}
EXPORT_SYMBOL_GPL(kvm_mmu_load);

void kvm_mmu_unload(struct kvm_vcpu *vcpu)
{
	mmu_free_roots(vcpu);
}

static void mmu_pte_write_zap_pte(struct kvm_vcpu *vcpu,
				  struct kvm_mmu_page *sp,
				  u64 *spte)
{
	u64 pte;
	struct kvm_mmu_page *child;

	pte = *spte;
	if (is_shadow_present_pte(pte)) {
		if (sp->role.level == PT_PAGE_TABLE_LEVEL)
			rmap_remove(vcpu->kvm, spte);
		else {
			child = page_header(pte & PT64_BASE_ADDR_MASK);
			mmu_page_remove_parent_pte(child, spte);
		}
	}
	set_shadow_pte(spte, shadow_trap_nonpresent_pte);
}

static void mmu_pte_write_new_pte(struct kvm_vcpu *vcpu,
				  struct kvm_mmu_page *sp,
				  u64 *spte,
				  const void *new, int bytes,
				  int offset_in_pte)
{
	if (sp->role.level != PT_PAGE_TABLE_LEVEL) {
		++vcpu->kvm->stat.mmu_pde_zapped;
		return;
	}

	++vcpu->kvm->stat.mmu_pte_updated;
	if (sp->role.glevels == PT32_ROOT_LEVEL)
		paging32_update_pte(vcpu, sp, spte, new, bytes, offset_in_pte);
	else
		paging64_update_pte(vcpu, sp, spte, new, bytes, offset_in_pte);
}

static bool need_remote_flush(u64 old, u64 new)
{
	if (!is_shadow_present_pte(old))
		return false;
	if (!is_shadow_present_pte(new))
		return true;
	if ((old ^ new) & PT64_BASE_ADDR_MASK)
		return true;
	old ^= PT64_NX_MASK;
	new ^= PT64_NX_MASK;
	return (old & ~new & PT64_PERM_MASK) != 0;
}
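
/*
 * Added commentary (not in the original source): flipping PT64_NX_MASK in
 * both values before the final test makes NX behave like the other
 * permission bits. For present/writable/user, clearing the bit removes a
 * permission; for NX, *setting* the bit removes execute permission. After
 * the xor, "old & ~new & PT64_PERM_MASK" uniformly means "some permission
 * was taken away", which is when remote TLBs must be flushed.
 */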

static void mmu_pte_write_flush_tlb(struct kvm_vcpu *vcpu, u64 old, u64 new)
{
	if (need_remote_flush(old, new))
		kvm_flush_remote_tlbs(vcpu->kvm);
	else
		kvm_mmu_flush_tlb(vcpu);
}

static bool last_updated_pte_accessed(struct kvm_vcpu *vcpu)
{
	u64 *spte = vcpu->arch.last_pte_updated;

	return !!(spte && (*spte & PT_ACCESSED_MASK));
}

static void mmu_guess_page_from_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
					  const u8 *new, int bytes)
{
	gfn_t gfn;
	int r;
	u64 gpte = 0;
	struct page *page;

	if (bytes != 4 && bytes != 8)
		return;

	/*
	 * Assume that the pte write is on a page table of the same type
	 * as the current vcpu paging mode.  This is nearly always true
	 * (might be false while changing modes).  Note it is verified later
	 * by update_pte().
	 */
	if (is_pae(vcpu)) {
		/* Handle a 32-bit guest writing two halves of a 64-bit gpte */
		if ((bytes == 4) && (gpa % 4 == 0)) {
			r = kvm_read_guest(vcpu->kvm, gpa & ~(u64)7, &gpte, 8);
			if (r)
				return;
			memcpy((void *)&gpte + (gpa % 8), new, 4);
		} else if ((bytes == 8) && (gpa % 8 == 0)) {
			memcpy((void *)&gpte, new, 8);
		}
	} else {
		if ((bytes == 4) && (gpa % 4 == 0))
			memcpy((void *)&gpte, new, 4);
	}
	if (!is_present_pte(gpte))
		return;
	gfn = (gpte & PT64_BASE_ADDR_MASK) >> PAGE_SHIFT;

	down_read(&current->mm->mmap_sem);
	page = gfn_to_page(vcpu->kvm, gfn);
	up_read(&current->mm->mmap_sem);

	vcpu->arch.update_pte.gfn = gfn;
	vcpu->arch.update_pte.page = page;
}

void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
		       const u8 *new, int bytes)
{
	gfn_t gfn = gpa >> PAGE_SHIFT;
	struct kvm_mmu_page *sp;
	struct hlist_node *node, *n;
	struct hlist_head *bucket;
	unsigned index;
	u64 entry;
	u64 *spte;
	unsigned offset = offset_in_page(gpa);
	unsigned pte_size;
	unsigned page_offset;
	unsigned misaligned;
	unsigned quadrant;
	int level;
	int flooded = 0;
	int npte;

	pgprintk("%s: gpa %llx bytes %d\n", __FUNCTION__, gpa, bytes);
	mmu_guess_page_from_pte_write(vcpu, gpa, new, bytes);
	spin_lock(&vcpu->kvm->mmu_lock);
	kvm_mmu_free_some_pages(vcpu);
	++vcpu->kvm->stat.mmu_pte_write;
	kvm_mmu_audit(vcpu, "pre pte write");
	if (gfn == vcpu->arch.last_pt_write_gfn
	    && !last_updated_pte_accessed(vcpu)) {
		++vcpu->arch.last_pt_write_count;
		if (vcpu->arch.last_pt_write_count >= 3)
			flooded = 1;
	} else {
		vcpu->arch.last_pt_write_gfn = gfn;
		vcpu->arch.last_pt_write_count = 1;
		vcpu->arch.last_pte_updated = NULL;
	}
	index = kvm_page_table_hashfn(gfn) % KVM_NUM_MMU_PAGES;
	bucket = &vcpu->kvm->arch.mmu_page_hash[index];
	hlist_for_each_entry_safe(sp, node, n, bucket, hash_link) {
		if (sp->gfn != gfn || sp->role.metaphysical)
			continue;
		pte_size = sp->role.glevels == PT32_ROOT_LEVEL ? 4 : 8;
		misaligned = (offset ^ (offset + bytes - 1)) & ~(pte_size - 1);
		misaligned |= bytes < 4;
		if (misaligned || flooded) {
			/*
			 * Misaligned accesses are too much trouble to fix
			 * up; also, they usually indicate a page is not used
			 * as a page table.
			 *
			 * If we're seeing too many writes to a page,
			 * it may no longer be a page table, or we may be
			 * forking, in which case it is better to unmap the
			 * page.
			 */
			pgprintk("misaligned: gpa %llx bytes %d role %x\n",
				 gpa, bytes, sp->role.word);
			kvm_mmu_zap_page(vcpu->kvm, sp);
			++vcpu->kvm->stat.mmu_flooded;
			continue;
		}
		page_offset = offset;
		level = sp->role.level;
		npte = 1;
		if (sp->role.glevels == PT32_ROOT_LEVEL) {
			page_offset <<= 1;	/* 32->64 */
			/*
			 * A 32-bit pde maps 4MB while the shadow pdes map
			 * only 2MB.  So we need to double the offset again
			 * and zap two pdes instead of one.
			 */
			if (level == PT32_ROOT_LEVEL) {
				page_offset &= ~7; /* kill rounding error */
				page_offset <<= 1;
				npte = 2;
			}
			quadrant = page_offset >> PAGE_SHIFT;
			page_offset &= ~PAGE_MASK;
			if (quadrant != sp->role.quadrant)
				continue;
		}
		spte = &sp->spt[page_offset / sizeof(*spte)];
		while (npte--) {
			entry = *spte;
			mmu_pte_write_zap_pte(vcpu, sp, spte);
			mmu_pte_write_new_pte(vcpu, sp, spte, new, bytes,
					      page_offset & (pte_size - 1));
			mmu_pte_write_flush_tlb(vcpu, entry, *spte);
			++spte;
		}
	}
	kvm_mmu_audit(vcpu, "post pte write");
	spin_unlock(&vcpu->kvm->mmu_lock);
	if (vcpu->arch.update_pte.page) {
		kvm_release_page_clean(vcpu->arch.update_pte.page);
		vcpu->arch.update_pte.page = NULL;
	}
}

int kvm_mmu_unprotect_page_virt(struct kvm_vcpu *vcpu, gva_t gva)
{
	gpa_t gpa;
	int r;

	down_read(&vcpu->kvm->slots_lock);
	gpa = vcpu->arch.mmu.gva_to_gpa(vcpu, gva);
	up_read(&vcpu->kvm->slots_lock);

	spin_lock(&vcpu->kvm->mmu_lock);
	r = kvm_mmu_unprotect_page(vcpu->kvm, gpa >> PAGE_SHIFT);
	spin_unlock(&vcpu->kvm->mmu_lock);
	return r;
}

void __kvm_mmu_free_some_pages(struct kvm_vcpu *vcpu)
{
	while (vcpu->kvm->arch.n_free_mmu_pages < KVM_REFILL_PAGES) {
		struct kvm_mmu_page *sp;

		sp = container_of(vcpu->kvm->arch.active_mmu_pages.prev,
				  struct kvm_mmu_page, link);
		kvm_mmu_zap_page(vcpu->kvm, sp);
		++vcpu->kvm->stat.mmu_recycled;
	}
}

int kvm_mmu_page_fault(struct kvm_vcpu *vcpu, gva_t cr2, u32 error_code)
{
	int r;
	enum emulation_result er;

	r = vcpu->arch.mmu.page_fault(vcpu, cr2, error_code);
	if (r < 0)
		goto out;

	if (!r) {
		r = 1;
		goto out;
	}

	r = mmu_topup_memory_caches(vcpu);
	if (r)
		goto out;

	er = emulate_instruction(vcpu, vcpu->run, cr2, error_code, 0);

	switch (er) {
	case EMULATE_DONE:
		return 1;
	case EMULATE_DO_MMIO:
		++vcpu->stat.mmio_exits;
		return 0;
	case EMULATE_FAIL:
		kvm_report_emulation_failure(vcpu, "pagetable");
		return 1;
	default:
		BUG();
	}
out:
	return r;
}
EXPORT_SYMBOL_GPL(kvm_mmu_page_fault);

static void free_mmu_pages(struct kvm_vcpu *vcpu)
{
	struct kvm_mmu_page *sp;

	while (!list_empty(&vcpu->kvm->arch.active_mmu_pages)) {
		sp = container_of(vcpu->kvm->arch.active_mmu_pages.next,
				  struct kvm_mmu_page, link);
		kvm_mmu_zap_page(vcpu->kvm, sp);
	}
	free_page((unsigned long)vcpu->arch.mmu.pae_root);
}

static int alloc_mmu_pages(struct kvm_vcpu *vcpu)
{
	struct page *page;
	int i;

	ASSERT(vcpu);

	if (vcpu->kvm->arch.n_requested_mmu_pages)
		vcpu->kvm->arch.n_free_mmu_pages =
					vcpu->kvm->arch.n_requested_mmu_pages;
	else
		vcpu->kvm->arch.n_free_mmu_pages =
					vcpu->kvm->arch.n_alloc_mmu_pages;
	/*
	 * When emulating 32-bit mode, cr3 is only 32 bits even on x86_64.
	 * Therefore we need to allocate shadow page tables in the first
	 * 4GB of memory, which happens to fit the DMA32 zone.
	 */
	page = alloc_page(GFP_KERNEL | __GFP_DMA32);
	if (!page)
		goto error_1;
	vcpu->arch.mmu.pae_root = page_address(page);
	for (i = 0; i < 4; ++i)
		vcpu->arch.mmu.pae_root[i] = INVALID_PAGE;

	return 0;

error_1:
	free_mmu_pages(vcpu);
	return -ENOMEM;
}

int kvm_mmu_create(struct kvm_vcpu *vcpu)
{
	ASSERT(vcpu);
	ASSERT(!VALID_PAGE(vcpu->arch.mmu.root_hpa));

	return alloc_mmu_pages(vcpu);
}

int kvm_mmu_setup(struct kvm_vcpu *vcpu)
{
	ASSERT(vcpu);
	ASSERT(!VALID_PAGE(vcpu->arch.mmu.root_hpa));

	return init_kvm_mmu(vcpu);
}

void kvm_mmu_destroy(struct kvm_vcpu *vcpu)
{
	ASSERT(vcpu);

	destroy_kvm_mmu(vcpu);
	free_mmu_pages(vcpu);
	mmu_free_memory_caches(vcpu);
}

void kvm_mmu_slot_remove_write_access(struct kvm *kvm, int slot)
{
	struct kvm_mmu_page *sp;

	list_for_each_entry(sp, &kvm->arch.active_mmu_pages, link) {
		int i;
		u64 *pt;

		if (!test_bit(slot, &sp->slot_bitmap))
			continue;

		pt = sp->spt;
		for (i = 0; i < PT64_ENT_PER_PAGE; ++i)
			/* avoid RMW */
			if (pt[i] & PT_WRITABLE_MASK)
				pt[i] &= ~PT_WRITABLE_MASK;
	}
}

void kvm_mmu_zap_all(struct kvm *kvm)
{
	struct kvm_mmu_page *sp, *node;

	spin_lock(&kvm->mmu_lock);
	list_for_each_entry_safe(sp, node, &kvm->arch.active_mmu_pages, link)
		kvm_mmu_zap_page(kvm, sp);
	spin_unlock(&kvm->mmu_lock);

	kvm_flush_remote_tlbs(kvm);
}

void kvm_mmu_module_exit(void)
{
	if (pte_chain_cache)
		kmem_cache_destroy(pte_chain_cache);
	if (rmap_desc_cache)
		kmem_cache_destroy(rmap_desc_cache);
	if (mmu_page_header_cache)
		kmem_cache_destroy(mmu_page_header_cache);
}

int kvm_mmu_module_init(void)
{
	pte_chain_cache = kmem_cache_create("kvm_pte_chain",
					    sizeof(struct kvm_pte_chain),
					    0, 0, NULL);
	if (!pte_chain_cache)
		goto nomem;
	rmap_desc_cache = kmem_cache_create("kvm_rmap_desc",
					    sizeof(struct kvm_rmap_desc),
					    0, 0, NULL);
	if (!rmap_desc_cache)
		goto nomem;

	mmu_page_header_cache = kmem_cache_create("kvm_mmu_page_header",
						  sizeof(struct kvm_mmu_page),
						  0, 0, NULL);
	if (!mmu_page_header_cache)
		goto nomem;

	return 0;

nomem:
	kvm_mmu_module_exit();
	return -ENOMEM;
}

/*
 * Calculate mmu pages needed for kvm.
 */
unsigned int kvm_mmu_calculate_mmu_pages(struct kvm *kvm)
{
	int i;
	unsigned int nr_mmu_pages;
	unsigned int nr_pages = 0;

	for (i = 0; i < kvm->nmemslots; i++)
		nr_pages += kvm->memslots[i].npages;

	nr_mmu_pages = nr_pages * KVM_PERMILLE_MMU_PAGES / 1000;
	nr_mmu_pages = max(nr_mmu_pages,
			(unsigned int) KVM_MIN_ALLOC_MMU_PAGES);

	return nr_mmu_pages;
}
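
/*
 * Added illustration (not in the original source): KVM_PERMILLE_MMU_PAGES is
 * the number of shadow pages allotted per thousand guest pages (20, i.e. 2%,
 * at the time this file was written; check the header before relying on that
 * value). A guest with 1 GB of memory has 262144 4K pages, so it would be
 * given about 262144 * 20 / 1000 = 5242 shadow pages, subject to the
 * KVM_MIN_ALLOC_MMU_PAGES floor.
 */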

#ifdef AUDIT

static const char *audit_msg;

static gva_t canonicalize(gva_t gva)
{
#ifdef CONFIG_X86_64
	gva = (long long)(gva << 16) >> 16;
#endif
	return gva;
}

static void audit_mappings_page(struct kvm_vcpu *vcpu, u64 page_pte,
				gva_t va, int level)
{
	u64 *pt = __va(page_pte & PT64_BASE_ADDR_MASK);
	int i;
	gva_t va_delta = 1ul << (PAGE_SHIFT + 9 * (level - 1));

	for (i = 0; i < PT64_ENT_PER_PAGE; ++i, va += va_delta) {
		u64 ent = pt[i];

		if (ent == shadow_trap_nonpresent_pte)
			continue;

		va = canonicalize(va);
		if (level > 1) {
			if (ent == shadow_notrap_nonpresent_pte)
				printk(KERN_ERR "audit: (%s) nontrapping pte"
				       " in nonleaf level: levels %d gva %lx"
				       " level %d pte %llx\n", audit_msg,
				       vcpu->arch.mmu.root_level, va, level, ent);

			audit_mappings_page(vcpu, ent, va, level - 1);
		} else {
			gpa_t gpa = vcpu->arch.mmu.gva_to_gpa(vcpu, va);
			struct page *page = gpa_to_page(vcpu, gpa);
			hpa_t hpa = page_to_phys(page);

			if (is_shadow_present_pte(ent)
			    && (ent & PT64_BASE_ADDR_MASK) != hpa)
				printk(KERN_ERR "xx audit error: (%s) levels %d"
				       " gva %lx gpa %llx hpa %llx ent %llx %d\n",
				       audit_msg, vcpu->arch.mmu.root_level,
				       va, gpa, hpa, ent,
				       is_shadow_present_pte(ent));
			else if (ent == shadow_notrap_nonpresent_pte
				 && !is_error_hpa(hpa))
				printk(KERN_ERR "audit: (%s) notrap shadow,"
				       " valid guest gva %lx\n", audit_msg, va);
			kvm_release_page_clean(page);
		}
	}
}

static void audit_mappings(struct kvm_vcpu *vcpu)
{
	unsigned i;

	if (vcpu->arch.mmu.root_level == 4)
		audit_mappings_page(vcpu, vcpu->arch.mmu.root_hpa, 0, 4);
	else
		for (i = 0; i < 4; ++i)
			if (vcpu->arch.mmu.pae_root[i] & PT_PRESENT_MASK)
				audit_mappings_page(vcpu,
						    vcpu->arch.mmu.pae_root[i],
						    i << 30,
						    2);
}

static int count_rmaps(struct kvm_vcpu *vcpu)
{
	int nmaps = 0;
	int i, j, k;

	for (i = 0; i < KVM_MEMORY_SLOTS; ++i) {
		struct kvm_memory_slot *m = &vcpu->kvm->memslots[i];
		struct kvm_rmap_desc *d;

		for (j = 0; j < m->npages; ++j) {
			unsigned long *rmapp = &m->rmap[j];

			if (!*rmapp)
				continue;
			if (!(*rmapp & 1)) {
				++nmaps;
				continue;
			}
			d = (struct kvm_rmap_desc *)(*rmapp & ~1ul);
			while (d) {
				for (k = 0; k < RMAP_EXT; ++k)
					if (d->shadow_ptes[k])
						++nmaps;
					else
						break;
				d = d->more;
			}
		}
	}
	return nmaps;
}

static int count_writable_mappings(struct kvm_vcpu *vcpu)
{
	int nmaps = 0;
	struct kvm_mmu_page *sp;
	int i;

	list_for_each_entry(sp, &vcpu->kvm->arch.active_mmu_pages, link) {
		u64 *pt = sp->spt;

		if (sp->role.level != PT_PAGE_TABLE_LEVEL)
			continue;

		for (i = 0; i < PT64_ENT_PER_PAGE; ++i) {
			u64 ent = pt[i];

			if (!(ent & PT_PRESENT_MASK))
				continue;
			if (!(ent & PT_WRITABLE_MASK))
				continue;
			++nmaps;
		}
	}
	return nmaps;
}

static void audit_rmap(struct kvm_vcpu *vcpu)
{
	int n_rmap = count_rmaps(vcpu);
	int n_actual = count_writable_mappings(vcpu);

	if (n_rmap != n_actual)
		printk(KERN_ERR "%s: (%s) rmap %d actual %d\n",
		       __FUNCTION__, audit_msg, n_rmap, n_actual);
}

static void audit_write_protection(struct kvm_vcpu *vcpu)
{
	struct kvm_mmu_page *sp;
	struct kvm_memory_slot *slot;
	unsigned long *rmapp;
	gfn_t gfn;

	list_for_each_entry(sp, &vcpu->kvm->arch.active_mmu_pages, link) {
		if (sp->role.metaphysical)
			continue;

		slot = gfn_to_memslot(vcpu->kvm, sp->gfn);
		gfn = unalias_gfn(vcpu->kvm, sp->gfn);
		rmapp = &slot->rmap[gfn - slot->base_gfn];
		if (*rmapp)
			printk(KERN_ERR "%s: (%s) shadow page has writable"
			       " mappings: gfn %lx role %x\n",
			       __FUNCTION__, audit_msg, sp->gfn,
			       sp->role.word);
	}
}

static void kvm_mmu_audit(struct kvm_vcpu *vcpu, const char *msg)
{
	int olddbg = dbg;

	dbg = 0;
	audit_msg = msg;
	audit_rmap(vcpu);
	audit_write_protection(vcpu);
	audit_mappings(vcpu);
	dbg = olddbg;
}

#endif