paging_tmpl.h

/*
 * Kernel-based Virtual Machine driver for Linux
 *
 * This module enables machines with Intel VT-x extensions to run virtual
 * machines without emulation or binary translation.
 *
 * MMU support
 *
 * Copyright (C) 2006 Qumranet, Inc.
 * Copyright 2010 Red Hat, Inc. and/or its affiliates.
 *
 * Authors:
 *	Yaniv Kamay  <yaniv@qumranet.com>
 *	Avi Kivity   <avi@qumranet.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 */
/*
 * We need the mmu code to access both 32-bit and 64-bit guest ptes,
 * so the code in this file is compiled twice, once per pte size.
 */
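/*
 * For illustration: this template is not compiled on its own.  The including
 * C file (mmu.c in this tree) is expected to define PTTYPE and pull the
 * header in once per pte size, roughly as sketched below; the exact include
 * site may differ, so treat this as an assumed usage pattern rather than a
 * quote of that file.
 *
 *	#define PTTYPE 64
 *	#include "paging_tmpl.h"
 *	#undef PTTYPE
 *
 *	#define PTTYPE 32
 *	#include "paging_tmpl.h"
 *	#undef PTTYPE
 */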
#if PTTYPE == 64
	#define pt_element_t u64
	#define guest_walker guest_walker64
	#define FNAME(name) paging##64_##name
	#define PT_BASE_ADDR_MASK PT64_BASE_ADDR_MASK
	#define PT_LVL_ADDR_MASK(lvl) PT64_LVL_ADDR_MASK(lvl)
	#define PT_LVL_OFFSET_MASK(lvl) PT64_LVL_OFFSET_MASK(lvl)
	#define PT_INDEX(addr, level) PT64_INDEX(addr, level)
	#define PT_LEVEL_MASK(level) PT64_LEVEL_MASK(level)
	#define PT_LEVEL_BITS PT64_LEVEL_BITS
	#ifdef CONFIG_X86_64
	#define PT_MAX_FULL_LEVELS 4
	#define CMPXCHG cmpxchg
	#else
	#define CMPXCHG cmpxchg64
	#define PT_MAX_FULL_LEVELS 2
	#endif
#elif PTTYPE == 32
	#define pt_element_t u32
	#define guest_walker guest_walker32
	#define FNAME(name) paging##32_##name
	#define PT_BASE_ADDR_MASK PT32_BASE_ADDR_MASK
	#define PT_LVL_ADDR_MASK(lvl) PT32_LVL_ADDR_MASK(lvl)
	#define PT_LVL_OFFSET_MASK(lvl) PT32_LVL_OFFSET_MASK(lvl)
	#define PT_INDEX(addr, level) PT32_INDEX(addr, level)
	#define PT_LEVEL_MASK(level) PT32_LEVEL_MASK(level)
	#define PT_LEVEL_BITS PT32_LEVEL_BITS
	#define PT_MAX_FULL_LEVELS 2
	#define CMPXCHG cmpxchg
#else
	#error Invalid PTTYPE value
#endif

#define gpte_to_gfn_lvl FNAME(gpte_to_gfn_lvl)
#define gpte_to_gfn(pte) gpte_to_gfn_lvl((pte), PT_PAGE_TABLE_LEVEL)
/*
 * The guest_walker structure emulates the behavior of the hardware page
 * table walker.
 */
struct guest_walker {
	int level;
	gfn_t table_gfn[PT_MAX_FULL_LEVELS];
	pt_element_t ptes[PT_MAX_FULL_LEVELS];
	pt_element_t prefetch_ptes[PTE_PREFETCH_NUM];
	gpa_t pte_gpa[PT_MAX_FULL_LEVELS];
	unsigned pt_access;
	unsigned pte_access;
	gfn_t gfn;
	u32 error_code;
};
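/*
 * The per-level arrays above are indexed by (level - 1), as filled in by
 * walk_addr_generic() below.  For example, in a 4-level 64-bit walk, ptes[3]
 * holds the PML4E, ptes[2] the PDPTE, ptes[1] the PDE and ptes[0] the leaf
 * PTE; table_gfn[] and pte_gpa[] follow the same layout.
 */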
static gfn_t gpte_to_gfn_lvl(pt_element_t gpte, int lvl)
{
	return (gpte & PT_LVL_ADDR_MASK(lvl)) >> PAGE_SHIFT;
}

static bool FNAME(cmpxchg_gpte)(struct kvm *kvm,
			 gfn_t table_gfn, unsigned index,
			 pt_element_t orig_pte, pt_element_t new_pte)
{
	pt_element_t ret;
	pt_element_t *table;
	struct page *page;

	page = gfn_to_page(kvm, table_gfn);

	table = kmap_atomic(page, KM_USER0);
	ret = CMPXCHG(&table[index], orig_pte, new_pte);
	kunmap_atomic(table, KM_USER0);

	kvm_release_page_dirty(page);

	return (ret != orig_pte);
}
static unsigned FNAME(gpte_access)(struct kvm_vcpu *vcpu, pt_element_t gpte)
{
	unsigned access;

	access = (gpte & (PT_WRITABLE_MASK | PT_USER_MASK)) | ACC_EXEC_MASK;
#if PTTYPE == 64
	if (is_nx(vcpu))
		access &= ~(gpte >> PT64_NX_SHIFT);
#endif
	return access;
}
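/*
 * Worked example, assuming the ACC_* encoding used by the rest of the mmu
 * code (ACC_EXEC_MASK == 1, ACC_WRITE_MASK == PT_WRITABLE_MASK and
 * ACC_USER_MASK == PT_USER_MASK): for a writable, user gpte with the NX bit
 * set, the first line yields write | user | exec, and "gpte >> PT64_NX_SHIFT"
 * moves the NX bit (bit 63) down to bit 0, so the mask-off clears exactly
 * ACC_EXEC_MASK, leaving write + user permission only.
 */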
/*
 * Fetch a guest pte for a guest virtual address
 */
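/*
 * At each level of the walk below, PT_INDEX() extracts the table index for
 * that level from the guest virtual address.  Assuming the usual
 * PT64_INDEX()/PT32_INDEX() definitions behind the macro, this is roughly
 *
 *	index = (addr >> (PAGE_SHIFT + (level - 1) * PT_LEVEL_BITS)) &
 *		((1 << PT_LEVEL_BITS) - 1);
 *
 * i.e. 9-bit indices for 64-bit ptes and 10-bit indices for 32-bit ptes.
 */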
static int FNAME(walk_addr_generic)(struct guest_walker *walker,
				    struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
				    gva_t addr, int write_fault,
				    int user_fault, int fetch_fault)
{
	pt_element_t pte;
	gfn_t table_gfn;
	unsigned index, pt_access, uninitialized_var(pte_access);
	gpa_t pte_gpa;
	bool eperm, present, rsvd_fault;

	trace_kvm_mmu_pagetable_walk(addr, write_fault, user_fault,
				     fetch_fault);
walk:
	present = true;
	eperm = rsvd_fault = false;
	walker->level = mmu->root_level;
	pte = mmu->get_cr3(vcpu);

#if PTTYPE == 64
	if (walker->level == PT32E_ROOT_LEVEL) {
		pte = kvm_pdptr_read(vcpu, (addr >> 30) & 3);
		trace_kvm_mmu_paging_element(pte, walker->level);
		if (!is_present_gpte(pte)) {
			present = false;
			goto error;
		}
		--walker->level;
	}
#endif
	ASSERT((!is_long_mode(vcpu) && is_pae(vcpu)) ||
	       (mmu->get_cr3(vcpu) & CR3_NONPAE_RESERVED_BITS) == 0);

	pt_access = ACC_ALL;

	for (;;) {
		index = PT_INDEX(addr, walker->level);

		table_gfn = gpte_to_gfn(pte);
		pte_gpa = gfn_to_gpa(table_gfn);
		pte_gpa += index * sizeof(pt_element_t);
		walker->table_gfn[walker->level - 1] = table_gfn;
		walker->pte_gpa[walker->level - 1] = pte_gpa;

		if (kvm_read_guest(vcpu->kvm, pte_gpa, &pte, sizeof(pte))) {
			present = false;
			break;
		}

		trace_kvm_mmu_paging_element(pte, walker->level);

		if (!is_present_gpte(pte)) {
			present = false;
			break;
		}

		if (is_rsvd_bits_set(&vcpu->arch.mmu, pte, walker->level)) {
			rsvd_fault = true;
			break;
		}

		if (write_fault && !is_writable_pte(pte))
			if (user_fault || is_write_protection(vcpu))
				eperm = true;

		if (user_fault && !(pte & PT_USER_MASK))
			eperm = true;

#if PTTYPE == 64
		if (fetch_fault && (pte & PT64_NX_MASK))
			eperm = true;
#endif

		if (!eperm && !rsvd_fault && !(pte & PT_ACCESSED_MASK)) {
			trace_kvm_mmu_set_accessed_bit(table_gfn, index,
						       sizeof(pte));
			if (FNAME(cmpxchg_gpte)(vcpu->kvm, table_gfn,
			    index, pte, pte|PT_ACCESSED_MASK))
				goto walk;
			mark_page_dirty(vcpu->kvm, table_gfn);
			pte |= PT_ACCESSED_MASK;
		}

		pte_access = pt_access & FNAME(gpte_access)(vcpu, pte);

		walker->ptes[walker->level - 1] = pte;

		if ((walker->level == PT_PAGE_TABLE_LEVEL) ||
		    ((walker->level == PT_DIRECTORY_LEVEL) &&
				is_large_pte(pte) &&
				(PTTYPE == 64 || is_pse(vcpu))) ||
		    ((walker->level == PT_PDPE_LEVEL) &&
				is_large_pte(pte) &&
				mmu->root_level == PT64_ROOT_LEVEL)) {
			int lvl = walker->level;

			walker->gfn = gpte_to_gfn_lvl(pte, lvl);
			walker->gfn += (addr & PT_LVL_OFFSET_MASK(lvl))
					>> PAGE_SHIFT;

			if (PTTYPE == 32 &&
			    walker->level == PT_DIRECTORY_LEVEL &&
			    is_cpuid_PSE36())
				walker->gfn += pse36_gfn_delta(pte);

			break;
		}

		pt_access = pte_access;
		--walker->level;
	}

	if (!present || eperm || rsvd_fault)
		goto error;

	if (write_fault && !is_dirty_gpte(pte)) {
		bool ret;

		trace_kvm_mmu_set_dirty_bit(table_gfn, index, sizeof(pte));
		ret = FNAME(cmpxchg_gpte)(vcpu->kvm, table_gfn, index, pte,
			    pte|PT_DIRTY_MASK);
		if (ret)
			goto walk;
		mark_page_dirty(vcpu->kvm, table_gfn);
		pte |= PT_DIRTY_MASK;
		walker->ptes[walker->level - 1] = pte;
	}

	walker->pt_access = pt_access;
	walker->pte_access = pte_access;
	pgprintk("%s: pte %llx pte_access %x pt_access %x\n",
		 __func__, (u64)pte, pte_access, pt_access);
	return 1;

error:
	walker->error_code = 0;
	if (present)
		walker->error_code |= PFERR_PRESENT_MASK;
	if (write_fault)
		walker->error_code |= PFERR_WRITE_MASK;
	if (user_fault)
		walker->error_code |= PFERR_USER_MASK;
	if (fetch_fault && is_nx(vcpu))
		walker->error_code |= PFERR_FETCH_MASK;
	if (rsvd_fault)
		walker->error_code |= PFERR_RSVD_MASK;

	vcpu->arch.fault.address = addr;
	vcpu->arch.fault.error_code = walker->error_code;

	trace_kvm_mmu_walker_error(walker->error_code);
	return 0;
}

static int FNAME(walk_addr)(struct guest_walker *walker,
			    struct kvm_vcpu *vcpu, gva_t addr,
			    int write_fault, int user_fault, int fetch_fault)
{
	return FNAME(walk_addr_generic)(walker, vcpu, &vcpu->arch.mmu, addr,
					write_fault, user_fault, fetch_fault);
}
static void FNAME(update_pte)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
			      u64 *spte, const void *pte)
{
	pt_element_t gpte;
	unsigned pte_access;
	pfn_t pfn;
	u64 new_spte;

	gpte = *(const pt_element_t *)pte;
	if (~gpte & (PT_PRESENT_MASK | PT_ACCESSED_MASK)) {
		if (!is_present_gpte(gpte)) {
			if (sp->unsync)
				new_spte = shadow_trap_nonpresent_pte;
			else
				new_spte = shadow_notrap_nonpresent_pte;
			__set_spte(spte, new_spte);
		}
		return;
	}
	pgprintk("%s: gpte %llx spte %p\n", __func__, (u64)gpte, spte);
	pte_access = sp->role.access & FNAME(gpte_access)(vcpu, gpte);
	if (gpte_to_gfn(gpte) != vcpu->arch.update_pte.gfn)
		return;
	pfn = vcpu->arch.update_pte.pfn;
	if (is_error_pfn(pfn))
		return;
	if (mmu_notifier_retry(vcpu, vcpu->arch.update_pte.mmu_seq))
		return;
	kvm_get_pfn(pfn);
	/*
	 * we call mmu_set_spte() with reset_host_protection = true because
	 * vcpu->arch.update_pte.pfn was fetched from get_user_pages(write = 1).
	 */
	mmu_set_spte(vcpu, spte, sp->role.access, pte_access, 0, 0,
		     is_dirty_gpte(gpte), NULL, PT_PAGE_TABLE_LEVEL,
		     gpte_to_gfn(gpte), pfn, true, true);
}
static bool FNAME(gpte_changed)(struct kvm_vcpu *vcpu,
				struct guest_walker *gw, int level)
{
	pt_element_t curr_pte;
	gpa_t base_gpa, pte_gpa = gw->pte_gpa[level - 1];
	u64 mask;
	int r, index;

	if (level == PT_PAGE_TABLE_LEVEL) {
		mask = PTE_PREFETCH_NUM * sizeof(pt_element_t) - 1;
		base_gpa = pte_gpa & ~mask;
		index = (pte_gpa - base_gpa) / sizeof(pt_element_t);

		r = kvm_read_guest_atomic(vcpu->kvm, base_gpa,
				gw->prefetch_ptes, sizeof(gw->prefetch_ptes));
		curr_pte = gw->prefetch_ptes[index];
	} else
		r = kvm_read_guest_atomic(vcpu->kvm, pte_gpa,
				  &curr_pte, sizeof(curr_pte));

	return r || curr_pte != gw->ptes[level - 1];
}

static void FNAME(pte_prefetch)(struct kvm_vcpu *vcpu, struct guest_walker *gw,
				u64 *sptep)
{
	struct kvm_mmu_page *sp;
	struct kvm_mmu *mmu = &vcpu->arch.mmu;
	pt_element_t *gptep = gw->prefetch_ptes;
	u64 *spte;
	int i;

	sp = page_header(__pa(sptep));

	if (sp->role.level > PT_PAGE_TABLE_LEVEL)
		return;

	if (sp->role.direct)
		return __direct_pte_prefetch(vcpu, sp, sptep);

	i = (sptep - sp->spt) & ~(PTE_PREFETCH_NUM - 1);
	spte = sp->spt + i;

	for (i = 0; i < PTE_PREFETCH_NUM; i++, spte++) {
		pt_element_t gpte;
		unsigned pte_access;
		gfn_t gfn;
		pfn_t pfn;
		bool dirty;

		if (spte == sptep)
			continue;

		if (*spte != shadow_trap_nonpresent_pte)
			continue;

		gpte = gptep[i];

		if (!is_present_gpte(gpte) ||
		      is_rsvd_bits_set(mmu, gpte, PT_PAGE_TABLE_LEVEL)) {
			if (!sp->unsync)
				__set_spte(spte, shadow_notrap_nonpresent_pte);
			continue;
		}

		if (!(gpte & PT_ACCESSED_MASK))
			continue;

		pte_access = sp->role.access & FNAME(gpte_access)(vcpu, gpte);
		gfn = gpte_to_gfn(gpte);
		dirty = is_dirty_gpte(gpte);
		pfn = pte_prefetch_gfn_to_pfn(vcpu, gfn,
				      (pte_access & ACC_WRITE_MASK) && dirty);
		if (is_error_pfn(pfn)) {
			kvm_release_pfn_clean(pfn);
			break;
		}

		mmu_set_spte(vcpu, spte, sp->role.access, pte_access, 0, 0,
			     dirty, NULL, PT_PAGE_TABLE_LEVEL, gfn,
			     pfn, true, true);
	}
}
/*
 * Fetch a shadow pte for a specific level in the paging hierarchy.
 */
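/*
 * In outline: while the shadow walk is still above the guest's own mapping
 * level (gw->level), make sure an indirect shadow page exists for the
 * corresponding guest table and re-check the guest pte that has just been
 * write protected; from gw->level down to the host mapping level (hlevel),
 * link direct shadow pages instead, since the guest has no tables there.
 * Finally install the leaf spte at hlevel and prefetch its neighbours.
 */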
static u64 *FNAME(fetch)(struct kvm_vcpu *vcpu, gva_t addr,
			 struct guest_walker *gw,
			 int user_fault, int write_fault, int hlevel,
			 int *ptwrite, pfn_t pfn)
{
	unsigned access = gw->pt_access;
	struct kvm_mmu_page *sp = NULL;
	bool dirty = is_dirty_gpte(gw->ptes[gw->level - 1]);
	int top_level;
	unsigned direct_access;
	struct kvm_shadow_walk_iterator it;

	if (!is_present_gpte(gw->ptes[gw->level - 1]))
		return NULL;

	direct_access = gw->pt_access & gw->pte_access;
	if (!dirty)
		direct_access &= ~ACC_WRITE_MASK;

	top_level = vcpu->arch.mmu.root_level;
	if (top_level == PT32E_ROOT_LEVEL)
		top_level = PT32_ROOT_LEVEL;
	/*
	 * Verify that the top-level gpte is still there.  Since the page
	 * is a root page, it is either write protected (and cannot be
	 * changed from now on) or it is invalid (in which case, we don't
	 * really care if it changes underneath us after this point).
	 */
	if (FNAME(gpte_changed)(vcpu, gw, top_level))
		goto out_gpte_changed;

	for (shadow_walk_init(&it, vcpu, addr);
	     shadow_walk_okay(&it) && it.level > gw->level;
	     shadow_walk_next(&it)) {
		gfn_t table_gfn;

		drop_large_spte(vcpu, it.sptep);

		sp = NULL;
		if (!is_shadow_present_pte(*it.sptep)) {
			table_gfn = gw->table_gfn[it.level - 2];
			sp = kvm_mmu_get_page(vcpu, table_gfn, addr, it.level-1,
					      false, access, it.sptep);
		}

		/*
		 * Verify that the gpte in the page we've just write
		 * protected is still there.
		 */
		if (FNAME(gpte_changed)(vcpu, gw, it.level - 1))
			goto out_gpte_changed;

		if (sp)
			link_shadow_page(it.sptep, sp);
	}

	for (;
	     shadow_walk_okay(&it) && it.level > hlevel;
	     shadow_walk_next(&it)) {
		gfn_t direct_gfn;

		validate_direct_spte(vcpu, it.sptep, direct_access);

		drop_large_spte(vcpu, it.sptep);

		if (is_shadow_present_pte(*it.sptep))
			continue;

		direct_gfn = gw->gfn & ~(KVM_PAGES_PER_HPAGE(it.level) - 1);

		sp = kvm_mmu_get_page(vcpu, direct_gfn, addr, it.level-1,
				      true, direct_access, it.sptep);
		link_shadow_page(it.sptep, sp);
	}

	mmu_set_spte(vcpu, it.sptep, access, gw->pte_access & access,
		     user_fault, write_fault, dirty, ptwrite, it.level,
		     gw->gfn, pfn, false, true);
	FNAME(pte_prefetch)(vcpu, gw, it.sptep);

	return it.sptep;

out_gpte_changed:
	if (sp)
		kvm_mmu_put_page(sp, it.sptep);
	kvm_release_pfn_clean(pfn);
	return NULL;
}

/*
 * Page fault handler.  There are several causes for a page fault:
 *   - there is no shadow pte for the guest pte
 *   - write access through a shadow pte marked read only so that we can set
 *     the dirty bit
 *   - write access to a shadow pte marked read only so we can update the page
 *     dirty bitmap, when userspace requests it
 *   - mmio access; in this case we will never install a present shadow pte
 *   - normal guest page fault due to the guest pte marked not present, not
 *     writable, or not executable
 *
 * Returns: 1 if we need to emulate the instruction, 0 otherwise, or
 *          a negative value on error.
 */
static int FNAME(page_fault)(struct kvm_vcpu *vcpu, gva_t addr,
			     u32 error_code)
{
	int write_fault = error_code & PFERR_WRITE_MASK;
	int user_fault = error_code & PFERR_USER_MASK;
	int fetch_fault = error_code & PFERR_FETCH_MASK;
	struct guest_walker walker;
	u64 *sptep;
	int write_pt = 0;
	int r;
	pfn_t pfn;
	int level = PT_PAGE_TABLE_LEVEL;
	unsigned long mmu_seq;

	pgprintk("%s: addr %lx err %x\n", __func__, addr, error_code);

	r = mmu_topup_memory_caches(vcpu);
	if (r)
		return r;

	/*
	 * Look up the guest pte for the faulting address.
	 */
	r = FNAME(walk_addr)(&walker, vcpu, addr, write_fault, user_fault,
			     fetch_fault);

	/*
	 * The page is not mapped by the guest.  Let the guest handle it.
	 */
	if (!r) {
		pgprintk("%s: guest page fault\n", __func__);
		inject_page_fault(vcpu);
		vcpu->arch.last_pt_write_count = 0; /* reset fork detector */
		return 0;
	}

	if (walker.level >= PT_DIRECTORY_LEVEL) {
		level = min(walker.level, mapping_level(vcpu, walker.gfn));
		walker.gfn = walker.gfn & ~(KVM_PAGES_PER_HPAGE(level) - 1);
	}

	mmu_seq = vcpu->kvm->mmu_notifier_seq;
	smp_rmb();
	pfn = gfn_to_pfn(vcpu->kvm, walker.gfn);

	/* mmio */
	if (is_error_pfn(pfn))
		return kvm_handle_bad_page(vcpu->kvm, walker.gfn, pfn);

	spin_lock(&vcpu->kvm->mmu_lock);
	if (mmu_notifier_retry(vcpu, mmu_seq))
		goto out_unlock;

	trace_kvm_mmu_audit(vcpu, AUDIT_PRE_PAGE_FAULT);
	kvm_mmu_free_some_pages(vcpu);
	sptep = FNAME(fetch)(vcpu, addr, &walker, user_fault, write_fault,
			     level, &write_pt, pfn);
	(void)sptep;
	pgprintk("%s: shadow pte %p %llx ptwrite %d\n", __func__,
		 sptep, *sptep, write_pt);

	if (!write_pt)
		vcpu->arch.last_pt_write_count = 0; /* reset fork detector */

	++vcpu->stat.pf_fixed;
	trace_kvm_mmu_audit(vcpu, AUDIT_POST_PAGE_FAULT);
	spin_unlock(&vcpu->kvm->mmu_lock);

	return write_pt;

out_unlock:
	spin_unlock(&vcpu->kvm->mmu_lock);
	kvm_release_pfn_clean(pfn);
	return 0;
}

static void FNAME(invlpg)(struct kvm_vcpu *vcpu, gva_t gva)
{
	struct kvm_shadow_walk_iterator iterator;
	struct kvm_mmu_page *sp;
	gpa_t pte_gpa = -1;
	int level;
	u64 *sptep;
	int need_flush = 0;

	spin_lock(&vcpu->kvm->mmu_lock);

	for_each_shadow_entry(vcpu, gva, iterator) {
		level = iterator.level;
		sptep = iterator.sptep;

		sp = page_header(__pa(sptep));
		if (is_last_spte(*sptep, level)) {
			int offset, shift;

			if (!sp->unsync)
				break;

			shift = PAGE_SHIFT -
				  (PT_LEVEL_BITS - PT64_LEVEL_BITS) * level;
			offset = sp->role.quadrant << shift;

			pte_gpa = (sp->gfn << PAGE_SHIFT) + offset;
			pte_gpa += (sptep - sp->spt) * sizeof(pt_element_t);

			if (is_shadow_present_pte(*sptep)) {
				if (is_large_pte(*sptep))
					--vcpu->kvm->stat.lpages;
				drop_spte(vcpu->kvm, sptep,
					  shadow_trap_nonpresent_pte);
				need_flush = 1;
			} else
				__set_spte(sptep, shadow_trap_nonpresent_pte);
			break;
		}

		if (!is_shadow_present_pte(*sptep) || !sp->unsync_children)
			break;
	}

	if (need_flush)
		kvm_flush_remote_tlbs(vcpu->kvm);

	atomic_inc(&vcpu->kvm->arch.invlpg_counter);

	spin_unlock(&vcpu->kvm->mmu_lock);

	if (pte_gpa == -1)
		return;

	if (mmu_topup_memory_caches(vcpu))
		return;
	kvm_mmu_pte_write(vcpu, pte_gpa, NULL, sizeof(pt_element_t), 0);
}
static gpa_t FNAME(gva_to_gpa)(struct kvm_vcpu *vcpu, gva_t vaddr, u32 access,
			       u32 *error)
{
	struct guest_walker walker;
	gpa_t gpa = UNMAPPED_GVA;
	int r;

	r = FNAME(walk_addr)(&walker, vcpu, vaddr,
			     !!(access & PFERR_WRITE_MASK),
			     !!(access & PFERR_USER_MASK),
			     !!(access & PFERR_FETCH_MASK));

	if (r) {
		gpa = gfn_to_gpa(walker.gfn);
		gpa |= vaddr & ~PAGE_MASK;
	} else if (error)
		*error = walker.error_code;

	return gpa;
}

static void FNAME(prefetch_page)(struct kvm_vcpu *vcpu,
				 struct kvm_mmu_page *sp)
{
	int i, j, offset, r;
	pt_element_t pt[256 / sizeof(pt_element_t)];
	gpa_t pte_gpa;

	if (sp->role.direct
	    || (PTTYPE == 32 && sp->role.level > PT_PAGE_TABLE_LEVEL)) {
		nonpaging_prefetch_page(vcpu, sp);
		return;
	}

	pte_gpa = gfn_to_gpa(sp->gfn);
	if (PTTYPE == 32) {
		offset = sp->role.quadrant << PT64_LEVEL_BITS;
		pte_gpa += offset * sizeof(pt_element_t);
	}

	for (i = 0; i < PT64_ENT_PER_PAGE; i += ARRAY_SIZE(pt)) {
		r = kvm_read_guest_atomic(vcpu->kvm, pte_gpa, pt, sizeof pt);
		pte_gpa += ARRAY_SIZE(pt) * sizeof(pt_element_t);
		for (j = 0; j < ARRAY_SIZE(pt); ++j)
			if (r || is_present_gpte(pt[j]))
				sp->spt[i+j] = shadow_trap_nonpresent_pte;
			else
				sp->spt[i+j] = shadow_notrap_nonpresent_pte;
	}
}
/*
 * Using the cached information from sp->gfns is safe because:
 * - The spte has a reference to the struct page, so the pfn for a given gfn
 *   can't change unless all sptes pointing to it are nuked first.
 */
static int FNAME(sync_page)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
			    bool clear_unsync)
{
	int i, offset, nr_present;
	bool reset_host_protection;
	gpa_t first_pte_gpa;

	offset = nr_present = 0;

	/* direct kvm_mmu_page can not be unsync. */
	BUG_ON(sp->role.direct);

	if (PTTYPE == 32)
		offset = sp->role.quadrant << PT64_LEVEL_BITS;

	first_pte_gpa = gfn_to_gpa(sp->gfn) + offset * sizeof(pt_element_t);

	for (i = 0; i < PT64_ENT_PER_PAGE; i++) {
		unsigned pte_access;
		pt_element_t gpte;
		gpa_t pte_gpa;
		gfn_t gfn;

		if (!is_shadow_present_pte(sp->spt[i]))
			continue;

		pte_gpa = first_pte_gpa + i * sizeof(pt_element_t);

		if (kvm_read_guest_atomic(vcpu->kvm, pte_gpa, &gpte,
					  sizeof(pt_element_t)))
			return -EINVAL;

		gfn = gpte_to_gfn(gpte);
		if (is_rsvd_bits_set(&vcpu->arch.mmu, gpte, PT_PAGE_TABLE_LEVEL)
		      || gfn != sp->gfns[i] || !is_present_gpte(gpte)
		      || !(gpte & PT_ACCESSED_MASK)) {
			u64 nonpresent;

			if (is_present_gpte(gpte) || !clear_unsync)
				nonpresent = shadow_trap_nonpresent_pte;
			else
				nonpresent = shadow_notrap_nonpresent_pte;
			drop_spte(vcpu->kvm, &sp->spt[i], nonpresent);
			continue;
		}

		nr_present++;
		pte_access = sp->role.access & FNAME(gpte_access)(vcpu, gpte);
		if (!(sp->spt[i] & SPTE_HOST_WRITEABLE)) {
			pte_access &= ~ACC_WRITE_MASK;
			reset_host_protection = 0;
		} else {
			reset_host_protection = 1;
		}
		set_spte(vcpu, &sp->spt[i], pte_access, 0, 0,
			 is_dirty_gpte(gpte), PT_PAGE_TABLE_LEVEL, gfn,
			 spte_to_pfn(sp->spt[i]), true, false,
			 reset_host_protection);
	}

	return !nr_present;
}
#undef pt_element_t
#undef guest_walker
#undef FNAME
#undef PT_BASE_ADDR_MASK
#undef PT_INDEX
#undef PT_LEVEL_MASK
#undef PT_LVL_ADDR_MASK
#undef PT_LVL_OFFSET_MASK
#undef PT_LEVEL_BITS
#undef PT_MAX_FULL_LEVELS
#undef gpte_to_gfn
#undef gpte_to_gfn_lvl
#undef CMPXCHG