/*
 * Kernel-based Virtual Machine driver for Linux
 *
 * This module enables machines with Intel VT-x extensions to run virtual
 * machines without emulation or binary translation.
 *
 * MMU support
 *
 * Copyright (C) 2006 Qumranet, Inc.
 * Copyright 2010 Red Hat, Inc. and/or its affiliates.
 *
 * Authors:
 *	Yaniv Kamay  <yaniv@qumranet.com>
 *	Avi Kivity   <avi@qumranet.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 */

/*
 * We need the mmu code to access both 32-bit and 64-bit guest ptes,
 * so the code in this file is compiled twice, once per pte size.
 */
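/*
 * For reference, the including file (arch/x86/kvm/mmu.c) instantiates both
 * variants roughly like this -- a sketch of the include pattern, not a
 * verbatim copy of mmu.c:
 *
 *	#define PTTYPE 64
 *	#include "paging_tmpl.h"
 *	#undef PTTYPE
 *
 *	#define PTTYPE 32
 *	#include "paging_tmpl.h"
 *	#undef PTTYPE
 */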
#if PTTYPE == 64
	#define pt_element_t u64
	#define guest_walker guest_walker64
	#define FNAME(name) paging##64_##name
	#define PT_BASE_ADDR_MASK PT64_BASE_ADDR_MASK
	#define PT_LVL_ADDR_MASK(lvl) PT64_LVL_ADDR_MASK(lvl)
	#define PT_LVL_OFFSET_MASK(lvl) PT64_LVL_OFFSET_MASK(lvl)
	#define PT_INDEX(addr, level) PT64_INDEX(addr, level)
	#define PT_LEVEL_BITS PT64_LEVEL_BITS
	#ifdef CONFIG_X86_64
	#define PT_MAX_FULL_LEVELS 4
	#define CMPXCHG cmpxchg
	#else
	#define CMPXCHG cmpxchg64
	#define PT_MAX_FULL_LEVELS 2
	#endif
#elif PTTYPE == 32
	#define pt_element_t u32
	#define guest_walker guest_walker32
	#define FNAME(name) paging##32_##name
	#define PT_BASE_ADDR_MASK PT32_BASE_ADDR_MASK
	#define PT_LVL_ADDR_MASK(lvl) PT32_LVL_ADDR_MASK(lvl)
	#define PT_LVL_OFFSET_MASK(lvl) PT32_LVL_OFFSET_MASK(lvl)
	#define PT_INDEX(addr, level) PT32_INDEX(addr, level)
	#define PT_LEVEL_BITS PT32_LEVEL_BITS
	#define PT_MAX_FULL_LEVELS 2
	#define CMPXCHG cmpxchg
#else
	#error Invalid PTTYPE value
#endif

#define gpte_to_gfn_lvl FNAME(gpte_to_gfn_lvl)
#define gpte_to_gfn(pte) gpte_to_gfn_lvl((pte), PT_PAGE_TABLE_LEVEL)

/*
 * The guest_walker structure emulates the behavior of the hardware page
 * table walker.
 */
struct guest_walker {
	int level;
	unsigned max_level;
	gfn_t table_gfn[PT_MAX_FULL_LEVELS];
	pt_element_t ptes[PT_MAX_FULL_LEVELS];
	pt_element_t prefetch_ptes[PTE_PREFETCH_NUM];
	gpa_t pte_gpa[PT_MAX_FULL_LEVELS];
	pt_element_t __user *ptep_user[PT_MAX_FULL_LEVELS];
	unsigned pt_access;
	unsigned pte_access;
	gfn_t gfn;
	struct x86_exception fault;
};

static gfn_t gpte_to_gfn_lvl(pt_element_t gpte, int lvl)
{
	return (gpte & PT_LVL_ADDR_MASK(lvl)) >> PAGE_SHIFT;
}
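/*
 * Compare-and-exchange a guest pte in place in host user memory.  Returns a
 * negative errno if the host page cannot be pinned, a non-zero value if the
 * pte no longer matched orig_pte (the guest changed it under us), and 0 on
 * success.
 */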
static int FNAME(cmpxchg_gpte)(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
			       pt_element_t __user *ptep_user, unsigned index,
			       pt_element_t orig_pte, pt_element_t new_pte)
{
	int npages;
	pt_element_t ret;
	pt_element_t *table;
	struct page *page;

	npages = get_user_pages_fast((unsigned long)ptep_user, 1, 1, &page);
	/* Check if the user is doing something meaningless. */
	if (unlikely(npages != 1))
		return -EFAULT;

	table = kmap_atomic(page);
	ret = CMPXCHG(&table[index], orig_pte, new_pte);
	kunmap_atomic(table);

	kvm_release_page_dirty(page);

	return (ret != orig_pte);
}
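/*
 * Walk back over the gptes recorded by the walker and set the accessed bit
 * at each level (plus the dirty bit at the final level for a write fault),
 * using cmpxchg so that a racing guest update forces a re-walk.
 */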
static int FNAME(update_accessed_dirty_bits)(struct kvm_vcpu *vcpu,
					     struct kvm_mmu *mmu,
					     struct guest_walker *walker,
					     int write_fault)
{
	unsigned level, index;
	pt_element_t pte, orig_pte;
	pt_element_t __user *ptep_user;
	gfn_t table_gfn;
	int ret;

	for (level = walker->max_level; level >= walker->level; --level) {
		pte = orig_pte = walker->ptes[level - 1];
		table_gfn = walker->table_gfn[level - 1];
		ptep_user = walker->ptep_user[level - 1];
		index = offset_in_page(ptep_user) / sizeof(pt_element_t);
		if (!(pte & PT_ACCESSED_MASK)) {
			trace_kvm_mmu_set_accessed_bit(table_gfn, index, sizeof(pte));
			pte |= PT_ACCESSED_MASK;
		}
		if (level == walker->level && write_fault && !is_dirty_gpte(pte)) {
			trace_kvm_mmu_set_dirty_bit(table_gfn, index, sizeof(pte));
			pte |= PT_DIRTY_MASK;
		}
		if (pte == orig_pte)
			continue;

		ret = FNAME(cmpxchg_gpte)(vcpu, mmu, ptep_user, index, orig_pte, pte);
		if (ret)
			return ret;

		mark_page_dirty(vcpu->kvm, table_gfn);
		walker->ptes[level - 1] = pte;
	}
	return 0;
}
/*
 * Fetch a guest pte for a guest virtual address
 */
static int FNAME(walk_addr_generic)(struct guest_walker *walker,
				    struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
				    gva_t addr, u32 access)
{
	int ret;
	pt_element_t pte;
	pt_element_t __user *uninitialized_var(ptep_user);
	gfn_t table_gfn;
	unsigned index, pt_access, pte_access, accessed_dirty;
	gpa_t pte_gpa;
	int offset;
	const int write_fault = access & PFERR_WRITE_MASK;
	const int user_fault  = access & PFERR_USER_MASK;
	const int fetch_fault = access & PFERR_FETCH_MASK;
	u16 errcode = 0;
	gpa_t real_gpa;
	gfn_t gfn;

	trace_kvm_mmu_pagetable_walk(addr, access);
retry_walk:
	walker->level = mmu->root_level;
	pte           = mmu->get_cr3(vcpu);

#if PTTYPE == 64
	if (walker->level == PT32E_ROOT_LEVEL) {
		pte = mmu->get_pdptr(vcpu, (addr >> 30) & 3);
		trace_kvm_mmu_paging_element(pte, walker->level);
		if (!is_present_gpte(pte))
			goto error;
		--walker->level;
	}
#endif
	walker->max_level = walker->level;
	ASSERT((!is_long_mode(vcpu) && is_pae(vcpu)) ||
	       (mmu->get_cr3(vcpu) & CR3_NONPAE_RESERVED_BITS) == 0);

	accessed_dirty = PT_ACCESSED_MASK;
	pt_access = pte_access = ACC_ALL;
	++walker->level;

	do {
		gfn_t real_gfn;
		unsigned long host_addr;

		pt_access &= pte_access;
		--walker->level;

		index = PT_INDEX(addr, walker->level);

		table_gfn = gpte_to_gfn(pte);
		offset    = index * sizeof(pt_element_t);
		pte_gpa   = gfn_to_gpa(table_gfn) + offset;
		walker->table_gfn[walker->level - 1] = table_gfn;
		walker->pte_gpa[walker->level - 1] = pte_gpa;

		real_gfn = mmu->translate_gpa(vcpu, gfn_to_gpa(table_gfn),
					      PFERR_USER_MASK|PFERR_WRITE_MASK);
		if (unlikely(real_gfn == UNMAPPED_GVA))
			goto error;
		real_gfn = gpa_to_gfn(real_gfn);

		host_addr = gfn_to_hva(vcpu->kvm, real_gfn);
		if (unlikely(kvm_is_error_hva(host_addr)))
			goto error;

		ptep_user = (pt_element_t __user *)((void *)host_addr + offset);
		if (unlikely(__copy_from_user(&pte, ptep_user, sizeof(pte))))
			goto error;
		walker->ptep_user[walker->level - 1] = ptep_user;

		trace_kvm_mmu_paging_element(pte, walker->level);

		if (unlikely(!is_present_gpte(pte)))
			goto error;

		if (unlikely(is_rsvd_bits_set(mmu, pte, walker->level))) {
			errcode |= PFERR_RSVD_MASK | PFERR_PRESENT_MASK;
			goto error;
		}

		accessed_dirty &= pte;
		pte_access = pt_access & gpte_access(vcpu, pte);

		walker->ptes[walker->level - 1] = pte;
	} while (!is_last_gpte(mmu, walker->level, pte));

	if (unlikely(permission_fault(mmu, pte_access, access))) {
		errcode |= PFERR_PRESENT_MASK;
		goto error;
	}

	gfn = gpte_to_gfn_lvl(pte, walker->level);
	gfn += (addr & PT_LVL_OFFSET_MASK(walker->level)) >> PAGE_SHIFT;

	if (PTTYPE == 32 && walker->level == PT_DIRECTORY_LEVEL && is_cpuid_PSE36())
		gfn += pse36_gfn_delta(pte);

	real_gpa = mmu->translate_gpa(vcpu, gfn_to_gpa(gfn), access);
	if (real_gpa == UNMAPPED_GVA)
		return 0;

	walker->gfn = real_gpa >> PAGE_SHIFT;

	if (!write_fault)
		protect_clean_gpte(&pte_access, pte);
	else
		/*
		 * On a write fault, fold the dirty bit into accessed_dirty by
		 * shifting it one place right.
		 */
		accessed_dirty &= pte >> (PT_DIRTY_SHIFT - PT_ACCESSED_SHIFT);

	if (unlikely(!accessed_dirty)) {
		ret = FNAME(update_accessed_dirty_bits)(vcpu, mmu, walker, write_fault);
		if (unlikely(ret < 0))
			goto error;
		else if (ret)
			goto retry_walk;
	}

	walker->pt_access = pt_access;
	walker->pte_access = pte_access;
	pgprintk("%s: pte %llx pte_access %x pt_access %x\n",
		 __func__, (u64)pte, pte_access, pt_access);
	return 1;

error:
	errcode |= write_fault | user_fault;
	if (fetch_fault && (mmu->nx ||
			    kvm_read_cr4_bits(vcpu, X86_CR4_SMEP)))
		errcode |= PFERR_FETCH_MASK;

	walker->fault.vector = PF_VECTOR;
	walker->fault.error_code_valid = true;
	walker->fault.error_code = errcode;
	walker->fault.address = addr;
	walker->fault.nested_page_fault = mmu != vcpu->arch.walk_mmu;

	trace_kvm_mmu_walker_error(walker->fault.error_code);
	return 0;
}
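/*
 * Thin wrappers around walk_addr_generic(): walk_addr() walks with the
 * vcpu's ordinary MMU context, walk_addr_nested() with the nested MMU
 * context (vcpu->arch.nested_mmu).
 */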
static int FNAME(walk_addr)(struct guest_walker *walker,
			    struct kvm_vcpu *vcpu, gva_t addr, u32 access)
{
	return FNAME(walk_addr_generic)(walker, vcpu, &vcpu->arch.mmu, addr,
					access);
}

static int FNAME(walk_addr_nested)(struct guest_walker *walker,
				   struct kvm_vcpu *vcpu, gva_t addr,
				   u32 access)
{
	return FNAME(walk_addr_generic)(walker, vcpu, &vcpu->arch.nested_mmu,
					addr, access);
}
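/*
 * Install a last-level shadow pte for a single guest pte, fetching the pfn
 * backing its gfn.  Returns false if the gpte is invalid or the pfn cannot
 * be obtained, in which case the caller should stop prefetching.
 */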
static bool
FNAME(prefetch_gpte)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
		     u64 *spte, pt_element_t gpte, bool no_dirty_log)
{
	unsigned pte_access;
	gfn_t gfn;
	pfn_t pfn;

	if (prefetch_invalid_gpte(vcpu, sp, spte, gpte))
		return false;

	pgprintk("%s: gpte %llx spte %p\n", __func__, (u64)gpte, spte);

	gfn = gpte_to_gfn(gpte);
	pte_access = sp->role.access & gpte_access(vcpu, gpte);
	protect_clean_gpte(&pte_access, gpte);
	pfn = pte_prefetch_gfn_to_pfn(vcpu, gfn,
			no_dirty_log && (pte_access & ACC_WRITE_MASK));
	if (is_error_pfn(pfn))
		return false;

	/*
	 * we call mmu_set_spte() with host_writable = true because
	 * pte_prefetch_gfn_to_pfn always gets a writable pfn.
	 */
	mmu_set_spte(vcpu, spte, pte_access, 0, NULL, PT_PAGE_TABLE_LEVEL,
		     gfn, pfn, true, true);

	return true;
}
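/*
 * Propagate a single guest pte write into the corresponding shadow pte by
 * reusing the prefetch path.
 */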
static void FNAME(update_pte)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
			      u64 *spte, const void *pte)
{
	pt_element_t gpte = *(const pt_element_t *)pte;

	FNAME(prefetch_gpte)(vcpu, sp, spte, gpte, false);
}
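/*
 * Re-read the gpte at the given walker level and report whether it changed
 * since the walk (or whether the read itself failed).  At the last level the
 * read is batched into gw->prefetch_ptes for later prefetching.
 */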
static bool FNAME(gpte_changed)(struct kvm_vcpu *vcpu,
				struct guest_walker *gw, int level)
{
	pt_element_t curr_pte;
	gpa_t base_gpa, pte_gpa = gw->pte_gpa[level - 1];
	u64 mask;
	int r, index;

	if (level == PT_PAGE_TABLE_LEVEL) {
		mask = PTE_PREFETCH_NUM * sizeof(pt_element_t) - 1;
		base_gpa = pte_gpa & ~mask;
		index = (pte_gpa - base_gpa) / sizeof(pt_element_t);

		r = kvm_read_guest_atomic(vcpu->kvm, base_gpa,
				gw->prefetch_ptes, sizeof(gw->prefetch_ptes));
		curr_pte = gw->prefetch_ptes[index];
	} else
		r = kvm_read_guest_atomic(vcpu->kvm, pte_gpa,
				  &curr_pte, sizeof(curr_pte));

	return r || curr_pte != gw->ptes[level - 1];
}
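/*
 * Speculatively map the gptes surrounding the faulting one (read earlier by
 * gpte_changed()) so that nearby accesses do not fault again.  Only applies
 * to last-level, non-direct shadow pages.
 */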
static void FNAME(pte_prefetch)(struct kvm_vcpu *vcpu, struct guest_walker *gw,
				u64 *sptep)
{
	struct kvm_mmu_page *sp;
	pt_element_t *gptep = gw->prefetch_ptes;
	u64 *spte;
	int i;

	sp = page_header(__pa(sptep));

	if (sp->role.level > PT_PAGE_TABLE_LEVEL)
		return;

	if (sp->role.direct)
		return __direct_pte_prefetch(vcpu, sp, sptep);

	i = (sptep - sp->spt) & ~(PTE_PREFETCH_NUM - 1);
	spte = sp->spt + i;

	for (i = 0; i < PTE_PREFETCH_NUM; i++, spte++) {
		if (spte == sptep)
			continue;

		if (is_shadow_present_pte(*spte))
			continue;

		if (!FNAME(prefetch_gpte)(vcpu, sp, spte, gptep[i], true))
			break;
	}
}
/*
 * Fetch a shadow pte for a specific level in the paging hierarchy.
 * If the guest tries to write a write-protected page, we need to
 * emulate this operation; return 1 to indicate this case.
 */
static int FNAME(fetch)(struct kvm_vcpu *vcpu, gva_t addr,
			struct guest_walker *gw,
			int write_fault, int hlevel,
			pfn_t pfn, bool map_writable, bool prefault)
{
	struct kvm_mmu_page *sp = NULL;
	struct kvm_shadow_walk_iterator it;
	unsigned direct_access, access = gw->pt_access;
	int top_level, emulate = 0;

	direct_access = gw->pte_access;

	top_level = vcpu->arch.mmu.root_level;
	if (top_level == PT32E_ROOT_LEVEL)
		top_level = PT32_ROOT_LEVEL;
	/*
	 * Verify that the top-level gpte is still there.  Since the page
	 * is a root page, it is either write protected (and cannot be
	 * changed from now on) or it is invalid (in which case, we don't
	 * really care if it changes underneath us after this point).
	 */
	if (FNAME(gpte_changed)(vcpu, gw, top_level))
		goto out_gpte_changed;

	for (shadow_walk_init(&it, vcpu, addr);
	     shadow_walk_okay(&it) && it.level > gw->level;
	     shadow_walk_next(&it)) {
		gfn_t table_gfn;

		clear_sp_write_flooding_count(it.sptep);
		drop_large_spte(vcpu, it.sptep);

		sp = NULL;
		if (!is_shadow_present_pte(*it.sptep)) {
			table_gfn = gw->table_gfn[it.level - 2];
			sp = kvm_mmu_get_page(vcpu, table_gfn, addr, it.level-1,
					      false, access, it.sptep);
		}

		/*
		 * Verify that the gpte in the page we've just write
		 * protected is still there.
		 */
		if (FNAME(gpte_changed)(vcpu, gw, it.level - 1))
			goto out_gpte_changed;

		if (sp)
			link_shadow_page(it.sptep, sp);
	}

	for (;
	     shadow_walk_okay(&it) && it.level > hlevel;
	     shadow_walk_next(&it)) {
		gfn_t direct_gfn;

		clear_sp_write_flooding_count(it.sptep);
		validate_direct_spte(vcpu, it.sptep, direct_access);

		drop_large_spte(vcpu, it.sptep);

		if (is_shadow_present_pte(*it.sptep))
			continue;

		direct_gfn = gw->gfn & ~(KVM_PAGES_PER_HPAGE(it.level) - 1);

		sp = kvm_mmu_get_page(vcpu, direct_gfn, addr, it.level-1,
				      true, direct_access, it.sptep);
		link_shadow_page(it.sptep, sp);
	}

	clear_sp_write_flooding_count(it.sptep);
	mmu_set_spte(vcpu, it.sptep, gw->pte_access, write_fault, &emulate,
		     it.level, gw->gfn, pfn, prefault, map_writable);
	FNAME(pte_prefetch)(vcpu, gw, it.sptep);

	return emulate;

out_gpte_changed:
	if (sp)
		kvm_mmu_put_page(sp, it.sptep);

	kvm_release_pfn_clean(pfn);
	return 0;
}
/*
 * Check whether the gfn being mapped can be written through the current
 * mapping while it is also in use as a guest page table.
 *
 * This is a helper for FNAME(page_fault).  When the guest uses a large page
 * to map a writable gfn that is also in use as a page table, we should force
 * kvm to map it with a small page: a new shadow page will be created when
 * kvm builds the shadow page table anyway, which would stop kvm from using
 * the large page.  Detecting this early avoids an unnecessary #PF and
 * emulation.
 *
 * @write_fault_to_shadow_pgtable is set to true if the faulting gfn is
 * currently in use as one of its own page tables.
 *
 * Note: the PDPT is not checked for a PAE 32-bit guest.  That is fine,
 * because the PDPT is always shadowed, which means we can never use a large
 * page to map a gfn that is used as a PDPT.
 */
static bool
FNAME(is_self_change_mapping)(struct kvm_vcpu *vcpu,
			      struct guest_walker *walker, int user_fault,
			      bool *write_fault_to_shadow_pgtable)
{
	int level;
	gfn_t mask = ~(KVM_PAGES_PER_HPAGE(walker->level) - 1);
	bool self_changed = false;

	if (!(walker->pte_access & ACC_WRITE_MASK ||
	      (!is_write_protection(vcpu) && !user_fault)))
		return false;

	for (level = walker->level; level <= walker->max_level; level++) {
		gfn_t gfn = walker->gfn ^ walker->table_gfn[level - 1];

		self_changed |= !(gfn & mask);
		*write_fault_to_shadow_pgtable |= !gfn;
	}

	return self_changed;
}
/*
 * Page fault handler.  There are several causes for a page fault:
 *   - there is no shadow pte for the guest pte
 *   - write access through a shadow pte marked read only so that we can set
 *     the dirty bit
 *   - write access to a shadow pte marked read only so we can update the page
 *     dirty bitmap, when userspace requests it
 *   - mmio access; in this case we will never install a present shadow pte
 *   - normal guest page fault due to the guest pte marked not present, not
 *     writable, or not executable
 *
 * Returns: 1 if we need to emulate the instruction, 0 otherwise, or
 *          a negative value on error.
 */
static int FNAME(page_fault)(struct kvm_vcpu *vcpu, gva_t addr, u32 error_code,
			     bool prefault)
{
	int write_fault = error_code & PFERR_WRITE_MASK;
	int user_fault = error_code & PFERR_USER_MASK;
	struct guest_walker walker;
	int r;
	pfn_t pfn;
	int level = PT_PAGE_TABLE_LEVEL;
	int force_pt_level;
	unsigned long mmu_seq;
	bool map_writable, is_self_change_mapping;

	pgprintk("%s: addr %lx err %x\n", __func__, addr, error_code);

	if (unlikely(error_code & PFERR_RSVD_MASK)) {
		r = handle_mmio_page_fault(vcpu, addr, error_code,
					   mmu_is_nested(vcpu));
		if (likely(r != RET_MMIO_PF_INVALID))
			return r;
	}

	r = mmu_topup_memory_caches(vcpu);
	if (r)
		return r;

	/*
	 * Look up the guest pte for the faulting address.
	 */
	r = FNAME(walk_addr)(&walker, vcpu, addr, error_code);

	/*
	 * The page is not mapped by the guest.  Let the guest handle it.
	 */
	if (!r) {
		pgprintk("%s: guest page fault\n", __func__);
		if (!prefault)
			inject_page_fault(vcpu, &walker.fault);

		return 0;
	}

	vcpu->arch.write_fault_to_shadow_pgtable = false;

	is_self_change_mapping = FNAME(is_self_change_mapping)(vcpu,
	      &walker, user_fault, &vcpu->arch.write_fault_to_shadow_pgtable);

	if (walker.level >= PT_DIRECTORY_LEVEL)
		force_pt_level = mapping_level_dirty_bitmap(vcpu, walker.gfn)
		   || is_self_change_mapping;
	else
		force_pt_level = 1;
	if (!force_pt_level) {
		level = min(walker.level, mapping_level(vcpu, walker.gfn));
		walker.gfn = walker.gfn & ~(KVM_PAGES_PER_HPAGE(level) - 1);
	}

	mmu_seq = vcpu->kvm->mmu_notifier_seq;
	smp_rmb();

	if (try_async_pf(vcpu, prefault, walker.gfn, addr, &pfn, write_fault,
			 &map_writable))
		return 0;

	if (handle_abnormal_pfn(vcpu, mmu_is_nested(vcpu) ? 0 : addr,
				walker.gfn, pfn, walker.pte_access, &r))
		return r;

	/*
	 * Do not change pte_access if the pfn is a mmio page, otherwise
	 * we will cache the incorrect access into mmio spte.
	 */
	if (write_fault && !(walker.pte_access & ACC_WRITE_MASK) &&
	    !is_write_protection(vcpu) && !user_fault &&
	    !is_noslot_pfn(pfn)) {
		walker.pte_access |= ACC_WRITE_MASK;
		walker.pte_access &= ~ACC_USER_MASK;

		/*
		 * If we converted a user page to a kernel page so that the
		 * kernel can write to it when cr0.wp=0, we should prevent
		 * the kernel from executing it if SMEP is enabled.
		 */
		if (kvm_read_cr4_bits(vcpu, X86_CR4_SMEP))
			walker.pte_access &= ~ACC_EXEC_MASK;
	}

	spin_lock(&vcpu->kvm->mmu_lock);
	if (mmu_notifier_retry(vcpu->kvm, mmu_seq))
		goto out_unlock;

	kvm_mmu_audit(vcpu, AUDIT_PRE_PAGE_FAULT);
	make_mmu_pages_available(vcpu);
	if (!force_pt_level)
		transparent_hugepage_adjust(vcpu, &walker.gfn, &pfn, &level);
	r = FNAME(fetch)(vcpu, addr, &walker, write_fault,
			 level, pfn, map_writable, prefault);
	++vcpu->stat.pf_fixed;
	kvm_mmu_audit(vcpu, AUDIT_POST_PAGE_FAULT);
	spin_unlock(&vcpu->kvm->mmu_lock);

	return r;

out_unlock:
	spin_unlock(&vcpu->kvm->mmu_lock);
	kvm_release_pfn_clean(pfn);
	return 0;
}
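/*
 * Return the guest-physical address of the guest page table backing a
 * last-level shadow page.  For 32-bit guests, role.quadrant selects which
 * part of the guest page this shadow page covers.
 */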
static gpa_t FNAME(get_level1_sp_gpa)(struct kvm_mmu_page *sp)
{
	int offset = 0;

	WARN_ON(sp->role.level != PT_PAGE_TABLE_LEVEL);

	if (PTTYPE == 32)
		offset = sp->role.quadrant << PT64_LEVEL_BITS;

	return gfn_to_gpa(sp->gfn) + offset * sizeof(pt_element_t);
}
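/*
 * Handle a guest INVLPG: zap the last-level shadow pte for gva and, if the
 * shadow page is unsync, re-read the guest pte so the shadow stays coherent.
 */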
static void FNAME(invlpg)(struct kvm_vcpu *vcpu, gva_t gva)
{
	struct kvm_shadow_walk_iterator iterator;
	struct kvm_mmu_page *sp;
	int level;
	u64 *sptep;

	vcpu_clear_mmio_info(vcpu, gva);

	/*
	 * No need to check the return value here; if topping up the caches
	 * fails, rmap_can_add() lets us skip the pte prefetch later.
	 */
	mmu_topup_memory_caches(vcpu);

	spin_lock(&vcpu->kvm->mmu_lock);
	for_each_shadow_entry(vcpu, gva, iterator) {
		level = iterator.level;
		sptep = iterator.sptep;

		sp = page_header(__pa(sptep));
		if (is_last_spte(*sptep, level)) {
			pt_element_t gpte;
			gpa_t pte_gpa;

			if (!sp->unsync)
				break;

			pte_gpa = FNAME(get_level1_sp_gpa)(sp);
			pte_gpa += (sptep - sp->spt) * sizeof(pt_element_t);

			if (mmu_page_zap_pte(vcpu->kvm, sp, sptep))
				kvm_flush_remote_tlbs(vcpu->kvm);

			if (!rmap_can_add(vcpu))
				break;

			if (kvm_read_guest_atomic(vcpu->kvm, pte_gpa, &gpte,
						  sizeof(pt_element_t)))
				break;

			FNAME(update_pte)(vcpu, sp, sptep, &gpte);
		}

		if (!is_shadow_present_pte(*sptep) || !sp->unsync_children)
			break;
	}
	spin_unlock(&vcpu->kvm->mmu_lock);
}
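/*
 * Translate a guest virtual address to a guest physical address using the
 * vcpu's ordinary MMU context; on failure, report the walker's fault.
 */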
static gpa_t FNAME(gva_to_gpa)(struct kvm_vcpu *vcpu, gva_t vaddr, u32 access,
			       struct x86_exception *exception)
{
	struct guest_walker walker;
	gpa_t gpa = UNMAPPED_GVA;
	int r;

	r = FNAME(walk_addr)(&walker, vcpu, vaddr, access);

	if (r) {
		gpa = gfn_to_gpa(walker.gfn);
		gpa |= vaddr & ~PAGE_MASK;
	} else if (exception)
		*exception = walker.fault;

	return gpa;
}
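/*
 * Same translation as above, but walked through the nested MMU context.
 */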
static gpa_t FNAME(gva_to_gpa_nested)(struct kvm_vcpu *vcpu, gva_t vaddr,
				      u32 access,
				      struct x86_exception *exception)
{
	struct guest_walker walker;
	gpa_t gpa = UNMAPPED_GVA;
	int r;

	r = FNAME(walk_addr_nested)(&walker, vcpu, vaddr, access);

	if (r) {
		gpa = gfn_to_gpa(walker.gfn);
		gpa |= vaddr & ~PAGE_MASK;
	} else if (exception)
		*exception = walker.fault;

	return gpa;
}
/*
 * Using the cached information from sp->gfns is safe because:
 * - The spte has a reference to the struct page, so the pfn for a given gfn
 *   can't change unless all sptes pointing to it are nuked first.
 *
 * Note:
 * We must flush all TLBs if an spte is dropped here, even though the guest
 * is responsible for it.  If we do not, kvm_mmu_notifier_invalidate_page and
 * kvm_mmu_notifier_invalidate_range_start may see that the page is no longer
 * used by the guest and skip the flush, allowing the guest to access the
 * freed pages.  We increase kvm->tlbs_dirty to delay the TLB flush in this
 * case.
 */
static int FNAME(sync_page)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp)
{
	int i, nr_present = 0;
	bool host_writable;
	gpa_t first_pte_gpa;

	/* direct kvm_mmu_page can not be unsync. */
	BUG_ON(sp->role.direct);

	first_pte_gpa = FNAME(get_level1_sp_gpa)(sp);

	for (i = 0; i < PT64_ENT_PER_PAGE; i++) {
		unsigned pte_access;
		pt_element_t gpte;
		gpa_t pte_gpa;
		gfn_t gfn;

		if (!sp->spt[i])
			continue;

		pte_gpa = first_pte_gpa + i * sizeof(pt_element_t);

		if (kvm_read_guest_atomic(vcpu->kvm, pte_gpa, &gpte,
					  sizeof(pt_element_t)))
			return -EINVAL;

		if (prefetch_invalid_gpte(vcpu, sp, &sp->spt[i], gpte)) {
			vcpu->kvm->tlbs_dirty++;
			continue;
		}

		gfn = gpte_to_gfn(gpte);
		pte_access = sp->role.access;
		pte_access &= gpte_access(vcpu, gpte);
		protect_clean_gpte(&pte_access, gpte);

		if (sync_mmio_spte(vcpu->kvm, &sp->spt[i], gfn, pte_access,
				   &nr_present))
			continue;

		if (gfn != sp->gfns[i]) {
			drop_spte(vcpu->kvm, &sp->spt[i]);
			vcpu->kvm->tlbs_dirty++;
			continue;
		}

		nr_present++;

		host_writable = sp->spt[i] & SPTE_HOST_WRITEABLE;

		set_spte(vcpu, &sp->spt[i], pte_access,
			 PT_PAGE_TABLE_LEVEL, gfn,
			 spte_to_pfn(sp->spt[i]), true, false,
			 host_writable);
	}

	return !nr_present;
}
#undef pt_element_t
#undef guest_walker
#undef FNAME
#undef PT_BASE_ADDR_MASK
#undef PT_INDEX
#undef PT_LVL_ADDR_MASK
#undef PT_LVL_OFFSET_MASK
#undef PT_LEVEL_BITS
#undef PT_MAX_FULL_LEVELS
#undef gpte_to_gfn
#undef gpte_to_gfn_lvl
#undef CMPXCHG