paging_tmpl.h

/*
 * Kernel-based Virtual Machine driver for Linux
 *
 * This module enables machines with Intel VT-x extensions to run virtual
 * machines without emulation or binary translation.
 *
 * MMU support
 *
 * Copyright (C) 2006 Qumranet, Inc.
 * Copyright 2010 Red Hat, Inc. and/or its affiliates.
 *
 * Authors:
 *   Yaniv Kamay  <yaniv@qumranet.com>
 *   Avi Kivity   <avi@qumranet.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 */
/*
 * We need the mmu code to access both 32-bit and 64-bit guest ptes,
 * so the code in this file is compiled twice, once per pte size.
 */
#if PTTYPE == 64
        #define pt_element_t u64
        #define guest_walker guest_walker64
        #define FNAME(name) paging##64_##name
        #define PT_BASE_ADDR_MASK PT64_BASE_ADDR_MASK
        #define PT_LVL_ADDR_MASK(lvl) PT64_LVL_ADDR_MASK(lvl)
        #define PT_LVL_OFFSET_MASK(lvl) PT64_LVL_OFFSET_MASK(lvl)
        #define PT_INDEX(addr, level) PT64_INDEX(addr, level)
        #define PT_LEVEL_BITS PT64_LEVEL_BITS
        #ifdef CONFIG_X86_64
        #define PT_MAX_FULL_LEVELS 4
        #define CMPXCHG cmpxchg
        #else
        #define CMPXCHG cmpxchg64
        #define PT_MAX_FULL_LEVELS 2
        #endif
#elif PTTYPE == 32
        #define pt_element_t u32
        #define guest_walker guest_walker32
        #define FNAME(name) paging##32_##name
        #define PT_BASE_ADDR_MASK PT32_BASE_ADDR_MASK
        #define PT_LVL_ADDR_MASK(lvl) PT32_LVL_ADDR_MASK(lvl)
        #define PT_LVL_OFFSET_MASK(lvl) PT32_LVL_OFFSET_MASK(lvl)
        #define PT_INDEX(addr, level) PT32_INDEX(addr, level)
        #define PT_LEVEL_BITS PT32_LEVEL_BITS
        #define PT_MAX_FULL_LEVELS 2
        #define CMPXCHG cmpxchg
#else
        #error Invalid PTTYPE value
#endif
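
/*
 * Illustration (not part of this file): the including C file, typically
 * mmu.c, is expected to instantiate both walker flavours by defining PTTYPE
 * before each include, roughly like:
 *
 *      #define PTTYPE 64
 *      #include "paging_tmpl.h"
 *      #undef PTTYPE
 *
 *      #define PTTYPE 32
 *      #include "paging_tmpl.h"
 *      #undef PTTYPE
 *
 * so that paging64_*() and paging32_*() variants of every FNAME() function
 * are generated from the same source.
 */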

#define gpte_to_gfn_lvl FNAME(gpte_to_gfn_lvl)
#define gpte_to_gfn(pte) gpte_to_gfn_lvl((pte), PT_PAGE_TABLE_LEVEL)

/*
 * The guest_walker structure emulates the behavior of the hardware page
 * table walker.
 */
struct guest_walker {
        int level;
        gfn_t table_gfn[PT_MAX_FULL_LEVELS];
        pt_element_t ptes[PT_MAX_FULL_LEVELS];
        pt_element_t prefetch_ptes[PTE_PREFETCH_NUM];
        gpa_t pte_gpa[PT_MAX_FULL_LEVELS];
        unsigned pt_access;
        unsigned pte_access;
        gfn_t gfn;
        struct x86_exception fault;
};
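
/*
 * Note on the per-level arrays above: walk_addr_generic() fills table_gfn[],
 * ptes[] and pte_gpa[] indexed by (level - 1), so entry 0 describes the last
 * level walked and entry root_level - 1 the root, while pt_access/pte_access
 * accumulate the access rights gathered along the walk.
 */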

static gfn_t gpte_to_gfn_lvl(pt_element_t gpte, int lvl)
{
        return (gpte & PT_LVL_ADDR_MASK(lvl)) >> PAGE_SHIFT;
}

static bool FNAME(cmpxchg_gpte)(struct kvm *kvm,
                                gfn_t table_gfn, unsigned index,
                                pt_element_t orig_pte, pt_element_t new_pte)
{
        pt_element_t ret;
        pt_element_t *table;
        struct page *page;

        page = gfn_to_page(kvm, table_gfn);

        table = kmap_atomic(page, KM_USER0);
        ret = CMPXCHG(&table[index], orig_pte, new_pte);
        kunmap_atomic(table, KM_USER0);

        kvm_release_page_dirty(page);

        return (ret != orig_pte);
}

static unsigned FNAME(gpte_access)(struct kvm_vcpu *vcpu, pt_element_t gpte)
{
        unsigned access;

        access = (gpte & (PT_WRITABLE_MASK | PT_USER_MASK)) | ACC_EXEC_MASK;
#if PTTYPE == 64
        if (vcpu->arch.mmu.nx)
                access &= ~(gpte >> PT64_NX_SHIFT);
#endif
        return access;
}

/*
 * Fetch a guest pte for a guest virtual address
 */
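/*
 * The "access" argument carries the same PFERR_* bits as a hardware page
 * fault error code; the function below splits it into write/user/fetch
 * intents and checks them against the guest ptes it walks.
 */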
static int FNAME(walk_addr_generic)(struct guest_walker *walker,
                                    struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
                                    gva_t addr, u32 access)
{
        pt_element_t pte;
        gfn_t table_gfn;
        unsigned index, pt_access, uninitialized_var(pte_access);
        gpa_t pte_gpa;
        bool eperm, present, rsvd_fault;
        int offset, write_fault, user_fault, fetch_fault;

        write_fault = access & PFERR_WRITE_MASK;
        user_fault = access & PFERR_USER_MASK;
        fetch_fault = access & PFERR_FETCH_MASK;

        trace_kvm_mmu_pagetable_walk(addr, write_fault, user_fault,
                                     fetch_fault);
walk:
        present = true;
        eperm = rsvd_fault = false;
        walker->level = mmu->root_level;
        pte = mmu->get_cr3(vcpu);

#if PTTYPE == 64
        if (walker->level == PT32E_ROOT_LEVEL) {
                pte = kvm_pdptr_read_mmu(vcpu, mmu, (addr >> 30) & 3);
                trace_kvm_mmu_paging_element(pte, walker->level);
                if (!is_present_gpte(pte)) {
                        present = false;
                        goto error;
                }
                --walker->level;
        }
#endif
        ASSERT((!is_long_mode(vcpu) && is_pae(vcpu)) ||
               (mmu->get_cr3(vcpu) & CR3_NONPAE_RESERVED_BITS) == 0);

        pt_access = ACC_ALL;

        for (;;) {
                index = PT_INDEX(addr, walker->level);

                table_gfn = gpte_to_gfn(pte);
                offset = index * sizeof(pt_element_t);
                pte_gpa = gfn_to_gpa(table_gfn) + offset;
                walker->table_gfn[walker->level - 1] = table_gfn;
                walker->pte_gpa[walker->level - 1] = pte_gpa;

                if (kvm_read_guest_page_mmu(vcpu, mmu, table_gfn, &pte,
                                            offset, sizeof(pte),
                                            PFERR_USER_MASK|PFERR_WRITE_MASK)) {
                        present = false;
                        break;
                }

                trace_kvm_mmu_paging_element(pte, walker->level);

                if (!is_present_gpte(pte)) {
                        present = false;
                        break;
                }

                if (is_rsvd_bits_set(&vcpu->arch.mmu, pte, walker->level)) {
                        rsvd_fault = true;
                        break;
                }

                if (write_fault && !is_writable_pte(pte))
                        if (user_fault || is_write_protection(vcpu))
                                eperm = true;

                if (user_fault && !(pte & PT_USER_MASK))
                        eperm = true;

#if PTTYPE == 64
                if (fetch_fault && (pte & PT64_NX_MASK))
                        eperm = true;
#endif

                if (!eperm && !rsvd_fault && !(pte & PT_ACCESSED_MASK)) {
                        trace_kvm_mmu_set_accessed_bit(table_gfn, index,
                                                       sizeof(pte));
                        if (FNAME(cmpxchg_gpte)(vcpu->kvm, table_gfn,
                                                index, pte, pte|PT_ACCESSED_MASK))
                                goto walk;
                        mark_page_dirty(vcpu->kvm, table_gfn);
                        pte |= PT_ACCESSED_MASK;
                }

                pte_access = pt_access & FNAME(gpte_access)(vcpu, pte);

                walker->ptes[walker->level - 1] = pte;

                if ((walker->level == PT_PAGE_TABLE_LEVEL) ||
                    ((walker->level == PT_DIRECTORY_LEVEL) &&
                     is_large_pte(pte) &&
                     (PTTYPE == 64 || is_pse(vcpu))) ||
                    ((walker->level == PT_PDPE_LEVEL) &&
                     is_large_pte(pte) &&
                     mmu->root_level == PT64_ROOT_LEVEL)) {
                        int lvl = walker->level;
                        gpa_t real_gpa;
                        gfn_t gfn;
                        u32 ac;

                        gfn = gpte_to_gfn_lvl(pte, lvl);
                        gfn += (addr & PT_LVL_OFFSET_MASK(lvl)) >> PAGE_SHIFT;

                        if (PTTYPE == 32 &&
                            walker->level == PT_DIRECTORY_LEVEL &&
                            is_cpuid_PSE36())
                                gfn += pse36_gfn_delta(pte);

                        ac = write_fault | fetch_fault | user_fault;

                        real_gpa = mmu->translate_gpa(vcpu, gfn_to_gpa(gfn),
                                                      ac);
                        if (real_gpa == UNMAPPED_GVA)
                                return 0;

                        walker->gfn = real_gpa >> PAGE_SHIFT;

                        break;
                }

                pt_access = pte_access;
                --walker->level;
        }

        if (!present || eperm || rsvd_fault)
                goto error;

        if (write_fault && !is_dirty_gpte(pte)) {
                bool ret;

                trace_kvm_mmu_set_dirty_bit(table_gfn, index, sizeof(pte));
                ret = FNAME(cmpxchg_gpte)(vcpu->kvm, table_gfn, index, pte,
                                          pte|PT_DIRTY_MASK);
                if (ret)
                        goto walk;
                mark_page_dirty(vcpu->kvm, table_gfn);
                pte |= PT_DIRTY_MASK;
                walker->ptes[walker->level - 1] = pte;
        }

        walker->pt_access = pt_access;
        walker->pte_access = pte_access;
        pgprintk("%s: pte %llx pte_access %x pt_access %x\n",
                 __func__, (u64)pte, pte_access, pt_access);
        return 1;

error:
        walker->fault.vector = PF_VECTOR;
        walker->fault.error_code_valid = true;
        walker->fault.error_code = 0;
        if (present)
                walker->fault.error_code |= PFERR_PRESENT_MASK;

        walker->fault.error_code |= write_fault | user_fault;
        if (fetch_fault && mmu->nx)
                walker->fault.error_code |= PFERR_FETCH_MASK;
        if (rsvd_fault)
                walker->fault.error_code |= PFERR_RSVD_MASK;

        walker->fault.address = addr;
        walker->fault.nested_page_fault = mmu != vcpu->arch.walk_mmu;

        trace_kvm_mmu_walker_error(walker->fault.error_code);
        return 0;
}

static int FNAME(walk_addr)(struct guest_walker *walker,
                            struct kvm_vcpu *vcpu, gva_t addr, u32 access)
{
        return FNAME(walk_addr_generic)(walker, vcpu, &vcpu->arch.mmu, addr,
                                        access);
}

static int FNAME(walk_addr_nested)(struct guest_walker *walker,
                                   struct kvm_vcpu *vcpu, gva_t addr,
                                   u32 access)
{
        return FNAME(walk_addr_generic)(walker, vcpu, &vcpu->arch.nested_mmu,
                                        addr, access);
}
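
/*
 * The two wrappers above differ only in which mmu context they walk:
 * walk_addr() uses vcpu->arch.mmu (the mmu currently in use), while
 * walk_addr_nested() uses vcpu->arch.nested_mmu, which is what the
 * gva_to_gpa_nested() path further below relies on when a second level of
 * guest paging is involved.
 */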

static bool FNAME(prefetch_invalid_gpte)(struct kvm_vcpu *vcpu,
                                         struct kvm_mmu_page *sp, u64 *spte,
                                         pt_element_t gpte)
{
        u64 nonpresent = shadow_trap_nonpresent_pte;

        if (is_rsvd_bits_set(&vcpu->arch.mmu, gpte, PT_PAGE_TABLE_LEVEL))
                goto no_present;

        if (!is_present_gpte(gpte)) {
                if (!sp->unsync)
                        nonpresent = shadow_notrap_nonpresent_pte;
                goto no_present;
        }

        if (!(gpte & PT_ACCESSED_MASK))
                goto no_present;

        return false;

no_present:
        drop_spte(vcpu->kvm, spte, nonpresent);
        return true;
}

static void FNAME(update_pte)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
                              u64 *spte, const void *pte)
{
        pt_element_t gpte;
        unsigned pte_access;
        pfn_t pfn;

        gpte = *(const pt_element_t *)pte;
        if (FNAME(prefetch_invalid_gpte)(vcpu, sp, spte, gpte))
                return;

        pgprintk("%s: gpte %llx spte %p\n", __func__, (u64)gpte, spte);
        pte_access = sp->role.access & FNAME(gpte_access)(vcpu, gpte);
        pfn = vcpu->arch.update_pte.pfn;
        if (is_error_pfn(pfn))
                return;
        if (mmu_notifier_retry(vcpu, vcpu->arch.update_pte.mmu_seq))
                return;
        kvm_get_pfn(pfn);
        /*
         * we call mmu_set_spte() with host_writable = true because
         * vcpu->arch.update_pte.pfn was fetched from get_user_pages(write = 1).
         */
        mmu_set_spte(vcpu, spte, sp->role.access, pte_access, 0, 0,
                     is_dirty_gpte(gpte), NULL, PT_PAGE_TABLE_LEVEL,
                     gpte_to_gfn(gpte), pfn, true, true);
}

static bool FNAME(gpte_changed)(struct kvm_vcpu *vcpu,
                                struct guest_walker *gw, int level)
{
        pt_element_t curr_pte;
        gpa_t base_gpa, pte_gpa = gw->pte_gpa[level - 1];
        u64 mask;
        int r, index;

        if (level == PT_PAGE_TABLE_LEVEL) {
                mask = PTE_PREFETCH_NUM * sizeof(pt_element_t) - 1;
                base_gpa = pte_gpa & ~mask;
                index = (pte_gpa - base_gpa) / sizeof(pt_element_t);

                r = kvm_read_guest_atomic(vcpu->kvm, base_gpa,
                                gw->prefetch_ptes, sizeof(gw->prefetch_ptes));
                curr_pte = gw->prefetch_ptes[index];
        } else
                r = kvm_read_guest_atomic(vcpu->kvm, pte_gpa,
                                          &curr_pte, sizeof(curr_pte));

        return r || curr_pte != gw->ptes[level - 1];
}

static void FNAME(pte_prefetch)(struct kvm_vcpu *vcpu, struct guest_walker *gw,
                                u64 *sptep)
{
        struct kvm_mmu_page *sp;
        pt_element_t *gptep = gw->prefetch_ptes;
        u64 *spte;
        int i;

        sp = page_header(__pa(sptep));

        if (sp->role.level > PT_PAGE_TABLE_LEVEL)
                return;

        if (sp->role.direct)
                return __direct_pte_prefetch(vcpu, sp, sptep);

        i = (sptep - sp->spt) & ~(PTE_PREFETCH_NUM - 1);
        spte = sp->spt + i;

        for (i = 0; i < PTE_PREFETCH_NUM; i++, spte++) {
                pt_element_t gpte;
                unsigned pte_access;
                gfn_t gfn;
                pfn_t pfn;
                bool dirty;

                if (spte == sptep)
                        continue;

                if (*spte != shadow_trap_nonpresent_pte)
                        continue;

                gpte = gptep[i];

                if (FNAME(prefetch_invalid_gpte)(vcpu, sp, spte, gpte))
                        continue;

                pte_access = sp->role.access & FNAME(gpte_access)(vcpu, gpte);
                gfn = gpte_to_gfn(gpte);
                dirty = is_dirty_gpte(gpte);
                pfn = pte_prefetch_gfn_to_pfn(vcpu, gfn,
                                (pte_access & ACC_WRITE_MASK) && dirty);
                if (is_error_pfn(pfn)) {
                        kvm_release_pfn_clean(pfn);
                        break;
                }

                mmu_set_spte(vcpu, spte, sp->role.access, pte_access, 0, 0,
                             dirty, NULL, PT_PAGE_TABLE_LEVEL, gfn,
                             pfn, true, true);
        }
}

/*
 * Fetch a shadow pte for a specific level in the paging hierarchy.
 */
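/*
 * Two loops follow in fetch(): the first mirrors the guest page-table levels
 * above gw->level with non-direct shadow pages (re-checking each guest pte
 * via gpte_changed()), the second fills the remaining levels down to hlevel
 * with direct shadow pages before the final spte is installed.
 */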
static u64 *FNAME(fetch)(struct kvm_vcpu *vcpu, gva_t addr,
                         struct guest_walker *gw,
                         int user_fault, int write_fault, int hlevel,
                         int *ptwrite, pfn_t pfn, bool map_writable,
                         bool prefault)
{
        unsigned access = gw->pt_access;
        struct kvm_mmu_page *sp = NULL;
        bool dirty = is_dirty_gpte(gw->ptes[gw->level - 1]);
        int top_level;
        unsigned direct_access;
        struct kvm_shadow_walk_iterator it;

        if (!is_present_gpte(gw->ptes[gw->level - 1]))
                return NULL;

        direct_access = gw->pt_access & gw->pte_access;
        if (!dirty)
                direct_access &= ~ACC_WRITE_MASK;

        top_level = vcpu->arch.mmu.root_level;
        if (top_level == PT32E_ROOT_LEVEL)
                top_level = PT32_ROOT_LEVEL;
        /*
         * Verify that the top-level gpte is still there.  Since the page
         * is a root page, it is either write protected (and cannot be
         * changed from now on) or it is invalid (in which case, we don't
         * really care if it changes underneath us after this point).
         */
        if (FNAME(gpte_changed)(vcpu, gw, top_level))
                goto out_gpte_changed;

        for (shadow_walk_init(&it, vcpu, addr);
             shadow_walk_okay(&it) && it.level > gw->level;
             shadow_walk_next(&it)) {
                gfn_t table_gfn;

                drop_large_spte(vcpu, it.sptep);

                sp = NULL;
                if (!is_shadow_present_pte(*it.sptep)) {
                        table_gfn = gw->table_gfn[it.level - 2];
                        sp = kvm_mmu_get_page(vcpu, table_gfn, addr, it.level-1,
                                              false, access, it.sptep);
                }

                /*
                 * Verify that the gpte in the page we've just write
                 * protected is still there.
                 */
                if (FNAME(gpte_changed)(vcpu, gw, it.level - 1))
                        goto out_gpte_changed;

                if (sp)
                        link_shadow_page(it.sptep, sp);
        }

        for (;
             shadow_walk_okay(&it) && it.level > hlevel;
             shadow_walk_next(&it)) {
                gfn_t direct_gfn;

                validate_direct_spte(vcpu, it.sptep, direct_access);

                drop_large_spte(vcpu, it.sptep);

                if (is_shadow_present_pte(*it.sptep))
                        continue;

                direct_gfn = gw->gfn & ~(KVM_PAGES_PER_HPAGE(it.level) - 1);

                sp = kvm_mmu_get_page(vcpu, direct_gfn, addr, it.level-1,
                                      true, direct_access, it.sptep);
                link_shadow_page(it.sptep, sp);
        }

        mmu_set_spte(vcpu, it.sptep, access, gw->pte_access & access,
                     user_fault, write_fault, dirty, ptwrite, it.level,
                     gw->gfn, pfn, prefault, map_writable);
        FNAME(pte_prefetch)(vcpu, gw, it.sptep);

        return it.sptep;

out_gpte_changed:
        if (sp)
                kvm_mmu_put_page(sp, it.sptep);
        kvm_release_pfn_clean(pfn);
        return NULL;
}

/*
 * Page fault handler.  There are several causes for a page fault:
 *   - there is no shadow pte for the guest pte
 *   - write access through a shadow pte marked read only so that we can set
 *     the dirty bit
 *   - write access to a shadow pte marked read only so we can update the page
 *     dirty bitmap, when userspace requests it
 *   - mmio access; in this case we will never install a present shadow pte
 *   - normal guest page fault due to the guest pte marked not present, not
 *     writable, or not executable
 *
 * Returns: 1 if we need to emulate the instruction, 0 otherwise, or
 *          a negative value on error.
 */
static int FNAME(page_fault)(struct kvm_vcpu *vcpu, gva_t addr, u32 error_code,
                             bool prefault)
{
        int write_fault = error_code & PFERR_WRITE_MASK;
        int user_fault = error_code & PFERR_USER_MASK;
        struct guest_walker walker;
        u64 *sptep;
        int write_pt = 0;
        int r;
        pfn_t pfn;
        int level = PT_PAGE_TABLE_LEVEL;
        int force_pt_level;
        unsigned long mmu_seq;
        bool map_writable;

        pgprintk("%s: addr %lx err %x\n", __func__, addr, error_code);

        r = mmu_topup_memory_caches(vcpu);
        if (r)
                return r;

        /*
         * Look up the guest pte for the faulting address.
         */
        r = FNAME(walk_addr)(&walker, vcpu, addr, error_code);

        /*
         * The page is not mapped by the guest.  Let the guest handle it.
         */
        if (!r) {
                pgprintk("%s: guest page fault\n", __func__);
                if (!prefault) {
                        inject_page_fault(vcpu, &walker.fault);
                        /* reset fork detector */
                        vcpu->arch.last_pt_write_count = 0;
                }
                return 0;
        }

        if (walker.level >= PT_DIRECTORY_LEVEL)
                force_pt_level = mapping_level_dirty_bitmap(vcpu, walker.gfn);
        else
                force_pt_level = 1;
        if (!force_pt_level) {
                level = min(walker.level, mapping_level(vcpu, walker.gfn));
                walker.gfn = walker.gfn & ~(KVM_PAGES_PER_HPAGE(level) - 1);
        }

        mmu_seq = vcpu->kvm->mmu_notifier_seq;
        smp_rmb();

        if (try_async_pf(vcpu, prefault, walker.gfn, addr, &pfn, write_fault,
                         &map_writable))
                return 0;

        /* mmio */
        if (is_error_pfn(pfn))
                return kvm_handle_bad_page(vcpu->kvm, walker.gfn, pfn);

        spin_lock(&vcpu->kvm->mmu_lock);
        if (mmu_notifier_retry(vcpu, mmu_seq))
                goto out_unlock;

        trace_kvm_mmu_audit(vcpu, AUDIT_PRE_PAGE_FAULT);
        kvm_mmu_free_some_pages(vcpu);
        if (!force_pt_level)
                transparent_hugepage_adjust(vcpu, &walker.gfn, &pfn, &level);
        sptep = FNAME(fetch)(vcpu, addr, &walker, user_fault, write_fault,
                             level, &write_pt, pfn, map_writable, prefault);
        (void)sptep;
        pgprintk("%s: shadow pte %p %llx ptwrite %d\n", __func__,
                 sptep, *sptep, write_pt);

        if (!write_pt)
                vcpu->arch.last_pt_write_count = 0; /* reset fork detector */

        ++vcpu->stat.pf_fixed;
        trace_kvm_mmu_audit(vcpu, AUDIT_POST_PAGE_FAULT);
        spin_unlock(&vcpu->kvm->mmu_lock);

        return write_pt;

out_unlock:
        spin_unlock(&vcpu->kvm->mmu_lock);
        kvm_release_pfn_clean(pfn);
        return 0;
}

static void FNAME(invlpg)(struct kvm_vcpu *vcpu, gva_t gva)
{
        struct kvm_shadow_walk_iterator iterator;
        struct kvm_mmu_page *sp;
        gpa_t pte_gpa = -1;
        int level;
        u64 *sptep;
        int need_flush = 0;

        spin_lock(&vcpu->kvm->mmu_lock);

        for_each_shadow_entry(vcpu, gva, iterator) {
                level = iterator.level;
                sptep = iterator.sptep;

                sp = page_header(__pa(sptep));
                if (is_last_spte(*sptep, level)) {
                        int offset, shift;

                        if (!sp->unsync)
                                break;

                        shift = PAGE_SHIFT -
                                (PT_LEVEL_BITS - PT64_LEVEL_BITS) * level;
                        offset = sp->role.quadrant << shift;

                        pte_gpa = (sp->gfn << PAGE_SHIFT) + offset;
                        pte_gpa += (sptep - sp->spt) * sizeof(pt_element_t);

                        if (is_shadow_present_pte(*sptep)) {
                                if (is_large_pte(*sptep))
                                        --vcpu->kvm->stat.lpages;
                                drop_spte(vcpu->kvm, sptep,
                                          shadow_trap_nonpresent_pte);
                                need_flush = 1;
                        } else
                                __set_spte(sptep, shadow_trap_nonpresent_pte);
                        break;
                }

                if (!is_shadow_present_pte(*sptep) || !sp->unsync_children)
                        break;
        }

        if (need_flush)
                kvm_flush_remote_tlbs(vcpu->kvm);

        atomic_inc(&vcpu->kvm->arch.invlpg_counter);

        spin_unlock(&vcpu->kvm->mmu_lock);

        if (pte_gpa == -1)
                return;

        if (mmu_topup_memory_caches(vcpu))
                return;
        kvm_mmu_pte_write(vcpu, pte_gpa, NULL, sizeof(pt_element_t), 0);
}

static gpa_t FNAME(gva_to_gpa)(struct kvm_vcpu *vcpu, gva_t vaddr, u32 access,
                               struct x86_exception *exception)
{
        struct guest_walker walker;
        gpa_t gpa = UNMAPPED_GVA;
        int r;

        r = FNAME(walk_addr)(&walker, vcpu, vaddr, access);

        if (r) {
                gpa = gfn_to_gpa(walker.gfn);
                gpa |= vaddr & ~PAGE_MASK;
        } else if (exception)
                *exception = walker.fault;

        return gpa;
}

static gpa_t FNAME(gva_to_gpa_nested)(struct kvm_vcpu *vcpu, gva_t vaddr,
                                      u32 access,
                                      struct x86_exception *exception)
{
        struct guest_walker walker;
        gpa_t gpa = UNMAPPED_GVA;
        int r;

        r = FNAME(walk_addr_nested)(&walker, vcpu, vaddr, access);

        if (r) {
                gpa = gfn_to_gpa(walker.gfn);
                gpa |= vaddr & ~PAGE_MASK;
        } else if (exception)
                *exception = walker.fault;

        return gpa;
}

static void FNAME(prefetch_page)(struct kvm_vcpu *vcpu,
                                 struct kvm_mmu_page *sp)
{
        int i, j, offset, r;
        pt_element_t pt[256 / sizeof(pt_element_t)];
        gpa_t pte_gpa;

        if (sp->role.direct
            || (PTTYPE == 32 && sp->role.level > PT_PAGE_TABLE_LEVEL)) {
                nonpaging_prefetch_page(vcpu, sp);
                return;
        }

        pte_gpa = gfn_to_gpa(sp->gfn);
        if (PTTYPE == 32) {
                offset = sp->role.quadrant << PT64_LEVEL_BITS;
                pte_gpa += offset * sizeof(pt_element_t);
        }

        for (i = 0; i < PT64_ENT_PER_PAGE; i += ARRAY_SIZE(pt)) {
                r = kvm_read_guest_atomic(vcpu->kvm, pte_gpa, pt, sizeof pt);
                pte_gpa += ARRAY_SIZE(pt) * sizeof(pt_element_t);
                for (j = 0; j < ARRAY_SIZE(pt); ++j)
                        if (r || is_present_gpte(pt[j]))
                                sp->spt[i+j] = shadow_trap_nonpresent_pte;
                        else
                                sp->spt[i+j] = shadow_notrap_nonpresent_pte;
        }
}

/*
 * Using the cached information from sp->gfns is safe because:
 * - The spte has a reference to the struct page, so the pfn for a given gfn
 *   can't change unless all sptes pointing to it are nuked first.
 *
 * Note:
 *   We should flush all tlbs if a spte is dropped, even though the guest is
 *   responsible for it.  If we don't, kvm_mmu_notifier_invalidate_page and
 *   kvm_mmu_notifier_invalidate_range_start may conclude that the mapped page
 *   is no longer used by the guest and skip the flush, allowing the guest to
 *   keep accessing the freed pages.
 *   We increase kvm->tlbs_dirty to delay the tlb flush in this case.
 */
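/*
 * In the function below, nr_present counts sptes whose guest pte is still
 * present and still maps the cached gfn; the return value is therefore 1
 * only when no such spte remains.
 */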
static int FNAME(sync_page)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp)
{
        int i, offset, nr_present;
        bool host_writable;
        gpa_t first_pte_gpa;

        offset = nr_present = 0;

        /* direct kvm_mmu_page can not be unsync. */
        BUG_ON(sp->role.direct);

        if (PTTYPE == 32)
                offset = sp->role.quadrant << PT64_LEVEL_BITS;

        first_pte_gpa = gfn_to_gpa(sp->gfn) + offset * sizeof(pt_element_t);

        for (i = 0; i < PT64_ENT_PER_PAGE; i++) {
                unsigned pte_access;
                pt_element_t gpte;
                gpa_t pte_gpa;
                gfn_t gfn;

                if (!is_shadow_present_pte(sp->spt[i]))
                        continue;

                pte_gpa = first_pte_gpa + i * sizeof(pt_element_t);

                if (kvm_read_guest_atomic(vcpu->kvm, pte_gpa, &gpte,
                                          sizeof(pt_element_t)))
                        return -EINVAL;

                gfn = gpte_to_gfn(gpte);

                if (FNAME(prefetch_invalid_gpte)(vcpu, sp, &sp->spt[i], gpte)) {
                        vcpu->kvm->tlbs_dirty++;
                        continue;
                }

                if (gfn != sp->gfns[i]) {
                        drop_spte(vcpu->kvm, &sp->spt[i],
                                  shadow_trap_nonpresent_pte);
                        vcpu->kvm->tlbs_dirty++;
                        continue;
                }

                nr_present++;
                pte_access = sp->role.access & FNAME(gpte_access)(vcpu, gpte);
                host_writable = sp->spt[i] & SPTE_HOST_WRITEABLE;

                set_spte(vcpu, &sp->spt[i], pte_access, 0, 0,
                         is_dirty_gpte(gpte), PT_PAGE_TABLE_LEVEL, gfn,
                         spte_to_pfn(sp->spt[i]), true, false,
                         host_writable);
        }

        return !nr_present;
}

#undef pt_element_t
#undef guest_walker
#undef FNAME
#undef PT_BASE_ADDR_MASK
#undef PT_INDEX
#undef PT_LVL_ADDR_MASK
#undef PT_LVL_OFFSET_MASK
#undef PT_LEVEL_BITS
#undef PT_MAX_FULL_LEVELS
#undef gpte_to_gfn
#undef gpte_to_gfn_lvl
#undef CMPXCHG