mmu.c

/*
 * Kernel-based Virtual Machine driver for Linux
 *
 * This module enables machines with Intel VT-x extensions to run virtual
 * machines without emulation or binary translation.
 *
 * MMU support
 *
 * Copyright (C) 2006 Qumranet, Inc.
 *
 * Authors:
 *	Yaniv Kamay  <yaniv@qumranet.com>
 *	Avi Kivity   <avi@qumranet.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 */

#include "mmu.h"
#include "kvm_cache_regs.h"

#include <linux/kvm_host.h>
#include <linux/types.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/highmem.h>
#include <linux/module.h>
#include <linux/swap.h>
#include <linux/hugetlb.h>
#include <linux/compiler.h>

#include <asm/page.h>
#include <asm/cmpxchg.h>
#include <asm/io.h>
#include <asm/vmx.h>

/*
 * When set to true, this enables Two-Dimensional Paging, where the
 * hardware walks two page tables:
 * 1. the guest-virtual to guest-physical table
 * 2. while doing 1., the guest-physical to host-physical table
 * If the hardware supports that, we don't need to do shadow paging.
 */
bool tdp_enabled = false;

#undef MMU_DEBUG

#undef AUDIT

#ifdef AUDIT
static void kvm_mmu_audit(struct kvm_vcpu *vcpu, const char *msg);
#else
static void kvm_mmu_audit(struct kvm_vcpu *vcpu, const char *msg) {}
#endif

#ifdef MMU_DEBUG

#define pgprintk(x...) do { if (dbg) printk(x); } while (0)
#define rmap_printk(x...) do { if (dbg) printk(x); } while (0)

#else

#define pgprintk(x...) do { } while (0)
#define rmap_printk(x...) do { } while (0)

#endif

#if defined(MMU_DEBUG) || defined(AUDIT)
static int dbg = 0;
module_param(dbg, bool, 0644);
#endif

static int oos_shadow = 1;
module_param(oos_shadow, bool, 0644);

#ifndef MMU_DEBUG
#define ASSERT(x) do { } while (0)
#else
#define ASSERT(x)							\
	if (!(x)) {							\
		printk(KERN_WARNING "assertion failed %s:%d: %s\n",	\
		       __FILE__, __LINE__, #x);				\
	}
#endif

#define PT_FIRST_AVAIL_BITS_SHIFT 9
#define PT64_SECOND_AVAIL_BITS_SHIFT 52

#define VALID_PAGE(x) ((x) != INVALID_PAGE)

#define PT64_LEVEL_BITS 9

#define PT64_LEVEL_SHIFT(level) \
		(PAGE_SHIFT + (level - 1) * PT64_LEVEL_BITS)

#define PT64_LEVEL_MASK(level) \
		(((1ULL << PT64_LEVEL_BITS) - 1) << PT64_LEVEL_SHIFT(level))

#define PT64_INDEX(address, level)\
	(((address) >> PT64_LEVEL_SHIFT(level)) & ((1 << PT64_LEVEL_BITS) - 1))
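
/*
 * For illustration: with 4KB pages (PAGE_SHIFT == 12) and 9 index bits per
 * level, PT64_LEVEL_SHIFT(1) == 12, (2) == 21, (3) == 30 and (4) == 39,
 * so e.g. PT64_INDEX(addr, 2) extracts bits 21..29 of the address.
 */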

#define PT32_LEVEL_BITS 10

#define PT32_LEVEL_SHIFT(level) \
		(PAGE_SHIFT + (level - 1) * PT32_LEVEL_BITS)

#define PT32_LEVEL_MASK(level) \
		(((1ULL << PT32_LEVEL_BITS) - 1) << PT32_LEVEL_SHIFT(level))

#define PT32_LVL_OFFSET_MASK(level) \
	(PT32_BASE_ADDR_MASK & ((1ULL << (PAGE_SHIFT + (((level) - 1) \
						* PT32_LEVEL_BITS))) - 1))

#define PT32_INDEX(address, level)\
	(((address) >> PT32_LEVEL_SHIFT(level)) & ((1 << PT32_LEVEL_BITS) - 1))

#define PT64_BASE_ADDR_MASK (((1ULL << 52) - 1) & ~(u64)(PAGE_SIZE-1))
#define PT64_DIR_BASE_ADDR_MASK \
	(PT64_BASE_ADDR_MASK & ~((1ULL << (PAGE_SHIFT + PT64_LEVEL_BITS)) - 1))
#define PT64_LVL_ADDR_MASK(level) \
	(PT64_BASE_ADDR_MASK & ~((1ULL << (PAGE_SHIFT + (((level) - 1) \
						* PT64_LEVEL_BITS))) - 1))
#define PT64_LVL_OFFSET_MASK(level) \
	(PT64_BASE_ADDR_MASK & ((1ULL << (PAGE_SHIFT + (((level) - 1) \
						* PT64_LEVEL_BITS))) - 1))

#define PT32_BASE_ADDR_MASK PAGE_MASK
#define PT32_DIR_BASE_ADDR_MASK \
	(PAGE_MASK & ~((1ULL << (PAGE_SHIFT + PT32_LEVEL_BITS)) - 1))
#define PT32_LVL_ADDR_MASK(level) \
	(PAGE_MASK & ~((1ULL << (PAGE_SHIFT + (((level) - 1) \
					    * PT32_LEVEL_BITS))) - 1))

#define PT64_PERM_MASK (PT_PRESENT_MASK | PT_WRITABLE_MASK | PT_USER_MASK \
			| PT64_NX_MASK)

#define PFERR_PRESENT_MASK (1U << 0)
#define PFERR_WRITE_MASK (1U << 1)
#define PFERR_USER_MASK (1U << 2)
#define PFERR_RSVD_MASK (1U << 3)
#define PFERR_FETCH_MASK (1U << 4)

#define PT_PDPE_LEVEL 3
#define PT_DIRECTORY_LEVEL 2
#define PT_PAGE_TABLE_LEVEL 1

#define RMAP_EXT 4

#define ACC_EXEC_MASK    1
#define ACC_WRITE_MASK   PT_WRITABLE_MASK
#define ACC_USER_MASK    PT_USER_MASK
#define ACC_ALL          (ACC_EXEC_MASK | ACC_WRITE_MASK | ACC_USER_MASK)

#define CREATE_TRACE_POINTS
#include "mmutrace.h"

#define SHADOW_PT_INDEX(addr, level) PT64_INDEX(addr, level)

struct kvm_rmap_desc {
	u64 *sptes[RMAP_EXT];
	struct kvm_rmap_desc *more;
};

struct kvm_shadow_walk_iterator {
	u64 addr;
	hpa_t shadow_addr;
	int level;
	u64 *sptep;
	unsigned index;
};

#define for_each_shadow_entry(_vcpu, _addr, _walker)    \
	for (shadow_walk_init(&(_walker), _vcpu, _addr);	\
	     shadow_walk_okay(&(_walker));			\
	     shadow_walk_next(&(_walker)))
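
/*
 * Illustrative sketch of how the walker is used by the mapping code later
 * in this file (not an additional API):
 *
 *	for_each_shadow_entry(vcpu, (u64)gfn << PAGE_SHIFT, iterator) {
 *		if (iterator.level == PT_PAGE_TABLE_LEVEL) {
 *			... install the leaf spte at iterator.sptep ...
 *			break;
 *		}
 *		... allocate/link intermediate shadow pages as needed ...
 *	}
 */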

struct kvm_unsync_walk {
	int (*entry) (struct kvm_mmu_page *sp, struct kvm_unsync_walk *walk);
};

typedef int (*mmu_parent_walk_fn) (struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp);

static struct kmem_cache *pte_chain_cache;
static struct kmem_cache *rmap_desc_cache;
static struct kmem_cache *mmu_page_header_cache;

static u64 __read_mostly shadow_trap_nonpresent_pte;
static u64 __read_mostly shadow_notrap_nonpresent_pte;
static u64 __read_mostly shadow_base_present_pte;
static u64 __read_mostly shadow_nx_mask;
static u64 __read_mostly shadow_x_mask;	/* mutually exclusive with nx_mask */
static u64 __read_mostly shadow_user_mask;
static u64 __read_mostly shadow_accessed_mask;
static u64 __read_mostly shadow_dirty_mask;

static inline u64 rsvd_bits(int s, int e)
{
	return ((1ULL << (e - s + 1)) - 1) << s;
}
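
/*
 * Example: rsvd_bits(12, 51) evaluates to 0x000ffffffffff000ULL, i.e. a
 * mask covering physical-address bits 12..51.
 */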

void kvm_mmu_set_nonpresent_ptes(u64 trap_pte, u64 notrap_pte)
{
	shadow_trap_nonpresent_pte = trap_pte;
	shadow_notrap_nonpresent_pte = notrap_pte;
}
EXPORT_SYMBOL_GPL(kvm_mmu_set_nonpresent_ptes);

void kvm_mmu_set_base_ptes(u64 base_pte)
{
	shadow_base_present_pte = base_pte;
}
EXPORT_SYMBOL_GPL(kvm_mmu_set_base_ptes);

void kvm_mmu_set_mask_ptes(u64 user_mask, u64 accessed_mask,
		u64 dirty_mask, u64 nx_mask, u64 x_mask)
{
	shadow_user_mask = user_mask;
	shadow_accessed_mask = accessed_mask;
	shadow_dirty_mask = dirty_mask;
	shadow_nx_mask = nx_mask;
	shadow_x_mask = x_mask;
}
EXPORT_SYMBOL_GPL(kvm_mmu_set_mask_ptes);

static int is_write_protection(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.cr0 & X86_CR0_WP;
}

static int is_cpuid_PSE36(void)
{
	return 1;
}

static int is_nx(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.shadow_efer & EFER_NX;
}

static int is_shadow_present_pte(u64 pte)
{
	return pte != shadow_trap_nonpresent_pte
		&& pte != shadow_notrap_nonpresent_pte;
}

static int is_large_pte(u64 pte)
{
	return pte & PT_PAGE_SIZE_MASK;
}

static int is_writeble_pte(unsigned long pte)
{
	return pte & PT_WRITABLE_MASK;
}

static int is_dirty_gpte(unsigned long pte)
{
	return pte & PT_DIRTY_MASK;
}

static int is_rmap_spte(u64 pte)
{
	return is_shadow_present_pte(pte);
}

static int is_last_spte(u64 pte, int level)
{
	if (level == PT_PAGE_TABLE_LEVEL)
		return 1;
	if (is_large_pte(pte))
		return 1;
	return 0;
}

static pfn_t spte_to_pfn(u64 pte)
{
	return (pte & PT64_BASE_ADDR_MASK) >> PAGE_SHIFT;
}

static gfn_t pse36_gfn_delta(u32 gpte)
{
	int shift = 32 - PT32_DIR_PSE36_SHIFT - PAGE_SHIFT;

	return (gpte & PT32_DIR_PSE36_MASK) << shift;
}
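
/*
 * With PSE-36, bits 13..16 of a 4MB page directory entry hold physical
 * address bits 32..35; the shift above (32 - PT32_DIR_PSE36_SHIFT -
 * PAGE_SHIFT) converts that field directly into a gfn offset.
 */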

static void __set_spte(u64 *sptep, u64 spte)
{
#ifdef CONFIG_X86_64
	set_64bit((unsigned long *)sptep, spte);
#else
	set_64bit((unsigned long long *)sptep, spte);
#endif
}
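
/*
 * set_64bit() stores the 64-bit spte atomically even on 32-bit hosts, so a
 * concurrent hardware page walk never observes a half-updated entry.
 */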

static int mmu_topup_memory_cache(struct kvm_mmu_memory_cache *cache,
				  struct kmem_cache *base_cache, int min)
{
	void *obj;

	if (cache->nobjs >= min)
		return 0;
	while (cache->nobjs < ARRAY_SIZE(cache->objects)) {
		obj = kmem_cache_zalloc(base_cache, GFP_KERNEL);
		if (!obj)
			return -ENOMEM;
		cache->objects[cache->nobjs++] = obj;
	}
	return 0;
}
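
/*
 * The caches are topped up before mmu_lock is taken; later allocations via
 * mmu_memory_cache_alloc() then never need to sleep while the spinlock is
 * held.
 */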

static void mmu_free_memory_cache(struct kvm_mmu_memory_cache *mc)
{
	while (mc->nobjs)
		kfree(mc->objects[--mc->nobjs]);
}

static int mmu_topup_memory_cache_page(struct kvm_mmu_memory_cache *cache,
				       int min)
{
	struct page *page;

	if (cache->nobjs >= min)
		return 0;
	while (cache->nobjs < ARRAY_SIZE(cache->objects)) {
		page = alloc_page(GFP_KERNEL);
		if (!page)
			return -ENOMEM;
		set_page_private(page, 0);
		cache->objects[cache->nobjs++] = page_address(page);
	}
	return 0;
}

static void mmu_free_memory_cache_page(struct kvm_mmu_memory_cache *mc)
{
	while (mc->nobjs)
		free_page((unsigned long)mc->objects[--mc->nobjs]);
}

static int mmu_topup_memory_caches(struct kvm_vcpu *vcpu)
{
	int r;

	r = mmu_topup_memory_cache(&vcpu->arch.mmu_pte_chain_cache,
				   pte_chain_cache, 4);
	if (r)
		goto out;
	r = mmu_topup_memory_cache(&vcpu->arch.mmu_rmap_desc_cache,
				   rmap_desc_cache, 4);
	if (r)
		goto out;
	r = mmu_topup_memory_cache_page(&vcpu->arch.mmu_page_cache, 8);
	if (r)
		goto out;
	r = mmu_topup_memory_cache(&vcpu->arch.mmu_page_header_cache,
				   mmu_page_header_cache, 4);
out:
	return r;
}

static void mmu_free_memory_caches(struct kvm_vcpu *vcpu)
{
	mmu_free_memory_cache(&vcpu->arch.mmu_pte_chain_cache);
	mmu_free_memory_cache(&vcpu->arch.mmu_rmap_desc_cache);
	mmu_free_memory_cache_page(&vcpu->arch.mmu_page_cache);
	mmu_free_memory_cache(&vcpu->arch.mmu_page_header_cache);
}

static void *mmu_memory_cache_alloc(struct kvm_mmu_memory_cache *mc,
				    size_t size)
{
	void *p;

	BUG_ON(!mc->nobjs);
	p = mc->objects[--mc->nobjs];
	return p;
}

static struct kvm_pte_chain *mmu_alloc_pte_chain(struct kvm_vcpu *vcpu)
{
	return mmu_memory_cache_alloc(&vcpu->arch.mmu_pte_chain_cache,
				      sizeof(struct kvm_pte_chain));
}

static void mmu_free_pte_chain(struct kvm_pte_chain *pc)
{
	kfree(pc);
}

static struct kvm_rmap_desc *mmu_alloc_rmap_desc(struct kvm_vcpu *vcpu)
{
	return mmu_memory_cache_alloc(&vcpu->arch.mmu_rmap_desc_cache,
				      sizeof(struct kvm_rmap_desc));
}

static void mmu_free_rmap_desc(struct kvm_rmap_desc *rd)
{
	kfree(rd);
}

/*
 * Return the pointer to the largepage write count for a given
 * gfn, handling slots that are not large page aligned.
 */
static int *slot_largepage_idx(gfn_t gfn,
			       struct kvm_memory_slot *slot,
			       int level)
{
	unsigned long idx;

	idx = (gfn / KVM_PAGES_PER_HPAGE(level)) -
	      (slot->base_gfn / KVM_PAGES_PER_HPAGE(level));
	return &slot->lpage_info[level - 2][idx].write_count;
}
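
/*
 * Example: at PT_DIRECTORY_LEVEL on x86-64, KVM_PAGES_PER_HPAGE() is 512,
 * so with an aligned base_gfn a gfn 1024 pages past slot->base_gfn yields
 * idx 2.
 */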

static void account_shadowed(struct kvm *kvm, gfn_t gfn)
{
	struct kvm_memory_slot *slot;
	int *write_count;
	int i;

	gfn = unalias_gfn(kvm, gfn);

	slot = gfn_to_memslot_unaliased(kvm, gfn);
	for (i = PT_DIRECTORY_LEVEL;
	     i < PT_PAGE_TABLE_LEVEL + KVM_NR_PAGE_SIZES; ++i) {
		write_count = slot_largepage_idx(gfn, slot, i);
		*write_count += 1;
	}
}

static void unaccount_shadowed(struct kvm *kvm, gfn_t gfn)
{
	struct kvm_memory_slot *slot;
	int *write_count;
	int i;

	gfn = unalias_gfn(kvm, gfn);
	for (i = PT_DIRECTORY_LEVEL;
	     i < PT_PAGE_TABLE_LEVEL + KVM_NR_PAGE_SIZES; ++i) {
		slot = gfn_to_memslot_unaliased(kvm, gfn);
		write_count = slot_largepage_idx(gfn, slot, i);
		*write_count -= 1;
		WARN_ON(*write_count < 0);
	}
}

static int has_wrprotected_page(struct kvm *kvm,
				gfn_t gfn,
				int level)
{
	struct kvm_memory_slot *slot;
	int *largepage_idx;

	gfn = unalias_gfn(kvm, gfn);
	slot = gfn_to_memslot_unaliased(kvm, gfn);
	if (slot) {
		largepage_idx = slot_largepage_idx(gfn, slot, level);
		return *largepage_idx;
	}

	return 1;
}

static int host_mapping_level(struct kvm *kvm, gfn_t gfn)
{
	unsigned long page_size = PAGE_SIZE;
	struct vm_area_struct *vma;
	unsigned long addr;
	int i, ret = 0;

	addr = gfn_to_hva(kvm, gfn);
	if (kvm_is_error_hva(addr))
		return page_size;

	down_read(&current->mm->mmap_sem);
	vma = find_vma(current->mm, addr);
	if (!vma)
		goto out;

	page_size = vma_kernel_pagesize(vma);

out:
	up_read(&current->mm->mmap_sem);

	for (i = PT_PAGE_TABLE_LEVEL;
	     i < (PT_PAGE_TABLE_LEVEL + KVM_NR_PAGE_SIZES); ++i) {
		if (page_size >= KVM_HPAGE_SIZE(i))
			ret = i;
		else
			break;
	}

	return ret;
}

static int mapping_level(struct kvm_vcpu *vcpu, gfn_t large_gfn)
{
	struct kvm_memory_slot *slot;
	int host_level;
	int level = PT_PAGE_TABLE_LEVEL;

	slot = gfn_to_memslot(vcpu->kvm, large_gfn);
	if (slot && slot->dirty_bitmap)
		return PT_PAGE_TABLE_LEVEL;

	host_level = host_mapping_level(vcpu->kvm, large_gfn);

	if (host_level == PT_PAGE_TABLE_LEVEL)
		return host_level;

	for (level = PT_DIRECTORY_LEVEL; level <= host_level; ++level) {
		if (has_wrprotected_page(vcpu->kvm, large_gfn, level))
			break;
	}

	return level - 1;
}
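
/*
 * mapping_level() thus returns the largest level that the host mapping
 * allows and that contains no write-protected guest page; e.g. a 2MB host
 * mapping with no shadowed pages in range maps at PT_DIRECTORY_LEVEL.
 */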

/*
 * Take gfn and return the reverse mapping to it.
 * Note: gfn must be unaliased before this function gets called.
 */
static unsigned long *gfn_to_rmap(struct kvm *kvm, gfn_t gfn, int level)
{
	struct kvm_memory_slot *slot;
	unsigned long idx;

	slot = gfn_to_memslot(kvm, gfn);
	if (likely(level == PT_PAGE_TABLE_LEVEL))
		return &slot->rmap[gfn - slot->base_gfn];

	idx = (gfn / KVM_PAGES_PER_HPAGE(level)) -
	      (slot->base_gfn / KVM_PAGES_PER_HPAGE(level));

	return &slot->lpage_info[level - 2][idx].rmap_pde;
}

/*
 * Reverse mapping data structures:
 *
 * If rmapp bit zero is zero, then rmapp points to the shadow page table
 * entry that points to page_address(page).
 *
 * If rmapp bit zero is one, then (rmapp & ~1) points to a struct
 * kvm_rmap_desc containing more mappings.
 *
 * Returns the number of rmap entries before the spte was added, or zero
 * if the spte was not added.
 */
static int rmap_add(struct kvm_vcpu *vcpu, u64 *spte, gfn_t gfn)
{
	struct kvm_mmu_page *sp;
	struct kvm_rmap_desc *desc;
	unsigned long *rmapp;
	int i, count = 0;

	if (!is_rmap_spte(*spte))
		return count;
	gfn = unalias_gfn(vcpu->kvm, gfn);
	sp = page_header(__pa(spte));
	sp->gfns[spte - sp->spt] = gfn;
	rmapp = gfn_to_rmap(vcpu->kvm, gfn, sp->role.level);
	if (!*rmapp) {
		rmap_printk("rmap_add: %p %llx 0->1\n", spte, *spte);
		*rmapp = (unsigned long)spte;
	} else if (!(*rmapp & 1)) {
		rmap_printk("rmap_add: %p %llx 1->many\n", spte, *spte);
		desc = mmu_alloc_rmap_desc(vcpu);
		desc->sptes[0] = (u64 *)*rmapp;
		desc->sptes[1] = spte;
		*rmapp = (unsigned long)desc | 1;
	} else {
		rmap_printk("rmap_add: %p %llx many->many\n", spte, *spte);
		desc = (struct kvm_rmap_desc *)(*rmapp & ~1ul);
		while (desc->sptes[RMAP_EXT-1] && desc->more) {
			desc = desc->more;
			count += RMAP_EXT;
		}
		if (desc->sptes[RMAP_EXT-1]) {
			desc->more = mmu_alloc_rmap_desc(vcpu);
			desc = desc->more;
		}
		for (i = 0; desc->sptes[i]; ++i)
			;
		desc->sptes[i] = spte;
	}
	return count;
}
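
/*
 * Worked example of the encoding above: the first spte for a gfn is stored
 * directly in *rmapp with bit zero clear.  Adding a second spte allocates a
 * kvm_rmap_desc, moves the first spte into desc->sptes[0], puts the new
 * spte in desc->sptes[1] and sets *rmapp to (unsigned long)desc | 1.  A
 * fifth spte chains a new desc via desc->more.
 */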

static void rmap_desc_remove_entry(unsigned long *rmapp,
				   struct kvm_rmap_desc *desc,
				   int i,
				   struct kvm_rmap_desc *prev_desc)
{
	int j;

	for (j = RMAP_EXT - 1; !desc->sptes[j] && j > i; --j)
		;
	desc->sptes[i] = desc->sptes[j];
	desc->sptes[j] = NULL;
	if (j != 0)
		return;
	if (!prev_desc && !desc->more)
		*rmapp = (unsigned long)desc->sptes[0];
	else
		if (prev_desc)
			prev_desc->more = desc->more;
		else
			*rmapp = (unsigned long)desc->more | 1;
	mmu_free_rmap_desc(desc);
}

static void rmap_remove(struct kvm *kvm, u64 *spte)
{
	struct kvm_rmap_desc *desc;
	struct kvm_rmap_desc *prev_desc;
	struct kvm_mmu_page *sp;
	pfn_t pfn;
	unsigned long *rmapp;
	int i;

	if (!is_rmap_spte(*spte))
		return;
	sp = page_header(__pa(spte));
	pfn = spte_to_pfn(*spte);
	if (*spte & shadow_accessed_mask)
		kvm_set_pfn_accessed(pfn);
	if (is_writeble_pte(*spte))
		kvm_release_pfn_dirty(pfn);
	else
		kvm_release_pfn_clean(pfn);
	rmapp = gfn_to_rmap(kvm, sp->gfns[spte - sp->spt], sp->role.level);
	if (!*rmapp) {
		printk(KERN_ERR "rmap_remove: %p %llx 0->BUG\n", spte, *spte);
		BUG();
	} else if (!(*rmapp & 1)) {
		rmap_printk("rmap_remove: %p %llx 1->0\n", spte, *spte);
		if ((u64 *)*rmapp != spte) {
			printk(KERN_ERR "rmap_remove: %p %llx 1->BUG\n",
			       spte, *spte);
			BUG();
		}
		*rmapp = 0;
	} else {
		rmap_printk("rmap_remove: %p %llx many->many\n", spte, *spte);
		desc = (struct kvm_rmap_desc *)(*rmapp & ~1ul);
		prev_desc = NULL;
		while (desc) {
			for (i = 0; i < RMAP_EXT && desc->sptes[i]; ++i)
				if (desc->sptes[i] == spte) {
					rmap_desc_remove_entry(rmapp,
							       desc, i,
							       prev_desc);
					return;
				}
			prev_desc = desc;
			desc = desc->more;
		}
		BUG();
	}
}

static u64 *rmap_next(struct kvm *kvm, unsigned long *rmapp, u64 *spte)
{
	struct kvm_rmap_desc *desc;
	struct kvm_rmap_desc *prev_desc;
	u64 *prev_spte;
	int i;

	if (!*rmapp)
		return NULL;
	else if (!(*rmapp & 1)) {
		if (!spte)
			return (u64 *)*rmapp;
		return NULL;
	}
	desc = (struct kvm_rmap_desc *)(*rmapp & ~1ul);
	prev_desc = NULL;
	prev_spte = NULL;
	while (desc) {
		for (i = 0; i < RMAP_EXT && desc->sptes[i]; ++i) {
			if (prev_spte == spte)
				return desc->sptes[i];
			prev_spte = desc->sptes[i];
		}
		desc = desc->more;
	}
	return NULL;
}
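
/*
 * rmap_next() iteration contract: pass spte == NULL to get the first spte
 * for the gfn, and the previously returned spte to get the next one; a
 * NULL return signals the end of the chain.
 */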

static int rmap_write_protect(struct kvm *kvm, u64 gfn)
{
	unsigned long *rmapp;
	u64 *spte;
	int i, write_protected = 0;

	gfn = unalias_gfn(kvm, gfn);
	rmapp = gfn_to_rmap(kvm, gfn, PT_PAGE_TABLE_LEVEL);

	spte = rmap_next(kvm, rmapp, NULL);
	while (spte) {
		BUG_ON(!spte);
		BUG_ON(!(*spte & PT_PRESENT_MASK));
		rmap_printk("rmap_write_protect: spte %p %llx\n", spte, *spte);
		if (is_writeble_pte(*spte)) {
			__set_spte(spte, *spte & ~PT_WRITABLE_MASK);
			write_protected = 1;
		}
		spte = rmap_next(kvm, rmapp, spte);
	}
	if (write_protected) {
		pfn_t pfn;

		spte = rmap_next(kvm, rmapp, NULL);
		pfn = spte_to_pfn(*spte);
		kvm_set_pfn_dirty(pfn);
	}

	/* check for huge page mappings */
	for (i = PT_DIRECTORY_LEVEL;
	     i < PT_PAGE_TABLE_LEVEL + KVM_NR_PAGE_SIZES; ++i) {
		rmapp = gfn_to_rmap(kvm, gfn, i);
		spte = rmap_next(kvm, rmapp, NULL);
		while (spte) {
			BUG_ON(!spte);
			BUG_ON(!(*spte & PT_PRESENT_MASK));
			BUG_ON((*spte & (PT_PAGE_SIZE_MASK|PT_PRESENT_MASK)) != (PT_PAGE_SIZE_MASK|PT_PRESENT_MASK));
			pgprintk("rmap_write_protect(large): spte %p %llx %lld\n", spte, *spte, gfn);
			if (is_writeble_pte(*spte)) {
				rmap_remove(kvm, spte);
				--kvm->stat.lpages;
				__set_spte(spte, shadow_trap_nonpresent_pte);
				spte = NULL;
				write_protected = 1;
			}
			spte = rmap_next(kvm, rmapp, spte);
		}
	}

	return write_protected;
}

static int kvm_unmap_rmapp(struct kvm *kvm, unsigned long *rmapp)
{
	u64 *spte;
	int need_tlb_flush = 0;

	while ((spte = rmap_next(kvm, rmapp, NULL))) {
		BUG_ON(!(*spte & PT_PRESENT_MASK));
		rmap_printk("kvm_rmap_unmap_hva: spte %p %llx\n", spte, *spte);
		rmap_remove(kvm, spte);
		__set_spte(spte, shadow_trap_nonpresent_pte);
		need_tlb_flush = 1;
	}
	return need_tlb_flush;
}

static int kvm_handle_hva(struct kvm *kvm, unsigned long hva,
			  int (*handler)(struct kvm *kvm, unsigned long *rmapp))
{
	int i, j;
	int retval = 0;

	/*
	 * If mmap_sem isn't taken, we can look at the memslots with only
	 * the mmu_lock by skipping over the slots with userspace_addr == 0.
	 */
	for (i = 0; i < kvm->nmemslots; i++) {
		struct kvm_memory_slot *memslot = &kvm->memslots[i];
		unsigned long start = memslot->userspace_addr;
		unsigned long end;

		/* mmu_lock protects userspace_addr */
		if (!start)
			continue;

		end = start + (memslot->npages << PAGE_SHIFT);
		if (hva >= start && hva < end) {
			gfn_t gfn_offset = (hva - start) >> PAGE_SHIFT;

			retval |= handler(kvm, &memslot->rmap[gfn_offset]);

			for (j = 0; j < KVM_NR_PAGE_SIZES - 1; ++j) {
				int idx = gfn_offset;
				idx /= KVM_PAGES_PER_HPAGE(PT_DIRECTORY_LEVEL + j);
				retval |= handler(kvm,
					&memslot->lpage_info[j][idx].rmap_pde);
			}
		}
	}

	return retval;
}

int kvm_unmap_hva(struct kvm *kvm, unsigned long hva)
{
	return kvm_handle_hva(kvm, hva, kvm_unmap_rmapp);
}

static int kvm_age_rmapp(struct kvm *kvm, unsigned long *rmapp)
{
	u64 *spte;
	int young = 0;

	/* always return old for EPT */
	if (!shadow_accessed_mask)
		return 0;

	spte = rmap_next(kvm, rmapp, NULL);
	while (spte) {
		int _young;
		u64 _spte = *spte;
		BUG_ON(!(_spte & PT_PRESENT_MASK));
		_young = _spte & PT_ACCESSED_MASK;
		if (_young) {
			young = 1;
			clear_bit(PT_ACCESSED_SHIFT, (unsigned long *)spte);
		}
		spte = rmap_next(kvm, rmapp, spte);
	}
	return young;
}

#define RMAP_RECYCLE_THRESHOLD 1000

static void rmap_recycle(struct kvm_vcpu *vcpu, u64 *spte, gfn_t gfn)
{
	unsigned long *rmapp;
	struct kvm_mmu_page *sp;

	sp = page_header(__pa(spte));

	gfn = unalias_gfn(vcpu->kvm, gfn);
	rmapp = gfn_to_rmap(vcpu->kvm, gfn, sp->role.level);

	kvm_unmap_rmapp(vcpu->kvm, rmapp);
	kvm_flush_remote_tlbs(vcpu->kvm);
}

int kvm_age_hva(struct kvm *kvm, unsigned long hva)
{
	return kvm_handle_hva(kvm, hva, kvm_age_rmapp);
}

#ifdef MMU_DEBUG
static int is_empty_shadow_page(u64 *spt)
{
	u64 *pos;
	u64 *end;

	for (pos = spt, end = pos + PAGE_SIZE / sizeof(u64); pos != end; pos++)
		if (is_shadow_present_pte(*pos)) {
			printk(KERN_ERR "%s: %p %llx\n", __func__,
			       pos, *pos);
			return 0;
		}
	return 1;
}
#endif

static void kvm_mmu_free_page(struct kvm *kvm, struct kvm_mmu_page *sp)
{
	ASSERT(is_empty_shadow_page(sp->spt));
	list_del(&sp->link);
	__free_page(virt_to_page(sp->spt));
	__free_page(virt_to_page(sp->gfns));
	kfree(sp);
	++kvm->arch.n_free_mmu_pages;
}

static unsigned kvm_page_table_hashfn(gfn_t gfn)
{
	return gfn & ((1 << KVM_MMU_HASH_SHIFT) - 1);
}
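
/*
 * E.g. with a KVM_MMU_HASH_SHIFT of 10 (the actual value is defined in the
 * arch header) this folds the gfn into one of 1024 hash buckets.
 */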

static struct kvm_mmu_page *kvm_mmu_alloc_page(struct kvm_vcpu *vcpu,
					       u64 *parent_pte)
{
	struct kvm_mmu_page *sp;

	sp = mmu_memory_cache_alloc(&vcpu->arch.mmu_page_header_cache, sizeof *sp);
	sp->spt = mmu_memory_cache_alloc(&vcpu->arch.mmu_page_cache, PAGE_SIZE);
	sp->gfns = mmu_memory_cache_alloc(&vcpu->arch.mmu_page_cache, PAGE_SIZE);
	set_page_private(virt_to_page(sp->spt), (unsigned long)sp);
	list_add(&sp->link, &vcpu->kvm->arch.active_mmu_pages);
	INIT_LIST_HEAD(&sp->oos_link);
	bitmap_zero(sp->slot_bitmap, KVM_MEMORY_SLOTS + KVM_PRIVATE_MEM_SLOTS);
	sp->multimapped = 0;
	sp->parent_pte = parent_pte;
	--vcpu->kvm->arch.n_free_mmu_pages;
	return sp;
}

static void mmu_page_add_parent_pte(struct kvm_vcpu *vcpu,
				    struct kvm_mmu_page *sp, u64 *parent_pte)
{
	struct kvm_pte_chain *pte_chain;
	struct hlist_node *node;
	int i;

	if (!parent_pte)
		return;
	if (!sp->multimapped) {
		u64 *old = sp->parent_pte;

		if (!old) {
			sp->parent_pte = parent_pte;
			return;
		}
		sp->multimapped = 1;
		pte_chain = mmu_alloc_pte_chain(vcpu);
		INIT_HLIST_HEAD(&sp->parent_ptes);
		hlist_add_head(&pte_chain->link, &sp->parent_ptes);
		pte_chain->parent_ptes[0] = old;
	}
	hlist_for_each_entry(pte_chain, node, &sp->parent_ptes, link) {
		if (pte_chain->parent_ptes[NR_PTE_CHAIN_ENTRIES-1])
			continue;
		for (i = 0; i < NR_PTE_CHAIN_ENTRIES; ++i)
			if (!pte_chain->parent_ptes[i]) {
				pte_chain->parent_ptes[i] = parent_pte;
				return;
			}
	}
	pte_chain = mmu_alloc_pte_chain(vcpu);
	BUG_ON(!pte_chain);
	hlist_add_head(&pte_chain->link, &sp->parent_ptes);
	pte_chain->parent_ptes[0] = parent_pte;
}

static void mmu_page_remove_parent_pte(struct kvm_mmu_page *sp,
				       u64 *parent_pte)
{
	struct kvm_pte_chain *pte_chain;
	struct hlist_node *node;
	int i;

	if (!sp->multimapped) {
		BUG_ON(sp->parent_pte != parent_pte);
		sp->parent_pte = NULL;
		return;
	}
	hlist_for_each_entry(pte_chain, node, &sp->parent_ptes, link)
		for (i = 0; i < NR_PTE_CHAIN_ENTRIES; ++i) {
			if (!pte_chain->parent_ptes[i])
				break;
			if (pte_chain->parent_ptes[i] != parent_pte)
				continue;
			while (i + 1 < NR_PTE_CHAIN_ENTRIES
				&& pte_chain->parent_ptes[i + 1]) {
				pte_chain->parent_ptes[i]
					= pte_chain->parent_ptes[i + 1];
				++i;
			}
			pte_chain->parent_ptes[i] = NULL;
			if (i == 0) {
				hlist_del(&pte_chain->link);
				mmu_free_pte_chain(pte_chain);
				if (hlist_empty(&sp->parent_ptes)) {
					sp->multimapped = 0;
					sp->parent_pte = NULL;
				}
			}
			return;
		}
	BUG();
}

static void mmu_parent_walk(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
			    mmu_parent_walk_fn fn)
{
	struct kvm_pte_chain *pte_chain;
	struct hlist_node *node;
	struct kvm_mmu_page *parent_sp;
	int i;

	if (!sp->multimapped && sp->parent_pte) {
		parent_sp = page_header(__pa(sp->parent_pte));
		fn(vcpu, parent_sp);
		mmu_parent_walk(vcpu, parent_sp, fn);
		return;
	}
	hlist_for_each_entry(pte_chain, node, &sp->parent_ptes, link)
		for (i = 0; i < NR_PTE_CHAIN_ENTRIES; ++i) {
			if (!pte_chain->parent_ptes[i])
				break;
			parent_sp = page_header(__pa(pte_chain->parent_ptes[i]));
			fn(vcpu, parent_sp);
			mmu_parent_walk(vcpu, parent_sp, fn);
		}
}

static void kvm_mmu_update_unsync_bitmap(u64 *spte)
{
	unsigned int index;
	struct kvm_mmu_page *sp = page_header(__pa(spte));

	index = spte - sp->spt;
	if (!__test_and_set_bit(index, sp->unsync_child_bitmap))
		sp->unsync_children++;
	WARN_ON(!sp->unsync_children);
}

static void kvm_mmu_update_parents_unsync(struct kvm_mmu_page *sp)
{
	struct kvm_pte_chain *pte_chain;
	struct hlist_node *node;
	int i;

	if (!sp->parent_pte)
		return;

	if (!sp->multimapped) {
		kvm_mmu_update_unsync_bitmap(sp->parent_pte);
		return;
	}

	hlist_for_each_entry(pte_chain, node, &sp->parent_ptes, link)
		for (i = 0; i < NR_PTE_CHAIN_ENTRIES; ++i) {
			if (!pte_chain->parent_ptes[i])
				break;
			kvm_mmu_update_unsync_bitmap(pte_chain->parent_ptes[i]);
		}
}

static int unsync_walk_fn(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp)
{
	kvm_mmu_update_parents_unsync(sp);
	return 1;
}

static void kvm_mmu_mark_parents_unsync(struct kvm_vcpu *vcpu,
					struct kvm_mmu_page *sp)
{
	mmu_parent_walk(vcpu, sp, unsync_walk_fn);
	kvm_mmu_update_parents_unsync(sp);
}

static void nonpaging_prefetch_page(struct kvm_vcpu *vcpu,
				    struct kvm_mmu_page *sp)
{
	int i;

	for (i = 0; i < PT64_ENT_PER_PAGE; ++i)
		sp->spt[i] = shadow_trap_nonpresent_pte;
}

static int nonpaging_sync_page(struct kvm_vcpu *vcpu,
			       struct kvm_mmu_page *sp)
{
	return 1;
}

static void nonpaging_invlpg(struct kvm_vcpu *vcpu, gva_t gva)
{
}

#define KVM_PAGE_ARRAY_NR 16

struct kvm_mmu_pages {
	struct mmu_page_and_offset {
		struct kvm_mmu_page *sp;
		unsigned int idx;
	} page[KVM_PAGE_ARRAY_NR];
	unsigned int nr;
};

#define for_each_unsync_children(bitmap, idx)		\
	for (idx = find_first_bit(bitmap, 512);		\
	     idx < 512;					\
	     idx = find_next_bit(bitmap, 512, idx+1))
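
/*
 * unsync_child_bitmap has one bit per shadow pte (512 entries per page),
 * so this walk visits only the set bits instead of scanning the whole
 * page.
 */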

static int mmu_pages_add(struct kvm_mmu_pages *pvec, struct kvm_mmu_page *sp,
			 int idx)
{
	int i;

	if (sp->unsync)
		for (i = 0; i < pvec->nr; i++)
			if (pvec->page[i].sp == sp)
				return 0;

	pvec->page[pvec->nr].sp = sp;
	pvec->page[pvec->nr].idx = idx;
	pvec->nr++;
	return (pvec->nr == KVM_PAGE_ARRAY_NR);
}

static int __mmu_unsync_walk(struct kvm_mmu_page *sp,
			     struct kvm_mmu_pages *pvec)
{
	int i, ret, nr_unsync_leaf = 0;

	for_each_unsync_children(sp->unsync_child_bitmap, i) {
		u64 ent = sp->spt[i];

		if (is_shadow_present_pte(ent) && !is_large_pte(ent)) {
			struct kvm_mmu_page *child;
			child = page_header(ent & PT64_BASE_ADDR_MASK);

			if (child->unsync_children) {
				if (mmu_pages_add(pvec, child, i))
					return -ENOSPC;

				ret = __mmu_unsync_walk(child, pvec);
				if (!ret)
					__clear_bit(i, sp->unsync_child_bitmap);
				else if (ret > 0)
					nr_unsync_leaf += ret;
				else
					return ret;
			}

			if (child->unsync) {
				nr_unsync_leaf++;
				if (mmu_pages_add(pvec, child, i))
					return -ENOSPC;
			}
		}
	}

	if (find_first_bit(sp->unsync_child_bitmap, 512) == 512)
		sp->unsync_children = 0;

	return nr_unsync_leaf;
}

static int mmu_unsync_walk(struct kvm_mmu_page *sp,
			   struct kvm_mmu_pages *pvec)
{
	if (!sp->unsync_children)
		return 0;

	mmu_pages_add(pvec, sp, 0);
	return __mmu_unsync_walk(sp, pvec);
}

static struct kvm_mmu_page *kvm_mmu_lookup_page(struct kvm *kvm, gfn_t gfn)
{
	unsigned index;
	struct hlist_head *bucket;
	struct kvm_mmu_page *sp;
	struct hlist_node *node;

	pgprintk("%s: looking for gfn %lx\n", __func__, gfn);
	index = kvm_page_table_hashfn(gfn);
	bucket = &kvm->arch.mmu_page_hash[index];
	hlist_for_each_entry(sp, node, bucket, hash_link)
		if (sp->gfn == gfn && !sp->role.direct
		    && !sp->role.invalid) {
			pgprintk("%s: found role %x\n",
				 __func__, sp->role.word);
			return sp;
		}
	return NULL;
}

static void kvm_unlink_unsync_page(struct kvm *kvm, struct kvm_mmu_page *sp)
{
	WARN_ON(!sp->unsync);
	sp->unsync = 0;
	--kvm->stat.mmu_unsync;
}

static int kvm_mmu_zap_page(struct kvm *kvm, struct kvm_mmu_page *sp);

static int kvm_sync_page(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp)
{
	if (sp->role.glevels != vcpu->arch.mmu.root_level) {
		kvm_mmu_zap_page(vcpu->kvm, sp);
		return 1;
	}

	trace_kvm_mmu_sync_page(sp);
	if (rmap_write_protect(vcpu->kvm, sp->gfn))
		kvm_flush_remote_tlbs(vcpu->kvm);
	kvm_unlink_unsync_page(vcpu->kvm, sp);
	if (vcpu->arch.mmu.sync_page(vcpu, sp)) {
		kvm_mmu_zap_page(vcpu->kvm, sp);
		return 1;
	}

	kvm_mmu_flush_tlb(vcpu);
	return 0;
}

struct mmu_page_path {
	struct kvm_mmu_page *parent[PT64_ROOT_LEVEL-1];
	unsigned int idx[PT64_ROOT_LEVEL-1];
};

#define for_each_sp(pvec, sp, parents, i)			\
		for (i = mmu_pages_next(&pvec, &parents, -1),	\
			sp = pvec.page[i].sp;			\
			i < pvec.nr && ({ sp = pvec.page[i].sp; 1;});	\
			i = mmu_pages_next(&pvec, &parents, i))

static int mmu_pages_next(struct kvm_mmu_pages *pvec,
			  struct mmu_page_path *parents,
			  int i)
{
	int n;

	for (n = i+1; n < pvec->nr; n++) {
		struct kvm_mmu_page *sp = pvec->page[n].sp;

		if (sp->role.level == PT_PAGE_TABLE_LEVEL) {
			parents->idx[0] = pvec->page[n].idx;
			return n;
		}

		parents->parent[sp->role.level-2] = sp;
		parents->idx[sp->role.level-1] = pvec->page[n].idx;
	}

	return n;
}

static void mmu_pages_clear_parents(struct mmu_page_path *parents)
{
	struct kvm_mmu_page *sp;
	unsigned int level = 0;

	do {
		unsigned int idx = parents->idx[level];

		sp = parents->parent[level];
		if (!sp)
			return;

		--sp->unsync_children;
		WARN_ON((int)sp->unsync_children < 0);
		__clear_bit(idx, sp->unsync_child_bitmap);
		level++;
	} while (level < PT64_ROOT_LEVEL-1 && !sp->unsync_children);
}

static void kvm_mmu_pages_init(struct kvm_mmu_page *parent,
			       struct mmu_page_path *parents,
			       struct kvm_mmu_pages *pvec)
{
	parents->parent[parent->role.level-1] = NULL;
	pvec->nr = 0;
}

static void mmu_sync_children(struct kvm_vcpu *vcpu,
			      struct kvm_mmu_page *parent)
{
	int i;
	struct kvm_mmu_page *sp;
	struct mmu_page_path parents;
	struct kvm_mmu_pages pages;

	kvm_mmu_pages_init(parent, &parents, &pages);
	while (mmu_unsync_walk(parent, &pages)) {
		int protected = 0;

		for_each_sp(pages, sp, parents, i)
			protected |= rmap_write_protect(vcpu->kvm, sp->gfn);

		if (protected)
			kvm_flush_remote_tlbs(vcpu->kvm);

		for_each_sp(pages, sp, parents, i) {
			kvm_sync_page(vcpu, sp);
			mmu_pages_clear_parents(&parents);
		}
		cond_resched_lock(&vcpu->kvm->mmu_lock);
		kvm_mmu_pages_init(parent, &parents, &pages);
	}
}

static struct kvm_mmu_page *kvm_mmu_get_page(struct kvm_vcpu *vcpu,
					     gfn_t gfn,
					     gva_t gaddr,
					     unsigned level,
					     int direct,
					     unsigned access,
					     u64 *parent_pte)
{
	union kvm_mmu_page_role role;
	unsigned index;
	unsigned quadrant;
	struct hlist_head *bucket;
	struct kvm_mmu_page *sp;
	struct hlist_node *node, *tmp;

	role = vcpu->arch.mmu.base_role;
	role.level = level;
	role.direct = direct;
	role.access = access;
	if (vcpu->arch.mmu.root_level <= PT32_ROOT_LEVEL) {
		quadrant = gaddr >> (PAGE_SHIFT + (PT64_PT_BITS * level));
		quadrant &= (1 << ((PT32_PT_BITS - PT64_PT_BITS) * level)) - 1;
		role.quadrant = quadrant;
	}
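	/*
	 * For 32-bit guests a guest page table page covers more address
	 * space than one shadow page can, so a single guest page may be
	 * shadowed by several shadow pages; the quadrant records which
	 * fraction of the guest page this shadow page maps.
	 */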
	index = kvm_page_table_hashfn(gfn);
	bucket = &vcpu->kvm->arch.mmu_page_hash[index];
	hlist_for_each_entry_safe(sp, node, tmp, bucket, hash_link)
		if (sp->gfn == gfn) {
			if (sp->unsync)
				if (kvm_sync_page(vcpu, sp))
					continue;

			if (sp->role.word != role.word)
				continue;

			mmu_page_add_parent_pte(vcpu, sp, parent_pte);
			if (sp->unsync_children) {
				set_bit(KVM_REQ_MMU_SYNC, &vcpu->requests);
				kvm_mmu_mark_parents_unsync(vcpu, sp);
			}
			trace_kvm_mmu_get_page(sp, false);
			return sp;
		}
	++vcpu->kvm->stat.mmu_cache_miss;
	sp = kvm_mmu_alloc_page(vcpu, parent_pte);
	if (!sp)
		return sp;
	sp->gfn = gfn;
	sp->role = role;
	hlist_add_head(&sp->hash_link, bucket);
	if (!direct) {
		if (rmap_write_protect(vcpu->kvm, gfn))
			kvm_flush_remote_tlbs(vcpu->kvm);
		account_shadowed(vcpu->kvm, gfn);
	}
	if (shadow_trap_nonpresent_pte != shadow_notrap_nonpresent_pte)
		vcpu->arch.mmu.prefetch_page(vcpu, sp);
	else
		nonpaging_prefetch_page(vcpu, sp);
	trace_kvm_mmu_get_page(sp, true);
	return sp;
}

static void shadow_walk_init(struct kvm_shadow_walk_iterator *iterator,
			     struct kvm_vcpu *vcpu, u64 addr)
{
	iterator->addr = addr;
	iterator->shadow_addr = vcpu->arch.mmu.root_hpa;
	iterator->level = vcpu->arch.mmu.shadow_root_level;
	if (iterator->level == PT32E_ROOT_LEVEL) {
		iterator->shadow_addr
			= vcpu->arch.mmu.pae_root[(addr >> 30) & 3];
		iterator->shadow_addr &= PT64_BASE_ADDR_MASK;
		--iterator->level;
		if (!iterator->shadow_addr)
			iterator->level = 0;
	}
}

static bool shadow_walk_okay(struct kvm_shadow_walk_iterator *iterator)
{
	if (iterator->level < PT_PAGE_TABLE_LEVEL)
		return false;

	if (iterator->level == PT_PAGE_TABLE_LEVEL)
		if (is_large_pte(*iterator->sptep))
			return false;

	iterator->index = SHADOW_PT_INDEX(iterator->addr, iterator->level);
	iterator->sptep = ((u64 *)__va(iterator->shadow_addr)) + iterator->index;
	return true;
}

static void shadow_walk_next(struct kvm_shadow_walk_iterator *iterator)
{
	iterator->shadow_addr = *iterator->sptep & PT64_BASE_ADDR_MASK;
	--iterator->level;
}

static void kvm_mmu_page_unlink_children(struct kvm *kvm,
					 struct kvm_mmu_page *sp)
{
	unsigned i;
	u64 *pt;
	u64 ent;

	pt = sp->spt;

	for (i = 0; i < PT64_ENT_PER_PAGE; ++i) {
		ent = pt[i];

		if (is_shadow_present_pte(ent)) {
			if (!is_last_spte(ent, sp->role.level)) {
				ent &= PT64_BASE_ADDR_MASK;
				mmu_page_remove_parent_pte(page_header(ent),
							   &pt[i]);
			} else {
				if (is_large_pte(ent))
					--kvm->stat.lpages;
				rmap_remove(kvm, &pt[i]);
			}
		}
		pt[i] = shadow_trap_nonpresent_pte;
	}
}

static void kvm_mmu_put_page(struct kvm_mmu_page *sp, u64 *parent_pte)
{
	mmu_page_remove_parent_pte(sp, parent_pte);
}

static void kvm_mmu_reset_last_pte_updated(struct kvm *kvm)
{
	int i;
	struct kvm_vcpu *vcpu;

	kvm_for_each_vcpu(i, vcpu, kvm)
		vcpu->arch.last_pte_updated = NULL;
}

static void kvm_mmu_unlink_parents(struct kvm *kvm, struct kvm_mmu_page *sp)
{
	u64 *parent_pte;

	while (sp->multimapped || sp->parent_pte) {
		if (!sp->multimapped)
			parent_pte = sp->parent_pte;
		else {
			struct kvm_pte_chain *chain;

			chain = container_of(sp->parent_ptes.first,
					     struct kvm_pte_chain, link);
			parent_pte = chain->parent_ptes[0];
		}
		BUG_ON(!parent_pte);
		kvm_mmu_put_page(sp, parent_pte);
		__set_spte(parent_pte, shadow_trap_nonpresent_pte);
	}
}

static int mmu_zap_unsync_children(struct kvm *kvm,
				   struct kvm_mmu_page *parent)
{
	int i, zapped = 0;
	struct mmu_page_path parents;
	struct kvm_mmu_pages pages;

	if (parent->role.level == PT_PAGE_TABLE_LEVEL)
		return 0;

	kvm_mmu_pages_init(parent, &parents, &pages);
	while (mmu_unsync_walk(parent, &pages)) {
		struct kvm_mmu_page *sp;

		for_each_sp(pages, sp, parents, i) {
			kvm_mmu_zap_page(kvm, sp);
			mmu_pages_clear_parents(&parents);
		}
		zapped += pages.nr;
		kvm_mmu_pages_init(parent, &parents, &pages);
	}

	return zapped;
}

static int kvm_mmu_zap_page(struct kvm *kvm, struct kvm_mmu_page *sp)
{
	int ret;

	trace_kvm_mmu_zap_page(sp);
	++kvm->stat.mmu_shadow_zapped;
	ret = mmu_zap_unsync_children(kvm, sp);
	kvm_mmu_page_unlink_children(kvm, sp);
	kvm_mmu_unlink_parents(kvm, sp);
	kvm_flush_remote_tlbs(kvm);
	if (!sp->role.invalid && !sp->role.direct)
		unaccount_shadowed(kvm, sp->gfn);
	if (sp->unsync)
		kvm_unlink_unsync_page(kvm, sp);
	if (!sp->root_count) {
		hlist_del(&sp->hash_link);
		kvm_mmu_free_page(kvm, sp);
	} else {
		sp->role.invalid = 1;
		list_move(&sp->link, &kvm->arch.active_mmu_pages);
		kvm_reload_remote_mmus(kvm);
	}
	kvm_mmu_reset_last_pte_updated(kvm);
	return ret;
}

/*
 * Change the number of mmu pages allocated to the vm.
 * Note: if kvm_nr_mmu_pages is too small, you will get a deadlock.
 */
void kvm_mmu_change_mmu_pages(struct kvm *kvm, unsigned int kvm_nr_mmu_pages)
{
	int used_pages;

	used_pages = kvm->arch.n_alloc_mmu_pages - kvm->arch.n_free_mmu_pages;
	used_pages = max(0, used_pages);

	/*
	 * If we set the number of mmu pages to be smaller than the
	 * number of active pages, we must free some mmu pages before we
	 * change the value.
	 */
	if (used_pages > kvm_nr_mmu_pages) {
		while (used_pages > kvm_nr_mmu_pages) {
			struct kvm_mmu_page *page;

			page = container_of(kvm->arch.active_mmu_pages.prev,
					    struct kvm_mmu_page, link);
			kvm_mmu_zap_page(kvm, page);
			used_pages--;
		}
		kvm->arch.n_free_mmu_pages = 0;
	}
	else
		kvm->arch.n_free_mmu_pages += kvm_nr_mmu_pages
					 - kvm->arch.n_alloc_mmu_pages;

	kvm->arch.n_alloc_mmu_pages = kvm_nr_mmu_pages;
}
static int kvm_mmu_unprotect_page(struct kvm *kvm, gfn_t gfn)
{
	unsigned index;
	struct hlist_head *bucket;
	struct kvm_mmu_page *sp;
	struct hlist_node *node, *n;
	int r;

	pgprintk("%s: looking for gfn %lx\n", __func__, gfn);
	r = 0;
	index = kvm_page_table_hashfn(gfn);
	bucket = &kvm->arch.mmu_page_hash[index];
	hlist_for_each_entry_safe(sp, node, n, bucket, hash_link)
		if (sp->gfn == gfn && !sp->role.direct) {
			pgprintk("%s: gfn %lx role %x\n", __func__, gfn,
				 sp->role.word);
			r = 1;
			if (kvm_mmu_zap_page(kvm, sp))
				n = bucket->first;
		}
	return r;
}

static void mmu_unshadow(struct kvm *kvm, gfn_t gfn)
{
	unsigned index;
	struct hlist_head *bucket;
	struct kvm_mmu_page *sp;
	struct hlist_node *node, *nn;

	index = kvm_page_table_hashfn(gfn);
	bucket = &kvm->arch.mmu_page_hash[index];
	hlist_for_each_entry_safe(sp, node, nn, bucket, hash_link) {
		if (sp->gfn == gfn && !sp->role.direct
		    && !sp->role.invalid) {
			pgprintk("%s: zap %lx %x\n",
				 __func__, gfn, sp->role.word);
			kvm_mmu_zap_page(kvm, sp);
		}
	}
}

static void page_header_update_slot(struct kvm *kvm, void *pte, gfn_t gfn)
{
	int slot = memslot_id(kvm, gfn_to_memslot(kvm, gfn));
	struct kvm_mmu_page *sp = page_header(__pa(pte));

	__set_bit(slot, sp->slot_bitmap);
}

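/*
 * Convert every notrap-nonpresent spte in @sp back to the trapping
 * nonpresent encoding, so guest accesses through this page fault into
 * the mmu again. A no-op when both encodings are identical.
 */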
static void mmu_convert_notrap(struct kvm_mmu_page *sp)
{
	int i;
	u64 *pt = sp->spt;

	if (shadow_trap_nonpresent_pte == shadow_notrap_nonpresent_pte)
		return;

	for (i = 0; i < PT64_ENT_PER_PAGE; ++i) {
		if (pt[i] == shadow_notrap_nonpresent_pte)
			__set_spte(&pt[i], shadow_trap_nonpresent_pte);
	}
}

struct page *gva_to_page(struct kvm_vcpu *vcpu, gva_t gva)
{
	struct page *page;
	gpa_t gpa = vcpu->arch.mmu.gva_to_gpa(vcpu, gva);

	if (gpa == UNMAPPED_GVA)
		return NULL;

	page = gfn_to_page(vcpu->kvm, gpa >> PAGE_SHIFT);

	return page;
}

/*
 * The function is based on mtrr_type_lookup() in
 * arch/x86/kernel/cpu/mtrr/generic.c
 */
static int get_mtrr_type(struct mtrr_state_type *mtrr_state,
			 u64 start, u64 end)
{
	int i;
	u64 base, mask;
	u8 prev_match, curr_match;
	int num_var_ranges = KVM_NR_VAR_MTRR;

	if (!mtrr_state->enabled)
		return 0xFF;

	/* Make end inclusive instead of exclusive */
	end--;

	/* Look in fixed ranges. Just return the type as per start */
	if (mtrr_state->have_fixed && (start < 0x100000)) {
		int idx;

		if (start < 0x80000) {
			idx = 0;
			idx += (start >> 16);
			return mtrr_state->fixed_ranges[idx];
		} else if (start < 0xC0000) {
			idx = 1 * 8;
			idx += ((start - 0x80000) >> 14);
			return mtrr_state->fixed_ranges[idx];
		} else if (start < 0x1000000) {
			idx = 3 * 8;
			idx += ((start - 0xC0000) >> 12);
			return mtrr_state->fixed_ranges[idx];
		}
	}

	/*
	 * Look in variable ranges.
	 * Look for multiple ranges matching this address and pick the type
	 * as per MTRR precedence.
	 */
	if (!(mtrr_state->enabled & 2))
		return mtrr_state->def_type;

	prev_match = 0xFF;
	for (i = 0; i < num_var_ranges; ++i) {
		unsigned short start_state, end_state;

		if (!(mtrr_state->var_ranges[i].mask_lo & (1 << 11)))
			continue;

		base = (((u64)mtrr_state->var_ranges[i].base_hi) << 32) +
		       (mtrr_state->var_ranges[i].base_lo & PAGE_MASK);
		mask = (((u64)mtrr_state->var_ranges[i].mask_hi) << 32) +
		       (mtrr_state->var_ranges[i].mask_lo & PAGE_MASK);

		start_state = ((start & mask) == (base & mask));
		end_state = ((end & mask) == (base & mask));
		if (start_state != end_state)
			return 0xFE;

		if ((start & mask) != (base & mask))
			continue;

		curr_match = mtrr_state->var_ranges[i].base_lo & 0xff;
		if (prev_match == 0xFF) {
			prev_match = curr_match;
			continue;
		}

		if (prev_match == MTRR_TYPE_UNCACHABLE ||
		    curr_match == MTRR_TYPE_UNCACHABLE)
			return MTRR_TYPE_UNCACHABLE;

		if ((prev_match == MTRR_TYPE_WRBACK &&
		     curr_match == MTRR_TYPE_WRTHROUGH) ||
		    (prev_match == MTRR_TYPE_WRTHROUGH &&
		     curr_match == MTRR_TYPE_WRBACK)) {
			prev_match = MTRR_TYPE_WRTHROUGH;
			curr_match = MTRR_TYPE_WRTHROUGH;
		}

		if (prev_match != curr_match)
			return MTRR_TYPE_UNCACHABLE;
	}

	if (prev_match != 0xFF)
		return prev_match;

	return mtrr_state->def_type;
}

u8 kvm_get_guest_memory_type(struct kvm_vcpu *vcpu, gfn_t gfn)
{
	u8 mtrr;

	mtrr = get_mtrr_type(&vcpu->arch.mtrr_state, gfn << PAGE_SHIFT,
			     (gfn << PAGE_SHIFT) + PAGE_SIZE);
	if (mtrr == 0xfe || mtrr == 0xff)
		mtrr = MTRR_TYPE_WRBACK;
	return mtrr;
}
EXPORT_SYMBOL_GPL(kvm_get_guest_memory_type);

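/*
 * Mark @sp unsync so guest pte writes to it no longer trap. Refused
 * (returns 1) when the gfn is shadowed with more than one role, since
 * the other shadow pages would silently go stale.
 */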
static int kvm_unsync_page(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp)
{
	unsigned index;
	struct hlist_head *bucket;
	struct kvm_mmu_page *s;
	struct hlist_node *node, *n;

	trace_kvm_mmu_unsync_page(sp);
	index = kvm_page_table_hashfn(sp->gfn);
	bucket = &vcpu->kvm->arch.mmu_page_hash[index];
	/* don't unsync if pagetable is shadowed with multiple roles */
	hlist_for_each_entry_safe(s, node, n, bucket, hash_link) {
		if (s->gfn != sp->gfn || s->role.direct)
			continue;
		if (s->role.word != sp->role.word)
			return 1;
	}
	++vcpu->kvm->stat.mmu_unsync;
	sp->unsync = 1;

	kvm_mmu_mark_parents_unsync(vcpu, sp);

	mmu_convert_notrap(sp);
	return 0;
}

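/*
 * Decide whether @gfn must stay write-protected. Returns 1 if a shadow
 * page forces write protection, 0 if the gfn is already unsync or was
 * successfully unsynced (only attempted when @can_unsync and the
 * oos_shadow knob allow it).
 */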
static int mmu_need_write_protect(struct kvm_vcpu *vcpu, gfn_t gfn,
				  bool can_unsync)
{
	struct kvm_mmu_page *shadow;

	shadow = kvm_mmu_lookup_page(vcpu->kvm, gfn);
	if (shadow) {
		if (shadow->role.level != PT_PAGE_TABLE_LEVEL)
			return 1;
		if (shadow->unsync)
			return 0;
		if (can_unsync && oos_shadow)
			return kvm_unsync_page(vcpu, shadow);
		return 1;
	}
	return 0;
}

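/*
 * Build and install a shadow pte. Returns 1 when the mapping had to be
 * made read-only or dropped because of write protection; the caller is
 * then responsible for flushing the TLB.
 */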
static int set_spte(struct kvm_vcpu *vcpu, u64 *sptep,
		    unsigned pte_access, int user_fault,
		    int write_fault, int dirty, int level,
		    gfn_t gfn, pfn_t pfn, bool speculative,
		    bool can_unsync)
{
	u64 spte;
	int ret = 0;

	/*
	 * We don't set the accessed bit, since we sometimes want to see
	 * whether the guest actually used the pte (in order to detect
	 * demand paging).
	 */
	spte = shadow_base_present_pte | shadow_dirty_mask;
	if (!speculative)
		spte |= shadow_accessed_mask;
	if (!dirty)
		pte_access &= ~ACC_WRITE_MASK;
	if (pte_access & ACC_EXEC_MASK)
		spte |= shadow_x_mask;
	else
		spte |= shadow_nx_mask;
	if (pte_access & ACC_USER_MASK)
		spte |= shadow_user_mask;
	if (level > PT_PAGE_TABLE_LEVEL)
		spte |= PT_PAGE_SIZE_MASK;
	if (tdp_enabled)
		spte |= kvm_x86_ops->get_mt_mask(vcpu, gfn,
						 kvm_is_mmio_pfn(pfn));

	spte |= (u64)pfn << PAGE_SHIFT;

	if ((pte_access & ACC_WRITE_MASK)
	    || (write_fault && !is_write_protection(vcpu) && !user_fault)) {
		if (level > PT_PAGE_TABLE_LEVEL &&
		    has_wrprotected_page(vcpu->kvm, gfn, level)) {
			ret = 1;
			spte = shadow_trap_nonpresent_pte;
			goto set_pte;
		}

		spte |= PT_WRITABLE_MASK;

		/*
		 * Optimization: for pte sync, if spte was writable the hash
		 * lookup is unnecessary (and expensive). Write protection
		 * is responsibility of mmu_get_page / kvm_sync_page.
		 * Same reasoning can be applied to dirty page accounting.
		 */
		if (!can_unsync && is_writeble_pte(*sptep))
			goto set_pte;

		if (mmu_need_write_protect(vcpu, gfn, can_unsync)) {
			pgprintk("%s: found shadow page for %lx, marking ro\n",
				 __func__, gfn);
			ret = 1;
			pte_access &= ~ACC_WRITE_MASK;
			if (is_writeble_pte(spte))
				spte &= ~PT_WRITABLE_MASK;
		}
	}

	if (pte_access & ACC_WRITE_MASK)
		mark_page_dirty(vcpu->kvm, gfn);

set_pte:
	__set_spte(sptep, spte);
	return ret;
}

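/*
 * Install a spte and maintain the surrounding state: parent pte chains
 * when a large spte overwrites a table pointer, the rmap, the large-page
 * counters, dirty page tracking and pfn reference drops.
 */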
static void mmu_set_spte(struct kvm_vcpu *vcpu, u64 *sptep,
			 unsigned pt_access, unsigned pte_access,
			 int user_fault, int write_fault, int dirty,
			 int *ptwrite, int level, gfn_t gfn,
			 pfn_t pfn, bool speculative)
{
	int was_rmapped = 0;
	int was_writeble = is_writeble_pte(*sptep);
	int rmap_count;

	pgprintk("%s: spte %llx access %x write_fault %d"
		 " user_fault %d gfn %lx\n",
		 __func__, *sptep, pt_access,
		 write_fault, user_fault, gfn);

	if (is_rmap_spte(*sptep)) {
		/*
		 * If we overwrite a PTE page pointer with a 2MB PMD, unlink
		 * the parent of the now unreachable PTE.
		 */
		if (level > PT_PAGE_TABLE_LEVEL &&
		    !is_large_pte(*sptep)) {
			struct kvm_mmu_page *child;
			u64 pte = *sptep;

			child = page_header(pte & PT64_BASE_ADDR_MASK);
			mmu_page_remove_parent_pte(child, sptep);
		} else if (pfn != spte_to_pfn(*sptep)) {
			pgprintk("hfn old %lx new %lx\n",
				 spte_to_pfn(*sptep), pfn);
			rmap_remove(vcpu->kvm, sptep);
		} else
			was_rmapped = 1;
	}

	if (set_spte(vcpu, sptep, pte_access, user_fault, write_fault,
		     dirty, level, gfn, pfn, speculative, true)) {
		if (write_fault)
			*ptwrite = 1;
		kvm_x86_ops->tlb_flush(vcpu);
	}

	pgprintk("%s: setting spte %llx\n", __func__, *sptep);
	pgprintk("instantiating %s PTE (%s) at %ld (%llx) addr %p\n",
		 is_large_pte(*sptep) ? "2MB" : "4kB",
		 *sptep & PT_PRESENT_MASK ? "RW" : "R", gfn,
		 *sptep, sptep);
	if (!was_rmapped && is_large_pte(*sptep))
		++vcpu->kvm->stat.lpages;

	page_header_update_slot(vcpu->kvm, sptep, gfn);
	if (!was_rmapped) {
		rmap_count = rmap_add(vcpu, sptep, gfn);
		if (!is_rmap_spte(*sptep))
			kvm_release_pfn_clean(pfn);
		if (rmap_count > RMAP_RECYCLE_THRESHOLD)
			rmap_recycle(vcpu, sptep, gfn);
	} else {
		if (was_writeble)
			kvm_release_pfn_dirty(pfn);
		else
			kvm_release_pfn_clean(pfn);
	}
	if (speculative) {
		vcpu->arch.last_pte_updated = sptep;
		vcpu->arch.last_pte_gfn = gfn;
	}
}

static void nonpaging_new_cr3(struct kvm_vcpu *vcpu)
{
}

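/*
 * Walk the shadow hierarchy for a direct (real-mode or tdp) mapping,
 * allocating intermediate shadow pages as needed, and install the final
 * spte at @level. Returns the pt_write result from mmu_set_spte(), or
 * -ENOMEM if a shadow page could not be allocated.
 */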
static int __direct_map(struct kvm_vcpu *vcpu, gpa_t v, int write,
			int level, gfn_t gfn, pfn_t pfn)
{
	struct kvm_shadow_walk_iterator iterator;
	struct kvm_mmu_page *sp;
	int pt_write = 0;
	gfn_t pseudo_gfn;

	for_each_shadow_entry(vcpu, (u64)gfn << PAGE_SHIFT, iterator) {
		if (iterator.level == level) {
			mmu_set_spte(vcpu, iterator.sptep, ACC_ALL, ACC_ALL,
				     0, write, 1, &pt_write,
				     level, gfn, pfn, false);
			++vcpu->stat.pf_fixed;
			break;
		}

		if (*iterator.sptep == shadow_trap_nonpresent_pte) {
			pseudo_gfn = (iterator.addr & PT64_DIR_BASE_ADDR_MASK) >> PAGE_SHIFT;
			sp = kvm_mmu_get_page(vcpu, pseudo_gfn, iterator.addr,
					      iterator.level - 1,
					      1, ACC_ALL, iterator.sptep);
			if (!sp) {
				pgprintk("nonpaging_map: ENOMEM\n");
				kvm_release_pfn_clean(pfn);
				return -ENOMEM;
			}

			__set_spte(iterator.sptep,
				   __pa(sp->spt)
				   | PT_PRESENT_MASK | PT_WRITABLE_MASK
				   | shadow_user_mask | shadow_x_mask);
		}
	}
	return pt_write;
}

static int nonpaging_map(struct kvm_vcpu *vcpu, gva_t v, int write, gfn_t gfn)
{
	int r;
	int level;
	pfn_t pfn;
	unsigned long mmu_seq;

	level = mapping_level(vcpu, gfn);

	/*
	 * This path builds a PAE pagetable - so we can map 2mb pages at
	 * maximum. Therefore check if the level is larger than that.
	 */
	if (level > PT_DIRECTORY_LEVEL)
		level = PT_DIRECTORY_LEVEL;

	gfn &= ~(KVM_PAGES_PER_HPAGE(level) - 1);

	mmu_seq = vcpu->kvm->mmu_notifier_seq;
	smp_rmb();
	pfn = gfn_to_pfn(vcpu->kvm, gfn);

	/* mmio */
	if (is_error_pfn(pfn)) {
		kvm_release_pfn_clean(pfn);
		return 1;
	}

	spin_lock(&vcpu->kvm->mmu_lock);
	if (mmu_notifier_retry(vcpu, mmu_seq))
		goto out_unlock;
	kvm_mmu_free_some_pages(vcpu);
	r = __direct_map(vcpu, v, write, level, gfn, pfn);
	spin_unlock(&vcpu->kvm->mmu_lock);

	return r;

out_unlock:
	spin_unlock(&vcpu->kvm->mmu_lock);
	kvm_release_pfn_clean(pfn);
	return 0;
}

static void mmu_free_roots(struct kvm_vcpu *vcpu)
{
	int i;
	struct kvm_mmu_page *sp;

	if (!VALID_PAGE(vcpu->arch.mmu.root_hpa))
		return;
	spin_lock(&vcpu->kvm->mmu_lock);
	if (vcpu->arch.mmu.shadow_root_level == PT64_ROOT_LEVEL) {
		hpa_t root = vcpu->arch.mmu.root_hpa;

		sp = page_header(root);
		--sp->root_count;
		if (!sp->root_count && sp->role.invalid)
			kvm_mmu_zap_page(vcpu->kvm, sp);
		vcpu->arch.mmu.root_hpa = INVALID_PAGE;
		spin_unlock(&vcpu->kvm->mmu_lock);
		return;
	}
	for (i = 0; i < 4; ++i) {
		hpa_t root = vcpu->arch.mmu.pae_root[i];

		if (root) {
			root &= PT64_BASE_ADDR_MASK;
			sp = page_header(root);
			--sp->root_count;
			if (!sp->root_count && sp->role.invalid)
				kvm_mmu_zap_page(vcpu->kvm, sp);
		}
		vcpu->arch.mmu.pae_root[i] = INVALID_PAGE;
	}
	spin_unlock(&vcpu->kvm->mmu_lock);
	vcpu->arch.mmu.root_hpa = INVALID_PAGE;
}

static int mmu_check_root(struct kvm_vcpu *vcpu, gfn_t root_gfn)
{
	int ret = 0;

	if (!kvm_is_visible_gfn(vcpu->kvm, root_gfn)) {
		set_bit(KVM_REQ_TRIPLE_FAULT, &vcpu->requests);
		ret = 1;
	}

	return ret;
}

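/*
 * Allocate the shadow root(s) for the current mode: a single PT64 root,
 * or four PAE roots built from the guest pdptrs. Returns 1 (after
 * requesting a triple fault) if a root gfn is not backed by a memslot.
 */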
static int mmu_alloc_roots(struct kvm_vcpu *vcpu)
{
	int i;
	gfn_t root_gfn;
	struct kvm_mmu_page *sp;
	int direct = 0;
	u64 pdptr;

	root_gfn = vcpu->arch.cr3 >> PAGE_SHIFT;

	if (vcpu->arch.mmu.shadow_root_level == PT64_ROOT_LEVEL) {
		hpa_t root = vcpu->arch.mmu.root_hpa;

		ASSERT(!VALID_PAGE(root));
		if (tdp_enabled)
			direct = 1;
		if (mmu_check_root(vcpu, root_gfn))
			return 1;
		sp = kvm_mmu_get_page(vcpu, root_gfn, 0,
				      PT64_ROOT_LEVEL, direct,
				      ACC_ALL, NULL);
		root = __pa(sp->spt);
		++sp->root_count;
		vcpu->arch.mmu.root_hpa = root;
		return 0;
	}
	direct = !is_paging(vcpu);
	if (tdp_enabled)
		direct = 1;
	for (i = 0; i < 4; ++i) {
		hpa_t root = vcpu->arch.mmu.pae_root[i];

		ASSERT(!VALID_PAGE(root));
		if (vcpu->arch.mmu.root_level == PT32E_ROOT_LEVEL) {
			pdptr = kvm_pdptr_read(vcpu, i);
			if (!is_present_gpte(pdptr)) {
				vcpu->arch.mmu.pae_root[i] = 0;
				continue;
			}
			root_gfn = pdptr >> PAGE_SHIFT;
		} else if (vcpu->arch.mmu.root_level == 0)
			root_gfn = 0;
		if (mmu_check_root(vcpu, root_gfn))
			return 1;
		sp = kvm_mmu_get_page(vcpu, root_gfn, i << 30,
				      PT32_ROOT_LEVEL, direct,
				      ACC_ALL, NULL);
		root = __pa(sp->spt);
		++sp->root_count;
		vcpu->arch.mmu.pae_root[i] = root | PT_PRESENT_MASK;
	}
	vcpu->arch.mmu.root_hpa = __pa(vcpu->arch.mmu.pae_root);
	return 0;
}

static void mmu_sync_roots(struct kvm_vcpu *vcpu)
{
	int i;
	struct kvm_mmu_page *sp;

	if (!VALID_PAGE(vcpu->arch.mmu.root_hpa))
		return;
	if (vcpu->arch.mmu.shadow_root_level == PT64_ROOT_LEVEL) {
		hpa_t root = vcpu->arch.mmu.root_hpa;

		sp = page_header(root);
		mmu_sync_children(vcpu, sp);
		return;
	}
	for (i = 0; i < 4; ++i) {
		hpa_t root = vcpu->arch.mmu.pae_root[i];

		if (root && VALID_PAGE(root)) {
			root &= PT64_BASE_ADDR_MASK;
			sp = page_header(root);
			mmu_sync_children(vcpu, sp);
		}
	}
}

void kvm_mmu_sync_roots(struct kvm_vcpu *vcpu)
{
	spin_lock(&vcpu->kvm->mmu_lock);
	mmu_sync_roots(vcpu);
	spin_unlock(&vcpu->kvm->mmu_lock);
}

static gpa_t nonpaging_gva_to_gpa(struct kvm_vcpu *vcpu, gva_t vaddr)
{
	return vaddr;
}

static int nonpaging_page_fault(struct kvm_vcpu *vcpu, gva_t gva,
				u32 error_code)
{
	gfn_t gfn;
	int r;

	pgprintk("%s: gva %lx error %x\n", __func__, gva, error_code);
	r = mmu_topup_memory_caches(vcpu);
	if (r)
		return r;

	ASSERT(vcpu);
	ASSERT(VALID_PAGE(vcpu->arch.mmu.root_hpa));

	gfn = gva >> PAGE_SHIFT;

	return nonpaging_map(vcpu, gva & PAGE_MASK,
			     error_code & PFERR_WRITE_MASK, gfn);
}

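/*
 * Handle a fault taken with two-dimensional paging enabled: @gpa is a
 * guest physical address, so no guest page table walk is needed; map it
 * directly after revalidating against the mmu notifier sequence.
 */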
static int tdp_page_fault(struct kvm_vcpu *vcpu, gva_t gpa,
			  u32 error_code)
{
	pfn_t pfn;
	int r;
	int level;
	gfn_t gfn = gpa >> PAGE_SHIFT;
	unsigned long mmu_seq;

	ASSERT(vcpu);
	ASSERT(VALID_PAGE(vcpu->arch.mmu.root_hpa));

	r = mmu_topup_memory_caches(vcpu);
	if (r)
		return r;

	level = mapping_level(vcpu, gfn);

	gfn &= ~(KVM_PAGES_PER_HPAGE(level) - 1);

	mmu_seq = vcpu->kvm->mmu_notifier_seq;
	smp_rmb();
	pfn = gfn_to_pfn(vcpu->kvm, gfn);
	if (is_error_pfn(pfn)) {
		kvm_release_pfn_clean(pfn);
		return 1;
	}
	spin_lock(&vcpu->kvm->mmu_lock);
	if (mmu_notifier_retry(vcpu, mmu_seq))
		goto out_unlock;
	kvm_mmu_free_some_pages(vcpu);
	r = __direct_map(vcpu, gpa, error_code & PFERR_WRITE_MASK,
			 level, gfn, pfn);
	spin_unlock(&vcpu->kvm->mmu_lock);

	return r;

out_unlock:
	spin_unlock(&vcpu->kvm->mmu_lock);
	kvm_release_pfn_clean(pfn);
	return 0;
}

static void nonpaging_free(struct kvm_vcpu *vcpu)
{
	mmu_free_roots(vcpu);
}

static int nonpaging_init_context(struct kvm_vcpu *vcpu)
{
	struct kvm_mmu *context = &vcpu->arch.mmu;

	context->new_cr3 = nonpaging_new_cr3;
	context->page_fault = nonpaging_page_fault;
	context->gva_to_gpa = nonpaging_gva_to_gpa;
	context->free = nonpaging_free;
	context->prefetch_page = nonpaging_prefetch_page;
	context->sync_page = nonpaging_sync_page;
	context->invlpg = nonpaging_invlpg;
	context->root_level = 0;
	context->shadow_root_level = PT32E_ROOT_LEVEL;
	context->root_hpa = INVALID_PAGE;
	return 0;
}

void kvm_mmu_flush_tlb(struct kvm_vcpu *vcpu)
{
	++vcpu->stat.tlb_flush;
	kvm_x86_ops->tlb_flush(vcpu);
}

static void paging_new_cr3(struct kvm_vcpu *vcpu)
{
	pgprintk("%s: cr3 %lx\n", __func__, vcpu->arch.cr3);
	mmu_free_roots(vcpu);
}

static void inject_page_fault(struct kvm_vcpu *vcpu,
			      u64 addr,
			      u32 err_code)
{
	kvm_inject_page_fault(vcpu, addr, err_code);
}

static void paging_free(struct kvm_vcpu *vcpu)
{
	nonpaging_free(vcpu);
}

static bool is_rsvd_bits_set(struct kvm_vcpu *vcpu, u64 gpte, int level)
{
	int bit7;

	bit7 = (gpte >> 7) & 1;
	return (gpte & vcpu->arch.mmu.rsvd_bits_mask[bit7][level-1]) != 0;
}

#define PTTYPE 64
#include "paging_tmpl.h"
#undef PTTYPE

#define PTTYPE 32
#include "paging_tmpl.h"
#undef PTTYPE

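/*
 * Recompute the reserved-bit masks consumed by is_rsvd_bits_set() for the
 * given guest paging level, indexed by [pde.ps][level-1].
 */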
static void reset_rsvds_bits_mask(struct kvm_vcpu *vcpu, int level)
{
	struct kvm_mmu *context = &vcpu->arch.mmu;
	int maxphyaddr = cpuid_maxphyaddr(vcpu);
	u64 exb_bit_rsvd = 0;

	if (!is_nx(vcpu))
		exb_bit_rsvd = rsvd_bits(63, 63);
	switch (level) {
	case PT32_ROOT_LEVEL:
		/* no rsvd bits for 2 level 4K page table entries */
		context->rsvd_bits_mask[0][1] = 0;
		context->rsvd_bits_mask[0][0] = 0;
		if (is_cpuid_PSE36())
			/* 36bits PSE 4MB page */
			context->rsvd_bits_mask[1][1] = rsvd_bits(17, 21);
		else
			/* 32 bits PSE 4MB page */
			context->rsvd_bits_mask[1][1] = rsvd_bits(13, 21);
		context->rsvd_bits_mask[1][0] = context->rsvd_bits_mask[0][0];
		break;
	case PT32E_ROOT_LEVEL:
		context->rsvd_bits_mask[0][2] =
			rsvd_bits(maxphyaddr, 63) |
			rsvd_bits(7, 8) | rsvd_bits(1, 2);	/* PDPTE */
		context->rsvd_bits_mask[0][1] = exb_bit_rsvd |
			rsvd_bits(maxphyaddr, 62);	/* PDE */
		context->rsvd_bits_mask[0][0] = exb_bit_rsvd |
			rsvd_bits(maxphyaddr, 62);	/* PTE */
		context->rsvd_bits_mask[1][1] = exb_bit_rsvd |
			rsvd_bits(maxphyaddr, 62) |
			rsvd_bits(13, 20);		/* large page */
		context->rsvd_bits_mask[1][0] = context->rsvd_bits_mask[0][0];
		break;
	case PT64_ROOT_LEVEL:
		context->rsvd_bits_mask[0][3] = exb_bit_rsvd |
			rsvd_bits(maxphyaddr, 51) | rsvd_bits(7, 8);
		context->rsvd_bits_mask[0][2] = exb_bit_rsvd |
			rsvd_bits(maxphyaddr, 51) | rsvd_bits(7, 8);
		context->rsvd_bits_mask[0][1] = exb_bit_rsvd |
			rsvd_bits(maxphyaddr, 51);
		context->rsvd_bits_mask[0][0] = exb_bit_rsvd |
			rsvd_bits(maxphyaddr, 51);
		context->rsvd_bits_mask[1][3] = context->rsvd_bits_mask[0][3];
		context->rsvd_bits_mask[1][2] = exb_bit_rsvd |
			rsvd_bits(maxphyaddr, 51) |
			rsvd_bits(13, 29);
		context->rsvd_bits_mask[1][1] = exb_bit_rsvd |
			rsvd_bits(maxphyaddr, 51) |
			rsvd_bits(13, 20);		/* large page */
		context->rsvd_bits_mask[1][0] = context->rsvd_bits_mask[0][0];
		break;
	}
}

static int paging64_init_context_common(struct kvm_vcpu *vcpu, int level)
{
	struct kvm_mmu *context = &vcpu->arch.mmu;

	ASSERT(is_pae(vcpu));
	context->new_cr3 = paging_new_cr3;
	context->page_fault = paging64_page_fault;
	context->gva_to_gpa = paging64_gva_to_gpa;
	context->prefetch_page = paging64_prefetch_page;
	context->sync_page = paging64_sync_page;
	context->invlpg = paging64_invlpg;
	context->free = paging_free;
	context->root_level = level;
	context->shadow_root_level = level;
	context->root_hpa = INVALID_PAGE;
	return 0;
}

static int paging64_init_context(struct kvm_vcpu *vcpu)
{
	reset_rsvds_bits_mask(vcpu, PT64_ROOT_LEVEL);
	return paging64_init_context_common(vcpu, PT64_ROOT_LEVEL);
}

static int paging32_init_context(struct kvm_vcpu *vcpu)
{
	struct kvm_mmu *context = &vcpu->arch.mmu;

	reset_rsvds_bits_mask(vcpu, PT32_ROOT_LEVEL);
	context->new_cr3 = paging_new_cr3;
	context->page_fault = paging32_page_fault;
	context->gva_to_gpa = paging32_gva_to_gpa;
	context->free = paging_free;
	context->prefetch_page = paging32_prefetch_page;
	context->sync_page = paging32_sync_page;
	context->invlpg = paging32_invlpg;
	context->root_level = PT32_ROOT_LEVEL;
	context->shadow_root_level = PT32E_ROOT_LEVEL;
	context->root_hpa = INVALID_PAGE;
	return 0;
}

static int paging32E_init_context(struct kvm_vcpu *vcpu)
{
	reset_rsvds_bits_mask(vcpu, PT32E_ROOT_LEVEL);
	return paging64_init_context_common(vcpu, PT32E_ROOT_LEVEL);
}

static int init_kvm_tdp_mmu(struct kvm_vcpu *vcpu)
{
	struct kvm_mmu *context = &vcpu->arch.mmu;

	context->new_cr3 = nonpaging_new_cr3;
	context->page_fault = tdp_page_fault;
	context->free = nonpaging_free;
	context->prefetch_page = nonpaging_prefetch_page;
	context->sync_page = nonpaging_sync_page;
	context->invlpg = nonpaging_invlpg;
	context->shadow_root_level = kvm_x86_ops->get_tdp_level();
	context->root_hpa = INVALID_PAGE;

	if (!is_paging(vcpu)) {
		context->gva_to_gpa = nonpaging_gva_to_gpa;
		context->root_level = 0;
	} else if (is_long_mode(vcpu)) {
		reset_rsvds_bits_mask(vcpu, PT64_ROOT_LEVEL);
		context->gva_to_gpa = paging64_gva_to_gpa;
		context->root_level = PT64_ROOT_LEVEL;
	} else if (is_pae(vcpu)) {
		reset_rsvds_bits_mask(vcpu, PT32E_ROOT_LEVEL);
		context->gva_to_gpa = paging64_gva_to_gpa;
		context->root_level = PT32E_ROOT_LEVEL;
	} else {
		reset_rsvds_bits_mask(vcpu, PT32_ROOT_LEVEL);
		context->gva_to_gpa = paging32_gva_to_gpa;
		context->root_level = PT32_ROOT_LEVEL;
	}

	return 0;
}

static int init_kvm_softmmu(struct kvm_vcpu *vcpu)
{
	int r;

	ASSERT(vcpu);
	ASSERT(!VALID_PAGE(vcpu->arch.mmu.root_hpa));

	if (!is_paging(vcpu))
		r = nonpaging_init_context(vcpu);
	else if (is_long_mode(vcpu))
		r = paging64_init_context(vcpu);
	else if (is_pae(vcpu))
		r = paging32E_init_context(vcpu);
	else
		r = paging32_init_context(vcpu);

	vcpu->arch.mmu.base_role.glevels = vcpu->arch.mmu.root_level;

	return r;
}

static int init_kvm_mmu(struct kvm_vcpu *vcpu)
{
	vcpu->arch.update_pte.pfn = bad_pfn;

	if (tdp_enabled)
		return init_kvm_tdp_mmu(vcpu);
	else
		return init_kvm_softmmu(vcpu);
}

static void destroy_kvm_mmu(struct kvm_vcpu *vcpu)
{
	ASSERT(vcpu);
	if (VALID_PAGE(vcpu->arch.mmu.root_hpa)) {
		vcpu->arch.mmu.free(vcpu);
		vcpu->arch.mmu.root_hpa = INVALID_PAGE;
	}
}

int kvm_mmu_reset_context(struct kvm_vcpu *vcpu)
{
	destroy_kvm_mmu(vcpu);
	return init_kvm_mmu(vcpu);
}
EXPORT_SYMBOL_GPL(kvm_mmu_reset_context);

int kvm_mmu_load(struct kvm_vcpu *vcpu)
{
	int r;

	r = mmu_topup_memory_caches(vcpu);
	if (r)
		goto out;
	spin_lock(&vcpu->kvm->mmu_lock);
	kvm_mmu_free_some_pages(vcpu);
	r = mmu_alloc_roots(vcpu);
	mmu_sync_roots(vcpu);
	spin_unlock(&vcpu->kvm->mmu_lock);
	if (r)
		goto out;
	/* set_cr3() should ensure TLB has been flushed */
	kvm_x86_ops->set_cr3(vcpu, vcpu->arch.mmu.root_hpa);
out:
	return r;
}
EXPORT_SYMBOL_GPL(kvm_mmu_load);

void kvm_mmu_unload(struct kvm_vcpu *vcpu)
{
	mmu_free_roots(vcpu);
}

static void mmu_pte_write_zap_pte(struct kvm_vcpu *vcpu,
				  struct kvm_mmu_page *sp,
				  u64 *spte)
{
	u64 pte;
	struct kvm_mmu_page *child;

	pte = *spte;
	if (is_shadow_present_pte(pte)) {
		if (is_last_spte(pte, sp->role.level))
			rmap_remove(vcpu->kvm, spte);
		else {
			child = page_header(pte & PT64_BASE_ADDR_MASK);
			mmu_page_remove_parent_pte(child, spte);
		}
	}
	__set_spte(spte, shadow_trap_nonpresent_pte);
	if (is_large_pte(pte))
		--vcpu->kvm->stat.lpages;
}

static void mmu_pte_write_new_pte(struct kvm_vcpu *vcpu,
				  struct kvm_mmu_page *sp,
				  u64 *spte,
				  const void *new)
{
	if (sp->role.level != PT_PAGE_TABLE_LEVEL) {
		++vcpu->kvm->stat.mmu_pde_zapped;
		return;
	}

	++vcpu->kvm->stat.mmu_pte_updated;
	if (sp->role.glevels == PT32_ROOT_LEVEL)
		paging32_update_pte(vcpu, sp, spte, new);
	else
		paging64_update_pte(vcpu, sp, spte, new);
}

static bool need_remote_flush(u64 old, u64 new)
{
	if (!is_shadow_present_pte(old))
		return false;
	if (!is_shadow_present_pte(new))
		return true;
	if ((old ^ new) & PT64_BASE_ADDR_MASK)
		return true;
	old ^= PT64_NX_MASK;
	new ^= PT64_NX_MASK;
	return (old & ~new & PT64_PERM_MASK) != 0;
}

static void mmu_pte_write_flush_tlb(struct kvm_vcpu *vcpu, u64 old, u64 new)
{
	if (need_remote_flush(old, new))
		kvm_flush_remote_tlbs(vcpu->kvm);
	else
		kvm_mmu_flush_tlb(vcpu);
}

static bool last_updated_pte_accessed(struct kvm_vcpu *vcpu)
{
	u64 *spte = vcpu->arch.last_pte_updated;

	return !!(spte && (*spte & shadow_accessed_mask));
}

static void mmu_guess_page_from_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
					  const u8 *new, int bytes)
{
	gfn_t gfn;
	int r;
	u64 gpte = 0;
	pfn_t pfn;

	if (bytes != 4 && bytes != 8)
		return;

	/*
	 * Assume that the pte write is on a page table of the same type
	 * as the current vcpu paging mode. This is nearly always true
	 * (might be false while changing modes). Note it is verified later
	 * by update_pte().
	 */
	if (is_pae(vcpu)) {
		/* Handle a 32-bit guest writing two halves of a 64-bit gpte */
		if ((bytes == 4) && (gpa % 4 == 0)) {
			r = kvm_read_guest(vcpu->kvm, gpa & ~(u64)7, &gpte, 8);
			if (r)
				return;
			memcpy((void *)&gpte + (gpa % 8), new, 4);
		} else if ((bytes == 8) && (gpa % 8 == 0)) {
			memcpy((void *)&gpte, new, 8);
		}
	} else {
		if ((bytes == 4) && (gpa % 4 == 0))
			memcpy((void *)&gpte, new, 4);
	}
	if (!is_present_gpte(gpte))
		return;
	gfn = (gpte & PT64_BASE_ADDR_MASK) >> PAGE_SHIFT;

	vcpu->arch.update_pte.mmu_seq = vcpu->kvm->mmu_notifier_seq;
	smp_rmb();
	pfn = gfn_to_pfn(vcpu->kvm, gfn);

	if (is_error_pfn(pfn)) {
		kvm_release_pfn_clean(pfn);
		return;
	}
	vcpu->arch.update_pte.gfn = gfn;
	vcpu->arch.update_pte.pfn = pfn;
}

static void kvm_mmu_access_page(struct kvm_vcpu *vcpu, gfn_t gfn)
{
	u64 *spte = vcpu->arch.last_pte_updated;

	if (spte
	    && vcpu->arch.last_pte_gfn == gfn
	    && shadow_accessed_mask
	    && !(*spte & shadow_accessed_mask)
	    && is_shadow_present_pte(*spte))
		set_bit(PT_ACCESSED_SHIFT, (unsigned long *)spte);
}

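/*
 * Handle an emulated or intercepted write to a guest page table page.
 * Update the affected sptes in place when the write looks like a pte
 * update, and zap the shadow page on misaligned writes or write flooding,
 * both of which suggest the page is no longer used as a page table.
 */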
void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
		       const u8 *new, int bytes,
		       bool guest_initiated)
{
	gfn_t gfn = gpa >> PAGE_SHIFT;
	struct kvm_mmu_page *sp;
	struct hlist_node *node, *n;
	struct hlist_head *bucket;
	unsigned index;
	u64 entry, gentry;
	u64 *spte;
	unsigned offset = offset_in_page(gpa);
	unsigned pte_size;
	unsigned page_offset;
	unsigned misaligned;
	unsigned quadrant;
	int level;
	int flooded = 0;
	int npte;
	int r;

	pgprintk("%s: gpa %llx bytes %d\n", __func__, gpa, bytes);
	mmu_guess_page_from_pte_write(vcpu, gpa, new, bytes);
	spin_lock(&vcpu->kvm->mmu_lock);
	kvm_mmu_access_page(vcpu, gfn);
	kvm_mmu_free_some_pages(vcpu);
	++vcpu->kvm->stat.mmu_pte_write;
	kvm_mmu_audit(vcpu, "pre pte write");
	if (guest_initiated) {
		if (gfn == vcpu->arch.last_pt_write_gfn
		    && !last_updated_pte_accessed(vcpu)) {
			++vcpu->arch.last_pt_write_count;
			if (vcpu->arch.last_pt_write_count >= 3)
				flooded = 1;
		} else {
			vcpu->arch.last_pt_write_gfn = gfn;
			vcpu->arch.last_pt_write_count = 1;
			vcpu->arch.last_pte_updated = NULL;
		}
	}
	index = kvm_page_table_hashfn(gfn);
	bucket = &vcpu->kvm->arch.mmu_page_hash[index];
	hlist_for_each_entry_safe(sp, node, n, bucket, hash_link) {
		if (sp->gfn != gfn || sp->role.direct || sp->role.invalid)
			continue;
		pte_size = sp->role.glevels == PT32_ROOT_LEVEL ? 4 : 8;
		misaligned = (offset ^ (offset + bytes - 1)) & ~(pte_size - 1);
		misaligned |= bytes < 4;
		if (misaligned || flooded) {
			/*
			 * Misaligned accesses are too much trouble to fix
			 * up; also, they usually indicate a page is not used
			 * as a page table.
			 *
			 * If we're seeing too many writes to a page,
			 * it may no longer be a page table, or we may be
			 * forking, in which case it is better to unmap the
			 * page.
			 */
			pgprintk("misaligned: gpa %llx bytes %d role %x\n",
				 gpa, bytes, sp->role.word);
			if (kvm_mmu_zap_page(vcpu->kvm, sp))
				n = bucket->first;
			++vcpu->kvm->stat.mmu_flooded;
			continue;
		}
		page_offset = offset;
		level = sp->role.level;
		npte = 1;
		if (sp->role.glevels == PT32_ROOT_LEVEL) {
			page_offset <<= 1;	/* 32->64 */
			/*
			 * A 32-bit pde maps 4MB while the shadow pdes map
			 * only 2MB.  So we need to double the offset again
			 * and zap two pdes instead of one.
			 */
			if (level == PT32_ROOT_LEVEL) {
				page_offset &= ~7; /* kill rounding error */
				page_offset <<= 1;
				npte = 2;
			}
			quadrant = page_offset >> PAGE_SHIFT;
			page_offset &= ~PAGE_MASK;
			if (quadrant != sp->role.quadrant)
				continue;
		}
		spte = &sp->spt[page_offset / sizeof(*spte)];
		if ((gpa & (pte_size - 1)) || (bytes < pte_size)) {
			gentry = 0;
			r = kvm_read_guest_atomic(vcpu->kvm,
						  gpa & ~(u64)(pte_size - 1),
						  &gentry, pte_size);
			new = (const void *)&gentry;
			if (r < 0)
				new = NULL;
		}
		while (npte--) {
			entry = *spte;
			mmu_pte_write_zap_pte(vcpu, sp, spte);
			if (new)
				mmu_pte_write_new_pte(vcpu, sp, spte, new);
			mmu_pte_write_flush_tlb(vcpu, entry, *spte);
			++spte;
		}
	}
	kvm_mmu_audit(vcpu, "post pte write");
	spin_unlock(&vcpu->kvm->mmu_lock);
	if (!is_error_pfn(vcpu->arch.update_pte.pfn)) {
		kvm_release_pfn_clean(vcpu->arch.update_pte.pfn);
		vcpu->arch.update_pte.pfn = bad_pfn;
	}
}

int kvm_mmu_unprotect_page_virt(struct kvm_vcpu *vcpu, gva_t gva)
{
	gpa_t gpa;
	int r;

	if (tdp_enabled)
		return 0;

	gpa = vcpu->arch.mmu.gva_to_gpa(vcpu, gva);

	spin_lock(&vcpu->kvm->mmu_lock);
	r = kvm_mmu_unprotect_page(vcpu->kvm, gpa >> PAGE_SHIFT);
	spin_unlock(&vcpu->kvm->mmu_lock);
	return r;
}
EXPORT_SYMBOL_GPL(kvm_mmu_unprotect_page_virt);

void __kvm_mmu_free_some_pages(struct kvm_vcpu *vcpu)
{
	while (vcpu->kvm->arch.n_free_mmu_pages < KVM_REFILL_PAGES &&
	       !list_empty(&vcpu->kvm->arch.active_mmu_pages)) {
		struct kvm_mmu_page *sp;

		sp = container_of(vcpu->kvm->arch.active_mmu_pages.prev,
				  struct kvm_mmu_page, link);
		kvm_mmu_zap_page(vcpu->kvm, sp);
		++vcpu->kvm->stat.mmu_recycled;
	}
}

int kvm_mmu_page_fault(struct kvm_vcpu *vcpu, gva_t cr2, u32 error_code)
{
	int r;
	enum emulation_result er;

	r = vcpu->arch.mmu.page_fault(vcpu, cr2, error_code);
	if (r < 0)
		goto out;

	if (!r) {
		r = 1;
		goto out;
	}

	r = mmu_topup_memory_caches(vcpu);
	if (r)
		goto out;

	er = emulate_instruction(vcpu, vcpu->run, cr2, error_code, 0);

	switch (er) {
	case EMULATE_DONE:
		return 1;
	case EMULATE_DO_MMIO:
		++vcpu->stat.mmio_exits;
		return 0;
	case EMULATE_FAIL:
		vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
		vcpu->run->internal.suberror = KVM_INTERNAL_ERROR_EMULATION;
		return 0;
	default:
		BUG();
	}
out:
	return r;
}
EXPORT_SYMBOL_GPL(kvm_mmu_page_fault);

void kvm_mmu_invlpg(struct kvm_vcpu *vcpu, gva_t gva)
{
	vcpu->arch.mmu.invlpg(vcpu, gva);
	kvm_mmu_flush_tlb(vcpu);
	++vcpu->stat.invlpg;
}
EXPORT_SYMBOL_GPL(kvm_mmu_invlpg);

void kvm_enable_tdp(void)
{
	tdp_enabled = true;
}
EXPORT_SYMBOL_GPL(kvm_enable_tdp);

void kvm_disable_tdp(void)
{
	tdp_enabled = false;
}
EXPORT_SYMBOL_GPL(kvm_disable_tdp);

static void free_mmu_pages(struct kvm_vcpu *vcpu)
{
	free_page((unsigned long)vcpu->arch.mmu.pae_root);
}

static int alloc_mmu_pages(struct kvm_vcpu *vcpu)
{
	struct page *page;
	int i;

	ASSERT(vcpu);

	/*
	 * When emulating 32-bit mode, cr3 is only 32 bits even on x86_64.
	 * Therefore we need to allocate shadow page tables in the first
	 * 4GB of memory, which happens to fit the DMA32 zone.
	 */
	page = alloc_page(GFP_KERNEL | __GFP_DMA32);
	if (!page)
		goto error_1;
	vcpu->arch.mmu.pae_root = page_address(page);
	for (i = 0; i < 4; ++i)
		vcpu->arch.mmu.pae_root[i] = INVALID_PAGE;

	return 0;

error_1:
	free_mmu_pages(vcpu);
	return -ENOMEM;
}

int kvm_mmu_create(struct kvm_vcpu *vcpu)
{
	ASSERT(vcpu);
	ASSERT(!VALID_PAGE(vcpu->arch.mmu.root_hpa));

	return alloc_mmu_pages(vcpu);
}

int kvm_mmu_setup(struct kvm_vcpu *vcpu)
{
	ASSERT(vcpu);
	ASSERT(!VALID_PAGE(vcpu->arch.mmu.root_hpa));

	return init_kvm_mmu(vcpu);
}

void kvm_mmu_destroy(struct kvm_vcpu *vcpu)
{
	ASSERT(vcpu);

	destroy_kvm_mmu(vcpu);
	free_mmu_pages(vcpu);
	mmu_free_memory_caches(vcpu);
}

void kvm_mmu_slot_remove_write_access(struct kvm *kvm, int slot)
{
	struct kvm_mmu_page *sp;

	list_for_each_entry(sp, &kvm->arch.active_mmu_pages, link) {
		int i;
		u64 *pt;

		if (!test_bit(slot, sp->slot_bitmap))
			continue;

		pt = sp->spt;
		for (i = 0; i < PT64_ENT_PER_PAGE; ++i)
			/* avoid RMW */
			if (pt[i] & PT_WRITABLE_MASK)
				pt[i] &= ~PT_WRITABLE_MASK;
	}
	kvm_flush_remote_tlbs(kvm);
}

void kvm_mmu_zap_all(struct kvm *kvm)
{
	struct kvm_mmu_page *sp, *node;

	spin_lock(&kvm->mmu_lock);
	list_for_each_entry_safe(sp, node, &kvm->arch.active_mmu_pages, link)
		if (kvm_mmu_zap_page(kvm, sp))
			node = container_of(kvm->arch.active_mmu_pages.next,
					    struct kvm_mmu_page, link);
	spin_unlock(&kvm->mmu_lock);

	kvm_flush_remote_tlbs(kvm);
}

static void kvm_mmu_remove_one_alloc_mmu_page(struct kvm *kvm)
{
	struct kvm_mmu_page *page;

	page = container_of(kvm->arch.active_mmu_pages.prev,
			    struct kvm_mmu_page, link);
	kvm_mmu_zap_page(kvm, page);
}

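/*
 * Shrinker callback: under memory pressure, steal one shadow page from
 * the first VM that has any, then rotate that VM to the list tail so
 * subsequent shrinks pick on other VMs first. Returns the total number
 * of allocated shadow pages as the cache size estimate.
 */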
static int mmu_shrink(int nr_to_scan, gfp_t gfp_mask)
{
	struct kvm *kvm;
	struct kvm *kvm_freed = NULL;
	int cache_count = 0;

	spin_lock(&kvm_lock);

	list_for_each_entry(kvm, &vm_list, vm_list) {
		int npages;

		if (!down_read_trylock(&kvm->slots_lock))
			continue;
		spin_lock(&kvm->mmu_lock);
		npages = kvm->arch.n_alloc_mmu_pages -
			 kvm->arch.n_free_mmu_pages;
		cache_count += npages;
		if (!kvm_freed && nr_to_scan > 0 && npages > 0) {
			kvm_mmu_remove_one_alloc_mmu_page(kvm);
			cache_count--;
			kvm_freed = kvm;
		}
		nr_to_scan--;

		spin_unlock(&kvm->mmu_lock);
		up_read(&kvm->slots_lock);
	}
	if (kvm_freed)
		list_move_tail(&kvm_freed->vm_list, &vm_list);

	spin_unlock(&kvm_lock);

	return cache_count;
}

static struct shrinker mmu_shrinker = {
	.shrink = mmu_shrink,
	.seeks = DEFAULT_SEEKS * 10,
};

static void mmu_destroy_caches(void)
{
	if (pte_chain_cache)
		kmem_cache_destroy(pte_chain_cache);
	if (rmap_desc_cache)
		kmem_cache_destroy(rmap_desc_cache);
	if (mmu_page_header_cache)
		kmem_cache_destroy(mmu_page_header_cache);
}

void kvm_mmu_module_exit(void)
{
	mmu_destroy_caches();
	unregister_shrinker(&mmu_shrinker);
}

int kvm_mmu_module_init(void)
{
	pte_chain_cache = kmem_cache_create("kvm_pte_chain",
					    sizeof(struct kvm_pte_chain),
					    0, 0, NULL);
	if (!pte_chain_cache)
		goto nomem;
	rmap_desc_cache = kmem_cache_create("kvm_rmap_desc",
					    sizeof(struct kvm_rmap_desc),
					    0, 0, NULL);
	if (!rmap_desc_cache)
		goto nomem;

	mmu_page_header_cache = kmem_cache_create("kvm_mmu_page_header",
						  sizeof(struct kvm_mmu_page),
						  0, 0, NULL);
	if (!mmu_page_header_cache)
		goto nomem;

	register_shrinker(&mmu_shrinker);

	return 0;

nomem:
	mmu_destroy_caches();
	return -ENOMEM;
}

/*
 * Calculate the number of mmu pages needed for the kvm.
 */
unsigned int kvm_mmu_calculate_mmu_pages(struct kvm *kvm)
{
	int i;
	unsigned int nr_mmu_pages;
	unsigned int nr_pages = 0;

	for (i = 0; i < kvm->nmemslots; i++)
		nr_pages += kvm->memslots[i].npages;

	nr_mmu_pages = nr_pages * KVM_PERMILLE_MMU_PAGES / 1000;
	nr_mmu_pages = max(nr_mmu_pages,
			   (unsigned int) KVM_MIN_ALLOC_MMU_PAGES);

	return nr_mmu_pages;
}

static void *pv_mmu_peek_buffer(struct kvm_pv_mmu_op_buffer *buffer,
				unsigned len)
{
	if (len > buffer->len)
		return NULL;
	return buffer->ptr;
}

static void *pv_mmu_read_buffer(struct kvm_pv_mmu_op_buffer *buffer,
				unsigned len)
{
	void *ret;

	ret = pv_mmu_peek_buffer(buffer, len);
	if (!ret)
		return ret;
	buffer->ptr += len;
	buffer->len -= len;
	buffer->processed += len;
	return ret;
}

static int kvm_pv_mmu_write(struct kvm_vcpu *vcpu,
			    gpa_t addr, gpa_t value)
{
	int bytes = 8;
	int r;

	if (!is_long_mode(vcpu) && !is_pae(vcpu))
		bytes = 4;

	r = mmu_topup_memory_caches(vcpu);
	if (r)
		return r;

	if (!emulator_write_phys(vcpu, addr, &value, bytes))
		return -EFAULT;

	return 1;
}

static int kvm_pv_mmu_flush_tlb(struct kvm_vcpu *vcpu)
{
	kvm_set_cr3(vcpu, vcpu->arch.cr3);
	return 1;
}

static int kvm_pv_mmu_release_pt(struct kvm_vcpu *vcpu, gpa_t addr)
{
	spin_lock(&vcpu->kvm->mmu_lock);
	mmu_unshadow(vcpu->kvm, addr >> PAGE_SHIFT);
	spin_unlock(&vcpu->kvm->mmu_lock);
	return 1;
}

static int kvm_pv_mmu_op_one(struct kvm_vcpu *vcpu,
			     struct kvm_pv_mmu_op_buffer *buffer)
{
	struct kvm_mmu_op_header *header;

	header = pv_mmu_peek_buffer(buffer, sizeof *header);
	if (!header)
		return 0;
	switch (header->op) {
	case KVM_MMU_OP_WRITE_PTE: {
		struct kvm_mmu_op_write_pte *wpte;

		wpte = pv_mmu_read_buffer(buffer, sizeof *wpte);
		if (!wpte)
			return 0;
		return kvm_pv_mmu_write(vcpu, wpte->pte_phys,
					wpte->pte_val);
	}
	case KVM_MMU_OP_FLUSH_TLB: {
		struct kvm_mmu_op_flush_tlb *ftlb;

		ftlb = pv_mmu_read_buffer(buffer, sizeof *ftlb);
		if (!ftlb)
			return 0;
		return kvm_pv_mmu_flush_tlb(vcpu);
	}
	case KVM_MMU_OP_RELEASE_PT: {
		struct kvm_mmu_op_release_pt *rpt;

		rpt = pv_mmu_read_buffer(buffer, sizeof *rpt);
		if (!rpt)
			return 0;
		return kvm_pv_mmu_release_pt(vcpu, rpt->pt_phys);
	}
	default: return 0;
	}
}

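/*
 * Entry point for the paravirt mmu hypercall: copy the guest's op buffer
 * and apply the ops one by one until the buffer is exhausted or an op
 * fails. *ret reports how many bytes were processed.
 */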
int kvm_pv_mmu_op(struct kvm_vcpu *vcpu, unsigned long bytes,
		  gpa_t addr, unsigned long *ret)
{
	int r;
	struct kvm_pv_mmu_op_buffer *buffer = &vcpu->arch.mmu_op_buffer;

	buffer->ptr = buffer->buf;
	buffer->len = min_t(unsigned long, bytes, sizeof buffer->buf);
	buffer->processed = 0;

	r = kvm_read_guest(vcpu->kvm, addr, buffer->buf, buffer->len);
	if (r)
		goto out;

	while (buffer->len) {
		r = kvm_pv_mmu_op_one(vcpu, buffer);
		if (r < 0)
			goto out;
		if (r == 0)
			break;
	}

	r = 1;
out:
	*ret = buffer->processed;
	return r;
}

int kvm_mmu_get_spte_hierarchy(struct kvm_vcpu *vcpu, u64 addr, u64 sptes[4])
{
	struct kvm_shadow_walk_iterator iterator;
	int nr_sptes = 0;

	spin_lock(&vcpu->kvm->mmu_lock);
	for_each_shadow_entry(vcpu, addr, iterator) {
		sptes[iterator.level-1] = *iterator.sptep;
		nr_sptes++;
		if (!is_shadow_present_pte(*iterator.sptep))
			break;
	}
	spin_unlock(&vcpu->kvm->mmu_lock);

	return nr_sptes;
}
EXPORT_SYMBOL_GPL(kvm_mmu_get_spte_hierarchy);

#ifdef AUDIT

static const char *audit_msg;

static gva_t canonicalize(gva_t gva)
{
#ifdef CONFIG_X86_64
	gva = (long long)(gva << 16) >> 16;
#endif
	return gva;
}

typedef void (*inspect_spte_fn) (struct kvm *kvm, struct kvm_mmu_page *sp,
				 u64 *sptep);

static void __mmu_spte_walk(struct kvm *kvm, struct kvm_mmu_page *sp,
			    inspect_spte_fn fn)
{
	int i;

	for (i = 0; i < PT64_ENT_PER_PAGE; ++i) {
		u64 ent = sp->spt[i];

		if (is_shadow_present_pte(ent)) {
			if (!is_last_spte(ent, sp->role.level)) {
				struct kvm_mmu_page *child;

				child = page_header(ent & PT64_BASE_ADDR_MASK);
				__mmu_spte_walk(kvm, child, fn);
			} else
				fn(kvm, sp, &sp->spt[i]);
		}
	}
}

static void mmu_spte_walk(struct kvm_vcpu *vcpu, inspect_spte_fn fn)
{
	int i;
	struct kvm_mmu_page *sp;

	if (!VALID_PAGE(vcpu->arch.mmu.root_hpa))
		return;
	if (vcpu->arch.mmu.shadow_root_level == PT64_ROOT_LEVEL) {
		hpa_t root = vcpu->arch.mmu.root_hpa;

		sp = page_header(root);
		__mmu_spte_walk(vcpu->kvm, sp, fn);
		return;
	}
	for (i = 0; i < 4; ++i) {
		hpa_t root = vcpu->arch.mmu.pae_root[i];

		if (root && VALID_PAGE(root)) {
			root &= PT64_BASE_ADDR_MASK;
			sp = page_header(root);
			__mmu_spte_walk(vcpu->kvm, sp, fn);
		}
	}
	return;
}

static void audit_mappings_page(struct kvm_vcpu *vcpu, u64 page_pte,
				gva_t va, int level)
{
	u64 *pt = __va(page_pte & PT64_BASE_ADDR_MASK);
	int i;
	gva_t va_delta = 1ul << (PAGE_SHIFT + 9 * (level - 1));

	for (i = 0; i < PT64_ENT_PER_PAGE; ++i, va += va_delta) {
		u64 ent = pt[i];

		if (ent == shadow_trap_nonpresent_pte)
			continue;

		va = canonicalize(va);
		if (is_shadow_present_pte(ent) && !is_last_spte(ent, level))
			audit_mappings_page(vcpu, ent, va, level - 1);
		else {
			gpa_t gpa = vcpu->arch.mmu.gva_to_gpa(vcpu, va);
			gfn_t gfn = gpa >> PAGE_SHIFT;
			pfn_t pfn = gfn_to_pfn(vcpu->kvm, gfn);
			hpa_t hpa = (hpa_t)pfn << PAGE_SHIFT;

			if (is_error_pfn(pfn)) {
				kvm_release_pfn_clean(pfn);
				continue;
			}

			if (is_shadow_present_pte(ent)
			    && (ent & PT64_BASE_ADDR_MASK) != hpa)
				printk(KERN_ERR "xx audit error: (%s) levels %d"
				       " gva %lx gpa %llx hpa %llx ent %llx %d\n",
				       audit_msg, vcpu->arch.mmu.root_level,
				       va, gpa, hpa, ent,
				       is_shadow_present_pte(ent));
			else if (ent == shadow_notrap_nonpresent_pte
				 && !is_error_hpa(hpa))
				printk(KERN_ERR "audit: (%s) notrap shadow,"
				       " valid guest gva %lx\n", audit_msg, va);
			kvm_release_pfn_clean(pfn);
		}
	}
}

static void audit_mappings(struct kvm_vcpu *vcpu)
{
	unsigned i;

	if (vcpu->arch.mmu.root_level == 4)
		audit_mappings_page(vcpu, vcpu->arch.mmu.root_hpa, 0, 4);
	else
		for (i = 0; i < 4; ++i)
			if (vcpu->arch.mmu.pae_root[i] & PT_PRESENT_MASK)
				audit_mappings_page(vcpu,
						    vcpu->arch.mmu.pae_root[i],
						    i << 30,
						    2);
}

static int count_rmaps(struct kvm_vcpu *vcpu)
{
	int nmaps = 0;
	int i, j, k;

	for (i = 0; i < KVM_MEMORY_SLOTS; ++i) {
		struct kvm_memory_slot *m = &vcpu->kvm->memslots[i];
		struct kvm_rmap_desc *d;

		for (j = 0; j < m->npages; ++j) {
			unsigned long *rmapp = &m->rmap[j];

			if (!*rmapp)
				continue;
			if (!(*rmapp & 1)) {
				++nmaps;
				continue;
			}
			d = (struct kvm_rmap_desc *)(*rmapp & ~1ul);
			while (d) {
				for (k = 0; k < RMAP_EXT; ++k)
					if (d->sptes[k])
						++nmaps;
					else
						break;
				d = d->more;
			}
		}
	}
	return nmaps;
}

void inspect_spte_has_rmap(struct kvm *kvm, struct kvm_mmu_page *sp, u64 *sptep)
{
	unsigned long *rmapp;
	struct kvm_mmu_page *rev_sp;
	gfn_t gfn;

	if (*sptep & PT_WRITABLE_MASK) {
		rev_sp = page_header(__pa(sptep));
		gfn = rev_sp->gfns[sptep - rev_sp->spt];

		if (!gfn_to_memslot(kvm, gfn)) {
			if (!printk_ratelimit())
				return;
			printk(KERN_ERR "%s: no memslot for gfn %ld\n",
			       audit_msg, gfn);
			printk(KERN_ERR "%s: index %ld of sp (gfn=%lx)\n",
			       audit_msg, sptep - rev_sp->spt,
			       rev_sp->gfn);
			dump_stack();
			return;
		}

		rmapp = gfn_to_rmap(kvm, rev_sp->gfns[sptep - rev_sp->spt],
				    is_large_pte(*sptep));
		if (!*rmapp) {
			if (!printk_ratelimit())
				return;
			printk(KERN_ERR "%s: no rmap for writable spte %llx\n",
			       audit_msg, *sptep);
			dump_stack();
		}
	}
}

void audit_writable_sptes_have_rmaps(struct kvm_vcpu *vcpu)
{
	mmu_spte_walk(vcpu, inspect_spte_has_rmap);
}

static void check_writable_mappings_rmap(struct kvm_vcpu *vcpu)
{
	struct kvm_mmu_page *sp;
	int i;

	list_for_each_entry(sp, &vcpu->kvm->arch.active_mmu_pages, link) {
		u64 *pt = sp->spt;

		if (sp->role.level != PT_PAGE_TABLE_LEVEL)
			continue;

		for (i = 0; i < PT64_ENT_PER_PAGE; ++i) {
			u64 ent = pt[i];

			if (!(ent & PT_PRESENT_MASK))
				continue;
			if (!(ent & PT_WRITABLE_MASK))
				continue;
			inspect_spte_has_rmap(vcpu->kvm, sp, &pt[i]);
		}
	}
	return;
}

static void audit_rmap(struct kvm_vcpu *vcpu)
{
	check_writable_mappings_rmap(vcpu);
	count_rmaps(vcpu);
}

static void audit_write_protection(struct kvm_vcpu *vcpu)
{
	struct kvm_mmu_page *sp;
	struct kvm_memory_slot *slot;
	unsigned long *rmapp;
	u64 *spte;
	gfn_t gfn;

	list_for_each_entry(sp, &vcpu->kvm->arch.active_mmu_pages, link) {
		if (sp->role.direct)
			continue;
		if (sp->unsync)
			continue;

		gfn = unalias_gfn(vcpu->kvm, sp->gfn);
		slot = gfn_to_memslot_unaliased(vcpu->kvm, sp->gfn);
		rmapp = &slot->rmap[gfn - slot->base_gfn];

		spte = rmap_next(vcpu->kvm, rmapp, NULL);
		while (spte) {
			if (*spte & PT_WRITABLE_MASK)
				printk(KERN_ERR "%s: (%s) shadow page has "
				       "writable mappings: gfn %lx role %x\n",
				       __func__, audit_msg, sp->gfn,
				       sp->role.word);
			spte = rmap_next(vcpu->kvm, rmapp, spte);
		}
	}
}

static void kvm_mmu_audit(struct kvm_vcpu *vcpu, const char *msg)
{
	int olddbg = dbg;

	dbg = 0;
	audit_msg = msg;
	audit_rmap(vcpu);
	audit_write_protection(vcpu);
	if (strcmp("pre pte write", audit_msg) != 0)
		audit_mappings(vcpu);
	audit_writable_sptes_have_rmaps(vcpu);
	dbg = olddbg;
}

#endif