mmu.c

/*
 * Kernel-based Virtual Machine driver for Linux
 *
 * This module enables machines with Intel VT-x extensions to run virtual
 * machines without emulation or binary translation.
 *
 * MMU support
 *
 * Copyright (C) 2006 Qumranet, Inc.
 * Copyright 2010 Red Hat, Inc. and/or its affiliates.
 *
 * Authors:
 *   Yaniv Kamay  <yaniv@qumranet.com>
 *   Avi Kivity   <avi@qumranet.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 */

#include "mmu.h"
#include "x86.h"
#include "kvm_cache_regs.h"

#include <linux/kvm_host.h>
#include <linux/types.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/highmem.h>
#include <linux/module.h>
#include <linux/swap.h>
#include <linux/hugetlb.h>
#include <linux/compiler.h>
#include <linux/srcu.h>
#include <linux/slab.h>
#include <linux/uaccess.h>

#include <asm/page.h>
#include <asm/cmpxchg.h>
#include <asm/io.h>
#include <asm/vmx.h>

/*
 * Setting this variable to true enables two-dimensional paging (TDP),
 * where the hardware walks two page tables:
 * 1. the guest-virtual to guest-physical translation
 * 2. while doing 1., the guest-physical to host-physical translation
 * If the hardware supports TDP, we do not need to do shadow paging.
 */
bool tdp_enabled = false;

#undef MMU_DEBUG
#undef AUDIT

#ifdef AUDIT
static void kvm_mmu_audit(struct kvm_vcpu *vcpu, const char *msg);
#else
static void kvm_mmu_audit(struct kvm_vcpu *vcpu, const char *msg) {}
#endif

#ifdef MMU_DEBUG

#define pgprintk(x...) do { if (dbg) printk(x); } while (0)
#define rmap_printk(x...) do { if (dbg) printk(x); } while (0)

#else

#define pgprintk(x...) do { } while (0)
#define rmap_printk(x...) do { } while (0)

#endif

#if defined(MMU_DEBUG) || defined(AUDIT)
static int dbg = 0;
module_param(dbg, bool, 0644);
#endif

static int oos_shadow = 1;
module_param(oos_shadow, bool, 0644);

#ifndef MMU_DEBUG
#define ASSERT(x) do { } while (0)
#else
#define ASSERT(x)                                                       \
        if (!(x)) {                                                     \
                printk(KERN_WARNING "assertion failed %s:%d: %s\n",     \
                       __FILE__, __LINE__, #x);                         \
        }
#endif

#define PT_FIRST_AVAIL_BITS_SHIFT 9
#define PT64_SECOND_AVAIL_BITS_SHIFT 52

#define PT64_LEVEL_BITS 9

#define PT64_LEVEL_SHIFT(level) \
                (PAGE_SHIFT + (level - 1) * PT64_LEVEL_BITS)

#define PT64_LEVEL_MASK(level) \
                (((1ULL << PT64_LEVEL_BITS) - 1) << PT64_LEVEL_SHIFT(level))

#define PT64_INDEX(address, level) \
        (((address) >> PT64_LEVEL_SHIFT(level)) & ((1 << PT64_LEVEL_BITS) - 1))

#define PT32_LEVEL_BITS 10

#define PT32_LEVEL_SHIFT(level) \
                (PAGE_SHIFT + (level - 1) * PT32_LEVEL_BITS)

#define PT32_LEVEL_MASK(level) \
                (((1ULL << PT32_LEVEL_BITS) - 1) << PT32_LEVEL_SHIFT(level))

#define PT32_LVL_OFFSET_MASK(level) \
        (PT32_BASE_ADDR_MASK & ((1ULL << (PAGE_SHIFT + (((level) - 1) \
                                                * PT32_LEVEL_BITS))) - 1))

#define PT32_INDEX(address, level) \
        (((address) >> PT32_LEVEL_SHIFT(level)) & ((1 << PT32_LEVEL_BITS) - 1))

#define PT64_BASE_ADDR_MASK (((1ULL << 52) - 1) & ~(u64)(PAGE_SIZE-1))
#define PT64_DIR_BASE_ADDR_MASK \
        (PT64_BASE_ADDR_MASK & ~((1ULL << (PAGE_SHIFT + PT64_LEVEL_BITS)) - 1))
#define PT64_LVL_ADDR_MASK(level) \
        (PT64_BASE_ADDR_MASK & ~((1ULL << (PAGE_SHIFT + (((level) - 1) \
                                                * PT64_LEVEL_BITS))) - 1))
#define PT64_LVL_OFFSET_MASK(level) \
        (PT64_BASE_ADDR_MASK & ((1ULL << (PAGE_SHIFT + (((level) - 1) \
                                                * PT64_LEVEL_BITS))) - 1))

#define PT32_BASE_ADDR_MASK PAGE_MASK
#define PT32_DIR_BASE_ADDR_MASK \
        (PAGE_MASK & ~((1ULL << (PAGE_SHIFT + PT32_LEVEL_BITS)) - 1))
#define PT32_LVL_ADDR_MASK(level) \
        (PAGE_MASK & ~((1ULL << (PAGE_SHIFT + (((level) - 1) \
                                            * PT32_LEVEL_BITS))) - 1))

#define PT64_PERM_MASK (PT_PRESENT_MASK | PT_WRITABLE_MASK | PT_USER_MASK \
                        | PT64_NX_MASK)

#define RMAP_EXT 4

#define ACC_EXEC_MASK    1
#define ACC_WRITE_MASK   PT_WRITABLE_MASK
#define ACC_USER_MASK    PT_USER_MASK
#define ACC_ALL          (ACC_EXEC_MASK | ACC_WRITE_MASK | ACC_USER_MASK)

#include <trace/events/kvm.h>

#define CREATE_TRACE_POINTS
#include "mmutrace.h"

#define SPTE_HOST_WRITEABLE (1ULL << PT_FIRST_AVAIL_BITS_SHIFT)

#define SHADOW_PT_INDEX(addr, level) PT64_INDEX(addr, level)

struct kvm_rmap_desc {
        u64 *sptes[RMAP_EXT];
        struct kvm_rmap_desc *more;
};
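
/*
 * Iterator state for walking a shadow page table from the root towards
 * the leaf entry that maps @addr; used via for_each_shadow_entry() below.
 */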
struct kvm_shadow_walk_iterator {
        u64 addr;
        hpa_t shadow_addr;
        int level;
        u64 *sptep;
        unsigned index;
};

#define for_each_shadow_entry(_vcpu, _addr, _walker)            \
        for (shadow_walk_init(&(_walker), _vcpu, _addr);        \
             shadow_walk_okay(&(_walker));                      \
             shadow_walk_next(&(_walker)))

typedef void (*mmu_parent_walk_fn) (struct kvm_mmu_page *sp, u64 *spte);

static struct kmem_cache *pte_chain_cache;
static struct kmem_cache *rmap_desc_cache;
static struct kmem_cache *mmu_page_header_cache;

static u64 __read_mostly shadow_trap_nonpresent_pte;
static u64 __read_mostly shadow_notrap_nonpresent_pte;
static u64 __read_mostly shadow_base_present_pte;
static u64 __read_mostly shadow_nx_mask;
static u64 __read_mostly shadow_x_mask; /* mutually exclusive with nx_mask */
static u64 __read_mostly shadow_user_mask;
static u64 __read_mostly shadow_accessed_mask;
static u64 __read_mostly shadow_dirty_mask;

static inline u64 rsvd_bits(int s, int e)
{
        return ((1ULL << (e - s + 1)) - 1) << s;
}

void kvm_mmu_set_nonpresent_ptes(u64 trap_pte, u64 notrap_pte)
{
        shadow_trap_nonpresent_pte = trap_pte;
        shadow_notrap_nonpresent_pte = notrap_pte;
}
EXPORT_SYMBOL_GPL(kvm_mmu_set_nonpresent_ptes);

void kvm_mmu_set_base_ptes(u64 base_pte)
{
        shadow_base_present_pte = base_pte;
}
EXPORT_SYMBOL_GPL(kvm_mmu_set_base_ptes);

void kvm_mmu_set_mask_ptes(u64 user_mask, u64 accessed_mask,
                u64 dirty_mask, u64 nx_mask, u64 x_mask)
{
        shadow_user_mask = user_mask;
        shadow_accessed_mask = accessed_mask;
        shadow_dirty_mask = dirty_mask;
        shadow_nx_mask = nx_mask;
        shadow_x_mask = x_mask;
}
EXPORT_SYMBOL_GPL(kvm_mmu_set_mask_ptes);

static bool is_write_protection(struct kvm_vcpu *vcpu)
{
        return kvm_read_cr0_bits(vcpu, X86_CR0_WP);
}

static int is_cpuid_PSE36(void)
{
        return 1;
}

static int is_nx(struct kvm_vcpu *vcpu)
{
        return vcpu->arch.efer & EFER_NX;
}

static int is_shadow_present_pte(u64 pte)
{
        return pte != shadow_trap_nonpresent_pte
                && pte != shadow_notrap_nonpresent_pte;
}

static int is_large_pte(u64 pte)
{
        return pte & PT_PAGE_SIZE_MASK;
}

static int is_writable_pte(unsigned long pte)
{
        return pte & PT_WRITABLE_MASK;
}

static int is_dirty_gpte(unsigned long pte)
{
        return pte & PT_DIRTY_MASK;
}

static int is_rmap_spte(u64 pte)
{
        return is_shadow_present_pte(pte);
}

static int is_last_spte(u64 pte, int level)
{
        if (level == PT_PAGE_TABLE_LEVEL)
                return 1;
        if (is_large_pte(pte))
                return 1;
        return 0;
}

static pfn_t spte_to_pfn(u64 pte)
{
        return (pte & PT64_BASE_ADDR_MASK) >> PAGE_SHIFT;
}

static gfn_t pse36_gfn_delta(u32 gpte)
{
        int shift = 32 - PT32_DIR_PSE36_SHIFT - PAGE_SHIFT;

        return (gpte & PT32_DIR_PSE36_MASK) << shift;
}

static void __set_spte(u64 *sptep, u64 spte)
{
#ifdef CONFIG_X86_64
        set_64bit((unsigned long *)sptep, spte);
#else
        set_64bit((unsigned long long *)sptep, spte);
#endif
}

static u64 __xchg_spte(u64 *sptep, u64 new_spte)
{
#ifdef CONFIG_X86_64
        return xchg(sptep, new_spte);
#else
        u64 old_spte;

        do {
                old_spte = *sptep;
        } while (cmpxchg64(sptep, old_spte, new_spte) != old_spte);

        return old_spte;
#endif
}
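
/*
 * update_spte() preserves accessed-bit information: when the new value
 * could clear an accessed bit that the hardware may have set, the update
 * is done with an atomic exchange so the harvested bit can be forwarded
 * to the backing page via mark_page_accessed().  On 32-bit hosts the
 * exchange falls back to a cmpxchg64() loop to avoid torn 64-bit writes.
 */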
static void update_spte(u64 *sptep, u64 new_spte)
{
        u64 old_spte;

        if (!shadow_accessed_mask || (new_spte & shadow_accessed_mask)) {
                __set_spte(sptep, new_spte);
        } else {
                old_spte = __xchg_spte(sptep, new_spte);
                if (old_spte & shadow_accessed_mask)
                        mark_page_accessed(pfn_to_page(spte_to_pfn(old_spte)));
        }
}

static int mmu_topup_memory_cache(struct kvm_mmu_memory_cache *cache,
                                  struct kmem_cache *base_cache, int min)
{
        void *obj;

        if (cache->nobjs >= min)
                return 0;
        while (cache->nobjs < ARRAY_SIZE(cache->objects)) {
                obj = kmem_cache_zalloc(base_cache, GFP_KERNEL);
                if (!obj)
                        return -ENOMEM;
                cache->objects[cache->nobjs++] = obj;
        }
        return 0;
}

static void mmu_free_memory_cache(struct kvm_mmu_memory_cache *mc,
                                  struct kmem_cache *cache)
{
        while (mc->nobjs)
                kmem_cache_free(cache, mc->objects[--mc->nobjs]);
}

static int mmu_topup_memory_cache_page(struct kvm_mmu_memory_cache *cache,
                                       int min)
{
        struct page *page;

        if (cache->nobjs >= min)
                return 0;
        while (cache->nobjs < ARRAY_SIZE(cache->objects)) {
                page = alloc_page(GFP_KERNEL);
                if (!page)
                        return -ENOMEM;
                cache->objects[cache->nobjs++] = page_address(page);
        }
        return 0;
}

static void mmu_free_memory_cache_page(struct kvm_mmu_memory_cache *mc)
{
        while (mc->nobjs)
                free_page((unsigned long)mc->objects[--mc->nobjs]);
}

static int mmu_topup_memory_caches(struct kvm_vcpu *vcpu)
{
        int r;

        r = mmu_topup_memory_cache(&vcpu->arch.mmu_pte_chain_cache,
                                   pte_chain_cache, 4);
        if (r)
                goto out;
        r = mmu_topup_memory_cache(&vcpu->arch.mmu_rmap_desc_cache,
                                   rmap_desc_cache, 4);
        if (r)
                goto out;
        r = mmu_topup_memory_cache_page(&vcpu->arch.mmu_page_cache, 8);
        if (r)
                goto out;
        r = mmu_topup_memory_cache(&vcpu->arch.mmu_page_header_cache,
                                   mmu_page_header_cache, 4);
out:
        return r;
}

static void mmu_free_memory_caches(struct kvm_vcpu *vcpu)
{
        mmu_free_memory_cache(&vcpu->arch.mmu_pte_chain_cache, pte_chain_cache);
        mmu_free_memory_cache(&vcpu->arch.mmu_rmap_desc_cache, rmap_desc_cache);
        mmu_free_memory_cache_page(&vcpu->arch.mmu_page_cache);
        mmu_free_memory_cache(&vcpu->arch.mmu_page_header_cache,
                              mmu_page_header_cache);
}

static void *mmu_memory_cache_alloc(struct kvm_mmu_memory_cache *mc,
                                    size_t size)
{
        void *p;

        BUG_ON(!mc->nobjs);
        p = mc->objects[--mc->nobjs];
        return p;
}

static struct kvm_pte_chain *mmu_alloc_pte_chain(struct kvm_vcpu *vcpu)
{
        return mmu_memory_cache_alloc(&vcpu->arch.mmu_pte_chain_cache,
                                      sizeof(struct kvm_pte_chain));
}

static void mmu_free_pte_chain(struct kvm_pte_chain *pc)
{
        kmem_cache_free(pte_chain_cache, pc);
}

static struct kvm_rmap_desc *mmu_alloc_rmap_desc(struct kvm_vcpu *vcpu)
{
        return mmu_memory_cache_alloc(&vcpu->arch.mmu_rmap_desc_cache,
                                      sizeof(struct kvm_rmap_desc));
}

static void mmu_free_rmap_desc(struct kvm_rmap_desc *rd)
{
        kmem_cache_free(rmap_desc_cache, rd);
}
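
/*
 * For direct shadow pages the gfn of each entry is computable from
 * sp->gfn and the entry index, so sp->gfns[] is only allocated and
 * consulted for indirect pages.
 */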
static gfn_t kvm_mmu_page_get_gfn(struct kvm_mmu_page *sp, int index)
{
        if (!sp->role.direct)
                return sp->gfns[index];

        return sp->gfn + (index << ((sp->role.level - 1) * PT64_LEVEL_BITS));
}

static void kvm_mmu_page_set_gfn(struct kvm_mmu_page *sp, int index, gfn_t gfn)
{
        if (sp->role.direct)
                BUG_ON(gfn != kvm_mmu_page_get_gfn(sp, index));
        else
                sp->gfns[index] = gfn;
}

/*
 * Return the pointer to the largepage write count for a given
 * gfn, handling slots that are not large page aligned.
 */
static int *slot_largepage_idx(gfn_t gfn,
                               struct kvm_memory_slot *slot,
                               int level)
{
        unsigned long idx;

        idx = (gfn >> KVM_HPAGE_GFN_SHIFT(level)) -
              (slot->base_gfn >> KVM_HPAGE_GFN_SHIFT(level));
        return &slot->lpage_info[level - 2][idx].write_count;
}

static void account_shadowed(struct kvm *kvm, gfn_t gfn)
{
        struct kvm_memory_slot *slot;
        int *write_count;
        int i;

        slot = gfn_to_memslot(kvm, gfn);
        for (i = PT_DIRECTORY_LEVEL;
             i < PT_PAGE_TABLE_LEVEL + KVM_NR_PAGE_SIZES; ++i) {
                write_count = slot_largepage_idx(gfn, slot, i);
                *write_count += 1;
        }
}

static void unaccount_shadowed(struct kvm *kvm, gfn_t gfn)
{
        struct kvm_memory_slot *slot;
        int *write_count;
        int i;

        slot = gfn_to_memslot(kvm, gfn);
        for (i = PT_DIRECTORY_LEVEL;
             i < PT_PAGE_TABLE_LEVEL + KVM_NR_PAGE_SIZES; ++i) {
                write_count = slot_largepage_idx(gfn, slot, i);
                *write_count -= 1;
                WARN_ON(*write_count < 0);
        }
}

static int has_wrprotected_page(struct kvm *kvm,
                                gfn_t gfn,
                                int level)
{
        struct kvm_memory_slot *slot;
        int *largepage_idx;

        slot = gfn_to_memslot(kvm, gfn);
        if (slot) {
                largepage_idx = slot_largepage_idx(gfn, slot, level);
                return *largepage_idx;
        }

        return 1;
}

static int host_mapping_level(struct kvm *kvm, gfn_t gfn)
{
        unsigned long page_size;
        int i, ret = 0;

        page_size = kvm_host_page_size(kvm, gfn);

        for (i = PT_PAGE_TABLE_LEVEL;
             i < (PT_PAGE_TABLE_LEVEL + KVM_NR_PAGE_SIZES); ++i) {
                if (page_size >= KVM_HPAGE_SIZE(i))
                        ret = i;
                else
                        break;
        }

        return ret;
}

static int mapping_level(struct kvm_vcpu *vcpu, gfn_t large_gfn)
{
        struct kvm_memory_slot *slot;
        int host_level, level, max_level;

        slot = gfn_to_memslot(vcpu->kvm, large_gfn);
        if (slot && slot->dirty_bitmap)
                return PT_PAGE_TABLE_LEVEL;

        host_level = host_mapping_level(vcpu->kvm, large_gfn);

        if (host_level == PT_PAGE_TABLE_LEVEL)
                return host_level;

        max_level = kvm_x86_ops->get_lpage_level() < host_level ?
                kvm_x86_ops->get_lpage_level() : host_level;

        for (level = PT_DIRECTORY_LEVEL; level <= max_level; ++level)
                if (has_wrprotected_page(vcpu->kvm, large_gfn, level))
                        break;

        return level - 1;
}

/*
 * Take gfn and return the reverse mapping to it.
 */
static unsigned long *gfn_to_rmap(struct kvm *kvm, gfn_t gfn, int level)
{
        struct kvm_memory_slot *slot;
        unsigned long idx;

        slot = gfn_to_memslot(kvm, gfn);
        if (likely(level == PT_PAGE_TABLE_LEVEL))
                return &slot->rmap[gfn - slot->base_gfn];

        idx = (gfn >> KVM_HPAGE_GFN_SHIFT(level)) -
              (slot->base_gfn >> KVM_HPAGE_GFN_SHIFT(level));

        return &slot->lpage_info[level - 2][idx].rmap_pde;
}

/*
 * Reverse mapping data structures:
 *
 * If rmapp bit zero is zero, then rmapp points to the shadow page table entry
 * that points to page_address(page).
 *
 * If rmapp bit zero is one, then (rmapp & ~1) points to a struct kvm_rmap_desc
 * containing more mappings.
 *
 * Returns the number of rmap entries before the spte was added, or zero if
 * the spte was not added.
 */
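
/*
 * Example: with a single mapping, *rmapp == (unsigned long)spte (bit 0
 * clear).  Adding a second spte allocates a kvm_rmap_desc d with
 * d.sptes = { old_spte, new_spte } and sets *rmapp = (unsigned long)&d | 1;
 * further descriptors are chained through d.more.
 */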
static int rmap_add(struct kvm_vcpu *vcpu, u64 *spte, gfn_t gfn)
{
        struct kvm_mmu_page *sp;
        struct kvm_rmap_desc *desc;
        unsigned long *rmapp;
        int i, count = 0;

        if (!is_rmap_spte(*spte))
                return count;
        sp = page_header(__pa(spte));
        kvm_mmu_page_set_gfn(sp, spte - sp->spt, gfn);
        rmapp = gfn_to_rmap(vcpu->kvm, gfn, sp->role.level);
        if (!*rmapp) {
                rmap_printk("rmap_add: %p %llx 0->1\n", spte, *spte);
                *rmapp = (unsigned long)spte;
        } else if (!(*rmapp & 1)) {
                rmap_printk("rmap_add: %p %llx 1->many\n", spte, *spte);
                desc = mmu_alloc_rmap_desc(vcpu);
                desc->sptes[0] = (u64 *)*rmapp;
                desc->sptes[1] = spte;
                *rmapp = (unsigned long)desc | 1;
        } else {
                rmap_printk("rmap_add: %p %llx many->many\n", spte, *spte);
                desc = (struct kvm_rmap_desc *)(*rmapp & ~1ul);
                while (desc->sptes[RMAP_EXT-1] && desc->more) {
                        desc = desc->more;
                        count += RMAP_EXT;
                }
                if (desc->sptes[RMAP_EXT-1]) {
                        desc->more = mmu_alloc_rmap_desc(vcpu);
                        desc = desc->more;
                }
                for (i = 0; desc->sptes[i]; ++i)
                        ;
                desc->sptes[i] = spte;
        }
        return count;
}

static void rmap_desc_remove_entry(unsigned long *rmapp,
                                   struct kvm_rmap_desc *desc,
                                   int i,
                                   struct kvm_rmap_desc *prev_desc)
{
        int j;

        for (j = RMAP_EXT - 1; !desc->sptes[j] && j > i; --j)
                ;
        desc->sptes[i] = desc->sptes[j];
        desc->sptes[j] = NULL;
        if (j != 0)
                return;
        if (!prev_desc && !desc->more)
                *rmapp = (unsigned long)desc->sptes[0];
        else
                if (prev_desc)
                        prev_desc->more = desc->more;
                else
                        *rmapp = (unsigned long)desc->more | 1;
        mmu_free_rmap_desc(desc);
}

static void rmap_remove(struct kvm *kvm, u64 *spte)
{
        struct kvm_rmap_desc *desc;
        struct kvm_rmap_desc *prev_desc;
        struct kvm_mmu_page *sp;
        gfn_t gfn;
        unsigned long *rmapp;
        int i;

        sp = page_header(__pa(spte));
        gfn = kvm_mmu_page_get_gfn(sp, spte - sp->spt);
        rmapp = gfn_to_rmap(kvm, gfn, sp->role.level);
        if (!*rmapp) {
                printk(KERN_ERR "rmap_remove: %p %llx 0->BUG\n", spte, *spte);
                BUG();
        } else if (!(*rmapp & 1)) {
                rmap_printk("rmap_remove: %p %llx 1->0\n", spte, *spte);
                if ((u64 *)*rmapp != spte) {
                        printk(KERN_ERR "rmap_remove: %p %llx 1->BUG\n",
                               spte, *spte);
                        BUG();
                }
                *rmapp = 0;
        } else {
                rmap_printk("rmap_remove: %p %llx many->many\n", spte, *spte);
                desc = (struct kvm_rmap_desc *)(*rmapp & ~1ul);
                prev_desc = NULL;
                while (desc) {
                        for (i = 0; i < RMAP_EXT && desc->sptes[i]; ++i)
                                if (desc->sptes[i] == spte) {
                                        rmap_desc_remove_entry(rmapp,
                                                               desc, i,
                                                               prev_desc);
                                        return;
                                }
                        prev_desc = desc;
                        desc = desc->more;
                }
                pr_err("rmap_remove: %p %llx many->many\n", spte, *spte);
                BUG();
        }
}

static void drop_spte(struct kvm *kvm, u64 *sptep, u64 new_spte)
{
        pfn_t pfn;
        u64 old_spte;

        old_spte = __xchg_spte(sptep, new_spte);
        if (!is_rmap_spte(old_spte))
                return;
        pfn = spte_to_pfn(old_spte);
        if (old_spte & shadow_accessed_mask)
                kvm_set_pfn_accessed(pfn);
        if (is_writable_pte(old_spte))
                kvm_set_pfn_dirty(pfn);
        rmap_remove(kvm, sptep);
}

static u64 *rmap_next(struct kvm *kvm, unsigned long *rmapp, u64 *spte)
{
        struct kvm_rmap_desc *desc;
        u64 *prev_spte;
        int i;

        if (!*rmapp)
                return NULL;
        else if (!(*rmapp & 1)) {
                if (!spte)
                        return (u64 *)*rmapp;
                return NULL;
        }
        desc = (struct kvm_rmap_desc *)(*rmapp & ~1ul);
        prev_spte = NULL;
        while (desc) {
                for (i = 0; i < RMAP_EXT && desc->sptes[i]; ++i) {
                        if (prev_spte == spte)
                                return desc->sptes[i];
                        prev_spte = desc->sptes[i];
                }
                desc = desc->more;
        }
        return NULL;
}
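
/*
 * rmap_next() implements a simple cursor over an rmap chain: pass NULL to
 * get the first spte, then pass the previously returned spte to get the
 * one after it; NULL is returned when the chain is exhausted.
 */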
static int rmap_write_protect(struct kvm *kvm, u64 gfn)
{
        unsigned long *rmapp;
        u64 *spte;
        int i, write_protected = 0;

        rmapp = gfn_to_rmap(kvm, gfn, PT_PAGE_TABLE_LEVEL);

        spte = rmap_next(kvm, rmapp, NULL);
        while (spte) {
                BUG_ON(!spte);
                BUG_ON(!(*spte & PT_PRESENT_MASK));
                rmap_printk("rmap_write_protect: spte %p %llx\n", spte, *spte);
                if (is_writable_pte(*spte)) {
                        update_spte(spte, *spte & ~PT_WRITABLE_MASK);
                        write_protected = 1;
                }
                spte = rmap_next(kvm, rmapp, spte);
        }
        if (write_protected) {
                pfn_t pfn;

                spte = rmap_next(kvm, rmapp, NULL);
                pfn = spte_to_pfn(*spte);
                kvm_set_pfn_dirty(pfn);
        }

        /* check for huge page mappings */
        for (i = PT_DIRECTORY_LEVEL;
             i < PT_PAGE_TABLE_LEVEL + KVM_NR_PAGE_SIZES; ++i) {
                rmapp = gfn_to_rmap(kvm, gfn, i);
                spte = rmap_next(kvm, rmapp, NULL);
                while (spte) {
                        BUG_ON(!spte);
                        BUG_ON(!(*spte & PT_PRESENT_MASK));
                        BUG_ON((*spte & (PT_PAGE_SIZE_MASK|PT_PRESENT_MASK)) != (PT_PAGE_SIZE_MASK|PT_PRESENT_MASK));
                        pgprintk("rmap_write_protect(large): spte %p %llx %lld\n", spte, *spte, gfn);
                        if (is_writable_pte(*spte)) {
                                drop_spte(kvm, spte,
                                          shadow_trap_nonpresent_pte);
                                --kvm->stat.lpages;
                                spte = NULL;
                                write_protected = 1;
                        }
                        spte = rmap_next(kvm, rmapp, spte);
                }
        }

        return write_protected;
}

static int kvm_unmap_rmapp(struct kvm *kvm, unsigned long *rmapp,
                           unsigned long data)
{
        u64 *spte;
        int need_tlb_flush = 0;

        while ((spte = rmap_next(kvm, rmapp, NULL))) {
                BUG_ON(!(*spte & PT_PRESENT_MASK));
                rmap_printk("kvm_rmap_unmap_hva: spte %p %llx\n", spte, *spte);
                drop_spte(kvm, spte, shadow_trap_nonpresent_pte);
                need_tlb_flush = 1;
        }
        return need_tlb_flush;
}

static int kvm_set_pte_rmapp(struct kvm *kvm, unsigned long *rmapp,
                             unsigned long data)
{
        int need_flush = 0;
        u64 *spte, new_spte, old_spte;
        pte_t *ptep = (pte_t *)data;
        pfn_t new_pfn;

        WARN_ON(pte_huge(*ptep));
        new_pfn = pte_pfn(*ptep);
        spte = rmap_next(kvm, rmapp, NULL);
        while (spte) {
                BUG_ON(!is_shadow_present_pte(*spte));
                rmap_printk("kvm_set_pte_rmapp: spte %p %llx\n", spte, *spte);
                need_flush = 1;
                if (pte_write(*ptep)) {
                        drop_spte(kvm, spte, shadow_trap_nonpresent_pte);
                        spte = rmap_next(kvm, rmapp, NULL);
                } else {
                        new_spte = *spte & ~(PT64_BASE_ADDR_MASK);
                        new_spte |= (u64)new_pfn << PAGE_SHIFT;

                        new_spte &= ~PT_WRITABLE_MASK;
                        new_spte &= ~SPTE_HOST_WRITEABLE;
                        new_spte &= ~shadow_accessed_mask;
                        if (is_writable_pte(*spte))
                                kvm_set_pfn_dirty(spte_to_pfn(*spte));
                        old_spte = __xchg_spte(spte, new_spte);
                        if (is_shadow_present_pte(old_spte)
                            && (old_spte & shadow_accessed_mask))
                                mark_page_accessed(pfn_to_page(spte_to_pfn(old_spte)));
                        spte = rmap_next(kvm, rmapp, spte);
                }
        }
        if (need_flush)
                kvm_flush_remote_tlbs(kvm);

        return 0;
}

static int kvm_handle_hva(struct kvm *kvm, unsigned long hva,
                          unsigned long data,
                          int (*handler)(struct kvm *kvm, unsigned long *rmapp,
                                         unsigned long data))
{
        int i, j;
        int ret;
        int retval = 0;
        struct kvm_memslots *slots;

        slots = kvm_memslots(kvm);

        for (i = 0; i < slots->nmemslots; i++) {
                struct kvm_memory_slot *memslot = &slots->memslots[i];
                unsigned long start = memslot->userspace_addr;
                unsigned long end;

                end = start + (memslot->npages << PAGE_SHIFT);
                if (hva >= start && hva < end) {
                        gfn_t gfn_offset = (hva - start) >> PAGE_SHIFT;

                        ret = handler(kvm, &memslot->rmap[gfn_offset], data);

                        for (j = 0; j < KVM_NR_PAGE_SIZES - 1; ++j) {
                                unsigned long idx;
                                int sh;

                                sh = KVM_HPAGE_GFN_SHIFT(PT_DIRECTORY_LEVEL+j);
                                idx = ((memslot->base_gfn+gfn_offset) >> sh) -
                                        (memslot->base_gfn >> sh);
                                ret |= handler(kvm,
                                        &memslot->lpage_info[j][idx].rmap_pde,
                                        data);
                        }
                        trace_kvm_age_page(hva, memslot, ret);
                        retval |= ret;
                }
        }

        return retval;
}
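
/*
 * kvm_handle_hva() maps a host virtual address back to the memslot that
 * contains it and applies @handler to the 4K rmap chain for that gfn as
 * well as to the rmap chain of every larger page size covering it, so
 * mmu-notifier callbacks reach huge-page mappings too.
 */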
int kvm_unmap_hva(struct kvm *kvm, unsigned long hva)
{
        return kvm_handle_hva(kvm, hva, 0, kvm_unmap_rmapp);
}

void kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte)
{
        kvm_handle_hva(kvm, hva, (unsigned long)&pte, kvm_set_pte_rmapp);
}

static int kvm_age_rmapp(struct kvm *kvm, unsigned long *rmapp,
                         unsigned long data)
{
        u64 *spte;
        int young = 0;

        /*
         * Emulate the accessed bit for EPT, by checking if this page has
         * an EPT mapping, and clearing it if it does. On the next access,
         * a new EPT mapping will be established.
         * This has some overhead, but not as much as the cost of swapping
         * out actively used pages or breaking up actively used hugepages.
         */
        if (!shadow_accessed_mask)
                return kvm_unmap_rmapp(kvm, rmapp, data);

        spte = rmap_next(kvm, rmapp, NULL);
        while (spte) {
                int _young;
                u64 _spte = *spte;
                BUG_ON(!(_spte & PT_PRESENT_MASK));
                _young = _spte & PT_ACCESSED_MASK;
                if (_young) {
                        young = 1;
                        clear_bit(PT_ACCESSED_SHIFT, (unsigned long *)spte);
                }
                spte = rmap_next(kvm, rmapp, spte);
        }
        return young;
}

#define RMAP_RECYCLE_THRESHOLD 1000

static void rmap_recycle(struct kvm_vcpu *vcpu, u64 *spte, gfn_t gfn)
{
        unsigned long *rmapp;
        struct kvm_mmu_page *sp;

        sp = page_header(__pa(spte));

        rmapp = gfn_to_rmap(vcpu->kvm, gfn, sp->role.level);

        kvm_unmap_rmapp(vcpu->kvm, rmapp, 0);
        kvm_flush_remote_tlbs(vcpu->kvm);
}

int kvm_age_hva(struct kvm *kvm, unsigned long hva)
{
        return kvm_handle_hva(kvm, hva, 0, kvm_age_rmapp);
}

#ifdef MMU_DEBUG
static int is_empty_shadow_page(u64 *spt)
{
        u64 *pos;
        u64 *end;

        for (pos = spt, end = pos + PAGE_SIZE / sizeof(u64); pos != end; pos++)
                if (is_shadow_present_pte(*pos)) {
                        printk(KERN_ERR "%s: %p %llx\n", __func__,
                               pos, *pos);
                        return 0;
                }
        return 1;
}
#endif

static void kvm_mmu_free_page(struct kvm *kvm, struct kvm_mmu_page *sp)
{
        ASSERT(is_empty_shadow_page(sp->spt));
        hlist_del(&sp->hash_link);
        list_del(&sp->link);
        __free_page(virt_to_page(sp->spt));
        if (!sp->role.direct)
                __free_page(virt_to_page(sp->gfns));
        kmem_cache_free(mmu_page_header_cache, sp);
        ++kvm->arch.n_free_mmu_pages;
}

static unsigned kvm_page_table_hashfn(gfn_t gfn)
{
        return gfn & ((1 << KVM_MMU_HASH_SHIFT) - 1);
}

static struct kvm_mmu_page *kvm_mmu_alloc_page(struct kvm_vcpu *vcpu,
                                               u64 *parent_pte, int direct)
{
        struct kvm_mmu_page *sp;

        sp = mmu_memory_cache_alloc(&vcpu->arch.mmu_page_header_cache, sizeof *sp);
        sp->spt = mmu_memory_cache_alloc(&vcpu->arch.mmu_page_cache, PAGE_SIZE);
        if (!direct)
                sp->gfns = mmu_memory_cache_alloc(&vcpu->arch.mmu_page_cache,
                                                  PAGE_SIZE);
        set_page_private(virt_to_page(sp->spt), (unsigned long)sp);
        list_add(&sp->link, &vcpu->kvm->arch.active_mmu_pages);
        bitmap_zero(sp->slot_bitmap, KVM_MEMORY_SLOTS + KVM_PRIVATE_MEM_SLOTS);
        sp->multimapped = 0;
        sp->parent_pte = parent_pte;
        --vcpu->kvm->arch.n_free_mmu_pages;
        return sp;
}

static void mmu_page_add_parent_pte(struct kvm_vcpu *vcpu,
                                    struct kvm_mmu_page *sp, u64 *parent_pte)
{
        struct kvm_pte_chain *pte_chain;
        struct hlist_node *node;
        int i;

        if (!parent_pte)
                return;
        if (!sp->multimapped) {
                u64 *old = sp->parent_pte;

                if (!old) {
                        sp->parent_pte = parent_pte;
                        return;
                }
                sp->multimapped = 1;
                pte_chain = mmu_alloc_pte_chain(vcpu);
                INIT_HLIST_HEAD(&sp->parent_ptes);
                hlist_add_head(&pte_chain->link, &sp->parent_ptes);
                pte_chain->parent_ptes[0] = old;
        }
        hlist_for_each_entry(pte_chain, node, &sp->parent_ptes, link) {
                if (pte_chain->parent_ptes[NR_PTE_CHAIN_ENTRIES-1])
                        continue;
                for (i = 0; i < NR_PTE_CHAIN_ENTRIES; ++i)
                        if (!pte_chain->parent_ptes[i]) {
                                pte_chain->parent_ptes[i] = parent_pte;
                                return;
                        }
        }
        pte_chain = mmu_alloc_pte_chain(vcpu);
        BUG_ON(!pte_chain);
        hlist_add_head(&pte_chain->link, &sp->parent_ptes);
        pte_chain->parent_ptes[0] = parent_pte;
}
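
/*
 * A shadow page records its parent sptes either inline (sp->parent_pte,
 * the common single-parent case) or, once a second parent appears, in
 * kvm_pte_chain blocks hung off sp->parent_ptes with sp->multimapped set;
 * the removal path below undoes either representation.
 */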
static void mmu_page_remove_parent_pte(struct kvm_mmu_page *sp,
                                       u64 *parent_pte)
{
        struct kvm_pte_chain *pte_chain;
        struct hlist_node *node;
        int i;

        if (!sp->multimapped) {
                BUG_ON(sp->parent_pte != parent_pte);
                sp->parent_pte = NULL;
                return;
        }
        hlist_for_each_entry(pte_chain, node, &sp->parent_ptes, link)
                for (i = 0; i < NR_PTE_CHAIN_ENTRIES; ++i) {
                        if (!pte_chain->parent_ptes[i])
                                break;
                        if (pte_chain->parent_ptes[i] != parent_pte)
                                continue;
                        while (i + 1 < NR_PTE_CHAIN_ENTRIES
                               && pte_chain->parent_ptes[i + 1]) {
                                pte_chain->parent_ptes[i]
                                        = pte_chain->parent_ptes[i + 1];
                                ++i;
                        }
                        pte_chain->parent_ptes[i] = NULL;
                        if (i == 0) {
                                hlist_del(&pte_chain->link);
                                mmu_free_pte_chain(pte_chain);
                                if (hlist_empty(&sp->parent_ptes)) {
                                        sp->multimapped = 0;
                                        sp->parent_pte = NULL;
                                }
                        }
                        return;
                }
        BUG();
}

static void mmu_parent_walk(struct kvm_mmu_page *sp, mmu_parent_walk_fn fn)
{
        struct kvm_pte_chain *pte_chain;
        struct hlist_node *node;
        struct kvm_mmu_page *parent_sp;
        int i;

        if (!sp->multimapped && sp->parent_pte) {
                parent_sp = page_header(__pa(sp->parent_pte));
                fn(parent_sp, sp->parent_pte);
                return;
        }

        hlist_for_each_entry(pte_chain, node, &sp->parent_ptes, link)
                for (i = 0; i < NR_PTE_CHAIN_ENTRIES; ++i) {
                        u64 *spte = pte_chain->parent_ptes[i];

                        if (!spte)
                                break;
                        parent_sp = page_header(__pa(spte));
                        fn(parent_sp, spte);
                }
}

static void mark_unsync(struct kvm_mmu_page *sp, u64 *spte);
static void kvm_mmu_mark_parents_unsync(struct kvm_mmu_page *sp)
{
        mmu_parent_walk(sp, mark_unsync);
}

static void mark_unsync(struct kvm_mmu_page *sp, u64 *spte)
{
        unsigned int index;

        index = spte - sp->spt;
        if (__test_and_set_bit(index, sp->unsync_child_bitmap))
                return;
        if (sp->unsync_children++)
                return;
        kvm_mmu_mark_parents_unsync(sp);
}

static void nonpaging_prefetch_page(struct kvm_vcpu *vcpu,
                                    struct kvm_mmu_page *sp)
{
        int i;

        for (i = 0; i < PT64_ENT_PER_PAGE; ++i)
                sp->spt[i] = shadow_trap_nonpresent_pte;
}

static int nonpaging_sync_page(struct kvm_vcpu *vcpu,
                               struct kvm_mmu_page *sp, bool clear_unsync)
{
        return 1;
}

static void nonpaging_invlpg(struct kvm_vcpu *vcpu, gva_t gva)
{
}

#define KVM_PAGE_ARRAY_NR 16

struct kvm_mmu_pages {
        struct mmu_page_and_offset {
                struct kvm_mmu_page *sp;
                unsigned int idx;
        } page[KVM_PAGE_ARRAY_NR];
        unsigned int nr;
};

#define for_each_unsync_children(bitmap, idx)           \
        for (idx = find_first_bit(bitmap, 512);         \
             idx < 512;                                 \
             idx = find_next_bit(bitmap, 512, idx+1))
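
/*
 * 512 is the number of sptes per shadow page (PAGE_SIZE / sizeof(u64)),
 * i.e. the size of the unsync_child_bitmap being scanned above.
 */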
static int mmu_pages_add(struct kvm_mmu_pages *pvec, struct kvm_mmu_page *sp,
                         int idx)
{
        int i;

        if (sp->unsync)
                for (i = 0; i < pvec->nr; i++)
                        if (pvec->page[i].sp == sp)
                                return 0;

        pvec->page[pvec->nr].sp = sp;
        pvec->page[pvec->nr].idx = idx;
        pvec->nr++;
        return (pvec->nr == KVM_PAGE_ARRAY_NR);
}

static int __mmu_unsync_walk(struct kvm_mmu_page *sp,
                             struct kvm_mmu_pages *pvec)
{
        int i, ret, nr_unsync_leaf = 0;

        for_each_unsync_children(sp->unsync_child_bitmap, i) {
                struct kvm_mmu_page *child;
                u64 ent = sp->spt[i];

                if (!is_shadow_present_pte(ent) || is_large_pte(ent))
                        goto clear_child_bitmap;

                child = page_header(ent & PT64_BASE_ADDR_MASK);

                if (child->unsync_children) {
                        if (mmu_pages_add(pvec, child, i))
                                return -ENOSPC;

                        ret = __mmu_unsync_walk(child, pvec);
                        if (!ret)
                                goto clear_child_bitmap;
                        else if (ret > 0)
                                nr_unsync_leaf += ret;
                        else
                                return ret;
                } else if (child->unsync) {
                        nr_unsync_leaf++;
                        if (mmu_pages_add(pvec, child, i))
                                return -ENOSPC;
                } else
                        goto clear_child_bitmap;

                continue;

clear_child_bitmap:
                __clear_bit(i, sp->unsync_child_bitmap);
                sp->unsync_children--;
                WARN_ON((int)sp->unsync_children < 0);
        }

        return nr_unsync_leaf;
}

static int mmu_unsync_walk(struct kvm_mmu_page *sp,
                           struct kvm_mmu_pages *pvec)
{
        if (!sp->unsync_children)
                return 0;

        mmu_pages_add(pvec, sp, 0);
        return __mmu_unsync_walk(sp, pvec);
}
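
/*
 * mmu_unsync_walk() does a depth-first walk of the unsync_child_bitmap
 * hierarchy, collecting into @pvec the shadow pages that still need to
 * be synced (and the intermediate pages leading to them); it returns the
 * number of unsync leaves found, or -ENOSPC when @pvec fills up and the
 * caller must process the batch and restart the walk.
 */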
static void kvm_unlink_unsync_page(struct kvm *kvm, struct kvm_mmu_page *sp)
{
        WARN_ON(!sp->unsync);
        trace_kvm_mmu_sync_page(sp);
        sp->unsync = 0;
        --kvm->stat.mmu_unsync;
}

static int kvm_mmu_prepare_zap_page(struct kvm *kvm, struct kvm_mmu_page *sp,
                                    struct list_head *invalid_list);
static void kvm_mmu_commit_zap_page(struct kvm *kvm,
                                    struct list_head *invalid_list);

#define for_each_gfn_sp(kvm, sp, gfn, pos)                              \
  hlist_for_each_entry(sp, pos,                                         \
   &(kvm)->arch.mmu_page_hash[kvm_page_table_hashfn(gfn)], hash_link)   \
        if ((sp)->gfn != (gfn)) {} else

#define for_each_gfn_indirect_valid_sp(kvm, sp, gfn, pos)               \
  hlist_for_each_entry(sp, pos,                                         \
   &(kvm)->arch.mmu_page_hash[kvm_page_table_hashfn(gfn)], hash_link)   \
        if ((sp)->gfn != (gfn) || (sp)->role.direct ||                  \
            (sp)->role.invalid) {} else

/* @sp->gfn should be write-protected at the call site */
static int __kvm_sync_page(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
                           struct list_head *invalid_list, bool clear_unsync)
{
        if (sp->role.cr4_pae != !!is_pae(vcpu)) {
                kvm_mmu_prepare_zap_page(vcpu->kvm, sp, invalid_list);
                return 1;
        }

        if (clear_unsync)
                kvm_unlink_unsync_page(vcpu->kvm, sp);

        if (vcpu->arch.mmu.sync_page(vcpu, sp, clear_unsync)) {
                kvm_mmu_prepare_zap_page(vcpu->kvm, sp, invalid_list);
                return 1;
        }

        kvm_mmu_flush_tlb(vcpu);
        return 0;
}

static int kvm_sync_page_transient(struct kvm_vcpu *vcpu,
                                   struct kvm_mmu_page *sp)
{
        LIST_HEAD(invalid_list);
        int ret;

        ret = __kvm_sync_page(vcpu, sp, &invalid_list, false);
        if (ret)
                kvm_mmu_commit_zap_page(vcpu->kvm, &invalid_list);

        return ret;
}

static int kvm_sync_page(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
                         struct list_head *invalid_list)
{
        return __kvm_sync_page(vcpu, sp, invalid_list, true);
}

/* @gfn should be write-protected at the call site */
static void kvm_sync_pages(struct kvm_vcpu *vcpu, gfn_t gfn)
{
        struct kvm_mmu_page *s;
        struct hlist_node *node;
        LIST_HEAD(invalid_list);
        bool flush = false;

        for_each_gfn_indirect_valid_sp(vcpu->kvm, s, gfn, node) {
                if (!s->unsync)
                        continue;

                WARN_ON(s->role.level != PT_PAGE_TABLE_LEVEL);
                if ((s->role.cr4_pae != !!is_pae(vcpu)) ||
                    (vcpu->arch.mmu.sync_page(vcpu, s, true))) {
                        kvm_mmu_prepare_zap_page(vcpu->kvm, s, &invalid_list);
                        continue;
                }
                kvm_unlink_unsync_page(vcpu->kvm, s);
                flush = true;
        }

        kvm_mmu_commit_zap_page(vcpu->kvm, &invalid_list);
        if (flush)
                kvm_mmu_flush_tlb(vcpu);
}

struct mmu_page_path {
        struct kvm_mmu_page *parent[PT64_ROOT_LEVEL-1];
        unsigned int idx[PT64_ROOT_LEVEL-1];
};

#define for_each_sp(pvec, sp, parents, i)                               \
                for (i = mmu_pages_next(&pvec, &parents, -1),           \
                        sp = pvec.page[i].sp;                           \
                        i < pvec.nr && ({ sp = pvec.page[i].sp; 1;});   \
                        i = mmu_pages_next(&pvec, &parents, i))

static int mmu_pages_next(struct kvm_mmu_pages *pvec,
                          struct mmu_page_path *parents,
                          int i)
{
        int n;

        for (n = i+1; n < pvec->nr; n++) {
                struct kvm_mmu_page *sp = pvec->page[n].sp;

                if (sp->role.level == PT_PAGE_TABLE_LEVEL) {
                        parents->idx[0] = pvec->page[n].idx;
                        return n;
                }

                parents->parent[sp->role.level-2] = sp;
                parents->idx[sp->role.level-1] = pvec->page[n].idx;
        }

        return n;
}

static void mmu_pages_clear_parents(struct mmu_page_path *parents)
{
        struct kvm_mmu_page *sp;
        unsigned int level = 0;

        do {
                unsigned int idx = parents->idx[level];

                sp = parents->parent[level];
                if (!sp)
                        return;

                --sp->unsync_children;
                WARN_ON((int)sp->unsync_children < 0);
                __clear_bit(idx, sp->unsync_child_bitmap);
                level++;
        } while (level < PT64_ROOT_LEVEL-1 && !sp->unsync_children);
}
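
/*
 * After a leaf has been synced, mmu_pages_clear_parents() walks back up
 * the recorded path, clearing the child's bit in each parent's
 * unsync_child_bitmap and decrementing unsync_children, stopping at the
 * first ancestor that still has other unsync children.
 */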
static void kvm_mmu_pages_init(struct kvm_mmu_page *parent,
                               struct mmu_page_path *parents,
                               struct kvm_mmu_pages *pvec)
{
        parents->parent[parent->role.level-1] = NULL;
        pvec->nr = 0;
}

static void mmu_sync_children(struct kvm_vcpu *vcpu,
                              struct kvm_mmu_page *parent)
{
        int i;
        struct kvm_mmu_page *sp;
        struct mmu_page_path parents;
        struct kvm_mmu_pages pages;
        LIST_HEAD(invalid_list);

        kvm_mmu_pages_init(parent, &parents, &pages);
        while (mmu_unsync_walk(parent, &pages)) {
                int protected = 0;

                for_each_sp(pages, sp, parents, i)
                        protected |= rmap_write_protect(vcpu->kvm, sp->gfn);

                if (protected)
                        kvm_flush_remote_tlbs(vcpu->kvm);

                for_each_sp(pages, sp, parents, i) {
                        kvm_sync_page(vcpu, sp, &invalid_list);
                        mmu_pages_clear_parents(&parents);
                }
                kvm_mmu_commit_zap_page(vcpu->kvm, &invalid_list);
                cond_resched_lock(&vcpu->kvm->mmu_lock);
                kvm_mmu_pages_init(parent, &parents, &pages);
        }
}

static struct kvm_mmu_page *kvm_mmu_get_page(struct kvm_vcpu *vcpu,
                                             gfn_t gfn,
                                             gva_t gaddr,
                                             unsigned level,
                                             int direct,
                                             unsigned access,
                                             u64 *parent_pte)
{
        union kvm_mmu_page_role role;
        unsigned quadrant;
        struct kvm_mmu_page *sp;
        struct hlist_node *node;
        bool need_sync = false;

        role = vcpu->arch.mmu.base_role;
        role.level = level;
        role.direct = direct;
        if (role.direct)
                role.cr4_pae = 0;
        role.access = access;
        if (!tdp_enabled && vcpu->arch.mmu.root_level <= PT32_ROOT_LEVEL) {
                quadrant = gaddr >> (PAGE_SHIFT + (PT64_PT_BITS * level));
                quadrant &= (1 << ((PT32_PT_BITS - PT64_PT_BITS) * level)) - 1;
                role.quadrant = quadrant;
        }
        for_each_gfn_sp(vcpu->kvm, sp, gfn, node) {
                if (!need_sync && sp->unsync)
                        need_sync = true;

                if (sp->role.word != role.word)
                        continue;

                if (sp->unsync && kvm_sync_page_transient(vcpu, sp))
                        break;

                mmu_page_add_parent_pte(vcpu, sp, parent_pte);
                if (sp->unsync_children) {
                        kvm_make_request(KVM_REQ_MMU_SYNC, vcpu);
                        kvm_mmu_mark_parents_unsync(sp);
                } else if (sp->unsync)
                        kvm_mmu_mark_parents_unsync(sp);

                trace_kvm_mmu_get_page(sp, false);
                return sp;
        }
        ++vcpu->kvm->stat.mmu_cache_miss;
        sp = kvm_mmu_alloc_page(vcpu, parent_pte, direct);
        if (!sp)
                return sp;
        sp->gfn = gfn;
        sp->role = role;
        hlist_add_head(&sp->hash_link,
                &vcpu->kvm->arch.mmu_page_hash[kvm_page_table_hashfn(gfn)]);
        if (!direct) {
                if (rmap_write_protect(vcpu->kvm, gfn))
                        kvm_flush_remote_tlbs(vcpu->kvm);
                if (level > PT_PAGE_TABLE_LEVEL && need_sync)
                        kvm_sync_pages(vcpu, gfn);

                account_shadowed(vcpu->kvm, gfn);
        }
        if (shadow_trap_nonpresent_pte != shadow_notrap_nonpresent_pte)
                vcpu->arch.mmu.prefetch_page(vcpu, sp);
        else
                nonpaging_prefetch_page(vcpu, sp);
        trace_kvm_mmu_get_page(sp, true);
        return sp;
}
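
/*
 * kvm_mmu_get_page() first probes the gfn hash for an existing shadow
 * page whose role (level, access, quadrant, ...) matches exactly,
 * syncing or reusing it when found; only on a miss does it allocate a
 * fresh page, write-protect the guest page table it shadows, and hook
 * it into the hash.
 */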
static void shadow_walk_init(struct kvm_shadow_walk_iterator *iterator,
                             struct kvm_vcpu *vcpu, u64 addr)
{
        iterator->addr = addr;
        iterator->shadow_addr = vcpu->arch.mmu.root_hpa;
        iterator->level = vcpu->arch.mmu.shadow_root_level;
        if (iterator->level == PT32E_ROOT_LEVEL) {
                iterator->shadow_addr
                        = vcpu->arch.mmu.pae_root[(addr >> 30) & 3];
                iterator->shadow_addr &= PT64_BASE_ADDR_MASK;
                --iterator->level;
                if (!iterator->shadow_addr)
                        iterator->level = 0;
        }
}
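
/*
 * With a 32-bit PAE guest there is no single root table: the walk starts
 * one level down, at the pae_root entry selected by bits 31:30 of the
 * address, which is why shadow_walk_init() special-cases PT32E_ROOT_LEVEL.
 */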
static bool shadow_walk_okay(struct kvm_shadow_walk_iterator *iterator)
{
        if (iterator->level < PT_PAGE_TABLE_LEVEL)
                return false;

        if (iterator->level == PT_PAGE_TABLE_LEVEL)
                if (is_large_pte(*iterator->sptep))
                        return false;

        iterator->index = SHADOW_PT_INDEX(iterator->addr, iterator->level);
        iterator->sptep = ((u64 *)__va(iterator->shadow_addr)) + iterator->index;
        return true;
}

static void shadow_walk_next(struct kvm_shadow_walk_iterator *iterator)
{
        iterator->shadow_addr = *iterator->sptep & PT64_BASE_ADDR_MASK;
        --iterator->level;
}

static void link_shadow_page(u64 *sptep, struct kvm_mmu_page *sp)
{
        u64 spte;

        spte = __pa(sp->spt)
                | PT_PRESENT_MASK | PT_ACCESSED_MASK
                | PT_WRITABLE_MASK | PT_USER_MASK;
        __set_spte(sptep, spte);
}

static void drop_large_spte(struct kvm_vcpu *vcpu, u64 *sptep)
{
        if (is_large_pte(*sptep)) {
                drop_spte(vcpu->kvm, sptep, shadow_trap_nonpresent_pte);
                kvm_flush_remote_tlbs(vcpu->kvm);
        }
}
  1271. static void validate_direct_spte(struct kvm_vcpu *vcpu, u64 *sptep,
  1272. unsigned direct_access)
  1273. {
  1274. if (is_shadow_present_pte(*sptep) && !is_large_pte(*sptep)) {
  1275. struct kvm_mmu_page *child;
  1276. /*
  1277. * For the direct sp, if the guest pte's dirty bit
  1278. * changed form clean to dirty, it will corrupt the
  1279. * sp's access: allow writable in the read-only sp,
  1280. * so we should update the spte at this point to get
  1281. * a new sp with the correct access.
  1282. */
  1283. child = page_header(*sptep & PT64_BASE_ADDR_MASK);
  1284. if (child->role.access == direct_access)
  1285. return;
  1286. mmu_page_remove_parent_pte(child, sptep);
  1287. __set_spte(sptep, shadow_trap_nonpresent_pte);
  1288. kvm_flush_remote_tlbs(vcpu->kvm);
  1289. }
  1290. }
  1291. static void kvm_mmu_page_unlink_children(struct kvm *kvm,
  1292. struct kvm_mmu_page *sp)
  1293. {
  1294. unsigned i;
  1295. u64 *pt;
  1296. u64 ent;
  1297. pt = sp->spt;
  1298. for (i = 0; i < PT64_ENT_PER_PAGE; ++i) {
  1299. ent = pt[i];
  1300. if (is_shadow_present_pte(ent)) {
  1301. if (!is_last_spte(ent, sp->role.level)) {
  1302. ent &= PT64_BASE_ADDR_MASK;
  1303. mmu_page_remove_parent_pte(page_header(ent),
  1304. &pt[i]);
  1305. } else {
  1306. if (is_large_pte(ent))
  1307. --kvm->stat.lpages;
  1308. drop_spte(kvm, &pt[i],
  1309. shadow_trap_nonpresent_pte);
  1310. }
  1311. }
  1312. pt[i] = shadow_trap_nonpresent_pte;
  1313. }
  1314. }
  1315. static void kvm_mmu_put_page(struct kvm_mmu_page *sp, u64 *parent_pte)
  1316. {
  1317. mmu_page_remove_parent_pte(sp, parent_pte);
  1318. }
  1319. static void kvm_mmu_reset_last_pte_updated(struct kvm *kvm)
  1320. {
  1321. int i;
  1322. struct kvm_vcpu *vcpu;
  1323. kvm_for_each_vcpu(i, vcpu, kvm)
  1324. vcpu->arch.last_pte_updated = NULL;
  1325. }
  1326. static void kvm_mmu_unlink_parents(struct kvm *kvm, struct kvm_mmu_page *sp)
  1327. {
  1328. u64 *parent_pte;
  1329. while (sp->multimapped || sp->parent_pte) {
  1330. if (!sp->multimapped)
  1331. parent_pte = sp->parent_pte;
  1332. else {
  1333. struct kvm_pte_chain *chain;
  1334. chain = container_of(sp->parent_ptes.first,
  1335. struct kvm_pte_chain, link);
  1336. parent_pte = chain->parent_ptes[0];
  1337. }
  1338. BUG_ON(!parent_pte);
  1339. kvm_mmu_put_page(sp, parent_pte);
  1340. __set_spte(parent_pte, shadow_trap_nonpresent_pte);
  1341. }
  1342. }
  1343. static int mmu_zap_unsync_children(struct kvm *kvm,
  1344. struct kvm_mmu_page *parent,
  1345. struct list_head *invalid_list)
  1346. {
  1347. int i, zapped = 0;
  1348. struct mmu_page_path parents;
  1349. struct kvm_mmu_pages pages;
  1350. if (parent->role.level == PT_PAGE_TABLE_LEVEL)
  1351. return 0;
  1352. kvm_mmu_pages_init(parent, &parents, &pages);
  1353. while (mmu_unsync_walk(parent, &pages)) {
  1354. struct kvm_mmu_page *sp;
  1355. for_each_sp(pages, sp, parents, i) {
  1356. kvm_mmu_prepare_zap_page(kvm, sp, invalid_list);
  1357. mmu_pages_clear_parents(&parents);
  1358. zapped++;
  1359. }
  1360. kvm_mmu_pages_init(parent, &parents, &pages);
  1361. }
  1362. return zapped;
  1363. }
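
/*
 * Zapping is split in two phases: kvm_mmu_prepare_zap_page() unlinks a
 * page (and its unsync children) and moves it onto @invalid_list, and
 * kvm_mmu_commit_zap_page() later frees everything on that list after
 * a single remote TLB flush. A typical caller therefore looks like:
 *
 *	LIST_HEAD(invalid_list);
 *
 *	kvm_mmu_prepare_zap_page(kvm, sp, &invalid_list);
 *	kvm_mmu_commit_zap_page(kvm, &invalid_list);
 *
 * Pages still in use as roots are only marked invalid here and are
 * freed once their root count drops to zero.
 */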
static int kvm_mmu_prepare_zap_page(struct kvm *kvm, struct kvm_mmu_page *sp,
				    struct list_head *invalid_list)
{
	int ret;

	trace_kvm_mmu_prepare_zap_page(sp);
	++kvm->stat.mmu_shadow_zapped;
	ret = mmu_zap_unsync_children(kvm, sp, invalid_list);
	kvm_mmu_page_unlink_children(kvm, sp);
	kvm_mmu_unlink_parents(kvm, sp);
	if (!sp->role.invalid && !sp->role.direct)
		unaccount_shadowed(kvm, sp->gfn);
	if (sp->unsync)
		kvm_unlink_unsync_page(kvm, sp);
	if (!sp->root_count) {
		/* Count self */
		ret++;
		list_move(&sp->link, invalid_list);
	} else {
		list_move(&sp->link, &kvm->arch.active_mmu_pages);
		kvm_reload_remote_mmus(kvm);
	}
	sp->role.invalid = 1;
	kvm_mmu_reset_last_pte_updated(kvm);
	return ret;
}

static void kvm_mmu_commit_zap_page(struct kvm *kvm,
				    struct list_head *invalid_list)
{
	struct kvm_mmu_page *sp;

	if (list_empty(invalid_list))
		return;

	kvm_flush_remote_tlbs(kvm);

	do {
		sp = list_first_entry(invalid_list, struct kvm_mmu_page, link);
		WARN_ON(!sp->role.invalid || sp->root_count);
		kvm_mmu_free_page(kvm, sp);
	} while (!list_empty(invalid_list));
}

/*
 * Change the number of mmu pages allocated to the vm.
 * Note: if kvm_nr_mmu_pages is too small, you will get a deadlock.
 */
void kvm_mmu_change_mmu_pages(struct kvm *kvm, unsigned int kvm_nr_mmu_pages)
{
	int used_pages;
	LIST_HEAD(invalid_list);

	used_pages = kvm->arch.n_alloc_mmu_pages - kvm->arch.n_free_mmu_pages;
	used_pages = max(0, used_pages);

	/*
	 * If we set the number of mmu pages to be smaller than the
	 * number of active pages, we must free some mmu pages before
	 * changing the value.
	 */
	if (used_pages > kvm_nr_mmu_pages) {
		while (used_pages > kvm_nr_mmu_pages &&
		       !list_empty(&kvm->arch.active_mmu_pages)) {
			struct kvm_mmu_page *page;

			page = container_of(kvm->arch.active_mmu_pages.prev,
					    struct kvm_mmu_page, link);
			used_pages -= kvm_mmu_prepare_zap_page(kvm, page,
							       &invalid_list);
		}
		kvm_mmu_commit_zap_page(kvm, &invalid_list);
		kvm_nr_mmu_pages = used_pages;
		kvm->arch.n_free_mmu_pages = 0;
	} else
		kvm->arch.n_free_mmu_pages += kvm_nr_mmu_pages
					      - kvm->arch.n_alloc_mmu_pages;

	kvm->arch.n_alloc_mmu_pages = kvm_nr_mmu_pages;
}

static int kvm_mmu_unprotect_page(struct kvm *kvm, gfn_t gfn)
{
	struct kvm_mmu_page *sp;
	struct hlist_node *node;
	LIST_HEAD(invalid_list);
	int r;

	pgprintk("%s: looking for gfn %lx\n", __func__, gfn);
	r = 0;

	for_each_gfn_indirect_valid_sp(kvm, sp, gfn, node) {
		pgprintk("%s: gfn %lx role %x\n", __func__, gfn,
			 sp->role.word);
		r = 1;
		kvm_mmu_prepare_zap_page(kvm, sp, &invalid_list);
	}
	kvm_mmu_commit_zap_page(kvm, &invalid_list);
	return r;
}

static void mmu_unshadow(struct kvm *kvm, gfn_t gfn)
{
	struct kvm_mmu_page *sp;
	struct hlist_node *node;
	LIST_HEAD(invalid_list);

	for_each_gfn_indirect_valid_sp(kvm, sp, gfn, node) {
		pgprintk("%s: zap %lx %x\n",
			 __func__, gfn, sp->role.word);
		kvm_mmu_prepare_zap_page(kvm, sp, &invalid_list);
	}
	kvm_mmu_commit_zap_page(kvm, &invalid_list);
}

static void page_header_update_slot(struct kvm *kvm, void *pte, gfn_t gfn)
{
	int slot = memslot_id(kvm, gfn);
	struct kvm_mmu_page *sp = page_header(__pa(pte));

	__set_bit(slot, sp->slot_bitmap);
}

static void mmu_convert_notrap(struct kvm_mmu_page *sp)
{
	int i;
	u64 *pt = sp->spt;

	if (shadow_trap_nonpresent_pte == shadow_notrap_nonpresent_pte)
		return;

	for (i = 0; i < PT64_ENT_PER_PAGE; ++i) {
		if (pt[i] == shadow_notrap_nonpresent_pte)
			__set_spte(&pt[i], shadow_trap_nonpresent_pte);
	}
}

/*
 * The function is based on mtrr_type_lookup() in
 * arch/x86/kernel/cpu/mtrr/generic.c
 */
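/*
 * The return value follows mtrr_type_lookup(): an MTRR_TYPE_* memory
 * type, 0xFE if the range spans regions with different types, or 0xFF
 * if MTRRs are disabled; the caller maps the last two back to a
 * default (see kvm_get_guest_memory_type() below).
 */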
static int get_mtrr_type(struct mtrr_state_type *mtrr_state,
			 u64 start, u64 end)
{
	int i;
	u64 base, mask;
	u8 prev_match, curr_match;
	int num_var_ranges = KVM_NR_VAR_MTRR;

	if (!mtrr_state->enabled)
		return 0xFF;

	/* Make end inclusive, instead of exclusive */
	end--;

	/* Look in fixed ranges. Just return the type as per start */
	if (mtrr_state->have_fixed && (start < 0x100000)) {
		int idx;

		if (start < 0x80000) {
			idx = 0;
			idx += (start >> 16);
			return mtrr_state->fixed_ranges[idx];
		} else if (start < 0xC0000) {
			idx = 1 * 8;
			idx += ((start - 0x80000) >> 14);
			return mtrr_state->fixed_ranges[idx];
		} else if (start < 0x1000000) {
			idx = 3 * 8;
			idx += ((start - 0xC0000) >> 12);
			return mtrr_state->fixed_ranges[idx];
		}
	}

	/*
	 * Look in variable ranges.
	 * Look for multiple ranges matching this address and pick the
	 * type as per MTRR precedence.
	 */
	if (!(mtrr_state->enabled & 2))
		return mtrr_state->def_type;

	prev_match = 0xFF;
	for (i = 0; i < num_var_ranges; ++i) {
		unsigned short start_state, end_state;

		if (!(mtrr_state->var_ranges[i].mask_lo & (1 << 11)))
			continue;

		base = (((u64)mtrr_state->var_ranges[i].base_hi) << 32) +
		       (mtrr_state->var_ranges[i].base_lo & PAGE_MASK);
		mask = (((u64)mtrr_state->var_ranges[i].mask_hi) << 32) +
		       (mtrr_state->var_ranges[i].mask_lo & PAGE_MASK);

		start_state = ((start & mask) == (base & mask));
		end_state = ((end & mask) == (base & mask));
		if (start_state != end_state)
			return 0xFE;

		if ((start & mask) != (base & mask))
			continue;

		curr_match = mtrr_state->var_ranges[i].base_lo & 0xff;
		if (prev_match == 0xFF) {
			prev_match = curr_match;
			continue;
		}

		if (prev_match == MTRR_TYPE_UNCACHABLE ||
		    curr_match == MTRR_TYPE_UNCACHABLE)
			return MTRR_TYPE_UNCACHABLE;

		if ((prev_match == MTRR_TYPE_WRBACK &&
		     curr_match == MTRR_TYPE_WRTHROUGH) ||
		    (prev_match == MTRR_TYPE_WRTHROUGH &&
		     curr_match == MTRR_TYPE_WRBACK)) {
			prev_match = MTRR_TYPE_WRTHROUGH;
			curr_match = MTRR_TYPE_WRTHROUGH;
		}

		if (prev_match != curr_match)
			return MTRR_TYPE_UNCACHABLE;
	}

	if (prev_match != 0xFF)
		return prev_match;

	return mtrr_state->def_type;
}

u8 kvm_get_guest_memory_type(struct kvm_vcpu *vcpu, gfn_t gfn)
{
	u8 mtrr;

	mtrr = get_mtrr_type(&vcpu->arch.mtrr_state, gfn << PAGE_SHIFT,
			     (gfn << PAGE_SHIFT) + PAGE_SIZE);
	if (mtrr == 0xfe || mtrr == 0xff)
		mtrr = MTRR_TYPE_WRBACK;
	return mtrr;
}
EXPORT_SYMBOL_GPL(kvm_get_guest_memory_type);

static void __kvm_unsync_page(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp)
{
	trace_kvm_mmu_unsync_page(sp);
	++vcpu->kvm->stat.mmu_unsync;
	sp->unsync = 1;

	kvm_mmu_mark_parents_unsync(sp);
	mmu_convert_notrap(sp);
}

static void kvm_unsync_pages(struct kvm_vcpu *vcpu, gfn_t gfn)
{
	struct kvm_mmu_page *s;
	struct hlist_node *node;

	for_each_gfn_indirect_valid_sp(vcpu->kvm, s, gfn, node) {
		if (s->unsync)
			continue;
		WARN_ON(s->role.level != PT_PAGE_TABLE_LEVEL);
		__kvm_unsync_page(vcpu, s);
	}
}
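
/*
 * Decide whether a gfn that is about to be mapped writable must stay
 * write-protected. Returns 1 if any shadow page for the gfn cannot be
 * unsynced (the caller then keeps the mapping read-only); otherwise the
 * pages are marked unsync and 0 is returned. oos_shadow is the knob
 * (a module parameter earlier in this file) gating out-of-sync shadow
 * pages.
 */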
static int mmu_need_write_protect(struct kvm_vcpu *vcpu, gfn_t gfn,
				  bool can_unsync)
{
	struct kvm_mmu_page *s;
	struct hlist_node *node;
	bool need_unsync = false;

	for_each_gfn_indirect_valid_sp(vcpu->kvm, s, gfn, node) {
		if (!can_unsync)
			return 1;

		if (s->role.level != PT_PAGE_TABLE_LEVEL)
			return 1;

		if (!need_unsync && !s->unsync) {
			if (!oos_shadow)
				return 1;
			need_unsync = true;
		}
	}
	if (need_unsync)
		kvm_unsync_pages(vcpu, gfn);
	return 0;
}
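
/*
 * Build and install a shadow pte. Returns nonzero if the spte could
 * not be made writable as requested (the gfn must stay
 * write-protected); mmu_set_spte() below then flushes the TLB and, on
 * a write fault, flags the access for emulation via *ptwrite.
 */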
static int set_spte(struct kvm_vcpu *vcpu, u64 *sptep,
		    unsigned pte_access, int user_fault,
		    int write_fault, int dirty, int level,
		    gfn_t gfn, pfn_t pfn, bool speculative,
		    bool can_unsync, bool reset_host_protection)
{
	u64 spte;
	int ret = 0;

	/*
	 * We don't set the accessed bit, since we sometimes want to see
	 * whether the guest actually used the pte (in order to detect
	 * demand paging).
	 */
	spte = shadow_base_present_pte | shadow_dirty_mask;
	if (!speculative)
		spte |= shadow_accessed_mask;
	if (!dirty)
		pte_access &= ~ACC_WRITE_MASK;
	if (pte_access & ACC_EXEC_MASK)
		spte |= shadow_x_mask;
	else
		spte |= shadow_nx_mask;
	if (pte_access & ACC_USER_MASK)
		spte |= shadow_user_mask;
	if (level > PT_PAGE_TABLE_LEVEL)
		spte |= PT_PAGE_SIZE_MASK;
	if (tdp_enabled)
		spte |= kvm_x86_ops->get_mt_mask(vcpu, gfn,
						 kvm_is_mmio_pfn(pfn));

	if (reset_host_protection)
		spte |= SPTE_HOST_WRITEABLE;

	spte |= (u64)pfn << PAGE_SHIFT;

	if ((pte_access & ACC_WRITE_MASK)
	    || (!tdp_enabled && write_fault && !is_write_protection(vcpu)
		&& !user_fault)) {

		if (level > PT_PAGE_TABLE_LEVEL &&
		    has_wrprotected_page(vcpu->kvm, gfn, level)) {
			ret = 1;
			drop_spte(vcpu->kvm, sptep, shadow_trap_nonpresent_pte);
			goto done;
		}

		spte |= PT_WRITABLE_MASK;

		if (!tdp_enabled && !(pte_access & ACC_WRITE_MASK))
			spte &= ~PT_USER_MASK;

		/*
		 * Optimization: for pte sync, if spte was writable the hash
		 * lookup is unnecessary (and expensive). Write protection
		 * is responsibility of mmu_get_page / kvm_sync_page.
		 * Same reasoning can be applied to dirty page accounting.
		 */
		if (!can_unsync && is_writable_pte(*sptep))
			goto set_pte;

		if (mmu_need_write_protect(vcpu, gfn, can_unsync)) {
			pgprintk("%s: found shadow page for %lx, marking ro\n",
				 __func__, gfn);
			ret = 1;
			pte_access &= ~ACC_WRITE_MASK;
			if (is_writable_pte(spte))
				spte &= ~PT_WRITABLE_MASK;
		}
	}

	if (pte_access & ACC_WRITE_MASK)
		mark_page_dirty(vcpu->kvm, gfn);

set_pte:
	update_spte(sptep, spte);
done:
	return ret;
}

static void mmu_set_spte(struct kvm_vcpu *vcpu, u64 *sptep,
			 unsigned pt_access, unsigned pte_access,
			 int user_fault, int write_fault, int dirty,
			 int *ptwrite, int level, gfn_t gfn,
			 pfn_t pfn, bool speculative,
			 bool reset_host_protection)
{
	int was_rmapped = 0;
	int was_writable = is_writable_pte(*sptep);
	int rmap_count;

	pgprintk("%s: spte %llx access %x write_fault %d"
		 " user_fault %d gfn %lx\n",
		 __func__, *sptep, pt_access,
		 write_fault, user_fault, gfn);

	if (is_rmap_spte(*sptep)) {
		/*
		 * If we overwrite a PTE page pointer with a 2MB PMD, unlink
		 * the parent of the now unreachable PTE.
		 */
		if (level > PT_PAGE_TABLE_LEVEL &&
		    !is_large_pte(*sptep)) {
			struct kvm_mmu_page *child;
			u64 pte = *sptep;

			child = page_header(pte & PT64_BASE_ADDR_MASK);
			mmu_page_remove_parent_pte(child, sptep);
			__set_spte(sptep, shadow_trap_nonpresent_pte);
			kvm_flush_remote_tlbs(vcpu->kvm);
		} else if (pfn != spte_to_pfn(*sptep)) {
			pgprintk("hfn old %lx new %lx\n",
				 spte_to_pfn(*sptep), pfn);
			drop_spte(vcpu->kvm, sptep, shadow_trap_nonpresent_pte);
			kvm_flush_remote_tlbs(vcpu->kvm);
		} else
			was_rmapped = 1;
	}

	if (set_spte(vcpu, sptep, pte_access, user_fault, write_fault,
		     dirty, level, gfn, pfn, speculative, true,
		     reset_host_protection)) {
		if (write_fault)
			*ptwrite = 1;
		kvm_mmu_flush_tlb(vcpu);
	}

	pgprintk("%s: setting spte %llx\n", __func__, *sptep);
	pgprintk("instantiating %s PTE (%s) at %ld (%llx) addr %p\n",
		 is_large_pte(*sptep) ? "2MB" : "4kB",
		 *sptep & PT_PRESENT_MASK ? "RW" : "R", gfn,
		 *sptep, sptep);
	if (!was_rmapped && is_large_pte(*sptep))
		++vcpu->kvm->stat.lpages;

	page_header_update_slot(vcpu->kvm, sptep, gfn);
	if (!was_rmapped) {
		rmap_count = rmap_add(vcpu, sptep, gfn);
		kvm_release_pfn_clean(pfn);
		if (rmap_count > RMAP_RECYCLE_THRESHOLD)
			rmap_recycle(vcpu, sptep, gfn);
	} else {
		if (was_writable)
			kvm_release_pfn_dirty(pfn);
		else
			kvm_release_pfn_clean(pfn);
	}
	if (speculative) {
		vcpu->arch.last_pte_updated = sptep;
		vcpu->arch.last_pte_gfn = gfn;
	}
}

static void nonpaging_new_cr3(struct kvm_vcpu *vcpu)
{
}
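
/*
 * Map a guest physical page directly (used when the guest is not
 * paging, and for tdp): walk the shadow page table down to @level,
 * allocating intermediate shadow pages as needed, and install the
 * final spte with full access rights. Returns the pt_write indication
 * set by mmu_set_spte() (propagated up to the page fault handler), or
 * -ENOMEM if a shadow page could not be allocated.
 */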
static int __direct_map(struct kvm_vcpu *vcpu, gpa_t v, int write,
			int level, gfn_t gfn, pfn_t pfn)
{
	struct kvm_shadow_walk_iterator iterator;
	struct kvm_mmu_page *sp;
	int pt_write = 0;
	gfn_t pseudo_gfn;

	for_each_shadow_entry(vcpu, (u64)gfn << PAGE_SHIFT, iterator) {
		if (iterator.level == level) {
			mmu_set_spte(vcpu, iterator.sptep, ACC_ALL, ACC_ALL,
				     0, write, 1, &pt_write,
				     level, gfn, pfn, false, true);
			++vcpu->stat.pf_fixed;
			break;
		}

		if (*iterator.sptep == shadow_trap_nonpresent_pte) {
			u64 base_addr = iterator.addr;

			base_addr &= PT64_LVL_ADDR_MASK(iterator.level);
			pseudo_gfn = base_addr >> PAGE_SHIFT;
			sp = kvm_mmu_get_page(vcpu, pseudo_gfn, iterator.addr,
					      iterator.level - 1,
					      1, ACC_ALL, iterator.sptep);
			if (!sp) {
				pgprintk("nonpaging_map: ENOMEM\n");
				kvm_release_pfn_clean(pfn);
				return -ENOMEM;
			}

			__set_spte(iterator.sptep,
				   __pa(sp->spt)
				   | PT_PRESENT_MASK | PT_WRITABLE_MASK
				   | shadow_user_mask | shadow_x_mask);
		}
	}
	return pt_write;
}

static void kvm_send_hwpoison_signal(struct kvm *kvm, gfn_t gfn)
{
	char buf[1];
	void __user *hva;
	int r;

	/* Touch the page, so that a SIGBUS is sent for the poisoned pfn */
	hva = (void __user *)gfn_to_hva(kvm, gfn);
	r = copy_from_user(buf, hva, 1);
}

static int kvm_handle_bad_page(struct kvm *kvm, gfn_t gfn, pfn_t pfn)
{
	kvm_release_pfn_clean(pfn);
	if (is_hwpoison_pfn(pfn)) {
		kvm_send_hwpoison_signal(kvm, gfn);
		return 0;
	} else if (is_fault_pfn(pfn))
		return -EFAULT;

	return 1;
}

static int nonpaging_map(struct kvm_vcpu *vcpu, gva_t v, int write, gfn_t gfn)
{
	int r;
	int level;
	pfn_t pfn;
	unsigned long mmu_seq;

	level = mapping_level(vcpu, gfn);

	/*
	 * This path builds a PAE pagetable - so we can map 2mb pages at
	 * maximum. Therefore check if the level is larger than that.
	 */
	if (level > PT_DIRECTORY_LEVEL)
		level = PT_DIRECTORY_LEVEL;

	gfn &= ~(KVM_PAGES_PER_HPAGE(level) - 1);

	mmu_seq = vcpu->kvm->mmu_notifier_seq;
	smp_rmb();
	pfn = gfn_to_pfn(vcpu->kvm, gfn);

	/* mmio */
	if (is_error_pfn(pfn))
		return kvm_handle_bad_page(vcpu->kvm, gfn, pfn);

	spin_lock(&vcpu->kvm->mmu_lock);
	if (mmu_notifier_retry(vcpu, mmu_seq))
		goto out_unlock;
	kvm_mmu_free_some_pages(vcpu);
	r = __direct_map(vcpu, v, write, level, gfn, pfn);
	spin_unlock(&vcpu->kvm->mmu_lock);

	return r;

out_unlock:
	spin_unlock(&vcpu->kvm->mmu_lock);
	kvm_release_pfn_clean(pfn);
	return 0;
}
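
/*
 * Root management. With a 64-bit shadow root there is a single root
 * page (mmu.root_hpa); in PAE mode there are four pae_root entries,
 * one per 1GB quadrant of the guest address space. mmu_free_roots()
 * drops the reference(s), mmu_alloc_roots() (re)builds them from cr3
 * or the guest PDPTEs, and mmu_sync_roots() resyncs any unsync
 * children below the roots.
 */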
static void mmu_free_roots(struct kvm_vcpu *vcpu)
{
	int i;
	struct kvm_mmu_page *sp;
	LIST_HEAD(invalid_list);

	if (!VALID_PAGE(vcpu->arch.mmu.root_hpa))
		return;
	spin_lock(&vcpu->kvm->mmu_lock);
	if (vcpu->arch.mmu.shadow_root_level == PT64_ROOT_LEVEL) {
		hpa_t root = vcpu->arch.mmu.root_hpa;

		sp = page_header(root);
		--sp->root_count;
		if (!sp->root_count && sp->role.invalid) {
			kvm_mmu_prepare_zap_page(vcpu->kvm, sp, &invalid_list);
			kvm_mmu_commit_zap_page(vcpu->kvm, &invalid_list);
		}
		vcpu->arch.mmu.root_hpa = INVALID_PAGE;
		spin_unlock(&vcpu->kvm->mmu_lock);
		return;
	}
	for (i = 0; i < 4; ++i) {
		hpa_t root = vcpu->arch.mmu.pae_root[i];

		if (root) {
			root &= PT64_BASE_ADDR_MASK;
			sp = page_header(root);
			--sp->root_count;
			if (!sp->root_count && sp->role.invalid)
				kvm_mmu_prepare_zap_page(vcpu->kvm, sp,
							 &invalid_list);
		}
		vcpu->arch.mmu.pae_root[i] = INVALID_PAGE;
	}
	kvm_mmu_commit_zap_page(vcpu->kvm, &invalid_list);
	spin_unlock(&vcpu->kvm->mmu_lock);
	vcpu->arch.mmu.root_hpa = INVALID_PAGE;
}

static int mmu_check_root(struct kvm_vcpu *vcpu, gfn_t root_gfn)
{
	int ret = 0;

	if (!kvm_is_visible_gfn(vcpu->kvm, root_gfn)) {
		kvm_make_request(KVM_REQ_TRIPLE_FAULT, vcpu);
		ret = 1;
	}

	return ret;
}

static int mmu_alloc_roots(struct kvm_vcpu *vcpu)
{
	int i;
	gfn_t root_gfn;
	struct kvm_mmu_page *sp;
	int direct = 0;
	u64 pdptr;

	root_gfn = vcpu->arch.cr3 >> PAGE_SHIFT;

	if (vcpu->arch.mmu.shadow_root_level == PT64_ROOT_LEVEL) {
		hpa_t root = vcpu->arch.mmu.root_hpa;

		ASSERT(!VALID_PAGE(root));
		if (mmu_check_root(vcpu, root_gfn))
			return 1;
		if (tdp_enabled) {
			direct = 1;
			root_gfn = 0;
		}
		spin_lock(&vcpu->kvm->mmu_lock);
		kvm_mmu_free_some_pages(vcpu);
		sp = kvm_mmu_get_page(vcpu, root_gfn, 0,
				      PT64_ROOT_LEVEL, direct,
				      ACC_ALL, NULL);
		root = __pa(sp->spt);
		++sp->root_count;
		spin_unlock(&vcpu->kvm->mmu_lock);
		vcpu->arch.mmu.root_hpa = root;
		return 0;
	}
	direct = !is_paging(vcpu);
	for (i = 0; i < 4; ++i) {
		hpa_t root = vcpu->arch.mmu.pae_root[i];

		ASSERT(!VALID_PAGE(root));
		if (vcpu->arch.mmu.root_level == PT32E_ROOT_LEVEL) {
			pdptr = kvm_pdptr_read(vcpu, i);
			if (!is_present_gpte(pdptr)) {
				vcpu->arch.mmu.pae_root[i] = 0;
				continue;
			}
			root_gfn = pdptr >> PAGE_SHIFT;
		} else if (vcpu->arch.mmu.root_level == 0)
			root_gfn = 0;
		if (mmu_check_root(vcpu, root_gfn))
			return 1;
		if (tdp_enabled) {
			direct = 1;
			root_gfn = i << 30;
		}
		spin_lock(&vcpu->kvm->mmu_lock);
		kvm_mmu_free_some_pages(vcpu);
		sp = kvm_mmu_get_page(vcpu, root_gfn, i << 30,
				      PT32_ROOT_LEVEL, direct,
				      ACC_ALL, NULL);
		root = __pa(sp->spt);
		++sp->root_count;
		spin_unlock(&vcpu->kvm->mmu_lock);
		vcpu->arch.mmu.pae_root[i] = root | PT_PRESENT_MASK;
	}
	vcpu->arch.mmu.root_hpa = __pa(vcpu->arch.mmu.pae_root);
	return 0;
}

static void mmu_sync_roots(struct kvm_vcpu *vcpu)
{
	int i;
	struct kvm_mmu_page *sp;

	if (!VALID_PAGE(vcpu->arch.mmu.root_hpa))
		return;
	if (vcpu->arch.mmu.shadow_root_level == PT64_ROOT_LEVEL) {
		hpa_t root = vcpu->arch.mmu.root_hpa;
		sp = page_header(root);
		mmu_sync_children(vcpu, sp);
		return;
	}
	for (i = 0; i < 4; ++i) {
		hpa_t root = vcpu->arch.mmu.pae_root[i];

		if (root && VALID_PAGE(root)) {
			root &= PT64_BASE_ADDR_MASK;
			sp = page_header(root);
			mmu_sync_children(vcpu, sp);
		}
	}
}

void kvm_mmu_sync_roots(struct kvm_vcpu *vcpu)
{
	spin_lock(&vcpu->kvm->mmu_lock);
	mmu_sync_roots(vcpu);
	spin_unlock(&vcpu->kvm->mmu_lock);
}

static gpa_t nonpaging_gva_to_gpa(struct kvm_vcpu *vcpu, gva_t vaddr,
				  u32 access, u32 *error)
{
	if (error)
		*error = 0;
	return vaddr;
}

static int nonpaging_page_fault(struct kvm_vcpu *vcpu, gva_t gva,
				u32 error_code)
{
	gfn_t gfn;
	int r;

	pgprintk("%s: gva %lx error %x\n", __func__, gva, error_code);
	r = mmu_topup_memory_caches(vcpu);
	if (r)
		return r;

	ASSERT(vcpu);
	ASSERT(VALID_PAGE(vcpu->arch.mmu.root_hpa));

	gfn = gva >> PAGE_SHIFT;

	return nonpaging_map(vcpu, gva & PAGE_MASK,
			     error_code & PFERR_WRITE_MASK, gfn);
}

static int tdp_page_fault(struct kvm_vcpu *vcpu, gva_t gpa,
			  u32 error_code)
{
	pfn_t pfn;
	int r;
	int level;
	gfn_t gfn = gpa >> PAGE_SHIFT;
	unsigned long mmu_seq;

	ASSERT(vcpu);
	ASSERT(VALID_PAGE(vcpu->arch.mmu.root_hpa));

	r = mmu_topup_memory_caches(vcpu);
	if (r)
		return r;

	level = mapping_level(vcpu, gfn);

	gfn &= ~(KVM_PAGES_PER_HPAGE(level) - 1);

	mmu_seq = vcpu->kvm->mmu_notifier_seq;
	smp_rmb();
	pfn = gfn_to_pfn(vcpu->kvm, gfn);
	if (is_error_pfn(pfn))
		return kvm_handle_bad_page(vcpu->kvm, gfn, pfn);
	spin_lock(&vcpu->kvm->mmu_lock);
	if (mmu_notifier_retry(vcpu, mmu_seq))
		goto out_unlock;
	kvm_mmu_free_some_pages(vcpu);
	r = __direct_map(vcpu, gpa, error_code & PFERR_WRITE_MASK,
			 level, gfn, pfn);
	spin_unlock(&vcpu->kvm->mmu_lock);

	return r;

out_unlock:
	spin_unlock(&vcpu->kvm->mmu_lock);
	kvm_release_pfn_clean(pfn);
	return 0;
}

static void nonpaging_free(struct kvm_vcpu *vcpu)
{
	mmu_free_roots(vcpu);
}

static int nonpaging_init_context(struct kvm_vcpu *vcpu)
{
	struct kvm_mmu *context = &vcpu->arch.mmu;

	context->new_cr3 = nonpaging_new_cr3;
	context->page_fault = nonpaging_page_fault;
	context->gva_to_gpa = nonpaging_gva_to_gpa;
	context->free = nonpaging_free;
	context->prefetch_page = nonpaging_prefetch_page;
	context->sync_page = nonpaging_sync_page;
	context->invlpg = nonpaging_invlpg;
	context->root_level = 0;
	context->shadow_root_level = PT32E_ROOT_LEVEL;
	context->root_hpa = INVALID_PAGE;
	return 0;
}

void kvm_mmu_flush_tlb(struct kvm_vcpu *vcpu)
{
	++vcpu->stat.tlb_flush;
	kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
}

static void paging_new_cr3(struct kvm_vcpu *vcpu)
{
	pgprintk("%s: cr3 %lx\n", __func__, vcpu->arch.cr3);
	mmu_free_roots(vcpu);
}

static void inject_page_fault(struct kvm_vcpu *vcpu,
			      u64 addr,
			      u32 err_code)
{
	kvm_inject_page_fault(vcpu, addr, err_code);
}

static void paging_free(struct kvm_vcpu *vcpu)
{
	nonpaging_free(vcpu);
}

static bool is_rsvd_bits_set(struct kvm_vcpu *vcpu, u64 gpte, int level)
{
	int bit7;

	bit7 = (gpte >> 7) & 1;
	return (gpte & vcpu->arch.mmu.rsvd_bits_mask[bit7][level-1]) != 0;
}

#define PTTYPE 64
#include "paging_tmpl.h"
#undef PTTYPE

#define PTTYPE 32
#include "paging_tmpl.h"
#undef PTTYPE
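
/*
 * Precompute the reserved-bit masks consulted by is_rsvd_bits_set()
 * above: rsvd_bits_mask[x][n] is the mask for a level-(n+1) entry
 * whose bit 7 (PS/PAT) is x. For example, on a CPU with
 * maxphyaddr == 40 and NX enabled, a 4-level PDE mapping a 2MB page
 * (bit7 == 1, level 2) gets rsvd_bits(40, 51) | rsvd_bits(13, 20).
 */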
static void reset_rsvds_bits_mask(struct kvm_vcpu *vcpu, int level)
{
	struct kvm_mmu *context = &vcpu->arch.mmu;
	int maxphyaddr = cpuid_maxphyaddr(vcpu);
	u64 exb_bit_rsvd = 0;

	if (!is_nx(vcpu))
		exb_bit_rsvd = rsvd_bits(63, 63);
	switch (level) {
	case PT32_ROOT_LEVEL:
		/* no rsvd bits for 2 level 4K page table entries */
		context->rsvd_bits_mask[0][1] = 0;
		context->rsvd_bits_mask[0][0] = 0;
		context->rsvd_bits_mask[1][0] = context->rsvd_bits_mask[0][0];

		if (!is_pse(vcpu)) {
			context->rsvd_bits_mask[1][1] = 0;
			break;
		}

		if (is_cpuid_PSE36())
			/* 36bits PSE 4MB page */
			context->rsvd_bits_mask[1][1] = rsvd_bits(17, 21);
		else
			/* 32 bits PSE 4MB page */
			context->rsvd_bits_mask[1][1] = rsvd_bits(13, 21);
		break;
	case PT32E_ROOT_LEVEL:
		context->rsvd_bits_mask[0][2] =
			rsvd_bits(maxphyaddr, 63) |
			rsvd_bits(7, 8) | rsvd_bits(1, 2);	/* PDPTE */
		context->rsvd_bits_mask[0][1] = exb_bit_rsvd |
			rsvd_bits(maxphyaddr, 62);		/* PDE */
		context->rsvd_bits_mask[0][0] = exb_bit_rsvd |
			rsvd_bits(maxphyaddr, 62);		/* PTE */
		context->rsvd_bits_mask[1][1] = exb_bit_rsvd |
			rsvd_bits(maxphyaddr, 62) |
			rsvd_bits(13, 20);			/* large page */
		context->rsvd_bits_mask[1][0] = context->rsvd_bits_mask[0][0];
		break;
	case PT64_ROOT_LEVEL:
		context->rsvd_bits_mask[0][3] = exb_bit_rsvd |
			rsvd_bits(maxphyaddr, 51) | rsvd_bits(7, 8);
		context->rsvd_bits_mask[0][2] = exb_bit_rsvd |
			rsvd_bits(maxphyaddr, 51) | rsvd_bits(7, 8);
		context->rsvd_bits_mask[0][1] = exb_bit_rsvd |
			rsvd_bits(maxphyaddr, 51);
		context->rsvd_bits_mask[0][0] = exb_bit_rsvd |
			rsvd_bits(maxphyaddr, 51);
		context->rsvd_bits_mask[1][3] = context->rsvd_bits_mask[0][3];
		context->rsvd_bits_mask[1][2] = exb_bit_rsvd |
			rsvd_bits(maxphyaddr, 51) |
			rsvd_bits(13, 29);
		context->rsvd_bits_mask[1][1] = exb_bit_rsvd |
			rsvd_bits(maxphyaddr, 51) |
			rsvd_bits(13, 20);			/* large page */
		context->rsvd_bits_mask[1][0] = context->rsvd_bits_mask[0][0];
		break;
	}
}

static int paging64_init_context_common(struct kvm_vcpu *vcpu, int level)
{
	struct kvm_mmu *context = &vcpu->arch.mmu;

	ASSERT(is_pae(vcpu));
	context->new_cr3 = paging_new_cr3;
	context->page_fault = paging64_page_fault;
	context->gva_to_gpa = paging64_gva_to_gpa;
	context->prefetch_page = paging64_prefetch_page;
	context->sync_page = paging64_sync_page;
	context->invlpg = paging64_invlpg;
	context->free = paging_free;
	context->root_level = level;
	context->shadow_root_level = level;
	context->root_hpa = INVALID_PAGE;
	return 0;
}

static int paging64_init_context(struct kvm_vcpu *vcpu)
{
	reset_rsvds_bits_mask(vcpu, PT64_ROOT_LEVEL);
	return paging64_init_context_common(vcpu, PT64_ROOT_LEVEL);
}

static int paging32_init_context(struct kvm_vcpu *vcpu)
{
	struct kvm_mmu *context = &vcpu->arch.mmu;

	reset_rsvds_bits_mask(vcpu, PT32_ROOT_LEVEL);
	context->new_cr3 = paging_new_cr3;
	context->page_fault = paging32_page_fault;
	context->gva_to_gpa = paging32_gva_to_gpa;
	context->free = paging_free;
	context->prefetch_page = paging32_prefetch_page;
	context->sync_page = paging32_sync_page;
	context->invlpg = paging32_invlpg;
	context->root_level = PT32_ROOT_LEVEL;
	context->shadow_root_level = PT32E_ROOT_LEVEL;
	context->root_hpa = INVALID_PAGE;
	return 0;
}

static int paging32E_init_context(struct kvm_vcpu *vcpu)
{
	reset_rsvds_bits_mask(vcpu, PT32E_ROOT_LEVEL);
	return paging64_init_context_common(vcpu, PT32E_ROOT_LEVEL);
}
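
/*
 * Two MMU flavors are initialized below: init_kvm_tdp_mmu() for
 * two-dimensional paging (EPT/NPT), where faults are handled directly
 * against guest physical addresses, and init_kvm_softmmu() for the
 * classic shadow MMU, which picks a paging context matching the guest
 * mode (none, 32-bit, PAE or long mode).
 */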
static int init_kvm_tdp_mmu(struct kvm_vcpu *vcpu)
{
	struct kvm_mmu *context = &vcpu->arch.mmu;

	context->new_cr3 = nonpaging_new_cr3;
	context->page_fault = tdp_page_fault;
	context->free = nonpaging_free;
	context->prefetch_page = nonpaging_prefetch_page;
	context->sync_page = nonpaging_sync_page;
	context->invlpg = nonpaging_invlpg;
	context->shadow_root_level = kvm_x86_ops->get_tdp_level();
	context->root_hpa = INVALID_PAGE;

	if (!is_paging(vcpu)) {
		context->gva_to_gpa = nonpaging_gva_to_gpa;
		context->root_level = 0;
	} else if (is_long_mode(vcpu)) {
		reset_rsvds_bits_mask(vcpu, PT64_ROOT_LEVEL);
		context->gva_to_gpa = paging64_gva_to_gpa;
		context->root_level = PT64_ROOT_LEVEL;
	} else if (is_pae(vcpu)) {
		reset_rsvds_bits_mask(vcpu, PT32E_ROOT_LEVEL);
		context->gva_to_gpa = paging64_gva_to_gpa;
		context->root_level = PT32E_ROOT_LEVEL;
	} else {
		reset_rsvds_bits_mask(vcpu, PT32_ROOT_LEVEL);
		context->gva_to_gpa = paging32_gva_to_gpa;
		context->root_level = PT32_ROOT_LEVEL;
	}

	return 0;
}

static int init_kvm_softmmu(struct kvm_vcpu *vcpu)
{
	int r;

	ASSERT(vcpu);
	ASSERT(!VALID_PAGE(vcpu->arch.mmu.root_hpa));

	if (!is_paging(vcpu))
		r = nonpaging_init_context(vcpu);
	else if (is_long_mode(vcpu))
		r = paging64_init_context(vcpu);
	else if (is_pae(vcpu))
		r = paging32E_init_context(vcpu);
	else
		r = paging32_init_context(vcpu);

	vcpu->arch.mmu.base_role.cr4_pae = !!is_pae(vcpu);
	vcpu->arch.mmu.base_role.cr0_wp = is_write_protection(vcpu);

	return r;
}

static int init_kvm_mmu(struct kvm_vcpu *vcpu)
{
	vcpu->arch.update_pte.pfn = bad_pfn;

	if (tdp_enabled)
		return init_kvm_tdp_mmu(vcpu);
	else
		return init_kvm_softmmu(vcpu);
}

static void destroy_kvm_mmu(struct kvm_vcpu *vcpu)
{
	ASSERT(vcpu);
	if (VALID_PAGE(vcpu->arch.mmu.root_hpa))
		/* mmu.free() should set root_hpa = INVALID_PAGE */
		vcpu->arch.mmu.free(vcpu);
}

int kvm_mmu_reset_context(struct kvm_vcpu *vcpu)
{
	destroy_kvm_mmu(vcpu);
	return init_kvm_mmu(vcpu);
}
EXPORT_SYMBOL_GPL(kvm_mmu_reset_context);

int kvm_mmu_load(struct kvm_vcpu *vcpu)
{
	int r;

	r = mmu_topup_memory_caches(vcpu);
	if (r)
		goto out;
	r = mmu_alloc_roots(vcpu);
	spin_lock(&vcpu->kvm->mmu_lock);
	mmu_sync_roots(vcpu);
	spin_unlock(&vcpu->kvm->mmu_lock);
	if (r)
		goto out;
	/* set_cr3() should ensure TLB has been flushed */
	kvm_x86_ops->set_cr3(vcpu, vcpu->arch.mmu.root_hpa);
out:
	return r;
}
EXPORT_SYMBOL_GPL(kvm_mmu_load);

void kvm_mmu_unload(struct kvm_vcpu *vcpu)
{
	mmu_free_roots(vcpu);
}

static void mmu_pte_write_zap_pte(struct kvm_vcpu *vcpu,
				  struct kvm_mmu_page *sp,
				  u64 *spte)
{
	u64 pte;
	struct kvm_mmu_page *child;

	pte = *spte;
	if (is_shadow_present_pte(pte)) {
		if (is_last_spte(pte, sp->role.level))
			drop_spte(vcpu->kvm, spte, shadow_trap_nonpresent_pte);
		else {
			child = page_header(pte & PT64_BASE_ADDR_MASK);
			mmu_page_remove_parent_pte(child, spte);
		}
	}
	__set_spte(spte, shadow_trap_nonpresent_pte);
	if (is_large_pte(pte))
		--vcpu->kvm->stat.lpages;
}

static void mmu_pte_write_new_pte(struct kvm_vcpu *vcpu,
				  struct kvm_mmu_page *sp,
				  u64 *spte,
				  const void *new)
{
	if (sp->role.level != PT_PAGE_TABLE_LEVEL) {
		++vcpu->kvm->stat.mmu_pde_zapped;
		return;
	}

	if (is_rsvd_bits_set(vcpu, *(u64 *)new, PT_PAGE_TABLE_LEVEL))
		return;

	++vcpu->kvm->stat.mmu_pte_updated;
	if (!sp->role.cr4_pae)
		paging32_update_pte(vcpu, sp, spte, new);
	else
		paging64_update_pte(vcpu, sp, spte, new);
}

static bool need_remote_flush(u64 old, u64 new)
{
	if (!is_shadow_present_pte(old))
		return false;
	if (!is_shadow_present_pte(new))
		return true;
	if ((old ^ new) & PT64_BASE_ADDR_MASK)
		return true;
	old ^= PT64_NX_MASK;
	new ^= PT64_NX_MASK;
	return (old & ~new & PT64_PERM_MASK) != 0;
}

static void mmu_pte_write_flush_tlb(struct kvm_vcpu *vcpu, bool zap_page,
				    bool remote_flush, bool local_flush)
{
	if (zap_page)
		return;

	if (remote_flush)
		kvm_flush_remote_tlbs(vcpu->kvm);
	else if (local_flush)
		kvm_mmu_flush_tlb(vcpu);
}

static bool last_updated_pte_accessed(struct kvm_vcpu *vcpu)
{
	u64 *spte = vcpu->arch.last_pte_updated;

	return !!(spte && (*spte & shadow_accessed_mask));
}

static void mmu_guess_page_from_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
					  u64 gpte)
{
	gfn_t gfn;
	pfn_t pfn;

	if (!is_present_gpte(gpte))
		return;
	gfn = (gpte & PT64_BASE_ADDR_MASK) >> PAGE_SHIFT;

	vcpu->arch.update_pte.mmu_seq = vcpu->kvm->mmu_notifier_seq;
	smp_rmb();
	pfn = gfn_to_pfn(vcpu->kvm, gfn);

	if (is_error_pfn(pfn)) {
		kvm_release_pfn_clean(pfn);
		return;
	}
	vcpu->arch.update_pte.gfn = gfn;
	vcpu->arch.update_pte.pfn = pfn;
}

static void kvm_mmu_access_page(struct kvm_vcpu *vcpu, gfn_t gfn)
{
	u64 *spte = vcpu->arch.last_pte_updated;

	if (spte
	    && vcpu->arch.last_pte_gfn == gfn
	    && shadow_accessed_mask
	    && !(*spte & shadow_accessed_mask)
	    && is_shadow_present_pte(*spte))
		set_bit(PT_ACCESSED_SHIFT, (unsigned long *)spte);
}
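
/*
 * Called when the guest writes to a gfn that is shadowed as a page
 * table (the write itself was intercepted via write protection). The
 * affected sptes are zapped and, when the write looks like a valid pte
 * update, re-instantiated from the new guest pte. Pages that see
 * misaligned writes, or too many write faults in a row ("flooded"),
 * are assumed not to be page tables any more and are zapped entirely.
 */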
void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
		       const u8 *new, int bytes,
		       bool guest_initiated)
{
	gfn_t gfn = gpa >> PAGE_SHIFT;
	union kvm_mmu_page_role mask = { .word = 0 };
	struct kvm_mmu_page *sp;
	struct hlist_node *node;
	LIST_HEAD(invalid_list);
	u64 entry, gentry;
	u64 *spte;
	unsigned offset = offset_in_page(gpa);
	unsigned pte_size;
	unsigned page_offset;
	unsigned misaligned;
	unsigned quadrant;
	int level;
	int flooded = 0;
	int npte;
	int r;
	int invlpg_counter;
	bool remote_flush, local_flush, zap_page;

	zap_page = remote_flush = local_flush = false;

	pgprintk("%s: gpa %llx bytes %d\n", __func__, gpa, bytes);

	invlpg_counter = atomic_read(&vcpu->kvm->arch.invlpg_counter);

	/*
	 * Assume that the pte write is on a page table of the same type
	 * as the current vcpu paging mode. This is nearly always true
	 * (might be false while changing modes). Note it is verified
	 * later by update_pte().
	 */
	if ((is_pae(vcpu) && bytes == 4) || !new) {
		/* Handle a 32-bit guest writing two halves of a 64-bit gpte */
		if (is_pae(vcpu)) {
			gpa &= ~(gpa_t)7;
			bytes = 8;
		}
		r = kvm_read_guest(vcpu->kvm, gpa, &gentry, min(bytes, 8));
		if (r)
			gentry = 0;
		new = (const u8 *)&gentry;
	}

	switch (bytes) {
	case 4:
		gentry = *(const u32 *)new;
		break;
	case 8:
		gentry = *(const u64 *)new;
		break;
	default:
		gentry = 0;
		break;
	}

	mmu_guess_page_from_pte_write(vcpu, gpa, gentry);
	spin_lock(&vcpu->kvm->mmu_lock);
	if (atomic_read(&vcpu->kvm->arch.invlpg_counter) != invlpg_counter)
		gentry = 0;
	kvm_mmu_access_page(vcpu, gfn);
	kvm_mmu_free_some_pages(vcpu);
	++vcpu->kvm->stat.mmu_pte_write;
	kvm_mmu_audit(vcpu, "pre pte write");
	if (guest_initiated) {
		if (gfn == vcpu->arch.last_pt_write_gfn
		    && !last_updated_pte_accessed(vcpu)) {
			++vcpu->arch.last_pt_write_count;
			if (vcpu->arch.last_pt_write_count >= 3)
				flooded = 1;
		} else {
			vcpu->arch.last_pt_write_gfn = gfn;
			vcpu->arch.last_pt_write_count = 1;
			vcpu->arch.last_pte_updated = NULL;
		}
	}

	mask.cr0_wp = mask.cr4_pae = mask.nxe = 1;
	for_each_gfn_indirect_valid_sp(vcpu->kvm, sp, gfn, node) {
		pte_size = sp->role.cr4_pae ? 8 : 4;
		misaligned = (offset ^ (offset + bytes - 1)) & ~(pte_size - 1);
		misaligned |= bytes < 4;
		if (misaligned || flooded) {
			/*
			 * Misaligned accesses are too much trouble to fix
			 * up; also, they usually indicate a page is not used
			 * as a page table.
			 *
			 * If we're seeing too many writes to a page,
			 * it may no longer be a page table, or we may be
			 * forking, in which case it is better to unmap the
			 * page.
			 */
			pgprintk("misaligned: gpa %llx bytes %d role %x\n",
				 gpa, bytes, sp->role.word);
			zap_page |= !!kvm_mmu_prepare_zap_page(vcpu->kvm, sp,
							       &invalid_list);
			++vcpu->kvm->stat.mmu_flooded;
			continue;
		}
		page_offset = offset;
		level = sp->role.level;
		npte = 1;
		if (!sp->role.cr4_pae) {
			page_offset <<= 1;	/* 32->64 */
			/*
			 * A 32-bit pde maps 4MB while the shadow pdes map
			 * only 2MB. So we need to double the offset again
			 * and zap two pdes instead of one.
			 */
			if (level == PT32_ROOT_LEVEL) {
				page_offset &= ~7; /* kill rounding error */
				page_offset <<= 1;
				npte = 2;
			}
			quadrant = page_offset >> PAGE_SHIFT;
			page_offset &= ~PAGE_MASK;
			if (quadrant != sp->role.quadrant)
				continue;
		}
		local_flush = true;
		spte = &sp->spt[page_offset / sizeof(*spte)];
		while (npte--) {
			entry = *spte;
			mmu_pte_write_zap_pte(vcpu, sp, spte);
			if (gentry &&
			    !((sp->role.word ^ vcpu->arch.mmu.base_role.word)
			      & mask.word))
				mmu_pte_write_new_pte(vcpu, sp, spte, &gentry);
			if (!remote_flush && need_remote_flush(entry, *spte))
				remote_flush = true;
			++spte;
		}
	}
	mmu_pte_write_flush_tlb(vcpu, zap_page, remote_flush, local_flush);
	kvm_mmu_commit_zap_page(vcpu->kvm, &invalid_list);
	kvm_mmu_audit(vcpu, "post pte write");
	spin_unlock(&vcpu->kvm->mmu_lock);
	if (!is_error_pfn(vcpu->arch.update_pte.pfn)) {
		kvm_release_pfn_clean(vcpu->arch.update_pte.pfn);
		vcpu->arch.update_pte.pfn = bad_pfn;
	}
}

int kvm_mmu_unprotect_page_virt(struct kvm_vcpu *vcpu, gva_t gva)
{
	gpa_t gpa;
	int r;

	if (tdp_enabled)
		return 0;

	gpa = kvm_mmu_gva_to_gpa_read(vcpu, gva, NULL);

	spin_lock(&vcpu->kvm->mmu_lock);
	r = kvm_mmu_unprotect_page(vcpu->kvm, gpa >> PAGE_SHIFT);
	spin_unlock(&vcpu->kvm->mmu_lock);
	return r;
}
EXPORT_SYMBOL_GPL(kvm_mmu_unprotect_page_virt);

void __kvm_mmu_free_some_pages(struct kvm_vcpu *vcpu)
{
	int free_pages;
	LIST_HEAD(invalid_list);

	free_pages = vcpu->kvm->arch.n_free_mmu_pages;
	while (free_pages < KVM_REFILL_PAGES &&
	       !list_empty(&vcpu->kvm->arch.active_mmu_pages)) {
		struct kvm_mmu_page *sp;

		sp = container_of(vcpu->kvm->arch.active_mmu_pages.prev,
				  struct kvm_mmu_page, link);
		free_pages += kvm_mmu_prepare_zap_page(vcpu->kvm, sp,
						       &invalid_list);
		++vcpu->kvm->stat.mmu_recycled;
	}
	kvm_mmu_commit_zap_page(vcpu->kvm, &invalid_list);
}

int kvm_mmu_page_fault(struct kvm_vcpu *vcpu, gva_t cr2, u32 error_code)
{
	int r;
	enum emulation_result er;

	r = vcpu->arch.mmu.page_fault(vcpu, cr2, error_code);
	if (r < 0)
		goto out;

	if (!r) {
		r = 1;
		goto out;
	}

	r = mmu_topup_memory_caches(vcpu);
	if (r)
		goto out;

	er = emulate_instruction(vcpu, cr2, error_code, 0);

	switch (er) {
	case EMULATE_DONE:
		return 1;
	case EMULATE_DO_MMIO:
		++vcpu->stat.mmio_exits;
		/* fall through */
	case EMULATE_FAIL:
		return 0;
	default:
		BUG();
	}
out:
	return r;
}
EXPORT_SYMBOL_GPL(kvm_mmu_page_fault);

void kvm_mmu_invlpg(struct kvm_vcpu *vcpu, gva_t gva)
{
	vcpu->arch.mmu.invlpg(vcpu, gva);
	kvm_mmu_flush_tlb(vcpu);
	++vcpu->stat.invlpg;
}
EXPORT_SYMBOL_GPL(kvm_mmu_invlpg);

void kvm_enable_tdp(void)
{
	tdp_enabled = true;
}
EXPORT_SYMBOL_GPL(kvm_enable_tdp);

void kvm_disable_tdp(void)
{
	tdp_enabled = false;
}
EXPORT_SYMBOL_GPL(kvm_disable_tdp);

static void free_mmu_pages(struct kvm_vcpu *vcpu)
{
	free_page((unsigned long)vcpu->arch.mmu.pae_root);
}

static int alloc_mmu_pages(struct kvm_vcpu *vcpu)
{
	struct page *page;
	int i;

	ASSERT(vcpu);

	/*
	 * When emulating 32-bit mode, cr3 is only 32 bits even on x86_64.
	 * Therefore we need to allocate shadow page tables in the first
	 * 4GB of memory, which happens to fit the DMA32 zone.
	 */
	page = alloc_page(GFP_KERNEL | __GFP_DMA32);
	if (!page)
		return -ENOMEM;

	vcpu->arch.mmu.pae_root = page_address(page);
	for (i = 0; i < 4; ++i)
		vcpu->arch.mmu.pae_root[i] = INVALID_PAGE;

	return 0;
}

int kvm_mmu_create(struct kvm_vcpu *vcpu)
{
	ASSERT(vcpu);
	ASSERT(!VALID_PAGE(vcpu->arch.mmu.root_hpa));

	return alloc_mmu_pages(vcpu);
}

int kvm_mmu_setup(struct kvm_vcpu *vcpu)
{
	ASSERT(vcpu);
	ASSERT(!VALID_PAGE(vcpu->arch.mmu.root_hpa));

	return init_kvm_mmu(vcpu);
}

void kvm_mmu_destroy(struct kvm_vcpu *vcpu)
{
	ASSERT(vcpu);

	destroy_kvm_mmu(vcpu);
	free_mmu_pages(vcpu);
	mmu_free_memory_caches(vcpu);
}

void kvm_mmu_slot_remove_write_access(struct kvm *kvm, int slot)
{
	struct kvm_mmu_page *sp;

	list_for_each_entry(sp, &kvm->arch.active_mmu_pages, link) {
		int i;
		u64 *pt;

		if (!test_bit(slot, sp->slot_bitmap))
			continue;

		pt = sp->spt;
		for (i = 0; i < PT64_ENT_PER_PAGE; ++i)
			/* avoid RMW */
			if (is_writable_pte(pt[i]))
				pt[i] &= ~PT_WRITABLE_MASK;
	}
	kvm_flush_remote_tlbs(kvm);
}

void kvm_mmu_zap_all(struct kvm *kvm)
{
	struct kvm_mmu_page *sp, *node;
	LIST_HEAD(invalid_list);

	spin_lock(&kvm->mmu_lock);
restart:
	list_for_each_entry_safe(sp, node, &kvm->arch.active_mmu_pages, link)
		if (kvm_mmu_prepare_zap_page(kvm, sp, &invalid_list))
			goto restart;

	kvm_mmu_commit_zap_page(kvm, &invalid_list);
	spin_unlock(&kvm->mmu_lock);
}

static int kvm_mmu_remove_some_alloc_mmu_pages(struct kvm *kvm,
					       struct list_head *invalid_list)
{
	struct kvm_mmu_page *page;

	page = container_of(kvm->arch.active_mmu_pages.prev,
			    struct kvm_mmu_page, link);
	return kvm_mmu_prepare_zap_page(kvm, page, invalid_list);
}
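
/*
 * Memory shrinker callback: under host memory pressure, zap one shadow
 * page from the first VM that still has pages allocated, then rotate
 * that VM to the tail of vm_list so the cost is spread across VMs.
 * Returns the total number of allocated shadow pages as the cache-size
 * estimate for the shrinker core.
 */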
static int mmu_shrink(struct shrinker *shrink, int nr_to_scan, gfp_t gfp_mask)
{
	struct kvm *kvm;
	struct kvm *kvm_freed = NULL;
	int cache_count = 0;

	spin_lock(&kvm_lock);

	list_for_each_entry(kvm, &vm_list, vm_list) {
		int npages, idx, freed_pages;
		LIST_HEAD(invalid_list);

		idx = srcu_read_lock(&kvm->srcu);
		spin_lock(&kvm->mmu_lock);
		npages = kvm->arch.n_alloc_mmu_pages -
			 kvm->arch.n_free_mmu_pages;
		cache_count += npages;
		if (!kvm_freed && nr_to_scan > 0 && npages > 0) {
			freed_pages = kvm_mmu_remove_some_alloc_mmu_pages(kvm,
							  &invalid_list);
			cache_count -= freed_pages;
			kvm_freed = kvm;
		}
		nr_to_scan--;

		kvm_mmu_commit_zap_page(kvm, &invalid_list);
		spin_unlock(&kvm->mmu_lock);
		srcu_read_unlock(&kvm->srcu, idx);
	}
	if (kvm_freed)
		list_move_tail(&kvm_freed->vm_list, &vm_list);

	spin_unlock(&kvm_lock);

	return cache_count;
}

static struct shrinker mmu_shrinker = {
	.shrink = mmu_shrink,
	.seeks = DEFAULT_SEEKS * 10,
};

static void mmu_destroy_caches(void)
{
	if (pte_chain_cache)
		kmem_cache_destroy(pte_chain_cache);
	if (rmap_desc_cache)
		kmem_cache_destroy(rmap_desc_cache);
	if (mmu_page_header_cache)
		kmem_cache_destroy(mmu_page_header_cache);
}

void kvm_mmu_module_exit(void)
{
	mmu_destroy_caches();
	unregister_shrinker(&mmu_shrinker);
}

int kvm_mmu_module_init(void)
{
	pte_chain_cache = kmem_cache_create("kvm_pte_chain",
					    sizeof(struct kvm_pte_chain),
					    0, 0, NULL);
	if (!pte_chain_cache)
		goto nomem;
	rmap_desc_cache = kmem_cache_create("kvm_rmap_desc",
					    sizeof(struct kvm_rmap_desc),
					    0, 0, NULL);
	if (!rmap_desc_cache)
		goto nomem;

	mmu_page_header_cache = kmem_cache_create("kvm_mmu_page_header",
						  sizeof(struct kvm_mmu_page),
						  0, 0, NULL);
	if (!mmu_page_header_cache)
		goto nomem;

	register_shrinker(&mmu_shrinker);

	return 0;

nomem:
	mmu_destroy_caches();
	return -ENOMEM;
}

/*
 * Calculate mmu pages needed for kvm.
 */
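/*
 * The budget is KVM_PERMILLE_MMU_PAGES per thousand guest pages, with
 * a floor of KVM_MIN_ALLOC_MMU_PAGES. As a worked example (assuming
 * the usual defaults of 20 per mille and a floor of 64), a guest with
 * 1GB of memory (262144 4K pages) gets 262144 * 20 / 1000 = 5242 mmu
 * pages.
 */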
unsigned int kvm_mmu_calculate_mmu_pages(struct kvm *kvm)
{
	int i;
	unsigned int nr_mmu_pages;
	unsigned int nr_pages = 0;
	struct kvm_memslots *slots;

	slots = kvm_memslots(kvm);

	for (i = 0; i < slots->nmemslots; i++)
		nr_pages += slots->memslots[i].npages;

	nr_mmu_pages = nr_pages * KVM_PERMILLE_MMU_PAGES / 1000;
	nr_mmu_pages = max(nr_mmu_pages,
			   (unsigned int) KVM_MIN_ALLOC_MMU_PAGES);

	return nr_mmu_pages;
}
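
/*
 * Paravirtual MMU batch interface (exposed to guests via the
 * KVM_HC_MMU_OP hypercall): the guest fills a buffer with MMU
 * operation records that kvm_pv_mmu_op() copies in and dispatches one
 * kvm_mmu_op_header at a time via kvm_pv_mmu_op_one(). The two helpers
 * below peek at, respectively consume, records from that buffer.
 */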
static void *pv_mmu_peek_buffer(struct kvm_pv_mmu_op_buffer *buffer,
				unsigned len)
{
	if (len > buffer->len)
		return NULL;
	return buffer->ptr;
}

static void *pv_mmu_read_buffer(struct kvm_pv_mmu_op_buffer *buffer,
				unsigned len)
{
	void *ret;

	ret = pv_mmu_peek_buffer(buffer, len);
	if (!ret)
		return ret;
	buffer->ptr += len;
	buffer->len -= len;
	buffer->processed += len;
	return ret;
}

static int kvm_pv_mmu_write(struct kvm_vcpu *vcpu,
			    gpa_t addr, gpa_t value)
{
	int bytes = 8;
	int r;

	if (!is_long_mode(vcpu) && !is_pae(vcpu))
		bytes = 4;

	r = mmu_topup_memory_caches(vcpu);
	if (r)
		return r;

	if (!emulator_write_phys(vcpu, addr, &value, bytes))
		return -EFAULT;

	return 1;
}

static int kvm_pv_mmu_flush_tlb(struct kvm_vcpu *vcpu)
{
	(void)kvm_set_cr3(vcpu, vcpu->arch.cr3);
	return 1;
}

static int kvm_pv_mmu_release_pt(struct kvm_vcpu *vcpu, gpa_t addr)
{
	spin_lock(&vcpu->kvm->mmu_lock);
	mmu_unshadow(vcpu->kvm, addr >> PAGE_SHIFT);
	spin_unlock(&vcpu->kvm->mmu_lock);
	return 1;
}

static int kvm_pv_mmu_op_one(struct kvm_vcpu *vcpu,
			     struct kvm_pv_mmu_op_buffer *buffer)
{
	struct kvm_mmu_op_header *header;

	header = pv_mmu_peek_buffer(buffer, sizeof *header);
	if (!header)
		return 0;
	switch (header->op) {
	case KVM_MMU_OP_WRITE_PTE: {
		struct kvm_mmu_op_write_pte *wpte;

		wpte = pv_mmu_read_buffer(buffer, sizeof *wpte);
		if (!wpte)
			return 0;
		return kvm_pv_mmu_write(vcpu, wpte->pte_phys,
					wpte->pte_val);
	}
	case KVM_MMU_OP_FLUSH_TLB: {
		struct kvm_mmu_op_flush_tlb *ftlb;

		ftlb = pv_mmu_read_buffer(buffer, sizeof *ftlb);
		if (!ftlb)
			return 0;
		return kvm_pv_mmu_flush_tlb(vcpu);
	}
	case KVM_MMU_OP_RELEASE_PT: {
		struct kvm_mmu_op_release_pt *rpt;

		rpt = pv_mmu_read_buffer(buffer, sizeof *rpt);
		if (!rpt)
			return 0;
		return kvm_pv_mmu_release_pt(vcpu, rpt->pt_phys);
	}
	default: return 0;
	}
}

int kvm_pv_mmu_op(struct kvm_vcpu *vcpu, unsigned long bytes,
		  gpa_t addr, unsigned long *ret)
{
	int r;
	struct kvm_pv_mmu_op_buffer *buffer = &vcpu->arch.mmu_op_buffer;

	buffer->ptr = buffer->buf;
	buffer->len = min_t(unsigned long, bytes, sizeof buffer->buf);
	buffer->processed = 0;

	r = kvm_read_guest(vcpu->kvm, addr, buffer->buf, buffer->len);
	if (r)
		goto out;

	while (buffer->len) {
		r = kvm_pv_mmu_op_one(vcpu, buffer);
		if (r < 0)
			goto out;
		if (r == 0)
			break;
	}

	r = 1;
out:
	*ret = buffer->processed;
	return r;
}

int kvm_mmu_get_spte_hierarchy(struct kvm_vcpu *vcpu, u64 addr, u64 sptes[4])
{
	struct kvm_shadow_walk_iterator iterator;
	int nr_sptes = 0;

	spin_lock(&vcpu->kvm->mmu_lock);
	for_each_shadow_entry(vcpu, addr, iterator) {
		sptes[iterator.level-1] = *iterator.sptep;
		nr_sptes++;
		if (!is_shadow_present_pte(*iterator.sptep))
			break;
	}
	spin_unlock(&vcpu->kvm->mmu_lock);

	return nr_sptes;
}
EXPORT_SYMBOL_GPL(kvm_mmu_get_spte_hierarchy);
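
/*
 * Everything below is a debugging aid, compiled only when AUDIT is
 * defined: it walks the shadow page tables and rmaps, cross-checking
 * them against guest state and printing any inconsistency found.
 */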
  2843. #ifdef AUDIT
  2844. static const char *audit_msg;
  2845. static gva_t canonicalize(gva_t gva)
  2846. {
  2847. #ifdef CONFIG_X86_64
  2848. gva = (long long)(gva << 16) >> 16;
  2849. #endif
  2850. return gva;
  2851. }
  2852. typedef void (*inspect_spte_fn) (struct kvm *kvm, u64 *sptep);
  2853. static void __mmu_spte_walk(struct kvm *kvm, struct kvm_mmu_page *sp,
  2854. inspect_spte_fn fn)
  2855. {
  2856. int i;
  2857. for (i = 0; i < PT64_ENT_PER_PAGE; ++i) {
  2858. u64 ent = sp->spt[i];
  2859. if (is_shadow_present_pte(ent)) {
  2860. if (!is_last_spte(ent, sp->role.level)) {
  2861. struct kvm_mmu_page *child;
  2862. child = page_header(ent & PT64_BASE_ADDR_MASK);
  2863. __mmu_spte_walk(kvm, child, fn);
  2864. } else
  2865. fn(kvm, &sp->spt[i]);
  2866. }
  2867. }
  2868. }
  2869. static void mmu_spte_walk(struct kvm_vcpu *vcpu, inspect_spte_fn fn)
  2870. {
  2871. int i;
  2872. struct kvm_mmu_page *sp;
  2873. if (!VALID_PAGE(vcpu->arch.mmu.root_hpa))
  2874. return;
  2875. if (vcpu->arch.mmu.shadow_root_level == PT64_ROOT_LEVEL) {
  2876. hpa_t root = vcpu->arch.mmu.root_hpa;
  2877. sp = page_header(root);
  2878. __mmu_spte_walk(vcpu->kvm, sp, fn);
  2879. return;
  2880. }
  2881. for (i = 0; i < 4; ++i) {
  2882. hpa_t root = vcpu->arch.mmu.pae_root[i];
  2883. if (root && VALID_PAGE(root)) {
  2884. root &= PT64_BASE_ADDR_MASK;
  2885. sp = page_header(root);
  2886. __mmu_spte_walk(vcpu->kvm, sp, fn);
  2887. }
  2888. }
  2889. return;
  2890. }
static void audit_mappings_page(struct kvm_vcpu *vcpu, u64 page_pte,
				gva_t va, int level)
{
	u64 *pt = __va(page_pte & PT64_BASE_ADDR_MASK);
	int i;
	gva_t va_delta = 1ul << (PAGE_SHIFT + 9 * (level - 1));

	for (i = 0; i < PT64_ENT_PER_PAGE; ++i, va += va_delta) {
		u64 ent = pt[i];

		if (ent == shadow_trap_nonpresent_pte)
			continue;

		va = canonicalize(va);
		if (is_shadow_present_pte(ent) && !is_last_spte(ent, level))
			audit_mappings_page(vcpu, ent, va, level - 1);
		else {
			gpa_t gpa = kvm_mmu_gva_to_gpa_read(vcpu, va, NULL);
			gfn_t gfn = gpa >> PAGE_SHIFT;
			pfn_t pfn = gfn_to_pfn(vcpu->kvm, gfn);
			hpa_t hpa = (hpa_t)pfn << PAGE_SHIFT;

			if (is_error_pfn(pfn)) {
				kvm_release_pfn_clean(pfn);
				continue;
			}

			if (is_shadow_present_pte(ent)
			    && (ent & PT64_BASE_ADDR_MASK) != hpa)
				printk(KERN_ERR "xx audit error: (%s) levels %d"
				       " gva %lx gpa %llx hpa %llx ent %llx %d\n",
				       audit_msg, vcpu->arch.mmu.root_level,
				       va, gpa, hpa, ent,
				       is_shadow_present_pte(ent));
			else if (ent == shadow_notrap_nonpresent_pte
				 && !is_error_hpa(hpa))
				printk(KERN_ERR "audit: (%s) notrap shadow,"
				       " valid guest gva %lx\n", audit_msg, va);
			kvm_release_pfn_clean(pfn);
		}
	}
}
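
/*
 * Audit the whole shadow tree, starting from the 4-level root or from
 * each present PAE root (each covering 1GB of the address space).
 */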
static void audit_mappings(struct kvm_vcpu *vcpu)
{
	unsigned i;

	if (vcpu->arch.mmu.root_level == 4)
		audit_mappings_page(vcpu, vcpu->arch.mmu.root_hpa, 0, 4);
	else
		for (i = 0; i < 4; ++i)
			if (vcpu->arch.mmu.pae_root[i] & PT_PRESENT_MASK)
				audit_mappings_page(vcpu,
						    vcpu->arch.mmu.pae_root[i],
						    i << 30,
						    2);
}
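
/*
 * Count every spte recorded in the rmap chains of all memslots.  Bit 0 of
 * an rmap entry distinguishes a direct spte pointer from a pointer to a
 * kvm_rmap_desc chain.
 */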
static int count_rmaps(struct kvm_vcpu *vcpu)
{
	struct kvm *kvm = vcpu->kvm;
	struct kvm_memslots *slots;
	int nmaps = 0;
	int i, j, k, idx;

	idx = srcu_read_lock(&kvm->srcu);
	slots = kvm_memslots(kvm);
	for (i = 0; i < KVM_MEMORY_SLOTS; ++i) {
		struct kvm_memory_slot *m = &slots->memslots[i];
		struct kvm_rmap_desc *d;

		for (j = 0; j < m->npages; ++j) {
			unsigned long *rmapp = &m->rmap[j];

			if (!*rmapp)
				continue;
			if (!(*rmapp & 1)) {
				++nmaps;
				continue;
			}
			d = (struct kvm_rmap_desc *)(*rmapp & ~1ul);
			while (d) {
				for (k = 0; k < RMAP_EXT; ++k)
					if (d->sptes[k])
						++nmaps;
					else
						break;
				d = d->more;
			}
		}
	}
	srcu_read_unlock(&kvm->srcu, idx);
	return nmaps;
}
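
/*
 * Verify that a writable spte is reflected in the rmap: it must belong to
 * a gfn with a memslot, and that gfn's rmap chain must be non-empty.
 */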
static void inspect_spte_has_rmap(struct kvm *kvm, u64 *sptep)
{
	unsigned long *rmapp;
	struct kvm_mmu_page *rev_sp;
	gfn_t gfn;

	if (is_writable_pte(*sptep)) {
		rev_sp = page_header(__pa(sptep));
		gfn = kvm_mmu_page_get_gfn(rev_sp, sptep - rev_sp->spt);

		if (!gfn_to_memslot(kvm, gfn)) {
			if (!printk_ratelimit())
				return;
			printk(KERN_ERR "%s: no memslot for gfn %llx\n",
			       audit_msg, gfn);
			printk(KERN_ERR "%s: index %ld of sp (gfn=%llx)\n",
			       audit_msg, (long int)(sptep - rev_sp->spt),
			       rev_sp->gfn);
			dump_stack();
			return;
		}

		rmapp = gfn_to_rmap(kvm, gfn, rev_sp->role.level);
		if (!*rmapp) {
			if (!printk_ratelimit())
				return;
			printk(KERN_ERR "%s: no rmap for writable spte %llx\n",
			       audit_msg, *sptep);
			dump_stack();
		}
	}
}
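
/*
 * Run inspect_spte_has_rmap() over every leaf spte in the shadow tree.
 */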
static void audit_writable_sptes_have_rmaps(struct kvm_vcpu *vcpu)
{
	mmu_spte_walk(vcpu, inspect_spte_has_rmap);
}
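
/*
 * Same rmap check as above, but iterating over the active shadow pages
 * and restricted to present, writable last-level sptes.
 */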
static void check_writable_mappings_rmap(struct kvm_vcpu *vcpu)
{
	struct kvm_mmu_page *sp;
	int i;

	list_for_each_entry(sp, &vcpu->kvm->arch.active_mmu_pages, link) {
		u64 *pt = sp->spt;

		if (sp->role.level != PT_PAGE_TABLE_LEVEL)
			continue;

		for (i = 0; i < PT64_ENT_PER_PAGE; ++i) {
			u64 ent = pt[i];

			if (!(ent & PT_PRESENT_MASK))
				continue;
			if (!is_writable_pte(ent))
				continue;
			inspect_spte_has_rmap(vcpu->kvm, &pt[i]);
		}
	}
}
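
/*
 * rmap audit entry point; note that count_rmaps()'s return value is
 * ignored here, the call just exercises the rmap walk.
 */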
static void audit_rmap(struct kvm_vcpu *vcpu)
{
	check_writable_mappings_rmap(vcpu);
	count_rmaps(vcpu);
}
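
/*
 * A non-direct, synced shadow page must be write-protected: warn if any
 * spte in its gfn's rmap chain is still writable.
 */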
static void audit_write_protection(struct kvm_vcpu *vcpu)
{
	struct kvm_mmu_page *sp;
	struct kvm_memory_slot *slot;
	unsigned long *rmapp;
	u64 *spte;

	list_for_each_entry(sp, &vcpu->kvm->arch.active_mmu_pages, link) {
		if (sp->role.direct)
			continue;
		if (sp->unsync)
			continue;

		slot = gfn_to_memslot(vcpu->kvm, sp->gfn);
		rmapp = &slot->rmap[sp->gfn - slot->base_gfn];

		spte = rmap_next(vcpu->kvm, rmapp, NULL);
		while (spte) {
			if (is_writable_pte(*spte))
				printk(KERN_ERR "%s: (%s) shadow page has "
				       "writable mappings: gfn %llx role %x\n",
				       __func__, audit_msg, sp->gfn,
				       sp->role.word);
			spte = rmap_next(vcpu->kvm, rmapp, spte);
		}
	}
}
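
/*
 * Run all audit passes.  dbg is cleared around the walk so the audit
 * itself does not flood the debug log; the full mapping audit is skipped
 * at the "pre pte write" checkpoint, where guest and shadow tables may
 * transiently disagree.
 */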
static void kvm_mmu_audit(struct kvm_vcpu *vcpu, const char *msg)
{
	int olddbg = dbg;

	dbg = 0;
	audit_msg = msg;
	audit_rmap(vcpu);
	audit_write_protection(vcpu);
	if (strcmp("pre pte write", audit_msg) != 0)
		audit_mappings(vcpu);
	audit_writable_sptes_have_rmaps(vcpu);
	dbg = olddbg;
}

#endif