mmu.c

/*
 * Kernel-based Virtual Machine driver for Linux
 *
 * This module enables machines with Intel VT-x extensions to run virtual
 * machines without emulation or binary translation.
 *
 * MMU support
 *
 * Copyright (C) 2006 Qumranet, Inc.
 * Copyright 2010 Red Hat, Inc. and/or its affiliates.
 *
 * Authors:
 *        Yaniv Kamay <yaniv@qumranet.com>
 *        Avi Kivity <avi@qumranet.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2. See
 * the COPYING file in the top-level directory.
 *
 */

#include "irq.h"
#include "mmu.h"
#include "x86.h"
#include "kvm_cache_regs.h"

#include <linux/kvm_host.h>
#include <linux/types.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/highmem.h>
#include <linux/module.h>
#include <linux/swap.h>
#include <linux/hugetlb.h>
#include <linux/compiler.h>
#include <linux/srcu.h>
#include <linux/slab.h>
#include <linux/uaccess.h>

#include <asm/page.h>
#include <asm/cmpxchg.h>
#include <asm/io.h>
#include <asm/vmx.h>

/*
 * When set to true, this variable enables Two-Dimensional Paging (TDP),
 * where the hardware walks two page tables:
 * 1. the guest-virtual to guest-physical table
 * 2. while doing 1., the guest-physical to host-physical table
 * If the hardware supports that, we don't need to do shadow paging.
 */
bool tdp_enabled = false;

enum {
        AUDIT_PRE_PAGE_FAULT,
        AUDIT_POST_PAGE_FAULT,
        AUDIT_PRE_PTE_WRITE,
        AUDIT_POST_PTE_WRITE,
        AUDIT_PRE_SYNC,
        AUDIT_POST_SYNC
};

#undef MMU_DEBUG

#ifdef MMU_DEBUG

#define pgprintk(x...) do { if (dbg) printk(x); } while (0)
#define rmap_printk(x...) do { if (dbg) printk(x); } while (0)

#else

#define pgprintk(x...) do { } while (0)
#define rmap_printk(x...) do { } while (0)

#endif

#ifdef MMU_DEBUG
static bool dbg = 0;
module_param(dbg, bool, 0644);
#endif

#ifndef MMU_DEBUG
#define ASSERT(x) do { } while (0)
#else
#define ASSERT(x)                                                        \
        if (!(x)) {                                                      \
                printk(KERN_WARNING "assertion failed %s:%d: %s\n",     \
                       __FILE__, __LINE__, #x);                          \
        }
#endif

#define PTE_PREFETCH_NUM 8

#define PT_FIRST_AVAIL_BITS_SHIFT 10
#define PT64_SECOND_AVAIL_BITS_SHIFT 52

#define PT64_LEVEL_BITS 9

#define PT64_LEVEL_SHIFT(level) \
        (PAGE_SHIFT + (level - 1) * PT64_LEVEL_BITS)

#define PT64_INDEX(address, level)\
        (((address) >> PT64_LEVEL_SHIFT(level)) & ((1 << PT64_LEVEL_BITS) - 1))

#define PT32_LEVEL_BITS 10

#define PT32_LEVEL_SHIFT(level) \
        (PAGE_SHIFT + (level - 1) * PT32_LEVEL_BITS)

#define PT32_LVL_OFFSET_MASK(level) \
        (PT32_BASE_ADDR_MASK & ((1ULL << (PAGE_SHIFT + (((level) - 1) \
                                                * PT32_LEVEL_BITS))) - 1))

#define PT32_INDEX(address, level)\
        (((address) >> PT32_LEVEL_SHIFT(level)) & ((1 << PT32_LEVEL_BITS) - 1))

#define PT64_BASE_ADDR_MASK (((1ULL << 52) - 1) & ~(u64)(PAGE_SIZE-1))
#define PT64_DIR_BASE_ADDR_MASK \
        (PT64_BASE_ADDR_MASK & ~((1ULL << (PAGE_SHIFT + PT64_LEVEL_BITS)) - 1))
#define PT64_LVL_ADDR_MASK(level) \
        (PT64_BASE_ADDR_MASK & ~((1ULL << (PAGE_SHIFT + (((level) - 1) \
                                                * PT64_LEVEL_BITS))) - 1))
#define PT64_LVL_OFFSET_MASK(level) \
        (PT64_BASE_ADDR_MASK & ((1ULL << (PAGE_SHIFT + (((level) - 1) \
                                                * PT64_LEVEL_BITS))) - 1))

#define PT32_BASE_ADDR_MASK PAGE_MASK
#define PT32_DIR_BASE_ADDR_MASK \
        (PAGE_MASK & ~((1ULL << (PAGE_SHIFT + PT32_LEVEL_BITS)) - 1))
#define PT32_LVL_ADDR_MASK(level) \
        (PAGE_MASK & ~((1ULL << (PAGE_SHIFT + (((level) - 1) \
                                                * PT32_LEVEL_BITS))) - 1))
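
/*
 * Editorial worked example (not in the original source): assuming 4 KiB
 * pages (PAGE_SHIFT == 12) and the 64-bit format (PT64_LEVEL_BITS == 9):
 *
 *        PT64_LEVEL_SHIFT(1) == 12, PT64_LEVEL_SHIFT(2) == 21, so
 *        PT64_INDEX(addr, 2) == (addr >> 21) & 511.
 *
 * For addr == 0x40200000: (0x40200000 >> 21) & 511 == 513 & 511 == 1,
 * and PT64_LVL_ADDR_MASK(2) keeps bits 21..51, i.e. the base address of
 * the 2 MiB region containing addr.
 */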
#define PT64_PERM_MASK (PT_PRESENT_MASK | PT_WRITABLE_MASK | PT_USER_MASK \
                        | PT64_NX_MASK)

#define ACC_EXEC_MASK    1
#define ACC_WRITE_MASK   PT_WRITABLE_MASK
#define ACC_USER_MASK    PT_USER_MASK
#define ACC_ALL          (ACC_EXEC_MASK | ACC_WRITE_MASK | ACC_USER_MASK)

#include <trace/events/kvm.h>

#define CREATE_TRACE_POINTS
#include "mmutrace.h"

#define SPTE_HOST_WRITEABLE (1ULL << PT_FIRST_AVAIL_BITS_SHIFT)
#define SPTE_MMU_WRITEABLE (1ULL << (PT_FIRST_AVAIL_BITS_SHIFT + 1))

#define SHADOW_PT_INDEX(addr, level) PT64_INDEX(addr, level)

/* make pte_list_desc fit well in cache line */
#define PTE_LIST_EXT 3

struct pte_list_desc {
        u64 *sptes[PTE_LIST_EXT];
        struct pte_list_desc *more;
};

struct kvm_shadow_walk_iterator {
        u64 addr;
        hpa_t shadow_addr;
        u64 *sptep;
        int level;
        unsigned index;
};

#define for_each_shadow_entry(_vcpu, _addr, _walker)            \
        for (shadow_walk_init(&(_walker), _vcpu, _addr);        \
             shadow_walk_okay(&(_walker));                      \
             shadow_walk_next(&(_walker)))

#define for_each_shadow_entry_lockless(_vcpu, _addr, _walker, spte)     \
        for (shadow_walk_init(&(_walker), _vcpu, _addr);                \
             shadow_walk_okay(&(_walker)) &&                            \
                ({ spte = mmu_spte_get_lockless(_walker.sptep); 1; });  \
             __shadow_walk_next(&(_walker), spte))

static struct kmem_cache *pte_list_desc_cache;
static struct kmem_cache *mmu_page_header_cache;
static struct percpu_counter kvm_total_used_mmu_pages;

static u64 __read_mostly shadow_nx_mask;
static u64 __read_mostly shadow_x_mask; /* mutually exclusive with nx_mask */
static u64 __read_mostly shadow_user_mask;
static u64 __read_mostly shadow_accessed_mask;
static u64 __read_mostly shadow_dirty_mask;
static u64 __read_mostly shadow_mmio_mask;

static void mmu_spte_set(u64 *sptep, u64 spte);
static void mmu_free_roots(struct kvm_vcpu *vcpu);

void kvm_mmu_set_mmio_spte_mask(u64 mmio_mask)
{
        shadow_mmio_mask = mmio_mask;
}
EXPORT_SYMBOL_GPL(kvm_mmu_set_mmio_spte_mask);

static void mark_mmio_spte(u64 *sptep, u64 gfn, unsigned access)
{
        struct kvm_mmu_page *sp = page_header(__pa(sptep));

        access &= ACC_WRITE_MASK | ACC_USER_MASK;

        sp->mmio_cached = true;
        trace_mark_mmio_spte(sptep, gfn, access);
        mmu_spte_set(sptep, shadow_mmio_mask | access | gfn << PAGE_SHIFT);
}

static bool is_mmio_spte(u64 spte)
{
        return (spte & shadow_mmio_mask) == shadow_mmio_mask;
}

static gfn_t get_mmio_spte_gfn(u64 spte)
{
        return (spte & ~shadow_mmio_mask) >> PAGE_SHIFT;
}

static unsigned get_mmio_spte_access(u64 spte)
{
        return (spte & ~shadow_mmio_mask) & ~PAGE_MASK;
}
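
/*
 * Editorial example (not part of the original file): mark_mmio_spte()
 * packs the gfn and the ACC_WRITE/ACC_USER bits into the spte alongside
 * shadow_mmio_mask, and the two accessors above simply undo the packing:
 *
 *        spte = shadow_mmio_mask | access | (gfn << PAGE_SHIFT);
 *        get_mmio_spte_gfn(spte)    == gfn;     // access bits sit below
 *        get_mmio_spte_access(spte) == access;  // PAGE_SHIFT, gfn above
 *
 * The round trip only works as long as shadow_mmio_mask overlaps neither
 * the gfn bits nor the low access bits, which the caller of
 * kvm_mmu_set_mmio_spte_mask() must guarantee.
 */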
static bool set_mmio_spte(u64 *sptep, gfn_t gfn, pfn_t pfn, unsigned access)
{
        if (unlikely(is_noslot_pfn(pfn))) {
                mark_mmio_spte(sptep, gfn, access);
                return true;
        }

        return false;
}

static inline u64 rsvd_bits(int s, int e)
{
        return ((1ULL << (e - s + 1)) - 1) << s;
}
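
/*
 * Editorial worked example (not in the original source): rsvd_bits(s, e)
 * builds a mask covering bits s..e inclusive. For instance,
 * rsvd_bits(52, 62) == ((1ULL << 11) - 1) << 52 == 0x7ff0000000000000,
 * the reserved range above a 52-bit physical address limit.
 */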
void kvm_mmu_set_mask_ptes(u64 user_mask, u64 accessed_mask,
                u64 dirty_mask, u64 nx_mask, u64 x_mask)
{
        shadow_user_mask = user_mask;
        shadow_accessed_mask = accessed_mask;
        shadow_dirty_mask = dirty_mask;
        shadow_nx_mask = nx_mask;
        shadow_x_mask = x_mask;
}
EXPORT_SYMBOL_GPL(kvm_mmu_set_mask_ptes);

static int is_cpuid_PSE36(void)
{
        return 1;
}

static int is_nx(struct kvm_vcpu *vcpu)
{
        return vcpu->arch.efer & EFER_NX;
}

static int is_shadow_present_pte(u64 pte)
{
        return pte & PT_PRESENT_MASK && !is_mmio_spte(pte);
}

static int is_large_pte(u64 pte)
{
        return pte & PT_PAGE_SIZE_MASK;
}

static int is_dirty_gpte(unsigned long pte)
{
        return pte & PT_DIRTY_MASK;
}

static int is_rmap_spte(u64 pte)
{
        return is_shadow_present_pte(pte);
}

static int is_last_spte(u64 pte, int level)
{
        if (level == PT_PAGE_TABLE_LEVEL)
                return 1;
        if (is_large_pte(pte))
                return 1;
        return 0;
}

static pfn_t spte_to_pfn(u64 pte)
{
        return (pte & PT64_BASE_ADDR_MASK) >> PAGE_SHIFT;
}

static gfn_t pse36_gfn_delta(u32 gpte)
{
        int shift = 32 - PT32_DIR_PSE36_SHIFT - PAGE_SHIFT;

        return (gpte & PT32_DIR_PSE36_MASK) << shift;
}
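
/*
 * Editorial note (not in the original source): with PSE-36, a few low
 * bits of a 4 MiB guest pde supply physical-address bits 32 and up.
 * Assuming PT32_DIR_PSE36_SHIFT == 13 and PAGE_SHIFT == 12, shift == 7,
 * so gpte bit 13 lands at gfn bit 20, i.e. bit 32 of the guest-physical
 * address once the gfn is shifted left by PAGE_SHIFT.
 */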
#ifdef CONFIG_X86_64
static void __set_spte(u64 *sptep, u64 spte)
{
        *sptep = spte;
}

static void __update_clear_spte_fast(u64 *sptep, u64 spte)
{
        *sptep = spte;
}

static u64 __update_clear_spte_slow(u64 *sptep, u64 spte)
{
        return xchg(sptep, spte);
}

static u64 __get_spte_lockless(u64 *sptep)
{
        return ACCESS_ONCE(*sptep);
}

static bool __check_direct_spte_mmio_pf(u64 spte)
{
        /* It is valid if the spte is zapped. */
        return spte == 0ull;
}
#else
union split_spte {
        struct {
                u32 spte_low;
                u32 spte_high;
        };
        u64 spte;
};

static void count_spte_clear(u64 *sptep, u64 spte)
{
        struct kvm_mmu_page *sp = page_header(__pa(sptep));

        if (is_shadow_present_pte(spte))
                return;

        /* Ensure the spte is completely set before we increase the count */
        smp_wmb();
        sp->clear_spte_count++;
}

static void __set_spte(u64 *sptep, u64 spte)
{
        union split_spte *ssptep, sspte;

        ssptep = (union split_spte *)sptep;
        sspte = (union split_spte)spte;

        ssptep->spte_high = sspte.spte_high;

        /*
         * If we map the spte from nonpresent to present, we must store
         * the high bits first and only then set the present bit, so the
         * CPU cannot fetch this spte while we are still setting it.
         */
        smp_wmb();

        ssptep->spte_low = sspte.spte_low;
}

static void __update_clear_spte_fast(u64 *sptep, u64 spte)
{
        union split_spte *ssptep, sspte;

        ssptep = (union split_spte *)sptep;
        sspte = (union split_spte)spte;

        ssptep->spte_low = sspte.spte_low;

        /*
         * If we map the spte from present to nonpresent, we must clear
         * the present bit first so that no vcpu can fetch the stale high
         * bits.
         */
        smp_wmb();

        ssptep->spte_high = sspte.spte_high;
        count_spte_clear(sptep, spte);
}

static u64 __update_clear_spte_slow(u64 *sptep, u64 spte)
{
        union split_spte *ssptep, sspte, orig;

        ssptep = (union split_spte *)sptep;
        sspte = (union split_spte)spte;

        /* xchg acts as a barrier before the setting of the high bits */
        orig.spte_low = xchg(&ssptep->spte_low, sspte.spte_low);
        orig.spte_high = ssptep->spte_high;
        ssptep->spte_high = sspte.spte_high;
        count_spte_clear(sptep, spte);

        return orig.spte;
}
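
/*
 * Editorial summary (not in the original source): on 32-bit hosts a
 * 64-bit spte is written as two 32-bit halves, so the ordering above is
 * what keeps a concurrently-walking CPU from seeing a torn entry:
 *
 *        set:    write spte_high; smp_wmb(); write spte_low (present bit)
 *        clear:  write spte_low (clears present); smp_wmb(); write spte_high
 *
 * Either way, the half containing the present bit only flips once the
 * other half is already consistent.
 */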
/*
 * The idea of fetching the spte locklessly on x86_32 comes from
 * gup_get_pte (arch/x86/mm/gup.c). The difference is that we cannot
 * observe the TLB flush for a cleared spte once we leave guest mode,
 * so we emulate it by incrementing clear_spte_count whenever an spte
 * is cleared.
 */
static u64 __get_spte_lockless(u64 *sptep)
{
        struct kvm_mmu_page *sp = page_header(__pa(sptep));
        union split_spte spte, *orig = (union split_spte *)sptep;
        int count;

retry:
        count = sp->clear_spte_count;
        smp_rmb();

        spte.spte_low = orig->spte_low;
        smp_rmb();

        spte.spte_high = orig->spte_high;
        smp_rmb();

        if (unlikely(spte.spte_low != orig->spte_low ||
              count != sp->clear_spte_count))
                goto retry;

        return spte.spte;
}
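
/*
 * Editorial note (not in the original source): the retry loop above is
 * a hand-rolled seqcount. If a clear runs between our two half-reads,
 * either spte_low has changed or clear_spte_count has been bumped, so
 * the torn (low, high) pair is discarded and the read restarts.
 */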
static bool __check_direct_spte_mmio_pf(u64 spte)
{
        union split_spte sspte = (union split_spte)spte;
        u32 high_mmio_mask = shadow_mmio_mask >> 32;

        /* It is valid if the spte is zapped. */
        if (spte == 0ull)
                return true;

        /* It is valid if the spte is being zapped. */
        if (sspte.spte_low == 0ull &&
            (sspte.spte_high & high_mmio_mask) == high_mmio_mask)
                return true;

        return false;
}
#endif

static bool spte_is_locklessly_modifiable(u64 spte)
{
        return (spte & (SPTE_HOST_WRITEABLE | SPTE_MMU_WRITEABLE)) ==
                (SPTE_HOST_WRITEABLE | SPTE_MMU_WRITEABLE);
}
static bool spte_has_volatile_bits(u64 spte)
{
        /*
         * Always update the spte atomically if it can be updated
         * outside of mmu-lock: that ensures the dirty bit is not lost
         * and gives us a stable is_writable_pte(), so no tlb flush is
         * missed.
         */
        if (spte_is_locklessly_modifiable(spte))
                return true;

        if (!shadow_accessed_mask)
                return false;

        if (!is_shadow_present_pte(spte))
                return false;

        if ((spte & shadow_accessed_mask) &&
              (!is_writable_pte(spte) || (spte & shadow_dirty_mask)))
                return false;

        return true;
}

static bool spte_is_bit_cleared(u64 old_spte, u64 new_spte, u64 bit_mask)
{
        return (old_spte & bit_mask) && !(new_spte & bit_mask);
}

/* Rules for using mmu_spte_set:
 * Set the sptep from nonpresent to present.
 * Note: the sptep being assigned *must* be either not present
 * or in a state where the hardware will not attempt to update
 * the spte.
 */
static void mmu_spte_set(u64 *sptep, u64 new_spte)
{
        WARN_ON(is_shadow_present_pte(*sptep));
        __set_spte(sptep, new_spte);
}

/* Rules for using mmu_spte_update:
 * Update the state bits; the mapped pfn must not change.
 *
 * Whenever we overwrite a writable spte with a read-only one we
 * should flush remote TLBs. Otherwise rmap_write_protect
 * will find a read-only spte, even though the writable spte
 * might be cached on a CPU's TLB; the return value indicates this
 * case.
 */
static bool mmu_spte_update(u64 *sptep, u64 new_spte)
{
        u64 old_spte = *sptep;
        bool ret = false;

        WARN_ON(!is_rmap_spte(new_spte));

        if (!is_shadow_present_pte(old_spte)) {
                mmu_spte_set(sptep, new_spte);
                return ret;
        }

        if (!spte_has_volatile_bits(old_spte))
                __update_clear_spte_fast(sptep, new_spte);
        else
                old_spte = __update_clear_spte_slow(sptep, new_spte);

        /*
         * Updating an spte outside of mmu-lock is safe because we always
         * update it atomically; see the comments in
         * spte_has_volatile_bits().
         */
        if (is_writable_pte(old_spte) && !is_writable_pte(new_spte))
                ret = true;

        if (!shadow_accessed_mask)
                return ret;

        if (spte_is_bit_cleared(old_spte, new_spte, shadow_accessed_mask))
                kvm_set_pfn_accessed(spte_to_pfn(old_spte));
        if (spte_is_bit_cleared(old_spte, new_spte, shadow_dirty_mask))
                kvm_set_pfn_dirty(spte_to_pfn(old_spte));

        return ret;
}
/*
 * Rules for using mmu_spte_clear_track_bits:
 * It sets the sptep from present to nonpresent and tracks the
 * state bits; it is used to clear a last-level sptep.
 */
static int mmu_spte_clear_track_bits(u64 *sptep)
{
        pfn_t pfn;
        u64 old_spte = *sptep;

        if (!spte_has_volatile_bits(old_spte))
                __update_clear_spte_fast(sptep, 0ull);
        else
                old_spte = __update_clear_spte_slow(sptep, 0ull);

        if (!is_rmap_spte(old_spte))
                return 0;

        pfn = spte_to_pfn(old_spte);

        /*
         * KVM does not hold a refcount on the page used by the
         * kvm mmu; before reclaiming the page, we should
         * unmap it from the mmu first.
         */
        WARN_ON(!kvm_is_mmio_pfn(pfn) && !page_count(pfn_to_page(pfn)));

        if (!shadow_accessed_mask || old_spte & shadow_accessed_mask)
                kvm_set_pfn_accessed(pfn);
        if (!shadow_dirty_mask || (old_spte & shadow_dirty_mask))
                kvm_set_pfn_dirty(pfn);
        return 1;
}

/*
 * Rules for using mmu_spte_clear_no_track:
 * Directly clear the spte without caring about its state bits;
 * it is used on upper-level sptes, which have no state bits to track.
 */
static void mmu_spte_clear_no_track(u64 *sptep)
{
        __update_clear_spte_fast(sptep, 0ull);
}

static u64 mmu_spte_get_lockless(u64 *sptep)
{
        return __get_spte_lockless(sptep);
}

static void walk_shadow_page_lockless_begin(struct kvm_vcpu *vcpu)
{
        /*
         * Prevent page table teardown by making any free-er wait during
         * the kvm_flush_remote_tlbs() IPI to all active vcpus.
         */
        local_irq_disable();
        vcpu->mode = READING_SHADOW_PAGE_TABLES;
        /*
         * Make sure a following spte read is not reordered ahead of the
         * write to vcpu->mode.
         */
        smp_mb();
}

static void walk_shadow_page_lockless_end(struct kvm_vcpu *vcpu)
{
        /*
         * Make sure the write to vcpu->mode is not reordered in front of
         * reads to sptes. If it is, kvm_commit_zap_page() can see us
         * OUTSIDE_GUEST_MODE and proceed to free the shadow page table.
         */
        smp_mb();
        vcpu->mode = OUTSIDE_GUEST_MODE;
        local_irq_enable();
}

static int mmu_topup_memory_cache(struct kvm_mmu_memory_cache *cache,
                                  struct kmem_cache *base_cache, int min)
{
        void *obj;

        if (cache->nobjs >= min)
                return 0;
        while (cache->nobjs < ARRAY_SIZE(cache->objects)) {
                obj = kmem_cache_zalloc(base_cache, GFP_KERNEL);
                if (!obj)
                        return -ENOMEM;
                cache->objects[cache->nobjs++] = obj;
        }
        return 0;
}
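
/*
 * Editorial sketch (not in the original source): the memory-cache helpers
 * implement a pre-fill pattern. In a sleepable context the caches are
 * topped up to a minimum; later, with mmu-lock held (where allocation
 * must not sleep or fail), objects are simply popped:
 *
 *        mmu_topup_memory_caches(vcpu);          // may sleep, may fail
 *        spin_lock(&vcpu->kvm->mmu_lock);
 *        desc = mmu_alloc_pte_list_desc(vcpu);   // never sleeps or fails
 *        spin_unlock(&vcpu->kvm->mmu_lock);
 */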
static int mmu_memory_cache_free_objects(struct kvm_mmu_memory_cache *cache)
{
        return cache->nobjs;
}

static void mmu_free_memory_cache(struct kvm_mmu_memory_cache *mc,
                                  struct kmem_cache *cache)
{
        while (mc->nobjs)
                kmem_cache_free(cache, mc->objects[--mc->nobjs]);
}

static int mmu_topup_memory_cache_page(struct kvm_mmu_memory_cache *cache,
                                       int min)
{
        void *page;

        if (cache->nobjs >= min)
                return 0;
        while (cache->nobjs < ARRAY_SIZE(cache->objects)) {
                page = (void *)__get_free_page(GFP_KERNEL);
                if (!page)
                        return -ENOMEM;
                cache->objects[cache->nobjs++] = page;
        }
        return 0;
}

static void mmu_free_memory_cache_page(struct kvm_mmu_memory_cache *mc)
{
        while (mc->nobjs)
                free_page((unsigned long)mc->objects[--mc->nobjs]);
}

static int mmu_topup_memory_caches(struct kvm_vcpu *vcpu)
{
        int r;

        r = mmu_topup_memory_cache(&vcpu->arch.mmu_pte_list_desc_cache,
                                   pte_list_desc_cache, 8 + PTE_PREFETCH_NUM);
        if (r)
                goto out;
        r = mmu_topup_memory_cache_page(&vcpu->arch.mmu_page_cache, 8);
        if (r)
                goto out;
        r = mmu_topup_memory_cache(&vcpu->arch.mmu_page_header_cache,
                                   mmu_page_header_cache, 4);
out:
        return r;
}

static void mmu_free_memory_caches(struct kvm_vcpu *vcpu)
{
        mmu_free_memory_cache(&vcpu->arch.mmu_pte_list_desc_cache,
                                pte_list_desc_cache);
        mmu_free_memory_cache_page(&vcpu->arch.mmu_page_cache);
        mmu_free_memory_cache(&vcpu->arch.mmu_page_header_cache,
                                mmu_page_header_cache);
}

static void *mmu_memory_cache_alloc(struct kvm_mmu_memory_cache *mc)
{
        void *p;

        BUG_ON(!mc->nobjs);
        p = mc->objects[--mc->nobjs];
        return p;
}

static struct pte_list_desc *mmu_alloc_pte_list_desc(struct kvm_vcpu *vcpu)
{
        return mmu_memory_cache_alloc(&vcpu->arch.mmu_pte_list_desc_cache);
}

static void mmu_free_pte_list_desc(struct pte_list_desc *pte_list_desc)
{
        kmem_cache_free(pte_list_desc_cache, pte_list_desc);
}

static gfn_t kvm_mmu_page_get_gfn(struct kvm_mmu_page *sp, int index)
{
        if (!sp->role.direct)
                return sp->gfns[index];

        return sp->gfn + (index << ((sp->role.level - 1) * PT64_LEVEL_BITS));
}
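
/*
 * Editorial worked example (not in the original source): for a direct
 * shadow page, the gfn of slot @index is computed rather than stored.
 * With PT64_LEVEL_BITS == 9, a level-2 direct page with sp->gfn == 0x400
 * covers 512 entries of 512 pages each, so
 * kvm_mmu_page_get_gfn(sp, 3) == 0x400 + (3 << 9) == 0xa00.
 */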
static void kvm_mmu_page_set_gfn(struct kvm_mmu_page *sp, int index, gfn_t gfn)
{
        if (sp->role.direct)
                BUG_ON(gfn != kvm_mmu_page_get_gfn(sp, index));
        else
                sp->gfns[index] = gfn;
}

/*
 * Return the pointer to the large page information for a given gfn,
 * handling slots that are not large page aligned.
 */
static struct kvm_lpage_info *lpage_info_slot(gfn_t gfn,
                                              struct kvm_memory_slot *slot,
                                              int level)
{
        unsigned long idx;

        idx = gfn_to_index(gfn, slot->base_gfn, level);
        return &slot->arch.lpage_info[level - 2][idx];
}

static void account_shadowed(struct kvm *kvm, gfn_t gfn)
{
        struct kvm_memory_slot *slot;
        struct kvm_lpage_info *linfo;
        int i;

        slot = gfn_to_memslot(kvm, gfn);
        for (i = PT_DIRECTORY_LEVEL;
             i < PT_PAGE_TABLE_LEVEL + KVM_NR_PAGE_SIZES; ++i) {
                linfo = lpage_info_slot(gfn, slot, i);
                linfo->write_count += 1;
        }
        kvm->arch.indirect_shadow_pages++;
}

static void unaccount_shadowed(struct kvm *kvm, gfn_t gfn)
{
        struct kvm_memory_slot *slot;
        struct kvm_lpage_info *linfo;
        int i;

        slot = gfn_to_memslot(kvm, gfn);
        for (i = PT_DIRECTORY_LEVEL;
             i < PT_PAGE_TABLE_LEVEL + KVM_NR_PAGE_SIZES; ++i) {
                linfo = lpage_info_slot(gfn, slot, i);
                linfo->write_count -= 1;
                WARN_ON(linfo->write_count < 0);
        }
        kvm->arch.indirect_shadow_pages--;
}

static int has_wrprotected_page(struct kvm *kvm,
                                gfn_t gfn,
                                int level)
{
        struct kvm_memory_slot *slot;
        struct kvm_lpage_info *linfo;

        slot = gfn_to_memslot(kvm, gfn);
        if (slot) {
                linfo = lpage_info_slot(gfn, slot, level);
                return linfo->write_count;
        }

        return 1;
}

static int host_mapping_level(struct kvm *kvm, gfn_t gfn)
{
        unsigned long page_size;
        int i, ret = 0;

        page_size = kvm_host_page_size(kvm, gfn);

        for (i = PT_PAGE_TABLE_LEVEL;
             i < (PT_PAGE_TABLE_LEVEL + KVM_NR_PAGE_SIZES); ++i) {
                if (page_size >= KVM_HPAGE_SIZE(i))
                        ret = i;
                else
                        break;
        }

        return ret;
}

static struct kvm_memory_slot *
gfn_to_memslot_dirty_bitmap(struct kvm_vcpu *vcpu, gfn_t gfn,
                            bool no_dirty_log)
{
        struct kvm_memory_slot *slot;

        slot = gfn_to_memslot(vcpu->kvm, gfn);
        if (!slot || slot->flags & KVM_MEMSLOT_INVALID ||
              (no_dirty_log && slot->dirty_bitmap))
                slot = NULL;

        return slot;
}

static bool mapping_level_dirty_bitmap(struct kvm_vcpu *vcpu, gfn_t large_gfn)
{
        return !gfn_to_memslot_dirty_bitmap(vcpu, large_gfn, true);
}

static int mapping_level(struct kvm_vcpu *vcpu, gfn_t large_gfn)
{
        int host_level, level, max_level;

        host_level = host_mapping_level(vcpu->kvm, large_gfn);

        if (host_level == PT_PAGE_TABLE_LEVEL)
                return host_level;

        max_level = min(kvm_x86_ops->get_lpage_level(), host_level);

        for (level = PT_DIRECTORY_LEVEL; level <= max_level; ++level)
                if (has_wrprotected_page(vcpu->kvm, large_gfn, level))
                        break;

        return level - 1;
}

/*
 * Pte mapping structures:
 *
 * If pte_list bit zero is zero, then pte_list points directly at the spte.
 *
 * If pte_list bit zero is one, (then pte_list & ~1) points to a struct
 * pte_list_desc containing more mappings.
 *
 * Returns the number of pte entries before the spte was added, or zero
 * if the spte was not added.
 */
static int pte_list_add(struct kvm_vcpu *vcpu, u64 *spte,
                        unsigned long *pte_list)
{
        struct pte_list_desc *desc;
        int i, count = 0;

        if (!*pte_list) {
                rmap_printk("pte_list_add: %p %llx 0->1\n", spte, *spte);
                *pte_list = (unsigned long)spte;
        } else if (!(*pte_list & 1)) {
                rmap_printk("pte_list_add: %p %llx 1->many\n", spte, *spte);
                desc = mmu_alloc_pte_list_desc(vcpu);
                desc->sptes[0] = (u64 *)*pte_list;
                desc->sptes[1] = spte;
                *pte_list = (unsigned long)desc | 1;
                ++count;
        } else {
                rmap_printk("pte_list_add: %p %llx many->many\n", spte, *spte);
                desc = (struct pte_list_desc *)(*pte_list & ~1ul);
                while (desc->sptes[PTE_LIST_EXT-1] && desc->more) {
                        desc = desc->more;
                        count += PTE_LIST_EXT;
                }
                if (desc->sptes[PTE_LIST_EXT-1]) {
                        desc->more = mmu_alloc_pte_list_desc(vcpu);
                        desc = desc->more;
                }
                for (i = 0; desc->sptes[i]; ++i)
                        ++count;
                desc->sptes[i] = spte;
        }
        return count;
}
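
/*
 * Editorial example (not in the original source): the low bit of the
 * pte_list word tags which representation is in use. With
 * PTE_LIST_EXT == 3, the three states look like:
 *
 *        empty: pte_list == 0
 *        one:   pte_list == (unsigned long)sptep       (bit 0 clear)
 *        many:  pte_list == (unsigned long)desc | 1    (bit 0 set),
 *               desc->sptes[0..2] hold sptes, desc->more chains onward
 *
 * This works because sptes are at least 8-byte aligned, so bit 0 of a
 * real spte pointer is always zero.
 */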
static void
pte_list_desc_remove_entry(unsigned long *pte_list, struct pte_list_desc *desc,
                           int i, struct pte_list_desc *prev_desc)
{
        int j;

        for (j = PTE_LIST_EXT - 1; !desc->sptes[j] && j > i; --j)
                ;
        desc->sptes[i] = desc->sptes[j];
        desc->sptes[j] = NULL;
        if (j != 0)
                return;
        if (!prev_desc && !desc->more)
                *pte_list = (unsigned long)desc->sptes[0];
        else
                if (prev_desc)
                        prev_desc->more = desc->more;
                else
                        *pte_list = (unsigned long)desc->more | 1;
        mmu_free_pte_list_desc(desc);
}

static void pte_list_remove(u64 *spte, unsigned long *pte_list)
{
        struct pte_list_desc *desc;
        struct pte_list_desc *prev_desc;
        int i;

        if (!*pte_list) {
                printk(KERN_ERR "pte_list_remove: %p 0->BUG\n", spte);
                BUG();
        } else if (!(*pte_list & 1)) {
                rmap_printk("pte_list_remove: %p 1->0\n", spte);
                if ((u64 *)*pte_list != spte) {
                        printk(KERN_ERR "pte_list_remove: %p 1->BUG\n", spte);
                        BUG();
                }
                *pte_list = 0;
        } else {
                rmap_printk("pte_list_remove: %p many->many\n", spte);
                desc = (struct pte_list_desc *)(*pte_list & ~1ul);
                prev_desc = NULL;
                while (desc) {
                        for (i = 0; i < PTE_LIST_EXT && desc->sptes[i]; ++i)
                                if (desc->sptes[i] == spte) {
                                        pte_list_desc_remove_entry(pte_list,
                                                               desc, i,
                                                               prev_desc);
                                        return;
                                }
                        prev_desc = desc;
                        desc = desc->more;
                }
                pr_err("pte_list_remove: %p many->many\n", spte);
                BUG();
        }
}

typedef void (*pte_list_walk_fn) (u64 *spte);
static void pte_list_walk(unsigned long *pte_list, pte_list_walk_fn fn)
{
        struct pte_list_desc *desc;
        int i;

        if (!*pte_list)
                return;

        if (!(*pte_list & 1))
                return fn((u64 *)*pte_list);

        desc = (struct pte_list_desc *)(*pte_list & ~1ul);
        while (desc) {
                for (i = 0; i < PTE_LIST_EXT && desc->sptes[i]; ++i)
                        fn(desc->sptes[i]);
                desc = desc->more;
        }
}
static unsigned long *__gfn_to_rmap(gfn_t gfn, int level,
                                    struct kvm_memory_slot *slot)
{
        unsigned long idx;

        idx = gfn_to_index(gfn, slot->base_gfn, level);
        return &slot->arch.rmap[level - PT_PAGE_TABLE_LEVEL][idx];
}

/*
 * Take gfn and return the reverse mapping to it.
 */
static unsigned long *gfn_to_rmap(struct kvm *kvm, gfn_t gfn, int level)
{
        struct kvm_memory_slot *slot;

        slot = gfn_to_memslot(kvm, gfn);
        return __gfn_to_rmap(gfn, level, slot);
}

static bool rmap_can_add(struct kvm_vcpu *vcpu)
{
        struct kvm_mmu_memory_cache *cache;

        cache = &vcpu->arch.mmu_pte_list_desc_cache;
        return mmu_memory_cache_free_objects(cache);
}

static int rmap_add(struct kvm_vcpu *vcpu, u64 *spte, gfn_t gfn)
{
        struct kvm_mmu_page *sp;
        unsigned long *rmapp;

        sp = page_header(__pa(spte));
        kvm_mmu_page_set_gfn(sp, spte - sp->spt, gfn);
        rmapp = gfn_to_rmap(vcpu->kvm, gfn, sp->role.level);
        return pte_list_add(vcpu, spte, rmapp);
}

static void rmap_remove(struct kvm *kvm, u64 *spte)
{
        struct kvm_mmu_page *sp;
        gfn_t gfn;
        unsigned long *rmapp;

        sp = page_header(__pa(spte));
        gfn = kvm_mmu_page_get_gfn(sp, spte - sp->spt);
        rmapp = gfn_to_rmap(kvm, gfn, sp->role.level);
        pte_list_remove(spte, rmapp);
}

/*
 * Used by the following functions to iterate through the sptes linked by a
 * rmap. All fields are private and not assumed to be used outside.
 */
struct rmap_iterator {
        /* private fields */
        struct pte_list_desc *desc;     /* holds the sptep if not NULL */
        int pos;                        /* index of the sptep */
};

/*
 * Iteration must be started by this function. This should also be used after
 * removing/dropping sptes from the rmap link because in such cases the
 * information in the iterator may not be valid.
 *
 * Returns sptep if found, NULL otherwise.
 */
static u64 *rmap_get_first(unsigned long rmap, struct rmap_iterator *iter)
{
        if (!rmap)
                return NULL;

        if (!(rmap & 1)) {
                iter->desc = NULL;
                return (u64 *)rmap;
        }

        iter->desc = (struct pte_list_desc *)(rmap & ~1ul);
        iter->pos = 0;
        return iter->desc->sptes[iter->pos];
}

/*
 * Must be used with a valid iterator: e.g. after rmap_get_first().
 *
 * Returns sptep if found, NULL otherwise.
 */
static u64 *rmap_get_next(struct rmap_iterator *iter)
{
        if (iter->desc) {
                if (iter->pos < PTE_LIST_EXT - 1) {
                        u64 *sptep;

                        ++iter->pos;
                        sptep = iter->desc->sptes[iter->pos];
                        if (sptep)
                                return sptep;
                }

                iter->desc = iter->desc->more;

                if (iter->desc) {
                        iter->pos = 0;
                        /* desc->sptes[0] cannot be NULL */
                        return iter->desc->sptes[iter->pos];
                }
        }

        return NULL;
}
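
/*
 * Editorial usage sketch (not in the original source): a typical walk,
 * as __rmap_write_protect() below does, looks like
 *
 *        u64 *sptep;
 *        struct rmap_iterator iter;
 *
 *        for (sptep = rmap_get_first(*rmapp, &iter); sptep;
 *             sptep = rmap_get_next(&iter))
 *                ...use *sptep...
 *
 * Any walk that drops an spte must restart from rmap_get_first(), since
 * the removal may have rewritten the descriptors under the iterator.
 */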
static void drop_spte(struct kvm *kvm, u64 *sptep)
{
        if (mmu_spte_clear_track_bits(sptep))
                rmap_remove(kvm, sptep);
}

static bool __drop_large_spte(struct kvm *kvm, u64 *sptep)
{
        if (is_large_pte(*sptep)) {
                WARN_ON(page_header(__pa(sptep))->role.level ==
                        PT_PAGE_TABLE_LEVEL);
                drop_spte(kvm, sptep);
                --kvm->stat.lpages;
                return true;
        }

        return false;
}

static void drop_large_spte(struct kvm_vcpu *vcpu, u64 *sptep)
{
        if (__drop_large_spte(vcpu->kvm, sptep))
                kvm_flush_remote_tlbs(vcpu->kvm);
}

/*
 * Write-protect the specified @sptep; @pt_protect indicates whether the
 * write-protection is caused by protecting a shadow page table.
 * @flush indicates whether the tlb needs to be flushed.
 *
 * Note: write protection differs between dirty logging and spte
 * protection:
 * - for dirty logging, the spte can be set to writable at any time if
 *   its dirty bitmap is properly set.
 * - for spte protection, the spte can be made writable only after
 *   unsync-ing the shadow page.
 *
 * Return true if the spte is dropped.
 */
static bool
spte_write_protect(struct kvm *kvm, u64 *sptep, bool *flush, bool pt_protect)
{
        u64 spte = *sptep;

        if (!is_writable_pte(spte) &&
              !(pt_protect && spte_is_locklessly_modifiable(spte)))
                return false;

        rmap_printk("rmap_write_protect: spte %p %llx\n", sptep, *sptep);

        if (__drop_large_spte(kvm, sptep)) {
                *flush |= true;
                return true;
        }

        if (pt_protect)
                spte &= ~SPTE_MMU_WRITEABLE;
        spte = spte & ~PT_WRITABLE_MASK;

        *flush |= mmu_spte_update(sptep, spte);
        return false;
}
static bool __rmap_write_protect(struct kvm *kvm, unsigned long *rmapp,
                                 bool pt_protect)
{
        u64 *sptep;
        struct rmap_iterator iter;
        bool flush = false;

        for (sptep = rmap_get_first(*rmapp, &iter); sptep;) {
                BUG_ON(!(*sptep & PT_PRESENT_MASK));
                if (spte_write_protect(kvm, sptep, &flush, pt_protect)) {
                        sptep = rmap_get_first(*rmapp, &iter);
                        continue;
                }

                sptep = rmap_get_next(&iter);
        }

        return flush;
}

/**
 * kvm_mmu_write_protect_pt_masked - write protect selected PT level pages
 * @kvm: kvm instance
 * @slot: slot to protect
 * @gfn_offset: start of the BITS_PER_LONG pages we care about
 * @mask: indicates which pages we should protect
 *
 * Used when we do not need to care about huge page mappings: e.g. during dirty
 * logging we do not have any such mappings.
 */
void kvm_mmu_write_protect_pt_masked(struct kvm *kvm,
                                     struct kvm_memory_slot *slot,
                                     gfn_t gfn_offset, unsigned long mask)
{
        unsigned long *rmapp;

        while (mask) {
                rmapp = __gfn_to_rmap(slot->base_gfn + gfn_offset + __ffs(mask),
                                      PT_PAGE_TABLE_LEVEL, slot);
                __rmap_write_protect(kvm, rmapp, false);

                /* clear the first set bit */
                mask &= mask - 1;
        }
}
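
/*
 * Editorial worked example (not in the original source): the loop above
 * visits exactly the set bits of @mask. With mask == 0b101000,
 * __ffs(mask) == 3 protects gfn_offset + 3, then mask &= mask - 1 leaves
 * 0b100000, protecting gfn_offset + 5 on the next pass, after which the
 * mask is zero and the loop stops.
 */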
static bool rmap_write_protect(struct kvm *kvm, u64 gfn)
{
        struct kvm_memory_slot *slot;
        unsigned long *rmapp;
        int i;
        bool write_protected = false;

        slot = gfn_to_memslot(kvm, gfn);

        for (i = PT_PAGE_TABLE_LEVEL;
             i < PT_PAGE_TABLE_LEVEL + KVM_NR_PAGE_SIZES; ++i) {
                rmapp = __gfn_to_rmap(gfn, i, slot);
                write_protected |= __rmap_write_protect(kvm, rmapp, true);
        }

        return write_protected;
}

static int kvm_unmap_rmapp(struct kvm *kvm, unsigned long *rmapp,
                           struct kvm_memory_slot *slot, unsigned long data)
{
        u64 *sptep;
        struct rmap_iterator iter;
        int need_tlb_flush = 0;

        while ((sptep = rmap_get_first(*rmapp, &iter))) {
                BUG_ON(!(*sptep & PT_PRESENT_MASK));
                rmap_printk("kvm_rmap_unmap_hva: spte %p %llx\n", sptep, *sptep);

                drop_spte(kvm, sptep);
                need_tlb_flush = 1;
        }

        return need_tlb_flush;
}

static int kvm_set_pte_rmapp(struct kvm *kvm, unsigned long *rmapp,
                             struct kvm_memory_slot *slot, unsigned long data)
{
        u64 *sptep;
        struct rmap_iterator iter;
        int need_flush = 0;
        u64 new_spte;
        pte_t *ptep = (pte_t *)data;
        pfn_t new_pfn;

        WARN_ON(pte_huge(*ptep));
        new_pfn = pte_pfn(*ptep);

        for (sptep = rmap_get_first(*rmapp, &iter); sptep;) {
                BUG_ON(!is_shadow_present_pte(*sptep));
                rmap_printk("kvm_set_pte_rmapp: spte %p %llx\n", sptep, *sptep);

                need_flush = 1;

                if (pte_write(*ptep)) {
                        drop_spte(kvm, sptep);
                        sptep = rmap_get_first(*rmapp, &iter);
                } else {
                        new_spte = *sptep & ~PT64_BASE_ADDR_MASK;
                        new_spte |= (u64)new_pfn << PAGE_SHIFT;

                        new_spte &= ~PT_WRITABLE_MASK;
                        new_spte &= ~SPTE_HOST_WRITEABLE;
                        new_spte &= ~shadow_accessed_mask;

                        mmu_spte_clear_track_bits(sptep);
                        mmu_spte_set(sptep, new_spte);
                        sptep = rmap_get_next(&iter);
                }
        }

        if (need_flush)
                kvm_flush_remote_tlbs(kvm);

        return 0;
}
static int kvm_handle_hva_range(struct kvm *kvm,
                                unsigned long start,
                                unsigned long end,
                                unsigned long data,
                                int (*handler)(struct kvm *kvm,
                                               unsigned long *rmapp,
                                               struct kvm_memory_slot *slot,
                                               unsigned long data))
{
        int j;
        int ret = 0;
        struct kvm_memslots *slots;
        struct kvm_memory_slot *memslot;

        slots = kvm_memslots(kvm);

        kvm_for_each_memslot(memslot, slots) {
                unsigned long hva_start, hva_end;
                gfn_t gfn_start, gfn_end;

                hva_start = max(start, memslot->userspace_addr);
                hva_end = min(end, memslot->userspace_addr +
                                        (memslot->npages << PAGE_SHIFT));
                if (hva_start >= hva_end)
                        continue;
                /*
                 * {gfn(page) | page intersects with [hva_start, hva_end)} =
                 * {gfn_start, gfn_start+1, ..., gfn_end-1}.
                 */
                gfn_start = hva_to_gfn_memslot(hva_start, memslot);
                gfn_end = hva_to_gfn_memslot(hva_end + PAGE_SIZE - 1, memslot);

                for (j = PT_PAGE_TABLE_LEVEL;
                     j < PT_PAGE_TABLE_LEVEL + KVM_NR_PAGE_SIZES; ++j) {
                        unsigned long idx, idx_end;
                        unsigned long *rmapp;

                        /*
                         * {idx(page_j) | page_j intersects with
                         *  [hva_start, hva_end)} = {idx, idx+1, ..., idx_end}.
                         */
                        idx = gfn_to_index(gfn_start, memslot->base_gfn, j);
                        idx_end = gfn_to_index(gfn_end - 1, memslot->base_gfn, j);

                        rmapp = __gfn_to_rmap(gfn_start, j, memslot);

                        for (; idx <= idx_end; ++idx)
                                ret |= handler(kvm, rmapp++, memslot, data);
                }
        }

        return ret;
}
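
/*
 * Editorial worked example (not in the original source): assuming 4 KiB
 * pages, a memslot with userspace_addr == 0x10000000, base_gfn == 0x100
 * and npages == 0x100, and a call covering hvas [0x10003000, 0x10005000):
 * the clamped range is unchanged, gfn_start == 0x103, gfn_end == 0x105,
 * and at the 4 KiB level idx runs over the two rmap slots for gfns 0x103
 * and 0x104.
 */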
static int kvm_handle_hva(struct kvm *kvm, unsigned long hva,
                          unsigned long data,
                          int (*handler)(struct kvm *kvm, unsigned long *rmapp,
                                         struct kvm_memory_slot *slot,
                                         unsigned long data))
{
        return kvm_handle_hva_range(kvm, hva, hva + 1, data, handler);
}

int kvm_unmap_hva(struct kvm *kvm, unsigned long hva)
{
        return kvm_handle_hva(kvm, hva, 0, kvm_unmap_rmapp);
}

int kvm_unmap_hva_range(struct kvm *kvm, unsigned long start, unsigned long end)
{
        return kvm_handle_hva_range(kvm, start, end, 0, kvm_unmap_rmapp);
}

void kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte)
{
        kvm_handle_hva(kvm, hva, (unsigned long)&pte, kvm_set_pte_rmapp);
}

static int kvm_age_rmapp(struct kvm *kvm, unsigned long *rmapp,
                         struct kvm_memory_slot *slot, unsigned long data)
{
        u64 *sptep;
        struct rmap_iterator uninitialized_var(iter);
        int young = 0;

        /*
         * In the absence of EPT Access and Dirty bit support, emulate the
         * accessed bit for EPT by checking whether this page has an EPT
         * mapping, and clearing it if it does. On the next access, a new
         * EPT mapping will be established.
         * This has some overhead, but not as much as the cost of swapping
         * out actively used pages or breaking up actively used hugepages.
         */
        if (!shadow_accessed_mask) {
                young = kvm_unmap_rmapp(kvm, rmapp, slot, data);
                goto out;
        }

        for (sptep = rmap_get_first(*rmapp, &iter); sptep;
             sptep = rmap_get_next(&iter)) {
                BUG_ON(!is_shadow_present_pte(*sptep));

                if (*sptep & shadow_accessed_mask) {
                        young = 1;
                        clear_bit((ffs(shadow_accessed_mask) - 1),
                                 (unsigned long *)sptep);
                }
        }
out:
        /* @data has the hva passed to kvm_age_hva(). */
        trace_kvm_age_page(data, slot, young);
        return young;
}

static int kvm_test_age_rmapp(struct kvm *kvm, unsigned long *rmapp,
                              struct kvm_memory_slot *slot, unsigned long data)
{
        u64 *sptep;
        struct rmap_iterator iter;
        int young = 0;

        /*
         * If there's no access bit in the secondary pte set by the
         * hardware, it's up to gup-fast/gup to set the access bit in
         * the primary pte or in the page structure.
         */
        if (!shadow_accessed_mask)
                goto out;

        for (sptep = rmap_get_first(*rmapp, &iter); sptep;
             sptep = rmap_get_next(&iter)) {
                BUG_ON(!is_shadow_present_pte(*sptep));

                if (*sptep & shadow_accessed_mask) {
                        young = 1;
                        break;
                }
        }
out:
        return young;
}

#define RMAP_RECYCLE_THRESHOLD 1000

static void rmap_recycle(struct kvm_vcpu *vcpu, u64 *spte, gfn_t gfn)
{
        unsigned long *rmapp;
        struct kvm_mmu_page *sp;

        sp = page_header(__pa(spte));

        rmapp = gfn_to_rmap(vcpu->kvm, gfn, sp->role.level);

        kvm_unmap_rmapp(vcpu->kvm, rmapp, NULL, 0);
        kvm_flush_remote_tlbs(vcpu->kvm);
}

int kvm_age_hva(struct kvm *kvm, unsigned long hva)
{
        return kvm_handle_hva(kvm, hva, hva, kvm_age_rmapp);
}

int kvm_test_age_hva(struct kvm *kvm, unsigned long hva)
{
        return kvm_handle_hva(kvm, hva, 0, kvm_test_age_rmapp);
}
  1186. #ifdef MMU_DEBUG
  1187. static int is_empty_shadow_page(u64 *spt)
  1188. {
  1189. u64 *pos;
  1190. u64 *end;
  1191. for (pos = spt, end = pos + PAGE_SIZE / sizeof(u64); pos != end; pos++)
  1192. if (is_shadow_present_pte(*pos)) {
  1193. printk(KERN_ERR "%s: %p %llx\n", __func__,
  1194. pos, *pos);
  1195. return 0;
  1196. }
  1197. return 1;
  1198. }
  1199. #endif
  1200. /*
  1201. * This value is the sum of all of the kvm instances's
  1202. * kvm->arch.n_used_mmu_pages values. We need a global,
  1203. * aggregate version in order to make the slab shrinker
  1204. * faster
  1205. */
  1206. static inline void kvm_mod_used_mmu_pages(struct kvm *kvm, int nr)
  1207. {
  1208. kvm->arch.n_used_mmu_pages += nr;
  1209. percpu_counter_add(&kvm_total_used_mmu_pages, nr);
  1210. }
  1211. static void kvm_mmu_free_page(struct kvm_mmu_page *sp)
  1212. {
  1213. ASSERT(is_empty_shadow_page(sp->spt));
  1214. hlist_del(&sp->hash_link);
  1215. list_del(&sp->link);
  1216. free_page((unsigned long)sp->spt);
  1217. if (!sp->role.direct)
  1218. free_page((unsigned long)sp->gfns);
  1219. kmem_cache_free(mmu_page_header_cache, sp);
  1220. }
  1221. static unsigned kvm_page_table_hashfn(gfn_t gfn)
  1222. {
  1223. return gfn & ((1 << KVM_MMU_HASH_SHIFT) - 1);
  1224. }
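
/*
 * The hash is just the low KVM_MMU_HASH_SHIFT bits of the gfn.  As an
 * illustration, assuming KVM_MMU_HASH_SHIFT is 10 (a 1024-bucket
 * table), gfn 0x12345 lands in bucket 0x345; gfns that differ only
 * above bit 9 collide and are disambiguated by the gfn/role checks in
 * the for_each_gfn_sp() walkers below.
 */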
static void mmu_page_add_parent_pte(struct kvm_vcpu *vcpu,
				    struct kvm_mmu_page *sp, u64 *parent_pte)
{
	if (!parent_pte)
		return;

	pte_list_add(vcpu, parent_pte, &sp->parent_ptes);
}

static void mmu_page_remove_parent_pte(struct kvm_mmu_page *sp,
				       u64 *parent_pte)
{
	pte_list_remove(parent_pte, &sp->parent_ptes);
}

static void drop_parent_pte(struct kvm_mmu_page *sp,
			    u64 *parent_pte)
{
	mmu_page_remove_parent_pte(sp, parent_pte);
	mmu_spte_clear_no_track(parent_pte);
}

static void make_mmu_pages_available(struct kvm_vcpu *vcpu);

static struct kvm_mmu_page *kvm_mmu_alloc_page(struct kvm_vcpu *vcpu,
					       u64 *parent_pte, int direct)
{
	struct kvm_mmu_page *sp;

	make_mmu_pages_available(vcpu);

	sp = mmu_memory_cache_alloc(&vcpu->arch.mmu_page_header_cache);
	sp->spt = mmu_memory_cache_alloc(&vcpu->arch.mmu_page_cache);
	if (!direct)
		sp->gfns = mmu_memory_cache_alloc(&vcpu->arch.mmu_page_cache);
	set_page_private(virt_to_page(sp->spt), (unsigned long)sp);
	list_add(&sp->link, &vcpu->kvm->arch.active_mmu_pages);
	sp->parent_ptes = 0;
	mmu_page_add_parent_pte(vcpu, sp, parent_pte);
	kvm_mod_used_mmu_pages(vcpu->kvm, +1);
	return sp;
}

static void mark_unsync(u64 *spte);
static void kvm_mmu_mark_parents_unsync(struct kvm_mmu_page *sp)
{
	pte_list_walk(&sp->parent_ptes, mark_unsync);
}

static void mark_unsync(u64 *spte)
{
	struct kvm_mmu_page *sp;
	unsigned int index;

	sp = page_header(__pa(spte));
	index = spte - sp->spt;
	if (__test_and_set_bit(index, sp->unsync_child_bitmap))
		return;
	if (sp->unsync_children++)
		return;
	kvm_mmu_mark_parents_unsync(sp);
}

static int nonpaging_sync_page(struct kvm_vcpu *vcpu,
			       struct kvm_mmu_page *sp)
{
	return 1;
}

static void nonpaging_invlpg(struct kvm_vcpu *vcpu, gva_t gva)
{
}

static void nonpaging_update_pte(struct kvm_vcpu *vcpu,
				 struct kvm_mmu_page *sp, u64 *spte,
				 const void *pte)
{
	WARN_ON(1);
}

#define KVM_PAGE_ARRAY_NR 16

struct kvm_mmu_pages {
	struct mmu_page_and_offset {
		struct kvm_mmu_page *sp;
		unsigned int idx;
	} page[KVM_PAGE_ARRAY_NR];
	unsigned int nr;
};

static int mmu_pages_add(struct kvm_mmu_pages *pvec, struct kvm_mmu_page *sp,
			 int idx)
{
	int i;

	if (sp->unsync)
		for (i = 0; i < pvec->nr; i++)
			if (pvec->page[i].sp == sp)
				return 0;

	pvec->page[pvec->nr].sp = sp;
	pvec->page[pvec->nr].idx = idx;
	pvec->nr++;
	return (pvec->nr == KVM_PAGE_ARRAY_NR);
}

static int __mmu_unsync_walk(struct kvm_mmu_page *sp,
			     struct kvm_mmu_pages *pvec)
{
	int i, ret, nr_unsync_leaf = 0;

	for_each_set_bit(i, sp->unsync_child_bitmap, 512) {
		struct kvm_mmu_page *child;
		u64 ent = sp->spt[i];

		if (!is_shadow_present_pte(ent) || is_large_pte(ent))
			goto clear_child_bitmap;

		child = page_header(ent & PT64_BASE_ADDR_MASK);

		if (child->unsync_children) {
			if (mmu_pages_add(pvec, child, i))
				return -ENOSPC;

			ret = __mmu_unsync_walk(child, pvec);
			if (!ret)
				goto clear_child_bitmap;
			else if (ret > 0)
				nr_unsync_leaf += ret;
			else
				return ret;
		} else if (child->unsync) {
			nr_unsync_leaf++;
			if (mmu_pages_add(pvec, child, i))
				return -ENOSPC;
		} else
			goto clear_child_bitmap;

		continue;

clear_child_bitmap:
		__clear_bit(i, sp->unsync_child_bitmap);
		sp->unsync_children--;
		WARN_ON((int)sp->unsync_children < 0);
	}

	return nr_unsync_leaf;
}

static int mmu_unsync_walk(struct kvm_mmu_page *sp,
			   struct kvm_mmu_pages *pvec)
{
	if (!sp->unsync_children)
		return 0;

	mmu_pages_add(pvec, sp, 0);
	return __mmu_unsync_walk(sp, pvec);
}

static void kvm_unlink_unsync_page(struct kvm *kvm, struct kvm_mmu_page *sp)
{
	WARN_ON(!sp->unsync);
	trace_kvm_mmu_sync_page(sp);
	sp->unsync = 0;
	--kvm->stat.mmu_unsync;
}

static int kvm_mmu_prepare_zap_page(struct kvm *kvm, struct kvm_mmu_page *sp,
				    struct list_head *invalid_list);
static void kvm_mmu_commit_zap_page(struct kvm *kvm,
				    struct list_head *invalid_list);

#define for_each_gfn_sp(_kvm, _sp, _gfn)				\
	hlist_for_each_entry(_sp,					\
	  &(_kvm)->arch.mmu_page_hash[kvm_page_table_hashfn(_gfn)], hash_link) \
		if ((_sp)->gfn != (_gfn)) {} else

#define for_each_gfn_indirect_valid_sp(_kvm, _sp, _gfn)			\
	for_each_gfn_sp(_kvm, _sp, _gfn)				\
		if ((_sp)->role.direct || (_sp)->role.invalid) {} else

/* @sp->gfn should be write-protected at the call site */
static int __kvm_sync_page(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
			   struct list_head *invalid_list, bool clear_unsync)
{
	if (sp->role.cr4_pae != !!is_pae(vcpu)) {
		kvm_mmu_prepare_zap_page(vcpu->kvm, sp, invalid_list);
		return 1;
	}

	if (clear_unsync)
		kvm_unlink_unsync_page(vcpu->kvm, sp);

	if (vcpu->arch.mmu.sync_page(vcpu, sp)) {
		kvm_mmu_prepare_zap_page(vcpu->kvm, sp, invalid_list);
		return 1;
	}

	kvm_mmu_flush_tlb(vcpu);
	return 0;
}

static int kvm_sync_page_transient(struct kvm_vcpu *vcpu,
				   struct kvm_mmu_page *sp)
{
	LIST_HEAD(invalid_list);
	int ret;

	ret = __kvm_sync_page(vcpu, sp, &invalid_list, false);
	if (ret)
		kvm_mmu_commit_zap_page(vcpu->kvm, &invalid_list);

	return ret;
}

#ifdef CONFIG_KVM_MMU_AUDIT
#include "mmu_audit.c"
#else
static void kvm_mmu_audit(struct kvm_vcpu *vcpu, int point) { }
static void mmu_audit_disable(void) { }
#endif

static int kvm_sync_page(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
			 struct list_head *invalid_list)
{
	return __kvm_sync_page(vcpu, sp, invalid_list, true);
}

/* @gfn should be write-protected at the call site */
static void kvm_sync_pages(struct kvm_vcpu *vcpu, gfn_t gfn)
{
	struct kvm_mmu_page *s;
	LIST_HEAD(invalid_list);
	bool flush = false;

	for_each_gfn_indirect_valid_sp(vcpu->kvm, s, gfn) {
		if (!s->unsync)
			continue;

		WARN_ON(s->role.level != PT_PAGE_TABLE_LEVEL);
		kvm_unlink_unsync_page(vcpu->kvm, s);
		if ((s->role.cr4_pae != !!is_pae(vcpu)) ||
		    (vcpu->arch.mmu.sync_page(vcpu, s))) {
			kvm_mmu_prepare_zap_page(vcpu->kvm, s, &invalid_list);
			continue;
		}
		flush = true;
	}

	kvm_mmu_commit_zap_page(vcpu->kvm, &invalid_list);
	if (flush)
		kvm_mmu_flush_tlb(vcpu);
}

struct mmu_page_path {
	struct kvm_mmu_page *parent[PT64_ROOT_LEVEL-1];
	unsigned int idx[PT64_ROOT_LEVEL-1];
};

#define for_each_sp(pvec, sp, parents, i)				\
	for (i = mmu_pages_next(&pvec, &parents, -1),			\
		sp = pvec.page[i].sp;					\
		i < pvec.nr && ({ sp = pvec.page[i].sp; 1;});		\
		i = mmu_pages_next(&pvec, &parents, i))

static int mmu_pages_next(struct kvm_mmu_pages *pvec,
			  struct mmu_page_path *parents,
			  int i)
{
	int n;

	for (n = i+1; n < pvec->nr; n++) {
		struct kvm_mmu_page *sp = pvec->page[n].sp;

		if (sp->role.level == PT_PAGE_TABLE_LEVEL) {
			parents->idx[0] = pvec->page[n].idx;
			return n;
		}

		parents->parent[sp->role.level-2] = sp;
		parents->idx[sp->role.level-1] = pvec->page[n].idx;
	}

	return n;
}

static void mmu_pages_clear_parents(struct mmu_page_path *parents)
{
	struct kvm_mmu_page *sp;
	unsigned int level = 0;

	do {
		unsigned int idx = parents->idx[level];

		sp = parents->parent[level];
		if (!sp)
			return;

		--sp->unsync_children;
		WARN_ON((int)sp->unsync_children < 0);
		__clear_bit(idx, sp->unsync_child_bitmap);
		level++;
	} while (level < PT64_ROOT_LEVEL-1 && !sp->unsync_children);
}

static void kvm_mmu_pages_init(struct kvm_mmu_page *parent,
			       struct mmu_page_path *parents,
			       struct kvm_mmu_pages *pvec)
{
	parents->parent[parent->role.level-1] = NULL;
	pvec->nr = 0;
}

static void mmu_sync_children(struct kvm_vcpu *vcpu,
			      struct kvm_mmu_page *parent)
{
	int i;
	struct kvm_mmu_page *sp;
	struct mmu_page_path parents;
	struct kvm_mmu_pages pages;
	LIST_HEAD(invalid_list);

	kvm_mmu_pages_init(parent, &parents, &pages);
	while (mmu_unsync_walk(parent, &pages)) {
		bool protected = false;

		for_each_sp(pages, sp, parents, i)
			protected |= rmap_write_protect(vcpu->kvm, sp->gfn);

		if (protected)
			kvm_flush_remote_tlbs(vcpu->kvm);

		for_each_sp(pages, sp, parents, i) {
			kvm_sync_page(vcpu, sp, &invalid_list);
			mmu_pages_clear_parents(&parents);
		}
		kvm_mmu_commit_zap_page(vcpu->kvm, &invalid_list);
		cond_resched_lock(&vcpu->kvm->mmu_lock);
		kvm_mmu_pages_init(parent, &parents, &pages);
	}
}

static void init_shadow_page_table(struct kvm_mmu_page *sp)
{
	int i;

	for (i = 0; i < PT64_ENT_PER_PAGE; ++i)
		sp->spt[i] = 0ull;
}

static void __clear_sp_write_flooding_count(struct kvm_mmu_page *sp)
{
	sp->write_flooding_count = 0;
}

static void clear_sp_write_flooding_count(u64 *spte)
{
	struct kvm_mmu_page *sp = page_header(__pa(spte));

	__clear_sp_write_flooding_count(sp);
}

static struct kvm_mmu_page *kvm_mmu_get_page(struct kvm_vcpu *vcpu,
					     gfn_t gfn,
					     gva_t gaddr,
					     unsigned level,
					     int direct,
					     unsigned access,
					     u64 *parent_pte)
{
	union kvm_mmu_page_role role;
	unsigned quadrant;
	struct kvm_mmu_page *sp;
	bool need_sync = false;

	role = vcpu->arch.mmu.base_role;
	role.level = level;
	role.direct = direct;
	if (role.direct)
		role.cr4_pae = 0;
	role.access = access;
	if (!vcpu->arch.mmu.direct_map
	    && vcpu->arch.mmu.root_level <= PT32_ROOT_LEVEL) {
		quadrant = gaddr >> (PAGE_SHIFT + (PT64_PT_BITS * level));
		quadrant &= (1 << ((PT32_PT_BITS - PT64_PT_BITS) * level)) - 1;
		role.quadrant = quadrant;
	}
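
	/*
	 * The quadrant disambiguates shadow pages when a 32-bit guest
	 * table is shadowed by narrower 64-bit-format tables: a guest
	 * table with 1024 entries (PT32_PT_BITS == 10) needs
	 * 2^(PT32_PT_BITS - PT64_PT_BITS) shadow tables per level.  For
	 * example, at level 2 the mask above keeps two bits, so the four
	 * shadow pages covering one 4GB guest page directory get
	 * quadrants 0..3.
	 */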
	for_each_gfn_sp(vcpu->kvm, sp, gfn) {
		if (!need_sync && sp->unsync)
			need_sync = true;

		if (sp->role.word != role.word)
			continue;

		if (sp->unsync && kvm_sync_page_transient(vcpu, sp))
			break;

		mmu_page_add_parent_pte(vcpu, sp, parent_pte);
		if (sp->unsync_children) {
			kvm_make_request(KVM_REQ_MMU_SYNC, vcpu);
			kvm_mmu_mark_parents_unsync(sp);
		} else if (sp->unsync)
			kvm_mmu_mark_parents_unsync(sp);

		__clear_sp_write_flooding_count(sp);
		trace_kvm_mmu_get_page(sp, false);
		return sp;
	}

	++vcpu->kvm->stat.mmu_cache_miss;

	sp = kvm_mmu_alloc_page(vcpu, parent_pte, direct);
	if (!sp)
		return sp;

	sp->gfn = gfn;
	sp->role = role;
	hlist_add_head(&sp->hash_link,
		&vcpu->kvm->arch.mmu_page_hash[kvm_page_table_hashfn(gfn)]);
	if (!direct) {
		if (rmap_write_protect(vcpu->kvm, gfn))
			kvm_flush_remote_tlbs(vcpu->kvm);
		if (level > PT_PAGE_TABLE_LEVEL && need_sync)
			kvm_sync_pages(vcpu, gfn);

		account_shadowed(vcpu->kvm, gfn);
	}
	init_shadow_page_table(sp);
	trace_kvm_mmu_get_page(sp, true);
	return sp;
}

static void shadow_walk_init(struct kvm_shadow_walk_iterator *iterator,
			     struct kvm_vcpu *vcpu, u64 addr)
{
	iterator->addr = addr;
	iterator->shadow_addr = vcpu->arch.mmu.root_hpa;
	iterator->level = vcpu->arch.mmu.shadow_root_level;

	if (iterator->level == PT64_ROOT_LEVEL &&
	    vcpu->arch.mmu.root_level < PT64_ROOT_LEVEL &&
	    !vcpu->arch.mmu.direct_map)
		--iterator->level;

	if (iterator->level == PT32E_ROOT_LEVEL) {
		iterator->shadow_addr
			= vcpu->arch.mmu.pae_root[(addr >> 30) & 3];
		iterator->shadow_addr &= PT64_BASE_ADDR_MASK;
		--iterator->level;
		if (!iterator->shadow_addr)
			iterator->level = 0;
	}
}
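
/*
 * In the PAE case above, (addr >> 30) & 3 picks one of the four 1GB
 * regions covered by the pae_root PDPTEs; e.g. addr 0xC0000000 selects
 * pae_root[3].  The walk then resumes one level down, at the page
 * directory that root points to.
 */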
static bool shadow_walk_okay(struct kvm_shadow_walk_iterator *iterator)
{
	if (iterator->level < PT_PAGE_TABLE_LEVEL)
		return false;

	iterator->index = SHADOW_PT_INDEX(iterator->addr, iterator->level);
	iterator->sptep	= ((u64 *)__va(iterator->shadow_addr)) + iterator->index;
	return true;
}

static void __shadow_walk_next(struct kvm_shadow_walk_iterator *iterator,
			       u64 spte)
{
	if (is_last_spte(spte, iterator->level)) {
		iterator->level = 0;
		return;
	}

	iterator->shadow_addr = spte & PT64_BASE_ADDR_MASK;
	--iterator->level;
}

static void shadow_walk_next(struct kvm_shadow_walk_iterator *iterator)
{
	return __shadow_walk_next(iterator, *iterator->sptep);
}

static void link_shadow_page(u64 *sptep, struct kvm_mmu_page *sp)
{
	u64 spte;

	spte = __pa(sp->spt) | PT_PRESENT_MASK | PT_WRITABLE_MASK |
	       shadow_user_mask | shadow_x_mask | shadow_accessed_mask;

	mmu_spte_set(sptep, spte);
}
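
/*
 * Non-leaf sptes are deliberately maximally permissive (present,
 * writable, user, executable, accessed): access checks are enforced
 * only at the leaf level, so intermediate entries never need to be
 * refetched when a leaf's permissions change.
 */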
static void validate_direct_spte(struct kvm_vcpu *vcpu, u64 *sptep,
				 unsigned direct_access)
{
	if (is_shadow_present_pte(*sptep) && !is_large_pte(*sptep)) {
		struct kvm_mmu_page *child;

		/*
		 * For the direct sp, if the guest pte's dirty bit
		 * changed from clean to dirty, it would corrupt the
		 * sp's access: writes would be allowed into a read-only
		 * sp.  Update the spte at this point to get a new sp
		 * with the correct access.
		 */
		child = page_header(*sptep & PT64_BASE_ADDR_MASK);
		if (child->role.access == direct_access)
			return;

		drop_parent_pte(child, sptep);
		kvm_flush_remote_tlbs(vcpu->kvm);
	}
}

static bool mmu_page_zap_pte(struct kvm *kvm, struct kvm_mmu_page *sp,
			     u64 *spte)
{
	u64 pte;
	struct kvm_mmu_page *child;

	pte = *spte;
	if (is_shadow_present_pte(pte)) {
		if (is_last_spte(pte, sp->role.level)) {
			drop_spte(kvm, spte);
			if (is_large_pte(pte))
				--kvm->stat.lpages;
		} else {
			child = page_header(pte & PT64_BASE_ADDR_MASK);
			drop_parent_pte(child, spte);
		}
		return true;
	}

	if (is_mmio_spte(pte))
		mmu_spte_clear_no_track(spte);

	return false;
}

static void kvm_mmu_page_unlink_children(struct kvm *kvm,
					 struct kvm_mmu_page *sp)
{
	unsigned i;

	for (i = 0; i < PT64_ENT_PER_PAGE; ++i)
		mmu_page_zap_pte(kvm, sp, sp->spt + i);
}

static void kvm_mmu_put_page(struct kvm_mmu_page *sp, u64 *parent_pte)
{
	mmu_page_remove_parent_pte(sp, parent_pte);
}

static void kvm_mmu_unlink_parents(struct kvm *kvm, struct kvm_mmu_page *sp)
{
	u64 *sptep;
	struct rmap_iterator iter;

	while ((sptep = rmap_get_first(sp->parent_ptes, &iter)))
		drop_parent_pte(sp, sptep);
}

static int mmu_zap_unsync_children(struct kvm *kvm,
				   struct kvm_mmu_page *parent,
				   struct list_head *invalid_list)
{
	int i, zapped = 0;
	struct mmu_page_path parents;
	struct kvm_mmu_pages pages;

	if (parent->role.level == PT_PAGE_TABLE_LEVEL)
		return 0;

	kvm_mmu_pages_init(parent, &parents, &pages);
	while (mmu_unsync_walk(parent, &pages)) {
		struct kvm_mmu_page *sp;

		for_each_sp(pages, sp, parents, i) {
			kvm_mmu_prepare_zap_page(kvm, sp, invalid_list);
			mmu_pages_clear_parents(&parents);
			zapped++;
		}
		kvm_mmu_pages_init(parent, &parents, &pages);
	}

	return zapped;
}

static int kvm_mmu_prepare_zap_page(struct kvm *kvm, struct kvm_mmu_page *sp,
				    struct list_head *invalid_list)
{
	int ret;

	trace_kvm_mmu_prepare_zap_page(sp);
	++kvm->stat.mmu_shadow_zapped;
	ret = mmu_zap_unsync_children(kvm, sp, invalid_list);
	kvm_mmu_page_unlink_children(kvm, sp);
	kvm_mmu_unlink_parents(kvm, sp);
	if (!sp->role.invalid && !sp->role.direct)
		unaccount_shadowed(kvm, sp->gfn);
	if (sp->unsync)
		kvm_unlink_unsync_page(kvm, sp);
	if (!sp->root_count) {
		/* Count self */
		ret++;
		list_move(&sp->link, invalid_list);
		kvm_mod_used_mmu_pages(kvm, -1);
	} else {
		list_move(&sp->link, &kvm->arch.active_mmu_pages);
		kvm_reload_remote_mmus(kvm);
	}

	sp->role.invalid = 1;
	return ret;
}

static void kvm_mmu_commit_zap_page(struct kvm *kvm,
				    struct list_head *invalid_list)
{
	struct kvm_mmu_page *sp, *nsp;

	if (list_empty(invalid_list))
		return;

	/*
	 * wmb: make sure everyone sees our modifications to the page tables
	 * rmb: make sure we see changes to vcpu->mode
	 */
	smp_mb();

	/*
	 * Wait for all vcpus to exit guest mode and/or lockless shadow
	 * page table walks.
	 */
	kvm_flush_remote_tlbs(kvm);

	list_for_each_entry_safe(sp, nsp, invalid_list, link) {
		WARN_ON(!sp->role.invalid || sp->root_count);
		kvm_mmu_free_page(sp);
	}
}
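
/*
 * Zapping is thus a two-phase protocol: prepare_zap unlinks the page
 * and marks it invalid while the mmu_lock is held, and commit_zap
 * flushes remote TLBs (waiting out any lockless walkers) before the
 * backing memory is finally freed.  Freeing before that flush could
 * let another cpu dereference a stale shadow page.
 */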
static bool prepare_zap_oldest_mmu_page(struct kvm *kvm,
					struct list_head *invalid_list)
{
	struct kvm_mmu_page *sp;

	if (list_empty(&kvm->arch.active_mmu_pages))
		return false;

	sp = list_entry(kvm->arch.active_mmu_pages.prev,
			struct kvm_mmu_page, link);
	kvm_mmu_prepare_zap_page(kvm, sp, invalid_list);

	return true;
}

/*
 * Changing the number of mmu pages allocated to the vm
 * Note: if goal_nr_mmu_pages is too small, you will get a deadlock
 */
void kvm_mmu_change_mmu_pages(struct kvm *kvm, unsigned int goal_nr_mmu_pages)
{
	LIST_HEAD(invalid_list);

	spin_lock(&kvm->mmu_lock);

	if (kvm->arch.n_used_mmu_pages > goal_nr_mmu_pages) {
		/* Need to free some mmu pages to achieve the goal. */
		while (kvm->arch.n_used_mmu_pages > goal_nr_mmu_pages)
			if (!prepare_zap_oldest_mmu_page(kvm, &invalid_list))
				break;

		kvm_mmu_commit_zap_page(kvm, &invalid_list);
		goal_nr_mmu_pages = kvm->arch.n_used_mmu_pages;
	}

	kvm->arch.n_max_mmu_pages = goal_nr_mmu_pages;

	spin_unlock(&kvm->mmu_lock);
}

int kvm_mmu_unprotect_page(struct kvm *kvm, gfn_t gfn)
{
	struct kvm_mmu_page *sp;
	LIST_HEAD(invalid_list);
	int r;

	pgprintk("%s: looking for gfn %llx\n", __func__, gfn);
	r = 0;
	spin_lock(&kvm->mmu_lock);
	for_each_gfn_indirect_valid_sp(kvm, sp, gfn) {
		pgprintk("%s: gfn %llx role %x\n", __func__, gfn,
			 sp->role.word);
		r = 1;
		kvm_mmu_prepare_zap_page(kvm, sp, &invalid_list);
	}
	kvm_mmu_commit_zap_page(kvm, &invalid_list);
	spin_unlock(&kvm->mmu_lock);

	return r;
}
EXPORT_SYMBOL_GPL(kvm_mmu_unprotect_page);

/*
 * The function is based on mtrr_type_lookup() in
 * arch/x86/kernel/cpu/mtrr/generic.c
 */
static int get_mtrr_type(struct mtrr_state_type *mtrr_state,
			 u64 start, u64 end)
{
	int i;
	u64 base, mask;
	u8 prev_match, curr_match;
	int num_var_ranges = KVM_NR_VAR_MTRR;

	if (!mtrr_state->enabled)
		return 0xFF;

	/* Make end inclusive, instead of exclusive */
	end--;

	/* Look in fixed ranges. Just return the type as per start */
	if (mtrr_state->have_fixed && (start < 0x100000)) {
		int idx;

		if (start < 0x80000) {
			idx = 0;
			idx += (start >> 16);
			return mtrr_state->fixed_ranges[idx];
		} else if (start < 0xC0000) {
			idx = 1 * 8;
			idx += ((start - 0x80000) >> 14);
			return mtrr_state->fixed_ranges[idx];
		} else if (start < 0x1000000) {
			idx = 3 * 8;
			idx += ((start - 0xC0000) >> 12);
			return mtrr_state->fixed_ranges[idx];
		}
	}

	/*
	 * Look in variable ranges
	 * Look for multiple ranges matching this address and pick the type
	 * as per MTRR precedence
	 */
	if (!(mtrr_state->enabled & 2))
		return mtrr_state->def_type;

	prev_match = 0xFF;
	for (i = 0; i < num_var_ranges; ++i) {
		unsigned short start_state, end_state;

		if (!(mtrr_state->var_ranges[i].mask_lo & (1 << 11)))
			continue;

		base = (((u64)mtrr_state->var_ranges[i].base_hi) << 32) +
		       (mtrr_state->var_ranges[i].base_lo & PAGE_MASK);
		mask = (((u64)mtrr_state->var_ranges[i].mask_hi) << 32) +
		       (mtrr_state->var_ranges[i].mask_lo & PAGE_MASK);

		start_state = ((start & mask) == (base & mask));
		end_state = ((end & mask) == (base & mask));
		if (start_state != end_state)
			return 0xFE;

		if ((start & mask) != (base & mask))
			continue;

		curr_match = mtrr_state->var_ranges[i].base_lo & 0xff;
		if (prev_match == 0xFF) {
			prev_match = curr_match;
			continue;
		}

		if (prev_match == MTRR_TYPE_UNCACHABLE ||
		    curr_match == MTRR_TYPE_UNCACHABLE)
			return MTRR_TYPE_UNCACHABLE;

		if ((prev_match == MTRR_TYPE_WRBACK &&
		     curr_match == MTRR_TYPE_WRTHROUGH) ||
		    (prev_match == MTRR_TYPE_WRTHROUGH &&
		     curr_match == MTRR_TYPE_WRBACK)) {
			prev_match = MTRR_TYPE_WRTHROUGH;
			curr_match = MTRR_TYPE_WRTHROUGH;
		}

		if (prev_match != curr_match)
			return MTRR_TYPE_UNCACHABLE;
	}

	if (prev_match != 0xFF)
		return prev_match;

	return mtrr_state->def_type;
}
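
/*
 * A variable MTRR covers every address for which (addr & mask) equals
 * (base & mask).  For instance, a hypothetical 1GB range at 0x40000000
 * would use base 0x40000000 and mask ~0x3FFFFFFFULL (clipped to the
 * physical address width), so 0x7FFFF000 matches but 0x80000000 does
 * not.  The 0xFE return above means "range straddles a boundary" and
 * 0xFF means "MTRRs disabled"; both are resolved by the caller.
 */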
u8 kvm_get_guest_memory_type(struct kvm_vcpu *vcpu, gfn_t gfn)
{
	u8 mtrr;

	mtrr = get_mtrr_type(&vcpu->arch.mtrr_state, gfn << PAGE_SHIFT,
			     (gfn << PAGE_SHIFT) + PAGE_SIZE);
	if (mtrr == 0xfe || mtrr == 0xff)
		mtrr = MTRR_TYPE_WRBACK;
	return mtrr;
}
EXPORT_SYMBOL_GPL(kvm_get_guest_memory_type);

static void __kvm_unsync_page(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp)
{
	trace_kvm_mmu_unsync_page(sp);
	++vcpu->kvm->stat.mmu_unsync;
	sp->unsync = 1;

	kvm_mmu_mark_parents_unsync(sp);
}

static void kvm_unsync_pages(struct kvm_vcpu *vcpu, gfn_t gfn)
{
	struct kvm_mmu_page *s;

	for_each_gfn_indirect_valid_sp(vcpu->kvm, s, gfn) {
		if (s->unsync)
			continue;
		WARN_ON(s->role.level != PT_PAGE_TABLE_LEVEL);
		__kvm_unsync_page(vcpu, s);
	}
}

static int mmu_need_write_protect(struct kvm_vcpu *vcpu, gfn_t gfn,
				  bool can_unsync)
{
	struct kvm_mmu_page *s;
	bool need_unsync = false;

	for_each_gfn_indirect_valid_sp(vcpu->kvm, s, gfn) {
		if (!can_unsync)
			return 1;

		if (s->role.level != PT_PAGE_TABLE_LEVEL)
			return 1;

		if (!s->unsync)
			need_unsync = true;
	}
	if (need_unsync)
		kvm_unsync_pages(vcpu, gfn);
	return 0;
}

static int set_spte(struct kvm_vcpu *vcpu, u64 *sptep,
		    unsigned pte_access, int level,
		    gfn_t gfn, pfn_t pfn, bool speculative,
		    bool can_unsync, bool host_writable)
{
	u64 spte;
	int ret = 0;

	if (set_mmio_spte(sptep, gfn, pfn, pte_access))
		return 0;

	spte = PT_PRESENT_MASK;
	if (!speculative)
		spte |= shadow_accessed_mask;

	if (pte_access & ACC_EXEC_MASK)
		spte |= shadow_x_mask;
	else
		spte |= shadow_nx_mask;

	if (pte_access & ACC_USER_MASK)
		spte |= shadow_user_mask;

	if (level > PT_PAGE_TABLE_LEVEL)
		spte |= PT_PAGE_SIZE_MASK;
	if (tdp_enabled)
		spte |= kvm_x86_ops->get_mt_mask(vcpu, gfn,
			kvm_is_mmio_pfn(pfn));

	if (host_writable)
		spte |= SPTE_HOST_WRITEABLE;
	else
		pte_access &= ~ACC_WRITE_MASK;

	spte |= (u64)pfn << PAGE_SHIFT;

	if (pte_access & ACC_WRITE_MASK) {

		/*
		 * Another vcpu may have created a new sp in the window
		 * between mapping_level() and acquiring the mmu-lock.
		 * We can let the guest retry the access; the mapping can
		 * be fixed when the guest refaults.
		 */
		if (level > PT_PAGE_TABLE_LEVEL &&
		    has_wrprotected_page(vcpu->kvm, gfn, level))
			goto done;

		spte |= PT_WRITABLE_MASK | SPTE_MMU_WRITEABLE;

		/*
		 * Optimization: for pte sync, if spte was writable the hash
		 * lookup is unnecessary (and expensive). Write protection
		 * is responsibility of mmu_get_page / kvm_sync_page.
		 * Same reasoning can be applied to dirty page accounting.
		 */
		if (!can_unsync && is_writable_pte(*sptep))
			goto set_pte;

		if (mmu_need_write_protect(vcpu, gfn, can_unsync)) {
			pgprintk("%s: found shadow page for %llx, marking ro\n",
				 __func__, gfn);
			ret = 1;
			pte_access &= ~ACC_WRITE_MASK;
			spte &= ~(PT_WRITABLE_MASK | SPTE_MMU_WRITEABLE);
		}
	}

	if (pte_access & ACC_WRITE_MASK)
		mark_page_dirty(vcpu->kvm, gfn);

set_pte:
	if (mmu_spte_update(sptep, spte))
		kvm_flush_remote_tlbs(vcpu->kvm);
done:
	return ret;
}

static void mmu_set_spte(struct kvm_vcpu *vcpu, u64 *sptep,
			 unsigned pte_access, int write_fault, int *emulate,
			 int level, gfn_t gfn, pfn_t pfn, bool speculative,
			 bool host_writable)
{
	int was_rmapped = 0;
	int rmap_count;

	pgprintk("%s: spte %llx write_fault %d gfn %llx\n", __func__,
		 *sptep, write_fault, gfn);

	if (is_rmap_spte(*sptep)) {
		/*
		 * If we overwrite a PTE page pointer with a 2MB PMD, unlink
		 * the parent of the now unreachable PTE.
		 */
		if (level > PT_PAGE_TABLE_LEVEL &&
		    !is_large_pte(*sptep)) {
			struct kvm_mmu_page *child;
			u64 pte = *sptep;

			child = page_header(pte & PT64_BASE_ADDR_MASK);
			drop_parent_pte(child, sptep);
			kvm_flush_remote_tlbs(vcpu->kvm);
		} else if (pfn != spte_to_pfn(*sptep)) {
			pgprintk("hfn old %llx new %llx\n",
				 spte_to_pfn(*sptep), pfn);
			drop_spte(vcpu->kvm, sptep);
			kvm_flush_remote_tlbs(vcpu->kvm);
		} else
			was_rmapped = 1;
	}

	if (set_spte(vcpu, sptep, pte_access, level, gfn, pfn, speculative,
		     true, host_writable)) {
		if (write_fault)
			*emulate = 1;
		kvm_mmu_flush_tlb(vcpu);
	}

	if (unlikely(is_mmio_spte(*sptep) && emulate))
		*emulate = 1;

	pgprintk("%s: setting spte %llx\n", __func__, *sptep);
	pgprintk("instantiating %s PTE (%s) at %llx (%llx) addr %p\n",
		 is_large_pte(*sptep) ? "2MB" : "4kB",
		 *sptep & PT_PRESENT_MASK ? "RW" : "R", gfn,
		 *sptep, sptep);
	if (!was_rmapped && is_large_pte(*sptep))
		++vcpu->kvm->stat.lpages;

	if (is_shadow_present_pte(*sptep)) {
		if (!was_rmapped) {
			rmap_count = rmap_add(vcpu, sptep, gfn);
			if (rmap_count > RMAP_RECYCLE_THRESHOLD)
				rmap_recycle(vcpu, sptep, gfn);
		}
	}

	kvm_release_pfn_clean(pfn);
}

static void nonpaging_new_cr3(struct kvm_vcpu *vcpu)
{
	mmu_free_roots(vcpu);
}

static bool is_rsvd_bits_set(struct kvm_mmu *mmu, u64 gpte, int level)
{
	int bit7;

	bit7 = (gpte >> 7) & 1;
	return (gpte & mmu->rsvd_bits_mask[bit7][level-1]) != 0;
}
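
/*
 * Bit 7 of a gpte is the PS (page size) bit at directory levels, which
 * is why it selects the row of rsvd_bits_mask: large and small mappings
 * reserve different bit ranges.  For example, a level-2 PDE with bit 7
 * set is checked against rsvd_bits_mask[1][1].
 */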
static pfn_t pte_prefetch_gfn_to_pfn(struct kvm_vcpu *vcpu, gfn_t gfn,
				     bool no_dirty_log)
{
	struct kvm_memory_slot *slot;

	slot = gfn_to_memslot_dirty_bitmap(vcpu, gfn, no_dirty_log);
	if (!slot)
		return KVM_PFN_ERR_FAULT;

	return gfn_to_pfn_memslot_atomic(slot, gfn);
}

static bool prefetch_invalid_gpte(struct kvm_vcpu *vcpu,
				  struct kvm_mmu_page *sp, u64 *spte,
				  u64 gpte)
{
	if (is_rsvd_bits_set(&vcpu->arch.mmu, gpte, PT_PAGE_TABLE_LEVEL))
		goto no_present;

	if (!is_present_gpte(gpte))
		goto no_present;

	if (!(gpte & PT_ACCESSED_MASK))
		goto no_present;

	return false;

no_present:
	drop_spte(vcpu->kvm, spte);
	return true;
}

static int direct_pte_prefetch_many(struct kvm_vcpu *vcpu,
				    struct kvm_mmu_page *sp,
				    u64 *start, u64 *end)
{
	struct page *pages[PTE_PREFETCH_NUM];
	unsigned access = sp->role.access;
	int i, ret;
	gfn_t gfn;

	gfn = kvm_mmu_page_get_gfn(sp, start - sp->spt);
	if (!gfn_to_memslot_dirty_bitmap(vcpu, gfn, access & ACC_WRITE_MASK))
		return -1;

	ret = gfn_to_page_many_atomic(vcpu->kvm, gfn, pages, end - start);
	if (ret <= 0)
		return -1;

	for (i = 0; i < ret; i++, gfn++, start++)
		mmu_set_spte(vcpu, start, access, 0, NULL,
			     sp->role.level, gfn, page_to_pfn(pages[i]),
			     true, true);

	return 0;
}

static void __direct_pte_prefetch(struct kvm_vcpu *vcpu,
				  struct kvm_mmu_page *sp, u64 *sptep)
{
	u64 *spte, *start = NULL;
	int i;

	WARN_ON(!sp->role.direct);

	i = (sptep - sp->spt) & ~(PTE_PREFETCH_NUM - 1);
	spte = sp->spt + i;

	for (i = 0; i < PTE_PREFETCH_NUM; i++, spte++) {
		if (is_shadow_present_pte(*spte) || spte == sptep) {
			if (!start)
				continue;
			if (direct_pte_prefetch_many(vcpu, sp, start, spte) < 0)
				break;
			start = NULL;
		} else if (!start)
			start = spte;
	}
}
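
/*
 * The masking above rounds the faulting spte down to the start of its
 * prefetch window; assuming PTE_PREFETCH_NUM is 8, a fault on entry 13
 * scans entries 8..15 and batches the runs of not-yet-present sptes
 * around the fault into single direct_pte_prefetch_many() calls.
 */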
static void direct_pte_prefetch(struct kvm_vcpu *vcpu, u64 *sptep)
{
	struct kvm_mmu_page *sp;

	/*
	 * Since there is no accessed bit on EPT, there is no way to
	 * distinguish between actually accessed translations and
	 * prefetched ones, so disable pte prefetch if EPT is enabled.
	 */
	if (!shadow_accessed_mask)
		return;

	sp = page_header(__pa(sptep));
	if (sp->role.level > PT_PAGE_TABLE_LEVEL)
		return;

	__direct_pte_prefetch(vcpu, sp, sptep);
}

static int __direct_map(struct kvm_vcpu *vcpu, gpa_t v, int write,
			int map_writable, int level, gfn_t gfn, pfn_t pfn,
			bool prefault)
{
	struct kvm_shadow_walk_iterator iterator;
	struct kvm_mmu_page *sp;
	int emulate = 0;
	gfn_t pseudo_gfn;

	for_each_shadow_entry(vcpu, (u64)gfn << PAGE_SHIFT, iterator) {
		if (iterator.level == level) {
			mmu_set_spte(vcpu, iterator.sptep, ACC_ALL,
				     write, &emulate, level, gfn, pfn,
				     prefault, map_writable);
			direct_pte_prefetch(vcpu, iterator.sptep);
			++vcpu->stat.pf_fixed;
			break;
		}

		if (!is_shadow_present_pte(*iterator.sptep)) {
			u64 base_addr = iterator.addr;

			base_addr &= PT64_LVL_ADDR_MASK(iterator.level);
			pseudo_gfn = base_addr >> PAGE_SHIFT;
			sp = kvm_mmu_get_page(vcpu, pseudo_gfn, iterator.addr,
					      iterator.level - 1,
					      1, ACC_ALL, iterator.sptep);

			link_shadow_page(iterator.sptep, sp);
		}
	}
	return emulate;
}

static void kvm_send_hwpoison_signal(unsigned long address, struct task_struct *tsk)
{
	siginfo_t info;

	info.si_signo	= SIGBUS;
	info.si_errno	= 0;
	info.si_code	= BUS_MCEERR_AR;
	info.si_addr	= (void __user *)address;
	info.si_addr_lsb = PAGE_SHIFT;

	send_sig_info(SIGBUS, &info, tsk);
}

static int kvm_handle_bad_page(struct kvm_vcpu *vcpu, gfn_t gfn, pfn_t pfn)
{
	/*
	 * Do not cache the mmio info caused by writing the readonly gfn
	 * into the spte; otherwise a read access on the readonly gfn could
	 * also cause an mmio page fault and be treated as mmio access.
	 * Return 1 to tell kvm to emulate it.
	 */
	if (pfn == KVM_PFN_ERR_RO_FAULT)
		return 1;

	if (pfn == KVM_PFN_ERR_HWPOISON) {
		kvm_send_hwpoison_signal(gfn_to_hva(vcpu->kvm, gfn), current);
		return 0;
	}

	return -EFAULT;
}

static void transparent_hugepage_adjust(struct kvm_vcpu *vcpu,
					gfn_t *gfnp, pfn_t *pfnp, int *levelp)
{
	pfn_t pfn = *pfnp;
	gfn_t gfn = *gfnp;
	int level = *levelp;

	/*
	 * Check if it's a transparent hugepage. If this would be a
	 * hugetlbfs page, level wouldn't be set to PT_PAGE_TABLE_LEVEL
	 * and there would be no adjustment done here.
	 */
	if (!is_error_noslot_pfn(pfn) && !kvm_is_mmio_pfn(pfn) &&
	    level == PT_PAGE_TABLE_LEVEL &&
	    PageTransCompound(pfn_to_page(pfn)) &&
	    !has_wrprotected_page(vcpu->kvm, gfn, PT_DIRECTORY_LEVEL)) {
		unsigned long mask;
		/*
		 * mmu_notifier_retry was successful and we hold the
		 * mmu_lock here, so the pmd can't become splitting
		 * from under us, and in turn
		 * __split_huge_page_refcount() can't run from under
		 * us and we can safely transfer the refcount from
		 * PG_tail to PG_head as we switch the pfn from tail
		 * to head.
		 */
		*levelp = level = PT_DIRECTORY_LEVEL;
		mask = KVM_PAGES_PER_HPAGE(level) - 1;
		VM_BUG_ON((gfn & mask) != (pfn & mask));
		if (pfn & mask) {
			gfn &= ~mask;
			*gfnp = gfn;
			kvm_release_pfn_clean(pfn);
			pfn &= ~mask;
			kvm_get_pfn(pfn);
			*pfnp = pfn;
		}
	}
}

static bool handle_abnormal_pfn(struct kvm_vcpu *vcpu, gva_t gva, gfn_t gfn,
				pfn_t pfn, unsigned access, int *ret_val)
{
	bool ret = true;

	/* The pfn is invalid, report the error! */
	if (unlikely(is_error_pfn(pfn))) {
		*ret_val = kvm_handle_bad_page(vcpu, gfn, pfn);
		goto exit;
	}

	if (unlikely(is_noslot_pfn(pfn)))
		vcpu_cache_mmio_info(vcpu, gva, gfn, access);

	ret = false;
exit:
	return ret;
}

static bool page_fault_can_be_fast(struct kvm_vcpu *vcpu, u32 error_code)
{
	/*
	 * A #PF can be fast only if the shadow page table is present and
	 * it is caused by write-protect; that means we just need to change
	 * the W bit of the spte, which can be done out of mmu-lock.
	 */
	if (!(error_code & PFERR_PRESENT_MASK) ||
	    !(error_code & PFERR_WRITE_MASK))
		return false;

	return true;
}

static bool
fast_pf_fix_direct_spte(struct kvm_vcpu *vcpu, u64 *sptep, u64 spte)
{
	struct kvm_mmu_page *sp = page_header(__pa(sptep));
	gfn_t gfn;

	WARN_ON(!sp->role.direct);

	/*
	 * The gfn of the direct spte is stable since it is
	 * calculated from sp->gfn.
	 */
	gfn = kvm_mmu_page_get_gfn(sp, sptep - sp->spt);

	if (cmpxchg64(sptep, spte, spte | PT_WRITABLE_MASK) == spte)
		mark_page_dirty(vcpu->kvm, gfn);

	return true;
}
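
/*
 * The cmpxchg64() above is what makes the fast path safe without the
 * mmu_lock: the W bit is set only if the spte still holds the exact
 * value sampled during the lockless walk.  If another cpu zapped or
 * rewrote the spte in the meantime, the compare fails, nothing is
 * modified, and the vcpu simply refaults and takes the slow path.
 */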
/*
 * Return value:
 * - true: let the vcpu access the same address again.
 * - false: let the real page fault path fix it.
 */
static bool fast_page_fault(struct kvm_vcpu *vcpu, gva_t gva, int level,
			    u32 error_code)
{
	struct kvm_shadow_walk_iterator iterator;
	bool ret = false;
	u64 spte = 0ull;

	if (!page_fault_can_be_fast(vcpu, error_code))
		return false;

	walk_shadow_page_lockless_begin(vcpu);
	for_each_shadow_entry_lockless(vcpu, gva, iterator, spte)
		if (!is_shadow_present_pte(spte) || iterator.level < level)
			break;

	/*
	 * If the mapping has been changed, let the vcpu fault on the
	 * same address again.
	 */
	if (!is_rmap_spte(spte)) {
		ret = true;
		goto exit;
	}

	if (!is_last_spte(spte, level))
		goto exit;

	/*
	 * Check if it is a spurious fault caused by a lazily flushed TLB.
	 *
	 * Need not check the access of upper level table entries since
	 * they are always ACC_ALL.
	 */
	if (is_writable_pte(spte)) {
		ret = true;
		goto exit;
	}

	/*
	 * Currently, to simplify the code, only the spte write-protected
	 * by dirty-log can be fast fixed.
	 */
	if (!spte_is_locklessly_modifiable(spte))
		goto exit;

	/*
	 * Currently, fast page fault only works for direct mapping since
	 * the gfn is not stable for indirect shadow page.
	 * See Documentation/virtual/kvm/locking.txt to get more detail.
	 */
	ret = fast_pf_fix_direct_spte(vcpu, iterator.sptep, spte);
exit:
	trace_fast_page_fault(vcpu, gva, error_code, iterator.sptep,
			      spte, ret);
	walk_shadow_page_lockless_end(vcpu);

	return ret;
}

static bool try_async_pf(struct kvm_vcpu *vcpu, bool prefault, gfn_t gfn,
			 gva_t gva, pfn_t *pfn, bool write, bool *writable);

static int nonpaging_map(struct kvm_vcpu *vcpu, gva_t v, u32 error_code,
			 gfn_t gfn, bool prefault)
{
	int r;
	int level;
	int force_pt_level;
	pfn_t pfn;
	unsigned long mmu_seq;
	bool map_writable, write = error_code & PFERR_WRITE_MASK;

	force_pt_level = mapping_level_dirty_bitmap(vcpu, gfn);
	if (likely(!force_pt_level)) {
		level = mapping_level(vcpu, gfn);
		/*
		 * This path builds a PAE pagetable - so we can map
		 * 2mb pages at maximum. Therefore check if the level
		 * is larger than that.
		 */
		if (level > PT_DIRECTORY_LEVEL)
			level = PT_DIRECTORY_LEVEL;

		gfn &= ~(KVM_PAGES_PER_HPAGE(level) - 1);
	} else
		level = PT_PAGE_TABLE_LEVEL;

	if (fast_page_fault(vcpu, v, level, error_code))
		return 0;

	mmu_seq = vcpu->kvm->mmu_notifier_seq;
	smp_rmb();

	if (try_async_pf(vcpu, prefault, gfn, v, &pfn, write, &map_writable))
		return 0;

	if (handle_abnormal_pfn(vcpu, v, gfn, pfn, ACC_ALL, &r))
		return r;

	spin_lock(&vcpu->kvm->mmu_lock);
	if (mmu_notifier_retry(vcpu->kvm, mmu_seq))
		goto out_unlock;
	if (likely(!force_pt_level))
		transparent_hugepage_adjust(vcpu, &gfn, &pfn, &level);
	r = __direct_map(vcpu, v, write, map_writable, level, gfn, pfn,
			 prefault);
	spin_unlock(&vcpu->kvm->mmu_lock);

	return r;

out_unlock:
	spin_unlock(&vcpu->kvm->mmu_lock);
	kvm_release_pfn_clean(pfn);
	return 0;
}

static void mmu_free_roots(struct kvm_vcpu *vcpu)
{
	int i;
	struct kvm_mmu_page *sp;
	LIST_HEAD(invalid_list);

	if (!VALID_PAGE(vcpu->arch.mmu.root_hpa))
		return;

	spin_lock(&vcpu->kvm->mmu_lock);
	if (vcpu->arch.mmu.shadow_root_level == PT64_ROOT_LEVEL &&
	    (vcpu->arch.mmu.root_level == PT64_ROOT_LEVEL ||
	     vcpu->arch.mmu.direct_map)) {
		hpa_t root = vcpu->arch.mmu.root_hpa;

		sp = page_header(root);
		--sp->root_count;
		if (!sp->root_count && sp->role.invalid) {
			kvm_mmu_prepare_zap_page(vcpu->kvm, sp, &invalid_list);
			kvm_mmu_commit_zap_page(vcpu->kvm, &invalid_list);
		}
		vcpu->arch.mmu.root_hpa = INVALID_PAGE;
		spin_unlock(&vcpu->kvm->mmu_lock);
		return;
	}
	for (i = 0; i < 4; ++i) {
		hpa_t root = vcpu->arch.mmu.pae_root[i];

		if (root) {
			root &= PT64_BASE_ADDR_MASK;
			sp = page_header(root);
			--sp->root_count;
			if (!sp->root_count && sp->role.invalid)
				kvm_mmu_prepare_zap_page(vcpu->kvm, sp,
							 &invalid_list);
		}
		vcpu->arch.mmu.pae_root[i] = INVALID_PAGE;
	}
	kvm_mmu_commit_zap_page(vcpu->kvm, &invalid_list);
	spin_unlock(&vcpu->kvm->mmu_lock);
	vcpu->arch.mmu.root_hpa = INVALID_PAGE;
}

static int mmu_check_root(struct kvm_vcpu *vcpu, gfn_t root_gfn)
{
	int ret = 0;

	if (!kvm_is_visible_gfn(vcpu->kvm, root_gfn)) {
		kvm_make_request(KVM_REQ_TRIPLE_FAULT, vcpu);
		ret = 1;
	}

	return ret;
}

static int mmu_alloc_direct_roots(struct kvm_vcpu *vcpu)
{
	struct kvm_mmu_page *sp;
	unsigned i;

	if (vcpu->arch.mmu.shadow_root_level == PT64_ROOT_LEVEL) {
		spin_lock(&vcpu->kvm->mmu_lock);
		sp = kvm_mmu_get_page(vcpu, 0, 0, PT64_ROOT_LEVEL,
				      1, ACC_ALL, NULL);
		++sp->root_count;
		spin_unlock(&vcpu->kvm->mmu_lock);
		vcpu->arch.mmu.root_hpa = __pa(sp->spt);
	} else if (vcpu->arch.mmu.shadow_root_level == PT32E_ROOT_LEVEL) {
		for (i = 0; i < 4; ++i) {
			hpa_t root = vcpu->arch.mmu.pae_root[i];

			ASSERT(!VALID_PAGE(root));
			spin_lock(&vcpu->kvm->mmu_lock);
			sp = kvm_mmu_get_page(vcpu, i << (30 - PAGE_SHIFT),
					      i << 30,
					      PT32_ROOT_LEVEL, 1, ACC_ALL,
					      NULL);
			root = __pa(sp->spt);
			++sp->root_count;
			spin_unlock(&vcpu->kvm->mmu_lock);
			vcpu->arch.mmu.pae_root[i] = root | PT_PRESENT_MASK;
		}
		vcpu->arch.mmu.root_hpa = __pa(vcpu->arch.mmu.pae_root);
	} else
		BUG();

	return 0;
}
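
/*
 * In the PT32E branch above, each of the four PAE roots is a direct
 * shadow page covering 1GB of guest physical space: root i starts at
 * gfn (i << 18), i.e. gpa (i << 30), so for example pae_root[2] maps
 * guest physical addresses 0x80000000-0xBFFFFFFF.
 */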
  2439. static int mmu_alloc_shadow_roots(struct kvm_vcpu *vcpu)
  2440. {
  2441. struct kvm_mmu_page *sp;
  2442. u64 pdptr, pm_mask;
  2443. gfn_t root_gfn;
  2444. int i;
  2445. root_gfn = vcpu->arch.mmu.get_cr3(vcpu) >> PAGE_SHIFT;
  2446. if (mmu_check_root(vcpu, root_gfn))
  2447. return 1;
  2448. /*
  2449. * Do we shadow a long mode page table? If so we need to
  2450. * write-protect the guests page table root.
  2451. */
  2452. if (vcpu->arch.mmu.root_level == PT64_ROOT_LEVEL) {
  2453. hpa_t root = vcpu->arch.mmu.root_hpa;
  2454. ASSERT(!VALID_PAGE(root));
  2455. spin_lock(&vcpu->kvm->mmu_lock);
  2456. sp = kvm_mmu_get_page(vcpu, root_gfn, 0, PT64_ROOT_LEVEL,
  2457. 0, ACC_ALL, NULL);
  2458. root = __pa(sp->spt);
  2459. ++sp->root_count;
  2460. spin_unlock(&vcpu->kvm->mmu_lock);
  2461. vcpu->arch.mmu.root_hpa = root;
  2462. return 0;
  2463. }
  2464. /*
  2465. * We shadow a 32 bit page table. This may be a legacy 2-level
  2466. * or a PAE 3-level page table. In either case we need to be aware that
  2467. * the shadow page table may be a PAE or a long mode page table.
  2468. */
  2469. pm_mask = PT_PRESENT_MASK;
  2470. if (vcpu->arch.mmu.shadow_root_level == PT64_ROOT_LEVEL)
  2471. pm_mask |= PT_ACCESSED_MASK | PT_WRITABLE_MASK | PT_USER_MASK;
  2472. for (i = 0; i < 4; ++i) {
  2473. hpa_t root = vcpu->arch.mmu.pae_root[i];
  2474. ASSERT(!VALID_PAGE(root));
  2475. if (vcpu->arch.mmu.root_level == PT32E_ROOT_LEVEL) {
  2476. pdptr = vcpu->arch.mmu.get_pdptr(vcpu, i);
  2477. if (!is_present_gpte(pdptr)) {
  2478. vcpu->arch.mmu.pae_root[i] = 0;
  2479. continue;
  2480. }
  2481. root_gfn = pdptr >> PAGE_SHIFT;
  2482. if (mmu_check_root(vcpu, root_gfn))
  2483. return 1;
  2484. }
  2485. spin_lock(&vcpu->kvm->mmu_lock);
  2486. sp = kvm_mmu_get_page(vcpu, root_gfn, i << 30,
  2487. PT32_ROOT_LEVEL, 0,
  2488. ACC_ALL, NULL);
  2489. root = __pa(sp->spt);
  2490. ++sp->root_count;
  2491. spin_unlock(&vcpu->kvm->mmu_lock);
  2492. vcpu->arch.mmu.pae_root[i] = root | pm_mask;
  2493. }
  2494. vcpu->arch.mmu.root_hpa = __pa(vcpu->arch.mmu.pae_root);
  2495. /*
  2496. * If we shadow a 32 bit page table with a long mode page
  2497. * table we enter this path.
  2498. */
  2499. if (vcpu->arch.mmu.shadow_root_level == PT64_ROOT_LEVEL) {
  2500. if (vcpu->arch.mmu.lm_root == NULL) {
  2501. /*
  2502. * The additional page necessary for this is only
  2503. * allocated on demand.
  2504. */
  2505. u64 *lm_root;
  2506. lm_root = (void*)get_zeroed_page(GFP_KERNEL);
  2507. if (lm_root == NULL)
  2508. return 1;
  2509. lm_root[0] = __pa(vcpu->arch.mmu.pae_root) | pm_mask;
  2510. vcpu->arch.mmu.lm_root = lm_root;
  2511. }
  2512. vcpu->arch.mmu.root_hpa = __pa(vcpu->arch.mmu.lm_root);
  2513. }
  2514. return 0;
  2515. }
  2516. static int mmu_alloc_roots(struct kvm_vcpu *vcpu)
  2517. {
  2518. if (vcpu->arch.mmu.direct_map)
  2519. return mmu_alloc_direct_roots(vcpu);
  2520. else
  2521. return mmu_alloc_shadow_roots(vcpu);
  2522. }
  2523. static void mmu_sync_roots(struct kvm_vcpu *vcpu)
  2524. {
  2525. int i;
  2526. struct kvm_mmu_page *sp;
  2527. if (vcpu->arch.mmu.direct_map)
  2528. return;
  2529. if (!VALID_PAGE(vcpu->arch.mmu.root_hpa))
  2530. return;
  2531. vcpu_clear_mmio_info(vcpu, ~0ul);
  2532. kvm_mmu_audit(vcpu, AUDIT_PRE_SYNC);
  2533. if (vcpu->arch.mmu.root_level == PT64_ROOT_LEVEL) {
  2534. hpa_t root = vcpu->arch.mmu.root_hpa;
  2535. sp = page_header(root);
  2536. mmu_sync_children(vcpu, sp);
  2537. kvm_mmu_audit(vcpu, AUDIT_POST_SYNC);
  2538. return;
  2539. }
  2540. for (i = 0; i < 4; ++i) {
  2541. hpa_t root = vcpu->arch.mmu.pae_root[i];
  2542. if (root && VALID_PAGE(root)) {
  2543. root &= PT64_BASE_ADDR_MASK;
  2544. sp = page_header(root);
  2545. mmu_sync_children(vcpu, sp);
  2546. }
  2547. }
  2548. kvm_mmu_audit(vcpu, AUDIT_POST_SYNC);
  2549. }
  2550. void kvm_mmu_sync_roots(struct kvm_vcpu *vcpu)
  2551. {
  2552. spin_lock(&vcpu->kvm->mmu_lock);
  2553. mmu_sync_roots(vcpu);
  2554. spin_unlock(&vcpu->kvm->mmu_lock);
  2555. }
  2556. static gpa_t nonpaging_gva_to_gpa(struct kvm_vcpu *vcpu, gva_t vaddr,
  2557. u32 access, struct x86_exception *exception)
  2558. {
  2559. if (exception)
  2560. exception->error_code = 0;
  2561. return vaddr;
  2562. }
  2563. static gpa_t nonpaging_gva_to_gpa_nested(struct kvm_vcpu *vcpu, gva_t vaddr,
  2564. u32 access,
  2565. struct x86_exception *exception)
  2566. {
  2567. if (exception)
  2568. exception->error_code = 0;
  2569. return vcpu->arch.nested_mmu.translate_gpa(vcpu, vaddr, access);
  2570. }
  2571. static bool quickly_check_mmio_pf(struct kvm_vcpu *vcpu, u64 addr, bool direct)
  2572. {
  2573. if (direct)
  2574. return vcpu_match_mmio_gpa(vcpu, addr);
  2575. return vcpu_match_mmio_gva(vcpu, addr);
  2576. }
  2577. /*
  2578. * On direct hosts, the last spte is only allows two states
  2579. * for mmio page fault:
  2580. * - It is the mmio spte
  2581. * - It is zapped or it is being zapped.
  2582. *
  2583. * This function completely checks the spte when the last spte
  2584. * is not the mmio spte.
  2585. */
  2586. static bool check_direct_spte_mmio_pf(u64 spte)
  2587. {
  2588. return __check_direct_spte_mmio_pf(spte);
  2589. }
  2590. static u64 walk_shadow_page_get_mmio_spte(struct kvm_vcpu *vcpu, u64 addr)
  2591. {
  2592. struct kvm_shadow_walk_iterator iterator;
  2593. u64 spte = 0ull;
  2594. walk_shadow_page_lockless_begin(vcpu);
  2595. for_each_shadow_entry_lockless(vcpu, addr, iterator, spte)
  2596. if (!is_shadow_present_pte(spte))
  2597. break;
  2598. walk_shadow_page_lockless_end(vcpu);
  2599. return spte;
  2600. }
  2601. /*
  2602. * If it is a real mmio page fault, return 1 and emulat the instruction
  2603. * directly, return 0 to let CPU fault again on the address, -1 is
  2604. * returned if bug is detected.
  2605. */
  2606. int handle_mmio_page_fault_common(struct kvm_vcpu *vcpu, u64 addr, bool direct)
  2607. {
  2608. u64 spte;
  2609. if (quickly_check_mmio_pf(vcpu, addr, direct))
  2610. return 1;
  2611. spte = walk_shadow_page_get_mmio_spte(vcpu, addr);
  2612. if (is_mmio_spte(spte)) {
  2613. gfn_t gfn = get_mmio_spte_gfn(spte);
  2614. unsigned access = get_mmio_spte_access(spte);
  2615. if (direct)
  2616. addr = 0;
  2617. trace_handle_mmio_page_fault(addr, gfn, access);
  2618. vcpu_cache_mmio_info(vcpu, addr, gfn, access);
  2619. return 1;
  2620. }
  2621. /*
  2622. * It's ok if the gva is remapped by other cpus on shadow guest,
  2623. * it's a BUG if the gfn is not a mmio page.
  2624. */
  2625. if (direct && !check_direct_spte_mmio_pf(spte))
  2626. return -1;
  2627. /*
  2628. * If the page table is zapped by other cpus, let CPU fault again on
  2629. * the address.
  2630. */
  2631. return 0;
  2632. }
  2633. EXPORT_SYMBOL_GPL(handle_mmio_page_fault_common);
  2634. static int handle_mmio_page_fault(struct kvm_vcpu *vcpu, u64 addr,
  2635. u32 error_code, bool direct)
  2636. {
  2637. int ret;
  2638. ret = handle_mmio_page_fault_common(vcpu, addr, direct);
  2639. WARN_ON(ret < 0);
  2640. return ret;
  2641. }
  2642. static int nonpaging_page_fault(struct kvm_vcpu *vcpu, gva_t gva,
  2643. u32 error_code, bool prefault)
  2644. {
  2645. gfn_t gfn;
  2646. int r;
  2647. pgprintk("%s: gva %lx error %x\n", __func__, gva, error_code);
  2648. if (unlikely(error_code & PFERR_RSVD_MASK))
  2649. return handle_mmio_page_fault(vcpu, gva, error_code, true);
  2650. r = mmu_topup_memory_caches(vcpu);
  2651. if (r)
  2652. return r;
  2653. ASSERT(vcpu);
  2654. ASSERT(VALID_PAGE(vcpu->arch.mmu.root_hpa));
  2655. gfn = gva >> PAGE_SHIFT;
  2656. return nonpaging_map(vcpu, gva & PAGE_MASK,
  2657. error_code, gfn, prefault);
  2658. }
  2659. static int kvm_arch_setup_async_pf(struct kvm_vcpu *vcpu, gva_t gva, gfn_t gfn)
  2660. {
  2661. struct kvm_arch_async_pf arch;
  2662. arch.token = (vcpu->arch.apf.id++ << 12) | vcpu->vcpu_id;
  2663. arch.gfn = gfn;
  2664. arch.direct_map = vcpu->arch.mmu.direct_map;
  2665. arch.cr3 = vcpu->arch.mmu.get_cr3(vcpu);
  2666. return kvm_setup_async_pf(vcpu, gva, gfn, &arch);
  2667. }
  2668. static bool can_do_async_pf(struct kvm_vcpu *vcpu)
  2669. {
  2670. if (unlikely(!irqchip_in_kernel(vcpu->kvm) ||
  2671. kvm_event_needs_reinjection(vcpu)))
  2672. return false;
  2673. return kvm_x86_ops->interrupt_allowed(vcpu);
  2674. }
  2675. static bool try_async_pf(struct kvm_vcpu *vcpu, bool prefault, gfn_t gfn,
  2676. gva_t gva, pfn_t *pfn, bool write, bool *writable)
  2677. {
  2678. bool async;
  2679. *pfn = gfn_to_pfn_async(vcpu->kvm, gfn, &async, write, writable);
  2680. if (!async)
  2681. return false; /* *pfn has correct page already */
  2682. if (!prefault && can_do_async_pf(vcpu)) {
  2683. trace_kvm_try_async_get_page(gva, gfn);
  2684. if (kvm_find_async_pf_gfn(vcpu, gfn)) {
  2685. trace_kvm_async_pf_doublefault(gva, gfn);
  2686. kvm_make_request(KVM_REQ_APF_HALT, vcpu);
  2687. return true;
  2688. } else if (kvm_arch_setup_async_pf(vcpu, gva, gfn))
  2689. return true;
  2690. }
  2691. *pfn = gfn_to_pfn_prot(vcpu->kvm, gfn, write, writable);
  2692. return false;
  2693. }
  2694. static int tdp_page_fault(struct kvm_vcpu *vcpu, gva_t gpa, u32 error_code,
  2695. bool prefault)
  2696. {
  2697. pfn_t pfn;
  2698. int r;
  2699. int level;
  2700. int force_pt_level;
  2701. gfn_t gfn = gpa >> PAGE_SHIFT;
  2702. unsigned long mmu_seq;
  2703. int write = error_code & PFERR_WRITE_MASK;
  2704. bool map_writable;
  2705. ASSERT(vcpu);
  2706. ASSERT(VALID_PAGE(vcpu->arch.mmu.root_hpa));
  2707. if (unlikely(error_code & PFERR_RSVD_MASK))
  2708. return handle_mmio_page_fault(vcpu, gpa, error_code, true);
  2709. r = mmu_topup_memory_caches(vcpu);
  2710. if (r)
  2711. return r;
  2712. force_pt_level = mapping_level_dirty_bitmap(vcpu, gfn);
  2713. if (likely(!force_pt_level)) {
  2714. level = mapping_level(vcpu, gfn);
  2715. gfn &= ~(KVM_PAGES_PER_HPAGE(level) - 1);
  2716. } else
  2717. level = PT_PAGE_TABLE_LEVEL;
  2718. if (fast_page_fault(vcpu, gpa, level, error_code))
  2719. return 0;
  2720. mmu_seq = vcpu->kvm->mmu_notifier_seq;
  2721. smp_rmb();
  2722. if (try_async_pf(vcpu, prefault, gfn, gpa, &pfn, write, &map_writable))
  2723. return 0;
  2724. if (handle_abnormal_pfn(vcpu, 0, gfn, pfn, ACC_ALL, &r))
  2725. return r;
  2726. spin_lock(&vcpu->kvm->mmu_lock);
  2727. if (mmu_notifier_retry(vcpu->kvm, mmu_seq))
  2728. goto out_unlock;
  2729. if (likely(!force_pt_level))
  2730. transparent_hugepage_adjust(vcpu, &gfn, &pfn, &level);
  2731. r = __direct_map(vcpu, gpa, write, map_writable,
  2732. level, gfn, pfn, prefault);
  2733. spin_unlock(&vcpu->kvm->mmu_lock);
  2734. return r;
  2735. out_unlock:
  2736. spin_unlock(&vcpu->kvm->mmu_lock);
  2737. kvm_release_pfn_clean(pfn);
  2738. return 0;
  2739. }
static void nonpaging_free(struct kvm_vcpu *vcpu)
{
	mmu_free_roots(vcpu);
}

static int nonpaging_init_context(struct kvm_vcpu *vcpu,
				  struct kvm_mmu *context)
{
	context->new_cr3 = nonpaging_new_cr3;
	context->page_fault = nonpaging_page_fault;
	context->gva_to_gpa = nonpaging_gva_to_gpa;
	context->free = nonpaging_free;
	context->sync_page = nonpaging_sync_page;
	context->invlpg = nonpaging_invlpg;
	context->update_pte = nonpaging_update_pte;
	context->root_level = 0;
	context->shadow_root_level = PT32E_ROOT_LEVEL;
	context->root_hpa = INVALID_PAGE;
	context->direct_map = true;
	context->nx = false;
	return 0;
}

void kvm_mmu_flush_tlb(struct kvm_vcpu *vcpu)
{
	++vcpu->stat.tlb_flush;
	kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
}

static void paging_new_cr3(struct kvm_vcpu *vcpu)
{
	pgprintk("%s: cr3 %lx\n", __func__, kvm_read_cr3(vcpu));
	mmu_free_roots(vcpu);
}

static unsigned long get_cr3(struct kvm_vcpu *vcpu)
{
	return kvm_read_cr3(vcpu);
}

static void inject_page_fault(struct kvm_vcpu *vcpu,
			      struct x86_exception *fault)
{
	vcpu->arch.mmu.inject_page_fault(vcpu, fault);
}

static void paging_free(struct kvm_vcpu *vcpu)
{
	nonpaging_free(vcpu);
}

static inline void protect_clean_gpte(unsigned *access, unsigned gpte)
{
	unsigned mask;

	BUILD_BUG_ON(PT_WRITABLE_MASK != ACC_WRITE_MASK);

	mask = (unsigned)~ACC_WRITE_MASK;
	/* Allow write access to dirty gptes */
	mask |= (gpte >> (PT_DIRTY_SHIFT - PT_WRITABLE_SHIFT)) & PT_WRITABLE_MASK;
	*access &= mask;
}

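/*
 * Keep a cached MMIO spte in sync with the guest pte: drop it when the
 * gfn no longer matches, refresh the gfn/access encoding otherwise.
 * Returns true if the spte was an MMIO spte and has been handled here.
 */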
static bool sync_mmio_spte(u64 *sptep, gfn_t gfn, unsigned access,
			   int *nr_present)
{
	if (unlikely(is_mmio_spte(*sptep))) {
		if (gfn != get_mmio_spte_gfn(*sptep)) {
			mmu_spte_clear_no_track(sptep);
			return true;
		}

		(*nr_present)++;
		mark_mmio_spte(sptep, gfn, access);
		return true;
	}

	return false;
}

static inline unsigned gpte_access(struct kvm_vcpu *vcpu, u64 gpte)
{
	unsigned access;

	access = (gpte & (PT_WRITABLE_MASK | PT_USER_MASK)) | ACC_EXEC_MASK;
	access &= ~(gpte >> PT64_NX_SHIFT);

	return access;
}

static inline bool is_last_gpte(struct kvm_mmu *mmu, unsigned level, unsigned gpte)
{
	unsigned index;

	index = level - 1;
	index |= (gpte & PT_PAGE_SIZE_MASK) >> (PT_PAGE_SIZE_SHIFT - 2);
	return mmu->last_pte_bitmap & (1 << index);
}

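/*
 * Instantiate the guest page-table walker twice from paging_tmpl.h: a
 * 64-bit flavour shared by long mode and PAE, and a 32-bit flavour for
 * legacy two-level paging.
 */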
#define PTTYPE 64
#include "paging_tmpl.h"
#undef PTTYPE

#define PTTYPE 32
#include "paging_tmpl.h"
#undef PTTYPE

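/*
 * Compute the reserved-bit masks for the current guest paging mode.
 * rsvd_bits_mask[0][lvl] is checked against ordinary entries at that
 * level, rsvd_bits_mask[1][lvl] against large-page entries.  Bits above
 * the guest's MAXPHYADDR are always reserved; bit 63 is reserved only
 * while the NX feature is disabled.
 */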
static void reset_rsvds_bits_mask(struct kvm_vcpu *vcpu,
				  struct kvm_mmu *context)
{
	int maxphyaddr = cpuid_maxphyaddr(vcpu);
	u64 exb_bit_rsvd = 0;

	if (!context->nx)
		exb_bit_rsvd = rsvd_bits(63, 63);
	switch (context->root_level) {
	case PT32_ROOT_LEVEL:
		/* no rsvd bits for 2 level 4K page table entries */
		context->rsvd_bits_mask[0][1] = 0;
		context->rsvd_bits_mask[0][0] = 0;
		context->rsvd_bits_mask[1][0] = context->rsvd_bits_mask[0][0];

		if (!is_pse(vcpu)) {
			context->rsvd_bits_mask[1][1] = 0;
			break;
		}

		if (is_cpuid_PSE36())
			/* 36-bit PSE 4MB page */
			context->rsvd_bits_mask[1][1] = rsvd_bits(17, 21);
		else
			/* 32-bit PSE 4MB page */
			context->rsvd_bits_mask[1][1] = rsvd_bits(13, 21);
		break;
	case PT32E_ROOT_LEVEL:
		context->rsvd_bits_mask[0][2] =
			rsvd_bits(maxphyaddr, 63) |
			rsvd_bits(7, 8) | rsvd_bits(1, 2);	/* PDPTE */
		context->rsvd_bits_mask[0][1] = exb_bit_rsvd |
			rsvd_bits(maxphyaddr, 62);		/* PDE */
		context->rsvd_bits_mask[0][0] = exb_bit_rsvd |
			rsvd_bits(maxphyaddr, 62);		/* PTE */
		context->rsvd_bits_mask[1][1] = exb_bit_rsvd |
			rsvd_bits(maxphyaddr, 62) |
			rsvd_bits(13, 20);			/* large page */
		context->rsvd_bits_mask[1][0] = context->rsvd_bits_mask[0][0];
		break;
	case PT64_ROOT_LEVEL:
		context->rsvd_bits_mask[0][3] = exb_bit_rsvd |
			rsvd_bits(maxphyaddr, 51) | rsvd_bits(7, 8);
		context->rsvd_bits_mask[0][2] = exb_bit_rsvd |
			rsvd_bits(maxphyaddr, 51) | rsvd_bits(7, 8);
		context->rsvd_bits_mask[0][1] = exb_bit_rsvd |
			rsvd_bits(maxphyaddr, 51);
		context->rsvd_bits_mask[0][0] = exb_bit_rsvd |
			rsvd_bits(maxphyaddr, 51);
		context->rsvd_bits_mask[1][3] = context->rsvd_bits_mask[0][3];
		context->rsvd_bits_mask[1][2] = exb_bit_rsvd |
			rsvd_bits(maxphyaddr, 51) |
			rsvd_bits(13, 29);
		context->rsvd_bits_mask[1][1] = exb_bit_rsvd |
			rsvd_bits(maxphyaddr, 51) |
			rsvd_bits(13, 20);			/* large page */
		context->rsvd_bits_mask[1][0] = context->rsvd_bits_mask[0][0];
		break;
	}
}

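/*
 * Precompute, for every page-fault error code and every combination of
 * pte access bits, whether the access faults.  Each byte of
 * mmu->permissions[] is indexed by pfec >> 1 (the present bit is
 * dropped) and carries one fault bit per pte_access value, so a
 * permission check reduces to a single table lookup, roughly:
 *
 *	fault = (mmu->permissions[pfec >> 1] >> pte_access) & 1;
 */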
static void update_permission_bitmask(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu)
{
	unsigned bit, byte, pfec;
	u8 map;
	bool fault, x, w, u, wf, uf, ff, smep;

	smep = kvm_read_cr4_bits(vcpu, X86_CR4_SMEP);
	for (byte = 0; byte < ARRAY_SIZE(mmu->permissions); ++byte) {
		pfec = byte << 1;
		map = 0;
		wf = pfec & PFERR_WRITE_MASK;
		uf = pfec & PFERR_USER_MASK;
		ff = pfec & PFERR_FETCH_MASK;
		for (bit = 0; bit < 8; ++bit) {
			x = bit & ACC_EXEC_MASK;
			w = bit & ACC_WRITE_MASK;
			u = bit & ACC_USER_MASK;

			/* Not really needed: !nx will cause pte.nx to fault */
			x |= !mmu->nx;
			/* Allow supervisor writes if !cr0.wp */
			w |= !is_write_protection(vcpu) && !uf;
			/* Disallow supervisor fetches of user code if cr4.smep */
			x &= !(smep && u && !uf);

			fault = (ff && !x) || (uf && !u) || (wf && !w);
			map |= fault << bit;
		}
		mmu->permissions[byte] = map;
	}
}

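/*
 * Precompute which (level, PS bit) pairs denote a terminal gpte.  The
 * bitmap is indexed by (level - 1) | (ps << 2), matching the index
 * built in is_last_gpte() above.
 */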
static void update_last_pte_bitmap(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu)
{
	u8 map;
	unsigned level, root_level = mmu->root_level;
	const unsigned ps_set_index = 1 << 2;  /* bit 2 of index: ps */

	if (root_level == PT32E_ROOT_LEVEL)
		--root_level;
	/* PT_PAGE_TABLE_LEVEL always terminates */
	map = 1 | (1 << ps_set_index);
	for (level = PT_DIRECTORY_LEVEL; level <= root_level; ++level) {
		if (level <= PT_PDPE_LEVEL
		    && (mmu->root_level >= PT32E_ROOT_LEVEL || is_pse(vcpu)))
			map |= 1 << (ps_set_index | (level - 1));
	}
	mmu->last_pte_bitmap = map;
}

static int paging64_init_context_common(struct kvm_vcpu *vcpu,
					struct kvm_mmu *context,
					int level)
{
	context->nx = is_nx(vcpu);
	context->root_level = level;

	reset_rsvds_bits_mask(vcpu, context);
	update_permission_bitmask(vcpu, context);
	update_last_pte_bitmap(vcpu, context);

	ASSERT(is_pae(vcpu));
	context->new_cr3 = paging_new_cr3;
	context->page_fault = paging64_page_fault;
	context->gva_to_gpa = paging64_gva_to_gpa;
	context->sync_page = paging64_sync_page;
	context->invlpg = paging64_invlpg;
	context->update_pte = paging64_update_pte;
	context->free = paging_free;
	context->shadow_root_level = level;
	context->root_hpa = INVALID_PAGE;
	context->direct_map = false;
	return 0;
}

static int paging64_init_context(struct kvm_vcpu *vcpu,
				 struct kvm_mmu *context)
{
	return paging64_init_context_common(vcpu, context, PT64_ROOT_LEVEL);
}

static int paging32_init_context(struct kvm_vcpu *vcpu,
				 struct kvm_mmu *context)
{
	context->nx = false;
	context->root_level = PT32_ROOT_LEVEL;

	reset_rsvds_bits_mask(vcpu, context);
	update_permission_bitmask(vcpu, context);
	update_last_pte_bitmap(vcpu, context);

	context->new_cr3 = paging_new_cr3;
	context->page_fault = paging32_page_fault;
	context->gva_to_gpa = paging32_gva_to_gpa;
	context->free = paging_free;
	context->sync_page = paging32_sync_page;
	context->invlpg = paging32_invlpg;
	context->update_pte = paging32_update_pte;
	context->shadow_root_level = PT32E_ROOT_LEVEL;
	context->root_hpa = INVALID_PAGE;
	context->direct_map = false;
	return 0;
}

static int paging32E_init_context(struct kvm_vcpu *vcpu,
				  struct kvm_mmu *context)
{
	return paging64_init_context_common(vcpu, context, PT32E_ROOT_LEVEL);
}

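/*
 * Set up the MMU for two-dimensional paging.  Faults are resolved by
 * tdp_page_fault() without walking guest page tables, but gva_to_gpa
 * still follows the guest's current paging mode so emulated accesses
 * translate correctly.
 */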
static int init_kvm_tdp_mmu(struct kvm_vcpu *vcpu)
{
	struct kvm_mmu *context = vcpu->arch.walk_mmu;

	context->base_role.word = 0;
	context->new_cr3 = nonpaging_new_cr3;
	context->page_fault = tdp_page_fault;
	context->free = nonpaging_free;
	context->sync_page = nonpaging_sync_page;
	context->invlpg = nonpaging_invlpg;
	context->update_pte = nonpaging_update_pte;
	context->shadow_root_level = kvm_x86_ops->get_tdp_level();
	context->root_hpa = INVALID_PAGE;
	context->direct_map = true;
	context->set_cr3 = kvm_x86_ops->set_tdp_cr3;
	context->get_cr3 = get_cr3;
	context->get_pdptr = kvm_pdptr_read;
	context->inject_page_fault = kvm_inject_page_fault;

	if (!is_paging(vcpu)) {
		context->nx = false;
		context->gva_to_gpa = nonpaging_gva_to_gpa;
		context->root_level = 0;
	} else if (is_long_mode(vcpu)) {
		context->nx = is_nx(vcpu);
		context->root_level = PT64_ROOT_LEVEL;
		reset_rsvds_bits_mask(vcpu, context);
		context->gva_to_gpa = paging64_gva_to_gpa;
	} else if (is_pae(vcpu)) {
		context->nx = is_nx(vcpu);
		context->root_level = PT32E_ROOT_LEVEL;
		reset_rsvds_bits_mask(vcpu, context);
		context->gva_to_gpa = paging64_gva_to_gpa;
	} else {
		context->nx = false;
		context->root_level = PT32_ROOT_LEVEL;
		reset_rsvds_bits_mask(vcpu, context);
		context->gva_to_gpa = paging32_gva_to_gpa;
	}

	update_permission_bitmask(vcpu, context);
	update_last_pte_bitmap(vcpu, context);

	return 0;
}

int kvm_init_shadow_mmu(struct kvm_vcpu *vcpu, struct kvm_mmu *context)
{
	int r;
	bool smep = kvm_read_cr4_bits(vcpu, X86_CR4_SMEP);

	ASSERT(vcpu);
	ASSERT(!VALID_PAGE(vcpu->arch.mmu.root_hpa));

	if (!is_paging(vcpu))
		r = nonpaging_init_context(vcpu, context);
	else if (is_long_mode(vcpu))
		r = paging64_init_context(vcpu, context);
	else if (is_pae(vcpu))
		r = paging32E_init_context(vcpu, context);
	else
		r = paging32_init_context(vcpu, context);

	vcpu->arch.mmu.base_role.nxe = is_nx(vcpu);
	vcpu->arch.mmu.base_role.cr4_pae = !!is_pae(vcpu);
	vcpu->arch.mmu.base_role.cr0_wp = is_write_protection(vcpu);
	vcpu->arch.mmu.base_role.smep_andnot_wp
		= smep && !is_write_protection(vcpu);

	return r;
}
EXPORT_SYMBOL_GPL(kvm_init_shadow_mmu);

static int init_kvm_softmmu(struct kvm_vcpu *vcpu)
{
	int r = kvm_init_shadow_mmu(vcpu, vcpu->arch.walk_mmu);

	vcpu->arch.walk_mmu->set_cr3 = kvm_x86_ops->set_cr3;
	vcpu->arch.walk_mmu->get_cr3 = get_cr3;
	vcpu->arch.walk_mmu->get_pdptr = kvm_pdptr_read;
	vcpu->arch.walk_mmu->inject_page_fault = kvm_inject_page_fault;

	return r;
}

static int init_kvm_nested_mmu(struct kvm_vcpu *vcpu)
{
	struct kvm_mmu *g_context = &vcpu->arch.nested_mmu;

	g_context->get_cr3 = get_cr3;
	g_context->get_pdptr = kvm_pdptr_read;
	g_context->inject_page_fault = kvm_inject_page_fault;

	/*
	 * Note that arch.mmu.gva_to_gpa translates l2_gva to l1_gpa. The
	 * translation of l2_gpa to l1_gpa addresses is done using the
	 * arch.nested_mmu.gva_to_gpa function. Basically the gva_to_gpa
	 * functions between mmu and nested_mmu are swapped.
	 */
	if (!is_paging(vcpu)) {
		g_context->nx = false;
		g_context->root_level = 0;
		g_context->gva_to_gpa = nonpaging_gva_to_gpa_nested;
	} else if (is_long_mode(vcpu)) {
		g_context->nx = is_nx(vcpu);
		g_context->root_level = PT64_ROOT_LEVEL;
		reset_rsvds_bits_mask(vcpu, g_context);
		g_context->gva_to_gpa = paging64_gva_to_gpa_nested;
	} else if (is_pae(vcpu)) {
		g_context->nx = is_nx(vcpu);
		g_context->root_level = PT32E_ROOT_LEVEL;
		reset_rsvds_bits_mask(vcpu, g_context);
		g_context->gva_to_gpa = paging64_gva_to_gpa_nested;
	} else {
		g_context->nx = false;
		g_context->root_level = PT32_ROOT_LEVEL;
		reset_rsvds_bits_mask(vcpu, g_context);
		g_context->gva_to_gpa = paging32_gva_to_gpa_nested;
	}

	update_permission_bitmask(vcpu, g_context);
	update_last_pte_bitmap(vcpu, g_context);

	return 0;
}

static int init_kvm_mmu(struct kvm_vcpu *vcpu)
{
	if (mmu_is_nested(vcpu))
		return init_kvm_nested_mmu(vcpu);
	else if (tdp_enabled)
		return init_kvm_tdp_mmu(vcpu);
	else
		return init_kvm_softmmu(vcpu);
}

static void destroy_kvm_mmu(struct kvm_vcpu *vcpu)
{
	ASSERT(vcpu);
	if (VALID_PAGE(vcpu->arch.mmu.root_hpa))
		/* mmu.free() should set root_hpa = INVALID_PAGE */
		vcpu->arch.mmu.free(vcpu);
}

int kvm_mmu_reset_context(struct kvm_vcpu *vcpu)
{
	destroy_kvm_mmu(vcpu);
	return init_kvm_mmu(vcpu);
}
EXPORT_SYMBOL_GPL(kvm_mmu_reset_context);

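/*
 * Allocate and sync the shadow roots, then point the hardware at them
 * via set_cr3() so the vcpu can enter the guest with a valid root.
 */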
int kvm_mmu_load(struct kvm_vcpu *vcpu)
{
	int r;

	r = mmu_topup_memory_caches(vcpu);
	if (r)
		goto out;
	r = mmu_alloc_roots(vcpu);
	spin_lock(&vcpu->kvm->mmu_lock);
	mmu_sync_roots(vcpu);
	spin_unlock(&vcpu->kvm->mmu_lock);
	if (r)
		goto out;
	/* set_cr3() should ensure TLB has been flushed */
	vcpu->arch.mmu.set_cr3(vcpu, vcpu->arch.mmu.root_hpa);
out:
	return r;
}
EXPORT_SYMBOL_GPL(kvm_mmu_load);

void kvm_mmu_unload(struct kvm_vcpu *vcpu)
{
	mmu_free_roots(vcpu);
}
EXPORT_SYMBOL_GPL(kvm_mmu_unload);

static void mmu_pte_write_new_pte(struct kvm_vcpu *vcpu,
				  struct kvm_mmu_page *sp, u64 *spte,
				  const void *new)
{
	if (sp->role.level != PT_PAGE_TABLE_LEVEL) {
		++vcpu->kvm->stat.mmu_pde_zapped;
		return;
	}

	++vcpu->kvm->stat.mmu_pte_updated;
	vcpu->arch.mmu.update_pte(vcpu, sp, spte, new);
}

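/*
 * Other vcpus' TLBs need flushing only when an existing translation
 * became more restrictive: the spte was removed, points somewhere else,
 * or lost a permission bit (NX is XORed in first so that setting it
 * counts as removing a permission).
 */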
static bool need_remote_flush(u64 old, u64 new)
{
	if (!is_shadow_present_pte(old))
		return false;
	if (!is_shadow_present_pte(new))
		return true;
	if ((old ^ new) & PT64_BASE_ADDR_MASK)
		return true;
	old ^= PT64_NX_MASK;
	new ^= PT64_NX_MASK;
	return (old & ~new & PT64_PERM_MASK) != 0;
}

static void mmu_pte_write_flush_tlb(struct kvm_vcpu *vcpu, bool zap_page,
				    bool remote_flush, bool local_flush)
{
	if (zap_page)
		return;

	if (remote_flush)
		kvm_flush_remote_tlbs(vcpu->kvm);
	else if (local_flush)
		kvm_mmu_flush_tlb(vcpu);
}

static u64 mmu_pte_write_fetch_gpte(struct kvm_vcpu *vcpu, gpa_t *gpa,
				    const u8 *new, int *bytes)
{
	u64 gentry;
	int r;

	/*
	 * Assume that the pte write is on a page table of the same type
	 * as the current vcpu paging mode, since we update the sptes only
	 * when they have the same mode.
	 */
	if (is_pae(vcpu) && *bytes == 4) {
		/* Handle a 32-bit guest writing two halves of a 64-bit gpte */
		*gpa &= ~(gpa_t)7;
		*bytes = 8;
		r = kvm_read_guest(vcpu->kvm, *gpa, &gentry, 8);
		if (r)
			gentry = 0;
		new = (const u8 *)&gentry;
	}

	switch (*bytes) {
	case 4:
		gentry = *(const u32 *)new;
		break;
	case 8:
		gentry = *(const u64 *)new;
		break;
	default:
		gentry = 0;
		break;
	}

	return gentry;
}

/*
 * If we're seeing too many writes to a page, it may no longer be a page
 * table, or we may be forking, in which case it is better to unmap the
 * page.
 */
static bool detect_write_flooding(struct kvm_mmu_page *sp)
{
	/*
	 * Skip write-flooding detection for sps whose level is 1, because
	 * such an sp can become unsync, in which case the guest page is no
	 * longer write-protected.
	 */
	if (sp->role.level == PT_PAGE_TABLE_LEVEL)
		return false;

	return ++sp->write_flooding_count >= 3;
}

/*
 * Misaligned accesses are too much trouble to fix up; also, they usually
 * indicate a page is not used as a page table.
 */
static bool detect_write_misaligned(struct kvm_mmu_page *sp, gpa_t gpa,
				    int bytes)
{
	unsigned offset, pte_size, misaligned;

	pgprintk("misaligned: gpa %llx bytes %d role %x\n",
		 gpa, bytes, sp->role.word);

	offset = offset_in_page(gpa);
	pte_size = sp->role.cr4_pae ? 8 : 4;

	/*
	 * Sometimes the OS writes only the last byte of a pte to update
	 * status bits; in Linux, for instance, clear_bit() uses an andb
	 * instruction.
	 */
	if (!(offset & (pte_size - 1)) && bytes == 1)
		return false;

	misaligned = (offset ^ (offset + bytes - 1)) & ~(pte_size - 1);
	misaligned |= bytes < 4;

	return misaligned;
}

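/*
 * Locate the spte(s) shadowing the written gpte; returns NULL when the
 * write falls in a quadrant this sp does not shadow.  For 32-bit guest
 * pdes two sptes are affected, since a 4MB guest pde is shadowed by a
 * pair of 2MB pdes.
 */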
static u64 *get_written_sptes(struct kvm_mmu_page *sp, gpa_t gpa, int *nspte)
{
	unsigned page_offset, quadrant;
	u64 *spte;
	int level;

	page_offset = offset_in_page(gpa);
	level = sp->role.level;
	*nspte = 1;
	if (!sp->role.cr4_pae) {
		page_offset <<= 1;	/* 32->64 */
		/*
		 * A 32-bit pde maps 4MB while the shadow pdes map
		 * only 2MB.  So we need to double the offset again
		 * and zap two pdes instead of one.
		 */
		if (level == PT32_ROOT_LEVEL) {
			page_offset &= ~7; /* kill rounding error */
			page_offset <<= 1;
			*nspte = 2;
		}
		quadrant = page_offset >> PAGE_SHIFT;
		page_offset &= ~PAGE_MASK;
		if (quadrant != sp->role.quadrant)
			return NULL;
	}

	spte = &sp->spt[page_offset / sizeof(*spte)];
	return spte;
}

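/*
 * Emulated-write hook for write-protected guest page tables: zap the
 * affected sptes and, when the written gpte is compatible with the
 * current mmu role, prefill the new spte instead of waiting for the
 * next fault on it.
 */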
void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
		       const u8 *new, int bytes)
{
	gfn_t gfn = gpa >> PAGE_SHIFT;
	union kvm_mmu_page_role mask = { .word = 0 };
	struct kvm_mmu_page *sp;
	LIST_HEAD(invalid_list);
	u64 entry, gentry, *spte;
	int npte;
	bool remote_flush, local_flush, zap_page;

	/*
	 * If we don't have indirect shadow pages, it means no page is
	 * write-protected, so we can simply exit.
	 */
	if (!ACCESS_ONCE(vcpu->kvm->arch.indirect_shadow_pages))
		return;

	zap_page = remote_flush = local_flush = false;

	pgprintk("%s: gpa %llx bytes %d\n", __func__, gpa, bytes);

	gentry = mmu_pte_write_fetch_gpte(vcpu, &gpa, new, &bytes);

	/*
	 * No need to care whether the memory allocation succeeded, since
	 * pte prefetch is skipped if there are not enough objects in the
	 * cache.
	 */
	mmu_topup_memory_caches(vcpu);

	spin_lock(&vcpu->kvm->mmu_lock);
	++vcpu->kvm->stat.mmu_pte_write;
	kvm_mmu_audit(vcpu, AUDIT_PRE_PTE_WRITE);

	mask.cr0_wp = mask.cr4_pae = mask.nxe = 1;
	for_each_gfn_indirect_valid_sp(vcpu->kvm, sp, gfn) {
		if (detect_write_misaligned(sp, gpa, bytes) ||
		    detect_write_flooding(sp)) {
			zap_page |= !!kvm_mmu_prepare_zap_page(vcpu->kvm, sp,
							       &invalid_list);
			++vcpu->kvm->stat.mmu_flooded;
			continue;
		}

		spte = get_written_sptes(sp, gpa, &npte);
		if (!spte)
			continue;

		local_flush = true;
		while (npte--) {
			entry = *spte;
			mmu_page_zap_pte(vcpu->kvm, sp, spte);
			if (gentry &&
			    !((sp->role.word ^ vcpu->arch.mmu.base_role.word)
			      & mask.word) && rmap_can_add(vcpu))
				mmu_pte_write_new_pte(vcpu, sp, spte, &gentry);
			if (need_remote_flush(entry, *spte))
				remote_flush = true;
			++spte;
		}
	}
	mmu_pte_write_flush_tlb(vcpu, zap_page, remote_flush, local_flush);
	kvm_mmu_commit_zap_page(vcpu->kvm, &invalid_list);
	kvm_mmu_audit(vcpu, AUDIT_POST_PTE_WRITE);
	spin_unlock(&vcpu->kvm->mmu_lock);
}

int kvm_mmu_unprotect_page_virt(struct kvm_vcpu *vcpu, gva_t gva)
{
	gpa_t gpa;
	int r;

	if (vcpu->arch.mmu.direct_map)
		return 0;

	gpa = kvm_mmu_gva_to_gpa_read(vcpu, gva, NULL);

	r = kvm_mmu_unprotect_page(vcpu->kvm, gpa >> PAGE_SHIFT);

	return r;
}
EXPORT_SYMBOL_GPL(kvm_mmu_unprotect_page_virt);

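/*
 * Keep enough shadow pages allocatable: once the free pool drops below
 * KVM_MIN_FREE_MMU_PAGES, zap the oldest pages until it is refilled to
 * KVM_REFILL_PAGES.
 */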
static void make_mmu_pages_available(struct kvm_vcpu *vcpu)
{
	LIST_HEAD(invalid_list);

	if (likely(kvm_mmu_available_pages(vcpu->kvm) >= KVM_MIN_FREE_MMU_PAGES))
		return;

	while (kvm_mmu_available_pages(vcpu->kvm) < KVM_REFILL_PAGES) {
		if (!prepare_zap_oldest_mmu_page(vcpu->kvm, &invalid_list))
			break;

		++vcpu->kvm->stat.mmu_recycled;
	}
	kvm_mmu_commit_zap_page(vcpu->kvm, &invalid_list);
}

static bool is_mmio_page_fault(struct kvm_vcpu *vcpu, gva_t addr)
{
	if (vcpu->arch.mmu.direct_map || mmu_is_nested(vcpu))
		return vcpu_match_mmio_gpa(vcpu, addr);

	return vcpu_match_mmio_gva(vcpu, addr);
}

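/*
 * Common page-fault entry point.  Returns a negative errno on internal
 * failure, 1 when the fault has been fixed and the guest can simply
 * re-execute the instruction, and 0 when the vcpu must exit to
 * userspace (MMIO or failed emulation).
 */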
int kvm_mmu_page_fault(struct kvm_vcpu *vcpu, gva_t cr2, u32 error_code,
		       void *insn, int insn_len)
{
	int r, emulation_type = EMULTYPE_RETRY;
	enum emulation_result er;

	r = vcpu->arch.mmu.page_fault(vcpu, cr2, error_code, false);
	if (r < 0)
		goto out;

	if (!r) {
		r = 1;
		goto out;
	}

	if (is_mmio_page_fault(vcpu, cr2))
		emulation_type = 0;

	er = x86_emulate_instruction(vcpu, cr2, emulation_type, insn, insn_len);

	switch (er) {
	case EMULATE_DONE:
		return 1;
	case EMULATE_DO_MMIO:
		++vcpu->stat.mmio_exits;
		/* fall through */
	case EMULATE_FAIL:
		return 0;
	default:
		BUG();
	}
out:
	return r;
}
EXPORT_SYMBOL_GPL(kvm_mmu_page_fault);

void kvm_mmu_invlpg(struct kvm_vcpu *vcpu, gva_t gva)
{
	vcpu->arch.mmu.invlpg(vcpu, gva);
	kvm_mmu_flush_tlb(vcpu);
	++vcpu->stat.invlpg;
}
EXPORT_SYMBOL_GPL(kvm_mmu_invlpg);

void kvm_enable_tdp(void)
{
	tdp_enabled = true;
}
EXPORT_SYMBOL_GPL(kvm_enable_tdp);

void kvm_disable_tdp(void)
{
	tdp_enabled = false;
}
EXPORT_SYMBOL_GPL(kvm_disable_tdp);

static void free_mmu_pages(struct kvm_vcpu *vcpu)
{
	free_page((unsigned long)vcpu->arch.mmu.pae_root);
	if (vcpu->arch.mmu.lm_root != NULL)
		free_page((unsigned long)vcpu->arch.mmu.lm_root);
}

static int alloc_mmu_pages(struct kvm_vcpu *vcpu)
{
	struct page *page;
	int i;

	ASSERT(vcpu);

	/*
	 * When emulating 32-bit mode, cr3 is only 32 bits even on x86_64.
	 * Therefore we need to allocate shadow page tables in the first
	 * 4GB of memory, which happens to fit the DMA32 zone.
	 */
	page = alloc_page(GFP_KERNEL | __GFP_DMA32);
	if (!page)
		return -ENOMEM;

	vcpu->arch.mmu.pae_root = page_address(page);
	for (i = 0; i < 4; ++i)
		vcpu->arch.mmu.pae_root[i] = INVALID_PAGE;

	return 0;
}

int kvm_mmu_create(struct kvm_vcpu *vcpu)
{
	ASSERT(vcpu);

	vcpu->arch.walk_mmu = &vcpu->arch.mmu;
	vcpu->arch.mmu.root_hpa = INVALID_PAGE;
	vcpu->arch.mmu.translate_gpa = translate_gpa;
	vcpu->arch.nested_mmu.translate_gpa = translate_nested_gpa;

	return alloc_mmu_pages(vcpu);
}

int kvm_mmu_setup(struct kvm_vcpu *vcpu)
{
	ASSERT(vcpu);
	ASSERT(!VALID_PAGE(vcpu->arch.mmu.root_hpa));

	return init_kvm_mmu(vcpu);
}

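/*
 * Write-protect every spte that maps the slot, at all page sizes, and
 * periodically drop mmu_lock (flushing remote TLBs first) so lock hold
 * times stay bounded on large slots.
 */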
void kvm_mmu_slot_remove_write_access(struct kvm *kvm, int slot)
{
	struct kvm_memory_slot *memslot;
	gfn_t last_gfn;
	int i;

	memslot = id_to_memslot(kvm->memslots, slot);
	last_gfn = memslot->base_gfn + memslot->npages - 1;

	spin_lock(&kvm->mmu_lock);

	for (i = PT_PAGE_TABLE_LEVEL;
	     i < PT_PAGE_TABLE_LEVEL + KVM_NR_PAGE_SIZES; ++i) {
		unsigned long *rmapp;
		unsigned long last_index, index;

		rmapp = memslot->arch.rmap[i - PT_PAGE_TABLE_LEVEL];
		last_index = gfn_to_index(last_gfn, memslot->base_gfn, i);

		for (index = 0; index <= last_index; ++index, ++rmapp) {
			if (*rmapp)
				__rmap_write_protect(kvm, rmapp, false);

			if (need_resched() || spin_needbreak(&kvm->mmu_lock)) {
				kvm_flush_remote_tlbs(kvm);
				cond_resched_lock(&kvm->mmu_lock);
			}
		}
	}

	kvm_flush_remote_tlbs(kvm);
	spin_unlock(&kvm->mmu_lock);
}

void kvm_mmu_zap_all(struct kvm *kvm)
{
	struct kvm_mmu_page *sp, *node;
	LIST_HEAD(invalid_list);

	spin_lock(&kvm->mmu_lock);
restart:
	list_for_each_entry_safe(sp, node, &kvm->arch.active_mmu_pages, link)
		if (kvm_mmu_prepare_zap_page(kvm, sp, &invalid_list))
			goto restart;

	kvm_mmu_commit_zap_page(kvm, &invalid_list);
	spin_unlock(&kvm->mmu_lock);
}

void kvm_mmu_zap_mmio_sptes(struct kvm *kvm)
{
	struct kvm_mmu_page *sp, *node;
	LIST_HEAD(invalid_list);

	spin_lock(&kvm->mmu_lock);
restart:
	list_for_each_entry_safe(sp, node, &kvm->arch.active_mmu_pages, link) {
		if (!sp->mmio_cached)
			continue;
		if (kvm_mmu_prepare_zap_page(kvm, sp, &invalid_list))
			goto restart;
	}

	kvm_mmu_commit_zap_page(kvm, &invalid_list);
	spin_unlock(&kvm->mmu_lock);
}

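/*
 * Memory-shrinker callback: zap the oldest shadow page of the first VM
 * on vm_list that has any, then rotate that VM to the tail so repeated
 * invocations spread the cost across VMs.
 */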
static int mmu_shrink(struct shrinker *shrink, struct shrink_control *sc)
{
	struct kvm *kvm;
	int nr_to_scan = sc->nr_to_scan;

	if (nr_to_scan == 0)
		goto out;

	raw_spin_lock(&kvm_lock);

	list_for_each_entry(kvm, &vm_list, vm_list) {
		int idx;
		LIST_HEAD(invalid_list);

		/*
		 * Never scan more than sc->nr_to_scan VM instances.
		 * In practice we will not hit this condition, since we do
		 * not try to shrink more than one VM and it is very unlikely
		 * to see !n_used_mmu_pages so many times.
		 */
		if (!nr_to_scan--)
			break;
		/*
		 * n_used_mmu_pages is accessed without holding kvm->mmu_lock
		 * here.  We may skip a VM instance erroneously, but we do not
		 * want to shrink a VM that has only started to populate its
		 * MMU anyway.
		 */
		if (!kvm->arch.n_used_mmu_pages)
			continue;

		idx = srcu_read_lock(&kvm->srcu);
		spin_lock(&kvm->mmu_lock);

		prepare_zap_oldest_mmu_page(kvm, &invalid_list);
		kvm_mmu_commit_zap_page(kvm, &invalid_list);

		spin_unlock(&kvm->mmu_lock);
		srcu_read_unlock(&kvm->srcu, idx);

		list_move_tail(&kvm->vm_list, &vm_list);
		break;
	}

	raw_spin_unlock(&kvm_lock);

out:
	return percpu_counter_read_positive(&kvm_total_used_mmu_pages);
}

static struct shrinker mmu_shrinker = {
	.shrink = mmu_shrink,
	.seeks = DEFAULT_SEEKS * 10,
};

static void mmu_destroy_caches(void)
{
	if (pte_list_desc_cache)
		kmem_cache_destroy(pte_list_desc_cache);
	if (mmu_page_header_cache)
		kmem_cache_destroy(mmu_page_header_cache);
}

int kvm_mmu_module_init(void)
{
	pte_list_desc_cache = kmem_cache_create("pte_list_desc",
						sizeof(struct pte_list_desc),
						0, 0, NULL);
	if (!pte_list_desc_cache)
		goto nomem;

	mmu_page_header_cache = kmem_cache_create("kvm_mmu_page_header",
						  sizeof(struct kvm_mmu_page),
						  0, 0, NULL);
	if (!mmu_page_header_cache)
		goto nomem;

	if (percpu_counter_init(&kvm_total_used_mmu_pages, 0))
		goto nomem;

	register_shrinker(&mmu_shrinker);

	return 0;

nomem:
	mmu_destroy_caches();
	return -ENOMEM;
}

/*
 * Calculate the number of mmu pages needed for kvm.
 */
unsigned int kvm_mmu_calculate_mmu_pages(struct kvm *kvm)
{
	unsigned int nr_mmu_pages;
	unsigned int nr_pages = 0;
	struct kvm_memslots *slots;
	struct kvm_memory_slot *memslot;

	slots = kvm_memslots(kvm);

	kvm_for_each_memslot(memslot, slots)
		nr_pages += memslot->npages;

	nr_mmu_pages = nr_pages * KVM_PERMILLE_MMU_PAGES / 1000;
	nr_mmu_pages = max(nr_mmu_pages,
			   (unsigned int) KVM_MIN_ALLOC_MMU_PAGES);

	return nr_mmu_pages;
}

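/*
 * Record the spte at each level of the shadow walk for @addr into
 * sptes[level - 1] and return how many entries were found; callers such
 * as the EPT misconfiguration handler use this for diagnostics.
 */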
int kvm_mmu_get_spte_hierarchy(struct kvm_vcpu *vcpu, u64 addr, u64 sptes[4])
{
	struct kvm_shadow_walk_iterator iterator;
	u64 spte;
	int nr_sptes = 0;

	walk_shadow_page_lockless_begin(vcpu);
	for_each_shadow_entry_lockless(vcpu, addr, iterator, spte) {
		sptes[iterator.level-1] = spte;
		nr_sptes++;
		if (!is_shadow_present_pte(spte))
			break;
	}
	walk_shadow_page_lockless_end(vcpu);

	return nr_sptes;
}
EXPORT_SYMBOL_GPL(kvm_mmu_get_spte_hierarchy);

void kvm_mmu_destroy(struct kvm_vcpu *vcpu)
{
	ASSERT(vcpu);

	destroy_kvm_mmu(vcpu);
	free_mmu_pages(vcpu);
	mmu_free_memory_caches(vcpu);
}

void kvm_mmu_module_exit(void)
{
	mmu_destroy_caches();
	percpu_counter_destroy(&kvm_total_used_mmu_pages);
	unregister_shrinker(&mmu_shrinker);
	mmu_audit_disable();
}