page_tables.c

/*P:700
 * The pagetable code, on the other hand, still shows the scars of
 * previous encounters. It's functional, and as neat as it can be in the
 * circumstances, but be wary, for these things are subtle and break easily.
 * The Guest provides a virtual-to-physical mapping, but we can neither trust
 * it nor use it: we verify and convert it here then point the CPU to the
 * converted Guest pages when running the Guest.
:*/

/* Copyright (C) Rusty Russell IBM Corporation 2006.
 * GPL v2 and any later version */
#include <linux/mm.h>
#include <linux/gfp.h>
#include <linux/types.h>
#include <linux/spinlock.h>
#include <linux/random.h>
#include <linux/percpu.h>
#include <asm/tlbflush.h>
#include <asm/uaccess.h>
#include "lg.h"

/*M:008
 * We hold a reference to pages, which prevents them from being swapped.
 * It'd be nice to have a callback in the "struct mm_struct" when Linux wants
 * to swap out. If we had this, and a shrinker callback to trim PTE pages, we
 * could probably consider launching Guests as non-root.
:*/

/*H:300
 * The Page Table Code
 *
 * We use two-level page tables for the Guest, or three-level with PAE. If
 * you're not entirely comfortable with virtual addresses, physical addresses
 * and page tables then I recommend you review arch/x86/lguest/boot.c's "Page
 * Table Handling" (with diagrams!).
 *
 * The Guest keeps page tables, but we maintain the actual ones here: these are
 * called "shadow" page tables. Which is a very Guest-centric name: these are
 * the real page tables the CPU uses, although we keep them up to date to
 * reflect the Guest's. (See what I mean about weird naming? Since when do
 * shadows reflect anything?)
 *
 * Anyway, this is the most complicated part of the Host code. There are seven
 * parts to this:
 * (i) Looking up a page table entry when the Guest faults,
 * (ii) Making sure the Guest stack is mapped,
 * (iii) Setting up a page table entry when the Guest tells us one has changed,
 * (iv) Switching page tables,
 * (v) Flushing (throwing away) page tables,
 * (vi) Mapping the Switcher when the Guest is about to run,
 * (vii) Setting up the page tables initially.
:*/

/*
 * The Switcher uses the complete top PTE page. That's 1024 PTE entries (4MB)
 * or 512 PTE entries with PAE (2MB).
 */
#define SWITCHER_PGD_INDEX (PTRS_PER_PGD - 1)

/*
 * For PAE we need the PMD index as well. We use the last 2MB, so we
 * will need the last pmd entry of the last pmd page.
 */
#ifdef CONFIG_X86_PAE
#define CHECK_GPGD_MASK _PAGE_PRESENT
#else
#define CHECK_GPGD_MASK _PAGE_TABLE
#endif
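
/*
 * (For reference: on x86, _PAGE_TABLE is the full set of flags a normal
 * page table entry carries: _PAGE_PRESENT|_PAGE_RW|_PAGE_USER|
 * _PAGE_ACCESSED|_PAGE_DIRTY. So check_gpgd() below rejects a non-PAE
 * Guest PGD entry carrying any flag outside that set, while with PAE the
 * only flag we allow in a top-level entry is _PAGE_PRESENT.)
 */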

/*H:320
 * The page table code is curly enough to need helper functions to keep it
 * clear and clean. The kernel itself provides many of them; one advantage
 * of insisting that the Guest and Host use the same CONFIG_X86_PAE setting.
 *
 * There are two functions which return pointers to the shadow (aka "real")
 * page tables.
 *
 * spgd_addr() takes the virtual address and returns a pointer to the top-level
 * page directory entry (PGD) for that address. Since we keep track of several
 * page tables, the "i" argument tells us which one we're interested in (it's
 * usually the current one).
 */
static pgd_t *spgd_addr(struct lg_cpu *cpu, u32 i, unsigned long vaddr)
{
        unsigned int index = pgd_index(vaddr);

        /* Return a pointer to the index'th pgd entry for the i'th page table. */
        return &cpu->lg->pgdirs[i].pgdir[index];
}
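
/*
 * A worked example (a sketch, assuming the usual !PAE 32-bit layout with
 * 4K pages): the virtual address 0xC0001234 splits as
 *
 *      pgd_index = 0xC0001234 >> 22          = 768
 *      pte_index = (0xC0001234 >> 12) & 1023 = 1
 *      offset    = 0xC0001234 & 0xFFF        = 0x234
 *
 * so spgd_addr() hands back entry 768 of the chosen shadow page table.
 */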

#ifdef CONFIG_X86_PAE
/*
 * This routine then takes the PGD entry given above, which contains the
 * address of the PMD page. It then returns a pointer to the PMD entry for the
 * given address.
 */
static pmd_t *spmd_addr(struct lg_cpu *cpu, pgd_t spgd, unsigned long vaddr)
{
        unsigned int index = pmd_index(vaddr);
        pmd_t *page;

        /* You should never call this if the PGD entry wasn't valid */
        BUG_ON(!(pgd_flags(spgd) & _PAGE_PRESENT));
        page = __va(pgd_pfn(spgd) << PAGE_SHIFT);

        return &page[index];
}
#endif

/*
 * This routine then takes the page directory entry returned above, which
 * contains the address of the page table entry (PTE) page. It then returns a
 * pointer to the PTE entry for the given address.
 */
static pte_t *spte_addr(struct lg_cpu *cpu, pgd_t spgd, unsigned long vaddr)
{
#ifdef CONFIG_X86_PAE
        pmd_t *pmd = spmd_addr(cpu, spgd, vaddr);
        pte_t *page = __va(pmd_pfn(*pmd) << PAGE_SHIFT);

        /* You should never call this if the PMD entry wasn't valid */
        BUG_ON(!(pmd_flags(*pmd) & _PAGE_PRESENT));
#else
        pte_t *page = __va(pgd_pfn(spgd) << PAGE_SHIFT);

        /* You should never call this if the PGD entry wasn't valid */
        BUG_ON(!(pgd_flags(spgd) & _PAGE_PRESENT));
#endif
        return &page[pte_index(vaddr)];
}

/*
 * These functions are just like the above, except they access the Guest
 * page tables. Hence they return a Guest address.
 */
static unsigned long gpgd_addr(struct lg_cpu *cpu, unsigned long vaddr)
{
        unsigned int index = vaddr >> PGDIR_SHIFT;

        return cpu->lg->pgdirs[cpu->cpu_pgd].gpgdir + index * sizeof(pgd_t);
}

#ifdef CONFIG_X86_PAE
/* Follow the PGD to the PMD. */
static unsigned long gpmd_addr(pgd_t gpgd, unsigned long vaddr)
{
        unsigned long gpage = pgd_pfn(gpgd) << PAGE_SHIFT;

        BUG_ON(!(pgd_flags(gpgd) & _PAGE_PRESENT));
        return gpage + pmd_index(vaddr) * sizeof(pmd_t);
}

/* Follow the PMD to the PTE. */
static unsigned long gpte_addr(struct lg_cpu *cpu,
                               pmd_t gpmd, unsigned long vaddr)
{
        unsigned long gpage = pmd_pfn(gpmd) << PAGE_SHIFT;

        BUG_ON(!(pmd_flags(gpmd) & _PAGE_PRESENT));
        return gpage + pte_index(vaddr) * sizeof(pte_t);
}
#else
/* Follow the PGD to the PTE (no mid-level for !PAE). */
static unsigned long gpte_addr(struct lg_cpu *cpu,
                               pgd_t gpgd, unsigned long vaddr)
{
        unsigned long gpage = pgd_pfn(gpgd) << PAGE_SHIFT;

        BUG_ON(!(pgd_flags(gpgd) & _PAGE_PRESENT));
        return gpage + pte_index(vaddr) * sizeof(pte_t);
}
#endif
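
/*
 * Again, a small worked example (a sketch, !PAE): if the Guest PGD entry
 * points at guest page 0x1000 and pte_index(vaddr) is 1, then with 4-byte
 * PTEs gpte_addr() returns guest-physical 0x1000 + 1*4 = 0x1004, which we
 * then lgread() rather than dereference directly.
 */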

/*:*/

/*M:007
 * get_pfn is slow: we could probably try to grab batches of pages here as
 * an optimization (ie. pre-faulting).
:*/

/*H:350
 * This routine takes a page number given by the Guest and converts it to
 * an actual, physical page number. It can fail for several reasons: the
 * virtual address might not be mapped by the Launcher, the write flag is set
 * and the page is read-only, or the write flag was set and the page was
 * shared so had to be copied, but we ran out of memory.
 *
 * This holds a reference to the page, so release_pte() is careful to put that
 * back.
 */
static unsigned long get_pfn(unsigned long virtpfn, int write)
{
        struct page *page;

        /* gup me one page at this address please! */
        if (get_user_pages_fast(virtpfn << PAGE_SHIFT, 1, write, &page) == 1)
                return page_to_pfn(page);

        /* This value indicates failure. */
        return -1UL;
}
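
/*
 * The M:007 batching idea, as an untested, purely illustrative sketch:
 * get_user_pages_fast() can pin several consecutive pages in one call, so
 * a hypothetical get_pfns() could pre-fault a small run and let callers
 * consume the extra pfns from an array:
 *
 *      static int get_pfns(unsigned long virtpfn, int write,
 *                          unsigned long pfns[], int n)
 *      {
 *              struct page *pages[16];
 *              int i, got;
 *
 *              if (n > 16)
 *                      n = 16;
 *              got = get_user_pages_fast(virtpfn << PAGE_SHIFT, n, write,
 *                                        pages);
 *              for (i = 0; i < got; i++)
 *                      pfns[i] = page_to_pfn(pages[i]);
 *              return got;
 *      }
 *
 * The caller would then have to release every pinned page, just as
 * release_pte() does for the single-page case.
 */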

/*H:340
 * Converting a Guest page table entry to a shadow (ie. real) page table
 * entry can be a little tricky. The flags are (almost) the same, but the
 * Guest PTE contains a virtual page number: the CPU needs the real page
 * number.
 */
static pte_t gpte_to_spte(struct lg_cpu *cpu, pte_t gpte, int write)
{
        unsigned long pfn, base, flags;

        /*
         * The Guest sets the global flag, because it thinks that it is using
         * PGE. We only told it to use PGE so it would tell us whether it was
         * flushing a kernel mapping or a userspace mapping. We don't actually
         * use the global bit, so throw it away.
         */
        flags = (pte_flags(gpte) & ~_PAGE_GLOBAL);

        /* The Guest's pages are offset inside the Launcher. */
        base = (unsigned long)cpu->lg->mem_base / PAGE_SIZE;

        /*
         * We need a temporary "unsigned long" variable to hold the answer from
         * get_pfn(), because it returns 0xFFFFFFFF on failure, which wouldn't
         * fit in spte.pfn. get_pfn() finds the real physical number of the
         * page, given the virtual number.
         */
        pfn = get_pfn(base + pte_pfn(gpte), write);
        if (pfn == -1UL) {
                kill_guest(cpu, "failed to get page %lu", pte_pfn(gpte));
                /*
                 * When we destroy the Guest, we'll go through the shadow page
                 * tables and release_pte() them. Make sure we don't think
                 * this one is valid!
                 */
                flags = 0;
        }

        /* Now we assemble our shadow PTE from the page number and flags. */
        return pfn_pte(pfn, __pgprot(flags));
}
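
/*
 * For example (a sketch with made-up numbers): if the Launcher mapped the
 * Guest's memory at 0x8000000, base is 0x8000000 / 4096 = 0x8000. A Guest
 * PTE for guest page 5 then refers to the Launcher's virtual page
 * 0x8000 + 5, and get_pfn() pins that page and hands back its real
 * physical page number, which is what goes into the shadow PTE.
 */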

/*H:460 And to complete the chain, release_pte() looks like this: */
static void release_pte(pte_t pte)
{
        /*
         * Remember that get_user_pages_fast() took a reference to the page, in
         * get_pfn()? We have to put it back now.
         */
        if (pte_flags(pte) & _PAGE_PRESENT)
                put_page(pte_page(pte));
}
/*:*/

static bool check_gpte(struct lg_cpu *cpu, pte_t gpte)
{
        if ((pte_flags(gpte) & _PAGE_PSE) ||
            pte_pfn(gpte) >= cpu->lg->pfn_limit) {
                kill_guest(cpu, "bad page table entry");
                return false;
        }
        return true;
}

static bool check_gpgd(struct lg_cpu *cpu, pgd_t gpgd)
{
        if ((pgd_flags(gpgd) & ~CHECK_GPGD_MASK) ||
            (pgd_pfn(gpgd) >= cpu->lg->pfn_limit)) {
                kill_guest(cpu, "bad page directory entry");
                return false;
        }
        return true;
}

#ifdef CONFIG_X86_PAE
static bool check_gpmd(struct lg_cpu *cpu, pmd_t gpmd)
{
        if ((pmd_flags(gpmd) & ~_PAGE_TABLE) ||
            (pmd_pfn(gpmd) >= cpu->lg->pfn_limit)) {
                kill_guest(cpu, "bad page middle directory entry");
                return false;
        }
        return true;
}
#endif

/*H:331
 * This is the core routine to walk the shadow page tables and find the page
 * table entry for a specific address.
 *
 * If allocate is set, then we allocate any missing levels, setting the flags
 * on the new page directory and mid-level directories using the arguments
 * (which are copied from the Guest's page table entries).
 */
static pte_t *find_spte(struct lg_cpu *cpu, unsigned long vaddr, bool allocate,
                        int pgd_prot, int pmd_prot)
{
        pgd_t *spgd;
        /* Mid level for PAE. */
#ifdef CONFIG_X86_PAE
        pmd_t *spmd;
#endif

        /* Get top level entry. */
        spgd = spgd_addr(cpu, cpu->cpu_pgd, vaddr);
        if (!(pgd_flags(*spgd) & _PAGE_PRESENT)) {
                /* No shadow entry: allocate a new shadow PTE page. */
                unsigned long ptepage;

                /* If they didn't want us to allocate anything, stop. */
                if (!allocate)
                        return NULL;

                ptepage = get_zeroed_page(GFP_KERNEL);
                /*
                 * This is not really the Guest's fault, but killing it is
                 * simple for this corner case.
                 */
                if (!ptepage) {
                        kill_guest(cpu, "out of memory allocating pte page");
                        return NULL;
                }

                /*
                 * And we copy the flags to the shadow PGD entry. The page
                 * number in the shadow PGD is the page we just allocated.
                 */
                set_pgd(spgd, __pgd(__pa(ptepage) | pgd_prot));
        }

        /*
         * Intel's Physical Address Extension actually uses three levels of
         * page tables, so we need to look in the mid-level.
         */
#ifdef CONFIG_X86_PAE
        /* Now look at the mid-level shadow entry. */
        spmd = spmd_addr(cpu, *spgd, vaddr);
        if (!(pmd_flags(*spmd) & _PAGE_PRESENT)) {
                /* No shadow entry: allocate a new shadow PTE page. */
                unsigned long ptepage;

                /* If they didn't want us to allocate anything, stop. */
                if (!allocate)
                        return NULL;

                ptepage = get_zeroed_page(GFP_KERNEL);
                /*
                 * This is not really the Guest's fault, but killing it is
                 * simple for this corner case.
                 */
                if (!ptepage) {
                        kill_guest(cpu, "out of memory allocating pmd page");
                        return NULL;
                }

                /*
                 * And we copy the flags to the shadow PMD entry. The page
                 * number in the shadow PMD is the page we just allocated.
                 */
                set_pmd(spmd, __pmd(__pa(ptepage) | pmd_prot));
        }
#endif

        /* Get the pointer to the shadow PTE entry we're going to set. */
        return spte_addr(cpu, *spgd, vaddr);
}
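
/*
 * (A usage note: demand_page() below calls find_spte() with allocate=true,
 * passing flags lifted from the Guest's own PGD/PMD entries, while
 * page_writable() and map_switcher_in_guest() call it with allocate=false
 * just to peek at an existing shadow entry.)
 */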

/*H:330
 * (i) Looking up a page table entry when the Guest faults.
 *
 * We saw this call in run_guest(): when we see a page fault in the Guest, we
 * come here. That's because we only set up the shadow page tables lazily as
 * they're needed, so we get page faults all the time and quietly fix them up
 * and return to the Guest without it knowing.
 *
 * If we fixed up the fault (ie. we mapped the address), this routine returns
 * true. Otherwise, it was a real fault and we need to tell the Guest.
 */
bool demand_page(struct lg_cpu *cpu, unsigned long vaddr, int errcode)
{
        unsigned long gpte_ptr;
        pte_t gpte;
        pte_t *spte;
        pmd_t gpmd;
        pgd_t gpgd;

        /* We never demand page the Switcher, so trying is a mistake. */
        if (vaddr >= switcher_addr)
                return false;

        /* First step: get the top-level Guest page table entry. */
        if (unlikely(cpu->linear_pages)) {
                /* Faking up a linear mapping. */
                gpgd = __pgd(CHECK_GPGD_MASK);
        } else {
                gpgd = lgread(cpu, gpgd_addr(cpu, vaddr), pgd_t);
                /* Toplevel not present? We can't map it in. */
                if (!(pgd_flags(gpgd) & _PAGE_PRESENT))
                        return false;

                /*
                 * This kills the Guest if it has weird flags or tries to
                 * refer to a "physical" address outside the bounds.
                 */
                if (!check_gpgd(cpu, gpgd))
                        return false;
        }

        /* This "mid-level" entry is only used for non-linear, PAE mode. */
        gpmd = __pmd(_PAGE_TABLE);

#ifdef CONFIG_X86_PAE
        if (likely(!cpu->linear_pages)) {
                gpmd = lgread(cpu, gpmd_addr(gpgd, vaddr), pmd_t);
                /* Middle level not present? We can't map it in. */
                if (!(pmd_flags(gpmd) & _PAGE_PRESENT))
                        return false;

                /*
                 * This kills the Guest if it has weird flags or tries to
                 * refer to a "physical" address outside the bounds.
                 */
                if (!check_gpmd(cpu, gpmd))
                        return false;
        }

        /*
         * OK, now we look at the lower level in the Guest page table: keep its
         * address, because we might update it later.
         */
        gpte_ptr = gpte_addr(cpu, gpmd, vaddr);
#else
        /*
         * OK, now we look at the lower level in the Guest page table: keep its
         * address, because we might update it later.
         */
        gpte_ptr = gpte_addr(cpu, gpgd, vaddr);
#endif

        if (unlikely(cpu->linear_pages)) {
                /* Linear? Make up a PTE which points to same page. */
                gpte = __pte((vaddr & PAGE_MASK) | _PAGE_RW | _PAGE_PRESENT);
        } else {
                /* Read the actual PTE value. */
                gpte = lgread(cpu, gpte_ptr, pte_t);
        }

        /* If this page isn't in the Guest page tables, we can't page it in. */
        if (!(pte_flags(gpte) & _PAGE_PRESENT))
                return false;
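
        /*
         * (For reference: the x86 page fault error code sets bit 0 if the
         * faulting page was present, bit 1 for a write access and bit 2 for
         * a userspace access; hence the "& 2" and "& 4" tests below.)
         */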

        /*
         * Check they're not trying to write to a page the Guest wants
         * read-only (errcode & 2 means a write was attempted).
         */
        if ((errcode & 2) && !(pte_flags(gpte) & _PAGE_RW))
                return false;

        /* User access to a kernel-only page? (errcode & 4 means user access) */
        if ((errcode & 4) && !(pte_flags(gpte) & _PAGE_USER))
                return false;

        /*
         * Check that the Guest PTE flags are OK, and the page number is below
         * the pfn_limit (ie. not mapping the Launcher binary).
         */
        if (!check_gpte(cpu, gpte))
                return false;

        /* Add the _PAGE_ACCESSED and (for a write) _PAGE_DIRTY flag */
        gpte = pte_mkyoung(gpte);
        if (errcode & 2)
                gpte = pte_mkdirty(gpte);

        /* Get the pointer to the shadow PTE entry we're going to set. */
        spte = find_spte(cpu, vaddr, true, pgd_flags(gpgd), pmd_flags(gpmd));
        if (!spte)
                return false;

        /*
         * If there was a valid shadow PTE entry here before, we release it.
         * This can happen with a write to a previously read-only entry.
         */
        release_pte(*spte);

        /*
         * If this is a write, we insist that the Guest page is writable (the
         * final arg to gpte_to_spte()).
         */
        if (pte_dirty(gpte))
                *spte = gpte_to_spte(cpu, gpte, 1);
        else
                /*
                 * If this is a read, don't set the "writable" bit in the page
                 * table entry, even if the Guest says it's writable. That way
                 * we will come back here when a write does actually occur, so
                 * we can update the Guest's _PAGE_DIRTY flag.
                 */
                set_pte(spte, gpte_to_spte(cpu, pte_wrprotect(gpte), 0));

        /*
         * Finally, we write the Guest PTE entry back: we've set the
         * _PAGE_ACCESSED and maybe the _PAGE_DIRTY flags.
         */
        if (likely(!cpu->linear_pages))
                lgwrite(cpu, gpte_ptr, pte_t, gpte);

        /*
         * The fault is fixed, the page table is populated, the mapping
         * manipulated, the result returned and the code complete. A small
         * delay and a trace of alliteration are the only indications the Guest
         * has that a page fault occurred at all.
         */
        return true;
}

/*H:360
 * (ii) Making sure the Guest stack is mapped.
 *
 * Remember that direct traps into the Guest need a mapped Guest kernel stack.
 * pin_stack_pages() calls us here: we could simply call demand_page(), but as
 * we've seen that logic is quite long, and usually the stack pages are already
 * mapped, so it's overkill.
 *
 * This is a quick version which answers the question: is this virtual address
 * mapped by the shadow page tables, and is it writable?
 */
static bool page_writable(struct lg_cpu *cpu, unsigned long vaddr)
{
        pte_t *spte;
        unsigned long flags;

        /* You can't put your stack in the Switcher! */
        if (vaddr >= switcher_addr)
                return false;

        /* If there's no shadow PTE, it's not writable. */
        spte = find_spte(cpu, vaddr, false, 0, 0);
        if (!spte)
                return false;

        /*
         * Check the flags on the pte entry itself: it must be present and
         * writable.
         */
        flags = pte_flags(*spte);
        return (flags & (_PAGE_PRESENT|_PAGE_RW)) == (_PAGE_PRESENT|_PAGE_RW);
}

/*
 * So, when pin_stack_pages() asks us to pin a page, we check if it's already
 * in the page tables, and if not, we call demand_page() with error code 2
 * (meaning "write").
 */
void pin_page(struct lg_cpu *cpu, unsigned long vaddr)
{
        if (!page_writable(cpu, vaddr) && !demand_page(cpu, vaddr, 2))
                kill_guest(cpu, "bad stack page %#lx", vaddr);
}
/*:*/

#ifdef CONFIG_X86_PAE
static void release_pmd(pmd_t *spmd)
{
        /* If the entry's not present, there's nothing to release. */
        if (pmd_flags(*spmd) & _PAGE_PRESENT) {
                unsigned int i;
                pte_t *ptepage = __va(pmd_pfn(*spmd) << PAGE_SHIFT);

                /* For each entry in the page, we might need to release it. */
                for (i = 0; i < PTRS_PER_PTE; i++)
                        release_pte(ptepage[i]);
                /* Now we can free the page of PTEs */
                free_page((long)ptepage);
                /* And zero out the PMD entry so we never release it twice. */
                set_pmd(spmd, __pmd(0));
        }
}

static void release_pgd(pgd_t *spgd)
{
        /* If the entry's not present, there's nothing to release. */
        if (pgd_flags(*spgd) & _PAGE_PRESENT) {
                unsigned int i;
                pmd_t *pmdpage = __va(pgd_pfn(*spgd) << PAGE_SHIFT);

                for (i = 0; i < PTRS_PER_PMD; i++)
                        release_pmd(&pmdpage[i]);
                /* Now we can free the page of PMDs */
                free_page((long)pmdpage);
                /* And zero out the PGD entry so we never release it twice. */
                set_pgd(spgd, __pgd(0));
        }
}
#else /* !CONFIG_X86_PAE */
/*H:450
 * If we chase down the release_pgd() code, the non-PAE version looks like
 * this. The PAE version is almost identical, but instead of calling
 * release_pte() it calls release_pmd(), which looks much like this.
 */
static void release_pgd(pgd_t *spgd)
{
        /* If the entry's not present, there's nothing to release. */
        if (pgd_flags(*spgd) & _PAGE_PRESENT) {
                unsigned int i;
                /*
                 * Converting the pfn to find the actual PTE page is easy: turn
                 * the page number into a physical address, then convert to a
                 * virtual address (easy for kernel pages like this one).
                 */
                pte_t *ptepage = __va(pgd_pfn(*spgd) << PAGE_SHIFT);

                /* For each entry in the page, we might need to release it. */
                for (i = 0; i < PTRS_PER_PTE; i++)
                        release_pte(ptepage[i]);
                /* Now we can free the page of PTEs */
                free_page((long)ptepage);
                /* And zero out the PGD entry so we never release it twice. */
                *spgd = __pgd(0);
        }
}
#endif

/*H:445
 * We saw flush_user_mappings() twice: once from the hypercall which throws
 * away the user page tables and once in new_pgdir() when we re-use a
 * top-level pgdir page. It simply releases every PTE page from 0 up to the
 * Guest's kernel address.
 */
static void flush_user_mappings(struct lguest *lg, int idx)
{
        unsigned int i;

        /* Release every pgd entry up to the kernel's address. */
        for (i = 0; i < pgd_index(lg->kernel_address); i++)
                release_pgd(lg->pgdirs[idx].pgdir + i);
}

/*H:440
 * (v) Flushing (throwing away) page tables,
 *
 * The Guest has a hypercall to throw away the page tables: it's used when a
 * large number of mappings have been changed.
 */
void guest_pagetable_flush_user(struct lg_cpu *cpu)
{
        /* Drop the userspace part of the current page table. */
        flush_user_mappings(cpu->lg, cpu->cpu_pgd);
}
/*:*/

/* We walk down the guest page tables to get a guest-physical address */
unsigned long guest_pa(struct lg_cpu *cpu, unsigned long vaddr)
{
        pgd_t gpgd;
        pte_t gpte;
#ifdef CONFIG_X86_PAE
        pmd_t gpmd;
#endif

        /* Still not set up? Just map 1:1. */
        if (unlikely(cpu->linear_pages))
                return vaddr;

        /* First step: get the top-level Guest page table entry. */
        gpgd = lgread(cpu, gpgd_addr(cpu, vaddr), pgd_t);
        /* Toplevel not present? We can't map it in. */
        if (!(pgd_flags(gpgd) & _PAGE_PRESENT)) {
                kill_guest(cpu, "Bad address %#lx", vaddr);
                return -1UL;
        }

#ifdef CONFIG_X86_PAE
        gpmd = lgread(cpu, gpmd_addr(gpgd, vaddr), pmd_t);
        if (!(pmd_flags(gpmd) & _PAGE_PRESENT))
                kill_guest(cpu, "Bad address %#lx", vaddr);
        gpte = lgread(cpu, gpte_addr(cpu, gpmd, vaddr), pte_t);
#else
        gpte = lgread(cpu, gpte_addr(cpu, gpgd, vaddr), pte_t);
#endif
        if (!(pte_flags(gpte) & _PAGE_PRESENT))
                kill_guest(cpu, "Bad address %#lx", vaddr);

        return pte_pfn(gpte) * PAGE_SIZE | (vaddr & ~PAGE_MASK);
}
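
/*
 * A quick worked example (a sketch): if the Guest PTE we find holds page
 * number 0x1234 and vaddr's offset within the page is 0x56, guest_pa()
 * returns 0x1234 * 4096 | 0x56 = 0x1234056.
 */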

/*
 * We keep several page tables. This is a simple routine to find the page
 * table (if any) corresponding to this top-level address the Guest has given
 * us.
 */
static unsigned int find_pgdir(struct lguest *lg, unsigned long pgtable)
{
        unsigned int i;

        for (i = 0; i < ARRAY_SIZE(lg->pgdirs); i++)
                if (lg->pgdirs[i].pgdir && lg->pgdirs[i].gpgdir == pgtable)
                        break;
        return i;
}
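
/*
 * (Note: when nothing matches, find_pgdir() returns ARRAY_SIZE(lg->pgdirs),
 * so callers such as guest_new_pagetable() and guest_set_pte() below test
 * for "not found" like this:
 *
 *      if (find_pgdir(lg, gpgdir) == ARRAY_SIZE(lg->pgdirs))
 *              ... no shadow for this Guest top level ...
 */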

/*H:435
 * And this is us, creating the new page directory. If we really do
 * allocate a new one (and so the kernel parts are not there), we set
 * blank_pgdir.
 */
static unsigned int new_pgdir(struct lg_cpu *cpu,
                              unsigned long gpgdir,
                              int *blank_pgdir)
{
        unsigned int next;

        /*
         * We pick one entry at random to throw out. Choosing the Least
         * Recently Used might be better, but this is easy.
         */
        next = random32() % ARRAY_SIZE(cpu->lg->pgdirs);
        /* If it's never been allocated at all before, try now. */
        if (!cpu->lg->pgdirs[next].pgdir) {
                cpu->lg->pgdirs[next].pgdir =
                                (pgd_t *)get_zeroed_page(GFP_KERNEL);
                /* If the allocation fails, just keep using the one we have */
                if (!cpu->lg->pgdirs[next].pgdir)
                        next = cpu->cpu_pgd;
                else {
                        /*
                         * This is a blank page, so there are no kernel
                         * mappings: caller must map the stack!
                         */
                        *blank_pgdir = 1;
                }
        }
        /* Record which Guest toplevel this shadows. */
        cpu->lg->pgdirs[next].gpgdir = gpgdir;
        /* Release all the non-kernel mappings. */
        flush_user_mappings(cpu->lg, next);

        return next;
}

/*H:501
 * We do need the Switcher code mapped at all times, so we allocate that
 * part of the Guest page table here. We map the Switcher code immediately,
 * but defer mapping of the Guest's register page and IDT/LDT etc page until
 * just before we run the Guest in map_switcher_in_guest().
 *
 * We *could* do this setup in map_switcher_in_guest(), but at that point
 * we have interrupts disabled, and allocating pages like that is fraught: we
 * can't sleep if we need to free up some memory.
 */
static bool allocate_switcher_mapping(struct lg_cpu *cpu)
{
        int i;

        for (i = 0; i < TOTAL_SWITCHER_PAGES; i++) {
                pte_t *pte = find_spte(cpu, switcher_addr + i * PAGE_SIZE, true,
                                       CHECK_GPGD_MASK, _PAGE_TABLE);
                if (!pte)
                        return false;

                /*
                 * Map the switcher page if not already there. It might
                 * already be there because we call allocate_switcher_mapping()
                 * in guest_set_pgd() just in case it did discard our Switcher
                 * mapping, but it probably didn't.
                 */
                if (i == 0 && !(pte_flags(*pte) & _PAGE_PRESENT)) {
                        /* Get a reference to the Switcher page. */
                        get_page(lg_switcher_pages[0]);
                        /* Create a read-only, executable, kernel-style PTE */
                        set_pte(pte,
                                mk_pte(lg_switcher_pages[0], PAGE_KERNEL_RX));
                }
        }
        cpu->lg->pgdirs[cpu->cpu_pgd].switcher_mapped = true;
        return true;
}

/*H:470
 * Finally, a routine which throws away everything: all PGD entries in all
 * the shadow page tables, including the Guest's kernel mappings. This is used
 * when we destroy the Guest.
 */
static void release_all_pagetables(struct lguest *lg)
{
        unsigned int i, j;

        /* Every shadow pagetable this Guest has */
        for (i = 0; i < ARRAY_SIZE(lg->pgdirs); i++) {
                if (!lg->pgdirs[i].pgdir)
                        continue;

                /* Every PGD entry. */
                for (j = 0; j < PTRS_PER_PGD; j++)
                        release_pgd(lg->pgdirs[i].pgdir + j);
                lg->pgdirs[i].switcher_mapped = false;
        }
}

/*
 * We also throw away everything when a Guest tells us it's changed a kernel
 * mapping. Since kernel mappings are in every page table, it's easiest to
 * throw them all away. This traps the Guest in amber for a while as
 * everything faults back in, but it's rare.
 */
void guest_pagetable_clear_all(struct lg_cpu *cpu)
{
        release_all_pagetables(cpu->lg);
        /* We need the Guest kernel stack mapped again. */
        pin_stack_pages(cpu);
        /* And we need Switcher allocated. */
        if (!allocate_switcher_mapping(cpu))
                kill_guest(cpu, "Cannot populate switcher mapping");
}

/*H:430
 * (iv) Switching page tables
 *
 * Now we've seen all the page table setting and manipulation, let's see
 * what happens when the Guest changes page tables (ie. changes the top-level
 * pgdir). This occurs on almost every context switch.
 */
void guest_new_pagetable(struct lg_cpu *cpu, unsigned long pgtable)
{
        int newpgdir, repin = 0;

        /*
         * The very first time they call this, we're actually running without
         * any page tables; we've been making it up. Throw them away now.
         */
        if (unlikely(cpu->linear_pages)) {
                release_all_pagetables(cpu->lg);
                cpu->linear_pages = false;
                /* Force allocation of a new pgdir. */
                newpgdir = ARRAY_SIZE(cpu->lg->pgdirs);
        } else {
                /* Look to see if we have this one already. */
                newpgdir = find_pgdir(cpu->lg, pgtable);
        }

        /*
         * If not, we allocate or mug an existing one: if it's a fresh one,
         * repin gets set to 1.
         */
        if (newpgdir == ARRAY_SIZE(cpu->lg->pgdirs))
                newpgdir = new_pgdir(cpu, pgtable, &repin);
        /* Change the current pgd index to the new one. */
        cpu->cpu_pgd = newpgdir;
        /*
         * If it was completely blank, we map in the Guest kernel stack and
         * the Switcher.
         */
        if (repin)
                pin_stack_pages(cpu);
        if (!cpu->lg->pgdirs[cpu->cpu_pgd].switcher_mapped) {
                if (!allocate_switcher_mapping(cpu))
                        kill_guest(cpu, "Cannot populate switcher mapping");
        }
}
/*:*/

/*M:009
 * Since we throw away all mappings when a kernel mapping changes, our
 * performance sucks for guests using highmem. In fact, a guest with
 * PAGE_OFFSET 0xc0000000 (the default) and more than about 700MB of RAM is
 * usually slower than a Guest with less memory.
 *
 * This, of course, cannot be fixed. It would take some kind of... well, I
 * don't know, but the term "puissant code-fu" comes to mind.
:*/

/*H:420
 * This is the routine which actually sets the page table entry for the
 * "idx"'th shadow page table.
 *
 * Normally, we can just throw out the old entry and replace it with 0: if they
 * use it demand_page() will put the new entry in. We need to do this anyway:
 * The Guest expects _PAGE_ACCESSED to be set on its PTE the first time a page
 * is read from, and _PAGE_DIRTY when it's written to.
 *
 * But Avi Kivity pointed out that most Operating Systems (Linux included) set
 * these bits on PTEs immediately anyway. This is done to save the CPU from
 * having to update them, but it helps us the same way: if they set
 * _PAGE_ACCESSED then we can put a read-only PTE entry in immediately, and if
 * they set _PAGE_DIRTY then we can put a writable PTE entry in immediately.
 */
static void do_set_pte(struct lg_cpu *cpu, int idx,
                       unsigned long vaddr, pte_t gpte)
{
        /* Look up the matching shadow page directory entry. */
        pgd_t *spgd = spgd_addr(cpu, idx, vaddr);
#ifdef CONFIG_X86_PAE
        pmd_t *spmd;
#endif

        /* If the top level isn't present, there's no entry to update. */
        if (pgd_flags(*spgd) & _PAGE_PRESENT) {
#ifdef CONFIG_X86_PAE
                spmd = spmd_addr(cpu, *spgd, vaddr);
                if (pmd_flags(*spmd) & _PAGE_PRESENT) {
#endif
                        /* Otherwise, start by releasing the existing entry. */
                        pte_t *spte = spte_addr(cpu, *spgd, vaddr);
                        release_pte(*spte);

                        /*
                         * If they're setting this entry as dirty or accessed,
                         * we might as well put that entry they've given us in
                         * now. This shaves 10% off a copy-on-write
                         * micro-benchmark.
                         */
                        if (pte_flags(gpte) & (_PAGE_DIRTY | _PAGE_ACCESSED)) {
                                if (!check_gpte(cpu, gpte))
                                        return;
                                set_pte(spte,
                                        gpte_to_spte(cpu, gpte,
                                                pte_flags(gpte) & _PAGE_DIRTY));
                        } else {
                                /*
                                 * Otherwise kill it and we can demand_page()
                                 * it in later.
                                 */
                                set_pte(spte, __pte(0));
                        }
#ifdef CONFIG_X86_PAE
                }
#endif
        }
}

/*H:410
 * Updating a PTE entry is a little trickier.
 *
 * We keep track of several different page tables (the Guest uses one for each
 * process, so it makes sense to cache at least a few). Each of these has
 * identical kernel parts: ie. every mapping above PAGE_OFFSET is the same for
 * all processes. So when the page table above that address changes, we update
 * all the page tables, not just the current one. This is rare.
 *
 * The benefit is that when we have to track a new page table, we can keep all
 * the kernel mappings. This speeds up context switch immensely.
 */
void guest_set_pte(struct lg_cpu *cpu,
                   unsigned long gpgdir, unsigned long vaddr, pte_t gpte)
{
        /* We don't let you remap the Switcher; we need it to get back! */
        if (vaddr >= switcher_addr) {
                kill_guest(cpu, "attempt to set pte into Switcher pages");
                return;
        }

        /*
         * Kernel mappings must be changed on all top levels. Slow, but doesn't
         * happen often.
         */
        if (vaddr >= cpu->lg->kernel_address) {
                unsigned int i;
                for (i = 0; i < ARRAY_SIZE(cpu->lg->pgdirs); i++)
                        if (cpu->lg->pgdirs[i].pgdir)
                                do_set_pte(cpu, i, vaddr, gpte);
        } else {
                /* Is this page table one we have a shadow for? */
                int pgdir = find_pgdir(cpu->lg, gpgdir);
                if (pgdir != ARRAY_SIZE(cpu->lg->pgdirs))
                        /* If so, do the update. */
                        do_set_pte(cpu, pgdir, vaddr, gpte);
        }
}

/*H:400
 * (iii) Setting up a page table entry when the Guest tells us one has changed.
 *
 * Just like we did in interrupts_and_traps.c, it makes sense for us to deal
 * with the other side of page tables while we're here: what happens when the
 * Guest asks for a page table to be updated?
 *
 * We already saw that demand_page() will fill in the shadow page tables when
 * needed, so we can simply remove shadow page table entries whenever the Guest
 * tells us they've changed. When the Guest tries to use the new entry it will
 * fault and demand_page() will fix it up.
 *
 * So with that in mind here's our code to update a (top-level) PGD entry:
 */
void guest_set_pgd(struct lguest *lg, unsigned long gpgdir, u32 idx)
{
        int pgdir;

        if (idx >= PTRS_PER_PGD) {
                kill_guest(&lg->cpus[0], "Attempt to set pgd %u/%u",
                           idx, PTRS_PER_PGD);
                return;
        }

        /* If they're talking about a page table we have a shadow for... */
        pgdir = find_pgdir(lg, gpgdir);
        if (pgdir < ARRAY_SIZE(lg->pgdirs)) {
                /* ... throw it away. */
                release_pgd(lg->pgdirs[pgdir].pgdir + idx);
                /* That might have been the Switcher mapping, remap it. */
                if (!allocate_switcher_mapping(&lg->cpus[0])) {
                        kill_guest(&lg->cpus[0],
                                   "Cannot populate switcher mapping");
                }
        }
}

#ifdef CONFIG_X86_PAE
/* For setting a mid-level, we just throw everything away. It's easy. */
void guest_set_pmd(struct lguest *lg, unsigned long pmdp, u32 idx)
{
        guest_pagetable_clear_all(&lg->cpus[0]);
}
#endif

/*H:500
 * (vii) Setting up the page tables initially.
 *
 * When a Guest is first created, we initialize a shadow page table which
 * we will populate on future faults. The Guest doesn't have any actual
 * pagetables yet, so we set linear_pages to tell demand_page() to fake it
 * for the moment.
 *
 * We do need the Switcher to be mapped at all times, so we allocate that
 * part of the Guest page table here.
 */
int init_guest_pagetable(struct lguest *lg)
{
        struct lg_cpu *cpu = &lg->cpus[0];
        int allocated = 0;

        /* lg (and lg->cpus[]) starts zeroed: this allocates a new pgdir */
        cpu->cpu_pgd = new_pgdir(cpu, 0, &allocated);
        if (!allocated)
                return -ENOMEM;

        /* We start with a linear mapping until the Guest sets up its own. */
        cpu->linear_pages = true;

        /* Allocate the page tables for the Switcher. */
        if (!allocate_switcher_mapping(cpu)) {
                release_all_pagetables(lg);
                return -ENOMEM;
        }

        return 0;
}

/*H:508 When the Guest calls LHCALL_LGUEST_INIT we do more setup. */
void page_table_guest_data_init(struct lg_cpu *cpu)
{
        /*
         * We tell the Guest that it can't use the virtual addresses
         * used by the Switcher. In 32-bit arithmetic, ~switcher_addr + 1
         * is equivalent to 4GB - switcher_addr.
         */
        u32 top = ~switcher_addr + 1;
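
        /*
         * (A worked example, with a made-up switcher_addr: if the Switcher
         * lived at 0xFFC00000, then ~0xFFC00000 + 1 = 0x00400000 = 4MB,
         * which is exactly 2^32 - 0xFFC00000 truncated to 32 bits.)
         */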

        /* We get the kernel address: above this is all kernel memory. */
        if (get_user(cpu->lg->kernel_address,
                     &cpu->lg->lguest_data->kernel_address)
                /*
                 * We tell the Guest that it can't use the top virtual
                 * addresses (used by the Switcher).
                 */
            || put_user(top, &cpu->lg->lguest_data->reserve_mem)) {
                kill_guest(cpu, "bad guest page %p", cpu->lg->lguest_data);
                return;
        }

        /*
         * In flush_user_mappings() we loop from 0 to
         * "pgd_index(lg->kernel_address)". This assumes it won't hit the
         * Switcher mappings, so check that now.
         */
        if (cpu->lg->kernel_address >= switcher_addr)
                kill_guest(cpu, "bad kernel address %#lx",
                           cpu->lg->kernel_address);
}

/* When a Guest dies, our cleanup is fairly simple. */
void free_guest_pagetable(struct lguest *lg)
{
        unsigned int i;

        /* Throw away all page table pages. */
        release_all_pagetables(lg);
        /* Now free the top levels: free_page() can handle 0 just fine. */
        for (i = 0; i < ARRAY_SIZE(lg->pgdirs); i++)
                free_page((long)lg->pgdirs[i].pgdir);
}

/*H:480
 * (vi) Mapping the Switcher when the Guest is about to run.
 *
 * The Switcher and the two pages for this CPU need to be visible in the
 * Guest (and not the pages for other CPUs).
 *
 * The pages have all been allocated already; here we just need to make
 * sure they're mapped.
 */
void map_switcher_in_guest(struct lg_cpu *cpu, struct lguest_pages *pages)
{
        unsigned long base, i;
        struct page *percpu_switcher_page, *regs_page;
        pte_t *pte;

        /* Switcher page should always be mapped! */
        BUG_ON(!cpu->lg->pgdirs[cpu->cpu_pgd].switcher_mapped);

        /* Clear all the Switcher mappings for any other CPUs. */
        /* FIXME: This is dumb: update only when Host CPU changes. */
        for_each_possible_cpu(i) {
                /* Get location of lguest_pages (indexed by Host CPU) */
                base = switcher_addr + PAGE_SIZE
                        + i * sizeof(struct lguest_pages);

                /* Get shadow PTE for first page (where we put guest regs). */
                pte = find_spte(cpu, base, false, 0, 0);
                set_pte(pte, __pte(0));

                /* This is where we put R/O state. */
                pte = find_spte(cpu, base + PAGE_SIZE, false, 0, 0);
                set_pte(pte, __pte(0));
        }

        /*
         * When we're running the Guest, we want the Guest's "regs" page to
         * appear where the first Switcher page for this CPU is. This is an
         * optimization: when the Switcher saves the Guest registers, it saves
         * them into the first page of this CPU's "struct lguest_pages": if we
         * make sure the Guest's register page is already mapped there, we
         * don't have to copy them out again.
         */
        /* Find the shadow PTE for this regs page. */
        base = switcher_addr + PAGE_SIZE
                + raw_smp_processor_id() * sizeof(struct lguest_pages);
        pte = find_spte(cpu, base, false, 0, 0);
        regs_page = pfn_to_page(__pa(cpu->regs_page) >> PAGE_SHIFT);
        get_page(regs_page);
        set_pte(pte, mk_pte(regs_page, __pgprot(__PAGE_KERNEL & ~_PAGE_GLOBAL)));

        /*
         * We map the second page of the struct lguest_pages read-only in
         * the Guest: the IDT, GDT and other things it's not supposed to
         * change.
         */
        base += PAGE_SIZE;
        pte = find_spte(cpu, base, false, 0, 0);
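
        /*
         * (Reading the index arithmetic below: lg_switcher_pages[0] is the
         * Switcher code page, and after it come two pages per Host CPU:
         * first the regs page, then this read-only state page. So CPU n's
         * read-only page is entry 1 + n*2 + 1.)
         */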
        percpu_switcher_page
                = lg_switcher_pages[1 + raw_smp_processor_id()*2 + 1];
        get_page(percpu_switcher_page);
        set_pte(pte, mk_pte(percpu_switcher_page,
                            __pgprot(__PAGE_KERNEL_RO & ~_PAGE_GLOBAL)));
}
/*:*/

/*
 * We've made it through the page table code. Perhaps our tired brains are
 * still processing the details, or perhaps we're simply glad it's over.
 *
 * If nothing else, note that all this complexity in keeping shadow page tables
 * in sync with the Guest's page tables is for one reason: for most Guests this
 * page table dance determines how bad performance will be. This is why Xen
 * uses exotic direct Guest pagetable manipulation, and why both Intel and AMD
 * have implemented shadow page table support directly into hardware.
 *
 * There is just one file remaining in the Host.
 */