- /*
- * Xen mmu operations
- *
- * This file contains the various mmu fetch and update operations.
- * The most important job they must perform is the mapping between the
- * domain's pfns and the overall machine mfns.
- *
- * Xen allows guests to directly update the pagetable, in a controlled
- * fashion. In other words, the guest modifies the same pagetable
- * that the CPU actually uses, which eliminates the overhead of having
- * a separate shadow pagetable.
- *
- * In order to allow this, it falls on the guest domain to map its
- * notion of a "physical" pfn - which is just a domain-local linear
- * address - into a real "machine address" which the CPU's MMU can
- * use.
- *
- * A pgd_t/pmd_t/pte_t will typically contain an mfn, and so can be
- * inserted directly into the pagetable. When creating a new
- * pte/pmd/pgd, it converts the passed pfn into an mfn. Conversely,
- * when reading the content back with __(pgd|pmd|pte)_val, it converts
- * the mfn back into a pfn.
- *
- * The other constraint is that all pages which make up a pagetable
- * must be mapped read-only in the guest. This prevents uncontrolled
- * guest updates to the pagetable. Xen strictly enforces this, and
- * will disallow any pagetable update which will end up mapping a
- * pagetable page RW, and will disallow using any writable page as a
- * pagetable.
- *
- * Naively, when loading %cr3 with the base of a new pagetable, Xen
- * would need to validate the whole pagetable before going on.
- * Naturally, this is quite slow. The solution is to "pin" a
- * pagetable, which enforces all the constraints on the pagetable even
- * when it is not actively in use. This means that Xen can be assured
- * that it is still valid when you do load it into %cr3, and doesn't
- * need to revalidate it.
- *
- * Jeremy Fitzhardinge <jeremy@xensource.com>, XenSource Inc, 2007
- */
- #include <linux/sched.h>
- #include <linux/highmem.h>
- #include <linux/bug.h>
- #include <asm/pgtable.h>
- #include <asm/tlbflush.h>
- #include <asm/fixmap.h>
- #include <asm/mmu_context.h>
- #include <asm/paravirt.h>
- #include <asm/linkage.h>
- #include <asm/xen/hypercall.h>
- #include <asm/xen/hypervisor.h>
- #include <xen/page.h>
- #include <xen/interface/xen.h>
- #include "multicalls.h"
- #include "mmu.h"
- /*
- * Just beyond the highest usermode address. STACK_TOP_MAX has a
- * redzone above it, so round it up to a PGD boundary.
- */
- #define USER_LIMIT ((STACK_TOP_MAX + PGDIR_SIZE - 1) & PGDIR_MASK)
- #define P2M_ENTRIES_PER_PAGE (PAGE_SIZE / sizeof(unsigned long))
- #define TOP_ENTRIES (MAX_DOMAIN_PAGES / P2M_ENTRIES_PER_PAGE)
- /* Placeholder for holes in the address space */
- static unsigned long p2m_missing[P2M_ENTRIES_PER_PAGE] __page_aligned_data =
- { [ 0 ... P2M_ENTRIES_PER_PAGE-1 ] = ~0UL };
- /* Array of pointers to pages containing p2m entries */
- static unsigned long *p2m_top[TOP_ENTRIES] __page_aligned_data =
- { [ 0 ... TOP_ENTRIES - 1] = &p2m_missing[0] };
- /* Arrays of p2m arrays expressed in mfns used for save/restore */
- static unsigned long p2m_top_mfn[TOP_ENTRIES] __page_aligned_bss;
- static unsigned long p2m_top_mfn_list[TOP_ENTRIES / P2M_ENTRIES_PER_PAGE]
- __page_aligned_bss;
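- /*
-  * These mfn-based copies exist so that the save/restore toolstack can
-  * locate every p2m page by machine frame alone, without having to
-  * interpret the guest's virtual mappings; the list-of-lists is
-  * published via shared_info in xen_setup_mfn_list_list() below.
-  */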
- static inline unsigned p2m_top_index(unsigned long pfn)
- {
- BUG_ON(pfn >= MAX_DOMAIN_PAGES);
- return pfn / P2M_ENTRIES_PER_PAGE;
- }
- static inline unsigned p2m_index(unsigned long pfn)
- {
- return pfn % P2M_ENTRIES_PER_PAGE;
- }
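- /*
-  * Example: with 4K pages and 8-byte unsigned longs (x86-64),
-  * P2M_ENTRIES_PER_PAGE is 512, so pfn 0x12345 splits into
-  * p2m_top_index() == 0x91 and p2m_index() == 0x145, i.e. the mfn
-  * lives at p2m_top[0x91][0x145]. On 32-bit the entries are 4 bytes,
-  * giving 1024 per page.
-  */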
- /* Build the parallel p2m_top_mfn structures */
- void xen_setup_mfn_list_list(void)
- {
- unsigned pfn, idx;
- for(pfn = 0; pfn < MAX_DOMAIN_PAGES; pfn += P2M_ENTRIES_PER_PAGE) {
- unsigned topidx = p2m_top_index(pfn);
- p2m_top_mfn[topidx] = virt_to_mfn(p2m_top[topidx]);
- }
- for(idx = 0; idx < ARRAY_SIZE(p2m_top_mfn_list); idx++) {
- unsigned topidx = idx * P2M_ENTRIES_PER_PAGE;
- p2m_top_mfn_list[idx] = virt_to_mfn(&p2m_top_mfn[topidx]);
- }
- BUG_ON(HYPERVISOR_shared_info == &xen_dummy_shared_info);
- HYPERVISOR_shared_info->arch.pfn_to_mfn_frame_list_list =
- virt_to_mfn(p2m_top_mfn_list);
- HYPERVISOR_shared_info->arch.max_pfn = xen_start_info->nr_pages;
- }
- /* Set up p2m_top to point to the domain-builder provided p2m pages */
- void __init xen_build_dynamic_phys_to_machine(void)
- {
- unsigned long *mfn_list = (unsigned long *)xen_start_info->mfn_list;
- unsigned long max_pfn = min(MAX_DOMAIN_PAGES, xen_start_info->nr_pages);
- unsigned pfn;
- for(pfn = 0; pfn < max_pfn; pfn += P2M_ENTRIES_PER_PAGE) {
- unsigned topidx = p2m_top_index(pfn);
- p2m_top[topidx] = &mfn_list[pfn];
- }
- }
- unsigned long get_phys_to_machine(unsigned long pfn)
- {
- unsigned topidx, idx;
- if (unlikely(pfn >= MAX_DOMAIN_PAGES))
- return INVALID_P2M_ENTRY;
- topidx = p2m_top_index(pfn);
- idx = p2m_index(pfn);
- return p2m_top[topidx][idx];
- }
- EXPORT_SYMBOL_GPL(get_phys_to_machine);
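- /*
-  * Install a freshly allocated p2m page, racing safely against other
-  * CPUs: the cmpxchg only publishes our page if the slot still holds
-  * p2m_missing; otherwise our page is freed and the winner's mapping
-  * is kept. On success the page's mfn is also recorded for the
-  * save/restore list.
-  */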
- static void alloc_p2m(unsigned long **pp, unsigned long *mfnp)
- {
- unsigned long *p;
- unsigned i;
- p = (void *)__get_free_page(GFP_KERNEL | __GFP_NOFAIL);
- BUG_ON(p == NULL);
- for(i = 0; i < P2M_ENTRIES_PER_PAGE; i++)
- p[i] = INVALID_P2M_ENTRY;
- if (cmpxchg(pp, p2m_missing, p) != p2m_missing)
- free_page((unsigned long)p);
- else
- *mfnp = virt_to_mfn(p);
- }
- void set_phys_to_machine(unsigned long pfn, unsigned long mfn)
- {
- unsigned topidx, idx;
- if (unlikely(xen_feature(XENFEAT_auto_translated_physmap))) {
- BUG_ON(pfn != mfn && mfn != INVALID_P2M_ENTRY);
- return;
- }
- if (unlikely(pfn >= MAX_DOMAIN_PAGES)) {
- BUG_ON(mfn != INVALID_P2M_ENTRY);
- return;
- }
- topidx = p2m_top_index(pfn);
- if (p2m_top[topidx] == p2m_missing) {
- /* no need to allocate a page to store an invalid entry */
- if (mfn == INVALID_P2M_ENTRY)
- return;
- alloc_p2m(&p2m_top[topidx], &p2m_top_mfn[topidx]);
- }
- idx = p2m_index(pfn);
- p2m_top[topidx][idx] = mfn;
- }
- xmaddr_t arbitrary_virt_to_machine(void *vaddr)
- {
- unsigned long address = (unsigned long)vaddr;
- unsigned int level;
- pte_t *pte = lookup_address(address, &level);
- unsigned offset = address & ~PAGE_MASK;
- BUG_ON(pte == NULL);
- return XMADDR(((phys_addr_t)pte_mfn(*pte) << PAGE_SHIFT) + offset);
- }
- void make_lowmem_page_readonly(void *vaddr)
- {
- pte_t *pte, ptev;
- unsigned long address = (unsigned long)vaddr;
- unsigned int level;
- pte = lookup_address(address, &level);
- BUG_ON(pte == NULL);
- ptev = pte_wrprotect(*pte);
- if (HYPERVISOR_update_va_mapping(address, ptev, 0))
- BUG();
- }
- void make_lowmem_page_readwrite(void *vaddr)
- {
- pte_t *pte, ptev;
- unsigned long address = (unsigned long)vaddr;
- unsigned int level;
- pte = lookup_address(address, &level);
- BUG_ON(pte == NULL);
- ptev = pte_mkwrite(*pte);
- if (HYPERVISOR_update_va_mapping(address, ptev, 0))
- BUG();
- }
- static bool page_pinned(void *ptr)
- {
- struct page *page = virt_to_page(ptr);
- return PagePinned(page);
- }
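- /*
-  * Queue an mmu_update as part of the current multicall batch. If the
-  * most recently queued multicall is already an mmu_update hypercall,
-  * the new update is appended to its argument list and the count in
-  * args[1] is bumped; otherwise a fresh single-update hypercall entry
-  * is queued.
-  */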
- static void extend_mmu_update(const struct mmu_update *update)
- {
- struct multicall_space mcs;
- struct mmu_update *u;
- mcs = xen_mc_extend_args(__HYPERVISOR_mmu_update, sizeof(*u));
- if (mcs.mc != NULL)
- mcs.mc->args[1]++;
- else {
- mcs = __xen_mc_entry(sizeof(*u));
- MULTI_mmu_update(mcs.mc, mcs.args, 1, NULL, DOMID_SELF);
- }
- u = mcs.args;
- *u = *update;
- }
- void xen_set_pmd_hyper(pmd_t *ptr, pmd_t val)
- {
- struct mmu_update u;
- preempt_disable();
- xen_mc_batch();
- /* ptr may be ioremapped for 64-bit pagetable setup */
- u.ptr = arbitrary_virt_to_machine(ptr).maddr;
- u.val = pmd_val_ma(val);
- extend_mmu_update(&u);
- xen_mc_issue(PARAVIRT_LAZY_MMU);
- preempt_enable();
- }
- void xen_set_pmd(pmd_t *ptr, pmd_t val)
- {
- /* If the page is not pinned, we can just update the entry
- directly */
- if (!page_pinned(ptr)) {
- *ptr = val;
- return;
- }
- xen_set_pmd_hyper(ptr, val);
- }
- /*
- * Associate a virtual page frame with a given physical page frame
- * and protection flags for that frame.
- */
- void set_pte_mfn(unsigned long vaddr, unsigned long mfn, pgprot_t flags)
- {
- set_pte_vaddr(vaddr, mfn_pte(mfn, flags));
- }
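- /*
-  * If the pte belongs to the current (or kernel) address space it can
-  * be set with update_va_mapping: batched when we are in lazy MMU
-  * mode, a direct hypercall otherwise. A foreign mm, or a failed
-  * hypercall, falls back to writing through the pte pointer with
-  * xen_set_pte.
-  */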
- void xen_set_pte_at(struct mm_struct *mm, unsigned long addr,
- pte_t *ptep, pte_t pteval)
- {
- /* updates to init_mm may be done without lock */
- if (mm == &init_mm)
- preempt_disable();
- if (mm == current->mm || mm == &init_mm) {
- if (paravirt_get_lazy_mode() == PARAVIRT_LAZY_MMU) {
- struct multicall_space mcs;
- mcs = xen_mc_entry(0);
- MULTI_update_va_mapping(mcs.mc, addr, pteval, 0);
- xen_mc_issue(PARAVIRT_LAZY_MMU);
- goto out;
- } else
- if (HYPERVISOR_update_va_mapping(addr, pteval, 0) == 0)
- goto out;
- }
- xen_set_pte(ptep, pteval);
- out:
- if (mm == &init_mm)
- preempt_enable();
- }
- pte_t xen_ptep_modify_prot_start(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
- {
- /* Just return the pte as-is. We preserve the bits on commit */
- return *ptep;
- }
- void xen_ptep_modify_prot_commit(struct mm_struct *mm, unsigned long addr,
- pte_t *ptep, pte_t pte)
- {
- struct mmu_update u;
- xen_mc_batch();
- u.ptr = virt_to_machine(ptep).maddr | MMU_PT_UPDATE_PRESERVE_AD;
- u.val = pte_val_ma(pte);
- extend_mmu_update(&u);
- xen_mc_issue(PARAVIRT_LAZY_MMU);
- }
- /* Assume pteval_t is equivalent to all the other *val_t types. */
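- /* Only the frame-number field of a present entry is translated; the
-    flag bits are preserved and non-present entries pass through
-    unchanged. */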
- static pteval_t pte_mfn_to_pfn(pteval_t val)
- {
- if (val & _PAGE_PRESENT) {
- unsigned long mfn = (val & PTE_PFN_MASK) >> PAGE_SHIFT;
- pteval_t flags = val & PTE_FLAGS_MASK;
- val = ((pteval_t)mfn_to_pfn(mfn) << PAGE_SHIFT) | flags;
- }
- return val;
- }
- static pteval_t pte_pfn_to_mfn(pteval_t val)
- {
- if (val & _PAGE_PRESENT) {
- unsigned long pfn = (val & PTE_PFN_MASK) >> PAGE_SHIFT;
- pteval_t flags = val & PTE_FLAGS_MASK;
- val = ((pteval_t)pfn_to_mfn(pfn) << PAGE_SHIFT) | flags;
- }
- return val;
- }
- pteval_t xen_pte_val(pte_t pte)
- {
- return pte_mfn_to_pfn(pte.pte);
- }
- pgdval_t xen_pgd_val(pgd_t pgd)
- {
- return pte_mfn_to_pfn(pgd.pgd);
- }
- pte_t xen_make_pte(pteval_t pte)
- {
- pte = pte_pfn_to_mfn(pte);
- return native_make_pte(pte);
- }
- pgd_t xen_make_pgd(pgdval_t pgd)
- {
- pgd = pte_pfn_to_mfn(pgd);
- return native_make_pgd(pgd);
- }
- pmdval_t xen_pmd_val(pmd_t pmd)
- {
- return pte_mfn_to_pfn(pmd.pmd);
- }
- void xen_set_pud_hyper(pud_t *ptr, pud_t val)
- {
- struct mmu_update u;
- preempt_disable();
- xen_mc_batch();
- /* ptr may be ioremapped for 64-bit pagetable setup */
- u.ptr = arbitrary_virt_to_machine(ptr).maddr;
- u.val = pud_val_ma(val);
- extend_mmu_update(&u);
- xen_mc_issue(PARAVIRT_LAZY_MMU);
- preempt_enable();
- }
- void xen_set_pud(pud_t *ptr, pud_t val)
- {
- /* If the page is not pinned, we can just update the entry
- directly */
- if (!page_pinned(ptr)) {
- *ptr = val;
- return;
- }
- xen_set_pud_hyper(ptr, val);
- }
- void xen_set_pte(pte_t *ptep, pte_t pte)
- {
- #ifdef CONFIG_X86_PAE
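- /* Write the high word first so that a concurrent walker that sees
-    the present bit in the low word never pairs it with a stale high
-    word; xen_pte_clear below does the reverse for the same reason. */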
- ptep->pte_high = pte.pte_high;
- smp_wmb();
- ptep->pte_low = pte.pte_low;
- #else
- *ptep = pte;
- #endif
- }
- #ifdef CONFIG_X86_PAE
- void xen_set_pte_atomic(pte_t *ptep, pte_t pte)
- {
- set_64bit((u64 *)ptep, native_pte_val(pte));
- }
- void xen_pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
- {
- ptep->pte_low = 0;
- smp_wmb(); /* make sure low gets written first */
- ptep->pte_high = 0;
- }
- void xen_pmd_clear(pmd_t *pmdp)
- {
- set_pmd(pmdp, __pmd(0));
- }
- #endif /* CONFIG_X86_PAE */
- pmd_t xen_make_pmd(pmdval_t pmd)
- {
- pmd = pte_pfn_to_mfn(pmd);
- return native_make_pmd(pmd);
- }
- #if PAGETABLE_LEVELS == 4
- pudval_t xen_pud_val(pud_t pud)
- {
- return pte_mfn_to_pfn(pud.pud);
- }
- pud_t xen_make_pud(pudval_t pud)
- {
- pud = pte_pfn_to_mfn(pud);
- return native_make_pud(pud);
- }
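- /*
-  * On x86-64 Xen gives each process a second, user-mode pgd. The
-  * kernel pgd page's ->private is expected to point at it (set up
-  * when the pgd is allocated), so look it up and offset into it.
-  */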
- pgd_t *xen_get_user_pgd(pgd_t *pgd)
- {
- pgd_t *pgd_page = (pgd_t *)(((unsigned long)pgd) & PAGE_MASK);
- unsigned offset = pgd - pgd_page;
- pgd_t *user_ptr = NULL;
- if (offset < pgd_index(USER_LIMIT)) {
- struct page *page = virt_to_page(pgd_page);
- user_ptr = (pgd_t *)page->private;
- if (user_ptr)
- user_ptr += offset;
- }
- return user_ptr;
- }
- static void __xen_set_pgd_hyper(pgd_t *ptr, pgd_t val)
- {
- struct mmu_update u;
- u.ptr = virt_to_machine(ptr).maddr;
- u.val = pgd_val_ma(val);
- extend_mmu_update(&u);
- }
- /*
- * Raw hypercall-based set_pgd, intended for use in early boot, before
- * there's a page structure. This implies:
- * 1. The only existing pagetable is the kernel's
- * 2. It is always pinned
- * 3. It has no user pagetable attached to it
- */
- void __init xen_set_pgd_hyper(pgd_t *ptr, pgd_t val)
- {
- preempt_disable();
- xen_mc_batch();
- __xen_set_pgd_hyper(ptr, val);
- xen_mc_issue(PARAVIRT_LAZY_MMU);
- preempt_enable();
- }
- void xen_set_pgd(pgd_t *ptr, pgd_t val)
- {
- pgd_t *user_ptr = xen_get_user_pgd(ptr);
- /* If the page is not pinned, we can just update the entry
- directly */
- if (!page_pinned(ptr)) {
- *ptr = val;
- if (user_ptr) {
- WARN_ON(page_pinned(user_ptr));
- *user_ptr = val;
- }
- return;
- }
- /* If it's pinned, then we can at least batch the kernel and
- user updates together. */
- xen_mc_batch();
- __xen_set_pgd_hyper(ptr, val);
- if (user_ptr)
- __xen_set_pgd_hyper(user_ptr, val);
- xen_mc_issue(PARAVIRT_LAZY_MMU);
- }
- #endif /* PAGETABLE_LEVELS == 4 */
- /*
- * (Yet another) pagetable walker. This one is intended for pinning a
- * pagetable. This means that it walks a pagetable and calls the
- * callback function on each page it finds making up the page table,
- * at every level. It walks the entire pagetable, but it only bothers
- * pinning pte pages which are below limit. In the normal case this
- * will be STACK_TOP_MAX, but at boot we need to pin up to
- * FIXADDR_TOP.
- *
- * For 32-bit the important bit is that we don't pin beyond there,
- * because then we start getting into Xen's ptes.
- *
- * For 64-bit, we must skip the Xen hole in the middle of the address
- * space, just after the big x86-64 virtual hole.
- */
- static int pgd_walk(pgd_t *pgd, int (*func)(struct page *, enum pt_level),
- unsigned long limit)
- {
- int flush = 0;
- unsigned hole_low, hole_high;
- unsigned pgdidx_limit, pudidx_limit, pmdidx_limit;
- unsigned pgdidx, pudidx, pmdidx;
- /* The limit is the last byte to be touched */
- limit--;
- BUG_ON(limit >= FIXADDR_TOP);
- if (xen_feature(XENFEAT_auto_translated_physmap))
- return 0;
- /*
- * 64-bit has a great big hole in the middle of the address
- * space, which contains the Xen mappings. On 32-bit this ends up
- * as a zero-sized hole, so skipping it is a no-op.
- */
- hole_low = pgd_index(USER_LIMIT);
- hole_high = pgd_index(PAGE_OFFSET);
- pgdidx_limit = pgd_index(limit);
- #if PTRS_PER_PUD > 1
- pudidx_limit = pud_index(limit);
- #else
- pudidx_limit = 0;
- #endif
- #if PTRS_PER_PMD > 1
- pmdidx_limit = pmd_index(limit);
- #else
- pmdidx_limit = 0;
- #endif
- flush |= (*func)(virt_to_page(pgd), PT_PGD);
- for (pgdidx = 0; pgdidx <= pgdidx_limit; pgdidx++) {
- pud_t *pud;
- if (pgdidx >= hole_low && pgdidx < hole_high)
- continue;
- if (!pgd_val(pgd[pgdidx]))
- continue;
- pud = pud_offset(&pgd[pgdidx], 0);
- if (PTRS_PER_PUD > 1) /* not folded */
- flush |= (*func)(virt_to_page(pud), PT_PUD);
- for (pudidx = 0; pudidx < PTRS_PER_PUD; pudidx++) {
- pmd_t *pmd;
- if (pgdidx == pgdidx_limit &&
- pudidx > pudidx_limit)
- goto out;
- if (pud_none(pud[pudidx]))
- continue;
- pmd = pmd_offset(&pud[pudidx], 0);
- if (PTRS_PER_PMD > 1) /* not folded */
- flush |= (*func)(virt_to_page(pmd), PT_PMD);
- for (pmdidx = 0; pmdidx < PTRS_PER_PMD; pmdidx++) {
- struct page *pte;
- if (pgdidx == pgdidx_limit &&
- pudidx == pudidx_limit &&
- pmdidx > pmdidx_limit)
- goto out;
- if (pmd_none(pmd[pmdidx]))
- continue;
- pte = pmd_page(pmd[pmdidx]);
- flush |= (*func)(pte, PT_PTE);
- }
- }
- }
- out:
- return flush;
- }
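- /*
-  * Take the pte page's split lock if split pte locks are in use;
-  * otherwise NULL is returned and no per-page lock is taken. The
-  * matching unlock is deferred to do_unlock() via a multicall
-  * callback once the batch completes.
-  */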
- static spinlock_t *lock_pte(struct page *page)
- {
- spinlock_t *ptl = NULL;
- #if NR_CPUS >= CONFIG_SPLIT_PTLOCK_CPUS
- ptl = __pte_lockptr(page);
- spin_lock(ptl);
- #endif
- return ptl;
- }
- static void do_unlock(void *v)
- {
- spinlock_t *ptl = v;
- spin_unlock(ptl);
- }
- static void xen_do_pin(unsigned level, unsigned long pfn)
- {
- struct mmuext_op *op;
- struct multicall_space mcs;
- mcs = __xen_mc_entry(sizeof(*op));
- op = mcs.args;
- op->cmd = level;
- op->arg1.mfn = pfn_to_mfn(pfn);
- MULTI_mmuext_op(mcs.mc, op, 1, NULL, DOMID_SELF);
- }
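- /*
-  * Make one pagetable page read-only and, for pte pages, pin it as an
-  * L1 table. Returns nonzero if the caller will need to flush kmaps
-  * (an unpinned highmem page was encountered).
-  */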
- static int pin_page(struct page *page, enum pt_level level)
- {
- unsigned pgfl = TestSetPagePinned(page);
- int flush;
- if (pgfl)
- flush = 0; /* already pinned */
- else if (PageHighMem(page))
- /* kmaps need flushing if we found an unpinned
- highpage */
- flush = 1;
- else {
- void *pt = lowmem_page_address(page);
- unsigned long pfn = page_to_pfn(page);
- struct multicall_space mcs = __xen_mc_entry(0);
- spinlock_t *ptl;
- flush = 0;
- ptl = NULL;
- if (level == PT_PTE)
- ptl = lock_pte(page);
- MULTI_update_va_mapping(mcs.mc, (unsigned long)pt,
- pfn_pte(pfn, PAGE_KERNEL_RO),
- level == PT_PGD ? UVMF_TLB_FLUSH : 0);
- if (level == PT_PTE)
- xen_do_pin(MMUEXT_PIN_L1_TABLE, pfn);
- if (ptl) {
- /* Queue a deferred unlock for when this batch
- is completed. */
- xen_mc_callback(do_unlock, ptl);
- }
- }
- return flush;
- }
- /* This is called just after an mm has been created, but it has not
- been used yet. We need to make sure that its pagetable is all
- read-only, and can be pinned. */
- void xen_pgd_pin(pgd_t *pgd)
- {
- xen_mc_batch();
- if (pgd_walk(pgd, pin_page, USER_LIMIT)) {
- /* re-enable interrupts for kmap_flush_unused */
- xen_mc_issue(0);
- kmap_flush_unused();
- xen_mc_batch();
- }
- #ifdef CONFIG_X86_64
- {
- pgd_t *user_pgd = xen_get_user_pgd(pgd);
- xen_do_pin(MMUEXT_PIN_L4_TABLE, PFN_DOWN(__pa(pgd)));
- if (user_pgd) {
- pin_page(virt_to_page(user_pgd), PT_PGD);
- xen_do_pin(MMUEXT_PIN_L4_TABLE, PFN_DOWN(__pa(user_pgd)));
- }
- }
- #else /* CONFIG_X86_32 */
- #ifdef CONFIG_X86_PAE
- /* Need to make sure unshared kernel PMD is pinnable */
- pin_page(virt_to_page(pgd_page(pgd[pgd_index(TASK_SIZE)])), PT_PMD);
- #endif
- xen_do_pin(MMUEXT_PIN_L3_TABLE, PFN_DOWN(__pa(pgd)));
- #endif /* CONFIG_X86_64 */
- xen_mc_issue(0);
- }
- /*
- * On save, we need to pin all pagetables to make sure they get their
- * mfns turned into pfns. Search the list for any unpinned pgds and pin
- * them (unpinned pgds are not currently in use, probably because the
- * process is under construction or destruction).
- */
- void xen_mm_pin_all(void)
- {
- unsigned long flags;
- struct page *page;
- spin_lock_irqsave(&pgd_lock, flags);
- list_for_each_entry(page, &pgd_list, lru) {
- if (!PagePinned(page)) {
- xen_pgd_pin((pgd_t *)page_address(page));
- SetPageSavePinned(page);
- }
- }
- spin_unlock_irqrestore(&pgd_lock, flags);
- }
- /*
- * The init_mm pagetable is really pinned as soon as it's created, but
- * that's before we have page structures to store the bits. So do all
- * the book-keeping now.
- */
- static __init int mark_pinned(struct page *page, enum pt_level level)
- {
- SetPagePinned(page);
- return 0;
- }
- void __init xen_mark_init_mm_pinned(void)
- {
- pgd_walk(init_mm.pgd, mark_pinned, FIXADDR_TOP);
- }
- static int unpin_page(struct page *page, enum pt_level level)
- {
- unsigned pgfl = TestClearPagePinned(page);
- if (pgfl && !PageHighMem(page)) {
- void *pt = lowmem_page_address(page);
- unsigned long pfn = page_to_pfn(page);
- spinlock_t *ptl = NULL;
- struct multicall_space mcs;
- if (level == PT_PTE) {
- ptl = lock_pte(page);
- xen_do_pin(MMUEXT_UNPIN_TABLE, pfn);
- }
- mcs = __xen_mc_entry(0);
- MULTI_update_va_mapping(mcs.mc, (unsigned long)pt,
- pfn_pte(pfn, PAGE_KERNEL),
- level == PT_PGD ? UVMF_TLB_FLUSH : 0);
- if (ptl) {
- /* unlock when batch completed */
- xen_mc_callback(do_unlock, ptl);
- }
- }
- return 0; /* never need to flush on unpin */
- }
- /* Release a pagetable's pages back as normal RW */
- static void xen_pgd_unpin(pgd_t *pgd)
- {
- xen_mc_batch();
- xen_do_pin(MMUEXT_UNPIN_TABLE, PFN_DOWN(__pa(pgd)));
- #ifdef CONFIG_X86_64
- {
- pgd_t *user_pgd = xen_get_user_pgd(pgd);
- if (user_pgd) {
- xen_do_pin(MMUEXT_UNPIN_TABLE, PFN_DOWN(__pa(user_pgd)));
- unpin_page(virt_to_page(user_pgd), PT_PGD);
- }
- }
- #endif
- #ifdef CONFIG_X86_PAE
- /* Need to make sure unshared kernel PMD is unpinned */
- unpin_page(virt_to_page(pgd_page(pgd[pgd_index(TASK_SIZE)])), PT_PMD);
- #endif
- pgd_walk(pgd, unpin_page, USER_LIMIT);
- xen_mc_issue(0);
- }
- /*
- * On resume, undo any pinning done at save, so that the rest of the
- * kernel doesn't see any unexpected pinned pagetables.
- */
- void xen_mm_unpin_all(void)
- {
- unsigned long flags;
- struct page *page;
- spin_lock_irqsave(&pgd_lock, flags);
- list_for_each_entry(page, &pgd_list, lru) {
- if (PageSavePinned(page)) {
- BUG_ON(!PagePinned(page));
- xen_pgd_unpin((pgd_t *)page_address(page));
- ClearPageSavePinned(page);
- }
- }
- spin_unlock_irqrestore(&pgd_lock, flags);
- }
- void xen_activate_mm(struct mm_struct *prev, struct mm_struct *next)
- {
- spin_lock(&next->page_table_lock);
- xen_pgd_pin(next->pgd);
- spin_unlock(&next->page_table_lock);
- }
- void xen_dup_mmap(struct mm_struct *oldmm, struct mm_struct *mm)
- {
- spin_lock(&mm->page_table_lock);
- xen_pgd_pin(mm->pgd);
- spin_unlock(&mm->page_table_lock);
- }
- #ifdef CONFIG_SMP
- /* Another cpu may still have its %cr3 pointing at the pagetable, so
- we need to repoint it somewhere else before we can unpin it. */
- static void drop_other_mm_ref(void *info)
- {
- struct mm_struct *mm = info;
- struct mm_struct *active_mm;
- #ifdef CONFIG_X86_64
- active_mm = read_pda(active_mm);
- #else
- active_mm = __get_cpu_var(cpu_tlbstate).active_mm;
- #endif
- if (active_mm == mm)
- leave_mm(smp_processor_id());
- /* If this cpu still has a stale cr3 reference, then make sure
- it has been flushed. */
- if (x86_read_percpu(xen_current_cr3) == __pa(mm->pgd)) {
- load_cr3(swapper_pg_dir);
- arch_flush_lazy_cpu_mode();
- }
- }
- static void drop_mm_ref(struct mm_struct *mm)
- {
- cpumask_t mask;
- unsigned cpu;
- if (current->active_mm == mm) {
- if (current->mm == mm)
- load_cr3(swapper_pg_dir);
- else
- leave_mm(smp_processor_id());
- arch_flush_lazy_cpu_mode();
- }
- /* Get the "official" set of cpus referring to our pagetable. */
- mask = mm->cpu_vm_mask;
- /* It's possible that a vcpu may have a stale reference to our
- cr3, because it's in lazy mode and hasn't yet flushed its set
- of pending hypercalls. In this case, we can
- look at its actual current cr3 value, and force it to flush
- if needed. */
- for_each_online_cpu(cpu) {
- if (per_cpu(xen_current_cr3, cpu) == __pa(mm->pgd))
- cpu_set(cpu, mask);
- }
- if (!cpus_empty(mask))
- smp_call_function_mask(mask, drop_other_mm_ref, mm, 1);
- }
- #else
- static void drop_mm_ref(struct mm_struct *mm)
- {
- if (current->active_mm == mm)
- load_cr3(swapper_pg_dir);
- }
- #endif
- /*
- * While a process runs, Xen pins its pagetable, which means that the
- * hypervisor forces it to be read-only, and controls all updates
- * to it. This means that all pagetable updates have to go via the
- * hypervisor, which is moderately expensive.
- *
- * Since we're pulling the pagetable down, we switch to use init_mm,
- * unpin the old process's pagetable and mark it all read-write, which
- * allows further operations on it to be simple memory accesses.
- *
- * The only subtle point is that another CPU may still be using the
- * pagetable because of lazy tlb flushing. This means we need to
- * switch all CPUs off this pagetable before we can unpin it.
- */
- void xen_exit_mmap(struct mm_struct *mm)
- {
- get_cpu(); /* make sure we don't move around */
- drop_mm_ref(mm);
- put_cpu();
- spin_lock(&mm->page_table_lock);
- /* pgd may not be pinned in the error exit path of execve */
- if (page_pinned(mm->pgd))
- xen_pgd_unpin(mm->pgd);
- spin_unlock(&mm->page_table_lock);
- }