@@ -1,10 +1,11 @@
/*
- * Copyright (C) 2008-2011 Freescale Semiconductor, Inc. All rights reserved.
+ * Copyright (C) 2008-2013 Freescale Semiconductor, Inc. All rights reserved.
*
* Author: Yu Liu, yu.liu@freescale.com
* Scott Wood, scottwood@freescale.com
* Ashish Kalra, ashish.kalra@freescale.com
* Varun Sethi, varun.sethi@freescale.com
+ * Alexander Graf, agraf@suse.de
*
* Description:
* This file is based on arch/powerpc/kvm/44x_tlb.c,
@@ -33,10 +34,7 @@
#include "e500.h"
#include "trace.h"
#include "timing.h"
-
-#define to_htlb1_esel(esel) (host_tlb_params[1].entries - (esel) - 1)
-
-static struct kvmppc_e500_tlb_params host_tlb_params[E500_TLB_NUM];
+#include "e500_mmu_host.h"

static inline unsigned int gtlb0_get_next_victim(
struct kvmppc_vcpu_e500 *vcpu_e500)
@@ -50,197 +48,6 @@ static inline unsigned int gtlb0_get_next_victim(
return victim;
}

-static inline unsigned int tlb1_max_shadow_size(void)
-{
- /* reserve one entry for magic page */
- return host_tlb_params[1].entries - tlbcam_index - 1;
-}
-
-static inline int tlbe_is_writable(struct kvm_book3e_206_tlb_entry *tlbe)
-{
- return tlbe->mas7_3 & (MAS3_SW|MAS3_UW);
-}
-
-static inline u32 e500_shadow_mas3_attrib(u32 mas3, int usermode)
-{
- /* Mask off reserved bits. */
- mas3 &= MAS3_ATTRIB_MASK;
-
-#ifndef CONFIG_KVM_BOOKE_HV
- if (!usermode) {
- /* Guest is in supervisor mode,
- * so we need to translate guest
- * supervisor permissions into user permissions. */
- mas3 &= ~E500_TLB_USER_PERM_MASK;
- mas3 |= (mas3 & E500_TLB_SUPER_PERM_MASK) << 1;
- }
- mas3 |= E500_TLB_SUPER_PERM_MASK;
-#endif
- return mas3;
-}
-
-static inline u32 e500_shadow_mas2_attrib(u32 mas2, int usermode)
-{
-#ifdef CONFIG_SMP
- return (mas2 & MAS2_ATTRIB_MASK) | MAS2_M;
-#else
- return mas2 & MAS2_ATTRIB_MASK;
-#endif
-}
-
-/*
- * writing shadow tlb entry to host TLB
- */
-static inline void __write_host_tlbe(struct kvm_book3e_206_tlb_entry *stlbe,
- uint32_t mas0)
-{
- unsigned long flags;
-
- local_irq_save(flags);
- mtspr(SPRN_MAS0, mas0);
- mtspr(SPRN_MAS1, stlbe->mas1);
- mtspr(SPRN_MAS2, (unsigned long)stlbe->mas2);
- mtspr(SPRN_MAS3, (u32)stlbe->mas7_3);
- mtspr(SPRN_MAS7, (u32)(stlbe->mas7_3 >> 32));
-#ifdef CONFIG_KVM_BOOKE_HV
- mtspr(SPRN_MAS8, stlbe->mas8);
-#endif
- asm volatile("isync; tlbwe" : : : "memory");
-
-#ifdef CONFIG_KVM_BOOKE_HV
- /* Must clear mas8 for other host tlbwe's */
- mtspr(SPRN_MAS8, 0);
- isync();
-#endif
- local_irq_restore(flags);
-
- trace_kvm_booke206_stlb_write(mas0, stlbe->mas8, stlbe->mas1,
- stlbe->mas2, stlbe->mas7_3);
-}
-
-/*
- * Acquire a mas0 with victim hint, as if we just took a TLB miss.
- *
- * We don't care about the address we're searching for, other than that it's
- * in the right set and is not present in the TLB. Using a zero PID and a
- * userspace address means we don't have to set and then restore MAS5, or
- * calculate a proper MAS6 value.
- */
-static u32 get_host_mas0(unsigned long eaddr)
-{
- unsigned long flags;
- u32 mas0;
-
- local_irq_save(flags);
- mtspr(SPRN_MAS6, 0);
- asm volatile("tlbsx 0, %0" : : "b" (eaddr & ~CONFIG_PAGE_OFFSET));
- mas0 = mfspr(SPRN_MAS0);
- local_irq_restore(flags);
-
- return mas0;
-}
-
-/* sesel is for tlb1 only */
-static inline void write_host_tlbe(struct kvmppc_vcpu_e500 *vcpu_e500,
- int tlbsel, int sesel, struct kvm_book3e_206_tlb_entry *stlbe)
-{
- u32 mas0;
-
- if (tlbsel == 0) {
- mas0 = get_host_mas0(stlbe->mas2);
- __write_host_tlbe(stlbe, mas0);
- } else {
- __write_host_tlbe(stlbe,
- MAS0_TLBSEL(1) |
- MAS0_ESEL(to_htlb1_esel(sesel)));
- }
-}
-
-/* sesel is for tlb1 only */
-static void write_stlbe(struct kvmppc_vcpu_e500 *vcpu_e500,
- struct kvm_book3e_206_tlb_entry *gtlbe,
- struct kvm_book3e_206_tlb_entry *stlbe,
- int stlbsel, int sesel)
-{
- int stid;
-
- preempt_disable();
- stid = kvmppc_e500_get_tlb_stid(&vcpu_e500->vcpu, gtlbe);
-
- stlbe->mas1 |= MAS1_TID(stid);
- write_host_tlbe(vcpu_e500, stlbsel, sesel, stlbe);
- preempt_enable();
-}
-
-#ifdef CONFIG_KVM_E500V2
-void kvmppc_map_magic(struct kvm_vcpu *vcpu)
-{
- struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
- struct kvm_book3e_206_tlb_entry magic;
- ulong shared_page = ((ulong)vcpu->arch.shared) & PAGE_MASK;
- unsigned int stid;
- pfn_t pfn;
-
- pfn = (pfn_t)virt_to_phys((void *)shared_page) >> PAGE_SHIFT;
- get_page(pfn_to_page(pfn));
-
- preempt_disable();
- stid = kvmppc_e500_get_sid(vcpu_e500, 0, 0, 0, 0);
-
- magic.mas1 = MAS1_VALID | MAS1_TS | MAS1_TID(stid) |
- MAS1_TSIZE(BOOK3E_PAGESZ_4K);
- magic.mas2 = vcpu->arch.magic_page_ea | MAS2_M;
- magic.mas7_3 = ((u64)pfn << PAGE_SHIFT) |
- MAS3_SW | MAS3_SR | MAS3_UW | MAS3_UR;
- magic.mas8 = 0;
-
- __write_host_tlbe(&magic, MAS0_TLBSEL(1) | MAS0_ESEL(tlbcam_index));
- preempt_enable();
-}
-#endif
-
-static void inval_gtlbe_on_host(struct kvmppc_vcpu_e500 *vcpu_e500,
- int tlbsel, int esel)
-{
- struct kvm_book3e_206_tlb_entry *gtlbe =
- get_entry(vcpu_e500, tlbsel, esel);
- struct tlbe_ref *ref = &vcpu_e500->gtlb_priv[tlbsel][esel].ref;
-
- /* Don't bother with unmapped entries */
- if (!(ref->flags & E500_TLB_VALID))
- return;
-
- if (tlbsel == 1 && ref->flags & E500_TLB_BITMAP) {
- u64 tmp = vcpu_e500->g2h_tlb1_map[esel];
- int hw_tlb_indx;
- unsigned long flags;
-
- local_irq_save(flags);
- while (tmp) {
- hw_tlb_indx = __ilog2_u64(tmp & -tmp);
- mtspr(SPRN_MAS0,
- MAS0_TLBSEL(1) |
- MAS0_ESEL(to_htlb1_esel(hw_tlb_indx)));
- mtspr(SPRN_MAS1, 0);
- asm volatile("tlbwe");
- vcpu_e500->h2g_tlb1_rmap[hw_tlb_indx] = 0;
- tmp &= tmp - 1;
- }
- mb();
- vcpu_e500->g2h_tlb1_map[esel] = 0;
- ref->flags &= ~(E500_TLB_BITMAP | E500_TLB_VALID);
- local_irq_restore(flags);
-
- return;
- }
-
- /* Guest tlbe is backed by at most one host tlbe per shadow pid. */
- kvmppc_e500_tlbil_one(vcpu_e500, gtlbe);
-
- /* Mark the TLB as not backed by the host anymore */
- ref->flags &= ~E500_TLB_VALID;
-}
-
static int tlb0_set_base(gva_t addr, int sets, int ways)
{
int set_base;
@@ -319,70 +126,6 @@ static int kvmppc_e500_tlb_index(struct kvmppc_vcpu_e500 *vcpu_e500,
return -1;
}

-static inline void kvmppc_e500_ref_setup(struct tlbe_ref *ref,
- struct kvm_book3e_206_tlb_entry *gtlbe,
- pfn_t pfn)
-{
- ref->pfn = pfn;
- ref->flags = E500_TLB_VALID;
-
- if (tlbe_is_writable(gtlbe))
- kvm_set_pfn_dirty(pfn);
-}
-
-static inline void kvmppc_e500_ref_release(struct tlbe_ref *ref)
-{
- if (ref->flags & E500_TLB_VALID) {
- trace_kvm_booke206_ref_release(ref->pfn, ref->flags);
- ref->flags = 0;
- }
-}
-
-static void clear_tlb1_bitmap(struct kvmppc_vcpu_e500 *vcpu_e500)
-{
- if (vcpu_e500->g2h_tlb1_map)
- memset(vcpu_e500->g2h_tlb1_map, 0,
- sizeof(u64) * vcpu_e500->gtlb_params[1].entries);
- if (vcpu_e500->h2g_tlb1_rmap)
- memset(vcpu_e500->h2g_tlb1_rmap, 0,
- sizeof(unsigned int) * host_tlb_params[1].entries);
-}
-
-static void clear_tlb_privs(struct kvmppc_vcpu_e500 *vcpu_e500)
-{
- int tlbsel = 0;
- int i;
-
- for (i = 0; i < vcpu_e500->gtlb_params[tlbsel].entries; i++) {
- struct tlbe_ref *ref =
- &vcpu_e500->gtlb_priv[tlbsel][i].ref;
- kvmppc_e500_ref_release(ref);
- }
-}
-
-static void clear_tlb_refs(struct kvmppc_vcpu_e500 *vcpu_e500)
-{
- int stlbsel = 1;
- int i;
-
- kvmppc_e500_tlbil_all(vcpu_e500);
-
- for (i = 0; i < host_tlb_params[stlbsel].entries; i++) {
- struct tlbe_ref *ref =
- &vcpu_e500->tlb_refs[stlbsel][i];
- kvmppc_e500_ref_release(ref);
- }
-
- clear_tlb_privs(vcpu_e500);
-}
-
-void kvmppc_core_flush_tlb(struct kvm_vcpu *vcpu)
-{
- struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
- clear_tlb_refs(vcpu_e500);
- clear_tlb1_bitmap(vcpu_e500);
-}
-
static inline void kvmppc_e500_deliver_tlb_miss(struct kvm_vcpu *vcpu,
unsigned int eaddr, int as)
{
@@ -408,234 +151,6 @@ static inline void kvmppc_e500_deliver_tlb_miss(struct kvm_vcpu *vcpu,
| (as ? MAS6_SAS : 0);
}

-/* TID must be supplied by the caller */
-static inline void kvmppc_e500_setup_stlbe(
- struct kvm_vcpu *vcpu,
- struct kvm_book3e_206_tlb_entry *gtlbe,
- int tsize, struct tlbe_ref *ref, u64 gvaddr,
- struct kvm_book3e_206_tlb_entry *stlbe)
-{
- pfn_t pfn = ref->pfn;
- u32 pr = vcpu->arch.shared->msr & MSR_PR;
-
- BUG_ON(!(ref->flags & E500_TLB_VALID));
-
- /* Force IPROT=0 for all guest mappings. */
- stlbe->mas1 = MAS1_TSIZE(tsize) | get_tlb_sts(gtlbe) | MAS1_VALID;
- stlbe->mas2 = (gvaddr & MAS2_EPN) |
- e500_shadow_mas2_attrib(gtlbe->mas2, pr);
- stlbe->mas7_3 = ((u64)pfn << PAGE_SHIFT) |
- e500_shadow_mas3_attrib(gtlbe->mas7_3, pr);
-
-#ifdef CONFIG_KVM_BOOKE_HV
- stlbe->mas8 = MAS8_TGS | vcpu->kvm->arch.lpid;
-#endif
-}
-
-static inline int kvmppc_e500_shadow_map(struct kvmppc_vcpu_e500 *vcpu_e500,
- u64 gvaddr, gfn_t gfn, struct kvm_book3e_206_tlb_entry *gtlbe,
- int tlbsel, struct kvm_book3e_206_tlb_entry *stlbe,
- struct tlbe_ref *ref)
-{
- struct kvm_memory_slot *slot;
- unsigned long pfn = 0; /* silence GCC warning */
- unsigned long hva;
- int pfnmap = 0;
- int tsize = BOOK3E_PAGESZ_4K;
-
- /*
- * Translate guest physical to true physical, acquiring
- * a page reference if it is normal, non-reserved memory.
- *
- * gfn_to_memslot() must succeed because otherwise we wouldn't
- * have gotten this far. Eventually we should just pass the slot
- * pointer through from the first lookup.
- */
- slot = gfn_to_memslot(vcpu_e500->vcpu.kvm, gfn);
- hva = gfn_to_hva_memslot(slot, gfn);
-
- if (tlbsel == 1) {
- struct vm_area_struct *vma;
- down_read(&current->mm->mmap_sem);
-
- vma = find_vma(current->mm, hva);
- if (vma && hva >= vma->vm_start &&
- (vma->vm_flags & VM_PFNMAP)) {
- /*
- * This VMA is a physically contiguous region (e.g.
- * /dev/mem) that bypasses normal Linux page
- * management. Find the overlap between the
- * vma and the memslot.
- */
-
- unsigned long start, end;
- unsigned long slot_start, slot_end;
-
- pfnmap = 1;
-
- start = vma->vm_pgoff;
- end = start +
- ((vma->vm_end - vma->vm_start) >> PAGE_SHIFT);
-
- pfn = start + ((hva - vma->vm_start) >> PAGE_SHIFT);
-
- slot_start = pfn - (gfn - slot->base_gfn);
- slot_end = slot_start + slot->npages;
-
- if (start < slot_start)
- start = slot_start;
- if (end > slot_end)
- end = slot_end;
-
- tsize = (gtlbe->mas1 & MAS1_TSIZE_MASK) >>
- MAS1_TSIZE_SHIFT;
-
- /*
- * e500 doesn't implement the lowest tsize bit,
- * or 1K pages.
- */
- tsize = max(BOOK3E_PAGESZ_4K, tsize & ~1);
-
- /*
- * Now find the largest tsize (up to what the guest
- * requested) that will cover gfn, stay within the
- * range, and for which gfn and pfn are mutually
- * aligned.
- */
-
- for (; tsize > BOOK3E_PAGESZ_4K; tsize -= 2) {
- unsigned long gfn_start, gfn_end, tsize_pages;
- tsize_pages = 1 << (tsize - 2);
-
- gfn_start = gfn & ~(tsize_pages - 1);
- gfn_end = gfn_start + tsize_pages;
-
- if (gfn_start + pfn - gfn < start)
- continue;
- if (gfn_end + pfn - gfn > end)
- continue;
- if ((gfn & (tsize_pages - 1)) !=
- (pfn & (tsize_pages - 1)))
- continue;
-
- gvaddr &= ~((tsize_pages << PAGE_SHIFT) - 1);
- pfn &= ~(tsize_pages - 1);
- break;
- }
- } else if (vma && hva >= vma->vm_start &&
- (vma->vm_flags & VM_HUGETLB)) {
- unsigned long psize = vma_kernel_pagesize(vma);
-
- tsize = (gtlbe->mas1 & MAS1_TSIZE_MASK) >>
- MAS1_TSIZE_SHIFT;
-
- /*
- * Take the largest page size that satisfies both host
- * and guest mapping
- */
- tsize = min(__ilog2(psize) - 10, tsize);
-
- /*
- * e500 doesn't implement the lowest tsize bit,
- * or 1K pages.
- */
- tsize = max(BOOK3E_PAGESZ_4K, tsize & ~1);
- }
-
- up_read(&current->mm->mmap_sem);
- }
-
- if (likely(!pfnmap)) {
- unsigned long tsize_pages = 1 << (tsize + 10 - PAGE_SHIFT);
- pfn = gfn_to_pfn_memslot(slot, gfn);
- if (is_error_noslot_pfn(pfn)) {
- printk(KERN_ERR "Couldn't get real page for gfn %lx!\n",
- (long)gfn);
- return -EINVAL;
- }
-
- /* Align guest and physical address to page map boundaries */
- pfn &= ~(tsize_pages - 1);
- gvaddr &= ~((tsize_pages << PAGE_SHIFT) - 1);
- }
-
- /* Drop old ref and setup new one. */
- kvmppc_e500_ref_release(ref);
- kvmppc_e500_ref_setup(ref, gtlbe, pfn);
-
- kvmppc_e500_setup_stlbe(&vcpu_e500->vcpu, gtlbe, tsize,
- ref, gvaddr, stlbe);
-
- /* Clear i-cache for new pages */
- kvmppc_mmu_flush_icache(pfn);
-
- /* Drop refcount on page, so that mmu notifiers can clear it */
- kvm_release_pfn_clean(pfn);
-
- return 0;
-}
-
-/* XXX only map the one-one case, for now use TLB0 */
-static int kvmppc_e500_tlb0_map(struct kvmppc_vcpu_e500 *vcpu_e500,
- int esel,
- struct kvm_book3e_206_tlb_entry *stlbe)
-{
- struct kvm_book3e_206_tlb_entry *gtlbe;
- struct tlbe_ref *ref;
- int stlbsel = 0;
- int sesel = 0;
- int r;
-
- gtlbe = get_entry(vcpu_e500, 0, esel);
- ref = &vcpu_e500->gtlb_priv[0][esel].ref;
-
- r = kvmppc_e500_shadow_map(vcpu_e500, get_tlb_eaddr(gtlbe),
- get_tlb_raddr(gtlbe) >> PAGE_SHIFT,
- gtlbe, 0, stlbe, ref);
- if (r)
- return r;
-
- write_stlbe(vcpu_e500, gtlbe, stlbe, stlbsel, sesel);
-
- return 0;
-}
-
-/* Caller must ensure that the specified guest TLB entry is safe to insert into
- * the shadow TLB. */
-/* XXX for both one-one and one-to-many , for now use TLB1 */
-static int kvmppc_e500_tlb1_map(struct kvmppc_vcpu_e500 *vcpu_e500,
- u64 gvaddr, gfn_t gfn, struct kvm_book3e_206_tlb_entry *gtlbe,
- struct kvm_book3e_206_tlb_entry *stlbe, int esel)
-{
- struct tlbe_ref *ref;
- unsigned int sesel;
- int r;
- int stlbsel = 1;
-
- sesel = vcpu_e500->host_tlb1_nv++;
-
- if (unlikely(vcpu_e500->host_tlb1_nv >= tlb1_max_shadow_size()))
- vcpu_e500->host_tlb1_nv = 0;
-
- ref = &vcpu_e500->tlb_refs[1][sesel];
- r = kvmppc_e500_shadow_map(vcpu_e500, gvaddr, gfn, gtlbe, 1, stlbe,
- ref);
- if (r)
- return r;
-
- vcpu_e500->g2h_tlb1_map[esel] |= (u64)1 << sesel;
- vcpu_e500->gtlb_priv[1][esel].ref.flags |= E500_TLB_BITMAP;
- if (vcpu_e500->h2g_tlb1_rmap[sesel]) {
- unsigned int idx = vcpu_e500->h2g_tlb1_rmap[sesel];
- vcpu_e500->g2h_tlb1_map[idx] &= ~(1ULL << sesel);
- }
- vcpu_e500->h2g_tlb1_rmap[sesel] = esel;
-
- write_stlbe(vcpu_e500, gtlbe, stlbe, stlbsel, sesel);
-
- return 0;
-}
-
static void kvmppc_recalc_tlb1map_range(struct kvmppc_vcpu_e500 *vcpu_e500)
{
int size = vcpu_e500->gtlb_params[1].entries;
@@ -1020,85 +535,6 @@ void kvmppc_mmu_destroy(struct kvm_vcpu *vcpu)
{
}

-void kvmppc_mmu_map(struct kvm_vcpu *vcpu, u64 eaddr, gpa_t gpaddr,
- unsigned int index)
-{
- struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
- struct tlbe_priv *priv;
- struct kvm_book3e_206_tlb_entry *gtlbe, stlbe;
- int tlbsel = tlbsel_of(index);
- int esel = esel_of(index);
-
- gtlbe = get_entry(vcpu_e500, tlbsel, esel);
-
- switch (tlbsel) {
- case 0:
- priv = &vcpu_e500->gtlb_priv[tlbsel][esel];
-
- /* Triggers after clear_tlb_refs or on initial mapping */
- if (!(priv->ref.flags & E500_TLB_VALID)) {
- kvmppc_e500_tlb0_map(vcpu_e500, esel, &stlbe);
- } else {
- kvmppc_e500_setup_stlbe(vcpu, gtlbe, BOOK3E_PAGESZ_4K,
- &priv->ref, eaddr, &stlbe);
- write_stlbe(vcpu_e500, gtlbe, &stlbe, 0, 0);
- }
- break;
-
- case 1: {
- gfn_t gfn = gpaddr >> PAGE_SHIFT;
- kvmppc_e500_tlb1_map(vcpu_e500, eaddr, gfn, gtlbe, &stlbe,
- esel);
- break;
- }
-
- default:
- BUG();
- break;
- }
-}
-
-/************* MMU Notifiers *************/
-
-int kvm_unmap_hva(struct kvm *kvm, unsigned long hva)
-{
- trace_kvm_unmap_hva(hva);
-
- /*
- * Flush all shadow tlb entries everywhere. This is slow, but
- * we are 100% sure that we catch the to be unmapped page
- */
- kvm_flush_remote_tlbs(kvm);
-
- return 0;
-}
-
-int kvm_unmap_hva_range(struct kvm *kvm, unsigned long start, unsigned long end)
-{
- /* kvm_unmap_hva flushes everything anyways */
- kvm_unmap_hva(kvm, start);
-
- return 0;
-}
-
-int kvm_age_hva(struct kvm *kvm, unsigned long hva)
-{
- /* XXX could be more clever ;) */
- return 0;
-}
-
-int kvm_test_age_hva(struct kvm *kvm, unsigned long hva)
-{
- /* XXX could be more clever ;) */
- return 0;
-}
-
-void kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte)
-{
- /* The page will get remapped properly on its next fault */
- kvm_unmap_hva(kvm, hva);
-}
-
/*****************************************/

static void free_gtlb(struct kvmppc_vcpu_e500 *vcpu_e500)
@@ -1309,37 +745,8 @@ int kvmppc_e500_tlb_init(struct kvmppc_vcpu_e500 *vcpu_e500)
int entry_size = sizeof(struct kvm_book3e_206_tlb_entry);
int entries = KVM_E500_TLB0_SIZE + KVM_E500_TLB1_SIZE;

- host_tlb_params[0].entries = mfspr(SPRN_TLB0CFG) & TLBnCFG_N_ENTRY;
- host_tlb_params[1].entries = mfspr(SPRN_TLB1CFG) & TLBnCFG_N_ENTRY;
-
- /*
- * This should never happen on real e500 hardware, but is
- * architecturally possible -- e.g. in some weird nested
- * virtualization case.
- */
- if (host_tlb_params[0].entries == 0 ||
- host_tlb_params[1].entries == 0) {
- pr_err("%s: need to know host tlb size\n", __func__);
- return -ENODEV;
- }
-
- host_tlb_params[0].ways = (mfspr(SPRN_TLB0CFG) & TLBnCFG_ASSOC) >>
- TLBnCFG_ASSOC_SHIFT;
- host_tlb_params[1].ways = host_tlb_params[1].entries;
-
- if (!is_power_of_2(host_tlb_params[0].entries) ||
- !is_power_of_2(host_tlb_params[0].ways) ||
- host_tlb_params[0].entries < host_tlb_params[0].ways ||
- host_tlb_params[0].ways == 0) {
- pr_err("%s: bad tlb0 host config: %u entries %u ways\n",
- __func__, host_tlb_params[0].entries,
- host_tlb_params[0].ways);
- return -ENODEV;
- }
-
- host_tlb_params[0].sets =
- host_tlb_params[0].entries / host_tlb_params[0].ways;
- host_tlb_params[1].sets = 1;
+ if (e500_mmu_host_init(vcpu_e500))
+ goto err;

vcpu_e500->gtlb_params[0].entries = KVM_E500_TLB0_SIZE;
vcpu_e500->gtlb_params[1].entries = KVM_E500_TLB1_SIZE;
@@ -1358,18 +765,6 @@ int kvmppc_e500_tlb_init(struct kvmppc_vcpu_e500 *vcpu_e500)
vcpu_e500->gtlb_offset[0] = 0;
vcpu_e500->gtlb_offset[1] = KVM_E500_TLB0_SIZE;

- vcpu_e500->tlb_refs[0] =
- kzalloc(sizeof(struct tlbe_ref) * host_tlb_params[0].entries,
- GFP_KERNEL);
- if (!vcpu_e500->tlb_refs[0])
- goto err;
-
- vcpu_e500->tlb_refs[1] =
- kzalloc(sizeof(struct tlbe_ref) * host_tlb_params[1].entries,
- GFP_KERNEL);
- if (!vcpu_e500->tlb_refs[1])
- goto err;
-
vcpu_e500->gtlb_priv[0] = kzalloc(sizeof(struct tlbe_ref) *
vcpu_e500->gtlb_params[0].entries,
GFP_KERNEL);
@@ -1388,12 +783,6 @@ int kvmppc_e500_tlb_init(struct kvmppc_vcpu_e500 *vcpu_e500)
if (!vcpu_e500->g2h_tlb1_map)
goto err;

- vcpu_e500->h2g_tlb1_rmap = kzalloc(sizeof(unsigned int) *
- host_tlb_params[1].entries,
- GFP_KERNEL);
- if (!vcpu_e500->h2g_tlb1_rmap)
- goto err;
-
/* Init TLB configuration register */
vcpu->arch.tlbcfg[0] = mfspr(SPRN_TLB0CFG) &
~(TLBnCFG_N_ENTRY | TLBnCFG_ASSOC);
@@ -1412,15 +801,11 @@

err:
free_gtlb(vcpu_e500);
- kfree(vcpu_e500->tlb_refs[0]);
- kfree(vcpu_e500->tlb_refs[1]);
return -1;
}

void kvmppc_e500_tlb_uninit(struct kvmppc_vcpu_e500 *vcpu_e500)
{
free_gtlb(vcpu_e500);
- kfree(vcpu_e500->h2g_tlb1_rmap);
- kfree(vcpu_e500->tlb_refs[0]);
- kfree(vcpu_e500->tlb_refs[1]);
+ e500_mmu_host_uninit(vcpu_e500);
}