@@ -2,6 +2,9 @@
  * Copyright (C) 2008-2011 Freescale Semiconductor, Inc. All rights reserved.
  *
  * Author: Yu Liu, yu.liu@freescale.com
+ *         Scott Wood, scottwood@freescale.com
+ *         Ashish Kalra, ashish.kalra@freescale.com
+ *         Varun Sethi, varun.sethi@freescale.com
  *
  * Description:
  * This file is based on arch/powerpc/kvm/44x_tlb.c,
@@ -26,210 +29,15 @@
 #include <linux/vmalloc.h>
 #include <linux/hugetlb.h>
 #include <asm/kvm_ppc.h>
-#include <asm/kvm_e500.h>

-#include "../mm/mmu_decl.h"
-#include "e500_tlb.h"
+#include "e500.h"
 #include "trace.h"
 #include "timing.h"

 #define to_htlb1_esel(esel) (host_tlb_params[1].entries - (esel) - 1)

-struct id {
-        unsigned long val;
-        struct id **pentry;
-};
-
-#define NUM_TIDS 256
-
-/*
- * This table provide mappings from:
- * (guestAS,guestTID,guestPR) --> ID of physical cpu
- * guestAS [0..1]
- * guestTID [0..255]
- * guestPR [0..1]
- * ID [1..255]
- * Each vcpu keeps one vcpu_id_table.
- */
-struct vcpu_id_table {
-        struct id id[2][NUM_TIDS][2];
-};
-
-/*
- * This table provide reversed mappings of vcpu_id_table:
- * ID --> address of vcpu_id_table item.
- * Each physical core has one pcpu_id_table.
- */
-struct pcpu_id_table {
-        struct id *entry[NUM_TIDS];
-};
-
-static DEFINE_PER_CPU(struct pcpu_id_table, pcpu_sids);
-
-/* This variable keeps last used shadow ID on local core.
- * The valid range of shadow ID is [1..255] */
-static DEFINE_PER_CPU(unsigned long, pcpu_last_used_sid);
-
 static struct kvmppc_e500_tlb_params host_tlb_params[E500_TLB_NUM];

-static struct kvm_book3e_206_tlb_entry *get_entry(
-        struct kvmppc_vcpu_e500 *vcpu_e500, int tlbsel, int entry)
-{
-        int offset = vcpu_e500->gtlb_offset[tlbsel];
-        return &vcpu_e500->gtlb_arch[offset + entry];
-}
-
-/*
- * Allocate a free shadow id and setup a valid sid mapping in given entry.
- * A mapping is only valid when vcpu_id_table and pcpu_id_table are match.
- *
- * The caller must have preemption disabled, and keep it that way until
- * it has finished with the returned shadow id (either written into the
- * TLB or arch.shadow_pid, or discarded).
- */
-static inline int local_sid_setup_one(struct id *entry)
-{
-        unsigned long sid;
-        int ret = -1;
-
-        sid = ++(__get_cpu_var(pcpu_last_used_sid));
-        if (sid < NUM_TIDS) {
-                __get_cpu_var(pcpu_sids).entry[sid] = entry;
-                entry->val = sid;
-                entry->pentry = &__get_cpu_var(pcpu_sids).entry[sid];
-                ret = sid;
-        }
-
-        /*
-         * If sid == NUM_TIDS, we've run out of sids.  We return -1, and
-         * the caller will invalidate everything and start over.
-         *
-         * sid > NUM_TIDS indicates a race, which we disable preemption to
-         * avoid.
-         */
-        WARN_ON(sid > NUM_TIDS);
-
-        return ret;
-}
-
-/*
- * Check if given entry contain a valid shadow id mapping.
- * An ID mapping is considered valid only if
- * both vcpu and pcpu know this mapping.
- *
- * The caller must have preemption disabled, and keep it that way until
- * it has finished with the returned shadow id (either written into the
- * TLB or arch.shadow_pid, or discarded).
- */
-static inline int local_sid_lookup(struct id *entry)
-{
-        if (entry && entry->val != 0 &&
-            __get_cpu_var(pcpu_sids).entry[entry->val] == entry &&
-            entry->pentry == &__get_cpu_var(pcpu_sids).entry[entry->val])
-                return entry->val;
-        return -1;
-}
-
-/* Invalidate all id mappings on local core -- call with preempt disabled */
-static inline void local_sid_destroy_all(void)
-{
-        __get_cpu_var(pcpu_last_used_sid) = 0;
-        memset(&__get_cpu_var(pcpu_sids), 0, sizeof(__get_cpu_var(pcpu_sids)));
-}
-
-static void *kvmppc_e500_id_table_alloc(struct kvmppc_vcpu_e500 *vcpu_e500)
-{
-        vcpu_e500->idt = kzalloc(sizeof(struct vcpu_id_table), GFP_KERNEL);
-        return vcpu_e500->idt;
-}
-
-static void kvmppc_e500_id_table_free(struct kvmppc_vcpu_e500 *vcpu_e500)
-{
-        kfree(vcpu_e500->idt);
-}
-
-/* Invalidate all mappings on vcpu */
-static void kvmppc_e500_id_table_reset_all(struct kvmppc_vcpu_e500 *vcpu_e500)
-{
-        memset(vcpu_e500->idt, 0, sizeof(struct vcpu_id_table));
-
-        /* Update shadow pid when mappings are changed */
-        kvmppc_e500_recalc_shadow_pid(vcpu_e500);
-}
-
-/* Invalidate one ID mapping on vcpu */
-static inline void kvmppc_e500_id_table_reset_one(
-        struct kvmppc_vcpu_e500 *vcpu_e500,
-        int as, int pid, int pr)
-{
-        struct vcpu_id_table *idt = vcpu_e500->idt;
-
-        BUG_ON(as >= 2);
-        BUG_ON(pid >= NUM_TIDS);
-        BUG_ON(pr >= 2);
-
-        idt->id[as][pid][pr].val = 0;
-        idt->id[as][pid][pr].pentry = NULL;
-
-        /* Update shadow pid when mappings are changed */
-        kvmppc_e500_recalc_shadow_pid(vcpu_e500);
-}
-
-/*
- * Map guest (vcpu,AS,ID,PR) to physical core shadow id.
- * This function first lookup if a valid mapping exists,
- * if not, then creates a new one.
- *
- * The caller must have preemption disabled, and keep it that way until
- * it has finished with the returned shadow id (either written into the
- * TLB or arch.shadow_pid, or discarded).
- */
-static unsigned int kvmppc_e500_get_sid(struct kvmppc_vcpu_e500 *vcpu_e500,
-                                        unsigned int as, unsigned int gid,
-                                        unsigned int pr, int avoid_recursion)
-{
-        struct vcpu_id_table *idt = vcpu_e500->idt;
-        int sid;
-
-        BUG_ON(as >= 2);
-        BUG_ON(gid >= NUM_TIDS);
-        BUG_ON(pr >= 2);
-
-        sid = local_sid_lookup(&idt->id[as][gid][pr]);
-
-        while (sid <= 0) {
-                /* No mapping yet */
-                sid = local_sid_setup_one(&idt->id[as][gid][pr]);
-                if (sid <= 0) {
-                        _tlbil_all();
-                        local_sid_destroy_all();
-                }
-
-                /* Update shadow pid when mappings are changed */
-                if (!avoid_recursion)
-                        kvmppc_e500_recalc_shadow_pid(vcpu_e500);
-        }
-
-        return sid;
-}
-
-/* Map guest pid to shadow.
- * We use PID to keep shadow of current guest non-zero PID,
- * and use PID1 to keep shadow of guest zero PID.
- * So that guest tlbe with TID=0 can be accessed at any time */
-void kvmppc_e500_recalc_shadow_pid(struct kvmppc_vcpu_e500 *vcpu_e500)
-{
-        preempt_disable();
-        vcpu_e500->vcpu.arch.shadow_pid = kvmppc_e500_get_sid(vcpu_e500,
-                        get_cur_as(&vcpu_e500->vcpu),
-                        get_cur_pid(&vcpu_e500->vcpu),
-                        get_cur_pr(&vcpu_e500->vcpu), 1);
-        vcpu_e500->vcpu.arch.shadow_pid1 = kvmppc_e500_get_sid(vcpu_e500,
-                        get_cur_as(&vcpu_e500->vcpu), 0,
-                        get_cur_pr(&vcpu_e500->vcpu), 1);
-        preempt_enable();
-}
-
 static inline unsigned int gtlb0_get_next_victim(
         struct kvmppc_vcpu_e500 *vcpu_e500)
 {
@@ -258,6 +66,7 @@ static inline u32 e500_shadow_mas3_attrib(u32 mas3, int usermode)
         /* Mask off reserved bits. */
         mas3 &= MAS3_ATTRIB_MASK;

+#ifndef CONFIG_KVM_BOOKE_HV
         if (!usermode) {
                 /* Guest is in supervisor mode,
                  * so we need to translate guest
@@ -265,8 +74,9 @@ static inline u32 e500_shadow_mas3_attrib(u32 mas3, int usermode)
                 mas3 &= ~E500_TLB_USER_PERM_MASK;
                 mas3 |= (mas3 & E500_TLB_SUPER_PERM_MASK) << 1;
         }
-
-        return mas3 | E500_TLB_SUPER_PERM_MASK;
+        mas3 |= E500_TLB_SUPER_PERM_MASK;
+#endif
+        return mas3;
 }

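The supervisor-to-user permission copy above is easier to follow with the Book3E MAS3 bit assignments in mind. A minimal sketch, assuming the usual values MAS3_SR=0x01, MAS3_UR=0x02, MAS3_SW=0x04, MAS3_UW=0x08, MAS3_SX=0x10, MAS3_UX=0x20 (each user bit sits one position above its supervisor counterpart); this is an illustration, not kernel code:

#include <stdint.h>

/* Mirror of the non-HV permission remap: the e500v2 guest runs in host
 * user mode, so guest-supervisor rights must be granted in the user
 * permission positions, while the host kernel keeps supervisor access. */
static uint32_t shadow_perms(uint32_t mas3, int usermode)
{
        const uint32_t super = 0x15;    /* SR | SW | SX (assumed values) */
        const uint32_t user  = 0x2a;    /* UR | UW | UX (assumed values) */

        if (!usermode) {
                mas3 &= ~user;                  /* drop guest user perms */
                mas3 |= (mas3 & super) << 1;    /* SR->UR, SW->UW, SX->UX */
        }
        return mas3 | super;
}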
 static inline u32 e500_shadow_mas2_attrib(u32 mas2, int usermode)
@@ -292,7 +102,16 @@ static inline void __write_host_tlbe(struct kvm_book3e_206_tlb_entry *stlbe,
         mtspr(SPRN_MAS2, (unsigned long)stlbe->mas2);
         mtspr(SPRN_MAS3, (u32)stlbe->mas7_3);
         mtspr(SPRN_MAS7, (u32)(stlbe->mas7_3 >> 32));
+#ifdef CONFIG_KVM_BOOKE_HV
+        mtspr(SPRN_MAS8, stlbe->mas8);
+#endif
         asm volatile("isync; tlbwe" : : : "memory");
+
+#ifdef CONFIG_KVM_BOOKE_HV
+        /* Must clear mas8 for other host tlbwe's */
+        mtspr(SPRN_MAS8, 0);
+        isync();
+#endif
         local_irq_restore(flags);

         trace_kvm_booke206_stlb_write(mas0, stlbe->mas8, stlbe->mas1,
@@ -337,6 +156,7 @@ static inline void write_host_tlbe(struct kvmppc_vcpu_e500 *vcpu_e500,
         }
 }

+#ifdef CONFIG_KVM_E500V2
 void kvmppc_map_magic(struct kvm_vcpu *vcpu)
 {
         struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
@@ -361,75 +181,41 @@ void kvmppc_map_magic(struct kvm_vcpu *vcpu)
         __write_host_tlbe(&magic, MAS0_TLBSEL(1) | MAS0_ESEL(tlbcam_index));
         preempt_enable();
 }
-
-void kvmppc_e500_tlb_load(struct kvm_vcpu *vcpu, int cpu)
-{
-        struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
-
-        /* Shadow PID may be expired on local core */
-        kvmppc_e500_recalc_shadow_pid(vcpu_e500);
-}
-
-void kvmppc_e500_tlb_put(struct kvm_vcpu *vcpu)
-{
-}
+#endif

 static void inval_gtlbe_on_host(struct kvmppc_vcpu_e500 *vcpu_e500,
                                 int tlbsel, int esel)
 {
         struct kvm_book3e_206_tlb_entry *gtlbe =
                 get_entry(vcpu_e500, tlbsel, esel);
-        struct vcpu_id_table *idt = vcpu_e500->idt;
-        unsigned int pr, tid, ts, pid;
-        u32 val, eaddr;
-        unsigned long flags;
-
-        ts = get_tlb_ts(gtlbe);
-        tid = get_tlb_tid(gtlbe);
-
-        preempt_disable();
-
-        /* One guest ID may be mapped to two shadow IDs */
-        for (pr = 0; pr < 2; pr++) {
-                /*
-                 * The shadow PID can have a valid mapping on at most one
-                 * host CPU.  In the common case, it will be valid on this
-                 * CPU, in which case (for TLB0) we do a local invalidation
-                 * of the specific address.
-                 *
-                 * If the shadow PID is not valid on the current host CPU, or
-                 * if we're invalidating a TLB1 entry, we invalidate the
-                 * entire shadow PID.
-                 */
-                if (tlbsel == 1 ||
-                    (pid = local_sid_lookup(&idt->id[ts][tid][pr])) <= 0) {
-                        kvmppc_e500_id_table_reset_one(vcpu_e500, ts, tid, pr);
-                        continue;
-                }

-                /*
-                 * The guest is invalidating a TLB0 entry which is in a PID
-                 * that has a valid shadow mapping on this host CPU.  We
-                 * search host TLB0 to invalidate it's shadow TLB entry,
-                 * similar to __tlbil_va except that we need to look in AS1.
-                 */
-                val = (pid << MAS6_SPID_SHIFT) | MAS6_SAS;
-                eaddr = get_tlb_eaddr(gtlbe);
+        if (tlbsel == 1 &&
+            vcpu_e500->gtlb_priv[1][esel].ref.flags & E500_TLB_BITMAP) {
+                u64 tmp = vcpu_e500->g2h_tlb1_map[esel];
+                int hw_tlb_indx;
+                unsigned long flags;

                 local_irq_save(flags);
-
-                mtspr(SPRN_MAS6, val);
-                asm volatile("tlbsx 0, %[eaddr]" : : [eaddr] "r" (eaddr));
-                val = mfspr(SPRN_MAS1);
-                if (val & MAS1_VALID) {
-                        mtspr(SPRN_MAS1, val & ~MAS1_VALID);
+                while (tmp) {
+                        hw_tlb_indx = __ilog2_u64(tmp & -tmp);
+                        mtspr(SPRN_MAS0,
+                              MAS0_TLBSEL(1) |
+                              MAS0_ESEL(to_htlb1_esel(hw_tlb_indx)));
+                        mtspr(SPRN_MAS1, 0);
                         asm volatile("tlbwe");
+                        vcpu_e500->h2g_tlb1_rmap[hw_tlb_indx] = 0;
+                        tmp &= tmp - 1;
                 }
-
+                mb();
+                vcpu_e500->g2h_tlb1_map[esel] = 0;
+                vcpu_e500->gtlb_priv[1][esel].ref.flags &= ~E500_TLB_BITMAP;
                 local_irq_restore(flags);
+
+                return;
         }

-        preempt_enable();
+        /* Guest tlbe is backed by at most one host tlbe per shadow pid. */
+        kvmppc_e500_tlbil_one(vcpu_e500, gtlbe);
 }

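The TLB1 path above walks the guest-to-host bitmap one set bit at a time: tmp & -tmp isolates the lowest set bit, __ilog2_u64() turns it into a host TLB1 index, and tmp &= tmp - 1 clears that bit. A standalone sketch of the same idiom (illustration only; the visit() callback is hypothetical):

#include <stdint.h>

static inline int lowest_bit_index(uint64_t x)
{
        return __builtin_ctzll(x);      /* same role as __ilog2_u64(x & -x) */
}

static void for_each_set_bit_u64(uint64_t map, void (*visit)(int idx))
{
        while (map) {
                visit(lowest_bit_index(map));
                map &= map - 1;         /* clear the bit just visited */
        }
}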
 static int tlb0_set_base(gva_t addr, int sets, int ways)
@@ -475,6 +261,9 @@ static int kvmppc_e500_tlb_index(struct kvmppc_vcpu_e500 *vcpu_e500,
                 set_base = gtlb0_set_base(vcpu_e500, eaddr);
                 size = vcpu_e500->gtlb_params[0].ways;
         } else {
+                if (eaddr < vcpu_e500->tlb1_min_eaddr ||
+                    eaddr > vcpu_e500->tlb1_max_eaddr)
+                        return -1;
                 set_base = 0;
         }
@@ -530,6 +319,16 @@ static inline void kvmppc_e500_ref_release(struct tlbe_ref *ref)
         }
 }

+static void clear_tlb1_bitmap(struct kvmppc_vcpu_e500 *vcpu_e500)
+{
+        if (vcpu_e500->g2h_tlb1_map)
+                memset(vcpu_e500->g2h_tlb1_map, 0,
+                       sizeof(u64) * vcpu_e500->gtlb_params[1].entries);
+        if (vcpu_e500->h2g_tlb1_rmap)
+                memset(vcpu_e500->h2g_tlb1_rmap, 0,
+                       sizeof(unsigned int) * host_tlb_params[1].entries);
+}
+
 static void clear_tlb_privs(struct kvmppc_vcpu_e500 *vcpu_e500)
 {
         int tlbsel = 0;
@@ -547,7 +346,7 @@ static void clear_tlb_refs(struct kvmppc_vcpu_e500 *vcpu_e500)
         int stlbsel = 1;
         int i;

-        kvmppc_e500_id_table_reset_all(vcpu_e500);
+        kvmppc_e500_tlbil_all(vcpu_e500);

         for (i = 0; i < host_tlb_params[stlbsel].entries; i++) {
                 struct tlbe_ref *ref =
@@ -562,19 +361,18 @@ static inline void kvmppc_e500_deliver_tlb_miss(struct kvm_vcpu *vcpu,
                 unsigned int eaddr, int as)
 {
         struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
-        unsigned int victim, pidsel, tsized;
+        unsigned int victim, tsized;
         int tlbsel;

         /* since we only have two TLBs, only lower bit is used. */
         tlbsel = (vcpu->arch.shared->mas4 >> 28) & 0x1;
         victim = (tlbsel == 0) ? gtlb0_get_next_victim(vcpu_e500) : 0;
-        pidsel = (vcpu->arch.shared->mas4 >> 16) & 0xf;
         tsized = (vcpu->arch.shared->mas4 >> 7) & 0x1f;

         vcpu->arch.shared->mas0 = MAS0_TLBSEL(tlbsel) | MAS0_ESEL(victim)
                 | MAS0_NV(vcpu_e500->gtlb_nv[tlbsel]);
         vcpu->arch.shared->mas1 = MAS1_VALID | (as ? MAS1_TS : 0)
-                | MAS1_TID(vcpu_e500->pid[pidsel])
+                | MAS1_TID(get_tlbmiss_tid(vcpu))
                 | MAS1_TSIZE(tsized);
         vcpu->arch.shared->mas2 = (eaddr & MAS2_EPN)
                 | (vcpu->arch.shared->mas4 & MAS2_ATTRIB_MASK);
@@ -586,23 +384,26 @@ static inline void kvmppc_e500_deliver_tlb_miss(struct kvm_vcpu *vcpu,

 /* TID must be supplied by the caller */
 static inline void kvmppc_e500_setup_stlbe(
-        struct kvmppc_vcpu_e500 *vcpu_e500,
+        struct kvm_vcpu *vcpu,
         struct kvm_book3e_206_tlb_entry *gtlbe,
         int tsize, struct tlbe_ref *ref, u64 gvaddr,
         struct kvm_book3e_206_tlb_entry *stlbe)
 {
         pfn_t pfn = ref->pfn;
+        u32 pr = vcpu->arch.shared->msr & MSR_PR;

         BUG_ON(!(ref->flags & E500_TLB_VALID));

-        /* Force TS=1 IPROT=0 for all guest mappings. */
-        stlbe->mas1 = MAS1_TSIZE(tsize) | MAS1_TS | MAS1_VALID;
-        stlbe->mas2 = (gvaddr & MAS2_EPN)
-                | e500_shadow_mas2_attrib(gtlbe->mas2,
-                                vcpu_e500->vcpu.arch.shared->msr & MSR_PR);
-        stlbe->mas7_3 = ((u64)pfn << PAGE_SHIFT)
-                | e500_shadow_mas3_attrib(gtlbe->mas7_3,
-                                vcpu_e500->vcpu.arch.shared->msr & MSR_PR);
+        /* Force IPROT=0 for all guest mappings. */
+        stlbe->mas1 = MAS1_TSIZE(tsize) | get_tlb_sts(gtlbe) | MAS1_VALID;
+        stlbe->mas2 = (gvaddr & MAS2_EPN) |
+                      e500_shadow_mas2_attrib(gtlbe->mas2, pr);
+        stlbe->mas7_3 = ((u64)pfn << PAGE_SHIFT) |
+                        e500_shadow_mas3_attrib(gtlbe->mas7_3, pr);
+
+#ifdef CONFIG_KVM_BOOKE_HV
+        stlbe->mas8 = MAS8_TGS | vcpu->kvm->arch.lpid;
+#endif
 }

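Under CONFIG_KVM_BOOKE_HV the shadow entry is additionally tagged as a guest translation and with the VM's LPID via MAS8, so guest entries can sit in the hardware TLB alongside the host's own (LPID 0) entries. A rough sketch of how that value decomposes, assuming the conventional MAS8 layout with TGS as the top bit and the LPID in the low-order bits (field widths are core-specific):

#include <stdint.h>

#define MAS8_TGS_BIT    0x80000000u     /* translation belongs to a guest (assumed) */

static uint32_t make_mas8(uint32_t lpid)
{
        return MAS8_TGS_BIT | lpid;
}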
 static inline void kvmppc_e500_shadow_map(struct kvmppc_vcpu_e500 *vcpu_e500,
@@ -736,7 +537,8 @@ static inline void kvmppc_e500_shadow_map(struct kvmppc_vcpu_e500 *vcpu_e500,
         kvmppc_e500_ref_release(ref);
         kvmppc_e500_ref_setup(ref, gtlbe, pfn);

-        kvmppc_e500_setup_stlbe(vcpu_e500, gtlbe, tsize, ref, gvaddr, stlbe);
+        kvmppc_e500_setup_stlbe(&vcpu_e500->vcpu, gtlbe, tsize,
+                                ref, gvaddr, stlbe);
 }

 /* XXX only map the one-one case, for now use TLB0 */
@@ -760,7 +562,7 @@ static void kvmppc_e500_tlb0_map(struct kvmppc_vcpu_e500 *vcpu_e500,
 /* XXX for both one-one and one-to-many , for now use TLB1 */
 static int kvmppc_e500_tlb1_map(struct kvmppc_vcpu_e500 *vcpu_e500,
                 u64 gvaddr, gfn_t gfn, struct kvm_book3e_206_tlb_entry *gtlbe,
-                struct kvm_book3e_206_tlb_entry *stlbe)
+                struct kvm_book3e_206_tlb_entry *stlbe, int esel)
 {
         struct tlbe_ref *ref;
         unsigned int victim;
@@ -773,15 +575,74 @@ static int kvmppc_e500_tlb1_map(struct kvmppc_vcpu_e500 *vcpu_e500,
         ref = &vcpu_e500->tlb_refs[1][victim];
         kvmppc_e500_shadow_map(vcpu_e500, gvaddr, gfn, gtlbe, 1, stlbe, ref);

+        vcpu_e500->g2h_tlb1_map[esel] |= (u64)1 << victim;
+        vcpu_e500->gtlb_priv[1][esel].ref.flags |= E500_TLB_BITMAP;
+        if (vcpu_e500->h2g_tlb1_rmap[victim]) {
+                unsigned int idx = vcpu_e500->h2g_tlb1_rmap[victim];
+                vcpu_e500->g2h_tlb1_map[idx] &= ~(1ULL << victim);
+        }
+        vcpu_e500->h2g_tlb1_rmap[victim] = esel;
+
         return victim;
 }

-void kvmppc_mmu_msr_notify(struct kvm_vcpu *vcpu, u32 old_msr)
+static void kvmppc_recalc_tlb1map_range(struct kvmppc_vcpu_e500 *vcpu_e500)
+{
+        int size = vcpu_e500->gtlb_params[1].entries;
+        unsigned int offset;
+        gva_t eaddr;
+        int i;
+
+        vcpu_e500->tlb1_min_eaddr = ~0UL;
+        vcpu_e500->tlb1_max_eaddr = 0;
+        offset = vcpu_e500->gtlb_offset[1];
+
+        for (i = 0; i < size; i++) {
+                struct kvm_book3e_206_tlb_entry *tlbe =
+                        &vcpu_e500->gtlb_arch[offset + i];
+
+                if (!get_tlb_v(tlbe))
+                        continue;
+
+                eaddr = get_tlb_eaddr(tlbe);
+                vcpu_e500->tlb1_min_eaddr =
+                        min(vcpu_e500->tlb1_min_eaddr, eaddr);
+
+                eaddr = get_tlb_end(tlbe);
+                vcpu_e500->tlb1_max_eaddr =
+                        max(vcpu_e500->tlb1_max_eaddr, eaddr);
+        }
+}
+
+static int kvmppc_need_recalc_tlb1map_range(struct kvmppc_vcpu_e500 *vcpu_e500,
+                                struct kvm_book3e_206_tlb_entry *gtlbe)
 {
+        unsigned long start, end, size;
+
+        size = get_tlb_bytes(gtlbe);
+        start = get_tlb_eaddr(gtlbe) & ~(size - 1);
+        end = start + size - 1;
+
+        return vcpu_e500->tlb1_min_eaddr == start ||
+               vcpu_e500->tlb1_max_eaddr == end;
+}
+
+/* This function is supposed to be called for adding a new valid tlb entry */
+static void kvmppc_set_tlb1map_range(struct kvm_vcpu *vcpu,
+                                struct kvm_book3e_206_tlb_entry *gtlbe)
+{
+        unsigned long start, end, size;
         struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);

-        /* Recalc shadow pid since MSR changes */
-        kvmppc_e500_recalc_shadow_pid(vcpu_e500);
+        if (!get_tlb_v(gtlbe))
+                return;
+
+        size = get_tlb_bytes(gtlbe);
+        start = get_tlb_eaddr(gtlbe) & ~(size - 1);
+        end = start + size - 1;
+
+        vcpu_e500->tlb1_min_eaddr = min(vcpu_e500->tlb1_min_eaddr, start);
+        vcpu_e500->tlb1_max_eaddr = max(vcpu_e500->tlb1_max_eaddr, end);
 }

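The three helpers above maintain a conservative [tlb1_min_eaddr, tlb1_max_eaddr] window over all valid guest TLB1 entries so lookups can bail out early: adding a valid entry can only widen the window, and a full recalculation is needed only when the removed or overwritten entry defined one of the current bounds. A standalone sketch of the same bookkeeping (illustration, not kernel code):

#include <stdint.h>

struct range { uint64_t min, max; };   /* start as { ~0ULL, 0 } */

static void range_add(struct range *r, uint64_t start, uint64_t bytes)
{
        uint64_t end = start + bytes - 1;

        if (start < r->min)
                r->min = start;
        if (end > r->max)
                r->max = end;
}

/* Removal only has to trigger a rescan when it hit a boundary entry,
 * which is exactly the test kvmppc_need_recalc_tlb1map_range() makes. */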
 static inline int kvmppc_e500_gtlbe_invalidate(
@@ -794,6 +655,9 @@ static inline int kvmppc_e500_gtlbe_invalidate(
         if (unlikely(get_tlb_iprot(gtlbe)))
                 return -1;

+        if (tlbsel == 1 && kvmppc_need_recalc_tlb1map_range(vcpu_e500, gtlbe))
+                kvmppc_recalc_tlb1map_range(vcpu_e500);
+
         gtlbe->mas1 = 0;

         return 0;
@@ -811,7 +675,7 @@ int kvmppc_e500_emul_mt_mmucsr0(struct kvmppc_vcpu_e500 *vcpu_e500, ulong value)
                 kvmppc_e500_gtlbe_invalidate(vcpu_e500, 1, esel);

         /* Invalidate all vcpu id mappings */
-        kvmppc_e500_id_table_reset_all(vcpu_e500);
+        kvmppc_e500_tlbil_all(vcpu_e500);

         return EMULATE_DONE;
 }
@@ -844,7 +708,59 @@ int kvmppc_e500_emul_tlbivax(struct kvm_vcpu *vcpu, int ra, int rb)
         }

         /* Invalidate all vcpu id mappings */
-        kvmppc_e500_id_table_reset_all(vcpu_e500);
+        kvmppc_e500_tlbil_all(vcpu_e500);
+
+        return EMULATE_DONE;
+}
+
+static void tlbilx_all(struct kvmppc_vcpu_e500 *vcpu_e500, int tlbsel,
+                       int pid, int rt)
+{
+        struct kvm_book3e_206_tlb_entry *tlbe;
+        int tid, esel;
+
+        /* invalidate all entries */
+        for (esel = 0; esel < vcpu_e500->gtlb_params[tlbsel].entries; esel++) {
+                tlbe = get_entry(vcpu_e500, tlbsel, esel);
+                tid = get_tlb_tid(tlbe);
+                if (rt == 0 || tid == pid) {
+                        inval_gtlbe_on_host(vcpu_e500, tlbsel, esel);
+                        kvmppc_e500_gtlbe_invalidate(vcpu_e500, tlbsel, esel);
+                }
+        }
+}
+
+static void tlbilx_one(struct kvmppc_vcpu_e500 *vcpu_e500, int pid,
+                       int ra, int rb)
+{
+        int tlbsel, esel;
+        gva_t ea;
+
+        ea = kvmppc_get_gpr(&vcpu_e500->vcpu, rb);
+        if (ra)
+                ea += kvmppc_get_gpr(&vcpu_e500->vcpu, ra);
+
+        for (tlbsel = 0; tlbsel < 2; tlbsel++) {
+                esel = kvmppc_e500_tlb_index(vcpu_e500, ea, tlbsel, pid, -1);
+                if (esel >= 0) {
+                        inval_gtlbe_on_host(vcpu_e500, tlbsel, esel);
+                        kvmppc_e500_gtlbe_invalidate(vcpu_e500, tlbsel, esel);
+                        break;
+                }
+        }
+}
+
+int kvmppc_e500_emul_tlbilx(struct kvm_vcpu *vcpu, int rt, int ra, int rb)
+{
+        struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
+        int pid = get_cur_spid(vcpu);
+
+        if (rt == 0 || rt == 1) {
+                tlbilx_all(vcpu_e500, 0, pid, rt);
+                tlbilx_all(vcpu_e500, 1, pid, rt);
+        } else if (rt == 3) {
+                tlbilx_one(vcpu_e500, pid, ra, rb);
+        }

         return EMULATE_DONE;
 }
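The rt parameter above carries the tlbilx instruction's T field; the dispatch assumes the embedded-MMU encoding in which 0 means invalidate everything, 1 means invalidate by PID (SPID), and 3 means invalidate the single entry matching an effective address. Summarized as a sketch (illustration only):

enum tlbilx_type {
        TLBILX_ALL = 0,         /* invalidate every guest entry */
        TLBILX_PID = 1,         /* invalidate entries whose TID matches SPID */
        TLBILX_VA  = 3,         /* invalidate the entry matching EA and SPID */
};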
@@ -929,9 +845,7 @@ static void write_stlbe(struct kvmppc_vcpu_e500 *vcpu_e500,
         int stid;

         preempt_disable();
-        stid = kvmppc_e500_get_sid(vcpu_e500, get_tlb_ts(gtlbe),
-                                   get_tlb_tid(gtlbe),
-                                   get_cur_pr(&vcpu_e500->vcpu), 0);
+        stid = kvmppc_e500_get_tlb_stid(&vcpu_e500->vcpu, gtlbe);

         stlbe->mas1 |= MAS1_TID(stid);
         write_host_tlbe(vcpu_e500, stlbsel, sesel, stlbe);
@@ -941,16 +855,21 @@ static void write_stlbe(struct kvmppc_vcpu_e500 *vcpu_e500,
 int kvmppc_e500_emul_tlbwe(struct kvm_vcpu *vcpu)
 {
         struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
-        struct kvm_book3e_206_tlb_entry *gtlbe;
-        int tlbsel, esel;
+        struct kvm_book3e_206_tlb_entry *gtlbe, stlbe;
+        int tlbsel, esel, stlbsel, sesel;
+        int recal = 0;

         tlbsel = get_tlb_tlbsel(vcpu);
         esel = get_tlb_esel(vcpu, tlbsel);

         gtlbe = get_entry(vcpu_e500, tlbsel, esel);

-        if (get_tlb_v(gtlbe))
+        if (get_tlb_v(gtlbe)) {
                 inval_gtlbe_on_host(vcpu_e500, tlbsel, esel);
+                if ((tlbsel == 1) &&
+                    kvmppc_need_recalc_tlb1map_range(vcpu_e500, gtlbe))
+                        recal = 1;
+        }

         gtlbe->mas1 = vcpu->arch.shared->mas1;
         gtlbe->mas2 = vcpu->arch.shared->mas2;
@@ -959,10 +878,20 @@ int kvmppc_e500_emul_tlbwe(struct kvm_vcpu *vcpu)
         trace_kvm_booke206_gtlb_write(vcpu->arch.shared->mas0, gtlbe->mas1,
                                       gtlbe->mas2, gtlbe->mas7_3);

+        if (tlbsel == 1) {
+                /*
+                 * If a valid tlb1 entry is overwritten then recalculate the
+                 * min/max TLB1 map address range otherwise no need to look
+                 * in tlb1 array.
+                 */
+                if (recal)
+                        kvmppc_recalc_tlb1map_range(vcpu_e500);
+                else
+                        kvmppc_set_tlb1map_range(vcpu, gtlbe);
+        }
+
         /* Invalidate shadow mappings for the about-to-be-clobbered TLBE. */
         if (tlbe_is_host_safe(vcpu, gtlbe)) {
-                struct kvm_book3e_206_tlb_entry stlbe;
-                int stlbsel, sesel;
                 u64 eaddr;
                 u64 raddr;

@@ -989,7 +918,7 @@ int kvmppc_e500_emul_tlbwe(struct kvm_vcpu *vcpu)
                  * are mapped on the fly. */
                 stlbsel = 1;
                 sesel = kvmppc_e500_tlb1_map(vcpu_e500, eaddr,
-                                raddr >> PAGE_SHIFT, gtlbe, &stlbe);
+                                raddr >> PAGE_SHIFT, gtlbe, &stlbe, esel);
                 break;

         default:
@@ -1003,6 +932,48 @@ int kvmppc_e500_emul_tlbwe(struct kvm_vcpu *vcpu)
         return EMULATE_DONE;
 }

+static int kvmppc_e500_tlb_search(struct kvm_vcpu *vcpu,
+                                  gva_t eaddr, unsigned int pid, int as)
+{
+        struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
+        int esel, tlbsel;
+
+        for (tlbsel = 0; tlbsel < 2; tlbsel++) {
+                esel = kvmppc_e500_tlb_index(vcpu_e500, eaddr, tlbsel, pid, as);
+                if (esel >= 0)
+                        return index_of(tlbsel, esel);
+        }
+
+        return -1;
+}
+
+/* 'linear_address' is actually an encoding of AS|PID|EADDR . */
+int kvmppc_core_vcpu_translate(struct kvm_vcpu *vcpu,
+                               struct kvm_translation *tr)
+{
+        int index;
+        gva_t eaddr;
+        u8 pid;
+        u8 as;
+
+        eaddr = tr->linear_address;
+        pid = (tr->linear_address >> 32) & 0xff;
+        as = (tr->linear_address >> 40) & 0x1;
+
+        index = kvmppc_e500_tlb_search(vcpu, eaddr, pid, as);
+        if (index < 0) {
+                tr->valid = 0;
+                return 0;
+        }
+
+        tr->physical_address = kvmppc_mmu_xlate(vcpu, index, eaddr);
+        /* XXX what does "writeable" and "usermode" even mean? */
+        tr->valid = 1;
+
+        return 0;
+}
+
+
|
|
|
{
|
|
|
unsigned int as = !!(vcpu->arch.shared->msr & MSR_IS);
|
|
@@ -1066,7 +1037,7 @@ void kvmppc_mmu_map(struct kvm_vcpu *vcpu, u64 eaddr, gpa_t gpaddr,
|
|
|
sesel = 0; /* unused */
|
|
|
priv = &vcpu_e500->gtlb_priv[tlbsel][esel];
|
|
|
|
|
|
- kvmppc_e500_setup_stlbe(vcpu_e500, gtlbe, BOOK3E_PAGESZ_4K,
|
|
|
+ kvmppc_e500_setup_stlbe(vcpu, gtlbe, BOOK3E_PAGESZ_4K,
|
|
|
&priv->ref, eaddr, &stlbe);
|
|
|
break;
|
|
|
|
|
@@ -1075,7 +1046,7 @@ void kvmppc_mmu_map(struct kvm_vcpu *vcpu, u64 eaddr, gpa_t gpaddr,
|
|
|
|
|
|
stlbsel = 1;
|
|
|
sesel = kvmppc_e500_tlb1_map(vcpu_e500, eaddr, gfn,
|
|
|
- gtlbe, &stlbe);
|
|
|
+ gtlbe, &stlbe, esel);
|
|
|
break;
|
|
|
}
|
|
|
|
|
@@ -1087,52 +1058,13 @@ void kvmppc_mmu_map(struct kvm_vcpu *vcpu, u64 eaddr, gpa_t gpaddr,
|
|
|
write_stlbe(vcpu_e500, gtlbe, &stlbe, stlbsel, sesel);
|
|
|
}
|
|
|
|
|
|
-int kvmppc_e500_tlb_search(struct kvm_vcpu *vcpu,
|
|
|
- gva_t eaddr, unsigned int pid, int as)
|
|
|
-{
|
|
|
- struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
|
|
|
- int esel, tlbsel;
|
|
|
-
|
|
|
- for (tlbsel = 0; tlbsel < 2; tlbsel++) {
|
|
|
- esel = kvmppc_e500_tlb_index(vcpu_e500, eaddr, tlbsel, pid, as);
|
|
|
- if (esel >= 0)
|
|
|
- return index_of(tlbsel, esel);
|
|
|
- }
|
|
|
-
|
|
|
- return -1;
|
|
|
-}
|
|
|
-
|
|
|
-void kvmppc_set_pid(struct kvm_vcpu *vcpu, u32 pid)
|
|
|
-{
|
|
|
- struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
|
|
|
-
|
|
|
- if (vcpu->arch.pid != pid) {
|
|
|
- vcpu_e500->pid[0] = vcpu->arch.pid = pid;
|
|
|
- kvmppc_e500_recalc_shadow_pid(vcpu_e500);
|
|
|
- }
|
|
|
-}
|
|
|
-
|
|
|
-void kvmppc_e500_tlb_setup(struct kvmppc_vcpu_e500 *vcpu_e500)
|
|
|
-{
|
|
|
- struct kvm_book3e_206_tlb_entry *tlbe;
|
|
|
-
|
|
|
- /* Insert large initial mapping for guest. */
|
|
|
- tlbe = get_entry(vcpu_e500, 1, 0);
|
|
|
- tlbe->mas1 = MAS1_VALID | MAS1_TSIZE(BOOK3E_PAGESZ_256M);
|
|
|
- tlbe->mas2 = 0;
|
|
|
- tlbe->mas7_3 = E500_TLB_SUPER_PERM_MASK;
|
|
|
-
|
|
|
- /* 4K map for serial output. Used by kernel wrapper. */
|
|
|
- tlbe = get_entry(vcpu_e500, 1, 1);
|
|
|
- tlbe->mas1 = MAS1_VALID | MAS1_TSIZE(BOOK3E_PAGESZ_4K);
|
|
|
- tlbe->mas2 = (0xe0004500 & 0xFFFFF000) | MAS2_I | MAS2_G;
|
|
|
- tlbe->mas7_3 = (0xe0004500 & 0xFFFFF000) | E500_TLB_SUPER_PERM_MASK;
|
|
|
-}
|
|
|
-
|
|
|
static void free_gtlb(struct kvmppc_vcpu_e500 *vcpu_e500)
|
|
|
{
|
|
|
int i;
|
|
|
|
|
|
+ clear_tlb1_bitmap(vcpu_e500);
|
|
|
+ kfree(vcpu_e500->g2h_tlb1_map);
|
|
|
+
|
|
|
clear_tlb_refs(vcpu_e500);
|
|
|
kfree(vcpu_e500->gtlb_priv[0]);
|
|
|
kfree(vcpu_e500->gtlb_priv[1]);
|
|
@@ -1155,6 +1087,36 @@ static void free_gtlb(struct kvmppc_vcpu_e500 *vcpu_e500)
|
|
|
vcpu_e500->gtlb_arch = NULL;
|
|
|
}
|
|
|
|
|
|
+void kvmppc_get_sregs_e500_tlb(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs)
|
|
|
+{
|
|
|
+ sregs->u.e.mas0 = vcpu->arch.shared->mas0;
|
|
|
+ sregs->u.e.mas1 = vcpu->arch.shared->mas1;
|
|
|
+ sregs->u.e.mas2 = vcpu->arch.shared->mas2;
|
|
|
+ sregs->u.e.mas7_3 = vcpu->arch.shared->mas7_3;
|
|
|
+ sregs->u.e.mas4 = vcpu->arch.shared->mas4;
|
|
|
+ sregs->u.e.mas6 = vcpu->arch.shared->mas6;
|
|
|
+
|
|
|
+ sregs->u.e.mmucfg = vcpu->arch.mmucfg;
|
|
|
+ sregs->u.e.tlbcfg[0] = vcpu->arch.tlbcfg[0];
|
|
|
+ sregs->u.e.tlbcfg[1] = vcpu->arch.tlbcfg[1];
|
|
|
+ sregs->u.e.tlbcfg[2] = 0;
|
|
|
+ sregs->u.e.tlbcfg[3] = 0;
|
|
|
+}
|
|
|
+
|
|
|
+int kvmppc_set_sregs_e500_tlb(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs)
|
|
|
+{
|
|
|
+ if (sregs->u.e.features & KVM_SREGS_E_ARCH206_MMU) {
|
|
|
+ vcpu->arch.shared->mas0 = sregs->u.e.mas0;
|
|
|
+ vcpu->arch.shared->mas1 = sregs->u.e.mas1;
|
|
|
+ vcpu->arch.shared->mas2 = sregs->u.e.mas2;
|
|
|
+ vcpu->arch.shared->mas7_3 = sregs->u.e.mas7_3;
|
|
|
+ vcpu->arch.shared->mas4 = sregs->u.e.mas4;
|
|
|
+ vcpu->arch.shared->mas6 = sregs->u.e.mas6;
|
|
|
+ }
|
|
|
+
|
|
|
+ return 0;
|
|
|
+}
|
|
|
+
|
|
|
int kvm_vcpu_ioctl_config_tlb(struct kvm_vcpu *vcpu,
|
|
|
struct kvm_config_tlb *cfg)
|
|
|
{
|
|
@@ -1163,6 +1125,7 @@ int kvm_vcpu_ioctl_config_tlb(struct kvm_vcpu *vcpu,
|
|
|
char *virt;
|
|
|
struct page **pages;
|
|
|
struct tlbe_priv *privs[2] = {};
|
|
|
+ u64 *g2h_bitmap = NULL;
|
|
|
size_t array_len;
|
|
|
u32 sets;
|
|
|
int num_pages, ret, i;
|
|
@@ -1224,10 +1187,16 @@ int kvm_vcpu_ioctl_config_tlb(struct kvm_vcpu *vcpu,
|
|
|
if (!privs[0] || !privs[1])
|
|
|
goto err_put_page;
|
|
|
|
|
|
+ g2h_bitmap = kzalloc(sizeof(u64) * params.tlb_sizes[1],
|
|
|
+ GFP_KERNEL);
|
|
|
+ if (!g2h_bitmap)
|
|
|
+ goto err_put_page;
|
|
|
+
|
|
|
free_gtlb(vcpu_e500);
|
|
|
|
|
|
vcpu_e500->gtlb_priv[0] = privs[0];
|
|
|
vcpu_e500->gtlb_priv[1] = privs[1];
|
|
|
+ vcpu_e500->g2h_tlb1_map = g2h_bitmap;
|
|
|
|
|
|
vcpu_e500->gtlb_arch = (struct kvm_book3e_206_tlb_entry *)
|
|
|
(virt + (cfg->array & (PAGE_SIZE - 1)));
|
|
@@ -1238,14 +1207,16 @@ int kvm_vcpu_ioctl_config_tlb(struct kvm_vcpu *vcpu,
|
|
|
vcpu_e500->gtlb_offset[0] = 0;
|
|
|
vcpu_e500->gtlb_offset[1] = params.tlb_sizes[0];
|
|
|
|
|
|
- vcpu_e500->tlb0cfg &= ~(TLBnCFG_N_ENTRY | TLBnCFG_ASSOC);
|
|
|
+ vcpu->arch.mmucfg = mfspr(SPRN_MMUCFG) & ~MMUCFG_LPIDSIZE;
|
|
|
+
|
|
|
+ vcpu->arch.tlbcfg[0] &= ~(TLBnCFG_N_ENTRY | TLBnCFG_ASSOC);
|
|
|
if (params.tlb_sizes[0] <= 2048)
|
|
|
- vcpu_e500->tlb0cfg |= params.tlb_sizes[0];
|
|
|
- vcpu_e500->tlb0cfg |= params.tlb_ways[0] << TLBnCFG_ASSOC_SHIFT;
|
|
|
+ vcpu->arch.tlbcfg[0] |= params.tlb_sizes[0];
|
|
|
+ vcpu->arch.tlbcfg[0] |= params.tlb_ways[0] << TLBnCFG_ASSOC_SHIFT;
|
|
|
|
|
|
- vcpu_e500->tlb1cfg &= ~(TLBnCFG_N_ENTRY | TLBnCFG_ASSOC);
|
|
|
- vcpu_e500->tlb1cfg |= params.tlb_sizes[1];
|
|
|
- vcpu_e500->tlb1cfg |= params.tlb_ways[1] << TLBnCFG_ASSOC_SHIFT;
|
|
|
+ vcpu->arch.tlbcfg[1] &= ~(TLBnCFG_N_ENTRY | TLBnCFG_ASSOC);
|
|
|
+ vcpu->arch.tlbcfg[1] |= params.tlb_sizes[1];
|
|
|
+ vcpu->arch.tlbcfg[1] |= params.tlb_ways[1] << TLBnCFG_ASSOC_SHIFT;
|
|
|
|
|
|
vcpu_e500->shared_tlb_pages = pages;
|
|
|
vcpu_e500->num_shared_tlb_pages = num_pages;
|
|
@@ -1256,6 +1227,7 @@ int kvm_vcpu_ioctl_config_tlb(struct kvm_vcpu *vcpu,
|
|
|
vcpu_e500->gtlb_params[1].ways = params.tlb_sizes[1];
|
|
|
vcpu_e500->gtlb_params[1].sets = 1;
|
|
|
|
|
|
+ kvmppc_recalc_tlb1map_range(vcpu_e500);
|
|
|
return 0;
|
|
|
|
|
|
err_put_page:
|
|
@@ -1274,13 +1246,14 @@ int kvm_vcpu_ioctl_dirty_tlb(struct kvm_vcpu *vcpu,
|
|
|
struct kvm_dirty_tlb *dirty)
|
|
|
{
|
|
|
struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
|
|
|
-
|
|
|
+ kvmppc_recalc_tlb1map_range(vcpu_e500);
|
|
|
clear_tlb_refs(vcpu_e500);
|
|
|
return 0;
|
|
|
}
|
|
|
|
|
|
int kvmppc_e500_tlb_init(struct kvmppc_vcpu_e500 *vcpu_e500)
|
|
|
{
|
|
|
+ struct kvm_vcpu *vcpu = &vcpu_e500->vcpu;
|
|
|
int entry_size = sizeof(struct kvm_book3e_206_tlb_entry);
|
|
|
int entries = KVM_E500_TLB0_SIZE + KVM_E500_TLB1_SIZE;
|
|
|
|
|
@@ -1357,22 +1330,32 @@ int kvmppc_e500_tlb_init(struct kvmppc_vcpu_e500 *vcpu_e500)
         if (!vcpu_e500->gtlb_priv[1])
                 goto err;

-        if (kvmppc_e500_id_table_alloc(vcpu_e500) == NULL)
+        vcpu_e500->g2h_tlb1_map = kzalloc(sizeof(u64) *
+                                          vcpu_e500->gtlb_params[1].entries,
+                                          GFP_KERNEL);
+        if (!vcpu_e500->g2h_tlb1_map)
+                goto err;
+
+        vcpu_e500->h2g_tlb1_rmap = kzalloc(sizeof(unsigned int) *
+                                           host_tlb_params[1].entries,
+                                           GFP_KERNEL);
+        if (!vcpu_e500->h2g_tlb1_rmap)
                 goto err;

         /* Init TLB configuration register */
-        vcpu_e500->tlb0cfg = mfspr(SPRN_TLB0CFG) &
+        vcpu->arch.tlbcfg[0] = mfspr(SPRN_TLB0CFG) &
                              ~(TLBnCFG_N_ENTRY | TLBnCFG_ASSOC);
-        vcpu_e500->tlb0cfg |= vcpu_e500->gtlb_params[0].entries;
-        vcpu_e500->tlb0cfg |=
+        vcpu->arch.tlbcfg[0] |= vcpu_e500->gtlb_params[0].entries;
+        vcpu->arch.tlbcfg[0] |=
                 vcpu_e500->gtlb_params[0].ways << TLBnCFG_ASSOC_SHIFT;

-        vcpu_e500->tlb1cfg = mfspr(SPRN_TLB1CFG) &
+        vcpu->arch.tlbcfg[1] = mfspr(SPRN_TLB1CFG) &
                              ~(TLBnCFG_N_ENTRY | TLBnCFG_ASSOC);
-        vcpu_e500->tlb0cfg |= vcpu_e500->gtlb_params[1].entries;
-        vcpu_e500->tlb0cfg |=
+        vcpu->arch.tlbcfg[1] |= vcpu_e500->gtlb_params[1].entries;
+        vcpu->arch.tlbcfg[1] |=
                 vcpu_e500->gtlb_params[1].ways << TLBnCFG_ASSOC_SHIFT;

+        kvmppc_recalc_tlb1map_range(vcpu_e500);
         return 0;

 err:
@@ -1385,8 +1368,7 @@ err:
 void kvmppc_e500_tlb_uninit(struct kvmppc_vcpu_e500 *vcpu_e500)
 {
         free_gtlb(vcpu_e500);
-        kvmppc_e500_id_table_free(vcpu_e500);
-
+        kfree(vcpu_e500->h2g_tlb1_rmap);
         kfree(vcpu_e500->tlb_refs[0]);
         kfree(vcpu_e500->tlb_refs[1]);
 }