@@ -39,22 +39,35 @@
 
 DEFINE_RAW_SPINLOCK(native_tlbie_lock);
 
-static inline void __tlbie(unsigned long va, int psize, int ssize)
+static inline void __tlbie(unsigned long vpn, int psize, int ssize)
 {
+	unsigned long va;
 	unsigned int penc;
 
-	/* clear top 16 bits, non SLS segment */
+	/*
+	 * We need 14 to 65 bits of va for a tlbie of 4K page.
+	 * With vpn we ignore the lower VPN_SHIFT bits already.
+	 * And top two bits are already ignored because we can
+	 * only accommodate 76 bits in a 64 bit vpn with a VPN_SHIFT
+	 * of 12.
+	 */
+	va = vpn << VPN_SHIFT;
+	/*
+	 * clear top 16 bits of 64 bit va, non SLS segment
+	 * Older versions of the architecture (2.02 and earlier) require the
+	 * masking of the top 16 bits.
+	 */
 	va &= ~(0xffffULL << 48);
 
 	switch (psize) {
 	case MMU_PAGE_4K:
-		va &= ~0xffful;
 		va |= ssize << 8;
 		asm volatile(ASM_FTR_IFCLR("tlbie %0,0", PPC_TLBIE(%1,%0), %2)
			     : : "r" (va), "r"(0), "i" (CPU_FTR_ARCH_206)
			     : "memory");
 		break;
 	default:
+		/* We need 14 to 14 + i bits of va */
 		penc = mmu_psize_defs[psize].penc;
 		va &= ~((1ul << mmu_psize_defs[psize].shift) - 1);
 		va |= penc << 12;
@@ -67,21 +80,28 @@ static inline void __tlbie(unsigned long va, int psize, int ssize)
 	}
 }
 
-static inline void __tlbiel(unsigned long va, int psize, int ssize)
+static inline void __tlbiel(unsigned long vpn, int psize, int ssize)
 {
+	unsigned long va;
 	unsigned int penc;
 
-	/* clear top 16 bits, non SLS segment */
+	/* VPN_SHIFT can be at most 12 */
+	va = vpn << VPN_SHIFT;
+	/*
+	 * clear top 16 bits of 64 bit va, non SLS segment
+	 * Older versions of the architecture (2.02 and earlier) require the
+	 * masking of the top 16 bits.
+	 */
 	va &= ~(0xffffULL << 48);
 
 	switch (psize) {
 	case MMU_PAGE_4K:
-		va &= ~0xffful;
 		va |= ssize << 8;
 		asm volatile(".long 0x7c000224 | (%0 << 11) | (0 << 21)"
			     : : "r"(va) : "memory");
 		break;
 	default:
+		/* We need 14 to 14 + i bits of va */
 		penc = mmu_psize_defs[psize].penc;
 		va &= ~((1ul << mmu_psize_defs[psize].shift) - 1);
 		va |= penc << 12;
@@ -94,7 +114,7 @@ static inline void __tlbiel(unsigned long va, int psize, int ssize)
 	}
 }
 
-static inline void tlbie(unsigned long va, int psize, int ssize, int local)
+static inline void tlbie(unsigned long vpn, int psize, int ssize, int local)
 {
 	unsigned int use_local = local && mmu_has_feature(MMU_FTR_TLBIEL);
 	int lock_tlbie = !mmu_has_feature(MMU_FTR_LOCKLESS_TLBIE);
@@ -105,10 +125,10 @@ static inline void tlbie(unsigned long va, int psize, int ssize, int local)
 		raw_spin_lock(&native_tlbie_lock);
 	asm volatile("ptesync": : :"memory");
 	if (use_local) {
-		__tlbiel(va, psize, ssize);
+		__tlbiel(vpn, psize, ssize);
 		asm volatile("ptesync": : :"memory");
 	} else {
-		__tlbie(va, psize, ssize);
+		__tlbie(vpn, psize, ssize);
 		asm volatile("eieio; tlbsync; ptesync": : :"memory");
 	}
 	if (lock_tlbie && !use_local)
@@ -134,7 +154,7 @@ static inline void native_unlock_hpte(struct hash_pte *hptep)
 	clear_bit_unlock(HPTE_LOCK_BIT, word);
 }
 
-static long native_hpte_insert(unsigned long hpte_group, unsigned long va,
+static long native_hpte_insert(unsigned long hpte_group, unsigned long vpn,
 			unsigned long pa, unsigned long rflags,
 			unsigned long vflags, int psize, int ssize)
 {
@@ -143,9 +163,9 @@ static long native_hpte_insert(unsigned long hpte_group, unsigned long va,
 	int i;
 
 	if (!(vflags & HPTE_V_BOLTED)) {
-		DBG_LOW(" insert(group=%lx, va=%016lx, pa=%016lx,"
+		DBG_LOW(" insert(group=%lx, vpn=%016lx, pa=%016lx,"
 			" rflags=%lx, vflags=%lx, psize=%d)\n",
-			hpte_group, va, pa, rflags, vflags, psize);
+			hpte_group, vpn, pa, rflags, vflags, psize);
 	}
 
 	for (i = 0; i < HPTES_PER_GROUP; i++) {
@@ -163,7 +183,7 @@ static long native_hpte_insert(unsigned long hpte_group, unsigned long va,
 	if (i == HPTES_PER_GROUP)
 		return -1;
 
-	hpte_v = hpte_encode_v(va, psize, ssize) | vflags | HPTE_V_VALID;
+	hpte_v = hpte_encode_v(vpn, psize, ssize) | vflags | HPTE_V_VALID;
 	hpte_r = hpte_encode_r(pa, psize) | rflags;
 
 	if (!(vflags & HPTE_V_BOLTED)) {
@@ -225,17 +245,17 @@ static long native_hpte_remove(unsigned long hpte_group)
 }
 
 static long native_hpte_updatepp(unsigned long slot, unsigned long newpp,
-				 unsigned long va, int psize, int ssize,
+				 unsigned long vpn, int psize, int ssize,
 				 int local)
 {
 	struct hash_pte *hptep = htab_address + slot;
 	unsigned long hpte_v, want_v;
 	int ret = 0;
 
-	want_v = hpte_encode_v(va, psize, ssize);
+	want_v = hpte_encode_v(vpn, psize, ssize);
 
-	DBG_LOW(" update(va=%016lx, avpnv=%016lx, hash=%016lx, newpp=%x)",
-		va, want_v & HPTE_V_AVPN, slot, newpp);
+	DBG_LOW(" update(vpn=%016lx, avpnv=%016lx, group=%lx, newpp=%lx)",
+		vpn, want_v & HPTE_V_AVPN, slot, newpp);
 
 	native_lock_hpte(hptep);
 
@@ -254,12 +274,12 @@ static long native_hpte_updatepp(unsigned long slot, unsigned long newpp,
 	native_unlock_hpte(hptep);
 
 	/* Ensure it is out of the tlb too. */
-	tlbie(va, psize, ssize, local);
+	tlbie(vpn, psize, ssize, local);
 
 	return ret;
 }
 
-static long native_hpte_find(unsigned long va, int psize, int ssize)
+static long native_hpte_find(unsigned long vpn, int psize, int ssize)
 {
 	struct hash_pte *hptep;
 	unsigned long hash;
@@ -267,8 +287,8 @@ static long native_hpte_find(unsigned long va, int psize, int ssize)
 	long slot;
 	unsigned long want_v, hpte_v;
 
-	hash = hpt_hash(va, mmu_psize_defs[psize].shift, ssize);
-	want_v = hpte_encode_v(va, psize, ssize);
+	hash = hpt_hash(vpn, mmu_psize_defs[psize].shift, ssize);
+	want_v = hpte_encode_v(vpn, psize, ssize);
 
 	/* Bolted mappings are only ever in the primary group */
 	slot = (hash & htab_hash_mask) * HPTES_PER_GROUP;
@@ -295,14 +315,15 @@ static long native_hpte_find(unsigned long va, int psize, int ssize)
 static void native_hpte_updateboltedpp(unsigned long newpp, unsigned long ea,
 				       int psize, int ssize)
 {
-	unsigned long vsid, va;
+	unsigned long vpn;
+	unsigned long vsid;
 	long slot;
 	struct hash_pte *hptep;
 
 	vsid = get_kernel_vsid(ea, ssize);
-	va = hpt_va(ea, vsid, ssize);
+	vpn = hpt_vpn(ea, vsid, ssize);
 
-	slot = native_hpte_find(va, psize, ssize);
+	slot = native_hpte_find(vpn, psize, ssize);
 	if (slot == -1)
 		panic("could not find page to bolt\n");
 	hptep = htab_address + slot;
@@ -312,10 +333,10 @@ static void native_hpte_updateboltedpp(unsigned long newpp, unsigned long ea,
 		(newpp & (HPTE_R_PP | HPTE_R_N));
 
 	/* Ensure it is out of the tlb too. */
-	tlbie(va, psize, ssize, 0);
+	tlbie(vpn, psize, ssize, 0);
 }
 
-static void native_hpte_invalidate(unsigned long slot, unsigned long va,
+static void native_hpte_invalidate(unsigned long slot, unsigned long vpn,
 				   int psize, int ssize, int local)
 {
 	struct hash_pte *hptep = htab_address + slot;
@@ -325,9 +346,9 @@ static void native_hpte_invalidate(unsigned long slot, unsigned long va,
 
 	local_irq_save(flags);
 
-	DBG_LOW(" invalidate(va=%016lx, hash: %x)\n", va, slot);
+	DBG_LOW(" invalidate(vpn=%016lx, hash: %lx)\n", vpn, slot);
 
-	want_v = hpte_encode_v(va, psize, ssize);
+	want_v = hpte_encode_v(vpn, psize, ssize);
 	native_lock_hpte(hptep);
 	hpte_v = hptep->v;
 
@@ -339,7 +360,7 @@ static void native_hpte_invalidate(unsigned long slot, unsigned long va,
 		hptep->v = 0;
 
 	/* Invalidate the TLB */
-	tlbie(va, psize, ssize, local);
+	tlbie(vpn, psize, ssize, local);
 
 	local_irq_restore(flags);
 }
@@ -349,7 +370,7 @@ static void native_hpte_invalidate(unsigned long slot, unsigned long va,
 #define LP_MASK(i)	((0xFF >> (i)) << LP_SHIFT)
 
 static void hpte_decode(struct hash_pte *hpte, unsigned long slot,
-			int *psize, int *ssize, unsigned long *va)
+			int *psize, int *ssize, unsigned long *vpn)
 {
 	unsigned long avpn, pteg, vpi;
 	unsigned long hpte_r = hpte->r;
@@ -399,7 +420,7 @@ static void hpte_decode(struct hash_pte *hpte, unsigned long slot,
 			vpi = (vsid ^ pteg) & htab_hash_mask;
 			seg_off |= vpi << shift;
 		}
-		*va = vsid << SID_SHIFT | seg_off;
+		*vpn = vsid << (SID_SHIFT - VPN_SHIFT) | seg_off >> VPN_SHIFT;
 	case MMU_SEGSIZE_1T:
 		/* We only have 40 - 23 bits of seg_off in avpn */
 		seg_off = (avpn & 0x1ffff) << 23;
@@ -408,9 +429,9 @@ static void hpte_decode(struct hash_pte *hpte, unsigned long slot,
 			vpi = (vsid ^ (vsid << 25) ^ pteg) & htab_hash_mask;
 			seg_off |= vpi << shift;
 		}
-		*va = vsid << SID_SHIFT_1T | seg_off;
+		*vpn = vsid << (SID_SHIFT_1T - VPN_SHIFT) | seg_off >> VPN_SHIFT;
 	default:
-		*va = size = 0;
+		*vpn = size = 0;
 	}
 	*psize = size;
 }
@@ -425,9 +446,10 @@ static void hpte_decode(struct hash_pte *hpte, unsigned long slot,
  */
 static void native_hpte_clear(void)
 {
+	unsigned long vpn = 0;
 	unsigned long slot, slots, flags;
 	struct hash_pte *hptep = htab_address;
-	unsigned long hpte_v, va;
+	unsigned long hpte_v;
 	unsigned long pteg_count;
 	int psize, ssize;
 
@@ -455,9 +477,9 @@ static void native_hpte_clear(void)
 		 * already hold the native_tlbie_lock.
 		 */
 		if (hpte_v & HPTE_V_VALID) {
-			hpte_decode(hptep, slot, &psize, &ssize, &va);
+			hpte_decode(hptep, slot, &psize, &ssize, &vpn);
 			hptep->v = 0;
-			__tlbie(va, psize, ssize);
+			__tlbie(vpn, psize, ssize);
 		}
 	}
 
@@ -472,7 +494,8 @@ static void native_hpte_clear(void)
  */
 static void native_flush_hash_range(unsigned long number, int local)
 {
-	unsigned long va, hash, index, hidx, shift, slot;
+	unsigned long vpn;
+	unsigned long hash, index, hidx, shift, slot;
 	struct hash_pte *hptep;
 	unsigned long hpte_v;
 	unsigned long want_v;
@@ -486,18 +509,18 @@ static void native_flush_hash_range(unsigned long number, int local)
 	local_irq_save(flags);
 
 	for (i = 0; i < number; i++) {
-		va = batch->vaddr[i];
+		vpn = batch->vpn[i];
 		pte = batch->pte[i];
 
-		pte_iterate_hashed_subpages(pte, psize, va, index, shift) {
-			hash = hpt_hash(va, shift, ssize);
+		pte_iterate_hashed_subpages(pte, psize, vpn, index, shift) {
+			hash = hpt_hash(vpn, shift, ssize);
 			hidx = __rpte_to_hidx(pte, index);
 			if (hidx & _PTEIDX_SECONDARY)
 				hash = ~hash;
 			slot = (hash & htab_hash_mask) * HPTES_PER_GROUP;
 			slot += hidx & _PTEIDX_GROUP_IX;
 			hptep = htab_address + slot;
-			want_v = hpte_encode_v(va, psize, ssize);
+			want_v = hpte_encode_v(vpn, psize, ssize);
 			native_lock_hpte(hptep);
 			hpte_v = hptep->v;
 			if (!HPTE_V_COMPARE(hpte_v, want_v) ||
@@ -512,12 +535,12 @@ static void native_flush_hash_range(unsigned long number, int local)
 	    mmu_psize_defs[psize].tlbiel && local) {
 		asm volatile("ptesync":::"memory");
 		for (i = 0; i < number; i++) {
-			va = batch->vaddr[i];
+			vpn = batch->vpn[i];
 			pte = batch->pte[i];
 
-			pte_iterate_hashed_subpages(pte, psize, va, index,
-						    shift) {
-				__tlbiel(va, psize, ssize);
+			pte_iterate_hashed_subpages(pte, psize,
+						    vpn, index, shift) {
+				__tlbiel(vpn, psize, ssize);
 			} pte_iterate_hashed_end();
 		}
 		asm volatile("ptesync":::"memory");
@@ -529,12 +552,12 @@ static void native_flush_hash_range(unsigned long number, int local)
 
 		asm volatile("ptesync":::"memory");
 		for (i = 0; i < number; i++) {
-			va = batch->vaddr[i];
+			vpn = batch->vpn[i];
 			pte = batch->pte[i];
 
-			pte_iterate_hashed_subpages(pte, psize, va, index,
-						    shift) {
-				__tlbie(va, psize, ssize);
+			pte_iterate_hashed_subpages(pte, psize,
+						    vpn, index, shift) {
+				__tlbie(vpn, psize, ssize);
 			} pte_iterate_hashed_end();
 		}
 		asm volatile("eieio; tlbsync; ptesync":::"memory");
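
Not part of the patch itself: a minimal, stand-alone C sketch of the vpn <-> va
relationship the hunks above rely on. VPN_SHIFT, the sample address and the
printf harness are illustrative assumptions only; the real definitions live in
the powerpc mmu-hash64 headers.

	/* sketch.c - illustrative only, not kernel code */
	#include <stdio.h>

	#define VPN_SHIFT	12	/* assumed: low va bits dropped when forming a vpn */

	int main(void)
	{
		unsigned long va  = 0xdeadbeef000UL;	/* hypothetical 4K-aligned virtual address */
		unsigned long vpn = va >> VPN_SHIFT;	/* what callers now pass around */

		/* __tlbie()/__tlbiel() rebuild the va before building the tlbie operand... */
		unsigned long tlbie_va = vpn << VPN_SHIFT;

		/* ...and still clear the top 16 bits for architecture 2.02 and earlier. */
		tlbie_va &= ~(0xffffULL << 48);

		printf("va=%#lx vpn=%#lx rebuilt=%#lx\n", va, vpn, tlbie_va);
		return 0;
	}

Keeping only the page number is what lets a 64 bit unsigned long describe up to
a 76 bit virtual address (64 plus a VPN_SHIFT of 12), which is why the comment
in __tlbie() notes that the top two bits of the va are ignored.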