@@ -92,15 +92,13 @@ static inline void create_shadowed_slbe(unsigned long ea, int ssize,
 		     : "memory" );
 }
 
-void slb_flush_and_rebolt(void)
+static void __slb_flush_and_rebolt(void)
 {
 	/* If you change this make sure you change SLB_NUM_BOLTED
 	 * appropriately too. */
 	unsigned long linear_llp, vmalloc_llp, lflags, vflags;
 	unsigned long ksp_esid_data, ksp_vsid_data;
 
-	WARN_ON(!irqs_disabled());
-
 	linear_llp = mmu_psize_defs[mmu_linear_psize].sllp;
 	vmalloc_llp = mmu_psize_defs[mmu_vmalloc_psize].sllp;
 	lflags = SLB_VSID_KERNEL | linear_llp;
@@ -117,12 +115,6 @@ void slb_flush_and_rebolt(void)
 		ksp_vsid_data = get_slb_shadow()->save_area[2].vsid;
 	}
 
-	/*
-	 * We can't take a PMU exception in the following code, so hard
-	 * disable interrupts.
-	 */
-	hard_irq_disable();
-
 	/* We need to do this all in asm, so we're sure we don't touch
 	 * the stack between the slbia and rebolting it. */
 	asm volatile("isync\n"
@@ -139,6 +131,21 @@ void slb_flush_and_rebolt(void)
 		     : "memory");
 }
 
+void slb_flush_and_rebolt(void)
+{
+
+	WARN_ON(!irqs_disabled());
+
+	/*
+	 * We can't take a PMU exception in the following code, so hard
+	 * disable interrupts.
+	 */
+	hard_irq_disable();
+
+	__slb_flush_and_rebolt();
+	get_paca()->slb_cache_ptr = 0;
+}
+
 void slb_vmalloc_update(void)
 {
 	unsigned long vflags;
@@ -180,12 +187,20 @@ static inline int esids_match(unsigned long addr1, unsigned long addr2)
 /* Flush all user entries from the segment table of the current processor. */
 void switch_slb(struct task_struct *tsk, struct mm_struct *mm)
 {
-	unsigned long offset = get_paca()->slb_cache_ptr;
+	unsigned long offset;
 	unsigned long slbie_data = 0;
 	unsigned long pc = KSTK_EIP(tsk);
 	unsigned long stack = KSTK_ESP(tsk);
 	unsigned long unmapped_base;
 
+	/*
+	 * We need interrupts hard-disabled here, not just soft-disabled,
+	 * so that a PMU interrupt can't occur, which might try to access
+	 * user memory (to get a stack trace) and possibly cause an SLB miss
+	 * which would update the slb_cache/slb_cache_ptr fields in the PACA.
+	 */
+	hard_irq_disable();
+	offset = get_paca()->slb_cache_ptr;
 	if (!cpu_has_feature(CPU_FTR_NO_SLBIE_B) &&
 	    offset <= SLB_CACHE_ENTRIES) {
 		int i;
@@ -200,7 +215,7 @@ void switch_slb(struct task_struct *tsk, struct mm_struct *mm)
 		}
 		asm volatile("isync" : : : "memory");
 	} else {
-		slb_flush_and_rebolt();
+		__slb_flush_and_rebolt();
 	}
 
 	/* Workaround POWER5 < DD2.1 issue */
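
Note on the shape of this change: the patch splits the old function into a static worker (__slb_flush_and_rebolt) plus a public wrapper (slb_flush_and_rebolt) that establishes the interrupts-hard-disabled precondition itself, so a path like switch_slb() that has already called hard_irq_disable() can invoke the worker directly without redundant state changes. The following is a minimal userspace sketch of that wrapper/worker pattern, not kernel code: the names and the boolean stand-in for the real MSR[EE] interrupt state are hypothetical.

#include <assert.h>
#include <stdbool.h>
#include <stdio.h>

static bool irqs_hard_disabled;          /* hypothetical stand-in for MSR[EE] */

static void hard_irq_disable(void)       /* stand-in for the kernel helper */
{
	irqs_hard_disabled = true;
}

/* Worker: does the actual work; callers must have interrupts hard-disabled. */
static void __flush_and_rebolt(void)
{
	assert(irqs_hard_disabled);      /* the precondition the wrapper provides */
	printf("flush and rebolt (no interrupt can intervene)\n");
}

/* Public wrapper: establishes the precondition, then calls the worker. */
static void flush_and_rebolt(void)
{
	hard_irq_disable();
	__flush_and_rebolt();
	/* ...the patch also resets the per-cpu slb_cache_ptr here... */
}

int main(void)
{
	flush_and_rebolt();              /* external callers use the wrapper */

	hard_irq_disable();              /* a path like switch_slb() that has */
	__flush_and_rebolt();            /* already hard-disabled interrupts */
	return 0;                        /* calls the worker directly */
}

This is why the else-branch of switch_slb() now calls __slb_flush_and_rebolt() rather than the wrapper: switch_slb() has just done its own hard_irq_disable().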