@@ -36,8 +36,7 @@
 #include <asm/firmware.h>
 #include <asm/page_64.h>
 #include <asm/exception.h>
-
-#define DO_SOFT_DISABLE
+#include <asm/irqflags.h>
 
 /*
  * We layout physical memory as follows:
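The DO_SOFT_DISABLE #define used to gate the lazy-disable code paths in this file; it goes away (together with the #ifdef/#endif guards further down, whose bodies are kept), and the irq-tracing glue now comes from asm/irqflags.h. The lazy-disable scheme this file maintains keeps two per-CPU flags in the PACA; a rough C sketch for orientation only (field names as in that era's struct paca_struct, not a real header):

	/* Illustration only: the lazy-disable state lives in the PACA.
	 * soft_enabled is what the kernel asked for via local_irq_disable/enable,
	 * hard_enabled mirrors whether MSR_EE is actually set in hardware. */
	struct paca_soft_irq_sketch {
		unsigned char soft_enabled;	/* written by stb rN,PACASOFTIRQEN(r13) */
		unsigned char hard_enabled;	/* written by stb rN,PACAHARDIRQEN(r13) */
	};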
@@ -450,8 +449,8 @@ bad_stack:
 	 */
 fast_exc_return_irq:	/* restores irq state too */
 	ld	r3,SOFTE(r1)
+	TRACE_AND_RESTORE_IRQ(r3);
 	ld	r12,_MSR(r1)
-	stb	r3,PACASOFTIRQEN(r13)	/* restore paca->soft_enabled */
 	rldicl	r4,r12,49,63		/* get MSR_EE to LSB */
 	stb	r4,PACAHARDIRQEN(r13)	/* restore paca->hard_enabled */
 	b	1f
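TRACE_AND_RESTORE_IRQ(r3) replaces the bare store to PACASOFTIRQEN: besides writing the SOFTE value saved in the exception frame back into paca->soft_enabled, it has to tell lockdep that the interrupt state is changing. A hypothetical C rendering of what the macro must accomplish (the real macro is assembly in asm/irqflags.h; the helper name here is invented):

	#include <linux/irqflags.h>	/* trace_hardirqs_on()/trace_hardirqs_off() */
	#include <asm/paca.h>		/* get_paca() */

	static inline void trace_and_restore_irq_sketch(unsigned long soft_enable)
	{
		if (soft_enable)
			trace_hardirqs_on();	/* lockdep: interrupts are logically on again */
		else
			trace_hardirqs_off();	/* lockdep: they stay soft-disabled */
		/* then the pre-existing behaviour: stb rN,PACASOFTIRQEN(r13) */
		get_paca()->soft_enabled = soft_enable;
	}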
@@ -824,7 +823,7 @@ _STATIC(load_up_altivec)
  * Hash table stuff
  */
 	.align	7
-_GLOBAL(do_hash_page)
+_STATIC(do_hash_page)
 	std	r3,_DAR(r1)
 	std	r4,_DSISR(r1)
 
@@ -835,6 +834,27 @@ BEGIN_FTR_SECTION
 	bne-	do_ste_alloc		/* If so handle it */
 END_FTR_SECTION_IFCLR(CPU_FTR_SLB)
 
+	/*
+	 * On iSeries, we soft-disable interrupts here, then
+	 * hard-enable interrupts so that the hash_page code can spin on
+	 * the hash_table_lock without problems on a shared processor.
+	 */
+	DISABLE_INTS
+
+	/*
+	 * Currently, trace_hardirqs_off() will be called by DISABLE_INTS
+	 * and will clobber volatile registers when irq tracing is enabled
+	 * so we need to reload them. It may be possible to be smarter here
+	 * and move the irq tracing elsewhere but let's keep it simple for
+	 * now
+	 */
+#ifdef CONFIG_TRACE_IRQFLAGS
+	ld	r3,_DAR(r1)
+	ld	r4,_DSISR(r1)
+	ld	r5,_TRAP(r1)
+	ld	r12,_MSR(r1)
+	clrrdi	r5,r5,4
+#endif /* CONFIG_TRACE_IRQFLAGS */
 	/*
 	 * We need to set the _PAGE_USER bit if MSR_PR is set or if we are
 	 * accessing a userspace segment (even from the kernel). We assume
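The reason for the reload block is the ppc64 ELF ABI: trace_hardirqs_off(), called from DISABLE_INTS when CONFIG_TRACE_IRQFLAGS is set, is an ordinary C function and may clobber the volatile registers (r3-r12) that were holding hash_page()'s arguments, so those values are pulled back out of the exception frame saved on the kernel stack. A C-level sketch of the same recovery (hypothetical helper; the pt_regs fields are real):

	#include <asm/ptrace.h>

	/* Re-read hash_page()'s inputs from the saved register frame
	 * after the tracing call may have trashed r3-r12. */
	static void reload_hash_page_args(struct pt_regs *regs,
					  unsigned long *dar, unsigned long *dsisr,
					  unsigned long *trap, unsigned long *msr)
	{
		*dar   = regs->dar;		/* ld     r3,_DAR(r1)   */
		*dsisr = regs->dsisr;		/* ld     r4,_DSISR(r1) */
		*trap  = regs->trap & ~0xfUL;	/* ld     r5,_TRAP(r1); clrrdi r5,r5,4 */
		*msr   = regs->msr;		/* ld     r12,_MSR(r1)  */
	}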
@@ -847,13 +867,6 @@ END_FTR_SECTION_IFCLR(CPU_FTR_SLB)
 	ori	r4,r4,1			/* add _PAGE_PRESENT */
 	rlwimi	r4,r5,22+2,31-2,31-2	/* Set _PAGE_EXEC if trap is 0x400 */
 
-	/*
-	 * On iSeries, we soft-disable interrupts here, then
-	 * hard-enable interrupts so that the hash_page code can spin on
-	 * the hash_table_lock without problems on a shared processor.
-	 */
-	DISABLE_INTS
-
 	/*
 	 * r3 contains the faulting address
 	 * r4 contains the required access permissions
@@ -864,7 +877,6 @@ END_FTR_SECTION_IFCLR(CPU_FTR_SLB)
 	bl	.hash_page		/* build HPTE if possible */
 	cmpdi	r3,0			/* see if hash_page succeeded */
 
-#ifdef DO_SOFT_DISABLE
 BEGIN_FW_FTR_SECTION
 	/*
 	 * If we had interrupts soft-enabled at the point where the
@@ -876,7 +888,7 @@ BEGIN_FW_FTR_SECTION
 	 */
 	beq	13f
 END_FW_FTR_SECTION_IFSET(FW_FEATURE_ISERIES)
-#endif
+
 BEGIN_FW_FTR_SECTION
 	/*
 	 * Here we have interrupts hard-disabled, so it is sufficient
@@ -890,11 +902,12 @@ END_FW_FTR_SECTION_IFCLR(FW_FEATURE_ISERIES)
 
 	/*
 	 * hash_page couldn't handle it, set soft interrupt enable back
-	 * to what it was before the trap. Note that .local_irq_restore
+	 * to what it was before the trap. Note that .raw_local_irq_restore
 	 * handles any interrupts pending at this point.
 	 */
 	ld	r3,SOFTE(r1)
-	bl	.local_irq_restore
+	TRACE_AND_RESTORE_IRQ_PARTIAL(r3, 11f)
+	bl	.raw_local_irq_restore
 	b	11f
 
 /* Here we have a page fault that hash_page can't handle. */
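The old .local_irq_restore becomes .raw_local_irq_restore, i.e. a restore that does not itself invoke the lockdep hooks; the tracing is done up front by TRACE_AND_RESTORE_IRQ_PARTIAL which, on one plausible reading of the sequence, records the disabled state and branches straight to 11f when there is nothing to re-enable, and otherwise falls through to the bl. A hedged C sketch of that reading (helper name invented; the raw_local_irq_restore prototype is assumed from the rename):

	#include <linux/irqflags.h>
	#include <asm/paca.h>

	extern void raw_local_irq_restore(unsigned long);	/* target of bl .raw_local_irq_restore */

	static inline void hash_fail_irq_restore_sketch(unsigned long soft_enable)
	{
		if (!soft_enable) {
			trace_hardirqs_off();		/* stays soft-disabled: nothing to replay */
			get_paca()->soft_enabled = 0;	/* then skip ahead, as the branch to 11f does */
			return;
		}
		trace_hardirqs_on();			/* lockdep is told before irqs come back */
		raw_local_irq_restore(soft_enable);	/* untraced restore; replays pending irqs */
	}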
@@ -1493,6 +1506,10 @@ _INIT_STATIC(start_here_multiplatform)
 	addi	r2,r2,0x4000
 	add	r2,r2,r26
 
+	/* Set initial ptr to current */
+	LOAD_REG_IMMEDIATE(r4, init_task)
+	std	r4,PACACURRENT(r13)
+
 	/* Do very early kernel initializations, including initial hash table,
 	 * stab and slb setup before we turn on relocation. */
 
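Initialising PACACURRENT this early matters because, with irq tracing compiled in, lockdep code can run long before start_kernel() would normally set things up, and on ppc64 "current" is fetched through the PACA that r13 points at. Roughly (sketch only; the real accessor lives in asm/current.h):

	#include <asm/paca.h>

	/* ppc64 reads "current" through the per-CPU PACA; if the field that
	 * PACACURRENT names were still uninitialised, any early lockdep call
	 * dereferencing current would crash. */
	static inline struct task_struct *get_current_sketch(void)
	{
		return get_paca()->__current;	/* what std r4,PACACURRENT(r13) initialises */
	}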