@@ -91,23 +91,17 @@ STD_ENTRY(smp_nap)
 	/*
 	 * Enable interrupts racelessly and then nap until interrupted.
+	 * Architecturally, we are guaranteed that enabling interrupts via
+	 * mtspr to INTERRUPT_CRITICAL_SECTION only interrupts at the next PC.
 	 * This function's _cpu_idle_nap address is special; see intvec.S.
 	 * When interrupted at _cpu_idle_nap, we bump the PC forward 8, and
 	 * as a result return to the function that called _cpu_idle().
 	 */
 	STD_ENTRY(_cpu_idle)
-	{
-	 lnk r0
-	 movei r1, KERNEL_PL
-	}
-	{
-	 addli r0, r0, _cpu_idle_nap - .
-	 mtspr INTERRUPT_CRITICAL_SECTION, r1
-	}
+	movei r1, 1
+	mtspr INTERRUPT_CRITICAL_SECTION, r1
 	IRQ_ENABLE(r2, r3)             /* unmask, but still with ICS set */
-	mtspr SPR_EX_CONTEXT_K_1, r1   /* Kernel PL, ICS clear */
-	mtspr SPR_EX_CONTEXT_K_0, r0
-	iret
+	mtspr INTERRUPT_CRITICAL_SECTION, zero
 	.global _cpu_idle_nap
 _cpu_idle_nap:
 	nap