@@ -775,133 +775,6 @@ InstructionSegment:
EXC_XFER_STD(0x480, UnknownException)
#endif /* CONFIG_PPC64BRIDGE */

-/*
- * This task wants to use the FPU now.
- * On UP, disable FP for the task which had the FPU previously,
- * and save its floating-point registers in its thread_struct.
- * Load up this task's FP registers from its thread_struct,
- * enable the FPU for the current task and return to the task.
- */
-load_up_fpu:
- mfmsr r5
- ori r5,r5,MSR_FP
-#ifdef CONFIG_PPC64BRIDGE
- clrldi r5,r5,1 /* turn off 64-bit mode */
-#endif /* CONFIG_PPC64BRIDGE */
- SYNC
- MTMSRD(r5) /* enable use of fpu now */
- isync
-/*
- * For SMP, we don't do lazy FPU switching because it just gets too
- * horrendously complex, especially when a task switches from one CPU
- * to another. Instead we call giveup_fpu in switch_to.
- */
-#ifndef CONFIG_SMP
- tophys(r6,0) /* get __pa constant */
- addis r3,r6,last_task_used_math@ha
- lwz r4,last_task_used_math@l(r3)
- cmpwi 0,r4,0
- beq 1f
- add r4,r4,r6
- addi r4,r4,THREAD /* want last_task_used_math->thread */
- SAVE_32FPRS(0, r4)
- mffs fr0
- stfd fr0,THREAD_FPSCR-4(r4)
- lwz r5,PT_REGS(r4)
- add r5,r5,r6
- lwz r4,_MSR-STACK_FRAME_OVERHEAD(r5)
- li r10,MSR_FP|MSR_FE0|MSR_FE1
- andc r4,r4,r10 /* disable FP for previous task */
- stw r4,_MSR-STACK_FRAME_OVERHEAD(r5)
-1:
-#endif /* CONFIG_SMP */
- /* enable use of FP after return */
- mfspr r5,SPRN_SPRG3 /* current task's THREAD (phys) */
- lwz r4,THREAD_FPEXC_MODE(r5)
- ori r9,r9,MSR_FP /* enable FP for current */
- or r9,r9,r4
- lfd fr0,THREAD_FPSCR-4(r5)
- mtfsf 0xff,fr0
- REST_32FPRS(0, r5)
-#ifndef CONFIG_SMP
- subi r4,r5,THREAD
- sub r4,r4,r6
- stw r4,last_task_used_math@l(r3)
-#endif /* CONFIG_SMP */
- /* restore registers and return */
- /* we haven't used ctr or xer or lr */
- /* fall through to fast_exception_return */
-
- .globl fast_exception_return
-fast_exception_return:
- andi. r10,r9,MSR_RI /* check for recoverable interrupt */
- beq 1f /* if not, we've got problems */
-2: REST_4GPRS(3, r11)
- lwz r10,_CCR(r11)
- REST_GPR(1, r11)
- mtcr r10
- lwz r10,_LINK(r11)
- mtlr r10
- REST_GPR(10, r11)
- mtspr SPRN_SRR1,r9
- mtspr SPRN_SRR0,r12
- REST_GPR(9, r11)
- REST_GPR(12, r11)
- lwz r11,GPR11(r11)
- SYNC
- RFI
-
-/* check if the exception happened in a restartable section */
-1: lis r3,exc_exit_restart_end@ha
- addi r3,r3,exc_exit_restart_end@l
- cmplw r12,r3
- bge 3f
- lis r4,exc_exit_restart@ha
- addi r4,r4,exc_exit_restart@l
- cmplw r12,r4
- blt 3f
- lis r3,fee_restarts@ha
- tophys(r3,r3)
- lwz r5,fee_restarts@l(r3)
- addi r5,r5,1
- stw r5,fee_restarts@l(r3)
- mr r12,r4 /* restart at exc_exit_restart */
- b 2b
-
- .comm fee_restarts,4
-
-/* aargh, a nonrecoverable interrupt, panic */
-/* aargh, we don't know which trap this is */
-/* but the 601 doesn't implement the RI bit, so assume it's OK */
-3:
-BEGIN_FTR_SECTION
- b 2b
-END_FTR_SECTION_IFSET(CPU_FTR_601)
- li r10,-1
- stw r10,TRAP(r11)
- addi r3,r1,STACK_FRAME_OVERHEAD
- li r10,MSR_KERNEL
- bl transfer_to_handler_full
- .long nonrecoverable_exception
- .long ret_from_except
-
-/*
- * FP unavailable trap from kernel - print a message, but let
- * the task use FP in the kernel until it returns to user mode.
- */
-KernelFP:
- lwz r3,_MSR(r1)
- ori r3,r3,MSR_FP
- stw r3,_MSR(r1) /* enable use of FP after return */
- lis r3,86f@h
- ori r3,r3,86f@l
- mr r4,r2 /* current */
- lwz r5,_NIP(r1)
- bl printk
- b ret_from_except
-86: .string "floating point used in kernel (task=%p, pc=%x)\n"
- .align 4,0
-
#ifdef CONFIG_ALTIVEC
/* Note that the AltiVec support is closely modeled after the FP
* support. Changes to one are likely to be applicable to the
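The load_up_fpu path removed in the hunk above implements lazy FPU switching for uniprocessor kernels: a task only has its floating-point registers loaded when it actually takes an FP-unavailable trap, at which point the previous owner's registers are written back to its thread_struct and MSR_FP is cleared in that task's saved MSR so it will trap again the next time it touches the FPU. The following is a minimal, self-contained C sketch of that ownership policy only; struct task, struct fp_state, fpu_hw, and the helper names are illustrative stand-ins, not the kernel's real structures or API.

/* Toy model of UP lazy-FPU ownership; types and names are illustrative. */
#include <stdio.h>

struct fp_state { double fpr[32]; unsigned int fpscr; };

struct task {
	const char *name;
	struct fp_state fp;	/* saved FP state, like thread_struct */
	int msr_fp;		/* nonzero while this task owns the FPU */
};

static struct task *last_task_used_math;	/* UP only: current FPU owner */
static struct fp_state fpu_hw;			/* stand-in for the live FPRs/FPSCR */

/* Called on an FP-unavailable trap: hand the FPU to 'next'. */
static void load_up_fpu(struct task *next)
{
	if (last_task_used_math && last_task_used_math != next) {
		/* save the previous owner's live registers (SAVE_32FPRS/mffs)
		 * and drop its MSR_FP so it traps again if it uses FP later */
		last_task_used_math->fp = fpu_hw;
		last_task_used_math->msr_fp = 0;
	}
	fpu_hw = next->fp;		/* REST_32FPRS / mtfsf in the assembly */
	next->msr_fp = 1;		/* task returns with MSR_FP set */
	last_task_used_math = next;
}

int main(void)
{
	struct task a = { .name = "a" }, b = { .name = "b" };

	load_up_fpu(&a);
	fpu_hw.fpr[0] = 3.5;	/* 'a' computes into the live registers */
	load_up_fpu(&b);	/* 'a' loses ownership; its state is saved lazily */
	printf("saved fpr0 of %s = %g\n", a.name, a.fp.fpr[0]);
	return 0;
}

As the removed comment notes, SMP kernels skip this lazy scheme entirely and instead flush FP state by calling giveup_fpu from switch_to.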
@@ -1015,42 +888,6 @@ giveup_altivec:
blr
#endif /* CONFIG_ALTIVEC */

-/*
- * giveup_fpu(tsk)
- * Disable FP for the task given as the argument,
- * and save the floating-point registers in its thread_struct.
- * Enables the FPU for use in the kernel on return.
- */
- .globl giveup_fpu
-giveup_fpu:
- mfmsr r5
- ori r5,r5,MSR_FP
- SYNC_601
- ISYNC_601
- MTMSRD(r5) /* enable use of fpu now */
- SYNC_601
- isync
- cmpwi 0,r3,0
- beqlr- /* if no previous owner, done */
- addi r3,r3,THREAD /* want THREAD of task */
- lwz r5,PT_REGS(r3)
- cmpwi 0,r5,0
- SAVE_32FPRS(0, r3)
- mffs fr0
- stfd fr0,THREAD_FPSCR-4(r3)
- beq 1f
- lwz r4,_MSR-STACK_FRAME_OVERHEAD(r5)
- li r3,MSR_FP|MSR_FE0|MSR_FE1
- andc r4,r4,r3 /* disable FP for previous task */
- stw r4,_MSR-STACK_FRAME_OVERHEAD(r5)
-1:
-#ifndef CONFIG_SMP
- li r5,0
- lis r4,last_task_used_math@ha
- stw r5,last_task_used_math@l(r4)
-#endif /* CONFIG_SMP */
- blr
-
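giveup_fpu, removed just above, is the other half of the scheme: switch_to (and, on SMP, every task switch) calls it to flush the current owner's registers back into its thread_struct, clear MSR_FP in the task's saved MSR, and on UP forget the last_task_used_math owner. Continuing the toy model from the earlier sketch, with the same illustrative types rather than the kernel's real API, and simplified (the real routine only touches the saved MSR when the task has a pt_regs):

/* Flush 'tsk's FP state back to its thread struct and drop ownership.
 * A NULL task means there was no previous owner (the beqlr- case).
 * Uses struct task, fpu_hw and last_task_used_math from the sketch above. */
static void giveup_fpu(struct task *tsk)
{
	if (!tsk)
		return;
	tsk->fp = fpu_hw;	/* SAVE_32FPRS / mffs / stfd */
	tsk->msr_fp = 0;	/* clear MSR_FP (and FE0/FE1) in the saved MSR */
#ifndef CONFIG_SMP
	last_task_used_math = NULL;	/* nobody owns the FPU now */
#endif
}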
/*
* This code is jumped to from the startup code to copy
* the kernel image to physical address 0.