ppc: Fix various compile errors resulting from ptrace.c merge

This introduces flush_{fp,altivec,spe}_to_thread and fixes a
branch-too-far error in linking.

Signed-off-by: Paul Mackerras <paulus@samba.org>
Paul Mackerras, 19 years ago
Parent
Commit 7ac59c6249
3 changed files with 116 additions and 37 deletions
  1. arch/ppc/kernel/entry.S  (+ 2 - 1)
  2. arch/ppc/kernel/process.c  (+ 96 - 36)
  3. include/asm-ppc/system.h  (+ 18 - 0)

+ 2 - 1
arch/ppc/kernel/entry.S

@@ -633,7 +633,8 @@ sigreturn_exit:
 	rlwinm	r12,r1,0,0,18	/* current_thread_info() */
 	lwz	r9,TI_FLAGS(r12)
 	andi.	r0,r9,_TIF_SYSCALL_T_OR_A
-	bnel-	do_syscall_trace_leave
+	beq+	ret_from_except_full
+	bl	do_syscall_trace_leave
 	/* fall through */
 
 	.globl	ret_from_except_full
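
Why this fixes the link error: bnel- is a conditional branch-and-link encoded with a 16-bit displacement, so it can only reach targets within ±32 KB; once do_syscall_trace_leave ended up farther away than that, the final link failed with a branch-too-far relocation error. The new sequence inverts the test: beq+ takes the short, local branch to ret_from_except_full, and the distant call uses an unconditional bl, whose 26-bit displacement reaches ±32 MB.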

+ 96 - 36
arch/ppc/kernel/process.c

@@ -152,18 +152,66 @@ int check_stack(struct task_struct *tsk)
 }
 #endif /* defined(CHECK_STACK) */
 
-#ifdef CONFIG_ALTIVEC
-int
-dump_altivec(struct pt_regs *regs, elf_vrregset_t *vrregs)
+/*
+ * Make sure the floating-point register state in the
+ * thread_struct is up to date for task tsk.
+ */
+void flush_fp_to_thread(struct task_struct *tsk)
 {
-	if (regs->msr & MSR_VEC)
-		giveup_altivec(current);
-	memcpy(vrregs, &current->thread.vr[0], sizeof(*vrregs));
+	if (tsk->thread.regs) {
+		/*
+		 * We need to disable preemption here because if we didn't,
+		 * another process could get scheduled after the regs->msr
+		 * test but before we have finished saving the FP registers
+		 * to the thread_struct.  That process could take over the
+		 * FPU, and then when we get scheduled again we would store
+		 * bogus values for the remaining FP registers.
+		 */
+		preempt_disable();
+		if (tsk->thread.regs->msr & MSR_FP) {
+#ifdef CONFIG_SMP
+			/*
+			 * This should only ever be called for current or
+			 * for a stopped child process.  Since we save away
+			 * the FP register state on context switch on SMP,
+			 * there is something wrong if a stopped child appears
+			 * to still have its FP state in the CPU registers.
+			 */
+			BUG_ON(tsk != current);
+#endif
+			giveup_fpu(current);
+		}
+		preempt_enable();
+	}
+}
+
+void enable_kernel_fp(void)
+{
+	WARN_ON(preemptible());
+
+#ifdef CONFIG_SMP
+	if (current->thread.regs && (current->thread.regs->msr & MSR_FP))
+		giveup_fpu(current);
+	else
+		giveup_fpu(NULL);	/* just enables FP for kernel */
+#else
+	giveup_fpu(last_task_used_math);
+#endif /* CONFIG_SMP */
+}
+EXPORT_SYMBOL(enable_kernel_fp);
+
+int dump_task_fpu(struct task_struct *tsk, elf_fpregset_t *fpregs)
+{
+	preempt_disable();
+	if (tsk->thread.regs && (tsk->thread.regs->msr & MSR_FP))
+		giveup_fpu(tsk);
+	preempt_enable();
+	memcpy(fpregs, &tsk->thread.fpr[0], sizeof(*fpregs));
 	return 1;
 }
 
-void
-enable_kernel_altivec(void)
+#ifdef CONFIG_ALTIVEC
+void enable_kernel_altivec(void)
 {
 	WARN_ON(preemptible());
 
@@ -177,19 +225,35 @@ enable_kernel_altivec(void)
 #endif /* CONFIG_SMP */
 }
 EXPORT_SYMBOL(enable_kernel_altivec);
-#endif /* CONFIG_ALTIVEC */
 
-#ifdef CONFIG_SPE
-int
-dump_spe(struct pt_regs *regs, elf_vrregset_t *evrregs)
+/*
+ * Make sure the VMX/Altivec register state in the
+ * thread_struct is up to date for task tsk.
+ */
+void flush_altivec_to_thread(struct task_struct *tsk)
 {
-	if (regs->msr & MSR_SPE)
-		giveup_spe(current);
-	/* We copy u32 evr[32] + u64 acc + u32 spefscr -> 35 */
-	memcpy(evrregs, &current->thread.evr[0], sizeof(u32) * 35);
+	if (tsk->thread.regs) {
+		preempt_disable();
+		if (tsk->thread.regs->msr & MSR_VEC) {
+#ifdef CONFIG_SMP
+			BUG_ON(tsk != current);
+#endif
+			giveup_altivec(current);
+		}
+		preempt_enable();
+	}
+}
+
+int dump_altivec(struct pt_regs *regs, elf_vrregset_t *vrregs)
+{
+	if (regs->msr & MSR_VEC)
+		giveup_altivec(current);
+	memcpy(vrregs, &current->thread.vr[0], sizeof(*vrregs));
 	return 1;
 }
+#endif /* CONFIG_ALTIVEC */
 
+#ifdef CONFIG_SPE
 void
 enable_kernel_spe(void)
 {
@@ -205,34 +269,30 @@ enable_kernel_spe(void)
 #endif /* CONFIG_SMP */
 }
 EXPORT_SYMBOL(enable_kernel_spe);
-#endif /* CONFIG_SPE */
 
-void
-enable_kernel_fp(void)
+void flush_spe_to_thread(struct task_struct *tsk)
 {
-	WARN_ON(preemptible());
-
+	if (tsk->thread.regs) {
+		preempt_disable();
+		if (tsk->thread.regs->msr & MSR_SPE) {
 #ifdef CONFIG_SMP
-	if (current->thread.regs && (current->thread.regs->msr & MSR_FP))
-		giveup_fpu(current);
-	else
-		giveup_fpu(NULL);	/* just enables FP for kernel */
-#else
-	giveup_fpu(last_task_used_math);
-#endif /* CONFIG_SMP */
+			BUG_ON(tsk != current);
+#endif
+			giveup_spe(current);
+		}
+		preempt_enable();
+	}
 }
-EXPORT_SYMBOL(enable_kernel_fp);
 
-int
-dump_task_fpu(struct task_struct *tsk, elf_fpregset_t *fpregs)
+int dump_spe(struct pt_regs *regs, elf_vrregset_t *evrregs)
 {
-	preempt_disable();
-	if (tsk->thread.regs && (tsk->thread.regs->msr & MSR_FP))
-		giveup_fpu(tsk);
-	preempt_enable();
-	memcpy(fpregs, &tsk->thread.fpr[0], sizeof(*fpregs));
+	if (regs->msr & MSR_SPE)
+		giveup_spe(current);
+	/* We copy u32 evr[32] + u64 acc + u32 spefscr -> 35 */
+	memcpy(evrregs, &current->thread.evr[0], sizeof(u32) * 35);
 	return 1;
 }
+#endif /* CONFIG_SPE */
 
 struct task_struct *__switch_to(struct task_struct *prev,
 	struct task_struct *new)
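
A minimal usage sketch (hypothetical helper, not part of this commit): a ptrace-style reader should flush the live FP state into the thread_struct before copying it out, which is the same pattern dump_task_fpu() follows above. The name get_child_fpregs is an assumption for illustration only:

	/* Hypothetical ptrace-style reader; assumes flush_fp_to_thread()
	 * and the thread_struct layout used in this commit. */
	static int get_child_fpregs(struct task_struct *child,
				    elf_fpregset_t *fpregs)
	{
		flush_fp_to_thread(child);	/* save child's live FP regs, if any */
		memcpy(fpregs, &child->thread.fpr[0], sizeof(*fpregs));
		return 0;
	}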

+ 18 - 0
include/asm-ppc/system.h

@@ -74,6 +74,7 @@ extern void read_rtc_time(void);
 extern void pmac_find_display(void);
 extern void giveup_fpu(struct task_struct *);
 extern void enable_kernel_fp(void);
+extern void flush_fp_to_thread(struct task_struct *);
 extern void enable_kernel_altivec(void);
 extern void giveup_altivec(struct task_struct *);
 extern void load_up_altivec(struct task_struct *);
@@ -83,6 +84,23 @@ extern void load_up_spe(struct task_struct *);
 extern int fix_alignment(struct pt_regs *);
 extern void cvt_fd(float *from, double *to, unsigned long *fpscr);
 extern void cvt_df(double *from, float *to, unsigned long *fpscr);
+
+#ifdef CONFIG_ALTIVEC
+extern void flush_altivec_to_thread(struct task_struct *);
+#else
+static inline void flush_altivec_to_thread(struct task_struct *t)
+{
+}
+#endif
+
+#ifdef CONFIG_SPE
+extern void flush_spe_to_thread(struct task_struct *);
+#else
+static inline void flush_spe_to_thread(struct task_struct *t)
+{
+}
+#endif
+
 extern int call_rtas(const char *, int, int, unsigned long *, ...);
 extern void cacheable_memzero(void *p, unsigned int nb);
 extern void *cacheable_memcpy(void *, const void *, unsigned int);
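
The empty inline stubs above exist so that generic code can call the flush functions unconditionally, with no #ifdef at the call site. A minimal sketch of such a caller (hypothetical name, assuming only the declarations in this header):

	/* Hypothetical: flush all live register state for tsk into its
	 * thread_struct before examining it (e.g. for ptrace or a core dump). */
	static void flush_all_to_thread(struct task_struct *tsk)
	{
		flush_fp_to_thread(tsk);
		flush_altivec_to_thread(tsk);	/* no-op when !CONFIG_ALTIVEC */
		flush_spe_to_thread(tsk);	/* no-op when !CONFIG_SPE */
	}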