
sh: TLB protection violation exception optimizations.

This reworks the exception handling so that TLB protection violations skip
the TLB miss fastpath and go directly into do_page_fault(), as these always
require slow-path handling.

Based on an earlier patch by SUGIOKA Toshinobu.

Signed-off-by: Paul Mundt <lethal@linux-sh.org>
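
For orientation, the dispatch this patch sets up can be modeled in plain C. This is a hypothetical sketch, not kernel code: handle_tlbmiss and do_page_fault are the names used by the patch, but the types, the stub PTE check, and main() are invented stand-ins, and the return convention (0 for a handled miss, nonzero to fall back to the slow path) is inferred from the surrounding entry code.

/* Hypothetical model of the new dispatch; the real dispatch is the
 * SH-3 assembly in the diff below.  Stubs and types are illustrative. */
#include <stdio.h>

struct pt_regs { unsigned long sp; };

/* Fastpath (renamed from __do_page_fault by this patch): returns 0 if
 * the TLB could be refilled from a present PTE, nonzero otherwise. */
static int handle_tlbmiss(struct pt_regs *regs, unsigned long writeaccess,
			  unsigned long address)
{
	(void)regs; (void)writeaccess;
	return (address & 1) ? 1 : 0;	/* stand-in for the real PTE walk */
}

/* Full slow-path fault handler. */
static void do_page_fault(struct pt_regs *regs, unsigned long writeaccess,
			  unsigned long address)
{
	(void)regs;
	printf("slow path: addr=%#lx write=%lu\n", address, writeaccess);
}

/* TLB miss and initial page write: try the fastpath, fall back on failure. */
static void tlb_miss(struct pt_regs *regs, unsigned long wr, unsigned long addr)
{
	if (handle_tlbmiss(regs, wr, addr))
		do_page_fault(regs, wr, addr);
}

/* TLB protection violation: always needs the slow path, so the fastpath
 * is skipped entirely (the point of this patch). */
static void tlb_protection_violation(struct pt_regs *regs, unsigned long wr,
				     unsigned long addr)
{
	do_page_fault(regs, wr, addr);
}

int main(void)
{
	struct pt_regs regs = { 0 };
	tlb_miss(&regs, 0, 0x1000);			/* refill succeeds */
	tlb_protection_violation(&regs, 1, 0x2001);	/* straight to slow path */
	return 0;
}

A protection violation means the PTE is present but forbids the access (a copy-on-write write, for instance), so re-walking the page tables in the fastpath is pure overhead; dispatching straight to do_page_fault() drops that step.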
Paul Mundt, 16 years ago
Commit 112e58471d
2 files changed, 22 insertions(+), 14 deletions(-)
  1. arch/sh/kernel/cpu/sh3/entry.S (+19, -11)
  2. arch/sh/mm/fault_32.c (+3, -3)

arch/sh/kernel/cpu/sh3/entry.S (+19, -11)

@@ -113,34 +113,33 @@ OFF_TRA	=  (16*4+6*4)
 #if defined(CONFIG_MMU)
 	.align	2
 ENTRY(tlb_miss_load)
-	bra	call_dpf
+	bra	call_handle_tlbmiss
 	 mov	#0, r5
 
 	.align	2
 ENTRY(tlb_miss_store)
-	bra	call_dpf
+	bra	call_handle_tlbmiss
 	 mov	#1, r5
 
 	.align	2
 ENTRY(initial_page_write)
-	bra	call_dpf
+	bra	call_handle_tlbmiss
 	 mov	#1, r5
 
 	.align	2
 ENTRY(tlb_protection_violation_load)
-	bra	call_dpf
+	bra	call_do_page_fault
 	 mov	#0, r5
 
 	.align	2
 ENTRY(tlb_protection_violation_store)
-	bra	call_dpf
+	bra	call_do_page_fault
 	 mov	#1, r5
 
-call_dpf:
+call_handle_tlbmiss:
 	mov.l	1f, r0
 	mov	r5, r8
 	mov.l	@r0, r6
-	mov	r6, r9
 	mov.l	2f, r0
 	sts	pr, r10
 	jsr	@r0
@@ -151,16 +150,25 @@ call_dpf:
 	 lds	r10, pr
 	rts
 	 nop
-0:	mov.l	3f, r0
-	mov	r9, r6
+0:
 	mov	r8, r5
+call_do_page_fault:
+	mov.l	1f, r0
+	mov.l	@r0, r6
+
+	sti
+
+	mov.l	3f, r0
+	mov.l	4f, r1
+	mov	r15, r4
 	jmp	@r0
-	 mov	r15, r4
+	 lds	r1, pr
 
 	.align 2
 1:	.long	MMU_TEA
-2:	.long	__do_page_fault
+2:	.long	handle_tlbmiss
 3:	.long	do_page_fault
+4:	.long	ret_from_exception
 
 	.align	2
 ENTRY(address_error_load)
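
As a reading aid, here is the new call_do_page_fault sequence from the hunk above, re-quoted with editorial comments (the ! comments are mine, not part of the patch):

call_do_page_fault:
	mov.l	1f, r0		! r0 = address of MMU_TEA
	mov.l	@r0, r6		! arg3: faulting address, read from MMU_TEA

	sti			! re-enable interrupts before the slow path

	mov.l	3f, r0		! r0 = do_page_fault
	mov.l	4f, r1		! r1 = ret_from_exception
	mov	r15, r4		! arg1: pt_regs, i.e. the saved register frame
	jmp	@r0		! tail-call do_page_fault() ...
	 lds	r1, pr		! ... (delay slot) with pr preset so its return
				! goes straight to ret_from_exception

r5 (the writeaccess argument) is already set to 0 or 1 by the ENTRY stubs before branching here, and loading pr in the jmp delay slot means do_page_fault()'s ordinary function return lands in ret_from_exception with no extra trampoline in entry.S.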

arch/sh/mm/fault_32.c (+3, -3)

@@ -318,9 +318,9 @@ do_sigbus:
 /*
  * Called with interrupts disabled.
  */
-asmlinkage int __kprobes __do_page_fault(struct pt_regs *regs,
-					 unsigned long writeaccess,
-					 unsigned long address)
+asmlinkage int __kprobes
+handle_tlbmiss(struct pt_regs *regs, unsigned long writeaccess,
+	       unsigned long address)
 {
 	pgd_t *pgd;
 	pud_t *pud;