[SPARC64]: Move kernel TLB miss handling into a separate file.

Signed-off-by: David S. Miller <davem@davemloft.net>
David S. Miller, 19 years ago
commit 2a7e299034
4 changed files with 179 additions and 157 deletions
  1. arch/sparc64/kernel/dtlb_base.S  +4 -4
  2. arch/sparc64/kernel/entry.S  +0 -153
  3. arch/sparc64/kernel/head.S  +1 -0
  4. arch/sparc64/kernel/ktlb.S  +174 -0

+ 4 - 4
arch/sparc64/kernel/dtlb_base.S

@@ -71,7 +71,7 @@
 from_tl1_trap:
 	rdpr		%tl, %g5			! For TL==3 test
 	CREATE_VPTE_OFFSET1(%g4, %g6)			! Create VPTE offset
-	be,pn		%xcc, 3f			! Yep, special processing
+	be,pn		%xcc, kvmap			! Yep, special processing
 	 CREATE_VPTE_OFFSET2(%g4, %g6)			! Create VPTE offset
 	cmp		%g5, 4				! Last trap level?
 	be,pn		%xcc, longpath			! Yep, cannot risk VPTE miss
@@ -83,9 +83,9 @@ from_tl1_trap:
 	 nop						! Delay-slot
 9:	stxa		%g5, [%g0] ASI_DTLB_DATA_IN	! Reload TLB
 	retry						! Trap return
-3:	brlz,pt		%g4, 9b				! Kernel virtual map?
-	 xor		%g2, %g4, %g5			! Finish bit twiddles
-	ba,a,pt		%xcc, kvmap			! Yep, go check for obp/vmalloc
+	nop
+	nop
+	nop
 
 /* DTLB ** ICACHE line 3: winfixups+real_faults		*/
 longpath:

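Note: the inline test at old label 3 becomes nops because kvmap itself (in the new ktlb.S, below) now opens with the same brlz/xor fast path. A rough C model of that fast path, assuming vaddr is the faulting address from %g4 and tte_magic the precomputed TTE bits the TLB miss vector keeps in %g2; both names and the two helpers are illustrative, not kernel APIs:

/* Illustrative sketch only; models the branch logic, not real kernel code. */
extern unsigned long tte_magic;                 /* lives in %g2 at trap time */
extern void tlb_load_and_retry(unsigned long);  /* stxa ASI_DTLB_DATA_IN; retry */
extern void kvmap_nonlinear(unsigned long);     /* vmalloc/modules/OBP slow path */

static void kvmap_fast_path(unsigned long vaddr)
{
	if ((long)vaddr < 0)                            /* brlz: linear kernel map? */
		tlb_load_and_retry(tte_magic ^ vaddr);  /* xor %g2, %g4, %g5 */
	else
		kvmap_nonlinear(vaddr);
}
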
+ 0 - 153
arch/sparc64/kernel/entry.S

@@ -30,159 +30,6 @@
 	.text
 	.align		32
 
-	.globl		sparc64_vpte_patchme1
-	.globl		sparc64_vpte_patchme2
-/*
- * On a second level vpte miss, check whether the original fault is to the OBP 
- * range (note that this is only possible for instruction miss, data misses to
- * obp range do not use vpte). If so, go back directly to the faulting address.
- * This is because we want to read the tpc, otherwise we have no way of knowing
- * the 8k aligned faulting address if we are using >8k kernel pagesize. This
- * also ensures no vpte range addresses are dropped into tlb while obp is
- * executing (see inherit_locked_prom_mappings() rant).
- */
-sparc64_vpte_nucleus:
-	/* Note that kvmap below has verified that the address is
-	 * in the range MODULES_VADDR --> VMALLOC_END already.  So
-	 * here we need only check if it is an OBP address or not.
-	 */
-	sethi		%hi(LOW_OBP_ADDRESS), %g5
-	cmp		%g4, %g5
-	blu,pn		%xcc, sparc64_vpte_patchme1
-	 mov		0x1, %g5
-	sllx		%g5, 32, %g5
-	cmp		%g4, %g5
-	blu,pn		%xcc, obp_iaddr_patch
-	 nop
-
-	/* These two instructions are patched by paginig_init().  */
-sparc64_vpte_patchme1:
-	sethi		%hi(0), %g5
-sparc64_vpte_patchme2:
-	or		%g5, %lo(0), %g5
-
-	/* With kernel PGD in %g5, branch back into dtlb_backend.  */
-	ba,pt		%xcc, sparc64_kpte_continue
-	 andn		%g1, 0x3, %g1	/* Finish PMD offset adjustment.  */
-
-vpte_noent:
-	/* Restore previous TAG_ACCESS, %g5 is zero, and we will
-	 * skip over the trap instruction so that the top level
-	 * TLB miss handler will thing this %g5 value is just an
-	 * invalid PTE, thus branching to full fault processing.
-	 */
-	mov		TLB_SFSR, %g1
-	stxa		%g4, [%g1 + %g1] ASI_DMMU
-	done
-
-	.globl		obp_iaddr_patch
-obp_iaddr_patch:
-	/* These two instructions patched by inherit_prom_mappings().  */
-	sethi		%hi(0), %g5
-	or		%g5, %lo(0), %g5
-
-	/* Behave as if we are at TL0.  */
-	wrpr		%g0, 1, %tl
-	rdpr		%tpc, %g4	/* Find original faulting iaddr */
-	srlx		%g4, 13, %g4	/* Throw out context bits */
-	sllx		%g4, 13, %g4	/* g4 has vpn + ctx0 now */
-
-	/* Restore previous TAG_ACCESS.  */
-	mov		TLB_SFSR, %g1
-	stxa		%g4, [%g1 + %g1] ASI_IMMU
-
-	/* Get PMD offset.  */
-	srlx		%g4, 23, %g6
-	and		%g6, 0x7ff, %g6
-	sllx		%g6, 2, %g6
-
-	/* Load PMD, is it valid?  */
-	lduwa		[%g5 + %g6] ASI_PHYS_USE_EC, %g5
-	brz,pn		%g5, longpath
-	 sllx		%g5, 11, %g5
-
-	/* Get PTE offset.  */
-	srlx		%g4, 13, %g6
-	and		%g6, 0x3ff, %g6
-	sllx		%g6, 3, %g6
-
-	/* Load PTE.  */
-	ldxa		[%g5 + %g6] ASI_PHYS_USE_EC, %g5
-	brgez,pn	%g5, longpath
-	 nop
-
-	/* TLB load and return from trap.  */
-	stxa		%g5, [%g0] ASI_ITLB_DATA_IN
-	retry
-
-	.globl		obp_daddr_patch
-obp_daddr_patch:
-	/* These two instructions patched by inherit_prom_mappings().  */
-	sethi		%hi(0), %g5
-	or		%g5, %lo(0), %g5
-
-	/* Get PMD offset.  */
-	srlx		%g4, 23, %g6
-	and		%g6, 0x7ff, %g6
-	sllx		%g6, 2, %g6
-
-	/* Load PMD, is it valid?  */
-	lduwa		[%g5 + %g6] ASI_PHYS_USE_EC, %g5
-	brz,pn		%g5, longpath
-	 sllx		%g5, 11, %g5
-
-	/* Get PTE offset.  */
-	srlx		%g4, 13, %g6
-	and		%g6, 0x3ff, %g6
-	sllx		%g6, 3, %g6
-
-	/* Load PTE.  */
-	ldxa		[%g5 + %g6] ASI_PHYS_USE_EC, %g5
-	brgez,pn	%g5, longpath
-	 nop
-
-	/* TLB load and return from trap.  */
-	stxa		%g5, [%g0] ASI_DTLB_DATA_IN
-	retry
-
-/*
- * On a first level data miss, check whether this is to the OBP range (note
- * that such accesses can be made by prom, as well as by kernel using
- * prom_getproperty on "address"), and if so, do not use vpte access ...
- * rather, use information saved during inherit_prom_mappings() using 8k
- * pagesize.
- */
-	.align		32
-kvmap:
-	sethi		%hi(MODULES_VADDR), %g5
-	cmp		%g4, %g5
-	blu,pn		%xcc, longpath
-	 mov		(VMALLOC_END >> 24), %g5
-	sllx		%g5, 24, %g5
-	cmp		%g4, %g5
-	bgeu,pn		%xcc, longpath
-	 nop
-
-kvmap_check_obp:
-	sethi		%hi(LOW_OBP_ADDRESS), %g5
-	cmp		%g4, %g5
-	blu,pn		%xcc, kvmap_vmalloc_addr
-	 mov		0x1, %g5
-	sllx		%g5, 32, %g5
-	cmp		%g4, %g5
-	blu,pn		%xcc, obp_daddr_patch
-	 nop
-
-kvmap_vmalloc_addr:
-	/* If we get here, a vmalloc addr was accessed, load kernel VPTE.  */
-	ldxa		[%g3 + %g6] ASI_N, %g5
-	brgez,pn	%g5, longpath
-	 nop
-
-	/* PTE is valid, load into TLB and return from trap.  */
-	stxa		%g5, [%g0] ASI_DTLB_DATA_IN	! Reload TLB
-	retry
-
 	/* This is trivial with the new code... */
 	.globl		do_fpdis
 do_fpdis:

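The obp_iaddr_patch/obp_daddr_patch bodies removed above move verbatim into ktlb.S. They perform a two-level table walk with 8K pages: bits 33..23 of the virtual address index 32-bit PMD entries, bits 22..13 index 64-bit PTEs, and a PTE is valid only when its sign bit is set. A hedged C sketch of that walk; phys_load32/phys_load64 are invented stand-ins for the lduwa/ldxa ASI_PHYS_USE_EC accesses:

/* Sketch of the obp_*addr_patch two-level walk; helper names are invented. */
extern unsigned int  phys_load32(unsigned long paddr); /* lduwa ASI_PHYS_USE_EC */
extern unsigned long phys_load64(unsigned long paddr); /* ldxa  ASI_PHYS_USE_EC */

static unsigned long obp_pte_lookup(unsigned long pgd_paddr, unsigned long va)
{
	unsigned long pmd_off = ((va >> 23) & 0x7ff) << 2; /* 32-bit PMD slots */
	unsigned long pmd = phys_load32(pgd_paddr + pmd_off);
	unsigned long pte_off, pte;

	if (!pmd)
		return 0;                  /* brz,pn -> longpath */

	pte_off = ((va >> 13) & 0x3ff) << 3;               /* 64-bit PTE slots */
	pte = phys_load64((pmd << 11) + pte_off);          /* sllx %g5, 11 */

	if ((long)pte >= 0)
		return 0;                  /* brgez,pn -> longpath: invalid */
	return pte;                        /* valid TTE, ready for the TLB */
}
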
+ 1 - 0
arch/sparc64/kernel/head.S

@@ -762,6 +762,7 @@ bootup_user_stack_end:
 swapper_pg_dir:
 	.word	0
 
+#include "ktlb.S"
 #include "etrap.S"
 #include "rtrap.S"
 #include "winfixup.S"

+ 174 - 0
arch/sparc64/kernel/ktlb.S

@@ -0,0 +1,174 @@
+/* arch/sparc64/kernel/ktlb.S: Kernel mapping TLB miss handling.
+ *
+ * Copyright (C) 1995, 1997, 2005 David S. Miller <davem@davemloft.net>
+ * Copyright (C) 1996 Eddie C. Dost        (ecd@brainaid.de)
+ * Copyright (C) 1996 Miguel de Icaza      (miguel@nuclecu.unam.mx)
+ * Copyright (C) 1996,98,99 Jakub Jelinek  (jj@sunsite.mff.cuni.cz)
+ */
+
+#include <linux/config.h>
+#include <asm/head.h>
+#include <asm/asi.h>
+#include <asm/page.h>
+#include <asm/pgtable.h>
+
+	.text
+	.align		32
+
+	.globl		sparc64_vpte_patchme1
+	.globl		sparc64_vpte_patchme2
+/*
+ * On a second level vpte miss, check whether the original fault is to the OBP 
+ * range (note that this is only possible for instruction miss, data misses to
+ * obp range do not use vpte). If so, go back directly to the faulting address.
+ * This is because we want to read the tpc, otherwise we have no way of knowing
+ * the 8k aligned faulting address if we are using >8k kernel pagesize. This
+ * also ensures no vpte range addresses are dropped into tlb while obp is
+ * executing (see inherit_locked_prom_mappings() rant).
+ */
+sparc64_vpte_nucleus:
+	/* Note that kvmap below has verified that the address is
+	 * in the range MODULES_VADDR --> VMALLOC_END already.  So
+	 * here we need only check if it is an OBP address or not.
+	 */
+	sethi		%hi(LOW_OBP_ADDRESS), %g5
+	cmp		%g4, %g5
+	blu,pn		%xcc, sparc64_vpte_patchme1
+	 mov		0x1, %g5
+	sllx		%g5, 32, %g5
+	cmp		%g4, %g5
+	blu,pn		%xcc, obp_iaddr_patch
+	 nop
+
+	/* These two instructions are patched by paging_init().  */
+sparc64_vpte_patchme1:
+	sethi		%hi(0), %g5
+sparc64_vpte_patchme2:
+	or		%g5, %lo(0), %g5
+
+	/* With kernel PGD in %g5, branch back into dtlb_backend.  */
+	ba,pt		%xcc, sparc64_kpte_continue
+	 andn		%g1, 0x3, %g1	/* Finish PMD offset adjustment.  */
+
+vpte_noent:
+	/* Restore previous TAG_ACCESS, %g5 is zero, and we will
+	 * skip over the trap instruction so that the top level
+	 * TLB miss handler will think this %g5 value is just an
+	 * invalid PTE, thus branching to full fault processing.
+	 */
+	mov		TLB_SFSR, %g1
+	stxa		%g4, [%g1 + %g1] ASI_DMMU
+	done
+
+	.globl		obp_iaddr_patch
+obp_iaddr_patch:
+	/* These two instructions patched by inherit_prom_mappings().  */
+	sethi		%hi(0), %g5
+	or		%g5, %lo(0), %g5
+
+	/* Behave as if we are at TL0.  */
+	wrpr		%g0, 1, %tl
+	rdpr		%tpc, %g4	/* Find original faulting iaddr */
+	srlx		%g4, 13, %g4	/* Throw out context bits */
+	sllx		%g4, 13, %g4	/* g4 has vpn + ctx0 now */
+
+	/* Restore previous TAG_ACCESS.  */
+	mov		TLB_SFSR, %g1
+	stxa		%g4, [%g1 + %g1] ASI_IMMU
+
+	/* Get PMD offset.  */
+	srlx		%g4, 23, %g6
+	and		%g6, 0x7ff, %g6
+	sllx		%g6, 2, %g6
+
+	/* Load PMD, is it valid?  */
+	lduwa		[%g5 + %g6] ASI_PHYS_USE_EC, %g5
+	brz,pn		%g5, longpath
+	 sllx		%g5, 11, %g5
+
+	/* Get PTE offset.  */
+	srlx		%g4, 13, %g6
+	and		%g6, 0x3ff, %g6
+	sllx		%g6, 3, %g6
+
+	/* Load PTE.  */
+	ldxa		[%g5 + %g6] ASI_PHYS_USE_EC, %g5
+	brgez,pn	%g5, longpath
+	 nop
+
+	/* TLB load and return from trap.  */
+	stxa		%g5, [%g0] ASI_ITLB_DATA_IN
+	retry
+
+	.globl		obp_daddr_patch
+obp_daddr_patch:
+	/* These two instructions patched by inherit_prom_mappings().  */
+	sethi		%hi(0), %g5
+	or		%g5, %lo(0), %g5
+
+	/* Get PMD offset.  */
+	srlx		%g4, 23, %g6
+	and		%g6, 0x7ff, %g6
+	sllx		%g6, 2, %g6
+
+	/* Load PMD, is it valid?  */
+	lduwa		[%g5 + %g6] ASI_PHYS_USE_EC, %g5
+	brz,pn		%g5, longpath
+	 sllx		%g5, 11, %g5
+
+	/* Get PTE offset.  */
+	srlx		%g4, 13, %g6
+	and		%g6, 0x3ff, %g6
+	sllx		%g6, 3, %g6
+
+	/* Load PTE.  */
+	ldxa		[%g5 + %g6] ASI_PHYS_USE_EC, %g5
+	brgez,pn	%g5, longpath
+	 nop
+
+	/* TLB load and return from trap.  */
+	stxa		%g5, [%g0] ASI_DTLB_DATA_IN
+	retry
+
+/*
+ * On a first level data miss, check whether this is to the OBP range (note
+ * that such accesses can be made by prom, as well as by kernel using
+ * prom_getproperty on "address"), and if so, do not use vpte access ...
+ * rather, use information saved during inherit_prom_mappings() using 8k
+ * pagesize.
+ */
+	.align		32
+kvmap:
+	brlz,pt		%g4, kvmap_load
+	 xor		%g2, %g4, %g5
+
+kvmap_nonlinear:
+	sethi		%hi(MODULES_VADDR), %g5
+	cmp		%g4, %g5
+	blu,pn		%xcc, longpath
+	 mov		(VMALLOC_END >> 24), %g5
+	sllx		%g5, 24, %g5
+	cmp		%g4, %g5
+	bgeu,pn		%xcc, longpath
+	 nop
+
+kvmap_check_obp:
+	sethi		%hi(LOW_OBP_ADDRESS), %g5
+	cmp		%g4, %g5
+	blu,pn		%xcc, kvmap_vmalloc_addr
+	 mov		0x1, %g5
+	sllx		%g5, 32, %g5
+	cmp		%g4, %g5
+	blu,pn		%xcc, obp_daddr_patch
+	 nop
+
+kvmap_vmalloc_addr:
+	/* If we get here, a vmalloc addr was accessed, load kernel VPTE.  */
+	ldxa		[%g3 + %g6] ASI_N, %g5
+	brgez,pn	%g5, longpath
+	 nop
+
+kvmap_load:
+	/* PTE is valid, load into TLB and return from trap.  */
+	stxa		%g5, [%g0] ASI_DTLB_DATA_IN	! Reload TLB
+	retry
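
A note on the OBP test: kvmap_check_obp (and the identical sequence in sparc64_vpte_nucleus) treats an address as OBP-mapped when it lies between LOW_OBP_ADDRESS and 4GB. A minimal C sketch of that predicate, mirroring the two cmp/blu pairs:

/* Sketch: mirrors the cmp/blu pairs in kvmap_check_obp. */
static int is_obp_range(unsigned long va)
{
	return va >= LOW_OBP_ADDRESS && va < (1UL << 32); /* mov 0x1; sllx 32 */
}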
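
Putting it together, the rewritten kvmap reads as the following C-style control flow. This is a hedged sketch reusing the helpers from the earlier sketches; vpte_lookup is an invented stand-in for the ldxa [%g3 + %g6] ASI_N load of the kernel VPTE:

/* Illustrative control flow of kvmap in ktlb.S; not real kernel code. */
extern unsigned long tte_magic;                  /* %g2 at trap time */
extern void tlb_load_and_retry(unsigned long);   /* stxa + retry */
extern void longpath(void);                      /* full fault processing */
extern void obp_daddr_patch(unsigned long);      /* 8K OBP walk, sketched above */
extern unsigned long vpte_lookup(unsigned long); /* ldxa [%g3 + %g6] ASI_N */

static void kvmap_model(unsigned long vaddr)
{
	unsigned long pte;

	if ((long)vaddr < 0) {                   /* kvmap: brlz fast path */
		tlb_load_and_retry(tte_magic ^ vaddr);
		return;
	}
	/* kvmap_nonlinear: only MODULES_VADDR..VMALLOC_END is handled */
	if (vaddr < MODULES_VADDR || vaddr >= VMALLOC_END) {
		longpath();
		return;
	}
	if (is_obp_range(vaddr)) {               /* kvmap_check_obp */
		obp_daddr_patch(vaddr);
		return;
	}
	pte = vpte_lookup(vaddr);                /* kvmap_vmalloc_addr */
	if ((long)pte >= 0) {                    /* invalid -> full fault */
		longpath();
		return;
	}
	tlb_load_and_retry(pte);                 /* kvmap_load */
}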