Browse Source

sh: Centralize the CPU cache initialization routines.

This provides a central point for CPU cache initialization routines.
This replaces the antiquated p3_cache_init() method, which the vast
majority of CPUs never cared about.

Signed-off-by: Paul Mundt <lethal@linux-sh.org>
Paul Mundt, 16 years ago
parent
commit
ecba106058

+ 2 - 1
arch/sh/include/asm/cacheflush.h

@@ -12,7 +12,6 @@
  *
  * See arch/sh/kernel/cpu/init.c:cache_init().
  */
-#define p3_cache_init()				do { } while (0)
 #define flush_cache_all()			do { } while (0)
 #define flush_cache_mm(mm)			do { } while (0)
 #define flush_cache_dup_mm(mm)			do { } while (0)
@@ -78,5 +77,7 @@ void kunmap_coherent(void);
 
 #define PG_dcache_dirty	PG_arch_1
 
+void cpu_cache_init(void);
+
 #endif /* __KERNEL__ */
 #endif /* __ASM_SH_CACHEFLUSH_H */

+ 0 - 2
arch/sh/include/cpu-common/cpu/cacheflush.h

@@ -39,6 +39,4 @@
 #define flush_icache_user_range(vma,pg,adr,len)	do { } while (0)
 #define flush_cache_sigtramp(vaddr)		do { } while (0)
 
-#define p3_cache_init()				do { } while (0)
-
 #endif /* __ASM_CPU_SH2_CACHEFLUSH_H */

+ 0 - 1
arch/sh/include/cpu-sh2a/cpu/cacheflush.h

@@ -30,5 +30,4 @@ void flush_icache_range(unsigned long start, unsigned long end);
 #define flush_icache_user_range(vma,pg,adr,len)	do { } while (0)
 #define flush_cache_sigtramp(vaddr)		do { } while (0)
 
-#define p3_cache_init()				do { } while (0)
 #endif /* __ASM_CPU_SH2A_CACHEFLUSH_H */

+ 0 - 2
arch/sh/include/cpu-sh3/cpu/cacheflush.h

@@ -32,8 +32,6 @@ void flush_icache_page(struct vm_area_struct *vma, struct page *page);
 #define flush_cache_sigtramp(vaddr)		do { } while (0)
 #define flush_icache_user_range(vma,pg,adr,len)	do { } while (0)
 
-#define p3_cache_init()				do { } while (0)
-
 #else
 #include <cpu-common/cpu/cacheflush.h>
 #endif

+ 0 - 3
arch/sh/include/cpu-sh4/cpu/cacheflush.h

@@ -35,7 +35,4 @@ void flush_icache_user_range(struct vm_area_struct *vma, struct page *page,
 
 #define flush_icache_page(vma,pg)		do { } while (0)
 
-/* Initialization of P3 area for copy_user_page */
-void p3_cache_init(void);
-
 #endif /* __ASM_CPU_SH4_CACHEFLUSH_H */

+ 0 - 1
arch/sh/include/cpu-sh5/cpu/cacheflush.h

@@ -25,7 +25,6 @@ extern void flush_icache_user_range(struct vm_area_struct *vma,
 #define flush_dcache_mmap_unlock(mapping)	do { } while (0)
 
 #define flush_icache_page(vma, page)	do { } while (0)
-void p3_cache_init(void);
 
 #endif /* __ASSEMBLY__ */
 

+ 1 - 1
arch/sh/mm/cache-sh4.c

@@ -94,7 +94,7 @@ static void __init emit_cache_params(void)
 /*
  * SH-4 has virtually indexed and physically tagged cache.
  */
-void __init p3_cache_init(void)
+void __init sh4_cache_init(void)
 {
 	compute_alias(&boot_cpu_data.icache);
 	compute_alias(&boot_cpu_data.dcache);

+ 1 - 1
arch/sh/mm/cache-sh5.c

@@ -23,7 +23,7 @@
 /* Wired TLB entry for the D-cache */
 static unsigned long long dtlb_cache_slot;
 
-void __init p3_cache_init(void)
+void __init cpu_cache_init(void)
 {
 	/* Reserve a slot for dcache colouring in the DTLB */
 	dtlb_cache_slot	= sh64_get_wired_dtlb_entry();

+ 11 - 0
arch/sh/mm/cache.c

@@ -127,3 +127,14 @@ void __flush_anon_page(struct page *page, unsigned long vmaddr)
 			__flush_wback_region((void *)addr, PAGE_SIZE);
 	}
 }
+
+void __init cpu_cache_init(void)
+{
+	if ((boot_cpu_data.family == CPU_FAMILY_SH4) ||
+	    (boot_cpu_data.family == CPU_FAMILY_SH4A) ||
+	    (boot_cpu_data.family == CPU_FAMILY_SH4AL_DSP)) {
+		extern void __weak sh4_cache_init(void);
+
+		sh4_cache_init();
+	}
+}

+ 1 - 1
arch/sh/mm/init.c

@@ -230,7 +230,7 @@ void __init mem_init(void)
 		datasize >> 10,
 		initsize >> 10);
 
-	p3_cache_init();
+	cpu_cache_init();
 
 	/* Initialize the vDSO */
 	vsyscall_init();