@@ -156,6 +156,12 @@
  * Please note that the implementation of these, and the required
  * effects are cache-type (VIVT/VIPT/PIPT) specific.
  *
+ *	flush_icache_all()
+ *
+ *		Unconditionally clean and invalidate the entire icache.
+ *		Currently only needed for cache-v6.S and cache-v7.S, see
+ *		__flush_icache_all for the generic implementation.
+ *
  *	flush_kern_all()
  *
  *		Unconditionally clean and invalidate the entire cache.
@@ -206,6 +212,7 @@
  */
 
 struct cpu_cache_fns {
+	void (*flush_icache_all)(void);
	void (*flush_kern_all)(void);
	void (*flush_user_all)(void);
	void (*flush_user_range)(unsigned long, unsigned long, unsigned int);
@@ -227,6 +234,7 @@ struct cpu_cache_fns {
 
 extern struct cpu_cache_fns cpu_cache;
 
+#define __cpuc_flush_icache_all		cpu_cache.flush_icache_all
 #define __cpuc_flush_kern_all		cpu_cache.flush_kern_all
 #define __cpuc_flush_user_all		cpu_cache.flush_user_all
 #define __cpuc_flush_user_range		cpu_cache.flush_user_range
@@ -246,6 +254,7 @@ extern struct cpu_cache_fns cpu_cache;
 
 #else
 
+#define __cpuc_flush_icache_all		__glue(_CACHE,_flush_icache_all)
 #define __cpuc_flush_kern_all		__glue(_CACHE,_flush_kern_cache_all)
 #define __cpuc_flush_user_all		__glue(_CACHE,_flush_user_cache_all)
 #define __cpuc_flush_user_range		__glue(_CACHE,_flush_user_cache_range)
@@ -253,6 +262,7 @@ extern struct cpu_cache_fns cpu_cache;
 #define __cpuc_coherent_user_range	__glue(_CACHE,_coherent_user_range)
 #define __cpuc_flush_dcache_area	__glue(_CACHE,_flush_kern_dcache_area)
 
+extern void __cpuc_flush_icache_all(void);
 extern void __cpuc_flush_kern_all(void);
 extern void __cpuc_flush_user_all(void);
 extern void __cpuc_flush_user_range(unsigned long, unsigned long, unsigned int);
@@ -291,6 +301,37 @@ extern void copy_to_user_page(struct vm_area_struct *, struct page *,
 /*
  * Convert calls to our calling convention.
  */
+
+/* Invalidate I-cache */
+#define __flush_icache_all_generic()					\
+	asm("mcr	p15, 0, %0, c7, c5, 0"				\
+	    : : "r" (0));
+
+/* Invalidate I-cache inner shareable */
+#define __flush_icache_all_v7_smp()					\
+	asm("mcr	p15, 0, %0, c7, c1, 0"				\
+	    : : "r" (0));
+
+/*
+ * Optimized __flush_icache_all for the common cases. Note that UP ARMv7
+ * will fall through to use __flush_icache_all_generic.
+ */
+#if (defined(CONFIG_CPU_V7) && defined(CONFIG_CPU_V6)) ||		\
+	defined(CONFIG_SMP_ON_UP)
+#define __flush_icache_preferred	__cpuc_flush_icache_all
+#elif __LINUX_ARM_ARCH__ >= 7 && defined(CONFIG_SMP)
+#define __flush_icache_preferred	__flush_icache_all_v7_smp
+#elif __LINUX_ARM_ARCH__ == 6 && defined(CONFIG_ARM_ERRATA_411920)
+#define __flush_icache_preferred	__cpuc_flush_icache_all
+#else
+#define __flush_icache_preferred	__flush_icache_all_generic
+#endif
+
+static inline void __flush_icache_all(void)
+{
+	__flush_icache_preferred();
+}
+
 #define flush_cache_all()		__cpuc_flush_kern_all()
 
 static inline void vivt_flush_cache_mm(struct mm_struct *mm)
@@ -366,21 +407,6 @@ extern void flush_cache_page(struct vm_area_struct *vma, unsigned long user_addr
 #define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 1
 extern void flush_dcache_page(struct page *);
 
-static inline void __flush_icache_all(void)
-{
-#ifdef CONFIG_ARM_ERRATA_411920
-	extern void v6_icache_inval_all(void);
-	v6_icache_inval_all();
-#elif defined(CONFIG_SMP) && __LINUX_ARM_ARCH__ >= 7
-	asm("mcr	p15, 0, %0, c7, c1, 0	@ invalidate I-cache inner shareable\n"
-	    :
-	    : "r" (0));
-#else
-	asm("mcr	p15, 0, %0, c7, c5, 0	@ invalidate I-cache\n"
-	    :
-	    : "r" (0));
-#endif
-}
 static inline void flush_kernel_vmap_range(void *addr, int size)
 {
 	if ((cache_is_vivt() || cache_is_vipt_aliasing()))