@@ -35,47 +35,41 @@
 /* 32KB cache, 4kb PAGE sizes need to check bit 12 */
 #define CACHE_ALIAS 0x00001000
 
-extern void flush_cache_all(void);
-extern void flush_cache_mm(struct mm_struct *mm);
-extern void flush_cache_range(struct vm_area_struct *vma, unsigned long start,
-                              unsigned long end);
-extern void flush_cache_page(struct vm_area_struct *vma, unsigned long addr, unsigned long pfn);
-extern void flush_dcache_page(struct page *pg);
-extern void flush_icache_range(unsigned long start, unsigned long end);
-extern void flush_icache_page(struct vm_area_struct *vma, struct page *page);
-
-#define flush_dcache_mmap_lock(mapping) do { } while (0)
-#define flush_dcache_mmap_unlock(mapping) do { } while (0)
-
-/* SH3 has unified cache so no special action needed here */
-#define flush_cache_sigtramp(vaddr) do { } while (0)
-#define flush_page_to_ram(page) do { } while (0)
-#define flush_icache_user_range(vma,pg,adr,len) do { } while (0)
-
-#define p3_cache_init() do { } while (0)
-
 #define PG_mapped PG_arch_1
 
-/* We provide our own get_unmapped_area to avoid cache alias issue */
-#define HAVE_ARCH_UNMAPPED_AREA
-
+void flush_cache_all(void);
+void flush_cache_mm(struct mm_struct *mm);
+void flush_cache_range(struct vm_area_struct *vma, unsigned long start,
+                       unsigned long end);
+void flush_cache_page(struct vm_area_struct *vma, unsigned long addr, unsigned long pfn);
+void flush_dcache_page(struct page *pg);
+void flush_icache_range(unsigned long start, unsigned long end);
+void flush_icache_page(struct vm_area_struct *vma, struct page *page);
 #else
-
 #define flush_cache_all() do { } while (0)
 #define flush_cache_mm(mm) do { } while (0)
 #define flush_cache_range(vma, start, end) do { } while (0)
 #define flush_cache_page(vma, vmaddr, pfn) do { } while (0)
 #define flush_dcache_page(page) do { } while (0)
-#define flush_dcache_mmap_lock(mapping) do { } while (0)
-#define flush_dcache_mmap_unlock(mapping) do { } while (0)
 #define flush_icache_range(start, end) do { } while (0)
 #define flush_icache_page(vma,pg) do { } while (0)
-#define flush_icache_user_range(vma,pg,adr,len) do { } while (0)
+#endif
+
+#define flush_dcache_mmap_lock(mapping) do { } while (0)
+#define flush_dcache_mmap_unlock(mapping) do { } while (0)
+
+/* SH3 has unified cache so no special action needed here */
 #define flush_cache_sigtramp(vaddr) do { } while (0)
+#define flush_icache_user_range(vma,pg,adr,len) do { } while (0)
 
 #define p3_cache_init() do { } while (0)
 
-#endif
+/*
+ * We provide our own get_unmapped_area to avoid cache aliasing issues
+ * on SH7705 with a 32KB cache, and to page align addresses in the
+ * non-aliasing case.
+ */
+#define HAVE_ARCH_UNMAPPED_AREA
 
 #endif /* __ASM_CPU_SH3_CACHEFLUSH_H */
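
The CACHE_ALIAS mask at the top of the hunk is the heart of the problem this header deals with: the SH7705's 32KB cache indexes with bit 12, which lies above the 4KB page offset, so two virtual mappings of the same physical page can land in different cache lines. A minimal sketch of the colour check, assuming nothing beyond the mask itself; same_cache_colour and the sample addresses are illustrative, not part of the header:

#include <stdio.h>

/* Same value as the header's CACHE_ALIAS: bit 12 is the colour bit. */
#define CACHE_ALIAS 0x00001000UL

/*
 * Two virtual mappings of one physical page share cache lines only
 * when bit 12 agrees; otherwise the stale alias must be flushed.
 */
static int same_cache_colour(unsigned long v1, unsigned long v2)
{
	return ((v1 ^ v2) & CACHE_ALIAS) == 0;
}

int main(void)
{
	/* Mappings 0x1000 apart differ in bit 12 and would alias. */
	printf("%d\n", same_cache_colour(0x00400000UL, 0x00401000UL)); /* 0 */
	/* Mappings 0x2000 apart agree in bit 12: same colour. */
	printf("%d\n", same_cache_colour(0x00400000UL, 0x00402000UL)); /* 1 */
	return 0;
}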
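
The rewritten comment before HAVE_ARCH_UNMAPPED_AREA explains why the architecture supplies its own get_unmapped_area: by choosing mmap addresses whose colour bit matches the one implied by the file offset, every mapping of a given page keeps a single colour and never needs alias flushing. A hedged sketch of that colouring idea, not the actual arch/sh implementation; colour_align is a hypothetical helper, and PAGE_SHIFT of 12 is assumed from the 4KB page size:

#define PAGE_SHIFT  12                  /* assumed: 4KB pages */
#define CACHE_ALIAS 0x00001000UL

/*
 * Round addr up to the 2*CACHE_ALIAS alias distance, then force the
 * colour bit to the one implied by the file offset pgoff, so every
 * mapping of the same file page lands on a single cache colour.
 */
static unsigned long colour_align(unsigned long addr, unsigned long pgoff)
{
	unsigned long colour = (pgoff << PAGE_SHIFT) & CACHE_ALIAS;
	unsigned long base = (addr + 2 * CACHE_ALIAS - 1) & ~(2 * CACHE_ALIAS - 1);

	return base | colour;
}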