@@ -29,6 +29,27 @@
 #include <asm/war.h>
 #include <asm/cacheflush.h> /* for run_uncached() */
 
+
+/*
+ * Special Variant of smp_call_function for use by cache functions:
+ *
+ *  o No return value
+ *  o collapses to normal function call on UP kernels
+ *  o collapses to normal function call on systems with a single shared
+ *    primary cache.
+ */
+static inline void r4k_on_each_cpu(void (*func) (void *info), void *info,
+                                   int retry, int wait)
+{
+        preempt_disable();
+
+#if !defined(CONFIG_MIPS_MT_SMP) && !defined(CONFIG_MIPS_MT_SMTC)
+        smp_call_function(func, info, retry, wait);
+#endif
+        func(info);
+        preempt_enable();
+}
+
 /*
  * Must die.
  */
@@ -299,7 +320,7 @@ static void r4k_flush_cache_all(void)
         if (!cpu_has_dc_aliases)
                 return;
 
-        on_each_cpu(local_r4k_flush_cache_all, NULL, 1, 1);
+        r4k_on_each_cpu(local_r4k_flush_cache_all, NULL, 1, 1);
 }
 
 static inline void local_r4k___flush_cache_all(void * args)
@@ -320,7 +341,7 @@ static inline void local_r4k___flush_cache_all(void * args)
 
 static void r4k___flush_cache_all(void)
 {
-        on_each_cpu(local_r4k___flush_cache_all, NULL, 1, 1);
+        r4k_on_each_cpu(local_r4k___flush_cache_all, NULL, 1, 1);
 }
 
 static inline void local_r4k_flush_cache_range(void * args)
@@ -341,7 +362,7 @@ static inline void local_r4k_flush_cache_range(void * args)
 static void r4k_flush_cache_range(struct vm_area_struct *vma,
         unsigned long start, unsigned long end)
 {
-        on_each_cpu(local_r4k_flush_cache_range, vma, 1, 1);
+        r4k_on_each_cpu(local_r4k_flush_cache_range, vma, 1, 1);
 }
 
 static inline void local_r4k_flush_cache_mm(void * args)
@@ -370,7 +391,7 @@ static void r4k_flush_cache_mm(struct mm_struct *mm)
         if (!cpu_has_dc_aliases)
                 return;
 
-        on_each_cpu(local_r4k_flush_cache_mm, mm, 1, 1);
+        r4k_on_each_cpu(local_r4k_flush_cache_mm, mm, 1, 1);
 }
 
 struct flush_cache_page_args {
@@ -461,7 +482,7 @@ static void r4k_flush_cache_page(struct vm_area_struct *vma,
         args.addr = addr;
         args.pfn = pfn;
 
-        on_each_cpu(local_r4k_flush_cache_page, &args, 1, 1);
+        r4k_on_each_cpu(local_r4k_flush_cache_page, &args, 1, 1);
 }
 
 static inline void local_r4k_flush_data_cache_page(void * addr)
@@ -471,7 +492,7 @@ static inline void local_r4k_flush_data_cache_page(void * addr)
 
 static void r4k_flush_data_cache_page(unsigned long addr)
 {
-        on_each_cpu(local_r4k_flush_data_cache_page, (void *) addr, 1, 1);
+        r4k_on_each_cpu(local_r4k_flush_data_cache_page, (void *) addr, 1, 1);
 }
 
 struct flush_icache_range_args {
@@ -514,7 +535,7 @@ static void r4k_flush_icache_range(unsigned long start, unsigned long end)
         args.start = start;
         args.end = end;
 
-        on_each_cpu(local_r4k_flush_icache_range, &args, 1, 1);
+        r4k_on_each_cpu(local_r4k_flush_icache_range, &args, 1, 1);
         instruction_hazard();
 }
 
@@ -590,7 +611,7 @@ static void r4k_flush_icache_page(struct vm_area_struct *vma,
         args.vma = vma;
         args.page = page;
 
-        on_each_cpu(local_r4k_flush_icache_page, &args, 1, 1);
+        r4k_on_each_cpu(local_r4k_flush_icache_page, &args, 1, 1);
 }
 
 
@@ -689,7 +710,7 @@ static void local_r4k_flush_cache_sigtramp(void * arg)
 
 static void r4k_flush_cache_sigtramp(unsigned long addr)
 {
-        on_each_cpu(local_r4k_flush_cache_sigtramp, (void *) addr, 1, 1);
+        r4k_on_each_cpu(local_r4k_flush_cache_sigtramp, (void *) addr, 1, 1);
 }
 
 static void r4k_flush_icache_all(void)