@@ -44,13 +44,12 @@
  *  primary cache.
  *  o doesn't disable interrupts on the local CPU
  */
-static inline void r4k_on_each_cpu(void (*func) (void *info), void *info,
-                                   int wait)
+static inline void r4k_on_each_cpu(void (*func) (void *info), void *info)
 {
 	preempt_disable();
 
 #if !defined(CONFIG_MIPS_MT_SMP) && !defined(CONFIG_MIPS_MT_SMTC)
-	smp_call_function(func, info, wait);
+	smp_call_function(func, info, 1);
 #endif
 	func(info);
 	preempt_enable();
@@ -364,7 +363,7 @@ static inline void local_r4k___flush_cache_all(void * args)
 
 static void r4k___flush_cache_all(void)
 {
-	r4k_on_each_cpu(local_r4k___flush_cache_all, NULL, 1);
+	r4k_on_each_cpu(local_r4k___flush_cache_all, NULL);
 }
 
 static inline int has_valid_asid(const struct mm_struct *mm)
@@ -411,7 +410,7 @@ static void r4k_flush_cache_range(struct vm_area_struct *vma,
 	int exec = vma->vm_flags & VM_EXEC;
 
 	if (cpu_has_dc_aliases || (exec && !cpu_has_ic_fills_f_dc))
-		r4k_on_each_cpu(local_r4k_flush_cache_range, vma, 1);
+		r4k_on_each_cpu(local_r4k_flush_cache_range, vma);
 }
 
 static inline void local_r4k_flush_cache_mm(void * args)
@@ -443,7 +442,7 @@ static void r4k_flush_cache_mm(struct mm_struct *mm)
 	if (!cpu_has_dc_aliases)
 		return;
 
-	r4k_on_each_cpu(local_r4k_flush_cache_mm, mm, 1);
+	r4k_on_each_cpu(local_r4k_flush_cache_mm, mm);
 }
 
 struct flush_cache_page_args {
@@ -535,7 +534,7 @@ static void r4k_flush_cache_page(struct vm_area_struct *vma,
 	args.addr = addr;
 	args.pfn = pfn;
 
-	r4k_on_each_cpu(local_r4k_flush_cache_page, &args, 1);
+	r4k_on_each_cpu(local_r4k_flush_cache_page, &args);
 }
 
 static inline void local_r4k_flush_data_cache_page(void * addr)
@@ -548,8 +547,7 @@ static void r4k_flush_data_cache_page(unsigned long addr)
 	if (in_atomic())
 		local_r4k_flush_data_cache_page((void *)addr);
 	else
-		r4k_on_each_cpu(local_r4k_flush_data_cache_page, (void *) addr,
-				1);
+		r4k_on_each_cpu(local_r4k_flush_data_cache_page, (void *) addr);
 }
 
 struct flush_icache_range_args {
@@ -590,7 +588,7 @@ static void r4k_flush_icache_range(unsigned long start, unsigned long end)
 	args.start = start;
 	args.end = end;
 
-	r4k_on_each_cpu(local_r4k_flush_icache_range_ipi, &args, 1);
+	r4k_on_each_cpu(local_r4k_flush_icache_range_ipi, &args);
 	instruction_hazard();
 }
 
@@ -711,7 +709,7 @@ static void local_r4k_flush_cache_sigtramp(void * arg)
 
 static void r4k_flush_cache_sigtramp(unsigned long addr)
 {
-	r4k_on_each_cpu(local_r4k_flush_cache_sigtramp, (void *) addr, 1);
+	r4k_on_each_cpu(local_r4k_flush_cache_sigtramp, (void *) addr);
 }
 
 static void r4k_flush_icache_all(void)
|