|
@@ -318,6 +318,32 @@ static void flush_tlb_mm_ipi(void *mm)
|
|
local_flush_tlb_mm((struct mm_struct *)mm);
|
|
local_flush_tlb_mm((struct mm_struct *)mm);
|
|
}
|
|
}
|
|
|
|
|
|
|
|
/*
 * Special Variant of smp_call_function for use by TLB functions:
 *
 *  o No return value
 *  o collapses to normal function call on UP kernels
 *  o collapses to normal function call on systems with a single shared
 *    primary cache.
 *  o CONFIG_MIPS_MT_SMTC currently implies there is only one physical core.
 */
static inline void smp_on_other_tlbs(void (*func) (void *info), void *info)
{
	/*
	 * Under SMTC all TCs share one physical core (and thus one TLB),
	 * so there are no "other" TLBs to reach — this collapses to a no-op.
	 */
#ifndef CONFIG_MIPS_MT_SMTC
	smp_call_function(func, info, 1, 1);
#endif
}
|
|
|
|
/*
 * Run @func against every TLB in the system: IPI it to the remote TLBs,
 * then invoke it on the local one.  Preemption is disabled across both
 * steps so the calling task cannot migrate between the remote broadcast
 * and the local invocation (which would flush the wrong "local" TLB).
 */
static inline void smp_on_each_tlb(void (*func) (void *info), void *info)
{
	preempt_disable();

	smp_on_other_tlbs(func, info);
	func(info);

	preempt_enable();
}
|
|
/*
|
|
/*
|
|
* The following tlb flush calls are invoked when old translations are
|
|
* The following tlb flush calls are invoked when old translations are
|
|
* being torn down, or pte attributes are changing. For single threaded
|
|
* being torn down, or pte attributes are changing. For single threaded
|
|
@@ -336,7 +362,7 @@ void flush_tlb_mm(struct mm_struct *mm)
|
|
preempt_disable();
|
|
preempt_disable();
|
|
|
|
|
|
if ((atomic_read(&mm->mm_users) != 1) || (current->mm != mm)) {
|
|
if ((atomic_read(&mm->mm_users) != 1) || (current->mm != mm)) {
|
|
- smp_call_function(flush_tlb_mm_ipi, (void *)mm, 1, 1);
|
|
|
|
|
|
+ smp_on_other_tlbs(flush_tlb_mm_ipi, (void *)mm);
|
|
} else {
|
|
} else {
|
|
int i;
|
|
int i;
|
|
for (i = 0; i < num_online_cpus(); i++)
|
|
for (i = 0; i < num_online_cpus(); i++)
|
|
@@ -372,7 +398,7 @@ void flush_tlb_range(struct vm_area_struct *vma, unsigned long start, unsigned l
|
|
fd.vma = vma;
|
|
fd.vma = vma;
|
|
fd.addr1 = start;
|
|
fd.addr1 = start;
|
|
fd.addr2 = end;
|
|
fd.addr2 = end;
|
|
- smp_call_function(flush_tlb_range_ipi, (void *)&fd, 1, 1);
|
|
|
|
|
|
+ smp_on_other_tlbs(flush_tlb_range_ipi, (void *)&fd);
|
|
} else {
|
|
} else {
|
|
int i;
|
|
int i;
|
|
for (i = 0; i < num_online_cpus(); i++)
|
|
for (i = 0; i < num_online_cpus(); i++)
|
|
@@ -414,7 +440,7 @@ void flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
|
|
|
|
|
|
fd.vma = vma;
|
|
fd.vma = vma;
|
|
fd.addr1 = page;
|
|
fd.addr1 = page;
|
|
- smp_call_function(flush_tlb_page_ipi, (void *)&fd, 1, 1);
|
|
|
|
|
|
+ smp_on_other_tlbs(flush_tlb_page_ipi, (void *)&fd);
|
|
} else {
|
|
} else {
|
|
int i;
|
|
int i;
|
|
for (i = 0; i < num_online_cpus(); i++)
|
|
for (i = 0; i < num_online_cpus(); i++)
|
|
@@ -434,8 +460,7 @@ static void flush_tlb_one_ipi(void *info)
|
|
|
|
|
|
void flush_tlb_one(unsigned long vaddr)
|
|
void flush_tlb_one(unsigned long vaddr)
|
|
{
|
|
{
|
|
- smp_call_function(flush_tlb_one_ipi, (void *) vaddr, 1, 1);
|
|
|
|
- local_flush_tlb_one(vaddr);
|
|
|
|
|
|
+ smp_on_each_tlb(flush_tlb_one_ipi, (void *) vaddr);
|
|
}
|
|
}
|
|
|
|
|
|
static DEFINE_PER_CPU(struct cpu, cpu_devices);
|
|
static DEFINE_PER_CPU(struct cpu, cpu_devices);
|