
Merge branch 'release' of git://git.kernel.org/pub/scm/linux/kernel/git/aegl/linux-2.6

* 'release' of git://git.kernel.org/pub/scm/linux/kernel/git/aegl/linux-2.6:
  [IA64] fix allmodconfig compilation breakage.
  [IA64] smp_flush_tlb_mm() should only send IPI's to cpus in cpu_vm_mask
  [IA64] export smp_send_reschedule
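
The last bullet makes smp_send_reschedule() callable from GPL-licensed modules. A minimal, hypothetical module sketch of what the export enables (the target CPU and module names are illustrative, not taken from the commit):

  #include <linux/module.h>
  #include <linux/smp.h>
  #include <linux/cpumask.h>

  /* Hypothetical example: with EXPORT_SYMBOL_GPL(smp_send_reschedule)
   * in place, a GPL module can kick a remote CPU into the scheduler,
   * e.g. after queuing work that CPU should notice promptly. */
  static int __init kick_demo_init(void)
  {
  	int cpu = 1;			/* illustrative target CPU */

  	if (cpu_online(cpu))
  		smp_send_reschedule(cpu);
  	return 0;
  }

  static void __exit kick_demo_exit(void)
  {
  }

  module_init(kick_demo_init);
  module_exit(kick_demo_exit);
  MODULE_LICENSE("GPL");		/* required to use a _GPL export */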
Linus Torvalds 16 years ago
parent
commit
608faf1ff2
2 changed files with 6 additions and 10 deletions
  1. arch/ia64/include/asm/paravirt_privop.h  +0 -2
  2. arch/ia64/kernel/smp.c  +6 -8

+ 0 - 2
arch/ia64/include/asm/paravirt_privop.h

@@ -445,7 +445,6 @@ paravirt_set_rr0_to_rr4(unsigned long val0, unsigned long val1,
 		register unsigned long ia64_intri_res asm ("r8");	\
 		register unsigned long __reg asm ("r8") = (reg);	\
 									\
-		BUILD_BUG_ON(!__builtin_constant_p(reg));		\
 		asm volatile (paravirt_alt_bundle(__PARAVIRT_BR,	\
 						  PARAVIRT_TYPE(GETREG) \
 						  + (reg))		\
@@ -464,7 +463,6 @@ paravirt_set_rr0_to_rr4(unsigned long val0, unsigned long val1,
 		register unsigned long ia64_clobber1 asm ("r8");	\
 		register unsigned long ia64_clobber2 asm ("r9");	\
 									\
-		BUILD_BUG_ON(!__builtin_constant_p(reg));		\
 		asm volatile (paravirt_alt_bundle(__PARAVIRT_BR,	\
 						  PARAVIRT_TYPE(SETREG) \
 						  + (reg))		\
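
For context on the hunks above: BUILD_BUG_ON(cond) turns a condition the compiler can prove non-zero into a compile-time error. A simplified sketch of the idiom that was dropped follows; the stand-in macro approximates the kernel's real one, and the reading of why the check broke allmodconfig is an assumption, not stated in the commit:

  /* Simplified stand-in for the kernel's BUILD_BUG_ON(): the array
   * size goes negative, so compilation fails, whenever `cond` is a
   * non-zero compile-time constant. */
  #define BUILD_BUG_ON_SKETCH(cond) ((void)sizeof(char[1 - 2 * !!(cond)]))

  /* The removed checks asserted that `reg` constant-folds at compile
   * time.  __builtin_constant_p() is not guaranteed to return 1 for
   * every value that ends up constant (it depends on optimization
   * level and inlining), so the assertion can fire spuriously on
   * some configurations. */
  #define GETREG_SKETCH(reg)					\
  ({								\
  	BUILD_BUG_ON_SKETCH(!__builtin_constant_p(reg));	\
  	(unsigned long)(reg);					\
  })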

+ 6 - 8
arch/ia64/kernel/smp.c

@@ -225,6 +225,7 @@ smp_send_reschedule (int cpu)
 {
 	platform_send_ipi(cpu, IA64_IPI_RESCHEDULE, IA64_IPI_DM_INT, 0);
 }
+EXPORT_SYMBOL_GPL(smp_send_reschedule);
 
 /*
  * Called with preemption disabled.
@@ -300,15 +301,12 @@ smp_flush_tlb_mm (struct mm_struct *mm)
 		return;
 	}
 
+	smp_call_function_mask(mm->cpu_vm_mask,
+		(void (*)(void *))local_finish_flush_tlb_mm, mm, 1);
+	local_irq_disable();
+	local_finish_flush_tlb_mm(mm);
+	local_irq_enable();
 	preempt_enable();
-	/*
-	 * We could optimize this further by using mm->cpu_vm_mask to track which CPUs
-	 * have been running in the address space.  It's not clear that this is worth the
-	 * trouble though: to avoid races, we have to raise the IPI on the target CPU
-	 * anyhow, and once a CPU is interrupted, the cost of local_flush_tlb_all() is
-	 * rather trivial.
-	 */
-	on_each_cpu((void (*)(void *))local_finish_flush_tlb_mm, mm, 1);
 }
 
 void arch_send_call_function_single_ipi(int cpu)
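
The smp_flush_tlb_mm() rewrite above follows a common pattern: IPI only the CPUs in mm->cpu_vm_mask (those that have run this address space) instead of every online CPU, then flush the local CPU directly, since smp_call_function_mask() of this era skips the calling CPU. A self-contained sketch of that pattern, assuming a 2.6.27-vintage API; run_on_mask_and_self() is a hypothetical helper, not kernel API:

  #include <linux/smp.h>
  #include <linux/irqflags.h>

  /* Hypothetical helper mirroring the pattern above: invoke
   * func(info) on every CPU in `mask` via IPI and wait for
   * completion, then run it locally with interrupts off, matching
   * the interrupt context the remote CPUs execute in.  The caller
   * must have preemption disabled so the local CPU cannot change
   * underneath us, as smp_flush_tlb_mm() does. */
  static void run_on_mask_and_self(cpumask_t mask,
  				 void (*func)(void *), void *info)
  {
  	smp_call_function_mask(mask, func, info, 1);	/* remote CPUs */
  	local_irq_disable();
  	func(info);					/* local CPU */
  	local_irq_enable();
  }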