
Merge branch 'release' of git://git.kernel.org/pub/scm/linux/kernel/git/aegl/linux-2.6

* 'release' of git://git.kernel.org/pub/scm/linux/kernel/git/aegl/linux-2.6:
  [IA64] ITC: Reduce rating for ITC clock if ITCs are drifty
  [IA64] SN2: Fix up sn2_rtc clock
  [IA64] Fix wrong access to irq_desc[] in iosapic_register_intr().
  [IA64] Fix possible race in destroy_and_reserve_irq()
  [IA64] Fix registered interrupt check
  [IA64] Remove a few duplicate includes
  [IA64] Allow smp_call_function_single() to current cpu
  [IA64] fix a few section mismatch warnings
Linus Torvalds
commit 1ed4395035

+ 0 - 1
arch/ia64/ia32/sys_ia32.c

@@ -34,7 +34,6 @@
 #include <linux/uio.h>
 #include <linux/nfs_fs.h>
 #include <linux/quota.h>
-#include <linux/syscalls.h>
 #include <linux/sunrpc/svc.h>
 #include <linux/nfsd/nfsd.h>
 #include <linux/nfsd/cache.h>

+ 10 - 9
arch/ia64/kernel/iosapic.c

@@ -142,7 +142,7 @@ struct iosapic_rte_info {
 static struct iosapic_intr_info {
 	struct list_head rtes;		/* RTEs using this vector (empty =>
 					 * not an IOSAPIC interrupt) */
-	int		count;		/* # of RTEs that shares this vector */
+	int		count;		/* # of registered RTEs */
 	u32		low32;		/* current value of low word of
 					 * Redirection table entry */
 	unsigned int	dest;		/* destination CPU physical ID */
@@ -313,7 +313,7 @@ mask_irq (unsigned int irq)
 	int rte_index;
 	struct iosapic_rte_info *rte;
 
-	if (list_empty(&iosapic_intr_info[irq].rtes))
+	if (!iosapic_intr_info[irq].count)
 		return;			/* not an IOSAPIC interrupt! */
 
 	/* set only the mask bit */
@@ -331,7 +331,7 @@ unmask_irq (unsigned int irq)
 	int rte_index;
 	struct iosapic_rte_info *rte;
 
-	if (list_empty(&iosapic_intr_info[irq].rtes))
+	if (!iosapic_intr_info[irq].count)
 		return;			/* not an IOSAPIC interrupt! */
 
 	low32 = iosapic_intr_info[irq].low32 &= ~IOSAPIC_MASK;
@@ -363,7 +363,7 @@ iosapic_set_affinity (unsigned int irq, cpumask_t mask)
 
 	dest = cpu_physical_id(first_cpu(mask));
 
-	if (list_empty(&iosapic_intr_info[irq].rtes))
+	if (!iosapic_intr_info[irq].count)
 		return;			/* not an IOSAPIC interrupt */
 
 	set_irq_affinity_info(irq, dest, redir);
@@ -542,7 +542,7 @@ iosapic_reassign_vector (int irq)
 {
 	int new_irq;
 
-	if (!list_empty(&iosapic_intr_info[irq].rtes)) {
+	if (iosapic_intr_info[irq].count) {
 		new_irq = create_irq();
 		if (new_irq < 0)
 			panic("%s: out of interrupt vectors!\n", __FUNCTION__);
@@ -560,7 +560,7 @@ iosapic_reassign_vector (int irq)
 	}
 }
 
-static struct iosapic_rte_info *iosapic_alloc_rte (void)
+static struct iosapic_rte_info * __init_refok iosapic_alloc_rte (void)
 {
 	int i;
 	struct iosapic_rte_info *rte;
@@ -677,7 +677,7 @@ get_target_cpu (unsigned int gsi, int irq)
 	 * In case of vector shared by multiple RTEs, all RTEs that
 	 * share the vector need to use the same destination CPU.
 	 */
-	if (!list_empty(&iosapic_intr_info[irq].rtes))
+	if (iosapic_intr_info[irq].count)
 		return iosapic_intr_info[irq].dest;
 
 	/*
@@ -794,8 +794,9 @@ iosapic_register_intr (unsigned int gsi,
 	err = register_intr(gsi, irq, IOSAPIC_LOWEST_PRIORITY,
 			    polarity, trigger);
 	if (err < 0) {
+		spin_unlock(&irq_desc[irq].lock);
 		irq = err;
-		goto unlock_all;
+		goto unlock_iosapic_lock;
 	}
 
 	/*
@@ -811,7 +812,7 @@ iosapic_register_intr (unsigned int gsi,
 	       gsi, (trigger == IOSAPIC_EDGE ? "edge" : "level"),
 	       (polarity == IOSAPIC_POL_HIGH ? "high" : "low"),
 	       cpu_logical_id(dest), dest, irq_to_vector(irq));
- unlock_all:
+
 	spin_unlock(&irq_desc[irq].lock);
  unlock_iosapic_lock:
 	spin_unlock_irqrestore(&iosapic_lock, flags);

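The iosapic_register_intr() hunks above fix an error path that jumped to the old unlock_all label after overwriting irq with the negative error code, so the spin_unlock(&irq_desc[irq].lock) at that label indexed irq_desc[] with the error value. Below is a standalone userspace sketch of that bug shape and of the fix; it is not kernel code, and every name in it (lock_table, do_register, register_buggy, register_fixed) is invented for illustration.

/* Illustration only: unlocking a lock indexed by a variable after that
 * variable has been overwritten with an error code. */
#include <pthread.h>
#include <stdio.h>

#define NLOCKS 16

static pthread_mutex_t lock_table[NLOCKS];

static int do_register(int idx)
{
	(void)idx;
	return -1;			/* pretend registration always fails */
}

/* Shape of the old error path: idx is overwritten before the unlock. */
static int register_buggy(int idx)
{
	int err;

	pthread_mutex_lock(&lock_table[idx]);
	err = do_register(idx);
	if (err < 0) {
		idx = err;		/* idx is now -1 ...           */
		goto unlock;		/* ... so we unlock lock_table[-1] */
	}
 unlock:
	pthread_mutex_unlock(&lock_table[idx]);
	return idx;
}

/* Shape of the fix: drop the lock while idx is still a valid index. */
static int register_fixed(int idx)
{
	int err;

	pthread_mutex_lock(&lock_table[idx]);
	err = do_register(idx);
	if (err < 0) {
		pthread_mutex_unlock(&lock_table[idx]);
		return err;
	}
	pthread_mutex_unlock(&lock_table[idx]);
	return idx;
}

int main(void)
{
	for (int i = 0; i < NLOCKS; i++)
		pthread_mutex_init(&lock_table[i], NULL);
	(void)register_buggy;		/* not called: it would corrupt the table */
	printf("register_fixed(3) -> %d\n", register_fixed(3));
	return 0;
}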
+ 6 - 11
arch/ia64/kernel/irq_ia64.c

@@ -101,15 +101,6 @@ int check_irq_used(int irq)
 	return -1;
 }
 
-static void reserve_irq(unsigned int irq)
-{
-	unsigned long flags;
-
-	spin_lock_irqsave(&vector_lock, flags);
-	irq_status[irq] = IRQ_RSVD;
-	spin_unlock_irqrestore(&vector_lock, flags);
-}
-
 static inline int find_unassigned_irq(void)
 {
 	int irq;
@@ -302,10 +293,14 @@ static cpumask_t vector_allocation_domain(int cpu)
 
 void destroy_and_reserve_irq(unsigned int irq)
 {
+	unsigned long flags;
+
 	dynamic_irq_cleanup(irq);
 
-	clear_irq_vector(irq);
-	reserve_irq(irq);
+	spin_lock_irqsave(&vector_lock, flags);
+	__clear_irq_vector(irq);
+	irq_status[irq] = IRQ_RSVD;
+	spin_unlock_irqrestore(&vector_lock, flags);
 }
 
 static int __reassign_irq_vector(int irq, int cpu)

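The destroy_and_reserve_irq() change above closes a window in the old code, which took and dropped vector_lock once inside clear_irq_vector() and again inside reserve_irq(); between the two acquisitions another CPU could claim the just-freed vector before it was marked IRQ_RSVD. The patch performs both steps under a single vector_lock critical section. A minimal userspace sketch of the same pattern follows; it is not kernel code and all names (slot_lock, slot_state, ...) are hypothetical.

/* Illustration only: splitting one state transition across two critical
 * sections leaves a window in which another thread can claim the slot. */
#include <pthread.h>

enum slot_state { SLOT_USED, SLOT_FREE, SLOT_RSVD };

static pthread_mutex_t slot_lock = PTHREAD_MUTEX_INITIALIZER;
static enum slot_state slot_state = SLOT_USED;

/* Racy shape of the old code: two independent lock/unlock pairs. */
static void destroy_and_reserve_racy(void)
{
	pthread_mutex_lock(&slot_lock);
	slot_state = SLOT_FREE;			/* like clear_irq_vector() */
	pthread_mutex_unlock(&slot_lock);
	/* window: another thread may see SLOT_FREE and allocate the slot */
	pthread_mutex_lock(&slot_lock);
	slot_state = SLOT_RSVD;			/* like the old reserve_irq() */
	pthread_mutex_unlock(&slot_lock);
}

/* Fixed shape: the whole transition happens under one critical section. */
static void destroy_and_reserve_fixed(void)
{
	pthread_mutex_lock(&slot_lock);
	slot_state = SLOT_FREE;
	slot_state = SLOT_RSVD;
	pthread_mutex_unlock(&slot_lock);
}

int main(void)
{
	(void)destroy_and_reserve_racy;		/* shown for contrast only */
	destroy_and_reserve_fixed();
	return slot_state == SLOT_RSVD ? 0 : 1;
}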
+ 11 - 6
arch/ia64/kernel/mca.c

@@ -1750,8 +1750,17 @@ format_mca_init_stack(void *mca_data, unsigned long offset,
 	strncpy(p->comm, type, sizeof(p->comm)-1);
 }
 
-/* Do per-CPU MCA-related initialization.  */
+/* Caller prevents this from being called after init */
+static void * __init_refok mca_bootmem(void)
+{
+	void *p;
 
+	p = alloc_bootmem(sizeof(struct ia64_mca_cpu) * NR_CPUS +
+	                  KERNEL_STACK_SIZE);
+	return (void *)ALIGN((unsigned long)p, KERNEL_STACK_SIZE);
+}
+
+/* Do per-CPU MCA-related initialization.  */
 void __cpuinit
 ia64_mca_cpu_init(void *cpu_data)
 {
@@ -1763,11 +1772,7 @@ ia64_mca_cpu_init(void *cpu_data)
 		int cpu;
 
 		first_time = 0;
-		mca_data = alloc_bootmem(sizeof(struct ia64_mca_cpu)
-					 * NR_CPUS + KERNEL_STACK_SIZE);
-		mca_data = (void *)(((unsigned long)mca_data +
-					KERNEL_STACK_SIZE - 1) &
-				(-KERNEL_STACK_SIZE));
+		mca_data = mca_bootmem();
 		for (cpu = 0; cpu < NR_CPUS; cpu++) {
 			format_mca_init_stack(mca_data,
 					offsetof(struct ia64_mca_cpu, mca_stack),

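Besides moving the allocation into mca_bootmem(), the hunks above replace the open-coded stack-size alignment with ALIGN(). For a power-of-two size, (x + size - 1) & -size and (x + size - 1) & ~(size - 1) produce the same value, which is what the kernel's ALIGN() macro computes. A small standalone check of that equivalence (KERNEL_STACK_SIZE is stood in by a local constant; not kernel code):

/* Verify the two alignment forms agree for a power-of-two size. */
#include <assert.h>
#include <stdio.h>

#define ALIGN_UP(x, a)	(((x) + (a) - 1) & ~((unsigned long)(a) - 1))

int main(void)
{
	const unsigned long stack_size = 1UL << 14;	/* stand-in for KERNEL_STACK_SIZE */

	for (unsigned long x = 0; x < 4 * stack_size; x += 777) {
		unsigned long old_way = (x + stack_size - 1) & (-stack_size);
		unsigned long new_way = ALIGN_UP(x, stack_size);

		assert(old_way == new_way);
		assert(new_way % stack_size == 0 && new_way >= x);
	}
	printf("alignment forms agree\n");
	return 0;
}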
+ 0 - 1
arch/ia64/kernel/setup.c

@@ -60,7 +60,6 @@
 #include <asm/smp.h>
 #include <asm/system.h>
 #include <asm/unistd.h>
-#include <asm/system.h>
 
 #if defined(CONFIG_SMP) && (IA64_CPU_SIZE > PAGE_SIZE)
 # error "struct cpuinfo_ia64 too big!"

+ 5 - 3
arch/ia64/kernel/smp.c

@@ -346,7 +346,7 @@ smp_flush_tlb_mm (struct mm_struct *mm)
 }
 
 /*
- * Run a function on another CPU
+ * Run a function on a specific CPU
  *  <func>	The function to run. This must be fast and non-blocking.
  *  <info>	An arbitrary pointer to pass to the function.
  *  <nonatomic>	Currently unused.
@@ -366,9 +366,11 @@ smp_call_function_single (int cpuid, void (*func) (void *info), void *info, int
 	int me = get_cpu(); /* prevent preemption and reschedule on another processor */
 
 	if (cpuid == me) {
-		printk(KERN_INFO "%s: trying to call self\n", __FUNCTION__);
+		local_irq_disable();
+		func(info);
+		local_irq_enable();
 		put_cpu();
-		return -EBUSY;
+		return 0;
 	}
 
 	data.func = func;

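With the smp.c change above, smp_call_function_single() no longer rejects the current CPU with -EBUSY; it runs the function locally with interrupts disabled and returns 0. A hypothetical kernel-style caller sketch follows (not part of this commit; flush_local_state and flush_cpu_state are invented names, and the snippet only builds inside a kernel tree of this era), showing that callers no longer need to special-case the local CPU.

#include <linux/smp.h>

static void flush_local_state(void *info)
{
	/* must be fast and non-blocking, exactly like a remote IPI handler */
}

static int flush_cpu_state(int cpu)
{
	/*
	 * Before this commit a caller had to handle "cpu == current CPU"
	 * itself, since smp_call_function_single() returned -EBUSY for it.
	 * Now the one call covers both the local and the remote case
	 * (nonatomic = 0, wait = 1).
	 */
	return smp_call_function_single(cpu, flush_local_state, NULL, 0, 1);
}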
+ 15 - 1
arch/ia64/kernel/time.c

@@ -240,7 +240,21 @@ ia64_init_itm (void)
 		if (!nojitter)
 			itc_jitter_data.itc_jitter = 1;
 #endif
-	}
+	} else
+		/*
+		 * ITC is drifty and we have not synchronized the ITCs in smpboot.c.
+		 * ITC values may fluctuate significantly between processors.
+		 * Clock should not be used for hrtimers. Mark itc as only
+		 * useful for boot and testing.
+		 *
+		 * Note that jitter compensation is off! There is no point of
+		 * synchronizing ITCs since they may be large differentials
+		 * that change over time.
+		 *
+		 * The only way to fix this would be to repeatedly sync the
+		 * ITCs. Until that time we have to avoid ITC.
+		 */
+		clocksource_itc.rating = 50;
 
 	/* Setup the CPU local timer tick */
 	ia64_cpu_local_tick();

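The rating drop to 50 above matters because the clocksource core prefers the highest-rated registered clocksource, and ratings below 100 are by convention reserved for boot and testing only, so a drifty ITC loses to any other usable clocksource (including sn2_rtc, whose rating is raised to 450 further down in this merge). A simplified userspace model of that selection follows; it is not the kernel's actual selection code, and struct clocksource_model and pick_best are invented for illustration.

/* Toy model: pick the registered clocksource with the highest rating. */
#include <stdio.h>

struct clocksource_model {
	const char *name;
	int rating;	/* 1-99 boot/testing only ... 400-499 perfect */
};

static struct clocksource_model *pick_best(struct clocksource_model *cs, int n)
{
	struct clocksource_model *best = &cs[0];

	for (int i = 1; i < n; i++)
		if (cs[i].rating > best->rating)
			best = &cs[i];
	return best;
}

int main(void)
{
	struct clocksource_model sources[] = {
		{ "itc",     50 },	/* rating after this commit on a drifty-ITC box */
		{ "sn2_rtc", 450 },	/* raised from 300 in the hunk below */
	};

	printf("selected: %s\n", pick_best(sources, 2)->name);
	return 0;
}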
+ 1 - 1
arch/ia64/sn/kernel/io_common.c

@@ -391,7 +391,7 @@ void sn_bus_free_sysdata(void)
  * hubdev_init_node() - Creates the HUB data structure and link them to it's
  *			own NODE specific data area.
  */
-void hubdev_init_node(nodepda_t * npda, cnodeid_t node)
+void __init hubdev_init_node(nodepda_t * npda, cnodeid_t node)
 {
 	struct hubdev_info *hubdev_info;
 	int size;

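The __init added to hubdev_init_node() above, like the __init_refok added to iosapic_alloc_rte() and mca_bootmem() earlier, addresses the modpost section-mismatch warnings named in the merge summary. A hypothetical kernel-style sketch of what the two annotations mean follows; it is not from this commit and only builds inside a kernel tree of this era.

#include <linux/init.h>

static int boot_tuning __initdata = 64;	/* discarded with init memory after boot */

/* __init: placed in the init section and freed once boot completes. */
static void __init early_probe(void)
{
}

/*
 * __init_refok: kept in normal .text, but its references into __init and
 * __initdata are declared intentional because the caller guarantees it
 * only runs before init memory is freed, so modpost stays quiet.
 */
static void __init_refok boot_only_helper(void)
{
	early_probe();
	(void)boot_tuning;
}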
+ 0 - 1
arch/ia64/sn/kernel/setup.c

@@ -25,7 +25,6 @@
 #include <linux/interrupt.h>
 #include <linux/acpi.h>
 #include <linux/compiler.h>
-#include <linux/sched.h>
 #include <linux/root_dev.h>
 #include <linux/nodemask.h>
 #include <linux/pm.h>

+ 2 - 5
arch/ia64/sn/kernel/sn2/timer.c

@@ -23,16 +23,14 @@
 
 extern unsigned long sn_rtc_cycles_per_second;
 
-static void __iomem *sn2_mc;
-
 static cycle_t read_sn2(void)
 {
-	return (cycle_t)readq(sn2_mc);
+	return (cycle_t)readq(RTC_COUNTER_ADDR);
 }
 
 static struct clocksource clocksource_sn2 = {
         .name           = "sn2_rtc",
-        .rating         = 300,
+        .rating         = 450,
         .read           = read_sn2,
         .mask           = (1LL << 55) - 1,
         .mult           = 0,
@@ -58,7 +56,6 @@ ia64_sn_udelay (unsigned long usecs)
 
 void __init sn_timer_init(void)
 {
-	sn2_mc = RTC_COUNTER_ADDR;
 	clocksource_sn2.fsys_mmio = RTC_COUNTER_ADDR;
 	clocksource_sn2.mult = clocksource_hz2mult(sn_rtc_cycles_per_second,
 							clocksource_sn2.shift);