
Merge branch 'release' of master.kernel.org:/pub/scm/linux/kernel/git/aegl/linux-2.6

* 'release' of master.kernel.org:/pub/scm/linux/kernel/git/aegl/linux-2.6:
  [IA64] Correct definition of handle_IPI
  [IA64] move SAL_CACHE_FLUSH check later in boot
  [IA64] MCA recovery: Montecito support
  [IA64] cpu-hotplug: Fixing confliction between CPU hot-add and IPI
  [IA64] don't double >> PAGE_SHIFT pointer for /dev/kmem access
Linus Torvalds, 18 years ago · commit d5b9b787b5

+ 68 - 27
arch/ia64/kernel/mca_drv.c

@@ -434,6 +434,50 @@ is_mca_global(peidx_table_t *peidx, pal_bus_check_info_t *pbci,
 	return MCA_IS_GLOBAL;
 }
 
+/**
+ * get_target_identifier - Get the valid Cache or Bus check target identifier.
+ * @peidx:	pointer of index of processor error section
+ *
+ * Return value:
+ *	target address on Success / 0 on Failure
+ */
+static u64
+get_target_identifier(peidx_table_t *peidx)
+{
+	u64 target_address = 0;
+	sal_log_mod_error_info_t *smei;
+	pal_cache_check_info_t *pcci;
+	int i, level = 9;
+
+	/*
+	 * Look through the cache checks for a valid target identifier
+	 * If more than one valid target identifier, return the one
+	 * with the lowest cache level.
+	 */
+	for (i = 0; i < peidx_cache_check_num(peidx); i++) {
+		smei = (sal_log_mod_error_info_t *)peidx_cache_check(peidx, i);
+		if (smei->valid.target_identifier && smei->target_identifier) {
+			pcci = (pal_cache_check_info_t *)&(smei->check_info);
+			if (!target_address || (pcci->level < level)) {
+				target_address = smei->target_identifier;
+				level = pcci->level;
+				continue;
+			}
+		}
+	}
+	if (target_address)
+		return target_address;
+
+	/*
+	 * Look at the bus check for a valid target identifier
+	 */
+	smei = peidx_bus_check(peidx, 0);
+	if (smei && smei->valid.target_identifier)
+		return smei->target_identifier;
+
+	return 0;
+}
+
 /**
  * recover_from_read_error - Try to recover the errors which type are "read"s.
  * @slidx:	pointer of index of SAL error record
@@ -450,13 +494,14 @@ recover_from_read_error(slidx_table_t *slidx,
 			peidx_table_t *peidx, pal_bus_check_info_t *pbci,
 			struct ia64_sal_os_state *sos)
 {
-	sal_log_mod_error_info_t *smei;
+	u64 target_identifier;
 	pal_min_state_area_t *pmsa;
 	struct ia64_psr *psr1, *psr2;
 	ia64_fptr_t *mca_hdlr_bh = (ia64_fptr_t*)mca_handler_bhhook;
 
 	/* Is target address valid? */
-	if (!pbci->tv)
+	target_identifier = get_target_identifier(peidx);
+	if (!target_identifier)
 		return fatal_mca("target address not valid");
 
 	/*
@@ -487,32 +532,28 @@ recover_from_read_error(slidx_table_t *slidx,
 	pmsa = sos->pal_min_state;
 	if (psr1->cpl != 0 ||
 	   ((psr2->cpl != 0) && mca_recover_range(pmsa->pmsa_iip))) {
-		smei = peidx_bus_check(peidx, 0);
-		if (smei->valid.target_identifier) {
-			/*
-			 *  setup for resume to bottom half of MCA,
-			 * "mca_handler_bhhook"
-			 */
-			/* pass to bhhook as argument (gr8, ...) */
-			pmsa->pmsa_gr[8-1] = smei->target_identifier;
-			pmsa->pmsa_gr[9-1] = pmsa->pmsa_iip;
-			pmsa->pmsa_gr[10-1] = pmsa->pmsa_ipsr;
-			/* set interrupted return address (but no use) */
-			pmsa->pmsa_br0 = pmsa->pmsa_iip;
-			/* change resume address to bottom half */
-			pmsa->pmsa_iip = mca_hdlr_bh->fp;
-			pmsa->pmsa_gr[1-1] = mca_hdlr_bh->gp;
-			/* set cpl with kernel mode */
-			psr2 = (struct ia64_psr *)&pmsa->pmsa_ipsr;
-			psr2->cpl = 0;
-			psr2->ri  = 0;
-			psr2->bn  = 1;
-			psr2->i  = 0;
-
-			return mca_recovered("user memory corruption. "
+		/*
+		 *  setup for resume to bottom half of MCA,
+		 * "mca_handler_bhhook"
+		 */
+		/* pass to bhhook as argument (gr8, ...) */
+		pmsa->pmsa_gr[8-1] = target_identifier;
+		pmsa->pmsa_gr[9-1] = pmsa->pmsa_iip;
+		pmsa->pmsa_gr[10-1] = pmsa->pmsa_ipsr;
+		/* set interrupted return address (but no use) */
+		pmsa->pmsa_br0 = pmsa->pmsa_iip;
+		/* change resume address to bottom half */
+		pmsa->pmsa_iip = mca_hdlr_bh->fp;
+		pmsa->pmsa_gr[1-1] = mca_hdlr_bh->gp;
+		/* set cpl with kernel mode */
+		psr2 = (struct ia64_psr *)&pmsa->pmsa_ipsr;
+		psr2->cpl = 0;
+		psr2->ri  = 0;
+		psr2->bn  = 1;
+		psr2->i  = 0;
+
+		return mca_recovered("user memory corruption. "
 				"kill affected process - recovered.");
 				"kill affected process - recovered.");
-		}
-
 	}
 	}
 
 
 	return fatal_mca("kernel context not recovered, iip 0x%lx\n",
 	return fatal_mca("kernel context not recovered, iip 0x%lx\n",
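
The new get_target_identifier() changes what counts as a usable target address: instead of trusting only the bus check's tv bit, recovery now prefers a cache-check target identifier, picking the one reported at the lowest cache level, and falls back to the bus check only when no cache check carries a valid address. A standalone sketch of that selection policy, using a hypothetical plain-C record in place of the SAL/PAL structures:

/* Hypothetical stand-in for the fields get_target_identifier() reads. */
struct check_record {
	int valid;		/* target_identifier valid bit         */
	unsigned long target;	/* address of the access that failed   */
	int level;		/* cache level that reported the check */
};

/* Return the valid target at the lowest cache level, or 0 if none. */
static unsigned long pick_lowest_level_target(const struct check_record *rec,
					      int count)
{
	unsigned long target = 0;
	int best_level = 9;	/* higher than any real cache level */
	int i;

	for (i = 0; i < count; i++) {
		if (rec[i].valid && rec[i].target &&
		    (!target || rec[i].level < best_level)) {
			target = rec[i].target;
			best_level = rec[i].level;
		}
	}
	return target;
}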

+ 7 - 4
arch/ia64/kernel/sal.c

@@ -223,12 +223,13 @@ static void __init sal_desc_ap_wakeup(void *p) { }
  */
 static int sal_cache_flush_drops_interrupts;
 
-static void __init
+void __init
 check_sal_cache_flush (void)
 {
 	unsigned long flags;
 	int cpu;
-	u64 vector;
+	u64 vector, cache_type = 3;
+	struct ia64_sal_retval isrv;
 
 	cpu = get_cpu();
 	local_irq_save(flags);
@@ -243,7 +244,10 @@ check_sal_cache_flush (void)
 	while (!ia64_get_irr(IA64_TIMER_VECTOR))
 		cpu_relax();
 
-	ia64_sal_cache_flush(3);
+	SAL_CALL(isrv, SAL_CACHE_FLUSH, cache_type, 0, 0, 0, 0, 0, 0);
+
+	if (isrv.status)
+		printk(KERN_ERR "SAL_CACHE_FLUSH failed with %ld\n", isrv.status);
 
 	if (ia64_get_irr(IA64_TIMER_VECTOR)) {
 		vector = ia64_get_ivr();
@@ -331,7 +335,6 @@ ia64_sal_init (struct ia64_sal_systab *systab)
 		p += SAL_DESC_SIZE(*p);
 	}
 
-	check_sal_cache_flush();
 }
 
 int
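
Two things change in sal.c: the flush now goes through SAL_CALL so the SAL status word can be checked and reported, and check_sal_cache_flush() loses its static qualifier so it can run later in boot (the call site moves from ia64_sal_init() to setup_arch(), per the setup.c and sal.h hunks below). The probe itself hinges on whether a pending timer interrupt survives the firmware call; a compressed sketch of that idea, with the timer-arming step reduced to a hypothetical arm_timer_interrupt() helper and everything else taken from the hunk above:

/* Sketch only: does SAL_CACHE_FLUSH swallow a pending interrupt?
 * arm_timer_interrupt() is a stand-in for programming the CPU's
 * interval timer to fire IA64_TIMER_VECTOR. */
static int __init sal_cache_flush_drops_irqs(void)
{
	struct ia64_sal_retval isrv;
	unsigned long flags;
	int dropped;

	local_irq_save(flags);
	arm_timer_interrupt();			/* make the vector pend     */

	while (!ia64_get_irr(IA64_TIMER_VECTOR))
		cpu_relax();			/* wait for the pending bit */

	SAL_CALL(isrv, SAL_CACHE_FLUSH, 3, 0, 0, 0, 0, 0, 0);

	dropped = !ia64_get_irr(IA64_TIMER_VECTOR);	/* bit gone = dropped */
	local_irq_restore(flags);
	return dropped;
}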

+ 2 - 0
arch/ia64/kernel/setup.c

@@ -457,6 +457,8 @@ setup_arch (char **cmdline_p)
 	cpu_init();	/* initialize the bootstrap CPU */
 	mmu_context_init();	/* initialize context_id bitmap */
 
+	check_sal_cache_flush();
+
 #ifdef CONFIG_ACPI
 	acpi_boot_init();
 #endif

+ 7 - 5
arch/ia64/kernel/smp.c

@@ -108,7 +108,7 @@ cpu_die(void)
 }
 
 irqreturn_t
-handle_IPI (int irq, void *dev_id, struct pt_regs *regs)
+handle_IPI (int irq, void *dev_id)
 {
 	int this_cpu = get_cpu();
 	unsigned long *pending_ipis = &__ia64_per_cpu_var(ipi_operation);
@@ -328,10 +328,14 @@ int
 smp_call_function (void (*func) (void *info), void *info, int nonatomic, int wait)
 {
 	struct call_data_struct data;
-	int cpus = num_online_cpus()-1;
+	int cpus;
 
-	if (!cpus)
+	spin_lock(&call_lock);
+	cpus = num_online_cpus() - 1;
+	if (!cpus) {
+		spin_unlock(&call_lock);
 		return 0;
+	}
 
 	/* Can deadlock when called with interrupts disabled */
 	WARN_ON(irqs_disabled());
@@ -343,8 +347,6 @@ smp_call_function (void (*func) (void *info), void *info, int nonatomic, int wai
 	if (wait)
 		atomic_set(&data.finished, 0);
 
-	spin_lock(&call_lock);
-
 	call_data = &data;
 	mb();	/* ensure store to call_data precedes setting of IPI_CALL_FUNC */
 	send_IPI_allbutself(IPI_CALL_FUNC);
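
The handle_IPI() hunk simply drops the struct pt_regs argument to match the current interrupt-handler prototype. The smp_call_function() hunk closes a hot-add window: the online-CPU count has to be sampled under call_lock, otherwise a CPU that comes online between the count and send_IPI_allbutself() leaves 'cpus' and the set of IPI receivers describing different CPU sets, so the started/finished waits can mismatch and the new CPU can observe call_data in flux. The ordering the fix enforces, boiled down as a sketch (names follow the function above):

	spin_lock(&call_lock);			/* freeze the online set     */
	cpus = num_online_cpus() - 1;		/* count under the same lock */
	if (!cpus) {
		spin_unlock(&call_lock);	/* no one else to call       */
		return 0;
	}
	/* ... fill in the call_data_struct, then publish and send: */
	call_data = &data;
	mb();					/* publish before the IPI    */
	send_IPI_allbutself(IPI_CALL_FUNC);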

+ 1 - 0
include/asm-ia64/sal.h

@@ -659,6 +659,7 @@ ia64_sal_freq_base (unsigned long which, unsigned long *ticks_per_second,
 }
 
 extern s64 ia64_sal_cache_flush (u64 cache_type);
+extern void __init check_sal_cache_flush (void);
 
 /* Initialize all the processor and platform level instruction and data caches */
 static inline s64

+ 1 - 1
include/asm-ia64/uaccess.h

@@ -389,7 +389,7 @@ xlate_dev_kmem_ptr (char * p)
 	struct page *page;
 	char * ptr;
 
-	page = virt_to_page((unsigned long)p >> PAGE_SHIFT);
+	page = virt_to_page((unsigned long)p);
 	if (PageUncached(page))
 		ptr = (char *)__pa(p) + __IA64_UNCACHED_OFFSET;
 	else
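
The uaccess.h change fixes the /dev/kmem path: virt_to_page() expects a kernel virtual address, but the old code shifted the pointer right by PAGE_SHIFT first, handing it a page-frame-number-shaped value and so indexing the wrong struct page. A hedged illustration of the distinction (example() and its argument are hypothetical, not kernel code):

/* Illustration only: two valid ways to reach the struct page behind a
 * directly mapped kernel virtual address p, versus the old mix-up. */
static struct page *example(void *p)
{
	struct page *by_addr = virt_to_page(p);			/* takes an address */
	struct page *by_pfn  = pfn_to_page(__pa(p) >> PAGE_SHIFT);	/* takes a PFN */

	/* Old code: virt_to_page((unsigned long)p >> PAGE_SHIFT) fed a
	 * PFN-shaped value to the address-based macro. */
	return by_addr == by_pfn ? by_addr : NULL;
}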