
[SPARC64]: Fix endless loop in cheetah_xcall_deliver().

We need to mask out the proper bits when testing the dispatch status
register, otherwise we can see unrelated NACK bits left over from
previous cross-call sends.

Signed-off-by: David S. Miller <davem@davemloft.net>
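
For reference, the dispatch status register layout this patch relies on
packs two bits per dispatch slot: the BUSY bit sits at the even position
(2 * slot) and the matching NACK bit one position above it. That is why
the patch builds busy_mask from bits at (i * 2) or (nack_busy_id * 2),
derives nack_mask as busy_mask << 1, and why the old
0x5555555555555555UL constant selected every BUSY bit at once. Masking
the polled value with (busy_mask | nack_mask) means stale NACK bits from
an earlier cross call on an unrelated slot can no longer keep the loop
spinning. A minimal sketch of the bit relationship (hypothetical helpers
for illustration, not part of the commit):

	/* Illustration only: for a dispatch slot, bit (2 * slot) is its
	 * BUSY bit and bit (2 * slot + 1) is its NACK bit, mirroring how
	 * busy_mask and nack_mask are built in the patch below.
	 */
	static inline unsigned long slot_busy_bit(int slot)
	{
		return 1UL << (slot * 2);
	}

	static inline unsigned long slot_nack_bit(int slot)
	{
		return slot_busy_bit(slot) << 1;
	}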
David S. Miller, 17 years ago
commit 0de56d1ab8
1 changed file with 13 additions and 6 deletions

arch/sparc64/kernel/smp.c (+13, -6)

@@ -476,7 +476,7 @@ static inline void spitfire_xcall_deliver(u64 data0, u64 data1, u64 data2, cpuma
  */
 static void cheetah_xcall_deliver(u64 data0, u64 data1, u64 data2, cpumask_t mask)
 {
-	u64 pstate, ver;
+	u64 pstate, ver, busy_mask;
 	int nack_busy_id, is_jbus, need_more;
 
 	if (cpus_empty(mask))
@@ -508,14 +508,20 @@ retry:
 			       "i" (ASI_INTR_W));
 
 	nack_busy_id = 0;
+	busy_mask = 0;
 	{
 		int i;
 
 		for_each_cpu_mask(i, mask) {
 			u64 target = (i << 14) | 0x70;
 
-			if (!is_jbus)
+			if (is_jbus) {
+				busy_mask |= (0x1UL << (i * 2));
+			} else {
 				target |= (nack_busy_id << 24);
+				busy_mask |= (0x1UL <<
+					      (nack_busy_id * 2));
+			}
 			__asm__ __volatile__(
 				"stxa	%%g0, [%0] %1\n\t"
 				"membar	#Sync\n\t"
@@ -531,15 +537,16 @@ retry:
 
 	/* Now, poll for completion. */
 	{
-		u64 dispatch_stat;
+		u64 dispatch_stat, nack_mask;
 		long stuck;
 
 		stuck = 100000 * nack_busy_id;
+		nack_mask = busy_mask << 1;
 		do {
 			__asm__ __volatile__("ldxa	[%%g0] %1, %0"
 					     : "=r" (dispatch_stat)
 					     : "i" (ASI_INTR_DISPATCH_STAT));
-			if (dispatch_stat == 0UL) {
+			if (!(dispatch_stat & (busy_mask | nack_mask))) {
 				__asm__ __volatile__("wrpr %0, 0x0, %%pstate"
 						     : : "r" (pstate));
 				if (unlikely(need_more)) {
@@ -556,12 +563,12 @@ retry:
 			}
 			if (!--stuck)
 				break;
-		} while (dispatch_stat & 0x5555555555555555UL);
+		} while (dispatch_stat & busy_mask);
 
 		__asm__ __volatile__("wrpr %0, 0x0, %%pstate"
 				     : : "r" (pstate));
 
-		if ((dispatch_stat & ~(0x5555555555555555UL)) == 0) {
+		if (dispatch_stat & busy_mask) {
 			/* Busy bits will not clear, continue instead
 			 * of freezing up on this cpu.
 			 */