@@ -75,7 +75,6 @@ unsigned long irq_hwmask[NR_IRQS];
 
 asiduse smtc_live_asid[MAX_SMTC_TLBS][MAX_SMTC_ASIDS];
 
-
 /*
  * Number of InterProcessor Interrupt (IPI) message buffers to allocate
  */
@@ -388,6 +387,7 @@ void smtc_prepare_cpus(int cpus)
 		IPIQ[i].head = IPIQ[i].tail = NULL;
 		spin_lock_init(&IPIQ[i].lock);
 		IPIQ[i].depth = 0;
+		IPIQ[i].resched_flag = 0; /* No reschedules queued initially */
 	}
 
 	/* cpu_data index starts at zero */
@@ -741,11 +741,24 @@ void smtc_forward_irq(unsigned int irq)
 static void smtc_ipi_qdump(void)
 {
 	int i;
+	struct smtc_ipi *temp;
 
 	for (i = 0; i < NR_CPUS ;i++) {
-		printk("IPIQ[%d]: head = 0x%x, tail = 0x%x, depth = %d\n",
+		pr_info("IPIQ[%d]: head = 0x%x, tail = 0x%x, depth = %d\n",
 			i, (unsigned)IPIQ[i].head, (unsigned)IPIQ[i].tail,
 			IPIQ[i].depth);
+		temp = IPIQ[i].head;
+
+		while (temp != IPIQ[i].tail) {
+			pr_debug("%d %d %d: ", temp->type, temp->dest,
+				 (int)temp->arg);
+#ifdef SMTC_IPI_DEBUG
+			pr_debug("%u %lu\n", temp->sender, temp->stamp);
+#else
+			pr_debug("\n");
+#endif
+			temp = temp->flink;
+		}
 	}
 }
 
@@ -784,11 +797,16 @@ void smtc_send_ipi(int cpu, int type, unsigned int action)
 	int mtflags;
 	unsigned long tcrestart;
 	extern void r4k_wait_irqoff(void), __pastwait(void);
+	int set_resched_flag = (type == LINUX_SMP_IPI &&
+				action == SMP_RESCHEDULE_YOURSELF);
 
 	if (cpu == smp_processor_id()) {
 		printk("Cannot Send IPI to self!\n");
 		return;
 	}
+	if (set_resched_flag && IPIQ[cpu].resched_flag != 0)
+		return; /* There is a reschedule queued already */
+
 	/* Set up a descriptor, to be delivered either promptly or queued */
 	pipi = smtc_ipi_dq(&freeIPIq);
 	if (pipi == NULL) {
@@ -801,6 +819,7 @@ void smtc_send_ipi(int cpu, int type, unsigned int action)
 	pipi->dest = cpu;
 	if (cpu_data[cpu].vpe_id != cpu_data[smp_processor_id()].vpe_id) {
 		/* If not on same VPE, enqueue and send cross-VPE interrupt */
+		IPIQ[cpu].resched_flag |= set_resched_flag;
 		smtc_ipi_nq(&IPIQ[cpu], pipi);
 		LOCK_CORE_PRA();
 		settc(cpu_data[cpu].tc_id);
@@ -847,6 +866,7 @@ void smtc_send_ipi(int cpu, int type, unsigned int action)
 			 */
 			write_tc_c0_tchalt(0);
 			UNLOCK_CORE_PRA();
+			IPIQ[cpu].resched_flag |= set_resched_flag;
 			smtc_ipi_nq(&IPIQ[cpu], pipi);
 		} else {
 postdirect:
@@ -996,12 +1016,15 @@ void deferred_smtc_ipi(void)
 		 * already enabled.
 		 */
 		local_irq_save(flags);
-
 		spin_lock(&q->lock);
 		pipi = __smtc_ipi_dq(q);
 		spin_unlock(&q->lock);
-		if (pipi != NULL)
+		if (pipi != NULL) {
+			if (pipi->type == LINUX_SMP_IPI &&
+			    (int)pipi->arg == SMP_RESCHEDULE_YOURSELF)
+				IPIQ[cpu].resched_flag = 0;
 			ipi_decode(pipi);
+		}
 		/*
 		 * The use of the __raw_local restore isn't
 		 * as obviously necessary here as in smtc_ipi_replay(),
@@ -1082,6 +1105,9 @@ static irqreturn_t ipi_interrupt(int irq, void *dev_idm)
 		 * with interrupts off
 		 */
 		local_irq_save(flags);
+		if (pipi->type == LINUX_SMP_IPI &&
+		    (int)pipi->arg == SMP_RESCHEDULE_YOURSELF)
+			IPIQ[cpu].resched_flag = 0;
 		ipi_decode(pipi);
 		local_irq_restore(flags);
 	}