@@ -68,11 +68,7 @@ struct ino_bucket ivector_table[NUM_IVECS] __attribute__ ((aligned (SMP_CACHE_BY
  * access to this structure takes a TLB miss it could cause
  * the 5-level sparc v9 trap stack to overflow.
  */
-struct irq_work_struct {
-	unsigned int	irq_worklists[16];
-};
-struct irq_work_struct __irq_work[NR_CPUS];
-#define irq_work(__cpu, __pil)	&(__irq_work[(__cpu)].irq_worklists[(__pil)])
+#define irq_work(__cpu)	&(trap_block[(__cpu)].irq_worklist)
 
 static struct irqaction *irq_action[NR_IRQS+1];
 
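Note on the new irq_work() macro: it assumes the per-cpu trap_block[] entries carry an irq_worklist word, added to the per-cpu trap state by this patch. A minimal sketch of the assumed layout (the real struct trap_per_cpu holds many more fields; only the relevant member is shown):

	/* Sketch under assumptions, not the actual definition.  The
	 * worklist head moves into per-cpu trap state that is locked
	 * into the TLB, so the vectored-interrupt trap handler can
	 * update it without risking the TLB-miss overflow described
	 * in the comment above.
	 */
	struct trap_per_cpu {
		/* ... TSB and cpu-mondo fields elided ... */
		unsigned int	irq_worklist;
	};
	extern struct trap_per_cpu trap_block[NR_CPUS];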
@@ -91,10 +87,8 @@ static void register_irq_proc (unsigned int irq);
  */
 #define put_ino_in_irqaction(action, irq) \
 	action->flags &= 0xffffffffffffUL; \
-	if (__bucket(irq) == &pil0_dummy_bucket) \
-		action->flags |= 0xdeadUL << 48; \
-	else \
-		action->flags |= __irq_ino(irq) << 48;
+	action->flags |= __irq_ino(irq) << 48;
+
 #define get_ino_in_irqaction(action)	(action->flags >> 48)
 
 #define put_smpaff_in_irqaction(action, smpaff)	(action)->mask = (smpaff)
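With the dummy bucket gone, put_ino_in_irqaction() no longer needs the 0xdead sentinel: the INO is always packed into bits 63:48 of action->flags. A self-contained illustration of the packing arithmetic (values are made up; assumes a 64-bit long, as on sparc64):

	#include <assert.h>

	int main(void)
	{
		unsigned long flags = 0x123UL;	/* stand-in for SA_* bits */
		unsigned long ino = 0x7c5UL;	/* stand-in for an INO */

		flags &= 0xffffffffffffUL;	/* clear bits 63:48 */
		flags |= ino << 48;		/* pack the INO on top */

		/* get_ino_in_irqaction() recovers it from bits 63:48,
		 * leaving the low flag bits untouched. */
		assert((flags >> 48) == ino);
		assert((flags & 0xffffffffffffUL) == 0x123UL);
		return 0;
	}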
@@ -251,15 +245,6 @@ void disable_irq(unsigned int irq)
 	}
 }
 
-/* The timer is the one "weird" interrupt which is generated by
- * the CPU %tick register and not by some normal vectored interrupt
- * source. To handle this special case, we use this dummy INO bucket.
- */
-static struct irq_desc pil0_dummy_desc;
-static struct ino_bucket pil0_dummy_bucket = {
-	.irq_info = &pil0_dummy_desc,
-};
-
 static void build_irq_error(const char *msg, unsigned int ino, int pil, int inofixup,
 			    unsigned long iclr, unsigned long imap,
 			    struct ino_bucket *bucket)
@@ -276,15 +261,7 @@ unsigned int build_irq(int pil, int inofixup, unsigned long iclr, unsigned long
 	struct ino_bucket *bucket;
 	int ino;
 
-	if (pil == 0) {
-		if (iclr != 0UL || imap != 0UL) {
-			prom_printf("Invalid dummy bucket for PIL0 (%lx:%lx)\n",
-				    iclr, imap);
-			prom_halt();
-		}
-		return __irq(&pil0_dummy_bucket);
-	}
-
+	BUG_ON(pil == 0);
 	BUG_ON(tlb_type == hypervisor);
 
 	/* RULE: Both must be specified in all other cases. */
@@ -371,7 +348,7 @@ static void atomic_bucket_insert(struct ino_bucket *bucket)
 	__asm__ __volatile__("rdpr %%pstate, %0" : "=r" (pstate));
 	__asm__ __volatile__("wrpr %0, %1, %%pstate"
 			     : : "r" (pstate), "i" (PSTATE_IE));
-	ent = irq_work(smp_processor_id(), bucket->pil);
+	ent = irq_work(smp_processor_id());
 	bucket->irq_chain = *ent;
 	*ent = __irq(bucket);
 	__asm__ __volatile__("wrpr %0, 0x0, %%pstate" : : "r" (pstate));
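atomic_bucket_insert() now pushes onto this cpu's single worklist: with PSTATE_IE cleared, the bucket is linked in front of the current head, and irq_chain threads the buckets into a singly linked stack of 32-bit cookies. A user-space model of the push, and of the detach-all drain that handler_irq() performs later, with plain pointers standing in for the __irq()/__bucket() cookie conversions:

	struct bucket {
		struct bucket *chain;	/* models irq_chain */
		int ino;
	};

	/* Push in front of the current head.  In the kernel this runs
	 * with interrupts disabled, so no atomic op is needed. */
	void worklist_push(struct bucket **head, struct bucket *b)
	{
		b->chain = *head;
		*head = b;
	}

	/* Drain: detach the whole chain in one step (the kernel uses
	 * xchg32() on the worklist word) and hand it to the caller. */
	struct bucket *worklist_drain(struct bucket **head)
	{
		struct bucket *all = *head;
		*head = 0;
		return all;
	}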
@@ -437,7 +414,7 @@ int request_irq(unsigned int irq, irqreturn_t (*handler)(int, void *, struct pt_
 	if (unlikely(!bucket->irq_info))
 		return -ENODEV;
 
-	if ((bucket != &pil0_dummy_bucket) && (irqflags & SA_SAMPLE_RANDOM)) {
+	if (irqflags & SA_SAMPLE_RANDOM) {
 		/*
 		 * This function might sleep, we want to call it first,
 		 * outside of the atomic block. In SA_STATIC_ALLOC case,
@@ -465,12 +442,9 @@ int request_irq(unsigned int irq, irqreturn_t (*handler)(int, void *, struct pt_
 	}
 
 	bucket->flags |= IBF_ACTIVE;
-	pending = 0;
-	if (bucket != &pil0_dummy_bucket) {
-		pending = bucket->pending;
-		if (pending)
-			bucket->pending = 0;
-	}
+	pending = bucket->pending;
+	if (pending)
+		bucket->pending = 0;
 
 	action->handler = handler;
 	action->flags = irqflags;
@@ -487,13 +461,12 @@ int request_irq(unsigned int irq, irqreturn_t (*handler)(int, void *, struct pt_
 	/* We ate the IVEC already, this makes sure it does not get lost. */
 	if (pending) {
 		atomic_bucket_insert(bucket);
-		set_softint(1 << bucket->pil);
+		set_softint(1 << PIL_DEVICE_IRQ);
 	}
 
 	spin_unlock_irqrestore(&irq_action_lock, flags);
 
-	if (bucket != &pil0_dummy_bucket)
-		register_irq_proc(__irq_ino(irq));
+	register_irq_proc(__irq_ino(irq));
 
 #ifdef CONFIG_SMP
 	distribute_irqs();
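The pending-IVEC replay now raises the one shared device PIL instead of the bucket's private level. PIL_DEVICE_IRQ is taken from the sparc64 PIL constants; its value is not visible in this hunk, so the definition below is illustrative only:

	/* Illustrative only -- the real constant lives in the sparc64
	 * pil.h header.  The point is that every vectored device
	 * interrupt is funneled through one PIL, with the per-device
	 * identity carried by the bucket on the worklist rather than
	 * by the interrupt level itself.
	 */
	#define PIL_DEVICE_IRQ	5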
@@ -533,7 +506,9 @@ void free_irq(unsigned int irq, void *dev_id)
 {
 	struct irqaction *action;
 	struct ino_bucket *bucket;
+	struct irq_desc *desc;
 	unsigned long flags;
+	int ent, i;
 
 	spin_lock_irqsave(&irq_action_lock, flags);
 
@@ -549,42 +524,39 @@ void free_irq(unsigned int irq, void *dev_id)
 	spin_lock_irqsave(&irq_action_lock, flags);
 
 	bucket = __bucket(irq);
-	if (bucket != &pil0_dummy_bucket) {
-		struct irq_desc *desc = bucket->irq_info;
-		int ent, i;
+	desc = bucket->irq_info;
 
-		for (i = 0; i < MAX_IRQ_DESC_ACTION; i++) {
-			struct irqaction *p = &desc->action[i];
+	for (i = 0; i < MAX_IRQ_DESC_ACTION; i++) {
+		struct irqaction *p = &desc->action[i];
 
-			if (p == action) {
-				desc->action_active_mask &= ~(1 << i);
-				break;
-			}
+		if (p == action) {
+			desc->action_active_mask &= ~(1 << i);
+			break;
 		}
+	}
 
-		if (!desc->action_active_mask) {
-			unsigned long imap = bucket->imap;
-
-			/* This unique interrupt source is now inactive. */
-			bucket->flags &= ~IBF_ACTIVE;
+	if (!desc->action_active_mask) {
+		unsigned long imap = bucket->imap;
 
-			/* See if any other buckets share this bucket's IMAP
-			 * and are still active.
-			 */
-			for (ent = 0; ent < NUM_IVECS; ent++) {
-				struct ino_bucket *bp = &ivector_table[ent];
-				if (bp != bucket &&
-				    bp->imap == imap &&
-				    (bp->flags & IBF_ACTIVE) != 0)
-					break;
-			}
+		/* This unique interrupt source is now inactive. */
+		bucket->flags &= ~IBF_ACTIVE;
 
-			/* Only disable when no other sub-irq levels of
-			 * the same IMAP are active.
-			 */
-			if (ent == NUM_IVECS)
-				disable_irq(irq);
+		/* See if any other buckets share this bucket's IMAP
+		 * and are still active.
+		 */
+		for (ent = 0; ent < NUM_IVECS; ent++) {
+			struct ino_bucket *bp = &ivector_table[ent];
+			if (bp != bucket &&
+			    bp->imap == imap &&
+			    (bp->flags & IBF_ACTIVE) != 0)
+				break;
 		}
+
+		/* Only disable when no other sub-irq levels of
+		 * the same IMAP are active.
+		 */
+		if (ent == NUM_IVECS)
+			disable_irq(irq);
 	}
 
 	spin_unlock_irqrestore(&irq_action_lock, flags);
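The de-indented teardown logic is unchanged in substance: when the last action on a descriptor goes away, free_irq() must scan the whole ivector_table for another active bucket sharing the same IMAP before it may disable the hardware source. The scan, lifted into a standalone helper for clarity (a sketch, not code from the patch):

	/* Returns nonzero if some other active bucket still references
	 * the IMAP of the bucket being torn down; the hardware source
	 * may only be disabled when this returns zero. */
	static int imap_shared_and_active(struct ino_bucket *dying)
	{
		int ent;

		for (ent = 0; ent < NUM_IVECS; ent++) {
			struct ino_bucket *bp = &ivector_table[ent];

			if (bp != dying &&
			    bp->imap == dying->imap &&
			    (bp->flags & IBF_ACTIVE) != 0)
				return 1;
		}
		return 0;
	}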
@@ -625,7 +597,7 @@ void synchronize_irq(unsigned int irq)
 }
 #endif /* CONFIG_SMP */
 
-static void process_bucket(int irq, struct ino_bucket *bp, struct pt_regs *regs)
+static void process_bucket(struct ino_bucket *bp, struct pt_regs *regs)
 {
 	struct irq_desc *desc = bp->irq_info;
 	unsigned char flags = bp->flags;
@@ -676,51 +648,54 @@ static void process_bucket(int irq, struct ino_bucket *bp, struct pt_regs *regs)
 
 		/* Test and add entropy */
 		if (random & SA_SAMPLE_RANDOM)
-			add_interrupt_randomness(irq);
+			add_interrupt_randomness(bp->pil);
 	}
 out:
 	bp->flags &= ~IBF_INPROGRESS;
 }
 
+#ifndef CONFIG_SMP
+extern irqreturn_t timer_interrupt(int, void *, struct pt_regs *);
+
+void timer_irq(int irq, struct pt_regs *regs)
+{
+	unsigned long clr_mask = 1 << irq;
+	unsigned long tick_mask = tick_ops->softint_mask;
+
+	if (get_softint() & tick_mask) {
+		irq = 0;
+		clr_mask = tick_mask;
+	}
+	clear_softint(clr_mask);
+
+	irq_enter();
+	kstat_this_cpu.irqs[irq]++;
+	timer_interrupt(irq, NULL, regs);
+	irq_exit();
+}
+#endif
+
 void handler_irq(int irq, struct pt_regs *regs)
 {
 	struct ino_bucket *bp;
 	int cpu = smp_processor_id();
 
-#ifndef CONFIG_SMP
-	/*
-	 * Check for TICK_INT on level 14 softint.
+	/* XXX at this point we should be able to assert that
+	 * XXX irq is PIL_DEVICE_IRQ...
 	 */
-	{
-		unsigned long clr_mask = 1 << irq;
-		unsigned long tick_mask = tick_ops->softint_mask;
-
-		if ((irq == 14) && (get_softint() & tick_mask)) {
-			irq = 0;
-			clr_mask = tick_mask;
-		}
-		clear_softint(clr_mask);
-	}
-#else
 	clear_softint(1 << irq);
-#endif
 
 	irq_enter();
-	kstat_this_cpu.irqs[irq]++;
 
 	/* Sliiiick... */
-#ifndef CONFIG_SMP
-	bp = ((irq != 0) ?
-	      __bucket(xchg32(irq_work(cpu, irq), 0)) :
-	      &pil0_dummy_bucket);
-#else
-	bp = __bucket(xchg32(irq_work(cpu, irq), 0));
-#endif
+	bp = __bucket(xchg32(irq_work(cpu), 0));
 	while (bp) {
 		struct ino_bucket *nbp = __bucket(bp->irq_chain);
 
+		kstat_this_cpu.irqs[bp->pil]++;
+
 		bp->irq_chain = 0;
-		process_bucket(irq, bp, regs);
+		process_bucket(bp, regs);
 		bp = nbp;
 	}
 	irq_exit();
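On UP kernels the level-14 trap can be either the %tick compare softint or a device interrupt routed there, which is what the new timer_irq() entry point disambiguates; handler_irq() is left with nothing but the worklist drain, and the per-PIL statistics move into the walk so each bucket is charged to its own PIL. A user-space model of the timer demux (the softint variable and mask value stand in for the %softint register and tick_ops->softint_mask):

	#include <stdio.h>

	static unsigned long softint;		/* models %softint */
	#define TICK_MASK	(1UL << 0)	/* illustrative mask value */

	static void timer_irq_model(int irq)
	{
		unsigned long clr_mask = 1UL << irq;

		if (softint & TICK_MASK) {
			irq = 0;		/* charge the timer */
			clr_mask = TICK_MASK;
		}
		softint &= ~clr_mask;		/* models clear_softint() */
		printf("accounted as irq %d\n", irq);
	}

	int main(void)
	{
		softint = TICK_MASK | (1UL << 14);
		timer_irq_model(14);		/* prints: accounted as irq 0 */
		return 0;
	}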
@@ -929,7 +904,7 @@ void init_irqwork_curcpu(void)
 {
 	int cpu = hard_smp_processor_id();
 
-	memset(__irq_work + cpu, 0, sizeof(struct irq_work_struct));
+	trap_block[cpu].irq_worklist = 0;
 }
 
 static void __cpuinit register_one_mondo(unsigned long paddr, unsigned long type)