/* msp_irq_per.c */
  1. /*
  2. * Copyright 2010 PMC-Sierra, Inc, derived from irq_cpu.c
  3. *
  4. * This file define the irq handler for MSP PER subsystem interrupts.
  5. *
  6. * This program is free software; you can redistribute it and/or modify it
  7. * under the terms of the GNU General Public License as published by the
  8. * Free Software Foundation; either version 2 of the License, or (at your
  9. * option) any later version.
  10. */
  11. #include <linux/init.h>
  12. #include <linux/interrupt.h>
  13. #include <linux/kernel.h>
  14. #include <linux/spinlock.h>
  15. #include <linux/bitops.h>
  16. #include <asm/mipsregs.h>
  17. #include <asm/system.h>
  18. #include <msp_cic_int.h>
  19. #include <msp_regs.h>
/*
 * Convenience Macro. Should be somewhere generic.
 *
 * Extracts the current VPE number from the CP0 TCBind register.
 * NOTE(review): not referenced anywhere in this file's visible code —
 * presumably kept for external callers or history; confirm before removing.
 */
#define get_current_vpe() \
	((read_c0_tcbind() >> TCBIND_CURVPE_SHIFT) & TCBIND_CURVPE)

#ifdef CONFIG_SMP
/*
 * The PER registers must be protected from concurrent access.
 * Serializes the read-modify-write of PER_INT_MSK_REG performed by
 * mask_per_irq()/unmask_per_irq().
 */
static DEFINE_SPINLOCK(per_lock);
#endif
/* ensure writes to per are completed */
static inline void per_wmb(void)
{
	const volatile void __iomem *per_mem = PER_INT_MSK_REG;
	volatile u32 dummy_read;

	wmb();
	/*
	 * Read back from the PER register block so any posted write ahead
	 * of us is flushed out to the device before we return.
	 */
	dummy_read = __raw_readl(per_mem);
	/* Touch the value so the compiler cannot warn about/elide the read. */
	dummy_read++;
}
  40. static inline void unmask_per_irq(struct irq_data *d)
  41. {
  42. #ifdef CONFIG_SMP
  43. unsigned long flags;
  44. spin_lock_irqsave(&per_lock, flags);
  45. *PER_INT_MSK_REG |= (1 << (d->irq - MSP_PER_INTBASE));
  46. spin_unlock_irqrestore(&per_lock, flags);
  47. #else
  48. *PER_INT_MSK_REG |= (1 << (d->irq - MSP_PER_INTBASE));
  49. #endif
  50. per_wmb();
  51. }
  52. static inline void mask_per_irq(struct irq_data *d)
  53. {
  54. #ifdef CONFIG_SMP
  55. unsigned long flags;
  56. spin_lock_irqsave(&per_lock, flags);
  57. *PER_INT_MSK_REG &= ~(1 << (d->irq - MSP_PER_INTBASE));
  58. spin_unlock_irqrestore(&per_lock, flags);
  59. #else
  60. *PER_INT_MSK_REG &= ~(1 << (d->irq - MSP_PER_INTBASE));
  61. #endif
  62. per_wmb();
  63. }
  64. static inline void msp_per_irq_ack(struct irq_data *d)
  65. {
  66. mask_per_irq(d);
  67. /*
  68. * In the PER interrupt controller, only bits 11 and 10
  69. * are write-to-clear, (SPI TX complete, SPI RX complete).
  70. * It does nothing for any others.
  71. */
  72. *PER_INT_STS_REG = (1 << (d->irq - MSP_PER_INTBASE));
  73. }
#ifdef CONFIG_SMP
/*
 * irq_set_affinity handler for PER interrupts.
 *
 * The requested @affinity is ignored; the handler merely (re)unmasks the
 * interrupt and reports success — presumably the PER block cannot steer
 * interrupts to a particular CPU.
 *
 * NOTE(review): unmasking inside a set_affinity callback is surprising
 * (the original author flagged it too); confirm whether the hardware
 * actually requires this or it is a leftover workaround.
 */
static int msp_per_irq_set_affinity(struct irq_data *d,
	const struct cpumask *affinity, bool force)
{
	unmask_per_irq(d);
	return 0;
}
#endif
/*
 * irq_chip for the PER interrupt controller.  No separate mask/unmask
 * callbacks are provided: the core drives irq_enable/irq_disable, and
 * msp_per_irq_ack() masks the source before writing the status bit.
 */
static struct irq_chip msp_per_irq_controller = {
	.name = "MSP_PER",
	.irq_enable = unmask_per_irq,
	.irq_disable = mask_per_irq,
	.irq_ack = msp_per_irq_ack,
#ifdef CONFIG_SMP
	.irq_set_affinity = msp_per_irq_set_affinity,
#endif
};
  92. void __init msp_per_irq_init(void)
  93. {
  94. int i;
  95. /* Mask/clear interrupts. */
  96. *PER_INT_MSK_REG = 0x00000000;
  97. *PER_INT_STS_REG = 0xFFFFFFFF;
  98. /* initialize all the IRQ descriptors */
  99. for (i = MSP_PER_INTBASE; i < MSP_PER_INTBASE + 32; i++) {
  100. irq_set_chip(i, &msp_per_irq_controller);
  101. #ifdef CONFIG_MIPS_MT_SMTC
  102. irq_hwmask[i] = C_IRQ4;
  103. #endif
  104. }
  105. }
  106. void msp_per_irq_dispatch(void)
  107. {
  108. u32 per_mask = *PER_INT_MSK_REG;
  109. u32 per_status = *PER_INT_STS_REG;
  110. u32 pending;
  111. pending = per_status & per_mask;
  112. if (pending) {
  113. do_IRQ(ffs(pending) + MSP_PER_INTBASE - 1);
  114. } else {
  115. spurious_interrupt();
  116. }
  117. }