malta_smtc.c

/*
 * Malta Platform-specific hooks for SMP operation
 */
#include <linux/irq.h>
#include <linux/init.h>

#include <asm/mipsregs.h>
#include <asm/mipsmtregs.h>
#include <asm/smtc.h>
#include <asm/smtc_ipi.h>

/* VPE/SMP Prototype implements platform interfaces directly */

/*
 * Cause the specified action to be performed on a targeted "CPU"
 */
static void msmtc_send_ipi_single(int cpu, unsigned int action)
{
	/* "CPU" may be TC of same VPE, VPE of same CPU, or different CPU */
	smtc_send_ipi(cpu, LINUX_SMP_IPI, action);
}

static void msmtc_send_ipi_mask(cpumask_t mask, unsigned int action)
{
	unsigned int i;

	for_each_cpu_mask(i, mask)
		msmtc_send_ipi_single(i, action);
}
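/*
 * Usage sketch (illustrative, not part of this file): the generic MIPS
 * SMP code reaches these hooks through the registered plat_smp_ops, so
 * a cross-CPU function call ends up here roughly as:
 *
 *	mp_ops->send_ipi_mask(mask, SMP_CALL_FUNCTION);
 *
 * which fans out to msmtc_send_ipi_single() for each "CPU" (TC) in the
 * mask and from there into the SMTC IPI queueing in smtc_send_ipi().
 */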
/*
 * Post-config but pre-boot cleanup entry point
 */
static void __cpuinit msmtc_init_secondary(void)
{
	void smtc_init_secondary(void);
	int myvpe;

	/* Don't enable Malta I/O interrupts (IP2) for secondary VPEs */
	myvpe = read_c0_tcbind() & TCBIND_CURVPE;
	if (myvpe != 0) {
		/* Ideally, this should be done only once per VPE, but... */
		clear_c0_status(ST0_IM);
		/* Status.IM0 is bit 8, so 0x100 << n re-enables only IM[n] */
		set_c0_status((0x100 << cp0_compare_irq)
				| (0x100 << MIPS_CPU_IPI_IRQ));
		if (cp0_perfcount_irq >= 0)
			set_c0_status(0x100 << cp0_perfcount_irq);
	}

	smtc_init_secondary();
}
/*
 * Platform "CPU" startup hook
 */
static void __cpuinit msmtc_boot_secondary(int cpu, struct task_struct *idle)
{
	smtc_boot_secondary(cpu, idle);
}

/*
 * SMP initialization finalization entry point
 */
static void __cpuinit msmtc_smp_finish(void)
{
	smtc_smp_finish();
}

/*
 * Hook for after all CPUs are online
 */
static void msmtc_cpus_done(void)
{
}

/*
 * Platform SMP pre-initialization
 *
 * As noted above, we can assume a single CPU for now
 * but it may be multithreaded.
 */
static void __init msmtc_smp_setup(void)
{
	mipsmt_build_cpu_map(0);
}

static void __init msmtc_prepare_cpus(unsigned int max_cpus)
{
	mipsmt_prepare_cpus();
}
struct plat_smp_ops msmtc_smp_ops = {
	.send_ipi_single	= msmtc_send_ipi_single,
	.send_ipi_mask		= msmtc_send_ipi_mask,
	.init_secondary		= msmtc_init_secondary,
	.smp_finish		= msmtc_smp_finish,
	.cpus_done		= msmtc_cpus_done,
	.boot_secondary		= msmtc_boot_secondary,
	.smp_setup		= msmtc_smp_setup,
	.prepare_cpus		= msmtc_prepare_cpus,
};
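/*
 * Usage sketch (illustrative, not part of this file): these ops only take
 * effect once the board code registers them during early boot, typically
 * something like
 *
 *	#ifdef CONFIG_MIPS_MT_SMTC
 *		register_smp_ops(&msmtc_smp_ops);
 *	#endif
 *
 * in the Malta prom_init()/setup path.
 */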
#ifdef CONFIG_MIPS_MT_SMTC_IRQAFF
/*
 * IRQ affinity hook
 */
void plat_set_irq_affinity(unsigned int irq, cpumask_t affinity)
{
	cpumask_t tmask = affinity;
	int cpu = 0;
	void smtc_set_irq_affinity(unsigned int irq, cpumask_t aff);

	/*
	 * On the legacy Malta development board, all I/O interrupts
	 * are routed through the 8259 and combined in a single signal
	 * to the CPU daughterboard, and on the CoreFPGA2/3 34K models,
	 * that signal is brought to IP2 of both VPEs. To avoid racing
	 * concurrent interrupt service events, IP2 is enabled only on
	 * one VPE, by convention VPE0. So long as no bits are ever
	 * cleared in the affinity mask, there will never be any
	 * interrupt forwarding. But as soon as a program or operator
	 * sets affinity for one of the related IRQs, we need to make
	 * sure that we don't ever try to forward across the VPE boundary,
	 * at least not until we engineer a system where the interrupt
	 * _ack() or _end() function can somehow know that it corresponds
	 * to an interrupt taken on another VPE, and perform the appropriate
	 * restoration of Status.IM state using MFTR/MTTR instead of the
	 * normal local behavior. We also ensure that no attempt will
	 * be made to forward to an offline "CPU".
	 */
	for_each_cpu_mask(cpu, affinity) {
		if ((cpu_data[cpu].vpe_id != 0) || !cpu_online(cpu))
			cpu_clear(cpu, tmask);
	}
	irq_desc[irq].affinity = tmask;

	if (cpus_empty(tmask))
		/*
		 * We could restore a default mask here, but the
		 * runtime code can anyway deal with the null set
		 */
		printk(KERN_WARNING
			"IRQ affinity leaves no legal CPU for IRQ %d\n", irq);

	/* Do any generic SMTC IRQ affinity setup */
	smtc_set_irq_affinity(irq, tmask);
}
#endif /* CONFIG_MIPS_MT_SMTC_IRQAFF */
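/*
 * Usage sketch (assumption, not part of this file): with
 * CONFIG_MIPS_MT_SMTC_IRQAFF enabled, the board's irq_chip for the 8259
 * interrupts would route userspace affinity writes here via its
 * .set_affinity method, roughly
 *
 *	static struct irq_chip i8259A_chip = {
 *		...
 *		.set_affinity	= plat_set_irq_affinity,
 *	};
 *
 * so that writing a mask to /proc/irq/<n>/smp_affinity ends up in
 * plat_set_irq_affinity() above.
 */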