malta-smtc.c

/*
 * Malta Platform-specific hooks for SMP operation
 */

#include <linux/irq.h>
#include <linux/init.h>

#include <asm/mipsregs.h>
#include <asm/mipsmtregs.h>
#include <asm/smtc.h>
#include <asm/smtc_ipi.h>

/* VPE/SMP Prototype implements platform interfaces directly */

/*
 * Cause the specified action to be performed on a targeted "CPU"
 */
static void msmtc_send_ipi_single(int cpu, unsigned int action)
{
	/* "CPU" may be TC of same VPE, VPE of same CPU, or different CPU */
	smtc_send_ipi(cpu, LINUX_SMP_IPI, action);
}
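
/*
 * Fan a multi-target IPI out as a series of single-target IPIs,
 * one per "CPU" (i.e. per TC) set in the mask.
 */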
static void msmtc_send_ipi_mask(cpumask_t mask, unsigned int action)
{
	unsigned int i;

	for_each_cpu_mask(i, mask)
		msmtc_send_ipi_single(i, action);
}

/*
 * Post-config but pre-boot cleanup entry point
 */
static void __cpuinit msmtc_init_secondary(void)
{
	void smtc_init_secondary(void);
	int myvpe;

	/* Don't enable Malta I/O interrupts (IP2) for secondary VPEs */
	myvpe = read_c0_tcbind() & TCBIND_CURVPE;
	if (myvpe != 0) {
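		/*
		 * Status.IM occupies bits 15..8, so (0x100 << n) is the
		 * enable bit for interrupt line n: mask everything, then
		 * re-enable only the timer and IPI lines on this VPE
		 * (plus the performance counter line where it is distinct).
		 */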
		/* Ideally, this should be done only once per VPE, but... */
		clear_c0_status(ST0_IM);
		set_c0_status((0x100 << cp0_compare_irq)
				| (0x100 << MIPS_CPU_IPI_IRQ));
		if (cp0_perfcount_irq >= 0)
			set_c0_status(0x100 << cp0_perfcount_irq);
	}

	smtc_init_secondary();
}

/*
 * Platform "CPU" startup hook
 */
static void __cpuinit msmtc_boot_secondary(int cpu, struct task_struct *idle)
{
	smtc_boot_secondary(cpu, idle);
}

/*
 * SMP initialization finalization entry point
 */
static void __cpuinit msmtc_smp_finish(void)
{
	smtc_smp_finish();
}

/*
 * Hook for after all CPUs are online
 */
static void msmtc_cpus_done(void)
{
}

/*
 * Platform SMP pre-initialization
 *
 * As noted above, we can assume a single CPU for now
 * but it may be multithreaded.
 */
static void __init msmtc_smp_setup(void)
{
	/*
	 * We won't get the definitive value until we've run
	 * smtc_prepare_cpus later, but we would appear to need
	 * an upper bound now.
	 */
	smp_num_siblings = smtc_build_cpu_map(0);
}
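
/*
 * smtc_prepare_cpus() does the real work of setting up the TCs/VPEs
 * that will act as "CPUs"; only after it has run is the definitive
 * CPU count assumed above actually known.
 */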
static void __init msmtc_prepare_cpus(unsigned int max_cpus)
{
	smtc_prepare_cpus(max_cpus);
}
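
/*
 * The board setup code hands this table to the generic MIPS SMP
 * layer (normally via register_smp_ops()), which then calls back
 * into the hooks above at the appropriate points during bring-up.
 */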
struct plat_smp_ops msmtc_smp_ops = {
	.send_ipi_single	= msmtc_send_ipi_single,
	.send_ipi_mask		= msmtc_send_ipi_mask,
	.init_secondary		= msmtc_init_secondary,
	.smp_finish		= msmtc_smp_finish,
	.cpus_done		= msmtc_cpus_done,
	.boot_secondary		= msmtc_boot_secondary,
	.smp_setup		= msmtc_smp_setup,
	.prepare_cpus		= msmtc_prepare_cpus,
};

#ifdef CONFIG_MIPS_MT_SMTC_IRQAFF
/*
 * IRQ affinity hook
 */
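/*
 * Typically reached from the interrupt controller's set_affinity
 * method when an IRQ's CPU affinity is changed, e.g. by writing
 * /proc/irq/<n>/smp_affinity.
 */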
void plat_set_irq_affinity(unsigned int irq, cpumask_t affinity)
{
	cpumask_t tmask = affinity;
	int cpu = 0;
	void smtc_set_irq_affinity(unsigned int irq, cpumask_t aff);

	/*
	 * On the legacy Malta development board, all I/O interrupts
	 * are routed through the 8259 and combined in a single signal
	 * to the CPU daughterboard, and on the CoreFPGA2/3 34K models,
	 * that signal is brought to IP2 of both VPEs. To avoid racing
	 * concurrent interrupt service events, IP2 is enabled only on
	 * one VPE, by convention VPE0. So long as no bits are ever
	 * cleared in the affinity mask, there will never be any
	 * interrupt forwarding. But as soon as a program or operator
	 * sets affinity for one of the related IRQs, we need to make
	 * sure that we don't ever try to forward across the VPE boundary,
	 * at least not until we engineer a system where the interrupt
	 * _ack() or _end() function can somehow know that it corresponds
	 * to an interrupt taken on another VPE, and perform the appropriate
	 * restoration of Status.IM state using MFTR/MTTR instead of the
	 * normal local behavior. We also ensure that no attempt will
	 * be made to forward to an offline "CPU".
	 */
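
	/* Restrict the mask to online "CPUs" bound to VPE0 */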
	for_each_cpu_mask(cpu, affinity) {
		if ((cpu_data[cpu].vpe_id != 0) || !cpu_online(cpu))
			cpu_clear(cpu, tmask);
	}
	irq_desc[irq].affinity = tmask;

	if (cpus_empty(tmask))
		/*
		 * We could restore a default mask here, but the
		 * runtime code can anyway deal with the null set
		 */
		printk(KERN_WARNING
		       "IRQ affinity leaves no legal CPU for IRQ %d\n", irq);

	/* Do any generic SMTC IRQ affinity setup */
	smtc_set_irq_affinity(irq, tmask);
}

#endif /* CONFIG_MIPS_MT_SMTC_IRQAFF */