smp.c

/*
 * Copyright 2010 Tilera Corporation. All Rights Reserved.
 *
 *   This program is free software; you can redistribute it and/or
 *   modify it under the terms of the GNU General Public License
 *   as published by the Free Software Foundation, version 2.
 *
 *   This program is distributed in the hope that it will be useful, but
 *   WITHOUT ANY WARRANTY; without even the implied warranty of
 *   MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 *   NON INFRINGEMENT.  See the GNU General Public License for
 *   more details.
 *
 * TILE SMP support routines.
 */

#include <linux/smp.h>
#include <linux/interrupt.h>	/* irqreturn_t, IRQ_HANDLED */
#include <linux/irq.h>
#include <asm/cacheflush.h>
#include <hv/hypervisor.h>	/* HV_Topology, HV_Recipient, hv_*() */

/* Shape of the cpu grid, as reported by the hypervisor. */
HV_Topology smp_topology __write_once;

/*
 * Top-level send_IPI*() functions to send messages to other cpus.
 */

/* Set by smp_send_stop() to avoid recursive panics. */
static int stopping_cpus;

void send_IPI_single(int cpu, int tag)
{
	HV_Recipient recip = {
		.y = cpu / smp_width,
		.x = cpu % smp_width,
		.state = HV_TO_BE_SENT
	};
	int rc = hv_send_message(&recip, 1, (HV_VirtAddr)&tag, sizeof(tag));
	BUG_ON(rc <= 0);
}
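
/*
 * Worked example of the linear-cpu to grid mapping above, assuming a
 * hypothetical 8x8 mesh (smp_width == 8): cpu 10 maps to y = 10 / 8 = 1
 * and x = 10 % 8 = 2, i.e. the message is addressed to the tile at
 * grid position (x=2, y=1).
 */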

void send_IPI_many(const struct cpumask *mask, int tag)
{
	HV_Recipient recip[NR_CPUS];
	int cpu, sent;
	int nrecip = 0;
	int my_cpu = smp_processor_id();
	for_each_cpu(cpu, mask) {
		HV_Recipient *r;
		BUG_ON(cpu == my_cpu);
		r = &recip[nrecip++];
		r->y = cpu / smp_width;
		r->x = cpu % smp_width;
		r->state = HV_TO_BE_SENT;
	}
	sent = 0;
	while (sent < nrecip) {
		int rc = hv_send_message(recip, nrecip,
					 (HV_VirtAddr)&tag, sizeof(tag));
		if (rc <= 0) {
			if (!stopping_cpus)	/* avoid recursive panic */
				panic("hv_send_message returned %d", rc);
			break;
		}
		sent += rc;
	}
}
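
/*
 * Note on the retry loop above (an inference from this usage, not
 * documented here): hv_send_message() may deliver to only a subset of
 * the recipients per call, returning the number newly sent and marking
 * serviced entries via their ->state fields, so re-invoking it with
 * the same array makes forward progress until all have been sent.
 */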

void send_IPI_allbutself(int tag)
{
	struct cpumask mask;
	cpumask_copy(&mask, cpu_online_mask);
	cpumask_clear_cpu(smp_processor_id(), &mask);
	send_IPI_many(&mask, tag);
}

/*
 * Provide smp_call_function_mask, but also run the function locally
 * if the calling cpu is included in the mask.
 */
void on_each_cpu_mask(const struct cpumask *mask, void (*func)(void *),
		      void *info, bool wait)
{
	int cpu = get_cpu();
	smp_call_function_many(mask, func, info, wait);
	if (cpumask_test_cpu(cpu, mask)) {
		local_irq_disable();
		func(info);
		local_irq_enable();
	}
	put_cpu();
}
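
/*
 * Illustrative usage sketch (not part of this file; the handler name
 * is hypothetical): run a handler on every online cpu, including the
 * caller, and wait until all of them have finished:
 *
 *	static void drain_local_queue(void *info) { ... }
 *
 *	on_each_cpu_mask(cpu_online_mask, drain_local_queue, NULL, true);
 */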

/*
 * Functions related to starting/stopping cpus.
 */

/* Handler to start the current cpu. */
static void smp_start_cpu_interrupt(void)
{
	extern unsigned long start_cpu_function_addr;
	get_irq_regs()->pc = start_cpu_function_addr;
}
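
/*
 * Note: the handler above launches the cpu by rewriting the saved
 * program counter, so returning from the message interrupt resumes
 * execution at start_cpu_function_addr rather than at the code that
 * was interrupted.
 */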

/* Handler to stop the current cpu. */
static void smp_stop_cpu_interrupt(void)
{
	set_cpu_online(smp_processor_id(), 0);
	raw_local_irq_disable_all();
	for (;;)
		asm("nap");	/* park in low-power idle; interrupts are off */
}

/* This function calls the 'stop' function on all other CPUs in the system. */
void smp_send_stop(void)
{
	stopping_cpus = 1;
	send_IPI_allbutself(MSG_TAG_STOP_CPU);
}

/*
 * Dispatch code called from hv_message_intr() for HV_MSG_TILE hv messages.
 */
void evaluate_message(int tag)
{
	switch (tag) {
	case MSG_TAG_START_CPU: /* Start up a cpu */
		smp_start_cpu_interrupt();
		break;
	case MSG_TAG_STOP_CPU: /* Sent to shut down slave cpus */
		smp_stop_cpu_interrupt();
		break;
	case MSG_TAG_CALL_FUNCTION_MANY: /* Call function on cpumask */
		generic_smp_call_function_interrupt();
		break;
	case MSG_TAG_CALL_FUNCTION_SINGLE: /* Call function on one other CPU */
		generic_smp_call_function_single_interrupt();
		break;
	default:
		panic("Unknown IPI message tag %d", tag);
		break;
	}
}
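
/*
 * The tag dispatched above is the same int payload that send_IPI_single()
 * and send_IPI_many() hand to hv_send_message(), so the sender's tag
 * selects which handler runs on the target cpu.
 */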

/*
 * flush_icache_range() code uses smp_call_function().
 */

struct ipi_flush {
	unsigned long start;
	unsigned long end;
};

static void ipi_flush_icache_range(void *info)
{
	struct ipi_flush *flush = (struct ipi_flush *) info;
	__flush_icache_range(flush->start, flush->end);
}

void flush_icache_range(unsigned long start, unsigned long end)
{
	struct ipi_flush flush = { start, end };
	preempt_disable();
	on_each_cpu(ipi_flush_icache_range, &flush, 1);
	preempt_enable();
}
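
/*
 * Rationale (an assumption based on the TILE design, not stated in
 * this file): each tile has its own instruction cache, so newly
 * written text must be flushed on every cpu; on_each_cpu() broadcasts
 * the flush and also runs it locally.
 */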

/*
 * The smp_send_reschedule() path does not use the hv_message_intr()
 * path but instead the faster tile_dev_intr() path for interrupts.
 */
irqreturn_t handle_reschedule_ipi(int irq, void *token)
{
	/*
	 * Nothing to do here; when we return from the interrupt, the
	 * rescheduling will occur there.  But do bump the interrupt
	 * profiler count in the meantime.
	 */
	__get_cpu_var(irq_stat).irq_resched_count++;
	return IRQ_HANDLED;
}

void smp_send_reschedule(int cpu)
{
	HV_Coord coord;
	WARN_ON(cpu_is_offline(cpu));
	coord.y = cpu / smp_width;
	coord.x = cpu % smp_width;
	hv_trigger_ipi(coord, IRQ_RESCHEDULE);
}