cevt-smtc.c

/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2007 MIPS Technologies, Inc.
 * Copyright (C) 2007 Ralf Baechle <ralf@linux-mips.org>
 * Copyright (C) 2008 Kevin D. Kissell, Paralogos sarl
 */
#include <linux/clockchips.h>
#include <linux/interrupt.h>
#include <linux/percpu.h>
#include <linux/smp.h>

#include <asm/smtc_ipi.h>
#include <asm/time.h>
#include <asm/cevt-r4k.h>
/*
 * Variant clock event timer support for SMTC on MIPS 34K, 1004K
 * or other MIPS MT cores.
 *
 * Notes on SMTC Support:
 *
 * SMTC has multiple microthread TCs pretending to be Linux CPUs.
 * But there's only one Count/Compare pair per VPE, and Compare
 * interrupts are taken opportunistically by available TCs
 * bound to the VPE with the Count register.  The new timer
 * framework provides for global broadcasts, but we really
 * want VPE-level multicasts for best behavior.  So instead
 * of invoking the high-level clock-event broadcast code,
 * this version of SMTC support uses the historical SMTC
 * multicast mechanisms "under the hood", appearing to the
 * generic clock layer as if the interrupts are per-CPU.
 *
 * The approach taken here is to maintain a set of NR_CPUS
 * virtual timers, and track which "CPU" needs to be alerted
 * at each event.
 *
 * It's unlikely that we'll see a MIPS MT core with more than
 * 2 VPEs, but we *know* that we won't need to handle more
 * VPEs than we have "CPUs".  So NR_CPUS arrays of NR_CPUS
 * elements are always going to be overkill, but also always
 * going to be enough.
 */
unsigned long smtc_nexttime[NR_CPUS][NR_CPUS];
static int smtc_nextinvpe[NR_CPUS];
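
/*
 * Illustration of the bookkeeping above, with hypothetical values:
 * on a single-VPE, four-TC configuration where CPUs 0..3 have pending
 * timestamps 500, 0 (nothing pending), 300 and 900 in smtc_nexttime[0][],
 * smtc_nextinvpe[0] would be 2 and Compare would be programmed to 300.
 */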
/*
 * Timestamps stored are absolute values to be programmed
 * into Count register.  Valid timestamps will never be zero.
 * If a zero Count value is actually calculated, it is converted
 * to be a 1, which will introduce one or two CPU cycles of error
 * roughly once every four billion events, which at 1000 HZ means
 * about once every 50 days.  If that's actually a problem, one
 * could alternate squashing 0 to 1 and to -1.
 */
#define MAKEVALID(x) (((x) == 0L) ? 1L : (x))
#define ISVALID(x) ((x) != 0L)
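
/*
 * For example, if reference + delta wraps to exactly 0, MAKEVALID()
 * stores 1 instead, so the "no event pending" encoding (0) is never
 * produced by accident; that event simply fires one cycle late.
 */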

/*
 * Time comparison is subtle, as it's really truncated
 * modular arithmetic.
 */
#define IS_SOONER(a, b, reference) \
	(((a) - (unsigned long)(reference)) < ((b) - (unsigned long)(reference)))
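
/*
 * A worked example with 32-bit wraparound (hypothetical values):
 * with reference = 0xfffff000, a = 0x00000200 and b = 0xfffff800,
 * the unsigned deltas are a - reference = 0x1200 and
 * b - reference = 0x800, so IS_SOONER(a, b, reference) is false:
 * b fires first even though a < b as a plain integer.
 */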

/*
 * CATCHUP_INCREMENT, used when the function falls behind the counter.
 * Could be an increasing function instead of a constant.
 */
#define CATCHUP_INCREMENT 64
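
/*
 * CATCHUP_INCREMENT is consumed by the retry loop in mips_next_event()
 * below: whenever a freshly written Compare value turns out to already
 * be behind Count, Compare is nudged forward by this many cycles and
 * retested until it lands in the future again.
 */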

static int mips_next_event(unsigned long delta,
			   struct clock_event_device *evt)
{
	unsigned long flags;
	unsigned int mtflags;
	unsigned long timestamp, reference, previous;
	unsigned long nextcomp = 0L;
	int vpe = current_cpu_data.vpe_id;
	int cpu = smp_processor_id();

	local_irq_save(flags);
	mtflags = dmt();

	/*
	 * Maintain the per-TC virtual timer
	 * and program the per-VPE shared Count register
	 * as appropriate here...
	 */
	reference = (unsigned long)read_c0_count();
	timestamp = MAKEVALID(reference + delta);
	/*
	 * To really model the clock, we have to catch the case
	 * where the current next-in-VPE timestamp is the old
	 * timestamp for the calling CPU, but the new value is
	 * in fact later.  In that case, we have to do a full
	 * scan and discover the new next-in-VPE CPU id and
	 * timestamp.
	 */
	previous = smtc_nexttime[vpe][cpu];
	if (cpu == smtc_nextinvpe[vpe] && ISVALID(previous)
	    && IS_SOONER(previous, timestamp, reference)) {
		int i;
		int soonest = cpu;

		/*
		 * Update timestamp array here, so that new
		 * value gets considered along with those of
		 * other virtual CPUs on the VPE.
		 */
		smtc_nexttime[vpe][cpu] = timestamp;
		for_each_online_cpu(i) {
			if (ISVALID(smtc_nexttime[vpe][i])
			    && IS_SOONER(smtc_nexttime[vpe][i],
					 smtc_nexttime[vpe][soonest], reference)) {
				soonest = i;
			}
		}
		smtc_nextinvpe[vpe] = soonest;
		nextcomp = smtc_nexttime[vpe][soonest];
	/*
	 * Otherwise, we don't have to process the whole array rank,
	 * we just have to see if the event horizon has gotten closer.
	 */
	} else {
		if (!ISVALID(smtc_nexttime[vpe][smtc_nextinvpe[vpe]]) ||
		    IS_SOONER(timestamp,
			      smtc_nexttime[vpe][smtc_nextinvpe[vpe]], reference)) {
			smtc_nextinvpe[vpe] = cpu;
			nextcomp = timestamp;
		}
		/*
		 * Since next-in-VPE may be the same as the executing
		 * virtual CPU, we update the array *after* checking
		 * its value.
		 */
		smtc_nexttime[vpe][cpu] = timestamp;
	}
	/*
	 * It may be that, in fact, we don't need to update Compare,
	 * but if we do, we want to make sure we didn't fall into
	 * a crack just behind Count.
	 */
	if (ISVALID(nextcomp)) {
		write_c0_compare(nextcomp);
		ehb();
		/*
		 * We never return an error, we just make sure
		 * that we trigger the handlers as quickly as
		 * we can if we fell behind.
		 */
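		/*
		 * The unsigned test below treats any difference greater
		 * than LONG_MAX as "already in the past": if Count has
		 * run past nextcomp, (nextcomp - Count) wraps to a huge
		 * unsigned value, so Compare keeps getting pushed forward
		 * until it is genuinely ahead of Count again.
		 */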
		while ((nextcomp - (unsigned long)read_c0_count())
		       > (unsigned long)LONG_MAX) {
			nextcomp += CATCHUP_INCREMENT;
			write_c0_compare(nextcomp);
			ehb();
		}
	}
	emt(mtflags);
	local_irq_restore(flags);
	return 0;
}
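
/*
 * Walk all online virtual CPUs on the given VPE, dispatch any whose
 * timestamps have already expired (directly for the running CPU, via
 * an SMTC_CLOCK_TICK IPI for the others), and reprogram Compare for
 * the soonest timestamp still pending.  If Count overtakes the new
 * Compare value before it takes effect, start the scan over.
 */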
void smtc_distribute_timer(int vpe)
{
	unsigned long flags;
	unsigned int mtflags;
	int cpu;
	struct clock_event_device *cd;
	unsigned long nextstamp = 0L;
	unsigned long reference;

repeat:
	for_each_online_cpu(cpu) {
		/*
		 * Find virtual CPUs within the current VPE who have
		 * unserviced timer requests whose time is now past.
		 */
		local_irq_save(flags);
		mtflags = dmt();
		if (cpu_data[cpu].vpe_id == vpe &&
		    ISVALID(smtc_nexttime[vpe][cpu])) {
			reference = (unsigned long)read_c0_count();
			if ((smtc_nexttime[vpe][cpu] - reference)
			    > (unsigned long)LONG_MAX) {
				smtc_nexttime[vpe][cpu] = 0L;
				emt(mtflags);
				local_irq_restore(flags);
				/*
				 * We don't send IPIs to ourselves.
				 */
				if (cpu != smp_processor_id()) {
					smtc_send_ipi(cpu, SMTC_CLOCK_TICK, 0);
				} else {
					cd = &per_cpu(mips_clockevent_device, cpu);
					cd->event_handler(cd);
				}
			} else {
				/* Local to VPE but valid time not yet reached. */
				if (!ISVALID(nextstamp) ||
				    IS_SOONER(smtc_nexttime[vpe][cpu], nextstamp,
					      reference)) {
					smtc_nextinvpe[vpe] = cpu;
					nextstamp = smtc_nexttime[vpe][cpu];
				}
				emt(mtflags);
				local_irq_restore(flags);
			}
		} else {
			emt(mtflags);
			local_irq_restore(flags);
		}
	}
	/* Reprogram for interrupt at next soonest timestamp for VPE */
	if (ISVALID(nextstamp)) {
		write_c0_compare(nextstamp);
		ehb();
		if ((nextstamp - (unsigned long)read_c0_count())
		    > (unsigned long)LONG_MAX)
			goto repeat;
	}
}

irqreturn_t c0_compare_interrupt(int irq, void *dev_id)
{
	int cpu = smp_processor_id();

	/* If we're running SMTC, we've got MIPS MT and therefore MIPS32R2 */
	handle_perf_irq(1);

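	/*
	 * Bit 30 of the Cause register is TI, the timer-interrupt
	 * pending bit on MIPS32R2 cores; writing Compare below clears it.
	 */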
	if (read_c0_cause() & (1 << 30)) {
		/* Clear Count/Compare Interrupt */
		write_c0_compare(read_c0_compare());
		smtc_distribute_timer(cpu_data[cpu].vpe_id);
	}
	return IRQ_HANDLED;
}

int __cpuinit smtc_clockevent_init(void)
{
	uint64_t mips_freq = mips_hpt_frequency;
	unsigned int cpu = smp_processor_id();
	struct clock_event_device *cd;
	unsigned int irq;
	int i;
	int j;

	if (!cpu_has_counter || !mips_hpt_frequency)
		return -ENXIO;
	if (cpu == 0) {
		for (i = 0; i < num_possible_cpus(); i++) {
			smtc_nextinvpe[i] = 0;
			for (j = 0; j < num_possible_cpus(); j++)
				smtc_nexttime[i][j] = 0L;
		}
		/*
		 * SMTC also can't have the usability test
		 * run by secondary TCs once Compare is in use.
		 */
		if (!c0_compare_int_usable())
			return -ENXIO;
	}
	/*
	 * With vectored interrupts things are getting platform specific.
	 * get_c0_compare_int is a hook to allow a platform to return the
	 * interrupt number of its liking.
	 */
	irq = MIPS_CPU_IRQ_BASE + cp0_compare_irq;
	if (get_c0_compare_int)
		irq = get_c0_compare_int();

	cd = &per_cpu(mips_clockevent_device, cpu);

	cd->name		= "MIPS";
	cd->features		= CLOCK_EVT_FEAT_ONESHOT;

	/* Calculate the min / max delta */
	cd->mult	= div_sc((unsigned long) mips_freq, NSEC_PER_SEC, 32);
	cd->shift	= 32;
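	/*
	 * Worked example of the mult/shift scaling (hypothetical 100 MHz
	 * timer): mult = (10^8 << 32) / 10^9 = 429496729, so a 1 ms event
	 * (delta_ns = 10^6) converts to (10^6 * 429496729) >> 32, which
	 * is about 100000 Count cycles, as expected.
	 */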
	cd->max_delta_ns	= clockevent_delta2ns(0x7fffffff, cd);
	cd->min_delta_ns	= clockevent_delta2ns(0x300, cd);

	cd->rating		= 300;
	cd->irq			= irq;
	cd->cpumask		= cpumask_of(cpu);
	cd->set_next_event	= mips_next_event;
	cd->set_mode		= mips_set_clock_mode;
	cd->event_handler	= mips_event_handler;

	clockevents_register_device(cd);

	/*
	 * On SMTC we only want to do the data structure
	 * initialization and IRQ setup once.
	 */
	if (cpu)
		return 0;
	/*
	 * And we need the hwmask associated with the c0_compare
	 * vector to be initialized.
	 */
	irq_hwmask[irq] = (0x100 << cp0_compare_irq);
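	/*
	 * 0x100 is bit 8 of the Status register, the first IM (interrupt
	 * mask) bit, so this records which Status.IM bit belongs to the
	 * compare interrupt line; SMTC's interrupt dispatch consults
	 * irq_hwmask[] to find the IM bit for a given vector.
	 */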
	if (cp0_timer_irq_installed)
		return 0;

	cp0_timer_irq_installed = 1;

	setup_irq(irq, &c0_compare_irqaction);

	return 0;
}