cevt-smtc.c
/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2007 MIPS Technologies, Inc.
 * Copyright (C) 2007 Ralf Baechle <ralf@linux-mips.org>
 * Copyright (C) 2008 Kevin D. Kissell, Paralogos sarl
 */
#include <linux/clockchips.h>
#include <linux/interrupt.h>
#include <linux/percpu.h>
#include <linux/smp.h>

#include <asm/smtc_ipi.h>
#include <asm/time.h>
#include <asm/cevt-r4k.h>
/*
 * Variant clock event timer support for SMTC on MIPS 34K, 1004K
 * or other MIPS MT cores.
 *
 * Notes on SMTC Support:
 *
 * SMTC has multiple microthread TCs pretending to be Linux CPUs.
 * But there's only one Count/Compare pair per VPE, and Compare
 * interrupts are taken opportunistically by available TCs
 * bound to the VPE with the Count register. The new timer
 * framework provides for global broadcasts, but we really
 * want VPE-level multicasts for best behavior. So instead
 * of invoking the high-level clock-event broadcast code,
 * this version of SMTC support uses the historical SMTC
 * multicast mechanisms "under the hood", appearing to the
 * generic clock layer as if the interrupts are per-CPU.
 *
 * The approach taken here is to maintain a set of NR_CPUS
 * virtual timers, and track which "CPU" needs to be alerted
 * at each event.
 *
 * It's unlikely that we'll see a MIPS MT core with more than
 * 2 VPEs, but we *know* that we won't need to handle more
 * VPEs than we have "CPUs". So NR_CPUS arrays of NR_CPUS
 * elements are always going to be overkill, but always
 * going to be enough.
 */

unsigned long smtc_nexttime[NR_CPUS][NR_CPUS];
static int smtc_nextinvpe[NR_CPUS];

/*
 * Timestamps stored are absolute values to be programmed
 * into Count register. Valid timestamps will never be zero.
 * If a zero Count value is actually calculated, it is converted
 * to be a 1, which will introduce one or two CPU cycles of error
 * roughly once every four billion events, which at 1000 HZ means
 * about once every 50 days. If that's actually a problem, one
 * could alternate squashing 0 to 1 and to -1.
 */
#define MAKEVALID(x)	(((x) == 0L) ? 1L : (x))
#define ISVALID(x)	((x) != 0L)
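/*
 * Example: MAKEVALID(0) yields 1, while any nonzero timestamp passes
 * through unchanged, so a zero entry in smtc_nexttime[][] can serve
 * as the "no event pending" sentinel that ISVALID() tests for.
 */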

/*
 * Time comparison is subtle, as it's really truncated
 * modular arithmetic.
 */
#define IS_SOONER(a, b, reference) \
	(((a) - (unsigned long)(reference)) < ((b) - (unsigned long)(reference)))
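/*
 * Worked example (hypothetical 32-bit values): with reference = 0xfffffff0,
 * a = 0x00000010 and b = 0xffffff00, the unsigned differences are
 * (a - reference) = 0x20 and (b - reference) = 0xffffff10, so IS_SOONER
 * reports a as the earlier event even though Count is about to wrap.
 */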

/*
 * CATCHUP_INCREMENT, used when the next-event programming
 * falls behind the Count register. Could be an increasing
 * function instead of a constant.
 */
#define CATCHUP_INCREMENT 64

static int mips_next_event(unsigned long delta,
			   struct clock_event_device *evt)
{
	unsigned long flags;
	unsigned int mtflags;
	unsigned long timestamp, reference, previous;
	unsigned long nextcomp = 0L;
	int vpe = current_cpu_data.vpe_id;
	int cpu = smp_processor_id();

	local_irq_save(flags);
	mtflags = dmt();

	/*
	 * Maintain the per-TC virtual timer
	 * and program the per-VPE shared Count register
	 * as appropriate here...
	 */
	reference = (unsigned long)read_c0_count();
	timestamp = MAKEVALID(reference + delta);
	/*
	 * To really model the clock, we have to catch the case
	 * where the current next-in-VPE timestamp is the old
	 * timestamp for the calling CPU, but the new value is
	 * in fact later. In that case, we have to do a full
	 * scan and discover the new next-in-VPE CPU id and
	 * timestamp.
	 */
	previous = smtc_nexttime[vpe][cpu];
	if (cpu == smtc_nextinvpe[vpe] && ISVALID(previous)
	    && IS_SOONER(previous, timestamp, reference)) {
		int i;
		int soonest = cpu;

		/*
		 * Update timestamp array here, so that new
		 * value gets considered along with those of
		 * other virtual CPUs on the VPE.
		 */
		smtc_nexttime[vpe][cpu] = timestamp;
		for_each_online_cpu(i) {
			if (ISVALID(smtc_nexttime[vpe][i])
			    && IS_SOONER(smtc_nexttime[vpe][i],
					 smtc_nexttime[vpe][soonest], reference)) {
				soonest = i;
			}
		}
		smtc_nextinvpe[vpe] = soonest;
		nextcomp = smtc_nexttime[vpe][soonest];
	/*
	 * Otherwise, we don't have to process the whole row of
	 * the array, we just have to see if the event horizon
	 * has gotten closer.
	 */
	} else {
		if (!ISVALID(smtc_nexttime[vpe][smtc_nextinvpe[vpe]]) ||
		    IS_SOONER(timestamp,
			      smtc_nexttime[vpe][smtc_nextinvpe[vpe]], reference)) {
			smtc_nextinvpe[vpe] = cpu;
			nextcomp = timestamp;
		}
		/*
		 * Since next-in-VPE may be the same as the executing
		 * virtual CPU, we update the array *after* checking
		 * its value.
		 */
		smtc_nexttime[vpe][cpu] = timestamp;
	}

	/*
	 * It may be that, in fact, we don't need to update Compare,
	 * but if we do, we want to make sure we didn't fall into
	 * a crack just behind Count.
	 */
	if (ISVALID(nextcomp)) {
		write_c0_compare(nextcomp);
		ehb();
		/*
		 * We never return an error, we just make sure
		 * that we trigger the handlers as quickly as
		 * we can if we fell behind.
		 */
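		/*
		 * Note: an unsigned difference greater than LONG_MAX means
		 * nextcomp has already slipped behind Count, i.e. the
		 * programmed Compare value is in the past.
		 */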
		while ((nextcomp - (unsigned long)read_c0_count())
		       > (unsigned long)LONG_MAX) {
			nextcomp += CATCHUP_INCREMENT;
			write_c0_compare(nextcomp);
			ehb();
		}
	}
	emt(mtflags);
	local_irq_restore(flags);
	return 0;
}

void smtc_distribute_timer(int vpe)
{
	unsigned long flags;
	unsigned int mtflags;
	int cpu;
	struct clock_event_device *cd;
	unsigned long nextstamp;
	unsigned long reference;

repeat:
	nextstamp = 0L;
	for_each_online_cpu(cpu) {
		/*
		 * Find virtual CPUs within the current VPE who have
		 * unserviced timer requests whose time is now past.
		 */
		local_irq_save(flags);
		mtflags = dmt();
		if (cpu_data[cpu].vpe_id == vpe &&
		    ISVALID(smtc_nexttime[vpe][cpu])) {
			reference = (unsigned long)read_c0_count();
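			/*
			 * As above, an unsigned difference exceeding LONG_MAX
			 * means the stored timestamp is already behind Count,
			 * so this virtual CPU's event is due.
			 */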
			if ((smtc_nexttime[vpe][cpu] - reference)
			    > (unsigned long)LONG_MAX) {
				smtc_nexttime[vpe][cpu] = 0L;
				emt(mtflags);
				local_irq_restore(flags);
				/*
				 * We don't send IPIs to ourselves.
				 */
				if (cpu != smp_processor_id()) {
					smtc_send_ipi(cpu, SMTC_CLOCK_TICK, 0);
				} else {
					cd = &per_cpu(mips_clockevent_device, cpu);
					cd->event_handler(cd);
				}
			} else {
				/* Local to VPE but valid time not yet reached. */
				if (!ISVALID(nextstamp) ||
				    IS_SOONER(smtc_nexttime[vpe][cpu], nextstamp,
					      reference)) {
					smtc_nextinvpe[vpe] = cpu;
					nextstamp = smtc_nexttime[vpe][cpu];
				}
				emt(mtflags);
				local_irq_restore(flags);
			}
		} else {
			emt(mtflags);
			local_irq_restore(flags);
		}
	}
	/* Reprogram for interrupt at next soonest timestamp for VPE */
	if (ISVALID(nextstamp)) {
		write_c0_compare(nextstamp);
		ehb();
		if ((nextstamp - (unsigned long)read_c0_count())
		    > (unsigned long)LONG_MAX)
			goto repeat;
	}
}

irqreturn_t c0_compare_interrupt(int irq, void *dev_id)
{
	int cpu = smp_processor_id();

	/* If we're running SMTC, we've got MIPS MT and therefore MIPS32R2 */
	handle_perf_irq(1);
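	/*
	 * Cause bit 30 is TI, the timer-interrupt-pending flag on
	 * MIPS32R2 cores (named CAUSEF_TI in the generic headers).
	 */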
	if (read_c0_cause() & (1 << 30)) {
		/* Clear Count/Compare Interrupt */
		write_c0_compare(read_c0_compare());
		smtc_distribute_timer(cpu_data[cpu].vpe_id);
	}
	return IRQ_HANDLED;
}

int __cpuinit smtc_clockevent_init(void)
{
	uint64_t mips_freq = mips_hpt_frequency;
	unsigned int cpu = smp_processor_id();
	struct clock_event_device *cd;
	unsigned int irq;
	int i;
	int j;

	if (!cpu_has_counter || !mips_hpt_frequency)
		return -ENXIO;
	if (cpu == 0) {
		for (i = 0; i < num_possible_cpus(); i++) {
			smtc_nextinvpe[i] = 0;
			for (j = 0; j < num_possible_cpus(); j++)
				smtc_nexttime[i][j] = 0L;
		}
		/*
		 * SMTC also can't have the usability test
		 * run by secondary TCs once Compare is in use.
		 */
		if (!c0_compare_int_usable())
			return -ENXIO;
	}

	/*
	 * With vectored interrupts things are getting platform specific.
	 * get_c0_compare_int is a hook to allow a platform to return the
	 * interrupt number of its liking.
	 */
	irq = MIPS_CPU_IRQ_BASE + cp0_compare_irq;
	if (get_c0_compare_int)
		irq = get_c0_compare_int();

	cd = &per_cpu(mips_clockevent_device, cpu);

	cd->name = "MIPS";
	cd->features = CLOCK_EVT_FEAT_ONESHOT;

	/* Calculate the min / max delta */
	cd->mult = div_sc((unsigned long) mips_freq, NSEC_PER_SEC, 32);
	cd->shift = 32;
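	/*
	 * With shift = 32, mult is (mips_freq << 32) / NSEC_PER_SEC, so a
	 * delta in nanoseconds converts to Count ticks as
	 * (delta_ns * mult) >> 32.
	 */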
	cd->max_delta_ns = clockevent_delta2ns(0x7fffffff, cd);
	cd->min_delta_ns = clockevent_delta2ns(0x300, cd);

	cd->rating = 300;
	cd->irq = irq;
	cd->cpumask = cpumask_of(cpu);
	cd->set_next_event = mips_next_event;
	cd->set_mode = mips_set_clock_mode;
	cd->event_handler = mips_event_handler;

	clockevents_register_device(cd);

	/*
	 * On SMTC we only want to do the data structure
	 * initialization and IRQ setup once.
	 */
	if (cpu)
		return 0;
	/*
	 * And we need the hwmask associated with the c0_compare
	 * vector to be initialized.
	 */
	irq_hwmask[irq] = (0x100 << cp0_compare_irq);
	if (cp0_timer_irq_installed)
		return 0;

	cp0_timer_irq_installed = 1;

	setup_irq(irq, &c0_compare_irqaction);

	return 0;
}