cevt-smtc.c 8.7 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321
  1. /*
  2. * This file is subject to the terms and conditions of the GNU General Public
  3. * License. See the file "COPYING" in the main directory of this archive
  4. * for more details.
  5. *
  6. * Copyright (C) 2007 MIPS Technologies, Inc.
  7. * Copyright (C) 2007 Ralf Baechle <ralf@linux-mips.org>
  8. * Copyright (C) 2008 Kevin D. Kissell, Paralogos sarl
  9. */
  10. #include <linux/clockchips.h>
  11. #include <linux/interrupt.h>
  12. #include <linux/percpu.h>
  13. #include <asm/smtc_ipi.h>
  14. #include <asm/time.h>
  15. #include <asm/cevt-r4k.h>
  16. /*
  17. * Variant clock event timer support for SMTC on MIPS 34K, 1004K
  18. * or other MIPS MT cores.
  19. *
  20. * Notes on SMTC Support:
  21. *
  22. * SMTC has multiple microthread TCs pretending to be Linux CPUs.
  23. * But there's only one Count/Compare pair per VPE, and Compare
* interrupts are taken opportunistically by available TCs
  25. * bound to the VPE with the Count register. The new timer
  26. * framework provides for global broadcasts, but we really
  27. * want VPE-level multicasts for best behavior. So instead
  28. * of invoking the high-level clock-event broadcast code,
  29. * this version of SMTC support uses the historical SMTC
  30. * multicast mechanisms "under the hood", appearing to the
  31. * generic clock layer as if the interrupts are per-CPU.
  32. *
  33. * The approach taken here is to maintain a set of NR_CPUS
  34. * virtual timers, and track which "CPU" needs to be alerted
  35. * at each event.
  36. *
  37. * It's unlikely that we'll see a MIPS MT core with more than
  38. * 2 VPEs, but we *know* that we won't need to handle more
  39. * VPEs than we have "CPUs". So NCPUs arrays of NCPUs elements
  40. * is always going to be overkill, but always going to be enough.
  41. */
/*
 * smtc_nexttime[vpe][cpu] holds the pending event timestamp for each
 * virtual CPU, indexed by the VPE that owns the Count/Compare pair;
 * smtc_nextinvpe[vpe] caches which CPU on that VPE has the soonest
 * pending event.  NR_CPUS x NR_CPUS is overkill (there are far fewer
 * VPEs than CPUs) but always sufficient -- see the header comment.
 */
unsigned long smtc_nexttime[NR_CPUS][NR_CPUS];
static int smtc_nextinvpe[NR_CPUS];

/*
 * Timestamps stored are absolute values to be programmed
 * into Count register.  Valid timestamps will never be zero.
 * If a zero Count value is actually calculated, it is converted
 * to be a 1, which will introduce one or two CPU cycles of error
 * roughly once every four billion events, which at 1000 HZ means
 * about once every 50 days.  If that's actually a problem, one
 * could alternate squashing 0 to 1 and to -1.
 */

#define MAKEVALID(x) (((x) == 0L) ? 1L : (x))
#define ISVALID(x) ((x) != 0L)

/*
 * Time comparison is subtle, as it's really truncated
 * modular arithmetic: "a sooner than b" means a is fewer
 * Count ticks after the reference point than b is.
 */

#define IS_SOONER(a, b, reference) \
    (((a) - (unsigned long)(reference)) < ((b) - (unsigned long)(reference)))

/*
 * CATCHUP_INCREMENT, used when the function falls behind the counter.
 * Could be an increasing function instead of a constant;
 */

#define CATCHUP_INCREMENT 64
/*
 * Oneshot "set next event" handler for the SMTC clockevent.
 *
 * Records the requested expiry in the calling CPU's slot of the
 * per-VPE virtual-timer array, then reprograms this VPE's single
 * shared Compare register if the new event is now the soonest one
 * pending on the VPE.  Runs with local interrupts off and with
 * multithreading disabled (dmt) so the array and Compare are
 * updated atomically with respect to the other TCs on this VPE.
 *
 * @delta: ticks from now at which the event should fire
 * @evt:   clockevent device (unused; state is found via
 *         smp_processor_id() and the current VPE id)
 *
 * Always returns 0: rather than report a missed deadline, Compare
 * is bumped forward by CATCHUP_INCREMENT until it leads Count again.
 */
static int mips_next_event(unsigned long delta,
	struct clock_event_device *evt)
{
	unsigned long flags;
	unsigned int mtflags;
	unsigned long timestamp, reference, previous;
	unsigned long nextcomp = 0L;
	int vpe = current_cpu_data.vpe_id;
	int cpu = smp_processor_id();

	local_irq_save(flags);
	mtflags = dmt();	/* halt other TCs on this VPE */

	/*
	 * Maintain the per-TC virtual timer
	 * and program the per-VPE shared Count register
	 * as appropriate here...
	 */
	reference = (unsigned long)read_c0_count();
	timestamp = MAKEVALID(reference + delta);
	/*
	 * To really model the clock, we have to catch the case
	 * where the current next-in-VPE timestamp is the old
	 * timestamp for the calling CPU, but the new value is
	 * in fact later.  In that case, we have to do a full
	 * scan and discover the new next-in-VPE CPU id and
	 * timestamp.
	 */
	previous = smtc_nexttime[vpe][cpu];
	if (cpu == smtc_nextinvpe[vpe] && ISVALID(previous)
	    && IS_SOONER(previous, timestamp, reference)) {
		int i;
		int soonest = cpu;

		/*
		 * Update timestamp array here, so that new
		 * value gets considered along with those of
		 * other virtual CPUs on the VPE.
		 */
		smtc_nexttime[vpe][cpu] = timestamp;
		for_each_online_cpu(i) {
			if (ISVALID(smtc_nexttime[vpe][i])
			    && IS_SOONER(smtc_nexttime[vpe][i],
				smtc_nexttime[vpe][soonest], reference)) {
				soonest = i;
			}
		}
		smtc_nextinvpe[vpe] = soonest;
		nextcomp = smtc_nexttime[vpe][soonest];
	/*
	 * Otherwise, we don't have to process the whole array rank,
	 * we just have to see if the event horizon has gotten closer.
	 */
	} else {
		if (!ISVALID(smtc_nexttime[vpe][smtc_nextinvpe[vpe]]) ||
		    IS_SOONER(timestamp,
			smtc_nexttime[vpe][smtc_nextinvpe[vpe]], reference)) {
			smtc_nextinvpe[vpe] = cpu;
			nextcomp = timestamp;
		}
		/*
		 * Since next-in-VPE may be the same as the executing
		 * virtual CPU, we update the array *after* checking
		 * its value.
		 */
		smtc_nexttime[vpe][cpu] = timestamp;
	}

	/*
	 * It may be that, in fact, we don't need to update Compare,
	 * but if we do, we want to make sure we didn't fall into
	 * a crack just behind Count.
	 */
	if (ISVALID(nextcomp)) {
		write_c0_compare(nextcomp);
		ehb();
		/*
		 * We never return an error, we just make sure
		 * that we trigger the handlers as quickly as
		 * we can if we fell behind.  The unsigned
		 * subtraction exceeding LONG_MAX means nextcomp
		 * has already slipped behind Count.
		 */
		while ((nextcomp - (unsigned long)read_c0_count())
			> (unsigned long)LONG_MAX) {
			nextcomp += CATCHUP_INCREMENT;
			write_c0_compare(nextcomp);
			ehb();
		}
	}
	emt(mtflags);
	local_irq_restore(flags);
	return 0;
}
  154. void smtc_distribute_timer(int vpe)
  155. {
  156. unsigned long flags;
  157. unsigned int mtflags;
  158. int cpu;
  159. struct clock_event_device *cd;
  160. unsigned long nextstamp = 0L;
  161. unsigned long reference;
  162. repeat:
  163. for_each_online_cpu(cpu) {
  164. /*
  165. * Find virtual CPUs within the current VPE who have
  166. * unserviced timer requests whose time is now past.
  167. */
  168. local_irq_save(flags);
  169. mtflags = dmt();
  170. if (cpu_data[cpu].vpe_id == vpe &&
  171. ISVALID(smtc_nexttime[vpe][cpu])) {
  172. reference = (unsigned long)read_c0_count();
  173. if ((smtc_nexttime[vpe][cpu] - reference)
  174. > (unsigned long)LONG_MAX) {
  175. smtc_nexttime[vpe][cpu] = 0L;
  176. emt(mtflags);
  177. local_irq_restore(flags);
  178. /*
  179. * We don't send IPIs to ourself.
  180. */
  181. if (cpu != smp_processor_id()) {
  182. smtc_send_ipi(cpu, SMTC_CLOCK_TICK, 0);
  183. } else {
  184. cd = &per_cpu(mips_clockevent_device, cpu);
  185. cd->event_handler(cd);
  186. }
  187. } else {
  188. /* Local to VPE but Valid Time not yet reached. */
  189. if (!ISVALID(nextstamp) ||
  190. IS_SOONER(smtc_nexttime[vpe][cpu], nextstamp,
  191. reference)) {
  192. smtc_nextinvpe[vpe] = cpu;
  193. nextstamp = smtc_nexttime[vpe][cpu];
  194. }
  195. emt(mtflags);
  196. local_irq_restore(flags);
  197. }
  198. } else {
  199. emt(mtflags);
  200. local_irq_restore(flags);
  201. }
  202. }
  203. /* Reprogram for interrupt at next soonest timestamp for VPE */
  204. if (ISVALID(nextstamp)) {
  205. write_c0_compare(nextstamp);
  206. ehb();
  207. if ((nextstamp - (unsigned long)read_c0_count())
  208. > (unsigned long)LONG_MAX)
  209. goto repeat;
  210. }
  211. }
/*
 * CP0 Count/Compare interrupt handler for SMTC.
 *
 * Services any pending perf-counter interrupt first, then, if the
 * timer-interrupt-pending bit is set in Cause (bit 30 -- CAUSE.TI,
 * architected on MIPS32R2, which MT cores are guaranteed to be),
 * acknowledges it by rewriting Compare and lets
 * smtc_distribute_timer() fan the tick out to the virtual CPUs
 * of this VPE.  Always reports the IRQ as handled.
 */
irqreturn_t c0_compare_interrupt(int irq, void *dev_id)
{
	int cpu = smp_processor_id();

	/* If we're running SMTC, we've got MIPS MT and therefore MIPS32R2 */
	handle_perf_irq(1);

	if (read_c0_cause() & (1 << 30)) {
		/* Clear Count/Compare Interrupt */
		write_c0_compare(read_c0_compare());
		smtc_distribute_timer(cpu_data[cpu].vpe_id);
	}
	return IRQ_HANDLED;
}
/*
 * Register the SMTC clockevent device for the calling CPU.
 *
 * CPU 0 additionally zeroes the shared virtual-timer state and runs
 * the Compare-interrupt usability probe (which must not be repeated
 * by secondary TCs once Compare is live), and performs the one-time
 * IRQ bookkeeping/setup after registering its device.
 *
 * Returns 0 on success, -ENXIO if there is no usable CP0 counter.
 */
int __cpuinit smtc_clockevent_init(void)
{
	uint64_t mips_freq = mips_hpt_frequency;
	unsigned int cpu = smp_processor_id();
	struct clock_event_device *cd;
	unsigned int irq;
	int i;
	int j;

	if (!cpu_has_counter || !mips_hpt_frequency)
		return -ENXIO;

	if (cpu == 0) {
		/* Invalidate all virtual-timer slots before first use. */
		for (i = 0; i < num_possible_cpus(); i++) {
			smtc_nextinvpe[i] = 0;
			for (j = 0; j < num_possible_cpus(); j++)
				smtc_nexttime[i][j] = 0L;
		}
		/*
		 * SMTC also can't have the usability test
		 * run by secondary TCs once Compare is in use.
		 */
		if (!c0_compare_int_usable())
			return -ENXIO;
	}

	/*
	 * With vectored interrupts things are getting platform specific.
	 * get_c0_compare_int is a hook to allow a platform to return the
	 * interrupt number of its liking.
	 */
	irq = MIPS_CPU_IRQ_BASE + cp0_compare_irq;
	if (get_c0_compare_int)
		irq = get_c0_compare_int();

	cd = &per_cpu(mips_clockevent_device, cpu);

	cd->name		= "MIPS";
	cd->features		= CLOCK_EVT_FEAT_ONESHOT;

	/* Calculate the min / max delta */
	cd->mult	= div_sc((unsigned long) mips_freq, NSEC_PER_SEC, 32);
	cd->shift		= 32;
	cd->max_delta_ns	= clockevent_delta2ns(0x7fffffff, cd);
	cd->min_delta_ns	= clockevent_delta2ns(0x300, cd);

	cd->rating		= 300;
	cd->irq			= irq;
	cd->cpumask		= cpumask_of(cpu);
	cd->set_next_event	= mips_next_event;
	cd->set_mode		= mips_set_clock_mode;
	cd->event_handler	= mips_event_handler;

	clockevents_register_device(cd);

	/*
	 * On SMTC we only want to do the data structure
	 * initialization and IRQ setup once.
	 */
	if (cpu)
		return 0;
	/*
	 * And we need the hwmask associated with the c0_compare
	 * vector to be initialized.
	 */
	irq_hwmask[irq] = (0x100 << cp0_compare_irq);
	if (cp0_timer_irq_installed)
		return 0;
	cp0_timer_irq_installed = 1;
	setup_irq(irq, &c0_compare_irqaction);
	return 0;
}