/*
 * arch/sh/kernel/timers/timer-tmu.c - TMU Timer Support
 *
 * Copyright (C) 2005 - 2007 Paul Mundt
 *
 * TMU handling code hacked out of arch/sh/kernel/time.c
 *
 * Copyright (C) 1999 Tetsuya Okada & Niibe Yutaka
 * Copyright (C) 2000 Philipp Rumpf <prumpf@tux.org>
 * Copyright (C) 2002, 2003, 2004 Paul Mundt
 * Copyright (C) 2002 M. R. Brown <mrbrown@linux-sh.org>
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 */
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/interrupt.h>
#include <linux/seqlock.h>
#include <linux/clockchips.h>
#include <asm/timer.h>
#include <asm/rtc.h>
#include <asm/io.h>
#include <asm/irq.h>
#include <asm/clock.h>

#define TMU_TOCR_INIT	0x00
#define TMU_TCR_INIT	0x0020

#define TMU0		(0)
#define TMU1		(1)
  31. static inline void _tmu_start(int tmu_num)
  32. {
  33. ctrl_outb(ctrl_inb(TMU_012_TSTR) | (0x1<<tmu_num), TMU_012_TSTR);
  34. }
  35. static inline void _tmu_set_irq(int tmu_num, int enabled)
  36. {
  37. register unsigned long tmu_tcr = TMU0_TCR + (0xc*tmu_num);
  38. ctrl_outw( (enabled ? ctrl_inw(tmu_tcr) | (1<<5) : ctrl_inw(tmu_tcr) & ~(1<<5)), tmu_tcr);
  39. }
  40. static inline void _tmu_stop(int tmu_num)
  41. {
  42. ctrl_outb(ctrl_inb(TMU_012_TSTR) & ~(0x1<<tmu_num), TMU_012_TSTR);
  43. }
  44. static inline void _tmu_clear_status(int tmu_num)
  45. {
  46. register unsigned long tmu_tcr = TMU0_TCR + (0xc*tmu_num);
  47. /* Clear UNF bit */
  48. ctrl_outw(ctrl_inw(tmu_tcr) & ~0x100, tmu_tcr);
  49. }
  50. static inline unsigned long _tmu_read(int tmu_num)
  51. {
  52. return ctrl_inl(TMU0_TCNT+0xC*tmu_num);
  53. }
  54. static int tmu_timer_start(void)
  55. {
  56. _tmu_start(TMU0);
  57. _tmu_start(TMU1);
  58. _tmu_set_irq(TMU0,1);
  59. return 0;
  60. }
  61. static int tmu_timer_stop(void)
  62. {
  63. _tmu_stop(TMU0);
  64. _tmu_stop(TMU1);
  65. _tmu_clear_status(TMU0);
  66. return 0;
  67. }
  68. /*
  69. * also when the module_clk is scaled the TMU1
  70. * will show the same frequency
  71. */
  72. static int tmus_are_scaled;
  73. static cycle_t tmu_timer_read(void)
  74. {
  75. return ((cycle_t)(~_tmu_read(TMU1)))<<tmus_are_scaled;
  76. }
  77. static unsigned long tmu_latest_interval[3];
  78. static void tmu_timer_set_interval(int tmu_num, unsigned long interval, unsigned int reload)
  79. {
  80. unsigned long tmu_tcnt = TMU0_TCNT + tmu_num*0xC;
  81. unsigned long tmu_tcor = TMU0_TCOR + tmu_num*0xC;
  82. _tmu_stop(tmu_num);
  83. ctrl_outl(interval, tmu_tcnt);
  84. tmu_latest_interval[tmu_num] = interval;
  85. /*
  86. * TCNT reloads from TCOR on underflow, clear it if we don't
  87. * intend to auto-reload
  88. */
  89. ctrl_outl( reload ? interval : 0 , tmu_tcor);
  90. _tmu_start(tmu_num);
  91. }
  92. static int tmu_set_next_event(unsigned long cycles,
  93. struct clock_event_device *evt)
  94. {
  95. tmu_timer_set_interval(TMU0,cycles, evt->mode == CLOCK_EVT_MODE_PERIODIC);
  96. _tmu_set_irq(TMU0,1);
  97. return 0;
  98. }
  99. static void tmu_set_mode(enum clock_event_mode mode,
  100. struct clock_event_device *evt)
  101. {
  102. switch (mode) {
  103. case CLOCK_EVT_MODE_PERIODIC:
  104. ctrl_outl(tmu_latest_interval[TMU0], TMU0_TCOR);
  105. break;
  106. case CLOCK_EVT_MODE_ONESHOT:
  107. ctrl_outl(0, TMU0_TCOR);
  108. break;
  109. case CLOCK_EVT_MODE_UNUSED:
  110. case CLOCK_EVT_MODE_SHUTDOWN:
  111. case CLOCK_EVT_MODE_RESUME:
  112. break;
  113. }
  114. }
/*
 * TMU0 clock event device.  Supports both periodic and one-shot
 * operation; .mult, .min_delta_ns and .max_delta_ns are filled in by
 * tmu_timer_init() and retuned by tmu_clk_recalc().
 */
static struct clock_event_device tmu0_clockevent = {
	.name = "tmu0",
	.shift = 32,
	.features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT,
	.set_mode = tmu_set_mode,
	.set_next_event = tmu_set_next_event,
};
  122. static irqreturn_t tmu_timer_interrupt(int irq, void *dummy)
  123. {
  124. struct clock_event_device *evt = &tmu0_clockevent;
  125. _tmu_clear_status(TMU0);
  126. _tmu_set_irq(TMU0,tmu0_clockevent.mode != CLOCK_EVT_MODE_ONESHOT);
  127. switch (tmu0_clockevent.mode) {
  128. case CLOCK_EVT_MODE_ONESHOT:
  129. case CLOCK_EVT_MODE_PERIODIC:
  130. evt->event_handler(evt);
  131. break;
  132. default:
  133. break;
  134. }
  135. return IRQ_HANDLED;
  136. }
/*
 * irqaction for the TMU0 underflow interrupt; registered against
 * CONFIG_SH_TIMER_IRQ in tmu_timer_init().
 */
static struct irqaction tmu0_irq = {
	.name = "periodic/oneshot timer",
	.handler = tmu_timer_interrupt,
	.flags = IRQF_DISABLED | IRQF_TIMER | IRQF_IRQPOLL,
};
  142. static void __init tmu_clk_init(struct clk *clk)
  143. {
  144. u8 divisor = TMU_TCR_INIT & 0x7;
  145. int tmu_num = clk->name[3]-'0';
  146. ctrl_outw(TMU_TCR_INIT, TMU0_TCR+(tmu_num*0xC));
  147. clk->rate = clk_get_rate(clk->parent) / (4 << (divisor << 1));
  148. }
/*
 * clk_ops recalc hook: recompute the channel rate after the parent
 * module clock changed.  If TMU0's rate changed, retune the
 * clockevent (mult and delta bounds) and reprogram the in-flight
 * interval with interrupts disabled.
 *
 * NOTE(review): the interval is adjusted by a single shift, which
 * assumes the module clock only ever scales by a factor of two —
 * confirm against the cpufreq/clock framework users of this clk.
 */
static void tmu_clk_recalc(struct clk *clk)
{
	int tmu_num = clk->name[3]-'0';
	unsigned long prev_rate = clk_get_rate(clk);
	unsigned long flags;
	u8 divisor = ctrl_inw(TMU0_TCR+tmu_num*0xC) & 0x7;
	clk->rate = clk_get_rate(clk->parent) / (4 << (divisor << 1));

	/* Rate unchanged: nothing to retune */
	if(prev_rate==clk_get_rate(clk))
		return;

	if(tmu_num)
		return; /* No more work on TMU1 */

	local_irq_save(flags);
	/* Remember whether the clocksource value must be scaled up */
	tmus_are_scaled = (prev_rate > clk->rate);

	_tmu_stop(TMU0);

	/* Re-derive the clockevent conversion factor and delta bounds */
	tmu0_clockevent.mult = div_sc(clk->rate, NSEC_PER_SEC,
				tmu0_clockevent.shift);
	tmu0_clockevent.max_delta_ns =
			clockevent_delta2ns(-1, &tmu0_clockevent);
	tmu0_clockevent.min_delta_ns =
			clockevent_delta2ns(1, &tmu0_clockevent);

	/* Keep the programmed interval at the same wall-clock period */
	if (tmus_are_scaled)
		tmu_latest_interval[TMU0] >>= 1;
	else
		tmu_latest_interval[TMU0] <<= 1;

	tmu_timer_set_interval(TMU0,
		tmu_latest_interval[TMU0],
		tmu0_clockevent.mode == CLOCK_EVT_MODE_PERIODIC);

	_tmu_start(TMU0);

	local_irq_restore(flags);
}
/* Clock operations shared by both TMU channel clocks. */
static struct clk_ops tmu_clk_ops = {
	.init = tmu_clk_init,
	.recalc = tmu_clk_recalc,
};

/* TMU0 channel clock (clockevent); parent set to module_clk at init. */
static struct clk tmu0_clk = {
	.name = "tmu0_clk",
	.ops = &tmu_clk_ops,
};

/* TMU1 channel clock (clocksource); parent set to module_clk at init. */
static struct clk tmu1_clk = {
	.name = "tmu1_clk",
	.ops = &tmu_clk_ops,
};
/*
 * One-time timer setup: hook the TMU0 interrupt, register and enable
 * both channel clocks, then bring up TMU1 as a free-running
 * clocksource and TMU0 as the tick clockevent device.
 */
static int tmu_timer_init(void)
{
	unsigned long interval;
	unsigned long frequency;

	setup_irq(CONFIG_SH_TIMER_IRQ, &tmu0_irq);

	/* Both channels are fed from the module clock */
	tmu0_clk.parent = clk_get(NULL, "module_clk");
	tmu1_clk.parent = clk_get(NULL, "module_clk");

	/* Quiesce the hardware before reprogramming it */
	tmu_timer_stop();

	/*
	 * NOTE(review): the excluded subtypes appear not to implement
	 * TMU_TOCR — confirm against the datasheets before extending
	 * this list.
	 */
#if !defined(CONFIG_CPU_SUBTYPE_SH7720) && \
	!defined(CONFIG_CPU_SUBTYPE_SH7721) && \
	!defined(CONFIG_CPU_SUBTYPE_SH7760) && \
	!defined(CONFIG_CPU_SUBTYPE_SH7785) && \
	!defined(CONFIG_CPU_SUBTYPE_SH7786) && \
	!defined(CONFIG_CPU_SUBTYPE_SHX3)
	ctrl_outb(TMU_TOCR_INIT, TMU_TOCR);
#endif

	clk_register(&tmu0_clk);
	clk_register(&tmu1_clk);
	clk_enable(&tmu0_clk);
	clk_enable(&tmu1_clk);

	/* Tick interval: one HZ period in TMU0 ticks, rounded to nearest */
	frequency = clk_get_rate(&tmu0_clk);
	interval = (frequency + HZ / 2) / HZ;

	/* TMU0: periodic tick; TMU1: maximal free-running count */
	tmu_timer_set_interval(TMU0,interval, 1);
	tmu_timer_set_interval(TMU1,~0,1);

	_tmu_start(TMU1);

	clocksource_sh.rating = 200;
	clocksource_sh.mask = CLOCKSOURCE_MASK(32);
	clocksource_sh.read = tmu_timer_read;
	clocksource_sh.shift = 10;
	clocksource_sh.mult = clocksource_hz2mult(clk_get_rate(&tmu1_clk),
						  clocksource_sh.shift);
	clocksource_sh.flags = CLOCK_SOURCE_IS_CONTINUOUS;
	clocksource_register(&clocksource_sh);

	/* Fill in the clockevent conversion factor and delta bounds */
	tmu0_clockevent.mult = div_sc(frequency, NSEC_PER_SEC,
				      tmu0_clockevent.shift);
	tmu0_clockevent.max_delta_ns =
			clockevent_delta2ns(-1, &tmu0_clockevent);
	tmu0_clockevent.min_delta_ns =
			clockevent_delta2ns(1, &tmu0_clockevent);

	tmu0_clockevent.cpumask = cpumask_of(0);
	tmu0_clockevent.rating = 100;

	clockevents_register_device(&tmu0_clockevent);

	return 0;
}
/* sys_timer glue: entry points invoked by the generic SH timer code. */
static struct sys_timer_ops tmu_timer_ops = {
	.init = tmu_timer_init,
	.start = tmu_timer_start,
	.stop = tmu_timer_stop,
};

/* The exported TMU system timer instance. */
struct sys_timer tmu_timer = {
	.name = "tmu",
	.ops = &tmu_timer_ops,
};