mct.c 10 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349350351352353354355356357358359360361362363364365366367368369370371372373374375376377378379380381382383384385386387388389390391392393394395396397398399400401402403404405406407408409410411412413414415416417418419420421
  1. /* linux/arch/arm/mach-exynos4/mct.c
  2. *
  3. * Copyright (c) 2011 Samsung Electronics Co., Ltd.
  4. * http://www.samsung.com
  5. *
  6. * EXYNOS4 MCT(Multi-Core Timer) support
  7. *
  8. * This program is free software; you can redistribute it and/or modify
  9. * it under the terms of the GNU General Public License version 2 as
  10. * published by the Free Software Foundation.
  11. */
  12. #include <linux/sched.h>
  13. #include <linux/interrupt.h>
  14. #include <linux/irq.h>
  15. #include <linux/err.h>
  16. #include <linux/clk.h>
  17. #include <linux/clockchips.h>
  18. #include <linux/platform_device.h>
  19. #include <linux/delay.h>
  20. #include <linux/percpu.h>
  21. #include <mach/map.h>
  22. #include <mach/regs-mct.h>
  23. #include <asm/mach/time.h>
  24. static unsigned long clk_cnt_per_tick;
  25. static unsigned long clk_rate;
  26. struct mct_clock_event_device {
  27. struct clock_event_device *evt;
  28. void __iomem *base;
  29. };
  30. struct mct_clock_event_device mct_tick[2];
  31. static void exynos4_mct_write(unsigned int value, void *addr)
  32. {
  33. void __iomem *stat_addr;
  34. u32 mask;
  35. u32 i;
  36. __raw_writel(value, addr);
  37. switch ((u32) addr) {
  38. case (u32) EXYNOS4_MCT_G_TCON:
  39. stat_addr = EXYNOS4_MCT_G_WSTAT;
  40. mask = 1 << 16; /* G_TCON write status */
  41. break;
  42. case (u32) EXYNOS4_MCT_G_COMP0_L:
  43. stat_addr = EXYNOS4_MCT_G_WSTAT;
  44. mask = 1 << 0; /* G_COMP0_L write status */
  45. break;
  46. case (u32) EXYNOS4_MCT_G_COMP0_U:
  47. stat_addr = EXYNOS4_MCT_G_WSTAT;
  48. mask = 1 << 1; /* G_COMP0_U write status */
  49. break;
  50. case (u32) EXYNOS4_MCT_G_COMP0_ADD_INCR:
  51. stat_addr = EXYNOS4_MCT_G_WSTAT;
  52. mask = 1 << 2; /* G_COMP0_ADD_INCR write status */
  53. break;
  54. case (u32) EXYNOS4_MCT_G_CNT_L:
  55. stat_addr = EXYNOS4_MCT_G_CNT_WSTAT;
  56. mask = 1 << 0; /* G_CNT_L write status */
  57. break;
  58. case (u32) EXYNOS4_MCT_G_CNT_U:
  59. stat_addr = EXYNOS4_MCT_G_CNT_WSTAT;
  60. mask = 1 << 1; /* G_CNT_U write status */
  61. break;
  62. case (u32)(EXYNOS4_MCT_L0_BASE + MCT_L_TCON_OFFSET):
  63. stat_addr = EXYNOS4_MCT_L0_BASE + MCT_L_WSTAT_OFFSET;
  64. mask = 1 << 3; /* L0_TCON write status */
  65. break;
  66. case (u32)(EXYNOS4_MCT_L1_BASE + MCT_L_TCON_OFFSET):
  67. stat_addr = EXYNOS4_MCT_L1_BASE + MCT_L_WSTAT_OFFSET;
  68. mask = 1 << 3; /* L1_TCON write status */
  69. break;
  70. case (u32)(EXYNOS4_MCT_L0_BASE + MCT_L_TCNTB_OFFSET):
  71. stat_addr = EXYNOS4_MCT_L0_BASE + MCT_L_WSTAT_OFFSET;
  72. mask = 1 << 0; /* L0_TCNTB write status */
  73. break;
  74. case (u32)(EXYNOS4_MCT_L1_BASE + MCT_L_TCNTB_OFFSET):
  75. stat_addr = EXYNOS4_MCT_L1_BASE + MCT_L_WSTAT_OFFSET;
  76. mask = 1 << 0; /* L1_TCNTB write status */
  77. break;
  78. case (u32)(EXYNOS4_MCT_L0_BASE + MCT_L_ICNTB_OFFSET):
  79. stat_addr = EXYNOS4_MCT_L0_BASE + MCT_L_WSTAT_OFFSET;
  80. mask = 1 << 1; /* L0_ICNTB write status */
  81. break;
  82. case (u32)(EXYNOS4_MCT_L1_BASE + MCT_L_ICNTB_OFFSET):
  83. stat_addr = EXYNOS4_MCT_L1_BASE + MCT_L_WSTAT_OFFSET;
  84. mask = 1 << 1; /* L1_ICNTB write status */
  85. break;
  86. default:
  87. return;
  88. }
  89. /* Wait maximum 1 ms until written values are applied */
  90. for (i = 0; i < loops_per_jiffy / 1000 * HZ; i++)
  91. if (__raw_readl(stat_addr) & mask) {
  92. __raw_writel(mask, stat_addr);
  93. return;
  94. }
  95. panic("MCT hangs after writing %d (addr:0x%08x)\n", value, (u32)addr);
  96. }
  97. /* Clocksource handling */
  98. static void exynos4_mct_frc_start(u32 hi, u32 lo)
  99. {
  100. u32 reg;
  101. exynos4_mct_write(lo, EXYNOS4_MCT_G_CNT_L);
  102. exynos4_mct_write(hi, EXYNOS4_MCT_G_CNT_U);
  103. reg = __raw_readl(EXYNOS4_MCT_G_TCON);
  104. reg |= MCT_G_TCON_START;
  105. exynos4_mct_write(reg, EXYNOS4_MCT_G_TCON);
  106. }
  107. static cycle_t exynos4_frc_read(struct clocksource *cs)
  108. {
  109. unsigned int lo, hi;
  110. u32 hi2 = __raw_readl(EXYNOS4_MCT_G_CNT_U);
  111. do {
  112. hi = hi2;
  113. lo = __raw_readl(EXYNOS4_MCT_G_CNT_L);
  114. hi2 = __raw_readl(EXYNOS4_MCT_G_CNT_U);
  115. } while (hi != hi2);
  116. return ((cycle_t)hi << 32) | lo;
  117. }
  118. struct clocksource mct_frc = {
  119. .name = "mct-frc",
  120. .rating = 400,
  121. .read = exynos4_frc_read,
  122. .mask = CLOCKSOURCE_MASK(64),
  123. .flags = CLOCK_SOURCE_IS_CONTINUOUS,
  124. };
/*
 * exynos4_clocksource_init - start the free-running counter from zero and
 * register it as a clocksource running at clk_rate Hz.  Panics on
 * registration failure (no timekeeping is possible without it).
 */
static void __init exynos4_clocksource_init(void)
{
	exynos4_mct_frc_start(0, 0);

	if (clocksource_register_hz(&mct_frc, clk_rate))
		panic("%s: can't register clocksource\n", mct_frc.name);
}
  131. static void exynos4_mct_comp0_stop(void)
  132. {
  133. unsigned int tcon;
  134. tcon = __raw_readl(EXYNOS4_MCT_G_TCON);
  135. tcon &= ~(MCT_G_TCON_COMP0_ENABLE | MCT_G_TCON_COMP0_AUTO_INC);
  136. exynos4_mct_write(tcon, EXYNOS4_MCT_G_TCON);
  137. exynos4_mct_write(0, EXYNOS4_MCT_G_INT_ENB);
  138. }
/*
 * exynos4_mct_comp0_start - arm global comparator 0 to fire @cycles FRC
 * ticks from now.
 * @mode:   current clockevent mode; PERIODIC additionally programs the
 *          auto-increment reload value
 * @cycles: delta, in free-running-counter ticks, until the event
 *
 * The comparator raises its interrupt when the free-running counter
 * reaches the 64-bit value written to COMP0_L/COMP0_U.
 */
static void exynos4_mct_comp0_start(enum clock_event_mode mode,
				    unsigned long cycles)
{
	unsigned int tcon;
	cycle_t comp_cycle;

	tcon = __raw_readl(EXYNOS4_MCT_G_TCON);

	if (mode == CLOCK_EVT_MODE_PERIODIC) {
		/* Value added to the comparator after each event fires. */
		tcon |= MCT_G_TCON_COMP0_AUTO_INC;
		exynos4_mct_write(cycles, EXYNOS4_MCT_G_COMP0_ADD_INCR);
	}

	/* Absolute match value: current counter plus the requested delta. */
	comp_cycle = exynos4_frc_read(&mct_frc) + cycles;
	exynos4_mct_write((u32)comp_cycle, EXYNOS4_MCT_G_COMP0_L);
	exynos4_mct_write((u32)(comp_cycle >> 32), EXYNOS4_MCT_G_COMP0_U);

	/* Unmask the comparator 0 interrupt, then enable the comparator. */
	exynos4_mct_write(0x1, EXYNOS4_MCT_G_INT_ENB);

	tcon |= MCT_G_TCON_COMP0_ENABLE;
	exynos4_mct_write(tcon , EXYNOS4_MCT_G_TCON);
}
/*
 * exynos4_comp_set_next_event - clockevents hook: program comparator 0 to
 * fire @cycles ticks from now.  Always succeeds (returns 0).
 */
static int exynos4_comp_set_next_event(unsigned long cycles,
				       struct clock_event_device *evt)
{
	exynos4_mct_comp0_start(evt->mode, cycles);

	return 0;
}
  162. static void exynos4_comp_set_mode(enum clock_event_mode mode,
  163. struct clock_event_device *evt)
  164. {
  165. exynos4_mct_comp0_stop();
  166. switch (mode) {
  167. case CLOCK_EVT_MODE_PERIODIC:
  168. exynos4_mct_comp0_start(mode, clk_cnt_per_tick);
  169. break;
  170. case CLOCK_EVT_MODE_ONESHOT:
  171. case CLOCK_EVT_MODE_UNUSED:
  172. case CLOCK_EVT_MODE_SHUTDOWN:
  173. case CLOCK_EVT_MODE_RESUME:
  174. break;
  175. }
  176. }
/*
 * Global comparator 0 exposed as a system-wide clockevent.  Rating 250 is
 * below the per-CPU MCT ticks (450), so those are preferred when local
 * timers are available.
 */
static struct clock_event_device mct_comp_device = {
	.name = "mct-comp",
	.features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT,
	.rating = 250,
	.set_next_event = exynos4_comp_set_next_event,
	.set_mode = exynos4_comp_set_mode,
};
/*
 * exynos4_mct_comp_isr - interrupt handler for global comparator 0.
 * @dev_id carries the clockevent device (&mct_comp_device).
 *
 * Acknowledges the interrupt (G_INT_CSTAT bit 0) before invoking the
 * clockevent handler, which may immediately re-arm the comparator.
 */
static irqreturn_t exynos4_mct_comp_isr(int irq, void *dev_id)
{
	struct clock_event_device *evt = dev_id;

	/* Clear the comparator 0 interrupt status. */
	exynos4_mct_write(0x1, EXYNOS4_MCT_G_INT_CSTAT);

	evt->event_handler(evt);

	return IRQ_HANDLED;
}
/*
 * IRQ action for global comparator 0.  IRQF_TIMER marks it as a timer
 * interrupt; IRQF_IRQPOLL allows it to be used for irq polling.
 */
static struct irqaction mct_comp_event_irq = {
	.name = "mct_comp_irq",
	.flags = IRQF_TIMER | IRQF_IRQPOLL,
	.handler = exynos4_mct_comp_isr,
	.dev_id = &mct_comp_device,
};
  197. static void exynos4_clockevent_init(void)
  198. {
  199. clk_cnt_per_tick = clk_rate / 2 / HZ;
  200. clockevents_calc_mult_shift(&mct_comp_device, clk_rate / 2, 5);
  201. mct_comp_device.max_delta_ns =
  202. clockevent_delta2ns(0xffffffff, &mct_comp_device);
  203. mct_comp_device.min_delta_ns =
  204. clockevent_delta2ns(0xf, &mct_comp_device);
  205. mct_comp_device.cpumask = cpumask_of(0);
  206. clockevents_register_device(&mct_comp_device);
  207. setup_irq(IRQ_MCT_G0, &mct_comp_event_irq);
  208. }
  209. #ifdef CONFIG_LOCAL_TIMERS
  210. /* Clock event handling */
  211. static void exynos4_mct_tick_stop(struct mct_clock_event_device *mevt)
  212. {
  213. unsigned long tmp;
  214. unsigned long mask = MCT_L_TCON_INT_START | MCT_L_TCON_TIMER_START;
  215. void __iomem *addr = mevt->base + MCT_L_TCON_OFFSET;
  216. tmp = __raw_readl(addr);
  217. if (tmp & mask) {
  218. tmp &= ~mask;
  219. exynos4_mct_write(tmp, addr);
  220. }
  221. }
  222. static void exynos4_mct_tick_start(unsigned long cycles,
  223. struct mct_clock_event_device *mevt)
  224. {
  225. unsigned long tmp;
  226. exynos4_mct_tick_stop(mevt);
  227. tmp = (1 << 31) | cycles; /* MCT_L_UPDATE_ICNTB */
  228. /* update interrupt count buffer */
  229. exynos4_mct_write(tmp, mevt->base + MCT_L_ICNTB_OFFSET);
  230. /* enable MCT tick interupt */
  231. exynos4_mct_write(0x1, mevt->base + MCT_L_INT_ENB_OFFSET);
  232. tmp = __raw_readl(mevt->base + MCT_L_TCON_OFFSET);
  233. tmp |= MCT_L_TCON_INT_START | MCT_L_TCON_TIMER_START |
  234. MCT_L_TCON_INTERVAL_MODE;
  235. exynos4_mct_write(tmp, mevt->base + MCT_L_TCON_OFFSET);
  236. }
/*
 * exynos4_tick_set_next_event - clockevents hook for the current CPU's
 * local timer: program an event @cycles ticks from now.  Always returns 0.
 */
static int exynos4_tick_set_next_event(unsigned long cycles,
				       struct clock_event_device *evt)
{
	/* Runs on the CPU whose timer fires, so smp_processor_id() picks
	 * the right mct_tick slot. */
	struct mct_clock_event_device *mevt = &mct_tick[smp_processor_id()];

	exynos4_mct_tick_start(cycles, mevt);

	return 0;
}
  244. static inline void exynos4_tick_set_mode(enum clock_event_mode mode,
  245. struct clock_event_device *evt)
  246. {
  247. struct mct_clock_event_device *mevt = &mct_tick[smp_processor_id()];
  248. exynos4_mct_tick_stop(mevt);
  249. switch (mode) {
  250. case CLOCK_EVT_MODE_PERIODIC:
  251. exynos4_mct_tick_start(clk_cnt_per_tick, mevt);
  252. break;
  253. case CLOCK_EVT_MODE_ONESHOT:
  254. case CLOCK_EVT_MODE_UNUSED:
  255. case CLOCK_EVT_MODE_SHUTDOWN:
  256. case CLOCK_EVT_MODE_RESUME:
  257. break;
  258. }
  259. }
/*
 * exynos4_mct_tick_isr - interrupt handler for a CPU-local MCT timer.
 * @dev_id carries the per-CPU mct_clock_event_device.
 */
static irqreturn_t exynos4_mct_tick_isr(int irq, void *dev_id)
{
	struct mct_clock_event_device *mevt = dev_id;
	struct clock_event_device *evt = mevt->evt;

	/*
	 * This is for supporting oneshot mode.
	 * Mct would generate interrupt periodically
	 * without explicit stopping.
	 */
	if (evt->mode != CLOCK_EVT_MODE_PERIODIC)
		exynos4_mct_tick_stop(mevt);

	/* Clear the MCT tick interrupt */
	exynos4_mct_write(0x1, mevt->base + MCT_L_INT_CSTAT_OFFSET);

	evt->event_handler(evt);

	return IRQ_HANDLED;
}
/*
 * IRQ action for CPU0's local timer (dev_id filled in by
 * exynos4_mct_tick_init).  IRQF_NOBALANCING keeps the line on its CPU.
 */
static struct irqaction mct_tick0_event_irq = {
	.name = "mct_tick0_irq",
	.flags = IRQF_TIMER | IRQF_NOBALANCING,
	.handler = exynos4_mct_tick_isr,
};
/*
 * IRQ action for CPU1's local timer (dev_id filled in by
 * exynos4_mct_tick_init).  IRQF_NOBALANCING keeps the line on its CPU.
 */
static struct irqaction mct_tick1_event_irq = {
	.name = "mct_tick1_irq",
	.flags = IRQF_TIMER | IRQF_NOBALANCING,
	.handler = exynos4_mct_tick_isr,
};
  286. static void exynos4_mct_tick_init(struct clock_event_device *evt)
  287. {
  288. unsigned int cpu = smp_processor_id();
  289. mct_tick[cpu].evt = evt;
  290. if (cpu == 0) {
  291. mct_tick[cpu].base = EXYNOS4_MCT_L0_BASE;
  292. evt->name = "mct_tick0";
  293. } else {
  294. mct_tick[cpu].base = EXYNOS4_MCT_L1_BASE;
  295. evt->name = "mct_tick1";
  296. }
  297. evt->cpumask = cpumask_of(cpu);
  298. evt->set_next_event = exynos4_tick_set_next_event;
  299. evt->set_mode = exynos4_tick_set_mode;
  300. evt->features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT;
  301. evt->rating = 450;
  302. clockevents_calc_mult_shift(evt, clk_rate / 2, 5);
  303. evt->max_delta_ns =
  304. clockevent_delta2ns(0x7fffffff, evt);
  305. evt->min_delta_ns =
  306. clockevent_delta2ns(0xf, evt);
  307. clockevents_register_device(evt);
  308. exynos4_mct_write(0x1, mct_tick[cpu].base + MCT_L_TCNTB_OFFSET);
  309. if (cpu == 0) {
  310. mct_tick0_event_irq.dev_id = &mct_tick[cpu];
  311. setup_irq(IRQ_MCT_L0, &mct_tick0_event_irq);
  312. } else {
  313. mct_tick1_event_irq.dev_id = &mct_tick[cpu];
  314. irq_set_affinity(IRQ_MCT1, cpumask_of(1));
  315. setup_irq(IRQ_MCT_L1, &mct_tick1_event_irq);
  316. }
  317. }
/*
 * Setup the local clock events for a CPU.
 * Called by the ARM local-timer core on each secondary CPU bring-up;
 * simply delegates to the MCT per-cpu tick initialization.
 */
void __cpuinit local_timer_setup(struct clock_event_device *evt)
{
	exynos4_mct_tick_init(evt);
}
/*
 * local_timer_ack - acknowledge hook for the ARM local-timer core.
 * Nothing to do: exynos4_mct_tick_isr clears the interrupt status
 * (MCT_L_INT_CSTAT) itself, so always return 0 here.
 */
int local_timer_ack(void)
{
	return 0;
}
  327. #endif /* CONFIG_LOCAL_TIMERS */
  328. static void __init exynos4_timer_resources(void)
  329. {
  330. struct clk *mct_clk;
  331. mct_clk = clk_get(NULL, "xtal");
  332. clk_rate = clk_get_rate(mct_clk);
  333. }
/*
 * exynos4_timer_init - machine timer init entry point.
 * Order matters: the clock rate must be known before the clocksource and
 * clockevent are registered.
 */
static void __init exynos4_timer_init(void)
{
	exynos4_timer_resources();
	exynos4_clocksource_init();
	exynos4_clockevent_init();
}
/* System timer descriptor referenced by the EXYNOS4 machine definition. */
struct sys_timer exynos4_timer = {
	.init = exynos4_timer_init,
};