mct.c

/* linux/arch/arm/mach-exynos4/mct.c
 *
 * Copyright (c) 2011 Samsung Electronics Co., Ltd.
 *		http://www.samsung.com
 *
 * EXYNOS4 MCT(Multi-Core Timer) support
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/sched.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/err.h>
#include <linux/clk.h>
#include <linux/clockchips.h>
#include <linux/platform_device.h>
#include <linux/delay.h>
#include <linux/percpu.h>

#include <asm/hardware/gic.h>

#include <plat/cpu.h>

#include <mach/map.h>
#include <mach/irqs.h>
#include <mach/regs-mct.h>
#include <asm/mach/time.h>

enum {
        MCT_INT_SPI,
        MCT_INT_PPI
};

static unsigned long clk_cnt_per_tick;
static unsigned long clk_rate;
static unsigned int mct_int_type;

struct mct_clock_event_device {
        struct clock_event_device *evt;
        void __iomem *base;
        char name[10];
};
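
/*
 * Write an MCT register and wait for the write to take effect.
 * The value does not latch immediately: each writable register has a
 * matching bit in a write-status (WSTAT) register, which is polled for
 * up to roughly 1 ms and then cleared once the hardware has accepted
 * the value. Unknown registers are written without waiting.
 */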
static void exynos4_mct_write(unsigned int value, void *addr)
{
        void __iomem *stat_addr;
        u32 mask;
        u32 i;

        __raw_writel(value, addr);

        if (likely(addr >= EXYNOS4_MCT_L_BASE(0))) {
                u32 base = (u32) addr & EXYNOS4_MCT_L_MASK;
                switch ((u32) addr & ~EXYNOS4_MCT_L_MASK) {
                case (u32) MCT_L_TCON_OFFSET:
                        stat_addr = (void __iomem *) base + MCT_L_WSTAT_OFFSET;
                        mask = 1 << 3;          /* L_TCON write status */
                        break;
                case (u32) MCT_L_ICNTB_OFFSET:
                        stat_addr = (void __iomem *) base + MCT_L_WSTAT_OFFSET;
                        mask = 1 << 1;          /* L_ICNTB write status */
                        break;
                case (u32) MCT_L_TCNTB_OFFSET:
                        stat_addr = (void __iomem *) base + MCT_L_WSTAT_OFFSET;
                        mask = 1 << 0;          /* L_TCNTB write status */
                        break;
                default:
                        return;
                }
        } else {
                switch ((u32) addr) {
                case (u32) EXYNOS4_MCT_G_TCON:
                        stat_addr = EXYNOS4_MCT_G_WSTAT;
                        mask = 1 << 16;         /* G_TCON write status */
                        break;
                case (u32) EXYNOS4_MCT_G_COMP0_L:
                        stat_addr = EXYNOS4_MCT_G_WSTAT;
                        mask = 1 << 0;          /* G_COMP0_L write status */
                        break;
                case (u32) EXYNOS4_MCT_G_COMP0_U:
                        stat_addr = EXYNOS4_MCT_G_WSTAT;
                        mask = 1 << 1;          /* G_COMP0_U write status */
                        break;
                case (u32) EXYNOS4_MCT_G_COMP0_ADD_INCR:
                        stat_addr = EXYNOS4_MCT_G_WSTAT;
                        mask = 1 << 2;          /* G_COMP0_ADD_INCR w status */
                        break;
                case (u32) EXYNOS4_MCT_G_CNT_L:
                        stat_addr = EXYNOS4_MCT_G_CNT_WSTAT;
                        mask = 1 << 0;          /* G_CNT_L write status */
                        break;
                case (u32) EXYNOS4_MCT_G_CNT_U:
                        stat_addr = EXYNOS4_MCT_G_CNT_WSTAT;
                        mask = 1 << 1;          /* G_CNT_U write status */
                        break;
                default:
                        return;
                }
        }

        /* Wait maximum 1 ms until written values are applied */
        for (i = 0; i < loops_per_jiffy / 1000 * HZ; i++)
                if (__raw_readl(stat_addr) & mask) {
                        __raw_writel(mask, stat_addr);
                        return;
                }

        panic("MCT hangs after writing %d (addr:0x%08x)\n", value, (u32)addr);
}

/* Clocksource handling */
static void exynos4_mct_frc_start(u32 hi, u32 lo)
{
        u32 reg;

        exynos4_mct_write(lo, EXYNOS4_MCT_G_CNT_L);
        exynos4_mct_write(hi, EXYNOS4_MCT_G_CNT_U);

        reg = __raw_readl(EXYNOS4_MCT_G_TCON);
        reg |= MCT_G_TCON_START;
        exynos4_mct_write(reg, EXYNOS4_MCT_G_TCON);
}
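
/*
 * Read the 64-bit free-running counter. The upper word is sampled
 * before and after the lower word; if the two samples differ, the
 * counter carried over between reads and the sequence is retried.
 */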
static cycle_t exynos4_frc_read(struct clocksource *cs)
{
        unsigned int lo, hi;
        u32 hi2 = __raw_readl(EXYNOS4_MCT_G_CNT_U);

        do {
                hi = hi2;
                lo = __raw_readl(EXYNOS4_MCT_G_CNT_L);
                hi2 = __raw_readl(EXYNOS4_MCT_G_CNT_U);
        } while (hi != hi2);

        return ((cycle_t)hi << 32) | lo;
}

static void exynos4_frc_resume(struct clocksource *cs)
{
        exynos4_mct_frc_start(0, 0);
}

struct clocksource mct_frc = {
        .name           = "mct-frc",
        .rating         = 400,
        .read           = exynos4_frc_read,
        .mask           = CLOCKSOURCE_MASK(64),
        .flags          = CLOCK_SOURCE_IS_CONTINUOUS,
        .resume         = exynos4_frc_resume,
};

static void __init exynos4_clocksource_init(void)
{
        exynos4_mct_frc_start(0, 0);

        if (clocksource_register_hz(&mct_frc, clk_rate))
                panic("%s: can't register clocksource\n", mct_frc.name);
}
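
/*
 * Global comparator 0 (COMP0) clockevent: the comparator is programmed
 * relative to the current free-running counter value and raises
 * IRQ_MCT_G0 when the counter reaches it. In periodic mode the
 * auto-increment register reloads the comparator automatically.
 */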
static void exynos4_mct_comp0_stop(void)
{
        unsigned int tcon;

        tcon = __raw_readl(EXYNOS4_MCT_G_TCON);
        tcon &= ~(MCT_G_TCON_COMP0_ENABLE | MCT_G_TCON_COMP0_AUTO_INC);

        exynos4_mct_write(tcon, EXYNOS4_MCT_G_TCON);
        exynos4_mct_write(0, EXYNOS4_MCT_G_INT_ENB);
}

static void exynos4_mct_comp0_start(enum clock_event_mode mode,
                                    unsigned long cycles)
{
        unsigned int tcon;
        cycle_t comp_cycle;

        tcon = __raw_readl(EXYNOS4_MCT_G_TCON);

        if (mode == CLOCK_EVT_MODE_PERIODIC) {
                tcon |= MCT_G_TCON_COMP0_AUTO_INC;
                exynos4_mct_write(cycles, EXYNOS4_MCT_G_COMP0_ADD_INCR);
        }

        comp_cycle = exynos4_frc_read(&mct_frc) + cycles;
        exynos4_mct_write((u32)comp_cycle, EXYNOS4_MCT_G_COMP0_L);
        exynos4_mct_write((u32)(comp_cycle >> 32), EXYNOS4_MCT_G_COMP0_U);

        exynos4_mct_write(0x1, EXYNOS4_MCT_G_INT_ENB);

        tcon |= MCT_G_TCON_COMP0_ENABLE;
        exynos4_mct_write(tcon, EXYNOS4_MCT_G_TCON);
}

static int exynos4_comp_set_next_event(unsigned long cycles,
                                       struct clock_event_device *evt)
{
        exynos4_mct_comp0_start(evt->mode, cycles);

        return 0;
}

static void exynos4_comp_set_mode(enum clock_event_mode mode,
                                  struct clock_event_device *evt)
{
        exynos4_mct_comp0_stop();

        switch (mode) {
        case CLOCK_EVT_MODE_PERIODIC:
                exynos4_mct_comp0_start(mode, clk_cnt_per_tick);
                break;

        case CLOCK_EVT_MODE_ONESHOT:
        case CLOCK_EVT_MODE_UNUSED:
        case CLOCK_EVT_MODE_SHUTDOWN:
        case CLOCK_EVT_MODE_RESUME:
                break;
        }
}

static struct clock_event_device mct_comp_device = {
        .name           = "mct-comp",
        .features       = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT,
        .rating         = 250,
        .set_next_event = exynos4_comp_set_next_event,
        .set_mode       = exynos4_comp_set_mode,
};

static irqreturn_t exynos4_mct_comp_isr(int irq, void *dev_id)
{
        struct clock_event_device *evt = dev_id;

        exynos4_mct_write(0x1, EXYNOS4_MCT_G_INT_CSTAT);

        evt->event_handler(evt);

        return IRQ_HANDLED;
}

static struct irqaction mct_comp_event_irq = {
        .name           = "mct_comp_irq",
        .flags          = IRQF_TIMER | IRQF_IRQPOLL,
        .handler        = exynos4_mct_comp_isr,
        .dev_id         = &mct_comp_device,
};

static void exynos4_clockevent_init(void)
{
        clk_cnt_per_tick = clk_rate / 2 / HZ;

        clockevents_calc_mult_shift(&mct_comp_device, clk_rate / 2, 5);
        mct_comp_device.max_delta_ns =
                clockevent_delta2ns(0xffffffff, &mct_comp_device);
        mct_comp_device.min_delta_ns =
                clockevent_delta2ns(0xf, &mct_comp_device);
        mct_comp_device.cpumask = cpumask_of(0);
        clockevents_register_device(&mct_comp_device);

        setup_irq(IRQ_MCT_G0, &mct_comp_event_irq);
}
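
/*
 * Per-CPU tick handling: each core drives its own local MCT timer
 * (MCT_L) as a higher-rated clockevent, interrupting via a dedicated
 * SPI per core on EXYNOS4210 or a shared per-CPU PPI otherwise.
 */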
#ifdef CONFIG_LOCAL_TIMERS
static DEFINE_PER_CPU(struct mct_clock_event_device, percpu_mct_tick);

/* Clock event handling */
static void exynos4_mct_tick_stop(struct mct_clock_event_device *mevt)
{
        unsigned long tmp;
        unsigned long mask = MCT_L_TCON_INT_START | MCT_L_TCON_TIMER_START;
        void __iomem *addr = mevt->base + MCT_L_TCON_OFFSET;

        tmp = __raw_readl(addr);
        if (tmp & mask) {
                tmp &= ~mask;
                exynos4_mct_write(tmp, addr);
        }
}
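
/*
 * Program the local timer: load the interrupt count buffer with the
 * requested number of cycles, enable the local tick interrupt and
 * start the timer in interval mode so the count is reloaded
 * automatically.
 */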
static void exynos4_mct_tick_start(unsigned long cycles,
                                   struct mct_clock_event_device *mevt)
{
        unsigned long tmp;

        exynos4_mct_tick_stop(mevt);

        tmp = (1 << 31) | cycles;       /* MCT_L_UPDATE_ICNTB */

        /* update interrupt count buffer */
        exynos4_mct_write(tmp, mevt->base + MCT_L_ICNTB_OFFSET);

        /* enable MCT tick interrupt */
        exynos4_mct_write(0x1, mevt->base + MCT_L_INT_ENB_OFFSET);

        tmp = __raw_readl(mevt->base + MCT_L_TCON_OFFSET);
        tmp |= MCT_L_TCON_INT_START | MCT_L_TCON_TIMER_START |
               MCT_L_TCON_INTERVAL_MODE;
        exynos4_mct_write(tmp, mevt->base + MCT_L_TCON_OFFSET);
}

static int exynos4_tick_set_next_event(unsigned long cycles,
                                       struct clock_event_device *evt)
{
        struct mct_clock_event_device *mevt = this_cpu_ptr(&percpu_mct_tick);

        exynos4_mct_tick_start(cycles, mevt);

        return 0;
}

static inline void exynos4_tick_set_mode(enum clock_event_mode mode,
                                         struct clock_event_device *evt)
{
        struct mct_clock_event_device *mevt = this_cpu_ptr(&percpu_mct_tick);

        exynos4_mct_tick_stop(mevt);

        switch (mode) {
        case CLOCK_EVT_MODE_PERIODIC:
                exynos4_mct_tick_start(clk_cnt_per_tick, mevt);
                break;

        case CLOCK_EVT_MODE_ONESHOT:
        case CLOCK_EVT_MODE_UNUSED:
        case CLOCK_EVT_MODE_SHUTDOWN:
        case CLOCK_EVT_MODE_RESUME:
                break;
        }
}
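
/*
 * Acknowledge a pending local tick interrupt. Returns 1 if an
 * interrupt was pending and has been cleared, 0 otherwise. In oneshot
 * mode the timer is stopped first so it does not keep firing.
 */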
static int exynos4_mct_tick_clear(struct mct_clock_event_device *mevt)
{
        struct clock_event_device *evt = mevt->evt;

        /*
         * This is for supporting oneshot mode.
         * Mct would generate interrupt periodically
         * without explicit stopping.
         */
        if (evt->mode != CLOCK_EVT_MODE_PERIODIC)
                exynos4_mct_tick_stop(mevt);

        /* Clear the MCT tick interrupt */
        if (__raw_readl(mevt->base + MCT_L_INT_CSTAT_OFFSET) & 1) {
                exynos4_mct_write(0x1, mevt->base + MCT_L_INT_CSTAT_OFFSET);
                return 1;
        } else {
                return 0;
        }
}

static irqreturn_t exynos4_mct_tick_isr(int irq, void *dev_id)
{
        struct mct_clock_event_device *mevt = dev_id;
        struct clock_event_device *evt = mevt->evt;

        exynos4_mct_tick_clear(mevt);

        evt->event_handler(evt);

        return IRQ_HANDLED;
}

static struct irqaction mct_tick0_event_irq = {
        .name           = "mct_tick0_irq",
        .flags          = IRQF_TIMER | IRQF_NOBALANCING,
        .handler        = exynos4_mct_tick_isr,
};

static struct irqaction mct_tick1_event_irq = {
        .name           = "mct_tick1_irq",
        .flags          = IRQF_TIMER | IRQF_NOBALANCING,
        .handler        = exynos4_mct_tick_isr,
};
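
/*
 * Register the calling CPU's local MCT timer as its tick clockevent
 * and wire up the interrupt: a dedicated SPI per core when
 * mct_int_type is MCT_INT_SPI, or the shared per-CPU PPI otherwise.
 */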
static void exynos4_mct_tick_init(struct clock_event_device *evt)
{
        struct mct_clock_event_device *mevt;
        unsigned int cpu = smp_processor_id();

        mevt = this_cpu_ptr(&percpu_mct_tick);
        mevt->evt = evt;

        mevt->base = EXYNOS4_MCT_L_BASE(cpu);
        sprintf(mevt->name, "mct_tick%d", cpu);

        evt->name = mevt->name;
        evt->cpumask = cpumask_of(cpu);
        evt->set_next_event = exynos4_tick_set_next_event;
        evt->set_mode = exynos4_tick_set_mode;
        evt->features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT;
        evt->rating = 450;

        clockevents_calc_mult_shift(evt, clk_rate / 2, 5);
        evt->max_delta_ns =
                clockevent_delta2ns(0x7fffffff, evt);
        evt->min_delta_ns =
                clockevent_delta2ns(0xf, evt);

        clockevents_register_device(evt);

        exynos4_mct_write(0x1, mevt->base + MCT_L_TCNTB_OFFSET);

        if (mct_int_type == MCT_INT_SPI) {
                if (cpu == 0) {
                        mct_tick0_event_irq.dev_id = mevt;
                        evt->irq = IRQ_MCT_L0;
                        setup_irq(IRQ_MCT_L0, &mct_tick0_event_irq);
                } else {
                        mct_tick1_event_irq.dev_id = mevt;
                        evt->irq = IRQ_MCT_L1;
                        setup_irq(IRQ_MCT_L1, &mct_tick1_event_irq);
                        irq_set_affinity(IRQ_MCT_L1, cpumask_of(1));
                }
        } else {
                enable_percpu_irq(IRQ_MCT_LOCALTIMER, 0);
        }
}

/* Setup the local clock events for a CPU */
int __cpuinit local_timer_setup(struct clock_event_device *evt)
{
        exynos4_mct_tick_init(evt);

        return 0;
}

void local_timer_stop(struct clock_event_device *evt)
{
        unsigned int cpu = smp_processor_id();

        evt->set_mode(CLOCK_EVT_MODE_UNUSED, evt);
        if (mct_int_type == MCT_INT_SPI)
                if (cpu == 0)
                        remove_irq(evt->irq, &mct_tick0_event_irq);
                else
                        remove_irq(evt->irq, &mct_tick1_event_irq);
        else
                disable_percpu_irq(IRQ_MCT_LOCALTIMER);
}
#endif /* CONFIG_LOCAL_TIMERS */
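
/*
 * Look up the MCT input clock ("xtal") to obtain clk_rate and, when
 * the PPI interrupt scheme is used, request the per-CPU local timer
 * IRQ.
 */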
static void __init exynos4_timer_resources(void)
{
        struct clk *mct_clk;

        mct_clk = clk_get(NULL, "xtal");

        clk_rate = clk_get_rate(mct_clk);

#ifdef CONFIG_LOCAL_TIMERS
        if (mct_int_type == MCT_INT_PPI) {
                int err;

                err = request_percpu_irq(IRQ_MCT_LOCALTIMER,
                                         exynos4_mct_tick_isr, "MCT",
                                         &percpu_mct_tick);
                WARN(err, "MCT: can't request IRQ %d (%d)\n",
                     IRQ_MCT_LOCALTIMER, err);
        }
#endif /* CONFIG_LOCAL_TIMERS */
}

static void __init exynos4_timer_init(void)
{
        if (soc_is_exynos4210())
                mct_int_type = MCT_INT_SPI;
        else
                mct_int_type = MCT_INT_PPI;

        exynos4_timer_resources();
        exynos4_clocksource_init();
        exynos4_clockevent_init();
}

struct sys_timer exynos4_timer = {
        .init           = exynos4_timer_init,
};