/* linux/arch/arm/mach-exynos4/mct.c
 *
 * Copyright (c) 2011 Samsung Electronics Co., Ltd.
 *		http://www.samsung.com
 *
 * EXYNOS4 MCT(Multi-Core Timer) support
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/sched.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/err.h>
#include <linux/clk.h>
#include <linux/clockchips.h>
#include <linux/platform_device.h>
#include <linux/delay.h>
#include <linux/percpu.h>
#include <linux/of.h>

#include <asm/arch_timer.h>
#include <asm/hardware/gic.h>
#include <asm/localtimer.h>
#include <asm/mach/time.h>

#include <plat/cpu.h>

#include <mach/map.h>
#include <mach/irqs.h>
#include <mach/regs-mct.h>
/*
 * Extra count loaded into the local timer's TCNTB; the effective local
 * tick rate is clk_rate / (TICK_BASE_CNT + 1) (see
 * exynos4_local_timer_setup()).
 */
#define TICK_BASE_CNT	1

/* How the per-CPU local tick interrupt is delivered. */
enum {
	MCT_INT_SPI,	/* dedicated shared peripheral interrupt per timer */
	MCT_INT_PPI	/* single private peripheral interrupt, per-CPU */
};

/* Rate of the MCT input clock ("xtal"); set in exynos4_timer_resources(). */
static unsigned long clk_rate;
/* MCT_INT_SPI or MCT_INT_PPI, chosen by SoC in exynos_timer_init(). */
static unsigned int mct_int_type;

/* Per-CPU state for one local MCT timer. */
struct mct_clock_event_device {
	struct clock_event_device *evt;
	void __iomem *base;	/* MMIO base of this CPU's local timer */
	char name[10];		/* "mct_tickN" */
};
/*
 * exynos4_mct_write - write an MCT register and wait until the write is
 * actually applied by the timer block.
 *
 * Most MCT registers latch writes asynchronously: after __raw_writel()
 * the hardware raises a per-register bit in a WSTAT register once the
 * value has taken effect, and that bit must then be cleared by writing
 * it back.  Registers with no WSTAT bit (e.g. the INT_ENB/INT_CSTAT
 * registers) hit the default: cases and return immediately.
 *
 * Busy-waits up to ~1 ms (loops_per_jiffy-scaled loop) and panics if
 * the hardware never acknowledges the write.
 */
static void exynos4_mct_write(unsigned int value, void *addr)
{
	void __iomem *stat_addr;
	u32 mask;
	u32 i;

	__raw_writel(value, addr);

	/* Local (per-CPU) timer registers live at/above EXYNOS4_MCT_L_BASE(0). */
	if (likely(addr >= EXYNOS4_MCT_L_BASE(0))) {
		u32 base = (u32) addr & EXYNOS4_MCT_L_MASK;
		switch ((u32) addr & ~EXYNOS4_MCT_L_MASK) {
		case (u32) MCT_L_TCON_OFFSET:
			stat_addr = (void __iomem *) base + MCT_L_WSTAT_OFFSET;
			mask = 1 << 3;		/* L_TCON write status */
			break;
		case (u32) MCT_L_ICNTB_OFFSET:
			stat_addr = (void __iomem *) base + MCT_L_WSTAT_OFFSET;
			mask = 1 << 1;		/* L_ICNTB write status */
			break;
		case (u32) MCT_L_TCNTB_OFFSET:
			stat_addr = (void __iomem *) base + MCT_L_WSTAT_OFFSET;
			mask = 1 << 0;		/* L_TCNTB write status */
			break;
		default:
			/* No write-status bit for this register. */
			return;
		}
	} else {
		/* Global timer registers. */
		switch ((u32) addr) {
		case (u32) EXYNOS4_MCT_G_TCON:
			stat_addr = EXYNOS4_MCT_G_WSTAT;
			mask = 1 << 16;		/* G_TCON write status */
			break;
		case (u32) EXYNOS4_MCT_G_COMP0_L:
			stat_addr = EXYNOS4_MCT_G_WSTAT;
			mask = 1 << 0;		/* G_COMP0_L write status */
			break;
		case (u32) EXYNOS4_MCT_G_COMP0_U:
			stat_addr = EXYNOS4_MCT_G_WSTAT;
			mask = 1 << 1;		/* G_COMP0_U write status */
			break;
		case (u32) EXYNOS4_MCT_G_COMP0_ADD_INCR:
			stat_addr = EXYNOS4_MCT_G_WSTAT;
			mask = 1 << 2;		/* G_COMP0_ADD_INCR w status */
			break;
		case (u32) EXYNOS4_MCT_G_CNT_L:
			stat_addr = EXYNOS4_MCT_G_CNT_WSTAT;
			mask = 1 << 0;		/* G_CNT_L write status */
			break;
		case (u32) EXYNOS4_MCT_G_CNT_U:
			stat_addr = EXYNOS4_MCT_G_CNT_WSTAT;
			mask = 1 << 1;		/* G_CNT_U write status */
			break;
		default:
			return;
		}
	}

	/* Wait maximum 1 ms until written values are applied */
	for (i = 0; i < loops_per_jiffy / 1000 * HZ; i++)
		if (__raw_readl(stat_addr) & mask) {
			/* Acknowledge by writing the status bit back. */
			__raw_writel(mask, stat_addr);
			return;
		}

	panic("MCT hangs after writing %d (addr:0x%08x)\n", value, (u32)addr);
}
  104. /* Clocksource handling */
  105. static void exynos4_mct_frc_start(u32 hi, u32 lo)
  106. {
  107. u32 reg;
  108. exynos4_mct_write(lo, EXYNOS4_MCT_G_CNT_L);
  109. exynos4_mct_write(hi, EXYNOS4_MCT_G_CNT_U);
  110. reg = __raw_readl(EXYNOS4_MCT_G_TCON);
  111. reg |= MCT_G_TCON_START;
  112. exynos4_mct_write(reg, EXYNOS4_MCT_G_TCON);
  113. }
/*
 * exynos4_frc_read - read the 64-bit free-running counter.
 *
 * The two 32-bit halves cannot be read atomically, so the upper word is
 * re-read after the lower word; if it changed, a carry from G_CNT_L into
 * G_CNT_U happened in between and the pair is re-sampled.
 */
static cycle_t exynos4_frc_read(struct clocksource *cs)
{
	unsigned int lo, hi;
	u32 hi2 = __raw_readl(EXYNOS4_MCT_G_CNT_U);

	do {
		hi = hi2;
		lo = __raw_readl(EXYNOS4_MCT_G_CNT_L);
		hi2 = __raw_readl(EXYNOS4_MCT_G_CNT_U);
	} while (hi != hi2);

	return ((cycle_t)hi << 32) | lo;
}
/* Restart the free-running counter from zero on system resume. */
static void exynos4_frc_resume(struct clocksource *cs)
{
	exynos4_mct_frc_start(0, 0);
}
/* The 64-bit MCT free-running counter exposed as the system clocksource. */
struct clocksource mct_frc = {
	.name		= "mct-frc",
	.rating		= 400,
	.read		= exynos4_frc_read,
	.mask		= CLOCKSOURCE_MASK(64),
	.flags		= CLOCK_SOURCE_IS_CONTINUOUS,
	.resume		= exynos4_frc_resume,
};
/* Start the free-running counter at zero and register the clocksource. */
static void __init exynos4_clocksource_init(void)
{
	exynos4_mct_frc_start(0, 0);

	if (clocksource_register_hz(&mct_frc, clk_rate))
		panic("%s: can't register clocksource\n", mct_frc.name);
}
  143. static void exynos4_mct_comp0_stop(void)
  144. {
  145. unsigned int tcon;
  146. tcon = __raw_readl(EXYNOS4_MCT_G_TCON);
  147. tcon &= ~(MCT_G_TCON_COMP0_ENABLE | MCT_G_TCON_COMP0_AUTO_INC);
  148. exynos4_mct_write(tcon, EXYNOS4_MCT_G_TCON);
  149. exynos4_mct_write(0, EXYNOS4_MCT_G_INT_ENB);
  150. }
  151. static void exynos4_mct_comp0_start(enum clock_event_mode mode,
  152. unsigned long cycles)
  153. {
  154. unsigned int tcon;
  155. cycle_t comp_cycle;
  156. tcon = __raw_readl(EXYNOS4_MCT_G_TCON);
  157. if (mode == CLOCK_EVT_MODE_PERIODIC) {
  158. tcon |= MCT_G_TCON_COMP0_AUTO_INC;
  159. exynos4_mct_write(cycles, EXYNOS4_MCT_G_COMP0_ADD_INCR);
  160. }
  161. comp_cycle = exynos4_frc_read(&mct_frc) + cycles;
  162. exynos4_mct_write((u32)comp_cycle, EXYNOS4_MCT_G_COMP0_L);
  163. exynos4_mct_write((u32)(comp_cycle >> 32), EXYNOS4_MCT_G_COMP0_U);
  164. exynos4_mct_write(0x1, EXYNOS4_MCT_G_INT_ENB);
  165. tcon |= MCT_G_TCON_COMP0_ENABLE;
  166. exynos4_mct_write(tcon , EXYNOS4_MCT_G_TCON);
  167. }
/* Clock event callback: fire comparator 0 'cycles' ticks from now. */
static int exynos4_comp_set_next_event(unsigned long cycles,
				       struct clock_event_device *evt)
{
	exynos4_mct_comp0_start(evt->mode, cycles);

	return 0;
}
  174. static void exynos4_comp_set_mode(enum clock_event_mode mode,
  175. struct clock_event_device *evt)
  176. {
  177. unsigned long cycles_per_jiffy;
  178. exynos4_mct_comp0_stop();
  179. switch (mode) {
  180. case CLOCK_EVT_MODE_PERIODIC:
  181. cycles_per_jiffy =
  182. (((unsigned long long) NSEC_PER_SEC / HZ * evt->mult) >> evt->shift);
  183. exynos4_mct_comp0_start(mode, cycles_per_jiffy);
  184. break;
  185. case CLOCK_EVT_MODE_ONESHOT:
  186. case CLOCK_EVT_MODE_UNUSED:
  187. case CLOCK_EVT_MODE_SHUTDOWN:
  188. case CLOCK_EVT_MODE_RESUME:
  189. break;
  190. }
  191. }
/*
 * Clock event device backed by global comparator 0; rated below the
 * per-CPU local timers (450) and pinned to CPU0 in
 * exynos4_clockevent_init().
 */
static struct clock_event_device mct_comp_device = {
	.name		= "mct-comp",
	.features	= CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT,
	.rating		= 250,
	.set_next_event	= exynos4_comp_set_next_event,
	.set_mode	= exynos4_comp_set_mode,
};
/* Comparator 0 interrupt: ack the status bit, then run the event handler. */
static irqreturn_t exynos4_mct_comp_isr(int irq, void *dev_id)
{
	struct clock_event_device *evt = dev_id;

	/* Clear the comparator 0 interrupt status. */
	exynos4_mct_write(0x1, EXYNOS4_MCT_G_INT_CSTAT);

	evt->event_handler(evt);

	return IRQ_HANDLED;
}
/* IRQ action for the global comparator 0 line (MCT_G0). */
static struct irqaction mct_comp_event_irq = {
	.name		= "mct_comp_irq",
	.flags		= IRQF_TIMER | IRQF_IRQPOLL,
	.handler	= exynos4_mct_comp_isr,
	.dev_id		= &mct_comp_device,
};
  212. static void exynos4_clockevent_init(void)
  213. {
  214. clockevents_calc_mult_shift(&mct_comp_device, clk_rate, 5);
  215. mct_comp_device.max_delta_ns =
  216. clockevent_delta2ns(0xffffffff, &mct_comp_device);
  217. mct_comp_device.min_delta_ns =
  218. clockevent_delta2ns(0xf, &mct_comp_device);
  219. mct_comp_device.cpumask = cpumask_of(0);
  220. clockevents_register_device(&mct_comp_device);
  221. if (soc_is_exynos5250())
  222. setup_irq(EXYNOS5_IRQ_MCT_G0, &mct_comp_event_irq);
  223. else
  224. setup_irq(EXYNOS4_IRQ_MCT_G0, &mct_comp_event_irq);
  225. }
#ifdef CONFIG_LOCAL_TIMERS
/* One local MCT timer state instance per CPU. */
static DEFINE_PER_CPU(struct mct_clock_event_device, percpu_mct_tick);

/* Clock event handling */
  229. static void exynos4_mct_tick_stop(struct mct_clock_event_device *mevt)
  230. {
  231. unsigned long tmp;
  232. unsigned long mask = MCT_L_TCON_INT_START | MCT_L_TCON_TIMER_START;
  233. void __iomem *addr = mevt->base + MCT_L_TCON_OFFSET;
  234. tmp = __raw_readl(addr);
  235. if (tmp & mask) {
  236. tmp &= ~mask;
  237. exynos4_mct_write(tmp, addr);
  238. }
  239. }
  240. static void exynos4_mct_tick_start(unsigned long cycles,
  241. struct mct_clock_event_device *mevt)
  242. {
  243. unsigned long tmp;
  244. exynos4_mct_tick_stop(mevt);
  245. tmp = (1 << 31) | cycles; /* MCT_L_UPDATE_ICNTB */
  246. /* update interrupt count buffer */
  247. exynos4_mct_write(tmp, mevt->base + MCT_L_ICNTB_OFFSET);
  248. /* enable MCT tick interrupt */
  249. exynos4_mct_write(0x1, mevt->base + MCT_L_INT_ENB_OFFSET);
  250. tmp = __raw_readl(mevt->base + MCT_L_TCON_OFFSET);
  251. tmp |= MCT_L_TCON_INT_START | MCT_L_TCON_TIMER_START |
  252. MCT_L_TCON_INTERVAL_MODE;
  253. exynos4_mct_write(tmp, mevt->base + MCT_L_TCON_OFFSET);
  254. }
/* Clock event callback: arm this CPU's local timer 'cycles' ticks ahead. */
static int exynos4_tick_set_next_event(unsigned long cycles,
				       struct clock_event_device *evt)
{
	struct mct_clock_event_device *mevt = this_cpu_ptr(&percpu_mct_tick);

	exynos4_mct_tick_start(cycles, mevt);

	return 0;
}
  262. static inline void exynos4_tick_set_mode(enum clock_event_mode mode,
  263. struct clock_event_device *evt)
  264. {
  265. struct mct_clock_event_device *mevt = this_cpu_ptr(&percpu_mct_tick);
  266. unsigned long cycles_per_jiffy;
  267. exynos4_mct_tick_stop(mevt);
  268. switch (mode) {
  269. case CLOCK_EVT_MODE_PERIODIC:
  270. cycles_per_jiffy =
  271. (((unsigned long long) NSEC_PER_SEC / HZ * evt->mult) >> evt->shift);
  272. exynos4_mct_tick_start(cycles_per_jiffy, mevt);
  273. break;
  274. case CLOCK_EVT_MODE_ONESHOT:
  275. case CLOCK_EVT_MODE_UNUSED:
  276. case CLOCK_EVT_MODE_SHUTDOWN:
  277. case CLOCK_EVT_MODE_RESUME:
  278. break;
  279. }
  280. }
  281. static int exynos4_mct_tick_clear(struct mct_clock_event_device *mevt)
  282. {
  283. struct clock_event_device *evt = mevt->evt;
  284. /*
  285. * This is for supporting oneshot mode.
  286. * Mct would generate interrupt periodically
  287. * without explicit stopping.
  288. */
  289. if (evt->mode != CLOCK_EVT_MODE_PERIODIC)
  290. exynos4_mct_tick_stop(mevt);
  291. /* Clear the MCT tick interrupt */
  292. if (__raw_readl(mevt->base + MCT_L_INT_CSTAT_OFFSET) & 1) {
  293. exynos4_mct_write(0x1, mevt->base + MCT_L_INT_CSTAT_OFFSET);
  294. return 1;
  295. } else {
  296. return 0;
  297. }
  298. }
/* Local tick interrupt: ack (stopping the timer in one-shot mode), then
 * run the clock event handler. */
static irqreturn_t exynos4_mct_tick_isr(int irq, void *dev_id)
{
	struct mct_clock_event_device *mevt = dev_id;
	struct clock_event_device *evt = mevt->evt;

	exynos4_mct_tick_clear(mevt);

	evt->event_handler(evt);

	return IRQ_HANDLED;
}
/* IRQ action for CPU0's local tick timer (SPI mode); dev_id filled in by
 * exynos4_local_timer_setup(). */
static struct irqaction mct_tick0_event_irq = {
	.name		= "mct_tick0_irq",
	.flags		= IRQF_TIMER | IRQF_NOBALANCING,
	.handler	= exynos4_mct_tick_isr,
};
/* IRQ action for CPU1's local tick timer (SPI mode); dev_id filled in by
 * exynos4_local_timer_setup(). */
static struct irqaction mct_tick1_event_irq = {
	.name		= "mct_tick1_irq",
	.flags		= IRQF_TIMER | IRQF_NOBALANCING,
	.handler	= exynos4_mct_tick_isr,
};
/*
 * exynos4_local_timer_setup - register the calling CPU's local MCT timer
 * as its clock event device.
 *
 * Invoked per CPU through exynos4_mct_tick_ops.  Fills in the per-CPU
 * mct_clock_event_device, registers the clock event device, preloads the
 * timer count buffer and wires up the tick interrupt: either a dedicated
 * SPI per timer (IRQ number chosen by SoC) or the shared per-CPU PPI
 * requested earlier in exynos4_timer_resources().
 */
static int __cpuinit exynos4_local_timer_setup(struct clock_event_device *evt)
{
	struct mct_clock_event_device *mevt;
	unsigned int cpu = smp_processor_id();
	int mct_lx_irq;

	mevt = this_cpu_ptr(&percpu_mct_tick);
	mevt->evt = evt;

	mevt->base = EXYNOS4_MCT_L_BASE(cpu);
	sprintf(mevt->name, "mct_tick%d", cpu);

	evt->name = mevt->name;
	evt->cpumask = cpumask_of(cpu);
	evt->set_next_event = exynos4_tick_set_next_event;
	evt->set_mode = exynos4_tick_set_mode;
	evt->features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT;
	evt->rating = 450;

	/* Effective local tick rate is divided by TICK_BASE_CNT + 1. */
	clockevents_calc_mult_shift(evt, clk_rate / (TICK_BASE_CNT + 1), 5);
	evt->max_delta_ns =
		clockevent_delta2ns(0x7fffffff, evt);
	evt->min_delta_ns =
		clockevent_delta2ns(0xf, evt);

	clockevents_register_device(evt);

	/* Preload the timer count buffer. */
	exynos4_mct_write(TICK_BASE_CNT, mevt->base + MCT_L_TCNTB_OFFSET);

	if (mct_int_type == MCT_INT_SPI) {
		if (cpu == 0) {
			mct_lx_irq = soc_is_exynos4210() ? EXYNOS4_IRQ_MCT_L0 :
						EXYNOS5_IRQ_MCT_L0;
			mct_tick0_event_irq.dev_id = mevt;
			evt->irq = mct_lx_irq;
			setup_irq(mct_lx_irq, &mct_tick0_event_irq);
		} else {
			mct_lx_irq = soc_is_exynos4210() ? EXYNOS4_IRQ_MCT_L1 :
						EXYNOS5_IRQ_MCT_L1;
			mct_tick1_event_irq.dev_id = mevt;
			evt->irq = mct_lx_irq;
			setup_irq(mct_lx_irq, &mct_tick1_event_irq);
			/* Pin the second timer's SPI to CPU1. */
			irq_set_affinity(mct_lx_irq, cpumask_of(1));
		}
	} else {
		enable_percpu_irq(EXYNOS_IRQ_MCT_LOCALTIMER, 0);
	}

	return 0;
}
  359. static void exynos4_local_timer_stop(struct clock_event_device *evt)
  360. {
  361. unsigned int cpu = smp_processor_id();
  362. evt->set_mode(CLOCK_EVT_MODE_UNUSED, evt);
  363. if (mct_int_type == MCT_INT_SPI)
  364. if (cpu == 0)
  365. remove_irq(evt->irq, &mct_tick0_event_irq);
  366. else
  367. remove_irq(evt->irq, &mct_tick1_event_irq);
  368. else
  369. disable_percpu_irq(EXYNOS_IRQ_MCT_LOCALTIMER);
  370. }
/* Hooks called by the generic local-timer core on CPU bring-up/teardown. */
static struct local_timer_ops exynos4_mct_tick_ops __cpuinitdata = {
	.setup	= exynos4_local_timer_setup,
	.stop	= exynos4_local_timer_stop,
};
#endif /* CONFIG_LOCAL_TIMERS */
  376. static void __init exynos4_timer_resources(void)
  377. {
  378. struct clk *mct_clk;
  379. mct_clk = clk_get(NULL, "xtal");
  380. clk_rate = clk_get_rate(mct_clk);
  381. #ifdef CONFIG_LOCAL_TIMERS
  382. if (mct_int_type == MCT_INT_PPI) {
  383. int err;
  384. err = request_percpu_irq(EXYNOS_IRQ_MCT_LOCALTIMER,
  385. exynos4_mct_tick_isr, "MCT",
  386. &percpu_mct_tick);
  387. WARN(err, "MCT: can't request IRQ %d (%d)\n",
  388. EXYNOS_IRQ_MCT_LOCALTIMER, err);
  389. }
  390. local_timer_register(&exynos4_mct_tick_ops);
  391. #endif /* CONFIG_LOCAL_TIMERS */
  392. }
  393. static void __init exynos_timer_init(void)
  394. {
  395. if (soc_is_exynos5440()) {
  396. arch_timer_of_register();
  397. return;
  398. }
  399. if ((soc_is_exynos4210()) || (soc_is_exynos5250()))
  400. mct_int_type = MCT_INT_SPI;
  401. else
  402. mct_int_type = MCT_INT_PPI;
  403. exynos4_timer_resources();
  404. exynos4_clocksource_init();
  405. exynos4_clockevent_init();
  406. }
/* System timer descriptor; .init is called during machine timer setup. */
struct sys_timer exynos4_timer = {
	.init		= exynos_timer_init,
};