exynos_mct.c

/* linux/arch/arm/mach-exynos4/mct.c
 *
 * Copyright (c) 2011 Samsung Electronics Co., Ltd.
 *		http://www.samsung.com
 *
 * EXYNOS4 MCT(Multi-Core Timer) support
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
*/

#include <linux/sched.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/err.h>
#include <linux/clk.h>
#include <linux/clockchips.h>
#include <linux/platform_device.h>
#include <linux/delay.h>
#include <linux/percpu.h>
#include <linux/of.h>
#include <linux/of_irq.h>
#include <linux/of_address.h>
#include <linux/clocksource.h>

#include <asm/localtimer.h>
#include <plat/cpu.h>
#include <mach/map.h>
#include <mach/irqs.h>
#include <asm/mach/time.h>
#define EXYNOS4_MCTREG(x)		(x)
#define EXYNOS4_MCT_G_CNT_L		EXYNOS4_MCTREG(0x100)
#define EXYNOS4_MCT_G_CNT_U		EXYNOS4_MCTREG(0x104)
#define EXYNOS4_MCT_G_CNT_WSTAT		EXYNOS4_MCTREG(0x110)
#define EXYNOS4_MCT_G_COMP0_L		EXYNOS4_MCTREG(0x200)
#define EXYNOS4_MCT_G_COMP0_U		EXYNOS4_MCTREG(0x204)
#define EXYNOS4_MCT_G_COMP0_ADD_INCR	EXYNOS4_MCTREG(0x208)
#define EXYNOS4_MCT_G_TCON		EXYNOS4_MCTREG(0x240)
#define EXYNOS4_MCT_G_INT_CSTAT		EXYNOS4_MCTREG(0x244)
#define EXYNOS4_MCT_G_INT_ENB		EXYNOS4_MCTREG(0x248)
#define EXYNOS4_MCT_G_WSTAT		EXYNOS4_MCTREG(0x24C)
#define _EXYNOS4_MCT_L_BASE		EXYNOS4_MCTREG(0x300)
#define EXYNOS4_MCT_L_BASE(x)		(_EXYNOS4_MCT_L_BASE + (0x100 * x))
#define EXYNOS4_MCT_L_MASK		(0xffffff00)

#define MCT_L_TCNTB_OFFSET		(0x00)
#define MCT_L_ICNTB_OFFSET		(0x08)
#define MCT_L_TCON_OFFSET		(0x20)
#define MCT_L_INT_CSTAT_OFFSET		(0x30)
#define MCT_L_INT_ENB_OFFSET		(0x34)
#define MCT_L_WSTAT_OFFSET		(0x40)

#define MCT_G_TCON_START		(1 << 8)
#define MCT_G_TCON_COMP0_AUTO_INC	(1 << 1)
#define MCT_G_TCON_COMP0_ENABLE		(1 << 0)

#define MCT_L_TCON_INTERVAL_MODE	(1 << 2)
#define MCT_L_TCON_INT_START		(1 << 1)
#define MCT_L_TCON_TIMER_START		(1 << 0)

#define TICK_BASE_CNT	1
enum {
	MCT_INT_SPI,
	MCT_INT_PPI
};

enum {
	MCT_G0_IRQ,
	MCT_G1_IRQ,
	MCT_G2_IRQ,
	MCT_G3_IRQ,
	MCT_L0_IRQ,
	MCT_L1_IRQ,
	MCT_L2_IRQ,
	MCT_L3_IRQ,
	MCT_NR_IRQS,
};

static void __iomem *reg_base;
static unsigned long clk_rate;
static unsigned int mct_int_type;
static int mct_irqs[MCT_NR_IRQS];

struct mct_clock_event_device {
	struct clock_event_device *evt;
	unsigned long base;
	char name[10];
};
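
/*
 * MCT register writes do not take effect immediately: each write has a
 * matching write-status (WSTAT) bit that the hardware sets once the new
 * value has been applied. exynos4_mct_write() below issues the write,
 * derives the WSTAT register and bit for the offset that was written,
 * then polls for up to roughly 1 ms and acknowledges the status bit.
 */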
static void exynos4_mct_write(unsigned int value, unsigned long offset)
{
	unsigned long stat_addr;
	u32 mask;
	u32 i;

	__raw_writel(value, reg_base + offset);

	if (likely(offset >= EXYNOS4_MCT_L_BASE(0))) {
		/* WSTAT register of the local timer block that was written */
		stat_addr = (offset & EXYNOS4_MCT_L_MASK) + MCT_L_WSTAT_OFFSET;
		/* discriminate on the register offset within that block */
		switch (offset & ~EXYNOS4_MCT_L_MASK) {
		case MCT_L_TCON_OFFSET:
			mask = 1 << 3;		/* L_TCON write status */
			break;
		case MCT_L_ICNTB_OFFSET:
			mask = 1 << 1;		/* L_ICNTB write status */
			break;
		case MCT_L_TCNTB_OFFSET:
			mask = 1 << 0;		/* L_TCNTB write status */
			break;
		default:
			return;
		}
	} else {
		switch (offset) {
		case EXYNOS4_MCT_G_TCON:
			stat_addr = EXYNOS4_MCT_G_WSTAT;
			mask = 1 << 16;		/* G_TCON write status */
			break;
		case EXYNOS4_MCT_G_COMP0_L:
			stat_addr = EXYNOS4_MCT_G_WSTAT;
			mask = 1 << 0;		/* G_COMP0_L write status */
			break;
		case EXYNOS4_MCT_G_COMP0_U:
			stat_addr = EXYNOS4_MCT_G_WSTAT;
			mask = 1 << 1;		/* G_COMP0_U write status */
			break;
		case EXYNOS4_MCT_G_COMP0_ADD_INCR:
			stat_addr = EXYNOS4_MCT_G_WSTAT;
			mask = 1 << 2;		/* G_COMP0_ADD_INCR write status */
			break;
		case EXYNOS4_MCT_G_CNT_L:
			stat_addr = EXYNOS4_MCT_G_CNT_WSTAT;
			mask = 1 << 0;		/* G_CNT_L write status */
			break;
		case EXYNOS4_MCT_G_CNT_U:
			stat_addr = EXYNOS4_MCT_G_CNT_WSTAT;
			mask = 1 << 1;		/* G_CNT_U write status */
			break;
		default:
			return;
		}
	}

	/* Wait maximum 1 ms until written values are applied */
	for (i = 0; i < loops_per_jiffy / 1000 * HZ; i++)
		if (__raw_readl(reg_base + stat_addr) & mask) {
			__raw_writel(mask, reg_base + stat_addr);
			return;
		}

	panic("MCT hangs after writing %d (offset:0x%lx)\n", value, offset);
}
/* Clocksource handling */
static void exynos4_mct_frc_start(u32 hi, u32 lo)
{
	u32 reg;

	exynos4_mct_write(lo, EXYNOS4_MCT_G_CNT_L);
	exynos4_mct_write(hi, EXYNOS4_MCT_G_CNT_U);

	reg = __raw_readl(reg_base + EXYNOS4_MCT_G_TCON);
	reg |= MCT_G_TCON_START;
	exynos4_mct_write(reg, EXYNOS4_MCT_G_TCON);
}
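
/*
 * The 64-bit free-running counter is read as two 32-bit halves. The
 * upper word is sampled before and after the lower word and the read is
 * retried until both samples match, so a carry out of the low word
 * cannot produce a torn 64-bit value.
 */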
static cycle_t exynos4_frc_read(struct clocksource *cs)
{
	unsigned int lo, hi;
	u32 hi2 = __raw_readl(reg_base + EXYNOS4_MCT_G_CNT_U);

	do {
		hi = hi2;
		lo = __raw_readl(reg_base + EXYNOS4_MCT_G_CNT_L);
		hi2 = __raw_readl(reg_base + EXYNOS4_MCT_G_CNT_U);
	} while (hi != hi2);

	return ((cycle_t)hi << 32) | lo;
}
static void exynos4_frc_resume(struct clocksource *cs)
{
	exynos4_mct_frc_start(0, 0);
}

struct clocksource mct_frc = {
	.name		= "mct-frc",
	.rating		= 400,
	.read		= exynos4_frc_read,
	.mask		= CLOCKSOURCE_MASK(64),
	.flags		= CLOCK_SOURCE_IS_CONTINUOUS,
	.resume		= exynos4_frc_resume,
};

static void __init exynos4_clocksource_init(void)
{
	exynos4_mct_frc_start(0, 0);

	if (clocksource_register_hz(&mct_frc, clk_rate))
		panic("%s: can't register clocksource\n", mct_frc.name);
}
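
/*
 * Global comparator 0 backs the "mct-comp" clock_event_device. It is
 * always programmed relative to the current free-running counter value;
 * in periodic mode the comparator re-arms itself by adding
 * G_COMP0_ADD_INCR after every match.
 */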
static void exynos4_mct_comp0_stop(void)
{
	unsigned int tcon;

	tcon = __raw_readl(reg_base + EXYNOS4_MCT_G_TCON);
	tcon &= ~(MCT_G_TCON_COMP0_ENABLE | MCT_G_TCON_COMP0_AUTO_INC);

	exynos4_mct_write(tcon, EXYNOS4_MCT_G_TCON);
	exynos4_mct_write(0, EXYNOS4_MCT_G_INT_ENB);
}

static void exynos4_mct_comp0_start(enum clock_event_mode mode,
				    unsigned long cycles)
{
	unsigned int tcon;
	cycle_t comp_cycle;

	tcon = __raw_readl(reg_base + EXYNOS4_MCT_G_TCON);

	if (mode == CLOCK_EVT_MODE_PERIODIC) {
		tcon |= MCT_G_TCON_COMP0_AUTO_INC;
		exynos4_mct_write(cycles, EXYNOS4_MCT_G_COMP0_ADD_INCR);
	}

	comp_cycle = exynos4_frc_read(&mct_frc) + cycles;
	exynos4_mct_write((u32)comp_cycle, EXYNOS4_MCT_G_COMP0_L);
	exynos4_mct_write((u32)(comp_cycle >> 32), EXYNOS4_MCT_G_COMP0_U);

	exynos4_mct_write(0x1, EXYNOS4_MCT_G_INT_ENB);

	tcon |= MCT_G_TCON_COMP0_ENABLE;
	exynos4_mct_write(tcon, EXYNOS4_MCT_G_TCON);
}
static int exynos4_comp_set_next_event(unsigned long cycles,
				       struct clock_event_device *evt)
{
	exynos4_mct_comp0_start(evt->mode, cycles);

	return 0;
}

static void exynos4_comp_set_mode(enum clock_event_mode mode,
				  struct clock_event_device *evt)
{
	unsigned long cycles_per_jiffy;

	exynos4_mct_comp0_stop();

	switch (mode) {
	case CLOCK_EVT_MODE_PERIODIC:
		cycles_per_jiffy =
			(((unsigned long long) NSEC_PER_SEC / HZ * evt->mult) >> evt->shift);
		exynos4_mct_comp0_start(mode, cycles_per_jiffy);
		break;

	case CLOCK_EVT_MODE_ONESHOT:
	case CLOCK_EVT_MODE_UNUSED:
	case CLOCK_EVT_MODE_SHUTDOWN:
	case CLOCK_EVT_MODE_RESUME:
		break;
	}
}

static struct clock_event_device mct_comp_device = {
	.name		= "mct-comp",
	.features	= CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT,
	.rating		= 250,
	.set_next_event	= exynos4_comp_set_next_event,
	.set_mode	= exynos4_comp_set_mode,
};
static irqreturn_t exynos4_mct_comp_isr(int irq, void *dev_id)
{
	struct clock_event_device *evt = dev_id;

	exynos4_mct_write(0x1, EXYNOS4_MCT_G_INT_CSTAT);

	evt->event_handler(evt);

	return IRQ_HANDLED;
}

static struct irqaction mct_comp_event_irq = {
	.name		= "mct_comp_irq",
	.flags		= IRQF_TIMER | IRQF_IRQPOLL,
	.handler	= exynos4_mct_comp_isr,
	.dev_id		= &mct_comp_device,
};

static void exynos4_clockevent_init(void)
{
	mct_comp_device.cpumask = cpumask_of(0);
	clockevents_config_and_register(&mct_comp_device, clk_rate,
					0xf, 0xffffffff);
	setup_irq(mct_irqs[MCT_G0_IRQ], &mct_comp_event_irq);
}
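
/*
 * Per-CPU tick handling: each CPU drives one of the MCT local timers
 * (L0..L3) as a rating-450 clock_event_device, which takes over tick
 * duty from the lower-rated global comparator once it is registered.
 */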
#ifdef CONFIG_LOCAL_TIMERS

static DEFINE_PER_CPU(struct mct_clock_event_device, percpu_mct_tick);

/* Clock event handling */
static void exynos4_mct_tick_stop(struct mct_clock_event_device *mevt)
{
	unsigned long tmp;
	unsigned long mask = MCT_L_TCON_INT_START | MCT_L_TCON_TIMER_START;
	unsigned long offset = mevt->base + MCT_L_TCON_OFFSET;

	tmp = __raw_readl(reg_base + offset);
	if (tmp & mask) {
		tmp &= ~mask;
		exynos4_mct_write(tmp, offset);
	}
}
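
/*
 * A local timer runs in interval mode with its tick counter reloaded
 * from TCNTB (kept at TICK_BASE_CNT); the programmed event length goes
 * into the interrupt count buffer (ICNTB), and the interrupt fires once
 * that many tick periods have elapsed. This is why the clockevent in
 * exynos4_local_timer_setup() is registered at
 * clk_rate / (TICK_BASE_CNT + 1).
 */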
static void exynos4_mct_tick_start(unsigned long cycles,
				   struct mct_clock_event_device *mevt)
{
	unsigned long tmp;

	exynos4_mct_tick_stop(mevt);

	tmp = (1 << 31) | cycles;	/* MCT_L_UPDATE_ICNTB */

	/* update interrupt count buffer */
	exynos4_mct_write(tmp, mevt->base + MCT_L_ICNTB_OFFSET);

	/* enable MCT tick interrupt */
	exynos4_mct_write(0x1, mevt->base + MCT_L_INT_ENB_OFFSET);

	tmp = __raw_readl(reg_base + mevt->base + MCT_L_TCON_OFFSET);
	tmp |= MCT_L_TCON_INT_START | MCT_L_TCON_TIMER_START |
	       MCT_L_TCON_INTERVAL_MODE;
	exynos4_mct_write(tmp, mevt->base + MCT_L_TCON_OFFSET);
}
static int exynos4_tick_set_next_event(unsigned long cycles,
				       struct clock_event_device *evt)
{
	struct mct_clock_event_device *mevt = this_cpu_ptr(&percpu_mct_tick);

	exynos4_mct_tick_start(cycles, mevt);

	return 0;
}

static inline void exynos4_tick_set_mode(enum clock_event_mode mode,
					 struct clock_event_device *evt)
{
	struct mct_clock_event_device *mevt = this_cpu_ptr(&percpu_mct_tick);
	unsigned long cycles_per_jiffy;

	exynos4_mct_tick_stop(mevt);

	switch (mode) {
	case CLOCK_EVT_MODE_PERIODIC:
		cycles_per_jiffy =
			(((unsigned long long) NSEC_PER_SEC / HZ * evt->mult) >> evt->shift);
		exynos4_mct_tick_start(cycles_per_jiffy, mevt);
		break;

	case CLOCK_EVT_MODE_ONESHOT:
	case CLOCK_EVT_MODE_UNUSED:
	case CLOCK_EVT_MODE_SHUTDOWN:
	case CLOCK_EVT_MODE_RESUME:
		break;
	}
}

static int exynos4_mct_tick_clear(struct mct_clock_event_device *mevt)
{
	struct clock_event_device *evt = mevt->evt;

	/*
	 * This is for supporting oneshot mode.
	 * Mct would generate interrupt periodically
	 * without explicit stopping.
	 */
	if (evt->mode != CLOCK_EVT_MODE_PERIODIC)
		exynos4_mct_tick_stop(mevt);

	/* Clear the MCT tick interrupt */
	if (__raw_readl(reg_base + mevt->base + MCT_L_INT_CSTAT_OFFSET) & 1) {
		exynos4_mct_write(0x1, mevt->base + MCT_L_INT_CSTAT_OFFSET);
		return 1;
	} else {
		return 0;
	}
}
static irqreturn_t exynos4_mct_tick_isr(int irq, void *dev_id)
{
	struct mct_clock_event_device *mevt = dev_id;
	struct clock_event_device *evt = mevt->evt;

	exynos4_mct_tick_clear(mevt);

	evt->event_handler(evt);

	return IRQ_HANDLED;
}

static struct irqaction mct_tick0_event_irq = {
	.name		= "mct_tick0_irq",
	.flags		= IRQF_TIMER | IRQF_NOBALANCING,
	.handler	= exynos4_mct_tick_isr,
};

static struct irqaction mct_tick1_event_irq = {
	.name		= "mct_tick1_irq",
	.flags		= IRQF_TIMER | IRQF_NOBALANCING,
	.handler	= exynos4_mct_tick_isr,
};
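
/*
 * CPU-local timer bring-up. With SPI-type interrupts each local tick
 * has its own interrupt line, so the CPU1 line is explicitly affined to
 * CPU1; with PPI-type interrupts the single per-CPU IRQ requested in
 * exynos4_timer_resources() only needs to be enabled on the calling CPU.
 */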
static int __cpuinit exynos4_local_timer_setup(struct clock_event_device *evt)
{
	struct mct_clock_event_device *mevt;
	unsigned int cpu = smp_processor_id();

	mevt = this_cpu_ptr(&percpu_mct_tick);
	mevt->evt = evt;

	mevt->base = EXYNOS4_MCT_L_BASE(cpu);
	sprintf(mevt->name, "mct_tick%d", cpu);

	evt->name = mevt->name;
	evt->cpumask = cpumask_of(cpu);
	evt->set_next_event = exynos4_tick_set_next_event;
	evt->set_mode = exynos4_tick_set_mode;
	evt->features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT;
	evt->rating = 450;
	clockevents_config_and_register(evt, clk_rate / (TICK_BASE_CNT + 1),
					0xf, 0x7fffffff);

	exynos4_mct_write(TICK_BASE_CNT, mevt->base + MCT_L_TCNTB_OFFSET);

	if (mct_int_type == MCT_INT_SPI) {
		if (cpu == 0) {
			mct_tick0_event_irq.dev_id = mevt;
			evt->irq = mct_irqs[MCT_L0_IRQ];
			setup_irq(evt->irq, &mct_tick0_event_irq);
		} else {
			mct_tick1_event_irq.dev_id = mevt;
			evt->irq = mct_irqs[MCT_L1_IRQ];
			setup_irq(evt->irq, &mct_tick1_event_irq);
			irq_set_affinity(evt->irq, cpumask_of(1));
		}
	} else {
		enable_percpu_irq(mct_irqs[MCT_L0_IRQ], 0);
	}

	return 0;
}
static void exynos4_local_timer_stop(struct clock_event_device *evt)
{
	unsigned int cpu = smp_processor_id();

	evt->set_mode(CLOCK_EVT_MODE_UNUSED, evt);

	if (mct_int_type == MCT_INT_SPI) {
		if (cpu == 0)
			remove_irq(evt->irq, &mct_tick0_event_irq);
		else
			remove_irq(evt->irq, &mct_tick1_event_irq);
	} else {
		disable_percpu_irq(mct_irqs[MCT_L0_IRQ]);
	}
}

static struct local_timer_ops exynos4_mct_tick_ops __cpuinitdata = {
	.setup	= exynos4_local_timer_setup,
	.stop	= exynos4_local_timer_stop,
};
#endif /* CONFIG_LOCAL_TIMERS */
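
/*
 * Common resource setup: cache the register base and the source clock
 * rate, and, in the PPI case, request the per-CPU local timer IRQ up
 * front so that exynos4_local_timer_setup() only has to enable it.
 */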
static void __init exynos4_timer_resources(void __iomem *base)
{
	struct clk *mct_clk;

	mct_clk = clk_get(NULL, "xtal");
	clk_rate = clk_get_rate(mct_clk);

	reg_base = base;
	if (!reg_base)
		panic("%s: unable to ioremap mct address space\n", __func__);

#ifdef CONFIG_LOCAL_TIMERS
	if (mct_int_type == MCT_INT_PPI) {
		int err;

		err = request_percpu_irq(mct_irqs[MCT_L0_IRQ],
					 exynos4_mct_tick_isr, "MCT",
					 &percpu_mct_tick);
		WARN(err, "MCT: can't request IRQ %d (%d)\n",
		     mct_irqs[MCT_L0_IRQ], err);
	}

	local_timer_register(&exynos4_mct_tick_ops);
#endif /* CONFIG_LOCAL_TIMERS */
}
void __init mct_init(void)
{
	if (soc_is_exynos4210()) {
		mct_irqs[MCT_G0_IRQ] = EXYNOS4_IRQ_MCT_G0;
		mct_irqs[MCT_L0_IRQ] = EXYNOS4_IRQ_MCT_L0;
		mct_irqs[MCT_L1_IRQ] = EXYNOS4_IRQ_MCT_L1;
		mct_int_type = MCT_INT_SPI;
	} else {
		panic("unable to determine mct controller type\n");
	}

	exynos4_timer_resources(S5P_VA_SYSTIMER);
	exynos4_clocksource_init();
	exynos4_clockevent_init();
}
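
/*
 * Device-tree probe path: interrupt index 0 is the global timer
 * interrupt used by this driver; local timer interrupts, if present,
 * follow the four global ones in the "interrupts" property.
 */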
static void __init mct_init_dt(struct device_node *np, unsigned int int_type)
{
	u32 nr_irqs, i;

	mct_int_type = int_type;

	/* This driver uses only one global timer interrupt */
	mct_irqs[MCT_G0_IRQ] = irq_of_parse_and_map(np, MCT_G0_IRQ);

	/*
	 * Find out the number of local irqs specified. The local
	 * timer irqs are specified after the four global timer
	 * irqs are specified.
	 */
	nr_irqs = of_irq_count(np);
	for (i = MCT_L0_IRQ; i < nr_irqs; i++)
		mct_irqs[i] = irq_of_parse_and_map(np, i);

	exynos4_timer_resources(of_iomap(np, 0));
	exynos4_clocksource_init();
	exynos4_clockevent_init();
}

static void __init mct_init_spi(struct device_node *np)
{
	return mct_init_dt(np, MCT_INT_SPI);
}

static void __init mct_init_ppi(struct device_node *np)
{
	return mct_init_dt(np, MCT_INT_PPI);
}
CLOCKSOURCE_OF_DECLARE(exynos4210, "samsung,exynos4210-mct", mct_init_spi);
CLOCKSOURCE_OF_DECLARE(exynos4412, "samsung,exynos4412-mct", mct_init_ppi);