/* drivers/clocksource/sh_cmt.c (scraped page header and line-number index removed) */
/*
 * SuperH Timer Support - CMT
 *
 * Copyright (C) 2008 Magnus Damm
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */
  19. #include <linux/init.h>
  20. #include <linux/platform_device.h>
  21. #include <linux/spinlock.h>
  22. #include <linux/interrupt.h>
  23. #include <linux/ioport.h>
  24. #include <linux/io.h>
  25. #include <linux/clk.h>
  26. #include <linux/irq.h>
  27. #include <linux/err.h>
  28. #include <linux/delay.h>
  29. #include <linux/clocksource.h>
  30. #include <linux/clockchips.h>
  31. #include <linux/sh_timer.h>
  32. #include <linux/slab.h>
  33. #include <linux/module.h>
  34. #include <linux/pm_domain.h>
  35. #include <linux/pm_runtime.h>
  36. struct sh_cmt_priv {
  37. void __iomem *mapbase;
  38. void __iomem *mapbase_str;
  39. struct clk *clk;
  40. unsigned long width; /* 16 or 32 bit version of hardware block */
  41. unsigned long overflow_bit;
  42. unsigned long clear_bits;
  43. struct irqaction irqaction;
  44. struct platform_device *pdev;
  45. unsigned long flags;
  46. unsigned long match_value;
  47. unsigned long next_match_value;
  48. unsigned long max_match_value;
  49. unsigned long rate;
  50. raw_spinlock_t lock;
  51. struct clock_event_device ced;
  52. struct clocksource cs;
  53. unsigned long total_cycles;
  54. bool cs_enabled;
  55. /* callbacks for CMSTR and CMCSR access */
  56. unsigned long (*read_control)(void __iomem *base, unsigned long offs);
  57. void (*write_control)(void __iomem *base, unsigned long offs,
  58. unsigned long value);
  59. /* callbacks for CMCNT and CMCOR access */
  60. unsigned long (*read_count)(void __iomem *base, unsigned long offs);
  61. void (*write_count)(void __iomem *base, unsigned long offs,
  62. unsigned long value);
  63. };
/*
 * Examples of supported CMT timer register layouts and I/O access widths:
 *
 * "16-bit counter and 16-bit control" as found on sh7263:
 * CMSTR 0xfffec000 16-bit
 * CMCSR 0xfffec002 16-bit
 * CMCNT 0xfffec004 16-bit
 * CMCOR 0xfffec006 16-bit
 *
 * "32-bit counter and 16-bit control" as found on sh7372, sh73a0, r8a7740:
 * CMSTR 0xffca0000 16-bit
 * CMCSR 0xffca0060 16-bit
 * CMCNT 0xffca0064 32-bit
 * CMCOR 0xffca0068 32-bit
 *
 * "32-bit counter and 32-bit control" as found on r8a73a4 and r8a7790:
 * CMSTR 0xffca0500 32-bit
 * CMCSR 0xffca0510 32-bit
 * CMCNT 0xffca0514 32-bit
 * CMCOR 0xffca0518 32-bit
 */
  84. static unsigned long sh_cmt_read16(void __iomem *base, unsigned long offs)
  85. {
  86. return ioread16(base + (offs << 1));
  87. }
  88. static unsigned long sh_cmt_read32(void __iomem *base, unsigned long offs)
  89. {
  90. return ioread32(base + (offs << 2));
  91. }
  92. static void sh_cmt_write16(void __iomem *base, unsigned long offs,
  93. unsigned long value)
  94. {
  95. iowrite16(value, base + (offs << 1));
  96. }
  97. static void sh_cmt_write32(void __iomem *base, unsigned long offs,
  98. unsigned long value)
  99. {
  100. iowrite32(value, base + (offs << 2));
  101. }
  102. #define CMCSR 0 /* channel register */
  103. #define CMCNT 1 /* channel register */
  104. #define CMCOR 2 /* channel register */
  105. static inline unsigned long sh_cmt_read_cmstr(struct sh_cmt_priv *p)
  106. {
  107. return p->read_control(p->mapbase_str, 0);
  108. }
  109. static inline unsigned long sh_cmt_read_cmcsr(struct sh_cmt_priv *p)
  110. {
  111. return p->read_control(p->mapbase, CMCSR);
  112. }
  113. static inline unsigned long sh_cmt_read_cmcnt(struct sh_cmt_priv *p)
  114. {
  115. return p->read_count(p->mapbase, CMCNT);
  116. }
  117. static inline void sh_cmt_write_cmstr(struct sh_cmt_priv *p,
  118. unsigned long value)
  119. {
  120. p->write_control(p->mapbase_str, 0, value);
  121. }
  122. static inline void sh_cmt_write_cmcsr(struct sh_cmt_priv *p,
  123. unsigned long value)
  124. {
  125. p->write_control(p->mapbase, CMCSR, value);
  126. }
  127. static inline void sh_cmt_write_cmcnt(struct sh_cmt_priv *p,
  128. unsigned long value)
  129. {
  130. p->write_count(p->mapbase, CMCNT, value);
  131. }
  132. static inline void sh_cmt_write_cmcor(struct sh_cmt_priv *p,
  133. unsigned long value)
  134. {
  135. p->write_count(p->mapbase, CMCOR, value);
  136. }
  137. static unsigned long sh_cmt_get_counter(struct sh_cmt_priv *p,
  138. int *has_wrapped)
  139. {
  140. unsigned long v1, v2, v3;
  141. int o1, o2;
  142. o1 = sh_cmt_read_cmcsr(p) & p->overflow_bit;
  143. /* Make sure the timer value is stable. Stolen from acpi_pm.c */
  144. do {
  145. o2 = o1;
  146. v1 = sh_cmt_read_cmcnt(p);
  147. v2 = sh_cmt_read_cmcnt(p);
  148. v3 = sh_cmt_read_cmcnt(p);
  149. o1 = sh_cmt_read_cmcsr(p) & p->overflow_bit;
  150. } while (unlikely((o1 != o2) || (v1 > v2 && v1 < v3)
  151. || (v2 > v3 && v2 < v1) || (v3 > v1 && v3 < v2)));
  152. *has_wrapped = o1;
  153. return v2;
  154. }
  155. static DEFINE_RAW_SPINLOCK(sh_cmt_lock);
  156. static void sh_cmt_start_stop_ch(struct sh_cmt_priv *p, int start)
  157. {
  158. struct sh_timer_config *cfg = p->pdev->dev.platform_data;
  159. unsigned long flags, value;
  160. /* start stop register shared by multiple timer channels */
  161. raw_spin_lock_irqsave(&sh_cmt_lock, flags);
  162. value = sh_cmt_read_cmstr(p);
  163. if (start)
  164. value |= 1 << cfg->timer_bit;
  165. else
  166. value &= ~(1 << cfg->timer_bit);
  167. sh_cmt_write_cmstr(p, value);
  168. raw_spin_unlock_irqrestore(&sh_cmt_lock, flags);
  169. }
  170. static int sh_cmt_enable(struct sh_cmt_priv *p, unsigned long *rate)
  171. {
  172. int k, ret;
  173. pm_runtime_get_sync(&p->pdev->dev);
  174. dev_pm_syscore_device(&p->pdev->dev, true);
  175. /* enable clock */
  176. ret = clk_enable(p->clk);
  177. if (ret) {
  178. dev_err(&p->pdev->dev, "cannot enable clock\n");
  179. goto err0;
  180. }
  181. /* make sure channel is disabled */
  182. sh_cmt_start_stop_ch(p, 0);
  183. /* configure channel, periodic mode and maximum timeout */
  184. if (p->width == 16) {
  185. *rate = clk_get_rate(p->clk) / 512;
  186. sh_cmt_write_cmcsr(p, 0x43);
  187. } else {
  188. *rate = clk_get_rate(p->clk) / 8;
  189. sh_cmt_write_cmcsr(p, 0x01a4);
  190. }
  191. sh_cmt_write_cmcor(p, 0xffffffff);
  192. sh_cmt_write_cmcnt(p, 0);
  193. /*
  194. * According to the sh73a0 user's manual, as CMCNT can be operated
  195. * only by the RCLK (Pseudo 32 KHz), there's one restriction on
  196. * modifying CMCNT register; two RCLK cycles are necessary before
  197. * this register is either read or any modification of the value
  198. * it holds is reflected in the LSI's actual operation.
  199. *
  200. * While at it, we're supposed to clear out the CMCNT as of this
  201. * moment, so make sure it's processed properly here. This will
  202. * take RCLKx2 at maximum.
  203. */
  204. for (k = 0; k < 100; k++) {
  205. if (!sh_cmt_read_cmcnt(p))
  206. break;
  207. udelay(1);
  208. }
  209. if (sh_cmt_read_cmcnt(p)) {
  210. dev_err(&p->pdev->dev, "cannot clear CMCNT\n");
  211. ret = -ETIMEDOUT;
  212. goto err1;
  213. }
  214. /* enable channel */
  215. sh_cmt_start_stop_ch(p, 1);
  216. return 0;
  217. err1:
  218. /* stop clock */
  219. clk_disable(p->clk);
  220. err0:
  221. return ret;
  222. }
  223. static void sh_cmt_disable(struct sh_cmt_priv *p)
  224. {
  225. /* disable channel */
  226. sh_cmt_start_stop_ch(p, 0);
  227. /* disable interrupts in CMT block */
  228. sh_cmt_write_cmcsr(p, 0);
  229. /* stop clock */
  230. clk_disable(p->clk);
  231. dev_pm_syscore_device(&p->pdev->dev, false);
  232. pm_runtime_put(&p->pdev->dev);
  233. }
  234. /* private flags */
  235. #define FLAG_CLOCKEVENT (1 << 0)
  236. #define FLAG_CLOCKSOURCE (1 << 1)
  237. #define FLAG_REPROGRAM (1 << 2)
  238. #define FLAG_SKIPEVENT (1 << 3)
  239. #define FLAG_IRQCONTEXT (1 << 4)
  240. static void sh_cmt_clock_event_program_verify(struct sh_cmt_priv *p,
  241. int absolute)
  242. {
  243. unsigned long new_match;
  244. unsigned long value = p->next_match_value;
  245. unsigned long delay = 0;
  246. unsigned long now = 0;
  247. int has_wrapped;
  248. now = sh_cmt_get_counter(p, &has_wrapped);
  249. p->flags |= FLAG_REPROGRAM; /* force reprogram */
  250. if (has_wrapped) {
  251. /* we're competing with the interrupt handler.
  252. * -> let the interrupt handler reprogram the timer.
  253. * -> interrupt number two handles the event.
  254. */
  255. p->flags |= FLAG_SKIPEVENT;
  256. return;
  257. }
  258. if (absolute)
  259. now = 0;
  260. do {
  261. /* reprogram the timer hardware,
  262. * but don't save the new match value yet.
  263. */
  264. new_match = now + value + delay;
  265. if (new_match > p->max_match_value)
  266. new_match = p->max_match_value;
  267. sh_cmt_write_cmcor(p, new_match);
  268. now = sh_cmt_get_counter(p, &has_wrapped);
  269. if (has_wrapped && (new_match > p->match_value)) {
  270. /* we are changing to a greater match value,
  271. * so this wrap must be caused by the counter
  272. * matching the old value.
  273. * -> first interrupt reprograms the timer.
  274. * -> interrupt number two handles the event.
  275. */
  276. p->flags |= FLAG_SKIPEVENT;
  277. break;
  278. }
  279. if (has_wrapped) {
  280. /* we are changing to a smaller match value,
  281. * so the wrap must be caused by the counter
  282. * matching the new value.
  283. * -> save programmed match value.
  284. * -> let isr handle the event.
  285. */
  286. p->match_value = new_match;
  287. break;
  288. }
  289. /* be safe: verify hardware settings */
  290. if (now < new_match) {
  291. /* timer value is below match value, all good.
  292. * this makes sure we won't miss any match events.
  293. * -> save programmed match value.
  294. * -> let isr handle the event.
  295. */
  296. p->match_value = new_match;
  297. break;
  298. }
  299. /* the counter has reached a value greater
  300. * than our new match value. and since the
  301. * has_wrapped flag isn't set we must have
  302. * programmed a too close event.
  303. * -> increase delay and retry.
  304. */
  305. if (delay)
  306. delay <<= 1;
  307. else
  308. delay = 1;
  309. if (!delay)
  310. dev_warn(&p->pdev->dev, "too long delay\n");
  311. } while (delay);
  312. }
  313. static void __sh_cmt_set_next(struct sh_cmt_priv *p, unsigned long delta)
  314. {
  315. if (delta > p->max_match_value)
  316. dev_warn(&p->pdev->dev, "delta out of range\n");
  317. p->next_match_value = delta;
  318. sh_cmt_clock_event_program_verify(p, 0);
  319. }
  320. static void sh_cmt_set_next(struct sh_cmt_priv *p, unsigned long delta)
  321. {
  322. unsigned long flags;
  323. raw_spin_lock_irqsave(&p->lock, flags);
  324. __sh_cmt_set_next(p, delta);
  325. raw_spin_unlock_irqrestore(&p->lock, flags);
  326. }
  327. static irqreturn_t sh_cmt_interrupt(int irq, void *dev_id)
  328. {
  329. struct sh_cmt_priv *p = dev_id;
  330. /* clear flags */
  331. sh_cmt_write_cmcsr(p, sh_cmt_read_cmcsr(p) & p->clear_bits);
  332. /* update clock source counter to begin with if enabled
  333. * the wrap flag should be cleared by the timer specific
  334. * isr before we end up here.
  335. */
  336. if (p->flags & FLAG_CLOCKSOURCE)
  337. p->total_cycles += p->match_value + 1;
  338. if (!(p->flags & FLAG_REPROGRAM))
  339. p->next_match_value = p->max_match_value;
  340. p->flags |= FLAG_IRQCONTEXT;
  341. if (p->flags & FLAG_CLOCKEVENT) {
  342. if (!(p->flags & FLAG_SKIPEVENT)) {
  343. if (p->ced.mode == CLOCK_EVT_MODE_ONESHOT) {
  344. p->next_match_value = p->max_match_value;
  345. p->flags |= FLAG_REPROGRAM;
  346. }
  347. p->ced.event_handler(&p->ced);
  348. }
  349. }
  350. p->flags &= ~FLAG_SKIPEVENT;
  351. if (p->flags & FLAG_REPROGRAM) {
  352. p->flags &= ~FLAG_REPROGRAM;
  353. sh_cmt_clock_event_program_verify(p, 1);
  354. if (p->flags & FLAG_CLOCKEVENT)
  355. if ((p->ced.mode == CLOCK_EVT_MODE_SHUTDOWN)
  356. || (p->match_value == p->next_match_value))
  357. p->flags &= ~FLAG_REPROGRAM;
  358. }
  359. p->flags &= ~FLAG_IRQCONTEXT;
  360. return IRQ_HANDLED;
  361. }
  362. static int sh_cmt_start(struct sh_cmt_priv *p, unsigned long flag)
  363. {
  364. int ret = 0;
  365. unsigned long flags;
  366. raw_spin_lock_irqsave(&p->lock, flags);
  367. if (!(p->flags & (FLAG_CLOCKEVENT | FLAG_CLOCKSOURCE)))
  368. ret = sh_cmt_enable(p, &p->rate);
  369. if (ret)
  370. goto out;
  371. p->flags |= flag;
  372. /* setup timeout if no clockevent */
  373. if ((flag == FLAG_CLOCKSOURCE) && (!(p->flags & FLAG_CLOCKEVENT)))
  374. __sh_cmt_set_next(p, p->max_match_value);
  375. out:
  376. raw_spin_unlock_irqrestore(&p->lock, flags);
  377. return ret;
  378. }
  379. static void sh_cmt_stop(struct sh_cmt_priv *p, unsigned long flag)
  380. {
  381. unsigned long flags;
  382. unsigned long f;
  383. raw_spin_lock_irqsave(&p->lock, flags);
  384. f = p->flags & (FLAG_CLOCKEVENT | FLAG_CLOCKSOURCE);
  385. p->flags &= ~flag;
  386. if (f && !(p->flags & (FLAG_CLOCKEVENT | FLAG_CLOCKSOURCE)))
  387. sh_cmt_disable(p);
  388. /* adjust the timeout to maximum if only clocksource left */
  389. if ((flag == FLAG_CLOCKEVENT) && (p->flags & FLAG_CLOCKSOURCE))
  390. __sh_cmt_set_next(p, p->max_match_value);
  391. raw_spin_unlock_irqrestore(&p->lock, flags);
  392. }
  393. static struct sh_cmt_priv *cs_to_sh_cmt(struct clocksource *cs)
  394. {
  395. return container_of(cs, struct sh_cmt_priv, cs);
  396. }
  397. static cycle_t sh_cmt_clocksource_read(struct clocksource *cs)
  398. {
  399. struct sh_cmt_priv *p = cs_to_sh_cmt(cs);
  400. unsigned long flags, raw;
  401. unsigned long value;
  402. int has_wrapped;
  403. raw_spin_lock_irqsave(&p->lock, flags);
  404. value = p->total_cycles;
  405. raw = sh_cmt_get_counter(p, &has_wrapped);
  406. if (unlikely(has_wrapped))
  407. raw += p->match_value + 1;
  408. raw_spin_unlock_irqrestore(&p->lock, flags);
  409. return value + raw;
  410. }
  411. static int sh_cmt_clocksource_enable(struct clocksource *cs)
  412. {
  413. int ret;
  414. struct sh_cmt_priv *p = cs_to_sh_cmt(cs);
  415. WARN_ON(p->cs_enabled);
  416. p->total_cycles = 0;
  417. ret = sh_cmt_start(p, FLAG_CLOCKSOURCE);
  418. if (!ret) {
  419. __clocksource_updatefreq_hz(cs, p->rate);
  420. p->cs_enabled = true;
  421. }
  422. return ret;
  423. }
  424. static void sh_cmt_clocksource_disable(struct clocksource *cs)
  425. {
  426. struct sh_cmt_priv *p = cs_to_sh_cmt(cs);
  427. WARN_ON(!p->cs_enabled);
  428. sh_cmt_stop(p, FLAG_CLOCKSOURCE);
  429. p->cs_enabled = false;
  430. }
  431. static void sh_cmt_clocksource_suspend(struct clocksource *cs)
  432. {
  433. struct sh_cmt_priv *p = cs_to_sh_cmt(cs);
  434. sh_cmt_stop(p, FLAG_CLOCKSOURCE);
  435. pm_genpd_syscore_poweroff(&p->pdev->dev);
  436. }
  437. static void sh_cmt_clocksource_resume(struct clocksource *cs)
  438. {
  439. struct sh_cmt_priv *p = cs_to_sh_cmt(cs);
  440. pm_genpd_syscore_poweron(&p->pdev->dev);
  441. sh_cmt_start(p, FLAG_CLOCKSOURCE);
  442. }
  443. static int sh_cmt_register_clocksource(struct sh_cmt_priv *p,
  444. char *name, unsigned long rating)
  445. {
  446. struct clocksource *cs = &p->cs;
  447. cs->name = name;
  448. cs->rating = rating;
  449. cs->read = sh_cmt_clocksource_read;
  450. cs->enable = sh_cmt_clocksource_enable;
  451. cs->disable = sh_cmt_clocksource_disable;
  452. cs->suspend = sh_cmt_clocksource_suspend;
  453. cs->resume = sh_cmt_clocksource_resume;
  454. cs->mask = CLOCKSOURCE_MASK(sizeof(unsigned long) * 8);
  455. cs->flags = CLOCK_SOURCE_IS_CONTINUOUS;
  456. dev_info(&p->pdev->dev, "used as clock source\n");
  457. /* Register with dummy 1 Hz value, gets updated in ->enable() */
  458. clocksource_register_hz(cs, 1);
  459. return 0;
  460. }
  461. static struct sh_cmt_priv *ced_to_sh_cmt(struct clock_event_device *ced)
  462. {
  463. return container_of(ced, struct sh_cmt_priv, ced);
  464. }
  465. static void sh_cmt_clock_event_start(struct sh_cmt_priv *p, int periodic)
  466. {
  467. struct clock_event_device *ced = &p->ced;
  468. sh_cmt_start(p, FLAG_CLOCKEVENT);
  469. /* TODO: calculate good shift from rate and counter bit width */
  470. ced->shift = 32;
  471. ced->mult = div_sc(p->rate, NSEC_PER_SEC, ced->shift);
  472. ced->max_delta_ns = clockevent_delta2ns(p->max_match_value, ced);
  473. ced->min_delta_ns = clockevent_delta2ns(0x1f, ced);
  474. if (periodic)
  475. sh_cmt_set_next(p, ((p->rate + HZ/2) / HZ) - 1);
  476. else
  477. sh_cmt_set_next(p, p->max_match_value);
  478. }
  479. static void sh_cmt_clock_event_mode(enum clock_event_mode mode,
  480. struct clock_event_device *ced)
  481. {
  482. struct sh_cmt_priv *p = ced_to_sh_cmt(ced);
  483. /* deal with old setting first */
  484. switch (ced->mode) {
  485. case CLOCK_EVT_MODE_PERIODIC:
  486. case CLOCK_EVT_MODE_ONESHOT:
  487. sh_cmt_stop(p, FLAG_CLOCKEVENT);
  488. break;
  489. default:
  490. break;
  491. }
  492. switch (mode) {
  493. case CLOCK_EVT_MODE_PERIODIC:
  494. dev_info(&p->pdev->dev, "used for periodic clock events\n");
  495. sh_cmt_clock_event_start(p, 1);
  496. break;
  497. case CLOCK_EVT_MODE_ONESHOT:
  498. dev_info(&p->pdev->dev, "used for oneshot clock events\n");
  499. sh_cmt_clock_event_start(p, 0);
  500. break;
  501. case CLOCK_EVT_MODE_SHUTDOWN:
  502. case CLOCK_EVT_MODE_UNUSED:
  503. sh_cmt_stop(p, FLAG_CLOCKEVENT);
  504. break;
  505. default:
  506. break;
  507. }
  508. }
  509. static int sh_cmt_clock_event_next(unsigned long delta,
  510. struct clock_event_device *ced)
  511. {
  512. struct sh_cmt_priv *p = ced_to_sh_cmt(ced);
  513. BUG_ON(ced->mode != CLOCK_EVT_MODE_ONESHOT);
  514. if (likely(p->flags & FLAG_IRQCONTEXT))
  515. p->next_match_value = delta - 1;
  516. else
  517. sh_cmt_set_next(p, delta - 1);
  518. return 0;
  519. }
  520. static void sh_cmt_clock_event_suspend(struct clock_event_device *ced)
  521. {
  522. pm_genpd_syscore_poweroff(&ced_to_sh_cmt(ced)->pdev->dev);
  523. }
  524. static void sh_cmt_clock_event_resume(struct clock_event_device *ced)
  525. {
  526. pm_genpd_syscore_poweron(&ced_to_sh_cmt(ced)->pdev->dev);
  527. }
  528. static void sh_cmt_register_clockevent(struct sh_cmt_priv *p,
  529. char *name, unsigned long rating)
  530. {
  531. struct clock_event_device *ced = &p->ced;
  532. memset(ced, 0, sizeof(*ced));
  533. ced->name = name;
  534. ced->features = CLOCK_EVT_FEAT_PERIODIC;
  535. ced->features |= CLOCK_EVT_FEAT_ONESHOT;
  536. ced->rating = rating;
  537. ced->cpumask = cpumask_of(0);
  538. ced->set_next_event = sh_cmt_clock_event_next;
  539. ced->set_mode = sh_cmt_clock_event_mode;
  540. ced->suspend = sh_cmt_clock_event_suspend;
  541. ced->resume = sh_cmt_clock_event_resume;
  542. dev_info(&p->pdev->dev, "used for clock events\n");
  543. clockevents_register_device(ced);
  544. }
  545. static int sh_cmt_register(struct sh_cmt_priv *p, char *name,
  546. unsigned long clockevent_rating,
  547. unsigned long clocksource_rating)
  548. {
  549. if (clockevent_rating)
  550. sh_cmt_register_clockevent(p, name, clockevent_rating);
  551. if (clocksource_rating)
  552. sh_cmt_register_clocksource(p, name, clocksource_rating);
  553. return 0;
  554. }
  555. static int sh_cmt_setup(struct sh_cmt_priv *p, struct platform_device *pdev)
  556. {
  557. struct sh_timer_config *cfg = pdev->dev.platform_data;
  558. struct resource *res, *res2;
  559. int irq, ret;
  560. ret = -ENXIO;
  561. memset(p, 0, sizeof(*p));
  562. p->pdev = pdev;
  563. if (!cfg) {
  564. dev_err(&p->pdev->dev, "missing platform data\n");
  565. goto err0;
  566. }
  567. res = platform_get_resource(p->pdev, IORESOURCE_MEM, 0);
  568. if (!res) {
  569. dev_err(&p->pdev->dev, "failed to get I/O memory\n");
  570. goto err0;
  571. }
  572. /* optional resource for the shared timer start/stop register */
  573. res2 = platform_get_resource(p->pdev, IORESOURCE_MEM, 1);
  574. irq = platform_get_irq(p->pdev, 0);
  575. if (irq < 0) {
  576. dev_err(&p->pdev->dev, "failed to get irq\n");
  577. goto err0;
  578. }
  579. /* map memory, let mapbase point to our channel */
  580. p->mapbase = ioremap_nocache(res->start, resource_size(res));
  581. if (p->mapbase == NULL) {
  582. dev_err(&p->pdev->dev, "failed to remap I/O memory\n");
  583. goto err0;
  584. }
  585. /* map second resource for CMSTR */
  586. p->mapbase_str = ioremap_nocache(res2 ? res2->start :
  587. res->start - cfg->channel_offset,
  588. res2 ? resource_size(res2) : 2);
  589. if (p->mapbase_str == NULL) {
  590. dev_err(&p->pdev->dev, "failed to remap I/O second memory\n");
  591. goto err1;
  592. }
  593. /* request irq using setup_irq() (too early for request_irq()) */
  594. p->irqaction.name = dev_name(&p->pdev->dev);
  595. p->irqaction.handler = sh_cmt_interrupt;
  596. p->irqaction.dev_id = p;
  597. p->irqaction.flags = IRQF_DISABLED | IRQF_TIMER | \
  598. IRQF_IRQPOLL | IRQF_NOBALANCING;
  599. /* get hold of clock */
  600. p->clk = clk_get(&p->pdev->dev, "cmt_fck");
  601. if (IS_ERR(p->clk)) {
  602. dev_err(&p->pdev->dev, "cannot get clock\n");
  603. ret = PTR_ERR(p->clk);
  604. goto err2;
  605. }
  606. if (res2 && (resource_size(res2) == 4)) {
  607. /* assume both CMSTR and CMCSR to be 32-bit */
  608. p->read_control = sh_cmt_read32;
  609. p->write_control = sh_cmt_write32;
  610. } else {
  611. p->read_control = sh_cmt_read16;
  612. p->write_control = sh_cmt_write16;
  613. }
  614. if (resource_size(res) == 6) {
  615. p->width = 16;
  616. p->read_count = sh_cmt_read16;
  617. p->write_count = sh_cmt_write16;
  618. p->overflow_bit = 0x80;
  619. p->clear_bits = ~0x80;
  620. } else {
  621. p->width = 32;
  622. p->read_count = sh_cmt_read32;
  623. p->write_count = sh_cmt_write32;
  624. p->overflow_bit = 0x8000;
  625. p->clear_bits = ~0xc000;
  626. }
  627. if (p->width == (sizeof(p->max_match_value) * 8))
  628. p->max_match_value = ~0;
  629. else
  630. p->max_match_value = (1 << p->width) - 1;
  631. p->match_value = p->max_match_value;
  632. raw_spin_lock_init(&p->lock);
  633. ret = sh_cmt_register(p, (char *)dev_name(&p->pdev->dev),
  634. cfg->clockevent_rating,
  635. cfg->clocksource_rating);
  636. if (ret) {
  637. dev_err(&p->pdev->dev, "registration failed\n");
  638. goto err3;
  639. }
  640. p->cs_enabled = false;
  641. ret = setup_irq(irq, &p->irqaction);
  642. if (ret) {
  643. dev_err(&p->pdev->dev, "failed to request irq %d\n", irq);
  644. goto err3;
  645. }
  646. platform_set_drvdata(pdev, p);
  647. return 0;
  648. err3:
  649. clk_put(p->clk);
  650. err2:
  651. iounmap(p->mapbase_str);
  652. err1:
  653. iounmap(p->mapbase);
  654. err0:
  655. return ret;
  656. }
  657. static int sh_cmt_probe(struct platform_device *pdev)
  658. {
  659. struct sh_cmt_priv *p = platform_get_drvdata(pdev);
  660. struct sh_timer_config *cfg = pdev->dev.platform_data;
  661. int ret;
  662. if (!is_early_platform_device(pdev)) {
  663. pm_runtime_set_active(&pdev->dev);
  664. pm_runtime_enable(&pdev->dev);
  665. }
  666. if (p) {
  667. dev_info(&pdev->dev, "kept as earlytimer\n");
  668. goto out;
  669. }
  670. p = kmalloc(sizeof(*p), GFP_KERNEL);
  671. if (p == NULL) {
  672. dev_err(&pdev->dev, "failed to allocate driver data\n");
  673. return -ENOMEM;
  674. }
  675. ret = sh_cmt_setup(p, pdev);
  676. if (ret) {
  677. kfree(p);
  678. pm_runtime_idle(&pdev->dev);
  679. return ret;
  680. }
  681. if (is_early_platform_device(pdev))
  682. return 0;
  683. out:
  684. if (cfg->clockevent_rating || cfg->clocksource_rating)
  685. pm_runtime_irq_safe(&pdev->dev);
  686. else
  687. pm_runtime_idle(&pdev->dev);
  688. return 0;
  689. }
  690. static int sh_cmt_remove(struct platform_device *pdev)
  691. {
  692. return -EBUSY; /* cannot unregister clockevent and clocksource */
  693. }
  694. static struct platform_driver sh_cmt_device_driver = {
  695. .probe = sh_cmt_probe,
  696. .remove = sh_cmt_remove,
  697. .driver = {
  698. .name = "sh_cmt",
  699. }
  700. };
  701. static int __init sh_cmt_init(void)
  702. {
  703. return platform_driver_register(&sh_cmt_device_driver);
  704. }
  705. static void __exit sh_cmt_exit(void)
  706. {
  707. platform_driver_unregister(&sh_cmt_device_driver);
  708. }
  709. early_platform_init("earlytimer", &sh_cmt_device_driver);
  710. subsys_initcall(sh_cmt_init);
  711. module_exit(sh_cmt_exit);
  712. MODULE_AUTHOR("Magnus Damm");
  713. MODULE_DESCRIPTION("SuperH CMT Timer Driver");
  714. MODULE_LICENSE("GPL v2");