sh_cmt.c
/*
 * SuperH Timer Support - CMT
 *
 * Copyright (C) 2008 Magnus Damm
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */
#include <linux/init.h>
#include <linux/platform_device.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/ioport.h>
#include <linux/io.h>
#include <linux/clk.h>
#include <linux/irq.h>
#include <linux/err.h>
#include <linux/delay.h>
#include <linux/clocksource.h>
#include <linux/clockchips.h>
#include <linux/sh_timer.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/pm_domain.h>
#include <linux/pm_runtime.h>
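
/*
 * Per-channel driver state.  One instance is allocated for each CMT
 * channel platform device; the same channel may be registered as a
 * clock source, a clock event device, or both (see the FLAG_* bits
 * further down).
 */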
struct sh_cmt_priv {
        void __iomem *mapbase;
        struct clk *clk;
        unsigned long width; /* 16 or 32 bit version of hardware block */
        unsigned long overflow_bit;
        unsigned long clear_bits;
        struct irqaction irqaction;
        struct platform_device *pdev;

        unsigned long flags;
        unsigned long match_value;
        unsigned long next_match_value;
        unsigned long max_match_value;
        unsigned long rate;
        raw_spinlock_t lock;
        struct clock_event_device ced;
        struct clocksource cs;
        unsigned long total_cycles;
        bool cs_enabled;
};
static inline unsigned long sh_cmt_read16(void __iomem *base,
                                          unsigned long offs)
{
        return ioread16(base + (offs << 1));
}

static inline void sh_cmt_write16(void __iomem *base, unsigned long offs,
                                  unsigned long value)
{
        iowrite16(value, base + (offs << 1));
}

#define CMCSR 0 /* channel register */
#define CMCNT 1 /* channel register */
#define CMCOR 2 /* channel register */
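
/*
 * Channel registers are indexed by the numbers above and spaced by the
 * access width: 16-bit blocks use a 2-byte stride (offs << 1), 32-bit
 * blocks a 4-byte stride (offs << 2).  The CMSTR start/stop register,
 * shared by all channels of a block, sits in front of the channel at
 * -cfg->channel_offset.
 */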
static inline unsigned long sh_cmt_read(struct sh_cmt_priv *p, int reg_nr)
{
        void __iomem *base = p->mapbase;
        unsigned long offs = reg_nr;

        if (p->width == 16) {
                offs <<= 1;
                return ioread16(base + offs);
        } else {
                offs <<= 2;
                return ioread32(base + offs);
        }
}

static inline unsigned long sh_cmt_read_cmstr(struct sh_cmt_priv *p)
{
        struct sh_timer_config *cfg = p->pdev->dev.platform_data;

        return sh_cmt_read16(p->mapbase - cfg->channel_offset, 0);
}

static inline unsigned long sh_cmt_read_cmcsr(struct sh_cmt_priv *p)
{
        return sh_cmt_read16(p->mapbase, CMCSR);
}

static inline unsigned long sh_cmt_read_cmcnt(struct sh_cmt_priv *p)
{
        return sh_cmt_read(p, CMCNT);
}

static inline void sh_cmt_write(struct sh_cmt_priv *p, int reg_nr,
                                unsigned long value)
{
        void __iomem *base = p->mapbase;
        unsigned long offs = reg_nr;

        if (p->width == 16) {
                offs <<= 1;
                iowrite16(value, base + offs);
        } else {
                offs <<= 2;
                iowrite32(value, base + offs);
        }
}

static inline void sh_cmt_write_cmstr(struct sh_cmt_priv *p,
                                      unsigned long value)
{
        struct sh_timer_config *cfg = p->pdev->dev.platform_data;

        sh_cmt_write16(p->mapbase - cfg->channel_offset, 0, value);
}

static inline void sh_cmt_write_cmcsr(struct sh_cmt_priv *p,
                                      unsigned long value)
{
        sh_cmt_write16(p->mapbase, CMCSR, value);
}

static inline void sh_cmt_write_cmcnt(struct sh_cmt_priv *p,
                                      unsigned long value)
{
        sh_cmt_write(p, CMCNT, value);
}

static inline void sh_cmt_write_cmcor(struct sh_cmt_priv *p,
                                      unsigned long value)
{
        sh_cmt_write(p, CMCOR, value);
}
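
/*
 * Sample the counter together with the overflow flag.  CMCNT and CMCSR
 * cannot be read atomically, so the counter is read three times and the
 * reads are retried until the three values are monotonic and the
 * overflow flag is stable; the middle value v2 is then known to be
 * consistent with the flag returned through *has_wrapped.
 */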
static unsigned long sh_cmt_get_counter(struct sh_cmt_priv *p,
                                        int *has_wrapped)
{
        unsigned long v1, v2, v3;
        int o1, o2;

        o1 = sh_cmt_read_cmcsr(p) & p->overflow_bit;

        /* Make sure the timer value is stable. Stolen from acpi_pm.c */
        do {
                o2 = o1;
                v1 = sh_cmt_read_cmcnt(p);
                v2 = sh_cmt_read_cmcnt(p);
                v3 = sh_cmt_read_cmcnt(p);
                o1 = sh_cmt_read_cmcsr(p) & p->overflow_bit;
        } while (unlikely((o1 != o2) || (v1 > v2 && v1 < v3)
                          || (v2 > v3 && v2 < v1) || (v3 > v1 && v3 < v2)));

        *has_wrapped = o1;
        return v2;
}
static DEFINE_RAW_SPINLOCK(sh_cmt_lock);

static void sh_cmt_start_stop_ch(struct sh_cmt_priv *p, int start)
{
        struct sh_timer_config *cfg = p->pdev->dev.platform_data;
        unsigned long flags, value;

        /* start stop register shared by multiple timer channels */
        raw_spin_lock_irqsave(&sh_cmt_lock, flags);
        value = sh_cmt_read_cmstr(p);

        if (start)
                value |= 1 << cfg->timer_bit;
        else
                value &= ~(1 << cfg->timer_bit);

        sh_cmt_write_cmstr(p, value);
        raw_spin_unlock_irqrestore(&sh_cmt_lock, flags);
}
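
/*
 * Bring the channel up: enable its clock, select compare match
 * operation with the divider implied by the block width (input clock
 * divided by 512 for 16-bit channels, by 8 for 32-bit ones, matching
 * the *rate computation below), then wait for the CMCNT clear to take
 * effect before starting the channel.
 */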
static int sh_cmt_enable(struct sh_cmt_priv *p, unsigned long *rate)
{
        int k, ret;

        pm_runtime_get_sync(&p->pdev->dev);
        dev_pm_syscore_device(&p->pdev->dev, true);

        /* enable clock */
        ret = clk_enable(p->clk);
        if (ret) {
                dev_err(&p->pdev->dev, "cannot enable clock\n");
                goto err0;
        }

        /* make sure channel is disabled */
        sh_cmt_start_stop_ch(p, 0);

        /* configure channel, periodic mode and maximum timeout */
        if (p->width == 16) {
                *rate = clk_get_rate(p->clk) / 512;
                sh_cmt_write_cmcsr(p, 0x43);
        } else {
                *rate = clk_get_rate(p->clk) / 8;
                sh_cmt_write_cmcsr(p, 0x01a4);
        }

        sh_cmt_write_cmcor(p, 0xffffffff);
        sh_cmt_write_cmcnt(p, 0);

        /*
         * According to the sh73a0 user's manual, as CMCNT can be operated
         * only by the RCLK (Pseudo 32 KHz), there's one restriction on
         * modifying CMCNT register; two RCLK cycles are necessary before
         * this register is either read or any modification of the value
         * it holds is reflected in the LSI's actual operation.
         *
         * While at it, we're supposed to clear out the CMCNT as of this
         * moment, so make sure it's processed properly here.  This will
         * take RCLKx2 at maximum.
         */
        for (k = 0; k < 100; k++) {
                if (!sh_cmt_read_cmcnt(p))
                        break;
                udelay(1);
        }

        if (sh_cmt_read_cmcnt(p)) {
                dev_err(&p->pdev->dev, "cannot clear CMCNT\n");
                ret = -ETIMEDOUT;
                goto err1;
        }

        /* enable channel */
        sh_cmt_start_stop_ch(p, 1);
        return 0;

err1:
        /* stop clock */
        clk_disable(p->clk);

err0:
        return ret;
}
static void sh_cmt_disable(struct sh_cmt_priv *p)
{
        /* disable channel */
        sh_cmt_start_stop_ch(p, 0);

        /* disable interrupts in CMT block */
        sh_cmt_write_cmcsr(p, 0);

        /* stop clock */
        clk_disable(p->clk);

        dev_pm_syscore_device(&p->pdev->dev, false);
        pm_runtime_put(&p->pdev->dev);
}
/* private flags */
#define FLAG_CLOCKEVENT (1 << 0)
#define FLAG_CLOCKSOURCE (1 << 1)
#define FLAG_REPROGRAM (1 << 2)
#define FLAG_SKIPEVENT (1 << 3)
#define FLAG_IRQCONTEXT (1 << 4)
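
/*
 * Write a new match value and verify that the hardware accepted it.
 * If the counter passes the freshly written value before the write
 * settles (now >= new_match without a wrap), the event was programmed
 * too close; the delay is doubled and the write retried until the
 * match is safely in the future or delay overflows back to zero.
 */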
static void sh_cmt_clock_event_program_verify(struct sh_cmt_priv *p,
                                              int absolute)
{
        unsigned long new_match;
        unsigned long value = p->next_match_value;
        unsigned long delay = 0;
        unsigned long now = 0;
        int has_wrapped;

        now = sh_cmt_get_counter(p, &has_wrapped);
        p->flags |= FLAG_REPROGRAM; /* force reprogram */

        if (has_wrapped) {
                /* we're competing with the interrupt handler.
                 *  -> let the interrupt handler reprogram the timer.
                 *  -> interrupt number two handles the event.
                 */
                p->flags |= FLAG_SKIPEVENT;
                return;
        }

        if (absolute)
                now = 0;

        do {
                /* reprogram the timer hardware,
                 * but don't save the new match value yet.
                 */
                new_match = now + value + delay;
                if (new_match > p->max_match_value)
                        new_match = p->max_match_value;

                sh_cmt_write_cmcor(p, new_match);

                now = sh_cmt_get_counter(p, &has_wrapped);
                if (has_wrapped && (new_match > p->match_value)) {
                        /* we are changing to a greater match value,
                         * so this wrap must be caused by the counter
                         * matching the old value.
                         * -> first interrupt reprograms the timer.
                         * -> interrupt number two handles the event.
                         */
                        p->flags |= FLAG_SKIPEVENT;
                        break;
                }

                if (has_wrapped) {
                        /* we are changing to a smaller match value,
                         * so the wrap must be caused by the counter
                         * matching the new value.
                         * -> save programmed match value.
                         * -> let isr handle the event.
                         */
                        p->match_value = new_match;
                        break;
                }

                /* be safe: verify hardware settings */
                if (now < new_match) {
                        /* timer value is below match value, all good.
                         * this makes sure we won't miss any match events.
                         * -> save programmed match value.
                         * -> let isr handle the event.
                         */
                        p->match_value = new_match;
                        break;
                }

                /* the counter has reached a value greater
                 * than our new match value. and since the
                 * has_wrapped flag isn't set we must have
                 * programmed a too close event.
                 * -> increase delay and retry.
                 */
                if (delay)
                        delay <<= 1;
                else
                        delay = 1;

                if (!delay)
                        dev_warn(&p->pdev->dev, "too long delay\n");

        } while (delay);
}
static void __sh_cmt_set_next(struct sh_cmt_priv *p, unsigned long delta)
{
        if (delta > p->max_match_value)
                dev_warn(&p->pdev->dev, "delta out of range\n");

        p->next_match_value = delta;
        sh_cmt_clock_event_program_verify(p, 0);
}

static void sh_cmt_set_next(struct sh_cmt_priv *p, unsigned long delta)
{
        unsigned long flags;

        raw_spin_lock_irqsave(&p->lock, flags);
        __sh_cmt_set_next(p, delta);
        raw_spin_unlock_irqrestore(&p->lock, flags);
}
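
/*
 * Compare match interrupt: acknowledge the match bits in CMCSR,
 * account one full period into total_cycles when the channel is used
 * as a clock source, deliver the clock event, and reprogram the match
 * value if a new one is pending.
 */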
static irqreturn_t sh_cmt_interrupt(int irq, void *dev_id)
{
        struct sh_cmt_priv *p = dev_id;

        /* clear flags */
        sh_cmt_write_cmcsr(p, sh_cmt_read_cmcsr(p) & p->clear_bits);

        /* update clock source counter to begin with if enabled
         * the wrap flag should be cleared by the timer specific
         * isr before we end up here.
         */
        if (p->flags & FLAG_CLOCKSOURCE)
                p->total_cycles += p->match_value + 1;

        if (!(p->flags & FLAG_REPROGRAM))
                p->next_match_value = p->max_match_value;

        p->flags |= FLAG_IRQCONTEXT;

        if (p->flags & FLAG_CLOCKEVENT) {
                if (!(p->flags & FLAG_SKIPEVENT)) {
                        if (p->ced.mode == CLOCK_EVT_MODE_ONESHOT) {
                                p->next_match_value = p->max_match_value;
                                p->flags |= FLAG_REPROGRAM;
                        }

                        p->ced.event_handler(&p->ced);
                }
        }

        p->flags &= ~FLAG_SKIPEVENT;

        if (p->flags & FLAG_REPROGRAM) {
                p->flags &= ~FLAG_REPROGRAM;
                sh_cmt_clock_event_program_verify(p, 1);

                if (p->flags & FLAG_CLOCKEVENT)
                        if ((p->ced.mode == CLOCK_EVT_MODE_SHUTDOWN)
                            || (p->match_value == p->next_match_value))
                                p->flags &= ~FLAG_REPROGRAM;
        }

        p->flags &= ~FLAG_IRQCONTEXT;

        return IRQ_HANDLED;
}
static int sh_cmt_start(struct sh_cmt_priv *p, unsigned long flag)
{
        int ret = 0;
        unsigned long flags;

        raw_spin_lock_irqsave(&p->lock, flags);

        if (!(p->flags & (FLAG_CLOCKEVENT | FLAG_CLOCKSOURCE)))
                ret = sh_cmt_enable(p, &p->rate);

        if (ret)
                goto out;
        p->flags |= flag;

        /* setup timeout if no clockevent */
        if ((flag == FLAG_CLOCKSOURCE) && (!(p->flags & FLAG_CLOCKEVENT)))
                __sh_cmt_set_next(p, p->max_match_value);
out:
        raw_spin_unlock_irqrestore(&p->lock, flags);

        return ret;
}

static void sh_cmt_stop(struct sh_cmt_priv *p, unsigned long flag)
{
        unsigned long flags;
        unsigned long f;

        raw_spin_lock_irqsave(&p->lock, flags);

        f = p->flags & (FLAG_CLOCKEVENT | FLAG_CLOCKSOURCE);
        p->flags &= ~flag;

        if (f && !(p->flags & (FLAG_CLOCKEVENT | FLAG_CLOCKSOURCE)))
                sh_cmt_disable(p);

        /* adjust the timeout to maximum if only clocksource left */
        if ((flag == FLAG_CLOCKEVENT) && (p->flags & FLAG_CLOCKSOURCE))
                __sh_cmt_set_next(p, p->max_match_value);

        raw_spin_unlock_irqrestore(&p->lock, flags);
}
static struct sh_cmt_priv *cs_to_sh_cmt(struct clocksource *cs)
{
        return container_of(cs, struct sh_cmt_priv, cs);
}
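
/*
 * total_cycles accumulates full periods in the interrupt handler; the
 * current raw counter value is added on top, compensated by one extra
 * period if a wrap is pending that the interrupt handler has not yet
 * accounted for.
 */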
static cycle_t sh_cmt_clocksource_read(struct clocksource *cs)
{
        struct sh_cmt_priv *p = cs_to_sh_cmt(cs);
        unsigned long flags, raw;
        unsigned long value;
        int has_wrapped;

        raw_spin_lock_irqsave(&p->lock, flags);
        value = p->total_cycles;
        raw = sh_cmt_get_counter(p, &has_wrapped);

        if (unlikely(has_wrapped))
                raw += p->match_value + 1;
        raw_spin_unlock_irqrestore(&p->lock, flags);

        return value + raw;
}

static int sh_cmt_clocksource_enable(struct clocksource *cs)
{
        int ret;
        struct sh_cmt_priv *p = cs_to_sh_cmt(cs);

        WARN_ON(p->cs_enabled);

        p->total_cycles = 0;

        ret = sh_cmt_start(p, FLAG_CLOCKSOURCE);
        if (!ret) {
                __clocksource_updatefreq_hz(cs, p->rate);
                p->cs_enabled = true;
        }
        return ret;
}

static void sh_cmt_clocksource_disable(struct clocksource *cs)
{
        struct sh_cmt_priv *p = cs_to_sh_cmt(cs);

        WARN_ON(!p->cs_enabled);

        sh_cmt_stop(p, FLAG_CLOCKSOURCE);
        p->cs_enabled = false;
}

static void sh_cmt_clocksource_suspend(struct clocksource *cs)
{
        struct sh_cmt_priv *p = cs_to_sh_cmt(cs);

        sh_cmt_stop(p, FLAG_CLOCKSOURCE);
        pm_genpd_syscore_poweroff(&p->pdev->dev);
}

static void sh_cmt_clocksource_resume(struct clocksource *cs)
{
        struct sh_cmt_priv *p = cs_to_sh_cmt(cs);

        pm_genpd_syscore_poweron(&p->pdev->dev);
        sh_cmt_start(p, FLAG_CLOCKSOURCE);
}

static int sh_cmt_register_clocksource(struct sh_cmt_priv *p,
                                       char *name, unsigned long rating)
{
        struct clocksource *cs = &p->cs;

        cs->name = name;
        cs->rating = rating;
        cs->read = sh_cmt_clocksource_read;
        cs->enable = sh_cmt_clocksource_enable;
        cs->disable = sh_cmt_clocksource_disable;
        cs->suspend = sh_cmt_clocksource_suspend;
        cs->resume = sh_cmt_clocksource_resume;
        cs->mask = CLOCKSOURCE_MASK(sizeof(unsigned long) * 8);
        cs->flags = CLOCK_SOURCE_IS_CONTINUOUS;

        dev_info(&p->pdev->dev, "used as clock source\n");

        /* Register with dummy 1 Hz value, gets updated in ->enable() */
        clocksource_register_hz(cs, 1);
        return 0;
}
static struct sh_cmt_priv *ced_to_sh_cmt(struct clock_event_device *ced)
{
        return container_of(ced, struct sh_cmt_priv, ced);
}
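
/*
 * The clock event frequency is only known once the channel is running,
 * so mult/max_delta_ns/min_delta_ns are computed at start time from the
 * rate derived from the input clock; the minimum delta of 0x1f ticks
 * keeps events from being programmed too close to the current count.
 */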
static void sh_cmt_clock_event_start(struct sh_cmt_priv *p, int periodic)
{
        struct clock_event_device *ced = &p->ced;

        sh_cmt_start(p, FLAG_CLOCKEVENT);

        /* TODO: calculate good shift from rate and counter bit width */

        ced->shift = 32;
        ced->mult = div_sc(p->rate, NSEC_PER_SEC, ced->shift);
        ced->max_delta_ns = clockevent_delta2ns(p->max_match_value, ced);
        ced->min_delta_ns = clockevent_delta2ns(0x1f, ced);

        if (periodic)
                sh_cmt_set_next(p, ((p->rate + HZ/2) / HZ) - 1);
        else
                sh_cmt_set_next(p, p->max_match_value);
}

static void sh_cmt_clock_event_mode(enum clock_event_mode mode,
                                    struct clock_event_device *ced)
{
        struct sh_cmt_priv *p = ced_to_sh_cmt(ced);

        /* deal with old setting first */
        switch (ced->mode) {
        case CLOCK_EVT_MODE_PERIODIC:
        case CLOCK_EVT_MODE_ONESHOT:
                sh_cmt_stop(p, FLAG_CLOCKEVENT);
                break;
        default:
                break;
        }

        switch (mode) {
        case CLOCK_EVT_MODE_PERIODIC:
                dev_info(&p->pdev->dev, "used for periodic clock events\n");
                sh_cmt_clock_event_start(p, 1);
                break;
        case CLOCK_EVT_MODE_ONESHOT:
                dev_info(&p->pdev->dev, "used for oneshot clock events\n");
                sh_cmt_clock_event_start(p, 0);
                break;
        case CLOCK_EVT_MODE_SHUTDOWN:
        case CLOCK_EVT_MODE_UNUSED:
                sh_cmt_stop(p, FLAG_CLOCKEVENT);
                break;
        default:
                break;
        }
}

static int sh_cmt_clock_event_next(unsigned long delta,
                                   struct clock_event_device *ced)
{
        struct sh_cmt_priv *p = ced_to_sh_cmt(ced);

        BUG_ON(ced->mode != CLOCK_EVT_MODE_ONESHOT);
        if (likely(p->flags & FLAG_IRQCONTEXT))
                p->next_match_value = delta - 1;
        else
                sh_cmt_set_next(p, delta - 1);

        return 0;
}

static void sh_cmt_clock_event_suspend(struct clock_event_device *ced)
{
        pm_genpd_syscore_poweroff(&ced_to_sh_cmt(ced)->pdev->dev);
}

static void sh_cmt_clock_event_resume(struct clock_event_device *ced)
{
        pm_genpd_syscore_poweron(&ced_to_sh_cmt(ced)->pdev->dev);
}

static void sh_cmt_register_clockevent(struct sh_cmt_priv *p,
                                       char *name, unsigned long rating)
{
        struct clock_event_device *ced = &p->ced;

        memset(ced, 0, sizeof(*ced));

        ced->name = name;
        ced->features = CLOCK_EVT_FEAT_PERIODIC;
        ced->features |= CLOCK_EVT_FEAT_ONESHOT;
        ced->rating = rating;
        ced->cpumask = cpumask_of(0);
        ced->set_next_event = sh_cmt_clock_event_next;
        ced->set_mode = sh_cmt_clock_event_mode;
        ced->suspend = sh_cmt_clock_event_suspend;
        ced->resume = sh_cmt_clock_event_resume;

        dev_info(&p->pdev->dev, "used for clock events\n");
        clockevents_register_device(ced);
}
static int sh_cmt_register(struct sh_cmt_priv *p, char *name,
                           unsigned long clockevent_rating,
                           unsigned long clocksource_rating)
{
        if (clockevent_rating)
                sh_cmt_register_clockevent(p, name, clockevent_rating);

        if (clocksource_rating)
                sh_cmt_register_clocksource(p, name, clocksource_rating);

        return 0;
}
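
/*
 * One-time channel setup: map the channel's I/O window, look up the
 * "cmt_fck" clock, and infer the block width from the resource size
 * (a 6-byte window implies a 16-bit channel, anything else 32-bit)
 * before registering the clocksource and/or clockevent and wiring up
 * the interrupt with setup_irq().
 */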
static int sh_cmt_setup(struct sh_cmt_priv *p, struct platform_device *pdev)
{
        struct sh_timer_config *cfg = pdev->dev.platform_data;
        struct resource *res;
        int irq, ret;
        ret = -ENXIO;

        memset(p, 0, sizeof(*p));
        p->pdev = pdev;

        if (!cfg) {
                dev_err(&p->pdev->dev, "missing platform data\n");
                goto err0;
        }

        res = platform_get_resource(p->pdev, IORESOURCE_MEM, 0);
        if (!res) {
                dev_err(&p->pdev->dev, "failed to get I/O memory\n");
                goto err0;
        }

        irq = platform_get_irq(p->pdev, 0);
        if (irq < 0) {
                dev_err(&p->pdev->dev, "failed to get irq\n");
                goto err0;
        }

        /* map memory, let mapbase point to our channel */
        p->mapbase = ioremap_nocache(res->start, resource_size(res));
        if (p->mapbase == NULL) {
                dev_err(&p->pdev->dev, "failed to remap I/O memory\n");
                goto err0;
        }

        /* request irq using setup_irq() (too early for request_irq()) */
        p->irqaction.name = dev_name(&p->pdev->dev);
        p->irqaction.handler = sh_cmt_interrupt;
        p->irqaction.dev_id = p;
        p->irqaction.flags = IRQF_DISABLED | IRQF_TIMER |
                             IRQF_IRQPOLL | IRQF_NOBALANCING;

        /* get hold of clock */
        p->clk = clk_get(&p->pdev->dev, "cmt_fck");
        if (IS_ERR(p->clk)) {
                dev_err(&p->pdev->dev, "cannot get clock\n");
                ret = PTR_ERR(p->clk);
                goto err1;
        }

        if (resource_size(res) == 6) {
                p->width = 16;
                p->overflow_bit = 0x80;
                p->clear_bits = ~0x80;
        } else {
                p->width = 32;
                p->overflow_bit = 0x8000;
                p->clear_bits = ~0xc000;
        }

        if (p->width == (sizeof(p->max_match_value) * 8))
                p->max_match_value = ~0;
        else
                p->max_match_value = (1 << p->width) - 1;

        p->match_value = p->max_match_value;
        raw_spin_lock_init(&p->lock);

        ret = sh_cmt_register(p, (char *)dev_name(&p->pdev->dev),
                              cfg->clockevent_rating,
                              cfg->clocksource_rating);
        if (ret) {
                dev_err(&p->pdev->dev, "registration failed\n");
                goto err2;
        }
        p->cs_enabled = false;

        ret = setup_irq(irq, &p->irqaction);
        if (ret) {
                dev_err(&p->pdev->dev, "failed to request irq %d\n", irq);
                goto err2;
        }

        platform_set_drvdata(pdev, p);

        return 0;
err2:
        clk_put(p->clk);

err1:
        iounmap(p->mapbase);
err0:
        return ret;
}
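
/*
 * The driver can also be probed through the "earlytimer" early
 * platform class, before regular device initialization; when the same
 * device is probed again later as a normal platform device, the
 * already initialized state is kept ("kept as earlytimer" below).
 */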
static int sh_cmt_probe(struct platform_device *pdev)
{
        struct sh_cmt_priv *p = platform_get_drvdata(pdev);
        struct sh_timer_config *cfg = pdev->dev.platform_data;
        int ret;

        if (!is_early_platform_device(pdev)) {
                pm_runtime_set_active(&pdev->dev);
                pm_runtime_enable(&pdev->dev);
        }

        if (p) {
                dev_info(&pdev->dev, "kept as earlytimer\n");
                goto out;
        }

        p = kmalloc(sizeof(*p), GFP_KERNEL);
        if (p == NULL) {
                dev_err(&pdev->dev, "failed to allocate driver data\n");
                return -ENOMEM;
        }

        ret = sh_cmt_setup(p, pdev);
        if (ret) {
                kfree(p);
                pm_runtime_idle(&pdev->dev);
                return ret;
        }
        if (is_early_platform_device(pdev))
                return 0;

out:
        if (cfg->clockevent_rating || cfg->clocksource_rating)
                pm_runtime_irq_safe(&pdev->dev);
        else
                pm_runtime_idle(&pdev->dev);

        return 0;
}
static int sh_cmt_remove(struct platform_device *pdev)
{
        return -EBUSY; /* cannot unregister clockevent and clocksource */
}

static struct platform_driver sh_cmt_device_driver = {
        .probe          = sh_cmt_probe,
        .remove         = sh_cmt_remove,
        .driver         = {
                .name   = "sh_cmt",
        }
};

static int __init sh_cmt_init(void)
{
        return platform_driver_register(&sh_cmt_device_driver);
}

static void __exit sh_cmt_exit(void)
{
        platform_driver_unregister(&sh_cmt_device_driver);
}

early_platform_init("earlytimer", &sh_cmt_device_driver);
module_init(sh_cmt_init);
module_exit(sh_cmt_exit);

MODULE_AUTHOR("Magnus Damm");
MODULE_DESCRIPTION("SuperH CMT Timer Driver");
MODULE_LICENSE("GPL v2");