/* sh_mtu2.c */
  1. /*
  2. * SuperH Timer Support - MTU2
  3. *
  4. * Copyright (C) 2009 Magnus Damm
  5. *
  6. * This program is free software; you can redistribute it and/or modify
  7. * it under the terms of the GNU General Public License as published by
  8. * the Free Software Foundation; either version 2 of the License
  9. *
  10. * This program is distributed in the hope that it will be useful,
  11. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  12. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  13. * GNU General Public License for more details.
  14. *
  15. * You should have received a copy of the GNU General Public License
  16. * along with this program; if not, write to the Free Software
  17. * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
  18. */
  19. #include <linux/init.h>
  20. #include <linux/platform_device.h>
  21. #include <linux/spinlock.h>
  22. #include <linux/interrupt.h>
  23. #include <linux/ioport.h>
  24. #include <linux/delay.h>
  25. #include <linux/io.h>
  26. #include <linux/clk.h>
  27. #include <linux/irq.h>
  28. #include <linux/err.h>
  29. #include <linux/clockchips.h>
  30. #include <linux/sh_timer.h>
  31. #include <linux/slab.h>
  32. #include <linux/module.h>
  33. #include <linux/pm_domain.h>
  34. #include <linux/pm_runtime.h>
/*
 * Per-channel driver state: one instance drives a single MTU2 timer
 * channel used as a periodic clock event source.
 */
struct sh_mtu2_priv {
	void __iomem *mapbase;		/* base of this channel's registers */
	struct clk *clk;		/* "mtu2_fck" function clock */
	struct irqaction irqaction;	/* timer interrupt, filled in setup() */
	struct platform_device *pdev;	/* owning platform device */
	unsigned long rate;		/* counter input rate, Hz (clk / 64) */
	unsigned long periodic;		/* TGR compare value for one HZ tick */
	struct clock_event_device ced;	/* registered clockevent device */
};
/* Serializes read-modify-write of the TSTR register shared by all channels. */
static DEFINE_RAW_SPINLOCK(sh_mtu2_lock);

/* Symbolic register indices; TSTR is shared, the rest are per-channel. */
#define TSTR -1 /* shared register */
#define TCR 0 /* channel register */
#define TMDR 1 /* channel register */
#define TIOR 2 /* channel register */
#define TIER 3 /* channel register */
#define TSR 4 /* channel register */
#define TCNT 5 /* channel register */
#define TGR 6 /* channel register */

/* Byte offsets of the per-channel registers relative to mapbase. */
static unsigned long mtu2_reg_offs[] = {
	[TCR] = 0,
	[TMDR] = 1,
	[TIOR] = 2,
	[TIER] = 4,
	[TSR] = 5,
	[TCNT] = 6,
	[TGR] = 8,
};
  62. static inline unsigned long sh_mtu2_read(struct sh_mtu2_priv *p, int reg_nr)
  63. {
  64. struct sh_timer_config *cfg = p->pdev->dev.platform_data;
  65. void __iomem *base = p->mapbase;
  66. unsigned long offs;
  67. if (reg_nr == TSTR)
  68. return ioread8(base + cfg->channel_offset);
  69. offs = mtu2_reg_offs[reg_nr];
  70. if ((reg_nr == TCNT) || (reg_nr == TGR))
  71. return ioread16(base + offs);
  72. else
  73. return ioread8(base + offs);
  74. }
  75. static inline void sh_mtu2_write(struct sh_mtu2_priv *p, int reg_nr,
  76. unsigned long value)
  77. {
  78. struct sh_timer_config *cfg = p->pdev->dev.platform_data;
  79. void __iomem *base = p->mapbase;
  80. unsigned long offs;
  81. if (reg_nr == TSTR) {
  82. iowrite8(value, base + cfg->channel_offset);
  83. return;
  84. }
  85. offs = mtu2_reg_offs[reg_nr];
  86. if ((reg_nr == TCNT) || (reg_nr == TGR))
  87. iowrite16(value, base + offs);
  88. else
  89. iowrite8(value, base + offs);
  90. }
  91. static void sh_mtu2_start_stop_ch(struct sh_mtu2_priv *p, int start)
  92. {
  93. struct sh_timer_config *cfg = p->pdev->dev.platform_data;
  94. unsigned long flags, value;
  95. /* start stop register shared by multiple timer channels */
  96. raw_spin_lock_irqsave(&sh_mtu2_lock, flags);
  97. value = sh_mtu2_read(p, TSTR);
  98. if (start)
  99. value |= 1 << cfg->timer_bit;
  100. else
  101. value &= ~(1 << cfg->timer_bit);
  102. sh_mtu2_write(p, TSTR, value);
  103. raw_spin_unlock_irqrestore(&sh_mtu2_lock, flags);
  104. }
  105. static int sh_mtu2_enable(struct sh_mtu2_priv *p)
  106. {
  107. int ret;
  108. pm_runtime_get_sync(&p->pdev->dev);
  109. dev_pm_syscore_device(&p->pdev->dev, true);
  110. /* enable clock */
  111. ret = clk_enable(p->clk);
  112. if (ret) {
  113. dev_err(&p->pdev->dev, "cannot enable clock\n");
  114. return ret;
  115. }
  116. /* make sure channel is disabled */
  117. sh_mtu2_start_stop_ch(p, 0);
  118. p->rate = clk_get_rate(p->clk) / 64;
  119. p->periodic = (p->rate + HZ/2) / HZ;
  120. /* "Periodic Counter Operation" */
  121. sh_mtu2_write(p, TCR, 0x23); /* TGRA clear, divide clock by 64 */
  122. sh_mtu2_write(p, TIOR, 0);
  123. sh_mtu2_write(p, TGR, p->periodic);
  124. sh_mtu2_write(p, TCNT, 0);
  125. sh_mtu2_write(p, TMDR, 0);
  126. sh_mtu2_write(p, TIER, 0x01);
  127. /* enable channel */
  128. sh_mtu2_start_stop_ch(p, 1);
  129. return 0;
  130. }
/*
 * Stop the channel, then drop the clock and the runtime PM reference
 * taken in sh_mtu2_enable().  Order matters: the channel must stop
 * counting before its clock is gated.
 */
static void sh_mtu2_disable(struct sh_mtu2_priv *p)
{
	/* disable channel */
	sh_mtu2_start_stop_ch(p, 0);

	/* stop clock */
	clk_disable(p->clk);

	dev_pm_syscore_device(&p->pdev->dev, false);
	pm_runtime_put(&p->pdev->dev);
}
/*
 * Timer interrupt: acknowledge the compare-match flag and forward the
 * tick to the clockevent core.
 */
static irqreturn_t sh_mtu2_interrupt(int irq, void *dev_id)
{
	struct sh_mtu2_priv *p = dev_id;

	/* acknowledge interrupt */
	/* read TSR first, then write 0xfe — presumably clearing flag
	 * bit 0 (TGFA) while leaving the other bits set; MTU2 flags
	 * clear on write-0-after-read.  NOTE(review): confirm against
	 * the hardware manual. */
	sh_mtu2_read(p, TSR);
	sh_mtu2_write(p, TSR, 0xfe);

	/* notify clockevent layer */
	p->ced.event_handler(&p->ced);
	return IRQ_HANDLED;
}
/* Map a clock_event_device back to its containing driver instance. */
static struct sh_mtu2_priv *ced_to_sh_mtu2(struct clock_event_device *ced)
{
	return container_of(ced, struct sh_mtu2_priv, ced);
}
/*
 * set_mode callback: tear down the previous mode first, then configure
 * the requested one.  Only periodic operation is supported (the device
 * is registered with CLOCK_EVT_FEAT_PERIODIC only).
 */
static void sh_mtu2_clock_event_mode(enum clock_event_mode mode,
				     struct clock_event_device *ced)
{
	struct sh_mtu2_priv *p = ced_to_sh_mtu2(ced);
	int disabled = 0;

	/* deal with old setting first */
	switch (ced->mode) {
	case CLOCK_EVT_MODE_PERIODIC:
		sh_mtu2_disable(p);
		disabled = 1;
		break;
	default:
		break;
	}

	switch (mode) {
	case CLOCK_EVT_MODE_PERIODIC:
		dev_info(&p->pdev->dev, "used for periodic clock events\n");
		sh_mtu2_enable(p);
		break;
	case CLOCK_EVT_MODE_UNUSED:
		/* avoid disabling twice when leaving periodic mode */
		if (!disabled)
			sh_mtu2_disable(p);
		break;
	case CLOCK_EVT_MODE_SHUTDOWN:
	default:
		break;
	}
}
/* Power the timer's PM domain off in step with the clockevent core. */
static void sh_mtu2_clock_event_suspend(struct clock_event_device *ced)
{
	pm_genpd_syscore_poweroff(&ced_to_sh_mtu2(ced)->pdev->dev);
}
/* Power the timer's PM domain back on when the clockevent resumes. */
static void sh_mtu2_clock_event_resume(struct clock_event_device *ced)
{
	pm_genpd_syscore_poweron(&ced_to_sh_mtu2(ced)->pdev->dev);
}
  190. static void sh_mtu2_register_clockevent(struct sh_mtu2_priv *p,
  191. char *name, unsigned long rating)
  192. {
  193. struct clock_event_device *ced = &p->ced;
  194. int ret;
  195. memset(ced, 0, sizeof(*ced));
  196. ced->name = name;
  197. ced->features = CLOCK_EVT_FEAT_PERIODIC;
  198. ced->rating = rating;
  199. ced->cpumask = cpumask_of(0);
  200. ced->set_mode = sh_mtu2_clock_event_mode;
  201. ced->suspend = sh_mtu2_clock_event_suspend;
  202. ced->resume = sh_mtu2_clock_event_resume;
  203. dev_info(&p->pdev->dev, "used for clock events\n");
  204. clockevents_register_device(ced);
  205. ret = setup_irq(p->irqaction.irq, &p->irqaction);
  206. if (ret) {
  207. dev_err(&p->pdev->dev, "failed to request irq %d\n",
  208. p->irqaction.irq);
  209. return;
  210. }
  211. }
  212. static int sh_mtu2_register(struct sh_mtu2_priv *p, char *name,
  213. unsigned long clockevent_rating)
  214. {
  215. if (clockevent_rating)
  216. sh_mtu2_register_clockevent(p, name, clockevent_rating);
  217. return 0;
  218. }
/*
 * One-time channel setup: map the register window, prefill the
 * irqaction for a later setup_irq(), look up the function clock and
 * hand off to sh_mtu2_register().  Returns 0 on success or a negative
 * error code; acquired resources are released on failure via the
 * err1/err0 goto-cleanup chain.
 */
static int sh_mtu2_setup(struct sh_mtu2_priv *p, struct platform_device *pdev)
{
	struct sh_timer_config *cfg = pdev->dev.platform_data;
	struct resource *res;
	int irq, ret;
	ret = -ENXIO;	/* default error until a more specific one is known */

	memset(p, 0, sizeof(*p));
	p->pdev = pdev;

	if (!cfg) {
		dev_err(&p->pdev->dev, "missing platform data\n");
		goto err0;
	}

	/* mark the device as set up so probe() skips this on a re-probe */
	platform_set_drvdata(pdev, p);

	res = platform_get_resource(p->pdev, IORESOURCE_MEM, 0);
	if (!res) {
		dev_err(&p->pdev->dev, "failed to get I/O memory\n");
		goto err0;
	}

	irq = platform_get_irq(p->pdev, 0);
	if (irq < 0) {
		dev_err(&p->pdev->dev, "failed to get irq\n");
		goto err0;
	}

	/* map memory, let mapbase point to our channel */
	p->mapbase = ioremap_nocache(res->start, resource_size(res));
	if (p->mapbase == NULL) {
		dev_err(&p->pdev->dev, "failed to remap I/O memory\n");
		goto err0;
	}

	/* setup data for setup_irq() (too early for request_irq()) */
	p->irqaction.name = dev_name(&p->pdev->dev);
	p->irqaction.handler = sh_mtu2_interrupt;
	p->irqaction.dev_id = p;
	p->irqaction.irq = irq;
	p->irqaction.flags = IRQF_DISABLED | IRQF_TIMER | \
			     IRQF_IRQPOLL | IRQF_NOBALANCING;

	/* get hold of clock */
	p->clk = clk_get(&p->pdev->dev, "mtu2_fck");
	if (IS_ERR(p->clk)) {
		dev_err(&p->pdev->dev, "cannot get clock\n");
		ret = PTR_ERR(p->clk);
		goto err1;
	}

	return sh_mtu2_register(p, (char *)dev_name(&p->pdev->dev),
				cfg->clockevent_rating);
 err1:
	iounmap(p->mapbase);
 err0:
	return ret;
}
/*
 * Probe may run twice for the same device: once as an early platform
 * device ("earlytimer") during boot, and again during regular driver
 * registration.  On the second run drvdata is already set, so setup is
 * skipped and only the runtime PM state is finalized.
 */
static int sh_mtu2_probe(struct platform_device *pdev)
{
	struct sh_mtu2_priv *p = platform_get_drvdata(pdev);
	struct sh_timer_config *cfg = pdev->dev.platform_data;
	int ret;

	/* runtime PM is not available during the early boot phase */
	if (!is_early_platform_device(pdev)) {
		pm_runtime_set_active(&pdev->dev);
		pm_runtime_enable(&pdev->dev);
	}

	if (p) {
		/* second probe of a device kept from the early phase */
		dev_info(&pdev->dev, "kept as earlytimer\n");
		goto out;
	}

	p = kmalloc(sizeof(*p), GFP_KERNEL);
	if (p == NULL) {
		dev_err(&pdev->dev, "failed to allocate driver data\n");
		return -ENOMEM;
	}

	ret = sh_mtu2_setup(p, pdev);
	if (ret) {
		kfree(p);
		platform_set_drvdata(pdev, NULL);
		pm_runtime_idle(&pdev->dev);
		return ret;
	}

	/* early devices finish PM setup on the later, regular probe */
	if (is_early_platform_device(pdev))
		return 0;

 out:
	/* enable() may be called with interrupts off */
	if (cfg->clockevent_rating)
		pm_runtime_irq_safe(&pdev->dev);
	else
		pm_runtime_idle(&pdev->dev);

	return 0;
}
/* A registered clockevent cannot be torn down, so removal is refused. */
static int sh_mtu2_remove(struct platform_device *pdev)
{
	return -EBUSY; /* cannot unregister clockevent */
}
/* Platform driver matched by name against "sh_mtu2" platform devices. */
static struct platform_driver sh_mtu2_device_driver = {
	.probe = sh_mtu2_probe,
	.remove = sh_mtu2_remove,
	.driver = {
		.name = "sh_mtu2",
	}
};
/* Module entry point: register the platform driver with the core. */
static int __init sh_mtu2_init(void)
{
	return platform_driver_register(&sh_mtu2_device_driver);
}
/* Module exit point: unregister the platform driver. */
static void __exit sh_mtu2_exit(void)
{
	platform_driver_unregister(&sh_mtu2_device_driver);
}
/* Also register as an early platform driver so the MTU2 can serve as
 * the system "earlytimer" before the driver core is up. */
early_platform_init("earlytimer", &sh_mtu2_device_driver);
module_init(sh_mtu2_init);
module_exit(sh_mtu2_exit);

MODULE_AUTHOR("Magnus Damm");
MODULE_DESCRIPTION("SuperH MTU2 Timer Driver");
MODULE_LICENSE("GPL v2");