clock.c

/*
 * Clock and PLL control for DaVinci devices
 *
 * Copyright (C) 2006-2007 Texas Instruments.
 * Copyright (C) 2008-2009 Deep Root Systems, LLC
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 */
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/clk.h>
#include <linux/err.h>
#include <linux/mutex.h>
#include <linux/io.h>
#include <linux/delay.h>

#include <mach/hardware.h>
#include <mach/psc.h>
#include <mach/cputype.h>

#include "clock.h"
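
/*
 * clocks_mutex serializes changes to the clock list and parent/child
 * topology; clockfw_lock protects use counts, rate updates and the
 * PLL/PSC register sequences.
 */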
static LIST_HEAD(clocks);
static DEFINE_MUTEX(clocks_mutex);
static DEFINE_SPINLOCK(clockfw_lock);

static unsigned psc_domain(struct clk *clk)
{
        return (clk->flags & PSC_DSP)
                ? DAVINCI_GPSC_DSPDOMAIN
                : DAVINCI_GPSC_ARMDOMAIN;
}
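
/*
 * Enable a clock and, recursively, its parents.  The PSC module is only
 * switched on when the use count goes from zero to one.
 */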
static void __clk_enable(struct clk *clk)
{
        if (clk->parent)
                __clk_enable(clk->parent);
        if (clk->usecount++ == 0 && (clk->flags & CLK_PSC))
                davinci_psc_config(psc_domain(clk), clk->gpsc, clk->lpsc, 1);
}
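
/*
 * Drop one use of a clock, gating the PSC module (for non-PLL clocks) when
 * the count reaches zero, then release the parent chain in reverse order.
 */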
static void __clk_disable(struct clk *clk)
{
        if (WARN_ON(clk->usecount == 0))
                return;
        if (--clk->usecount == 0 && !(clk->flags & CLK_PLL) &&
            (clk->flags & CLK_PSC))
                davinci_psc_config(psc_domain(clk), clk->gpsc, clk->lpsc, 0);
        if (clk->parent)
                __clk_disable(clk->parent);
}

int clk_enable(struct clk *clk)
{
        unsigned long flags;

        if (clk == NULL || IS_ERR(clk))
                return -EINVAL;

        spin_lock_irqsave(&clockfw_lock, flags);
        __clk_enable(clk);
        spin_unlock_irqrestore(&clockfw_lock, flags);

        return 0;
}
EXPORT_SYMBOL(clk_enable);

void clk_disable(struct clk *clk)
{
        unsigned long flags;

        if (clk == NULL || IS_ERR(clk))
                return;

        spin_lock_irqsave(&clockfw_lock, flags);
        __clk_disable(clk);
        spin_unlock_irqrestore(&clockfw_lock, flags);
}
EXPORT_SYMBOL(clk_disable);

unsigned long clk_get_rate(struct clk *clk)
{
        if (clk == NULL || IS_ERR(clk))
                return -EINVAL;

        return clk->rate;
}
EXPORT_SYMBOL(clk_get_rate);

long clk_round_rate(struct clk *clk, unsigned long rate)
{
        if (clk == NULL || IS_ERR(clk))
                return -EINVAL;

        if (clk->round_rate)
                return clk->round_rate(clk, rate);

        return clk->rate;
}
EXPORT_SYMBOL(clk_round_rate);

/* Propagate rate to children */
static void propagate_rate(struct clk *root)
{
        struct clk *clk;

        list_for_each_entry(clk, &root->children, childnode) {
                if (clk->recalc)
                        clk->rate = clk->recalc(clk);
                propagate_rate(clk);
        }
}

int clk_set_rate(struct clk *clk, unsigned long rate)
{
        unsigned long flags;
        int ret = -EINVAL;

        if (clk == NULL || IS_ERR(clk))
                return ret;

        if (clk->set_rate)
                ret = clk->set_rate(clk, rate);

        spin_lock_irqsave(&clockfw_lock, flags);
        if (ret == 0) {
                if (clk->recalc)
                        clk->rate = clk->recalc(clk);
                propagate_rate(clk);
        }
        spin_unlock_irqrestore(&clockfw_lock, flags);

        return ret;
}
EXPORT_SYMBOL(clk_set_rate);

int clk_set_parent(struct clk *clk, struct clk *parent)
{
        unsigned long flags;

        if (clk == NULL || IS_ERR(clk))
                return -EINVAL;

        /* Cannot change parent on enabled clock */
        if (WARN_ON(clk->usecount))
                return -EINVAL;

        mutex_lock(&clocks_mutex);
        clk->parent = parent;
        list_del_init(&clk->childnode);
        list_add(&clk->childnode, &clk->parent->children);
        mutex_unlock(&clocks_mutex);

        spin_lock_irqsave(&clockfw_lock, flags);
        if (clk->recalc)
                clk->rate = clk->recalc(clk);
        propagate_rate(clk);
        spin_unlock_irqrestore(&clockfw_lock, flags);

        return 0;
}
EXPORT_SYMBOL(clk_set_parent);
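
/*
 * Add a clock to the global clock list, hook it under its parent (if any),
 * and compute an initial rate when one has not been provided.
 */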
int clk_register(struct clk *clk)
{
        if (clk == NULL || IS_ERR(clk))
                return -EINVAL;

        if (WARN(clk->parent && !clk->parent->rate,
                 "CLK: %s parent %s has no rate!\n",
                 clk->name, clk->parent->name))
                return -EINVAL;

        INIT_LIST_HEAD(&clk->children);

        mutex_lock(&clocks_mutex);
        list_add_tail(&clk->node, &clocks);
        if (clk->parent)
                list_add_tail(&clk->childnode, &clk->parent->children);
        mutex_unlock(&clocks_mutex);

        /* If rate is already set, use it */
        if (clk->rate)
                return 0;

        /* Else, see if there is a way to calculate it */
        if (clk->recalc)
                clk->rate = clk->recalc(clk);

        /* Otherwise, default to parent rate */
        else if (clk->parent)
                clk->rate = clk->parent->rate;

        return 0;
}
EXPORT_SYMBOL(clk_register);

void clk_unregister(struct clk *clk)
{
        if (clk == NULL || IS_ERR(clk))
                return;

        mutex_lock(&clocks_mutex);
        list_del(&clk->node);
        list_del(&clk->childnode);
        mutex_unlock(&clocks_mutex);
}
EXPORT_SYMBOL(clk_unregister);

#ifdef CONFIG_DAVINCI_RESET_CLOCKS
/*
 * Disable any unused clocks left on by the bootloader
 */
static int __init clk_disable_unused(void)
{
        struct clk *ck;

        spin_lock_irq(&clockfw_lock);
        list_for_each_entry(ck, &clocks, node) {
                if (ck->usecount > 0)
                        continue;
                if (!(ck->flags & CLK_PSC))
                        continue;

                /* ignore if in Disabled or SwRstDisable states */
                if (!davinci_psc_is_clk_active(ck->gpsc, ck->lpsc))
                        continue;

                pr_info("Clocks: disable unused %s\n", ck->name);

                davinci_psc_config(psc_domain(ck), ck->gpsc, ck->lpsc, 0);
        }
        spin_unlock_irq(&clockfw_lock);

        return 0;
}
late_initcall(clk_disable_unused);
#endif
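
/*
 * Recalculate the rate of a PLL SYSCLKn divider output: start from the
 * parent PLL's rate (or the raw input rate for PRE_PLL clocks) and apply
 * the PLLDIV ratio when the divider is enabled.
 */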
static unsigned long clk_sysclk_recalc(struct clk *clk)
{
        u32 v, plldiv;
        struct pll_data *pll;
        unsigned long rate = clk->rate;

        /* If this is the PLL base clock, no more calculations needed */
        if (clk->pll_data)
                return rate;

        if (WARN_ON(!clk->parent))
                return rate;

        rate = clk->parent->rate;

        /* Otherwise, the parent must be a PLL */
        if (WARN_ON(!clk->parent->pll_data))
                return rate;

        pll = clk->parent->pll_data;

        /* If pre-PLL, source clock is before the multiplier and divider(s) */
        if (clk->flags & PRE_PLL)
                rate = pll->input_rate;

        if (!clk->div_reg)
                return rate;

        v = __raw_readl(pll->base + clk->div_reg);
        if (v & PLLDIV_EN) {
                plldiv = (v & PLLDIV_RATIO_MASK) + 1;
                if (plldiv)
                        rate /= plldiv;
        }

        return rate;
}
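
/* Leaf clocks (e.g. PSC module clocks) simply track their parent's rate. */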
static unsigned long clk_leafclk_recalc(struct clk *clk)
{
        if (WARN_ON(!clk->parent))
                return clk->rate;

        return clk->parent->rate;
}
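
/*
 * Recalculate a PLL's output rate from its control registers: read PLLCTL
 * to see whether the PLL is bypassed, then apply the pre-divider,
 * multiplier and post-divider to the input rate.
 */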
static unsigned long clk_pllclk_recalc(struct clk *clk)
{
        u32 ctrl, mult = 1, prediv = 1, postdiv = 1;
        u8 bypass;
        struct pll_data *pll = clk->pll_data;
        unsigned long rate = clk->rate;

        pll->base = IO_ADDRESS(pll->phys_base);
        ctrl = __raw_readl(pll->base + PLLCTL);
        rate = pll->input_rate = clk->parent->rate;

        if (ctrl & PLLCTL_PLLEN) {
                bypass = 0;
                mult = __raw_readl(pll->base + PLLM);
                if (cpu_is_davinci_dm365())
                        mult = 2 * (mult & PLLM_PLLM_MASK);
                else
                        mult = (mult & PLLM_PLLM_MASK) + 1;
        } else
                bypass = 1;

        if (pll->flags & PLL_HAS_PREDIV) {
                prediv = __raw_readl(pll->base + PREDIV);
                if (prediv & PLLDIV_EN)
                        prediv = (prediv & PLLDIV_RATIO_MASK) + 1;
                else
                        prediv = 1;
        }

        /* pre-divider is fixed, but (some?) chips won't report that */
        if (cpu_is_davinci_dm355() && pll->num == 1)
                prediv = 8;

        if (pll->flags & PLL_HAS_POSTDIV) {
                postdiv = __raw_readl(pll->base + POSTDIV);
                if (postdiv & PLLDIV_EN)
                        postdiv = (postdiv & PLLDIV_RATIO_MASK) + 1;
                else
                        postdiv = 1;
        }

        if (!bypass) {
                rate /= prediv;
                rate *= mult;
                rate /= postdiv;
        }

        pr_debug("PLL%d: input = %lu MHz [ ",
                 pll->num, clk->parent->rate / 1000000);
        if (bypass)
                pr_debug("bypass ");
        if (prediv > 1)
                pr_debug("/ %d ", prediv);
        if (mult > 1)
                pr_debug("* %d ", mult);
        if (postdiv > 1)
                pr_debug("/ %d ", postdiv);
        pr_debug("] --> %lu MHz output.\n", rate / 1000000);

        return rate;
}

/**
 * davinci_set_pllrate - set the output rate of a given PLL.
 *
 * Note: Currently tested to work with OMAP-L138 only.
 *
 * @pll: pll whose rate needs to be changed.
 * @prediv: The pre divider value. Passing 0 disables the pre-divider.
 * @mult: The multiplier value. Passing 0 leads to multiply-by-one.
 * @postdiv: The post divider value. Passing 0 disables the post-divider.
 */
int davinci_set_pllrate(struct pll_data *pll, unsigned int prediv,
                        unsigned int mult, unsigned int postdiv)
{
        u32 ctrl;
        unsigned int locktime;
        unsigned long flags;

        if (pll->base == NULL)
                return -EINVAL;

        /*
         * PLL lock time required per OMAP-L138 datasheet is
         * (2000 * prediv)/sqrt(pllm) OSCIN cycles. We approximate sqrt(pllm)
         * as 4 and OSCIN cycle as 25 MHz.
         */
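        /*
         * With those approximations, (2000 * prediv) / 4 cycles at 25 MHz
         * (0.04 us per cycle) works out to (2000 * prediv) / 100 us below.
         */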
        if (prediv) {
                locktime = ((2000 * prediv) / 100);
                prediv = (prediv - 1) | PLLDIV_EN;
        } else {
                locktime = PLL_LOCK_TIME;
        }
        if (postdiv)
                postdiv = (postdiv - 1) | PLLDIV_EN;
        if (mult)
                mult = mult - 1;

        /* Protect against simultaneous calls to PLL setting sequence */
        spin_lock_irqsave(&clockfw_lock, flags);

        ctrl = __raw_readl(pll->base + PLLCTL);

        /* Switch the PLL to bypass mode */
        ctrl &= ~(PLLCTL_PLLENSRC | PLLCTL_PLLEN);
        __raw_writel(ctrl, pll->base + PLLCTL);

        udelay(PLL_BYPASS_TIME);

        /* Reset and enable PLL */
        ctrl &= ~(PLLCTL_PLLRST | PLLCTL_PLLDIS);
        __raw_writel(ctrl, pll->base + PLLCTL);

        if (pll->flags & PLL_HAS_PREDIV)
                __raw_writel(prediv, pll->base + PREDIV);

        __raw_writel(mult, pll->base + PLLM);

        if (pll->flags & PLL_HAS_POSTDIV)
                __raw_writel(postdiv, pll->base + POSTDIV);

        udelay(PLL_RESET_TIME);

        /* Bring PLL out of reset */
        ctrl |= PLLCTL_PLLRST;
        __raw_writel(ctrl, pll->base + PLLCTL);

        udelay(locktime);

        /* Remove PLL from bypass mode */
        ctrl |= PLLCTL_PLLEN;
        __raw_writel(ctrl, pll->base + PLLCTL);

        spin_unlock_irqrestore(&clockfw_lock, flags);

        return 0;
}
EXPORT_SYMBOL(davinci_set_pllrate);
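
/*
 * Register a board's clock table: pick a recalc helper for each clock based
 * on its type (PLL, PLL-derived sysclk, or leaf), compute initial rates,
 * and enable clocks that must stay on.
 */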
int __init davinci_clk_init(struct clk_lookup *clocks)
{
        struct clk_lookup *c;
        struct clk *clk;
        size_t num_clocks = 0;

        for (c = clocks; c->clk; c++) {
                clk = c->clk;

                if (!clk->recalc) {

                        /* Check if clock is a PLL */
                        if (clk->pll_data)
                                clk->recalc = clk_pllclk_recalc;

                        /* Else, if it is a PLL-derived clock */
                        else if (clk->flags & CLK_PLL)
                                clk->recalc = clk_sysclk_recalc;

                        /* Otherwise, it is a leaf clock (PSC clock) */
                        else if (clk->parent)
                                clk->recalc = clk_leafclk_recalc;
                }

                if (clk->recalc)
                        clk->rate = clk->recalc(clk);

                if (clk->lpsc)
                        clk->flags |= CLK_PSC;

                clk_register(clk);
                num_clocks++;

                /* Turn on clocks that Linux doesn't otherwise manage */
                if (clk->flags & ALWAYS_ENABLED)
                        clk_enable(clk);
        }

        clkdev_add_table(clocks, num_clocks);

        return 0;
}

#ifdef CONFIG_DEBUG_FS

#include <linux/debugfs.h>
#include <linux/seq_file.h>

#define CLKNAME_MAX     10              /* longest clock name */
#define NEST_DELTA      2
#define NEST_MAX        4
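
/*
 * Print one clock and then, recursively, its children, indenting each
 * nesting level by NEST_DELTA spaces.
 */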
static void
dump_clock(struct seq_file *s, unsigned nest, struct clk *parent)
{
        char            *state;
        char            buf[CLKNAME_MAX + NEST_DELTA * NEST_MAX];
        struct clk      *clk;
        unsigned        i;

        if (parent->flags & CLK_PLL)
                state = "pll";
        else if (parent->flags & CLK_PSC)
                state = "psc";
        else
                state = "";

        /* <nest spaces> name <pad to end> */
        memset(buf, ' ', sizeof(buf) - 1);
        buf[sizeof(buf) - 1] = 0;
        i = strlen(parent->name);
        memcpy(buf + nest, parent->name,
               min(i, (unsigned)(sizeof(buf) - 1 - nest)));

        seq_printf(s, "%s users=%2d %-3s %9ld Hz\n",
                   buf, parent->usecount, state, clk_get_rate(parent));

        /* REVISIT show device associations too */

        /* cost is now small, but not linear... */
        list_for_each_entry(clk, &parent->children, childnode) {
                dump_clock(s, nest + NEST_DELTA, clk);
        }
}

static int davinci_ck_show(struct seq_file *m, void *v)
{
        struct clk *clk;

        /*
         * Show clock tree; We trust nonzero usecounts equate to PSC enables...
         */
        mutex_lock(&clocks_mutex);
        list_for_each_entry(clk, &clocks, node)
                if (!clk->parent)
                        dump_clock(m, 0, clk);
        mutex_unlock(&clocks_mutex);

        return 0;
}

static int davinci_ck_open(struct inode *inode, struct file *file)
{
        return single_open(file, davinci_ck_show, NULL);
}

static const struct file_operations davinci_ck_operations = {
        .open           = davinci_ck_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = single_release,
};

static int __init davinci_clk_debugfs_init(void)
{
        debugfs_create_file("davinci_clocks", S_IFREG | S_IRUGO, NULL, NULL,
                            &davinci_ck_operations);
        return 0;
}
device_initcall(davinci_clk_debugfs_init);
#endif /* CONFIG_DEBUG_FS */