clock.c 14 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349350351352353354355356357358359360361362363364365366367368369370371372373374375376377378379380381382383384385386387388389390391392393394395396397398399400401402403404405406407408409410411412413414415416417418419420421422423424425426427428429430431432433434435436437438439440441442443444445446447448449450451452453454455456457458459460461462463464465466467468469470471472473474475476477478479480481482483484485486487488489490491492493494495496497498499500501502503504505506507508509510511512513514515516517518519520521522523524525526527528529530531532533534535536537538539540541542543544545546547548549550551552553554555556557558559560561562563564565566567568569570571572573574575576577578579580581582583584585586587588589590591592593594595596597598599600601602603604605606607608609610611612613614615616617618619620621622623624625626627628629630631632633634635636637638639640641
  1. /*
  2. * Clock and PLL control for DaVinci devices
  3. *
  4. * Copyright (C) 2006-2007 Texas Instruments.
  5. * Copyright (C) 2008-2009 Deep Root Systems, LLC
  6. *
  7. * This program is free software; you can redistribute it and/or modify
  8. * it under the terms of the GNU General Public License as published by
  9. * the Free Software Foundation; either version 2 of the License, or
  10. * (at your option) any later version.
  11. */
  12. #include <linux/module.h>
  13. #include <linux/kernel.h>
  14. #include <linux/init.h>
  15. #include <linux/errno.h>
  16. #include <linux/clk.h>
  17. #include <linux/err.h>
  18. #include <linux/mutex.h>
  19. #include <linux/io.h>
  20. #include <linux/delay.h>
  21. #include <mach/hardware.h>
  22. #include <mach/clock.h>
  23. #include <mach/psc.h>
  24. #include <mach/cputype.h>
  25. #include "clock.h"
/* All registered clocks, linked through clk->node */
static LIST_HEAD(clocks);
/* Serializes clock-tree topology changes (register/unregister/reparent) */
static DEFINE_MUTEX(clocks_mutex);
/* Protects usecounts, cached rates, and PSC/PLL register sequences */
static DEFINE_SPINLOCK(clockfw_lock);
  29. static unsigned psc_domain(struct clk *clk)
  30. {
  31. return (clk->flags & PSC_DSP)
  32. ? DAVINCI_GPSC_DSPDOMAIN
  33. : DAVINCI_GPSC_ARMDOMAIN;
  34. }
  35. static void __clk_enable(struct clk *clk)
  36. {
  37. if (clk->parent)
  38. __clk_enable(clk->parent);
  39. if (clk->usecount++ == 0 && (clk->flags & CLK_PSC))
  40. davinci_psc_config(psc_domain(clk), clk->gpsc, clk->lpsc,
  41. PSC_STATE_ENABLE);
  42. }
/*
 * Drop one reference on @clk, gating it via the PSC on the 1 -> 0
 * transition, then release the references this clock held on its
 * ancestors.  Caller must hold clockfw_lock.
 *
 * NOTE(review): unlike __clk_enable(), clocks flagged CLK_PLL are never
 * gated here even if they also have CLK_PSC — presumably PLL outputs
 * must keep running; confirm before "fixing" the asymmetry.
 */
static void __clk_disable(struct clk *clk)
{
	/* Refcount underflow means an unbalanced clk_disable() somewhere */
	if (WARN_ON(clk->usecount == 0))
		return;

	if (--clk->usecount == 0 && !(clk->flags & CLK_PLL) &&
	    (clk->flags & CLK_PSC))
		davinci_psc_config(psc_domain(clk), clk->gpsc, clk->lpsc,
				   (clk->flags & PSC_SWRSTDISABLE) ?
				   PSC_STATE_SWRSTDISABLE : PSC_STATE_DISABLE);

	if (clk->parent)
		__clk_disable(clk->parent);
}
/*
 * clk_enable - enable a clock (and, implicitly, all of its parents).
 * Returns 0 on success, -EINVAL for a NULL or error pointer.
 */
int clk_enable(struct clk *clk)
{
	unsigned long flags;

	if (clk == NULL || IS_ERR(clk))
		return -EINVAL;

	/* IRQ-safe: clock enables may be requested from atomic context */
	spin_lock_irqsave(&clockfw_lock, flags);
	__clk_enable(clk);
	spin_unlock_irqrestore(&clockfw_lock, flags);

	return 0;
}
EXPORT_SYMBOL(clk_enable);
/*
 * clk_disable - drop one reference on a clock; gates it when the last
 * user goes away.  NULL/error pointers are silently ignored.
 */
void clk_disable(struct clk *clk)
{
	unsigned long flags;

	if (clk == NULL || IS_ERR(clk))
		return;

	spin_lock_irqsave(&clockfw_lock, flags);
	__clk_disable(clk);
	spin_unlock_irqrestore(&clockfw_lock, flags);
}
EXPORT_SYMBOL(clk_disable);
  76. unsigned long clk_get_rate(struct clk *clk)
  77. {
  78. if (clk == NULL || IS_ERR(clk))
  79. return -EINVAL;
  80. return clk->rate;
  81. }
  82. EXPORT_SYMBOL(clk_get_rate);
  83. long clk_round_rate(struct clk *clk, unsigned long rate)
  84. {
  85. if (clk == NULL || IS_ERR(clk))
  86. return -EINVAL;
  87. if (clk->round_rate)
  88. return clk->round_rate(clk, rate);
  89. return clk->rate;
  90. }
  91. EXPORT_SYMBOL(clk_round_rate);
  92. /* Propagate rate to children */
  93. static void propagate_rate(struct clk *root)
  94. {
  95. struct clk *clk;
  96. list_for_each_entry(clk, &root->children, childnode) {
  97. if (clk->recalc)
  98. clk->rate = clk->recalc(clk);
  99. propagate_rate(clk);
  100. }
  101. }
/*
 * clk_set_rate - change a clock's rate via its set_rate hook, then
 * refresh the cached rates of the clock and all of its children.
 * Returns the hook's result, or -EINVAL if there is no hook / bad clk.
 */
int clk_set_rate(struct clk *clk, unsigned long rate)
{
	unsigned long flags;
	int ret = -EINVAL;

	if (clk == NULL || IS_ERR(clk))
		return ret;

	/*
	 * NOTE(review): set_rate is deliberately invoked outside
	 * clockfw_lock — presumably because implementations (e.g.
	 * davinci_set_sysclk_rate) busy-wait on hardware; confirm before
	 * moving it inside the lock.
	 */
	if (clk->set_rate)
		ret = clk->set_rate(clk, rate);

	spin_lock_irqsave(&clockfw_lock, flags);
	if (ret == 0) {
		/* Re-read our own rate, then push the change downstream */
		if (clk->recalc)
			clk->rate = clk->recalc(clk);
		propagate_rate(clk);
	}
	spin_unlock_irqrestore(&clockfw_lock, flags);

	return ret;
}
EXPORT_SYMBOL(clk_set_rate);
  120. int clk_set_parent(struct clk *clk, struct clk *parent)
  121. {
  122. unsigned long flags;
  123. if (clk == NULL || IS_ERR(clk))
  124. return -EINVAL;
  125. /* Cannot change parent on enabled clock */
  126. if (WARN_ON(clk->usecount))
  127. return -EINVAL;
  128. mutex_lock(&clocks_mutex);
  129. clk->parent = parent;
  130. list_del_init(&clk->childnode);
  131. list_add(&clk->childnode, &clk->parent->children);
  132. mutex_unlock(&clocks_mutex);
  133. spin_lock_irqsave(&clockfw_lock, flags);
  134. if (clk->recalc)
  135. clk->rate = clk->recalc(clk);
  136. propagate_rate(clk);
  137. spin_unlock_irqrestore(&clockfw_lock, flags);
  138. return 0;
  139. }
  140. EXPORT_SYMBOL(clk_set_parent);
  141. int clk_register(struct clk *clk)
  142. {
  143. if (clk == NULL || IS_ERR(clk))
  144. return -EINVAL;
  145. if (WARN(clk->parent && !clk->parent->rate,
  146. "CLK: %s parent %s has no rate!\n",
  147. clk->name, clk->parent->name))
  148. return -EINVAL;
  149. INIT_LIST_HEAD(&clk->children);
  150. mutex_lock(&clocks_mutex);
  151. list_add_tail(&clk->node, &clocks);
  152. if (clk->parent)
  153. list_add_tail(&clk->childnode, &clk->parent->children);
  154. mutex_unlock(&clocks_mutex);
  155. /* If rate is already set, use it */
  156. if (clk->rate)
  157. return 0;
  158. /* Else, see if there is a way to calculate it */
  159. if (clk->recalc)
  160. clk->rate = clk->recalc(clk);
  161. /* Otherwise, default to parent rate */
  162. else if (clk->parent)
  163. clk->rate = clk->parent->rate;
  164. return 0;
  165. }
  166. EXPORT_SYMBOL(clk_register);
  167. void clk_unregister(struct clk *clk)
  168. {
  169. if (clk == NULL || IS_ERR(clk))
  170. return;
  171. mutex_lock(&clocks_mutex);
  172. list_del(&clk->node);
  173. list_del(&clk->childnode);
  174. mutex_unlock(&clocks_mutex);
  175. }
  176. EXPORT_SYMBOL(clk_unregister);
  177. #ifdef CONFIG_DAVINCI_RESET_CLOCKS
  178. /*
  179. * Disable any unused clocks left on by the bootloader
  180. */
  181. static int __init clk_disable_unused(void)
  182. {
  183. struct clk *ck;
  184. spin_lock_irq(&clockfw_lock);
  185. list_for_each_entry(ck, &clocks, node) {
  186. if (ck->usecount > 0)
  187. continue;
  188. if (!(ck->flags & CLK_PSC))
  189. continue;
  190. /* ignore if in Disabled or SwRstDisable states */
  191. if (!davinci_psc_is_clk_active(ck->gpsc, ck->lpsc))
  192. continue;
  193. pr_debug("Clocks: disable unused %s\n", ck->name);
  194. davinci_psc_config(psc_domain(ck), ck->gpsc, ck->lpsc,
  195. (ck->flags & PSC_SWRSTDISABLE) ?
  196. PSC_STATE_SWRSTDISABLE : PSC_STATE_DISABLE);
  197. }
  198. spin_unlock_irq(&clockfw_lock);
  199. return 0;
  200. }
  201. late_initcall(clk_disable_unused);
  202. #endif
/*
 * Recompute the rate of a PLL-derived "sysclk": the parent PLL's rate
 * divided by this clock's PLLDIV register, when that divider is enabled.
 */
static unsigned long clk_sysclk_recalc(struct clk *clk)
{
	u32 v, plldiv;
	struct pll_data *pll;
	unsigned long rate = clk->rate;

	/* If this is the PLL base clock, no more calculations needed */
	if (clk->pll_data)
		return rate;

	if (WARN_ON(!clk->parent))
		return rate;

	rate = clk->parent->rate;

	/* Otherwise, the parent must be a PLL */
	if (WARN_ON(!clk->parent->pll_data))
		return rate;

	pll = clk->parent->pll_data;

	/* If pre-PLL, source clock is before the multiplier and divider(s) */
	if (clk->flags & PRE_PLL)
		rate = pll->input_rate;

	/* No divider register: this sysclk passes its source through */
	if (!clk->div_reg)
		return rate;

	v = __raw_readl(pll->base + clk->div_reg);
	if (v & PLLDIV_EN) {
		/* Hardware encodes divide-by-N as N-1 */
		plldiv = (v & pll->div_ratio_mask) + 1;
		if (plldiv)
			rate /= plldiv;
	}

	return rate;
}
/*
 * davinci_set_sysclk_rate - program the divider of a PLL sysclk output.
 * @clk: the sysclk whose divider is to be changed (not a PLL base clock)
 * @rate: requested output rate in Hz
 *
 * Picks a divider ratio from the parent PLL (or pre-PLL input) rate,
 * writes it to the clock's PLLDIV register, and latches it with the
 * GOSET/GOSTAT handshake.  Returns 0 on success, -EINVAL if the clock
 * has no usable divider or the required ratio doesn't fit the field.
 */
int davinci_set_sysclk_rate(struct clk *clk, unsigned long rate)
{
	unsigned v;
	struct pll_data *pll;
	unsigned long input;
	unsigned ratio = 0;

	/* If this is the PLL base clock, wrong function to call */
	if (clk->pll_data)
		return -EINVAL;

	/* There must be a parent... */
	if (WARN_ON(!clk->parent))
		return -EINVAL;

	/* ... the parent must be a PLL... */
	if (WARN_ON(!clk->parent->pll_data))
		return -EINVAL;

	/* ... and this clock must have a divider. */
	if (WARN_ON(!clk->div_reg))
		return -EINVAL;

	pll = clk->parent->pll_data;

	input = clk->parent->rate;

	/* If pre-PLL, source clock is before the multiplier and divider(s) */
	if (clk->flags & PRE_PLL)
		input = pll->input_rate;

	if (input > rate) {
		/*
		 * Can afford to provide an output little higher than requested
		 * only if maximum rate supported by hardware on this sysclk
		 * is known.
		 */
		if (clk->maxrate) {
			ratio = DIV_ROUND_CLOSEST(input, rate);
			if (input / ratio > clk->maxrate)
				ratio = 0;
		}

		/* Fall back to never exceeding the requested rate */
		if (ratio == 0)
			ratio = DIV_ROUND_UP(input, rate);

		/* Hardware encodes divide-by-N as N-1 */
		ratio--;
	}

	if (ratio > pll->div_ratio_mask)
		return -EINVAL;

	/* Wait for any in-flight GO operation before reprogramming */
	do {
		v = __raw_readl(pll->base + PLLSTAT);
	} while (v & PLLSTAT_GOSTAT);

	v = __raw_readl(pll->base + clk->div_reg);
	v &= ~pll->div_ratio_mask;
	v |= ratio | PLLDIV_EN;
	__raw_writel(v, pll->base + clk->div_reg);

	/* Latch the new divider via GOSET, then wait for completion */
	v = __raw_readl(pll->base + PLLCMD);
	v |= PLLCMD_GOSET;
	__raw_writel(v, pll->base + PLLCMD);

	do {
		v = __raw_readl(pll->base + PLLSTAT);
	} while (v & PLLSTAT_GOSTAT);

	return 0;
}
EXPORT_SYMBOL(davinci_set_sysclk_rate);
  287. static unsigned long clk_leafclk_recalc(struct clk *clk)
  288. {
  289. if (WARN_ON(!clk->parent))
  290. return clk->rate;
  291. return clk->parent->rate;
  292. }
/*
 * Recompute a PLL base clock's output rate from the PLLCTL/PLLM and
 * optional PREDIV/POSTDIV registers.  Also caches the reference rate
 * in pll->input_rate for the PRE_PLL sysclk case.
 */
static unsigned long clk_pllclk_recalc(struct clk *clk)
{
	u32 ctrl, mult = 1, prediv = 1, postdiv = 1;
	u8 bypass;
	struct pll_data *pll = clk->pll_data;
	unsigned long rate = clk->rate;

	ctrl = __raw_readl(pll->base + PLLCTL);
	rate = pll->input_rate = clk->parent->rate;

	if (ctrl & PLLCTL_PLLEN) {
		bypass = 0;
		mult = __raw_readl(pll->base + PLLM);
		/* DM365 doubles the programmed multiplier in hardware */
		if (cpu_is_davinci_dm365())
			mult = 2 * (mult & PLLM_PLLM_MASK);
		else
			mult = (mult & PLLM_PLLM_MASK) + 1;
	} else
		bypass = 1;	/* PLL disabled: reference passes straight through */

	if (pll->flags & PLL_HAS_PREDIV) {
		prediv = __raw_readl(pll->base + PREDIV);
		if (prediv & PLLDIV_EN)
			prediv = (prediv & pll->div_ratio_mask) + 1;
		else
			prediv = 1;
	}

	/* pre-divider is fixed, but (some?) chips won't report that */
	if (cpu_is_davinci_dm355() && pll->num == 1)
		prediv = 8;

	if (pll->flags & PLL_HAS_POSTDIV) {
		postdiv = __raw_readl(pll->base + POSTDIV);
		if (postdiv & PLLDIV_EN)
			postdiv = (postdiv & pll->div_ratio_mask) + 1;
		else
			postdiv = 1;
	}

	if (!bypass) {
		rate /= prediv;
		rate *= mult;
		rate /= postdiv;
	}

	pr_debug("PLL%d: input = %lu MHz [ ",
		 pll->num, clk->parent->rate / 1000000);
	if (bypass)
		pr_debug("bypass ");
	if (prediv > 1)
		pr_debug("/ %d ", prediv);
	if (mult > 1)
		pr_debug("* %d ", mult);
	if (postdiv > 1)
		pr_debug("/ %d ", postdiv);
	pr_debug("] --> %lu MHz output.\n", rate / 1000000);

	return rate;
}
/**
 * davinci_set_pllrate - set the output rate of a given PLL.
 *
 * Note: Currently tested to work with OMAP-L138 only.
 *
 * @pll: pll whose rate needs to be changed.
 * @prediv: The pre divider value. Passing 0 disables the pre-divider.
 * @mult: The multiplier value. Passing 0 leads to multiply-by-one.
 * @postdiv: The post divider value. Passing 0 disables the post-divider.
 *
 * Returns 0 on success, or -EINVAL if the PLL registers are not mapped.
 */
int davinci_set_pllrate(struct pll_data *pll, unsigned int prediv,
			unsigned int mult, unsigned int postdiv)
{
	u32 ctrl;
	unsigned int locktime;
	unsigned long flags;

	if (pll->base == NULL)
		return -EINVAL;

	/*
	 * PLL lock time required per OMAP-L138 datasheet is
	 * (2000 * prediv)/sqrt(pllm) OSCIN cycles. We approximate sqrt(pllm)
	 * as 4 and OSCIN cycle as 25 MHz.
	 */
	if (prediv) {
		locktime = ((2000 * prediv) / 100);
		/* Hardware encodes divide-by-N as N-1, plus an enable bit */
		prediv = (prediv - 1) | PLLDIV_EN;
	} else {
		locktime = PLL_LOCK_TIME;
	}
	if (postdiv)
		postdiv = (postdiv - 1) | PLLDIV_EN;
	if (mult)
		mult = mult - 1;

	/* Protect against simultaneous calls to PLL setting sequence */
	spin_lock_irqsave(&clockfw_lock, flags);

	ctrl = __raw_readl(pll->base + PLLCTL);

	/* Switch the PLL to bypass mode */
	ctrl &= ~(PLLCTL_PLLENSRC | PLLCTL_PLLEN);
	__raw_writel(ctrl, pll->base + PLLCTL);

	udelay(PLL_BYPASS_TIME);

	/* Reset and enable PLL */
	ctrl &= ~(PLLCTL_PLLRST | PLLCTL_PLLDIS);
	__raw_writel(ctrl, pll->base + PLLCTL);

	/* Program dividers/multiplier while the PLL is held in reset */
	if (pll->flags & PLL_HAS_PREDIV)
		__raw_writel(prediv, pll->base + PREDIV);

	__raw_writel(mult, pll->base + PLLM);

	if (pll->flags & PLL_HAS_POSTDIV)
		__raw_writel(postdiv, pll->base + POSTDIV);

	udelay(PLL_RESET_TIME);

	/* Bring PLL out of reset */
	ctrl |= PLLCTL_PLLRST;
	__raw_writel(ctrl, pll->base + PLLCTL);

	/* Wait for the PLL to lock before leaving bypass */
	udelay(locktime);

	/* Remove PLL from bypass mode */
	ctrl |= PLLCTL_PLLEN;
	__raw_writel(ctrl, pll->base + PLLCTL);

	spin_unlock_irqrestore(&clockfw_lock, flags);

	return 0;
}
EXPORT_SYMBOL(davinci_set_pllrate);
  405. int __init davinci_clk_init(struct clk_lookup *clocks)
  406. {
  407. struct clk_lookup *c;
  408. struct clk *clk;
  409. size_t num_clocks = 0;
  410. for (c = clocks; c->clk; c++) {
  411. clk = c->clk;
  412. if (!clk->recalc) {
  413. /* Check if clock is a PLL */
  414. if (clk->pll_data)
  415. clk->recalc = clk_pllclk_recalc;
  416. /* Else, if it is a PLL-derived clock */
  417. else if (clk->flags & CLK_PLL)
  418. clk->recalc = clk_sysclk_recalc;
  419. /* Otherwise, it is a leaf clock (PSC clock) */
  420. else if (clk->parent)
  421. clk->recalc = clk_leafclk_recalc;
  422. }
  423. if (clk->pll_data) {
  424. struct pll_data *pll = clk->pll_data;
  425. if (!pll->div_ratio_mask)
  426. pll->div_ratio_mask = PLLDIV_RATIO_MASK;
  427. if (pll->phys_base && !pll->base) {
  428. pll->base = ioremap(pll->phys_base, SZ_4K);
  429. WARN_ON(!pll->base);
  430. }
  431. }
  432. if (clk->recalc)
  433. clk->rate = clk->recalc(clk);
  434. if (clk->lpsc)
  435. clk->flags |= CLK_PSC;
  436. clk_register(clk);
  437. num_clocks++;
  438. /* Turn on clocks that Linux doesn't otherwise manage */
  439. if (clk->flags & ALWAYS_ENABLED)
  440. clk_enable(clk);
  441. }
  442. clkdev_add_table(clocks, num_clocks);
  443. return 0;
  444. }
  445. #ifdef CONFIG_DEBUG_FS
  446. #include <linux/debugfs.h>
  447. #include <linux/seq_file.h>
  448. #define CLKNAME_MAX 10 /* longest clock name */
  449. #define NEST_DELTA 2
  450. #define NEST_MAX 4
/*
 * Print one clock, indented @nest columns and padded to a fixed width,
 * followed (recursively) by all of its children.  Produces the tree
 * shown by the davinci_clocks debugfs file.
 */
static void
dump_clock(struct seq_file *s, unsigned nest, struct clk *parent)
{
	char *state;
	char buf[CLKNAME_MAX + NEST_DELTA * NEST_MAX];
	struct clk *clk;
	unsigned i;

	if (parent->flags & CLK_PLL)
		state = "pll";
	else if (parent->flags & CLK_PSC)
		state = "psc";
	else
		state = "";

	/* <nest spaces> name <pad to end> */
	memset(buf, ' ', sizeof(buf) - 1);
	buf[sizeof(buf) - 1] = 0;
	i = strlen(parent->name);
	/* Clamp the copy so deeply nested/long names can't overrun buf */
	memcpy(buf + nest, parent->name,
	       min(i, (unsigned)(sizeof(buf) - 1 - nest)));

	seq_printf(s, "%s users=%2d %-3s %9ld Hz\n",
		   buf, parent->usecount, state, clk_get_rate(parent));
	/* REVISIT show device associations too */

	/* cost is now small, but not linear... */
	list_for_each_entry(clk, &parent->children, childnode) {
		dump_clock(s, nest + NEST_DELTA, clk);
	}
}
  478. static int davinci_ck_show(struct seq_file *m, void *v)
  479. {
  480. struct clk *clk;
  481. /*
  482. * Show clock tree; We trust nonzero usecounts equate to PSC enables...
  483. */
  484. mutex_lock(&clocks_mutex);
  485. list_for_each_entry(clk, &clocks, node)
  486. if (!clk->parent)
  487. dump_clock(m, 0, clk);
  488. mutex_unlock(&clocks_mutex);
  489. return 0;
  490. }
/* Open hook: wire this file to davinci_ck_show via the single_open helper. */
static int davinci_ck_open(struct inode *inode, struct file *file)
{
	return single_open(file, davinci_ck_show, NULL);
}
/* Read-only debugfs file ops, backed by the seq_file single_* helpers. */
static const struct file_operations davinci_ck_operations = {
	.open = davinci_ck_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};
/* Create the "davinci_clocks" file under the debugfs root at boot. */
static int __init davinci_clk_debugfs_init(void)
{
	debugfs_create_file("davinci_clocks", S_IFREG | S_IRUGO, NULL, NULL,
			    &davinci_ck_operations);
	return 0;
}
device_initcall(davinci_clk_debugfs_init);
  508. #endif /* CONFIG_DEBUG_FS */