clock.c 24 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135
  1. /*
  2. * Clock manipulation routines for Freescale STMP37XX/STMP378X
  3. *
  4. * Author: Vitaly Wool <vital@embeddedalley.com>
  5. *
  6. * Copyright 2008 Freescale Semiconductor, Inc. All Rights Reserved.
  7. * Copyright 2008 Embedded Alley Solutions, Inc All Rights Reserved.
  8. */
  9. /*
  10. * The code contained herein is licensed under the GNU General Public
  11. * License. You may obtain a copy of the GNU General Public License
  12. * Version 2 or later at the following locations:
  13. *
  14. * http://www.opensource.org/licenses/gpl-license.html
  15. * http://www.gnu.org/copyleft/gpl.html
  16. */
  17. #define DEBUG
  18. #include <linux/kernel.h>
  19. #include <linux/module.h>
  20. #include <linux/init.h>
  21. #include <linux/clk.h>
  22. #include <linux/spinlock.h>
  23. #include <linux/errno.h>
  24. #include <linux/err.h>
  25. #include <linux/delay.h>
  26. #include <linux/io.h>
  27. #include <asm/mach-types.h>
  28. #include <asm/clkdev.h>
  29. #include <mach/platform.h>
  30. #include <mach/regs-clkctrl.h>
  31. #include "clock.h"
/* Serializes usage counting and hardware enable/disable/reparent ops. */
static DEFINE_SPINLOCK(clocks_lock);

/* Forward declarations: these clocks reference one another as parents. */
static struct clk osc_24M;
static struct clk pll_clk;
static struct clk cpu_clk;
static struct clk hclk;

/* Recomputes cached rates of all children of a clock (defined below). */
static int propagate_rate(struct clk *);
  38. static inline int clk_is_busy(struct clk *clk)
  39. {
  40. return __raw_readl(clk->busy_reg) & (1 << clk->busy_bit);
  41. }
  42. static inline int clk_good(struct clk *clk)
  43. {
  44. return clk && !IS_ERR(clk) && clk->ops;
  45. }
  46. static int std_clk_enable(struct clk *clk)
  47. {
  48. if (clk->enable_reg) {
  49. u32 clk_reg = __raw_readl(clk->enable_reg);
  50. if (clk->enable_negate)
  51. clk_reg &= ~(1 << clk->enable_shift);
  52. else
  53. clk_reg |= (1 << clk->enable_shift);
  54. __raw_writel(clk_reg, clk->enable_reg);
  55. if (clk->enable_wait)
  56. udelay(clk->enable_wait);
  57. return 0;
  58. } else
  59. return -EINVAL;
  60. }
  61. static int std_clk_disable(struct clk *clk)
  62. {
  63. if (clk->enable_reg) {
  64. u32 clk_reg = __raw_readl(clk->enable_reg);
  65. if (clk->enable_negate)
  66. clk_reg |= (1 << clk->enable_shift);
  67. else
  68. clk_reg &= ~(1 << clk->enable_shift);
  69. __raw_writel(clk_reg, clk->enable_reg);
  70. return 0;
  71. } else
  72. return -EINVAL;
  73. }
/*
 * Set the IO reference clock rate by programming its 18/N fractional
 * divider.  The divider must land in [18, 35], so the achievable range
 * is parent*18/35 .. parent.  Returns -EINVAL for an out-of-range rate
 * and -ETIMEDOUT if the divider transfer never completes.
 */
static int io_set_rate(struct clk *clk, u32 rate)
{
	u32 reg_frac, clkctrl_frac;
	int i, ret = 0, mask = 0x1f;

	/* Round the divider up so the resulting rate never exceeds 'rate'. */
	clkctrl_frac = (clk->parent->rate * 18 + rate - 1) / rate;
	if (clkctrl_frac < 18 || clkctrl_frac > 35) {
		ret = -EINVAL;
		goto out;
	}

	/* Replace only the 5-bit fractional-divider field. */
	reg_frac = __raw_readl(clk->scale_reg);
	reg_frac &= ~(mask << clk->scale_shift);
	__raw_writel(reg_frac | (clkctrl_frac << clk->scale_shift),
		     clk->scale_reg);

	/* Bounded busy-wait for the hardware to take the new divider. */
	if (clk->busy_reg) {
		for (i = 10000; i; i--)
			if (!clk_is_busy(clk))
				break;
		if (!i)
			ret = -ETIMEDOUT;
		else
			ret = 0;
	}
out:
	return ret;
}
  99. static long io_get_rate(struct clk *clk)
  100. {
  101. long rate = clk->parent->rate * 18;
  102. int mask = 0x1f;
  103. rate /= (__raw_readl(clk->scale_reg) >> clk->scale_shift) & mask;
  104. clk->rate = rate;
  105. return rate;
  106. }
  107. static long per_get_rate(struct clk *clk)
  108. {
  109. long rate = clk->parent->rate;
  110. long div;
  111. const int mask = 0xff;
  112. if (clk->enable_reg &&
  113. !(__raw_readl(clk->enable_reg) & clk->enable_shift))
  114. clk->rate = 0;
  115. else {
  116. div = (__raw_readl(clk->scale_reg) >> clk->scale_shift) & mask;
  117. if (div)
  118. rate /= div;
  119. clk->rate = rate;
  120. }
  121. return clk->rate;
  122. }
/*
 * Set a standard peripheral clock's 8-bit integer divider.  The write
 * is retried up to 10 times, each time polling the busy bit; 'i'
 * tracks the poll outcome (-1 = no busy_reg, 0 = timed out, >0 = done).
 * Returns 0 on success, -EINVAL for an impossible divider, -ETIMEDOUT
 * if the divider transfer never completes.
 */
static int per_set_rate(struct clk *clk, u32 rate)
{
	int ret = -EINVAL;
	/* Round the divider up so we never overshoot the requested rate. */
	int div = (clk->parent->rate + rate - 1) / rate;
	u32 reg_frac;
	const int mask = 0xff;
	int try = 10;
	int i = -1;	/* stays -1 (success) when there is no busy_reg */

	if (div == 0 || div > mask)
		goto out;

	reg_frac = __raw_readl(clk->scale_reg);
	reg_frac &= ~(mask << clk->scale_shift);

	while (try--) {
		__raw_writel(reg_frac | (div << clk->scale_shift),
			     clk->scale_reg);
		if (clk->busy_reg) {
			for (i = 10000; i; i--)
				if (!clk_is_busy(clk))
					break;
		}
		/* i != 0: either not busy-tracked or transfer completed. */
		if (i)
			break;
	}

	if (!i)
		ret = -ETIMEDOUT;
	else
		ret = 0;
out:
	if (ret != 0)
		printk(KERN_ERR "%s: error %d\n", __func__, ret);
	return ret;
}
  155. static long lcdif_get_rate(struct clk *clk)
  156. {
  157. long rate = clk->parent->rate;
  158. long div;
  159. const int mask = 0xff;
  160. div = (__raw_readl(clk->scale_reg) >> clk->scale_shift) & mask;
  161. if (div) {
  162. rate /= div;
  163. div = (__raw_readl(REGS_CLKCTRL_BASE + HW_CLKCTRL_FRAC) &
  164. BM_CLKCTRL_FRAC_PIXFRAC) >> BP_CLKCTRL_FRAC_PIXFRAC;
  165. rate /= div;
  166. }
  167. clk->rate = rate;
  168. return rate;
  169. }
/*
 * Program the LCDIF pixel clock: search all (PIXFRAC, PIX_DIV) pairs
 * for the fastest cycle time that still satisfies the requested rate,
 * then program the fractional divider, ungate the PFD, program the
 * integer divider, wait for the transfer, and finally switch the pix
 * mux away from bypass.  Returns -EINVAL when no divider pair fits,
 * -ETIMEDOUT when the divider transfer hangs.
 */
static int lcdif_set_rate(struct clk *clk, u32 rate)
{
	int ret = 0;
	/*
	 * On 3700, we can get most timings exact by modifying ref_pix
	 * and the divider, but keeping the phase timings at 1 (2
	 * phases per cycle).
	 *
	 * ref_pix can be between 480e6*18/35=246.9MHz and 480e6*18/18=480MHz,
	 * which is between 18/(18*480e6)=2.084ns and 35/(18*480e6)=4.050ns.
	 *
	 * ns_cycle >= 2*18e3/(18*480) = 25/6
	 * ns_cycle <= 2*35e3/(18*480) = 875/108
	 *
	 * Multiply the ns_cycle by 'div' to lengthen it until it fits the
	 * bounds. This is the divider we'll use after ref_pix.
	 *
	 * 6 * ns_cycle >= 25 * div
	 * 108 * ns_cycle <= 875 * div
	 */
	u32 ns_cycle = 1000000 / rate;	/* requested period, ns (rate in kHz) */
	u32 div, reg_val;
	u32 lowest_result = (u32) -1;	/* best (fastest) cycle time found */
	u32 lowest_div = 0, lowest_fracdiv = 0;

	for (div = 1; div < 256; ++div) {
		u32 fracdiv;
		u32 ps_result;
		int lower_bound = 6 * ns_cycle >= 25 * div;
		int upper_bound = 108 * ns_cycle <= 875 * div;
		/* Divisors only grow; once below the lower bound, stop. */
		if (!lower_bound)
			break;
		if (!upper_bound)
			continue;
		/*
		 * Found a matching div. Calculate fractional divider needed,
		 * rounded up.
		 */
		fracdiv = ((clk->parent->rate / 1000 * 18 / 2) *
			   ns_cycle + 1000 * div - 1) /
			  (1000 * div);
		if (fracdiv < 18 || fracdiv > 35) {
			ret = -EINVAL;
			goto out;
		}
		/* Calculate the actual cycle time this results in */
		ps_result = 6250 * div * fracdiv / 27;
		/* Use the fastest result that doesn't break ns_cycle */
		if (ps_result <= lowest_result) {
			lowest_result = ps_result;
			lowest_div = div;
			lowest_fracdiv = fracdiv;
		}
	}

	/* No divider pair satisfied the bounds. */
	if (div >= 256 || lowest_result == (u32) -1) {
		ret = -EINVAL;
		goto out;
	}

	pr_debug("Programming PFD=%u,DIV=%u ref_pix=%uMHz "
		 "PIXCLK=%uMHz cycle=%u.%03uns\n",
		 lowest_fracdiv, lowest_div,
		 480*18/lowest_fracdiv, 480*18/lowest_fracdiv/lowest_div,
		 lowest_result / 1000, lowest_result % 1000);

	/* Program ref_pix phase fractional divider */
	reg_val = __raw_readl(REGS_CLKCTRL_BASE + HW_CLKCTRL_FRAC);
	reg_val &= ~BM_CLKCTRL_FRAC_PIXFRAC;
	reg_val |= BF(lowest_fracdiv, CLKCTRL_FRAC_PIXFRAC);
	__raw_writel(reg_val, REGS_CLKCTRL_BASE + HW_CLKCTRL_FRAC);

	/* Ungate PFD */
	stmp3xxx_clearl(BM_CLKCTRL_FRAC_CLKGATEPIX,
			REGS_CLKCTRL_BASE + HW_CLKCTRL_FRAC);

	/* Program pix divider */
	reg_val = __raw_readl(clk->scale_reg);
	reg_val &= ~(BM_CLKCTRL_PIX_DIV | BM_CLKCTRL_PIX_CLKGATE);
	reg_val |= BF(lowest_div, CLKCTRL_PIX_DIV);
	__raw_writel(reg_val, clk->scale_reg);

	/* Wait for divider update */
	if (clk->busy_reg) {
		int i;
		for (i = 10000; i; i--)
			if (!clk_is_busy(clk))
				break;
		if (!i) {
			ret = -ETIMEDOUT;
			goto out;
		}
	}

	/* Switch to ref_pix source */
	reg_val = __raw_readl(REGS_CLKCTRL_BASE + HW_CLKCTRL_CLKSEQ);
	reg_val &= ~BM_CLKCTRL_CLKSEQ_BYPASS_PIX;
	__raw_writel(reg_val, REGS_CLKCTRL_BASE + HW_CLKCTRL_CLKSEQ);
out:
	return ret;
}
/*
 * Set the CPU clock rate (kHz).  24000 switches the CPU onto the
 * 24 MHz crystal bypass; anything higher searches all (frac, div)
 * pairs for the closest achievable pll*18/frac/div, then reprograms
 * the dividers while the CPU temporarily runs from the crystal.
 * Returns -EINVAL for rates below 24000 or no acceptable divider
 * pair, -ETIMEDOUT if the CPU divider never settles.
 */
static int cpu_set_rate(struct clk *clk, u32 rate)
{
	u32 reg_val;

	if (rate < 24000)
		return -EINVAL;
	else if (rate == 24000) {
		/* switch to the 24M source */
		clk_set_parent(clk, &osc_24M);
	} else {
		int i;
		u32 clkctrl_cpu = 1;
		u32 c = clkctrl_cpu;
		u32 clkctrl_frac = 1;
		u32 val;

		/*
		 * For each integer divider c, pick the fractional divider f
		 * that best approximates 'rate' and keep the pair whose
		 * error s2 beats the current best s1.
		 */
		for ( ; c < 0x40; c++) {
			u32 f = (pll_clk.rate*18/c + rate/2) / rate;
			int s1, s2;
			if (f < 18 || f > 35)
				continue;
			s1 = pll_clk.rate*18/clkctrl_frac/clkctrl_cpu - rate;
			s2 = pll_clk.rate*18/c/f - rate;
			pr_debug("%s: s1 %d, s2 %d\n", __func__, s1, s2);
			if (abs(s1) > abs(s2)) {
				clkctrl_cpu = c;
				clkctrl_frac = f;
			}
			/* Exact match: stop searching. */
			if (s2 == 0)
				break;
		};
		pr_debug("%s: clkctrl_cpu %d, clkctrl_frac %d\n", __func__,
			 clkctrl_cpu, clkctrl_frac);
		/* Exhausted the search: reject if still too far off. */
		if (c == 0x40) {
			int d = pll_clk.rate*18/clkctrl_frac/clkctrl_cpu -
				rate;
			if (abs(d) > 100 ||
			    clkctrl_frac < 18 || clkctrl_frac > 35)
				return -EINVAL;
		}

		/* 4.6.2 */
		val = __raw_readl(clk->scale_reg);
		val &= ~(0x3f << clk->scale_shift);
		val |= clkctrl_frac;
		/* Run from the crystal while the dividers change. */
		clk_set_parent(clk, &osc_24M);
		udelay(10);
		__raw_writel(val, clk->scale_reg);
		/* ungate */
		__raw_writel(1<<7, clk->scale_reg + 8);
		/* write clkctrl_cpu */
		clk->saved_div = clkctrl_cpu;
		reg_val = __raw_readl(REGS_CLKCTRL_BASE + HW_CLKCTRL_CPU);
		reg_val &= ~0x3F;
		reg_val |= clkctrl_cpu;
		__raw_writel(reg_val, REGS_CLKCTRL_BASE + HW_CLKCTRL_CPU);
		/* Bounded wait for the divider transfer to finish. */
		for (i = 10000; i; i--)
			if (!clk_is_busy(clk))
				break;
		if (!i) {
			printk(KERN_ERR "couldn't set up CPU divisor\n");
			return -ETIMEDOUT;
		}
		/* Switch back to the PLL at the new divider settings. */
		clk_set_parent(clk, &pll_clk);
		clk->saved_div = 0;
		udelay(10);
	}
	return 0;
}
  329. static long cpu_get_rate(struct clk *clk)
  330. {
  331. long rate = clk->parent->rate * 18;
  332. rate /= (__raw_readl(clk->scale_reg) >> clk->scale_shift) & 0x3f;
  333. rate /= __raw_readl(REGS_CLKCTRL_BASE + HW_CLKCTRL_CPU) & 0x3f;
  334. rate = ((rate + 9) / 10) * 10;
  335. clk->rate = rate;
  336. return rate;
  337. }
/*
 * Round a requested CPU rate (kHz) to the nearest achievable value.
 * Rates at or below 24000 map to the 24 MHz bypass; otherwise search
 * integer dividers for a fractional divider in [18, 35] matching the
 * rate to 10 kHz, clamping frac into range if no exact match is found.
 */
static long cpu_round_rate(struct clk *clk, u32 rate)
{
	unsigned long r = 0;

	if (rate <= 24000)
		r = 24000;
	else {
		u32 clkctrl_cpu = 1;
		u32 clkctrl_frac;
		do {
			/* Best frac for this divider, rounded to nearest. */
			clkctrl_frac =
				(pll_clk.rate*18 / clkctrl_cpu + rate/2) / rate;
			if (clkctrl_frac > 35)
				continue;
			/* Accept when equal at 10 kHz granularity. */
			if (pll_clk.rate*18 / clkctrl_frac / clkctrl_cpu/10 ==
			    rate / 10)
				break;
		} while (pll_clk.rate / 2 >= clkctrl_cpu++ * rate);
		/* Undo the final post-increment overshoot. */
		if (pll_clk.rate / 2 < (clkctrl_cpu - 1) * rate)
			clkctrl_cpu--;
		pr_debug("%s: clkctrl_cpu %d, clkctrl_frac %d\n", __func__,
			 clkctrl_cpu, clkctrl_frac);
		/* Clamp frac into the hardware's legal [18, 35] window. */
		if (clkctrl_frac < 18)
			clkctrl_frac = 18;
		if (clkctrl_frac > 35)
			clkctrl_frac = 35;
		r = pll_clk.rate * 18;
		r /= clkctrl_frac;
		r /= clkctrl_cpu;
		/* Round up to a multiple of 10 kHz, as cpu_get_rate does. */
		r = 10 * ((r + 9) / 10);
	}
	return r;
}
  370. static long emi_get_rate(struct clk *clk)
  371. {
  372. long rate = clk->parent->rate * 18;
  373. rate /= (__raw_readl(clk->scale_reg) >> clk->scale_shift) & 0x3f;
  374. rate /= __raw_readl(REGS_CLKCTRL_BASE + HW_CLKCTRL_EMI) & 0x3f;
  375. clk->rate = rate;
  376. return rate;
  377. }
/*
 * Reparent a clock via its CLKSEQ bypass bit.  Writing at bypass_reg+4
 * (SET register) selects the 24 MHz bypass; +8 (CLR register) selects
 * the PLL-derived source.  On STMP378x, moving the CPU clock also
 * requires re-juggling the HBUS and CPU integer dividers around the
 * switch (the previous CPU divider is parked in saved_div).
 */
static int clkseq_set_parent(struct clk *clk, struct clk *parent)
{
	int ret = -EINVAL;
	int shift = 8;	/* CLR offset: route from the PLL */

	/* bypass? */
	if (parent == &osc_24M)
		shift = 4;	/* SET offset: route from the crystal */

	if (clk->bypass_reg) {
#ifdef CONFIG_ARCH_STMP378X
		u32 hbus_val, cpu_val;
		if (clk == &cpu_clk && shift == 4) {
			/* CPU -> bypass: drop dividers to 1, save the old. */
			hbus_val = __raw_readl(REGS_CLKCTRL_BASE +
					       HW_CLKCTRL_HBUS);
			cpu_val = __raw_readl(REGS_CLKCTRL_BASE +
					      HW_CLKCTRL_CPU);
			hbus_val &= ~(BM_CLKCTRL_HBUS_DIV_FRAC_EN |
				      BM_CLKCTRL_HBUS_DIV);
			clk->saved_div = cpu_val & BM_CLKCTRL_CPU_DIV_CPU;
			cpu_val &= ~BM_CLKCTRL_CPU_DIV_CPU;
			cpu_val |= 1;
			if (machine_is_stmp378x()) {
				__raw_writel(hbus_val,
					REGS_CLKCTRL_BASE + HW_CLKCTRL_HBUS);
				__raw_writel(cpu_val,
					REGS_CLKCTRL_BASE + HW_CLKCTRL_CPU);
				/* Force hclk rate to be recomputed. */
				hclk.rate = 0;
			}
		} else if (clk == &cpu_clk && shift == 8) {
			/* CPU -> PLL: restore the saved divider (or 2). */
			hbus_val = __raw_readl(REGS_CLKCTRL_BASE +
					       HW_CLKCTRL_HBUS);
			cpu_val = __raw_readl(REGS_CLKCTRL_BASE +
					      HW_CLKCTRL_CPU);
			hbus_val &= ~(BM_CLKCTRL_HBUS_DIV_FRAC_EN |
				      BM_CLKCTRL_HBUS_DIV);
			hbus_val |= 2;
			cpu_val &= ~BM_CLKCTRL_CPU_DIV_CPU;
			if (clk->saved_div)
				cpu_val |= clk->saved_div;
			else
				cpu_val |= 2;
			if (machine_is_stmp378x()) {
				__raw_writel(hbus_val,
					REGS_CLKCTRL_BASE + HW_CLKCTRL_HBUS);
				__raw_writel(cpu_val,
					REGS_CLKCTRL_BASE + HW_CLKCTRL_CPU);
				hclk.rate = 0;
			}
		}
#endif
		/* Flip the mux through the SET/CLR shadow register. */
		__raw_writel(1 << clk->bypass_shift, clk->bypass_reg + shift);
		ret = 0;
	}
	return ret;
}
  432. static int hbus_set_rate(struct clk *clk, u32 rate)
  433. {
  434. u8 div = 0;
  435. int is_frac = 0;
  436. u32 clkctrl_hbus;
  437. struct clk *parent = clk->parent;
  438. pr_debug("%s: rate %d, parent rate %d\n", __func__, rate,
  439. parent->rate);
  440. if (rate > parent->rate)
  441. return -EINVAL;
  442. if (((parent->rate + rate/2) / rate) * rate != parent->rate &&
  443. parent->rate / rate < 32) {
  444. pr_debug("%s: switching to fractional mode\n", __func__);
  445. is_frac = 1;
  446. }
  447. if (is_frac)
  448. div = (32 * rate + parent->rate / 2) / parent->rate;
  449. else
  450. div = (parent->rate + rate - 1) / rate;
  451. pr_debug("%s: div calculated is %d\n", __func__, div);
  452. if (!div || div > 0x1f)
  453. return -EINVAL;
  454. clk_set_parent(&cpu_clk, &osc_24M);
  455. udelay(10);
  456. clkctrl_hbus = __raw_readl(clk->scale_reg);
  457. clkctrl_hbus &= ~0x3f;
  458. clkctrl_hbus |= div;
  459. clkctrl_hbus |= (is_frac << 5);
  460. __raw_writel(clkctrl_hbus, clk->scale_reg);
  461. if (clk->busy_reg) {
  462. int i;
  463. for (i = 10000; i; i--)
  464. if (!clk_is_busy(clk))
  465. break;
  466. if (!i) {
  467. printk(KERN_ERR "couldn't set up CPU divisor\n");
  468. return -ETIMEDOUT;
  469. }
  470. }
  471. clk_set_parent(&cpu_clk, &pll_clk);
  472. __raw_writel(clkctrl_hbus, clk->scale_reg);
  473. udelay(10);
  474. return 0;
  475. }
  476. static long hbus_get_rate(struct clk *clk)
  477. {
  478. long rate = clk->parent->rate;
  479. if (__raw_readl(clk->scale_reg) & 0x20) {
  480. rate *= __raw_readl(clk->scale_reg) & 0x1f;
  481. rate /= 32;
  482. } else
  483. rate /= __raw_readl(clk->scale_reg) & 0x1f;
  484. clk->rate = rate;
  485. return rate;
  486. }
  487. static int xbus_set_rate(struct clk *clk, u32 rate)
  488. {
  489. u16 div = 0;
  490. u32 clkctrl_xbus;
  491. pr_debug("%s: rate %d, parent rate %d\n", __func__, rate,
  492. clk->parent->rate);
  493. div = (clk->parent->rate + rate - 1) / rate;
  494. pr_debug("%s: div calculated is %d\n", __func__, div);
  495. if (!div || div > 0x3ff)
  496. return -EINVAL;
  497. clkctrl_xbus = __raw_readl(clk->scale_reg);
  498. clkctrl_xbus &= ~0x3ff;
  499. clkctrl_xbus |= div;
  500. __raw_writel(clkctrl_xbus, clk->scale_reg);
  501. if (clk->busy_reg) {
  502. int i;
  503. for (i = 10000; i; i--)
  504. if (!clk_is_busy(clk))
  505. break;
  506. if (!i) {
  507. printk(KERN_ERR "couldn't set up xbus divisor\n");
  508. return -ETIMEDOUT;
  509. }
  510. }
  511. return 0;
  512. }
  513. static long xbus_get_rate(struct clk *clk)
  514. {
  515. long rate = clk->parent->rate;
  516. rate /= __raw_readl(clk->scale_reg) & 0x3ff;
  517. clk->rate = rate;
  518. return rate;
  519. }
/* Clock ops */

/* Full-featured peripheral clocks: gate + divider + CLKSEQ mux. */
static struct clk_ops std_ops = {
	.enable = std_clk_enable,
	.disable = std_clk_disable,
	.get_rate = per_get_rate,
	.set_rate = per_set_rate,
	.set_parent = clkseq_set_parent,
};

/* Gate-only clocks with no rate control. */
static struct clk_ops min_ops = {
	.enable = std_clk_enable,
	.disable = std_clk_disable,
};

/* CPU core clock: adds round_rate and the frac/div set_rate dance. */
static struct clk_ops cpu_ops = {
	.enable = std_clk_enable,
	.disable = std_clk_disable,
	.get_rate = cpu_get_rate,
	.set_rate = cpu_set_rate,
	.round_rate = cpu_round_rate,
	.set_parent = clkseq_set_parent,
};

/* IO reference clock: 18/N fractional divider only. */
static struct clk_ops io_ops = {
	.enable = std_clk_enable,
	.disable = std_clk_disable,
	.get_rate = io_get_rate,
	.set_rate = io_set_rate,
};

/* AHB bus clock: integer or *div/32 fractional divider, no gate. */
static struct clk_ops hbus_ops = {
	.get_rate = hbus_get_rate,
	.set_rate = hbus_set_rate,
};

/* Crystal-derived bus clock: 10-bit integer divider, no gate. */
static struct clk_ops xbus_ops = {
	.get_rate = xbus_get_rate,
	.set_rate = xbus_set_rate,
};

/* LCD pixel clock: PFD + divider search in lcdif_set_rate. */
static struct clk_ops lcdif_ops = {
	.enable = std_clk_enable,
	.disable = std_clk_disable,
	.get_rate = lcdif_get_rate,
	.set_rate = lcdif_set_rate,
	.set_parent = clkseq_set_parent,
};

/* EMI (memory) clock: read-only rate; reprogramming is done elsewhere. */
static struct clk_ops emi_ops = {
	.get_rate = emi_get_rate,
};
/* List of on-chip clocks */

/* 24 MHz crystal oscillator: the root of the tree (rates are in kHz). */
static struct clk osc_24M = {
	.flags = FIXED_RATE | ENABLED,
	.rate = 24000,
};

/* 480 MHz PLL, derived from the crystal; gate lives in PLLCTRL0. */
static struct clk pll_clk = {
	.parent = &osc_24M,
	.enable_reg = REGS_CLKCTRL_BASE + HW_CLKCTRL_PLLCTRL0,
	.enable_shift = 16,
	.enable_wait = 10,
	.flags = FIXED_RATE | ENABLED,
	.rate = 480000,
	.ops = &min_ops,
};

/* CPU core clock: frac divider in FRAC, int divider/busy in CPU reg. */
static struct clk cpu_clk = {
	.parent = &pll_clk,
	.scale_reg = REGS_CLKCTRL_BASE + HW_CLKCTRL_FRAC,
	.scale_shift = 0,
	.bypass_reg = REGS_CLKCTRL_BASE + HW_CLKCTRL_CLKSEQ,
	.bypass_shift = 7,
	.busy_reg = REGS_CLKCTRL_BASE + HW_CLKCTRL_CPU,
	.busy_bit = 28,
	.flags = RATE_PROPAGATES | ENABLED,
	.ops = &cpu_ops,
};

/* IO reference clock: gate and 18/N frac divider share the FRAC reg. */
static struct clk io_clk = {
	.parent = &pll_clk,
	.enable_reg = REGS_CLKCTRL_BASE + HW_CLKCTRL_FRAC,
	.enable_shift = 31,
	.enable_negate = 1,
	.scale_reg = REGS_CLKCTRL_BASE + HW_CLKCTRL_FRAC,
	.scale_shift = 24,
	.flags = RATE_PROPAGATES | ENABLED,
	.ops = &io_ops,
};

/* AHB bus clock, divided down from the CPU clock. */
static struct clk hclk = {
	.parent = &cpu_clk,
	.scale_reg = REGS_CLKCTRL_BASE + HW_CLKCTRL_HBUS,
	.bypass_reg = REGS_CLKCTRL_BASE + HW_CLKCTRL_CLKSEQ,
	.bypass_shift = 7,
	.busy_reg = REGS_CLKCTRL_BASE + HW_CLKCTRL_HBUS,
	.busy_bit = 29,
	.flags = RATE_PROPAGATES | ENABLED,
	.ops = &hbus_ops,
};

/* Crystal-derived peripheral bus clock. */
static struct clk xclk = {
	.parent = &osc_24M,
	.scale_reg = REGS_CLKCTRL_BASE + HW_CLKCTRL_XBUS,
	.busy_reg = REGS_CLKCTRL_BASE + HW_CLKCTRL_XBUS,
	.busy_bit = 31,
	.flags = RATE_PROPAGATES | ENABLED,
	.ops = &xbus_ops,
};

/* Simple xclk-gated peripherals: gates live in XTAL (active low). */
static struct clk uart_clk = {
	.parent = &xclk,
	.enable_reg = REGS_CLKCTRL_BASE + HW_CLKCTRL_XTAL,
	.enable_shift = 31,
	.enable_negate = 1,
	.flags = ENABLED,
	.ops = &min_ops,
};

static struct clk audio_clk = {
	.parent = &xclk,
	.enable_reg = REGS_CLKCTRL_BASE + HW_CLKCTRL_XTAL,
	.enable_shift = 30,
	.enable_negate = 1,
	.ops = &min_ops,
};

static struct clk pwm_clk = {
	.parent = &xclk,
	.enable_reg = REGS_CLKCTRL_BASE + HW_CLKCTRL_XTAL,
	.enable_shift = 29,
	.enable_negate = 1,
	.ops = &min_ops,
};

static struct clk dri_clk = {
	.parent = &xclk,
	.enable_reg = REGS_CLKCTRL_BASE + HW_CLKCTRL_XTAL,
	.enable_shift = 28,
	.enable_negate = 1,
	.ops = &min_ops,
};

static struct clk digctl_clk = {
	.parent = &xclk,
	.enable_reg = REGS_CLKCTRL_BASE + HW_CLKCTRL_XTAL,
	.enable_shift = 27,
	.enable_negate = 1,
	.ops = &min_ops,
};

static struct clk timer_clk = {
	.parent = &xclk,
	.enable_reg = REGS_CLKCTRL_BASE + HW_CLKCTRL_XTAL,
	.enable_shift = 26,
	.enable_negate = 1,
	.flags = ENABLED,
	.ops = &min_ops,
};

/* LCD pixel clock: PFD in FRAC, divider/gate/busy in PIX. */
static struct clk lcdif_clk = {
	.parent = &pll_clk,
	.scale_reg = REGS_CLKCTRL_BASE + HW_CLKCTRL_PIX,
	.busy_reg = REGS_CLKCTRL_BASE + HW_CLKCTRL_PIX,
	.busy_bit = 29,
	.enable_reg = REGS_CLKCTRL_BASE + HW_CLKCTRL_PIX,
	.enable_shift = 31,
	.enable_negate = 1,
	.bypass_reg = REGS_CLKCTRL_BASE + HW_CLKCTRL_CLKSEQ,
	.bypass_shift = 1,
	.flags = NEEDS_SET_PARENT,
	.ops = &lcdif_ops,
};

static struct clk ssp_clk = {
	.parent = &io_clk,
	.scale_reg = REGS_CLKCTRL_BASE + HW_CLKCTRL_SSP,
	.busy_reg = REGS_CLKCTRL_BASE + HW_CLKCTRL_SSP,
	.busy_bit = 29,
	.enable_reg = REGS_CLKCTRL_BASE + HW_CLKCTRL_SSP,
	.enable_shift = 31,
	.bypass_reg = REGS_CLKCTRL_BASE + HW_CLKCTRL_CLKSEQ,
	.bypass_shift = 5,
	.enable_negate = 1,
	.flags = NEEDS_SET_PARENT,
	.ops = &std_ops,
};

static struct clk gpmi_clk = {
	.parent = &io_clk,
	.scale_reg = REGS_CLKCTRL_BASE + HW_CLKCTRL_GPMI,
	.busy_reg = REGS_CLKCTRL_BASE + HW_CLKCTRL_GPMI,
	.busy_bit = 29,
	.enable_reg = REGS_CLKCTRL_BASE + HW_CLKCTRL_GPMI,
	.enable_shift = 31,
	.enable_negate = 1,
	.bypass_reg = REGS_CLKCTRL_BASE + HW_CLKCTRL_CLKSEQ,
	.bypass_shift = 4,
	.flags = NEEDS_SET_PARENT,
	.ops = &std_ops,
};

static struct clk spdif_clk = {
	.parent = &pll_clk,
	.enable_reg = REGS_CLKCTRL_BASE + HW_CLKCTRL_SPDIF,
	.enable_shift = 31,
	.enable_negate = 1,
	.ops = &min_ops,
};

/* Memory interface clock: frac divider in FRAC, busy/gate in EMI. */
static struct clk emi_clk = {
	.parent = &pll_clk,
	.enable_reg = REGS_CLKCTRL_BASE + HW_CLKCTRL_EMI,
	.enable_shift = 31,
	.enable_negate = 1,
	.scale_reg = REGS_CLKCTRL_BASE + HW_CLKCTRL_FRAC,
	.scale_shift = 8,
	.busy_reg = REGS_CLKCTRL_BASE + HW_CLKCTRL_EMI,
	.busy_bit = 28,
	.bypass_reg = REGS_CLKCTRL_BASE + HW_CLKCTRL_CLKSEQ,
	.bypass_shift = 6,
	.flags = ENABLED,
	.ops = &emi_ops,
};

static struct clk ir_clk = {
	.parent = &io_clk,
	.enable_reg = REGS_CLKCTRL_BASE + HW_CLKCTRL_IR,
	.enable_shift = 31,
	.enable_negate = 1,
	.bypass_reg = REGS_CLKCTRL_BASE + HW_CLKCTRL_CLKSEQ,
	.bypass_shift = 3,
	.ops = &min_ops,
};

static struct clk saif_clk = {
	.parent = &pll_clk,
	.scale_reg = REGS_CLKCTRL_BASE + HW_CLKCTRL_SAIF,
	.busy_reg = REGS_CLKCTRL_BASE + HW_CLKCTRL_SAIF,
	.busy_bit = 29,
	.enable_reg = REGS_CLKCTRL_BASE + HW_CLKCTRL_SAIF,
	.enable_shift = 31,
	.enable_negate = 1,
	.bypass_reg = REGS_CLKCTRL_BASE + HW_CLKCTRL_CLKSEQ,
	.bypass_shift = 0,
	.ops = &std_ops,
};

/* USB PHY clock gate, in PLLCTRL0 alongside the PLL power bit. */
static struct clk usb_clk = {
	.parent = &pll_clk,
	.enable_reg = REGS_CLKCTRL_BASE + HW_CLKCTRL_PLLCTRL0,
	.enable_shift = 18,
	.enable_negate = 1,
	.ops = &min_ops,
};
/* list of all the clocks */
/*
 * con_id -> clk lookup table, registered with clkdev at init time and
 * walked by propagate_rate() and clk_init().
 */
static struct clk_lookup onchip_clks[] = {
	{
		.con_id = "osc_24M",
		.clk = &osc_24M,
	}, {
		.con_id = "pll",
		.clk = &pll_clk,
	}, {
		.con_id = "cpu",
		.clk = &cpu_clk,
	}, {
		.con_id = "hclk",
		.clk = &hclk,
	}, {
		.con_id = "xclk",
		.clk = &xclk,
	}, {
		.con_id = "io",
		.clk = &io_clk,
	}, {
		.con_id = "uart",
		.clk = &uart_clk,
	}, {
		.con_id = "audio",
		.clk = &audio_clk,
	}, {
		.con_id = "pwm",
		.clk = &pwm_clk,
	}, {
		.con_id = "dri",
		.clk = &dri_clk,
	}, {
		.con_id = "digctl",
		.clk = &digctl_clk,
	}, {
		.con_id = "timer",
		.clk = &timer_clk,
	}, {
		.con_id = "lcdif",
		.clk = &lcdif_clk,
	}, {
		.con_id = "ssp",
		.clk = &ssp_clk,
	}, {
		.con_id = "gpmi",
		.clk = &gpmi_clk,
	}, {
		.con_id = "spdif",
		.clk = &spdif_clk,
	}, {
		.con_id = "emi",
		.clk = &emi_clk,
	}, {
		.con_id = "ir",
		.clk = &ir_clk,
	}, {
		.con_id = "saif",
		.clk = &saif_clk,
	}, {
		.con_id = "usb",
		.clk = &usb_clk,
	},
};
  813. static int __init propagate_rate(struct clk *clk)
  814. {
  815. struct clk_lookup *cl;
  816. for (cl = onchip_clks; cl < onchip_clks + ARRAY_SIZE(onchip_clks);
  817. cl++) {
  818. if (unlikely(!clk_good(cl->clk)))
  819. continue;
  820. if (cl->clk->parent == clk && cl->clk->ops->get_rate) {
  821. cl->clk->ops->get_rate(cl->clk);
  822. if (cl->clk->flags & RATE_PROPAGATES)
  823. propagate_rate(cl->clk);
  824. }
  825. }
  826. return 0;
  827. }
  828. /* Exported API */
  829. unsigned long clk_get_rate(struct clk *clk)
  830. {
  831. if (unlikely(!clk_good(clk)))
  832. return 0;
  833. if (clk->rate != 0)
  834. return clk->rate;
  835. if (clk->ops->get_rate != NULL)
  836. return clk->ops->get_rate(clk);
  837. return clk_get_rate(clk->parent);
  838. }
  839. EXPORT_SYMBOL(clk_get_rate);
  840. long clk_round_rate(struct clk *clk, unsigned long rate)
  841. {
  842. if (unlikely(!clk_good(clk)))
  843. return 0;
  844. if (clk->ops->round_rate)
  845. return clk->ops->round_rate(clk, rate);
  846. return 0;
  847. }
  848. EXPORT_SYMBOL(clk_round_rate);
  849. static inline int close_enough(long rate1, long rate2)
  850. {
  851. return rate1 && !((rate2 - rate1) * 1000 / rate1);
  852. }
  853. int clk_set_rate(struct clk *clk, unsigned long rate)
  854. {
  855. int ret = -EINVAL;
  856. if (unlikely(!clk_good(clk)))
  857. goto out;
  858. if (clk->flags & FIXED_RATE || !clk->ops->set_rate)
  859. goto out;
  860. else if (!close_enough(clk->rate, rate)) {
  861. ret = clk->ops->set_rate(clk, rate);
  862. if (ret < 0)
  863. goto out;
  864. clk->rate = rate;
  865. if (clk->flags & RATE_PROPAGATES)
  866. propagate_rate(clk);
  867. } else
  868. ret = 0;
  869. out:
  870. return ret;
  871. }
  872. EXPORT_SYMBOL(clk_set_rate);
  873. int clk_enable(struct clk *clk)
  874. {
  875. unsigned long clocks_flags;
  876. if (unlikely(!clk_good(clk)))
  877. return -EINVAL;
  878. if (clk->parent)
  879. clk_enable(clk->parent);
  880. spin_lock_irqsave(&clocks_lock, clocks_flags);
  881. clk->usage++;
  882. if (clk->ops && clk->ops->enable)
  883. clk->ops->enable(clk);
  884. spin_unlock_irqrestore(&clocks_lock, clocks_flags);
  885. return 0;
  886. }
  887. EXPORT_SYMBOL(clk_enable);
  888. static void local_clk_disable(struct clk *clk)
  889. {
  890. if (unlikely(!clk_good(clk)))
  891. return;
  892. if (clk->usage == 0 && clk->ops->disable)
  893. clk->ops->disable(clk);
  894. if (clk->parent)
  895. local_clk_disable(clk->parent);
  896. }
  897. void clk_disable(struct clk *clk)
  898. {
  899. unsigned long clocks_flags;
  900. if (unlikely(!clk_good(clk)))
  901. return;
  902. spin_lock_irqsave(&clocks_lock, clocks_flags);
  903. if ((--clk->usage) == 0 && clk->ops->disable)
  904. clk->ops->disable(clk);
  905. spin_unlock_irqrestore(&clocks_lock, clocks_flags);
  906. if (clk->parent)
  907. clk_disable(clk->parent);
  908. }
  909. EXPORT_SYMBOL(clk_disable);
/* Some additional API */

/*
 * Switch a clock onto a new parent and transfer this clock's usage
 * count between the old and new parents.  local_clk_disable() is run
 * on each parent so that a parent left with zero users is gated.
 * Returns -ENODEV for invalid clocks or clocks without a set_parent
 * op, otherwise the op's result.
 */
int clk_set_parent(struct clk *clk, struct clk *parent)
{
	int ret = -ENODEV;
	unsigned long clocks_flags;

	if (unlikely(!clk_good(clk)))
		goto out;
	if (!clk->ops->set_parent)
		goto out;

	spin_lock_irqsave(&clocks_lock, clocks_flags);
	ret = clk->ops->set_parent(clk, parent);
	if (!ret) {
		/* disable if usage count is 0 */
		local_clk_disable(parent);
		/* Move our references from the old parent to the new one. */
		parent->usage += clk->usage;
		clk->parent->usage -= clk->usage;
		/* disable if new usage count is 0 */
		local_clk_disable(clk->parent);
		clk->parent = parent;
	}
	spin_unlock_irqrestore(&clocks_lock, clocks_flags);
out:
	return ret;
}
EXPORT_SYMBOL(clk_set_parent);
  935. struct clk *clk_get_parent(struct clk *clk)
  936. {
  937. if (unlikely(!clk_good(clk)))
  938. return NULL;
  939. return clk->parent;
  940. }
  941. EXPORT_SYMBOL(clk_get_parent);
/*
 * Boot-time initialization: for every registered clock, apply its
 * ENABLED/gated state, optionally reprogram its rate, read back or
 * propagate the current rate, re-assert its parent where the mux
 * needs it, and register the lookup with clkdev.
 */
static int __init clk_init(void)
{
	struct clk_lookup *cl;
	struct clk_ops *ops;

	spin_lock_init(&clocks_lock);
	for (cl = onchip_clks; cl < onchip_clks + ARRAY_SIZE(onchip_clks);
	     cl++) {
		/* Match the hardware gate to the declared boot state. */
		if (cl->clk->flags & ENABLED)
			clk_enable(cl->clk);
		else
			local_clk_disable(cl->clk);
		ops = cl->clk->ops;
		/* Re-write the declared rate into hardware if requested. */
		if ((cl->clk->flags & NEEDS_INITIALIZATION) &&
		    ops && ops->set_rate)
			ops->set_rate(cl->clk, cl->clk->rate);
		if (cl->clk->flags & FIXED_RATE) {
			/* Fixed clocks keep .rate; push it to children. */
			if (cl->clk->flags & RATE_PROPAGATES)
				propagate_rate(cl->clk);
		} else {
			/* Otherwise cache the rate currently in hardware. */
			if (ops && ops->get_rate)
				ops->get_rate(cl->clk);
		}
		/* Some muxes must be programmed even for the default parent. */
		if (cl->clk->flags & NEEDS_SET_PARENT) {
			if (ops && ops->set_parent)
				ops->set_parent(cl->clk, cl->clk->parent);
		}
		clkdev_add(cl);
	}
	return 0;
}
arch_initcall(clk_init);