clock.c
/*
 *  linux/arch/arm/mach-omap2/clock.c
 *
 *  Copyright (C) 2005 Texas Instruments Inc.
 *  Richard Woodruff <r-woodruff2@ti.com>
 *  Created for OMAP2.
 *
 *  Cleaned up and modified to use omap shared clock framework by
 *  Tony Lindgren <tony@atomide.com>
 *
 *  Based on omap1 clock.c, Copyright (C) 2004 - 2005 Nokia corporation
 *  Written by Tuukka Tikkanen <tuukka.tikkanen@elektrobit.com>
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License version 2 as
 *  published by the Free Software Foundation.
 */
#include <linux/config.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/device.h>
#include <linux/list.h>
#include <linux/errno.h>
#include <linux/delay.h>

#include <asm/io.h>

#include <asm/hardware/clock.h>
#include <asm/arch/clock.h>
#include <asm/arch/sram.h>
#include <asm/arch/prcm.h>

#include "clock.h"

//#define DOWN_VARIABLE_DPLL 1		/* Experimental */

static struct prcm_config *curr_prcm_set;
static struct memory_timings mem_timings;
static u32 curr_perf_level = PRCM_FULL_SPEED;

/*-------------------------------------------------------------------------
 * Omap2 specific clock functions
 *-------------------------------------------------------------------------*/

/* Recalculate SYST_CLK */
static void omap2_sys_clk_recalc(struct clk * clk)
{
        u32 div = PRCM_CLKSRC_CTRL;
        div &= (1 << 7) | (1 << 6);     /* Test if ext clk divided by 1 or 2 */
        div >>= clk->rate_offset;
        clk->rate = (clk->parent->rate / div);
        propagate_rate(clk);
}
static u32 omap2_get_dpll_rate(struct clk * tclk)
{
        int dpll_clk, dpll_mult, dpll_div, amult;

        dpll_mult = (CM_CLKSEL1_PLL >> 12) & 0x03ff;    /* 10 bits */
        dpll_div = (CM_CLKSEL1_PLL >> 8) & 0x0f;        /* 4 bits */
        dpll_clk = (tclk->parent->rate * dpll_mult) / (dpll_div + 1);
        amult = CM_CLKSEL2_PLL & 0x3;
        dpll_clk *= amult;

        return dpll_clk;
}
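
/*
 * For reference, the rate computed above is
 *
 *      dpll_clk = (parent_rate * M) / (N + 1) * amult
 *
 * where M is the 10-bit multiplier in CM_CLKSEL1_PLL[21:12], N the 4-bit
 * divider in CM_CLKSEL1_PLL[11:8] and amult the x1/x2 output selection in
 * CM_CLKSEL2_PLL. As a purely illustrative example, a 12 MHz sys clock
 * with M = 100, N = 3 and amult = 2 yields (12 * 100) / 4 * 2 = 600 MHz.
 */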
static void omap2_followparent_recalc(struct clk *clk)
{
        followparent_recalc(clk);
}

static void omap2_propagate_rate(struct clk * clk)
{
        if (!(clk->flags & RATE_FIXED))
                clk->rate = clk->parent->rate;

        propagate_rate(clk);
}
/* Enable an APLL if off */
static void omap2_clk_fixed_enable(struct clk *clk)
{
        u32 cval, i = 0;

        if (clk->enable_bit == 0xff)            /* Parent will do it */
                return;

        cval = CM_CLKEN_PLL;

        if ((cval & (0x3 << clk->enable_bit)) == (0x3 << clk->enable_bit))
                return;

        cval &= ~(0x3 << clk->enable_bit);
        cval |= (0x3 << clk->enable_bit);
        CM_CLKEN_PLL = cval;

        if (clk == &apll96_ck)
                cval = (1 << 8);
        else if (clk == &apll54_ck)
                cval = (1 << 6);

        while (!(CM_IDLEST_CKGEN & cval)) {     /* Wait for lock */
                ++i;
                udelay(1);
                if (i == 100000)
                        break;
        }
}
/* Enables clock without considering parent dependencies or use count
 * REVISIT: Maybe change this to use clk->enable like on omap1?
 */
static int omap2_clk_enable(struct clk * clk)
{
        u32 regval32;

        if (clk->flags & ALWAYS_ENABLED)
                return 0;

        if (unlikely(clk->enable_reg == 0)) {
                printk(KERN_ERR "clock.c: Enable for %s without enable code\n",
                       clk->name);
                return 0;
        }

        if (clk->enable_reg == (void __iomem *)&CM_CLKEN_PLL) {
                omap2_clk_fixed_enable(clk);
                return 0;
        }

        regval32 = __raw_readl(clk->enable_reg);
        regval32 |= (1 << clk->enable_bit);
        __raw_writel(regval32, clk->enable_reg);

        return 0;
}
/* Stop APLL */
static void omap2_clk_fixed_disable(struct clk *clk)
{
        u32 cval;

        if (clk->enable_bit == 0xff)            /* let parent off do it */
                return;

        cval = CM_CLKEN_PLL;
        cval &= ~(0x3 << clk->enable_bit);
        CM_CLKEN_PLL = cval;
}

/* Disables clock without considering parent dependencies or use count */
static void omap2_clk_disable(struct clk *clk)
{
        u32 regval32;

        if (clk->enable_reg == 0)
                return;

        if (clk->enable_reg == (void __iomem *)&CM_CLKEN_PLL) {
                omap2_clk_fixed_disable(clk);
                return;
        }

        regval32 = __raw_readl(clk->enable_reg);
        regval32 &= ~(1 << clk->enable_bit);
        __raw_writel(regval32, clk->enable_reg);
}
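
/*
 * The two helpers below layer simple reference counting on top of the raw
 * enable/disable operations above: the first omap2_clk_use() call enables
 * the clock (recursing up the parent chain first), and only the matching
 * final omap2_clk_unuse() turns it back off again.
 */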
static int omap2_clk_use(struct clk *clk)
{
        int ret = 0;

        if (clk->usecount++ == 0) {
                if (likely((u32)clk->parent))
                        ret = omap2_clk_use(clk->parent);

                if (unlikely(ret != 0)) {
                        clk->usecount--;
                        return ret;
                }

                ret = omap2_clk_enable(clk);

                if (unlikely(ret != 0) && clk->parent) {
                        omap2_clk_unuse(clk->parent);
                        clk->usecount--;
                }
        }

        return ret;
}

static void omap2_clk_unuse(struct clk *clk)
{
        if (clk->usecount > 0 && !(--clk->usecount)) {
                omap2_clk_disable(clk);
                if (likely((u32)clk->parent))
                        omap2_clk_unuse(clk->parent);
        }
}
/*
 * Uses the current prcm set to tell if a rate is valid.
 * You can go slower, but not faster within a given rate set.
 */
static u32 omap2_dpll_round_rate(unsigned long target_rate)
{
        u32 high, low;

        if ((CM_CLKSEL2_PLL & 0x3) == 1) {      /* DPLL clockout */
                high = curr_prcm_set->dpll_speed * 2;
                low = curr_prcm_set->dpll_speed;
        } else {                                /* DPLL clockout x 2 */
                high = curr_prcm_set->dpll_speed;
                low = curr_prcm_set->dpll_speed / 2;
        }

#ifdef DOWN_VARIABLE_DPLL
        if (target_rate > high)
                return high;
        else
                return target_rate;
#else
        if (target_rate > low)
                return high;
        else
                return low;
#endif
}
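
/*
 * With DOWN_VARIABLE_DPLL left undefined (the default above), only the two
 * operating points of the current PRCM set are reachable: any request above
 * 'low' rounds to 'high', everything else rounds to 'low'. As an
 * illustration, if the current set has a dpll_speed of 300 MHz and the x2
 * output is selected, requests above 150 MHz round to 300 MHz and all other
 * requests round to 150 MHz.
 */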
/*
 * Used for clocks that are part of CLKSEL_xyz governed clocks.
 * REVISIT: Maybe change to use clk->enable() functions like on omap1?
 */
static void omap2_clksel_recalc(struct clk * clk)
{
        u32 fixed = 0, div = 0;

        if (clk == &dpll_ck) {
                clk->rate = omap2_get_dpll_rate(clk);
                fixed = 1;
                div = 0;
        }

        if (clk == &iva1_mpu_int_ifck) {
                div = 2;
                fixed = 1;
        }

        if ((clk == &dss1_fck) && ((CM_CLKSEL1_CORE & (0x1f << 8)) == 0)) {
                clk->rate = sys_ck.rate;
                return;
        }

        if (!fixed) {
                div = omap2_clksel_get_divisor(clk);
                if (div == 0)
                        return;
        }

        if (div != 0) {
                if (unlikely(clk->rate == clk->parent->rate / div))
                        return;
                clk->rate = clk->parent->rate / div;
        }

        if (unlikely(clk->flags & RATE_PROPAGATES))
                propagate_rate(clk);
}
/*
 * Finds best divider value in an array based on the source and target
 * rates. The divider array must be sorted with smallest divider first.
 */
static inline u32 omap2_divider_from_table(u32 size, u32 *div_array,
                                           u32 src_rate, u32 tgt_rate)
{
        int i, test_rate;

        if (div_array == NULL)
                return ~1;

        for (i = 0; i < size; i++) {
                test_rate = src_rate / *div_array;
                if (test_rate <= tgt_rate)
                        return *div_array;
                ++div_array;
        }

        return ~0;      /* No acceptable divider */
}
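
/*
 * The walk above returns the first (smallest) divider that brings the source
 * rate down to or below the target. As an illustrative example, with
 * src_rate = 96 MHz, tgt_rate = 48 MHz and the sysclkout table
 * {1, 2, 4, 8, 16}: 96/1 is still too fast, 96/2 == 48 is acceptable, so 2
 * is returned.
 */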
/*
 * Find divisor for the given clock and target rate.
 *
 * Note that this will not work for clocks which are part of CONFIG_PARTICIPANT,
 * they are only settable as part of virtual_prcm set.
 */
static u32 omap2_clksel_round_rate(struct clk *tclk, u32 target_rate,
                                   u32 *new_div)
{
        u32 gfx_div[] = {2, 3, 4};
        u32 sysclkout_div[] = {1, 2, 4, 8, 16};
        u32 dss1_div[] = {1, 2, 3, 4, 5, 6, 8, 9, 12, 16};
        u32 vylnq_div[] = {1, 2, 3, 4, 6, 8, 9, 12, 16, 18};
        u32 best_div = ~0, asize = 0;
        u32 *div_array = NULL;

        switch (tclk->flags & SRC_RATE_SEL_MASK) {
        case CM_GFX_SEL1:
                asize = 3;
                div_array = gfx_div;
                break;
        case CM_PLL_SEL1:
                return omap2_dpll_round_rate(target_rate);
        case CM_SYSCLKOUT_SEL1:
                asize = 5;
                div_array = sysclkout_div;
                break;
        case CM_CORE_SEL1:
                if (tclk == &dss1_fck) {
                        if (tclk->parent == &core_ck) {
                                asize = 10;
                                div_array = dss1_div;
                        } else {
                                *new_div = 0;   /* fixed clk */
                                return tclk->parent->rate;
                        }
                } else if ((tclk == &vlynq_fck) && cpu_is_omap2420()) {
                        if (tclk->parent == &core_ck) {
                                asize = 10;
                                div_array = vylnq_div;
                        } else {
                                *new_div = 0;   /* fixed clk */
                                return tclk->parent->rate;
                        }
                }
                break;
        }

        best_div = omap2_divider_from_table(asize, div_array,
                                            tclk->parent->rate, target_rate);
        if (best_div == ~0) {
                *new_div = 1;
                return best_div;        /* signal error */
        }

        *new_div = best_div;
        return (tclk->parent->rate / best_div);
}
/* Given a clock and a rate apply a clock specific rounding function */
static long omap2_clk_round_rate(struct clk *clk, unsigned long rate)
{
        u32 new_div = 0;
        int valid_rate;

        if (clk->flags & RATE_FIXED)
                return clk->rate;

        if (clk->flags & RATE_CKCTL) {
                valid_rate = omap2_clksel_round_rate(clk, rate, &new_div);
                return valid_rate;
        }

        if (clk->round_rate != 0)
                return clk->round_rate(clk, rate);

        return clk->rate;
}
/*
 * Check the DLL lock state, and return true if running in unlock mode.
 * This is needed to compensate for the shifted DLL value in unlock mode.
 */
static u32 omap2_dll_force_needed(void)
{
        u32 dll_state = SDRC_DLLA_CTRL;         /* dlla and dllb are a set */

        if ((dll_state & (1 << 2)) == (1 << 2))
                return 1;
        else
                return 0;
}
static void omap2_init_memory_params(u32 force_lock_to_unlock_mode)
{
        unsigned long dll_cnt;
        u32 fast_dll = 0;

        mem_timings.m_type = !((SDRC_MR_0 & 0x3) == 0x1); /* DDR = 1, SDR = 0 */

        /* 2422 es2.05 and beyond has a single SIP DDR instead of 2 like others.
         * In the case of 2422, it's ok to use CS1 instead of CS0.
         */
#if 0   /* FIXME: Enable after 24xx cpu detection works */
        ctype = get_cpu_type();
        if (cpu_is_omap2422())
                mem_timings.base_cs = 1;
        else
#endif
        mem_timings.base_cs = 0;

        if (mem_timings.m_type != M_DDR)
                return;

        /* With DDR we need to determine the low frequency DLL value */
        if (((mem_timings.fast_dll_ctrl & (1 << 2)) == M_LOCK_CTRL))
                mem_timings.dll_mode = M_UNLOCK;
        else
                mem_timings.dll_mode = M_LOCK;

        if (mem_timings.base_cs == 0) {
                fast_dll = SDRC_DLLA_CTRL;
                dll_cnt = SDRC_DLLA_STATUS & 0xff00;
        } else {
                fast_dll = SDRC_DLLB_CTRL;
                dll_cnt = SDRC_DLLB_STATUS & 0xff00;
        }

        if (force_lock_to_unlock_mode) {
                fast_dll &= ~0xff00;
                fast_dll |= dll_cnt;            /* Current lock mode */
        }
        mem_timings.fast_dll_ctrl = fast_dll;

        /* No disruptions, DDR will be offline & C-ABI not followed */
        omap2_sram_ddr_init(&mem_timings.slow_dll_ctrl,
                            mem_timings.fast_dll_ctrl,
                            mem_timings.base_cs,
                            force_lock_to_unlock_mode);
        mem_timings.slow_dll_ctrl &= 0xff00;    /* Keep lock value */

        /* Turn status into unlock ctrl */
        mem_timings.slow_dll_ctrl |=
                ((mem_timings.fast_dll_ctrl & 0xF) | (1 << 2));

        /* 90 degree phase for anything below 133 MHz */
        mem_timings.slow_dll_ctrl |= (1 << 1);
}
static u32 omap2_reprogram_sdrc(u32 level, u32 force)
{
        u32 prev = curr_perf_level, flags;

        if ((curr_perf_level == level) && !force)
                return prev;

        if (level == PRCM_HALF_SPEED) {
                local_irq_save(flags);
                PRCM_VOLTSETUP = 0xffff;
                omap2_sram_reprogram_sdrc(PRCM_HALF_SPEED,
                                          mem_timings.slow_dll_ctrl,
                                          mem_timings.m_type);
                curr_perf_level = PRCM_HALF_SPEED;
                local_irq_restore(flags);
        }
        if (level == PRCM_FULL_SPEED) {
                local_irq_save(flags);
                PRCM_VOLTSETUP = 0xffff;
                omap2_sram_reprogram_sdrc(PRCM_FULL_SPEED,
                                          mem_timings.fast_dll_ctrl,
                                          mem_timings.m_type);
                curr_perf_level = PRCM_FULL_SPEED;
                local_irq_restore(flags);
        }

        return prev;
}
static int omap2_reprogram_dpll(struct clk * clk, unsigned long rate)
{
        u32 flags, cur_rate, low, mult, div, valid_rate, done_rate;
        u32 bypass = 0;
        struct prcm_config tmpset;
        int ret = -EINVAL;

        local_irq_save(flags);
        cur_rate = omap2_get_dpll_rate(&dpll_ck);
        mult = CM_CLKSEL2_PLL & 0x3;

        if ((rate == (cur_rate / 2)) && (mult == 2)) {
                omap2_reprogram_sdrc(PRCM_HALF_SPEED, 1);
        } else if ((rate == (cur_rate * 2)) && (mult == 1)) {
                omap2_reprogram_sdrc(PRCM_FULL_SPEED, 1);
        } else if (rate != cur_rate) {
                valid_rate = omap2_dpll_round_rate(rate);
                if (valid_rate != rate)
                        goto dpll_exit;

                if ((CM_CLKSEL2_PLL & 0x3) == 1)
                        low = curr_prcm_set->dpll_speed;
                else
                        low = curr_prcm_set->dpll_speed / 2;

                tmpset.cm_clksel1_pll = CM_CLKSEL1_PLL;
                tmpset.cm_clksel1_pll &= ~(0x3FFF << 8);
                div = ((curr_prcm_set->xtal_speed / 1000000) - 1);
                tmpset.cm_clksel2_pll = CM_CLKSEL2_PLL;
                tmpset.cm_clksel2_pll &= ~0x3;
                if (rate > low) {
                        tmpset.cm_clksel2_pll |= 0x2;
                        mult = ((rate / 2) / 1000000);
                        done_rate = PRCM_FULL_SPEED;
                } else {
                        tmpset.cm_clksel2_pll |= 0x1;
                        mult = (rate / 1000000);
                        done_rate = PRCM_HALF_SPEED;
                }
                tmpset.cm_clksel1_pll |= ((div << 8) | (mult << 12));

                /* Worst case */
                tmpset.base_sdrc_rfr = V24XX_SDRC_RFR_CTRL_BYPASS;

                if (rate == curr_prcm_set->xtal_speed)  /* If asking for 1-1 */
                        bypass = 1;

                omap2_reprogram_sdrc(PRCM_FULL_SPEED, 1); /* For init_mem */

                /* Force dll lock mode */
                omap2_set_prcm(tmpset.cm_clksel1_pll, tmpset.base_sdrc_rfr,
                               bypass);

                /* Errata: ret dll entry state */
                omap2_init_memory_params(omap2_dll_force_needed());
                omap2_reprogram_sdrc(done_rate, 0);
        }
        omap2_clksel_recalc(&dpll_ck);
        ret = 0;

dpll_exit:
        local_irq_restore(flags);
        return ret;
}
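
/*
 * Note on the CM_CLKSEL1_PLL value built above: the divider field is set to
 * (xtal_speed in MHz - 1) at bit 8 and the multiplier to the target rate in
 * MHz (or half of it when the x2 output is used) at bit 12, so the DPLL
 * output is xtal * mult / (div + 1) * amult. As an illustrative check, a
 * 13 MHz crystal with a 660 MHz request on the x2 path gives div = 12,
 * mult = 330 and 13 * 330 / 13 * 2 = 660 MHz.
 */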
/* Just return the MPU speed */
static void omap2_mpu_recalc(struct clk * clk)
{
        clk->rate = curr_prcm_set->mpu_speed;
}
/*
 * Look for a rate equal or less than the target rate given a configuration set.
 *
 * What's not entirely clear is "which" field represents the key field.
 * Some might argue L3-DDR, others ARM, others IVA. This code is simple and
 * just uses the ARM rates.
 */
static long omap2_round_to_table_rate(struct clk * clk, unsigned long rate)
{
        struct prcm_config * ptr;
        long highest_rate;

        if (clk != &virt_prcm_set)
                return -EINVAL;

        highest_rate = -EINVAL;

        for (ptr = rate_table; ptr->mpu_speed; ptr++) {
                if (ptr->xtal_speed != sys_ck.rate)
                        continue;

                highest_rate = ptr->mpu_speed;

                /* Can check only after xtal frequency check */
                if (ptr->mpu_speed <= rate)
                        break;
        }
        return highest_rate;
}
/*
 * omap2_clksel_to_divisor() - turn field value into integer divider
 */
static u32 omap2_clksel_to_divisor(u32 div_sel, u32 field_val)
{
        u32 i;
        u32 clkout_array[] = {1, 2, 4, 8, 16};

        if ((div_sel & SRC_RATE_SEL_MASK) == CM_SYSCLKOUT_SEL1) {
                for (i = 0; i < 5; i++) {
                        if (field_val == i)
                                return clkout_array[i];
                }
                return ~0;
        } else
                return field_val;
}
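
/*
 * For CM_SYSCLKOUT_SEL1 the register field is an encoded power of two
 * (field values 0..4 select dividers 1, 2, 4, 8 and 16); for every other
 * CLKSEL handled here the field value already is the divider and is
 * returned unchanged.
 */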
/*
 * Returns the CLKSEL divider register value
 * REVISIT: This should be cleaned up to work nicely with void __iomem *
 */
static u32 omap2_get_clksel(u32 *div_sel, u32 *field_mask,
                            struct clk *clk)
{
        int ret = ~0;
        u32 reg_val, div_off;
        u32 div_addr = 0;
        u32 mask = ~0;

        div_off = clk->rate_offset;

        switch ((*div_sel & SRC_RATE_SEL_MASK)) {
        case CM_MPU_SEL1:
                div_addr = (u32)&CM_CLKSEL_MPU;
                mask = 0x1f;
                break;
        case CM_DSP_SEL1:
                div_addr = (u32)&CM_CLKSEL_DSP;
                if (cpu_is_omap2420()) {
                        if ((div_off == 0) || (div_off == 8))
                                mask = 0x1f;
                        else if (div_off == 5)
                                mask = 0x3;
                } else if (cpu_is_omap2430()) {
                        if (div_off == 0)
                                mask = 0x1f;
                        else if (div_off == 5)
                                mask = 0x3;
                }
                break;
        case CM_GFX_SEL1:
                div_addr = (u32)&CM_CLKSEL_GFX;
                if (div_off == 0)
                        mask = 0x7;
                break;
        case CM_MODEM_SEL1:
                div_addr = (u32)&CM_CLKSEL_MDM;
                if (div_off == 0)
                        mask = 0xf;
                break;
        case CM_SYSCLKOUT_SEL1:
                div_addr = (u32)&PRCM_CLKOUT_CTRL;
                if ((div_off == 3) || (div_off == 11))
                        mask = 0x3;
                break;
        case CM_CORE_SEL1:
                div_addr = (u32)&CM_CLKSEL1_CORE;
                switch (div_off) {
                case 0:                         /* l3 */
                case 8:                         /* dss1 */
                case 15:                        /* vlynq-2420 */
                case 20:                        /* ssi */
                        mask = 0x1f; break;
                case 5:                         /* l4 */
                        mask = 0x3; break;
                case 13:                        /* dss2 */
                        mask = 0x1; break;
                case 25:                        /* usb */
                        mask = 0xf; break;
                }
        }

        *field_mask = mask;

        if (unlikely(mask == ~0))
                div_addr = 0;

        *div_sel = div_addr;

        if (unlikely(div_addr == 0))
                return ret;

        /* Isolate field */
        reg_val = __raw_readl((void __iomem *)div_addr) & (mask << div_off);

        /* Normalize back to divider value */
        reg_val >>= div_off;

        return reg_val;
}
/*
 * Return divider to be applied to parent clock.
 * Return 0 on error.
 */
static u32 omap2_clksel_get_divisor(struct clk *clk)
{
        int ret = 0;
        u32 div, div_sel, div_off, field_mask, field_val;

        /* isolate control register */
        div_sel = (SRC_RATE_SEL_MASK & clk->flags);
        div_off = clk->rate_offset;

        field_val = omap2_get_clksel(&div_sel, &field_mask, clk);
        if (div_sel == 0)
                return ret;

        div_sel = (SRC_RATE_SEL_MASK & clk->flags);
        div = omap2_clksel_to_divisor(div_sel, field_val);

        return div;
}
/* Set the clock rate for a clock source */
static int omap2_clk_set_rate(struct clk *clk, unsigned long rate)
{
        int ret = -EINVAL;
        void __iomem * reg;
        u32 div_sel, div_off, field_mask, field_val, reg_val, validrate;
        u32 new_div = 0;

        if (!(clk->flags & CONFIG_PARTICIPANT) && (clk->flags & RATE_CKCTL)) {
                if (clk == &dpll_ck)
                        return omap2_reprogram_dpll(clk, rate);

                /* Isolate control register */
                div_sel = (SRC_RATE_SEL_MASK & clk->flags);
                div_off = clk->src_offset;

                validrate = omap2_clksel_round_rate(clk, rate, &new_div);
                if (validrate != rate)
                        return ret;

                field_val = omap2_get_clksel(&div_sel, &field_mask, clk);
                if (div_sel == 0)
                        return ret;

                if (clk->flags & CM_SYSCLKOUT_SEL1) {
                        switch (new_div) {
                        case 16: field_val = 4; break;
                        case 8:  field_val = 3; break;
                        case 4:  field_val = 2; break;
                        case 2:  field_val = 1; break;
                        case 1:  field_val = 0; break;
                        }
                } else
                        field_val = new_div;

                reg = (void __iomem *)div_sel;
                reg_val = __raw_readl(reg);
                reg_val &= ~(field_mask << div_off);
                reg_val |= (field_val << div_off);
                __raw_writel(reg_val, reg);
                clk->rate = clk->parent->rate / field_val;

                if (clk->flags & DELAYED_APP)
                        __raw_writel(0x1, (void __iomem *)&PRCM_CLKCFG_CTRL);
                ret = 0;
        } else if (clk->set_rate != 0)
                ret = clk->set_rate(clk, rate);

        if (unlikely(ret == 0 && (clk->flags & RATE_PROPAGATES)))
                propagate_rate(clk);

        return ret;
}
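
/*
 * Summary of the non-DPLL path above: the requested rate is first rounded
 * against the clock's divider table, the matching CLKSEL register and field
 * mask are looked up, the new divider (re-encoded for SYSCLKOUT) is written
 * into that field, and for DELAYED_APP clocks a write to PRCM_CLKCFG_CTRL is
 * issued so the new configuration is latched.
 */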
/* Converts encoded control register address into a full address */
static u32 omap2_get_src_field(u32 *type_to_addr, u32 reg_offset,
                               struct clk *src_clk, u32 *field_mask)
{
        u32 val = ~0, src_reg_addr = 0, mask = 0;

        /* Find target control register.*/
        switch ((*type_to_addr & SRC_RATE_SEL_MASK)) {
        case CM_CORE_SEL1:
                src_reg_addr = (u32)&CM_CLKSEL1_CORE;
                if (reg_offset == 13) {                 /* DSS2_fclk */
                        mask = 0x1;
                        if (src_clk == &sys_ck)
                                val = 0;
                        if (src_clk == &func_48m_ck)
                                val = 1;
                } else if (reg_offset == 8) {           /* DSS1_fclk */
                        mask = 0x1f;
                        if (src_clk == &sys_ck)
                                val = 0;
                        else if (src_clk == &core_ck)   /* divided clock */
                                val = 0x10;             /* rate needs fixing */
                } else if ((reg_offset == 15) && cpu_is_omap2420()) { /* vlynq */
                        mask = 0x1F;
                        if (src_clk == &func_96m_ck)
                                val = 0;
                        else if (src_clk == &core_ck)
                                val = 0x10;
                }
                break;
        case CM_CORE_SEL2:
                src_reg_addr = (u32)&CM_CLKSEL2_CORE;
                mask = 0x3;
                if (src_clk == &func_32k_ck)
                        val = 0x0;
                if (src_clk == &sys_ck)
                        val = 0x1;
                if (src_clk == &alt_ck)
                        val = 0x2;
                break;
        case CM_WKUP_SEL1:
                src_reg_addr = (u32)&CM_CLKSEL2_CORE;
                mask = 0x3;
                if (src_clk == &func_32k_ck)
                        val = 0x0;
                if (src_clk == &sys_ck)
                        val = 0x1;
                if (src_clk == &alt_ck)
                        val = 0x2;
                break;
        case CM_PLL_SEL1:
                src_reg_addr = (u32)&CM_CLKSEL1_PLL;
                mask = 0x1;
                if (reg_offset == 0x3) {
                        if (src_clk == &apll96_ck)
                                val = 0;
                        if (src_clk == &alt_ck)
                                val = 1;
                } else if (reg_offset == 0x5) {
                        if (src_clk == &apll54_ck)
                                val = 0;
                        if (src_clk == &alt_ck)
                                val = 1;
                }
                break;
        case CM_PLL_SEL2:
                src_reg_addr = (u32)&CM_CLKSEL2_PLL;
                mask = 0x3;
                if (src_clk == &func_32k_ck)
                        val = 0x0;
                if (src_clk == &dpll_ck)
                        val = 0x2;
                break;
        case CM_SYSCLKOUT_SEL1:
                src_reg_addr = (u32)&PRCM_CLKOUT_CTRL;
                mask = 0x3;
                if (src_clk == &dpll_ck)
                        val = 0;
                if (src_clk == &sys_ck)
                        val = 1;
                if (src_clk == &func_54m_ck)
                        val = 2;
                if (src_clk == &func_96m_ck)
                        val = 3;
                break;
        }

        if (val == ~0)                  /* Catch errors in offset */
                *type_to_addr = 0;
        else
                *type_to_addr = src_reg_addr;
        *field_mask = mask;

        return val;
}
static int omap2_clk_set_parent(struct clk *clk, struct clk *new_parent)
{
        void __iomem * reg;
        u32 src_sel, src_off, field_val, field_mask, reg_val, rate;
        int ret = -EINVAL;

        if (unlikely(clk->flags & CONFIG_PARTICIPANT))
                return ret;

        if (clk->flags & SRC_SEL_MASK) {        /* On-chip SEL collection */
                src_sel = (SRC_RATE_SEL_MASK & clk->flags);
                src_off = clk->src_offset;

                if (src_sel == 0)
                        goto set_parent_error;

                field_val = omap2_get_src_field(&src_sel, src_off, new_parent,
                                                &field_mask);

                reg = (void __iomem *)src_sel;

                if (clk->usecount > 0)
                        omap2_clk_disable(clk);

                /* Set new source value (previous dividers if any in effect) */
                reg_val = __raw_readl(reg) & ~(field_mask << src_off);
                reg_val |= (field_val << src_off);
                __raw_writel(reg_val, reg);

                if (clk->flags & DELAYED_APP)
                        __raw_writel(0x1, (void __iomem *)&PRCM_CLKCFG_CTRL);

                if (clk->usecount > 0)
                        omap2_clk_enable(clk);
                clk->parent = new_parent;

                /* SRC_RATE_SEL_MASK clocks follow their parents rates.*/
                if ((new_parent == &core_ck) && (clk == &dss1_fck))
                        clk->rate = new_parent->rate / 0x10;
                else
                        clk->rate = new_parent->rate;

                if (unlikely(clk->flags & RATE_PROPAGATES))
                        propagate_rate(clk);

                return 0;
        } else {
                clk->parent = new_parent;
                rate = new_parent->rate;
                omap2_clk_set_rate(clk, rate);
                ret = 0;
        }

set_parent_error:
        return ret;
}
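
/*
 * A driver reparents a clock through the generic clk API rather than by
 * calling the function above directly. A minimal sketch (the clock names
 * are only illustrative):
 *
 *      struct clk *fck = clk_get(dev, "dss1_fck");
 *      struct clk *core = clk_get(dev, "core_ck");
 *
 *      if (!IS_ERR(fck) && !IS_ERR(core))
 *              clk_set_parent(fck, core);
 */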
/* Sets basic clocks based on the specified rate */
static int omap2_select_table_rate(struct clk * clk, unsigned long rate)
{
        u32 flags, cur_rate, done_rate, bypass = 0;
        u8 cpu_mask = 0;
        struct prcm_config *prcm;
        unsigned long found_speed = 0;

        if (clk != &virt_prcm_set)
                return -EINVAL;

        /* FIXME: Change cpu_is_omap2420() to cpu_is_omap242x() */
        if (cpu_is_omap2420())
                cpu_mask = RATE_IN_242X;
        else if (cpu_is_omap2430())
                cpu_mask = RATE_IN_243X;

        for (prcm = rate_table; prcm->mpu_speed; prcm++) {
                if (!(prcm->flags & cpu_mask))
                        continue;

                if (prcm->xtal_speed != sys_ck.rate)
                        continue;

                if (prcm->mpu_speed <= rate) {
                        found_speed = prcm->mpu_speed;
                        break;
                }
        }

        if (!found_speed) {
                printk(KERN_INFO "Could not set MPU rate to %luMHz\n",
                       rate / 1000000);
                return -EINVAL;
        }

        curr_prcm_set = prcm;
        cur_rate = omap2_get_dpll_rate(&dpll_ck);

        if (prcm->dpll_speed == cur_rate / 2) {
                omap2_reprogram_sdrc(PRCM_HALF_SPEED, 1);
        } else if (prcm->dpll_speed == cur_rate * 2) {
                omap2_reprogram_sdrc(PRCM_FULL_SPEED, 1);
        } else if (prcm->dpll_speed != cur_rate) {
                local_irq_save(flags);

                if (prcm->dpll_speed == prcm->xtal_speed)
                        bypass = 1;

                if ((prcm->cm_clksel2_pll & 0x3) == 2)
                        done_rate = PRCM_FULL_SPEED;
                else
                        done_rate = PRCM_HALF_SPEED;

                /* MPU divider */
                CM_CLKSEL_MPU = prcm->cm_clksel_mpu;

                /* dsp + iva1 div(2420), iva2.1(2430) */
                CM_CLKSEL_DSP = prcm->cm_clksel_dsp;

                CM_CLKSEL_GFX = prcm->cm_clksel_gfx;

                /* Major subsystem dividers */
                CM_CLKSEL1_CORE = prcm->cm_clksel1_core;
                if (cpu_is_omap2430())
                        CM_CLKSEL_MDM = prcm->cm_clksel_mdm;

                /* x2 to enter init_mem */
                omap2_reprogram_sdrc(PRCM_FULL_SPEED, 1);

                omap2_set_prcm(prcm->cm_clksel1_pll, prcm->base_sdrc_rfr,
                               bypass);

                omap2_init_memory_params(omap2_dll_force_needed());
                omap2_reprogram_sdrc(done_rate, 0);

                local_irq_restore(flags);
        }
        omap2_clksel_recalc(&dpll_ck);

        return 0;
}
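
/*
 * To summarize the virt_prcm_set path above: the rate table is scanned for
 * the first entry matching the running chip and crystal whose MPU speed does
 * not exceed the request; that entry's dividers are then written, the SDRC
 * is temporarily run at full speed while the DPLL and memory parameters are
 * reprogrammed, and finally the SDRC is dropped to the speed implied by the
 * new CM_CLKSEL2_PLL setting.
 */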
/*-------------------------------------------------------------------------
 * Omap2 clock reset and init functions
 *-------------------------------------------------------------------------*/
static struct clk_functions omap2_clk_functions = {
        .clk_enable             = omap2_clk_enable,
        .clk_disable            = omap2_clk_disable,
        .clk_use                = omap2_clk_use,
        .clk_unuse              = omap2_clk_unuse,
        .clk_round_rate         = omap2_clk_round_rate,
        .clk_set_rate           = omap2_clk_set_rate,
        .clk_set_parent         = omap2_clk_set_parent,
};

static void __init omap2_get_crystal_rate(struct clk *osc, struct clk *sys)
{
        u32 div, aplls, sclk = 13000000;

        aplls = CM_CLKSEL1_PLL;
        aplls &= ((1 << 23) | (1 << 24) | (1 << 25));
        aplls >>= 23;                   /* Isolate field, 0,2,3 */

        if (aplls == 0)
                sclk = 19200000;
        else if (aplls == 2)
                sclk = 13000000;
        else if (aplls == 3)
                sclk = 12000000;

        div = PRCM_CLKSRC_CTRL;
        div &= ((1 << 7) | (1 << 6));
        div >>= sys->rate_offset;

        osc->rate = sclk * div;
        sys->rate = sclk;
}
#ifdef CONFIG_OMAP_RESET_CLOCKS
static int __init omap2_disable_unused_clocks(void)
{
        struct clk *ck;
        u32 regval32;

        list_for_each_entry(ck, &clocks, node) {
                if (ck->usecount > 0 || (ck->flags & ALWAYS_ENABLED) ||
                        ck->enable_reg == 0)
                        continue;

                regval32 = __raw_readl(ck->enable_reg);
                if ((regval32 & (1 << ck->enable_bit)) == 0)
                        continue;

                printk(KERN_INFO "Disabling unused clock \"%s\"\n", ck->name);
                omap2_clk_disable(ck);
        }

        return 0;
}
late_initcall(omap2_disable_unused_clocks);
#endif
/*
 * Switch the MPU rate if specified on cmdline.
 * We cannot do this early until cmdline is parsed.
 */
static int __init omap2_clk_arch_init(void)
{
        if (!mpurate)
                return -EINVAL;

        if (omap2_select_table_rate(&virt_prcm_set, mpurate))
                printk(KERN_ERR "Could not find matching MPU rate\n");

        propagate_rate(&osc_ck);        /* update main root fast */
        propagate_rate(&func_32k_ck);   /* update main root slow */

        printk(KERN_INFO "Switched to new clocking rate (Crystal/DPLL/MPU): "
               "%ld.%01ld/%ld/%ld MHz\n",
               (sys_ck.rate / 1000000), (sys_ck.rate / 100000) % 10,
               (dpll_ck.rate / 1000000), (mpu_ck.rate / 1000000));

        return 0;
}
arch_initcall(omap2_clk_arch_init);
int __init omap2_clk_init(void)
{
        struct prcm_config *prcm;
        struct clk ** clkp;
        u32 clkrate;

        clk_init(&omap2_clk_functions);
        omap2_get_crystal_rate(&osc_ck, &sys_ck);

        for (clkp = onchip_clks; clkp < onchip_clks + ARRAY_SIZE(onchip_clks);
             clkp++) {

                if ((*clkp)->flags & CLOCK_IN_OMAP242X && cpu_is_omap2420()) {
                        clk_register(*clkp);
                        continue;
                }

                if ((*clkp)->flags & CLOCK_IN_OMAP243X && cpu_is_omap2430()) {
                        clk_register(*clkp);
                        continue;
                }
        }

        /* Check the MPU rate set by bootloader */
        clkrate = omap2_get_dpll_rate(&dpll_ck);
        for (prcm = rate_table; prcm->mpu_speed; prcm++) {
                if (prcm->xtal_speed != sys_ck.rate)
                        continue;
                if (prcm->dpll_speed <= clkrate)
                        break;
        }
        curr_prcm_set = prcm;

        propagate_rate(&osc_ck);        /* update main root fast */
        propagate_rate(&func_32k_ck);   /* update main root slow */

        printk(KERN_INFO "Clocking rate (Crystal/DPLL/MPU): "
               "%ld.%01ld/%ld/%ld MHz\n",
               (sys_ck.rate / 1000000), (sys_ck.rate / 100000) % 10,
               (dpll_ck.rate / 1000000), (mpu_ck.rate / 1000000));

        /*
         * Only enable those clocks we will need, let the drivers
         * enable other clocks as necessary
         */
        clk_use(&sync_32k_ick);
        clk_use(&omapctrl_ick);
        if (cpu_is_omap2430())
                clk_use(&sdrc_ick);

        return 0;
}
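
/*
 * Typical consumer usage of the framework initialized above, as a minimal
 * sketch (the clock name is only illustrative and error handling is
 * abbreviated):
 *
 *      struct clk *ick = clk_get(dev, "mcbsp1_ick");
 *
 *      if (!IS_ERR(ick)) {
 *              clk_use(ick);
 *              ...
 *              clk_unuse(ick);
 *              clk_put(ick);
 *      }
 */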