/*
 * linux/arch/arm/mach-omap2/clock.c
 *
 * Copyright (C) 2005-2008 Texas Instruments, Inc.
 * Copyright (C) 2004-2010 Nokia Corporation
 *
 * Contacts:
 * Richard Woodruff <r-woodruff2@ti.com>
 * Paul Walmsley
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#undef DEBUG

#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/list.h>
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/delay.h>
#ifdef CONFIG_COMMON_CLK
#include <linux/clk-provider.h>
#else
#include <linux/clk.h>
#endif
#include <linux/io.h>
#include <linux/bitops.h>

#include <asm/cpu.h>

#include <trace/events/power.h>

#include "soc.h"
#include "clockdomain.h"
#include "clock.h"
#include "cm.h"
#include "cm2xxx.h"
#include "cm3xxx.h"
#include "cm-regbits-24xx.h"
#include "cm-regbits-34xx.h"
#include "common.h"

/*
 * MAX_MODULE_ENABLE_WAIT: maximum number of microseconds to wait
 * for a module to indicate that it is no longer in idle
 */
#define MAX_MODULE_ENABLE_WAIT		100000

u16 cpu_mask;

/*
 * clkdm_control: if true, then when a clock is enabled in the
 * hardware, its clockdomain will first be enabled; and when a clock
 * is disabled in the hardware, its clockdomain will be disabled
 * afterwards.
 */
static bool clkdm_control = true;

static LIST_HEAD(clocks);
static DEFINE_MUTEX(clocks_mutex);
#ifndef CONFIG_COMMON_CLK
static DEFINE_SPINLOCK(clockfw_lock);
#endif

#ifdef CONFIG_COMMON_CLK
static LIST_HEAD(clk_hw_omap_clocks);

/*
 * Used for clocks that have the same value as the parent clock,
 * divided by some factor
 */
unsigned long omap_fixed_divisor_recalc(struct clk_hw *hw,
					unsigned long parent_rate)
{
	struct clk_hw_omap *oclk;

	if (!hw) {
		pr_warn("%s: hw is NULL\n", __func__);
		return -EINVAL;
	}

	oclk = to_clk_hw_omap(hw);

	WARN_ON(!oclk->fixed_div);

	return parent_rate / oclk->fixed_div;
}
#endif
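
/*
 * Illustrative example of the fixed-divisor recalculation above (the
 * numbers are hypothetical, not taken from any particular clock): with
 * ->fixed_div = 2 and a 96 MHz parent, the recalculated rate is
 * 96000000 / 2 = 48000000, i.e. 48 MHz.
 */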
/*
 * OMAP2+ specific clock functions
 */

/* Private functions */

/**
 * _wait_idlest_generic - wait for a module to leave the idle state
 * @reg: virtual address of module IDLEST register
 * @mask: value to mask against to determine if the module is active
 * @idlest: idle state indicator (0 or 1) for the clock
 * @name: name of the clock (for printk)
 *
 * Wait for a module to leave idle, where its idle-status register is
 * not inside the CM module.  Returns 1 if the module left idle
 * promptly, or 0 if the module did not leave idle before the timeout
 * elapsed.  XXX Deprecated - should be moved into drivers for the
 * individual IP block that the IDLEST register exists in.
 */
static int _wait_idlest_generic(void __iomem *reg, u32 mask, u8 idlest,
				const char *name)
{
	int i = 0, ena = 0;

	ena = (idlest) ? 0 : mask;

	omap_test_timeout(((__raw_readl(reg) & mask) == ena),
			  MAX_MODULE_ENABLE_WAIT, i);

	if (i < MAX_MODULE_ENABLE_WAIT)
		pr_debug("omap clock: module associated with clock %s ready after %d loops\n",
			 name, i);
	else
		pr_err("omap clock: module associated with clock %s didn't enable in %d tries\n",
		       name, MAX_MODULE_ENABLE_WAIT);

	return (i < MAX_MODULE_ENABLE_WAIT) ? 1 : 0;
}
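
/*
 * A short worked example of the polarity handling above: when @idlest
 * is 1 the hardware reports "1 = still idle", so ena is set to 0 and
 * the loop waits for the masked bit to read back as 0; when @idlest is
 * 0 the bit reads 1 once the module is ready, so ena equals @mask and
 * the loop waits for the bit to become set.
 */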
/**
 * _omap2_module_wait_ready - wait for an OMAP module to leave IDLE
 * @clk: struct clk * belonging to the module
 *
 * If the necessary clocks for the OMAP hardware IP block that
 * corresponds to clock @clk are enabled, then wait for the module to
 * indicate readiness (i.e., to leave IDLE).  This code does not
 * belong in the clock code and will be moved in the medium term to
 * module-dependent code.  No return value.
 */
#ifdef CONFIG_COMMON_CLK
static void _omap2_module_wait_ready(struct clk_hw_omap *clk)
#else
static void _omap2_module_wait_ready(struct clk *clk)
#endif
{
	void __iomem *companion_reg, *idlest_reg;
	u8 other_bit, idlest_bit, idlest_val, idlest_reg_id;
	s16 prcm_mod;
	int r;

	/* Not all modules have multiple clocks that their IDLEST depends on */
	if (clk->ops->find_companion) {
		clk->ops->find_companion(clk, &companion_reg, &other_bit);
		if (!(__raw_readl(companion_reg) & (1 << other_bit)))
			return;
	}

	clk->ops->find_idlest(clk, &idlest_reg, &idlest_bit, &idlest_val);

	r = cm_split_idlest_reg(idlest_reg, &prcm_mod, &idlest_reg_id);
	if (r) {
		/* IDLEST register not in the CM module */
		_wait_idlest_generic(idlest_reg, (1 << idlest_bit), idlest_val,
#ifdef CONFIG_COMMON_CLK
				     __clk_get_name(clk->hw.clk));
#else
				     clk->name);
#endif
	} else {
		cm_wait_module_ready(prcm_mod, idlest_reg_id, idlest_bit);
	}
}

/* Public functions */

/**
 * omap2_init_clk_clkdm - look up a clockdomain name, store pointer in clk
 * @clk: OMAP clock struct ptr to use
 *
 * Convert a clockdomain name stored in a struct clk 'clk' into a
 * clockdomain pointer, and save it into the struct clk.  Intended to be
 * called during clk_register().  No return value.
 */
#ifdef CONFIG_COMMON_CLK
void omap2_init_clk_clkdm(struct clk_hw *hw)
{
	struct clk_hw_omap *clk = to_clk_hw_omap(hw);
#else
void omap2_init_clk_clkdm(struct clk *clk)
{
#endif
	struct clockdomain *clkdm;
	const char *clk_name;

	if (!clk->clkdm_name)
		return;

#ifdef CONFIG_COMMON_CLK
	clk_name = __clk_get_name(hw->clk);
#else
	clk_name = __clk_get_name(clk);
#endif

	clkdm = clkdm_lookup(clk->clkdm_name);
	if (clkdm) {
		pr_debug("clock: associated clk %s to clkdm %s\n",
			 clk_name, clk->clkdm_name);
		clk->clkdm = clkdm;
	} else {
		pr_debug("clock: could not associate clk %s to clkdm %s\n",
			 clk_name, clk->clkdm_name);
	}
}

/**
 * omap2_clk_disable_clkdm_control - disable clkdm control on clk enable/disable
 *
 * Prevent the OMAP clock code from calling into the clockdomain code
 * when a hardware clock in that clockdomain is enabled or disabled.
 * Intended to be called at init time from omap*_clk_init().  No
 * return value.
 */
void __init omap2_clk_disable_clkdm_control(void)
{
	clkdm_control = false;
}

/**
 * omap2_clk_dflt_find_companion - find companion clock to @clk
 * @clk: struct clk * to find the companion clock of
 * @other_reg: void __iomem ** to return the companion clock CM_*CLKEN va in
 * @other_bit: u8 * to return the companion clock bit shift in
 *
 * Note: We don't need special code here for INVERT_ENABLE for the
 * time being since INVERT_ENABLE only applies to clocks enabled by
 * CM_CLKEN_PLL
 *
 * Convert CM_ICLKEN* <-> CM_FCLKEN*.  This conversion assumes it's
 * just a matter of XORing the bits.
 *
 * Some clocks don't have companion clocks.  For example, modules with
 * only an interface clock (such as MAILBOXES) don't have a companion
 * clock.  Right now, this code relies on the hardware exporting a bit
 * in the correct companion register that indicates that the
 * nonexistent 'companion clock' is active.  Future patches will
 * associate this type of code with per-module data structures to
 * avoid this issue, and remove the casts.  No return value.
 */
#ifdef CONFIG_COMMON_CLK
void omap2_clk_dflt_find_companion(struct clk_hw_omap *clk,
#else
void omap2_clk_dflt_find_companion(struct clk *clk,
#endif
			void __iomem **other_reg, u8 *other_bit)
{
	u32 r;

	/*
	 * Convert CM_ICLKEN* <-> CM_FCLKEN*.  This conversion assumes
	 * it's just a matter of XORing the bits.
	 */
	r = ((__force u32)clk->enable_reg ^ (CM_FCLKEN ^ CM_ICLKEN));

	*other_reg = (__force void __iomem *)r;
	*other_bit = clk->enable_bit;
}
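
/*
 * Worked example of the XOR trick above, assuming the usual OMAP2/3 CM
 * layout in which CM_FCLKEN and CM_ICLKEN differ only in the register
 * offset nibble (e.g. 0x00 vs 0x10): XORing a CM_FCLKEN1 enable_reg
 * address with (CM_FCLKEN ^ CM_ICLKEN) yields the matching CM_ICLKEN1
 * address, and XORing a CM_ICLKEN1 address yields CM_FCLKEN1 again.
 * The bit position inside the register is the same for both, which is
 * why *other_bit is simply clk->enable_bit.
 */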
/**
 * omap2_clk_dflt_find_idlest - find CM_IDLEST reg va, bit shift for @clk
 * @clk: struct clk * to find IDLEST info for
 * @idlest_reg: void __iomem ** to return the CM_IDLEST va in
 * @idlest_bit: u8 * to return the CM_IDLEST bit shift in
 * @idlest_val: u8 * to return the idle status indicator
 *
 * Return the CM_IDLEST register address and bit shift corresponding
 * to the module that "owns" this clock.  This default code assumes
 * that the CM_IDLEST bit shift is the CM_*CLKEN bit shift, and that
 * the IDLEST register address ID corresponds to the CM_*CLKEN
 * register address ID (e.g., that CM_FCLKEN2 corresponds to
 * CM_IDLEST2).  This is not true for all modules.  No return value.
 */
#ifdef CONFIG_COMMON_CLK
void omap2_clk_dflt_find_idlest(struct clk_hw_omap *clk,
#else
void omap2_clk_dflt_find_idlest(struct clk *clk,
#endif
		void __iomem **idlest_reg, u8 *idlest_bit, u8 *idlest_val)
{
	u32 r;

	r = (((__force u32)clk->enable_reg & ~0xf0) | 0x20);
	*idlest_reg = (__force void __iomem *)r;
	*idlest_bit = clk->enable_bit;

	/*
	 * 24xx uses 0 to indicate not ready, and 1 to indicate ready.
	 * 34xx reverses this, just to keep us on our toes
	 * AM35xx uses both, depending on the module.
	 */
	if (cpu_is_omap24xx())
		*idlest_val = OMAP24XX_CM_IDLEST_VAL;
	else if (cpu_is_omap34xx())
		*idlest_val = OMAP34XX_CM_IDLEST_VAL;
	else
		BUG();
}
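
/*
 * Worked example of the address arithmetic above (the register offsets
 * follow the conventional OMAP2/3 layout and are given only for
 * illustration): masking the enable register address with ~0xf0 clears
 * the register-offset nibble, and OR-ing in 0x20 selects CM_IDLEST
 * within the same CM submodule.  So an enable_reg ending in ...x00
 * (CM_FCLKENx) or ...x10 (CM_ICLKENx) both map to the CM_IDLESTx
 * register at ...x20.
 */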
#ifdef CONFIG_COMMON_CLK
/**
 * omap2_dflt_clk_enable - enable a clock in the hardware
 * @hw: struct clk_hw * of the clock to enable
 *
 * Enable the clock @hw in the hardware.  We first call into the OMAP
 * clockdomain code to "enable" the corresponding clockdomain if this
 * is the first enabled user of the clockdomain.  Then program the
 * hardware to enable the clock.  Then wait for the IP block that uses
 * this clock to leave idle (if applicable).  Returns the error value
 * from clkdm_clk_enable() if it terminated with an error, or -EINVAL
 * if @hw has a null clock enable_reg, or zero upon success.
 */
int omap2_dflt_clk_enable(struct clk_hw *hw)
{
	struct clk_hw_omap *clk;
	u32 v;
	int ret = 0;

	clk = to_clk_hw_omap(hw);

	if (clkdm_control && clk->clkdm) {
		ret = clkdm_clk_enable(clk->clkdm, hw->clk);
		if (ret) {
			WARN(1, "%s: could not enable %s's clockdomain %s: %d\n",
			     __func__, __clk_get_name(hw->clk),
			     clk->clkdm->name, ret);
			return ret;
		}
	}

	if (unlikely(clk->enable_reg == NULL)) {
		pr_err("%s: %s missing enable_reg\n", __func__,
		       __clk_get_name(hw->clk));
		ret = -EINVAL;
		goto err;
	}

	/* FIXME should not have INVERT_ENABLE bit here */
	v = __raw_readl(clk->enable_reg);
	if (clk->flags & INVERT_ENABLE)
		v &= ~(1 << clk->enable_bit);
	else
		v |= (1 << clk->enable_bit);
	__raw_writel(v, clk->enable_reg);
	v = __raw_readl(clk->enable_reg); /* OCP barrier */

	if (clk->ops && clk->ops->find_idlest)
		_omap2_module_wait_ready(clk);

	return 0;

err:
	if (clkdm_control && clk->clkdm)
		clkdm_clk_disable(clk->clkdm, hw->clk);
	return ret;
}

/**
 * omap2_dflt_clk_disable - disable a clock in the hardware
 * @hw: struct clk_hw * of the clock to disable
 *
 * Disable the clock @hw in the hardware, and call into the OMAP
 * clockdomain code to "disable" the corresponding clockdomain if all
 * clocks/hwmods in that clockdomain are now disabled.  No return
 * value.
 */
void omap2_dflt_clk_disable(struct clk_hw *hw)
{
	struct clk_hw_omap *clk;
	u32 v;

	clk = to_clk_hw_omap(hw);
	if (!clk->enable_reg) {
		/*
		 * 'independent' here refers to a clock which is not
		 * controlled by its parent.
		 */
		pr_err("%s: independent clock %s has no enable_reg\n",
		       __func__, __clk_get_name(hw->clk));
		return;
	}

	v = __raw_readl(clk->enable_reg);
	if (clk->flags & INVERT_ENABLE)
		v |= (1 << clk->enable_bit);
	else
		v &= ~(1 << clk->enable_bit);
	__raw_writel(v, clk->enable_reg);
	/* No OCP barrier needed here since it is a disable operation */

	if (clkdm_control && clk->clkdm)
		clkdm_clk_disable(clk->clkdm, hw->clk);
}

/**
 * omap2_clkops_enable_clkdm - increment usecount on clkdm of @hw
 * @hw: struct clk_hw * of the clock being enabled
 *
 * Increment the usecount of the clockdomain of the clock pointed to
 * by @hw; if the usecount is 1, the clockdomain will be "enabled."
 * Only needed for clocks that don't use omap2_dflt_clk_enable() as
 * their enable function pointer.  Passes along the return value of
 * clkdm_clk_enable(), -EINVAL if @hw is not associated with a
 * clockdomain, or 0 if clock framework-based clockdomain control is
 * not implemented.
 */
int omap2_clkops_enable_clkdm(struct clk_hw *hw)
{
	struct clk_hw_omap *clk;
	int ret = 0;

	clk = to_clk_hw_omap(hw);

	if (unlikely(!clk->clkdm)) {
		pr_err("%s: %s: no clkdm set ?!\n", __func__,
		       __clk_get_name(hw->clk));
		return -EINVAL;
	}

	if (unlikely(clk->enable_reg))
		pr_err("%s: %s: should use dflt_clk_enable ?!\n", __func__,
		       __clk_get_name(hw->clk));

	if (!clkdm_control) {
		pr_err("%s: %s: clkfw-based clockdomain control disabled ?!\n",
		       __func__, __clk_get_name(hw->clk));
		return 0;
	}

	ret = clkdm_clk_enable(clk->clkdm, hw->clk);
	WARN(ret, "%s: could not enable %s's clockdomain %s: %d\n",
	     __func__, __clk_get_name(hw->clk), clk->clkdm->name, ret);

	return ret;
}

/**
 * omap2_clkops_disable_clkdm - decrement usecount on clkdm of @hw
 * @hw: struct clk_hw * of the clock being disabled
 *
 * Decrement the usecount of the clockdomain of the clock pointed to
 * by @hw; if the usecount is 0, the clockdomain will be "disabled."
 * Only needed for clocks that don't use omap2_dflt_clk_disable() as their
 * disable function pointer.  No return value.
 */
void omap2_clkops_disable_clkdm(struct clk_hw *hw)
{
	struct clk_hw_omap *clk;

	clk = to_clk_hw_omap(hw);

	if (unlikely(!clk->clkdm)) {
		pr_err("%s: %s: no clkdm set ?!\n", __func__,
		       __clk_get_name(hw->clk));
		return;
	}

	if (unlikely(clk->enable_reg))
		pr_err("%s: %s: should use dflt_clk_disable ?!\n", __func__,
		       __clk_get_name(hw->clk));

	if (!clkdm_control) {
		pr_err("%s: %s: clkfw-based clockdomain control disabled ?!\n",
		       __func__, __clk_get_name(hw->clk));
		return;
	}

	clkdm_clk_disable(clk->clkdm, hw->clk);
}

/**
 * omap2_dflt_clk_is_enabled - is clock enabled in the hardware?
 * @hw: struct clk_hw * to check
 *
 * Return 1 if the clock represented by @hw is enabled in the
 * hardware, or 0 otherwise.  Intended for use in the struct
 * clk_ops.is_enabled function pointer.
 */
int omap2_dflt_clk_is_enabled(struct clk_hw *hw)
{
	struct clk_hw_omap *clk = to_clk_hw_omap(hw);
	u32 v;

	v = __raw_readl(clk->enable_reg);

	if (clk->flags & INVERT_ENABLE)
		v ^= BIT(clk->enable_bit);

	v &= BIT(clk->enable_bit);

	return v ? 1 : 0;
}
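
/*
 * A brief note on the INVERT_ENABLE handling above: for inverted-enable
 * clocks a 0 in the enable bit means "enabled", so XORing with
 * BIT(enable_bit) first flips that bit.  After the XOR, a set bit
 * always means "enabled" regardless of polarity, and the final
 * mask-and-test works the same way for both clock types.
 */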
static int __initdata mpurate;

/*
 * By default we use the rate set by the bootloader.
 * You can override this with the mpurate= cmdline option.
 */
static int __init omap_clk_setup(char *str)
{
	get_option(&str, &mpurate);

	if (!mpurate)
		return 1;

	if (mpurate < 1000)
		mpurate *= 1000000;

	return 1;
}
__setup("mpurate=", omap_clk_setup);
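
/*
 * Example of how the option is parsed (the values are illustrative):
 * passing "mpurate=600" on the kernel command line is treated as a rate
 * in MHz because the value is below 1000, so it is scaled to
 * 600000000 Hz; passing "mpurate=600000000" is taken as a rate in Hz
 * and used as-is.
 */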
/**
 * omap2_init_clk_hw_omap_clocks - initialize an OMAP clock
 * @clk: struct clk * to initialize
 *
 * Add an OMAP clock @clk to the internal list of OMAP clocks.  Used
 * temporarily for autoidle handling, until this support can be
 * integrated into the common clock framework code in some way.  No
 * return value.
 */
void omap2_init_clk_hw_omap_clocks(struct clk *clk)
{
	struct clk_hw_omap *c;

	if (__clk_get_flags(clk) & CLK_IS_BASIC)
		return;

	c = to_clk_hw_omap(__clk_get_hw(clk));
	list_add(&c->node, &clk_hw_omap_clocks);
}

/**
 * omap2_clk_enable_autoidle_all - enable autoidle on all OMAP clocks that
 * support it
 *
 * Enable clock autoidle on all OMAP clocks that have allow_idle
 * function pointers associated with them.  This function is intended
 * to be temporary until support for this is added to the common clock
 * code.  Returns 0.
 */
int omap2_clk_enable_autoidle_all(void)
{
	struct clk_hw_omap *c;

	list_for_each_entry(c, &clk_hw_omap_clocks, node)
		if (c->ops && c->ops->allow_idle)
			c->ops->allow_idle(c);
	return 0;
}

/**
 * omap2_clk_disable_autoidle_all - disable autoidle on all OMAP clocks that
 * support it
 *
 * Disable clock autoidle on all OMAP clocks that have deny_idle
 * function pointers associated with them.  This function is intended
 * to be temporary until support for this is added to the common clock
 * code.  Returns 0.
 */
int omap2_clk_disable_autoidle_all(void)
{
	struct clk_hw_omap *c;

	list_for_each_entry(c, &clk_hw_omap_clocks, node)
		if (c->ops && c->ops->deny_idle)
			c->ops->deny_idle(c);
	return 0;
}

/**
 * omap2_clk_enable_init_clocks - prepare & enable a list of clocks
 * @clk_names: ptr to an array of strings of clock names to enable
 * @num_clocks: number of clock names in @clk_names
 *
 * Prepare and enable a list of clocks, named by @clk_names.  No
 * return value.  XXX Deprecated; only needed until these clocks are
 * properly claimed and enabled by the drivers or core code that uses
 * them.  XXX What code disables & calls clk_put on these clocks?
 */
void omap2_clk_enable_init_clocks(const char **clk_names, u8 num_clocks)
{
	struct clk *init_clk;
	int i;

	for (i = 0; i < num_clocks; i++) {
		init_clk = clk_get(NULL, clk_names[i]);
		clk_prepare_enable(init_clk);
	}
}
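
#if 0
/*
 * Usage sketch only, guarded out of the build: the clock names below
 * are hypothetical placeholders, not names defined by this file.  SoC
 * init code would typically pass a static array of always-needed
 * clocks to omap2_clk_enable_init_clocks():
 */
static const char *example_init_clks[] = {
	"example_ick",
	"example_fck",
};

static void __init example_enable_boot_clocks(void)
{
	omap2_clk_enable_init_clocks(example_init_clks,
				     ARRAY_SIZE(example_init_clks));
}
#endif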
const struct clk_hw_omap_ops clkhwops_wait = {
	.find_idlest	= omap2_clk_dflt_find_idlest,
	.find_companion	= omap2_clk_dflt_find_companion,
};
#else
int omap2_dflt_clk_enable(struct clk *clk)
{
	u32 v;

	if (unlikely(clk->enable_reg == NULL)) {
		pr_err("clock.c: Enable for %s without enable code\n",
		       clk->name);
		return 0; /* REVISIT: -EINVAL */
	}

	v = __raw_readl(clk->enable_reg);
	if (clk->flags & INVERT_ENABLE)
		v &= ~(1 << clk->enable_bit);
	else
		v |= (1 << clk->enable_bit);
	__raw_writel(v, clk->enable_reg);
	v = __raw_readl(clk->enable_reg); /* OCP barrier */

	if (clk->ops->find_idlest)
		_omap2_module_wait_ready(clk);

	return 0;
}

void omap2_dflt_clk_disable(struct clk *clk)
{
	u32 v;

	if (!clk->enable_reg) {
		/*
		 * 'Independent' here refers to a clock which is not
		 * controlled by its parent.
		 */
		pr_err("clock: clk_disable called on independent clock %s which has no enable_reg\n",
		       clk->name);
		return;
	}

	v = __raw_readl(clk->enable_reg);
	if (clk->flags & INVERT_ENABLE)
		v |= (1 << clk->enable_bit);
	else
		v &= ~(1 << clk->enable_bit);
	__raw_writel(v, clk->enable_reg);
	/* No OCP barrier needed here since it is a disable operation */
}

const struct clkops clkops_omap2_dflt_wait = {
	.enable		= omap2_dflt_clk_enable,
	.disable	= omap2_dflt_clk_disable,
	.find_companion	= omap2_clk_dflt_find_companion,
	.find_idlest	= omap2_clk_dflt_find_idlest,
};

const struct clkops clkops_omap2_dflt = {
	.enable		= omap2_dflt_clk_enable,
	.disable	= omap2_dflt_clk_disable,
};

/**
 * omap2_clk_disable - disable a clock, if the system is not using it
 * @clk: struct clk * to disable
 *
 * Decrements the usecount on struct clk @clk.  If there are no users
 * left, call the clkops-specific clock disable function to disable it
 * in hardware.  If the clock is part of a clockdomain (which they all
 * should be), request that the clockdomain be disabled.  (It too has
 * a usecount, and so will not be disabled in the hardware until it no
 * longer has any users.)  If the clock has a parent clock (most of
 * them do), then call ourselves, recursing on the parent clock.  This
 * can cause an entire branch of the clock tree to be powered off by
 * simply disabling one clock.  Intended to be called with the
 * clockfw_lock spinlock held.  No return value.
 */
void omap2_clk_disable(struct clk *clk)
{
	if (clk->usecount == 0) {
		WARN(1, "clock: %s: omap2_clk_disable() called, but usecount already 0?",
		     clk->name);
		return;
	}

	pr_debug("clock: %s: decrementing usecount\n", clk->name);

	clk->usecount--;

	if (clk->usecount > 0)
		return;

	pr_debug("clock: %s: disabling in hardware\n", clk->name);

	if (clk->ops && clk->ops->disable) {
		trace_clock_disable(clk->name, 0, smp_processor_id());
		clk->ops->disable(clk);
	}

	if (clkdm_control && clk->clkdm)
		clkdm_clk_disable(clk->clkdm, clk);

	if (clk->parent)
		omap2_clk_disable(clk->parent);
}

/**
 * omap2_clk_enable - request that the system enable a clock
 * @clk: struct clk * to enable
 *
 * Increments the usecount on struct clk @clk.  If there were no users
 * previously, then recurse up the clock tree, enabling all of the
 * clock's parents and all of the parent clockdomains, and finally,
 * enabling @clk's clockdomain, and @clk itself.  Intended to be
 * called with the clockfw_lock spinlock held.  Returns 0 upon success
 * or a negative error code upon failure.
 */
int omap2_clk_enable(struct clk *clk)
{
	int ret;

	pr_debug("clock: %s: incrementing usecount\n", clk->name);

	clk->usecount++;

	if (clk->usecount > 1)
		return 0;

	pr_debug("clock: %s: enabling in hardware\n", clk->name);

	if (clk->parent) {
		ret = omap2_clk_enable(clk->parent);
		if (ret) {
			WARN(1, "clock: %s: could not enable parent %s: %d\n",
			     clk->name, clk->parent->name, ret);
			goto oce_err1;
		}
	}

	if (clkdm_control && clk->clkdm) {
		ret = clkdm_clk_enable(clk->clkdm, clk);
		if (ret) {
			WARN(1, "clock: %s: could not enable clockdomain %s: %d\n",
			     clk->name, clk->clkdm->name, ret);
			goto oce_err2;
		}
	}

	if (clk->ops && clk->ops->enable) {
		trace_clock_enable(clk->name, 1, smp_processor_id());
		ret = clk->ops->enable(clk);
		if (ret) {
			WARN(1, "clock: %s: could not enable: %d\n",
			     clk->name, ret);
			goto oce_err3;
		}
	}

	return 0;

oce_err3:
	if (clkdm_control && clk->clkdm)
		clkdm_clk_disable(clk->clkdm, clk);
oce_err2:
	if (clk->parent)
		omap2_clk_disable(clk->parent);
oce_err1:
	clk->usecount--;

	return ret;
}

/* Given a clock and a rate apply a clock specific rounding function */
long omap2_clk_round_rate(struct clk *clk, unsigned long rate)
{
	if (clk->round_rate)
		return clk->round_rate(clk, rate);

	return clk->rate;
}

/* Set the clock rate for a clock source */
int omap2_clk_set_rate(struct clk *clk, unsigned long rate)
{
	int ret = -EINVAL;

	pr_debug("clock: set_rate for clock %s to rate %ld\n", clk->name, rate);

	/* dpll_ck, core_ck, virt_prcm_set; plus all clksel clocks */
	if (clk->set_rate) {
		trace_clock_set_rate(clk->name, rate, smp_processor_id());
		ret = clk->set_rate(clk, rate);
	}

	return ret;
}

int omap2_clk_set_parent(struct clk *clk, struct clk *new_parent)
{
	if (!clk->clksel)
		return -EINVAL;

	if (clk->parent == new_parent)
		return 0;

	return omap2_clksel_set_parent(clk, new_parent);
}

/*
 * OMAP2+ clock reset and init functions
 */

#ifdef CONFIG_OMAP_RESET_CLOCKS
void omap2_clk_disable_unused(struct clk *clk)
{
	u32 regval32, v;

	v = (clk->flags & INVERT_ENABLE) ? (1 << clk->enable_bit) : 0;

	regval32 = __raw_readl(clk->enable_reg);
	if ((regval32 & (1 << clk->enable_bit)) == v)
		return;

	pr_debug("Disabling unused clock \"%s\"\n", clk->name);
	if (cpu_is_omap34xx()) {
		omap2_clk_enable(clk);
		omap2_clk_disable(clk);
	} else {
		clk->ops->disable(clk);
	}
	if (clk->clkdm != NULL)
		pwrdm_state_switch(clk->clkdm->pwrdm.ptr);
}
#endif
#endif /* CONFIG_COMMON_CLK */

/**
 * omap2_clk_switch_mpurate_at_boot - switch ARM MPU rate by boot-time argument
 * @mpurate_ck_name: clk name of the clock to change rate
 *
 * Change the ARM MPU clock rate to the rate specified on the command
 * line, if one was specified.  @mpurate_ck_name should be
 * "virt_prcm_set" on OMAP2xxx and "dpll1_ck" on OMAP34xx/OMAP36xx.
 * XXX Does not handle voltage scaling - on OMAP2xxx this is currently
 * handled by the virt_prcm_set clock, but this should be handled by
 * the OPP layer.  XXX This is intended to be handled by the OPP layer
 * code in the near future and should be removed from the clock code.
 * Returns -EINVAL if 'mpurate' is zero or if clk_set_rate() rejects
 * the rate, -ENOENT if the struct clk referred to by @mpurate_ck_name
 * cannot be found, or 0 upon success.
 */
int __init omap2_clk_switch_mpurate_at_boot(const char *mpurate_ck_name)
{
	struct clk *mpurate_ck;
	int r;

	if (!mpurate)
		return -EINVAL;

	mpurate_ck = clk_get(NULL, mpurate_ck_name);
	if (WARN(IS_ERR(mpurate_ck), "Failed to get %s.\n", mpurate_ck_name))
		return -ENOENT;

	r = clk_set_rate(mpurate_ck, mpurate);
	if (IS_ERR_VALUE(r)) {
		WARN(1, "clock: %s: unable to set MPU rate to %d: %d\n",
		     mpurate_ck_name, mpurate, r);
		clk_put(mpurate_ck);
		return -EINVAL;
	}

	calibrate_delay();
#ifndef CONFIG_COMMON_CLK
	recalculate_root_clocks();
#endif

	clk_put(mpurate_ck);

	return 0;
}

/**
 * omap2_clk_print_new_rates - print summary of current clock tree rates
 * @hfclkin_ck_name: clk name for the off-chip HF oscillator
 * @core_ck_name: clk name for the on-chip CORE_CLK
 * @mpu_ck_name: clk name for the ARM MPU clock
 *
 * Prints a short message to the console with the HFCLKIN oscillator
 * rate, the rate of the CORE clock, and the rate of the ARM MPU clock.
 * Called by the boot-time MPU rate switching code.  XXX This is intended
 * to be handled by the OPP layer code in the near future and should be
 * removed from the clock code.  No return value.
 */
void __init omap2_clk_print_new_rates(const char *hfclkin_ck_name,
				      const char *core_ck_name,
				      const char *mpu_ck_name)
{
	struct clk *hfclkin_ck, *core_ck, *mpu_ck;
	unsigned long hfclkin_rate;

	mpu_ck = clk_get(NULL, mpu_ck_name);
	if (WARN(IS_ERR(mpu_ck), "clock: failed to get %s.\n", mpu_ck_name))
		return;

	core_ck = clk_get(NULL, core_ck_name);
	if (WARN(IS_ERR(core_ck), "clock: failed to get %s.\n", core_ck_name))
		return;

	hfclkin_ck = clk_get(NULL, hfclkin_ck_name);
	if (WARN(IS_ERR(hfclkin_ck), "Failed to get %s.\n", hfclkin_ck_name))
		return;

	hfclkin_rate = clk_get_rate(hfclkin_ck);

	pr_info("Switched to new clocking rate (Crystal/Core/MPU): %ld.%01ld/%ld/%ld MHz\n",
		(hfclkin_rate / 1000000), ((hfclkin_rate / 100000) % 10),
		(clk_get_rate(core_ck) / 1000000),
		(clk_get_rate(mpu_ck) / 1000000));
}
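
/*
 * For illustration only (the actual numbers depend on the board and the
 * mpurate= argument), the pr_info() above produces a single boot-log
 * line of the form:
 *
 *   Switched to new clocking rate (Crystal/Core/MPU): 26.0/332/600 MHz
 *
 * where the crystal rate gets one fractional digit, derived from
 * (hfclkin_rate / 100000) % 10.
 */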
#ifndef CONFIG_COMMON_CLK
/* Common data */
int clk_enable(struct clk *clk)
{
	unsigned long flags;
	int ret;

	if (clk == NULL || IS_ERR(clk))
		return -EINVAL;

	spin_lock_irqsave(&clockfw_lock, flags);
	ret = omap2_clk_enable(clk);
	spin_unlock_irqrestore(&clockfw_lock, flags);

	return ret;
}
EXPORT_SYMBOL(clk_enable);

void clk_disable(struct clk *clk)
{
	unsigned long flags;

	if (clk == NULL || IS_ERR(clk))
		return;

	spin_lock_irqsave(&clockfw_lock, flags);
	if (clk->usecount == 0) {
		pr_err("Trying to disable clock %s with 0 usecount\n",
		       clk->name);
		WARN_ON(1);
		goto out;
	}

	omap2_clk_disable(clk);

out:
	spin_unlock_irqrestore(&clockfw_lock, flags);
}
EXPORT_SYMBOL(clk_disable);

unsigned long clk_get_rate(struct clk *clk)
{
	unsigned long flags;
	unsigned long ret;

	if (clk == NULL || IS_ERR(clk))
		return 0;

	spin_lock_irqsave(&clockfw_lock, flags);
	ret = clk->rate;
	spin_unlock_irqrestore(&clockfw_lock, flags);

	return ret;
}
EXPORT_SYMBOL(clk_get_rate);

/*
 * Optional clock functions defined in include/linux/clk.h
 */
long clk_round_rate(struct clk *clk, unsigned long rate)
{
	unsigned long flags;
	long ret;

	if (clk == NULL || IS_ERR(clk))
		return 0;

	spin_lock_irqsave(&clockfw_lock, flags);
	ret = omap2_clk_round_rate(clk, rate);
	spin_unlock_irqrestore(&clockfw_lock, flags);

	return ret;
}
EXPORT_SYMBOL(clk_round_rate);

int clk_set_rate(struct clk *clk, unsigned long rate)
{
	unsigned long flags;
	int ret = -EINVAL;

	if (clk == NULL || IS_ERR(clk))
		return ret;

	spin_lock_irqsave(&clockfw_lock, flags);
	ret = omap2_clk_set_rate(clk, rate);
	if (ret == 0)
		propagate_rate(clk);
	spin_unlock_irqrestore(&clockfw_lock, flags);

	return ret;
}
EXPORT_SYMBOL(clk_set_rate);

int clk_set_parent(struct clk *clk, struct clk *parent)
{
	unsigned long flags;
	int ret = -EINVAL;

	if (clk == NULL || IS_ERR(clk) || parent == NULL || IS_ERR(parent))
		return ret;

	spin_lock_irqsave(&clockfw_lock, flags);
	if (clk->usecount == 0) {
		ret = omap2_clk_set_parent(clk, parent);
		if (ret == 0)
			propagate_rate(clk);
	} else {
		ret = -EBUSY;
	}
	spin_unlock_irqrestore(&clockfw_lock, flags);

	return ret;
}
EXPORT_SYMBOL(clk_set_parent);

struct clk *clk_get_parent(struct clk *clk)
{
	return clk->parent;
}
EXPORT_SYMBOL(clk_get_parent);

/*
 * OMAP specific clock functions shared between omap1 and omap2
 */
int __initdata mpurate;

/*
 * By default we use the rate set by the bootloader.
 * You can override this with the mpurate= cmdline option.
 */
static int __init omap_clk_setup(char *str)
{
	get_option(&str, &mpurate);

	if (!mpurate)
		return 1;

	if (mpurate < 1000)
		mpurate *= 1000000;

	return 1;
}
__setup("mpurate=", omap_clk_setup);

/* Used for clocks that always have the same value as the parent clock */
unsigned long followparent_recalc(struct clk *clk)
{
	return clk->parent->rate;
}

/*
 * Used for clocks that have the same value as the parent clock,
 * divided by some factor
 */
unsigned long omap_fixed_divisor_recalc(struct clk *clk)
{
	WARN_ON(!clk->fixed_div);

	return clk->parent->rate / clk->fixed_div;
}

void clk_reparent(struct clk *child, struct clk *parent)
{
	list_del_init(&child->sibling);
	if (parent)
		list_add(&child->sibling, &parent->children);
	child->parent = parent;

	/*
	 * Now do the debugfs renaming to reattach the child
	 * to the proper parent.
	 */
}

/* Propagate rate to children */
void propagate_rate(struct clk *tclk)
{
	struct clk *clkp;

	list_for_each_entry(clkp, &tclk->children, sibling) {
		if (clkp->recalc)
			clkp->rate = clkp->recalc(clkp);
		propagate_rate(clkp);
	}
}
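
/*
 * Illustration of the recursion above (the rates and divider are
 * hypothetical): if a root clock running at 26 MHz changes rate, each
 * child with a .recalc hook is recomputed first (e.g. a fixed_div = 2
 * child becomes 13 MHz), and propagate_rate() then descends into that
 * child's own children, so the whole subtree picks up the new rate in
 * a single depth-first pass.
 */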
static LIST_HEAD(root_clks);

/**
 * recalculate_root_clocks - recalculate and propagate all root clocks
 *
 * Recalculates all root clocks (clocks with no parent), which if the
 * clock's .recalc is set correctly, should also propagate their rates.
 * Called at init.
 */
void recalculate_root_clocks(void)
{
	struct clk *clkp;

	list_for_each_entry(clkp, &root_clks, sibling) {
		if (clkp->recalc)
			clkp->rate = clkp->recalc(clkp);
		propagate_rate(clkp);
	}
}

/**
 * clk_preinit - initialize any fields in the struct clk before clk init
 * @clk: struct clk * to initialize
 *
 * Initialize any struct clk fields needed before normal clk initialization
 * can run.  No return value.
 */
void clk_preinit(struct clk *clk)
{
	INIT_LIST_HEAD(&clk->children);
}

int clk_register(struct clk *clk)
{
	if (clk == NULL || IS_ERR(clk))
		return -EINVAL;

	/*
	 * trap out already registered clocks
	 */
	if (clk->node.next || clk->node.prev)
		return 0;

	mutex_lock(&clocks_mutex);
	if (clk->parent)
		list_add(&clk->sibling, &clk->parent->children);
	else
		list_add(&clk->sibling, &root_clks);

	list_add(&clk->node, &clocks);
	if (clk->init)
		clk->init(clk);
	mutex_unlock(&clocks_mutex);

	return 0;
}
EXPORT_SYMBOL(clk_register);

void clk_unregister(struct clk *clk)
{
	if (clk == NULL || IS_ERR(clk))
		return;

	mutex_lock(&clocks_mutex);
	list_del(&clk->sibling);
	list_del(&clk->node);
	mutex_unlock(&clocks_mutex);
}
EXPORT_SYMBOL(clk_unregister);

void clk_enable_init_clocks(void)
{
	struct clk *clkp;

	list_for_each_entry(clkp, &clocks, node)
		if (clkp->flags & ENABLE_ON_INIT)
			clk_enable(clkp);
}

/**
 * omap_clk_get_by_name - locate OMAP struct clk by its name
 * @name: name of the struct clk to locate
 *
 * Locate an OMAP struct clk by its name.  Assumes that struct clk
 * names are unique.  Returns NULL if not found or a pointer to the
 * struct clk if found.
 */
struct clk *omap_clk_get_by_name(const char *name)
{
	struct clk *c;
	struct clk *ret = NULL;

	mutex_lock(&clocks_mutex);

	list_for_each_entry(c, &clocks, node) {
		if (!strcmp(c->name, name)) {
			ret = c;
			break;
		}
	}

	mutex_unlock(&clocks_mutex);

	return ret;
}

int omap_clk_enable_autoidle_all(void)
{
	struct clk *c;
	unsigned long flags;

	spin_lock_irqsave(&clockfw_lock, flags);

	list_for_each_entry(c, &clocks, node)
		if (c->ops->allow_idle)
			c->ops->allow_idle(c);

	spin_unlock_irqrestore(&clockfw_lock, flags);

	return 0;
}

int omap_clk_disable_autoidle_all(void)
{
	struct clk *c;
	unsigned long flags;

	spin_lock_irqsave(&clockfw_lock, flags);

	list_for_each_entry(c, &clocks, node)
		if (c->ops->deny_idle)
			c->ops->deny_idle(c);

	spin_unlock_irqrestore(&clockfw_lock, flags);

	return 0;
}

/*
 * Low level helpers
 */
static int clkll_enable_null(struct clk *clk)
{
	return 0;
}

static void clkll_disable_null(struct clk *clk)
{
}

const struct clkops clkops_null = {
	.enable		= clkll_enable_null,
	.disable	= clkll_disable_null,
};

/*
 * Dummy clock
 *
 * Used for clock aliases that are needed on some OMAPs, but not others
 */
struct clk dummy_ck = {
	.name	= "dummy",
	.ops	= &clkops_null,
};

#ifdef CONFIG_OMAP_RESET_CLOCKS
/*
 * Disable any unused clocks left on by the bootloader
 */
static int __init clk_disable_unused(void)
{
	struct clk *ck;
	unsigned long flags;

	pr_info("clock: disabling unused clocks to save power\n");

	spin_lock_irqsave(&clockfw_lock, flags);

	list_for_each_entry(ck, &clocks, node) {
		if (ck->ops == &clkops_null)
			continue;

		if (ck->usecount > 0 || !ck->enable_reg)
			continue;

		omap2_clk_disable_unused(ck);
	}

	spin_unlock_irqrestore(&clockfw_lock, flags);

	return 0;
}
late_initcall(clk_disable_unused);
late_initcall(omap_clk_enable_autoidle_all);
#endif

#if defined(CONFIG_PM_DEBUG) && defined(CONFIG_DEBUG_FS)
/*
 * debugfs support to trace clock tree hierarchy and attributes
 */

#include <linux/debugfs.h>
#include <linux/seq_file.h>

static struct dentry *clk_debugfs_root;

static int clk_dbg_show_summary(struct seq_file *s, void *unused)
{
	struct clk *c;
	struct clk *pa;

	mutex_lock(&clocks_mutex);
	seq_printf(s, "%-30s %-30s %-10s %s\n",
		   "clock-name", "parent-name", "rate", "use-count");

	list_for_each_entry(c, &clocks, node) {
		pa = c->parent;
		seq_printf(s, "%-30s %-30s %-10lu %d\n",
			   c->name, pa ? pa->name : "none", c->rate,
			   c->usecount);
	}
	mutex_unlock(&clocks_mutex);

	return 0;
}

static int clk_dbg_open(struct inode *inode, struct file *file)
{
	return single_open(file, clk_dbg_show_summary, inode->i_private);
}

static const struct file_operations debug_clock_fops = {
	.open		= clk_dbg_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int clk_debugfs_register_one(struct clk *c)
{
	int err;
	struct dentry *d;
	struct clk *pa = c->parent;

	d = debugfs_create_dir(c->name, pa ? pa->dent : clk_debugfs_root);
	if (!d)
		return -ENOMEM;
	c->dent = d;

	d = debugfs_create_u8("usecount", S_IRUGO, c->dent, (u8 *)&c->usecount);
	if (!d) {
		err = -ENOMEM;
		goto err_out;
	}
	d = debugfs_create_u32("rate", S_IRUGO, c->dent, (u32 *)&c->rate);
	if (!d) {
		err = -ENOMEM;
		goto err_out;
	}
	d = debugfs_create_x32("flags", S_IRUGO, c->dent, (u32 *)&c->flags);
	if (!d) {
		err = -ENOMEM;
		goto err_out;
	}
	return 0;

err_out:
	debugfs_remove_recursive(c->dent);
	return err;
}

static int clk_debugfs_register(struct clk *c)
{
	int err;
	struct clk *pa = c->parent;

	if (pa && !pa->dent) {
		err = clk_debugfs_register(pa);
		if (err)
			return err;
	}

	if (!c->dent) {
		err = clk_debugfs_register_one(c);
		if (err)
			return err;
	}
	return 0;
}

static int __init clk_debugfs_init(void)
{
	struct clk *c;
	struct dentry *d;
	int err;

	d = debugfs_create_dir("clock", NULL);
	if (!d)
		return -ENOMEM;
	clk_debugfs_root = d;

	list_for_each_entry(c, &clocks, node) {
		err = clk_debugfs_register(c);
		if (err)
			goto err_out;
	}

	d = debugfs_create_file("summary", S_IRUGO,
				d, NULL, &debug_clock_fops);
	if (!d)
		return -ENOMEM;

	return 0;

err_out:
	debugfs_remove_recursive(clk_debugfs_root);
	return err;
}
late_initcall(clk_debugfs_init);
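
/*
 * The directory layout created above, assuming debugfs is mounted at
 * the conventional /sys/kernel/debug, looks roughly like this (the
 * clock names are placeholders):
 *
 *   /sys/kernel/debug/clock/summary
 *   /sys/kernel/debug/clock/<root clk>/{usecount,rate,flags}
 *   /sys/kernel/debug/clock/<root clk>/<child clk>/{usecount,rate,flags}
 *
 * i.e. each clock gets a directory nested under its parent's directory,
 * mirroring the clock tree.
 */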
#endif /* defined(CONFIG_PM_DEBUG) && defined(CONFIG_DEBUG_FS) */
#endif /* CONFIG_COMMON_CLK */