/*
 * Clock and PLL control for DaVinci devices
 *
 * Copyright (C) 2006-2007 Texas Instruments.
 * Copyright (C) 2008-2009 Deep Root Systems, LLC
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 */
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/clk.h>
#include <linux/err.h>
#include <linux/mutex.h>
#include <linux/io.h>
#include <linux/delay.h>

#include <mach/hardware.h>
#include <mach/clock.h>
#include <mach/psc.h>
#include <mach/cputype.h>

#include "clock.h"
  26. static LIST_HEAD(clocks);
  27. static DEFINE_MUTEX(clocks_mutex);
  28. static DEFINE_SPINLOCK(clockfw_lock);
  29. static void __clk_enable(struct clk *clk)
  30. {
  31. if (clk->parent)
  32. __clk_enable(clk->parent);
  33. if (clk->usecount++ == 0 && (clk->flags & CLK_PSC))
  34. davinci_psc_config(clk->domain, clk->gpsc, clk->lpsc,
  35. true, clk->flags);
  36. }
  37. static void __clk_disable(struct clk *clk)
  38. {
  39. if (WARN_ON(clk->usecount == 0))
  40. return;
  41. if (--clk->usecount == 0 && !(clk->flags & CLK_PLL) &&
  42. (clk->flags & CLK_PSC))
  43. davinci_psc_config(clk->domain, clk->gpsc, clk->lpsc,
  44. false, clk->flags);
  45. if (clk->parent)
  46. __clk_disable(clk->parent);
  47. }
  48. int davinci_clk_reset(struct clk *clk, bool reset)
  49. {
  50. unsigned long flags;
  51. if (clk == NULL || IS_ERR(clk))
  52. return -EINVAL;
  53. spin_lock_irqsave(&clockfw_lock, flags);
  54. if (clk->flags & CLK_PSC)
  55. davinci_psc_reset(clk->gpsc, clk->lpsc, reset);
  56. spin_unlock_irqrestore(&clockfw_lock, flags);
  57. return 0;
  58. }
  59. EXPORT_SYMBOL(davinci_clk_reset);
  60. int davinci_clk_reset_assert(struct clk *clk)
  61. {
  62. if (clk == NULL || IS_ERR(clk) || !clk->reset)
  63. return -EINVAL;
  64. return clk->reset(clk, true);
  65. }
  66. EXPORT_SYMBOL(davinci_clk_reset_assert);
  67. int davinci_clk_reset_deassert(struct clk *clk)
  68. {
  69. if (clk == NULL || IS_ERR(clk) || !clk->reset)
  70. return -EINVAL;
  71. return clk->reset(clk, false);
  72. }
  73. EXPORT_SYMBOL(davinci_clk_reset_deassert);
  74. int clk_enable(struct clk *clk)
  75. {
  76. unsigned long flags;
  77. if (clk == NULL || IS_ERR(clk))
  78. return -EINVAL;
  79. spin_lock_irqsave(&clockfw_lock, flags);
  80. __clk_enable(clk);
  81. spin_unlock_irqrestore(&clockfw_lock, flags);
  82. return 0;
  83. }
  84. EXPORT_SYMBOL(clk_enable);
  85. void clk_disable(struct clk *clk)
  86. {
  87. unsigned long flags;
  88. if (clk == NULL || IS_ERR(clk))
  89. return;
  90. spin_lock_irqsave(&clockfw_lock, flags);
  91. __clk_disable(clk);
  92. spin_unlock_irqrestore(&clockfw_lock, flags);
  93. }
  94. EXPORT_SYMBOL(clk_disable);
  95. unsigned long clk_get_rate(struct clk *clk)
  96. {
  97. if (clk == NULL || IS_ERR(clk))
  98. return -EINVAL;
  99. return clk->rate;
  100. }
  101. EXPORT_SYMBOL(clk_get_rate);
  102. long clk_round_rate(struct clk *clk, unsigned long rate)
  103. {
  104. if (clk == NULL || IS_ERR(clk))
  105. return -EINVAL;
  106. if (clk->round_rate)
  107. return clk->round_rate(clk, rate);
  108. return clk->rate;
  109. }
  110. EXPORT_SYMBOL(clk_round_rate);
  111. /* Propagate rate to children */
  112. static void propagate_rate(struct clk *root)
  113. {
  114. struct clk *clk;
  115. list_for_each_entry(clk, &root->children, childnode) {
  116. if (clk->recalc)
  117. clk->rate = clk->recalc(clk);
  118. propagate_rate(clk);
  119. }
  120. }
  121. int clk_set_rate(struct clk *clk, unsigned long rate)
  122. {
  123. unsigned long flags;
  124. int ret = -EINVAL;
  125. if (clk == NULL || IS_ERR(clk))
  126. return ret;
  127. if (clk->set_rate)
  128. ret = clk->set_rate(clk, rate);
  129. spin_lock_irqsave(&clockfw_lock, flags);
  130. if (ret == 0) {
  131. if (clk->recalc)
  132. clk->rate = clk->recalc(clk);
  133. propagate_rate(clk);
  134. }
  135. spin_unlock_irqrestore(&clockfw_lock, flags);
  136. return ret;
  137. }
  138. EXPORT_SYMBOL(clk_set_rate);
  139. int clk_set_parent(struct clk *clk, struct clk *parent)
  140. {
  141. unsigned long flags;
  142. if (clk == NULL || IS_ERR(clk))
  143. return -EINVAL;
  144. /* Cannot change parent on enabled clock */
  145. if (WARN_ON(clk->usecount))
  146. return -EINVAL;
  147. mutex_lock(&clocks_mutex);
  148. clk->parent = parent;
  149. list_del_init(&clk->childnode);
  150. list_add(&clk->childnode, &clk->parent->children);
  151. mutex_unlock(&clocks_mutex);
  152. spin_lock_irqsave(&clockfw_lock, flags);
  153. if (clk->recalc)
  154. clk->rate = clk->recalc(clk);
  155. propagate_rate(clk);
  156. spin_unlock_irqrestore(&clockfw_lock, flags);
  157. return 0;
  158. }
  159. EXPORT_SYMBOL(clk_set_parent);
  160. int clk_register(struct clk *clk)
  161. {
  162. if (clk == NULL || IS_ERR(clk))
  163. return -EINVAL;
  164. if (WARN(clk->parent && !clk->parent->rate,
  165. "CLK: %s parent %s has no rate!\n",
  166. clk->name, clk->parent->name))
  167. return -EINVAL;
  168. INIT_LIST_HEAD(&clk->children);
  169. mutex_lock(&clocks_mutex);
  170. list_add_tail(&clk->node, &clocks);
  171. if (clk->parent)
  172. list_add_tail(&clk->childnode, &clk->parent->children);
  173. mutex_unlock(&clocks_mutex);
  174. /* If rate is already set, use it */
  175. if (clk->rate)
  176. return 0;
  177. /* Else, see if there is a way to calculate it */
  178. if (clk->recalc)
  179. clk->rate = clk->recalc(clk);
  180. /* Otherwise, default to parent rate */
  181. else if (clk->parent)
  182. clk->rate = clk->parent->rate;
  183. return 0;
  184. }
  185. EXPORT_SYMBOL(clk_register);
  186. void clk_unregister(struct clk *clk)
  187. {
  188. if (clk == NULL || IS_ERR(clk))
  189. return;
  190. mutex_lock(&clocks_mutex);
  191. list_del(&clk->node);
  192. list_del(&clk->childnode);
  193. mutex_unlock(&clocks_mutex);
  194. }
  195. EXPORT_SYMBOL(clk_unregister);
  196. #ifdef CONFIG_DAVINCI_RESET_CLOCKS
  197. /*
  198. * Disable any unused clocks left on by the bootloader
  199. */
  200. int __init davinci_clk_disable_unused(void)
  201. {
  202. struct clk *ck;
  203. spin_lock_irq(&clockfw_lock);
  204. list_for_each_entry(ck, &clocks, node) {
  205. if (ck->usecount > 0)
  206. continue;
  207. if (!(ck->flags & CLK_PSC))
  208. continue;
  209. /* ignore if in Disabled or SwRstDisable states */
  210. if (!davinci_psc_is_clk_active(ck->gpsc, ck->lpsc))
  211. continue;
  212. pr_debug("Clocks: disable unused %s\n", ck->name);
  213. davinci_psc_config(ck->domain, ck->gpsc, ck->lpsc,
  214. false, ck->flags);
  215. }
  216. spin_unlock_irq(&clockfw_lock);
  217. return 0;
  218. }
  219. #endif
  220. static unsigned long clk_sysclk_recalc(struct clk *clk)
  221. {
  222. u32 v, plldiv;
  223. struct pll_data *pll;
  224. unsigned long rate = clk->rate;
  225. /* If this is the PLL base clock, no more calculations needed */
  226. if (clk->pll_data)
  227. return rate;
  228. if (WARN_ON(!clk->parent))
  229. return rate;
  230. rate = clk->parent->rate;
  231. /* Otherwise, the parent must be a PLL */
  232. if (WARN_ON(!clk->parent->pll_data))
  233. return rate;
  234. pll = clk->parent->pll_data;
  235. /* If pre-PLL, source clock is before the multiplier and divider(s) */
  236. if (clk->flags & PRE_PLL)
  237. rate = pll->input_rate;
  238. if (!clk->div_reg)
  239. return rate;
  240. v = __raw_readl(pll->base + clk->div_reg);
  241. if (v & PLLDIV_EN) {
  242. plldiv = (v & pll->div_ratio_mask) + 1;
  243. if (plldiv)
  244. rate /= plldiv;
  245. }
  246. return rate;
  247. }
  248. int davinci_set_sysclk_rate(struct clk *clk, unsigned long rate)
  249. {
  250. unsigned v;
  251. struct pll_data *pll;
  252. unsigned long input;
  253. unsigned ratio = 0;
  254. /* If this is the PLL base clock, wrong function to call */
  255. if (clk->pll_data)
  256. return -EINVAL;
  257. /* There must be a parent... */
  258. if (WARN_ON(!clk->parent))
  259. return -EINVAL;
  260. /* ... the parent must be a PLL... */
  261. if (WARN_ON(!clk->parent->pll_data))
  262. return -EINVAL;
  263. /* ... and this clock must have a divider. */
  264. if (WARN_ON(!clk->div_reg))
  265. return -EINVAL;
  266. pll = clk->parent->pll_data;
  267. input = clk->parent->rate;
  268. /* If pre-PLL, source clock is before the multiplier and divider(s) */
  269. if (clk->flags & PRE_PLL)
  270. input = pll->input_rate;
  271. if (input > rate) {
  272. /*
  273. * Can afford to provide an output little higher than requested
  274. * only if maximum rate supported by hardware on this sysclk
  275. * is known.
  276. */
  277. if (clk->maxrate) {
  278. ratio = DIV_ROUND_CLOSEST(input, rate);
  279. if (input / ratio > clk->maxrate)
  280. ratio = 0;
  281. }
  282. if (ratio == 0)
  283. ratio = DIV_ROUND_UP(input, rate);
  284. ratio--;
  285. }
  286. if (ratio > pll->div_ratio_mask)
  287. return -EINVAL;
  288. do {
  289. v = __raw_readl(pll->base + PLLSTAT);
  290. } while (v & PLLSTAT_GOSTAT);
  291. v = __raw_readl(pll->base + clk->div_reg);
  292. v &= ~pll->div_ratio_mask;
  293. v |= ratio | PLLDIV_EN;
  294. __raw_writel(v, pll->base + clk->div_reg);
  295. v = __raw_readl(pll->base + PLLCMD);
  296. v |= PLLCMD_GOSET;
  297. __raw_writel(v, pll->base + PLLCMD);
  298. do {
  299. v = __raw_readl(pll->base + PLLSTAT);
  300. } while (v & PLLSTAT_GOSTAT);
  301. return 0;
  302. }
  303. EXPORT_SYMBOL(davinci_set_sysclk_rate);
  304. static unsigned long clk_leafclk_recalc(struct clk *clk)
  305. {
  306. if (WARN_ON(!clk->parent))
  307. return clk->rate;
  308. return clk->parent->rate;
  309. }
  310. int davinci_simple_set_rate(struct clk *clk, unsigned long rate)
  311. {
  312. clk->rate = rate;
  313. return 0;
  314. }
  315. static unsigned long clk_pllclk_recalc(struct clk *clk)
  316. {
  317. u32 ctrl, mult = 1, prediv = 1, postdiv = 1;
  318. u8 bypass;
  319. struct pll_data *pll = clk->pll_data;
  320. unsigned long rate = clk->rate;
  321. ctrl = __raw_readl(pll->base + PLLCTL);
  322. rate = pll->input_rate = clk->parent->rate;
  323. if (ctrl & PLLCTL_PLLEN) {
  324. bypass = 0;
  325. mult = __raw_readl(pll->base + PLLM);
  326. if (cpu_is_davinci_dm365())
  327. mult = 2 * (mult & PLLM_PLLM_MASK);
  328. else
  329. mult = (mult & PLLM_PLLM_MASK) + 1;
  330. } else
  331. bypass = 1;
  332. if (pll->flags & PLL_HAS_PREDIV) {
  333. prediv = __raw_readl(pll->base + PREDIV);
  334. if (prediv & PLLDIV_EN)
  335. prediv = (prediv & pll->div_ratio_mask) + 1;
  336. else
  337. prediv = 1;
  338. }
  339. /* pre-divider is fixed, but (some?) chips won't report that */
  340. if (cpu_is_davinci_dm355() && pll->num == 1)
  341. prediv = 8;
  342. if (pll->flags & PLL_HAS_POSTDIV) {
  343. postdiv = __raw_readl(pll->base + POSTDIV);
  344. if (postdiv & PLLDIV_EN)
  345. postdiv = (postdiv & pll->div_ratio_mask) + 1;
  346. else
  347. postdiv = 1;
  348. }
  349. if (!bypass) {
  350. rate /= prediv;
  351. rate *= mult;
  352. rate /= postdiv;
  353. }
  354. pr_debug("PLL%d: input = %lu MHz [ ",
  355. pll->num, clk->parent->rate / 1000000);
  356. if (bypass)
  357. pr_debug("bypass ");
  358. if (prediv > 1)
  359. pr_debug("/ %d ", prediv);
  360. if (mult > 1)
  361. pr_debug("* %d ", mult);
  362. if (postdiv > 1)
  363. pr_debug("/ %d ", postdiv);
  364. pr_debug("] --> %lu MHz output.\n", rate / 1000000);
  365. return rate;
  366. }
  367. /**
  368. * davinci_set_pllrate - set the output rate of a given PLL.
  369. *
  370. * Note: Currently tested to work with OMAP-L138 only.
  371. *
  372. * @pll: pll whose rate needs to be changed.
  373. * @prediv: The pre divider value. Passing 0 disables the pre-divider.
  374. * @pllm: The multiplier value. Passing 0 leads to multiply-by-one.
  375. * @postdiv: The post divider value. Passing 0 disables the post-divider.
  376. */
  377. int davinci_set_pllrate(struct pll_data *pll, unsigned int prediv,
  378. unsigned int mult, unsigned int postdiv)
  379. {
  380. u32 ctrl;
  381. unsigned int locktime;
  382. unsigned long flags;
  383. if (pll->base == NULL)
  384. return -EINVAL;
  385. /*
  386. * PLL lock time required per OMAP-L138 datasheet is
  387. * (2000 * prediv)/sqrt(pllm) OSCIN cycles. We approximate sqrt(pllm)
  388. * as 4 and OSCIN cycle as 25 MHz.
  389. */
  390. if (prediv) {
  391. locktime = ((2000 * prediv) / 100);
  392. prediv = (prediv - 1) | PLLDIV_EN;
  393. } else {
  394. locktime = PLL_LOCK_TIME;
  395. }
  396. if (postdiv)
  397. postdiv = (postdiv - 1) | PLLDIV_EN;
  398. if (mult)
  399. mult = mult - 1;
  400. /* Protect against simultaneous calls to PLL setting seqeunce */
  401. spin_lock_irqsave(&clockfw_lock, flags);
  402. ctrl = __raw_readl(pll->base + PLLCTL);
  403. /* Switch the PLL to bypass mode */
  404. ctrl &= ~(PLLCTL_PLLENSRC | PLLCTL_PLLEN);
  405. __raw_writel(ctrl, pll->base + PLLCTL);
  406. udelay(PLL_BYPASS_TIME);
  407. /* Reset and enable PLL */
  408. ctrl &= ~(PLLCTL_PLLRST | PLLCTL_PLLDIS);
  409. __raw_writel(ctrl, pll->base + PLLCTL);
  410. if (pll->flags & PLL_HAS_PREDIV)
  411. __raw_writel(prediv, pll->base + PREDIV);
  412. __raw_writel(mult, pll->base + PLLM);
  413. if (pll->flags & PLL_HAS_POSTDIV)
  414. __raw_writel(postdiv, pll->base + POSTDIV);
  415. udelay(PLL_RESET_TIME);
  416. /* Bring PLL out of reset */
  417. ctrl |= PLLCTL_PLLRST;
  418. __raw_writel(ctrl, pll->base + PLLCTL);
  419. udelay(locktime);
  420. /* Remove PLL from bypass mode */
  421. ctrl |= PLLCTL_PLLEN;
  422. __raw_writel(ctrl, pll->base + PLLCTL);
  423. spin_unlock_irqrestore(&clockfw_lock, flags);
  424. return 0;
  425. }
  426. EXPORT_SYMBOL(davinci_set_pllrate);
  427. /**
  428. * davinci_set_refclk_rate() - Set the reference clock rate
  429. * @rate: The new rate.
  430. *
  431. * Sets the reference clock rate to a given value. This will most likely
  432. * result in the entire clock tree getting updated.
  433. *
  434. * This is used to support boards which use a reference clock different
  435. * than that used by default in <soc>.c file. The reference clock rate
  436. * should be updated early in the boot process; ideally soon after the
  437. * clock tree has been initialized once with the default reference clock
  438. * rate (davinci_common_init()).
  439. *
  440. * Returns 0 on success, error otherwise.
  441. */
  442. int davinci_set_refclk_rate(unsigned long rate)
  443. {
  444. struct clk *refclk;
  445. refclk = clk_get(NULL, "ref");
  446. if (IS_ERR(refclk)) {
  447. pr_err("%s: failed to get reference clock.\n", __func__);
  448. return PTR_ERR(refclk);
  449. }
  450. clk_set_rate(refclk, rate);
  451. clk_put(refclk);
  452. return 0;
  453. }
  454. int __init davinci_clk_init(struct clk_lookup *clocks)
  455. {
  456. struct clk_lookup *c;
  457. struct clk *clk;
  458. size_t num_clocks = 0;
  459. for (c = clocks; c->clk; c++) {
  460. clk = c->clk;
  461. if (!clk->recalc) {
  462. /* Check if clock is a PLL */
  463. if (clk->pll_data)
  464. clk->recalc = clk_pllclk_recalc;
  465. /* Else, if it is a PLL-derived clock */
  466. else if (clk->flags & CLK_PLL)
  467. clk->recalc = clk_sysclk_recalc;
  468. /* Otherwise, it is a leaf clock (PSC clock) */
  469. else if (clk->parent)
  470. clk->recalc = clk_leafclk_recalc;
  471. }
  472. if (clk->pll_data) {
  473. struct pll_data *pll = clk->pll_data;
  474. if (!pll->div_ratio_mask)
  475. pll->div_ratio_mask = PLLDIV_RATIO_MASK;
  476. if (pll->phys_base && !pll->base) {
  477. pll->base = ioremap(pll->phys_base, SZ_4K);
  478. WARN_ON(!pll->base);
  479. }
  480. }
  481. if (clk->recalc)
  482. clk->rate = clk->recalc(clk);
  483. if (clk->lpsc)
  484. clk->flags |= CLK_PSC;
  485. if (clk->flags & PSC_LRST)
  486. clk->reset = davinci_clk_reset;
  487. clk_register(clk);
  488. num_clocks++;
  489. /* Turn on clocks that Linux doesn't otherwise manage */
  490. if (clk->flags & ALWAYS_ENABLED)
  491. clk_enable(clk);
  492. }
  493. clkdev_add_table(clocks, num_clocks);
  494. return 0;
  495. }
  496. #ifdef CONFIG_DEBUG_FS
  497. #include <linux/debugfs.h>
  498. #include <linux/seq_file.h>
  499. #define CLKNAME_MAX 10 /* longest clock name */
  500. #define NEST_DELTA 2
  501. #define NEST_MAX 4
  502. static void
  503. dump_clock(struct seq_file *s, unsigned nest, struct clk *parent)
  504. {
  505. char *state;
  506. char buf[CLKNAME_MAX + NEST_DELTA * NEST_MAX];
  507. struct clk *clk;
  508. unsigned i;
  509. if (parent->flags & CLK_PLL)
  510. state = "pll";
  511. else if (parent->flags & CLK_PSC)
  512. state = "psc";
  513. else
  514. state = "";
  515. /* <nest spaces> name <pad to end> */
  516. memset(buf, ' ', sizeof(buf) - 1);
  517. buf[sizeof(buf) - 1] = 0;
  518. i = strlen(parent->name);
  519. memcpy(buf + nest, parent->name,
  520. min(i, (unsigned)(sizeof(buf) - 1 - nest)));
  521. seq_printf(s, "%s users=%2d %-3s %9ld Hz\n",
  522. buf, parent->usecount, state, clk_get_rate(parent));
  523. /* REVISIT show device associations too */
  524. /* cost is now small, but not linear... */
  525. list_for_each_entry(clk, &parent->children, childnode) {
  526. dump_clock(s, nest + NEST_DELTA, clk);
  527. }
  528. }
  529. static int davinci_ck_show(struct seq_file *m, void *v)
  530. {
  531. struct clk *clk;
  532. /*
  533. * Show clock tree; We trust nonzero usecounts equate to PSC enables...
  534. */
  535. mutex_lock(&clocks_mutex);
  536. list_for_each_entry(clk, &clocks, node)
  537. if (!clk->parent)
  538. dump_clock(m, 0, clk);
  539. mutex_unlock(&clocks_mutex);
  540. return 0;
  541. }
  542. static int davinci_ck_open(struct inode *inode, struct file *file)
  543. {
  544. return single_open(file, davinci_ck_show, NULL);
  545. }
  546. static const struct file_operations davinci_ck_operations = {
  547. .open = davinci_ck_open,
  548. .read = seq_read,
  549. .llseek = seq_lseek,
  550. .release = single_release,
  551. };
  552. static int __init davinci_clk_debugfs_init(void)
  553. {
  554. debugfs_create_file("davinci_clocks", S_IFREG | S_IRUGO, NULL, NULL,
  555. &davinci_ck_operations);
  556. return 0;
  557. }
  558. device_initcall(davinci_clk_debugfs_init);
  559. #endif /* CONFIG_DEBUG_FS */