cpg.c 8.5 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349350351352353354355356357358359360361362363
/*
 * Helper routines for SuperH Clock Pulse Generator blocks (CPG).
 *
 * Copyright (C) 2010 Magnus Damm
 * Copyright (C) 2010 - 2012 Paul Mundt
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 */
#include <linux/clk.h>
#include <linux/compiler.h>
#include <linux/slab.h>
#include <linux/io.h>
#include <linux/sh_clk.h>

#define CPG_CKSTP_BIT	BIT(8)
  17. static unsigned int sh_clk_read(struct clk *clk)
  18. {
  19. if (clk->flags & CLK_ENABLE_REG_8BIT)
  20. return ioread8(clk->mapped_reg);
  21. else if (clk->flags & CLK_ENABLE_REG_16BIT)
  22. return ioread16(clk->mapped_reg);
  23. return ioread32(clk->mapped_reg);
  24. }
  25. static void sh_clk_write(int value, struct clk *clk)
  26. {
  27. if (clk->flags & CLK_ENABLE_REG_8BIT)
  28. iowrite8(value, clk->mapped_reg);
  29. else if (clk->flags & CLK_ENABLE_REG_16BIT)
  30. iowrite16(value, clk->mapped_reg);
  31. else
  32. iowrite32(value, clk->mapped_reg);
  33. }
  34. static int sh_clk_mstp_enable(struct clk *clk)
  35. {
  36. sh_clk_write(sh_clk_read(clk) & ~(1 << clk->enable_bit), clk);
  37. return 0;
  38. }
  39. static void sh_clk_mstp_disable(struct clk *clk)
  40. {
  41. sh_clk_write(sh_clk_read(clk) | (1 << clk->enable_bit), clk);
  42. }
/* MSTP gate clocks: simple on/off, rate always follows the parent. */
static struct sh_clk_ops sh_clk_mstp_clk_ops = {
	.enable		= sh_clk_mstp_enable,
	.disable	= sh_clk_mstp_disable,
	.recalc		= followparent_recalc,
};
  48. int __init sh_clk_mstp_register(struct clk *clks, int nr)
  49. {
  50. struct clk *clkp;
  51. int ret = 0;
  52. int k;
  53. for (k = 0; !ret && (k < nr); k++) {
  54. clkp = clks + k;
  55. clkp->ops = &sh_clk_mstp_clk_ops;
  56. ret |= clk_register(clkp);
  57. }
  58. return ret;
  59. }
  60. /*
  61. * Div/mult table lookup helpers
  62. */
  63. static inline struct clk_div_table *clk_to_div_table(struct clk *clk)
  64. {
  65. return clk->priv;
  66. }
  67. static inline struct clk_div_mult_table *clk_to_div_mult_table(struct clk *clk)
  68. {
  69. return clk_to_div_table(clk)->div_mult_table;
  70. }
  71. /*
  72. * Common div ops
  73. */
  74. static long sh_clk_div_round_rate(struct clk *clk, unsigned long rate)
  75. {
  76. return clk_rate_table_round(clk, clk->freq_table, rate);
  77. }
  78. static unsigned long sh_clk_div_recalc(struct clk *clk)
  79. {
  80. struct clk_div_mult_table *table = clk_to_div_mult_table(clk);
  81. unsigned int idx;
  82. clk_rate_table_build(clk, clk->freq_table, table->nr_divisors,
  83. table, clk->arch_flags ? &clk->arch_flags : NULL);
  84. idx = (sh_clk_read(clk) >> clk->enable_bit) & clk->div_mask;
  85. return clk->freq_table[idx].frequency;
  86. }
  87. static int sh_clk_div_set_rate(struct clk *clk, unsigned long rate)
  88. {
  89. struct clk_div_table *dt = clk_to_div_table(clk);
  90. unsigned long value;
  91. int idx;
  92. idx = clk_rate_table_find(clk, clk->freq_table, rate);
  93. if (idx < 0)
  94. return idx;
  95. value = sh_clk_read(clk);
  96. value &= ~(clk->div_mask << clk->enable_bit);
  97. value |= (idx << clk->enable_bit);
  98. sh_clk_write(value, clk);
  99. /* XXX: Should use a post-change notifier */
  100. if (dt->kick)
  101. dt->kick(clk);
  102. return 0;
  103. }
  104. static int sh_clk_div_enable(struct clk *clk)
  105. {
  106. sh_clk_write(sh_clk_read(clk) & ~CPG_CKSTP_BIT, clk);
  107. return 0;
  108. }
  109. static void sh_clk_div_disable(struct clk *clk)
  110. {
  111. unsigned int val;
  112. val = sh_clk_read(clk);
  113. val |= CPG_CKSTP_BIT;
  114. /*
  115. * div6 clocks require the divisor field to be non-zero or the
  116. * above CKSTP toggle silently fails. Ensure that the divisor
  117. * array is reset to its initial state on disable.
  118. */
  119. if (clk->flags & CLK_MASK_DIV_ON_DISABLE)
  120. val |= clk->div_mask;
  121. sh_clk_write(val, clk);
  122. }
/* Div clocks without gating: rate control only. */
static struct sh_clk_ops sh_clk_div_clk_ops = {
	.recalc		= sh_clk_div_recalc,
	.set_rate	= sh_clk_div_set_rate,
	.round_rate	= sh_clk_div_round_rate,
};
/* Div clocks that can additionally be gated through the CKSTP bit. */
static struct sh_clk_ops sh_clk_div_enable_clk_ops = {
	.recalc		= sh_clk_div_recalc,
	.set_rate	= sh_clk_div_set_rate,
	.round_rate	= sh_clk_div_round_rate,
	.enable		= sh_clk_div_enable,
	.disable	= sh_clk_div_disable,
};
  135. static int __init sh_clk_init_parent(struct clk *clk)
  136. {
  137. u32 val;
  138. if (clk->parent)
  139. return 0;
  140. if (!clk->parent_table || !clk->parent_num)
  141. return 0;
  142. if (!clk->src_width) {
  143. pr_err("sh_clk_init_parent: cannot select parent clock\n");
  144. return -EINVAL;
  145. }
  146. val = (sh_clk_read(clk) >> clk->src_shift);
  147. val &= (1 << clk->src_width) - 1;
  148. if (val >= clk->parent_num) {
  149. pr_err("sh_clk_init_parent: parent table size failed\n");
  150. return -EINVAL;
  151. }
  152. clk_reparent(clk, clk->parent_table[val]);
  153. if (!clk->parent) {
  154. pr_err("sh_clk_init_parent: unable to set parent");
  155. return -EINVAL;
  156. }
  157. return 0;
  158. }
  159. static int __init sh_clk_div_register_ops(struct clk *clks, int nr,
  160. struct clk_div_table *table, struct sh_clk_ops *ops)
  161. {
  162. struct clk *clkp;
  163. void *freq_table;
  164. int nr_divs = table->div_mult_table->nr_divisors;
  165. int freq_table_size = sizeof(struct cpufreq_frequency_table);
  166. int ret = 0;
  167. int k;
  168. freq_table_size *= (nr_divs + 1);
  169. freq_table = kzalloc(freq_table_size * nr, GFP_KERNEL);
  170. if (!freq_table) {
  171. pr_err("%s: unable to alloc memory\n", __func__);
  172. return -ENOMEM;
  173. }
  174. for (k = 0; !ret && (k < nr); k++) {
  175. clkp = clks + k;
  176. clkp->ops = ops;
  177. clkp->priv = table;
  178. clkp->freq_table = freq_table + (k * freq_table_size);
  179. clkp->freq_table[nr_divs].frequency = CPUFREQ_TABLE_END;
  180. ret = clk_register(clkp);
  181. if (ret == 0)
  182. ret = sh_clk_init_parent(clkp);
  183. }
  184. return ret;
  185. }
  186. /*
  187. * div6 support
  188. */
/* div6 divisor table: field value n selects divide-by-(n + 1), 1..64. */
static int sh_clk_div6_divisors[64] = {
	1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16,
	17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32,
	33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48,
	49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64
};

/* div6 clocks are pure dividers: no multiplier ratios. */
static struct clk_div_mult_table div6_div_mult_table = {
	.divisors = sh_clk_div6_divisors,
	.nr_divisors = ARRAY_SIZE(sh_clk_div6_divisors),
};

/* div6 blocks use no kick() callback after rate changes. */
static struct clk_div_table sh_clk_div6_table = {
	.div_mult_table = &div6_div_mult_table,
};
  202. static int sh_clk_div6_set_parent(struct clk *clk, struct clk *parent)
  203. {
  204. struct clk_div_mult_table *table = clk_to_div_mult_table(clk);
  205. u32 value;
  206. int ret, i;
  207. if (!clk->parent_table || !clk->parent_num)
  208. return -EINVAL;
  209. /* Search the parent */
  210. for (i = 0; i < clk->parent_num; i++)
  211. if (clk->parent_table[i] == parent)
  212. break;
  213. if (i == clk->parent_num)
  214. return -ENODEV;
  215. ret = clk_reparent(clk, parent);
  216. if (ret < 0)
  217. return ret;
  218. value = sh_clk_read(clk) &
  219. ~(((1 << clk->src_width) - 1) << clk->src_shift);
  220. sh_clk_write(value | (i << clk->src_shift), clk);
  221. /* Rebuild the frequency table */
  222. clk_rate_table_build(clk, clk->freq_table, table->nr_divisors,
  223. table, NULL);
  224. return 0;
  225. }
/* div6 clocks with gating and runtime parent selection. */
static struct sh_clk_ops sh_clk_div6_reparent_clk_ops = {
	.recalc		= sh_clk_div_recalc,
	.round_rate	= sh_clk_div_round_rate,
	.set_rate	= sh_clk_div_set_rate,
	.enable		= sh_clk_div_enable,
	.disable	= sh_clk_div_disable,
	.set_parent	= sh_clk_div6_set_parent,
};
  234. int __init sh_clk_div6_register(struct clk *clks, int nr)
  235. {
  236. return sh_clk_div_register_ops(clks, nr, &sh_clk_div6_table,
  237. &sh_clk_div_enable_clk_ops);
  238. }
  239. int __init sh_clk_div6_reparent_register(struct clk *clks, int nr)
  240. {
  241. return sh_clk_div_register_ops(clks, nr, &sh_clk_div6_table,
  242. &sh_clk_div6_reparent_clk_ops);
  243. }
  244. /*
  245. * div4 support
  246. */
  247. static int sh_clk_div4_set_parent(struct clk *clk, struct clk *parent)
  248. {
  249. struct clk_div_mult_table *table = clk_to_div_mult_table(clk);
  250. u32 value;
  251. int ret;
  252. /* we really need a better way to determine parent index, but for
  253. * now assume internal parent comes with CLK_ENABLE_ON_INIT set,
  254. * no CLK_ENABLE_ON_INIT means external clock...
  255. */
  256. if (parent->flags & CLK_ENABLE_ON_INIT)
  257. value = sh_clk_read(clk) & ~(1 << 7);
  258. else
  259. value = sh_clk_read(clk) | (1 << 7);
  260. ret = clk_reparent(clk, parent);
  261. if (ret < 0)
  262. return ret;
  263. sh_clk_write(value, clk);
  264. /* Rebiuld the frequency table */
  265. clk_rate_table_build(clk, clk->freq_table, table->nr_divisors,
  266. table, &clk->arch_flags);
  267. return 0;
  268. }
/* div4 clocks with gating and internal/external parent selection. */
static struct sh_clk_ops sh_clk_div4_reparent_clk_ops = {
	.recalc		= sh_clk_div_recalc,
	.set_rate	= sh_clk_div_set_rate,
	.round_rate	= sh_clk_div_round_rate,
	.enable		= sh_clk_div_enable,
	.disable	= sh_clk_div_disable,
	.set_parent	= sh_clk_div4_set_parent,
};
  277. int __init sh_clk_div4_register(struct clk *clks, int nr,
  278. struct clk_div4_table *table)
  279. {
  280. return sh_clk_div_register_ops(clks, nr, table, &sh_clk_div_clk_ops);
  281. }
  282. int __init sh_clk_div4_enable_register(struct clk *clks, int nr,
  283. struct clk_div4_table *table)
  284. {
  285. return sh_clk_div_register_ops(clks, nr, table,
  286. &sh_clk_div_enable_clk_ops);
  287. }
  288. int __init sh_clk_div4_reparent_register(struct clk *clks, int nr,
  289. struct clk_div4_table *table)
  290. {
  291. return sh_clk_div_register_ops(clks, nr, table,
  292. &sh_clk_div4_reparent_clk_ops);
  293. }