/*
 * Helper routines for SuperH Clock Pulse Generator blocks (CPG).
 *
 * Copyright (C) 2010 Magnus Damm
 * Copyright (C) 2010 - 2012 Paul Mundt
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 */
  11. #include <linux/clk.h>
  12. #include <linux/compiler.h>
  13. #include <linux/slab.h>
  14. #include <linux/io.h>
  15. #include <linux/sh_clk.h>
  16. static unsigned int sh_clk_read(struct clk *clk)
  17. {
  18. if (clk->flags & CLK_ENABLE_REG_8BIT)
  19. return ioread8(clk->mapped_reg);
  20. else if (clk->flags & CLK_ENABLE_REG_16BIT)
  21. return ioread16(clk->mapped_reg);
  22. return ioread32(clk->mapped_reg);
  23. }
  24. static void sh_clk_write(int value, struct clk *clk)
  25. {
  26. if (clk->flags & CLK_ENABLE_REG_8BIT)
  27. iowrite8(value, clk->mapped_reg);
  28. else if (clk->flags & CLK_ENABLE_REG_16BIT)
  29. iowrite16(value, clk->mapped_reg);
  30. else
  31. iowrite32(value, clk->mapped_reg);
  32. }
  33. static int sh_clk_mstp_enable(struct clk *clk)
  34. {
  35. sh_clk_write(sh_clk_read(clk) & ~(1 << clk->enable_bit), clk);
  36. return 0;
  37. }
  38. static void sh_clk_mstp_disable(struct clk *clk)
  39. {
  40. sh_clk_write(sh_clk_read(clk) | (1 << clk->enable_bit), clk);
  41. }
/*
 * MSTP gate clocks: rate simply follows the parent clock;
 * enable/disable toggle the module-stop bit.
 */
static struct sh_clk_ops sh_clk_mstp_clk_ops = {
	.enable		= sh_clk_mstp_enable,
	.disable	= sh_clk_mstp_disable,
	.recalc		= followparent_recalc,
};
  47. int __init sh_clk_mstp_register(struct clk *clks, int nr)
  48. {
  49. struct clk *clkp;
  50. int ret = 0;
  51. int k;
  52. for (k = 0; !ret && (k < nr); k++) {
  53. clkp = clks + k;
  54. clkp->ops = &sh_clk_mstp_clk_ops;
  55. ret |= clk_register(clkp);
  56. }
  57. return ret;
  58. }
/*
 * Common .round_rate helper for the divider clocks: snap @rate to the
 * nearest entry in the clock's pre-built frequency table.
 */
static long sh_clk_div_round_rate(struct clk *clk, unsigned long rate)
{
	return clk_rate_table_round(clk, clk->freq_table, rate);
}
/*
 * DIV6 clocks divide by 1..64; the 6-bit divider field in the register
 * is the divisor minus one, so entry N of this table is N + 1.
 */
static int sh_clk_div6_divisors[64] = {
	1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16,
	17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32,
	33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48,
	49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64
};

/* Shared divisor table for all DIV6 clocks (no multipliers). */
static struct clk_div_mult_table sh_clk_div6_table = {
	.divisors = sh_clk_div6_divisors,
	.nr_divisors = ARRAY_SIZE(sh_clk_div6_divisors),
};
  73. static unsigned long sh_clk_div6_recalc(struct clk *clk)
  74. {
  75. struct clk_div_mult_table *table = &sh_clk_div6_table;
  76. unsigned int idx;
  77. clk_rate_table_build(clk, clk->freq_table, table->nr_divisors,
  78. table, NULL);
  79. idx = sh_clk_read(clk) & 0x003f;
  80. return clk->freq_table[idx].frequency;
  81. }
  82. static int sh_clk_div6_set_parent(struct clk *clk, struct clk *parent)
  83. {
  84. struct clk_div_mult_table *table = &sh_clk_div6_table;
  85. u32 value;
  86. int ret, i;
  87. if (!clk->parent_table || !clk->parent_num)
  88. return -EINVAL;
  89. /* Search the parent */
  90. for (i = 0; i < clk->parent_num; i++)
  91. if (clk->parent_table[i] == parent)
  92. break;
  93. if (i == clk->parent_num)
  94. return -ENODEV;
  95. ret = clk_reparent(clk, parent);
  96. if (ret < 0)
  97. return ret;
  98. value = sh_clk_read(clk) &
  99. ~(((1 << clk->src_width) - 1) << clk->src_shift);
  100. sh_clk_write(value | (i << clk->src_shift), clk);
  101. /* Rebuild the frequency table */
  102. clk_rate_table_build(clk, clk->freq_table, table->nr_divisors,
  103. table, NULL);
  104. return 0;
  105. }
  106. static int sh_clk_div6_set_rate(struct clk *clk, unsigned long rate)
  107. {
  108. unsigned long value;
  109. int idx;
  110. idx = clk_rate_table_find(clk, clk->freq_table, rate);
  111. if (idx < 0)
  112. return idx;
  113. value = sh_clk_read(clk);
  114. value &= ~0x3f;
  115. value |= idx;
  116. sh_clk_write(value, clk);
  117. return 0;
  118. }
  119. static int sh_clk_div6_enable(struct clk *clk)
  120. {
  121. unsigned long value;
  122. int ret;
  123. ret = sh_clk_div6_set_rate(clk, clk->rate);
  124. if (ret == 0) {
  125. value = sh_clk_read(clk);
  126. value &= ~0x100; /* clear stop bit to enable clock */
  127. sh_clk_write(value, clk);
  128. }
  129. return ret;
  130. }
  131. static void sh_clk_div6_disable(struct clk *clk)
  132. {
  133. unsigned long value;
  134. value = sh_clk_read(clk);
  135. value |= 0x100; /* stop clock */
  136. value |= 0x3f; /* VDIV bits must be non-zero, overwrite divider */
  137. sh_clk_write(value, clk);
  138. }
/* DIV6 clocks with a fixed parent. */
static struct sh_clk_ops sh_clk_div6_clk_ops = {
	.recalc		= sh_clk_div6_recalc,
	.round_rate	= sh_clk_div_round_rate,
	.set_rate	= sh_clk_div6_set_rate,
	.enable		= sh_clk_div6_enable,
	.disable	= sh_clk_div6_disable,
};

/* DIV6 clocks that can additionally switch parents. */
static struct sh_clk_ops sh_clk_div6_reparent_clk_ops = {
	.recalc		= sh_clk_div6_recalc,
	.round_rate	= sh_clk_div_round_rate,
	.set_rate	= sh_clk_div6_set_rate,
	.enable		= sh_clk_div6_enable,
	.disable	= sh_clk_div6_disable,
	.set_parent	= sh_clk_div6_set_parent,
};
  154. static int __init sh_clk_init_parent(struct clk *clk)
  155. {
  156. u32 val;
  157. if (clk->parent)
  158. return 0;
  159. if (!clk->parent_table || !clk->parent_num)
  160. return 0;
  161. if (!clk->src_width) {
  162. pr_err("sh_clk_init_parent: cannot select parent clock\n");
  163. return -EINVAL;
  164. }
  165. val = (sh_clk_read(clk) >> clk->src_shift);
  166. val &= (1 << clk->src_width) - 1;
  167. if (val >= clk->parent_num) {
  168. pr_err("sh_clk_init_parent: parent table size failed\n");
  169. return -EINVAL;
  170. }
  171. clk_reparent(clk, clk->parent_table[val]);
  172. if (!clk->parent) {
  173. pr_err("sh_clk_init_parent: unable to set parent");
  174. return -EINVAL;
  175. }
  176. return 0;
  177. }
/*
 * Register @nr DIV6 clocks, attaching @ops and carving each clock's
 * frequency table out of a single shared allocation.
 *
 * The allocation is never freed: registered clocks keep pointers into
 * it for the lifetime of the system. NOTE(review): if clk_register()
 * or sh_clk_init_parent() fails partway, the allocation (and the
 * already-registered clocks) are left as-is — matches long-standing
 * behavior, callers treat a non-zero return as fatal at init time.
 */
static int __init sh_clk_div6_register_ops(struct clk *clks, int nr,
					   struct sh_clk_ops *ops)
{
	struct clk *clkp;
	void *freq_table;
	int nr_divs = sh_clk_div6_table.nr_divisors;
	int freq_table_size = sizeof(struct cpufreq_frequency_table);
	int ret = 0;
	int k;

	/* One table per clock, each with room for a terminator entry. */
	freq_table_size *= (nr_divs + 1);

	freq_table = kzalloc(freq_table_size * nr, GFP_KERNEL);
	if (!freq_table) {
		pr_err("sh_clk_div6_register: unable to alloc memory\n");
		return -ENOMEM;
	}

	for (k = 0; !ret && (k < nr); k++) {
		clkp = clks + k;

		clkp->ops = ops;
		clkp->freq_table = freq_table + (k * freq_table_size);
		clkp->freq_table[nr_divs].frequency = CPUFREQ_TABLE_END;

		ret = clk_register(clkp);
		if (ret < 0)
			break;

		ret = sh_clk_init_parent(clkp);
	}

	return ret;
}
/* Register DIV6 clocks with a fixed parent. */
int __init sh_clk_div6_register(struct clk *clks, int nr)
{
	return sh_clk_div6_register_ops(clks, nr, &sh_clk_div6_clk_ops);
}

/* Register DIV6 clocks that support runtime parent switching. */
int __init sh_clk_div6_reparent_register(struct clk *clks, int nr)
{
	return sh_clk_div6_register_ops(clks, nr,
					&sh_clk_div6_reparent_clk_ops);
}
  214. static unsigned long sh_clk_div4_recalc(struct clk *clk)
  215. {
  216. struct clk_div4_table *d4t = clk->priv;
  217. struct clk_div_mult_table *table = d4t->div_mult_table;
  218. unsigned int idx;
  219. clk_rate_table_build(clk, clk->freq_table, table->nr_divisors,
  220. table, &clk->arch_flags);
  221. idx = (sh_clk_read(clk) >> clk->enable_bit) & 0x000f;
  222. return clk->freq_table[idx].frequency;
  223. }
/*
 * Select a DIV4 clock's parent by toggling bit 7 of the control
 * register, then rebuild the frequency table for the new parent.
 */
static int sh_clk_div4_set_parent(struct clk *clk, struct clk *parent)
{
	struct clk_div4_table *d4t = clk->priv;
	struct clk_div_mult_table *table = d4t->div_mult_table;
	u32 value;
	int ret;

	/* we really need a better way to determine parent index, but for
	 * now assume internal parent comes with CLK_ENABLE_ON_INIT set,
	 * no CLK_ENABLE_ON_INIT means external clock...
	 */
	if (parent->flags & CLK_ENABLE_ON_INIT)
		value = sh_clk_read(clk) & ~(1 << 7);
	else
		value = sh_clk_read(clk) | (1 << 7);

	ret = clk_reparent(clk, parent);
	if (ret < 0)
		return ret;

	sh_clk_write(value, clk);

	/* Rebuild the frequency table (fixed typo: was "Rebiuld") */
	clk_rate_table_build(clk, clk->freq_table, table->nr_divisors,
			     table, &clk->arch_flags);

	return 0;
}
  247. static int sh_clk_div4_set_rate(struct clk *clk, unsigned long rate)
  248. {
  249. struct clk_div4_table *d4t = clk->priv;
  250. unsigned long value;
  251. int idx = clk_rate_table_find(clk, clk->freq_table, rate);
  252. if (idx < 0)
  253. return idx;
  254. value = sh_clk_read(clk);
  255. value &= ~(0xf << clk->enable_bit);
  256. value |= (idx << clk->enable_bit);
  257. sh_clk_write(value, clk);
  258. if (d4t->kick)
  259. d4t->kick(clk);
  260. return 0;
  261. }
  262. static int sh_clk_div4_enable(struct clk *clk)
  263. {
  264. sh_clk_write(sh_clk_read(clk) & ~(1 << 8), clk);
  265. return 0;
  266. }
  267. static void sh_clk_div4_disable(struct clk *clk)
  268. {
  269. sh_clk_write(sh_clk_read(clk) | (1 << 8), clk);
  270. }
/* Always-on DIV4 clocks (no gate bit). */
static struct sh_clk_ops sh_clk_div4_clk_ops = {
	.recalc		= sh_clk_div4_recalc,
	.set_rate	= sh_clk_div4_set_rate,
	.round_rate	= sh_clk_div_round_rate,
};

/* DIV4 clocks with a stop bit (bit 8). */
static struct sh_clk_ops sh_clk_div4_enable_clk_ops = {
	.recalc		= sh_clk_div4_recalc,
	.set_rate	= sh_clk_div4_set_rate,
	.round_rate	= sh_clk_div_round_rate,
	.enable		= sh_clk_div4_enable,
	.disable	= sh_clk_div4_disable,
};

/* Gated DIV4 clocks that can also switch parents. */
static struct sh_clk_ops sh_clk_div4_reparent_clk_ops = {
	.recalc		= sh_clk_div4_recalc,
	.set_rate	= sh_clk_div4_set_rate,
	.round_rate	= sh_clk_div_round_rate,
	.enable		= sh_clk_div4_enable,
	.disable	= sh_clk_div4_disable,
	.set_parent	= sh_clk_div4_set_parent,
};
/*
 * Register @nr DIV4 clocks, attaching @ops and the SoC divisor @table,
 * carving each clock's frequency table out of a single allocation.
 *
 * As with the DIV6 variant, the allocation is never freed — registered
 * clocks keep pointers into it for the lifetime of the system.
 */
static int __init sh_clk_div4_register_ops(struct clk *clks, int nr,
			struct clk_div4_table *table, struct sh_clk_ops *ops)
{
	struct clk *clkp;
	void *freq_table;
	int nr_divs = table->div_mult_table->nr_divisors;
	int freq_table_size = sizeof(struct cpufreq_frequency_table);
	int ret = 0;
	int k;

	/* One table per clock, each with room for a terminator entry. */
	freq_table_size *= (nr_divs + 1);

	freq_table = kzalloc(freq_table_size * nr, GFP_KERNEL);
	if (!freq_table) {
		pr_err("sh_clk_div4_register: unable to alloc memory\n");
		return -ENOMEM;
	}

	for (k = 0; !ret && (k < nr); k++) {
		clkp = clks + k;

		clkp->ops = ops;
		clkp->priv = table;

		clkp->freq_table = freq_table + (k * freq_table_size);
		clkp->freq_table[nr_divs].frequency = CPUFREQ_TABLE_END;

		ret = clk_register(clkp);
	}

	return ret;
}
/* Register always-on DIV4 clocks. */
int __init sh_clk_div4_register(struct clk *clks, int nr,
				struct clk_div4_table *table)
{
	return sh_clk_div4_register_ops(clks, nr, table, &sh_clk_div4_clk_ops);
}

/* Register DIV4 clocks that support enable/disable via the stop bit. */
int __init sh_clk_div4_enable_register(struct clk *clks, int nr,
				       struct clk_div4_table *table)
{
	return sh_clk_div4_register_ops(clks, nr, table,
					&sh_clk_div4_enable_clk_ops);
}

/* Register gated DIV4 clocks that also support parent switching. */
int __init sh_clk_div4_reparent_register(struct clk *clks, int nr,
					 struct clk_div4_table *table)
{
	return sh_clk_div4_register_ops(clks, nr, table,
					&sh_clk_div4_reparent_clk_ops);
}