clock-cpg.c

#include <linux/clk.h>
#include <linux/compiler.h>
#include <linux/slab.h>
#include <linux/io.h>
#include <asm/clock.h>

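/*
 * MSTP32: 32-bit module stop registers (MSTPCR on most SuperH parts).
 * A set bit stops the module clock, so enable() clears the bit and
 * disable() sets it; the rate simply follows the parent.
 */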
static int sh_clk_mstp32_enable(struct clk *clk)
{
	__raw_writel(__raw_readl(clk->enable_reg) & ~(1 << clk->enable_bit),
		     clk->enable_reg);
	return 0;
}

static void sh_clk_mstp32_disable(struct clk *clk)
{
	__raw_writel(__raw_readl(clk->enable_reg) | (1 << clk->enable_bit),
		     clk->enable_reg);
}

static struct clk_ops sh_clk_mstp32_clk_ops = {
	.enable		= sh_clk_mstp32_enable,
	.disable	= sh_clk_mstp32_disable,
	.recalc		= followparent_recalc,
};

int __init sh_clk_mstp32_register(struct clk *clks, int nr)
{
	struct clk *clkp;
	int ret = 0;
	int k;

	for (k = 0; !ret && (k < nr); k++) {
		clkp = clks + k;
		clkp->ops = &sh_clk_mstp32_clk_ops;
		ret |= clk_register(clkp);
	}

	return ret;
}

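/*
 * Minimal usage sketch for a platform setup file.  The clock name,
 * register address and bit number below are hypothetical, not taken
 * from any particular SoC:
 *
 *	static struct clk mstp_clks[] = {
 *		{
 *			.name		= "example_mstp",
 *			.parent		= &peripheral_clk,
 *			.enable_reg	= (void __iomem *)0xa4150030,
 *			.enable_bit	= 11,
 *		},
 *	};
 *
 *	sh_clk_mstp32_register(mstp_clks, ARRAY_SIZE(mstp_clks));
 */
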
static long sh_clk_div_round_rate(struct clk *clk, unsigned long rate)
{
	return clk_rate_table_round(clk, clk->freq_table, rate);
}

static int sh_clk_div6_divisors[64] = {
	1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16,
	17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32,
	33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48,
	49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64
};

static struct clk_div_mult_table sh_clk_div6_table = {
	.divisors = sh_clk_div6_divisors,
	.nr_divisors = ARRAY_SIZE(sh_clk_div6_divisors),
};

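/*
 * DIV6: a 6-bit divider field in the low bits of the clock's own
 * register (divisors 1..64, encoded as field value + 1), plus a stop
 * bit at bit 8 (0x100).
 */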
static unsigned long sh_clk_div6_recalc(struct clk *clk)
{
	struct clk_div_mult_table *table = &sh_clk_div6_table;
	unsigned int idx;

	clk_rate_table_build(clk, clk->freq_table, table->nr_divisors,
			     table, NULL);

	idx = __raw_readl(clk->enable_reg) & 0x003f;

	return clk->freq_table[idx].frequency;
}

static int sh_clk_div6_set_rate(struct clk *clk,
				unsigned long rate, int algo_id)
{
	unsigned long value;
	int idx;

	idx = clk_rate_table_find(clk, clk->freq_table, rate);
	if (idx < 0)
		return idx;

	value = __raw_readl(clk->enable_reg);
	value &= ~0x3f;
	value |= idx;
	__raw_writel(value, clk->enable_reg);
	return 0;
}

static int sh_clk_div6_enable(struct clk *clk)
{
	unsigned long value;
	int ret;

	ret = sh_clk_div6_set_rate(clk, clk->rate, 0);
	if (ret == 0) {
		value = __raw_readl(clk->enable_reg);
		value &= ~0x100; /* clear stop bit to enable clock */
		__raw_writel(value, clk->enable_reg);
	}
	return ret;
}

static void sh_clk_div6_disable(struct clk *clk)
{
	unsigned long value;

	value = __raw_readl(clk->enable_reg);
	value |= 0x100; /* stop clock */
	value |= 0x3f; /* VDIV bits must be non-zero, overwrite divider */
	__raw_writel(value, clk->enable_reg);
}

static struct clk_ops sh_clk_div6_clk_ops = {
	.recalc		= sh_clk_div6_recalc,
	.round_rate	= sh_clk_div_round_rate,
	.set_rate	= sh_clk_div6_set_rate,
	.enable		= sh_clk_div6_enable,
	.disable	= sh_clk_div6_disable,
};

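/*
 * Registration allocates one cpufreq frequency table per clock, with
 * one extra slot reserved for the CPUFREQ_TABLE_END terminator.
 */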
int __init sh_clk_div6_register(struct clk *clks, int nr)
{
	struct clk *clkp;
	void *freq_table;
	int nr_divs = sh_clk_div6_table.nr_divisors;
	int freq_table_size = sizeof(struct cpufreq_frequency_table);
	int ret = 0;
	int k;

	freq_table_size *= (nr_divs + 1);

	freq_table = kzalloc(freq_table_size * nr, GFP_KERNEL);
	if (!freq_table) {
		pr_err("sh_clk_div6_register: unable to alloc memory\n");
		return -ENOMEM;
	}

	for (k = 0; !ret && (k < nr); k++) {
		clkp = clks + k;

		clkp->ops = &sh_clk_div6_clk_ops;
		clkp->id = -1;
		clkp->freq_table = freq_table + (k * freq_table_size);
		clkp->freq_table[nr_divs].frequency = CPUFREQ_TABLE_END;

		ret = clk_register(clkp);
	}

	return ret;
}

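/*
 * DIV4: a 4-bit divider field at clk->enable_bit within a shared
 * register.  The per-SoC divisor/multiplier table arrives through
 * clk->priv as a struct clk_div4_table, which may also provide a
 * kick() callback to latch a newly written divider.
 */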
static unsigned long sh_clk_div4_recalc(struct clk *clk)
{
	struct clk_div4_table *d4t = clk->priv;
	struct clk_div_mult_table *table = d4t->div_mult_table;
	unsigned int idx;

	clk_rate_table_build(clk, clk->freq_table, table->nr_divisors,
			     table, &clk->arch_flags);

	idx = (__raw_readl(clk->enable_reg) >> clk->enable_bit) & 0x000f;

	return clk->freq_table[idx].frequency;
}

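/*
 * Bit 7 of the DIV4 register selects the parent: cleared for
 * "pll_clk", set for anything else.  After reparenting, the
 * frequency table must be rebuilt against the new parent rate.
 */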
static int sh_clk_div4_set_parent(struct clk *clk, struct clk *parent)
{
	struct clk_div4_table *d4t = clk->priv;
	struct clk_div_mult_table *table = d4t->div_mult_table;
	u32 value;
	int ret;

	if (!strcmp("pll_clk", parent->name))
		value = __raw_readl(clk->enable_reg) & ~(1 << 7);
	else
		value = __raw_readl(clk->enable_reg) | (1 << 7);

	ret = clk_reparent(clk, parent);
	if (ret < 0)
		return ret;

	__raw_writel(value, clk->enable_reg);

	/* Rebuild the frequency table */
	clk_rate_table_build(clk, clk->freq_table, table->nr_divisors,
			     table, &clk->arch_flags);

	return 0;
}

static int sh_clk_div4_set_rate(struct clk *clk, unsigned long rate, int algo_id)
{
	struct clk_div4_table *d4t = clk->priv;
	unsigned long value;
	int idx = clk_rate_table_find(clk, clk->freq_table, rate);
	if (idx < 0)
		return idx;

	value = __raw_readl(clk->enable_reg);
	value &= ~(0xf << clk->enable_bit);
	value |= (idx << clk->enable_bit);
	__raw_writel(value, clk->enable_reg);

	if (d4t->kick)
		d4t->kick(clk);

	return 0;
}

static int sh_clk_div4_enable(struct clk *clk)
{
	__raw_writel(__raw_readl(clk->enable_reg) & ~(1 << 8), clk->enable_reg);
	return 0;
}

static void sh_clk_div4_disable(struct clk *clk)
{
	__raw_writel(__raw_readl(clk->enable_reg) | (1 << 8), clk->enable_reg);
}

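/*
 * Three flavours of DIV4 ops with increasing capability: bare rate
 * control, rate control plus gating via bit 8, and gating plus
 * parent selection.
 */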
static struct clk_ops sh_clk_div4_clk_ops = {
	.recalc		= sh_clk_div4_recalc,
	.set_rate	= sh_clk_div4_set_rate,
	.round_rate	= sh_clk_div_round_rate,
};

static struct clk_ops sh_clk_div4_enable_clk_ops = {
	.recalc		= sh_clk_div4_recalc,
	.set_rate	= sh_clk_div4_set_rate,
	.round_rate	= sh_clk_div_round_rate,
	.enable		= sh_clk_div4_enable,
	.disable	= sh_clk_div4_disable,
};

static struct clk_ops sh_clk_div4_reparent_clk_ops = {
	.recalc		= sh_clk_div4_recalc,
	.set_rate	= sh_clk_div4_set_rate,
	.round_rate	= sh_clk_div_round_rate,
	.enable		= sh_clk_div4_enable,
	.disable	= sh_clk_div4_disable,
	.set_parent	= sh_clk_div4_set_parent,
};

static int __init sh_clk_div4_register_ops(struct clk *clks, int nr,
			struct clk_div4_table *table, struct clk_ops *ops)
{
	struct clk *clkp;
	void *freq_table;
	int nr_divs = table->div_mult_table->nr_divisors;
	int freq_table_size = sizeof(struct cpufreq_frequency_table);
	int ret = 0;
	int k;

	freq_table_size *= (nr_divs + 1);

	freq_table = kzalloc(freq_table_size * nr, GFP_KERNEL);
	if (!freq_table) {
		pr_err("sh_clk_div4_register: unable to alloc memory\n");
		return -ENOMEM;
	}

	for (k = 0; !ret && (k < nr); k++) {
		clkp = clks + k;

		clkp->ops = ops;
		clkp->id = -1;
		clkp->priv = table;

		clkp->freq_table = freq_table + (k * freq_table_size);
		clkp->freq_table[nr_divs].frequency = CPUFREQ_TABLE_END;

		ret = clk_register(clkp);
	}

	return ret;
}

int __init sh_clk_div4_register(struct clk *clks, int nr,
				struct clk_div4_table *table)
{
	return sh_clk_div4_register_ops(clks, nr, table, &sh_clk_div4_clk_ops);
}

int __init sh_clk_div4_enable_register(struct clk *clks, int nr,
				struct clk_div4_table *table)
{
	return sh_clk_div4_register_ops(clks, nr, table,
					&sh_clk_div4_enable_clk_ops);
}

int __init sh_clk_div4_reparent_register(struct clk *clks, int nr,
				struct clk_div4_table *table)
{
	return sh_clk_div4_register_ops(clks, nr, table,
					&sh_clk_div4_reparent_clk_ops);
}

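/*
 * Legacy CPG: four fixed clocks (master, peripheral, bus, cpu) whose
 * ops are filled in per-CPU by arch_init_clk_ops().  The __deprecated
 * marker on cpg_clk_init() signals that new ports should register
 * their clocks through the helpers above instead.
 */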
#ifdef CONFIG_SH_CLK_CPG_LEGACY
static struct clk master_clk = {
	.name		= "master_clk",
	.flags		= CLK_ENABLE_ON_INIT,
	.rate		= CONFIG_SH_PCLK_FREQ,
};

static struct clk peripheral_clk = {
	.name		= "peripheral_clk",
	.parent		= &master_clk,
	.flags		= CLK_ENABLE_ON_INIT,
};

static struct clk bus_clk = {
	.name		= "bus_clk",
	.parent		= &master_clk,
	.flags		= CLK_ENABLE_ON_INIT,
};

static struct clk cpu_clk = {
	.name		= "cpu_clk",
	.parent		= &master_clk,
	.flags		= CLK_ENABLE_ON_INIT,
};

/*
 * The ordering of these clocks matters, do not change it.
 */
static struct clk *onchip_clocks[] = {
	&master_clk,
	&peripheral_clk,
	&bus_clk,
	&cpu_clk,
};

int __init __deprecated cpg_clk_init(void)
{
	int i, ret = 0;

	for (i = 0; i < ARRAY_SIZE(onchip_clocks); i++) {
		struct clk *clk = onchip_clocks[i];
		arch_init_clk_ops(&clk->ops, i);
		if (clk->ops)
			ret |= clk_register(clk);
	}

	return ret;
}

/*
 * Placeholder for compatibility, until the lazy CPUs do this
 * on their own.
 */
int __init __weak arch_clk_init(void)
{
	return cpg_clk_init();
}
#endif /* CONFIG_SH_CLK_CPG_LEGACY */