/*
 * clk-zynq.c
 *
 * Copyright (c) 2012 National Instruments
 *
 * Josh Cartwright <josh.cartwright@ni.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program. If not, see <http://www.gnu.org/licenses/>.
 */
  18. #include <linux/io.h>
  19. #include <linux/of.h>
  20. #include <linux/slab.h>
  21. #include <linux/kernel.h>
  22. #include <linux/clk-provider.h>
  23. static void __iomem *slcr_base;
  24. struct zynq_pll_clk {
  25. struct clk_hw hw;
  26. void __iomem *pll_ctrl;
  27. void __iomem *pll_cfg;
  28. };
  29. #define to_zynq_pll_clk(hw) container_of(hw, struct zynq_pll_clk, hw)
  30. #define CTRL_PLL_FDIV(x) ((x) >> 12)
  31. static unsigned long zynq_pll_recalc_rate(struct clk_hw *hw,
  32. unsigned long parent_rate)
  33. {
  34. struct zynq_pll_clk *pll = to_zynq_pll_clk(hw);
  35. return parent_rate * CTRL_PLL_FDIV(ioread32(pll->pll_ctrl));
  36. }
  37. static const struct clk_ops zynq_pll_clk_ops = {
  38. .recalc_rate = zynq_pll_recalc_rate,
  39. };
  40. static void __init zynq_pll_clk_setup(struct device_node *np)
  41. {
  42. struct clk_init_data init;
  43. struct zynq_pll_clk *pll;
  44. const char *parent_name;
  45. struct clk *clk;
  46. u32 regs[2];
  47. int ret;
  48. ret = of_property_read_u32_array(np, "reg", regs, ARRAY_SIZE(regs));
  49. if (WARN_ON(ret))
  50. return;
  51. pll = kzalloc(sizeof(*pll), GFP_KERNEL);
  52. if (WARN_ON(!pll))
  53. return;
  54. pll->pll_ctrl = slcr_base + regs[0];
  55. pll->pll_cfg = slcr_base + regs[1];
  56. of_property_read_string(np, "clock-output-names", &init.name);
  57. init.ops = &zynq_pll_clk_ops;
  58. parent_name = of_clk_get_parent_name(np, 0);
  59. init.parent_names = &parent_name;
  60. init.num_parents = 1;
  61. pll->hw.init = &init;
  62. clk = clk_register(NULL, &pll->hw);
  63. if (WARN_ON(IS_ERR(clk)))
  64. return;
  65. ret = of_clk_add_provider(np, of_clk_src_simple_get, clk);
  66. if (WARN_ON(ret))
  67. return;
  68. }
  69. struct zynq_periph_clk {
  70. struct clk_hw hw;
  71. struct clk_onecell_data onecell_data;
  72. struct clk *gates[2];
  73. void __iomem *clk_ctrl;
  74. spinlock_t clkact_lock;
  75. };
  76. #define to_zynq_periph_clk(hw) container_of(hw, struct zynq_periph_clk, hw)
  77. static const u8 periph_clk_parent_map[] = {
  78. 0, 0, 1, 2
  79. };
  80. #define PERIPH_CLK_CTRL_SRC(x) (periph_clk_parent_map[((x) & 0x30) >> 4])
  81. #define PERIPH_CLK_CTRL_DIV(x) (((x) & 0x3F00) >> 8)
  82. static unsigned long zynq_periph_recalc_rate(struct clk_hw *hw,
  83. unsigned long parent_rate)
  84. {
  85. struct zynq_periph_clk *periph = to_zynq_periph_clk(hw);
  86. return parent_rate / PERIPH_CLK_CTRL_DIV(ioread32(periph->clk_ctrl));
  87. }
  88. static u8 zynq_periph_get_parent(struct clk_hw *hw)
  89. {
  90. struct zynq_periph_clk *periph = to_zynq_periph_clk(hw);
  91. return PERIPH_CLK_CTRL_SRC(ioread32(periph->clk_ctrl));
  92. }
  93. static const struct clk_ops zynq_periph_clk_ops = {
  94. .recalc_rate = zynq_periph_recalc_rate,
  95. .get_parent = zynq_periph_get_parent,
  96. };
  97. static void __init zynq_periph_clk_setup(struct device_node *np)
  98. {
  99. struct zynq_periph_clk *periph;
  100. const char *parent_names[3];
  101. struct clk_init_data init;
  102. int clk_num = 0, err;
  103. const char *name;
  104. struct clk *clk;
  105. u32 reg;
  106. int i;
  107. err = of_property_read_u32(np, "reg", &reg);
  108. if (WARN_ON(err))
  109. return;
  110. periph = kzalloc(sizeof(*periph), GFP_KERNEL);
  111. if (WARN_ON(!periph))
  112. return;
  113. periph->clk_ctrl = slcr_base + reg;
  114. spin_lock_init(&periph->clkact_lock);
  115. init.name = np->name;
  116. init.ops = &zynq_periph_clk_ops;
  117. for (i = 0; i < ARRAY_SIZE(parent_names); i++)
  118. parent_names[i] = of_clk_get_parent_name(np, i);
  119. init.parent_names = parent_names;
  120. init.num_parents = ARRAY_SIZE(parent_names);
  121. periph->hw.init = &init;
  122. clk = clk_register(NULL, &periph->hw);
  123. if (WARN_ON(IS_ERR(clk)))
  124. return;
  125. err = of_clk_add_provider(np, of_clk_src_simple_get, clk);
  126. if (WARN_ON(err))
  127. return;
  128. err = of_property_read_string_index(np, "clock-output-names", 0,
  129. &name);
  130. if (WARN_ON(err))
  131. return;
  132. periph->gates[0] = clk_register_gate(NULL, name, np->name, 0,
  133. periph->clk_ctrl, 0, 0,
  134. &periph->clkact_lock);
  135. if (WARN_ON(IS_ERR(periph->gates[0])))
  136. return;
  137. clk_num++;
  138. /* some periph clks have 2 downstream gates */
  139. err = of_property_read_string_index(np, "clock-output-names", 1,
  140. &name);
  141. if (err != -ENODATA) {
  142. periph->gates[1] = clk_register_gate(NULL, name, np->name, 0,
  143. periph->clk_ctrl, 1, 0,
  144. &periph->clkact_lock);
  145. if (WARN_ON(IS_ERR(periph->gates[1])))
  146. return;
  147. clk_num++;
  148. }
  149. periph->onecell_data.clks = periph->gates;
  150. periph->onecell_data.clk_num = clk_num;
  151. err = of_clk_add_provider(np, of_clk_src_onecell_get,
  152. &periph->onecell_data);
  153. if (WARN_ON(err))
  154. return;
  155. }
  156. /* CPU Clock domain is modelled as a mux with 4 children subclks, whose
  157. * derivative rates depend on CLK_621_TRUE
  158. */
  159. struct zynq_cpu_clk {
  160. struct clk_hw hw;
  161. struct clk_onecell_data onecell_data;
  162. struct clk *subclks[4];
  163. void __iomem *clk_ctrl;
  164. spinlock_t clkact_lock;
  165. };
  166. #define to_zynq_cpu_clk(hw) container_of(hw, struct zynq_cpu_clk, hw)
  167. static const u8 zynq_cpu_clk_parent_map[] = {
  168. 1, 1, 2, 0
  169. };
  170. #define CPU_CLK_SRCSEL(x) (zynq_cpu_clk_parent_map[(((x) & 0x30) >> 4)])
  171. #define CPU_CLK_CTRL_DIV(x) (((x) & 0x3F00) >> 8)
  172. static u8 zynq_cpu_clk_get_parent(struct clk_hw *hw)
  173. {
  174. struct zynq_cpu_clk *cpuclk = to_zynq_cpu_clk(hw);
  175. return CPU_CLK_SRCSEL(ioread32(cpuclk->clk_ctrl));
  176. }
  177. static unsigned long zynq_cpu_clk_recalc_rate(struct clk_hw *hw,
  178. unsigned long parent_rate)
  179. {
  180. struct zynq_cpu_clk *cpuclk = to_zynq_cpu_clk(hw);
  181. return parent_rate / CPU_CLK_CTRL_DIV(ioread32(cpuclk->clk_ctrl));
  182. }
  183. static const struct clk_ops zynq_cpu_clk_ops = {
  184. .get_parent = zynq_cpu_clk_get_parent,
  185. .recalc_rate = zynq_cpu_clk_recalc_rate,
  186. };
  187. struct zynq_cpu_subclk {
  188. struct clk_hw hw;
  189. void __iomem *clk_621;
  190. enum {
  191. CPU_SUBCLK_6X4X,
  192. CPU_SUBCLK_3X2X,
  193. CPU_SUBCLK_2X,
  194. CPU_SUBCLK_1X,
  195. } which;
  196. };
  197. #define CLK_621_TRUE(x) ((x) & 1)
  198. #define to_zynq_cpu_subclk(hw) container_of(hw, struct zynq_cpu_subclk, hw);
  199. static unsigned long zynq_cpu_subclk_recalc_rate(struct clk_hw *hw,
  200. unsigned long parent_rate)
  201. {
  202. unsigned long uninitialized_var(rate);
  203. struct zynq_cpu_subclk *subclk;
  204. bool is_621;
  205. subclk = to_zynq_cpu_subclk(hw)
  206. is_621 = CLK_621_TRUE(ioread32(subclk->clk_621));
  207. switch (subclk->which) {
  208. case CPU_SUBCLK_6X4X:
  209. rate = parent_rate;
  210. break;
  211. case CPU_SUBCLK_3X2X:
  212. rate = parent_rate / 2;
  213. break;
  214. case CPU_SUBCLK_2X:
  215. rate = parent_rate / (is_621 ? 3 : 2);
  216. break;
  217. case CPU_SUBCLK_1X:
  218. rate = parent_rate / (is_621 ? 6 : 4);
  219. break;
  220. };
  221. return rate;
  222. }
  223. static const struct clk_ops zynq_cpu_subclk_ops = {
  224. .recalc_rate = zynq_cpu_subclk_recalc_rate,
  225. };
  226. static struct clk *zynq_cpu_subclk_setup(struct device_node *np, u8 which,
  227. void __iomem *clk_621)
  228. {
  229. struct zynq_cpu_subclk *subclk;
  230. struct clk_init_data init;
  231. struct clk *clk;
  232. int err;
  233. err = of_property_read_string_index(np, "clock-output-names",
  234. which, &init.name);
  235. if (WARN_ON(err))
  236. goto err_read_output_name;
  237. subclk = kzalloc(sizeof(*subclk), GFP_KERNEL);
  238. if (!subclk)
  239. goto err_subclk_alloc;
  240. subclk->clk_621 = clk_621;
  241. subclk->which = which;
  242. init.ops = &zynq_cpu_subclk_ops;
  243. init.parent_names = &np->name;
  244. init.num_parents = 1;
  245. subclk->hw.init = &init;
  246. clk = clk_register(NULL, &subclk->hw);
  247. if (WARN_ON(IS_ERR(clk)))
  248. goto err_clk_register;
  249. return clk;
  250. err_clk_register:
  251. kfree(subclk);
  252. err_subclk_alloc:
  253. err_read_output_name:
  254. return ERR_PTR(-EINVAL);
  255. }
  256. static void __init zynq_cpu_clk_setup(struct device_node *np)
  257. {
  258. struct zynq_cpu_clk *cpuclk;
  259. const char *parent_names[3];
  260. struct clk_init_data init;
  261. void __iomem *clk_621;
  262. struct clk *clk;
  263. u32 reg[2];
  264. int err;
  265. int i;
  266. err = of_property_read_u32_array(np, "reg", reg, ARRAY_SIZE(reg));
  267. if (WARN_ON(err))
  268. return;
  269. cpuclk = kzalloc(sizeof(*cpuclk), GFP_KERNEL);
  270. if (WARN_ON(!cpuclk))
  271. return;
  272. cpuclk->clk_ctrl = slcr_base + reg[0];
  273. clk_621 = slcr_base + reg[1];
  274. spin_lock_init(&cpuclk->clkact_lock);
  275. init.name = np->name;
  276. init.ops = &zynq_cpu_clk_ops;
  277. for (i = 0; i < ARRAY_SIZE(parent_names); i++)
  278. parent_names[i] = of_clk_get_parent_name(np, i);
  279. init.parent_names = parent_names;
  280. init.num_parents = ARRAY_SIZE(parent_names);
  281. cpuclk->hw.init = &init;
  282. clk = clk_register(NULL, &cpuclk->hw);
  283. if (WARN_ON(IS_ERR(clk)))
  284. return;
  285. err = of_clk_add_provider(np, of_clk_src_simple_get, clk);
  286. if (WARN_ON(err))
  287. return;
  288. for (i = 0; i < 4; i++) {
  289. cpuclk->subclks[i] = zynq_cpu_subclk_setup(np, i, clk_621);
  290. if (WARN_ON(IS_ERR(cpuclk->subclks[i])))
  291. return;
  292. }
  293. cpuclk->onecell_data.clks = cpuclk->subclks;
  294. cpuclk->onecell_data.clk_num = i;
  295. err = of_clk_add_provider(np, of_clk_src_onecell_get,
  296. &cpuclk->onecell_data);
  297. if (WARN_ON(err))
  298. return;
  299. }
  300. static const __initconst struct of_device_id zynq_clk_match[] = {
  301. { .compatible = "fixed-clock", .data = of_fixed_clk_setup, },
  302. { .compatible = "xlnx,zynq-pll", .data = zynq_pll_clk_setup, },
  303. { .compatible = "xlnx,zynq-periph-clock",
  304. .data = zynq_periph_clk_setup, },
  305. { .compatible = "xlnx,zynq-cpu-clock", .data = zynq_cpu_clk_setup, },
  306. {}
  307. };
  308. void __init xilinx_zynq_clocks_init(void __iomem *slcr)
  309. {
  310. slcr_base = slcr;
  311. of_clk_init(zynq_clk_match);
  312. }