/* clk-cpu.c */
  1. /*
  2. * Marvell MVEBU CPU clock handling.
  3. *
  4. * Copyright (C) 2012 Marvell
  5. *
  6. * Gregory CLEMENT <gregory.clement@free-electrons.com>
  7. *
  8. * This file is licensed under the terms of the GNU General Public
  9. * License version 2. This program is licensed "as is" without any
  10. * warranty of any kind, whether express or implied.
  11. */
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/clkdev.h>
#include <linux/clk-provider.h>
#include <linux/of_address.h>
#include <linux/io.h>
#include <linux/of.h>
#include <linux/delay.h>
/* Clock-complex register map: one control and one divider-value register */
#define SYS_CTRL_CLK_DIVIDER_CTRL_OFFSET	0x0
#define SYS_CTRL_CLK_DIVIDER_VALUE_OFFSET	0xC
/* Divider field width: 6 bits per CPU in the divider-value register */
#define SYS_CTRL_CLK_DIVIDER_MASK		0x3F

/* Maximum CPUs handled by this clock complex (Armada XP is quad-core) */
#define MAX_CPU 4

/*
 * Per-CPU clock instance.  Each CPU owns one byte-wide divider field in
 * the shared divider-value register at @reg_base.
 */
struct cpu_clk {
	struct clk_hw hw;		/* common clock framework handle */
	int cpu;			/* CPU index, selects the divider field */
	const char *clk_name;		/* heap-allocated "cpuN" name */
	const char *parent_name;	/* parent clock name, owned by parent */
	void __iomem *reg_base;		/* mapped clock-complex registers */
};

/* Clock array and onecell data handed to the OF clock provider */
static struct clk **clks;
static struct clk_onecell_data clk_data;

#define to_cpu_clk(p) container_of(p, struct cpu_clk, hw)
  33. static unsigned long clk_cpu_recalc_rate(struct clk_hw *hwclk,
  34. unsigned long parent_rate)
  35. {
  36. struct cpu_clk *cpuclk = to_cpu_clk(hwclk);
  37. u32 reg, div;
  38. reg = readl(cpuclk->reg_base + SYS_CTRL_CLK_DIVIDER_VALUE_OFFSET);
  39. div = (reg >> (cpuclk->cpu * 8)) & SYS_CTRL_CLK_DIVIDER_MASK;
  40. return parent_rate / div;
  41. }
  42. static long clk_cpu_round_rate(struct clk_hw *hwclk, unsigned long rate,
  43. unsigned long *parent_rate)
  44. {
  45. /* Valid ratio are 1:1, 1:2 and 1:3 */
  46. u32 div;
  47. div = *parent_rate / rate;
  48. if (div == 0)
  49. div = 1;
  50. else if (div > 3)
  51. div = 3;
  52. return *parent_rate / div;
  53. }
/*
 * Program a new CPU clock divider and trigger a smooth ratio reload.
 *
 * Hardware sequence (order matters):
 *   1. write the new divider into this CPU's byte of the value register;
 *   2. set the per-CPU "reload smooth" bit in the control register;
 *   3. set bit 24 in the control register to trigger the update;
 *   4. wait for the clock to settle, then clear both request bits.
 *
 * Always returns 0; assumes @rate was previously validated by
 * clk_cpu_round_rate() so the computed divider is in range.
 */
static int clk_cpu_set_rate(struct clk_hw *hwclk, unsigned long rate,
			    unsigned long parent_rate)
{
	struct cpu_clk *cpuclk = to_cpu_clk(hwclk);
	u32 reg, div;
	u32 reload_mask;

	div = parent_rate / rate;
	/* Replace only this CPU's byte in the shared divider register */
	reg = (readl(cpuclk->reg_base + SYS_CTRL_CLK_DIVIDER_VALUE_OFFSET)
		& (~(SYS_CTRL_CLK_DIVIDER_MASK << (cpuclk->cpu * 8))))
		| (div << (cpuclk->cpu * 8));
	writel(reg, cpuclk->reg_base + SYS_CTRL_CLK_DIVIDER_VALUE_OFFSET);

	/* Set clock divider reload smooth bit mask */
	reload_mask = 1 << (20 + cpuclk->cpu);

	reg = readl(cpuclk->reg_base + SYS_CTRL_CLK_DIVIDER_CTRL_OFFSET)
	    | reload_mask;
	writel(reg, cpuclk->reg_base + SYS_CTRL_CLK_DIVIDER_CTRL_OFFSET);

	/* Now trigger the clock update */
	reg = readl(cpuclk->reg_base + SYS_CTRL_CLK_DIVIDER_CTRL_OFFSET)
	    | 1 << 24;
	writel(reg, cpuclk->reg_base + SYS_CTRL_CLK_DIVIDER_CTRL_OFFSET);

	/* Wait for clocks to settle down then clear reload request */
	udelay(1000);
	reg &= ~(reload_mask | 1 << 24);
	writel(reg, cpuclk->reg_base + SYS_CTRL_CLK_DIVIDER_CTRL_OFFSET);
	udelay(1000);

	return 0;
}
/* Clock framework operations for the per-CPU clocks */
static const struct clk_ops cpu_ops = {
	.recalc_rate = clk_cpu_recalc_rate,
	.round_rate = clk_cpu_round_rate,
	.set_rate = clk_cpu_set_rate,
};
  86. void __init of_cpu_clk_setup(struct device_node *node)
  87. {
  88. struct cpu_clk *cpuclk;
  89. void __iomem *clock_complex_base = of_iomap(node, 0);
  90. int ncpus = 0;
  91. struct device_node *dn;
  92. if (clock_complex_base == NULL) {
  93. pr_err("%s: clock-complex base register not set\n",
  94. __func__);
  95. return;
  96. }
  97. for_each_node_by_type(dn, "cpu")
  98. ncpus++;
  99. cpuclk = kzalloc(ncpus * sizeof(*cpuclk), GFP_KERNEL);
  100. if (WARN_ON(!cpuclk))
  101. goto cpuclk_out;
  102. clks = kzalloc(ncpus * sizeof(*clks), GFP_KERNEL);
  103. if (WARN_ON(!clks))
  104. goto clks_out;
  105. for_each_node_by_type(dn, "cpu") {
  106. struct clk_init_data init;
  107. struct clk *clk;
  108. struct clk *parent_clk;
  109. char *clk_name = kzalloc(5, GFP_KERNEL);
  110. int cpu, err;
  111. if (WARN_ON(!clk_name))
  112. goto bail_out;
  113. err = of_property_read_u32(dn, "reg", &cpu);
  114. if (WARN_ON(err))
  115. goto bail_out;
  116. sprintf(clk_name, "cpu%d", cpu);
  117. parent_clk = of_clk_get(node, 0);
  118. cpuclk[cpu].parent_name = __clk_get_name(parent_clk);
  119. cpuclk[cpu].clk_name = clk_name;
  120. cpuclk[cpu].cpu = cpu;
  121. cpuclk[cpu].reg_base = clock_complex_base;
  122. cpuclk[cpu].hw.init = &init;
  123. init.name = cpuclk[cpu].clk_name;
  124. init.ops = &cpu_ops;
  125. init.flags = 0;
  126. init.parent_names = &cpuclk[cpu].parent_name;
  127. init.num_parents = 1;
  128. clk = clk_register(NULL, &cpuclk[cpu].hw);
  129. if (WARN_ON(IS_ERR(clk)))
  130. goto bail_out;
  131. clks[cpu] = clk;
  132. }
  133. clk_data.clk_num = MAX_CPU;
  134. clk_data.clks = clks;
  135. of_clk_add_provider(node, of_clk_src_onecell_get, &clk_data);
  136. return;
  137. bail_out:
  138. kfree(clks);
  139. while(ncpus--)
  140. kfree(cpuclk[ncpus].clk_name);
  141. clks_out:
  142. kfree(cpuclk);
  143. cpuclk_out:
  144. iounmap(clock_complex_base);
  145. }
/* Register the setup hook for matching "marvell,armada-xp-cpu-clock" nodes */
CLK_OF_DECLARE(armada_xp_cpu_clock, "marvell,armada-xp-cpu-clock",
	       of_cpu_clk_setup);