/* clk-cpu.c */
/*
 * Marvell MVEBU CPU clock handling.
 *
 * Copyright (C) 2012 Marvell
 *
 * Gregory CLEMENT <gregory.clement@free-electrons.com>
 *
 * This file is licensed under the terms of the GNU General Public
 * License version 2. This program is licensed "as is" without any
 * warranty of any kind, whether express or implied.
 */
#include <linux/kernel.h>
#include <linux/clkdev.h>
#include <linux/clk-provider.h>
#include <linux/of_address.h>
#include <linux/io.h>
#include <linux/of.h>
#include <linux/delay.h>
#include "clk-cpu.h"

/* Offsets into the clock-complex register block mapped in of_cpu_clk_setup() */
#define SYS_CTRL_CLK_DIVIDER_CTRL_OFFSET 0x0
#define SYS_CTRL_CLK_DIVIDER_VALUE_OFFSET 0xC
/* Each CPU's divider is a 6-bit field within the VALUE register */
#define SYS_CTRL_CLK_DIVIDER_MASK 0x3F
/* Upper bound on supported CPU cores (Armada XP has up to 4) */
#define MAX_CPU 4
/*
 * Per-CPU clock instance: ties a clk_hw handle to the CPU index it
 * controls and the shared clock-complex register base.
 */
struct cpu_clk {
	struct clk_hw hw;		/* common clock framework handle */
	int cpu;			/* CPU index; selects the divider field/bits */
	const char *clk_name;		/* "cpuN", kzalloc'd in of_cpu_clk_setup() */
	const char *parent_name;	/* name of the parent clock from DT */
	void __iomem *reg_base;		/* mapped clock-complex registers */
};

/* Array of registered clocks handed to the of_clk onecell provider */
static struct clk **clks;
static struct clk_onecell_data clk_data;

/* Recover the containing cpu_clk from the clk_hw the framework passes us */
#define to_cpu_clk(p) container_of(p, struct cpu_clk, hw)
  34. static unsigned long clk_cpu_recalc_rate(struct clk_hw *hwclk,
  35. unsigned long parent_rate)
  36. {
  37. struct cpu_clk *cpuclk = to_cpu_clk(hwclk);
  38. u32 reg, div;
  39. reg = readl(cpuclk->reg_base + SYS_CTRL_CLK_DIVIDER_VALUE_OFFSET);
  40. div = (reg >> (cpuclk->cpu * 8)) & SYS_CTRL_CLK_DIVIDER_MASK;
  41. return parent_rate / div;
  42. }
  43. static long clk_cpu_round_rate(struct clk_hw *hwclk, unsigned long rate,
  44. unsigned long *parent_rate)
  45. {
  46. /* Valid ratio are 1:1, 1:2 and 1:3 */
  47. u32 div;
  48. div = *parent_rate / rate;
  49. if (div == 0)
  50. div = 1;
  51. else if (div > 3)
  52. div = 3;
  53. return *parent_rate / div;
  54. }
/*
 * clk_cpu_set_rate() - program a new divider for this CPU and trigger a
 * "smooth" (glitch-free) divider reload in hardware.
 *
 * Sequence (order matters):
 *  1. write the new 6-bit divider into this CPU's field of VALUE;
 *  2. set the per-CPU smooth-reload enable bit (bit 20 + cpu) in CTRL;
 *  3. set bit 24 in CTRL to trigger the reload;
 *  4. wait, then clear both bits to finish the request.
 *
 * NOTE(review): bit positions (20 + cpu, 24) and the 1000us settle delays
 * presumably come from the SoC datasheet — confirm before changing.
 */
static int clk_cpu_set_rate(struct clk_hw *hwclk, unsigned long rate,
			    unsigned long parent_rate)
{
	struct cpu_clk *cpuclk = to_cpu_clk(hwclk);
	u32 reg, div;
	u32 reload_mask;

	div = parent_rate / rate;
	/* Replace only this CPU's divider field; 8 bits reserved per CPU */
	reg = (readl(cpuclk->reg_base + SYS_CTRL_CLK_DIVIDER_VALUE_OFFSET)
		& (~(SYS_CTRL_CLK_DIVIDER_MASK << (cpuclk->cpu * 8))))
		| (div << (cpuclk->cpu * 8));
	writel(reg, cpuclk->reg_base + SYS_CTRL_CLK_DIVIDER_VALUE_OFFSET);

	/* Set clock divider reload smooth bit mask */
	reload_mask = 1 << (20 + cpuclk->cpu);

	reg = readl(cpuclk->reg_base + SYS_CTRL_CLK_DIVIDER_CTRL_OFFSET)
		| reload_mask;
	writel(reg, cpuclk->reg_base + SYS_CTRL_CLK_DIVIDER_CTRL_OFFSET);

	/* Now trigger the clock update */
	reg = readl(cpuclk->reg_base + SYS_CTRL_CLK_DIVIDER_CTRL_OFFSET)
		| 1 << 24;
	writel(reg, cpuclk->reg_base + SYS_CTRL_CLK_DIVIDER_CTRL_OFFSET);

	/* Wait for clocks to settle down then clear reload request */
	udelay(1000);
	reg &= ~(reload_mask | 1 << 24);
	writel(reg, cpuclk->reg_base + SYS_CTRL_CLK_DIVIDER_CTRL_OFFSET);
	udelay(1000);

	return 0;
}
/* clk_ops for the per-CPU clocks: read back, round and set the divider. */
static const struct clk_ops cpu_ops = {
	.recalc_rate = clk_cpu_recalc_rate,
	.round_rate = clk_cpu_round_rate,
	.set_rate = clk_cpu_set_rate,
};
  87. void __init of_cpu_clk_setup(struct device_node *node)
  88. {
  89. struct cpu_clk *cpuclk;
  90. void __iomem *clock_complex_base = of_iomap(node, 0);
  91. int ncpus = 0;
  92. struct device_node *dn;
  93. if (clock_complex_base == NULL) {
  94. pr_err("%s: clock-complex base register not set\n",
  95. __func__);
  96. return;
  97. }
  98. for_each_node_by_type(dn, "cpu")
  99. ncpus++;
  100. cpuclk = kzalloc(ncpus * sizeof(*cpuclk), GFP_KERNEL);
  101. if (WARN_ON(!cpuclk))
  102. return;
  103. clks = kzalloc(ncpus * sizeof(*clks), GFP_KERNEL);
  104. if (WARN_ON(!clks))
  105. return;
  106. for_each_node_by_type(dn, "cpu") {
  107. struct clk_init_data init;
  108. struct clk *clk;
  109. struct clk *parent_clk;
  110. char *clk_name = kzalloc(5, GFP_KERNEL);
  111. int cpu, err;
  112. if (WARN_ON(!clk_name))
  113. return;
  114. err = of_property_read_u32(dn, "reg", &cpu);
  115. if (WARN_ON(err))
  116. return;
  117. sprintf(clk_name, "cpu%d", cpu);
  118. parent_clk = of_clk_get(node, 0);
  119. cpuclk[cpu].parent_name = __clk_get_name(parent_clk);
  120. cpuclk[cpu].clk_name = clk_name;
  121. cpuclk[cpu].cpu = cpu;
  122. cpuclk[cpu].reg_base = clock_complex_base;
  123. cpuclk[cpu].hw.init = &init;
  124. init.name = cpuclk[cpu].clk_name;
  125. init.ops = &cpu_ops;
  126. init.flags = 0;
  127. init.parent_names = &cpuclk[cpu].parent_name;
  128. init.num_parents = 1;
  129. clk = clk_register(NULL, &cpuclk[cpu].hw);
  130. if (WARN_ON(IS_ERR(clk)))
  131. goto bail_out;
  132. clks[cpu] = clk;
  133. }
  134. clk_data.clk_num = MAX_CPU;
  135. clk_data.clks = clks;
  136. of_clk_add_provider(node, of_clk_src_onecell_get, &clk_data);
  137. return;
  138. bail_out:
  139. kfree(clks);
  140. kfree(cpuclk);
  141. }
/*
 * Match table consumed by of_clk_init(); .data is the setup callback
 * invoked for each DT node matching the compatible string.
 */
static const __initconst struct of_device_id clk_cpu_match[] = {
	{
		.compatible = "marvell,armada-xp-cpu-clock",
		.data = of_cpu_clk_setup,
	},
	{
	/* sentinel */
	},
};
/* Entry point: walk the DT and set up all matching CPU clock providers. */
void __init mvebu_cpu_clk_init(void)
{
	of_clk_init(clk_cpu_match);
}