|
@@ -17,6 +17,9 @@
|
|
|
* GNU General Public License for more details.
|
|
|
*/
|
|
|
|
|
|
+#include <linux/clk-provider.h>
|
|
|
+#include <linux/clkdev.h>
|
|
|
+#include <linux/cpu.h>
|
|
|
#include <linux/delay.h>
|
|
|
#include <linux/err.h>
|
|
|
#include <linux/interrupt.h>
|
|
@@ -438,3 +441,102 @@ int __init ve_spc_init(void __iomem *baseaddr, u32 a15_clusid, int irq)
|
|
|
|
|
|
return 0;
|
|
|
}
|
|
|
+
|
|
|
/* Wrapper exposing one SPC performance domain as a common-clk clock. */
struct clk_spc {
	struct clk_hw hw;	/* common clock framework handle */
	int cluster;		/* cluster id whose performance this clock controls */
};

/* Recover the containing struct clk_spc from its embedded clk_hw. */
#define to_clk_spc(spc) container_of(spc, struct clk_spc, hw)
|
|
|
+static unsigned long spc_recalc_rate(struct clk_hw *hw,
|
|
|
+ unsigned long parent_rate)
|
|
|
+{
|
|
|
+ struct clk_spc *spc = to_clk_spc(hw);
|
|
|
+ u32 freq;
|
|
|
+
|
|
|
+ if (ve_spc_get_performance(spc->cluster, &freq))
|
|
|
+ return -EIO;
|
|
|
+
|
|
|
+ return freq * 1000;
|
|
|
+}
|
|
|
+
|
|
|
+static long spc_round_rate(struct clk_hw *hw, unsigned long drate,
|
|
|
+ unsigned long *parent_rate)
|
|
|
+{
|
|
|
+ struct clk_spc *spc = to_clk_spc(hw);
|
|
|
+
|
|
|
+ return ve_spc_round_performance(spc->cluster, drate);
|
|
|
+}
|
|
|
+
|
|
|
+static int spc_set_rate(struct clk_hw *hw, unsigned long rate,
|
|
|
+ unsigned long parent_rate)
|
|
|
+{
|
|
|
+ struct clk_spc *spc = to_clk_spc(hw);
|
|
|
+
|
|
|
+ return ve_spc_set_performance(spc->cluster, rate / 1000);
|
|
|
+}
|
|
|
+
|
|
|
+static struct clk_ops clk_spc_ops = {
|
|
|
+ .recalc_rate = spc_recalc_rate,
|
|
|
+ .round_rate = spc_round_rate,
|
|
|
+ .set_rate = spc_set_rate,
|
|
|
+};
|
|
|
+
|
|
|
+static struct clk *ve_spc_clk_register(struct device *cpu_dev)
|
|
|
+{
|
|
|
+ struct clk_init_data init;
|
|
|
+ struct clk_spc *spc;
|
|
|
+
|
|
|
+ spc = kzalloc(sizeof(*spc), GFP_KERNEL);
|
|
|
+ if (!spc) {
|
|
|
+ pr_err("could not allocate spc clk\n");
|
|
|
+ return ERR_PTR(-ENOMEM);
|
|
|
+ }
|
|
|
+
|
|
|
+ spc->hw.init = &init;
|
|
|
+ spc->cluster = topology_physical_package_id(cpu_dev->id);
|
|
|
+
|
|
|
+ init.name = dev_name(cpu_dev);
|
|
|
+ init.ops = &clk_spc_ops;
|
|
|
+ init.flags = CLK_IS_ROOT | CLK_GET_RATE_NOCACHE;
|
|
|
+ init.num_parents = 0;
|
|
|
+
|
|
|
+ return devm_clk_register(cpu_dev, &spc->hw);
|
|
|
+}
|
|
|
+
|
|
|
+static int __init ve_spc_clk_init(void)
|
|
|
+{
|
|
|
+ int cpu;
|
|
|
+ struct clk *clk;
|
|
|
+
|
|
|
+ if (!info)
|
|
|
+ return 0; /* Continue only if SPC is initialised */
|
|
|
+
|
|
|
+ if (ve_spc_populate_opps(0) || ve_spc_populate_opps(1)) {
|
|
|
+ pr_err("failed to build OPP table\n");
|
|
|
+ return -ENODEV;
|
|
|
+ }
|
|
|
+
|
|
|
+ for_each_possible_cpu(cpu) {
|
|
|
+ struct device *cpu_dev = get_cpu_device(cpu);
|
|
|
+ if (!cpu_dev) {
|
|
|
+ pr_warn("failed to get cpu%d device\n", cpu);
|
|
|
+ continue;
|
|
|
+ }
|
|
|
+ clk = ve_spc_clk_register(cpu_dev);
|
|
|
+ if (IS_ERR(clk)) {
|
|
|
+ pr_warn("failed to register cpu%d clock\n", cpu);
|
|
|
+ continue;
|
|
|
+ }
|
|
|
+ if (clk_register_clkdev(clk, NULL, dev_name(cpu_dev))) {
|
|
|
+ pr_warn("failed to register cpu%d clock lookup\n", cpu);
|
|
|
+ continue;
|
|
|
+ }
|
|
|
+
|
|
|
+ if (ve_init_opp_table(cpu_dev))
|
|
|
+ pr_warn("failed to initialise cpu%d opp table\n", cpu);
|
|
|
+ }
|
|
|
+
|
|
|
+ return 0;
|
|
|
+}
|
|
|
+module_init(ve_spc_clk_init);
|