/*
 * ARM big.LITTLE Platforms CPUFreq support
 *
 * Copyright (C) 2013 ARM Ltd.
 * Sudeep KarkadaNagesha <sudeep.karkadanagesha@arm.com>
 *
 * Copyright (C) 2013 Linaro.
 * Viresh Kumar <viresh.kumar@linaro.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed "as is" WITHOUT ANY WARRANTY of any
 * kind, whether express or implied; without even the implied warranty
 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/clk.h>
#include <linux/cpu.h>
#include <linux/cpufreq.h>
#include <linux/cpumask.h>
#include <linux/export.h>
#include <linux/mutex.h>
#include <linux/of_platform.h>
#include <linux/pm_opp.h>
#include <linux/slab.h>
#include <linux/topology.h>
#include <linux/types.h>
#include <asm/bL_switcher.h>

#include "arm_big_little.h"
/* Currently we support only two clusters */
#define A15_CLUSTER	0
#define A7_CLUSTER	1
#define MAX_CLUSTERS	2

#ifdef CONFIG_BL_SWITCHER
#define is_bL_switching_enabled()	true
#else
#define is_bL_switching_enabled()	false
#endif
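
/*
 * In switcher mode the driver exposes one virtual frequency scale to
 * cpufreq. The working assumption (reflected in the shifts below) is
 * that an A7 delivers roughly half the performance of an A15 at the
 * same clock rate, so A7 rates are halved when virtualized and
 * doubled again when programmed into the hardware.
 */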
#define ACTUAL_FREQ(cluster, freq)	((cluster == A7_CLUSTER) ? freq << 1 : freq)
#define VIRT_FREQ(cluster, freq)	((cluster == A7_CLUSTER) ? freq >> 1 : freq)

static struct cpufreq_arm_bL_ops *arm_bL_ops;
static struct clk *clk[MAX_CLUSTERS];
static struct cpufreq_frequency_table *freq_table[MAX_CLUSTERS + 1];
static atomic_t cluster_usage[MAX_CLUSTERS + 1];

static unsigned int clk_big_min;	/* Minimum clock frequency (Big) */
static unsigned int clk_little_max;	/* Maximum clock frequency (Little) */

static DEFINE_PER_CPU(unsigned int, physical_cluster);
static DEFINE_PER_CPU(unsigned int, cpu_last_req_freq);

static struct mutex cluster_lock[MAX_CLUSTERS];
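
/*
 * With the switcher enabled, all CPUs are treated as one virtual
 * cluster (index MAX_CLUSTERS); otherwise the physical package id
 * names the cluster directly.
 */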
static inline int raw_cpu_to_cluster(int cpu)
{
	return topology_physical_package_id(cpu);
}

static inline int cpu_to_cluster(int cpu)
{
	return is_bL_switching_enabled() ?
		MAX_CLUSTERS : raw_cpu_to_cluster(cpu);
}
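
/*
 * A physical cluster's clock must run fast enough for the hungriest
 * CPU currently placed on it, so pick the highest per-CPU request.
 */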
static unsigned int find_cluster_maxfreq(int cluster)
{
	int j;
	u32 max_freq = 0, cpu_freq;

	for_each_online_cpu(j) {
		cpu_freq = per_cpu(cpu_last_req_freq, j);

		if ((cluster == per_cpu(physical_cluster, j)) &&
				(max_freq < cpu_freq))
			max_freq = cpu_freq;
	}

	pr_debug("%s: cluster: %d, max freq: %d\n", __func__, cluster,
			max_freq);

	return max_freq;
}
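
/*
 * Frequencies are tracked in kHz throughout this driver; the clk API
 * works in Hz, hence the conversions around clk_get_rate() and
 * clk_set_rate().
 */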
static unsigned int clk_get_cpu_rate(unsigned int cpu)
{
	u32 cur_cluster = per_cpu(physical_cluster, cpu);
	u32 rate = clk_get_rate(clk[cur_cluster]) / 1000;

	/* For switcher we use virtual A7 clock rates */
	if (is_bL_switching_enabled())
		rate = VIRT_FREQ(cur_cluster, rate);

	pr_debug("%s: cpu: %d, cluster: %d, freq: %u\n", __func__, cpu,
			cur_cluster, rate);

	return rate;
}

static unsigned int bL_cpufreq_get_rate(unsigned int cpu)
{
	if (is_bL_switching_enabled()) {
		pr_debug("%s: freq: %d\n", __func__, per_cpu(cpu_last_req_freq,
					cpu));

		return per_cpu(cpu_last_req_freq, cpu);
	} else {
		return clk_get_cpu_rate(cpu);
	}
}
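
/*
 * Record the CPU's request, program the destination cluster's clock
 * to the highest outstanding request, migrate the CPU if the cluster
 * changed, then rebalance the old cluster's clock for the CPUs that
 * remain on it.
 */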
static unsigned int
bL_cpufreq_set_rate(u32 cpu, u32 old_cluster, u32 new_cluster, u32 rate)
{
	u32 new_rate, prev_rate;
	int ret;
	bool bLs = is_bL_switching_enabled();

	mutex_lock(&cluster_lock[new_cluster]);

	if (bLs) {
		prev_rate = per_cpu(cpu_last_req_freq, cpu);
		per_cpu(cpu_last_req_freq, cpu) = rate;
		per_cpu(physical_cluster, cpu) = new_cluster;

		new_rate = find_cluster_maxfreq(new_cluster);
		new_rate = ACTUAL_FREQ(new_cluster, new_rate);
	} else {
		new_rate = rate;
	}

	pr_debug("%s: cpu: %d, old cluster: %d, new cluster: %d, freq: %d\n",
			__func__, cpu, old_cluster, new_cluster, new_rate);

	ret = clk_set_rate(clk[new_cluster], new_rate * 1000);
	if (WARN_ON(ret)) {
		pr_err("clk_set_rate failed: %d, new cluster: %d\n", ret,
				new_cluster);
		if (bLs) {
			per_cpu(cpu_last_req_freq, cpu) = prev_rate;
			per_cpu(physical_cluster, cpu) = old_cluster;
		}

		mutex_unlock(&cluster_lock[new_cluster]);

		return ret;
	}

	mutex_unlock(&cluster_lock[new_cluster]);

	/* Recalc freq for old cluster when switching clusters */
	if (old_cluster != new_cluster) {
		pr_debug("%s: cpu: %d, old cluster: %d, new cluster: %d\n",
				__func__, cpu, old_cluster, new_cluster);

		/* Switch cluster */
		bL_switch_request(cpu, new_cluster);

		mutex_lock(&cluster_lock[old_cluster]);

		/* Set freq of old cluster if there are cpus left on it */
		new_rate = find_cluster_maxfreq(old_cluster);
		new_rate = ACTUAL_FREQ(old_cluster, new_rate);

		if (new_rate) {
			pr_debug("%s: Updating rate of old cluster: %d, to freq: %d\n",
					__func__, old_cluster, new_rate);

			ret = clk_set_rate(clk[old_cluster], new_rate * 1000);
			if (ret)
				pr_err("%s: clk_set_rate failed: %d, old cluster: %d\n",
						__func__, ret, old_cluster);
		}
		mutex_unlock(&cluster_lock[old_cluster]);
	}

	return 0;
}
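
/*
 * With the switcher active, a request below the big cluster's minimum
 * is served by migrating to the A7 cluster, and a request above the
 * LITTLE cluster's maximum by migrating to the A15 cluster.
 */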
/* Set clock frequency */
static int bL_cpufreq_set_target(struct cpufreq_policy *policy,
		unsigned int index)
{
	struct cpufreq_freqs freqs;
	u32 cpu = policy->cpu, cur_cluster, new_cluster, actual_cluster;
	int ret = 0;

	cur_cluster = cpu_to_cluster(cpu);
	new_cluster = actual_cluster = per_cpu(physical_cluster, cpu);

	freqs.old = bL_cpufreq_get_rate(cpu);
	freqs.new = freq_table[cur_cluster][index].frequency;
	pr_debug("%s: cpu: %d, cluster: %d, oldfreq: %d, target freq: %d, new freq: %d\n",
			__func__, cpu, cur_cluster, freqs.old, freqs.new,
			freqs.new);

	if (is_bL_switching_enabled()) {
		if ((actual_cluster == A15_CLUSTER) &&
				(freqs.new < clk_big_min)) {
			new_cluster = A7_CLUSTER;
		} else if ((actual_cluster == A7_CLUSTER) &&
				(freqs.new > clk_little_max)) {
			new_cluster = A15_CLUSTER;
		}
	}

	cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE);

	ret = bL_cpufreq_set_rate(cpu, actual_cluster, new_cluster, freqs.new);
	if (ret)
		freqs.new = freqs.old;

	cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE);

	return ret;
}
static inline u32 get_table_count(struct cpufreq_frequency_table *table)
{
	int count;

	for (count = 0; table[count].frequency != CPUFREQ_TABLE_END; count++)
		;

	return count;
}

/* get the minimum frequency in the cpufreq_frequency_table */
static inline u32 get_table_min(struct cpufreq_frequency_table *table)
{
	int i;
	uint32_t min_freq = ~0;

	for (i = 0; (table[i].frequency != CPUFREQ_TABLE_END); i++)
		if (table[i].frequency < min_freq)
			min_freq = table[i].frequency;

	return min_freq;
}

/* get the maximum frequency in the cpufreq_frequency_table */
static inline u32 get_table_max(struct cpufreq_frequency_table *table)
{
	int i;
	uint32_t max_freq = 0;

	for (i = 0; (table[i].frequency != CPUFREQ_TABLE_END); i++)
		if (table[i].frequency > max_freq)
			max_freq = table[i].frequency;

	return max_freq;
}
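
/*
 * Build the virtual cluster's table: both physical tables are merged
 * into one, with A7 entries converted to virtual rates, walked in
 * reverse cluster order so the result comes out in increasing
 * frequency.
 */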
static int merge_cluster_tables(void)
{
	int i, j, k = 0, count = 1;
	struct cpufreq_frequency_table *table;

	for (i = 0; i < MAX_CLUSTERS; i++)
		count += get_table_count(freq_table[i]);

	table = kzalloc(sizeof(*table) * count, GFP_KERNEL);
	if (!table)
		return -ENOMEM;

	freq_table[MAX_CLUSTERS] = table;

	/* Add in reverse order to get freqs in increasing order */
	for (i = MAX_CLUSTERS - 1; i >= 0; i--) {
		for (j = 0; freq_table[i][j].frequency != CPUFREQ_TABLE_END;
				j++) {
			table[k].frequency = VIRT_FREQ(i,
					freq_table[i][j].frequency);
			pr_debug("%s: index: %d, freq: %d\n", __func__, k,
					table[k].frequency);
			k++;
		}
	}

	table[k].driver_data = k;
	table[k].frequency = CPUFREQ_TABLE_END;

	pr_debug("%s: End, table: %p, count: %d\n", __func__, table, k);

	return 0;
}
static void _put_cluster_clk_and_freq_table(struct device *cpu_dev)
{
	u32 cluster = raw_cpu_to_cluster(cpu_dev->id);

	if (!freq_table[cluster])
		return;

	clk_put(clk[cluster]);
	dev_pm_opp_free_cpufreq_table(cpu_dev, &freq_table[cluster]);
	dev_dbg(cpu_dev, "%s: cluster: %d\n", __func__, cluster);
}
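
/*
 * Cluster resources are reference counted via cluster_usage; only the
 * last put actually releases the clock and frequency table. For the
 * virtual cluster this tears down both physical clusters and frees
 * the merged table.
 */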
static void put_cluster_clk_and_freq_table(struct device *cpu_dev)
{
	u32 cluster = cpu_to_cluster(cpu_dev->id);
	int i;

	if (atomic_dec_return(&cluster_usage[cluster]))
		return;

	if (cluster < MAX_CLUSTERS)
		return _put_cluster_clk_and_freq_table(cpu_dev);

	for_each_present_cpu(i) {
		struct device *cdev = get_cpu_device(i);
		if (!cdev) {
			pr_err("%s: failed to get cpu%d device\n", __func__, i);
			return;
		}

		_put_cluster_clk_and_freq_table(cdev);
	}

	/* free virtual table */
	kfree(freq_table[cluster]);
}
static int _get_cluster_clk_and_freq_table(struct device *cpu_dev)
{
	u32 cluster = raw_cpu_to_cluster(cpu_dev->id);
	char name[14] = "cpu-cluster.";
	int ret;

	if (freq_table[cluster])
		return 0;

	ret = arm_bL_ops->init_opp_table(cpu_dev);
	if (ret) {
		dev_err(cpu_dev, "%s: init_opp_table failed, cpu: %d, err: %d\n",
				__func__, cpu_dev->id, ret);
		goto out;
	}

	ret = dev_pm_opp_init_cpufreq_table(cpu_dev, &freq_table[cluster]);
	if (ret) {
		dev_err(cpu_dev, "%s: failed to init cpufreq table, cpu: %d, err: %d\n",
				__func__, cpu_dev->id, ret);
		goto out;
	}

	name[12] = cluster + '0';
	clk[cluster] = clk_get(cpu_dev, name);
	if (!IS_ERR(clk[cluster])) {
		dev_dbg(cpu_dev, "%s: clk: %p & freq table: %p, cluster: %d\n",
				__func__, clk[cluster], freq_table[cluster],
				cluster);
		return 0;
	}

	dev_err(cpu_dev, "%s: Failed to get clk for cpu: %d, cluster: %d\n",
			__func__, cpu_dev->id, cluster);
	ret = PTR_ERR(clk[cluster]);
	dev_pm_opp_free_cpufreq_table(cpu_dev, &freq_table[cluster]);

out:
	dev_err(cpu_dev, "%s: Failed to get data for cluster: %d\n", __func__,
			cluster);
	return ret;
}
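
/*
 * The first get for a cluster does the real work; for the virtual
 * cluster that means initializing both physical clusters, merging
 * their tables and computing the clk_big_min/clk_little_max switch
 * points.
 */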
static int get_cluster_clk_and_freq_table(struct device *cpu_dev)
{
	u32 cluster = cpu_to_cluster(cpu_dev->id);
	int i, ret;

	if (atomic_inc_return(&cluster_usage[cluster]) != 1)
		return 0;

	if (cluster < MAX_CLUSTERS) {
		ret = _get_cluster_clk_and_freq_table(cpu_dev);
		if (ret)
			atomic_dec(&cluster_usage[cluster]);
		return ret;
	}

	/*
	 * Get data for all clusters and fill virtual cluster with a merge of
	 * both
	 */
	for_each_present_cpu(i) {
		struct device *cdev = get_cpu_device(i);
		if (!cdev) {
			pr_err("%s: failed to get cpu%d device\n", __func__, i);
			return -ENODEV;
		}

		ret = _get_cluster_clk_and_freq_table(cdev);
		if (ret)
			goto put_clusters;
	}

	ret = merge_cluster_tables();
	if (ret)
		goto put_clusters;

	/* Assuming 2 clusters, set clk_big_min and clk_little_max */
	clk_big_min = get_table_min(freq_table[0]);
	clk_little_max = VIRT_FREQ(1, get_table_max(freq_table[1]));

	pr_debug("%s: cluster: %d, clk_big_min: %d, clk_little_max: %d\n",
			__func__, cluster, clk_big_min, clk_little_max);

	return 0;

put_clusters:
	for_each_present_cpu(i) {
		struct device *cdev = get_cpu_device(i);
		if (!cdev) {
			pr_err("%s: failed to get cpu%d device\n", __func__, i);
			return -ENODEV;
		}

		_put_cluster_clk_and_freq_table(cdev);
	}

	atomic_dec(&cluster_usage[cluster]);

	return ret;
}
/* Per-CPU initialization */
static int bL_cpufreq_init(struct cpufreq_policy *policy)
{
	u32 cur_cluster = cpu_to_cluster(policy->cpu);
	struct device *cpu_dev;
	int ret;

	cpu_dev = get_cpu_device(policy->cpu);
	if (!cpu_dev) {
		pr_err("%s: failed to get cpu%d device\n", __func__,
				policy->cpu);
		return -ENODEV;
	}

	ret = get_cluster_clk_and_freq_table(cpu_dev);
	if (ret)
		return ret;

	ret = cpufreq_table_validate_and_show(policy, freq_table[cur_cluster]);
	if (ret) {
		dev_err(cpu_dev, "CPU %d, cluster: %d invalid freq table\n",
				policy->cpu, cur_cluster);
		put_cluster_clk_and_freq_table(cpu_dev);
		return ret;
	}

	if (cur_cluster < MAX_CLUSTERS) {
		cpumask_copy(policy->cpus, topology_core_cpumask(policy->cpu));

		per_cpu(physical_cluster, policy->cpu) = cur_cluster;
	} else {
		/* Assumption: during init, we are always running on A15 */
		per_cpu(physical_cluster, policy->cpu) = A15_CLUSTER;
	}

	if (arm_bL_ops->get_transition_latency)
		policy->cpuinfo.transition_latency =
			arm_bL_ops->get_transition_latency(cpu_dev);
	else
		policy->cpuinfo.transition_latency = CPUFREQ_ETERNAL;

	if (is_bL_switching_enabled())
		per_cpu(cpu_last_req_freq, policy->cpu) = clk_get_cpu_rate(policy->cpu);

	dev_info(cpu_dev, "%s: CPU %d initialized\n", __func__, policy->cpu);
	return 0;
}
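
/*
 * Per-CPU teardown: release the table attribute and drop the cluster
 * reference taken in bL_cpufreq_init().
 */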
static int bL_cpufreq_exit(struct cpufreq_policy *policy)
{
	struct device *cpu_dev;

	cpu_dev = get_cpu_device(policy->cpu);
	if (!cpu_dev) {
		pr_err("%s: failed to get cpu%d device\n", __func__,
				policy->cpu);
		return -ENODEV;
	}

	cpufreq_frequency_table_put_attr(policy->cpu);
	put_cluster_clk_and_freq_table(cpu_dev);
	dev_dbg(cpu_dev, "%s: Exited, cpu: %d\n", __func__, policy->cpu);

	return 0;
}
static struct cpufreq_driver bL_cpufreq_driver = {
	.name			= "arm-big-little",
	.flags			= CPUFREQ_STICKY |
					CPUFREQ_HAVE_GOVERNOR_PER_POLICY,
	.verify			= cpufreq_generic_frequency_table_verify,
	.target_index		= bL_cpufreq_set_target,
	.get			= bL_cpufreq_get_rate,
	.init			= bL_cpufreq_init,
	.exit			= bL_cpufreq_exit,
	.attr			= cpufreq_generic_attr,
};
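
/*
 * Platform glue registers its ops here. A hypothetical example —
 * "my_init_opp_table" and "my-bl-cpufreq" are illustrative names, not
 * part of this driver:
 *
 *	static struct cpufreq_arm_bL_ops my_bL_ops = {
 *		.name		= "my-bl-cpufreq",
 *		.init_opp_table	= my_init_opp_table,
 *	};
 *
 *	ret = bL_cpufreq_register(&my_bL_ops);
 */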
int bL_cpufreq_register(struct cpufreq_arm_bL_ops *ops)
{
	int ret, i;

	if (arm_bL_ops) {
		pr_debug("%s: Already registered: %s, exiting\n", __func__,
				arm_bL_ops->name);
		return -EBUSY;
	}

	if (!ops || !strlen(ops->name) || !ops->init_opp_table) {
		pr_err("%s: Invalid arm_bL_ops, exiting\n", __func__);
		return -ENODEV;
	}

	arm_bL_ops = ops;

	for (i = 0; i < MAX_CLUSTERS; i++)
		mutex_init(&cluster_lock[i]);

	ret = cpufreq_register_driver(&bL_cpufreq_driver);
	if (ret) {
		pr_info("%s: Failed registering platform driver: %s, err: %d\n",
				__func__, ops->name, ret);
		arm_bL_ops = NULL;
	} else {
		pr_info("%s: Registered platform driver: %s\n", __func__,
				ops->name);
	}

	return ret;
}
EXPORT_SYMBOL_GPL(bL_cpufreq_register);
void bL_cpufreq_unregister(struct cpufreq_arm_bL_ops *ops)
{
	if (arm_bL_ops != ops) {
		pr_err("%s: Registered with: %s, can't unregister, exiting\n",
				__func__, arm_bL_ops->name);
		return;
	}

	cpufreq_unregister_driver(&bL_cpufreq_driver);
	pr_info("%s: Un-registered platform driver: %s\n", __func__,
			arm_bL_ops->name);
	arm_bL_ops = NULL;
}
EXPORT_SYMBOL_GPL(bL_cpufreq_unregister);