cpu_cooling.c

/*
 *  linux/drivers/thermal/cpu_cooling.c
 *
 *  Copyright (C) 2012 Samsung Electronics Co., Ltd(http://www.samsung.com)
 *  Copyright (C) 2012 Amit Daniel <amit.kachhap@linaro.org>
 *
 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; version 2 of the License.
 *
 *  This program is distributed in the hope that it will be useful, but
 *  WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 *  General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License along
 *  with this program; if not, write to the Free Software Foundation, Inc.,
 *  59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
 *
 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/thermal.h>
#include <linux/platform_device.h>
#include <linux/cpufreq.h>
#include <linux/err.h>
#include <linux/slab.h>
#include <linux/cpu.h>
#include <linux/cpu_cooling.h>

/**
 * struct cpufreq_cooling_device
 * @id: unique integer value corresponding to each cpufreq_cooling_device
 *	registered.
 * @cool_dev: thermal_cooling_device pointer to keep track of the
 *	registered cooling device.
 * @cpufreq_state: integer value representing the current state of cpufreq
 *	cooling devices.
 * @cpufreq_val: integer value representing the absolute value of the clipped
 *	frequency.
 * @allowed_cpus: all the cpus involved for this cpufreq_cooling_device.
 * @node: list_head to link all cpufreq_cooling_device together.
 *
 * This structure is required for keeping information of each
 * cpufreq_cooling_device registered as a list whose head is represented by
 * cooling_cpufreq_list. In order to prevent corruption of this list a
 * mutex lock cooling_cpufreq_lock is used.
 */
struct cpufreq_cooling_device {
	int id;
	struct thermal_cooling_device *cool_dev;
	unsigned int cpufreq_state;
	unsigned int cpufreq_val;
	struct cpumask allowed_cpus;
	struct list_head node;
};

static LIST_HEAD(cooling_cpufreq_list);
static DEFINE_IDR(cpufreq_idr);

static struct mutex cooling_cpufreq_lock;

/* notify_table passes value to the CPUFREQ_ADJUST callback function. */
#define NOTIFY_INVALID NULL
static struct cpufreq_cooling_device *notify_device;

/**
 * get_idr - function to get a unique id.
 * @idr: struct idr * handle used to create an id.
 * @id: int * value generated by this function.
 */
static int get_idr(struct idr *idr, int *id)
{
	int err;
again:
	if (unlikely(idr_pre_get(idr, GFP_KERNEL) == 0))
		return -ENOMEM;

	mutex_lock(&cooling_cpufreq_lock);
	err = idr_get_new(idr, NULL, id);
	mutex_unlock(&cooling_cpufreq_lock);

	if (unlikely(err == -EAGAIN))
		goto again;
	else if (unlikely(err))
		return err;

	*id = *id & MAX_ID_MASK;
	return 0;
}

/**
 * release_idr - function to free the unique id.
 * @idr: struct idr * handle used for creating the id.
 * @id: int value representing the unique id.
 */
static void release_idr(struct idr *idr, int id)
{
	mutex_lock(&cooling_cpufreq_lock);
	idr_remove(idr, id);
	mutex_unlock(&cooling_cpufreq_lock);
}

/* Below code defines functions to be used for cpufreq as cooling device */

/**
 * is_cpufreq_valid - function to check if a cpu has a frequency transition policy.
 * @cpu: cpu for which the check is needed.
 */
static int is_cpufreq_valid(int cpu)
{
	struct cpufreq_policy policy;

	return !cpufreq_get_policy(&policy, cpu);
}

/**
 * get_cpu_frequency - get the absolute value of frequency from level.
 * @cpu: cpu for which frequency is fetched.
 * @level: level of frequency of the CPU
 *	e.g. level=0 --> 1st MAX FREQ, level=1 --> 2nd MAX FREQ, ... etc
 */
static unsigned int get_cpu_frequency(unsigned int cpu, unsigned long level)
{
	int ret = 0, i = 0;
	unsigned long level_index;
	bool descend = false;
	struct cpufreq_frequency_table *table =
					cpufreq_frequency_get_table(cpu);
	if (!table)
		return ret;

	while (table[i].frequency != CPUFREQ_TABLE_END) {
		/* skip invalid entries, but keep walking the table */
		if (table[i].frequency == CPUFREQ_ENTRY_INVALID) {
			i++;
			continue;
		}

		/* check if table is in ascending or descending order */
		if ((table[i + 1].frequency != CPUFREQ_TABLE_END) &&
		    (table[i + 1].frequency < table[i].frequency) &&
		    !descend) {
			descend = true;
		}

		/* return if level matched and table in descending order */
		if (descend && i == level)
			return table[i].frequency;
		i++;
	}
	i--;

	if (level > i || descend)
		return ret;
	level_index = i - level;

	/* Scan the table in reverse order and match the level */
	while (i >= 0) {
		/* skip invalid entries, but keep walking the table */
		if (table[i].frequency == CPUFREQ_ENTRY_INVALID) {
			i--;
			continue;
		}
		/* return if level matched */
		if (i == level_index)
			return table[i].frequency;
		i--;
	}
	return ret;
}

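/*
 * Illustration (not part of the original file): for a hypothetical frequency
 * table {1200000, 1000000, 800000, CPUFREQ_TABLE_END} in kHz, descending,
 * get_cpu_frequency() maps level 0 -> 1200000, level 1 -> 1000000 and
 * level 2 -> 800000.  An ascending table with the same entries yields the
 * same mapping, because the second loop walks the table in reverse.  Any
 * level beyond the last entry returns 0.
 */
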
/**
 * cpufreq_apply_cooling - function to apply frequency clipping.
 * @cpufreq_device: cpufreq_cooling_device pointer containing frequency
 *	clipping data.
 * @cooling_state: value of the cooling state.
 */
static int cpufreq_apply_cooling(struct cpufreq_cooling_device *cpufreq_device,
				 unsigned long cooling_state)
{
	unsigned int cpuid, clip_freq;
	struct cpumask *maskPtr = &cpufreq_device->allowed_cpus;
	unsigned int cpu = cpumask_any(maskPtr);

	/* Check if the old cooling action is same as new cooling action */
	if (cpufreq_device->cpufreq_state == cooling_state)
		return 0;

	clip_freq = get_cpu_frequency(cpu, cooling_state);
	if (!clip_freq)
		return -EINVAL;

	cpufreq_device->cpufreq_state = cooling_state;
	cpufreq_device->cpufreq_val = clip_freq;
	notify_device = cpufreq_device;

	for_each_cpu(cpuid, maskPtr) {
		if (is_cpufreq_valid(cpuid))
			cpufreq_update_policy(cpuid);
	}

	notify_device = NOTIFY_INVALID;

	return 0;
}

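/*
 * The clipping itself happens indirectly: cpufreq_apply_cooling() publishes
 * the clip frequency through notify_device and then calls
 * cpufreq_update_policy() for each affected cpu.  That re-evaluates the
 * policy and invokes cpufreq_thermal_notifier() below with CPUFREQ_ADJUST,
 * which clamps policy->max to the clipped frequency.
 */
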
/**
 * cpufreq_thermal_notifier - notifier callback for cpufreq policy change.
 * @nb: struct notifier_block * with callback info.
 * @event: value showing cpufreq event for which this function invoked.
 * @data: callback-specific data
 */
static int cpufreq_thermal_notifier(struct notifier_block *nb,
				    unsigned long event, void *data)
{
	struct cpufreq_policy *policy = data;
	unsigned long max_freq;

	if (event != CPUFREQ_ADJUST || notify_device == NOTIFY_INVALID)
		return 0;

	/* Only clip policies of the cpus being cooled */
	if (!cpumask_test_cpu(policy->cpu, &notify_device->allowed_cpus))
		return 0;

	max_freq = notify_device->cpufreq_val;

	/* Never exceed user_policy.max */
	if (max_freq > policy->user_policy.max)
		max_freq = policy->user_policy.max;

	if (policy->max != max_freq)
		cpufreq_verify_within_limits(policy, 0, max_freq);

	return 0;
}

/*
 * cpufreq cooling device callback functions are defined below
 */

/**
 * cpufreq_get_max_state - callback function to get the max cooling state.
 * @cdev: thermal cooling device pointer.
 * @state: fill this variable with the max cooling state.
 */
static int cpufreq_get_max_state(struct thermal_cooling_device *cdev,
				 unsigned long *state)
{
	int ret = -EINVAL, i = 0;
	unsigned int count = 0;
	struct cpufreq_cooling_device *cpufreq_device = NULL, *pos;
	struct cpumask *maskPtr;
	unsigned int cpu;
	struct cpufreq_frequency_table *table;

	mutex_lock(&cooling_cpufreq_lock);
	list_for_each_entry(pos, &cooling_cpufreq_list, node) {
		if (pos->cool_dev == cdev) {
			cpufreq_device = pos;
			break;
		}
	}
	if (cpufreq_device == NULL)
		goto return_get_max_state;

	maskPtr = &cpufreq_device->allowed_cpus;
	cpu = cpumask_any(maskPtr);
	table = cpufreq_frequency_get_table(cpu);
	if (!table) {
		*state = 0;
		ret = 0;
		goto return_get_max_state;
	}

	/* count the valid frequency table entries */
	while (table[i].frequency != CPUFREQ_TABLE_END) {
		if (table[i].frequency != CPUFREQ_ENTRY_INVALID)
			count++;
		i++;
	}

	if (count > 0) {
		*state = count - 1;
		ret = 0;
	}

return_get_max_state:
	mutex_unlock(&cooling_cpufreq_lock);
	return ret;
}

/**
 * cpufreq_get_cur_state - callback function to get the current cooling state.
 * @cdev: thermal cooling device pointer.
 * @state: fill this variable with the current cooling state.
 */
static int cpufreq_get_cur_state(struct thermal_cooling_device *cdev,
				 unsigned long *state)
{
	int ret = -EINVAL;
	struct cpufreq_cooling_device *cpufreq_device;

	mutex_lock(&cooling_cpufreq_lock);
	list_for_each_entry(cpufreq_device, &cooling_cpufreq_list, node) {
		if (cpufreq_device && cpufreq_device->cool_dev == cdev) {
			*state = cpufreq_device->cpufreq_state;
			ret = 0;
			break;
		}
	}
	mutex_unlock(&cooling_cpufreq_lock);

	return ret;
}

/**
 * cpufreq_set_cur_state - callback function to set the current cooling state.
 * @cdev: thermal cooling device pointer.
 * @state: set this variable to the current cooling state.
 */
static int cpufreq_set_cur_state(struct thermal_cooling_device *cdev,
				 unsigned long state)
{
	int ret = -EINVAL;
	struct cpufreq_cooling_device *cpufreq_device;

	mutex_lock(&cooling_cpufreq_lock);
	list_for_each_entry(cpufreq_device, &cooling_cpufreq_list, node) {
		if (cpufreq_device && cpufreq_device->cool_dev == cdev) {
			ret = 0;
			break;
		}
	}
	if (!ret)
		ret = cpufreq_apply_cooling(cpufreq_device, state);

	mutex_unlock(&cooling_cpufreq_lock);

	return ret;
}

/* Bind cpufreq callbacks to thermal cooling device ops */
static struct thermal_cooling_device_ops const cpufreq_cooling_ops = {
	.get_max_state = cpufreq_get_max_state,
	.get_cur_state = cpufreq_get_cur_state,
	.set_cur_state = cpufreq_set_cur_state,
};

/* Notifier for cpufreq policy change */
static struct notifier_block thermal_cpufreq_notifier_block = {
	.notifier_call = cpufreq_thermal_notifier,
};

/**
 * cpufreq_cooling_register - function to create cpufreq cooling device.
 * @clip_cpus: cpumask of cpus where the frequency constraints will happen.
 */
struct thermal_cooling_device *cpufreq_cooling_register(
	struct cpumask *clip_cpus)
{
	struct thermal_cooling_device *cool_dev;
	struct cpufreq_cooling_device *cpufreq_dev = NULL;
	unsigned int cpufreq_dev_count = 0, min = 0, max = 0;
	char dev_name[THERMAL_NAME_LENGTH];
	int ret = 0, i;
	struct cpufreq_policy policy;

	list_for_each_entry(cpufreq_dev, &cooling_cpufreq_list, node)
		cpufreq_dev_count++;

	/* Verify that all the clip cpus have same freq_min, freq_max limit */
	for_each_cpu(i, clip_cpus) {
		/* continue if cpufreq policy is not found; do not return an error */
		if (!cpufreq_get_policy(&policy, i))
			continue;
		if (min == 0 && max == 0) {
			min = policy.cpuinfo.min_freq;
			max = policy.cpuinfo.max_freq;
		} else {
			if (min != policy.cpuinfo.min_freq ||
			    max != policy.cpuinfo.max_freq)
				return ERR_PTR(-EINVAL);
		}
	}
	cpufreq_dev = kzalloc(sizeof(struct cpufreq_cooling_device),
			      GFP_KERNEL);
	if (!cpufreq_dev)
		return ERR_PTR(-ENOMEM);

	cpumask_copy(&cpufreq_dev->allowed_cpus, clip_cpus);

	if (cpufreq_dev_count == 0)
		mutex_init(&cooling_cpufreq_lock);

	ret = get_idr(&cpufreq_idr, &cpufreq_dev->id);
	if (ret) {
		kfree(cpufreq_dev);
		return ERR_PTR(-EINVAL);
	}

	snprintf(dev_name, sizeof(dev_name), "thermal-cpufreq-%d",
		 cpufreq_dev->id);

	cool_dev = thermal_cooling_device_register(dev_name, cpufreq_dev,
						   &cpufreq_cooling_ops);
	/* handle both NULL and ERR_PTR failure conventions */
	if (IS_ERR_OR_NULL(cool_dev)) {
		release_idr(&cpufreq_idr, cpufreq_dev->id);
		kfree(cpufreq_dev);
		return ERR_PTR(-EINVAL);
	}
	cpufreq_dev->cool_dev = cool_dev;
	cpufreq_dev->cpufreq_state = 0;
	mutex_lock(&cooling_cpufreq_lock);
	list_add_tail(&cpufreq_dev->node, &cooling_cpufreq_list);

	/* Register the notifier for the first cpufreq cooling device */
	if (cpufreq_dev_count == 0)
		cpufreq_register_notifier(&thermal_cpufreq_notifier_block,
					  CPUFREQ_POLICY_NOTIFIER);
	mutex_unlock(&cooling_cpufreq_lock);

	return cool_dev;
}
EXPORT_SYMBOL(cpufreq_cooling_register);

/**
 * cpufreq_cooling_unregister - function to remove cpufreq cooling device.
 * @cdev: thermal cooling device pointer.
 */
void cpufreq_cooling_unregister(struct thermal_cooling_device *cdev)
{
	struct cpufreq_cooling_device *cpufreq_dev = NULL, *pos;
	unsigned int cpufreq_dev_count = 0;

	mutex_lock(&cooling_cpufreq_lock);
	/* find the matching cooling device and count all registered ones */
	list_for_each_entry(pos, &cooling_cpufreq_list, node) {
		if (pos->cool_dev == cdev)
			cpufreq_dev = pos;
		cpufreq_dev_count++;
	}

	if (!cpufreq_dev) {
		mutex_unlock(&cooling_cpufreq_lock);
		return;
	}

	list_del(&cpufreq_dev->node);

	/* Unregister the notifier for the last cpufreq cooling device */
	if (cpufreq_dev_count == 1) {
		cpufreq_unregister_notifier(&thermal_cpufreq_notifier_block,
					    CPUFREQ_POLICY_NOTIFIER);
	}
	mutex_unlock(&cooling_cpufreq_lock);

	thermal_cooling_device_unregister(cpufreq_dev->cool_dev);
	release_idr(&cpufreq_idr, cpufreq_dev->id);
	if (cpufreq_dev_count == 1)
		mutex_destroy(&cooling_cpufreq_lock);
	kfree(cpufreq_dev);
}
EXPORT_SYMBOL(cpufreq_cooling_unregister);
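
/*
 * Usage sketch (illustrative, not part of this file): a hypothetical platform
 * thermal driver would register a cpufreq cooling device for the cpus it
 * wants clipped and unregister it on removal.  The names example_clip_cpus,
 * example_cdev and example_thermal_probe/remove below are assumptions made
 * for the example only.
 *
 *	static struct cpumask example_clip_cpus;
 *	static struct thermal_cooling_device *example_cdev;
 *
 *	static int example_thermal_probe(struct platform_device *pdev)
 *	{
 *		cpumask_copy(&example_clip_cpus, cpu_possible_mask);
 *		example_cdev = cpufreq_cooling_register(&example_clip_cpus);
 *		if (IS_ERR_OR_NULL(example_cdev))
 *			return -EINVAL;
 *		return 0;
 *	}
 *
 *	static int example_thermal_remove(struct platform_device *pdev)
 *	{
 *		cpufreq_cooling_unregister(example_cdev);
 *		return 0;
 *	}
 */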