/*
 *  drivers/cpufreq/cpufreq_stats.c
 *
 *  Copyright (C) 2003-2004 Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>.
 *  (C) 2004 Zou Nan hai <nanhai.zou@intel.com>.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/cpu.h>
#include <linux/sysfs.h>
#include <linux/cpufreq.h>
#include <linux/module.h>
#include <linux/jiffies.h>
#include <linux/percpu.h>
#include <linux/kobject.h>
#include <linux/spinlock.h>
#include <linux/notifier.h>
#include <asm/cputime.h>
static spinlock_t cpufreq_stats_lock;

struct cpufreq_stats {
	unsigned int cpu;
	unsigned int total_trans;
	unsigned long long last_time;
	unsigned int max_state;
	unsigned int state_num;
	unsigned int last_index;
	u64 *time_in_state;
	unsigned int *freq_table;
#ifdef CONFIG_CPU_FREQ_STAT_DETAILS
	unsigned int *trans_table;
#endif
};

static DEFINE_PER_CPU(struct cpufreq_stats *, cpufreq_stats_table);

struct cpufreq_stats_attribute {
	struct attribute attr;
	ssize_t (*show)(struct cpufreq_stats *, char *);
};
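
/*
 * Credit the time elapsed since the last update to the frequency state the
 * CPU is currently running in (stat->last_index), under the stats lock.
 */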
static int cpufreq_stats_update(unsigned int cpu)
{
	struct cpufreq_stats *stat;
	unsigned long long cur_time;

	cur_time = get_jiffies_64();
	spin_lock(&cpufreq_stats_lock);
	stat = per_cpu(cpufreq_stats_table, cpu);
	if (stat->time_in_state)
		stat->time_in_state[stat->last_index] +=
			cur_time - stat->last_time;
	stat->last_time = cur_time;
	spin_unlock(&cpufreq_stats_lock);
	return 0;
}
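
/*
 * sysfs show callbacks for the per-policy "stats" group: total_trans reports
 * the number of frequency transitions seen so far, time_in_state prints one
 * "<frequency> <time>" line per state, with the time converted from jiffies
 * to clock_t units.
 */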
static ssize_t show_total_trans(struct cpufreq_policy *policy, char *buf)
{
	struct cpufreq_stats *stat = per_cpu(cpufreq_stats_table, policy->cpu);
	if (!stat)
		return 0;
	return sprintf(buf, "%d\n",
			per_cpu(cpufreq_stats_table, stat->cpu)->total_trans);
}

static ssize_t show_time_in_state(struct cpufreq_policy *policy, char *buf)
{
	ssize_t len = 0;
	int i;
	struct cpufreq_stats *stat = per_cpu(cpufreq_stats_table, policy->cpu);
	if (!stat)
		return 0;
	cpufreq_stats_update(stat->cpu);
	for (i = 0; i < stat->state_num; i++) {
		len += sprintf(buf + len, "%u %llu\n", stat->freq_table[i],
			(unsigned long long)
			cputime64_to_clock_t(stat->time_in_state[i]));
	}
	return len;
}
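
/*
 * With CONFIG_CPU_FREQ_STAT_DETAILS, trans_table additionally exposes a
 * from/to matrix of transition counts; output is truncated at PAGE_SIZE.
 */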
#ifdef CONFIG_CPU_FREQ_STAT_DETAILS
static ssize_t show_trans_table(struct cpufreq_policy *policy, char *buf)
{
	ssize_t len = 0;
	int i, j;

	struct cpufreq_stats *stat = per_cpu(cpufreq_stats_table, policy->cpu);
	if (!stat)
		return 0;
	cpufreq_stats_update(stat->cpu);
	len += snprintf(buf + len, PAGE_SIZE - len, "   From  :    To\n");
	len += snprintf(buf + len, PAGE_SIZE - len, "         : ");
	for (i = 0; i < stat->state_num; i++) {
		if (len >= PAGE_SIZE)
			break;
		len += snprintf(buf + len, PAGE_SIZE - len, "%9u ",
				stat->freq_table[i]);
	}
	if (len >= PAGE_SIZE)
		return PAGE_SIZE;

	len += snprintf(buf + len, PAGE_SIZE - len, "\n");

	for (i = 0; i < stat->state_num; i++) {
		if (len >= PAGE_SIZE)
			break;

		len += snprintf(buf + len, PAGE_SIZE - len, "%9u: ",
				stat->freq_table[i]);

		for (j = 0; j < stat->state_num; j++) {
			if (len >= PAGE_SIZE)
				break;
			len += snprintf(buf + len, PAGE_SIZE - len, "%9u ",
					stat->trans_table[i*stat->max_state+j]);
		}
		if (len >= PAGE_SIZE)
			break;
		len += snprintf(buf + len, PAGE_SIZE - len, "\n");
	}
	if (len >= PAGE_SIZE)
		return PAGE_SIZE;
	return len;
}
cpufreq_freq_attr_ro(trans_table);
#endif
cpufreq_freq_attr_ro(total_trans);
cpufreq_freq_attr_ro(time_in_state);

static struct attribute *default_attrs[] = {
	&total_trans.attr,
	&time_in_state.attr,
#ifdef CONFIG_CPU_FREQ_STAT_DETAILS
	&trans_table.attr,
#endif
	NULL
};
static struct attribute_group stats_attr_group = {
	.attrs = default_attrs,
	.name = "stats"
};
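
/* Map a frequency to its index in stat->freq_table, or -1 if it is absent. */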
static int freq_table_get_index(struct cpufreq_stats *stat, unsigned int freq)
{
	int index;
	for (index = 0; index < stat->max_state; index++)
		if (stat->freq_table[index] == freq)
			return index;
	return -1;
}
/* should be called late in the CPU removal sequence so that the stats
 * memory is still available in case someone tries to use it.
 */
static void cpufreq_stats_free_table(unsigned int cpu)
{
	struct cpufreq_stats *stat = per_cpu(cpufreq_stats_table, cpu);

	if (stat) {
		pr_debug("%s: Free stat table\n", __func__);
		kfree(stat->time_in_state);
		kfree(stat);
		per_cpu(cpufreq_stats_table, cpu) = NULL;
	}
}

/* must be called early in the CPU removal sequence (before
 * cpufreq_remove_dev) so that policy is still valid.
 */
static void cpufreq_stats_free_sysfs(unsigned int cpu)
{
	struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);

	if (!policy)
		return;

	if (!cpufreq_frequency_get_table(cpu))
		goto put_ref;

	if (!policy_is_shared(policy)) {
		pr_debug("%s: Free sysfs stat\n", __func__);
		sysfs_remove_group(&policy->kobj, &stats_attr_group);
	}

put_ref:
	cpufreq_cpu_put(policy);
}
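
/*
 * Set up the stats table for a policy: count the valid entries in the
 * driver's frequency table, carve time_in_state, freq_table (and, with
 * CONFIG_CPU_FREQ_STAT_DETAILS, trans_table) out of one allocation, and
 * attach the "stats" sysfs group to the policy's kobject.
 */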
static int cpufreq_stats_create_table(struct cpufreq_policy *policy,
		struct cpufreq_frequency_table *table)
{
	unsigned int i, j, count = 0, ret = 0;
	struct cpufreq_stats *stat;
	struct cpufreq_policy *data;
	unsigned int alloc_size;
	unsigned int cpu = policy->cpu;

	if (per_cpu(cpufreq_stats_table, cpu))
		return -EBUSY;
	stat = kzalloc(sizeof(struct cpufreq_stats), GFP_KERNEL);
	if (stat == NULL)
		return -ENOMEM;

	data = cpufreq_cpu_get(cpu);
	if (data == NULL) {
		ret = -EINVAL;
		goto error_get_fail;
	}

	ret = sysfs_create_group(&data->kobj, &stats_attr_group);
	if (ret)
		goto error_out;

	stat->cpu = cpu;
	per_cpu(cpufreq_stats_table, cpu) = stat;

	for (i = 0; table[i].frequency != CPUFREQ_TABLE_END; i++) {
		unsigned int freq = table[i].frequency;
		if (freq == CPUFREQ_ENTRY_INVALID)
			continue;
		count++;
	}

	alloc_size = count * sizeof(int) + count * sizeof(u64);

#ifdef CONFIG_CPU_FREQ_STAT_DETAILS
	alloc_size += count * count * sizeof(int);
#endif
	stat->max_state = count;
	stat->time_in_state = kzalloc(alloc_size, GFP_KERNEL);
	if (!stat->time_in_state) {
		ret = -ENOMEM;
		goto error_out;
	}
	stat->freq_table = (unsigned int *)(stat->time_in_state + count);

#ifdef CONFIG_CPU_FREQ_STAT_DETAILS
	stat->trans_table = stat->freq_table + count;
#endif
	j = 0;
	for (i = 0; table[i].frequency != CPUFREQ_TABLE_END; i++) {
		unsigned int freq = table[i].frequency;
		if (freq == CPUFREQ_ENTRY_INVALID)
			continue;
		if (freq_table_get_index(stat, freq) == -1)
			stat->freq_table[j++] = freq;
	}
	stat->state_num = j;
	spin_lock(&cpufreq_stats_lock);
	stat->last_time = get_jiffies_64();
	stat->last_index = freq_table_get_index(stat, policy->cur);
	spin_unlock(&cpufreq_stats_lock);
	cpufreq_cpu_put(data);
	return 0;
error_out:
	cpufreq_cpu_put(data);
error_get_fail:
	kfree(stat);
	per_cpu(cpufreq_stats_table, cpu) = NULL;
	return ret;
}
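
/*
 * When the CPU that manages a shared policy changes, move the stats table
 * from the old managing CPU (policy->last_cpu) to the new one.
 */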
static void cpufreq_stats_update_policy_cpu(struct cpufreq_policy *policy)
{
	struct cpufreq_stats *stat = per_cpu(cpufreq_stats_table,
			policy->last_cpu);

	pr_debug("Updating stats_table for new_cpu %u from last_cpu %u\n",
			policy->cpu, policy->last_cpu);
	per_cpu(cpufreq_stats_table, policy->cpu) = per_cpu(cpufreq_stats_table,
			policy->last_cpu);
	per_cpu(cpufreq_stats_table, policy->last_cpu) = NULL;
	stat->cpu = policy->cpu;
}
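
/*
 * Policy notifier: migrate the stats table on CPUFREQ_UPDATE_POLICY_CPU and
 * create it on CPUFREQ_NOTIFY once the policy's frequency table is known.
 */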
static int cpufreq_stat_notifier_policy(struct notifier_block *nb,
		unsigned long val, void *data)
{
	int ret;
	struct cpufreq_policy *policy = data;
	struct cpufreq_frequency_table *table;
	unsigned int cpu = policy->cpu;

	if (val == CPUFREQ_UPDATE_POLICY_CPU) {
		cpufreq_stats_update_policy_cpu(policy);
		return 0;
	}

	if (val != CPUFREQ_NOTIFY)
		return 0;
	table = cpufreq_frequency_get_table(cpu);
	if (!table)
		return 0;
	ret = cpufreq_stats_create_table(policy, table);
	if (ret)
		return ret;
	return 0;
}
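
/*
 * Transition notifier: on CPUFREQ_POSTCHANGE, fold the time spent at the old
 * frequency into time_in_state and bump the transition counters.
 */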
static int cpufreq_stat_notifier_trans(struct notifier_block *nb,
		unsigned long val, void *data)
{
	struct cpufreq_freqs *freq = data;
	struct cpufreq_stats *stat;
	int old_index, new_index;

	if (val != CPUFREQ_POSTCHANGE)
		return 0;

	stat = per_cpu(cpufreq_stats_table, freq->cpu);
	if (!stat)
		return 0;

	old_index = stat->last_index;
	new_index = freq_table_get_index(stat, freq->new);

	/* We can't do stat->time_in_state[-1]= .. */
	if (old_index == -1 || new_index == -1)
		return 0;

	cpufreq_stats_update(freq->cpu);

	if (old_index == new_index)
		return 0;

	spin_lock(&cpufreq_stats_lock);
	stat->last_index = new_index;
#ifdef CONFIG_CPU_FREQ_STAT_DETAILS
	stat->trans_table[old_index * stat->max_state + new_index]++;
#endif
	stat->total_trans++;
	spin_unlock(&cpufreq_stats_lock);
	return 0;
}
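
/*
 * CPU hotplug callback: refresh the policy when a CPU comes online, remove
 * the sysfs group before the CPU goes down (while its policy is still
 * valid), and free the stats table once the CPU is dead.
 */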
static int __cpuinit cpufreq_stat_cpu_callback(struct notifier_block *nfb,
					       unsigned long action,
					       void *hcpu)
{
	unsigned int cpu = (unsigned long)hcpu;

	switch (action) {
	case CPU_ONLINE:
	case CPU_ONLINE_FROZEN:
		cpufreq_update_policy(cpu);
		break;
	case CPU_DOWN_PREPARE:
	case CPU_DOWN_PREPARE_FROZEN:
		cpufreq_stats_free_sysfs(cpu);
		break;
	case CPU_DEAD:
	case CPU_DEAD_FROZEN:
		cpufreq_stats_free_table(cpu);
		break;
	}
	return NOTIFY_OK;
}

/* priority=1 so this will get called before cpufreq_remove_dev */
static struct notifier_block cpufreq_stat_cpu_notifier __refdata = {
	.notifier_call = cpufreq_stat_cpu_callback,
	.priority = 1,
};

static struct notifier_block notifier_policy_block = {
	.notifier_call = cpufreq_stat_notifier_policy
};

static struct notifier_block notifier_trans_block = {
	.notifier_call = cpufreq_stat_notifier_trans
};
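
/*
 * Register the policy and transition notifiers plus the hotplug callback,
 * then run cpufreq_update_policy() on each online CPU so stats tables are
 * created for CPUs that are already up; on failure, unwind in reverse order.
 */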
static int __init cpufreq_stats_init(void)
{
	int ret;
	unsigned int cpu;

	spin_lock_init(&cpufreq_stats_lock);
	ret = cpufreq_register_notifier(&notifier_policy_block,
				CPUFREQ_POLICY_NOTIFIER);
	if (ret)
		return ret;

	register_hotcpu_notifier(&cpufreq_stat_cpu_notifier);
	for_each_online_cpu(cpu)
		cpufreq_update_policy(cpu);

	ret = cpufreq_register_notifier(&notifier_trans_block,
				CPUFREQ_TRANSITION_NOTIFIER);
	if (ret) {
		cpufreq_unregister_notifier(&notifier_policy_block,
				CPUFREQ_POLICY_NOTIFIER);
		unregister_hotcpu_notifier(&cpufreq_stat_cpu_notifier);
		for_each_online_cpu(cpu)
			cpufreq_stats_free_table(cpu);
		return ret;
	}

	return 0;
}

static void __exit cpufreq_stats_exit(void)
{
	unsigned int cpu;

	cpufreq_unregister_notifier(&notifier_policy_block,
			CPUFREQ_POLICY_NOTIFIER);
	cpufreq_unregister_notifier(&notifier_trans_block,
			CPUFREQ_TRANSITION_NOTIFIER);
	unregister_hotcpu_notifier(&cpufreq_stat_cpu_notifier);
	for_each_online_cpu(cpu) {
		cpufreq_stats_free_table(cpu);
		cpufreq_stats_free_sysfs(cpu);
	}
}

MODULE_AUTHOR("Zou Nan hai <nanhai.zou@intel.com>");
MODULE_DESCRIPTION("'cpufreq_stats' - A driver to export cpufreq stats "
			"through sysfs filesystem");
MODULE_LICENSE("GPL");

module_init(cpufreq_stats_init);
module_exit(cpufreq_stats_exit);