cache.c

/*
 * Extract CPU cache information and expose them via sysfs.
 *
 * Copyright IBM Corp. 2012
 * Author(s): Heiko Carstens <heiko.carstens@de.ibm.com>
 */

#include <linux/notifier.h>
#include <linux/seq_file.h>
#include <linux/init.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/cpu.h>
#include <asm/facility.h>

struct cache {
	unsigned long size;
	unsigned int line_size;
	unsigned int associativity;
	unsigned int nr_sets;
	unsigned int level   : 3;
	unsigned int type    : 2;
	unsigned int private : 1;
	struct list_head list;
};

struct cache_dir {
	struct kobject *kobj;
	struct cache_index_dir *index;
};

struct cache_index_dir {
	struct kobject kobj;
	int cpu;
	struct cache *cache;
	struct cache_index_dir *next;
};

enum {
	CACHE_SCOPE_NOTEXISTS,
	CACHE_SCOPE_PRIVATE,
	CACHE_SCOPE_SHARED,
	CACHE_SCOPE_RESERVED,
};

enum {
	CACHE_TYPE_SEPARATE,
	CACHE_TYPE_DATA,
	CACHE_TYPE_INSTRUCTION,
	CACHE_TYPE_UNIFIED,
};

enum {
	EXTRACT_TOPOLOGY,
	EXTRACT_LINE_SIZE,
	EXTRACT_SIZE,
	EXTRACT_ASSOCIATIVITY,
};

enum {
	CACHE_TI_UNIFIED = 0,
	CACHE_TI_DATA = 0,
	CACHE_TI_INSTRUCTION,
};

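/*
 * The ECAG topology query returns one descriptor byte per cache level,
 * with the scope and type of that level encoded as two-bit fields; the
 * anonymous bitfield below pads the bits this code does not use.
 */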
struct cache_info {
	unsigned char	    : 4;
	unsigned char scope : 2;
	unsigned char type  : 2;
};

#define CACHE_MAX_LEVEL 8

union cache_topology {
	struct cache_info ci[CACHE_MAX_LEVEL];
	unsigned long long raw;
};

static const char * const cache_type_string[] = {
	"Data",
	"Instruction",
	"Unified",
};

static struct cache_dir *cache_dir_cpu[NR_CPUS];
static LIST_HEAD(cache_list);

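/* Print one line per detected cache; used by the /proc/cpuinfo code. */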
void show_cacheinfo(struct seq_file *m)
{
	struct cache *cache;
	int index = 0;

	list_for_each_entry(cache, &cache_list, list) {
		seq_printf(m, "cache%-11d: ", index);
		seq_printf(m, "level=%d ", cache->level);
		seq_printf(m, "type=%s ", cache_type_string[cache->type]);
		seq_printf(m, "scope=%s ", cache->private ? "Private" : "Shared");
		seq_printf(m, "size=%luK ", cache->size >> 10);
		seq_printf(m, "line_size=%u ", cache->line_size);
		seq_printf(m, "associativity=%d", cache->associativity);
		seq_puts(m, "\n");
		index++;
	}
}

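/*
 * ECAG (EXTRACT CPU ATTRIBUTE) reads one cache attribute, selected by
 * attribute indication (ai), level indication (li) and type indication
 * (ti). The instruction is emitted via .insn, presumably so the file
 * still builds with assemblers that do not know the ecag mnemonic.
 */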
static inline unsigned long ecag(int ai, int li, int ti)
{
	unsigned long cmd, val;

	cmd = ai << 4 | li << 1 | ti;
	asm volatile(".insn rsy,0xeb000000004c,%0,0,0(%1)" /* ecag */
		     : "=d" (val) : "a" (cmd));
	return val;
}

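/*
 * Query the attributes of one cache and append it to the global cache
 * list. The level is stored 1-based for sysfs, and the type is shifted
 * down by one so it indexes cache_type_string[].
 */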
static int __init cache_add(int level, int private, int type)
{
	struct cache *cache;
	int ti;

	cache = kzalloc(sizeof(*cache), GFP_KERNEL);
	if (!cache)
		return -ENOMEM;
	if (type == CACHE_TYPE_INSTRUCTION)
		ti = CACHE_TI_INSTRUCTION;
	else
		ti = CACHE_TI_UNIFIED;
	cache->size = ecag(EXTRACT_SIZE, level, ti);
	cache->line_size = ecag(EXTRACT_LINE_SIZE, level, ti);
	cache->associativity = ecag(EXTRACT_ASSOCIATIVITY, level, ti);
	cache->nr_sets = cache->size / cache->associativity;
	cache->nr_sets /= cache->line_size;
	cache->private = private;
	cache->level = level + 1;
	cache->type = type - 1;
	list_add_tail(&cache->list, &cache_list);
	return 0;
}

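/*
 * Walk the topology summary and register every reported cache level;
 * a "separate" level contributes one data and one instruction cache.
 * Scanning stops at the first non-existent or reserved level.
 */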
static void __init cache_build_info(void)
{
	struct cache *cache, *next;
	union cache_topology ct;
	int level, private, rc;

	ct.raw = ecag(EXTRACT_TOPOLOGY, 0, 0);
	for (level = 0; level < CACHE_MAX_LEVEL; level++) {
		switch (ct.ci[level].scope) {
		case CACHE_SCOPE_NOTEXISTS:
		case CACHE_SCOPE_RESERVED:
			return;
		case CACHE_SCOPE_SHARED:
			private = 0;
			break;
		case CACHE_SCOPE_PRIVATE:
			private = 1;
			break;
		}
		if (ct.ci[level].type == CACHE_TYPE_SEPARATE) {
			rc = cache_add(level, private, CACHE_TYPE_DATA);
			rc |= cache_add(level, private, CACHE_TYPE_INSTRUCTION);
		} else {
			rc = cache_add(level, private, ct.ci[level].type);
		}
		if (rc)
			goto error;
	}
	return;
error:
	list_for_each_entry_safe(cache, next, &cache_list, list) {
		list_del(&cache->list);
		kfree(cache);
	}
}

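/* Create the per-CPU "cache" kobject below the CPU's sysfs device. */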
static struct cache_dir *__cpuinit cache_create_cache_dir(int cpu)
{
	struct cache_dir *cache_dir;
	struct kobject *kobj = NULL;
	struct device *dev;

	dev = get_cpu_device(cpu);
	if (!dev)
		goto out;
	kobj = kobject_create_and_add("cache", &dev->kobj);
	if (!kobj)
		goto out;
	cache_dir = kzalloc(sizeof(*cache_dir), GFP_KERNEL);
	if (!cache_dir)
		goto out;
	cache_dir->kobj = kobj;
	cache_dir_cpu[cpu] = cache_dir;
	return cache_dir;
out:
	kobject_put(kobj);
	return NULL;
}

static struct cache_index_dir *kobj_to_cache_index_dir(struct kobject *kobj)
{
	return container_of(kobj, struct cache_index_dir, kobj);
}

static void cache_index_release(struct kobject *kobj)
{
	struct cache_index_dir *index;

	index = kobj_to_cache_index_dir(kobj);
	kfree(index);
}

static ssize_t cache_index_show(struct kobject *kobj,
				struct attribute *attr, char *buf)
{
	struct kobj_attribute *kobj_attr;

	kobj_attr = container_of(attr, struct kobj_attribute, attr);
	return kobj_attr->show(kobj, kobj_attr, buf);
}

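/*
 * Generate a read-only show function plus the matching kobj_attribute
 * for one cache property exported below indexN/.
 */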
#define DEFINE_CACHE_ATTR(_name, _format, _value)			\
static ssize_t cache_##_name##_show(struct kobject *kobj,		\
				    struct kobj_attribute *attr,	\
				    char *buf)				\
{									\
	struct cache_index_dir *index;					\
									\
	index = kobj_to_cache_index_dir(kobj);				\
	return sprintf(buf, _format, _value);				\
}									\
static struct kobj_attribute cache_##_name##_attr =			\
	__ATTR(_name, 0444, cache_##_name##_show, NULL);

DEFINE_CACHE_ATTR(size, "%luK\n", index->cache->size >> 10);
DEFINE_CACHE_ATTR(coherency_line_size, "%u\n", index->cache->line_size);
DEFINE_CACHE_ATTR(number_of_sets, "%u\n", index->cache->nr_sets);
DEFINE_CACHE_ATTR(ways_of_associativity, "%u\n", index->cache->associativity);
DEFINE_CACHE_ATTR(type, "%s\n", cache_type_string[index->cache->type]);
DEFINE_CACHE_ATTR(level, "%d\n", index->cache->level);

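/*
 * Only private (per-CPU) caches get index directories (see
 * cache_add_cpu), so the shared CPU map of each listed cache is simply
 * the owning CPU itself.
 */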
static ssize_t shared_cpu_map_func(struct kobject *kobj, int type, char *buf)
{
	struct cache_index_dir *index;
	int len;

	index = kobj_to_cache_index_dir(kobj);
	len = type ?
		cpulist_scnprintf(buf, PAGE_SIZE - 2, cpumask_of(index->cpu)) :
		cpumask_scnprintf(buf, PAGE_SIZE - 2, cpumask_of(index->cpu));
	len += sprintf(&buf[len], "\n");
	return len;
}

static ssize_t shared_cpu_map_show(struct kobject *kobj,
				   struct kobj_attribute *attr, char *buf)
{
	return shared_cpu_map_func(kobj, 0, buf);
}

static struct kobj_attribute cache_shared_cpu_map_attr =
	__ATTR(shared_cpu_map, 0444, shared_cpu_map_show, NULL);

static ssize_t shared_cpu_list_show(struct kobject *kobj,
				    struct kobj_attribute *attr, char *buf)
{
	return shared_cpu_map_func(kobj, 1, buf);
}

static struct kobj_attribute cache_shared_cpu_list_attr =
	__ATTR(shared_cpu_list, 0444, shared_cpu_list_show, NULL);

static struct attribute *cache_index_default_attrs[] = {
	&cache_type_attr.attr,
	&cache_size_attr.attr,
	&cache_number_of_sets_attr.attr,
	&cache_ways_of_associativity_attr.attr,
	&cache_level_attr.attr,
	&cache_coherency_line_size_attr.attr,
	&cache_shared_cpu_map_attr.attr,
	&cache_shared_cpu_list_attr.attr,
	NULL,
};

static const struct sysfs_ops cache_index_ops = {
	.show = cache_index_show,
};

static struct kobj_type cache_index_type = {
	.sysfs_ops = &cache_index_ops,
	.release = cache_index_release,
	.default_attrs = cache_index_default_attrs,
};

static int __cpuinit cache_create_index_dir(struct cache_dir *cache_dir,
					    struct cache *cache, int index,
					    int cpu)
{
	struct cache_index_dir *index_dir;
	int rc;

	index_dir = kzalloc(sizeof(*index_dir), GFP_KERNEL);
	if (!index_dir)
		return -ENOMEM;
	index_dir->cache = cache;
	index_dir->cpu = cpu;
	rc = kobject_init_and_add(&index_dir->kobj, &cache_index_type,
				  cache_dir->kobj, "index%d", index);
	if (rc)
		goto out;
	index_dir->next = cache_dir->index;
	cache_dir->index = index_dir;
	return 0;
out:
	kfree(index_dir);
	return rc;
}

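/*
 * Create indexN directories for all private caches of a CPU. The cache
 * list is filled in level order with the private levels first, so the
 * scan stops at the first shared cache.
 */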
static int __cpuinit cache_add_cpu(int cpu)
{
	struct cache_dir *cache_dir;
	struct cache *cache;
	int rc, index = 0;

	if (list_empty(&cache_list))
		return 0;
	cache_dir = cache_create_cache_dir(cpu);
	if (!cache_dir)
		return -ENOMEM;
	list_for_each_entry(cache, &cache_list, list) {
		if (!cache->private)
			break;
		rc = cache_create_index_dir(cache_dir, cache, index, cpu);
		if (rc)
			return rc;
		index++;
	}
	return 0;
}

static void __cpuinit cache_remove_cpu(int cpu)
{
	struct cache_index_dir *index, *next;
	struct cache_dir *cache_dir;

	cache_dir = cache_dir_cpu[cpu];
	if (!cache_dir)
		return;
	index = cache_dir->index;
	while (index) {
		next = index->next;
		kobject_put(&index->kobj);
		index = next;
	}
	kobject_put(cache_dir->kobj);
	kfree(cache_dir);
	cache_dir_cpu[cpu] = NULL;
}

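/*
 * CPU hotplug callback: build the sysfs directories when a CPU comes
 * online and tear them down again when it goes away (or when creation
 * failed halfway).
 */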
static int __cpuinit cache_hotplug(struct notifier_block *nfb,
				   unsigned long action, void *hcpu)
{
	int cpu = (long)hcpu;
	int rc = 0;

	switch (action & ~CPU_TASKS_FROZEN) {
	case CPU_ONLINE:
		rc = cache_add_cpu(cpu);
		if (rc)
			cache_remove_cpu(cpu);
		break;
	case CPU_DEAD:
		cache_remove_cpu(cpu);
		break;
	}
	return rc ? NOTIFY_BAD : NOTIFY_OK;
}

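/*
 * Facility 34 is the general-instructions-extension facility, which
 * among other things provides the ECAG instruction; without it there
 * is no cache information to extract.
 */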
static int __init cache_init(void)
{
	int cpu;

	if (!test_facility(34))
		return 0;
	cache_build_info();
	for_each_online_cpu(cpu)
		cache_add_cpu(cpu);
	hotcpu_notifier(cache_hotplug, 0);
	return 0;
}

device_initcall(cache_init);