intel_cacheinfo.c

/*
 * Routines to identify caches on Intel CPU.
 *
 * Changes:
 * Venkatesh Pallipadi : Adding cache identification through cpuid(4)
 * Ashok Raj <ashok.raj@intel.com>: Work with CPU hotplug infrastructure.
 */

#include <linux/init.h>
#include <linux/slab.h>
#include <linux/device.h>
#include <linux/compiler.h>
#include <linux/cpu.h>
#include <linux/sched.h>

#include <asm/processor.h>
#include <asm/smp.h>

#define LVL_1_INST  1
#define LVL_1_DATA  2
#define LVL_2       3
#define LVL_3       4
#define LVL_TRACE   5

struct _cache_table
{
    unsigned char descriptor;
    char cache_type;
    short size;
};

/* All the cache descriptor types we care about (no TLB entries) */
static struct _cache_table cache_table[] __cpuinitdata =
{
    { 0x06, LVL_1_INST, 8 },     /* 4-way set assoc, 32 byte line size */
    { 0x08, LVL_1_INST, 16 },    /* 4-way set assoc, 32 byte line size */
    { 0x0a, LVL_1_DATA, 8 },     /* 2-way set assoc, 32 byte line size */
    { 0x0c, LVL_1_DATA, 16 },    /* 4-way set assoc, 32 byte line size */
    { 0x22, LVL_3, 512 },        /* 4-way set assoc, sectored cache, 64 byte line size */
    { 0x23, LVL_3, 1024 },       /* 8-way set assoc, sectored cache, 64 byte line size */
    { 0x25, LVL_3, 2048 },       /* 8-way set assoc, sectored cache, 64 byte line size */
    { 0x29, LVL_3, 4096 },       /* 8-way set assoc, sectored cache, 64 byte line size */
    { 0x2c, LVL_1_DATA, 32 },    /* 8-way set assoc, 64 byte line size */
    { 0x30, LVL_1_INST, 32 },    /* 8-way set assoc, 64 byte line size */
    { 0x39, LVL_2, 128 },        /* 4-way set assoc, sectored cache, 64 byte line size */
    { 0x3a, LVL_2, 192 },        /* 6-way set assoc, sectored cache, 64 byte line size */
    { 0x3b, LVL_2, 128 },        /* 2-way set assoc, sectored cache, 64 byte line size */
    { 0x3c, LVL_2, 256 },        /* 4-way set assoc, sectored cache, 64 byte line size */
    { 0x3d, LVL_2, 384 },        /* 6-way set assoc, sectored cache, 64 byte line size */
    { 0x3e, LVL_2, 512 },        /* 4-way set assoc, sectored cache, 64 byte line size */
    { 0x41, LVL_2, 128 },        /* 4-way set assoc, 32 byte line size */
    { 0x42, LVL_2, 256 },        /* 4-way set assoc, 32 byte line size */
    { 0x43, LVL_2, 512 },        /* 4-way set assoc, 32 byte line size */
    { 0x44, LVL_2, 1024 },       /* 4-way set assoc, 32 byte line size */
    { 0x45, LVL_2, 2048 },       /* 4-way set assoc, 32 byte line size */
    { 0x46, LVL_3, 4096 },       /* 4-way set assoc, 64 byte line size */
    { 0x47, LVL_3, 8192 },       /* 8-way set assoc, 64 byte line size */
    { 0x49, LVL_3, 4096 },       /* 16-way set assoc, 64 byte line size */
    { 0x4a, LVL_3, 6144 },       /* 12-way set assoc, 64 byte line size */
    { 0x4b, LVL_3, 8192 },       /* 16-way set assoc, 64 byte line size */
    { 0x4c, LVL_3, 12288 },      /* 12-way set assoc, 64 byte line size */
    { 0x4d, LVL_3, 16384 },      /* 16-way set assoc, 64 byte line size */
    { 0x60, LVL_1_DATA, 16 },    /* 8-way set assoc, sectored cache, 64 byte line size */
    { 0x66, LVL_1_DATA, 8 },     /* 4-way set assoc, sectored cache, 64 byte line size */
    { 0x67, LVL_1_DATA, 16 },    /* 4-way set assoc, sectored cache, 64 byte line size */
    { 0x68, LVL_1_DATA, 32 },    /* 4-way set assoc, sectored cache, 64 byte line size */
    { 0x70, LVL_TRACE, 12 },     /* 8-way set assoc */
    { 0x71, LVL_TRACE, 16 },     /* 8-way set assoc */
    { 0x72, LVL_TRACE, 32 },     /* 8-way set assoc */
    { 0x73, LVL_TRACE, 64 },     /* 8-way set assoc */
    { 0x78, LVL_2, 1024 },       /* 4-way set assoc, 64 byte line size */
    { 0x79, LVL_2, 128 },        /* 8-way set assoc, sectored cache, 64 byte line size */
    { 0x7a, LVL_2, 256 },        /* 8-way set assoc, sectored cache, 64 byte line size */
    { 0x7b, LVL_2, 512 },        /* 8-way set assoc, sectored cache, 64 byte line size */
    { 0x7c, LVL_2, 1024 },       /* 8-way set assoc, sectored cache, 64 byte line size */
    { 0x7d, LVL_2, 2048 },       /* 8-way set assoc, 64 byte line size */
    { 0x7f, LVL_2, 512 },        /* 2-way set assoc, 64 byte line size */
    { 0x82, LVL_2, 256 },        /* 8-way set assoc, 32 byte line size */
    { 0x83, LVL_2, 512 },        /* 8-way set assoc, 32 byte line size */
    { 0x84, LVL_2, 1024 },       /* 8-way set assoc, 32 byte line size */
    { 0x85, LVL_2, 2048 },       /* 8-way set assoc, 32 byte line size */
    { 0x86, LVL_2, 512 },        /* 4-way set assoc, 64 byte line size */
    { 0x87, LVL_2, 1024 },       /* 8-way set assoc, 64 byte line size */
    { 0x00, 0, 0 }
};

enum _cache_type
{
    CACHE_TYPE_NULL    = 0,
    CACHE_TYPE_DATA    = 1,
    CACHE_TYPE_INST    = 2,
    CACHE_TYPE_UNIFIED = 3
};

union _cpuid4_leaf_eax {
    struct {
        enum _cache_type type:5;
        unsigned int level:3;
        unsigned int is_self_initializing:1;
        unsigned int is_fully_associative:1;
        unsigned int reserved:4;
        unsigned int num_threads_sharing:12;
        unsigned int num_cores_on_die:6;
    } split;
    u32 full;
};

union _cpuid4_leaf_ebx {
    struct {
        unsigned int coherency_line_size:12;
        unsigned int physical_line_partition:10;
        unsigned int ways_of_associativity:10;
    } split;
    u32 full;
};

union _cpuid4_leaf_ecx {
    struct {
        unsigned int number_of_sets:32;
    } split;
    u32 full;
};

struct _cpuid4_info {
    union _cpuid4_leaf_eax eax;
    union _cpuid4_leaf_ebx ebx;
    union _cpuid4_leaf_ecx ecx;
    unsigned long size;
    cpumask_t shared_cpu_map;
};

static unsigned short num_cache_leaves;
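
/*
 * Fill in one _cpuid4_info from the deterministic cache parameters
 * leaf, cpuid(4, index).  Each size field is reported biased by one,
 * so the cache size in bytes works out to:
 *
 *   (number_of_sets + 1) * (coherency_line_size + 1) *
 *   (physical_line_partition + 1) * (ways_of_associativity + 1)
 */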
static int __cpuinit cpuid4_cache_lookup(int index, struct _cpuid4_info *this_leaf)
{
    unsigned int eax, ebx, ecx, edx;
    union _cpuid4_leaf_eax cache_eax;

    cpuid_count(4, index, &eax, &ebx, &ecx, &edx);
    cache_eax.full = eax;
    if (cache_eax.split.type == CACHE_TYPE_NULL)
        return -EIO; /* better error ? */

    this_leaf->eax.full = eax;
    this_leaf->ebx.full = ebx;
    this_leaf->ecx.full = ecx;
    this_leaf->size = (this_leaf->ecx.split.number_of_sets + 1) *
        (this_leaf->ebx.split.coherency_line_size + 1) *
        (this_leaf->ebx.split.physical_line_partition + 1) *
        (this_leaf->ebx.split.ways_of_associativity + 1);
    return 0;
}

/* will only be called once; __init is safe here */
static int __init find_num_cache_leaves(void)
{
    unsigned int eax, ebx, ecx, edx;
    union _cpuid4_leaf_eax cache_eax;
    int i = -1;

    do {
        ++i;
        /* Do cpuid(4) loop to find out num_cache_leaves */
        cpuid_count(4, i, &eax, &ebx, &ecx, &edx);
        cache_eax.full = eax;
    } while (cache_eax.split.type != CACHE_TYPE_NULL);
    return i;
}
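
/*
 * Size up the caches for this CPU.  The deterministic cpuid(4) leaf is
 * preferred when the processor supports it (cpuid_level > 3); the legacy
 * cpuid(2) descriptor table is decoded as well, both as a fallback and
 * for trace-cache sizes, which cpuid(4) does not report.  Returns the
 * L2 size in KB.
 */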
unsigned int __cpuinit init_intel_cacheinfo(struct cpuinfo_x86 *c)
{
    unsigned int trace = 0, l1i = 0, l1d = 0, l2 = 0, l3 = 0; /* Cache sizes */
    unsigned int new_l1d = 0, new_l1i = 0; /* Cache sizes from cpuid(4) */
    unsigned int new_l2 = 0, new_l3 = 0, i; /* Cache sizes from cpuid(4) */
    unsigned int l2_id = 0, l3_id = 0, num_threads_sharing, index_msb;
#ifdef CONFIG_SMP
    unsigned int cpu = (c == &boot_cpu_data) ? 0 : (c - cpu_data);
#endif

    if (c->cpuid_level > 3) {
        static int is_initialized;

        if (is_initialized == 0) {
            /* Init num_cache_leaves from boot CPU */
            num_cache_leaves = find_num_cache_leaves();
            is_initialized++;
        }

        /*
         * Whenever possible use cpuid(4), deterministic cache
         * parameters cpuid leaf to find the cache details
         */
        for (i = 0; i < num_cache_leaves; i++) {
            struct _cpuid4_info this_leaf;
            int retval;

            retval = cpuid4_cache_lookup(i, &this_leaf);
            if (retval >= 0) {
                switch (this_leaf.eax.split.level) {
                case 1:
                    if (this_leaf.eax.split.type == CACHE_TYPE_DATA)
                        new_l1d = this_leaf.size/1024;
                    else if (this_leaf.eax.split.type == CACHE_TYPE_INST)
                        new_l1i = this_leaf.size/1024;
                    break;
                case 2:
                    new_l2 = this_leaf.size/1024;
                    num_threads_sharing = 1 + this_leaf.eax.split.num_threads_sharing;
                    index_msb = get_count_order(num_threads_sharing);
                    /* CPUs whose APIC IDs match above these bits share this cache */
                    l2_id = c->apicid >> index_msb;
                    break;
                case 3:
                    new_l3 = this_leaf.size/1024;
                    num_threads_sharing = 1 + this_leaf.eax.split.num_threads_sharing;
                    index_msb = get_count_order(num_threads_sharing);
                    l3_id = c->apicid >> index_msb;
                    break;
                default:
                    break;
                }
            }
        }
    }
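
    /*
     * cpuid(2) packs cache and TLB descriptors into EAX..EDX: the low
     * byte of EAX is the number of times the leaf must be queried, a
     * set bit 31 marks a register as invalid, and every other byte is
     * one descriptor to look up in cache_table[].
     */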
    if (c->cpuid_level > 1) {
        /* supports eax=2 call */
        int i, j, n;
        int regs[4];
        unsigned char *dp = (unsigned char *)regs;

        /* Number of times to iterate */
        n = cpuid_eax(2) & 0xFF;

        for (i = 0; i < n; i++) {
            cpuid(2, &regs[0], &regs[1], &regs[2], &regs[3]);

            /* If bit 31 is set, this is an unknown format */
            for (j = 0; j < 3; j++)
                if (regs[j] < 0)
                    regs[j] = 0;

            /* Byte 0 is level count, not a descriptor */
            for (j = 1; j < 16; j++) {
                unsigned char des = dp[j];
                unsigned char k = 0;

                /* look up this descriptor in the table */
                while (cache_table[k].descriptor != 0) {
                    if (cache_table[k].descriptor == des) {
                        switch (cache_table[k].cache_type) {
                        case LVL_1_INST:
                            l1i += cache_table[k].size;
                            break;
                        case LVL_1_DATA:
                            l1d += cache_table[k].size;
                            break;
                        case LVL_2:
                            l2 += cache_table[k].size;
                            break;
                        case LVL_3:
                            l3 += cache_table[k].size;
                            break;
                        case LVL_TRACE:
                            trace += cache_table[k].size;
                            break;
                        }
                        break;
                    }
                    k++;
                }
            }
        }
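
        /*
         * Sizes obtained from cpuid(4) above take precedence over the
         * legacy cpuid(2) descriptors.
         */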
        if (new_l1d)
            l1d = new_l1d;

        if (new_l1i)
            l1i = new_l1i;

        if (new_l2) {
            l2 = new_l2;
#ifdef CONFIG_SMP
            cpu_llc_id[cpu] = l2_id;
#endif
        }

        if (new_l3) {
            l3 = new_l3;
#ifdef CONFIG_SMP
            cpu_llc_id[cpu] = l3_id;
#endif
        }

        if (trace)
            printk(KERN_INFO "CPU: Trace cache: %dK uops", trace);
        else if (l1i)
            printk(KERN_INFO "CPU: L1 I cache: %dK", l1i);

        if (l1d)
            printk(", L1 D cache: %dK\n", l1d);
        else
            printk("\n");

        if (l2)
            printk(KERN_INFO "CPU: L2 cache: %dK\n", l2);

        if (l3)
            printk(KERN_INFO "CPU: L3 cache: %dK\n", l3);

        c->x86_cache_size = l3 ? l3 : (l2 ? l2 : (l1i+l1d));
    }

    return l2;
}

/* pointer to _cpuid4_info array (for each cache leaf) */
static struct _cpuid4_info *cpuid4_info[NR_CPUS];
#define CPUID4_INFO_IDX(x, y)    (&((cpuid4_info[x])[y]))

#ifdef CONFIG_SMP
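/*
 * Logical CPUs share a given cache when their APIC IDs agree in all bits
 * above the low index_msb bits; num_threads_sharing from cpuid(4) tells
 * us how many low bits to discard.
 */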
static void __cpuinit cache_shared_cpu_map_setup(unsigned int cpu, int index)
{
    struct _cpuid4_info *this_leaf, *sibling_leaf;
    unsigned long num_threads_sharing;
    int index_msb, i;
    struct cpuinfo_x86 *c = cpu_data;

    this_leaf = CPUID4_INFO_IDX(cpu, index);
    num_threads_sharing = 1 + this_leaf->eax.split.num_threads_sharing;

    if (num_threads_sharing == 1)
        cpu_set(cpu, this_leaf->shared_cpu_map);
    else {
        index_msb = get_count_order(num_threads_sharing);

        for_each_online_cpu(i) {
            if (c[i].apicid >> index_msb ==
                c[cpu].apicid >> index_msb) {
                cpu_set(i, this_leaf->shared_cpu_map);
                if (i != cpu && cpuid4_info[i]) {
                    sibling_leaf = CPUID4_INFO_IDX(i, index);
                    cpu_set(cpu, sibling_leaf->shared_cpu_map);
                }
            }
        }
    }
}

static void __cpuinit cache_remove_shared_cpu_map(unsigned int cpu, int index)
{
    struct _cpuid4_info *this_leaf, *sibling_leaf;
    int sibling;

    this_leaf = CPUID4_INFO_IDX(cpu, index);
    for_each_cpu_mask(sibling, this_leaf->shared_cpu_map) {
        sibling_leaf = CPUID4_INFO_IDX(sibling, index);
        cpu_clear(cpu, sibling_leaf->shared_cpu_map);
    }
}
#else
static void __init cache_shared_cpu_map_setup(unsigned int cpu, int index) {}
static void __init cache_remove_shared_cpu_map(unsigned int cpu, int index) {}
#endif

static void free_cache_attributes(unsigned int cpu)
{
    kfree(cpuid4_info[cpu]);
    cpuid4_info[cpu] = NULL;
}

static int __cpuinit detect_cache_attributes(unsigned int cpu)
{
    struct _cpuid4_info *this_leaf;
    unsigned long j;
    int retval;
    cpumask_t oldmask;

    if (num_cache_leaves == 0)
        return -ENOENT;

    cpuid4_info[cpu] = kmalloc(
        sizeof(struct _cpuid4_info) * num_cache_leaves, GFP_KERNEL);
    if (unlikely(cpuid4_info[cpu] == NULL))
        return -ENOMEM;
    memset(cpuid4_info[cpu], 0,
        sizeof(struct _cpuid4_info) * num_cache_leaves);

    /* cpuid must execute on the CPU being probed, so pin this task there */
    oldmask = current->cpus_allowed;
    retval = set_cpus_allowed(current, cpumask_of_cpu(cpu));
    if (retval)
        goto out;

    /* Do cpuid and store the results */
    retval = 0;
    for (j = 0; j < num_cache_leaves; j++) {
        this_leaf = CPUID4_INFO_IDX(cpu, j);
        retval = cpuid4_cache_lookup(j, this_leaf);
        if (unlikely(retval < 0))
            break;
        cache_shared_cpu_map_setup(cpu, j);
    }
    set_cpus_allowed(current, oldmask);

out:
    if (retval)
        free_cache_attributes(cpu);
    return retval;
}
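
/* sysfs interface: /sys/devices/system/cpu/cpuX/cache/indexY/... */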
#ifdef CONFIG_SYSFS

#include <linux/kobject.h>
#include <linux/sysfs.h>

extern struct sysdev_class cpu_sysdev_class; /* from drivers/base/cpu.c */

/* pointer to kobject for cpuX/cache */
static struct kobject *cache_kobject[NR_CPUS];

struct _index_kobject {
    struct kobject kobj;
    unsigned int cpu;
    unsigned short index;
};

/* pointer to array of kobjects for cpuX/cache/indexY */
static struct _index_kobject *index_kobject[NR_CPUS];
#define INDEX_KOBJECT_PTR(x, y)    (&((index_kobject[x])[y]))

#define show_one_plus(file_name, object, val)                             \
static ssize_t show_##file_name                                           \
        (struct _cpuid4_info *this_leaf, char *buf)                       \
{                                                                         \
    return sprintf(buf, "%lu\n", (unsigned long)this_leaf->object + val); \
}

show_one_plus(level, eax.split.level, 0);
show_one_plus(coherency_line_size, ebx.split.coherency_line_size, 1);
show_one_plus(physical_line_partition, ebx.split.physical_line_partition, 1);
show_one_plus(ways_of_associativity, ebx.split.ways_of_associativity, 1);
show_one_plus(number_of_sets, ecx.split.number_of_sets, 1);

static ssize_t show_size(struct _cpuid4_info *this_leaf, char *buf)
{
    return sprintf(buf, "%luK\n", this_leaf->size / 1024);
}

static ssize_t show_shared_cpu_map(struct _cpuid4_info *this_leaf, char *buf)
{
    char mask_str[NR_CPUS];

    cpumask_scnprintf(mask_str, NR_CPUS, this_leaf->shared_cpu_map);
    return sprintf(buf, "%s\n", mask_str);
}

static ssize_t show_type(struct _cpuid4_info *this_leaf, char *buf)
{
    switch (this_leaf->eax.split.type) {
    case CACHE_TYPE_DATA:
        return sprintf(buf, "Data\n");
    case CACHE_TYPE_INST:
        return sprintf(buf, "Instruction\n");
    case CACHE_TYPE_UNIFIED:
        return sprintf(buf, "Unified\n");
    default:
        return sprintf(buf, "Unknown\n");
    }
}

struct _cache_attr {
    struct attribute attr;
    ssize_t (*show)(struct _cpuid4_info *, char *);
    ssize_t (*store)(struct _cpuid4_info *, const char *, size_t count);
};

#define define_one_ro(_name) \
static struct _cache_attr _name = \
    __ATTR(_name, 0444, show_##_name, NULL)

define_one_ro(level);
define_one_ro(type);
define_one_ro(coherency_line_size);
define_one_ro(physical_line_partition);
define_one_ro(ways_of_associativity);
define_one_ro(number_of_sets);
define_one_ro(size);
define_one_ro(shared_cpu_map);

static struct attribute *default_attrs[] = {
    &type.attr,
    &level.attr,
    &coherency_line_size.attr,
    &physical_line_partition.attr,
    &ways_of_associativity.attr,
    &number_of_sets.attr,
    &size.attr,
    &shared_cpu_map.attr,
    NULL
};

#define to_object(k)    container_of(k, struct _index_kobject, kobj)
#define to_attr(a)      container_of(a, struct _cache_attr, attr)

static ssize_t show(struct kobject *kobj, struct attribute *attr, char *buf)
{
    struct _cache_attr *fattr = to_attr(attr);
    struct _index_kobject *this_leaf = to_object(kobj);
    ssize_t ret;

    ret = fattr->show ?
        fattr->show(CPUID4_INFO_IDX(this_leaf->cpu, this_leaf->index),
            buf) :
        0;
    return ret;
}

static ssize_t store(struct kobject *kobj, struct attribute *attr,
                     const char *buf, size_t count)
{
    return 0;
}

static struct sysfs_ops sysfs_ops = {
    .show  = show,
    .store = store,
};

static struct kobj_type ktype_cache = {
    .sysfs_ops     = &sysfs_ops,
    .default_attrs = default_attrs,
};

static struct kobj_type ktype_percpu_entry = {
    .sysfs_ops     = &sysfs_ops,
};

static void cpuid4_cache_sysfs_exit(unsigned int cpu)
{
    kfree(cache_kobject[cpu]);
    kfree(index_kobject[cpu]);
    cache_kobject[cpu] = NULL;
    index_kobject[cpu] = NULL;
    free_cache_attributes(cpu);
}

static int __cpuinit cpuid4_cache_sysfs_init(unsigned int cpu)
{
    if (num_cache_leaves == 0)
        return -ENOENT;

    detect_cache_attributes(cpu);
    if (cpuid4_info[cpu] == NULL)
        return -ENOENT;

    /* Allocate all required memory */
    cache_kobject[cpu] = kmalloc(sizeof(struct kobject), GFP_KERNEL);
    if (unlikely(cache_kobject[cpu] == NULL))
        goto err_out;
    memset(cache_kobject[cpu], 0, sizeof(struct kobject));

    index_kobject[cpu] = kmalloc(
        sizeof(struct _index_kobject) * num_cache_leaves, GFP_KERNEL);
    if (unlikely(index_kobject[cpu] == NULL))
        goto err_out;
    memset(index_kobject[cpu], 0,
        sizeof(struct _index_kobject) * num_cache_leaves);

    return 0;

err_out:
    cpuid4_cache_sysfs_exit(cpu);
    return -ENOMEM;
}

/* Add/Remove cache interface for CPU device */
static int __cpuinit cache_add_dev(struct sys_device *sys_dev)
{
    unsigned int cpu = sys_dev->id;
    unsigned long i, j;
    struct _index_kobject *this_object;
    int retval = 0;

    retval = cpuid4_cache_sysfs_init(cpu);
    if (unlikely(retval < 0))
        return retval;

    cache_kobject[cpu]->parent = &sys_dev->kobj;
    kobject_set_name(cache_kobject[cpu], "%s", "cache");
    cache_kobject[cpu]->ktype = &ktype_percpu_entry;
    retval = kobject_register(cache_kobject[cpu]);

    for (i = 0; i < num_cache_leaves; i++) {
        this_object = INDEX_KOBJECT_PTR(cpu, i);
        this_object->cpu = cpu;
        this_object->index = i;
        this_object->kobj.parent = cache_kobject[cpu];
        kobject_set_name(&(this_object->kobj), "index%1lu", i);
        this_object->kobj.ktype = &ktype_cache;
        retval = kobject_register(&(this_object->kobj));
        if (unlikely(retval)) {
            /* Unwind: drop every kobject registered so far */
            for (j = 0; j < i; j++) {
                kobject_unregister(
                    &(INDEX_KOBJECT_PTR(cpu, j)->kobj));
            }
            kobject_unregister(cache_kobject[cpu]);
            cpuid4_cache_sysfs_exit(cpu);
            break;
        }
    }
    return retval;
}

static void __cpuexit cache_remove_dev(struct sys_device *sys_dev)
{
    unsigned int cpu = sys_dev->id;
    unsigned long i;

    for (i = 0; i < num_cache_leaves; i++) {
        cache_remove_shared_cpu_map(cpu, i);
        kobject_unregister(&(INDEX_KOBJECT_PTR(cpu, i)->kobj));
    }
    kobject_unregister(cache_kobject[cpu]);
    cpuid4_cache_sysfs_exit(cpu);
    return;
}
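
/*
 * CPU hotplug callback: build the cache sysfs entries when a CPU comes
 * online and tear them down when it dies.
 */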
static int __cpuinit cacheinfo_cpu_callback(struct notifier_block *nfb,
                                            unsigned long action, void *hcpu)
{
    unsigned int cpu = (unsigned long)hcpu;
    struct sys_device *sys_dev;

    sys_dev = get_cpu_sysdev(cpu);
    switch (action) {
    case CPU_ONLINE:
        cache_add_dev(sys_dev);
        break;
    case CPU_DEAD:
        cache_remove_dev(sys_dev);
        break;
    }
    return NOTIFY_OK;
}

static struct notifier_block cacheinfo_cpu_notifier =
{
    .notifier_call = cacheinfo_cpu_callback,
};

static int __cpuinit cache_sysfs_init(void)
{
    int i;

    if (num_cache_leaves == 0)
        return 0;

    register_cpu_notifier(&cacheinfo_cpu_notifier);

    for_each_online_cpu(i) {
        cacheinfo_cpu_callback(&cacheinfo_cpu_notifier, CPU_ONLINE,
            (void *)(long)i);
    }

    return 0;
}

device_initcall(cache_sysfs_init);

#endif