intel_cacheinfo.c

/*
 * Routines to identify caches on Intel CPU.
 *
 * Changes:
 * Venkatesh Pallipadi	: Adding cache identification through cpuid(4)
 * Ashok Raj <ashok.raj@intel.com>	: Work with CPU hotplug infrastructure.
 * Andi Kleen / Andreas Herrmann	: CPUID4 emulation on AMD.
 */
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/device.h>
#include <linux/compiler.h>
#include <linux/cpu.h>
#include <linux/sched.h>
#include <linux/pci.h>

#include <asm/processor.h>
#include <asm/smp.h>

#define LVL_1_INST	1
#define LVL_1_DATA	2
#define LVL_2		3
#define LVL_3		4
#define LVL_TRACE	5
struct _cache_table {
	unsigned char descriptor;
	char cache_type;
	short size;
};
/* All the cache descriptor types we care about (no TLB or
   trace cache entries) */
static const struct _cache_table __cpuinitconst cache_table[] =
{
	{ 0x06, LVL_1_INST, 8 },	/* 4-way set assoc, 32 byte line size */
	{ 0x08, LVL_1_INST, 16 },	/* 4-way set assoc, 32 byte line size */
	{ 0x09, LVL_1_INST, 32 },	/* 4-way set assoc, 64 byte line size */
	{ 0x0a, LVL_1_DATA, 8 },	/* 2 way set assoc, 32 byte line size */
	{ 0x0c, LVL_1_DATA, 16 },	/* 4-way set assoc, 32 byte line size */
	{ 0x0d, LVL_1_DATA, 16 },	/* 4-way set assoc, 64 byte line size */
	{ 0x21, LVL_2, 256 },		/* 8-way set assoc, 64 byte line size */
	{ 0x22, LVL_3, 512 },		/* 4-way set assoc, sectored cache, 64 byte line size */
	{ 0x23, LVL_3, 1024 },		/* 8-way set assoc, sectored cache, 64 byte line size */
	{ 0x25, LVL_3, 2048 },		/* 8-way set assoc, sectored cache, 64 byte line size */
	{ 0x29, LVL_3, 4096 },		/* 8-way set assoc, sectored cache, 64 byte line size */
	{ 0x2c, LVL_1_DATA, 32 },	/* 8-way set assoc, 64 byte line size */
	{ 0x30, LVL_1_INST, 32 },	/* 8-way set assoc, 64 byte line size */
	{ 0x39, LVL_2, 128 },		/* 4-way set assoc, sectored cache, 64 byte line size */
	{ 0x3a, LVL_2, 192 },		/* 6-way set assoc, sectored cache, 64 byte line size */
	{ 0x3b, LVL_2, 128 },		/* 2-way set assoc, sectored cache, 64 byte line size */
	{ 0x3c, LVL_2, 256 },		/* 4-way set assoc, sectored cache, 64 byte line size */
	{ 0x3d, LVL_2, 384 },		/* 6-way set assoc, sectored cache, 64 byte line size */
	{ 0x3e, LVL_2, 512 },		/* 4-way set assoc, sectored cache, 64 byte line size */
	{ 0x3f, LVL_2, 256 },		/* 2-way set assoc, 64 byte line size */
	{ 0x41, LVL_2, 128 },		/* 4-way set assoc, 32 byte line size */
	{ 0x42, LVL_2, 256 },		/* 4-way set assoc, 32 byte line size */
	{ 0x43, LVL_2, 512 },		/* 4-way set assoc, 32 byte line size */
	{ 0x44, LVL_2, 1024 },		/* 4-way set assoc, 32 byte line size */
	{ 0x45, LVL_2, 2048 },		/* 4-way set assoc, 32 byte line size */
	{ 0x46, LVL_3, 4096 },		/* 4-way set assoc, 64 byte line size */
	{ 0x47, LVL_3, 8192 },		/* 8-way set assoc, 64 byte line size */
	{ 0x49, LVL_3, 4096 },		/* 16-way set assoc, 64 byte line size */
	{ 0x4a, LVL_3, 6144 },		/* 12-way set assoc, 64 byte line size */
	{ 0x4b, LVL_3, 8192 },		/* 16-way set assoc, 64 byte line size */
	{ 0x4c, LVL_3, 12288 },		/* 12-way set assoc, 64 byte line size */
	{ 0x4d, LVL_3, 16384 },		/* 16-way set assoc, 64 byte line size */
	{ 0x4e, LVL_2, 6144 },		/* 24-way set assoc, 64 byte line size */
	{ 0x60, LVL_1_DATA, 16 },	/* 8-way set assoc, sectored cache, 64 byte line size */
	{ 0x66, LVL_1_DATA, 8 },	/* 4-way set assoc, sectored cache, 64 byte line size */
	{ 0x67, LVL_1_DATA, 16 },	/* 4-way set assoc, sectored cache, 64 byte line size */
	{ 0x68, LVL_1_DATA, 32 },	/* 4-way set assoc, sectored cache, 64 byte line size */
	{ 0x70, LVL_TRACE, 12 },	/* 8-way set assoc */
	{ 0x71, LVL_TRACE, 16 },	/* 8-way set assoc */
	{ 0x72, LVL_TRACE, 32 },	/* 8-way set assoc */
	{ 0x73, LVL_TRACE, 64 },	/* 8-way set assoc */
	{ 0x78, LVL_2, 1024 },		/* 4-way set assoc, 64 byte line size */
	{ 0x79, LVL_2, 128 },		/* 8-way set assoc, sectored cache, 64 byte line size */
	{ 0x7a, LVL_2, 256 },		/* 8-way set assoc, sectored cache, 64 byte line size */
	{ 0x7b, LVL_2, 512 },		/* 8-way set assoc, sectored cache, 64 byte line size */
	{ 0x7c, LVL_2, 1024 },		/* 8-way set assoc, sectored cache, 64 byte line size */
	{ 0x7d, LVL_2, 2048 },		/* 8-way set assoc, 64 byte line size */
	{ 0x7f, LVL_2, 512 },		/* 2-way set assoc, 64 byte line size */
	{ 0x82, LVL_2, 256 },		/* 8-way set assoc, 32 byte line size */
	{ 0x83, LVL_2, 512 },		/* 8-way set assoc, 32 byte line size */
	{ 0x84, LVL_2, 1024 },		/* 8-way set assoc, 32 byte line size */
	{ 0x85, LVL_2, 2048 },		/* 8-way set assoc, 32 byte line size */
	{ 0x86, LVL_2, 512 },		/* 4-way set assoc, 64 byte line size */
	{ 0x87, LVL_2, 1024 },		/* 8-way set assoc, 64 byte line size */
	{ 0xd0, LVL_3, 512 },		/* 4-way set assoc, 64 byte line size */
	{ 0xd1, LVL_3, 1024 },		/* 4-way set assoc, 64 byte line size */
	{ 0xd2, LVL_3, 2048 },		/* 4-way set assoc, 64 byte line size */
	{ 0xd6, LVL_3, 1024 },		/* 8-way set assoc, 64 byte line size */
	{ 0xd7, LVL_3, 2048 },		/* 8-way set assoc, 64 byte line size */
	{ 0xd8, LVL_3, 4096 },		/* 12-way set assoc, 64 byte line size */
	{ 0xdc, LVL_3, 2048 },		/* 12-way set assoc, 64 byte line size */
	{ 0xdd, LVL_3, 4096 },		/* 12-way set assoc, 64 byte line size */
	{ 0xde, LVL_3, 8192 },		/* 12-way set assoc, 64 byte line size */
	{ 0xe2, LVL_3, 2048 },		/* 16-way set assoc, 64 byte line size */
	{ 0xe3, LVL_3, 4096 },		/* 16-way set assoc, 64 byte line size */
	{ 0xe4, LVL_3, 8192 },		/* 16-way set assoc, 64 byte line size */
	{ 0x00, 0, 0}
};
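
/*
 * The table above is consumed by init_intel_cacheinfo(): each descriptor
 * byte returned by cpuid(2) is matched against ->descriptor and the size
 * (in KB) is accumulated per cache level.  The all-zero entry terminates
 * the lookup.
 */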
enum _cache_type {
	CACHE_TYPE_NULL	= 0,
	CACHE_TYPE_DATA = 1,
	CACHE_TYPE_INST = 2,
	CACHE_TYPE_UNIFIED = 3
};

union _cpuid4_leaf_eax {
	struct {
		enum _cache_type	type:5;
		unsigned int		level:3;
		unsigned int		is_self_initializing:1;
		unsigned int		is_fully_associative:1;
		unsigned int		reserved:4;
		unsigned int		num_threads_sharing:12;
		unsigned int		num_cores_on_die:6;
	} split;
	u32 full;
};

union _cpuid4_leaf_ebx {
	struct {
		unsigned int		coherency_line_size:12;
		unsigned int		physical_line_partition:10;
		unsigned int		ways_of_associativity:10;
	} split;
	u32 full;
};

union _cpuid4_leaf_ecx {
	struct {
		unsigned int		number_of_sets:32;
	} split;
	u32 full;
};
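
/*
 * Note that the EBX/ECX counts above are reported by CPUID.4 as "value
 * minus one": a 64 byte line shows up as coherency_line_size == 63.
 * cpuid4_cache_lookup_regs() therefore computes
 *	size = (sets + 1) * (line_size + 1) * (partitions + 1) * (ways + 1)
 */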
struct _cpuid4_info {
	union _cpuid4_leaf_eax eax;
	union _cpuid4_leaf_ebx ebx;
	union _cpuid4_leaf_ecx ecx;
	unsigned long size;
	unsigned long can_disable;
	DECLARE_BITMAP(shared_cpu_map, NR_CPUS);
};

/* subset of above _cpuid4_info w/o shared_cpu_map */
struct _cpuid4_info_regs {
	union _cpuid4_leaf_eax eax;
	union _cpuid4_leaf_ebx ebx;
	union _cpuid4_leaf_ecx ecx;
	unsigned long size;
	unsigned long can_disable;
};
#if defined(CONFIG_PCI) && defined(CONFIG_SYSFS)
static struct pci_device_id k8_nb_id[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, 0x1103) },	/* K8 misc control */
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, 0x1203) },	/* Fam10h misc control */
	{}
};
#endif
unsigned short			num_cache_leaves;

/* AMD doesn't have CPUID4. Emulate it here to report the same
   information to the user. This makes some assumptions about the machine:
   L2 is not shared, no SMT etc., which is currently true on AMD CPUs.

   In theory the TLBs could be reported as a fake type (they are in
   "dummy"). Maybe later. */
union l1_cache {
	struct {
		unsigned line_size:8;
		unsigned lines_per_tag:8;
		unsigned assoc:8;
		unsigned size_in_kb:8;
	};
	unsigned val;
};

union l2_cache {
	struct {
		unsigned line_size:8;
		unsigned lines_per_tag:4;
		unsigned assoc:4;
		unsigned size_in_kb:16;
	};
	unsigned val;
};

union l3_cache {
	struct {
		unsigned line_size:8;
		unsigned lines_per_tag:4;
		unsigned assoc:4;
		unsigned res:2;
		unsigned size_encoded:14;
	};
	unsigned val;
};
static const unsigned short __cpuinitconst assocs[] = {
	[1] = 1, [2] = 2, [4] = 4, [6] = 8,
	[8] = 16, [0xa] = 32, [0xb] = 48,
	[0xc] = 64,
	[0xf] = 0xffff	/* 0xf means fully associative, see below */
};
static const unsigned char __cpuinitconst levels[] = { 1, 1, 2, 3 };
static const unsigned char __cpuinitconst types[] = { 1, 2, 3, 3 };
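
/*
 * levels[] and types[] translate the emulated leaf number (0 = L1d,
 * 1 = L1i, 2 = L2, 3 = L3) into the CPUID.4 "level" and "type" fields:
 * leaves 0 and 1 are the level-1 data/instruction caches, leaves 2 and
 * 3 are unified (CACHE_TYPE_DATA/INST/UNIFIED as defined above).
 */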
static void __cpuinit
amd_cpuid4(int leaf, union _cpuid4_leaf_eax *eax,
		     union _cpuid4_leaf_ebx *ebx,
		     union _cpuid4_leaf_ecx *ecx)
{
	unsigned dummy;
	unsigned line_size, lines_per_tag, assoc, size_in_kb;
	union l1_cache l1i, l1d;
	union l2_cache l2;
	union l3_cache l3;
	union l1_cache *l1 = &l1d;

	eax->full = 0;
	ebx->full = 0;
	ecx->full = 0;

	cpuid(0x80000005, &dummy, &dummy, &l1d.val, &l1i.val);
	cpuid(0x80000006, &dummy, &dummy, &l2.val, &l3.val);

	switch (leaf) {
	case 1:
		l1 = &l1i;
		/* fall through */
	case 0:
		if (!l1->val)
			return;
		assoc = l1->assoc;
		line_size = l1->line_size;
		lines_per_tag = l1->lines_per_tag;
		size_in_kb = l1->size_in_kb;
		break;
	case 2:
		if (!l2.val)
			return;
		assoc = l2.assoc;
		line_size = l2.line_size;
		lines_per_tag = l2.lines_per_tag;
		/* cpu_data has errata corrections for K7 applied */
		size_in_kb = current_cpu_data.x86_cache_size;
		break;
	case 3:
		if (!l3.val)
			return;
		assoc = l3.assoc;
		line_size = l3.line_size;
		lines_per_tag = l3.lines_per_tag;
		size_in_kb = l3.size_encoded * 512;
		break;
	default:
		return;
	}

	eax->split.is_self_initializing = 1;
	eax->split.type = types[leaf];
	eax->split.level = levels[leaf];
	if (leaf == 3)
		eax->split.num_threads_sharing =
			current_cpu_data.x86_max_cores - 1;
	else
		eax->split.num_threads_sharing = 0;
	eax->split.num_cores_on_die = current_cpu_data.x86_max_cores - 1;

	if (assoc == 0xf)
		eax->split.is_fully_associative = 1;
	ebx->split.coherency_line_size = line_size - 1;
	ebx->split.ways_of_associativity = assocs[assoc] - 1;
	ebx->split.physical_line_partition = lines_per_tag - 1;
	ecx->split.number_of_sets = (size_in_kb * 1024) / line_size /
		(ebx->split.ways_of_associativity + 1) - 1;
}
static void __cpuinit
amd_check_l3_disable(int index, struct _cpuid4_info_regs *this_leaf)
{
	/* Index disable is only exposed for the L3 leaf (index 3) */
	if (index < 3)
		return;
	this_leaf->can_disable = 1;
}
static int
__cpuinit cpuid4_cache_lookup_regs(int index,
				   struct _cpuid4_info_regs *this_leaf)
{
	union _cpuid4_leaf_eax	eax;
	union _cpuid4_leaf_ebx	ebx;
	union _cpuid4_leaf_ecx	ecx;
	unsigned		edx;

	if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD) {
		amd_cpuid4(index, &eax, &ebx, &ecx);
		if (boot_cpu_data.x86 >= 0x10)
			amd_check_l3_disable(index, this_leaf);
	} else {
		cpuid_count(4, index, &eax.full, &ebx.full, &ecx.full, &edx);
	}

	if (eax.split.type == CACHE_TYPE_NULL)
		return -EIO; /* better error ? */

	this_leaf->eax = eax;
	this_leaf->ebx = ebx;
	this_leaf->ecx = ecx;
	this_leaf->size = (ecx.split.number_of_sets          + 1) *
			  (ebx.split.coherency_line_size     + 1) *
			  (ebx.split.physical_line_partition + 1) *
			  (ebx.split.ways_of_associativity   + 1);
	return 0;
}
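
/*
 * Example of the size computation above: a 32 KB, 8-way cache with
 * 64 byte lines and one line per sector reports sets-1 = 63,
 * line_size-1 = 63, partitions-1 = 0 and ways-1 = 7, so
 * size = 64 * 64 * 1 * 8 = 32768 bytes.
 */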
static int __cpuinit find_num_cache_leaves(void)
{
	unsigned int		eax, ebx, ecx, edx;
	union _cpuid4_leaf_eax	cache_eax;
	int			i = -1;

	do {
		++i;
		/* Do cpuid(4) loop to find out num_cache_leaves */
		cpuid_count(4, i, &eax, &ebx, &ecx, &edx);
		cache_eax.full = eax;
	} while (cache_eax.split.type != CACHE_TYPE_NULL);
	return i;
}
unsigned int __cpuinit init_intel_cacheinfo(struct cpuinfo_x86 *c)
{
	/* Cache sizes */
	unsigned int trace = 0, l1i = 0, l1d = 0, l2 = 0, l3 = 0;
	unsigned int new_l1d = 0, new_l1i = 0; /* Cache sizes from cpuid(4) */
	unsigned int new_l2 = 0, new_l3 = 0, i; /* Cache sizes from cpuid(4) */
	unsigned int l2_id = 0, l3_id = 0, num_threads_sharing, index_msb;
#ifdef CONFIG_X86_HT
	unsigned int cpu = c->cpu_index;
#endif

	if (c->cpuid_level > 3) {
		static int is_initialized;

		if (is_initialized == 0) {
			/* Init num_cache_leaves from boot CPU */
			num_cache_leaves = find_num_cache_leaves();
			is_initialized++;
		}

		/*
		 * Whenever possible use cpuid(4), deterministic cache
		 * parameters cpuid leaf to find the cache details
		 */
		for (i = 0; i < num_cache_leaves; i++) {
			struct _cpuid4_info_regs this_leaf;
			int retval;

			retval = cpuid4_cache_lookup_regs(i, &this_leaf);
			if (retval >= 0) {
				switch (this_leaf.eax.split.level) {
				case 1:
					if (this_leaf.eax.split.type ==
							CACHE_TYPE_DATA)
						new_l1d = this_leaf.size/1024;
					else if (this_leaf.eax.split.type ==
							CACHE_TYPE_INST)
						new_l1i = this_leaf.size/1024;
					break;
				case 2:
					new_l2 = this_leaf.size/1024;
					num_threads_sharing = 1 + this_leaf.eax.split.num_threads_sharing;
					index_msb = get_count_order(num_threads_sharing);
					l2_id = c->apicid >> index_msb;
					break;
				case 3:
					new_l3 = this_leaf.size/1024;
					num_threads_sharing = 1 + this_leaf.eax.split.num_threads_sharing;
					index_msb = get_count_order(num_threads_sharing);
					l3_id = c->apicid >> index_msb;
					break;
				default:
					break;
				}
			}
		}
	}
	/*
	 * Don't use cpuid2 if cpuid4 is supported. For P4, we use cpuid2 for
	 * trace cache
	 */
	if ((num_cache_leaves == 0 || c->x86 == 15) && c->cpuid_level > 1) {
		/* supports eax=2 call */
		int j, n;
		unsigned int regs[4];
		unsigned char *dp = (unsigned char *)regs;
		int only_trace = 0;

		if (num_cache_leaves != 0 && c->x86 == 15)
			only_trace = 1;

		/* Number of times to iterate */
		n = cpuid_eax(2) & 0xFF;

		for (i = 0; i < n; i++) {
			cpuid(2, &regs[0], &regs[1], &regs[2], &regs[3]);

			/* If bit 31 is set, this is an unknown format */
			for (j = 0; j < 3; j++) {
				if (regs[j] & (1 << 31))
					regs[j] = 0;
			}

			/* Byte 0 is level count, not a descriptor */
			for (j = 1; j < 16; j++) {
				unsigned char des = dp[j];
				unsigned char k = 0;

				/* look up this descriptor in the table */
				while (cache_table[k].descriptor != 0) {
					if (cache_table[k].descriptor == des) {
						if (only_trace && cache_table[k].cache_type != LVL_TRACE)
							break;
						switch (cache_table[k].cache_type) {
						case LVL_1_INST:
							l1i += cache_table[k].size;
							break;
						case LVL_1_DATA:
							l1d += cache_table[k].size;
							break;
						case LVL_2:
							l2 += cache_table[k].size;
							break;
						case LVL_3:
							l3 += cache_table[k].size;
							break;
						case LVL_TRACE:
							trace += cache_table[k].size;
							break;
						}
						break;
					}
					k++;
				}
			}
		}
	}
	if (new_l1d)
		l1d = new_l1d;

	if (new_l1i)
		l1i = new_l1i;

	if (new_l2) {
		l2 = new_l2;
#ifdef CONFIG_X86_HT
		per_cpu(cpu_llc_id, cpu) = l2_id;
#endif
	}

	if (new_l3) {
		l3 = new_l3;
#ifdef CONFIG_X86_HT
		per_cpu(cpu_llc_id, cpu) = l3_id;
#endif
	}

	if (trace)
		printk(KERN_INFO "CPU: Trace cache: %dK uops", trace);
	else if (l1i)
		printk(KERN_INFO "CPU: L1 I cache: %dK", l1i);

	if (l1d)
		printk(", L1 D cache: %dK\n", l1d);
	else
		printk("\n");

	if (l2)
		printk(KERN_INFO "CPU: L2 cache: %dK\n", l2);

	if (l3)
		printk(KERN_INFO "CPU: L3 cache: %dK\n", l3);

	c->x86_cache_size = l3 ? l3 : (l2 ? l2 : (l1i+l1d));

	return l2;
}
#ifdef CONFIG_SYSFS

/* pointer to _cpuid4_info array (for each cache leaf) */
static DEFINE_PER_CPU(struct _cpuid4_info *, cpuid4_info);
#define CPUID4_INFO_IDX(x, y)	(&((per_cpu(cpuid4_info, x))[y]))

#ifdef CONFIG_SMP
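/*
 * CPUs that share a cache have identical APIC IDs above the low
 * log2(num_threads_sharing) bits; masking those bits off yields a
 * shared-cache ID that the loop below compares across all online CPUs.
 */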
static void __cpuinit cache_shared_cpu_map_setup(unsigned int cpu, int index)
{
	struct _cpuid4_info	*this_leaf, *sibling_leaf;
	unsigned long num_threads_sharing;
	int index_msb, i;
	struct cpuinfo_x86 *c = &cpu_data(cpu);

	this_leaf = CPUID4_INFO_IDX(cpu, index);
	num_threads_sharing = 1 + this_leaf->eax.split.num_threads_sharing;

	if (num_threads_sharing == 1)
		cpumask_set_cpu(cpu, to_cpumask(this_leaf->shared_cpu_map));
	else {
		index_msb = get_count_order(num_threads_sharing);

		for_each_online_cpu(i) {
			if (cpu_data(i).apicid >> index_msb ==
			    c->apicid >> index_msb) {
				cpumask_set_cpu(i,
					to_cpumask(this_leaf->shared_cpu_map));
				if (i != cpu && per_cpu(cpuid4_info, i)) {
					sibling_leaf =
						CPUID4_INFO_IDX(i, index);
					cpumask_set_cpu(cpu, to_cpumask(
						sibling_leaf->shared_cpu_map));
				}
			}
		}
	}
}
static void __cpuinit cache_remove_shared_cpu_map(unsigned int cpu, int index)
{
	struct _cpuid4_info	*this_leaf, *sibling_leaf;
	int sibling;

	this_leaf = CPUID4_INFO_IDX(cpu, index);
	for_each_cpu(sibling, to_cpumask(this_leaf->shared_cpu_map)) {
		sibling_leaf = CPUID4_INFO_IDX(sibling, index);
		cpumask_clear_cpu(cpu,
				  to_cpumask(sibling_leaf->shared_cpu_map));
	}
}
#else
static void __cpuinit cache_shared_cpu_map_setup(unsigned int cpu, int index) {}
static void __cpuinit cache_remove_shared_cpu_map(unsigned int cpu, int index) {}
#endif
static void __cpuinit free_cache_attributes(unsigned int cpu)
{
	int i;

	for (i = 0; i < num_cache_leaves; i++)
		cache_remove_shared_cpu_map(cpu, i);

	kfree(per_cpu(cpuid4_info, cpu));
	per_cpu(cpuid4_info, cpu) = NULL;
}

static int
__cpuinit cpuid4_cache_lookup(int index, struct _cpuid4_info *this_leaf)
{
	struct _cpuid4_info_regs *leaf_regs =
		(struct _cpuid4_info_regs *)this_leaf;

	return cpuid4_cache_lookup_regs(index, leaf_regs);
}
static void __cpuinit get_cpu_leaves(void *_retval)
{
	int j, *retval = _retval, cpu = smp_processor_id();

	/* Do cpuid and store the results */
	for (j = 0; j < num_cache_leaves; j++) {
		struct _cpuid4_info *this_leaf;

		this_leaf = CPUID4_INFO_IDX(cpu, j);
		*retval = cpuid4_cache_lookup(j, this_leaf);
		if (unlikely(*retval < 0)) {
			int i;

			for (i = 0; i < j; i++)
				cache_remove_shared_cpu_map(cpu, i);
			break;
		}
		cache_shared_cpu_map_setup(cpu, j);
	}
}
static int __cpuinit detect_cache_attributes(unsigned int cpu)
{
	int	retval;

	if (num_cache_leaves == 0)
		return -ENOENT;

	per_cpu(cpuid4_info, cpu) = kzalloc(
	    sizeof(struct _cpuid4_info) * num_cache_leaves, GFP_KERNEL);
	if (per_cpu(cpuid4_info, cpu) == NULL)
		return -ENOMEM;

	smp_call_function_single(cpu, get_cpu_leaves, &retval, true);
	if (retval) {
		kfree(per_cpu(cpuid4_info, cpu));
		per_cpu(cpuid4_info, cpu) = NULL;
	}

	return retval;
}
#include <linux/kobject.h>
#include <linux/sysfs.h>

extern struct sysdev_class cpu_sysdev_class; /* from drivers/base/cpu.c */

/* pointer to kobject for cpuX/cache */
static DEFINE_PER_CPU(struct kobject *, cache_kobject);

struct _index_kobject {
	struct kobject kobj;
	unsigned int cpu;
	unsigned short index;
};

/* pointer to array of kobjects for cpuX/cache/indexY */
static DEFINE_PER_CPU(struct _index_kobject *, index_kobject);
#define INDEX_KOBJECT_PTR(x, y)	(&((per_cpu(index_kobject, x))[y]))
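
/*
 * The "val" argument of show_one_plus() re-adds the minus-one bias of
 * the CPUID.4 fields: line size, partitions, ways and sets are stored
 * decremented by one, so they are shown with +1, while "level" is
 * shown as-is.
 */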
#define show_one_plus(file_name, object, val)				\
static ssize_t show_##file_name						\
			(struct _cpuid4_info *this_leaf, char *buf)	\
{									\
	return sprintf(buf, "%lu\n", (unsigned long)this_leaf->object + val); \
}

show_one_plus(level, eax.split.level, 0);
show_one_plus(coherency_line_size, ebx.split.coherency_line_size, 1);
show_one_plus(physical_line_partition, ebx.split.physical_line_partition, 1);
show_one_plus(ways_of_associativity, ebx.split.ways_of_associativity, 1);
show_one_plus(number_of_sets, ecx.split.number_of_sets, 1);

static ssize_t show_size(struct _cpuid4_info *this_leaf, char *buf)
{
	return sprintf(buf, "%luK\n", this_leaf->size / 1024);
}
static ssize_t show_shared_cpu_map_func(struct _cpuid4_info *this_leaf,
					int type, char *buf)
{
	ptrdiff_t len = PTR_ALIGN(buf + PAGE_SIZE - 1, PAGE_SIZE) - buf;
	int n = 0;

	if (len > 1) {
		const struct cpumask *mask;

		mask = to_cpumask(this_leaf->shared_cpu_map);
		n = type ?
			cpulist_scnprintf(buf, len-2, mask) :
			cpumask_scnprintf(buf, len-2, mask);
		buf[n++] = '\n';
		buf[n] = '\0';
	}
	return n;
}

static inline ssize_t show_shared_cpu_map(struct _cpuid4_info *leaf, char *buf)
{
	return show_shared_cpu_map_func(leaf, 0, buf);
}

static inline ssize_t show_shared_cpu_list(struct _cpuid4_info *leaf, char *buf)
{
	return show_shared_cpu_map_func(leaf, 1, buf);
}
static ssize_t show_type(struct _cpuid4_info *this_leaf, char *buf)
{
	switch (this_leaf->eax.split.type) {
	case CACHE_TYPE_DATA:
		return sprintf(buf, "Data\n");
	case CACHE_TYPE_INST:
		return sprintf(buf, "Instruction\n");
	case CACHE_TYPE_UNIFIED:
		return sprintf(buf, "Unified\n");
	default:
		return sprintf(buf, "Unknown\n");
	}
}

#define to_object(k)	container_of(k, struct _index_kobject, kobj)
#define to_attr(a)	container_of(a, struct _cache_attr, attr)

#ifdef CONFIG_PCI
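/*
 * The walk below assumes one k8_nb_id match (northbridge misc control
 * function) per node, in PCI discovery order: the node-th match is
 * taken to be that node's northbridge, or NULL if the bus runs out of
 * devices first.
 */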
static struct pci_dev *get_k8_northbridge(int node)
{
	struct pci_dev *dev = NULL;
	int i;

	for (i = 0; i <= node; i++) {
		do {
			dev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, dev);
			if (!dev)
				break;
		} while (!pci_match_id(&k8_nb_id[0], dev));
		if (!dev)
			break;
	}
	return dev;
}
#else
static struct pci_dev *get_k8_northbridge(int node)
{
	return NULL;
}
#endif
static ssize_t show_cache_disable(struct _cpuid4_info *this_leaf, char *buf)
{
	const struct cpumask *mask = to_cpumask(this_leaf->shared_cpu_map);
	int node = cpu_to_node(cpumask_first(mask));
	struct pci_dev *dev = NULL;
	ssize_t ret = 0;
	int i;

	if (!this_leaf->can_disable)
		return sprintf(buf, "Feature not enabled\n");

	dev = get_k8_northbridge(node);
	if (!dev) {
		printk(KERN_ERR "Attempting AMD northbridge operation on a system with no northbridge\n");
		return -EINVAL;
	}

	for (i = 0; i < 2; i++) {
		unsigned int reg;

		pci_read_config_dword(dev, 0x1BC + i * 4, &reg);

		/* Append at buf + ret; sprintf(buf, "%s...", buf) with
		   overlapping source and destination is undefined. */
		ret += sprintf(buf + ret, "Entry: %d\n", i);
		ret += sprintf(buf + ret, "Reads: %s\tNew Entries: %s\n",
			reg & 0x80000000 ? "Disabled" : "Allowed",
			reg & 0x40000000 ? "Disabled" : "Allowed");
		ret += sprintf(buf + ret, "SubCache: %x\tIndex: %x\n",
			(reg & 0x30000) >> 16, reg & 0xfff);
	}
	return ret;
}
static ssize_t
store_cache_disable(struct _cpuid4_info *this_leaf, const char *buf,
		    size_t count)
{
	const struct cpumask *mask = to_cpumask(this_leaf->shared_cpu_map);
	int node = cpu_to_node(cpumask_first(mask));
	struct pci_dev *dev = NULL;
	unsigned int ret, index, val;

	if (!this_leaf->can_disable)
		return 0;

	if (strlen(buf) > 15)
		return -EINVAL;

	ret = sscanf(buf, "%x %x", &index, &val);
	if (ret != 2)
		return -EINVAL;
	if (index > 1)
		return -EINVAL;

	val |= 0xc0000000;
	dev = get_k8_northbridge(node);
	if (!dev) {
		printk(KERN_ERR "Attempting AMD northbridge operation on a system with no northbridge\n");
		return -EINVAL;
	}

	/* Write the mask with bit 30 cleared first, flush the caches,
	   then write the final value with bits 31:30 set. */
	pci_write_config_dword(dev, 0x1BC + index * 4, val & ~0x40000000);
	wbinvd();
	pci_write_config_dword(dev, 0x1BC + index * 4, val);

	return 1;
}
struct _cache_attr {
	struct attribute attr;
	ssize_t (*show)(struct _cpuid4_info *, char *);
	ssize_t (*store)(struct _cpuid4_info *, const char *, size_t count);
};

#define define_one_ro(_name) \
static struct _cache_attr _name = \
	__ATTR(_name, 0444, show_##_name, NULL)

define_one_ro(level);
define_one_ro(type);
define_one_ro(coherency_line_size);
define_one_ro(physical_line_partition);
define_one_ro(ways_of_associativity);
define_one_ro(number_of_sets);
define_one_ro(size);
define_one_ro(shared_cpu_map);
define_one_ro(shared_cpu_list);

static struct _cache_attr cache_disable = __ATTR(cache_disable, 0644,
						 show_cache_disable,
						 store_cache_disable);
static struct attribute *default_attrs[] = {
	&type.attr,
	&level.attr,
	&coherency_line_size.attr,
	&physical_line_partition.attr,
	&ways_of_associativity.attr,
	&number_of_sets.attr,
	&size.attr,
	&shared_cpu_map.attr,
	&shared_cpu_list.attr,
	&cache_disable.attr,
	NULL
};
static ssize_t show(struct kobject *kobj, struct attribute *attr, char *buf)
{
	struct _cache_attr *fattr = to_attr(attr);
	struct _index_kobject *this_leaf = to_object(kobj);
	ssize_t ret;

	ret = fattr->show ?
		fattr->show(CPUID4_INFO_IDX(this_leaf->cpu, this_leaf->index),
			buf) :
		0;
	return ret;
}

static ssize_t store(struct kobject *kobj, struct attribute *attr,
		     const char *buf, size_t count)
{
	struct _cache_attr *fattr = to_attr(attr);
	struct _index_kobject *this_leaf = to_object(kobj);
	ssize_t ret;

	ret = fattr->store ?
		fattr->store(CPUID4_INFO_IDX(this_leaf->cpu, this_leaf->index),
			buf, count) :
		0;
	return ret;
}

static struct sysfs_ops sysfs_ops = {
	.show   = show,
	.store  = store,
};
static struct kobj_type ktype_cache = {
	.sysfs_ops	= &sysfs_ops,
	.default_attrs	= default_attrs,
};

static struct kobj_type ktype_percpu_entry = {
	.sysfs_ops	= &sysfs_ops,
};

static void __cpuinit cpuid4_cache_sysfs_exit(unsigned int cpu)
{
	kfree(per_cpu(cache_kobject, cpu));
	kfree(per_cpu(index_kobject, cpu));
	per_cpu(cache_kobject, cpu) = NULL;
	per_cpu(index_kobject, cpu) = NULL;
	free_cache_attributes(cpu);
}
static int __cpuinit cpuid4_cache_sysfs_init(unsigned int cpu)
{
	int err;

	if (num_cache_leaves == 0)
		return -ENOENT;

	err = detect_cache_attributes(cpu);
	if (err)
		return err;

	/* Allocate all required memory */
	per_cpu(cache_kobject, cpu) =
		kzalloc(sizeof(struct kobject), GFP_KERNEL);
	if (unlikely(per_cpu(cache_kobject, cpu) == NULL))
		goto err_out;

	per_cpu(index_kobject, cpu) = kzalloc(
	    sizeof(struct _index_kobject) * num_cache_leaves, GFP_KERNEL);
	if (unlikely(per_cpu(index_kobject, cpu) == NULL))
		goto err_out;

	return 0;

err_out:
	cpuid4_cache_sysfs_exit(cpu);
	return -ENOMEM;
}
static DECLARE_BITMAP(cache_dev_map, NR_CPUS);

/* Add/Remove cache interface for CPU device */
static int __cpuinit cache_add_dev(struct sys_device *sys_dev)
{
	unsigned int cpu = sys_dev->id;
	unsigned long i, j;
	struct _index_kobject *this_object;
	int retval;

	retval = cpuid4_cache_sysfs_init(cpu);
	if (unlikely(retval < 0))
		return retval;

	retval = kobject_init_and_add(per_cpu(cache_kobject, cpu),
				      &ktype_percpu_entry,
				      &sys_dev->kobj, "%s", "cache");
	if (retval < 0) {
		cpuid4_cache_sysfs_exit(cpu);
		return retval;
	}

	for (i = 0; i < num_cache_leaves; i++) {
		this_object = INDEX_KOBJECT_PTR(cpu, i);
		this_object->cpu = cpu;
		this_object->index = i;
		retval = kobject_init_and_add(&(this_object->kobj),
					      &ktype_cache,
					      per_cpu(cache_kobject, cpu),
					      "index%1lu", i);
		if (unlikely(retval)) {
			for (j = 0; j < i; j++)
				kobject_put(&(INDEX_KOBJECT_PTR(cpu, j)->kobj));
			kobject_put(per_cpu(cache_kobject, cpu));
			cpuid4_cache_sysfs_exit(cpu);
			return retval;
		}
		kobject_uevent(&(this_object->kobj), KOBJ_ADD);
	}
	cpumask_set_cpu(cpu, to_cpumask(cache_dev_map));

	kobject_uevent(per_cpu(cache_kobject, cpu), KOBJ_ADD);
	return 0;
}
static void __cpuinit cache_remove_dev(struct sys_device *sys_dev)
{
	unsigned int cpu = sys_dev->id;
	unsigned long i;

	if (per_cpu(cpuid4_info, cpu) == NULL)
		return;
	if (!cpumask_test_cpu(cpu, to_cpumask(cache_dev_map)))
		return;
	cpumask_clear_cpu(cpu, to_cpumask(cache_dev_map));

	for (i = 0; i < num_cache_leaves; i++)
		kobject_put(&(INDEX_KOBJECT_PTR(cpu, i)->kobj));
	kobject_put(per_cpu(cache_kobject, cpu));
	cpuid4_cache_sysfs_exit(cpu);
}
static int __cpuinit cacheinfo_cpu_callback(struct notifier_block *nfb,
					    unsigned long action, void *hcpu)
{
	unsigned int cpu = (unsigned long)hcpu;
	struct sys_device *sys_dev;

	sys_dev = get_cpu_sysdev(cpu);
	switch (action) {
	case CPU_ONLINE:
	case CPU_ONLINE_FROZEN:
		cache_add_dev(sys_dev);
		break;
	case CPU_DEAD:
	case CPU_DEAD_FROZEN:
		cache_remove_dev(sys_dev);
		break;
	}
	return NOTIFY_OK;
}

static struct notifier_block __cpuinitdata cacheinfo_cpu_notifier = {
	.notifier_call = cacheinfo_cpu_callback,
};
static int __cpuinit cache_sysfs_init(void)
{
	int i;

	if (num_cache_leaves == 0)
		return 0;

	for_each_online_cpu(i) {
		int err;
		struct sys_device *sys_dev = get_cpu_sysdev(i);

		err = cache_add_dev(sys_dev);
		if (err)
			return err;
	}
	register_hotcpu_notifier(&cacheinfo_cpu_notifier);
	return 0;
}

device_initcall(cache_sysfs_init);

#endif