intel_cacheinfo.c

/*
 * Routines to identify caches on Intel CPUs.
 *
 * Changes:
 * Venkatesh Pallipadi: Adding cache identification through cpuid(4)
 * Ashok Raj <ashok.raj@intel.com>: Work with CPU hotplug infrastructure.
 * Andi Kleen / Andreas Herrmann: CPUID4 emulation on AMD.
 */
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/device.h>
#include <linux/compiler.h>
#include <linux/cpu.h>
#include <linux/sched.h>
#include <linux/pci.h>

#include <asm/processor.h>
#include <linux/smp.h>
#include <asm/k8.h>
#include <asm/smp.h>

#define LVL_1_INST	1
#define LVL_1_DATA	2
#define LVL_2		3
#define LVL_3		4
#define LVL_TRACE	5

struct _cache_table {
	unsigned char descriptor;
	char cache_type;
	short size;
};

#define MB(x)	((x) * 1024)
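
/* Note: the size fields in this file are kept in KB, so MB() just scales MB to KB. */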

/* All the cache descriptor types we care about (no TLB or
   trace cache entries) */
static const struct _cache_table __cpuinitconst cache_table[] =
{
	{ 0x06, LVL_1_INST, 8 },	/* 4-way set assoc, 32 byte line size */
	{ 0x08, LVL_1_INST, 16 },	/* 4-way set assoc, 32 byte line size */
	{ 0x09, LVL_1_INST, 32 },	/* 4-way set assoc, 64 byte line size */
	{ 0x0a, LVL_1_DATA, 8 },	/* 2-way set assoc, 32 byte line size */
	{ 0x0c, LVL_1_DATA, 16 },	/* 4-way set assoc, 32 byte line size */
	{ 0x0d, LVL_1_DATA, 16 },	/* 4-way set assoc, 64 byte line size */
	{ 0x21, LVL_2, 256 },		/* 8-way set assoc, 64 byte line size */
	{ 0x22, LVL_3, 512 },		/* 4-way set assoc, sectored cache, 64 byte line size */
	{ 0x23, LVL_3, MB(1) },		/* 8-way set assoc, sectored cache, 64 byte line size */
	{ 0x25, LVL_3, MB(2) },		/* 8-way set assoc, sectored cache, 64 byte line size */
	{ 0x29, LVL_3, MB(4) },		/* 8-way set assoc, sectored cache, 64 byte line size */
	{ 0x2c, LVL_1_DATA, 32 },	/* 8-way set assoc, 64 byte line size */
	{ 0x30, LVL_1_INST, 32 },	/* 8-way set assoc, 64 byte line size */
	{ 0x39, LVL_2, 128 },		/* 4-way set assoc, sectored cache, 64 byte line size */
	{ 0x3a, LVL_2, 192 },		/* 6-way set assoc, sectored cache, 64 byte line size */
	{ 0x3b, LVL_2, 128 },		/* 2-way set assoc, sectored cache, 64 byte line size */
	{ 0x3c, LVL_2, 256 },		/* 4-way set assoc, sectored cache, 64 byte line size */
	{ 0x3d, LVL_2, 384 },		/* 6-way set assoc, sectored cache, 64 byte line size */
	{ 0x3e, LVL_2, 512 },		/* 4-way set assoc, sectored cache, 64 byte line size */
	{ 0x3f, LVL_2, 256 },		/* 2-way set assoc, 64 byte line size */
	{ 0x41, LVL_2, 128 },		/* 4-way set assoc, 32 byte line size */
	{ 0x42, LVL_2, 256 },		/* 4-way set assoc, 32 byte line size */
	{ 0x43, LVL_2, 512 },		/* 4-way set assoc, 32 byte line size */
	{ 0x44, LVL_2, MB(1) },		/* 4-way set assoc, 32 byte line size */
	{ 0x45, LVL_2, MB(2) },		/* 4-way set assoc, 32 byte line size */
	{ 0x46, LVL_3, MB(4) },		/* 4-way set assoc, 64 byte line size */
	{ 0x47, LVL_3, MB(8) },		/* 8-way set assoc, 64 byte line size */
	{ 0x49, LVL_3, MB(4) },		/* 16-way set assoc, 64 byte line size */
	{ 0x4a, LVL_3, MB(6) },		/* 12-way set assoc, 64 byte line size */
	{ 0x4b, LVL_3, MB(8) },		/* 16-way set assoc, 64 byte line size */
	{ 0x4c, LVL_3, MB(12) },	/* 12-way set assoc, 64 byte line size */
	{ 0x4d, LVL_3, MB(16) },	/* 16-way set assoc, 64 byte line size */
	{ 0x4e, LVL_2, MB(6) },		/* 24-way set assoc, 64 byte line size */
	{ 0x60, LVL_1_DATA, 16 },	/* 8-way set assoc, sectored cache, 64 byte line size */
	{ 0x66, LVL_1_DATA, 8 },	/* 4-way set assoc, sectored cache, 64 byte line size */
	{ 0x67, LVL_1_DATA, 16 },	/* 4-way set assoc, sectored cache, 64 byte line size */
	{ 0x68, LVL_1_DATA, 32 },	/* 4-way set assoc, sectored cache, 64 byte line size */
	{ 0x70, LVL_TRACE, 12 },	/* 8-way set assoc */
	{ 0x71, LVL_TRACE, 16 },	/* 8-way set assoc */
	{ 0x72, LVL_TRACE, 32 },	/* 8-way set assoc */
	{ 0x73, LVL_TRACE, 64 },	/* 8-way set assoc */
	{ 0x78, LVL_2, MB(1) },		/* 4-way set assoc, 64 byte line size */
	{ 0x79, LVL_2, 128 },		/* 8-way set assoc, sectored cache, 64 byte line size */
	{ 0x7a, LVL_2, 256 },		/* 8-way set assoc, sectored cache, 64 byte line size */
	{ 0x7b, LVL_2, 512 },		/* 8-way set assoc, sectored cache, 64 byte line size */
	{ 0x7c, LVL_2, MB(1) },		/* 8-way set assoc, sectored cache, 64 byte line size */
	{ 0x7d, LVL_2, MB(2) },		/* 8-way set assoc, 64 byte line size */
	{ 0x7f, LVL_2, 512 },		/* 2-way set assoc, 64 byte line size */
	{ 0x82, LVL_2, 256 },		/* 8-way set assoc, 32 byte line size */
	{ 0x83, LVL_2, 512 },		/* 8-way set assoc, 32 byte line size */
	{ 0x84, LVL_2, MB(1) },		/* 8-way set assoc, 32 byte line size */
	{ 0x85, LVL_2, MB(2) },		/* 8-way set assoc, 32 byte line size */
	{ 0x86, LVL_2, 512 },		/* 4-way set assoc, 64 byte line size */
	{ 0x87, LVL_2, MB(1) },		/* 8-way set assoc, 64 byte line size */
	{ 0xd0, LVL_3, 512 },		/* 4-way set assoc, 64 byte line size */
	{ 0xd1, LVL_3, MB(1) },		/* 4-way set assoc, 64 byte line size */
	{ 0xd2, LVL_3, MB(2) },		/* 4-way set assoc, 64 byte line size */
	{ 0xd6, LVL_3, MB(1) },		/* 8-way set assoc, 64 byte line size */
	{ 0xd7, LVL_3, MB(2) },		/* 8-way set assoc, 64 byte line size */
	{ 0xd8, LVL_3, MB(4) },		/* 12-way set assoc, 64 byte line size */
	{ 0xdc, LVL_3, MB(2) },		/* 12-way set assoc, 64 byte line size */
	{ 0xdd, LVL_3, MB(4) },		/* 12-way set assoc, 64 byte line size */
	{ 0xde, LVL_3, MB(8) },		/* 12-way set assoc, 64 byte line size */
	{ 0xe2, LVL_3, MB(2) },		/* 16-way set assoc, 64 byte line size */
	{ 0xe3, LVL_3, MB(4) },		/* 16-way set assoc, 64 byte line size */
	{ 0xe4, LVL_3, MB(8) },		/* 16-way set assoc, 64 byte line size */
	{ 0xea, LVL_3, MB(12) },	/* 24-way set assoc, 64 byte line size */
	{ 0xeb, LVL_3, MB(18) },	/* 24-way set assoc, 64 byte line size */
	{ 0xec, LVL_3, MB(24) },	/* 24-way set assoc, 64 byte line size */
	{ 0x00, 0, 0}
};

enum _cache_type {
	CACHE_TYPE_NULL = 0,
	CACHE_TYPE_DATA = 1,
	CACHE_TYPE_INST = 2,
	CACHE_TYPE_UNIFIED = 3
};

union _cpuid4_leaf_eax {
	struct {
		enum _cache_type	type:5;
		unsigned int		level:3;
		unsigned int		is_self_initializing:1;
		unsigned int		is_fully_associative:1;
		unsigned int		reserved:4;
		unsigned int		num_threads_sharing:12;
		unsigned int		num_cores_on_die:6;
	} split;
	u32 full;
};

union _cpuid4_leaf_ebx {
	struct {
		unsigned int		coherency_line_size:12;
		unsigned int		physical_line_partition:10;
		unsigned int		ways_of_associativity:10;
	} split;
	u32 full;
};

union _cpuid4_leaf_ecx {
	struct {
		unsigned int		number_of_sets:32;
	} split;
	u32 full;
};

struct _cpuid4_info {
	union _cpuid4_leaf_eax eax;
	union _cpuid4_leaf_ebx ebx;
	union _cpuid4_leaf_ecx ecx;
	unsigned long size;
	bool can_disable;
	unsigned int l3_indices;
	DECLARE_BITMAP(shared_cpu_map, NR_CPUS);
};

/* subset of above _cpuid4_info w/o shared_cpu_map */
struct _cpuid4_info_regs {
	union _cpuid4_leaf_eax eax;
	union _cpuid4_leaf_ebx ebx;
	union _cpuid4_leaf_ecx ecx;
	unsigned long size;
	bool can_disable;
	unsigned int l3_indices;
};

unsigned short num_cache_leaves;

/* AMD doesn't have CPUID4. Emulate it here to report the same
   information to the user. This makes some assumptions about the machine
   that currently hold on AMD CPUs: the L2 is not shared, there is no SMT, etc.
   In theory the TLBs could be reported as a fake type too (they are read
   into "dummy"). Maybe later. */
union l1_cache {
	struct {
		unsigned line_size:8;
		unsigned lines_per_tag:8;
		unsigned assoc:8;
		unsigned size_in_kb:8;
	};
	unsigned val;
};

union l2_cache {
	struct {
		unsigned line_size:8;
		unsigned lines_per_tag:4;
		unsigned assoc:4;
		unsigned size_in_kb:16;
	};
	unsigned val;
};

union l3_cache {
	struct {
		unsigned line_size:8;
		unsigned lines_per_tag:4;
		unsigned assoc:4;
		unsigned res:2;
		unsigned size_encoded:14;
	};
	unsigned val;
};

static const unsigned short __cpuinitconst assocs[] = {
	[1] = 1,
	[2] = 2,
	[4] = 4,
	[6] = 8,
	[8] = 16,
	[0xa] = 32,
	[0xb] = 48,
	[0xc] = 64,
	[0xd] = 96,
	[0xe] = 128,
	[0xf] = 0xffff /* fully associative - no way to show this currently */
};

static const unsigned char __cpuinitconst levels[] = { 1, 1, 2, 3 };
static const unsigned char __cpuinitconst types[] = { 1, 2, 3, 3 };
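
/*
 * levels[]/types[] map the emulated CPUID4 leaf number to a cache level
 * and type: leaves 0 and 1 are the L1 data and instruction caches,
 * leaf 2 the unified L2, leaf 3 the unified L3.
 */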

static void __cpuinit
amd_cpuid4(int leaf, union _cpuid4_leaf_eax *eax,
		     union _cpuid4_leaf_ebx *ebx,
		     union _cpuid4_leaf_ecx *ecx)
{
	unsigned dummy;
	unsigned line_size, lines_per_tag, assoc, size_in_kb;
	union l1_cache l1i, l1d;
	union l2_cache l2;
	union l3_cache l3;
	union l1_cache *l1 = &l1d;

	eax->full = 0;
	ebx->full = 0;
	ecx->full = 0;

	cpuid(0x80000005, &dummy, &dummy, &l1d.val, &l1i.val);
	cpuid(0x80000006, &dummy, &dummy, &l2.val, &l3.val);

	switch (leaf) {
	case 1:
		l1 = &l1i;
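		/* fall through: leaves 0 and 1 share the L1 lookup below */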
	case 0:
		if (!l1->val)
			return;
		assoc = assocs[l1->assoc];
		line_size = l1->line_size;
		lines_per_tag = l1->lines_per_tag;
		size_in_kb = l1->size_in_kb;
		break;
	case 2:
		if (!l2.val)
			return;
		assoc = assocs[l2.assoc];
		line_size = l2.line_size;
		lines_per_tag = l2.lines_per_tag;
		/* cpu_data has errata corrections for K7 applied */
		size_in_kb = current_cpu_data.x86_cache_size;
		break;
	case 3:
		if (!l3.val)
			return;
		assoc = assocs[l3.assoc];
		line_size = l3.line_size;
		lines_per_tag = l3.lines_per_tag;
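		/* CPUID 0x80000006 EDX encodes the L3 size in 512 KB units */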
		size_in_kb = l3.size_encoded * 512;
		if (boot_cpu_has(X86_FEATURE_AMD_DCM)) {
			size_in_kb = size_in_kb >> 1;
			assoc = assoc >> 1;
		}
		break;
	default:
		return;
	}

	eax->split.is_self_initializing = 1;
	eax->split.type = types[leaf];
	eax->split.level = levels[leaf];
	eax->split.num_threads_sharing = 0;
	eax->split.num_cores_on_die = current_cpu_data.x86_max_cores - 1;

	if (assoc == 0xffff)
		eax->split.is_fully_associative = 1;
	ebx->split.coherency_line_size = line_size - 1;
	ebx->split.ways_of_associativity = assoc - 1;
	ebx->split.physical_line_partition = lines_per_tag - 1;
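	/*
	 * Like real CPUID4, report the set count minus one:
	 *   sets = size / (line_size * ways) - 1
	 * e.g. a 512 KB, 16-way cache with 64-byte lines has
	 * 512*1024 / (64 * 16) = 512 sets, stored as 511.
	 */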
	ecx->split.number_of_sets = (size_in_kb * 1024) / line_size /
		(ebx->split.ways_of_associativity + 1) - 1;
}

struct _cache_attr {
	struct attribute attr;
	ssize_t (*show)(struct _cpuid4_info *, char *);
	ssize_t (*store)(struct _cpuid4_info *, const char *, size_t count);
};

#ifdef CONFIG_CPU_SUP_AMD
static unsigned int __cpuinit amd_calc_l3_indices(void)
{
	/*
	 * We're called over smp_call_function_single() and therefore
	 * are on the correct cpu.
	 */
	int cpu = smp_processor_id();
	int node = cpu_to_node(cpu);
	struct pci_dev *dev = node_to_k8_nb_misc(node);
	unsigned int sc0, sc1, sc2, sc3;
	u32 val = 0;

	pci_read_config_dword(dev, 0x1C4, &val);

	/* calculate subcache sizes */
	sc0 = !(val & BIT(0));
	sc1 = !(val & BIT(4));
	sc2 = !(val & BIT(8))  + !(val & BIT(9));
	sc3 = !(val & BIT(12)) + !(val & BIT(13));
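	/*
	 * A set bit in 0x1C4 marks that part of a subcache as unavailable.
	 * Each available subcache portion appears to contribute 1024 indices,
	 * so the largest subcache count, shifted by 10, minus one gives the
	 * highest usable L3 index.
	 */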
	return (max(max(max(sc0, sc1), sc2), sc3) << 10) - 1;
}

static void __cpuinit
amd_check_l3_disable(int index, struct _cpuid4_info_regs *this_leaf)
{
	if (boot_cpu_data.x86 != 0x10)
		return;

	if (index < 3)
		return;

	/* see errata #382 and #388 */
	if (boot_cpu_data.x86_model < 0x8)
		return;

	if ((boot_cpu_data.x86_model == 0x8 ||
	     boot_cpu_data.x86_model == 0x9) &&
	    boot_cpu_data.x86_mask < 0x1)
		return;

	/* not in virtualized environments */
	if (num_k8_northbridges == 0)
		return;

	this_leaf->can_disable = true;
	this_leaf->l3_indices = amd_calc_l3_indices();
}

static ssize_t show_cache_disable(struct _cpuid4_info *this_leaf, char *buf,
				  unsigned int index)
{
	int cpu = cpumask_first(to_cpumask(this_leaf->shared_cpu_map));
	int node = amd_get_nb_id(cpu);
	struct pci_dev *dev = node_to_k8_nb_misc(node);
	unsigned int reg = 0;

	if (!this_leaf->can_disable)
		return -EINVAL;

	if (!dev)
		return -EINVAL;

	pci_read_config_dword(dev, 0x1BC + index * 4, &reg);
	return sprintf(buf, "0x%08x\n", reg);
}

#define SHOW_CACHE_DISABLE(index)					\
static ssize_t								\
show_cache_disable_##index(struct _cpuid4_info *this_leaf, char *buf)	\
{									\
	return show_cache_disable(this_leaf, buf, index);		\
}
SHOW_CACHE_DISABLE(0)
SHOW_CACHE_DISABLE(1)

static ssize_t store_cache_disable(struct _cpuid4_info *this_leaf,
	const char *buf, size_t count, unsigned int index)
{
	int cpu = cpumask_first(to_cpumask(this_leaf->shared_cpu_map));
	int node = amd_get_nb_id(cpu);
	struct pci_dev *dev = node_to_k8_nb_misc(node);
	unsigned long val = 0;

#define SUBCACHE_MASK	(3UL << 20)
#define SUBCACHE_INDEX	0xfff

	if (!this_leaf->can_disable)
		return -EINVAL;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	if (!dev)
		return -EINVAL;

	if (strict_strtoul(buf, 10, &val) < 0)
		return -EINVAL;

	/* do not allow writes outside of allowed bits */
	if ((val & ~(SUBCACHE_MASK | SUBCACHE_INDEX)) ||
	    ((val & SUBCACHE_INDEX) > this_leaf->l3_indices))
		return -EINVAL;
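
	/*
	 * The disable is committed in two writes: first with bit 30 set,
	 * then, after the node's caches have been flushed below, with
	 * bit 31 set to engage the index disable.
	 */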
	val |= BIT(30);
	pci_write_config_dword(dev, 0x1BC + index * 4, val);
	/*
	 * We need to WBINVD on a core on the node containing the L3 cache
	 * whose indices we disable; therefore a simple wbinvd() is not
	 * sufficient.
	 */
	wbinvd_on_cpu(cpu);
	pci_write_config_dword(dev, 0x1BC + index * 4, val | BIT(31));
	return count;
}

#define STORE_CACHE_DISABLE(index)					\
static ssize_t								\
store_cache_disable_##index(struct _cpuid4_info *this_leaf,		\
			    const char *buf, size_t count)		\
{									\
	return store_cache_disable(this_leaf, buf, count, index);	\
}
STORE_CACHE_DISABLE(0)
STORE_CACHE_DISABLE(1)

static struct _cache_attr cache_disable_0 = __ATTR(cache_disable_0, 0644,
		show_cache_disable_0, store_cache_disable_0);
static struct _cache_attr cache_disable_1 = __ATTR(cache_disable_1, 0644,
		show_cache_disable_1, store_cache_disable_1);

#else	/* CONFIG_CPU_SUP_AMD */
static void __cpuinit
amd_check_l3_disable(int index, struct _cpuid4_info_regs *this_leaf)
{
}
#endif /* CONFIG_CPU_SUP_AMD */

static int
__cpuinit cpuid4_cache_lookup_regs(int index,
				   struct _cpuid4_info_regs *this_leaf)
{
	union _cpuid4_leaf_eax	eax;
	union _cpuid4_leaf_ebx	ebx;
	union _cpuid4_leaf_ecx	ecx;
	unsigned		edx;

	if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD) {
		amd_cpuid4(index, &eax, &ebx, &ecx);
		amd_check_l3_disable(index, this_leaf);
	} else {
		cpuid_count(4, index, &eax.full, &ebx.full, &ecx.full, &edx);
	}

	if (eax.split.type == CACHE_TYPE_NULL)
		return -EIO; /* better error ? */

	this_leaf->eax = eax;
	this_leaf->ebx = ebx;
	this_leaf->ecx = ecx;
	this_leaf->size = (ecx.split.number_of_sets          + 1) *
			  (ebx.split.coherency_line_size     + 1) *
			  (ebx.split.physical_line_partition + 1) *
			  (ebx.split.ways_of_associativity   + 1);
	return 0;
}

static int __cpuinit find_num_cache_leaves(void)
{
	unsigned int		eax, ebx, ecx, edx;
	union _cpuid4_leaf_eax	cache_eax;
	int			i = -1;

	do {
		++i;
		/* Do cpuid(4) loop to find out num_cache_leaves */
		cpuid_count(4, i, &eax, &ebx, &ecx, &edx);
		cache_eax.full = eax;
	} while (cache_eax.split.type != CACHE_TYPE_NULL);
	return i;
}

unsigned int __cpuinit init_intel_cacheinfo(struct cpuinfo_x86 *c)
{
	/* Cache sizes */
	unsigned int trace = 0, l1i = 0, l1d = 0, l2 = 0, l3 = 0;
	unsigned int new_l1d = 0, new_l1i = 0; /* Cache sizes from cpuid(4) */
	unsigned int new_l2 = 0, new_l3 = 0, i; /* Cache sizes from cpuid(4) */
	unsigned int l2_id = 0, l3_id = 0, num_threads_sharing, index_msb;
#ifdef CONFIG_X86_HT
	unsigned int cpu = c->cpu_index;
#endif

	if (c->cpuid_level > 3) {
		static int is_initialized;

		if (is_initialized == 0) {
			/* Init num_cache_leaves from boot CPU */
			num_cache_leaves = find_num_cache_leaves();
			is_initialized++;
		}

		/*
		 * Whenever possible use cpuid(4), the deterministic cache
		 * parameters leaf, to find the cache details.
		 */
		for (i = 0; i < num_cache_leaves; i++) {
			struct _cpuid4_info_regs this_leaf;
			int retval;

			retval = cpuid4_cache_lookup_regs(i, &this_leaf);
			if (retval >= 0) {
				switch (this_leaf.eax.split.level) {
				case 1:
					if (this_leaf.eax.split.type ==
							CACHE_TYPE_DATA)
						new_l1d = this_leaf.size/1024;
					else if (this_leaf.eax.split.type ==
							CACHE_TYPE_INST)
						new_l1i = this_leaf.size/1024;
					break;
				case 2:
					new_l2 = this_leaf.size/1024;
					num_threads_sharing = 1 + this_leaf.eax.split.num_threads_sharing;
					index_msb = get_count_order(num_threads_sharing);
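					/* CPUs whose APIC IDs agree above index_msb share this cache */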
					l2_id = c->apicid >> index_msb;
					break;
				case 3:
					new_l3 = this_leaf.size/1024;
					num_threads_sharing = 1 + this_leaf.eax.split.num_threads_sharing;
					index_msb = get_count_order(
							num_threads_sharing);
					l3_id = c->apicid >> index_msb;
					break;
				default:
					break;
				}
			}
		}
	}

	/*
	 * Don't use cpuid2 if cpuid4 is supported. For P4, we use cpuid2 for
	 * trace cache
	 */
	if ((num_cache_leaves == 0 || c->x86 == 15) && c->cpuid_level > 1) {
		/* supports eax=2 call */
		int j, n;
		unsigned int regs[4];
		unsigned char *dp = (unsigned char *)regs;
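		/* the four registers together hold 16 one-byte descriptors */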
		int only_trace = 0;

		if (num_cache_leaves != 0 && c->x86 == 15)
			only_trace = 1;

		/* Number of times to iterate */
		n = cpuid_eax(2) & 0xFF;

		for (i = 0 ; i < n ; i++) {
			cpuid(2, &regs[0], &regs[1], &regs[2], &regs[3]);

			/* If bit 31 is set, this is an unknown format */
			for (j = 0 ; j < 3 ; j++)
				if (regs[j] & (1 << 31))
					regs[j] = 0;

			/* Byte 0 is level count, not a descriptor */
			for (j = 1 ; j < 16 ; j++) {
				unsigned char des = dp[j];
				unsigned char k = 0;

				/* look up this descriptor in the table */
				while (cache_table[k].descriptor != 0) {
					if (cache_table[k].descriptor == des) {
						if (only_trace && cache_table[k].cache_type != LVL_TRACE)
							break;
						switch (cache_table[k].cache_type) {
						case LVL_1_INST:
							l1i += cache_table[k].size;
							break;
						case LVL_1_DATA:
							l1d += cache_table[k].size;
							break;
						case LVL_2:
							l2 += cache_table[k].size;
							break;
						case LVL_3:
							l3 += cache_table[k].size;
							break;
						case LVL_TRACE:
							trace += cache_table[k].size;
							break;
						}

						break;
					}

					k++;
				}
			}
		}
	}

	if (new_l1d)
		l1d = new_l1d;

	if (new_l1i)
		l1i = new_l1i;

	if (new_l2) {
		l2 = new_l2;
#ifdef CONFIG_X86_HT
		per_cpu(cpu_llc_id, cpu) = l2_id;
#endif
	}

	if (new_l3) {
		l3 = new_l3;
#ifdef CONFIG_X86_HT
		per_cpu(cpu_llc_id, cpu) = l3_id;
#endif
	}

	c->x86_cache_size = l3 ? l3 : (l2 ? l2 : (l1i+l1d));

	return l2;
}

#ifdef CONFIG_SYSFS

/* pointer to _cpuid4_info array (for each cache leaf) */
static DEFINE_PER_CPU(struct _cpuid4_info *, ici_cpuid4_info);
#define CPUID4_INFO_IDX(x, y)	(&((per_cpu(ici_cpuid4_info, x))[y]))

#ifdef CONFIG_SMP
static void __cpuinit cache_shared_cpu_map_setup(unsigned int cpu, int index)
{
	struct _cpuid4_info	*this_leaf, *sibling_leaf;
	unsigned long num_threads_sharing;
	int index_msb, i, sibling;
	struct cpuinfo_x86 *c = &cpu_data(cpu);
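
	/*
	 * The AMD CPUID4 emulation reports num_threads_sharing as 0, so
	 * for the L3 (index 3) fall back to the precomputed llc_shared_map.
	 */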
	if ((index == 3) && (c->x86_vendor == X86_VENDOR_AMD)) {
		for_each_cpu(i, c->llc_shared_map) {
			if (!per_cpu(ici_cpuid4_info, i))
				continue;
			this_leaf = CPUID4_INFO_IDX(i, index);
			for_each_cpu(sibling, c->llc_shared_map) {
				if (!cpu_online(sibling))
					continue;
				set_bit(sibling, this_leaf->shared_cpu_map);
			}
		}
		return;
	}
	this_leaf = CPUID4_INFO_IDX(cpu, index);
	num_threads_sharing = 1 + this_leaf->eax.split.num_threads_sharing;

	if (num_threads_sharing == 1)
		cpumask_set_cpu(cpu, to_cpumask(this_leaf->shared_cpu_map));
	else {
		index_msb = get_count_order(num_threads_sharing);

		for_each_online_cpu(i) {
			if (cpu_data(i).apicid >> index_msb ==
			    c->apicid >> index_msb) {
				cpumask_set_cpu(i,
					to_cpumask(this_leaf->shared_cpu_map));
				if (i != cpu && per_cpu(ici_cpuid4_info, i)) {
					sibling_leaf =
						CPUID4_INFO_IDX(i, index);
					cpumask_set_cpu(cpu, to_cpumask(
						sibling_leaf->shared_cpu_map));
				}
			}
		}
	}
}

static void __cpuinit cache_remove_shared_cpu_map(unsigned int cpu, int index)
{
	struct _cpuid4_info	*this_leaf, *sibling_leaf;
	int sibling;

	this_leaf = CPUID4_INFO_IDX(cpu, index);
	for_each_cpu(sibling, to_cpumask(this_leaf->shared_cpu_map)) {
		sibling_leaf = CPUID4_INFO_IDX(sibling, index);
		cpumask_clear_cpu(cpu,
				  to_cpumask(sibling_leaf->shared_cpu_map));
	}
}
#else
static void __cpuinit cache_shared_cpu_map_setup(unsigned int cpu, int index)
{
}

static void __cpuinit cache_remove_shared_cpu_map(unsigned int cpu, int index)
{
}
#endif

static void __cpuinit free_cache_attributes(unsigned int cpu)
{
	int i;

	for (i = 0; i < num_cache_leaves; i++)
		cache_remove_shared_cpu_map(cpu, i);

	kfree(per_cpu(ici_cpuid4_info, cpu));
	per_cpu(ici_cpuid4_info, cpu) = NULL;
}

static int
__cpuinit cpuid4_cache_lookup(int index, struct _cpuid4_info *this_leaf)
{
	struct _cpuid4_info_regs *leaf_regs =
		(struct _cpuid4_info_regs *)this_leaf;
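
	/* safe cast: _cpuid4_info_regs is a layout-compatible prefix of _cpuid4_info */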
	return cpuid4_cache_lookup_regs(index, leaf_regs);
}

static void __cpuinit get_cpu_leaves(void *_retval)
{
	int j, *retval = _retval, cpu = smp_processor_id();

	/* Do cpuid and store the results */
	for (j = 0; j < num_cache_leaves; j++) {
		struct _cpuid4_info *this_leaf;
		this_leaf = CPUID4_INFO_IDX(cpu, j);
		*retval = cpuid4_cache_lookup(j, this_leaf);
		if (unlikely(*retval < 0)) {
			int i;

			for (i = 0; i < j; i++)
				cache_remove_shared_cpu_map(cpu, i);
			break;
		}
		cache_shared_cpu_map_setup(cpu, j);
	}
}

static int __cpuinit detect_cache_attributes(unsigned int cpu)
{
	int retval;

	if (num_cache_leaves == 0)
		return -ENOENT;

	per_cpu(ici_cpuid4_info, cpu) = kzalloc(
	    sizeof(struct _cpuid4_info) * num_cache_leaves, GFP_KERNEL);
	if (per_cpu(ici_cpuid4_info, cpu) == NULL)
		return -ENOMEM;

	smp_call_function_single(cpu, get_cpu_leaves, &retval, true);
	if (retval) {
		kfree(per_cpu(ici_cpuid4_info, cpu));
		per_cpu(ici_cpuid4_info, cpu) = NULL;
	}

	return retval;
}

#include <linux/kobject.h>
#include <linux/sysfs.h>

extern struct sysdev_class cpu_sysdev_class; /* from drivers/base/cpu.c */

/* pointer to kobject for cpuX/cache */
static DEFINE_PER_CPU(struct kobject *, ici_cache_kobject);

struct _index_kobject {
	struct kobject kobj;
	unsigned int cpu;
	unsigned short index;
};

/* pointer to array of kobjects for cpuX/cache/indexY */
static DEFINE_PER_CPU(struct _index_kobject *, ici_index_kobject);
#define INDEX_KOBJECT_PTR(x, y)	(&((per_cpu(ici_index_kobject, x))[y]))

#define show_one_plus(file_name, object, val)				\
static ssize_t show_##file_name						\
			(struct _cpuid4_info *this_leaf, char *buf)	\
{									\
	return sprintf(buf, "%lu\n", (unsigned long)this_leaf->object + val); \
}

show_one_plus(level, eax.split.level, 0);
show_one_plus(coherency_line_size, ebx.split.coherency_line_size, 1);
show_one_plus(physical_line_partition, ebx.split.physical_line_partition, 1);
show_one_plus(ways_of_associativity, ebx.split.ways_of_associativity, 1);
show_one_plus(number_of_sets, ecx.split.number_of_sets, 1);

static ssize_t show_size(struct _cpuid4_info *this_leaf, char *buf)
{
	return sprintf(buf, "%luK\n", this_leaf->size / 1024);
}

static ssize_t show_shared_cpu_map_func(struct _cpuid4_info *this_leaf,
					int type, char *buf)
{
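	/* bytes left in this PAGE_SIZE sysfs buffer; len-2 below reserves room for "\n\0" */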
	ptrdiff_t len = PTR_ALIGN(buf + PAGE_SIZE - 1, PAGE_SIZE) - buf;
	int n = 0;

	if (len > 1) {
		const struct cpumask *mask;

		mask = to_cpumask(this_leaf->shared_cpu_map);
		n = type ?
			cpulist_scnprintf(buf, len-2, mask) :
			cpumask_scnprintf(buf, len-2, mask);
		buf[n++] = '\n';
		buf[n] = '\0';
	}
	return n;
}

static inline ssize_t show_shared_cpu_map(struct _cpuid4_info *leaf, char *buf)
{
	return show_shared_cpu_map_func(leaf, 0, buf);
}

static inline ssize_t show_shared_cpu_list(struct _cpuid4_info *leaf, char *buf)
{
	return show_shared_cpu_map_func(leaf, 1, buf);
}

static ssize_t show_type(struct _cpuid4_info *this_leaf, char *buf)
{
	switch (this_leaf->eax.split.type) {
	case CACHE_TYPE_DATA:
		return sprintf(buf, "Data\n");
	case CACHE_TYPE_INST:
		return sprintf(buf, "Instruction\n");
	case CACHE_TYPE_UNIFIED:
		return sprintf(buf, "Unified\n");
	default:
		return sprintf(buf, "Unknown\n");
	}
}

#define to_object(k)	container_of(k, struct _index_kobject, kobj)
#define to_attr(a)	container_of(a, struct _cache_attr, attr)

#define define_one_ro(_name) \
static struct _cache_attr _name = \
	__ATTR(_name, 0444, show_##_name, NULL)

define_one_ro(level);
define_one_ro(type);
define_one_ro(coherency_line_size);
define_one_ro(physical_line_partition);
define_one_ro(ways_of_associativity);
define_one_ro(number_of_sets);
define_one_ro(size);
define_one_ro(shared_cpu_map);
define_one_ro(shared_cpu_list);

#define DEFAULT_SYSFS_CACHE_ATTRS	\
	&type.attr,			\
	&level.attr,			\
	&coherency_line_size.attr,	\
	&physical_line_partition.attr,	\
	&ways_of_associativity.attr,	\
	&number_of_sets.attr,		\
	&size.attr,			\
	&shared_cpu_map.attr,		\
	&shared_cpu_list.attr

static struct attribute *default_attrs[] = {
	DEFAULT_SYSFS_CACHE_ATTRS,
	NULL
};

static struct attribute *default_l3_attrs[] = {
	DEFAULT_SYSFS_CACHE_ATTRS,
#ifdef CONFIG_CPU_SUP_AMD
	&cache_disable_0.attr,
	&cache_disable_1.attr,
#endif
	NULL
};

static ssize_t show(struct kobject *kobj, struct attribute *attr, char *buf)
{
	struct _cache_attr *fattr = to_attr(attr);
	struct _index_kobject *this_leaf = to_object(kobj);
	ssize_t ret;

	ret = fattr->show ?
		fattr->show(CPUID4_INFO_IDX(this_leaf->cpu, this_leaf->index),
			buf) :
		0;
	return ret;
}

static ssize_t store(struct kobject *kobj, struct attribute *attr,
		     const char *buf, size_t count)
{
	struct _cache_attr *fattr = to_attr(attr);
	struct _index_kobject *this_leaf = to_object(kobj);
	ssize_t ret;

	ret = fattr->store ?
		fattr->store(CPUID4_INFO_IDX(this_leaf->cpu, this_leaf->index),
			buf, count) :
		0;
	return ret;
}

static const struct sysfs_ops sysfs_ops = {
	.show   = show,
	.store  = store,
};

static struct kobj_type ktype_cache = {
	.sysfs_ops	= &sysfs_ops,
	.default_attrs	= default_attrs,
};

static struct kobj_type ktype_percpu_entry = {
	.sysfs_ops	= &sysfs_ops,
};

static void __cpuinit cpuid4_cache_sysfs_exit(unsigned int cpu)
{
	kfree(per_cpu(ici_cache_kobject, cpu));
	kfree(per_cpu(ici_index_kobject, cpu));
	per_cpu(ici_cache_kobject, cpu) = NULL;
	per_cpu(ici_index_kobject, cpu) = NULL;
	free_cache_attributes(cpu);
}

static int __cpuinit cpuid4_cache_sysfs_init(unsigned int cpu)
{
	int err;

	if (num_cache_leaves == 0)
		return -ENOENT;

	err = detect_cache_attributes(cpu);
	if (err)
		return err;

	/* Allocate all required memory */
	per_cpu(ici_cache_kobject, cpu) =
		kzalloc(sizeof(struct kobject), GFP_KERNEL);
	if (unlikely(per_cpu(ici_cache_kobject, cpu) == NULL))
		goto err_out;

	per_cpu(ici_index_kobject, cpu) = kzalloc(
	    sizeof(struct _index_kobject) * num_cache_leaves, GFP_KERNEL);
	if (unlikely(per_cpu(ici_index_kobject, cpu) == NULL))
		goto err_out;

	return 0;

err_out:
	cpuid4_cache_sysfs_exit(cpu);
	return -ENOMEM;
}

static DECLARE_BITMAP(cache_dev_map, NR_CPUS);

/* Add/Remove cache interface for CPU device */
static int __cpuinit cache_add_dev(struct sys_device *sys_dev)
{
	unsigned int cpu = sys_dev->id;
	unsigned long i, j;
	struct _index_kobject *this_object;
	struct _cpuid4_info   *this_leaf;
	int retval;

	retval = cpuid4_cache_sysfs_init(cpu);
	if (unlikely(retval < 0))
		return retval;

	retval = kobject_init_and_add(per_cpu(ici_cache_kobject, cpu),
				      &ktype_percpu_entry,
				      &sys_dev->kobj, "%s", "cache");
	if (retval < 0) {
		cpuid4_cache_sysfs_exit(cpu);
		return retval;
	}

	for (i = 0; i < num_cache_leaves; i++) {
		this_object = INDEX_KOBJECT_PTR(cpu, i);
		this_object->cpu = cpu;
		this_object->index = i;

		this_leaf = CPUID4_INFO_IDX(cpu, i);

		if (this_leaf->can_disable)
			ktype_cache.default_attrs = default_l3_attrs;
		else
			ktype_cache.default_attrs = default_attrs;

		retval = kobject_init_and_add(&(this_object->kobj),
					      &ktype_cache,
					      per_cpu(ici_cache_kobject, cpu),
					      "index%1lu", i);
		if (unlikely(retval)) {
			for (j = 0; j < i; j++)
				kobject_put(&(INDEX_KOBJECT_PTR(cpu, j)->kobj));
			kobject_put(per_cpu(ici_cache_kobject, cpu));
			cpuid4_cache_sysfs_exit(cpu);
			return retval;
		}
		kobject_uevent(&(this_object->kobj), KOBJ_ADD);
	}
	cpumask_set_cpu(cpu, to_cpumask(cache_dev_map));

	kobject_uevent(per_cpu(ici_cache_kobject, cpu), KOBJ_ADD);
	return 0;
}

static void __cpuinit cache_remove_dev(struct sys_device *sys_dev)
{
	unsigned int cpu = sys_dev->id;
	unsigned long i;

	if (per_cpu(ici_cpuid4_info, cpu) == NULL)
		return;
	if (!cpumask_test_cpu(cpu, to_cpumask(cache_dev_map)))
		return;
	cpumask_clear_cpu(cpu, to_cpumask(cache_dev_map));

	for (i = 0; i < num_cache_leaves; i++)
		kobject_put(&(INDEX_KOBJECT_PTR(cpu, i)->kobj));
	kobject_put(per_cpu(ici_cache_kobject, cpu));
	cpuid4_cache_sysfs_exit(cpu);
}

static int __cpuinit cacheinfo_cpu_callback(struct notifier_block *nfb,
					    unsigned long action, void *hcpu)
{
	unsigned int cpu = (unsigned long)hcpu;
	struct sys_device *sys_dev;

	sys_dev = get_cpu_sysdev(cpu);
	switch (action) {
	case CPU_ONLINE:
	case CPU_ONLINE_FROZEN:
		cache_add_dev(sys_dev);
		break;
	case CPU_DEAD:
	case CPU_DEAD_FROZEN:
		cache_remove_dev(sys_dev);
		break;
	}
	return NOTIFY_OK;
}

static struct notifier_block __cpuinitdata cacheinfo_cpu_notifier = {
	.notifier_call = cacheinfo_cpu_callback,
};

static int __cpuinit cache_sysfs_init(void)
{
	int i;

	if (num_cache_leaves == 0)
		return 0;

	for_each_online_cpu(i) {
		int err;
		struct sys_device *sys_dev = get_cpu_sysdev(i);

		err = cache_add_dev(sys_dev);
		if (err)
			return err;
	}
	register_hotcpu_notifier(&cacheinfo_cpu_notifier);
	return 0;
}

device_initcall(cache_sysfs_init);

#endif