intel_cacheinfo.c

/*
 *	Routines to identify caches on Intel CPU.
 *
 *	Changes:
 *	Venkatesh Pallipadi	: Adding cache identification through cpuid(4)
 *	Ashok Raj <ashok.raj@intel.com>: Work with CPU hotplug infrastructure.
 *	Andi Kleen / Andreas Herrmann	: CPUID4 emulation on AMD.
 */

#include <linux/init.h>
#include <linux/slab.h>
#include <linux/device.h>
#include <linux/compiler.h>
#include <linux/cpu.h>
#include <linux/sched.h>
#include <linux/pci.h>

#include <asm/processor.h>
#include <linux/smp.h>
#include <asm/k8.h>
#include <asm/smp.h>

#define LVL_1_INST	1
#define LVL_1_DATA	2
#define LVL_2		3
#define LVL_3		4
#define LVL_TRACE	5

struct _cache_table {
	unsigned char descriptor;
	char cache_type;
	short size;
};

/* All the cache descriptor types we care about (no TLB or
   trace cache entries) */

static const struct _cache_table __cpuinitconst cache_table[] =
{
	{ 0x06, LVL_1_INST, 8 },	/* 4-way set assoc, 32 byte line size */
	{ 0x08, LVL_1_INST, 16 },	/* 4-way set assoc, 32 byte line size */
	{ 0x09, LVL_1_INST, 32 },	/* 4-way set assoc, 64 byte line size */
	{ 0x0a, LVL_1_DATA, 8 },	/* 2 way set assoc, 32 byte line size */
	{ 0x0c, LVL_1_DATA, 16 },	/* 4-way set assoc, 32 byte line size */
	{ 0x0d, LVL_1_DATA, 16 },	/* 4-way set assoc, 64 byte line size */
	{ 0x21, LVL_2,      256 },	/* 8-way set assoc, 64 byte line size */
	{ 0x22, LVL_3,      512 },	/* 4-way set assoc, sectored cache, 64 byte line size */
	{ 0x23, LVL_3,      1024 },	/* 8-way set assoc, sectored cache, 64 byte line size */
	{ 0x25, LVL_3,      2048 },	/* 8-way set assoc, sectored cache, 64 byte line size */
	{ 0x29, LVL_3,      4096 },	/* 8-way set assoc, sectored cache, 64 byte line size */
	{ 0x2c, LVL_1_DATA, 32 },	/* 8-way set assoc, 64 byte line size */
	{ 0x30, LVL_1_INST, 32 },	/* 8-way set assoc, 64 byte line size */
	{ 0x39, LVL_2,      128 },	/* 4-way set assoc, sectored cache, 64 byte line size */
	{ 0x3a, LVL_2,      192 },	/* 6-way set assoc, sectored cache, 64 byte line size */
	{ 0x3b, LVL_2,      128 },	/* 2-way set assoc, sectored cache, 64 byte line size */
	{ 0x3c, LVL_2,      256 },	/* 4-way set assoc, sectored cache, 64 byte line size */
	{ 0x3d, LVL_2,      384 },	/* 6-way set assoc, sectored cache, 64 byte line size */
	{ 0x3e, LVL_2,      512 },	/* 4-way set assoc, sectored cache, 64 byte line size */
	{ 0x3f, LVL_2,      256 },	/* 2-way set assoc, 64 byte line size */
	{ 0x41, LVL_2,      128 },	/* 4-way set assoc, 32 byte line size */
	{ 0x42, LVL_2,      256 },	/* 4-way set assoc, 32 byte line size */
	{ 0x43, LVL_2,      512 },	/* 4-way set assoc, 32 byte line size */
	{ 0x44, LVL_2,      1024 },	/* 4-way set assoc, 32 byte line size */
	{ 0x45, LVL_2,      2048 },	/* 4-way set assoc, 32 byte line size */
	{ 0x46, LVL_3,      4096 },	/* 4-way set assoc, 64 byte line size */
	{ 0x47, LVL_3,      8192 },	/* 8-way set assoc, 64 byte line size */
	{ 0x49, LVL_3,      4096 },	/* 16-way set assoc, 64 byte line size */
	{ 0x4a, LVL_3,      6144 },	/* 12-way set assoc, 64 byte line size */
	{ 0x4b, LVL_3,      8192 },	/* 16-way set assoc, 64 byte line size */
	{ 0x4c, LVL_3,      12288 },	/* 12-way set assoc, 64 byte line size */
	{ 0x4d, LVL_3,      16384 },	/* 16-way set assoc, 64 byte line size */
	{ 0x4e, LVL_2,      6144 },	/* 24-way set assoc, 64 byte line size */
	{ 0x60, LVL_1_DATA, 16 },	/* 8-way set assoc, sectored cache, 64 byte line size */
	{ 0x66, LVL_1_DATA, 8 },	/* 4-way set assoc, sectored cache, 64 byte line size */
	{ 0x67, LVL_1_DATA, 16 },	/* 4-way set assoc, sectored cache, 64 byte line size */
	{ 0x68, LVL_1_DATA, 32 },	/* 4-way set assoc, sectored cache, 64 byte line size */
	{ 0x70, LVL_TRACE,  12 },	/* 8-way set assoc */
	{ 0x71, LVL_TRACE,  16 },	/* 8-way set assoc */
	{ 0x72, LVL_TRACE,  32 },	/* 8-way set assoc */
	{ 0x73, LVL_TRACE,  64 },	/* 8-way set assoc */
	{ 0x78, LVL_2,      1024 },	/* 4-way set assoc, 64 byte line size */
	{ 0x79, LVL_2,      128 },	/* 8-way set assoc, sectored cache, 64 byte line size */
	{ 0x7a, LVL_2,      256 },	/* 8-way set assoc, sectored cache, 64 byte line size */
	{ 0x7b, LVL_2,      512 },	/* 8-way set assoc, sectored cache, 64 byte line size */
	{ 0x7c, LVL_2,      1024 },	/* 8-way set assoc, sectored cache, 64 byte line size */
	{ 0x7d, LVL_2,      2048 },	/* 8-way set assoc, 64 byte line size */
	{ 0x7f, LVL_2,      512 },	/* 2-way set assoc, 64 byte line size */
	{ 0x82, LVL_2,      256 },	/* 8-way set assoc, 32 byte line size */
	{ 0x83, LVL_2,      512 },	/* 8-way set assoc, 32 byte line size */
	{ 0x84, LVL_2,      1024 },	/* 8-way set assoc, 32 byte line size */
	{ 0x85, LVL_2,      2048 },	/* 8-way set assoc, 32 byte line size */
	{ 0x86, LVL_2,      512 },	/* 4-way set assoc, 64 byte line size */
	{ 0x87, LVL_2,      1024 },	/* 8-way set assoc, 64 byte line size */
	{ 0xd0, LVL_3,      512 },	/* 4-way set assoc, 64 byte line size */
	{ 0xd1, LVL_3,      1024 },	/* 4-way set assoc, 64 byte line size */
	{ 0xd2, LVL_3,      2048 },	/* 4-way set assoc, 64 byte line size */
	{ 0xd6, LVL_3,      1024 },	/* 8-way set assoc, 64 byte line size */
	{ 0xd7, LVL_3,      2048 },	/* 8-way set assoc, 64 byte line size */
	{ 0xd8, LVL_3,      4096 },	/* 12-way set assoc, 64 byte line size */
	{ 0xdc, LVL_3,      2048 },	/* 12-way set assoc, 64 byte line size */
	{ 0xdd, LVL_3,      4096 },	/* 12-way set assoc, 64 byte line size */
	{ 0xde, LVL_3,      8192 },	/* 12-way set assoc, 64 byte line size */
	{ 0xe2, LVL_3,      2048 },	/* 16-way set assoc, 64 byte line size */
	{ 0xe3, LVL_3,      4096 },	/* 16-way set assoc, 64 byte line size */
	{ 0xe4, LVL_3,      8192 },	/* 16-way set assoc, 64 byte line size */
	{ 0xea, LVL_3,      12288 },	/* 24-way set assoc, 64 byte line size */
	{ 0xeb, LVL_3,      18432 },	/* 24-way set assoc, 64 byte line size */
	{ 0xec, LVL_3,      24576 },	/* 24-way set assoc, 64 byte line size */
	{ 0x00, 0, 0}
};
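
/*
 * Worked example (illustrative): a CPUID leaf 2 descriptor byte of 0x2c
 * matches the table entry { 0x2c, LVL_1_DATA, 32 } above, i.e. a 32 KB L1
 * data cache (8-way set associative, 64 byte lines per its comment).
 */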
enum _cache_type {
	CACHE_TYPE_NULL	= 0,
	CACHE_TYPE_DATA = 1,
	CACHE_TYPE_INST = 2,
	CACHE_TYPE_UNIFIED = 3
};

union _cpuid4_leaf_eax {
	struct {
		enum _cache_type	type:5;
		unsigned int		level:3;
		unsigned int		is_self_initializing:1;
		unsigned int		is_fully_associative:1;
		unsigned int		reserved:4;
		unsigned int		num_threads_sharing:12;
		unsigned int		num_cores_on_die:6;
	} split;
	u32 full;
};

union _cpuid4_leaf_ebx {
	struct {
		unsigned int		coherency_line_size:12;
		unsigned int		physical_line_partition:10;
		unsigned int		ways_of_associativity:10;
	} split;
	u32 full;
};

union _cpuid4_leaf_ecx {
	struct {
		unsigned int		number_of_sets:32;
	} split;
	u32 full;
};

struct _cpuid4_info {
	union _cpuid4_leaf_eax eax;
	union _cpuid4_leaf_ebx ebx;
	union _cpuid4_leaf_ecx ecx;
	unsigned long size;
	bool can_disable;
	unsigned int l3_indices;
	DECLARE_BITMAP(shared_cpu_map, NR_CPUS);
};

/* subset of above _cpuid4_info w/o shared_cpu_map */
struct _cpuid4_info_regs {
	union _cpuid4_leaf_eax eax;
	union _cpuid4_leaf_ebx ebx;
	union _cpuid4_leaf_ecx ecx;
	unsigned long size;
	bool can_disable;
	unsigned int l3_indices;
};

unsigned short			num_cache_leaves;

/* AMD doesn't have CPUID4. Emulate it here to report the same
   information to the user. This makes some assumptions about the machine:
   L2 not shared, no SMT etc. that is currently true on AMD CPUs.

   In theory the TLBs could be reported as fake type (they are in "dummy").
   Maybe later */
union l1_cache {
	struct {
		unsigned line_size:8;
		unsigned lines_per_tag:8;
		unsigned assoc:8;
		unsigned size_in_kb:8;
	};
	unsigned val;
};

union l2_cache {
	struct {
		unsigned line_size:8;
		unsigned lines_per_tag:4;
		unsigned assoc:4;
		unsigned size_in_kb:16;
	};
	unsigned val;
};

union l3_cache {
	struct {
		unsigned line_size:8;
		unsigned lines_per_tag:4;
		unsigned assoc:4;
		unsigned res:2;
		unsigned size_encoded:14;
	};
	unsigned val;
};

static const unsigned short __cpuinitconst assocs[] = {
	[1] = 1,
	[2] = 2,
	[4] = 4,
	[6] = 8,
	[8] = 16,
	[0xa] = 32,
	[0xb] = 48,
	[0xc] = 64,
	[0xd] = 96,
	[0xe] = 128,
	[0xf] = 0xffff /* fully associative - no way to show this currently */
};
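
/*
 * Example (illustrative): an AMD associativity field of 0x6 decodes to
 * 8-way via assocs[6] == 8; 0xf means fully associative and is mapped to
 * the sentinel 0xffff because CPUID4 has no direct encoding for it here.
 */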
static const unsigned char __cpuinitconst levels[] = { 1, 1, 2, 3 };
static const unsigned char __cpuinitconst types[] = { 1, 2, 3, 3 };

static void __cpuinit
amd_cpuid4(int leaf, union _cpuid4_leaf_eax *eax,
		     union _cpuid4_leaf_ebx *ebx,
		     union _cpuid4_leaf_ecx *ecx)
{
	unsigned dummy;
	unsigned line_size, lines_per_tag, assoc, size_in_kb;
	union l1_cache l1i, l1d;
	union l2_cache l2;
	union l3_cache l3;
	union l1_cache *l1 = &l1d;

	eax->full = 0;
	ebx->full = 0;
	ecx->full = 0;

	cpuid(0x80000005, &dummy, &dummy, &l1d.val, &l1i.val);
	cpuid(0x80000006, &dummy, &dummy, &l2.val, &l3.val);

	switch (leaf) {
	case 1:
		l1 = &l1i;
		/* fall through: leaves 0 and 1 share the L1 decode below */
	case 0:
		if (!l1->val)
			return;
		assoc = assocs[l1->assoc];
		line_size = l1->line_size;
		lines_per_tag = l1->lines_per_tag;
		size_in_kb = l1->size_in_kb;
		break;
	case 2:
		if (!l2.val)
			return;
		assoc = assocs[l2.assoc];
		line_size = l2.line_size;
		lines_per_tag = l2.lines_per_tag;
		/* cpu_data has errata corrections for K7 applied */
		size_in_kb = current_cpu_data.x86_cache_size;
		break;
	case 3:
		if (!l3.val)
			return;
		assoc = assocs[l3.assoc];
		line_size = l3.line_size;
		lines_per_tag = l3.lines_per_tag;
		size_in_kb = l3.size_encoded * 512;
		if (boot_cpu_has(X86_FEATURE_AMD_DCM)) {
			size_in_kb = size_in_kb >> 1;
			assoc = assoc >> 1;
		}
		break;
	default:
		return;
	}

	eax->split.is_self_initializing = 1;
	eax->split.type = types[leaf];
	eax->split.level = levels[leaf];
	eax->split.num_threads_sharing = 0;
	eax->split.num_cores_on_die = current_cpu_data.x86_max_cores - 1;

	if (assoc == 0xffff)
		eax->split.is_fully_associative = 1;
	ebx->split.coherency_line_size = line_size - 1;
	ebx->split.ways_of_associativity = assoc - 1;
	ebx->split.physical_line_partition = lines_per_tag - 1;
	ecx->split.number_of_sets = (size_in_kb * 1024) / line_size /
		(ebx->split.ways_of_associativity + 1) - 1;
}
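
/*
 * Illustrative check of the number_of_sets arithmetic above: for a 512 KB,
 * 16-way L2 with 64 byte lines, (512 * 1024) / 64 / 16 = 512 sets, stored
 * as 511 per the CPUID4 "value minus one" convention used throughout.
 */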
struct _cache_attr {
	struct attribute attr;
	ssize_t (*show)(struct _cpuid4_info *, char *);
	ssize_t (*store)(struct _cpuid4_info *, const char *, size_t count);
};

#ifdef CONFIG_CPU_SUP_AMD
static unsigned int __cpuinit amd_calc_l3_indices(void)
{
	/*
	 * We're called over smp_call_function_single() and therefore
	 * are on the correct cpu.
	 */
	int cpu = smp_processor_id();
	int node = cpu_to_node(cpu);
	struct pci_dev *dev = node_to_k8_nb_misc(node);
	unsigned int sc0, sc1, sc2, sc3;
	u32 val = 0;

	pci_read_config_dword(dev, 0x1C4, &val);

	/* calculate subcache sizes */
	sc0 = !(val & BIT(0));
	sc1 = !(val & BIT(4));
	sc2 = !(val & BIT(8))  + !(val & BIT(9));
	sc3 = !(val & BIT(12)) + !(val & BIT(13));

	return (max(max(max(sc0, sc1), sc2), sc3) << 10) - 1;
}
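
/*
 * Illustrative: with no subcaches disabled (val == 0), sc0 = sc1 = 1 and
 * sc2 = sc3 = 2, so the function returns (2 << 10) - 1 = 2047 usable L3
 * indices.
 */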
static void __cpuinit
amd_check_l3_disable(int index, struct _cpuid4_info_regs *this_leaf)
{
	if (index < 3)
		return;

	if (boot_cpu_data.x86 == 0x11)
		return;

	/* see errata #382 and #388 */
	if ((boot_cpu_data.x86 == 0x10) &&
	    ((boot_cpu_data.x86_model < 0x8) ||
	     (boot_cpu_data.x86_mask  < 0x1)))
		return;

	this_leaf->can_disable = true;
	this_leaf->l3_indices = amd_calc_l3_indices();
}

static ssize_t show_cache_disable(struct _cpuid4_info *this_leaf, char *buf,
				  unsigned int index)
{
	int cpu = cpumask_first(to_cpumask(this_leaf->shared_cpu_map));
	int node = amd_get_nb_id(cpu);
	struct pci_dev *dev = node_to_k8_nb_misc(node);
	unsigned int reg = 0;

	if (!this_leaf->can_disable)
		return -EINVAL;

	if (!dev)
		return -EINVAL;

	pci_read_config_dword(dev, 0x1BC + index * 4, &reg);
	return sprintf(buf, "0x%08x\n", reg);
}

#define SHOW_CACHE_DISABLE(index)					\
static ssize_t								\
show_cache_disable_##index(struct _cpuid4_info *this_leaf, char *buf)	\
{									\
	return show_cache_disable(this_leaf, buf, index);		\
}
SHOW_CACHE_DISABLE(0)
SHOW_CACHE_DISABLE(1)

static ssize_t store_cache_disable(struct _cpuid4_info *this_leaf,
	const char *buf, size_t count, unsigned int index)
{
	int cpu = cpumask_first(to_cpumask(this_leaf->shared_cpu_map));
	int node = amd_get_nb_id(cpu);
	struct pci_dev *dev = node_to_k8_nb_misc(node);
	unsigned long val = 0;

#define SUBCACHE_MASK	(3UL << 20)
#define SUBCACHE_INDEX	0xfff

	if (!this_leaf->can_disable)
		return -EINVAL;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	if (!dev)
		return -EINVAL;

	if (strict_strtoul(buf, 10, &val) < 0)
		return -EINVAL;

	/* do not allow writes outside of allowed bits */
	if ((val & ~(SUBCACHE_MASK | SUBCACHE_INDEX)) ||
	    ((val & SUBCACHE_INDEX) > this_leaf->l3_indices))
		return -EINVAL;

	val |= BIT(30);
	pci_write_config_dword(dev, 0x1BC + index * 4, val);
	/*
	 * We need to WBINVD on a core on the node containing the L3 cache which
	 * indices we disable therefore a simple wbinvd() is not sufficient.
	 */
	wbinvd_on_cpu(cpu);
	pci_write_config_dword(dev, 0x1BC + index * 4, val | BIT(31));
	return count;
}
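
/*
 * Usage sketch (illustrative; the exact sysfs path depends on how the
 * "cache" kobject is parented below): writing a decimal value selects the
 * L3 index to disable, e.g.
 *
 *	echo 12 > /sys/devices/system/cpu/cpu0/cache/index3/cache_disable_0
 *
 * The value is validated against SUBCACHE_MASK/SUBCACHE_INDEX and
 * l3_indices before the config-space writes above take effect.
 */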
#define STORE_CACHE_DISABLE(index)					\
static ssize_t								\
store_cache_disable_##index(struct _cpuid4_info *this_leaf,		\
			    const char *buf, size_t count)		\
{									\
	return store_cache_disable(this_leaf, buf, count, index);	\
}
STORE_CACHE_DISABLE(0)
STORE_CACHE_DISABLE(1)

static struct _cache_attr cache_disable_0 = __ATTR(cache_disable_0, 0644,
		show_cache_disable_0, store_cache_disable_0);
static struct _cache_attr cache_disable_1 = __ATTR(cache_disable_1, 0644,
		show_cache_disable_1, store_cache_disable_1);

#else	/* CONFIG_CPU_SUP_AMD */
static void __cpuinit
amd_check_l3_disable(int index, struct _cpuid4_info_regs *this_leaf)
{
}
#endif /* CONFIG_CPU_SUP_AMD */

static int
__cpuinit cpuid4_cache_lookup_regs(int index,
				   struct _cpuid4_info_regs *this_leaf)
{
	union _cpuid4_leaf_eax	eax;
	union _cpuid4_leaf_ebx	ebx;
	union _cpuid4_leaf_ecx	ecx;
	unsigned		edx;

	if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD) {
		amd_cpuid4(index, &eax, &ebx, &ecx);
		if (boot_cpu_data.x86 >= 0x10)
			amd_check_l3_disable(index, this_leaf);
	} else {
		cpuid_count(4, index, &eax.full, &ebx.full, &ecx.full, &edx);
	}

	if (eax.split.type == CACHE_TYPE_NULL)
		return -EIO; /* better error ? */

	this_leaf->eax = eax;
	this_leaf->ebx = ebx;
	this_leaf->ecx = ecx;
	this_leaf->size = (ecx.split.number_of_sets          + 1) *
			  (ebx.split.coherency_line_size     + 1) *
			  (ebx.split.physical_line_partition + 1) *
			  (ebx.split.ways_of_associativity   + 1);
	return 0;
}
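
/*
 * Illustrative size computation from the fields above: 64 sets, 64 byte
 * lines, one line per tag and 8 ways are reported as 63/63/0/7, so
 * (63+1) * (63+1) * (0+1) * (7+1) = 32768 bytes = 32 KB.
 */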
static int __cpuinit find_num_cache_leaves(void)
{
	unsigned int		eax, ebx, ecx, edx;
	union _cpuid4_leaf_eax	cache_eax;
	int			i = -1;

	do {
		++i;
		/* Do cpuid(4) loop to find out num_cache_leaves */
		cpuid_count(4, i, &eax, &ebx, &ecx, &edx);
		cache_eax.full = eax;
	} while (cache_eax.split.type != CACHE_TYPE_NULL);
	return i;
}
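
/*
 * Example (illustrative): a CPU with separate L1d and L1i caches plus a
 * unified L2 and L3 returns non-NULL types for subleaves 0..3 and
 * CACHE_TYPE_NULL at subleaf 4, so the loop above yields 4 leaves.
 */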
unsigned int __cpuinit init_intel_cacheinfo(struct cpuinfo_x86 *c)
{
	/* Cache sizes */
	unsigned int trace = 0, l1i = 0, l1d = 0, l2 = 0, l3 = 0;
	unsigned int new_l1d = 0, new_l1i = 0; /* Cache sizes from cpuid(4) */
	unsigned int new_l2 = 0, new_l3 = 0, i; /* Cache sizes from cpuid(4) */
	unsigned int l2_id = 0, l3_id = 0, num_threads_sharing, index_msb;
#ifdef CONFIG_X86_HT
	unsigned int cpu = c->cpu_index;
#endif

	if (c->cpuid_level > 3) {
		static int is_initialized;

		if (is_initialized == 0) {
			/* Init num_cache_leaves from boot CPU */
			num_cache_leaves = find_num_cache_leaves();
			is_initialized++;
		}

		/*
		 * Whenever possible use cpuid(4), deterministic cache
		 * parameters cpuid leaf to find the cache details
		 */
		for (i = 0; i < num_cache_leaves; i++) {
			struct _cpuid4_info_regs this_leaf;
			int retval;

			retval = cpuid4_cache_lookup_regs(i, &this_leaf);
			if (retval >= 0) {
				switch (this_leaf.eax.split.level) {
				case 1:
					if (this_leaf.eax.split.type ==
							CACHE_TYPE_DATA)
						new_l1d = this_leaf.size/1024;
					else if (this_leaf.eax.split.type ==
							CACHE_TYPE_INST)
						new_l1i = this_leaf.size/1024;
					break;
				case 2:
					new_l2 = this_leaf.size/1024;
					num_threads_sharing = 1 + this_leaf.eax.split.num_threads_sharing;
					index_msb = get_count_order(num_threads_sharing);
					l2_id = c->apicid >> index_msb;
					break;
				case 3:
					new_l3 = this_leaf.size/1024;
					num_threads_sharing = 1 + this_leaf.eax.split.num_threads_sharing;
					index_msb = get_count_order(
							num_threads_sharing);
					l3_id = c->apicid >> index_msb;
					break;
				default:
					break;
				}
			}
		}
	}
	/*
	 * Don't use cpuid2 if cpuid4 is supported. For P4, we use cpuid2 for
	 * trace cache
	 */
	if ((num_cache_leaves == 0 || c->x86 == 15) && c->cpuid_level > 1) {
		/* supports eax=2 call */
		int j, n;
		unsigned int regs[4];
		unsigned char *dp = (unsigned char *)regs;
		int only_trace = 0;

		if (num_cache_leaves != 0 && c->x86 == 15)
			only_trace = 1;

		/* Number of times to iterate */
		n = cpuid_eax(2) & 0xFF;

		for (i = 0 ; i < n ; i++) {
			cpuid(2, &regs[0], &regs[1], &regs[2], &regs[3]);

			/* If bit 31 is set, this is an unknown format */
			for (j = 0 ; j < 3 ; j++)
				if (regs[j] & (1 << 31))
					regs[j] = 0;

			/* Byte 0 is level count, not a descriptor */
			for (j = 1 ; j < 16 ; j++) {
				unsigned char des = dp[j];
				unsigned char k = 0;

				/* look up this descriptor in the table */
				while (cache_table[k].descriptor != 0) {
					if (cache_table[k].descriptor == des) {
						if (only_trace && cache_table[k].cache_type != LVL_TRACE)
							break;
						switch (cache_table[k].cache_type) {
						case LVL_1_INST:
							l1i += cache_table[k].size;
							break;
						case LVL_1_DATA:
							l1d += cache_table[k].size;
							break;
						case LVL_2:
							l2 += cache_table[k].size;
							break;
						case LVL_3:
							l3 += cache_table[k].size;
							break;
						case LVL_TRACE:
							trace += cache_table[k].size;
							break;
						}

						break;
					}

					k++;
				}
			}
		}
	}

	if (new_l1d)
		l1d = new_l1d;

	if (new_l1i)
		l1i = new_l1i;

	if (new_l2) {
		l2 = new_l2;
#ifdef CONFIG_X86_HT
		per_cpu(cpu_llc_id, cpu) = l2_id;
#endif
	}

	if (new_l3) {
		l3 = new_l3;
#ifdef CONFIG_X86_HT
		per_cpu(cpu_llc_id, cpu) = l3_id;
#endif
	}

	c->x86_cache_size = l3 ? l3 : (l2 ? l2 : (l1i+l1d));

	return l2;
}
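
/*
 * Illustrative note on the l2_id/l3_id arithmetic above: with two threads
 * sharing the L2 (num_threads_sharing == 2), index_msb =
 * get_count_order(2) = 1, so both siblings' APIC IDs shifted right by one
 * collapse to the same l2_id and hence the same cpu_llc_id.
 */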
#ifdef CONFIG_SYSFS

/* pointer to _cpuid4_info array (for each cache leaf) */
static DEFINE_PER_CPU(struct _cpuid4_info *, ici_cpuid4_info);
#define CPUID4_INFO_IDX(x, y)	(&((per_cpu(ici_cpuid4_info, x))[y]))

#ifdef CONFIG_SMP
static void __cpuinit cache_shared_cpu_map_setup(unsigned int cpu, int index)
{
	struct _cpuid4_info	*this_leaf, *sibling_leaf;
	unsigned long num_threads_sharing;
	int index_msb, i, sibling;
	struct cpuinfo_x86 *c = &cpu_data(cpu);

	if ((index == 3) && (c->x86_vendor == X86_VENDOR_AMD)) {
		for_each_cpu(i, c->llc_shared_map) {
			if (!per_cpu(ici_cpuid4_info, i))
				continue;
			this_leaf = CPUID4_INFO_IDX(i, index);
			for_each_cpu(sibling, c->llc_shared_map) {
				if (!cpu_online(sibling))
					continue;
				set_bit(sibling, this_leaf->shared_cpu_map);
			}
		}
		return;
	}
	this_leaf = CPUID4_INFO_IDX(cpu, index);
	num_threads_sharing = 1 + this_leaf->eax.split.num_threads_sharing;

	if (num_threads_sharing == 1)
		cpumask_set_cpu(cpu, to_cpumask(this_leaf->shared_cpu_map));
	else {
		index_msb = get_count_order(num_threads_sharing);

		for_each_online_cpu(i) {
			if (cpu_data(i).apicid >> index_msb ==
			    c->apicid >> index_msb) {
				cpumask_set_cpu(i,
					to_cpumask(this_leaf->shared_cpu_map));
				if (i != cpu && per_cpu(ici_cpuid4_info, i)) {
					sibling_leaf =
						CPUID4_INFO_IDX(i, index);
					cpumask_set_cpu(cpu, to_cpumask(
						sibling_leaf->shared_cpu_map));
				}
			}
		}
	}
}

static void __cpuinit cache_remove_shared_cpu_map(unsigned int cpu, int index)
{
	struct _cpuid4_info	*this_leaf, *sibling_leaf;
	int sibling;

	this_leaf = CPUID4_INFO_IDX(cpu, index);
	for_each_cpu(sibling, to_cpumask(this_leaf->shared_cpu_map)) {
		sibling_leaf = CPUID4_INFO_IDX(sibling, index);
		cpumask_clear_cpu(cpu,
				  to_cpumask(sibling_leaf->shared_cpu_map));
	}
}
#else
static void __cpuinit cache_shared_cpu_map_setup(unsigned int cpu, int index)
{
}

static void __cpuinit cache_remove_shared_cpu_map(unsigned int cpu, int index)
{
}
#endif

static void __cpuinit free_cache_attributes(unsigned int cpu)
{
	int i;

	for (i = 0; i < num_cache_leaves; i++)
		cache_remove_shared_cpu_map(cpu, i);

	kfree(per_cpu(ici_cpuid4_info, cpu));
	per_cpu(ici_cpuid4_info, cpu) = NULL;
}

static int
__cpuinit cpuid4_cache_lookup(int index, struct _cpuid4_info *this_leaf)
{
	struct _cpuid4_info_regs *leaf_regs =
		(struct _cpuid4_info_regs *)this_leaf;

	return cpuid4_cache_lookup_regs(index, leaf_regs);
}

static void __cpuinit get_cpu_leaves(void *_retval)
{
	int j, *retval = _retval, cpu = smp_processor_id();

	/* Do cpuid and store the results */
	for (j = 0; j < num_cache_leaves; j++) {
		struct _cpuid4_info *this_leaf;
		this_leaf = CPUID4_INFO_IDX(cpu, j);
		*retval = cpuid4_cache_lookup(j, this_leaf);
		if (unlikely(*retval < 0)) {
			int i;

			for (i = 0; i < j; i++)
				cache_remove_shared_cpu_map(cpu, i);
			break;
		}
		cache_shared_cpu_map_setup(cpu, j);
	}
}

static int __cpuinit detect_cache_attributes(unsigned int cpu)
{
	int retval;

	if (num_cache_leaves == 0)
		return -ENOENT;

	per_cpu(ici_cpuid4_info, cpu) = kzalloc(
	    sizeof(struct _cpuid4_info) * num_cache_leaves, GFP_KERNEL);
	if (per_cpu(ici_cpuid4_info, cpu) == NULL)
		return -ENOMEM;

	smp_call_function_single(cpu, get_cpu_leaves, &retval, true);
	if (retval) {
		kfree(per_cpu(ici_cpuid4_info, cpu));
		per_cpu(ici_cpuid4_info, cpu) = NULL;
	}

	return retval;
}

#include <linux/kobject.h>
#include <linux/sysfs.h>

extern struct sysdev_class cpu_sysdev_class; /* from drivers/base/cpu.c */

/* pointer to kobject for cpuX/cache */
static DEFINE_PER_CPU(struct kobject *, ici_cache_kobject);

struct _index_kobject {
	struct kobject kobj;
	unsigned int cpu;
	unsigned short index;
};

/* pointer to array of kobjects for cpuX/cache/indexY */
static DEFINE_PER_CPU(struct _index_kobject *, ici_index_kobject);
#define INDEX_KOBJECT_PTR(x, y)	(&((per_cpu(ici_index_kobject, x))[y]))

#define show_one_plus(file_name, object, val)				\
static ssize_t show_##file_name						\
			(struct _cpuid4_info *this_leaf, char *buf)	\
{									\
	return sprintf(buf, "%lu\n", (unsigned long)this_leaf->object + val); \
}

show_one_plus(level, eax.split.level, 0);
show_one_plus(coherency_line_size, ebx.split.coherency_line_size, 1);
show_one_plus(physical_line_partition, ebx.split.physical_line_partition, 1);
show_one_plus(ways_of_associativity, ebx.split.ways_of_associativity, 1);
show_one_plus(number_of_sets, ecx.split.number_of_sets, 1);

static ssize_t show_size(struct _cpuid4_info *this_leaf, char *buf)
{
	return sprintf(buf, "%luK\n", this_leaf->size / 1024);
}

static ssize_t show_shared_cpu_map_func(struct _cpuid4_info *this_leaf,
					int type, char *buf)
{
	ptrdiff_t len = PTR_ALIGN(buf + PAGE_SIZE - 1, PAGE_SIZE) - buf;
	int n = 0;

	if (len > 1) {
		const struct cpumask *mask;

		mask = to_cpumask(this_leaf->shared_cpu_map);
		n = type ?
			cpulist_scnprintf(buf, len-2, mask) :
			cpumask_scnprintf(buf, len-2, mask);
		buf[n++] = '\n';
		buf[n] = '\0';
	}
	return n;
}
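
/*
 * Example output (illustrative): for a leaf shared by CPUs 0 and 1,
 * shared_cpu_map prints a hex mask such as "00000003" while
 * shared_cpu_list prints the range form "0-1".
 */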
static inline ssize_t show_shared_cpu_map(struct _cpuid4_info *leaf, char *buf)
{
	return show_shared_cpu_map_func(leaf, 0, buf);
}

static inline ssize_t show_shared_cpu_list(struct _cpuid4_info *leaf, char *buf)
{
	return show_shared_cpu_map_func(leaf, 1, buf);
}

static ssize_t show_type(struct _cpuid4_info *this_leaf, char *buf)
{
	switch (this_leaf->eax.split.type) {
	case CACHE_TYPE_DATA:
		return sprintf(buf, "Data\n");
	case CACHE_TYPE_INST:
		return sprintf(buf, "Instruction\n");
	case CACHE_TYPE_UNIFIED:
		return sprintf(buf, "Unified\n");
	default:
		return sprintf(buf, "Unknown\n");
	}
}

#define to_object(k)	container_of(k, struct _index_kobject, kobj)
#define to_attr(a)	container_of(a, struct _cache_attr, attr)

#define define_one_ro(_name) \
static struct _cache_attr _name = \
	__ATTR(_name, 0444, show_##_name, NULL)

define_one_ro(level);
define_one_ro(type);
define_one_ro(coherency_line_size);
define_one_ro(physical_line_partition);
define_one_ro(ways_of_associativity);
define_one_ro(number_of_sets);
define_one_ro(size);
define_one_ro(shared_cpu_map);
define_one_ro(shared_cpu_list);

#define DEFAULT_SYSFS_CACHE_ATTRS	\
	&type.attr,			\
	&level.attr,			\
	&coherency_line_size.attr,	\
	&physical_line_partition.attr,	\
	&ways_of_associativity.attr,	\
	&number_of_sets.attr,		\
	&size.attr,			\
	&shared_cpu_map.attr,		\
	&shared_cpu_list.attr

static struct attribute *default_attrs[] = {
	DEFAULT_SYSFS_CACHE_ATTRS,
	NULL
};

static struct attribute *default_l3_attrs[] = {
	DEFAULT_SYSFS_CACHE_ATTRS,
#ifdef CONFIG_CPU_SUP_AMD
	&cache_disable_0.attr,
	&cache_disable_1.attr,
#endif
	NULL
};

static ssize_t show(struct kobject *kobj, struct attribute *attr, char *buf)
{
	struct _cache_attr *fattr = to_attr(attr);
	struct _index_kobject *this_leaf = to_object(kobj);
	ssize_t ret;

	ret = fattr->show ?
		fattr->show(CPUID4_INFO_IDX(this_leaf->cpu, this_leaf->index),
			buf) :
		0;
	return ret;
}

static ssize_t store(struct kobject *kobj, struct attribute *attr,
		     const char *buf, size_t count)
{
	struct _cache_attr *fattr = to_attr(attr);
	struct _index_kobject *this_leaf = to_object(kobj);
	ssize_t ret;

	ret = fattr->store ?
		fattr->store(CPUID4_INFO_IDX(this_leaf->cpu, this_leaf->index),
			buf, count) :
		0;
	return ret;
}

static struct sysfs_ops sysfs_ops = {
	.show   = show,
	.store  = store,
};

static struct kobj_type ktype_cache = {
	.sysfs_ops	= &sysfs_ops,
	.default_attrs	= default_attrs,
};

static struct kobj_type ktype_percpu_entry = {
	.sysfs_ops	= &sysfs_ops,
};

static void __cpuinit cpuid4_cache_sysfs_exit(unsigned int cpu)
{
	kfree(per_cpu(ici_cache_kobject, cpu));
	kfree(per_cpu(ici_index_kobject, cpu));
	per_cpu(ici_cache_kobject, cpu) = NULL;
	per_cpu(ici_index_kobject, cpu) = NULL;
	free_cache_attributes(cpu);
}

static int __cpuinit cpuid4_cache_sysfs_init(unsigned int cpu)
{
	int err;

	if (num_cache_leaves == 0)
		return -ENOENT;

	err = detect_cache_attributes(cpu);
	if (err)
		return err;

	/* Allocate all required memory */
	per_cpu(ici_cache_kobject, cpu) =
		kzalloc(sizeof(struct kobject), GFP_KERNEL);
	if (unlikely(per_cpu(ici_cache_kobject, cpu) == NULL))
		goto err_out;

	per_cpu(ici_index_kobject, cpu) = kzalloc(
	    sizeof(struct _index_kobject) * num_cache_leaves, GFP_KERNEL);
	if (unlikely(per_cpu(ici_index_kobject, cpu) == NULL))
		goto err_out;

	return 0;

err_out:
	cpuid4_cache_sysfs_exit(cpu);
	return -ENOMEM;
}

static DECLARE_BITMAP(cache_dev_map, NR_CPUS);

/* Add/Remove cache interface for CPU device */
static int __cpuinit cache_add_dev(struct sys_device * sys_dev)
{
	unsigned int cpu = sys_dev->id;
	unsigned long i, j;
	struct _index_kobject *this_object;
	struct _cpuid4_info   *this_leaf;
	int retval;

	retval = cpuid4_cache_sysfs_init(cpu);
	if (unlikely(retval < 0))
		return retval;

	retval = kobject_init_and_add(per_cpu(ici_cache_kobject, cpu),
				      &ktype_percpu_entry,
				      &sys_dev->kobj, "%s", "cache");
	if (retval < 0) {
		cpuid4_cache_sysfs_exit(cpu);
		return retval;
	}

	for (i = 0; i < num_cache_leaves; i++) {
		this_object = INDEX_KOBJECT_PTR(cpu, i);
		this_object->cpu = cpu;
		this_object->index = i;

		this_leaf = CPUID4_INFO_IDX(cpu, i);

		if (this_leaf->can_disable)
			ktype_cache.default_attrs = default_l3_attrs;
		else
			ktype_cache.default_attrs = default_attrs;

		retval = kobject_init_and_add(&(this_object->kobj),
					      &ktype_cache,
					      per_cpu(ici_cache_kobject, cpu),
					      "index%1lu", i);
		if (unlikely(retval)) {
			for (j = 0; j < i; j++)
				kobject_put(&(INDEX_KOBJECT_PTR(cpu, j)->kobj));
			kobject_put(per_cpu(ici_cache_kobject, cpu));
			cpuid4_cache_sysfs_exit(cpu);
			return retval;
		}
		kobject_uevent(&(this_object->kobj), KOBJ_ADD);
	}
	cpumask_set_cpu(cpu, to_cpumask(cache_dev_map));

	kobject_uevent(per_cpu(ici_cache_kobject, cpu), KOBJ_ADD);
	return 0;
}

static void __cpuinit cache_remove_dev(struct sys_device * sys_dev)
{
	unsigned int cpu = sys_dev->id;
	unsigned long i;

	if (per_cpu(ici_cpuid4_info, cpu) == NULL)
		return;
	if (!cpumask_test_cpu(cpu, to_cpumask(cache_dev_map)))
		return;
	cpumask_clear_cpu(cpu, to_cpumask(cache_dev_map));

	for (i = 0; i < num_cache_leaves; i++)
		kobject_put(&(INDEX_KOBJECT_PTR(cpu, i)->kobj));
	kobject_put(per_cpu(ici_cache_kobject, cpu));
	cpuid4_cache_sysfs_exit(cpu);
}

static int __cpuinit cacheinfo_cpu_callback(struct notifier_block *nfb,
					    unsigned long action, void *hcpu)
{
	unsigned int cpu = (unsigned long)hcpu;
	struct sys_device *sys_dev;

	sys_dev = get_cpu_sysdev(cpu);
	switch (action) {
	case CPU_ONLINE:
	case CPU_ONLINE_FROZEN:
		cache_add_dev(sys_dev);
		break;
	case CPU_DEAD:
	case CPU_DEAD_FROZEN:
		cache_remove_dev(sys_dev);
		break;
	}
	return NOTIFY_OK;
}

static struct notifier_block __cpuinitdata cacheinfo_cpu_notifier = {
	.notifier_call = cacheinfo_cpu_callback,
};

static int __cpuinit cache_sysfs_init(void)
{
	int i;

	if (num_cache_leaves == 0)
		return 0;

	for_each_online_cpu(i) {
		int err;
		struct sys_device *sys_dev = get_cpu_sysdev(i);

		err = cache_add_dev(sys_dev);
		if (err)
			return err;
	}
	register_hotcpu_notifier(&cacheinfo_cpu_notifier);
	return 0;
}

device_initcall(cache_sysfs_init);

#endif