intel_cacheinfo.c
/*
 * Routines to identify caches on Intel CPU.
 *
 * Changes:
 * Venkatesh Pallipadi	: Adding cache identification through cpuid(4)
 * Ashok Raj <ashok.raj@intel.com>: Work with CPU hotplug infrastructure.
 * Andi Kleen / Andreas Herrmann	: CPUID4 emulation on AMD.
 */
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/device.h>
#include <linux/compiler.h>
#include <linux/cpu.h>
#include <linux/sched.h>
#include <linux/pci.h>

#include <asm/processor.h>
#include <linux/smp.h>
#include <asm/k8.h>
#include <asm/smp.h>
#define LVL_1_INST	1
#define LVL_1_DATA	2
#define LVL_2		3
#define LVL_3		4
#define LVL_TRACE	5

struct _cache_table {
	unsigned char descriptor;
	char cache_type;
	short size;
};

#define MB(x)	((x) * 1024)
/* All the cache descriptor types we care about (no TLB entries) */
static const struct _cache_table __cpuinitconst cache_table[] =
{
	{ 0x06, LVL_1_INST, 8 },	/* 4-way set assoc, 32 byte line size */
	{ 0x08, LVL_1_INST, 16 },	/* 4-way set assoc, 32 byte line size */
	{ 0x09, LVL_1_INST, 32 },	/* 4-way set assoc, 64 byte line size */
	{ 0x0a, LVL_1_DATA, 8 },	/* 2 way set assoc, 32 byte line size */
	{ 0x0c, LVL_1_DATA, 16 },	/* 4-way set assoc, 32 byte line size */
	{ 0x0d, LVL_1_DATA, 16 },	/* 4-way set assoc, 64 byte line size */
	{ 0x21, LVL_2,      256 },	/* 8-way set assoc, 64 byte line size */
	{ 0x22, LVL_3,      512 },	/* 4-way set assoc, sectored cache, 64 byte line size */
	{ 0x23, LVL_3,      MB(1) },	/* 8-way set assoc, sectored cache, 64 byte line size */
	{ 0x25, LVL_3,      MB(2) },	/* 8-way set assoc, sectored cache, 64 byte line size */
	{ 0x29, LVL_3,      MB(4) },	/* 8-way set assoc, sectored cache, 64 byte line size */
	{ 0x2c, LVL_1_DATA, 32 },	/* 8-way set assoc, 64 byte line size */
	{ 0x30, LVL_1_INST, 32 },	/* 8-way set assoc, 64 byte line size */
	{ 0x39, LVL_2,      128 },	/* 4-way set assoc, sectored cache, 64 byte line size */
	{ 0x3a, LVL_2,      192 },	/* 6-way set assoc, sectored cache, 64 byte line size */
	{ 0x3b, LVL_2,      128 },	/* 2-way set assoc, sectored cache, 64 byte line size */
	{ 0x3c, LVL_2,      256 },	/* 4-way set assoc, sectored cache, 64 byte line size */
	{ 0x3d, LVL_2,      384 },	/* 6-way set assoc, sectored cache, 64 byte line size */
	{ 0x3e, LVL_2,      512 },	/* 4-way set assoc, sectored cache, 64 byte line size */
	{ 0x3f, LVL_2,      256 },	/* 2-way set assoc, 64 byte line size */
	{ 0x41, LVL_2,      128 },	/* 4-way set assoc, 32 byte line size */
	{ 0x42, LVL_2,      256 },	/* 4-way set assoc, 32 byte line size */
	{ 0x43, LVL_2,      512 },	/* 4-way set assoc, 32 byte line size */
	{ 0x44, LVL_2,      MB(1) },	/* 4-way set assoc, 32 byte line size */
	{ 0x45, LVL_2,      MB(2) },	/* 4-way set assoc, 32 byte line size */
	{ 0x46, LVL_3,      MB(4) },	/* 4-way set assoc, 64 byte line size */
	{ 0x47, LVL_3,      MB(8) },	/* 8-way set assoc, 64 byte line size */
	{ 0x49, LVL_3,      MB(4) },	/* 16-way set assoc, 64 byte line size */
	{ 0x4a, LVL_3,      MB(6) },	/* 12-way set assoc, 64 byte line size */
	{ 0x4b, LVL_3,      MB(8) },	/* 16-way set assoc, 64 byte line size */
	{ 0x4c, LVL_3,      MB(12) },	/* 12-way set assoc, 64 byte line size */
	{ 0x4d, LVL_3,      MB(16) },	/* 16-way set assoc, 64 byte line size */
	{ 0x4e, LVL_2,      MB(6) },	/* 24-way set assoc, 64 byte line size */
	{ 0x60, LVL_1_DATA, 16 },	/* 8-way set assoc, sectored cache, 64 byte line size */
	{ 0x66, LVL_1_DATA, 8 },	/* 4-way set assoc, sectored cache, 64 byte line size */
	{ 0x67, LVL_1_DATA, 16 },	/* 4-way set assoc, sectored cache, 64 byte line size */
	{ 0x68, LVL_1_DATA, 32 },	/* 4-way set assoc, sectored cache, 64 byte line size */
	{ 0x70, LVL_TRACE,  12 },	/* 8-way set assoc */
	{ 0x71, LVL_TRACE,  16 },	/* 8-way set assoc */
	{ 0x72, LVL_TRACE,  32 },	/* 8-way set assoc */
	{ 0x73, LVL_TRACE,  64 },	/* 8-way set assoc */
	{ 0x78, LVL_2,      MB(1) },	/* 4-way set assoc, 64 byte line size */
	{ 0x79, LVL_2,      128 },	/* 8-way set assoc, sectored cache, 64 byte line size */
	{ 0x7a, LVL_2,      256 },	/* 8-way set assoc, sectored cache, 64 byte line size */
	{ 0x7b, LVL_2,      512 },	/* 8-way set assoc, sectored cache, 64 byte line size */
	{ 0x7c, LVL_2,      MB(1) },	/* 8-way set assoc, sectored cache, 64 byte line size */
	{ 0x7d, LVL_2,      MB(2) },	/* 8-way set assoc, 64 byte line size */
	{ 0x7f, LVL_2,      512 },	/* 2-way set assoc, 64 byte line size */
	{ 0x82, LVL_2,      256 },	/* 8-way set assoc, 32 byte line size */
	{ 0x83, LVL_2,      512 },	/* 8-way set assoc, 32 byte line size */
	{ 0x84, LVL_2,      MB(1) },	/* 8-way set assoc, 32 byte line size */
	{ 0x85, LVL_2,      MB(2) },	/* 8-way set assoc, 32 byte line size */
	{ 0x86, LVL_2,      512 },	/* 4-way set assoc, 64 byte line size */
	{ 0x87, LVL_2,      MB(1) },	/* 8-way set assoc, 64 byte line size */
	{ 0xd0, LVL_3,      512 },	/* 4-way set assoc, 64 byte line size */
	{ 0xd1, LVL_3,      MB(1) },	/* 4-way set assoc, 64 byte line size */
	{ 0xd2, LVL_3,      MB(2) },	/* 4-way set assoc, 64 byte line size */
	{ 0xd6, LVL_3,      MB(1) },	/* 8-way set assoc, 64 byte line size */
	{ 0xd7, LVL_3,      MB(2) },	/* 8-way set assoc, 64 byte line size */
	{ 0xd8, LVL_3,      MB(4) },	/* 12-way set assoc, 64 byte line size */
	{ 0xdc, LVL_3,      MB(2) },	/* 12-way set assoc, 64 byte line size */
	{ 0xdd, LVL_3,      MB(4) },	/* 12-way set assoc, 64 byte line size */
	{ 0xde, LVL_3,      MB(8) },	/* 12-way set assoc, 64 byte line size */
	{ 0xe2, LVL_3,      MB(2) },	/* 16-way set assoc, 64 byte line size */
	{ 0xe3, LVL_3,      MB(4) },	/* 16-way set assoc, 64 byte line size */
	{ 0xe4, LVL_3,      MB(8) },	/* 16-way set assoc, 64 byte line size */
	{ 0xea, LVL_3,      MB(12) },	/* 24-way set assoc, 64 byte line size */
	{ 0xeb, LVL_3,      MB(18) },	/* 24-way set assoc, 64 byte line size */
	{ 0xec, LVL_3,      MB(24) },	/* 24-way set assoc, 64 byte line size */
	{ 0x00, 0, 0}
};
enum _cache_type {
	CACHE_TYPE_NULL	= 0,
	CACHE_TYPE_DATA = 1,
	CACHE_TYPE_INST = 2,
	CACHE_TYPE_UNIFIED = 3
};
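
/*
 * These unions mirror the register layout of CPUID leaf 4
 * ("deterministic cache parameters"): EAX, EBX and ECX can be
 * accessed either as raw 32-bit values or via their bitfields.
 */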
union _cpuid4_leaf_eax {
	struct {
		enum _cache_type	type:5;
		unsigned int		level:3;
		unsigned int		is_self_initializing:1;
		unsigned int		is_fully_associative:1;
		unsigned int		reserved:4;
		unsigned int		num_threads_sharing:12;
		unsigned int		num_cores_on_die:6;
	} split;
	u32 full;
};

union _cpuid4_leaf_ebx {
	struct {
		unsigned int		coherency_line_size:12;
		unsigned int		physical_line_partition:10;
		unsigned int		ways_of_associativity:10;
	} split;
	u32 full;
};

union _cpuid4_leaf_ecx {
	struct {
		unsigned int		number_of_sets:32;
	} split;
	u32 full;
};

struct amd_l3_cache {
	struct pci_dev	*dev;
	bool		can_disable;
	unsigned	indices;
	u8		subcaches[4];
};
struct _cpuid4_info {
	union _cpuid4_leaf_eax eax;
	union _cpuid4_leaf_ebx ebx;
	union _cpuid4_leaf_ecx ecx;
	unsigned long size;
	struct amd_l3_cache *l3;
	DECLARE_BITMAP(shared_cpu_map, NR_CPUS);
};

/* subset of above _cpuid4_info w/o shared_cpu_map */
struct _cpuid4_info_regs {
	union _cpuid4_leaf_eax eax;
	union _cpuid4_leaf_ebx ebx;
	union _cpuid4_leaf_ecx ecx;
	unsigned long size;
	struct amd_l3_cache *l3;
};
unsigned short			num_cache_leaves;

/* AMD doesn't have CPUID4. Emulate it here to report the same
   information to the user. This makes some assumptions about the machine:
   L2 not shared, no SMT etc. that are currently true on AMD CPUs.

   In theory the TLBs could be reported as fake type (they are in "dummy").
   Maybe later */
union l1_cache {
	struct {
		unsigned line_size:8;
		unsigned lines_per_tag:8;
		unsigned assoc:8;
		unsigned size_in_kb:8;
	};
	unsigned val;
};

union l2_cache {
	struct {
		unsigned line_size:8;
		unsigned lines_per_tag:4;
		unsigned assoc:4;
		unsigned size_in_kb:16;
	};
	unsigned val;
};

union l3_cache {
	struct {
		unsigned line_size:8;
		unsigned lines_per_tag:4;
		unsigned assoc:4;
		unsigned res:2;
		unsigned size_encoded:14;
	};
	unsigned val;
};
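
/*
 * Map AMD's CPUID 0x80000005/0x80000006 associativity encoding to an
 * actual way count; unlisted encodings are reserved (and an encoding
 * of 0 means the cache is disabled).
 */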
static const unsigned short __cpuinitconst assocs[] = {
	[1] = 1,
	[2] = 2,
	[4] = 4,
	[6] = 8,
	[8] = 16,
	[0xa] = 32,
	[0xb] = 48,
	[0xc] = 64,
	[0xd] = 96,
	[0xe] = 128,
	[0xf] = 0xffff /* fully associative - no way to show this currently */
};

static const unsigned char __cpuinitconst levels[] = { 1, 1, 2, 3 };
static const unsigned char __cpuinitconst types[] = { 1, 2, 3, 3 };
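
/*
 * The emulated leaf index selects the cache: 0 = L1d, 1 = L1i, 2 = L2,
 * 3 = L3.  levels[] and types[] translate that index into the
 * CPUID4-style level and type values filled in below.
 */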
static void __cpuinit
amd_cpuid4(int leaf, union _cpuid4_leaf_eax *eax,
		     union _cpuid4_leaf_ebx *ebx,
		     union _cpuid4_leaf_ecx *ecx)
{
	unsigned dummy;
	unsigned line_size, lines_per_tag, assoc, size_in_kb;
	union l1_cache l1i, l1d;
	union l2_cache l2;
	union l3_cache l3;
	union l1_cache *l1 = &l1d;

	eax->full = 0;
	ebx->full = 0;
	ecx->full = 0;

	cpuid(0x80000005, &dummy, &dummy, &l1d.val, &l1i.val);
	cpuid(0x80000006, &dummy, &dummy, &l2.val, &l3.val);

	switch (leaf) {
	case 1:
		l1 = &l1i;
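		/* fall through - L1i shares the L1 register layout */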
	case 0:
		if (!l1->val)
			return;
		assoc = assocs[l1->assoc];
		line_size = l1->line_size;
		lines_per_tag = l1->lines_per_tag;
		size_in_kb = l1->size_in_kb;
		break;
	case 2:
		if (!l2.val)
			return;
		assoc = assocs[l2.assoc];
		line_size = l2.line_size;
		lines_per_tag = l2.lines_per_tag;
		/* cpu_data has errata corrections for K7 applied */
		size_in_kb = current_cpu_data.x86_cache_size;
		break;
	case 3:
		if (!l3.val)
			return;
		assoc = assocs[l3.assoc];
		line_size = l3.line_size;
		lines_per_tag = l3.lines_per_tag;
		size_in_kb = l3.size_encoded * 512;
		if (boot_cpu_has(X86_FEATURE_AMD_DCM)) {
			size_in_kb = size_in_kb >> 1;
			assoc = assoc >> 1;
		}
		break;
	default:
		return;
	}

	eax->split.is_self_initializing = 1;
	eax->split.type = types[leaf];
	eax->split.level = levels[leaf];
	eax->split.num_threads_sharing = 0;
	eax->split.num_cores_on_die = current_cpu_data.x86_max_cores - 1;

	if (assoc == 0xffff)
		eax->split.is_fully_associative = 1;
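
	/* CPUID4 encodes line size, ways, partitions and sets as (value - 1) */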
	ebx->split.coherency_line_size = line_size - 1;
	ebx->split.ways_of_associativity = assoc - 1;
	ebx->split.physical_line_partition = lines_per_tag - 1;
	ecx->split.number_of_sets = (size_in_kb * 1024) / line_size /
		(ebx->split.ways_of_associativity + 1) - 1;
}

struct _cache_attr {
	struct attribute attr;
	ssize_t (*show)(struct _cpuid4_info *, char *);
	ssize_t (*store)(struct _cpuid4_info *, const char *, size_t count);
};

#ifdef CONFIG_CPU_SUP_AMD

/*
 * L3 cache descriptors
 */
static struct amd_l3_cache **__cpuinitdata l3_caches;
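
/*
 * The L3 on these parts is split into four subcaches.  The register at
 * PCI config offset 0x1C4 reports per-subcache disable bits; from the
 * largest still-enabled subcache we derive how many cache indices the
 * index-disable interface below may address.
 */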
static void __cpuinit amd_calc_l3_indices(struct amd_l3_cache *l3)
{
	unsigned int sc0, sc1, sc2, sc3;
	u32 val = 0;

	pci_read_config_dword(l3->dev, 0x1C4, &val);

	/* calculate subcache sizes */
	l3->subcaches[0] = sc0 = !(val & BIT(0));
	l3->subcaches[1] = sc1 = !(val & BIT(4));
	l3->subcaches[2] = sc2 = !(val & BIT(8))  + !(val & BIT(9));
	l3->subcaches[3] = sc3 = !(val & BIT(12)) + !(val & BIT(13));

	l3->indices = (max(max(max(sc0, sc1), sc2), sc3) << 10) - 1;
}
static struct amd_l3_cache * __cpuinit amd_init_l3_cache(int node)
{
	struct amd_l3_cache *l3;
	struct pci_dev *dev = node_to_k8_nb_misc(node);

	l3 = kzalloc(sizeof(struct amd_l3_cache), GFP_ATOMIC);
	if (!l3) {
		printk(KERN_WARNING "Error allocating L3 struct\n");
		return NULL;
	}

	l3->dev = dev;

	amd_calc_l3_indices(l3);

	return l3;
}
static void __cpuinit
amd_check_l3_disable(int index, struct _cpuid4_info_regs *this_leaf)
{
	int node;

	if (boot_cpu_data.x86 != 0x10)
		return;

	if (index < 3)
		return;

	/* see errata #382 and #388 */
	if (boot_cpu_data.x86_model < 0x8)
		return;

	if ((boot_cpu_data.x86_model == 0x8 ||
	     boot_cpu_data.x86_model == 0x9)
		&&
	     boot_cpu_data.x86_mask < 0x1)
		return;

	/* not in virtualized environments */
	if (num_k8_northbridges == 0)
		return;

	/*
	 * Strictly speaking, the amount in @size below is leaked since it is
	 * never freed but this is done only on shutdown so it doesn't matter.
	 */
	if (!l3_caches) {
		int size = num_k8_northbridges * sizeof(struct amd_l3_cache *);

		l3_caches = kzalloc(size, GFP_ATOMIC);
		if (!l3_caches)
			return;
	}

	node = amd_get_nb_id(smp_processor_id());

	if (!l3_caches[node]) {
		l3_caches[node] = amd_init_l3_cache(node);
		if (l3_caches[node])
			l3_caches[node]->can_disable = true;
	}

	WARN_ON(!l3_caches[node]);

	this_leaf->l3 = l3_caches[node];
}
static ssize_t show_cache_disable(struct _cpuid4_info *this_leaf, char *buf,
				  unsigned int slot)
{
	struct pci_dev *dev;
	unsigned int reg = 0;

	if (!this_leaf->l3 || !this_leaf->l3->can_disable)
		return -EINVAL;

	dev = this_leaf->l3->dev;
	if (!dev)
		return -EINVAL;

	pci_read_config_dword(dev, 0x1BC + slot * 4, &reg);
	return sprintf(buf, "0x%08x\n", reg);
}
#define SHOW_CACHE_DISABLE(slot)					\
static ssize_t								\
show_cache_disable_##slot(struct _cpuid4_info *this_leaf, char *buf)	\
{									\
	return show_cache_disable(this_leaf, buf, slot);		\
}
SHOW_CACHE_DISABLE(0)
SHOW_CACHE_DISABLE(1)
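
/*
 * Disable a given L3 cache index: bit 30 of the register at
 * 0x1BC + slot * 4 requests the disable, bits 21:20 select the
 * subcache, and setting bit 31 after the flush commits it.
 */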
static void amd_l3_disable_index(struct amd_l3_cache *l3, int cpu,
				 unsigned slot, unsigned long idx)
{
	int i;

	idx |= BIT(30);

	/*
	 * disable index in all 4 subcaches
	 */
	for (i = 0; i < 4; i++) {
		u32 reg = idx | (i << 20);

		if (!l3->subcaches[i])
			continue;

		pci_write_config_dword(l3->dev, 0x1BC + slot * 4, reg);

		/*
		 * We need to WBINVD on a core on the node containing the L3
		 * cache whose indices we disable, therefore a simple wbinvd()
		 * is not sufficient.
		 */
		wbinvd_on_cpu(cpu);

		reg |= BIT(31);
		pci_write_config_dword(l3->dev, 0x1BC + slot * 4, reg);
	}
}
static ssize_t store_cache_disable(struct _cpuid4_info *this_leaf,
				   const char *buf, size_t count,
				   unsigned int slot)
{
	struct pci_dev *dev;
	int cpu = cpumask_first(to_cpumask(this_leaf->shared_cpu_map));
	unsigned long val = 0;

#define SUBCACHE_MASK	(3UL << 20)
#define SUBCACHE_INDEX	0xfff

	if (!this_leaf->l3 || !this_leaf->l3->can_disable)
		return -EINVAL;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	dev = this_leaf->l3->dev;
	if (!dev)
		return -EINVAL;

	if (strict_strtoul(buf, 10, &val) < 0)
		return -EINVAL;

	/* do not allow writes outside of allowed bits */
	if ((val & ~(SUBCACHE_MASK | SUBCACHE_INDEX)) ||
	    ((val & SUBCACHE_INDEX) > this_leaf->l3->indices))
		return -EINVAL;

	amd_l3_disable_index(this_leaf->l3, cpu, slot, val);

	return count;
}
#define STORE_CACHE_DISABLE(slot)					\
static ssize_t								\
store_cache_disable_##slot(struct _cpuid4_info *this_leaf,		\
			   const char *buf, size_t count)		\
{									\
	return store_cache_disable(this_leaf, buf, count, slot);	\
}
STORE_CACHE_DISABLE(0)
STORE_CACHE_DISABLE(1)

static struct _cache_attr cache_disable_0 = __ATTR(cache_disable_0, 0644,
		show_cache_disable_0, store_cache_disable_0);
static struct _cache_attr cache_disable_1 = __ATTR(cache_disable_1, 0644,
		show_cache_disable_1, store_cache_disable_1);
#else	/* CONFIG_CPU_SUP_AMD */
static void __cpuinit
amd_check_l3_disable(int index, struct _cpuid4_info_regs *this_leaf)
{
}
#endif /* CONFIG_CPU_SUP_AMD */
static int
__cpuinit cpuid4_cache_lookup_regs(int index,
				   struct _cpuid4_info_regs *this_leaf)
{
	union _cpuid4_leaf_eax	eax;
	union _cpuid4_leaf_ebx	ebx;
	union _cpuid4_leaf_ecx	ecx;
	unsigned		edx;

	if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD) {
		amd_cpuid4(index, &eax, &ebx, &ecx);
		amd_check_l3_disable(index, this_leaf);
	} else {
		cpuid_count(4, index, &eax.full, &ebx.full, &ecx.full, &edx);
	}

	if (eax.split.type == CACHE_TYPE_NULL)
		return -EIO; /* better error ? */

	this_leaf->eax = eax;
	this_leaf->ebx = ebx;
	this_leaf->ecx = ecx;
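	/* all four CPUID4 fields are (value - 1) encoded, hence the "+ 1"s */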
	this_leaf->size = (ecx.split.number_of_sets          + 1) *
			  (ebx.split.coherency_line_size     + 1) *
			  (ebx.split.physical_line_partition + 1) *
			  (ebx.split.ways_of_associativity   + 1);
	return 0;
}
static int __cpuinit find_num_cache_leaves(void)
{
	unsigned int		eax, ebx, ecx, edx;
	union _cpuid4_leaf_eax	cache_eax;
	int			i = -1;

	do {
		++i;
		/* Do cpuid(4) loop to find out num_cache_leaves */
		cpuid_count(4, i, &eax, &ebx, &ecx, &edx);
		cache_eax.full = eax;
	} while (cache_eax.split.type != CACHE_TYPE_NULL);
	return i;
}
unsigned int __cpuinit init_intel_cacheinfo(struct cpuinfo_x86 *c)
{
	/* Cache sizes */
	unsigned int trace = 0, l1i = 0, l1d = 0, l2 = 0, l3 = 0;
	unsigned int new_l1d = 0, new_l1i = 0; /* Cache sizes from cpuid(4) */
	unsigned int new_l2 = 0, new_l3 = 0, i; /* Cache sizes from cpuid(4) */
	unsigned int l2_id = 0, l3_id = 0, num_threads_sharing, index_msb;
#ifdef CONFIG_X86_HT
	unsigned int cpu = c->cpu_index;
#endif

	if (c->cpuid_level > 3) {
		static int is_initialized;

		if (is_initialized == 0) {
			/* Init num_cache_leaves from boot CPU */
			num_cache_leaves = find_num_cache_leaves();
			is_initialized++;
		}

		/*
		 * Whenever possible use cpuid(4), deterministic cache
		 * parameters cpuid leaf to find the cache details
		 */
		for (i = 0; i < num_cache_leaves; i++) {
			struct _cpuid4_info_regs this_leaf;
			int retval;

			retval = cpuid4_cache_lookup_regs(i, &this_leaf);
			if (retval >= 0) {
				switch (this_leaf.eax.split.level) {
				case 1:
					if (this_leaf.eax.split.type ==
							CACHE_TYPE_DATA)
						new_l1d = this_leaf.size/1024;
					else if (this_leaf.eax.split.type ==
							CACHE_TYPE_INST)
						new_l1i = this_leaf.size/1024;
					break;
				case 2:
					new_l2 = this_leaf.size/1024;
					num_threads_sharing = 1 + this_leaf.eax.split.num_threads_sharing;
					index_msb = get_count_order(num_threads_sharing);
					l2_id = c->apicid >> index_msb;
					break;
				case 3:
					new_l3 = this_leaf.size/1024;
					num_threads_sharing = 1 + this_leaf.eax.split.num_threads_sharing;
					index_msb = get_count_order(
							num_threads_sharing);
					l3_id = c->apicid >> index_msb;
					break;
				default:
					break;
				}
			}
		}
	}
	/*
	 * Don't use cpuid2 if cpuid4 is supported. For P4, we use cpuid2 for
	 * trace cache
	 */
	if ((num_cache_leaves == 0 || c->x86 == 15) && c->cpuid_level > 1) {
		/* supports eax=2  call */
		int j, n;
		unsigned int regs[4];
		unsigned char *dp = (unsigned char *)regs;
		int only_trace = 0;

		if (num_cache_leaves != 0 && c->x86 == 15)
			only_trace = 1;

		/* Number of times to iterate */
		n = cpuid_eax(2) & 0xFF;

		for (i = 0 ; i < n ; i++) {
			cpuid(2, &regs[0], &regs[1], &regs[2], &regs[3]);

			/* If bit 31 is set, this is an unknown format */
			for (j = 0 ; j < 3 ; j++)
				if (regs[j] & (1 << 31))
					regs[j] = 0;

			/* Byte 0 is level count, not a descriptor */
			for (j = 1 ; j < 16 ; j++) {
				unsigned char des = dp[j];
				unsigned char k = 0;

				/* look up this descriptor in the table */
				while (cache_table[k].descriptor != 0) {
					if (cache_table[k].descriptor == des) {
						if (only_trace && cache_table[k].cache_type != LVL_TRACE)
							break;

						switch (cache_table[k].cache_type) {
						case LVL_1_INST:
							l1i += cache_table[k].size;
							break;
						case LVL_1_DATA:
							l1d += cache_table[k].size;
							break;
						case LVL_2:
							l2 += cache_table[k].size;
							break;
						case LVL_3:
							l3 += cache_table[k].size;
							break;
						case LVL_TRACE:
							trace += cache_table[k].size;
							break;
						}

						break;
					}

					k++;
				}
			}
		}
	}
	if (new_l1d)
		l1d = new_l1d;

	if (new_l1i)
		l1i = new_l1i;

	if (new_l2) {
		l2 = new_l2;
#ifdef CONFIG_X86_HT
		per_cpu(cpu_llc_id, cpu) = l2_id;
#endif
	}

	if (new_l3) {
		l3 = new_l3;
#ifdef CONFIG_X86_HT
		per_cpu(cpu_llc_id, cpu) = l3_id;
#endif
	}
	c->x86_cache_size = l3 ? l3 : (l2 ? l2 : (l1i+l1d));

	return l2;
}
#ifdef CONFIG_SYSFS

/* pointer to _cpuid4_info array (for each cache leaf) */
static DEFINE_PER_CPU(struct _cpuid4_info *, ici_cpuid4_info);
#define CPUID4_INFO_IDX(x, y)	(&((per_cpu(ici_cpuid4_info, x))[y]))

#ifdef CONFIG_SMP
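/*
 * CPUs sharing a cache have identical APIC IDs above the bits that
 * number the threads within the sharing group, so the map is built by
 * comparing APIC IDs shifted by get_count_order() of the sharing count.
 */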
static void __cpuinit cache_shared_cpu_map_setup(unsigned int cpu, int index)
{
	struct _cpuid4_info	*this_leaf, *sibling_leaf;
	unsigned long num_threads_sharing;
	int index_msb, i, sibling;
	struct cpuinfo_x86 *c = &cpu_data(cpu);

	if ((index == 3) && (c->x86_vendor == X86_VENDOR_AMD)) {
		for_each_cpu(i, c->llc_shared_map) {
			if (!per_cpu(ici_cpuid4_info, i))
				continue;
			this_leaf = CPUID4_INFO_IDX(i, index);
			for_each_cpu(sibling, c->llc_shared_map) {
				if (!cpu_online(sibling))
					continue;
				set_bit(sibling, this_leaf->shared_cpu_map);
			}
		}
		return;
	}
	this_leaf = CPUID4_INFO_IDX(cpu, index);
	num_threads_sharing = 1 + this_leaf->eax.split.num_threads_sharing;

	if (num_threads_sharing == 1)
		cpumask_set_cpu(cpu, to_cpumask(this_leaf->shared_cpu_map));
	else {
		index_msb = get_count_order(num_threads_sharing);

		for_each_online_cpu(i) {
			if (cpu_data(i).apicid >> index_msb ==
			    c->apicid >> index_msb) {
				cpumask_set_cpu(i,
					to_cpumask(this_leaf->shared_cpu_map));
				if (i != cpu && per_cpu(ici_cpuid4_info, i)) {
					sibling_leaf =
						CPUID4_INFO_IDX(i, index);
					cpumask_set_cpu(cpu, to_cpumask(
						sibling_leaf->shared_cpu_map));
				}
			}
		}
	}
}
static void __cpuinit cache_remove_shared_cpu_map(unsigned int cpu, int index)
{
	struct _cpuid4_info	*this_leaf, *sibling_leaf;
	int sibling;

	this_leaf = CPUID4_INFO_IDX(cpu, index);
	for_each_cpu(sibling, to_cpumask(this_leaf->shared_cpu_map)) {
		sibling_leaf = CPUID4_INFO_IDX(sibling, index);
		cpumask_clear_cpu(cpu,
				  to_cpumask(sibling_leaf->shared_cpu_map));
	}
}
#else
static void __cpuinit cache_shared_cpu_map_setup(unsigned int cpu, int index)
{
}

static void __cpuinit cache_remove_shared_cpu_map(unsigned int cpu, int index)
{
}
#endif
static void __cpuinit free_cache_attributes(unsigned int cpu)
{
	int i;

	for (i = 0; i < num_cache_leaves; i++)
		cache_remove_shared_cpu_map(cpu, i);

	kfree(per_cpu(ici_cpuid4_info, cpu)->l3);
	kfree(per_cpu(ici_cpuid4_info, cpu));
	per_cpu(ici_cpuid4_info, cpu) = NULL;
}

static int
__cpuinit cpuid4_cache_lookup(int index, struct _cpuid4_info *this_leaf)
{
	struct _cpuid4_info_regs *leaf_regs =
		(struct _cpuid4_info_regs *)this_leaf;

	return cpuid4_cache_lookup_regs(index, leaf_regs);
}
static void __cpuinit get_cpu_leaves(void *_retval)
{
	int j, *retval = _retval, cpu = smp_processor_id();

	/* Do cpuid and store the results */
	for (j = 0; j < num_cache_leaves; j++) {
		struct _cpuid4_info *this_leaf;
		this_leaf = CPUID4_INFO_IDX(cpu, j);
		*retval = cpuid4_cache_lookup(j, this_leaf);
		if (unlikely(*retval < 0)) {
			int i;

			for (i = 0; i < j; i++)
				cache_remove_shared_cpu_map(cpu, i);
			break;
		}
		cache_shared_cpu_map_setup(cpu, j);
	}
}

static int __cpuinit detect_cache_attributes(unsigned int cpu)
{
	int	retval;

	if (num_cache_leaves == 0)
		return -ENOENT;

	per_cpu(ici_cpuid4_info, cpu) = kzalloc(
	    sizeof(struct _cpuid4_info) * num_cache_leaves, GFP_KERNEL);
	if (per_cpu(ici_cpuid4_info, cpu) == NULL)
		return -ENOMEM;

	smp_call_function_single(cpu, get_cpu_leaves, &retval, true);
	if (retval) {
		kfree(per_cpu(ici_cpuid4_info, cpu));
		per_cpu(ici_cpuid4_info, cpu) = NULL;
	}

	return retval;
}
#include <linux/kobject.h>
#include <linux/sysfs.h>

extern struct sysdev_class cpu_sysdev_class; /* from drivers/base/cpu.c */

/* pointer to kobject for cpuX/cache */
static DEFINE_PER_CPU(struct kobject *, ici_cache_kobject);

struct _index_kobject {
	struct kobject kobj;
	unsigned int cpu;
	unsigned short index;
};

/* pointer to array of kobjects for cpuX/cache/indexY */
static DEFINE_PER_CPU(struct _index_kobject *, ici_index_kobject);
#define INDEX_KOBJECT_PTR(x, y)		(&((per_cpu(ici_index_kobject, x))[y]))

#define show_one_plus(file_name, object, val)				\
static ssize_t show_##file_name						\
			(struct _cpuid4_info *this_leaf, char *buf)	\
{									\
	return sprintf(buf, "%lu\n", (unsigned long)this_leaf->object + val); \
}

show_one_plus(level, eax.split.level, 0);
show_one_plus(coherency_line_size, ebx.split.coherency_line_size, 1);
show_one_plus(physical_line_partition, ebx.split.physical_line_partition, 1);
show_one_plus(ways_of_associativity, ebx.split.ways_of_associativity, 1);
show_one_plus(number_of_sets, ecx.split.number_of_sets, 1);

static ssize_t show_size(struct _cpuid4_info *this_leaf, char *buf)
{
	return sprintf(buf, "%luK\n", this_leaf->size / 1024);
}

static ssize_t show_shared_cpu_map_func(struct _cpuid4_info *this_leaf,
					int type, char *buf)
{
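	/* sysfs buffers are one page; leave room for the trailing "\n\0" */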
	ptrdiff_t len = PTR_ALIGN(buf + PAGE_SIZE - 1, PAGE_SIZE) - buf;
	int n = 0;

	if (len > 1) {
		const struct cpumask *mask;

		mask = to_cpumask(this_leaf->shared_cpu_map);
		n = type ?
			cpulist_scnprintf(buf, len-2, mask) :
			cpumask_scnprintf(buf, len-2, mask);
		buf[n++] = '\n';
		buf[n] = '\0';
	}
	return n;
}
static inline ssize_t show_shared_cpu_map(struct _cpuid4_info *leaf, char *buf)
{
	return show_shared_cpu_map_func(leaf, 0, buf);
}

static inline ssize_t show_shared_cpu_list(struct _cpuid4_info *leaf, char *buf)
{
	return show_shared_cpu_map_func(leaf, 1, buf);
}

static ssize_t show_type(struct _cpuid4_info *this_leaf, char *buf)
{
	switch (this_leaf->eax.split.type) {
	case CACHE_TYPE_DATA:
		return sprintf(buf, "Data\n");
	case CACHE_TYPE_INST:
		return sprintf(buf, "Instruction\n");
	case CACHE_TYPE_UNIFIED:
		return sprintf(buf, "Unified\n");
	default:
		return sprintf(buf, "Unknown\n");
	}
}
#define to_object(k)	container_of(k, struct _index_kobject, kobj)
#define to_attr(a)	container_of(a, struct _cache_attr, attr)

#define define_one_ro(_name) \
static struct _cache_attr _name = \
	__ATTR(_name, 0444, show_##_name, NULL)

define_one_ro(level);
define_one_ro(type);
define_one_ro(coherency_line_size);
define_one_ro(physical_line_partition);
define_one_ro(ways_of_associativity);
define_one_ro(number_of_sets);
define_one_ro(size);
define_one_ro(shared_cpu_map);
define_one_ro(shared_cpu_list);

#define DEFAULT_SYSFS_CACHE_ATTRS	\
	&type.attr,			\
	&level.attr,			\
	&coherency_line_size.attr,	\
	&physical_line_partition.attr,	\
	&ways_of_associativity.attr,	\
	&number_of_sets.attr,		\
	&size.attr,			\
	&shared_cpu_map.attr,		\
	&shared_cpu_list.attr

static struct attribute *default_attrs[] = {
	DEFAULT_SYSFS_CACHE_ATTRS,
	NULL
};

static struct attribute *default_l3_attrs[] = {
	DEFAULT_SYSFS_CACHE_ATTRS,
#ifdef CONFIG_CPU_SUP_AMD
	&cache_disable_0.attr,
	&cache_disable_1.attr,
#endif
	NULL
};
static ssize_t show(struct kobject *kobj, struct attribute *attr, char *buf)
{
	struct _cache_attr *fattr = to_attr(attr);
	struct _index_kobject *this_leaf = to_object(kobj);
	ssize_t ret;

	ret = fattr->show ?
		fattr->show(CPUID4_INFO_IDX(this_leaf->cpu, this_leaf->index),
			buf) :
		0;
	return ret;
}

static ssize_t store(struct kobject *kobj, struct attribute *attr,
		     const char *buf, size_t count)
{
	struct _cache_attr *fattr = to_attr(attr);
	struct _index_kobject *this_leaf = to_object(kobj);
	ssize_t ret;

	ret = fattr->store ?
		fattr->store(CPUID4_INFO_IDX(this_leaf->cpu, this_leaf->index),
			buf, count) :
		0;
	return ret;
}

static const struct sysfs_ops sysfs_ops = {
	.show   = show,
	.store  = store,
};

static struct kobj_type ktype_cache = {
	.sysfs_ops	= &sysfs_ops,
	.default_attrs	= default_attrs,
};

static struct kobj_type ktype_percpu_entry = {
	.sysfs_ops	= &sysfs_ops,
};
static void __cpuinit cpuid4_cache_sysfs_exit(unsigned int cpu)
{
	kfree(per_cpu(ici_cache_kobject, cpu));
	kfree(per_cpu(ici_index_kobject, cpu));
	per_cpu(ici_cache_kobject, cpu) = NULL;
	per_cpu(ici_index_kobject, cpu) = NULL;
	free_cache_attributes(cpu);
}

static int __cpuinit cpuid4_cache_sysfs_init(unsigned int cpu)
{
	int err;

	if (num_cache_leaves == 0)
		return -ENOENT;

	err = detect_cache_attributes(cpu);
	if (err)
		return err;

	/* Allocate all required memory */
	per_cpu(ici_cache_kobject, cpu) =
		kzalloc(sizeof(struct kobject), GFP_KERNEL);
	if (unlikely(per_cpu(ici_cache_kobject, cpu) == NULL))
		goto err_out;

	per_cpu(ici_index_kobject, cpu) = kzalloc(
	    sizeof(struct _index_kobject) * num_cache_leaves, GFP_KERNEL);
	if (unlikely(per_cpu(ici_index_kobject, cpu) == NULL))
		goto err_out;

	return 0;

err_out:
	cpuid4_cache_sysfs_exit(cpu);
	return -ENOMEM;
}
static DECLARE_BITMAP(cache_dev_map, NR_CPUS);

/* Add/Remove cache interface for CPU device */
static int __cpuinit cache_add_dev(struct sys_device * sys_dev)
{
	unsigned int cpu = sys_dev->id;
	unsigned long i, j;
	struct _index_kobject *this_object;
	struct _cpuid4_info   *this_leaf;
	int retval;

	retval = cpuid4_cache_sysfs_init(cpu);
	if (unlikely(retval < 0))
		return retval;

	retval = kobject_init_and_add(per_cpu(ici_cache_kobject, cpu),
				      &ktype_percpu_entry,
				      &sys_dev->kobj, "%s", "cache");
	if (retval < 0) {
		cpuid4_cache_sysfs_exit(cpu);
		return retval;
	}

	for (i = 0; i < num_cache_leaves; i++) {
		this_object = INDEX_KOBJECT_PTR(cpu, i);
		this_object->cpu = cpu;
		this_object->index = i;

		this_leaf = CPUID4_INFO_IDX(cpu, i);

		if (this_leaf->l3 && this_leaf->l3->can_disable)
			ktype_cache.default_attrs = default_l3_attrs;
		else
			ktype_cache.default_attrs = default_attrs;

		retval = kobject_init_and_add(&(this_object->kobj),
					      &ktype_cache,
					      per_cpu(ici_cache_kobject, cpu),
					      "index%1lu", i);
		if (unlikely(retval)) {
			for (j = 0; j < i; j++)
				kobject_put(&(INDEX_KOBJECT_PTR(cpu, j)->kobj));
			kobject_put(per_cpu(ici_cache_kobject, cpu));
			cpuid4_cache_sysfs_exit(cpu);
			return retval;
		}
		kobject_uevent(&(this_object->kobj), KOBJ_ADD);
	}
	cpumask_set_cpu(cpu, to_cpumask(cache_dev_map));

	kobject_uevent(per_cpu(ici_cache_kobject, cpu), KOBJ_ADD);
	return 0;
}
static void __cpuinit cache_remove_dev(struct sys_device * sys_dev)
{
	unsigned int cpu = sys_dev->id;
	unsigned long i;

	if (per_cpu(ici_cpuid4_info, cpu) == NULL)
		return;
	if (!cpumask_test_cpu(cpu, to_cpumask(cache_dev_map)))
		return;
	cpumask_clear_cpu(cpu, to_cpumask(cache_dev_map));

	for (i = 0; i < num_cache_leaves; i++)
		kobject_put(&(INDEX_KOBJECT_PTR(cpu, i)->kobj));
	kobject_put(per_cpu(ici_cache_kobject, cpu));
	cpuid4_cache_sysfs_exit(cpu);
}
static int __cpuinit cacheinfo_cpu_callback(struct notifier_block *nfb,
					    unsigned long action, void *hcpu)
{
	unsigned int cpu = (unsigned long)hcpu;
	struct sys_device *sys_dev;

	sys_dev = get_cpu_sysdev(cpu);
	switch (action) {
	case CPU_ONLINE:
	case CPU_ONLINE_FROZEN:
		cache_add_dev(sys_dev);
		break;
	case CPU_DEAD:
	case CPU_DEAD_FROZEN:
		cache_remove_dev(sys_dev);
		break;
	}
	return NOTIFY_OK;
}

static struct notifier_block __cpuinitdata cacheinfo_cpu_notifier = {
	.notifier_call = cacheinfo_cpu_callback,
};
static int __cpuinit cache_sysfs_init(void)
{
	int i;

	if (num_cache_leaves == 0)
		return 0;

	for_each_online_cpu(i) {
		int err;
		struct sys_device *sys_dev = get_cpu_sysdev(i);

		err = cache_add_dev(sys_dev);
		if (err)
			return err;
	}
	register_hotcpu_notifier(&cacheinfo_cpu_notifier);
	return 0;
}

device_initcall(cache_sysfs_init);

#endif