cpu_debug.c

/*
 * CPU x86 architecture debug code
 *
 * Copyright(C) 2009 Jaswinder Singh Rajput
 *
 * For licencing details see kernel-base/COPYING
 */

#include <linux/interrupt.h>
#include <linux/compiler.h>
#include <linux/seq_file.h>
#include <linux/debugfs.h>
#include <linux/kprobes.h>
#include <linux/uaccess.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/percpu.h>
#include <linux/signal.h>
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/types.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/smp.h>

#include <asm/cpu_debug.h>
#include <asm/paravirt.h>
#include <asm/system.h>
#include <asm/traps.h>
#include <asm/apic.h>
#include <asm/desc.h>

static DEFINE_PER_CPU(struct cpu_cpuX_base, cpu_arr[CPU_REG_ALL_BIT]);
static DEFINE_PER_CPU(struct cpu_private *, priv_arr[MAX_CPU_FILES]);
static DEFINE_PER_CPU(unsigned, cpu_modelflag);
static DEFINE_PER_CPU(int, cpu_priv_count);
static DEFINE_PER_CPU(unsigned, cpu_model);

static DEFINE_MUTEX(cpu_debug_lock);

static struct dentry *cpu_debugfs_dir;
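
/*
 * Per-register-type debugfs entries: { file name, type flag, writable }.
 * The final "registers" entry aggregates all register groups; only groups
 * marked writable (here just "pmc") accept writes through the "value" file.
 */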
static struct cpu_debug_base cpu_base[] = {
	{ "mc",		CPU_MC,		0 },
	{ "monitor",	CPU_MONITOR,	0 },
	{ "time",	CPU_TIME,	0 },
	{ "pmc",	CPU_PMC,	1 },
	{ "platform",	CPU_PLATFORM,	0 },
	{ "apic",	CPU_APIC,	0 },
	{ "poweron",	CPU_POWERON,	0 },
	{ "control",	CPU_CONTROL,	0 },
	{ "features",	CPU_FEATURES,	0 },
	{ "lastbranch",	CPU_LBRANCH,	0 },
	{ "bios",	CPU_BIOS,	0 },
	{ "freq",	CPU_FREQ,	0 },
	{ "mtrr",	CPU_MTRR,	0 },
	{ "perf",	CPU_PERF,	0 },
	{ "cache",	CPU_CACHE,	0 },
	{ "sysenter",	CPU_SYSENTER,	0 },
	{ "therm",	CPU_THERM,	0 },
	{ "misc",	CPU_MISC,	0 },
	{ "debug",	CPU_DEBUG,	0 },
	{ "pat",	CPU_PAT,	0 },
	{ "vmx",	CPU_VMX,	0 },
	{ "call",	CPU_CALL,	0 },
	{ "base",	CPU_BASE,	0 },
	{ "ver",	CPU_VER,	0 },
	{ "conf",	CPU_CONF,	0 },
	{ "smm",	CPU_SMM,	0 },
	{ "svm",	CPU_SVM,	0 },
	{ "osvm",	CPU_OSVM,	0 },
	{ "tss",	CPU_TSS,	0 },
	{ "cr",		CPU_CR,		0 },
	{ "dt",		CPU_DT,		0 },
	{ "registers",	CPU_REG_ALL,	0 },
};

static struct cpu_file_base cpu_file[] = {
	{ "index",	CPU_REG_ALL,	0 },
	{ "value",	CPU_REG_ALL,	1 },
};

/* Intel Registers Range */
static struct cpu_debug_range cpu_intel_range[] = {
	{ 0x00000000, 0x00000001, CPU_MC, CPU_INTEL_ALL },
	{ 0x00000006, 0x00000007, CPU_MONITOR, CPU_CX_AT_XE },
	{ 0x00000010, 0x00000010, CPU_TIME, CPU_INTEL_ALL },
	{ 0x00000011, 0x00000013, CPU_PMC, CPU_INTEL_PENTIUM },
	{ 0x00000017, 0x00000017, CPU_PLATFORM, CPU_PX_CX_AT_XE },
	{ 0x0000001B, 0x0000001B, CPU_APIC, CPU_P6_CX_AT_XE },
	{ 0x0000002A, 0x0000002A, CPU_POWERON, CPU_PX_CX_AT_XE },
	{ 0x0000002B, 0x0000002B, CPU_POWERON, CPU_INTEL_XEON },
	{ 0x0000002C, 0x0000002C, CPU_FREQ, CPU_INTEL_XEON },
	{ 0x0000003A, 0x0000003A, CPU_CONTROL, CPU_CX_AT_XE },
	{ 0x00000040, 0x00000043, CPU_LBRANCH, CPU_PM_CX_AT_XE },
	{ 0x00000044, 0x00000047, CPU_LBRANCH, CPU_PM_CO_AT },
	{ 0x00000060, 0x00000063, CPU_LBRANCH, CPU_C2_AT },
	{ 0x00000064, 0x00000067, CPU_LBRANCH, CPU_INTEL_ATOM },
	{ 0x00000079, 0x00000079, CPU_BIOS, CPU_P6_CX_AT_XE },
	{ 0x00000088, 0x0000008A, CPU_CACHE, CPU_INTEL_P6 },
	{ 0x0000008B, 0x0000008B, CPU_BIOS, CPU_P6_CX_AT_XE },
	{ 0x0000009B, 0x0000009B, CPU_MONITOR, CPU_INTEL_XEON },
	{ 0x000000C1, 0x000000C2, CPU_PMC, CPU_P6_CX_AT },
	{ 0x000000CD, 0x000000CD, CPU_FREQ, CPU_CX_AT },
	{ 0x000000E7, 0x000000E8, CPU_PERF, CPU_CX_AT },
	{ 0x000000FE, 0x000000FE, CPU_MTRR, CPU_P6_CX_XE },
	{ 0x00000116, 0x00000116, CPU_CACHE, CPU_INTEL_P6 },
	{ 0x00000118, 0x00000118, CPU_CACHE, CPU_INTEL_P6 },
	{ 0x00000119, 0x00000119, CPU_CACHE, CPU_INTEL_PX },
	{ 0x0000011A, 0x0000011B, CPU_CACHE, CPU_INTEL_P6 },
	{ 0x0000011E, 0x0000011E, CPU_CACHE, CPU_PX_CX_AT },
	{ 0x00000174, 0x00000176, CPU_SYSENTER, CPU_P6_CX_AT_XE },
	{ 0x00000179, 0x0000017A, CPU_MC, CPU_PX_CX_AT_XE },
	{ 0x0000017B, 0x0000017B, CPU_MC, CPU_P6_XE },
	{ 0x00000186, 0x00000187, CPU_PMC, CPU_P6_CX_AT },
	{ 0x00000198, 0x00000199, CPU_PERF, CPU_PM_CX_AT_XE },
	{ 0x0000019A, 0x0000019A, CPU_TIME, CPU_PM_CX_AT_XE },
	{ 0x0000019B, 0x0000019D, CPU_THERM, CPU_PM_CX_AT_XE },
	{ 0x000001A0, 0x000001A0, CPU_MISC, CPU_PM_CX_AT_XE },
	{ 0x000001C9, 0x000001C9, CPU_LBRANCH, CPU_PM_CX_AT },
	{ 0x000001D7, 0x000001D8, CPU_LBRANCH, CPU_INTEL_XEON },
	{ 0x000001D9, 0x000001D9, CPU_DEBUG, CPU_CX_AT_XE },
	{ 0x000001DA, 0x000001DA, CPU_LBRANCH, CPU_INTEL_XEON },
	{ 0x000001DB, 0x000001DB, CPU_LBRANCH, CPU_P6_XE },
	{ 0x000001DC, 0x000001DC, CPU_LBRANCH, CPU_INTEL_P6 },
	{ 0x000001DD, 0x000001DE, CPU_LBRANCH, CPU_PX_CX_AT_XE },
	{ 0x000001E0, 0x000001E0, CPU_LBRANCH, CPU_INTEL_P6 },
	{ 0x00000200, 0x0000020F, CPU_MTRR, CPU_P6_CX_XE },
	{ 0x00000250, 0x00000250, CPU_MTRR, CPU_P6_CX_XE },
	{ 0x00000258, 0x00000259, CPU_MTRR, CPU_P6_CX_XE },
	{ 0x00000268, 0x0000026F, CPU_MTRR, CPU_P6_CX_XE },
	{ 0x00000277, 0x00000277, CPU_PAT, CPU_C2_AT_XE },
	{ 0x000002FF, 0x000002FF, CPU_MTRR, CPU_P6_CX_XE },
	{ 0x00000300, 0x00000308, CPU_PMC, CPU_INTEL_XEON },
	{ 0x00000309, 0x0000030B, CPU_PMC, CPU_C2_AT_XE },
	{ 0x0000030C, 0x00000311, CPU_PMC, CPU_INTEL_XEON },
	{ 0x00000345, 0x00000345, CPU_PMC, CPU_C2_AT },
	{ 0x00000360, 0x00000371, CPU_PMC, CPU_INTEL_XEON },
	{ 0x0000038D, 0x00000390, CPU_PMC, CPU_C2_AT },
	{ 0x000003A0, 0x000003BE, CPU_PMC, CPU_INTEL_XEON },
	{ 0x000003C0, 0x000003CD, CPU_PMC, CPU_INTEL_XEON },
	{ 0x000003E0, 0x000003E1, CPU_PMC, CPU_INTEL_XEON },
	{ 0x000003F0, 0x000003F0, CPU_PMC, CPU_INTEL_XEON },
	{ 0x000003F1, 0x000003F1, CPU_PMC, CPU_C2_AT_XE },
	{ 0x000003F2, 0x000003F2, CPU_PMC, CPU_INTEL_XEON },
	{ 0x00000400, 0x00000402, CPU_MC, CPU_PM_CX_AT_XE },
	{ 0x00000403, 0x00000403, CPU_MC, CPU_INTEL_XEON },
	{ 0x00000404, 0x00000406, CPU_MC, CPU_PM_CX_AT_XE },
	{ 0x00000407, 0x00000407, CPU_MC, CPU_INTEL_XEON },
	{ 0x00000408, 0x0000040A, CPU_MC, CPU_PM_CX_AT_XE },
	{ 0x0000040B, 0x0000040B, CPU_MC, CPU_INTEL_XEON },
	{ 0x0000040C, 0x0000040E, CPU_MC, CPU_PM_CX_XE },
	{ 0x0000040F, 0x0000040F, CPU_MC, CPU_INTEL_XEON },
	{ 0x00000410, 0x00000412, CPU_MC, CPU_PM_CX_AT_XE },
	{ 0x00000413, 0x00000417, CPU_MC, CPU_CX_AT_XE },
	{ 0x00000480, 0x0000048B, CPU_VMX, CPU_CX_AT_XE },
	{ 0x00000600, 0x00000600, CPU_DEBUG, CPU_PM_CX_AT_XE },
	{ 0x00000680, 0x0000068F, CPU_LBRANCH, CPU_INTEL_XEON },
	{ 0x000006C0, 0x000006CF, CPU_LBRANCH, CPU_INTEL_XEON },
	{ 0x000107CC, 0x000107D3, CPU_PMC, CPU_INTEL_XEON_MP },
	{ 0xC0000080, 0xC0000080, CPU_FEATURES, CPU_INTEL_XEON },
	{ 0xC0000081, 0xC0000082, CPU_CALL, CPU_INTEL_XEON },
	{ 0xC0000084, 0xC0000084, CPU_CALL, CPU_INTEL_XEON },
	{ 0xC0000100, 0xC0000102, CPU_BASE, CPU_INTEL_XEON },
};

/* AMD Registers Range */
static struct cpu_debug_range cpu_amd_range[] = {
	{ 0x00000000, 0x00000001, CPU_MC, CPU_K10_PLUS },
	{ 0x00000010, 0x00000010, CPU_TIME, CPU_K8_PLUS },
	{ 0x0000001B, 0x0000001B, CPU_APIC, CPU_K8_PLUS },
	{ 0x0000002A, 0x0000002A, CPU_POWERON, CPU_K7_PLUS },
	{ 0x0000008B, 0x0000008B, CPU_VER, CPU_K8_PLUS },
	{ 0x000000FE, 0x000000FE, CPU_MTRR, CPU_K8_PLUS },
	{ 0x00000174, 0x00000176, CPU_SYSENTER, CPU_K8_PLUS },
	{ 0x00000179, 0x0000017B, CPU_MC, CPU_K8_PLUS },
	{ 0x000001D9, 0x000001D9, CPU_DEBUG, CPU_K8_PLUS },
	{ 0x000001DB, 0x000001DE, CPU_LBRANCH, CPU_K8_PLUS },
	{ 0x00000200, 0x0000020F, CPU_MTRR, CPU_K8_PLUS },
	{ 0x00000250, 0x00000250, CPU_MTRR, CPU_K8_PLUS },
	{ 0x00000258, 0x00000259, CPU_MTRR, CPU_K8_PLUS },
	{ 0x00000268, 0x0000026F, CPU_MTRR, CPU_K8_PLUS },
	{ 0x00000277, 0x00000277, CPU_PAT, CPU_K8_PLUS },
	{ 0x000002FF, 0x000002FF, CPU_MTRR, CPU_K8_PLUS },
	{ 0x00000400, 0x00000413, CPU_MC, CPU_K8_PLUS },
	{ 0xC0000080, 0xC0000080, CPU_FEATURES, CPU_AMD_ALL },
	{ 0xC0000081, 0xC0000084, CPU_CALL, CPU_K8_PLUS },
	{ 0xC0000100, 0xC0000102, CPU_BASE, CPU_K8_PLUS },
	{ 0xC0000103, 0xC0000103, CPU_TIME, CPU_K10_PLUS },
	{ 0xC0010000, 0xC0010007, CPU_PMC, CPU_K8_PLUS },
	{ 0xC0010010, 0xC0010010, CPU_CONF, CPU_K7_PLUS },
	{ 0xC0010015, 0xC0010015, CPU_CONF, CPU_K7_PLUS },
	{ 0xC0010016, 0xC001001A, CPU_MTRR, CPU_K8_PLUS },
	{ 0xC001001D, 0xC001001D, CPU_MTRR, CPU_K8_PLUS },
	{ 0xC001001F, 0xC001001F, CPU_CONF, CPU_K8_PLUS },
	{ 0xC0010030, 0xC0010035, CPU_BIOS, CPU_K8_PLUS },
	{ 0xC0010044, 0xC0010048, CPU_MC, CPU_K8_PLUS },
	{ 0xC0010050, 0xC0010056, CPU_SMM, CPU_K0F_PLUS },
	{ 0xC0010058, 0xC0010058, CPU_CONF, CPU_K10_PLUS },
	{ 0xC0010060, 0xC0010060, CPU_CACHE, CPU_AMD_11 },
	{ 0xC0010061, 0xC0010068, CPU_SMM, CPU_K10_PLUS },
	{ 0xC0010069, 0xC001006B, CPU_SMM, CPU_AMD_11 },
	{ 0xC0010070, 0xC0010071, CPU_SMM, CPU_K10_PLUS },
	{ 0xC0010111, 0xC0010113, CPU_SMM, CPU_K8_PLUS },
	{ 0xC0010114, 0xC0010118, CPU_SVM, CPU_K10_PLUS },
	{ 0xC0010140, 0xC0010141, CPU_OSVM, CPU_K10_PLUS },
	{ 0xC0011022, 0xC0011023, CPU_CONF, CPU_K10_PLUS },
};
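
/*
 * Map the packed CPUID signature (family in bits 15-8, model in bits 7-0)
 * to the CPU_* model flags used in the range tables above.
 */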
/* Intel */
static int get_intel_modelflag(unsigned model)
{
	int flag;

	switch (model) {
	case 0x0501:
	case 0x0502:
	case 0x0504:
		flag = CPU_INTEL_PENTIUM;
		break;
	case 0x0601:
	case 0x0603:
	case 0x0605:
	case 0x0607:
	case 0x0608:
	case 0x060A:
	case 0x060B:
		flag = CPU_INTEL_P6;
		break;
	case 0x0609:
	case 0x060D:
		flag = CPU_INTEL_PENTIUM_M;
		break;
	case 0x060E:
		flag = CPU_INTEL_CORE;
		break;
	case 0x060F:
	case 0x0617:
		flag = CPU_INTEL_CORE2;
		break;
	case 0x061C:
		flag = CPU_INTEL_ATOM;
		break;
	case 0x0F00:
	case 0x0F01:
	case 0x0F02:
	case 0x0F03:
	case 0x0F04:
		flag = CPU_INTEL_XEON_P4;
		break;
	case 0x0F06:
		flag = CPU_INTEL_XEON_MP;
		break;
	default:
		flag = CPU_NONE;
		break;
	}

	return flag;
}

/* AMD */
static int get_amd_modelflag(unsigned model)
{
	int flag;

	switch (model >> 8) {
	case 0x6:
		flag = CPU_AMD_K6;
		break;
	case 0x7:
		flag = CPU_AMD_K7;
		break;
	case 0x8:
		flag = CPU_AMD_K8;
		break;
	case 0xf:
		flag = CPU_AMD_0F;
		break;
	case 0x10:
		flag = CPU_AMD_10;
		break;
	case 0x11:
		flag = CPU_AMD_11;
		break;
	default:
		flag = CPU_NONE;
		break;
	}

	return flag;
}

static int get_cpu_modelflag(unsigned cpu)
{
	int flag;

	flag = per_cpu(cpu_model, cpu);

	switch (flag >> 16) {
	case X86_VENDOR_INTEL:
		flag = get_intel_modelflag(flag);
		break;
	case X86_VENDOR_AMD:
		flag = get_amd_modelflag(flag & 0xffff);
		break;
	default:
		flag = CPU_NONE;
		break;
	}

	return flag;
}

static int get_cpu_range_count(unsigned cpu)
{
	int index;

	switch (per_cpu(cpu_model, cpu) >> 16) {
	case X86_VENDOR_INTEL:
		index = ARRAY_SIZE(cpu_intel_range);
		break;
	case X86_VENDOR_AMD:
		index = ARRAY_SIZE(cpu_amd_range);
		break;
	default:
		index = 0;
		break;
	}

	return index;
}
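
/*
 * A register type is valid for this CPU if any entry in the vendor's range
 * table matches both the CPU's model flag and the requested type flag.
 * Types at or above CPU_TSS are not MSR based and are always valid.
 */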
static int is_typeflag_valid(unsigned cpu, unsigned flag)
{
	unsigned vendor, modelflag;
	int i, index;

	/* Standard Registers should be always valid */
	if (flag >= CPU_TSS)
		return 1;

	modelflag = per_cpu(cpu_modelflag, cpu);
	vendor = per_cpu(cpu_model, cpu) >> 16;
	index = get_cpu_range_count(cpu);

	for (i = 0; i < index; i++) {
		switch (vendor) {
		case X86_VENDOR_INTEL:
			if ((cpu_intel_range[i].model & modelflag) &&
			    (cpu_intel_range[i].flag & flag))
				return 1;
			break;
		case X86_VENDOR_AMD:
			if ((cpu_amd_range[i].model & modelflag) &&
			    (cpu_amd_range[i].flag & flag))
				return 1;
			break;
		}
	}

	/* Invalid */
	return 0;
}
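
/*
 * Look up range entry 'index' in the vendor table; if it matches this CPU's
 * model flag and the requested type, return its MSR bounds through *min and
 * *max.  Returns *max, which stays 0 when the entry does not apply.
 */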
static unsigned get_cpu_range(unsigned cpu, unsigned *min, unsigned *max,
			      int index, unsigned flag)
{
	unsigned modelflag;

	modelflag = per_cpu(cpu_modelflag, cpu);
	*max = 0;

	switch (per_cpu(cpu_model, cpu) >> 16) {
	case X86_VENDOR_INTEL:
		if ((cpu_intel_range[index].model & modelflag) &&
		    (cpu_intel_range[index].flag & flag)) {
			*min = cpu_intel_range[index].min;
			*max = cpu_intel_range[index].max;
		}
		break;
	case X86_VENDOR_AMD:
		if ((cpu_amd_range[index].model & modelflag) &&
		    (cpu_amd_range[index].flag & flag)) {
			*min = cpu_amd_range[index].min;
			*max = cpu_amd_range[index].max;
		}
		break;
	}

	return *max;
}

/* This function can also be called with seq = NULL for printk */
static void print_cpu_data(struct seq_file *seq, unsigned type,
			   u32 low, u32 high)
{
	struct cpu_private *priv;
	u64 val = high;

	if (seq) {
		priv = seq->private;
		if (priv->file) {
			val = (val << 32) | low;
			seq_printf(seq, "0x%llx\n", val);
		} else
			seq_printf(seq, " %08x: %08x_%08x\n",
				   type, high, low);
	} else
		printk(KERN_INFO " %08x: %08x_%08x\n", type, high, low);
}

/* This function can also be called with seq = NULL for printk */
static void print_msr(struct seq_file *seq, unsigned cpu, unsigned flag)
{
	unsigned msr, msr_min, msr_max;
	struct cpu_private *priv;
	u32 low, high;
	int i, range;

	if (seq) {
		priv = seq->private;
		if (priv->file) {
			if (!rdmsr_safe_on_cpu(priv->cpu, priv->reg,
					       &low, &high))
				print_cpu_data(seq, priv->reg, low, high);
			return;
		}
	}

	range = get_cpu_range_count(cpu);

	for (i = 0; i < range; i++) {
		if (!get_cpu_range(cpu, &msr_min, &msr_max, i, flag))
			continue;

		for (msr = msr_min; msr <= msr_max; msr++) {
			if (rdmsr_safe_on_cpu(cpu, msr, &low, &high))
				continue;

			print_cpu_data(seq, msr, low, high);
		}
	}
}
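
/*
 * The print_* helpers below run on the target CPU via
 * smp_call_function_single(); 'arg' is the seq_file to print into.
 * print_tss dumps the current task's pt_regs and segment registers.
 */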
static void print_tss(void *arg)
{
	struct pt_regs *regs = task_pt_regs(current);
	struct seq_file *seq = arg;
	unsigned int seg;

	seq_printf(seq, " RAX\t: %016lx\n", regs->ax);
	seq_printf(seq, " RBX\t: %016lx\n", regs->bx);
	seq_printf(seq, " RCX\t: %016lx\n", regs->cx);
	seq_printf(seq, " RDX\t: %016lx\n", regs->dx);

	seq_printf(seq, " RSI\t: %016lx\n", regs->si);
	seq_printf(seq, " RDI\t: %016lx\n", regs->di);
	seq_printf(seq, " RBP\t: %016lx\n", regs->bp);
	seq_printf(seq, " ESP\t: %016lx\n", regs->sp);

#ifdef CONFIG_X86_64
	seq_printf(seq, " R08\t: %016lx\n", regs->r8);
	seq_printf(seq, " R09\t: %016lx\n", regs->r9);
	seq_printf(seq, " R10\t: %016lx\n", regs->r10);
	seq_printf(seq, " R11\t: %016lx\n", regs->r11);
	seq_printf(seq, " R12\t: %016lx\n", regs->r12);
	seq_printf(seq, " R13\t: %016lx\n", regs->r13);
	seq_printf(seq, " R14\t: %016lx\n", regs->r14);
	seq_printf(seq, " R15\t: %016lx\n", regs->r15);
#endif

	asm("movl %%cs,%0" : "=r" (seg));
	seq_printf(seq, " CS\t: %04x\n", seg);
	asm("movl %%ds,%0" : "=r" (seg));
	seq_printf(seq, " DS\t: %04x\n", seg);
	seq_printf(seq, " SS\t: %04lx\n", regs->ss & 0xffff);
	asm("movl %%es,%0" : "=r" (seg));
	seq_printf(seq, " ES\t: %04x\n", seg);
	asm("movl %%fs,%0" : "=r" (seg));
	seq_printf(seq, " FS\t: %04x\n", seg);
	asm("movl %%gs,%0" : "=r" (seg));
	seq_printf(seq, " GS\t: %04x\n", seg);

	seq_printf(seq, " EFLAGS\t: %016lx\n", regs->flags);
	seq_printf(seq, " EIP\t: %016lx\n", regs->ip);
}

static void print_cr(void *arg)
{
	struct seq_file *seq = arg;

	seq_printf(seq, " cr0\t: %016lx\n", read_cr0());
	seq_printf(seq, " cr2\t: %016lx\n", read_cr2());
	seq_printf(seq, " cr3\t: %016lx\n", read_cr3());
	seq_printf(seq, " cr4\t: %016lx\n", read_cr4_safe());
#ifdef CONFIG_X86_64
	seq_printf(seq, " cr8\t: %016lx\n", read_cr8());
#endif
}

static void print_desc_ptr(char *str, struct seq_file *seq, struct desc_ptr dt)
{
	seq_printf(seq, " %s\t: %016llx\n", str, (u64)(dt.address | dt.size));
}
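
/* Dump descriptor-table registers (IDT, GDT, LDT and TR) on the target CPU. */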
static void print_dt(void *seq)
{
	struct desc_ptr dt;
	unsigned long ldt;

	/* IDT */
	store_idt((struct desc_ptr *)&dt);
	print_desc_ptr("IDT", seq, dt);

	/* GDT */
	store_gdt((struct desc_ptr *)&dt);
	print_desc_ptr("GDT", seq, dt);

	/* LDT */
	store_ldt(ldt);
	seq_printf(seq, " LDT\t: %016lx\n", ldt);

	/* TR */
	store_tr(ldt);
	seq_printf(seq, " TR\t: %016lx\n", ldt);
}

static void print_dr(void *arg)
{
	struct seq_file *seq = arg;
	unsigned long dr;
	int i;

	for (i = 0; i < 8; i++) {
		/* Ignore db4, db5 */
		if ((i == 4) || (i == 5))
			continue;

		get_debugreg(dr, i);
		seq_printf(seq, " dr%d\t: %016lx\n", i, dr);
	}

	seq_printf(seq, "\n MSR\t:\n");
}

static void print_apic(void *arg)
{
	struct seq_file *seq = arg;

#ifdef CONFIG_X86_LOCAL_APIC
	seq_printf(seq, " LAPIC\t:\n");
	seq_printf(seq, " ID\t\t: %08x\n", apic_read(APIC_ID) >> 24);
	seq_printf(seq, " LVR\t\t: %08x\n", apic_read(APIC_LVR));
	seq_printf(seq, " TASKPRI\t: %08x\n", apic_read(APIC_TASKPRI));
	seq_printf(seq, " ARBPRI\t\t: %08x\n", apic_read(APIC_ARBPRI));
	seq_printf(seq, " PROCPRI\t: %08x\n", apic_read(APIC_PROCPRI));
	seq_printf(seq, " LDR\t\t: %08x\n", apic_read(APIC_LDR));
	seq_printf(seq, " DFR\t\t: %08x\n", apic_read(APIC_DFR));
	seq_printf(seq, " SPIV\t\t: %08x\n", apic_read(APIC_SPIV));
	seq_printf(seq, " ISR\t\t: %08x\n", apic_read(APIC_ISR));
	seq_printf(seq, " ESR\t\t: %08x\n", apic_read(APIC_ESR));
	seq_printf(seq, " ICR\t\t: %08x\n", apic_read(APIC_ICR));
	seq_printf(seq, " ICR2\t\t: %08x\n", apic_read(APIC_ICR2));
	seq_printf(seq, " LVTT\t\t: %08x\n", apic_read(APIC_LVTT));
	seq_printf(seq, " LVTTHMR\t: %08x\n", apic_read(APIC_LVTTHMR));
	seq_printf(seq, " LVTPC\t\t: %08x\n", apic_read(APIC_LVTPC));
	seq_printf(seq, " LVT0\t\t: %08x\n", apic_read(APIC_LVT0));
	seq_printf(seq, " LVT1\t\t: %08x\n", apic_read(APIC_LVT1));
	seq_printf(seq, " LVTERR\t\t: %08x\n", apic_read(APIC_LVTERR));
	seq_printf(seq, " TMICT\t\t: %08x\n", apic_read(APIC_TMICT));
	seq_printf(seq, " TMCCT\t\t: %08x\n", apic_read(APIC_TMCCT));
	seq_printf(seq, " TDCR\t\t: %08x\n", apic_read(APIC_TDCR));

	if (boot_cpu_has(X86_FEATURE_EXTAPIC)) {
		unsigned int i, v, maxeilvt;

		v = apic_read(APIC_EFEAT);
		maxeilvt = (v >> 16) & 0xff;
		seq_printf(seq, " EFEAT\t\t: %08x\n", v);
		seq_printf(seq, " ECTRL\t\t: %08x\n", apic_read(APIC_ECTRL));

		for (i = 0; i < maxeilvt; i++) {
			v = apic_read(APIC_EILVTn(i));
			seq_printf(seq, " EILVT%d\t\t: %08x\n", i, v);
		}
	}
#endif /* CONFIG_X86_LOCAL_APIC */

	seq_printf(seq, "\n MSR\t:\n");
}

static int cpu_seq_show(struct seq_file *seq, void *v)
{
	struct cpu_private *priv = seq->private;

	if (priv == NULL)
		return -EINVAL;

	switch (cpu_base[priv->type].flag) {
	case CPU_TSS:
		smp_call_function_single(priv->cpu, print_tss, seq, 1);
		break;
	case CPU_CR:
		smp_call_function_single(priv->cpu, print_cr, seq, 1);
		break;
	case CPU_DT:
		smp_call_function_single(priv->cpu, print_dt, seq, 1);
		break;
	case CPU_DEBUG:
		if (priv->file == CPU_INDEX_BIT)
			smp_call_function_single(priv->cpu, print_dr, seq, 1);
		print_msr(seq, priv->cpu, cpu_base[priv->type].flag);
		break;
	case CPU_APIC:
		if (priv->file == CPU_INDEX_BIT)
			smp_call_function_single(priv->cpu, print_apic, seq, 1);
		print_msr(seq, priv->cpu, cpu_base[priv->type].flag);
		break;
	default:
		print_msr(seq, priv->cpu, cpu_base[priv->type].flag);
		break;
	}
	seq_printf(seq, "\n");

	return 0;
}

static void *cpu_seq_start(struct seq_file *seq, loff_t *pos)
{
	if (*pos == 0) /* One time is enough ;-) */
		return seq;

	return NULL;
}

static void *cpu_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	(*pos)++;

	return cpu_seq_start(seq, pos);
}

static void cpu_seq_stop(struct seq_file *seq, void *v)
{
}

static const struct seq_operations cpu_seq_ops = {
	.start		= cpu_seq_start,
	.next		= cpu_seq_next,
	.stop		= cpu_seq_stop,
	.show		= cpu_seq_show,
};

static int cpu_seq_open(struct inode *inode, struct file *file)
{
	struct cpu_private *priv = inode->i_private;
	struct seq_file *seq;
	int err;

	err = seq_open(file, &cpu_seq_ops);
	if (!err) {
		seq = file->private_data;
		seq->private = priv;
	}

	return err;
}
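
/*
 * Write one 64-bit value to the selected MSR on the target CPU; returns
 * -EPERM if the wrmsr faults.
 */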
static int write_msr(struct cpu_private *priv, u64 val)
{
	u32 low, high;

	high = (val >> 32) & 0xffffffff;
	low = val & 0xffffffff;

	if (!wrmsr_safe_on_cpu(priv->cpu, priv->reg, low, high))
		return 0;

	return -EPERM;
}

static int write_cpu_register(struct cpu_private *priv, const char *buf)
{
	int ret = -EPERM;
	u64 val;

	ret = strict_strtoull(buf, 0, &val);
	if (ret < 0)
		return ret;

	/* Supporting only MSRs */
	if (priv->type < CPU_TSS_BIT)
		return write_msr(priv, val);

	return ret;
}

static ssize_t cpu_write(struct file *file, const char __user *ubuf,
			 size_t count, loff_t *off)
{
	struct seq_file *seq = file->private_data;
	struct cpu_private *priv = seq->private;
	char buf[19];

	if ((priv == NULL) || (count >= sizeof(buf)))
		return -EINVAL;

	if (copy_from_user(&buf, ubuf, count))
		return -EFAULT;

	buf[count] = 0;

	if ((cpu_base[priv->type].write) && (cpu_file[priv->file].write))
		if (!write_cpu_register(priv, buf))
			return count;

	return -EACCES;
}

static const struct file_operations cpu_fops = {
	.owner		= THIS_MODULE,
	.open		= cpu_seq_open,
	.read		= seq_read,
	.write		= cpu_write,
	.llseek		= seq_lseek,
	.release	= seq_release,
};
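
/*
 * Allocate a cpu_private handle and create one debugfs file for it.  A
 * non-zero 'file' creates the per-register "value" file under 'dentry';
 * file == CPU_INDEX_BIT creates the type-named file under the register
 * type's directory and marks that type initialized for this CPU.
 */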
static int cpu_create_file(unsigned cpu, unsigned type, unsigned reg,
			   unsigned file, struct dentry *dentry)
{
	struct cpu_private *priv = NULL;

	/* Already initialized */
	if (file == CPU_INDEX_BIT)
		if (per_cpu(cpu_arr[type].init, cpu))
			return 0;

	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
	if (priv == NULL)
		return -ENOMEM;

	priv->cpu = cpu;
	priv->type = type;
	priv->reg = reg;
	priv->file = file;
	mutex_lock(&cpu_debug_lock);
	per_cpu(priv_arr[type], cpu) = priv;
	per_cpu(cpu_priv_count, cpu)++;
	mutex_unlock(&cpu_debug_lock);

	if (file)
		debugfs_create_file(cpu_file[file].name, S_IRUGO,
				    dentry, (void *)priv, &cpu_fops);
	else {
		debugfs_create_file(cpu_base[type].name, S_IRUGO,
				    per_cpu(cpu_arr[type].dentry, cpu),
				    (void *)priv, &cpu_fops);
		mutex_lock(&cpu_debug_lock);
		per_cpu(cpu_arr[type].init, cpu) = 1;
		mutex_unlock(&cpu_debug_lock);
	}

	return 0;
}

static int cpu_init_regfiles(unsigned cpu, unsigned int type, unsigned reg,
			     struct dentry *dentry)
{
	unsigned file;
	int err = 0;

	for (file = 0; file < ARRAY_SIZE(cpu_file); file++) {
		err = cpu_create_file(cpu, type, reg, file, dentry);
		if (err)
			return err;
	}

	return err;
}

static int cpu_init_msr(unsigned cpu, unsigned type, struct dentry *dentry)
{
	struct dentry *cpu_dentry = NULL;
	unsigned reg, reg_min, reg_max;
	int i, range, err = 0;
	char reg_dir[12];
	u32 low, high;

	range = get_cpu_range_count(cpu);

	for (i = 0; i < range; i++) {
		if (!get_cpu_range(cpu, &reg_min, &reg_max, i,
				   cpu_base[type].flag))
			continue;

		for (reg = reg_min; reg <= reg_max; reg++) {
			if (rdmsr_safe_on_cpu(cpu, reg, &low, &high))
				continue;

			sprintf(reg_dir, "0x%x", reg);
			cpu_dentry = debugfs_create_dir(reg_dir, dentry);
			err = cpu_init_regfiles(cpu, type, reg, cpu_dentry);
			if (err)
				return err;
		}
	}

	return err;
}
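
/*
 * Build the per-CPU debugfs tree: one directory per valid register type.
 * MSR types get a subdirectory per readable MSR; the remaining types get a
 * single file.
 */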
static int cpu_init_allreg(unsigned cpu, struct dentry *dentry)
{
	struct dentry *cpu_dentry = NULL;
	unsigned type;
	int err = 0;

	for (type = 0; type < ARRAY_SIZE(cpu_base) - 1; type++) {
		if (!is_typeflag_valid(cpu, cpu_base[type].flag))
			continue;
		cpu_dentry = debugfs_create_dir(cpu_base[type].name, dentry);
		per_cpu(cpu_arr[type].dentry, cpu) = cpu_dentry;

		if (type < CPU_TSS_BIT)
			err = cpu_init_msr(cpu, type, cpu_dentry);
		else
			err = cpu_create_file(cpu, type, 0, CPU_INDEX_BIT,
					      cpu_dentry);
		if (err)
			return err;
	}

	return err;
}
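
/*
 * Create a cpu%d directory for every CPU that supports MSRs, record its
 * packed vendor/family/model signature, and populate the register files.
 */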
static int cpu_init_cpu(void)
{
	struct dentry *cpu_dentry = NULL;
	struct cpuinfo_x86 *cpui;
	char cpu_dir[12];
	unsigned cpu;
	int err = 0;

	for (cpu = 0; cpu < nr_cpu_ids; cpu++) {
		cpui = &cpu_data(cpu);
		if (!cpu_has(cpui, X86_FEATURE_MSR))
			continue;
		per_cpu(cpu_model, cpu) = ((cpui->x86_vendor << 16) |
					   (cpui->x86 << 8) |
					   (cpui->x86_model));
		per_cpu(cpu_modelflag, cpu) = get_cpu_modelflag(cpu);

		sprintf(cpu_dir, "cpu%d", cpu);
		cpu_dentry = debugfs_create_dir(cpu_dir, cpu_debugfs_dir);
		err = cpu_init_allreg(cpu, cpu_dentry);

		pr_info("cpu%d(%d) debug files %d\n",
			cpu, nr_cpu_ids, per_cpu(cpu_priv_count, cpu));
		if (per_cpu(cpu_priv_count, cpu) > MAX_CPU_FILES) {
			pr_err("Register files count %d exceeds limit %d\n",
			       per_cpu(cpu_priv_count, cpu), MAX_CPU_FILES);
			per_cpu(cpu_priv_count, cpu) = MAX_CPU_FILES;
			err = -ENFILE;
		}
		if (err)
			return err;
	}

	return err;
}

static int __init cpu_debug_init(void)
{
	cpu_debugfs_dir = debugfs_create_dir("cpu", arch_debugfs_dir);

	return cpu_init_cpu();
}

static void __exit cpu_debug_exit(void)
{
	int i, cpu;

	if (cpu_debugfs_dir)
		debugfs_remove_recursive(cpu_debugfs_dir);

	for (cpu = 0; cpu < nr_cpu_ids; cpu++)
		for (i = 0; i < per_cpu(cpu_priv_count, cpu); i++)
			kfree(per_cpu(priv_arr[i], cpu));
}

module_init(cpu_debug_init);
module_exit(cpu_debug_exit);

MODULE_AUTHOR("Jaswinder Singh Rajput");
MODULE_DESCRIPTION("CPU Debug module");
MODULE_LICENSE("GPL");