cpu_debug.c

/*
 * CPU x86 architecture debug code
 *
 * Copyright(C) 2009 Jaswinder Singh Rajput
 *
 * For licensing details see kernel-base/COPYING
 */

#include <linux/interrupt.h>
#include <linux/compiler.h>
#include <linux/seq_file.h>
#include <linux/debugfs.h>
#include <linux/kprobes.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/percpu.h>
#include <linux/signal.h>
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/types.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/smp.h>

#include <asm/cpu_debug.h>
#include <asm/paravirt.h>
#include <asm/system.h>
#include <asm/traps.h>
#include <asm/apic.h>
#include <asm/desc.h>

static DEFINE_PER_CPU(struct cpu_cpuX_base, cpu_arr[CPU_REG_ALL_BIT]);
static DEFINE_PER_CPU(struct cpu_private *, priv_arr[MAX_CPU_FILES]);
static DEFINE_PER_CPU(unsigned, cpu_modelflag);
static DEFINE_PER_CPU(int, cpu_priv_count);
static DEFINE_PER_CPU(unsigned, cpu_model);

static DEFINE_MUTEX(cpu_debug_lock);

static struct dentry *cpu_debugfs_dir;

static struct cpu_debug_base cpu_base[] = {
        { "mc", CPU_MC },               /* Machine Check */
        { "monitor", CPU_MONITOR },     /* Monitor */
        { "time", CPU_TIME },           /* Time */
        { "pmc", CPU_PMC },             /* Performance Monitor */
        { "platform", CPU_PLATFORM },   /* Platform */
        { "apic", CPU_APIC },           /* APIC */
        { "poweron", CPU_POWERON },     /* Power-on */
        { "control", CPU_CONTROL },     /* Control */
        { "features", CPU_FEATURES },   /* Features control */
        { "lastbranch", CPU_LBRANCH },  /* Last Branch */
        { "bios", CPU_BIOS },           /* BIOS */
        { "freq", CPU_FREQ },           /* Frequency */
        { "mtrr", CPU_MTRR },           /* MTRR */
        { "perf", CPU_PERF },           /* Performance */
        { "cache", CPU_CACHE },         /* Cache */
        { "sysenter", CPU_SYSENTER },   /* Sysenter */
        { "therm", CPU_THERM },         /* Thermal */
        { "misc", CPU_MISC },           /* Miscellaneous */
        { "debug", CPU_DEBUG },         /* Debug */
        { "pat", CPU_PAT },             /* PAT */
        { "vmx", CPU_VMX },             /* VMX */
        { "call", CPU_CALL },           /* System Call */
        { "base", CPU_BASE },           /* BASE Address */
        { "smm", CPU_SMM },             /* System Management Mode */
        { "svm", CPU_SVM },             /* Secure Virtual Machine */
        { "osvm", CPU_OSVM },           /* OS-Visible Workaround */
        { "tss", CPU_TSS },             /* Task State Segment */
        { "cr", CPU_CR },               /* Control Registers */
        { "dt", CPU_DT },               /* Descriptor Table */
        { "registers", CPU_REG_ALL },   /* Select all Registers */
};

static struct cpu_file_base cpu_file[] = {
        { "index", CPU_REG_ALL },       /* index */
        { "value", CPU_REG_ALL },       /* value */
};

/* Intel Registers Range */
static struct cpu_debug_range cpu_intel_range[] = {
        { 0x00000000, 0x00000001, CPU_MC, CPU_INTEL_ALL },
        { 0x00000006, 0x00000007, CPU_MONITOR, CPU_CX_AT_XE },
        { 0x00000010, 0x00000010, CPU_TIME, CPU_INTEL_ALL },
        { 0x00000011, 0x00000013, CPU_PMC, CPU_INTEL_PENTIUM },
        { 0x00000017, 0x00000017, CPU_PLATFORM, CPU_PX_CX_AT_XE },
        { 0x0000001B, 0x0000001B, CPU_APIC, CPU_P6_CX_AT_XE },
        { 0x0000002A, 0x0000002A, CPU_POWERON, CPU_PX_CX_AT_XE },
        { 0x0000002B, 0x0000002B, CPU_POWERON, CPU_INTEL_XEON },
        { 0x0000002C, 0x0000002C, CPU_FREQ, CPU_INTEL_XEON },
        { 0x0000003A, 0x0000003A, CPU_CONTROL, CPU_CX_AT_XE },
        { 0x00000040, 0x00000043, CPU_LBRANCH, CPU_PM_CX_AT_XE },
        { 0x00000044, 0x00000047, CPU_LBRANCH, CPU_PM_CO_AT },
        { 0x00000060, 0x00000063, CPU_LBRANCH, CPU_C2_AT },
        { 0x00000064, 0x00000067, CPU_LBRANCH, CPU_INTEL_ATOM },
        { 0x00000079, 0x00000079, CPU_BIOS, CPU_P6_CX_AT_XE },
        { 0x00000088, 0x0000008A, CPU_CACHE, CPU_INTEL_P6 },
        { 0x0000008B, 0x0000008B, CPU_BIOS, CPU_P6_CX_AT_XE },
        { 0x0000009B, 0x0000009B, CPU_MONITOR, CPU_INTEL_XEON },
        { 0x000000C1, 0x000000C2, CPU_PMC, CPU_P6_CX_AT },
        { 0x000000CD, 0x000000CD, CPU_FREQ, CPU_CX_AT },
        { 0x000000E7, 0x000000E8, CPU_PERF, CPU_CX_AT },
        { 0x000000FE, 0x000000FE, CPU_MTRR, CPU_P6_CX_XE },
        { 0x00000116, 0x00000116, CPU_CACHE, CPU_INTEL_P6 },
        { 0x00000118, 0x00000118, CPU_CACHE, CPU_INTEL_P6 },
        { 0x00000119, 0x00000119, CPU_CACHE, CPU_INTEL_PX },
        { 0x0000011A, 0x0000011B, CPU_CACHE, CPU_INTEL_P6 },
        { 0x0000011E, 0x0000011E, CPU_CACHE, CPU_PX_CX_AT },
        { 0x00000174, 0x00000176, CPU_SYSENTER, CPU_P6_CX_AT_XE },
        { 0x00000179, 0x0000017A, CPU_MC, CPU_PX_CX_AT_XE },
        { 0x0000017B, 0x0000017B, CPU_MC, CPU_P6_XE },
        { 0x00000186, 0x00000187, CPU_PMC, CPU_P6_CX_AT },
        { 0x00000198, 0x00000199, CPU_PERF, CPU_PM_CX_AT_XE },
        { 0x0000019A, 0x0000019A, CPU_TIME, CPU_PM_CX_AT_XE },
        { 0x0000019B, 0x0000019D, CPU_THERM, CPU_PM_CX_AT_XE },
        { 0x000001A0, 0x000001A0, CPU_MISC, CPU_PM_CX_AT_XE },
        { 0x000001C9, 0x000001C9, CPU_LBRANCH, CPU_PM_CX_AT },
        { 0x000001D7, 0x000001D8, CPU_LBRANCH, CPU_INTEL_XEON },
        { 0x000001D9, 0x000001D9, CPU_DEBUG, CPU_CX_AT_XE },
        { 0x000001DA, 0x000001DA, CPU_LBRANCH, CPU_INTEL_XEON },
        { 0x000001DB, 0x000001DB, CPU_LBRANCH, CPU_P6_XE },
        { 0x000001DC, 0x000001DC, CPU_LBRANCH, CPU_INTEL_P6 },
        { 0x000001DD, 0x000001DE, CPU_LBRANCH, CPU_PX_CX_AT_XE },
        { 0x000001E0, 0x000001E0, CPU_LBRANCH, CPU_INTEL_P6 },
        { 0x00000200, 0x0000020F, CPU_MTRR, CPU_P6_CX_XE },
        { 0x00000250, 0x00000250, CPU_MTRR, CPU_P6_CX_XE },
        { 0x00000258, 0x00000259, CPU_MTRR, CPU_P6_CX_XE },
        { 0x00000268, 0x0000026F, CPU_MTRR, CPU_P6_CX_XE },
        { 0x00000277, 0x00000277, CPU_PAT, CPU_C2_AT_XE },
        { 0x000002FF, 0x000002FF, CPU_MTRR, CPU_P6_CX_XE },
        { 0x00000300, 0x00000308, CPU_PMC, CPU_INTEL_XEON },
        { 0x00000309, 0x0000030B, CPU_PMC, CPU_C2_AT_XE },
        { 0x0000030C, 0x00000311, CPU_PMC, CPU_INTEL_XEON },
        { 0x00000345, 0x00000345, CPU_PMC, CPU_C2_AT },
        { 0x00000360, 0x00000371, CPU_PMC, CPU_INTEL_XEON },
        { 0x0000038D, 0x00000390, CPU_PMC, CPU_C2_AT },
        { 0x000003A0, 0x000003BE, CPU_PMC, CPU_INTEL_XEON },
        { 0x000003C0, 0x000003CD, CPU_PMC, CPU_INTEL_XEON },
        { 0x000003E0, 0x000003E1, CPU_PMC, CPU_INTEL_XEON },
        { 0x000003F0, 0x000003F0, CPU_PMC, CPU_INTEL_XEON },
        { 0x000003F1, 0x000003F1, CPU_PMC, CPU_C2_AT_XE },
        { 0x000003F2, 0x000003F2, CPU_PMC, CPU_INTEL_XEON },
        { 0x00000400, 0x00000402, CPU_MC, CPU_PM_CX_AT_XE },
        { 0x00000403, 0x00000403, CPU_MC, CPU_INTEL_XEON },
        { 0x00000404, 0x00000406, CPU_MC, CPU_PM_CX_AT_XE },
        { 0x00000407, 0x00000407, CPU_MC, CPU_INTEL_XEON },
        { 0x00000408, 0x0000040A, CPU_MC, CPU_PM_CX_AT_XE },
        { 0x0000040B, 0x0000040B, CPU_MC, CPU_INTEL_XEON },
        { 0x0000040C, 0x0000040E, CPU_MC, CPU_PM_CX_XE },
        { 0x0000040F, 0x0000040F, CPU_MC, CPU_INTEL_XEON },
        { 0x00000410, 0x00000412, CPU_MC, CPU_PM_CX_AT_XE },
        { 0x00000413, 0x00000417, CPU_MC, CPU_CX_AT_XE },
        { 0x00000480, 0x0000048B, CPU_VMX, CPU_CX_AT_XE },
        { 0x00000600, 0x00000600, CPU_DEBUG, CPU_PM_CX_AT_XE },
        { 0x00000680, 0x0000068F, CPU_LBRANCH, CPU_INTEL_XEON },
        { 0x000006C0, 0x000006CF, CPU_LBRANCH, CPU_INTEL_XEON },
        { 0x000107CC, 0x000107D3, CPU_PMC, CPU_INTEL_XEON_MP },
        { 0xC0000080, 0xC0000080, CPU_FEATURES, CPU_INTEL_XEON },
        { 0xC0000081, 0xC0000082, CPU_CALL, CPU_INTEL_XEON },
        { 0xC0000084, 0xC0000084, CPU_CALL, CPU_INTEL_XEON },
        { 0xC0000100, 0xC0000102, CPU_BASE, CPU_INTEL_XEON },
};

/* AMD Registers Range */
static struct cpu_debug_range cpu_amd_range[] = {
        { 0x00000010, 0x00000010, CPU_TIME, CPU_ALL, },
        { 0x0000001B, 0x0000001B, CPU_APIC, CPU_ALL, },
        { 0x000000FE, 0x000000FE, CPU_MTRR, CPU_ALL, },
        { 0x00000174, 0x00000176, CPU_SYSENTER, CPU_ALL, },
        { 0x00000179, 0x0000017A, CPU_MC, CPU_ALL, },
        { 0x0000017B, 0x0000017B, CPU_MC, CPU_ALL, },
        { 0x000001D9, 0x000001D9, CPU_DEBUG, CPU_ALL, },
        { 0x000001DB, 0x000001DE, CPU_LBRANCH, CPU_ALL, },
        { 0x00000200, 0x0000020F, CPU_MTRR, CPU_ALL, },
        { 0x00000250, 0x00000250, CPU_MTRR, CPU_ALL, },
        { 0x00000258, 0x00000259, CPU_MTRR, CPU_ALL, },
        { 0x00000268, 0x0000026F, CPU_MTRR, CPU_ALL, },
        { 0x00000277, 0x00000277, CPU_PAT, CPU_ALL, },
        { 0x000002FF, 0x000002FF, CPU_MTRR, CPU_ALL, },
        { 0x00000400, 0x00000417, CPU_MC, CPU_ALL, },
        { 0xC0000080, 0xC0000080, CPU_FEATURES, CPU_ALL, },
        { 0xC0000081, 0xC0000084, CPU_CALL, CPU_ALL, },
        { 0xC0000100, 0xC0000102, CPU_BASE, CPU_ALL, },
        { 0xC0000103, 0xC0000103, CPU_TIME, CPU_ALL, },
        { 0xC0000408, 0xC000040A, CPU_MC, CPU_ALL, },
        { 0xc0010000, 0xc0010007, CPU_PMC, CPU_ALL, },
        { 0xc0010010, 0xc0010010, CPU_MTRR, CPU_ALL, },
        { 0xc0010016, 0xc001001A, CPU_MTRR, CPU_ALL, },
        { 0xc001001D, 0xc001001D, CPU_MTRR, CPU_ALL, },
        { 0xc0010030, 0xc0010035, CPU_BIOS, CPU_ALL, },
        { 0xc0010056, 0xc0010056, CPU_SMM, CPU_ALL, },
        { 0xc0010061, 0xc0010063, CPU_SMM, CPU_ALL, },
        { 0xc0010074, 0xc0010074, CPU_MC, CPU_ALL, },
        { 0xc0010111, 0xc0010113, CPU_SMM, CPU_ALL, },
        { 0xc0010114, 0xc0010118, CPU_SVM, CPU_ALL, },
        { 0xc0010119, 0xc001011A, CPU_SMM, CPU_ALL, },
        { 0xc0010140, 0xc0010141, CPU_OSVM, CPU_ALL, },
        { 0xc0010156, 0xc0010156, CPU_SMM, CPU_ALL, },
};
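
/*
 * cpu_model packs (vendor << 16) | (family << 8) | model, as filled in by
 * cpu_init_cpu(); the Intel cases below match family/model pairs such as
 * 0x060F (family 6, model 15).  Anything unrecognized falls back to CPU_NONE.
 */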

static int get_cpu_modelflag(unsigned cpu)
{
        int flag;

        switch (per_cpu(cpu_model, cpu)) {
        /* Intel */
        case 0x0501:
        case 0x0502:
        case 0x0504:
                flag = CPU_INTEL_PENTIUM;
                break;
        case 0x0601:
        case 0x0603:
        case 0x0605:
        case 0x0607:
        case 0x0608:
        case 0x060A:
        case 0x060B:
                flag = CPU_INTEL_P6;
                break;
        case 0x0609:
        case 0x060D:
                flag = CPU_INTEL_PENTIUM_M;
                break;
        case 0x060E:
                flag = CPU_INTEL_CORE;
                break;
        case 0x060F:
        case 0x0617:
                flag = CPU_INTEL_CORE2;
                break;
        case 0x061C:
                flag = CPU_INTEL_ATOM;
                break;
        case 0x0F00:
        case 0x0F01:
        case 0x0F02:
        case 0x0F03:
        case 0x0F04:
                flag = CPU_INTEL_XEON_P4;
                break;
        case 0x0F06:
                flag = CPU_INTEL_XEON_MP;
                break;
        default:
                flag = CPU_NONE;
                break;
        }

        return flag;
}

static int get_cpu_range_count(unsigned cpu)
{
        int index;

        switch (per_cpu(cpu_model, cpu) >> 16) {
        case X86_VENDOR_INTEL:
                index = ARRAY_SIZE(cpu_intel_range);
                break;
        case X86_VENDOR_AMD:
                index = ARRAY_SIZE(cpu_amd_range);
                break;
        default:
                index = 0;
                break;
        }

        return index;
}

static int is_typeflag_valid(unsigned cpu, unsigned flag)
{
        unsigned vendor, modelflag;
        int i, index;

        /* Standard registers are always valid */
        if (flag >= CPU_TSS)
                return 1;

        modelflag = per_cpu(cpu_modelflag, cpu);
        vendor = per_cpu(cpu_model, cpu) >> 16;
        index = get_cpu_range_count(cpu);

        for (i = 0; i < index; i++) {
                switch (vendor) {
                case X86_VENDOR_INTEL:
                        if ((cpu_intel_range[i].model & modelflag) &&
                            (cpu_intel_range[i].flag & flag))
                                return 1;
                        break;
                case X86_VENDOR_AMD:
                        if (cpu_amd_range[i].flag & flag)
                                return 1;
                        break;
                }
        }

        /* Invalid */
        return 0;
}

static unsigned get_cpu_range(unsigned cpu, unsigned *min, unsigned *max,
                              int index, unsigned flag)
{
        unsigned modelflag;

        modelflag = per_cpu(cpu_modelflag, cpu);
        *max = 0;
        switch (per_cpu(cpu_model, cpu) >> 16) {
        case X86_VENDOR_INTEL:
                if ((cpu_intel_range[index].model & modelflag) &&
                    (cpu_intel_range[index].flag & flag)) {
                        *min = cpu_intel_range[index].min;
                        *max = cpu_intel_range[index].max;
                }
                break;
        case X86_VENDOR_AMD:
                if (cpu_amd_range[index].flag & flag) {
                        *min = cpu_amd_range[index].min;
                        *max = cpu_amd_range[index].max;
                }
                break;
        }

        return *max;
}

/* This function can also be called with seq = NULL for printk */
static void print_cpu_data(struct seq_file *seq, unsigned type,
                           u32 low, u32 high)
{
        struct cpu_private *priv;
        u64 val = high;

        if (seq) {
                priv = seq->private;
                if (priv->file) {
                        val = (val << 32) | low;
                        seq_printf(seq, "0x%llx\n", val);
                } else
                        seq_printf(seq, " %08x: %08x_%08x\n",
                                   type, high, low);
        } else
                printk(KERN_INFO " %08x: %08x_%08x\n", type, high, low);
}
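
/*
 * print_msr() works in two modes: for a per-MSR "value" file (priv->file
 * set) it reads just priv->reg on priv->cpu; otherwise it walks every MSR
 * range matching the requested flag and dumps each register that reads
 * cleanly.
 */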

/* This function can also be called with seq = NULL for printk */
static void print_msr(struct seq_file *seq, unsigned cpu, unsigned flag)
{
        unsigned msr, msr_min, msr_max;
        struct cpu_private *priv;
        u32 low, high;
        int i, range;

        if (seq) {
                priv = seq->private;
                if (priv->file) {
                        if (!rdmsr_safe_on_cpu(priv->cpu, priv->reg,
                                               &low, &high))
                                print_cpu_data(seq, priv->reg, low, high);
                        return;
                }
        }

        range = get_cpu_range_count(cpu);

        for (i = 0; i < range; i++) {
                if (!get_cpu_range(cpu, &msr_min, &msr_max, i, flag))
                        continue;

                for (msr = msr_min; msr <= msr_max; msr++) {
                        if (rdmsr_safe_on_cpu(cpu, msr, &low, &high))
                                continue;

                        print_cpu_data(seq, msr, low, high);
                }
        }
}

static void print_tss(void *arg)
{
        struct pt_regs *regs = task_pt_regs(current);
        struct seq_file *seq = arg;
        unsigned int seg;

        seq_printf(seq, " RAX\t: %016lx\n", regs->ax);
        seq_printf(seq, " RBX\t: %016lx\n", regs->bx);
        seq_printf(seq, " RCX\t: %016lx\n", regs->cx);
        seq_printf(seq, " RDX\t: %016lx\n", regs->dx);

        seq_printf(seq, " RSI\t: %016lx\n", regs->si);
        seq_printf(seq, " RDI\t: %016lx\n", regs->di);
        seq_printf(seq, " RBP\t: %016lx\n", regs->bp);
        seq_printf(seq, " ESP\t: %016lx\n", regs->sp);

#ifdef CONFIG_X86_64
        seq_printf(seq, " R08\t: %016lx\n", regs->r8);
        seq_printf(seq, " R09\t: %016lx\n", regs->r9);
        seq_printf(seq, " R10\t: %016lx\n", regs->r10);
        seq_printf(seq, " R11\t: %016lx\n", regs->r11);
        seq_printf(seq, " R12\t: %016lx\n", regs->r12);
        seq_printf(seq, " R13\t: %016lx\n", regs->r13);
        seq_printf(seq, " R14\t: %016lx\n", regs->r14);
        seq_printf(seq, " R15\t: %016lx\n", regs->r15);
#endif

        asm("movl %%cs,%0" : "=r" (seg));
        seq_printf(seq, " CS\t: %04x\n", seg);
        asm("movl %%ds,%0" : "=r" (seg));
        seq_printf(seq, " DS\t: %04x\n", seg);
        seq_printf(seq, " SS\t: %04lx\n", regs->ss & 0xffff);
        asm("movl %%es,%0" : "=r" (seg));
        seq_printf(seq, " ES\t: %04x\n", seg);
        asm("movl %%fs,%0" : "=r" (seg));
        seq_printf(seq, " FS\t: %04x\n", seg);
        asm("movl %%gs,%0" : "=r" (seg));
        seq_printf(seq, " GS\t: %04x\n", seg);

        seq_printf(seq, " EFLAGS\t: %016lx\n", regs->flags);
        seq_printf(seq, " EIP\t: %016lx\n", regs->ip);
}

static void print_cr(void *arg)
{
        struct seq_file *seq = arg;

        seq_printf(seq, " cr0\t: %016lx\n", read_cr0());
        seq_printf(seq, " cr2\t: %016lx\n", read_cr2());
        seq_printf(seq, " cr3\t: %016lx\n", read_cr3());
        seq_printf(seq, " cr4\t: %016lx\n", read_cr4_safe());
#ifdef CONFIG_X86_64
        seq_printf(seq, " cr8\t: %016lx\n", read_cr8());
#endif
}

static void print_desc_ptr(char *str, struct seq_file *seq, struct desc_ptr dt)
{
        seq_printf(seq, " %s\t: %016llx\n", str, (u64)(dt.address | dt.size));
}

static void print_dt(void *seq)
{
        struct desc_ptr dt;
        unsigned long ldt;

        /* IDT */
        store_idt((struct desc_ptr *)&dt);
        print_desc_ptr("IDT", seq, dt);

        /* GDT */
        store_gdt((struct desc_ptr *)&dt);
        print_desc_ptr("GDT", seq, dt);

        /* LDT */
        store_ldt(ldt);
        seq_printf(seq, " LDT\t: %016lx\n", ldt);

        /* TR */
        store_tr(ldt);
        seq_printf(seq, " TR\t: %016lx\n", ldt);
}

static void print_dr(void *arg)
{
        struct seq_file *seq = arg;
        unsigned long dr;
        int i;

        for (i = 0; i < 8; i++) {
                /* Ignore db4, db5 */
                if ((i == 4) || (i == 5))
                        continue;

                get_debugreg(dr, i);
                seq_printf(seq, " dr%d\t: %016lx\n", i, dr);
        }

        seq_printf(seq, "\n MSR\t:\n");
}

static void print_apic(void *arg)
{
        struct seq_file *seq = arg;

#ifdef CONFIG_X86_LOCAL_APIC
        seq_printf(seq, " LAPIC\t:\n");
        seq_printf(seq, " ID\t\t: %08x\n", apic_read(APIC_ID) >> 24);
        seq_printf(seq, " LVR\t\t: %08x\n", apic_read(APIC_LVR));
        seq_printf(seq, " TASKPRI\t: %08x\n", apic_read(APIC_TASKPRI));
        seq_printf(seq, " ARBPRI\t\t: %08x\n", apic_read(APIC_ARBPRI));
        seq_printf(seq, " PROCPRI\t: %08x\n", apic_read(APIC_PROCPRI));
        seq_printf(seq, " LDR\t\t: %08x\n", apic_read(APIC_LDR));
        seq_printf(seq, " DFR\t\t: %08x\n", apic_read(APIC_DFR));
        seq_printf(seq, " SPIV\t\t: %08x\n", apic_read(APIC_SPIV));
        seq_printf(seq, " ISR\t\t: %08x\n", apic_read(APIC_ISR));
        seq_printf(seq, " ESR\t\t: %08x\n", apic_read(APIC_ESR));
        seq_printf(seq, " ICR\t\t: %08x\n", apic_read(APIC_ICR));
        seq_printf(seq, " ICR2\t\t: %08x\n", apic_read(APIC_ICR2));
        seq_printf(seq, " LVTT\t\t: %08x\n", apic_read(APIC_LVTT));
        seq_printf(seq, " LVTTHMR\t: %08x\n", apic_read(APIC_LVTTHMR));
        seq_printf(seq, " LVTPC\t\t: %08x\n", apic_read(APIC_LVTPC));
        seq_printf(seq, " LVT0\t\t: %08x\n", apic_read(APIC_LVT0));
        seq_printf(seq, " LVT1\t\t: %08x\n", apic_read(APIC_LVT1));
        seq_printf(seq, " LVTERR\t\t: %08x\n", apic_read(APIC_LVTERR));
        seq_printf(seq, " TMICT\t\t: %08x\n", apic_read(APIC_TMICT));
        seq_printf(seq, " TMCCT\t\t: %08x\n", apic_read(APIC_TMCCT));
        seq_printf(seq, " TDCR\t\t: %08x\n", apic_read(APIC_TDCR));
#endif /* CONFIG_X86_LOCAL_APIC */

        seq_printf(seq, "\n MSR\t:\n");
}
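
/*
 * seq_file ->show(): TSS, CR and DT are dumped on the target CPU via
 * smp_call_function_single(); the debug and APIC groups also dump their
 * non-MSR state (debug registers, local APIC) when priv->file is
 * CPU_INDEX_BIT, then print their MSR ranges like everything else.
 */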

static int cpu_seq_show(struct seq_file *seq, void *v)
{
        struct cpu_private *priv = seq->private;

        if (priv == NULL)
                return -EINVAL;

        switch (cpu_base[priv->type].flag) {
        case CPU_TSS:
                smp_call_function_single(priv->cpu, print_tss, seq, 1);
                break;
        case CPU_CR:
                smp_call_function_single(priv->cpu, print_cr, seq, 1);
                break;
        case CPU_DT:
                smp_call_function_single(priv->cpu, print_dt, seq, 1);
                break;
        case CPU_DEBUG:
                if (priv->file == CPU_INDEX_BIT)
                        smp_call_function_single(priv->cpu, print_dr, seq, 1);
                print_msr(seq, priv->cpu, cpu_base[priv->type].flag);
                break;
        case CPU_APIC:
                if (priv->file == CPU_INDEX_BIT)
                        smp_call_function_single(priv->cpu, print_apic, seq, 1);
                print_msr(seq, priv->cpu, cpu_base[priv->type].flag);
                break;
        default:
                print_msr(seq, priv->cpu, cpu_base[priv->type].flag);
                break;
        }
        seq_printf(seq, "\n");

        return 0;
}
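
/* Single-shot iterator: the whole dump is produced by one ->show() call. */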

static void *cpu_seq_start(struct seq_file *seq, loff_t *pos)
{
        if (*pos == 0) /* One time is enough ;-) */
                return seq;

        return NULL;
}

static void *cpu_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
        (*pos)++;

        return cpu_seq_start(seq, pos);
}

static void cpu_seq_stop(struct seq_file *seq, void *v)
{
}

static const struct seq_operations cpu_seq_ops = {
        .start = cpu_seq_start,
        .next = cpu_seq_next,
        .stop = cpu_seq_stop,
        .show = cpu_seq_show,
};

static int cpu_seq_open(struct inode *inode, struct file *file)
{
        struct cpu_private *priv = inode->i_private;
        struct seq_file *seq;
        int err;

        err = seq_open(file, &cpu_seq_ops);
        if (!err) {
                seq = file->private_data;
                seq->private = priv;
        }

        return err;
}

static const struct file_operations cpu_fops = {
        .open = cpu_seq_open,
        .read = seq_read,
        .llseek = seq_lseek,
        .release = seq_release,
};
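
/*
 * Allocate the cpu_private behind one debugfs entry.  A zero @file creates
 * the group-level dump file (named after cpu_base[type], under the group
 * directory); a non-zero @file creates the matching cpu_file[] entry under
 * @dentry.  The cpu_arr[type].init flag keeps the CPU_INDEX_BIT case from
 * being created twice for the same CPU and type.
 */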

static int cpu_create_file(unsigned cpu, unsigned type, unsigned reg,
                           unsigned file, struct dentry *dentry)
{
        struct cpu_private *priv = NULL;

        /* Already initialized */
        if (file == CPU_INDEX_BIT)
                if (per_cpu(cpu_arr[type].init, cpu))
                        return 0;

        priv = kzalloc(sizeof(*priv), GFP_KERNEL);
        if (priv == NULL)
                return -ENOMEM;

        priv->cpu = cpu;
        priv->type = type;
        priv->reg = reg;
        priv->file = file;
        mutex_lock(&cpu_debug_lock);
        per_cpu(priv_arr[type], cpu) = priv;
        per_cpu(cpu_priv_count, cpu)++;
        mutex_unlock(&cpu_debug_lock);

        if (file)
                debugfs_create_file(cpu_file[file].name, S_IRUGO,
                                    dentry, (void *)priv, &cpu_fops);
        else {
                debugfs_create_file(cpu_base[type].name, S_IRUGO,
                                    per_cpu(cpu_arr[type].dentry, cpu),
                                    (void *)priv, &cpu_fops);
                mutex_lock(&cpu_debug_lock);
                per_cpu(cpu_arr[type].init, cpu) = 1;
                mutex_unlock(&cpu_debug_lock);
        }

        return 0;
}

static int cpu_init_regfiles(unsigned cpu, unsigned int type, unsigned reg,
                             struct dentry *dentry)
{
        unsigned file;
        int err = 0;

        for (file = 0; file < ARRAY_SIZE(cpu_file); file++) {
                err = cpu_create_file(cpu, type, reg, file, dentry);
                if (err)
                        return err;
        }

        return err;
}

static int cpu_init_msr(unsigned cpu, unsigned type, struct dentry *dentry)
{
        struct dentry *cpu_dentry = NULL;
        unsigned reg, reg_min, reg_max;
        int i, range, err = 0;
        char reg_dir[12];
        u32 low, high;

        range = get_cpu_range_count(cpu);

        for (i = 0; i < range; i++) {
                if (!get_cpu_range(cpu, &reg_min, &reg_max, i,
                                   cpu_base[type].flag))
                        continue;

                for (reg = reg_min; reg <= reg_max; reg++) {
                        if (rdmsr_safe_on_cpu(cpu, reg, &low, &high))
                                continue;

                        sprintf(reg_dir, "0x%x", reg);
                        cpu_dentry = debugfs_create_dir(reg_dir, dentry);
                        err = cpu_init_regfiles(cpu, type, reg, cpu_dentry);
                        if (err)
                                return err;
                }
        }

        return err;
}
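
/*
 * Build the debugfs tree for one CPU: a directory per register group from
 * cpu_base[] (the catch-all "registers" entry is skipped).  MSR-backed
 * groups get a sub-directory per readable MSR via cpu_init_msr(); the
 * remaining groups (TSS, CR, DT, ...) get a single dump file.
 */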

static int cpu_init_allreg(unsigned cpu, struct dentry *dentry)
{
        struct dentry *cpu_dentry = NULL;
        unsigned type;
        int err = 0;

        for (type = 0; type < ARRAY_SIZE(cpu_base) - 1; type++) {
                if (!is_typeflag_valid(cpu, cpu_base[type].flag))
                        continue;
                cpu_dentry = debugfs_create_dir(cpu_base[type].name, dentry);
                per_cpu(cpu_arr[type].dentry, cpu) = cpu_dentry;

                if (type < CPU_TSS_BIT)
                        err = cpu_init_msr(cpu, type, cpu_dentry);
                else
                        err = cpu_create_file(cpu, type, 0, CPU_INDEX_BIT,
                                              cpu_dentry);
                if (err)
                        return err;
        }

        return err;
}
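
/*
 * Per-CPU setup: cache the packed vendor/family/model identifier, derive the
 * model flag, and populate the cpu%d directory.  CPUs without MSR support are
 * skipped; exceeding MAX_CPU_FILES is reported as -ENFILE.
 */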

static int cpu_init_cpu(void)
{
        struct dentry *cpu_dentry = NULL;
        struct cpuinfo_x86 *cpui;
        char cpu_dir[12];
        unsigned cpu;
        int err = 0;

        for (cpu = 0; cpu < nr_cpu_ids; cpu++) {
                cpui = &cpu_data(cpu);
                if (!cpu_has(cpui, X86_FEATURE_MSR))
                        continue;
                per_cpu(cpu_model, cpu) = ((cpui->x86_vendor << 16) |
                                           (cpui->x86 << 8) |
                                           (cpui->x86_model));
                per_cpu(cpu_modelflag, cpu) = get_cpu_modelflag(cpu);

                sprintf(cpu_dir, "cpu%d", cpu);
                cpu_dentry = debugfs_create_dir(cpu_dir, cpu_debugfs_dir);
                err = cpu_init_allreg(cpu, cpu_dentry);

                pr_info("cpu%d(%d) debug files %d\n",
                        cpu, nr_cpu_ids, per_cpu(cpu_priv_count, cpu));
                if (per_cpu(cpu_priv_count, cpu) > MAX_CPU_FILES) {
                        pr_err("Register files count %d exceeds limit %d\n",
                               per_cpu(cpu_priv_count, cpu), MAX_CPU_FILES);
                        per_cpu(cpu_priv_count, cpu) = MAX_CPU_FILES;
                        err = -ENFILE;
                }
                if (err)
                        return err;
        }

        return err;
}

static int __init cpu_debug_init(void)
{
        cpu_debugfs_dir = debugfs_create_dir("cpu", arch_debugfs_dir);

        return cpu_init_cpu();
}

static void __exit cpu_debug_exit(void)
{
        int i, cpu;

        if (cpu_debugfs_dir)
                debugfs_remove_recursive(cpu_debugfs_dir);

        for (cpu = 0; cpu < nr_cpu_ids; cpu++)
                for (i = 0; i < per_cpu(cpu_priv_count, cpu); i++)
                        kfree(per_cpu(priv_arr[i], cpu));
}

module_init(cpu_debug_init);
module_exit(cpu_debug_exit);

MODULE_AUTHOR("Jaswinder Singh Rajput");
MODULE_DESCRIPTION("CPU Debug module");
MODULE_LICENSE("GPL");