/* cpu_debug.c */
/*
 * CPU x86 architecture debug code
 *
 * Copyright(C) 2009 Jaswinder Singh Rajput
 *
 * For licensing details see kernel-base/COPYING
 */
  8. #include <linux/interrupt.h>
  9. #include <linux/compiler.h>
  10. #include <linux/seq_file.h>
  11. #include <linux/debugfs.h>
  12. #include <linux/kprobes.h>
  13. #include <linux/kernel.h>
  14. #include <linux/module.h>
  15. #include <linux/percpu.h>
  16. #include <linux/signal.h>
  17. #include <linux/errno.h>
  18. #include <linux/sched.h>
  19. #include <linux/types.h>
  20. #include <linux/init.h>
  21. #include <linux/slab.h>
  22. #include <linux/smp.h>
  23. #include <asm/cpu_debug.h>
  24. #include <asm/system.h>
  25. #include <asm/traps.h>
  26. #include <asm/apic.h>
  27. #include <asm/desc.h>
/* Per-cpu bookkeeping for the debugfs register files. */
static DEFINE_PER_CPU(struct cpu_cpuX_base, cpu_arr[CPU_REG_ALL_BIT]); /* per-class dentry + init flag */
static DEFINE_PER_CPU(struct cpu_private *, priv_arr[MAX_CPU_FILES]); /* private data of each created file */
static DEFINE_PER_CPU(unsigned, cpu_modelflag); /* CPU_INTEL_* class, set by get_cpu_modelflag() */
static DEFINE_PER_CPU(int, cpu_priv_count); /* number of used priv_arr[] slots */
static DEFINE_PER_CPU(unsigned, cpu_model); /* (vendor << 16) | (family << 8) | model */
/* Serializes priv_arr/cpu_priv_count updates and the cpu_arr init flags */
static DEFINE_MUTEX(cpu_debug_lock);
static struct dentry *cpu_debugfs_dir; /* <debugfs>/cpu, parent of all cpuN dirs */
/* Register classes exported via debugfs; .name becomes the directory name. */
static struct cpu_debug_base cpu_base[] = {
	{ "mc",		CPU_MC		},	/* Machine Check	*/
	{ "monitor",	CPU_MONITOR	},	/* Monitor		*/
	{ "time",	CPU_TIME	},	/* Time			*/
	{ "pmc",	CPU_PMC		},	/* Performance Monitor	*/
	{ "platform",	CPU_PLATFORM	},	/* Platform		*/
	{ "apic",	CPU_APIC	},	/* APIC			*/
	{ "poweron",	CPU_POWERON	},	/* Power-on		*/
	{ "control",	CPU_CONTROL	},	/* Control		*/
	{ "features",	CPU_FEATURES	},	/* Features control	*/
	{ "lastbranch",	CPU_LBRANCH	},	/* Last Branch		*/
	{ "bios",	CPU_BIOS	},	/* BIOS			*/
	{ "freq",	CPU_FREQ	},	/* Frequency		*/
	{ "mtrr",	CPU_MTRR	},	/* MTRR			*/
	{ "perf",	CPU_PERF	},	/* Performance		*/
	{ "cache",	CPU_CACHE	},	/* Cache		*/
	{ "sysenter",	CPU_SYSENTER	},	/* Sysenter		*/
	{ "therm",	CPU_THERM	},	/* Thermal		*/
	{ "misc",	CPU_MISC	},	/* Miscellaneous	*/
	{ "debug",	CPU_DEBUG	},	/* Debug		*/
	{ "pat",	CPU_PAT		},	/* PAT			*/
	{ "vmx",	CPU_VMX		},	/* VMX			*/
	{ "call",	CPU_CALL	},	/* System Call		*/
	{ "base",	CPU_BASE	},	/* BASE Address		*/
	{ "smm",	CPU_SMM		},	/* System mgmt mode	*/
	{ "svm",	CPU_SVM		},	/* Secure Virtual Machine */
	{ "osvm",	CPU_OSVM	},	/* OS-Visible Workaround */
	{ "tss",	CPU_TSS		},	/* Task Stack Segment	*/
	{ "cr",		CPU_CR		},	/* Control Registers	*/
	{ "dt",		CPU_DT		},	/* Descriptor Table	*/
	{ "registers",	CPU_REG_ALL	},	/* Select all Registers	*/
};
/* Per-MSR file names created under each 0x<reg> directory. */
static struct cpu_file_base cpu_file[] = {
	{ "index",	CPU_REG_ALL },	/* index */
	{ "value",	CPU_REG_ALL },	/* value */
};
/* Intel Registers Range: { first MSR, last MSR, class flag, model mask } */
static struct cpu_debug_range cpu_intel_range[] = {
	{ 0x00000000, 0x00000001, CPU_MC,	CPU_INTEL_ALL },
	{ 0x00000006, 0x00000007, CPU_MONITOR,	CPU_CX_AT_XE },
	{ 0x00000010, 0x00000010, CPU_TIME,	CPU_INTEL_ALL },
	{ 0x00000011, 0x00000013, CPU_PMC,	CPU_INTEL_PENTIUM },
	{ 0x00000017, 0x00000017, CPU_PLATFORM,	CPU_PX_CX_AT_XE },
	{ 0x0000001B, 0x0000001B, CPU_APIC,	CPU_P6_CX_AT_XE },
	{ 0x0000002A, 0x0000002A, CPU_POWERON,	CPU_PX_CX_AT_XE },
	{ 0x0000002B, 0x0000002B, CPU_POWERON,	CPU_INTEL_XEON },
	{ 0x0000002C, 0x0000002C, CPU_FREQ,	CPU_INTEL_XEON },
	{ 0x0000003A, 0x0000003A, CPU_CONTROL,	CPU_CX_AT_XE },
	{ 0x00000040, 0x00000043, CPU_LBRANCH,	CPU_PM_CX_AT_XE },
	{ 0x00000044, 0x00000047, CPU_LBRANCH,	CPU_PM_CO_AT },
	{ 0x00000060, 0x00000063, CPU_LBRANCH,	CPU_C2_AT },
	{ 0x00000064, 0x00000067, CPU_LBRANCH,	CPU_INTEL_ATOM },
	{ 0x00000079, 0x00000079, CPU_BIOS,	CPU_P6_CX_AT_XE },
	{ 0x00000088, 0x0000008A, CPU_CACHE,	CPU_INTEL_P6 },
	{ 0x0000008B, 0x0000008B, CPU_BIOS,	CPU_P6_CX_AT_XE },
	{ 0x0000009B, 0x0000009B, CPU_MONITOR,	CPU_INTEL_XEON },
	{ 0x000000C1, 0x000000C2, CPU_PMC,	CPU_P6_CX_AT },
	{ 0x000000CD, 0x000000CD, CPU_FREQ,	CPU_CX_AT },
	{ 0x000000E7, 0x000000E8, CPU_PERF,	CPU_CX_AT },
	{ 0x000000FE, 0x000000FE, CPU_MTRR,	CPU_P6_CX_XE },
	{ 0x00000116, 0x00000116, CPU_CACHE,	CPU_INTEL_P6 },
	{ 0x00000118, 0x00000118, CPU_CACHE,	CPU_INTEL_P6 },
	{ 0x00000119, 0x00000119, CPU_CACHE,	CPU_INTEL_PX },
	{ 0x0000011A, 0x0000011B, CPU_CACHE,	CPU_INTEL_P6 },
	{ 0x0000011E, 0x0000011E, CPU_CACHE,	CPU_PX_CX_AT },
	{ 0x00000174, 0x00000176, CPU_SYSENTER,	CPU_P6_CX_AT_XE },
	{ 0x00000179, 0x0000017A, CPU_MC,	CPU_PX_CX_AT_XE },
	{ 0x0000017B, 0x0000017B, CPU_MC,	CPU_P6_XE },
	{ 0x00000186, 0x00000187, CPU_PMC,	CPU_P6_CX_AT },
	{ 0x00000198, 0x00000199, CPU_PERF,	CPU_PM_CX_AT_XE },
	{ 0x0000019A, 0x0000019A, CPU_TIME,	CPU_PM_CX_AT_XE },
	{ 0x0000019B, 0x0000019D, CPU_THERM,	CPU_PM_CX_AT_XE },
	{ 0x000001A0, 0x000001A0, CPU_MISC,	CPU_PM_CX_AT_XE },
	{ 0x000001C9, 0x000001C9, CPU_LBRANCH,	CPU_PM_CX_AT },
	{ 0x000001D7, 0x000001D8, CPU_LBRANCH,	CPU_INTEL_XEON },
	{ 0x000001D9, 0x000001D9, CPU_DEBUG,	CPU_CX_AT_XE },
	{ 0x000001DA, 0x000001DA, CPU_LBRANCH,	CPU_INTEL_XEON },
	{ 0x000001DB, 0x000001DB, CPU_LBRANCH,	CPU_P6_XE },
	{ 0x000001DC, 0x000001DC, CPU_LBRANCH,	CPU_INTEL_P6 },
	{ 0x000001DD, 0x000001DE, CPU_LBRANCH,	CPU_PX_CX_AT_XE },
	{ 0x000001E0, 0x000001E0, CPU_LBRANCH,	CPU_INTEL_P6 },
	{ 0x00000200, 0x0000020F, CPU_MTRR,	CPU_P6_CX_XE },
	{ 0x00000250, 0x00000250, CPU_MTRR,	CPU_P6_CX_XE },
	{ 0x00000258, 0x00000259, CPU_MTRR,	CPU_P6_CX_XE },
	{ 0x00000268, 0x0000026F, CPU_MTRR,	CPU_P6_CX_XE },
	{ 0x00000277, 0x00000277, CPU_PAT,	CPU_C2_AT_XE },
	{ 0x000002FF, 0x000002FF, CPU_MTRR,	CPU_P6_CX_XE },
	{ 0x00000300, 0x00000308, CPU_PMC,	CPU_INTEL_XEON },
	{ 0x00000309, 0x0000030B, CPU_PMC,	CPU_C2_AT_XE },
	{ 0x0000030C, 0x00000311, CPU_PMC,	CPU_INTEL_XEON },
	{ 0x00000345, 0x00000345, CPU_PMC,	CPU_C2_AT },
	{ 0x00000360, 0x00000371, CPU_PMC,	CPU_INTEL_XEON },
	{ 0x0000038D, 0x00000390, CPU_PMC,	CPU_C2_AT },
	{ 0x000003A0, 0x000003BE, CPU_PMC,	CPU_INTEL_XEON },
	{ 0x000003C0, 0x000003CD, CPU_PMC,	CPU_INTEL_XEON },
	{ 0x000003E0, 0x000003E1, CPU_PMC,	CPU_INTEL_XEON },
	{ 0x000003F0, 0x000003F0, CPU_PMC,	CPU_INTEL_XEON },
	{ 0x000003F1, 0x000003F1, CPU_PMC,	CPU_C2_AT_XE },
	{ 0x000003F2, 0x000003F2, CPU_PMC,	CPU_INTEL_XEON },
	{ 0x00000400, 0x00000402, CPU_MC,	CPU_PM_CX_AT_XE },
	{ 0x00000403, 0x00000403, CPU_MC,	CPU_INTEL_XEON },
	{ 0x00000404, 0x00000406, CPU_MC,	CPU_PM_CX_AT_XE },
	{ 0x00000407, 0x00000407, CPU_MC,	CPU_INTEL_XEON },
	{ 0x00000408, 0x0000040A, CPU_MC,	CPU_PM_CX_AT_XE },
	{ 0x0000040B, 0x0000040B, CPU_MC,	CPU_INTEL_XEON },
	{ 0x0000040C, 0x0000040E, CPU_MC,	CPU_PM_CX_XE },
	{ 0x0000040F, 0x0000040F, CPU_MC,	CPU_INTEL_XEON },
	{ 0x00000410, 0x00000412, CPU_MC,	CPU_PM_CX_AT_XE },
	{ 0x00000413, 0x00000417, CPU_MC,	CPU_CX_AT_XE },
	{ 0x00000480, 0x0000048B, CPU_VMX,	CPU_CX_AT_XE },
	{ 0x00000600, 0x00000600, CPU_DEBUG,	CPU_PM_CX_AT_XE },
	{ 0x00000680, 0x0000068F, CPU_LBRANCH,	CPU_INTEL_XEON },
	{ 0x000006C0, 0x000006CF, CPU_LBRANCH,	CPU_INTEL_XEON },
	{ 0x000107CC, 0x000107D3, CPU_PMC,	CPU_INTEL_XEON_MP },
	{ 0xC0000080, 0xC0000080, CPU_FEATURES,	CPU_INTEL_XEON },
	{ 0xC0000081, 0xC0000082, CPU_CALL,	CPU_INTEL_XEON },
	{ 0xC0000084, 0xC0000084, CPU_CALL,	CPU_INTEL_XEON },
	{ 0xC0000100, 0xC0000102, CPU_BASE,	CPU_INTEL_XEON },
};
/* AMD Registers Range: every entry applies to all AMD models (CPU_ALL). */
static struct cpu_debug_range cpu_amd_range[] = {
	{ 0x00000010, 0x00000010, CPU_TIME,	CPU_ALL, },
	{ 0x0000001B, 0x0000001B, CPU_APIC,	CPU_ALL, },
	{ 0x000000FE, 0x000000FE, CPU_MTRR,	CPU_ALL, },
	{ 0x00000174, 0x00000176, CPU_SYSENTER,	CPU_ALL, },
	{ 0x00000179, 0x0000017A, CPU_MC,	CPU_ALL, },
	{ 0x0000017B, 0x0000017B, CPU_MC,	CPU_ALL, },
	{ 0x000001D9, 0x000001D9, CPU_DEBUG,	CPU_ALL, },
	{ 0x000001DB, 0x000001DE, CPU_LBRANCH,	CPU_ALL, },
	{ 0x00000200, 0x0000020F, CPU_MTRR,	CPU_ALL, },
	{ 0x00000250, 0x00000250, CPU_MTRR,	CPU_ALL, },
	{ 0x00000258, 0x00000259, CPU_MTRR,	CPU_ALL, },
	{ 0x00000268, 0x0000026F, CPU_MTRR,	CPU_ALL, },
	{ 0x00000277, 0x00000277, CPU_PAT,	CPU_ALL, },
	{ 0x000002FF, 0x000002FF, CPU_MTRR,	CPU_ALL, },
	{ 0x00000400, 0x00000417, CPU_MC,	CPU_ALL, },
	{ 0xC0000080, 0xC0000080, CPU_FEATURES,	CPU_ALL, },
	{ 0xC0000081, 0xC0000084, CPU_CALL,	CPU_ALL, },
	{ 0xC0000100, 0xC0000102, CPU_BASE,	CPU_ALL, },
	{ 0xC0000103, 0xC0000103, CPU_TIME,	CPU_ALL, },
	{ 0xC0000408, 0xC000040A, CPU_MC,	CPU_ALL, },
	{ 0xc0010000, 0xc0010007, CPU_PMC,	CPU_ALL, },
	{ 0xc0010010, 0xc0010010, CPU_MTRR,	CPU_ALL, },
	{ 0xc0010016, 0xc001001A, CPU_MTRR,	CPU_ALL, },
	{ 0xc001001D, 0xc001001D, CPU_MTRR,	CPU_ALL, },
	{ 0xc0010030, 0xc0010035, CPU_BIOS,	CPU_ALL, },
	{ 0xc0010056, 0xc0010056, CPU_SMM,	CPU_ALL, },
	{ 0xc0010061, 0xc0010063, CPU_SMM,	CPU_ALL, },
	{ 0xc0010074, 0xc0010074, CPU_MC,	CPU_ALL, },
	{ 0xc0010111, 0xc0010113, CPU_SMM,	CPU_ALL, },
	{ 0xc0010114, 0xc0010118, CPU_SVM,	CPU_ALL, },
	{ 0xc0010119, 0xc001011A, CPU_SMM,	CPU_ALL, },
	{ 0xc0010140, 0xc0010141, CPU_OSVM,	CPU_ALL, },
	{ 0xc0010156, 0xc0010156, CPU_SMM,	CPU_ALL, },
};
  190. static int get_cpu_modelflag(unsigned cpu)
  191. {
  192. int flag;
  193. switch (per_cpu(cpu_model, cpu)) {
  194. /* Intel */
  195. case 0x0501:
  196. case 0x0502:
  197. case 0x0504:
  198. flag = CPU_INTEL_PENTIUM;
  199. break;
  200. case 0x0601:
  201. case 0x0603:
  202. case 0x0605:
  203. case 0x0607:
  204. case 0x0608:
  205. case 0x060A:
  206. case 0x060B:
  207. flag = CPU_INTEL_P6;
  208. break;
  209. case 0x0609:
  210. case 0x060D:
  211. flag = CPU_INTEL_PENTIUM_M;
  212. break;
  213. case 0x060E:
  214. flag = CPU_INTEL_CORE;
  215. break;
  216. case 0x060F:
  217. case 0x0617:
  218. flag = CPU_INTEL_CORE2;
  219. break;
  220. case 0x061C:
  221. flag = CPU_INTEL_ATOM;
  222. break;
  223. case 0x0F00:
  224. case 0x0F01:
  225. case 0x0F02:
  226. case 0x0F03:
  227. case 0x0F04:
  228. flag = CPU_INTEL_XEON_P4;
  229. break;
  230. case 0x0F06:
  231. flag = CPU_INTEL_XEON_MP;
  232. break;
  233. default:
  234. flag = CPU_NONE;
  235. break;
  236. }
  237. return flag;
  238. }
  239. static int get_cpu_range_count(unsigned cpu)
  240. {
  241. int index;
  242. switch (per_cpu(cpu_model, cpu) >> 16) {
  243. case X86_VENDOR_INTEL:
  244. index = ARRAY_SIZE(cpu_intel_range);
  245. break;
  246. case X86_VENDOR_AMD:
  247. index = ARRAY_SIZE(cpu_amd_range);
  248. break;
  249. default:
  250. index = 0;
  251. break;
  252. }
  253. return index;
  254. }
  255. static int is_typeflag_valid(unsigned cpu, unsigned flag)
  256. {
  257. unsigned vendor, modelflag;
  258. int i, index;
  259. /* Standard Registers should be always valid */
  260. if (flag >= CPU_TSS)
  261. return 1;
  262. modelflag = per_cpu(cpu_modelflag, cpu);
  263. vendor = per_cpu(cpu_model, cpu) >> 16;
  264. index = get_cpu_range_count(cpu);
  265. for (i = 0; i < index; i++) {
  266. switch (vendor) {
  267. case X86_VENDOR_INTEL:
  268. if ((cpu_intel_range[i].model & modelflag) &&
  269. (cpu_intel_range[i].flag & flag))
  270. return 1;
  271. break;
  272. case X86_VENDOR_AMD:
  273. if (cpu_amd_range[i].flag & flag)
  274. return 1;
  275. break;
  276. }
  277. }
  278. /* Invalid */
  279. return 0;
  280. }
  281. static unsigned get_cpu_range(unsigned cpu, unsigned *min, unsigned *max,
  282. int index, unsigned flag)
  283. {
  284. unsigned modelflag;
  285. modelflag = per_cpu(cpu_modelflag, cpu);
  286. *max = 0;
  287. switch (per_cpu(cpu_model, cpu) >> 16) {
  288. case X86_VENDOR_INTEL:
  289. if ((cpu_intel_range[index].model & modelflag) &&
  290. (cpu_intel_range[index].flag & flag)) {
  291. *min = cpu_intel_range[index].min;
  292. *max = cpu_intel_range[index].max;
  293. }
  294. break;
  295. case X86_VENDOR_AMD:
  296. if (cpu_amd_range[index].flag & flag) {
  297. *min = cpu_amd_range[index].min;
  298. *max = cpu_amd_range[index].max;
  299. }
  300. break;
  301. }
  302. return *max;
  303. }
  304. /* This function can also be called with seq = NULL for printk */
  305. static void print_cpu_data(struct seq_file *seq, unsigned type,
  306. u32 low, u32 high)
  307. {
  308. struct cpu_private *priv;
  309. u64 val = high;
  310. if (seq) {
  311. priv = seq->private;
  312. if (priv->file) {
  313. val = (val << 32) | low;
  314. seq_printf(seq, "0x%llx\n", val);
  315. } else
  316. seq_printf(seq, " %08x: %08x_%08x\n",
  317. type, high, low);
  318. } else
  319. printk(KERN_INFO " %08x: %08x_%08x\n", type, high, low);
  320. }
/*
 * Dump the MSRs of class @flag for @cpu into @seq (printk when seq is
 * NULL). For a per-register "value" file only that single MSR is read.
 */
static void print_msr(struct seq_file *seq, unsigned cpu, unsigned flag)
{
	unsigned msr, msr_min, msr_max;
	struct cpu_private *priv;
	u32 low, high;
	int i, range;

	if (seq) {
		priv = seq->private;
		if (priv->file) {
			/* "value" file: read exactly the MSR it points at */
			if (!rdmsr_safe_on_cpu(priv->cpu, priv->reg,
					       &low, &high))
				print_cpu_data(seq, priv->reg, low, high);
			return;
		}
	}

	/* Walk every vendor range entry that matches this class */
	range = get_cpu_range_count(cpu);
	for (i = 0; i < range; i++) {
		if (!get_cpu_range(cpu, &msr_min, &msr_max, i, flag))
			continue;
		for (msr = msr_min; msr <= msr_max; msr++) {
			/* _safe read: skip MSRs that fault on this model */
			if (rdmsr_safe_on_cpu(cpu, msr, &low, &high))
				continue;
			print_cpu_data(seq, msr, low, high);
		}
	}
}
/*
 * Dump general-purpose/segment registers of the CPU this runs on;
 * invoked on the target CPU via smp_call_function_single().
 * NOTE(review): regs comes from task_pt_regs(current), i.e. the entry
 * frame of whatever task the IPI interrupted — not a TSS snapshot
 * despite the "tss" file name; confirm this is the intended output.
 */
static void print_tss(void *arg)
{
	struct pt_regs *regs = task_pt_regs(current);
	struct seq_file *seq = arg;
	unsigned int seg;

	seq_printf(seq, " RAX\t: %016lx\n", regs->ax);
	seq_printf(seq, " RBX\t: %016lx\n", regs->bx);
	seq_printf(seq, " RCX\t: %016lx\n", regs->cx);
	seq_printf(seq, " RDX\t: %016lx\n", regs->dx);

	seq_printf(seq, " RSI\t: %016lx\n", regs->si);
	seq_printf(seq, " RDI\t: %016lx\n", regs->di);
	seq_printf(seq, " RBP\t: %016lx\n", regs->bp);
	seq_printf(seq, " ESP\t: %016lx\n", regs->sp);

#ifdef CONFIG_X86_64
	seq_printf(seq, " R08\t: %016lx\n", regs->r8);
	seq_printf(seq, " R09\t: %016lx\n", regs->r9);
	seq_printf(seq, " R10\t: %016lx\n", regs->r10);
	seq_printf(seq, " R11\t: %016lx\n", regs->r11);
	seq_printf(seq, " R12\t: %016lx\n", regs->r12);
	seq_printf(seq, " R13\t: %016lx\n", regs->r13);
	seq_printf(seq, " R14\t: %016lx\n", regs->r14);
	seq_printf(seq, " R15\t: %016lx\n", regs->r15);
#endif

	/* Segment selectors are read live from this CPU, not from regs */
	asm("movl %%cs,%0" : "=r" (seg));
	seq_printf(seq, " CS\t: %04x\n", seg);
	asm("movl %%ds,%0" : "=r" (seg));
	seq_printf(seq, " DS\t: %04x\n", seg);
	seq_printf(seq, " SS\t: %04lx\n", regs->ss);
	asm("movl %%es,%0" : "=r" (seg));
	seq_printf(seq, " ES\t: %04x\n", seg);
	asm("movl %%fs,%0" : "=r" (seg));
	seq_printf(seq, " FS\t: %04x\n", seg);
	asm("movl %%gs,%0" : "=r" (seg));
	seq_printf(seq, " GS\t: %04x\n", seg);

	seq_printf(seq, " EFLAGS\t: %016lx\n", regs->flags);

	seq_printf(seq, " EIP\t: %016lx\n", regs->ip);
}
/* Dump control registers of the CPU this runs on (IPI callback). */
static void print_cr(void *arg)
{
	struct seq_file *seq = arg;

	seq_printf(seq, " cr0\t: %016lx\n", read_cr0());
	seq_printf(seq, " cr2\t: %016lx\n", read_cr2());
	seq_printf(seq, " cr3\t: %016lx\n", read_cr3());
	/* _safe variant presumably tolerates CPUs lacking cr4 — TODO confirm */
	seq_printf(seq, " cr4\t: %016lx\n", read_cr4_safe());
#ifdef CONFIG_X86_64
	seq_printf(seq, " cr8\t: %016lx\n", read_cr8());
#endif
}
/*
 * Print one descriptor-table register labelled @str.
 * NOTE(review): ORing address with size mashes both fields into one
 * number; printing them separately looks like the intent — confirm.
 */
static void print_desc_ptr(char *str, struct seq_file *seq, struct desc_ptr dt)
{
	seq_printf(seq, " %s\t: %016llx\n", str, (u64)(dt.address | dt.size));
}
/* Dump descriptor-table registers (IDT/GDT/LDT/TR) of this CPU. */
static void print_dt(void *seq)
{
	struct desc_ptr dt;
	unsigned long ldt;

	/* IDT */
	store_idt((struct desc_ptr *)&dt);
	print_desc_ptr("IDT", seq, dt);

	/* GDT */
	store_gdt((struct desc_ptr *)&dt);
	print_desc_ptr("GDT", seq, dt);

	/* LDT */
	store_ldt(ldt);
	seq_printf(seq, " LDT\t: %016lx\n", ldt);

	/* TR (the 'ldt' variable is reused as scratch for the selector) */
	store_tr(ldt);
	seq_printf(seq, " TR\t: %016lx\n", ldt);
}
  417. static void print_dr(void *arg)
  418. {
  419. struct seq_file *seq = arg;
  420. unsigned long dr;
  421. int i;
  422. for (i = 0; i < 8; i++) {
  423. /* Ignore db4, db5 */
  424. if ((i == 4) || (i == 5))
  425. continue;
  426. get_debugreg(dr, i);
  427. seq_printf(seq, " dr%d\t: %016lx\n", i, dr);
  428. }
  429. seq_printf(seq, "\n MSR\t:\n");
  430. }
/*
 * Dump the local APIC registers of the CPU this runs on (IPI callback),
 * then emit the " MSR" heading for the MSR listing that follows. Without
 * CONFIG_X86_LOCAL_APIC only the heading is printed.
 */
static void print_apic(void *arg)
{
	struct seq_file *seq = arg;

#ifdef CONFIG_X86_LOCAL_APIC
	seq_printf(seq, " LAPIC\t:\n");
	seq_printf(seq, " ID\t\t: %08x\n",  apic_read(APIC_ID) >> 24);
	seq_printf(seq, " LVR\t\t: %08x\n",  apic_read(APIC_LVR));
	seq_printf(seq, " TASKPRI\t: %08x\n",  apic_read(APIC_TASKPRI));
	seq_printf(seq, " ARBPRI\t\t: %08x\n",  apic_read(APIC_ARBPRI));
	seq_printf(seq, " PROCPRI\t: %08x\n",  apic_read(APIC_PROCPRI));
	seq_printf(seq, " LDR\t\t: %08x\n",  apic_read(APIC_LDR));
	seq_printf(seq, " DFR\t\t: %08x\n",  apic_read(APIC_DFR));
	seq_printf(seq, " SPIV\t\t: %08x\n",  apic_read(APIC_SPIV));
	seq_printf(seq, " ISR\t\t: %08x\n",  apic_read(APIC_ISR));
	seq_printf(seq, " ESR\t\t: %08x\n",  apic_read(APIC_ESR));
	seq_printf(seq, " ICR\t\t: %08x\n",  apic_read(APIC_ICR));
	seq_printf(seq, " ICR2\t\t: %08x\n",  apic_read(APIC_ICR2));
	seq_printf(seq, " LVTT\t\t: %08x\n",  apic_read(APIC_LVTT));
	seq_printf(seq, " LVTTHMR\t: %08x\n",  apic_read(APIC_LVTTHMR));
	seq_printf(seq, " LVTPC\t\t: %08x\n",  apic_read(APIC_LVTPC));
	seq_printf(seq, " LVT0\t\t: %08x\n",  apic_read(APIC_LVT0));
	seq_printf(seq, " LVT1\t\t: %08x\n",  apic_read(APIC_LVT1));
	seq_printf(seq, " LVTERR\t\t: %08x\n",  apic_read(APIC_LVTERR));
	seq_printf(seq, " TMICT\t\t: %08x\n",  apic_read(APIC_TMICT));
	seq_printf(seq, " TMCCT\t\t: %08x\n",  apic_read(APIC_TMCCT));
	seq_printf(seq, " TDCR\t\t: %08x\n",  apic_read(APIC_TDCR));
#endif /* CONFIG_X86_LOCAL_APIC */

	seq_printf(seq, "\n MSR\t:\n");
}
/*
 * seq_file show: emit the content of one debugfs file. Dumps that must
 * execute on the owning CPU (TSS/CR/DT and the "index" halves of the
 * DEBUG/APIC classes) run there via smp_call_function_single(); all
 * MSR content is read cross-CPU by print_msr().
 */
static int cpu_seq_show(struct seq_file *seq, void *v)
{
	struct cpu_private *priv = seq->private;

	if (priv == NULL)
		return -EINVAL;

	switch (cpu_base[priv->type].flag) {
	case CPU_TSS:
		smp_call_function_single(priv->cpu, print_tss, seq, 1);
		break;
	case CPU_CR:
		smp_call_function_single(priv->cpu, print_cr, seq, 1);
		break;
	case CPU_DT:
		smp_call_function_single(priv->cpu, print_dt, seq, 1);
		break;
	case CPU_DEBUG:
		/* "index" file prefixes the MSR listing with the dr dump */
		if (priv->file == CPU_INDEX_BIT)
			smp_call_function_single(priv->cpu, print_dr, seq, 1);
		print_msr(seq, priv->cpu, cpu_base[priv->type].flag);
		break;
	case CPU_APIC:
		/* "index" file prefixes the MSR listing with the APIC dump */
		if (priv->file == CPU_INDEX_BIT)
			smp_call_function_single(priv->cpu, print_apic, seq, 1);
		print_msr(seq, priv->cpu, cpu_base[priv->type].flag);
		break;
	default:
		print_msr(seq, priv->cpu, cpu_base[priv->type].flag);
		break;
	}
	seq_printf(seq, "\n");

	return 0;
}
  492. static void *cpu_seq_start(struct seq_file *seq, loff_t *pos)
  493. {
  494. if (*pos == 0) /* One time is enough ;-) */
  495. return seq;
  496. return NULL;
  497. }
  498. static void *cpu_seq_next(struct seq_file *seq, void *v, loff_t *pos)
  499. {
  500. (*pos)++;
  501. return cpu_seq_start(seq, pos);
  502. }
/* Nothing to release; start/next hand out no resources. */
static void cpu_seq_stop(struct seq_file *seq, void *v)
{
}
/* seq_file iterator: cpu_seq_show() runs exactly once per read. */
static const struct seq_operations cpu_seq_ops = {
	.start		= cpu_seq_start,
	.next		= cpu_seq_next,
	.stop		= cpu_seq_stop,
	.show		= cpu_seq_show,
};
  512. static int cpu_seq_open(struct inode *inode, struct file *file)
  513. {
  514. struct cpu_private *priv = inode->i_private;
  515. struct seq_file *seq;
  516. int err;
  517. err = seq_open(file, &cpu_seq_ops);
  518. if (!err) {
  519. seq = file->private_data;
  520. seq->private = priv;
  521. }
  522. return err;
  523. }
/* Read-only debugfs files backed by the seq_file machinery above. */
static const struct file_operations cpu_fops = {
	.open		= cpu_seq_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release,
};
/*
 * Allocate the per-file private data and create one debugfs file.
 * A non-zero @file selects a cpu_file[] entry ("index"/"value") under
 * @dentry; @file == 0 creates the per-class file under the class
 * directory, guarded by the class init flag so it is made only once.
 * Returns 0 or -ENOMEM.
 */
static int cpu_create_file(unsigned cpu, unsigned type, unsigned reg,
			   unsigned file, struct dentry *dentry)
{
	struct cpu_private *priv = NULL;

	/* Already initialized */
	if (file == CPU_INDEX_BIT)
		if (per_cpu(cpu_arr[type].init, cpu))
			return 0;

	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
	if (priv == NULL)
		return -ENOMEM;

	priv->cpu = cpu;
	priv->type = type;
	priv->reg = reg;
	priv->file = file;

	/* Record priv so cpu_debug_exit() can free it */
	mutex_lock(&cpu_debug_lock);
	per_cpu(priv_arr[type], cpu) = priv;
	per_cpu(cpu_priv_count, cpu)++;
	mutex_unlock(&cpu_debug_lock);

	if (file)
		debugfs_create_file(cpu_file[file].name, S_IRUGO,
				    dentry, (void *)priv, &cpu_fops);
	else {
		debugfs_create_file(cpu_base[type].name, S_IRUGO,
				    per_cpu(cpu_arr[type].dentry, cpu),
				    (void *)priv, &cpu_fops);
		mutex_lock(&cpu_debug_lock);
		per_cpu(cpu_arr[type].init, cpu) = 1;
		mutex_unlock(&cpu_debug_lock);
	}

	return 0;
}
  562. static int cpu_init_regfiles(unsigned cpu, unsigned int type, unsigned reg,
  563. struct dentry *dentry)
  564. {
  565. unsigned file;
  566. int err = 0;
  567. for (file = 0; file < ARRAY_SIZE(cpu_file); file++) {
  568. err = cpu_create_file(cpu, type, reg, file, dentry);
  569. if (err)
  570. return err;
  571. }
  572. return err;
  573. }
/*
 * For every readable MSR of class @type on @cpu, create a "0x<reg>"
 * subdirectory of @dentry holding "index"/"value" files.
 */
static int cpu_init_msr(unsigned cpu, unsigned type, struct dentry *dentry)
{
	struct dentry *cpu_dentry = NULL;
	unsigned reg, reg_min, reg_max;
	int i, range, err = 0;
	char reg_dir[12];	/* "0x" + up to 8 hex digits + NUL */
	u32 low, high;

	range = get_cpu_range_count(cpu);

	for (i = 0; i < range; i++) {
		if (!get_cpu_range(cpu, &reg_min, &reg_max, i,
				   cpu_base[type].flag))
			continue;

		for (reg = reg_min; reg <= reg_max; reg++) {
			/* Only expose MSRs that actually read on this CPU */
			if (rdmsr_safe_on_cpu(cpu, reg, &low, &high))
				continue;

			sprintf(reg_dir, "0x%x", reg);
			cpu_dentry = debugfs_create_dir(reg_dir, dentry);
			err = cpu_init_regfiles(cpu, type, reg, cpu_dentry);
			if (err)
				return err;
		}
	}

	return err;
}
/*
 * Create a subdirectory of @dentry per register class valid on @cpu.
 * The "- 1" bound deliberately skips the final cpu_base[] entry
 * ("registers", CPU_REG_ALL).
 */
static int cpu_init_allreg(unsigned cpu, struct dentry *dentry)
{
	struct dentry *cpu_dentry = NULL;
	unsigned type;
	int err = 0;

	for (type = 0; type < ARRAY_SIZE(cpu_base) - 1; type++) {
		if (!is_typeflag_valid(cpu, cpu_base[type].flag))
			continue;
		cpu_dentry = debugfs_create_dir(cpu_base[type].name, dentry);
		per_cpu(cpu_arr[type].dentry, cpu) = cpu_dentry;

		if (type < CPU_TSS_BIT)
			/* MSR-backed classes: one subdir per readable MSR */
			err = cpu_init_msr(cpu, type, cpu_dentry);
		else
			/* TSS/CR/DT classes: one flat file */
			err = cpu_create_file(cpu, type, 0, CPU_INDEX_BIT,
					      cpu_dentry);
		if (err)
			return err;
	}

	return err;
}
/*
 * Build the <debugfs>/cpu/cpuN tree for every CPU with MSR support,
 * caching vendor/family/model in cpu_model first.
 * NOTE(review): loops over 0..nr_cpu_ids-1 rather than online CPUs;
 * presumably cpu_data() of absent CPUs lacks X86_FEATURE_MSR and is
 * skipped — verify, or switch to an online-cpu iterator.
 */
static int cpu_init_cpu(void)
{
	struct dentry *cpu_dentry = NULL;
	struct cpuinfo_x86 *cpui;
	char cpu_dir[12];
	unsigned cpu;
	int err = 0;

	for (cpu = 0; cpu < nr_cpu_ids; cpu++) {
		cpui = &cpu_data(cpu);
		if (!cpu_has(cpui, X86_FEATURE_MSR))
			continue;
		/* cpu_model = (vendor << 16) | (family << 8) | model */
		per_cpu(cpu_model, cpu) = ((cpui->x86_vendor << 16) |
					   (cpui->x86 << 8) |
					   (cpui->x86_model));
		per_cpu(cpu_modelflag, cpu) = get_cpu_modelflag(cpu);

		sprintf(cpu_dir, "cpu%d", cpu);
		cpu_dentry = debugfs_create_dir(cpu_dir, cpu_debugfs_dir);
		err = cpu_init_allreg(cpu, cpu_dentry);

		pr_info("cpu%d(%d) debug files %d\n",
			cpu, nr_cpu_ids, per_cpu(cpu_priv_count, cpu));
		/* Clamp the bookkeeping so cpu_debug_exit() stays in bounds */
		if (per_cpu(cpu_priv_count, cpu) > MAX_CPU_FILES) {
			pr_err("Register files count %d exceeds limit %d\n",
				per_cpu(cpu_priv_count, cpu), MAX_CPU_FILES);
			per_cpu(cpu_priv_count, cpu) = MAX_CPU_FILES;
			err = -ENFILE;
		}
		if (err)
			return err;
	}

	return err;
}
/* Module init: create <debugfs>/cpu under arch_debugfs_dir, populate it. */
static int __init cpu_debug_init(void)
{
	cpu_debugfs_dir = debugfs_create_dir("cpu", arch_debugfs_dir);

	return cpu_init_cpu();
}
/* Module exit: tear down the debugfs tree and free all private data. */
static void __exit cpu_debug_exit(void)
{
	int i, cpu;

	if (cpu_debugfs_dir)
		debugfs_remove_recursive(cpu_debugfs_dir);

	/* Free the cpu_priv_count kzalloc'ed cpu_private records per CPU */
	for (cpu = 0; cpu < nr_cpu_ids; cpu++)
		for (i = 0; i < per_cpu(cpu_priv_count, cpu); i++)
			kfree(per_cpu(priv_arr[i], cpu));
}
/* Module entry/exit hookup and metadata */
module_init(cpu_debug_init);
module_exit(cpu_debug_exit);
MODULE_AUTHOR("Jaswinder Singh Rajput");
MODULE_DESCRIPTION("CPU Debug module");
MODULE_LICENSE("GPL");