/*
 * CPU x86 architecture debug code
 *
 * Copyright (C) 2009 Jaswinder Singh Rajput
 *
 * For licensing details see kernel-base/COPYING
 */

#include <linux/interrupt.h>
#include <linux/compiler.h>
#include <linux/seq_file.h>
#include <linux/debugfs.h>
#include <linux/kprobes.h>
#include <linux/uaccess.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/percpu.h>
#include <linux/signal.h>
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/types.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/smp.h>

#include <asm/cpu_debug.h>
#include <asm/paravirt.h>
#include <asm/system.h>
#include <asm/traps.h>
#include <asm/apic.h>
#include <asm/desc.h>
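
/*
 * Per-CPU bookkeeping: one cpu_cpuX_base slot per register group (its
 * debugfs directory and an "init" flag), the cpu_private objects backing
 * the files created for that CPU, the packed vendor/family/model word,
 * the derived model flag, and the number of files created so far.
 */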
static DEFINE_PER_CPU(struct cpu_cpuX_base, cpu_arr[CPU_REG_ALL_BIT]);
static DEFINE_PER_CPU(struct cpu_private *, priv_arr[MAX_CPU_FILES]);
static DEFINE_PER_CPU(unsigned, cpu_modelflag);
static DEFINE_PER_CPU(int, cpu_priv_count);
static DEFINE_PER_CPU(unsigned, cpu_model);

static DEFINE_MUTEX(cpu_debug_lock);

static struct dentry *cpu_debugfs_dir;
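
/*
 * Register groups exported as directories under each cpu<N>/ directory.
 * The third field marks a group writable; only the performance counters
 * ("pmc") accept writes.  Entries from "tss" on describe CPU state rather
 * than MSR ranges, and the final catch-all "registers" entry is skipped
 * when the per-group directories are created.
 */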
static struct cpu_debug_base cpu_base[] = {
        { "mc",         CPU_MC,         0 },
        { "monitor",    CPU_MONITOR,    0 },
        { "time",       CPU_TIME,       0 },
        { "pmc",        CPU_PMC,        1 },
        { "platform",   CPU_PLATFORM,   0 },
        { "apic",       CPU_APIC,       0 },
        { "poweron",    CPU_POWERON,    0 },
        { "control",    CPU_CONTROL,    0 },
        { "features",   CPU_FEATURES,   0 },
        { "lastbranch", CPU_LBRANCH,    0 },
        { "bios",       CPU_BIOS,       0 },
        { "freq",       CPU_FREQ,       0 },
        { "mtrr",       CPU_MTRR,       0 },
        { "perf",       CPU_PERF,       0 },
        { "cache",      CPU_CACHE,      0 },
        { "sysenter",   CPU_SYSENTER,   0 },
        { "therm",      CPU_THERM,      0 },
        { "misc",       CPU_MISC,       0 },
        { "debug",      CPU_DEBUG,      0 },
        { "pat",        CPU_PAT,        0 },
        { "vmx",        CPU_VMX,        0 },
        { "call",       CPU_CALL,       0 },
        { "base",       CPU_BASE,       0 },
        { "smm",        CPU_SMM,        0 },
        { "svm",        CPU_SVM,        0 },
        { "osvm",       CPU_OSVM,       0 },
        { "tss",        CPU_TSS,        0 },
        { "cr",         CPU_CR,         0 },
        { "dt",         CPU_DT,         0 },
        { "registers",  CPU_REG_ALL,    0 },
};

static struct cpu_file_base cpu_file[] = {
        { "index",      CPU_REG_ALL,    0 },
        { "value",      CPU_REG_ALL,    1 },
};

/* Intel Registers Range */
static struct cpu_debug_range cpu_intel_range[] = {
        { 0x00000000, 0x00000001, CPU_MC, CPU_INTEL_ALL },
        { 0x00000006, 0x00000007, CPU_MONITOR, CPU_CX_AT_XE },
        { 0x00000010, 0x00000010, CPU_TIME, CPU_INTEL_ALL },
        { 0x00000011, 0x00000013, CPU_PMC, CPU_INTEL_PENTIUM },
        { 0x00000017, 0x00000017, CPU_PLATFORM, CPU_PX_CX_AT_XE },
        { 0x0000001B, 0x0000001B, CPU_APIC, CPU_P6_CX_AT_XE },
        { 0x0000002A, 0x0000002A, CPU_POWERON, CPU_PX_CX_AT_XE },
        { 0x0000002B, 0x0000002B, CPU_POWERON, CPU_INTEL_XEON },
        { 0x0000002C, 0x0000002C, CPU_FREQ, CPU_INTEL_XEON },
        { 0x0000003A, 0x0000003A, CPU_CONTROL, CPU_CX_AT_XE },
        { 0x00000040, 0x00000043, CPU_LBRANCH, CPU_PM_CX_AT_XE },
        { 0x00000044, 0x00000047, CPU_LBRANCH, CPU_PM_CO_AT },
        { 0x00000060, 0x00000063, CPU_LBRANCH, CPU_C2_AT },
        { 0x00000064, 0x00000067, CPU_LBRANCH, CPU_INTEL_ATOM },
        { 0x00000079, 0x00000079, CPU_BIOS, CPU_P6_CX_AT_XE },
        { 0x00000088, 0x0000008A, CPU_CACHE, CPU_INTEL_P6 },
        { 0x0000008B, 0x0000008B, CPU_BIOS, CPU_P6_CX_AT_XE },
        { 0x0000009B, 0x0000009B, CPU_MONITOR, CPU_INTEL_XEON },
        { 0x000000C1, 0x000000C2, CPU_PMC, CPU_P6_CX_AT },
        { 0x000000CD, 0x000000CD, CPU_FREQ, CPU_CX_AT },
        { 0x000000E7, 0x000000E8, CPU_PERF, CPU_CX_AT },
        { 0x000000FE, 0x000000FE, CPU_MTRR, CPU_P6_CX_XE },
        { 0x00000116, 0x00000116, CPU_CACHE, CPU_INTEL_P6 },
        { 0x00000118, 0x00000118, CPU_CACHE, CPU_INTEL_P6 },
        { 0x00000119, 0x00000119, CPU_CACHE, CPU_INTEL_PX },
        { 0x0000011A, 0x0000011B, CPU_CACHE, CPU_INTEL_P6 },
        { 0x0000011E, 0x0000011E, CPU_CACHE, CPU_PX_CX_AT },
        { 0x00000174, 0x00000176, CPU_SYSENTER, CPU_P6_CX_AT_XE },
        { 0x00000179, 0x0000017A, CPU_MC, CPU_PX_CX_AT_XE },
        { 0x0000017B, 0x0000017B, CPU_MC, CPU_P6_XE },
        { 0x00000186, 0x00000187, CPU_PMC, CPU_P6_CX_AT },
        { 0x00000198, 0x00000199, CPU_PERF, CPU_PM_CX_AT_XE },
        { 0x0000019A, 0x0000019A, CPU_TIME, CPU_PM_CX_AT_XE },
        { 0x0000019B, 0x0000019D, CPU_THERM, CPU_PM_CX_AT_XE },
        { 0x000001A0, 0x000001A0, CPU_MISC, CPU_PM_CX_AT_XE },
        { 0x000001C9, 0x000001C9, CPU_LBRANCH, CPU_PM_CX_AT },
        { 0x000001D7, 0x000001D8, CPU_LBRANCH, CPU_INTEL_XEON },
        { 0x000001D9, 0x000001D9, CPU_DEBUG, CPU_CX_AT_XE },
        { 0x000001DA, 0x000001DA, CPU_LBRANCH, CPU_INTEL_XEON },
        { 0x000001DB, 0x000001DB, CPU_LBRANCH, CPU_P6_XE },
        { 0x000001DC, 0x000001DC, CPU_LBRANCH, CPU_INTEL_P6 },
        { 0x000001DD, 0x000001DE, CPU_LBRANCH, CPU_PX_CX_AT_XE },
        { 0x000001E0, 0x000001E0, CPU_LBRANCH, CPU_INTEL_P6 },
        { 0x00000200, 0x0000020F, CPU_MTRR, CPU_P6_CX_XE },
        { 0x00000250, 0x00000250, CPU_MTRR, CPU_P6_CX_XE },
        { 0x00000258, 0x00000259, CPU_MTRR, CPU_P6_CX_XE },
        { 0x00000268, 0x0000026F, CPU_MTRR, CPU_P6_CX_XE },
        { 0x00000277, 0x00000277, CPU_PAT, CPU_C2_AT_XE },
        { 0x000002FF, 0x000002FF, CPU_MTRR, CPU_P6_CX_XE },
        { 0x00000300, 0x00000308, CPU_PMC, CPU_INTEL_XEON },
        { 0x00000309, 0x0000030B, CPU_PMC, CPU_C2_AT_XE },
        { 0x0000030C, 0x00000311, CPU_PMC, CPU_INTEL_XEON },
        { 0x00000345, 0x00000345, CPU_PMC, CPU_C2_AT },
        { 0x00000360, 0x00000371, CPU_PMC, CPU_INTEL_XEON },
        { 0x0000038D, 0x00000390, CPU_PMC, CPU_C2_AT },
        { 0x000003A0, 0x000003BE, CPU_PMC, CPU_INTEL_XEON },
        { 0x000003C0, 0x000003CD, CPU_PMC, CPU_INTEL_XEON },
        { 0x000003E0, 0x000003E1, CPU_PMC, CPU_INTEL_XEON },
        { 0x000003F0, 0x000003F0, CPU_PMC, CPU_INTEL_XEON },
        { 0x000003F1, 0x000003F1, CPU_PMC, CPU_C2_AT_XE },
        { 0x000003F2, 0x000003F2, CPU_PMC, CPU_INTEL_XEON },
        { 0x00000400, 0x00000402, CPU_MC, CPU_PM_CX_AT_XE },
        { 0x00000403, 0x00000403, CPU_MC, CPU_INTEL_XEON },
        { 0x00000404, 0x00000406, CPU_MC, CPU_PM_CX_AT_XE },
        { 0x00000407, 0x00000407, CPU_MC, CPU_INTEL_XEON },
        { 0x00000408, 0x0000040A, CPU_MC, CPU_PM_CX_AT_XE },
        { 0x0000040B, 0x0000040B, CPU_MC, CPU_INTEL_XEON },
        { 0x0000040C, 0x0000040E, CPU_MC, CPU_PM_CX_XE },
        { 0x0000040F, 0x0000040F, CPU_MC, CPU_INTEL_XEON },
        { 0x00000410, 0x00000412, CPU_MC, CPU_PM_CX_AT_XE },
        { 0x00000413, 0x00000417, CPU_MC, CPU_CX_AT_XE },
        { 0x00000480, 0x0000048B, CPU_VMX, CPU_CX_AT_XE },
        { 0x00000600, 0x00000600, CPU_DEBUG, CPU_PM_CX_AT_XE },
        { 0x00000680, 0x0000068F, CPU_LBRANCH, CPU_INTEL_XEON },
        { 0x000006C0, 0x000006CF, CPU_LBRANCH, CPU_INTEL_XEON },
        { 0x000107CC, 0x000107D3, CPU_PMC, CPU_INTEL_XEON_MP },
        { 0xC0000080, 0xC0000080, CPU_FEATURES, CPU_INTEL_XEON },
        { 0xC0000081, 0xC0000082, CPU_CALL, CPU_INTEL_XEON },
        { 0xC0000084, 0xC0000084, CPU_CALL, CPU_INTEL_XEON },
        { 0xC0000100, 0xC0000102, CPU_BASE, CPU_INTEL_XEON },
};

/* AMD Registers Range */
static struct cpu_debug_range cpu_amd_range[] = {
        { 0x00000010, 0x00000010, CPU_TIME, CPU_ALL, },
        { 0x0000001B, 0x0000001B, CPU_APIC, CPU_ALL, },
        { 0x000000FE, 0x000000FE, CPU_MTRR, CPU_ALL, },
        { 0x00000174, 0x00000176, CPU_SYSENTER, CPU_ALL, },
        { 0x00000179, 0x0000017A, CPU_MC, CPU_ALL, },
        { 0x0000017B, 0x0000017B, CPU_MC, CPU_ALL, },
        { 0x000001D9, 0x000001D9, CPU_DEBUG, CPU_ALL, },
        { 0x000001DB, 0x000001DE, CPU_LBRANCH, CPU_ALL, },
        { 0x00000200, 0x0000020F, CPU_MTRR, CPU_ALL, },
        { 0x00000250, 0x00000250, CPU_MTRR, CPU_ALL, },
        { 0x00000258, 0x00000259, CPU_MTRR, CPU_ALL, },
        { 0x00000268, 0x0000026F, CPU_MTRR, CPU_ALL, },
        { 0x00000277, 0x00000277, CPU_PAT, CPU_ALL, },
        { 0x000002FF, 0x000002FF, CPU_MTRR, CPU_ALL, },
        { 0x00000400, 0x00000417, CPU_MC, CPU_ALL, },
        { 0xC0000080, 0xC0000080, CPU_FEATURES, CPU_ALL, },
        { 0xC0000081, 0xC0000084, CPU_CALL, CPU_ALL, },
        { 0xC0000100, 0xC0000102, CPU_BASE, CPU_ALL, },
        { 0xC0000103, 0xC0000103, CPU_TIME, CPU_ALL, },
        { 0xC0000408, 0xC000040A, CPU_MC, CPU_ALL, },
        { 0xc0010000, 0xc0010007, CPU_PMC, CPU_ALL, },
        { 0xc0010010, 0xc0010010, CPU_MTRR, CPU_ALL, },
        { 0xc0010016, 0xc001001A, CPU_MTRR, CPU_ALL, },
        { 0xc001001D, 0xc001001D, CPU_MTRR, CPU_ALL, },
        { 0xc0010030, 0xc0010035, CPU_BIOS, CPU_ALL, },
        { 0xc0010056, 0xc0010056, CPU_SMM, CPU_ALL, },
        { 0xc0010061, 0xc0010063, CPU_SMM, CPU_ALL, },
        { 0xc0010074, 0xc0010074, CPU_MC, CPU_ALL, },
        { 0xc0010111, 0xc0010113, CPU_SMM, CPU_ALL, },
        { 0xc0010114, 0xc0010118, CPU_SVM, CPU_ALL, },
        { 0xc0010119, 0xc001011A, CPU_SMM, CPU_ALL, },
        { 0xc0010140, 0xc0010141, CPU_OSVM, CPU_ALL, },
        { 0xc0010156, 0xc0010156, CPU_SMM, CPU_ALL, },
};
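
/*
 * per_cpu(cpu_model) is packed in cpu_init_cpu() as
 * (vendor << 16) | (family << 8) | model.  For Intel parts the
 * family/model byte pair below is mapped to a CPU_INTEL_* flag, which
 * is then matched against the .model field of cpu_intel_range[].
 */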
static int get_cpu_modelflag(unsigned cpu)
{
        int flag;

        switch (per_cpu(cpu_model, cpu)) {
        /* Intel */
        case 0x0501:
        case 0x0502:
        case 0x0504:
                flag = CPU_INTEL_PENTIUM;
                break;
        case 0x0601:
        case 0x0603:
        case 0x0605:
        case 0x0607:
        case 0x0608:
        case 0x060A:
        case 0x060B:
                flag = CPU_INTEL_P6;
                break;
        case 0x0609:
        case 0x060D:
                flag = CPU_INTEL_PENTIUM_M;
                break;
        case 0x060E:
                flag = CPU_INTEL_CORE;
                break;
        case 0x060F:
        case 0x0617:
                flag = CPU_INTEL_CORE2;
                break;
        case 0x061C:
                flag = CPU_INTEL_ATOM;
                break;
        case 0x0F00:
        case 0x0F01:
        case 0x0F02:
        case 0x0F03:
        case 0x0F04:
                flag = CPU_INTEL_XEON_P4;
                break;
        case 0x0F06:
                flag = CPU_INTEL_XEON_MP;
                break;
        default:
                flag = CPU_NONE;
                break;
        }

        return flag;
}

static int get_cpu_range_count(unsigned cpu)
{
        int index;

        switch (per_cpu(cpu_model, cpu) >> 16) {
        case X86_VENDOR_INTEL:
                index = ARRAY_SIZE(cpu_intel_range);
                break;
        case X86_VENDOR_AMD:
                index = ARRAY_SIZE(cpu_amd_range);
                break;
        default:
                index = 0;
                break;
        }

        return index;
}

static int is_typeflag_valid(unsigned cpu, unsigned flag)
{
        unsigned vendor, modelflag;
        int i, index;

        /* Standard registers are always valid */
        if (flag >= CPU_TSS)
                return 1;

        modelflag = per_cpu(cpu_modelflag, cpu);
        vendor = per_cpu(cpu_model, cpu) >> 16;
        index = get_cpu_range_count(cpu);

        for (i = 0; i < index; i++) {
                switch (vendor) {
                case X86_VENDOR_INTEL:
                        if ((cpu_intel_range[i].model & modelflag) &&
                            (cpu_intel_range[i].flag & flag))
                                return 1;
                        break;
                case X86_VENDOR_AMD:
                        if (cpu_amd_range[i].flag & flag)
                                return 1;
                        break;
                }
        }

        /* Invalid */
        return 0;
}

static unsigned get_cpu_range(unsigned cpu, unsigned *min, unsigned *max,
                              int index, unsigned flag)
{
        unsigned modelflag;

        modelflag = per_cpu(cpu_modelflag, cpu);
        *max = 0;
        switch (per_cpu(cpu_model, cpu) >> 16) {
        case X86_VENDOR_INTEL:
                if ((cpu_intel_range[index].model & modelflag) &&
                    (cpu_intel_range[index].flag & flag)) {
                        *min = cpu_intel_range[index].min;
                        *max = cpu_intel_range[index].max;
                }
                break;
        case X86_VENDOR_AMD:
                if (cpu_amd_range[index].flag & flag) {
                        *min = cpu_amd_range[index].min;
                        *max = cpu_amd_range[index].max;
                }
                break;
        }

        return *max;
}

/* This function can also be called with seq = NULL for printk */
static void print_cpu_data(struct seq_file *seq, unsigned type,
                           u32 low, u32 high)
{
        struct cpu_private *priv;
        u64 val = high;

        if (seq) {
                priv = seq->private;
                if (priv->file) {
                        val = (val << 32) | low;
                        seq_printf(seq, "0x%llx\n", val);
                } else
                        seq_printf(seq, " %08x: %08x_%08x\n",
                                   type, high, low);
        } else
                printk(KERN_INFO " %08x: %08x_%08x\n", type, high, low);
}

/* This function can also be called with seq = NULL for printk */
static void print_msr(struct seq_file *seq, unsigned cpu, unsigned flag)
{
        unsigned msr, msr_min, msr_max;
        struct cpu_private *priv;
        u32 low, high;
        int i, range;

        if (seq) {
                priv = seq->private;
                if (priv->file) {
                        if (!rdmsr_safe_on_cpu(priv->cpu, priv->reg,
                                               &low, &high))
                                print_cpu_data(seq, priv->reg, low, high);
                        return;
                }
        }

        range = get_cpu_range_count(cpu);
        for (i = 0; i < range; i++) {
                if (!get_cpu_range(cpu, &msr_min, &msr_max, i, flag))
                        continue;

                for (msr = msr_min; msr <= msr_max; msr++) {
                        if (rdmsr_safe_on_cpu(cpu, msr, &low, &high))
                                continue;

                        print_cpu_data(seq, msr, low, high);
                }
        }
}

static void print_tss(void *arg)
{
        struct pt_regs *regs = task_pt_regs(current);
        struct seq_file *seq = arg;
        unsigned int seg;

        seq_printf(seq, " RAX\t: %016lx\n", regs->ax);
        seq_printf(seq, " RBX\t: %016lx\n", regs->bx);
        seq_printf(seq, " RCX\t: %016lx\n", regs->cx);
        seq_printf(seq, " RDX\t: %016lx\n", regs->dx);

        seq_printf(seq, " RSI\t: %016lx\n", regs->si);
        seq_printf(seq, " RDI\t: %016lx\n", regs->di);
        seq_printf(seq, " RBP\t: %016lx\n", regs->bp);
        seq_printf(seq, " ESP\t: %016lx\n", regs->sp);

#ifdef CONFIG_X86_64
        seq_printf(seq, " R08\t: %016lx\n", regs->r8);
        seq_printf(seq, " R09\t: %016lx\n", regs->r9);
        seq_printf(seq, " R10\t: %016lx\n", regs->r10);
        seq_printf(seq, " R11\t: %016lx\n", regs->r11);
        seq_printf(seq, " R12\t: %016lx\n", regs->r12);
        seq_printf(seq, " R13\t: %016lx\n", regs->r13);
        seq_printf(seq, " R14\t: %016lx\n", regs->r14);
        seq_printf(seq, " R15\t: %016lx\n", regs->r15);
#endif

        asm("movl %%cs,%0" : "=r" (seg));
        seq_printf(seq, " CS\t: %04x\n", seg);
        asm("movl %%ds,%0" : "=r" (seg));
        seq_printf(seq, " DS\t: %04x\n", seg);
        seq_printf(seq, " SS\t: %04lx\n", regs->ss & 0xffff);
        asm("movl %%es,%0" : "=r" (seg));
        seq_printf(seq, " ES\t: %04x\n", seg);
        asm("movl %%fs,%0" : "=r" (seg));
        seq_printf(seq, " FS\t: %04x\n", seg);
        asm("movl %%gs,%0" : "=r" (seg));
        seq_printf(seq, " GS\t: %04x\n", seg);

        seq_printf(seq, " EFLAGS\t: %016lx\n", regs->flags);

        seq_printf(seq, " EIP\t: %016lx\n", regs->ip);
}

static void print_cr(void *arg)
{
        struct seq_file *seq = arg;

        seq_printf(seq, " cr0\t: %016lx\n", read_cr0());
        seq_printf(seq, " cr2\t: %016lx\n", read_cr2());
        seq_printf(seq, " cr3\t: %016lx\n", read_cr3());
        seq_printf(seq, " cr4\t: %016lx\n", read_cr4_safe());
#ifdef CONFIG_X86_64
        seq_printf(seq, " cr8\t: %016lx\n", read_cr8());
#endif
}

static void print_desc_ptr(char *str, struct seq_file *seq, struct desc_ptr dt)
{
        seq_printf(seq, " %s\t: %016llx\n", str, (u64)(dt.address | dt.size));
}

static void print_dt(void *seq)
{
        struct desc_ptr dt;
        unsigned long ldt;

        /* IDT */
        store_idt((struct desc_ptr *)&dt);
        print_desc_ptr("IDT", seq, dt);

        /* GDT */
        store_gdt((struct desc_ptr *)&dt);
        print_desc_ptr("GDT", seq, dt);

        /* LDT */
        store_ldt(ldt);
        seq_printf(seq, " LDT\t: %016lx\n", ldt);

        /* TR */
        store_tr(ldt);
        seq_printf(seq, " TR\t: %016lx\n", ldt);
}

static void print_dr(void *arg)
{
        struct seq_file *seq = arg;
        unsigned long dr;
        int i;

        for (i = 0; i < 8; i++) {
                /* Ignore db4, db5 */
                if ((i == 4) || (i == 5))
                        continue;
                get_debugreg(dr, i);
                seq_printf(seq, " dr%d\t: %016lx\n", i, dr);
        }

        seq_printf(seq, "\n MSR\t:\n");
}

static void print_apic(void *arg)
{
        struct seq_file *seq = arg;

#ifdef CONFIG_X86_LOCAL_APIC
        seq_printf(seq, " LAPIC\t:\n");
        seq_printf(seq, " ID\t\t: %08x\n", apic_read(APIC_ID) >> 24);
        seq_printf(seq, " LVR\t\t: %08x\n", apic_read(APIC_LVR));
        seq_printf(seq, " TASKPRI\t: %08x\n", apic_read(APIC_TASKPRI));
        seq_printf(seq, " ARBPRI\t\t: %08x\n", apic_read(APIC_ARBPRI));
        seq_printf(seq, " PROCPRI\t: %08x\n", apic_read(APIC_PROCPRI));
        seq_printf(seq, " LDR\t\t: %08x\n", apic_read(APIC_LDR));
        seq_printf(seq, " DFR\t\t: %08x\n", apic_read(APIC_DFR));
        seq_printf(seq, " SPIV\t\t: %08x\n", apic_read(APIC_SPIV));
        seq_printf(seq, " ISR\t\t: %08x\n", apic_read(APIC_ISR));
        seq_printf(seq, " ESR\t\t: %08x\n", apic_read(APIC_ESR));
        seq_printf(seq, " ICR\t\t: %08x\n", apic_read(APIC_ICR));
        seq_printf(seq, " ICR2\t\t: %08x\n", apic_read(APIC_ICR2));
        seq_printf(seq, " LVTT\t\t: %08x\n", apic_read(APIC_LVTT));
        seq_printf(seq, " LVTTHMR\t: %08x\n", apic_read(APIC_LVTTHMR));
        seq_printf(seq, " LVTPC\t\t: %08x\n", apic_read(APIC_LVTPC));
        seq_printf(seq, " LVT0\t\t: %08x\n", apic_read(APIC_LVT0));
        seq_printf(seq, " LVT1\t\t: %08x\n", apic_read(APIC_LVT1));
        seq_printf(seq, " LVTERR\t\t: %08x\n", apic_read(APIC_LVTERR));
        seq_printf(seq, " TMICT\t\t: %08x\n", apic_read(APIC_TMICT));
        seq_printf(seq, " TMCCT\t\t: %08x\n", apic_read(APIC_TMCCT));
        seq_printf(seq, " TDCR\t\t: %08x\n", apic_read(APIC_TDCR));
#endif /* CONFIG_X86_LOCAL_APIC */

        seq_printf(seq, "\n MSR\t:\n");
}
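
/*
 * One show() call produces the whole file: the tss/cr/dt groups are
 * sampled on the target CPU via smp_call_function_single(); the "index"
 * file of the debug and apic groups first dumps the debug registers or
 * local APIC state and then the matching MSRs; every other group is a
 * plain MSR dump.
 */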
static int cpu_seq_show(struct seq_file *seq, void *v)
{
        struct cpu_private *priv = seq->private;

        if (priv == NULL)
                return -EINVAL;

        switch (cpu_base[priv->type].flag) {
        case CPU_TSS:
                smp_call_function_single(priv->cpu, print_tss, seq, 1);
                break;
        case CPU_CR:
                smp_call_function_single(priv->cpu, print_cr, seq, 1);
                break;
        case CPU_DT:
                smp_call_function_single(priv->cpu, print_dt, seq, 1);
                break;
        case CPU_DEBUG:
                if (priv->file == CPU_INDEX_BIT)
                        smp_call_function_single(priv->cpu, print_dr, seq, 1);
                print_msr(seq, priv->cpu, cpu_base[priv->type].flag);
                break;
        case CPU_APIC:
                if (priv->file == CPU_INDEX_BIT)
                        smp_call_function_single(priv->cpu, print_apic, seq, 1);
                print_msr(seq, priv->cpu, cpu_base[priv->type].flag);
                break;
        default:
                print_msr(seq, priv->cpu, cpu_base[priv->type].flag);
                break;
        }
        seq_printf(seq, "\n");

        return 0;
}

static void *cpu_seq_start(struct seq_file *seq, loff_t *pos)
{
        if (*pos == 0) /* One time is enough ;-) */
                return seq;

        return NULL;
}

static void *cpu_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
        (*pos)++;

        return cpu_seq_start(seq, pos);
}

static void cpu_seq_stop(struct seq_file *seq, void *v)
{
}

static const struct seq_operations cpu_seq_ops = {
        .start          = cpu_seq_start,
        .next           = cpu_seq_next,
        .stop           = cpu_seq_stop,
        .show           = cpu_seq_show,
};

static int cpu_seq_open(struct inode *inode, struct file *file)
{
        struct cpu_private *priv = inode->i_private;
        struct seq_file *seq;
        int err;

        err = seq_open(file, &cpu_seq_ops);
        if (!err) {
                seq = file->private_data;
                seq->private = priv;
        }

        return err;
}
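
/*
 * Write path: a write is accepted only when both the register group
 * (only "pmc" above) and the file (only "value") are marked writable,
 * and only MSR-type groups are actually written, via wrmsr_safe_on_cpu().
 */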
static int write_msr(struct cpu_private *priv, u64 val)
{
        u32 low, high;

        high = (val >> 32) & 0xffffffff;
        low = val & 0xffffffff;

        if (!wrmsr_safe_on_cpu(priv->cpu, priv->reg, low, high))
                return 0;

        return -EPERM;
}

static int write_cpu_register(struct cpu_private *priv, const char *buf)
{
        int ret = -EPERM;
        u64 val;

        ret = strict_strtoull(buf, 0, &val);
        if (ret < 0)
                return ret;

        /* Supporting only MSRs */
        if (priv->type < CPU_TSS_BIT)
                return write_msr(priv, val);

        return ret;
}

static ssize_t cpu_write(struct file *file, const char __user *ubuf,
                         size_t count, loff_t *off)
{
        struct seq_file *seq = file->private_data;
        struct cpu_private *priv = seq->private;
        char buf[19];

        if ((priv == NULL) || (count >= sizeof(buf)))
                return -EINVAL;

        if (copy_from_user(&buf, ubuf, count))
                return -EFAULT;

        buf[count] = 0;

        if ((cpu_base[priv->type].write) && (cpu_file[priv->file].write))
                if (!write_cpu_register(priv, buf))
                        return count;

        return -EACCES;
}

static const struct file_operations cpu_fops = {
        .owner          = THIS_MODULE,
        .open           = cpu_seq_open,
        .read           = seq_read,
        .write          = cpu_write,
        .llseek         = seq_lseek,
        .release        = seq_release,
};

static int cpu_create_file(unsigned cpu, unsigned type, unsigned reg,
                           unsigned file, struct dentry *dentry)
{
        struct cpu_private *priv = NULL;

        /* Already initialized */
        if (file == CPU_INDEX_BIT)
                if (per_cpu(cpu_arr[type].init, cpu))
                        return 0;

        priv = kzalloc(sizeof(*priv), GFP_KERNEL);
        if (priv == NULL)
                return -ENOMEM;

        priv->cpu = cpu;
        priv->type = type;
        priv->reg = reg;
        priv->file = file;
        mutex_lock(&cpu_debug_lock);
        per_cpu(priv_arr[type], cpu) = priv;
        per_cpu(cpu_priv_count, cpu)++;
        mutex_unlock(&cpu_debug_lock);

        if (file)
                debugfs_create_file(cpu_file[file].name, S_IRUGO,
                                    dentry, (void *)priv, &cpu_fops);
        else {
                debugfs_create_file(cpu_base[type].name, S_IRUGO,
                                    per_cpu(cpu_arr[type].dentry, cpu),
                                    (void *)priv, &cpu_fops);
                mutex_lock(&cpu_debug_lock);
                per_cpu(cpu_arr[type].init, cpu) = 1;
                mutex_unlock(&cpu_debug_lock);
        }

        return 0;
}

static int cpu_init_regfiles(unsigned cpu, unsigned int type, unsigned reg,
                             struct dentry *dentry)
{
        unsigned file;
        int err = 0;

        for (file = 0; file < ARRAY_SIZE(cpu_file); file++) {
                err = cpu_create_file(cpu, type, reg, file, dentry);
                if (err)
                        return err;
        }

        return err;
}

static int cpu_init_msr(unsigned cpu, unsigned type, struct dentry *dentry)
{
        struct dentry *cpu_dentry = NULL;
        unsigned reg, reg_min, reg_max;
        int i, range, err = 0;
        char reg_dir[12];
        u32 low, high;

        range = get_cpu_range_count(cpu);

        for (i = 0; i < range; i++) {
                if (!get_cpu_range(cpu, &reg_min, &reg_max, i,
                                   cpu_base[type].flag))
                        continue;

                for (reg = reg_min; reg <= reg_max; reg++) {
                        if (rdmsr_safe_on_cpu(cpu, reg, &low, &high))
                                continue;

                        sprintf(reg_dir, "0x%x", reg);
                        cpu_dentry = debugfs_create_dir(reg_dir, dentry);
                        err = cpu_init_regfiles(cpu, type, reg, cpu_dentry);
                        if (err)
                                return err;
                }
        }

        return err;
}

static int cpu_init_allreg(unsigned cpu, struct dentry *dentry)
{
        struct dentry *cpu_dentry = NULL;
        unsigned type;
        int err = 0;

        for (type = 0; type < ARRAY_SIZE(cpu_base) - 1; type++) {
                if (!is_typeflag_valid(cpu, cpu_base[type].flag))
                        continue;
                cpu_dentry = debugfs_create_dir(cpu_base[type].name, dentry);
                per_cpu(cpu_arr[type].dentry, cpu) = cpu_dentry;

                if (type < CPU_TSS_BIT)
                        err = cpu_init_msr(cpu, type, cpu_dentry);
                else
                        err = cpu_create_file(cpu, type, 0, CPU_INDEX_BIT,
                                              cpu_dentry);
                if (err)
                        return err;
        }

        return err;
}
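
/*
 * Resulting debugfs layout under <arch debugfs dir>/cpu:
 *
 *   cpu<N>/<group>/<group>          dump of the whole group (its MSRs,
 *                                   or the tss/cr/dt state)
 *   cpu<N>/<group>/0x<msr>/value    one MSR as a raw 64-bit value,
 *                                   writable where the group allows it
 */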
static int cpu_init_cpu(void)
{
        struct dentry *cpu_dentry = NULL;
        struct cpuinfo_x86 *cpui;
        char cpu_dir[12];
        unsigned cpu;
        int err = 0;

        for (cpu = 0; cpu < nr_cpu_ids; cpu++) {
                cpui = &cpu_data(cpu);
                if (!cpu_has(cpui, X86_FEATURE_MSR))
                        continue;
                per_cpu(cpu_model, cpu) = ((cpui->x86_vendor << 16) |
                                           (cpui->x86 << 8) |
                                           (cpui->x86_model));
                per_cpu(cpu_modelflag, cpu) = get_cpu_modelflag(cpu);

                sprintf(cpu_dir, "cpu%d", cpu);
                cpu_dentry = debugfs_create_dir(cpu_dir, cpu_debugfs_dir);
                err = cpu_init_allreg(cpu, cpu_dentry);

                pr_info("cpu%d(%d) debug files %d\n",
                        cpu, nr_cpu_ids, per_cpu(cpu_priv_count, cpu));
                if (per_cpu(cpu_priv_count, cpu) > MAX_CPU_FILES) {
                        pr_err("Register files count %d exceeds limit %d\n",
                               per_cpu(cpu_priv_count, cpu), MAX_CPU_FILES);
                        per_cpu(cpu_priv_count, cpu) = MAX_CPU_FILES;
                        err = -ENFILE;
                }
                if (err)
                        return err;
        }

        return err;
}

static int __init cpu_debug_init(void)
{
        cpu_debugfs_dir = debugfs_create_dir("cpu", arch_debugfs_dir);

        return cpu_init_cpu();
}

static void __exit cpu_debug_exit(void)
{
        int i, cpu;

        if (cpu_debugfs_dir)
                debugfs_remove_recursive(cpu_debugfs_dir);

        for (cpu = 0; cpu < nr_cpu_ids; cpu++)
                for (i = 0; i < per_cpu(cpu_priv_count, cpu); i++)
                        kfree(per_cpu(priv_arr[i], cpu));
}

module_init(cpu_debug_init);
module_exit(cpu_debug_exit);

MODULE_AUTHOR("Jaswinder Singh Rajput");
MODULE_DESCRIPTION("CPU Debug module");
MODULE_LICENSE("GPL");