mmio-mod.c

/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * Copyright (C) IBM Corporation, 2005
 *               Jeff Muizelaar, 2006, 2007
 *               Pekka Paalanen, 2008 <pq@iki.fi>
 *
 * Derived from the read-mod example from relay-examples by Tom Zanussi.
 */
#include <linux/module.h>
#include <linux/relay.h>
#include <linux/debugfs.h>
#include <linux/proc_fs.h>
#include <asm/io.h>
#include <linux/version.h>
#include <linux/kallsyms.h>
#include <asm/pgtable.h>
#include <linux/mmiotrace.h>
#include <asm/e820.h> /* for ISA_START_ADDRESS */
#include <asm/atomic.h>

#include "kmmio.h"
#include "pf_in.h"

/* This app's relay channel files will appear in /debug/mmio-trace */
#define APP_DIR "mmio-trace"
/* the marker injection file in /proc */
#define MARKER_FILE "mmio-marker"

#define MODULE_NAME "mmiotrace"
struct trap_reason {
	unsigned long addr;
	unsigned long ip;
	enum reason_type type;
	int active_traces;
};

/* Accessed per-cpu. */
static struct trap_reason pf_reason[NR_CPUS];
static struct mm_io_header_rw cpu_trace[NR_CPUS];

/* Access to this is not per-cpu. */
static atomic_t dropped[NR_CPUS];

static struct file_operations mmio_fops = {
	.owner = THIS_MODULE,
};

static const size_t subbuf_size = 256*1024;
static struct rchan *chan;
static struct dentry *dir;
static struct proc_dir_entry *proc_marker_file;

/* module parameters */
static unsigned int n_subbufs = 32*4;
static unsigned long filter_offset;
static int nommiotrace;
static int ISA_trace;
static int trace_pc;

module_param(n_subbufs, uint, 0);
module_param(filter_offset, ulong, 0);
module_param(nommiotrace, bool, 0);
module_param(ISA_trace, bool, 0);
module_param(trace_pc, bool, 0);

MODULE_PARM_DESC(n_subbufs, "Number of 256kB buffers, default 128.");
MODULE_PARM_DESC(filter_offset, "Start address of traced mappings.");
MODULE_PARM_DESC(nommiotrace, "Disable actual MMIO tracing.");
MODULE_PARM_DESC(ISA_trace, "Do not exclude the low ISA range.");
MODULE_PARM_DESC(trace_pc, "Record address of faulting instructions.");
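
/*
 * Example usage (illustrative only, not from the original sources):
 * assuming the module builds as mmiotrace.ko, it could be loaded with e.g.
 *
 *	insmod mmiotrace.ko n_subbufs=64 filter_offset=0xfd000000 trace_pc=1
 *
 * which allocates 64 relay sub-buffers of 256 kB each per cpu, traces only
 * mappings whose physical start address is 0xfd000000 (a made-up address),
 * and records the faulting instruction pointer in each event.
 */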
static void record_timestamp(struct mm_io_header *header)
{
	struct timespec now;
	getnstimeofday(&now);
	header->sec = now.tv_sec;
	header->nsec = now.tv_nsec;
}

/*
 * Write callback for the /proc entry:
 * Read a marker and write it to the mmio trace log
 */
static int write_marker(struct file *file, const char __user *buffer,
					unsigned long count, void *data)
{
	char *event = NULL;
	struct mm_io_header *headp;
	int len = (count > 65535) ? 65535 : count;

	event = kzalloc(sizeof(*headp) + len, GFP_KERNEL);
	if (!event)
		return -ENOMEM;

	headp = (struct mm_io_header *)event;
	headp->type = MMIO_MAGIC | (MMIO_MARKER << MMIO_OPCODE_SHIFT);
	headp->data_len = len;
	record_timestamp(headp);

	if (copy_from_user(event + sizeof(*headp), buffer, len)) {
		kfree(event);
		return -EFAULT;
	}

	relay_write(chan, event, sizeof(*headp) + len);
	kfree(event);
	return len;
}
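
/*
 * Illustration (not part of the original file): with the /proc entry
 * registered in init(), userspace can inject an arbitrary marker string
 * into the trace log, e.g.
 *
 *	echo "frame start" > /proc/mmio-marker
 *
 * The string is wrapped in a struct mm_io_header carrying the MMIO_MARKER
 * opcode and written to the relay channel like any other event.
 */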
static void print_pte(unsigned long address)
{
	int level;
	pte_t *pte = lookup_address(address, &level);

	if (!pte) {
		printk(KERN_ERR "Error in %s: no pte for page 0x%08lx\n",
							__func__, address);
		return;
	}

	if (level == PG_LEVEL_2M) {
		printk(KERN_EMERG MODULE_NAME ": 2M pages are not "
						"currently supported: %lx\n",
						address);
		BUG();
	}

	printk(KERN_DEBUG MODULE_NAME ": pte for 0x%lx: 0x%lx 0x%lx\n",
		address, pte_val(*pte),
		pte_val(*pte) & _PAGE_PRESENT);
}
/*
 * For some reason the pre/post pairs have been called in an
 * unmatched order. Report and die.
 */
static void die_kmmio_nesting_error(struct pt_regs *regs, unsigned long addr)
{
	const unsigned long cpu = smp_processor_id();
	printk(KERN_EMERG MODULE_NAME ": unexpected fault for address: %lx, "
					"last fault for address: %lx\n",
					addr, pf_reason[cpu].addr);
	print_pte(addr);
#ifdef __i386__
	print_symbol(KERN_EMERG "faulting EIP is at %s\n", regs->ip);
	print_symbol(KERN_EMERG "last faulting EIP was at %s\n",
							pf_reason[cpu].ip);
	printk(KERN_EMERG
			"eax: %08lx   ebx: %08lx   ecx: %08lx   edx: %08lx\n",
			regs->ax, regs->bx, regs->cx, regs->dx);
	printk(KERN_EMERG
			"esi: %08lx   edi: %08lx   ebp: %08lx   esp: %08lx\n",
			regs->si, regs->di, regs->bp, regs->sp);
#else
	print_symbol(KERN_EMERG "faulting RIP is at %s\n", regs->ip);
	print_symbol(KERN_EMERG "last faulting RIP was at %s\n",
							pf_reason[cpu].ip);
	printk(KERN_EMERG "rax: %016lx   rcx: %016lx   rdx: %016lx\n",
					regs->ax, regs->cx, regs->dx);
	printk(KERN_EMERG "rsi: %016lx   rdi: %016lx   "
				"rbp: %016lx   rsp: %016lx\n",
				regs->si, regs->di, regs->bp, regs->sp);
#endif
	BUG();
}
static void pre(struct kmmio_probe *p, struct pt_regs *regs,
						unsigned long addr)
{
	const unsigned long cpu = smp_processor_id();
	const unsigned long instptr = instruction_pointer(regs);
	const enum reason_type type = get_ins_type(instptr);

	/* it doesn't make sense to have more than one active trace per cpu */
	if (pf_reason[cpu].active_traces)
		die_kmmio_nesting_error(regs, addr);
	else
		pf_reason[cpu].active_traces++;

	pf_reason[cpu].type = type;
	pf_reason[cpu].addr = addr;
	pf_reason[cpu].ip = instptr;

	cpu_trace[cpu].header.type = MMIO_MAGIC;
	cpu_trace[cpu].header.pid = 0;
	cpu_trace[cpu].header.data_len = sizeof(struct mm_io_rw);
	cpu_trace[cpu].rw.address = addr;

	/*
	 * Only record the program counter when requested.
	 * It may taint clean-room reverse engineering.
	 */
	if (trace_pc)
		cpu_trace[cpu].rw.pc = instptr;
	else
		cpu_trace[cpu].rw.pc = 0;

	record_timestamp(&cpu_trace[cpu].header);

	switch (type) {
	case REG_READ:
		cpu_trace[cpu].header.type |=
			(MMIO_READ << MMIO_OPCODE_SHIFT) |
			(get_ins_mem_width(instptr) << MMIO_WIDTH_SHIFT);
		break;
	case REG_WRITE:
		cpu_trace[cpu].header.type |=
			(MMIO_WRITE << MMIO_OPCODE_SHIFT) |
			(get_ins_mem_width(instptr) << MMIO_WIDTH_SHIFT);
		cpu_trace[cpu].rw.value = get_ins_reg_val(instptr, regs);
		break;
	case IMM_WRITE:
		cpu_trace[cpu].header.type |=
			(MMIO_WRITE << MMIO_OPCODE_SHIFT) |
			(get_ins_mem_width(instptr) << MMIO_WIDTH_SHIFT);
		cpu_trace[cpu].rw.value = get_ins_imm_val(instptr);
		break;
	default:
		{
			unsigned char *ip = (unsigned char *)instptr;
			cpu_trace[cpu].header.type |=
				(MMIO_UNKNOWN_OP << MMIO_OPCODE_SHIFT);
			cpu_trace[cpu].rw.value = (*ip) << 16 |
							*(ip + 1) << 8 |
							*(ip + 2);
		}
	}
}
static void post(struct kmmio_probe *p, unsigned long condition,
						struct pt_regs *regs)
{
	const unsigned long cpu = smp_processor_id();

	/* this should always return the active_trace count to 0 */
	pf_reason[cpu].active_traces--;
	if (pf_reason[cpu].active_traces) {
		printk(KERN_EMERG MODULE_NAME ": unexpected post handler\n");
		BUG();
	}

	switch (pf_reason[cpu].type) {
	case REG_READ:
		cpu_trace[cpu].rw.value = get_ins_reg_val(pf_reason[cpu].ip,
									regs);
		break;
	default:
		break;
	}
	relay_write(chan, &cpu_trace[cpu], sizeof(struct mm_io_header_rw));
}
/*
 * subbuf_start() relay callback.
 *
 * Defined so that we know when events are dropped due to the buffer-full
 * condition.
 */
static int subbuf_start_handler(struct rchan_buf *buf, void *subbuf,
					void *prev_subbuf, size_t prev_padding)
{
	unsigned int cpu = buf->cpu;
	atomic_t *drop = &dropped[cpu];
	int count;

	if (relay_buf_full(buf)) {
		if (atomic_inc_return(drop) == 1) {
			printk(KERN_ERR MODULE_NAME ": cpu %d buffer full!\n",
									cpu);
		}
		return 0;
	} else if ((count = atomic_read(drop))) {
		printk(KERN_ERR MODULE_NAME
					": cpu %d buffer no longer full, "
					"missed %d events.\n",
					cpu, count);
		atomic_sub(count, drop);
	}

	return 1;
}
/* file_create() callback.  Creates relay file in debugfs. */
static struct dentry *create_buf_file_handler(const char *filename,
						struct dentry *parent,
						int mode,
						struct rchan_buf *buf,
						int *is_global)
{
	struct dentry *buf_file;

	mmio_fops.read = relay_file_operations.read;
	mmio_fops.open = relay_file_operations.open;
	mmio_fops.poll = relay_file_operations.poll;
	mmio_fops.mmap = relay_file_operations.mmap;
	mmio_fops.release = relay_file_operations.release;
	mmio_fops.splice_read = relay_file_operations.splice_read;

	buf_file = debugfs_create_file(filename, mode, parent, buf,
								&mmio_fops);

	return buf_file;
}

/* file_remove() default callback.  Removes relay file in debugfs. */
static int remove_buf_file_handler(struct dentry *dentry)
{
	debugfs_remove(dentry);
	return 0;
}

static struct rchan_callbacks relay_callbacks = {
	.subbuf_start = subbuf_start_handler,
	.create_buf_file = create_buf_file_handler,
	.remove_buf_file = remove_buf_file_handler,
};
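
/*
 * Illustration (not from the original sources): the per-cpu relay buffers
 * show up as /debug/mmio-trace/cpu0, cpu1, ... once the channel exists
 * (assuming debugfs is mounted at /debug).  A userspace logger can simply
 * keep reading the binary stream, e.g.
 *
 *	cat /debug/mmio-trace/cpu0 > mydump.bin
 *
 * and decode the mm_io_header records offline.
 */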
/*
 * create_channel - creates channel /debug/APP_DIR/cpuXXX
 * Returns channel on success, NULL otherwise
 */
static struct rchan *create_channel(unsigned size, unsigned n)
{
	return relay_open("cpu", dir, size, n, &relay_callbacks, NULL);
}

/* destroy_channel - destroys channel /debug/APP_DIR/cpuXXX */
static void destroy_channel(void)
{
	if (chan) {
		relay_close(chan);
		chan = NULL;
	}
}

struct remap_trace {
	struct list_head list;
	struct kmmio_probe probe;
};
static LIST_HEAD(trace_list);
static DEFINE_SPINLOCK(trace_list_lock);
static void do_ioremap_trace_core(unsigned long offset, unsigned long size,
							void __iomem *addr)
{
	struct remap_trace *trace = kmalloc(sizeof(*trace), GFP_KERNEL);
	struct mm_io_header_map event = {
		.header = {
			.type = MMIO_MAGIC |
					(MMIO_PROBE << MMIO_OPCODE_SHIFT),
			.sec = 0,
			.nsec = 0,
			.pid = 0,
			.data_len = sizeof(struct mm_io_map)
		},
		.map = {
			.phys = offset,
			.addr = (unsigned long)addr,
			.len = size,
			.pc = 0
		}
	};

	/* Do not dereference a failed allocation. */
	if (!trace) {
		printk(KERN_ERR MODULE_NAME ": kmalloc failed in %s\n",
								__func__);
		return;
	}

	record_timestamp(&event.header);

	*trace = (struct remap_trace) {
		.probe = {
			.addr = (unsigned long)addr,
			.len = size,
			.pre_handler = pre,
			.post_handler = post,
		}
	};

	relay_write(chan, &event, sizeof(event));

	spin_lock(&trace_list_lock);
	list_add_tail(&trace->list, &trace_list);
	spin_unlock(&trace_list_lock);

	if (!nommiotrace)
		register_kmmio_probe(&trace->probe);
}
static void ioremap_trace_core(unsigned long offset, unsigned long size,
							void __iomem *addr)
{
	if ((filter_offset) && (offset != filter_offset))
		return;

	/* Don't trace the low PCI/ISA area, it's always mapped.. */
	if (!ISA_trace && (offset < ISA_END_ADDRESS) &&
					(offset + size > ISA_START_ADDRESS)) {
		printk(KERN_NOTICE MODULE_NAME ": Ignoring map of low "
						"PCI/ISA area (0x%lx-0x%lx)\n",
						offset, offset + size);
		return;
	}
	do_ioremap_trace_core(offset, size, addr);
}

void __iomem *ioremap_cache_trace(unsigned long offset, unsigned long size)
{
	void __iomem *p = ioremap_cache(offset, size);
	printk(KERN_DEBUG MODULE_NAME ": ioremap_cache(0x%lx, 0x%lx) = %p\n",
							offset, size, p);
	ioremap_trace_core(offset, size, p);
	return p;
}
EXPORT_SYMBOL(ioremap_cache_trace);

void __iomem *ioremap_nocache_trace(unsigned long offset, unsigned long size)
{
	void __iomem *p = ioremap_nocache(offset, size);
	printk(KERN_DEBUG MODULE_NAME ": ioremap_nocache(0x%lx, 0x%lx) = %p\n",
							offset, size, p);
	ioremap_trace_core(offset, size, p);
	return p;
}
EXPORT_SYMBOL(ioremap_nocache_trace);
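
/*
 * Example (illustrative only, not part of this module): a driver under
 * trace is expected to have its mapping calls redirected to the exported
 * wrappers above and to iounmap_trace() below, for instance via defines
 * in a private header:
 *
 *	#define ioremap_nocache(off, len)	ioremap_nocache_trace(off, len)
 *	#define ioremap_cache(off, len)		ioremap_cache_trace(off, len)
 *	#define iounmap(addr)			iounmap_trace(addr)
 *
 * so that every MMIO mapping it creates is armed with a kmmio probe and
 * every access through it ends up in the relay log.
 */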
void iounmap_trace(volatile void __iomem *addr)
{
	struct mm_io_header_map event = {
		.header = {
			.type = MMIO_MAGIC |
					(MMIO_UNPROBE << MMIO_OPCODE_SHIFT),
			.sec = 0,
			.nsec = 0,
			.pid = 0,
			.data_len = sizeof(struct mm_io_map)
		},
		.map = {
			.phys = 0,
			.addr = (unsigned long)addr,
			.len = 0,
			.pc = 0
		}
	};
	struct remap_trace *trace;
	struct remap_trace *tmp;

	printk(KERN_DEBUG MODULE_NAME ": Unmapping %p.\n", addr);
	record_timestamp(&event.header);

	spin_lock(&trace_list_lock);
	list_for_each_entry_safe(trace, tmp, &trace_list, list) {
		if ((unsigned long)addr == trace->probe.addr) {
			if (!nommiotrace)
				unregister_kmmio_probe(&trace->probe);
			list_del(&trace->list);
			kfree(trace);
			break;
		}
	}
	spin_unlock(&trace_list_lock);

	relay_write(chan, &event, sizeof(event));
	iounmap(addr);
}
EXPORT_SYMBOL(iounmap_trace);
static void clear_trace_list(void)
{
	struct remap_trace *trace;
	struct remap_trace *tmp;

	spin_lock(&trace_list_lock);
	/* Purge every trace that was never iounmapped, not just the first. */
	list_for_each_entry_safe(trace, tmp, &trace_list, list) {
		printk(KERN_WARNING MODULE_NAME ": purging non-iounmapped "
					"trace @0x%08lx, size 0x%lx.\n",
					trace->probe.addr, trace->probe.len);
		if (!nommiotrace)
			unregister_kmmio_probe(&trace->probe);
		list_del(&trace->list);
		kfree(trace);
	}
	spin_unlock(&trace_list_lock);
}
static int __init init(void)
{
	if (n_subbufs < 2)
		return -EINVAL;

	dir = debugfs_create_dir(APP_DIR, NULL);
	if (!dir) {
		printk(KERN_ERR MODULE_NAME
				": Couldn't create relay app directory.\n");
		return -ENOMEM;
	}

	chan = create_channel(subbuf_size, n_subbufs);
	if (!chan) {
		debugfs_remove(dir);
		printk(KERN_ERR MODULE_NAME
				": relay app channel creation failed\n");
		return -ENOMEM;
	}

	init_kmmio();

	proc_marker_file = create_proc_entry(MARKER_FILE, 0, NULL);
	if (proc_marker_file)
		proc_marker_file->write_proc = write_marker;

	printk(KERN_DEBUG MODULE_NAME ": loaded.\n");
	if (nommiotrace)
		printk(KERN_DEBUG MODULE_NAME ": MMIO tracing disabled.\n");
	if (ISA_trace)
		printk(KERN_WARNING MODULE_NAME
				": Warning! low ISA range will be traced.\n");
	return 0;
}

static void __exit cleanup(void)
{
	printk(KERN_DEBUG MODULE_NAME ": unload...\n");
	clear_trace_list();
	cleanup_kmmio();
	remove_proc_entry(MARKER_FILE, NULL);
	destroy_channel();
	if (dir)
		debugfs_remove(dir);
}

module_init(init);
module_exit(cleanup);
MODULE_LICENSE("GPL");