/* mmio-mod.c */
  1. /*
  2. * This program is free software; you can redistribute it and/or modify
  3. * it under the terms of the GNU General Public License as published by
  4. * the Free Software Foundation; either version 2 of the License, or
  5. * (at your option) any later version.
  6. *
  7. * This program is distributed in the hope that it will be useful,
  8. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  9. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  10. * GNU General Public License for more details.
  11. *
  12. * You should have received a copy of the GNU General Public License
  13. * along with this program; if not, write to the Free Software
  14. * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
  15. *
  16. * Copyright (C) IBM Corporation, 2005
  17. * Jeff Muizelaar, 2006, 2007
  18. * Pekka Paalanen, 2008 <pq@iki.fi>
  19. *
  20. * Derived from the read-mod example from relay-examples by Tom Zanussi.
  21. */
  22. #define DEBUG 1
  23. #include <linux/module.h>
  24. #include <linux/debugfs.h>
  25. #include <linux/uaccess.h>
  26. #include <asm/io.h>
  27. #include <linux/version.h>
  28. #include <linux/kallsyms.h>
  29. #include <asm/pgtable.h>
  30. #include <linux/mmiotrace.h>
  31. #include <asm/e820.h> /* for ISA_START_ADDRESS */
  32. #include <asm/atomic.h>
  33. #include <linux/percpu.h>
  34. #include "pf_in.h"
  35. #define NAME "mmiotrace: "
  36. /* This app's relay channel files will appear in /debug/mmio-trace */
  37. static const char APP_DIR[] = "mmio-trace";
  38. /* the marker injection file in /debug/APP_DIR */
  39. static const char MARKER_FILE[] = "mmio-marker";
/*
 * Per-cpu record of the most recent kmmio fault, used to pair the
 * pre() and post() callbacks and to detect unmatched nesting.
 */
struct trap_reason {
	unsigned long addr;	/* MMIO address that faulted */
	unsigned long ip;	/* instruction pointer at the fault */
	enum reason_type type;	/* decoded access type (read/write/imm/unknown) */
	int active_traces;	/* sanity counter; must be 0 or 1 per cpu */
};
/*
 * Bookkeeping for one traced ioremap mapping. Lives on trace_list
 * (protected by trace_lock) until iounmap or disable_mmiotrace().
 */
struct remap_trace {
	struct list_head list;		/* link on trace_list */
	struct kmmio_probe probe;	/* armed probe covering this mapping */
	unsigned long phys;		/* physical base of the mapping */
	unsigned long id;		/* unique mapping id (from next_id) */
};
static const size_t subbuf_size = 256*1024;

/* Accessed per-cpu. */
static DEFINE_PER_CPU(struct trap_reason, pf_reason);	/* last fault info */
static DEFINE_PER_CPU(struct mm_io_header_rw, cpu_trace); /* staged record */

#if 0 /* XXX: no way gather this info anymore */
/* Access to this is not per-cpu. */
static DEFINE_PER_CPU(atomic_t, dropped);
#endif

static struct dentry *dir;		/* /debug/mmio-trace directory */
static struct dentry *marker_file;	/* marker injection file (may be NULL) */

static DEFINE_MUTEX(mmiotrace_mutex);	/* serializes enable/disable */
static DEFINE_SPINLOCK(trace_lock);	/* guards trace_list and enabled flag */
static atomic_t mmiotrace_enabled;	/* read via is_enabled() */
static LIST_HEAD(trace_list);		/* struct remap_trace */
  66. /*
  67. * Locking in this file:
  68. * - mmiotrace_mutex enforces enable/disable_mmiotrace() critical sections.
  69. * - mmiotrace_enabled may be modified only when holding mmiotrace_mutex
  70. * and trace_lock.
  71. * - Routines depending on is_enabled() must take trace_lock.
  72. * - trace_list users must hold trace_lock.
  73. * - is_enabled() guarantees that chan is valid.
  74. * - pre/post callbacks assume the effect of is_enabled() being true.
  75. */
  76. /* module parameters */
  77. static unsigned int n_subbufs = 32*4;
  78. static unsigned long filter_offset;
  79. static int nommiotrace;
  80. static int ISA_trace;
  81. static int trace_pc;
  82. module_param(n_subbufs, uint, 0);
  83. module_param(filter_offset, ulong, 0);
  84. module_param(nommiotrace, bool, 0);
  85. module_param(ISA_trace, bool, 0);
  86. module_param(trace_pc, bool, 0);
  87. MODULE_PARM_DESC(n_subbufs, "Number of 256kB buffers, default 128.");
  88. MODULE_PARM_DESC(filter_offset, "Start address of traced mappings.");
  89. MODULE_PARM_DESC(nommiotrace, "Disable actual MMIO tracing.");
  90. MODULE_PARM_DESC(ISA_trace, "Do not exclude the low ISA range.");
  91. MODULE_PARM_DESC(trace_pc, "Record address of faulting instructions.");
  92. static bool is_enabled(void)
  93. {
  94. return atomic_read(&mmiotrace_enabled);
  95. }
  96. /*
  97. * Write callback for the debugfs entry:
  98. * Read a marker and write it to the mmio trace log
  99. */
/*
 * Write callback for the debugfs entry:
 * Read a marker and write it to the mmio trace log
 *
 * Returns the number of marker bytes consumed, or a negative errno.
 * While the relay path below is compiled out, every write fails with
 * -EINVAL. *ppos is intentionally ignored (markers are append-only).
 */
static ssize_t write_marker(struct file *file, const char __user *buffer,
						size_t count, loff_t *ppos)
{
	char *event = NULL;
	struct mm_io_header *headp;
	/* Clamp the user-supplied length so the allocation stays bounded. */
	ssize_t len = (count > 65535) ? 65535 : count;

	/* One buffer holds the binary header followed by the marker text. */
	event = kzalloc(sizeof(*headp) + len, GFP_KERNEL);
	if (!event)
		return -ENOMEM;

	headp = (struct mm_io_header *)event;
	headp->type = MMIO_MAGIC | (MMIO_MARKER << MMIO_OPCODE_SHIFT);
	headp->data_len = len;

	if (copy_from_user(event + sizeof(*headp), buffer, len)) {
		kfree(event);
		return -EFAULT;
	}

	spin_lock_irq(&trace_lock);
#if 0 /* XXX: convert this to use tracing */
	if (is_enabled())
		relay_write(chan, event, sizeof(*headp) + len);
	else
#endif
		/* With the block above compiled out, this always executes. */
		len = -EINVAL;
	spin_unlock_irq(&trace_lock);
	kfree(event);
	return len;
}
/*
 * Dump the pte backing @address for diagnostics.
 * BUGs on large pages: per-pte arming cannot handle them here.
 */
static void print_pte(unsigned long address)
{
	int level;
	pte_t *pte = lookup_address(address, &level);

	if (!pte) {
		pr_err(NAME "Error in %s: no pte for page 0x%08lx\n",
							__func__, address);
		return;
	}

	if (level == PG_LEVEL_2M) {
		/* 2M (4M on non-PAE i386) large pages are unsupported. */
		pr_emerg(NAME "4MB pages are not currently supported: "
							"0x%08lx\n", address);
		BUG();
	}
	/* Print the raw pte value and its present bit. */
	pr_info(NAME "pte for 0x%lx: 0x%lx 0x%lx\n", address, pte_val(*pte),
		pte_val(*pte) & _PAGE_PRESENT);
}
/*
 * For some reason the pre/post pairs have been called in an
 * unmatched order. Report and die.
 *
 * Dumps both the current and the previously recorded fault, the pte,
 * the symbolized instruction pointers and the GP registers, then BUGs.
 */
static void die_kmmio_nesting_error(struct pt_regs *regs, unsigned long addr)
{
	const struct trap_reason *my_reason = &get_cpu_var(pf_reason);

	pr_emerg(NAME "unexpected fault for address: 0x%08lx, "
					"last fault for address: 0x%08lx\n",
					addr, my_reason->addr);
	print_pte(addr);
	/* Resolve both instruction pointers to symbol names for the report. */
	print_symbol(KERN_EMERG "faulting IP is at %s\n", regs->ip);
	print_symbol(KERN_EMERG "last faulting IP was at %s\n", my_reason->ip);
#ifdef __i386__
	pr_emerg("eax: %08lx ebx: %08lx ecx: %08lx edx: %08lx\n",
			regs->ax, regs->bx, regs->cx, regs->dx);
	pr_emerg("esi: %08lx edi: %08lx ebp: %08lx esp: %08lx\n",
			regs->si, regs->di, regs->bp, regs->sp);
#else
	pr_emerg("rax: %016lx rcx: %016lx rdx: %016lx\n",
			regs->ax, regs->cx, regs->dx);
	pr_emerg("rsi: %016lx rdi: %016lx rbp: %016lx rsp: %016lx\n",
			regs->si, regs->di, regs->bp, regs->sp);
#endif
	/* Unreached in practice (BUG below), kept for symmetry. */
	put_cpu_var(pf_reason);
	BUG();
}
/*
 * kmmio pre-fault callback: called just before the faulting MMIO
 * instruction is single-stepped. Decodes the access and stages a trace
 * record in the per-cpu cpu_trace; post() completes and emits it.
 * get_cpu_var()/put_cpu_var() bracket the whole body, so preemption is
 * off while the per-cpu state is in use.
 */
static void pre(struct kmmio_probe *p, struct pt_regs *regs,
						unsigned long addr)
{
	struct trap_reason *my_reason = &get_cpu_var(pf_reason);
	struct mm_io_header_rw *my_trace = &get_cpu_var(cpu_trace);
	const unsigned long instptr = instruction_pointer(regs);
	const enum reason_type type = get_ins_type(instptr);

	/* it doesn't make sense to have more than one active trace per cpu */
	if (my_reason->active_traces)
		die_kmmio_nesting_error(regs, addr);
	else
		my_reason->active_traces++;

	/* Remember the fault so post() can finish the record. */
	my_reason->type = type;
	my_reason->addr = addr;
	my_reason->ip = instptr;

	my_trace->header.type = MMIO_MAGIC;
	my_trace->header.pid = 0;
	my_trace->header.data_len = sizeof(struct mm_io_rw);
	my_trace->rw.address = addr;
	/*
	 * struct remap_trace *trace = p->user_data;
	 * phys = addr - trace->probe.addr + trace->phys;
	 */

	/*
	 * Only record the program counter when requested.
	 * It may taint clean-room reverse engineering.
	 */
	if (trace_pc)
		my_trace->rw.pc = instptr;
	else
		my_trace->rw.pc = 0;

	/*
	 * XXX: the timestamp recorded will be *after* the tracing has been
	 * done, not at the time we hit the instruction. SMP implications
	 * on event ordering?
	 */

	switch (type) {
	case REG_READ:
		/* Value is unknown until after the access; post() fills it. */
		my_trace->header.type |=
			(MMIO_READ << MMIO_OPCODE_SHIFT) |
			(get_ins_mem_width(instptr) << MMIO_WIDTH_SHIFT);
		break;
	case REG_WRITE:
		my_trace->header.type |=
			(MMIO_WRITE << MMIO_OPCODE_SHIFT) |
			(get_ins_mem_width(instptr) << MMIO_WIDTH_SHIFT);
		my_trace->rw.value = get_ins_reg_val(instptr, regs);
		break;
	case IMM_WRITE:
		my_trace->header.type |=
			(MMIO_WRITE << MMIO_OPCODE_SHIFT) |
			(get_ins_mem_width(instptr) << MMIO_WIDTH_SHIFT);
		my_trace->rw.value = get_ins_imm_val(instptr);
		break;
	default:
		{
			unsigned char *ip = (unsigned char *)instptr;
			/* Undecodable: log the first three opcode bytes. */
			my_trace->header.type |=
				(MMIO_UNKNOWN_OP << MMIO_OPCODE_SHIFT);
			my_trace->rw.value = (*ip) << 16 | *(ip + 1) << 8 |
								*(ip + 2);
		}
	}
	put_cpu_var(cpu_trace);
	put_cpu_var(pf_reason);
}
/*
 * kmmio post-fault callback: called after the faulting instruction has
 * been single-stepped. Completes the record staged by pre() — notably
 * the value a register read produced — and emits it.
 */
static void post(struct kmmio_probe *p, unsigned long condition,
						struct pt_regs *regs)
{
	struct trap_reason *my_reason = &get_cpu_var(pf_reason);
	struct mm_io_header_rw *my_trace = &get_cpu_var(cpu_trace);

	/* this should always return the active_trace count to 0 */
	my_reason->active_traces--;
	if (my_reason->active_traces) {
		pr_emerg(NAME "unexpected post handler");
		BUG();
	}

	switch (my_reason->type) {
	case REG_READ:
		/* The destination register now holds the value read. */
		my_trace->rw.value = get_ins_reg_val(my_reason->ip, regs);
		break;
	default:
		break;
	}

	/*
	 * XXX: Several required values are ignored:
	 * - mapping id
	 * - program counter
	 * Also the address should be physical, not virtual.
	 */
	mmio_trace_record(my_trace->header.type, my_trace->rw.address,
							my_trace->rw.value);
	put_cpu_var(cpu_trace);
	put_cpu_var(pf_reason);
}
  266. static void ioremap_trace_core(unsigned long offset, unsigned long size,
  267. void __iomem *addr)
  268. {
  269. static atomic_t next_id;
  270. struct remap_trace *trace = kmalloc(sizeof(*trace), GFP_KERNEL);
  271. struct mm_io_header_map event = {
  272. .header = {
  273. .type = MMIO_MAGIC |
  274. (MMIO_PROBE << MMIO_OPCODE_SHIFT),
  275. .sec = 0,
  276. .nsec = 0,
  277. .pid = 0,
  278. .data_len = sizeof(struct mm_io_map)
  279. },
  280. .map = {
  281. .phys = offset,
  282. .addr = (unsigned long)addr,
  283. .len = size,
  284. .pc = 0
  285. }
  286. };
  287. if (!trace) {
  288. pr_err(NAME "kmalloc failed in ioremap\n");
  289. return;
  290. }
  291. *trace = (struct remap_trace) {
  292. .probe = {
  293. .addr = (unsigned long)addr,
  294. .len = size,
  295. .pre_handler = pre,
  296. .post_handler = post,
  297. .user_data = trace
  298. },
  299. .phys = offset,
  300. .id = atomic_inc_return(&next_id)
  301. };
  302. spin_lock_irq(&trace_lock);
  303. if (!is_enabled())
  304. goto not_enabled;
  305. /*
  306. * XXX: Insufficient data recorded!
  307. */
  308. mmio_trace_record(event.header.type, event.map.addr, event.map.len);
  309. list_add_tail(&trace->list, &trace_list);
  310. if (!nommiotrace)
  311. register_kmmio_probe(&trace->probe);
  312. not_enabled:
  313. spin_unlock_irq(&trace_lock);
  314. }
  315. void
  316. mmiotrace_ioremap(unsigned long offset, unsigned long size, void __iomem *addr)
  317. {
  318. if (!is_enabled()) /* recheck and proper locking in *_core() */
  319. return;
  320. pr_debug(NAME "ioremap_*(0x%lx, 0x%lx) = %p\n", offset, size, addr);
  321. if ((filter_offset) && (offset != filter_offset))
  322. return;
  323. ioremap_trace_core(offset, size, addr);
  324. }
/*
 * Record an iounmap and disarm the probe that covered the mapping.
 * The probe memory is freed only after an RCU grace period, outside
 * the spinlock, as unregister_kmmio_probe() requires.
 */
static void iounmap_trace_core(volatile void __iomem *addr)
{
	struct mm_io_header_map event = {
		.header = {
			.type = MMIO_MAGIC |
					(MMIO_UNPROBE << MMIO_OPCODE_SHIFT),
			.sec = 0,
			.nsec = 0,
			.pid = 0,
			.data_len = sizeof(struct mm_io_map)
		},
		.map = {
			.phys = 0,
			.addr = (unsigned long)addr,
			.len = 0,
			.pc = 0
		}
	};
	struct remap_trace *trace;
	struct remap_trace *tmp;
	struct remap_trace *found_trace = NULL;

	pr_debug(NAME "Unmapping %p.\n", addr);
	spin_lock_irq(&trace_lock);
	if (!is_enabled())
		goto not_enabled;

	/* Find and detach the entry registered for this virtual address. */
	list_for_each_entry_safe(trace, tmp, &trace_list, list) {
		if ((unsigned long)addr == trace->probe.addr) {
			if (!nommiotrace)
				unregister_kmmio_probe(&trace->probe);
			list_del(&trace->list);
			found_trace = trace;
			break;
		}
	}
	/* Log the unmap; id -1 means no matching mapping was found. */
	mmio_trace_record(event.header.type, event.map.addr,
					found_trace ? found_trace->id : -1);
not_enabled:
	spin_unlock_irq(&trace_lock);
	if (found_trace) {
		synchronize_rcu(); /* unregister_kmmio_probe() requirement */
		kfree(found_trace);
	}
}
  368. void mmiotrace_iounmap(volatile void __iomem *addr)
  369. {
  370. might_sleep();
  371. if (is_enabled()) /* recheck and proper locking in *_core() */
  372. iounmap_trace_core(addr);
  373. }
/*
 * Unregister every remaining probe and free the trace list.
 * Two passes on purpose: first unregister all probes, then wait one
 * RCU grace period so no kmmio handler can still reference an entry,
 * and only then release the memory.
 */
static void clear_trace_list(void)
{
	struct remap_trace *trace;
	struct remap_trace *tmp;

	/*
	 * No locking required, because the caller ensures we are in a
	 * critical section via mutex, and is_enabled() is false,
	 * i.e. nothing can traverse or modify this list.
	 * Caller also ensures is_enabled() cannot change.
	 */
	list_for_each_entry(trace, &trace_list, list) {
		pr_notice(NAME "purging non-iounmapped "
					"trace @0x%08lx, size 0x%lx.\n",
					trace->probe.addr, trace->probe.len);
		if (!nommiotrace)
			unregister_kmmio_probe(&trace->probe);
	}
	synchronize_rcu(); /* unregister_kmmio_probe() requirement */
	list_for_each_entry_safe(trace, tmp, &trace_list, list) {
		list_del(&trace->list);
		kfree(trace);
	}
}
  397. static struct file_operations fops_marker = {
  398. .owner = THIS_MODULE,
  399. .write = write_marker
  400. };
/*
 * Turn tracing on. Serialized against disable_mmiotrace() by
 * mmiotrace_mutex; the enabled flag itself flips under trace_lock so
 * routines holding trace_lock observe a stable is_enabled().
 */
void enable_mmiotrace(void)
{
	mutex_lock(&mmiotrace_mutex);
	if (is_enabled())
		goto out;

	reference_kmmio();
#if 0 /* XXX: tracing does not support text entries */
	marker_file = debugfs_create_file("marker", 0660, dir, NULL,
								&fops_marker);
#endif
	/*
	 * NOTE(review): with the creation above compiled out, marker_file
	 * stays NULL, so this error is logged on every enable — confirm
	 * that is intended while the marker file is disabled.
	 */
	if (!marker_file)
		pr_err(NAME "marker file creation failed.\n");

	if (nommiotrace)
		pr_info(NAME "MMIO tracing disabled.\n");
	if (ISA_trace)
		pr_warning(NAME "Warning! low ISA range will be traced.\n");
	spin_lock_irq(&trace_lock);
	atomic_inc(&mmiotrace_enabled);
	spin_unlock_irq(&trace_lock);
	pr_info(NAME "enabled.\n");
out:
	mutex_unlock(&mmiotrace_mutex);
}
/*
 * Turn tracing off and tear down all remaining probes. Must be able
 * to sleep: clear_trace_list() calls synchronize_rcu().
 */
void disable_mmiotrace(void)
{
	mutex_lock(&mmiotrace_mutex);
	if (!is_enabled())
		goto out;

	/* Flip the flag under trace_lock so lock holders see it atomically. */
	spin_lock_irq(&trace_lock);
	atomic_dec(&mmiotrace_enabled);
	BUG_ON(is_enabled());
	spin_unlock_irq(&trace_lock);

	clear_trace_list(); /* guarantees: no more kmmio callbacks */
	unreference_kmmio();
	if (marker_file) {
		debugfs_remove(marker_file);
		marker_file = NULL;
	}

	pr_info(NAME "disabled.\n");
out:
	mutex_unlock(&mmiotrace_mutex);
}
  443. int __init init_mmiotrace(void)
  444. {
  445. pr_debug(NAME "load...\n");
  446. if (n_subbufs < 2)
  447. return -EINVAL;
  448. dir = debugfs_create_dir(APP_DIR, NULL);
  449. if (!dir) {
  450. pr_err(NAME "Couldn't create relay app directory.\n");
  451. return -ENOMEM;
  452. }
  453. return 0;
  454. }