@@ -49,53 +49,55 @@
 #define PF_RSVD	(1<<3)
 #define PF_INSTR	(1<<4)
 
-#ifdef CONFIG_PAGE_FAULT_HANDLERS
-static HLIST_HEAD(pf_handlers); /* protected by RCU */
-static DEFINE_SPINLOCK(pf_handlers_writer);
+#ifdef CONFIG_MMIOTRACE_HOOKS
+static pf_handler_func mmiotrace_pf_handler; /* protected by RCU */
+static DEFINE_SPINLOCK(mmiotrace_handler_lock);
 
-void register_page_fault_handler(struct pf_handler *new_pfh)
+int mmiotrace_register_pf(pf_handler_func new_pfh)
 {
+	int ret = 0;
 	unsigned long flags;
-	spin_lock_irqsave(&pf_handlers_writer, flags);
-	hlist_add_head_rcu(&new_pfh->hlist, &pf_handlers);
-	spin_unlock_irqrestore(&pf_handlers_writer, flags);
+	spin_lock_irqsave(&mmiotrace_handler_lock, flags);
+	if (mmiotrace_pf_handler)
+		ret = -EBUSY;
+	else
+		mmiotrace_pf_handler = new_pfh;
+	spin_unlock_irqrestore(&mmiotrace_handler_lock, flags);
+	return ret;
 }
-EXPORT_SYMBOL_GPL(register_page_fault_handler);
+EXPORT_SYMBOL_GPL(mmiotrace_register_pf);
 
 /**
- * unregister_page_fault_handler:
+ * mmiotrace_unregister_pf:
  * The caller must ensure @old_pfh is not in use anymore before freeing it.
- * This function does not guarantee it. The list of handlers is protected by
- * RCU, so you can do this by e.g. calling synchronize_rcu().
+ * This function does not guarantee it. The handler function pointer is
+ * protected by RCU, so you can do this by e.g. calling synchronize_rcu().
  */
-void unregister_page_fault_handler(struct pf_handler *old_pfh)
+int mmiotrace_unregister_pf(pf_handler_func old_pfh)
 {
+	int ret = 0;
 	unsigned long flags;
-	spin_lock_irqsave(&pf_handlers_writer, flags);
-	hlist_del_rcu(&old_pfh->hlist);
-	spin_unlock_irqrestore(&pf_handlers_writer, flags);
+	spin_lock_irqsave(&mmiotrace_handler_lock, flags);
+	if (mmiotrace_pf_handler != old_pfh)
+		ret = -EPERM;
+	else
+		mmiotrace_pf_handler = NULL;
+	spin_unlock_irqrestore(&mmiotrace_handler_lock, flags);
+	return ret;
 }
-EXPORT_SYMBOL_GPL(unregister_page_fault_handler);
-#endif
+EXPORT_SYMBOL_GPL(mmiotrace_unregister_pf);
+#endif /* CONFIG_MMIOTRACE_HOOKS */
 
 /* returns non-zero if do_page_fault() should return */
-static int handle_custom_pf(struct pt_regs *regs, unsigned long error_code,
-			    unsigned long address)
+static inline int call_mmiotrace(struct pt_regs *regs,
+				 unsigned long error_code,
+				 unsigned long address)
 {
-#ifdef CONFIG_PAGE_FAULT_HANDLERS
+#ifdef CONFIG_MMIOTRACE_HOOKS
 	int ret = 0;
-	struct pf_handler *cur;
-	struct hlist_node *ncur;
-
-	if (hlist_empty(&pf_handlers))
-		return 0;
-
 	rcu_read_lock();
-	hlist_for_each_entry_rcu(cur, ncur, &pf_handlers, hlist) {
-		ret = cur->handler(regs, error_code, address);
-		if (ret)
-			break;
-	}
+	if (mmiotrace_pf_handler)
+		ret = mmiotrace_pf_handler(regs, error_code, address);
 	rcu_read_unlock();
 	return ret;
 #else
@@ -655,7 +657,7 @@ void __kprobes do_page_fault(struct pt_regs *regs, unsigned long error_code)
 
 	if (notify_page_fault(regs))
 		return;
-	if (handle_custom_pf(regs, error_code, address))
+	if (call_mmiotrace(regs, error_code, address))
 		return;
 
 	/*
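
For reference, a minimal sketch of how a client module might use the single-handler
interface introduced above. It assumes pf_handler_func is typedef'd as
int (*)(struct pt_regs *, unsigned long, unsigned long) and that the register/unregister
declarations are visible via a header such as <linux/mmiotrace.h>; the names
my_pf_handler/my_init/my_exit are purely illustrative and not part of this patch.

#include <linux/module.h>
#include <linux/ptrace.h>
#include <linux/rcupdate.h>
#include <linux/mmiotrace.h>	/* assumed location of the declarations */

static int my_pf_handler(struct pt_regs *regs, unsigned long error_code,
			 unsigned long address)
{
	/* Non-zero makes do_page_fault() return early; 0 falls through. */
	return 0;
}

static int __init my_init(void)
{
	/* Fails with -EBUSY if another handler is already registered. */
	return mmiotrace_register_pf(my_pf_handler);
}

static void __exit my_exit(void)
{
	mmiotrace_unregister_pf(my_pf_handler);
	/* The pointer is RCU-protected: wait for in-flight faults to finish
	 * before the handler's module text goes away. */
	synchronize_rcu();
}

module_init(my_init);
module_exit(my_exit);
MODULE_LICENSE("GPL");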