@@ -42,6 +42,7 @@
 #include <linux/freezer.h>
 #include <linux/seq_file.h>
 #include <linux/debugfs.h>
+#include <linux/sysctl.h>
 #include <linux/kdebug.h>
 #include <linux/memory.h>
 #include <linux/ftrace.h>
@@ -360,6 +361,9 @@ static inline void copy_kprobe(struct kprobe *old_p, struct kprobe *p)
 }
 
 #ifdef CONFIG_OPTPROBES
+/* NOTE: change this value only with kprobe_mutex held */
+static bool kprobes_allow_optimization;
+
 /*
  * Call all pre_handler on the list, but ignores its return value.
  * This must be called from arch-dep optimized caller.
@@ -428,7 +432,7 @@ static __kprobes void kprobe_optimizer(struct work_struct *work)
 	/* Lock modules while optimizing kprobes */
 	mutex_lock(&module_mutex);
 	mutex_lock(&kprobe_mutex);
-	if (kprobes_all_disarmed)
+	if (kprobes_all_disarmed || !kprobes_allow_optimization)
 		goto end;
 
 	/*
@@ -471,7 +475,7 @@ static __kprobes void optimize_kprobe(struct kprobe *p)
 	struct optimized_kprobe *op;
 
 	/* Check if the kprobe is disabled or not ready for optimization. */
-	if (!kprobe_optready(p) ||
+	if (!kprobe_optready(p) || !kprobes_allow_optimization ||
 	    (kprobe_disabled(p) || kprobes_all_disarmed))
 		return;
 
@@ -588,6 +592,80 @@ static __kprobes void try_to_optimize_kprobe(struct kprobe *p)
 		optimize_kprobe(ap);
 }
 
+#ifdef CONFIG_SYSCTL
+static void __kprobes optimize_all_kprobes(void)
+{
+	struct hlist_head *head;
+	struct hlist_node *node;
+	struct kprobe *p;
+	unsigned int i;
+
+	/* If optimization is already allowed, just return */
+	if (kprobes_allow_optimization)
+		return;
+
+	kprobes_allow_optimization = true;
+	mutex_lock(&text_mutex);
+	for (i = 0; i < KPROBE_TABLE_SIZE; i++) {
+		head = &kprobe_table[i];
+		hlist_for_each_entry_rcu(p, node, head, hlist)
+			if (!kprobe_disabled(p))
+				optimize_kprobe(p);
+	}
+	mutex_unlock(&text_mutex);
+	printk(KERN_INFO "Kprobes globally optimized\n");
+}
+
+static void __kprobes unoptimize_all_kprobes(void)
+{
+	struct hlist_head *head;
+	struct hlist_node *node;
+	struct kprobe *p;
+	unsigned int i;
+
+	/* If optimization is already prohibited, just return */
+	if (!kprobes_allow_optimization)
+		return;
+
+	kprobes_allow_optimization = false;
+	printk(KERN_INFO "Kprobes globally unoptimized\n");
+	get_online_cpus();	/* For avoiding text_mutex deadlock */
+	mutex_lock(&text_mutex);
+	for (i = 0; i < KPROBE_TABLE_SIZE; i++) {
+		head = &kprobe_table[i];
+		hlist_for_each_entry_rcu(p, node, head, hlist) {
+			if (!kprobe_disabled(p))
+				unoptimize_kprobe(p);
+		}
+	}
+
+	mutex_unlock(&text_mutex);
+	put_online_cpus();
+	/* Allow all currently running kprobes to complete */
+	synchronize_sched();
+}
+
+int sysctl_kprobes_optimization;
+int proc_kprobes_optimization_handler(struct ctl_table *table, int write,
+				      void __user *buffer, size_t *length,
+				      loff_t *ppos)
+{
+	int ret;
+
+	mutex_lock(&kprobe_mutex);
+	sysctl_kprobes_optimization = kprobes_allow_optimization ? 1 : 0;
+	ret = proc_dointvec_minmax(table, write, buffer, length, ppos);
+
+	if (sysctl_kprobes_optimization)
+		optimize_all_kprobes();
+	else
+		unoptimize_all_kprobes();
+	mutex_unlock(&kprobe_mutex);
+
+	return ret;
+}
+#endif /* CONFIG_SYSCTL */
+
 static void __kprobes __arm_kprobe(struct kprobe *p)
 {
 	struct kprobe *old_p;
@@ -1610,10 +1688,14 @@ static int __init init_kprobes(void)
 		}
 	}
 
-#if defined(CONFIG_OPTPROBES) && defined(__ARCH_WANT_KPROBES_INSN_SLOT)
+#if defined(CONFIG_OPTPROBES)
+#if defined(__ARCH_WANT_KPROBES_INSN_SLOT)
 	/* Init kprobe_optinsn_slots */
 	kprobe_optinsn_slots.insn_size = MAX_OPTINSN_SIZE;
 #endif
+	/* By default, kprobes can be optimized */
+	kprobes_allow_optimization = true;
+#endif
 
 	/* By default, kprobes are armed */
 	kprobes_all_disarmed = false;