@@ -42,9 +42,11 @@
 #include <linux/freezer.h>
 #include <linux/seq_file.h>
 #include <linux/debugfs.h>
+#include <linux/sysctl.h>
 #include <linux/kdebug.h>
 #include <linux/memory.h>
 #include <linux/ftrace.h>
+#include <linux/cpu.h>

 #include <asm-generic/sections.h>
 #include <asm/cacheflush.h>
@@ -105,57 +107,74 @@ static struct kprobe_blackpoint kprobe_blacklist[] = {
  * stepping on the instruction on a vmalloced/kmalloced/data page
  * is a recipe for disaster
  */
-#define INSNS_PER_PAGE	(PAGE_SIZE/(MAX_INSN_SIZE * sizeof(kprobe_opcode_t)))
-
 struct kprobe_insn_page {
 	struct list_head list;
 	kprobe_opcode_t *insns;		/* Page of instruction slots */
-	char slot_used[INSNS_PER_PAGE];
 	int nused;
 	int ngarbage;
+	char slot_used[];
+};
+
+#define KPROBE_INSN_PAGE_SIZE(slots)			\
+	(offsetof(struct kprobe_insn_page, slot_used) +	\
+	 (sizeof(char) * (slots)))
+
+struct kprobe_insn_cache {
+	struct list_head pages;	/* list of kprobe_insn_page */
+	size_t insn_size;	/* size of instruction slot */
+	int nr_garbage;
 };

+static int slots_per_page(struct kprobe_insn_cache *c)
+{
+	return PAGE_SIZE/(c->insn_size * sizeof(kprobe_opcode_t));
+}
+
 enum kprobe_slot_state {
 	SLOT_CLEAN = 0,
 	SLOT_DIRTY = 1,
 	SLOT_USED = 2,
 };

-static DEFINE_MUTEX(kprobe_insn_mutex);	/* Protects kprobe_insn_pages */
-static LIST_HEAD(kprobe_insn_pages);
-static int kprobe_garbage_slots;
-static int collect_garbage_slots(void);
+static DEFINE_MUTEX(kprobe_insn_mutex);	/* Protects kprobe_insn_slots */
+static struct kprobe_insn_cache kprobe_insn_slots = {
+	.pages = LIST_HEAD_INIT(kprobe_insn_slots.pages),
+	.insn_size = MAX_INSN_SIZE,
+	.nr_garbage = 0,
+};
+static int __kprobes collect_garbage_slots(struct kprobe_insn_cache *c);

 /**
  * __get_insn_slot() - Find a slot on an executable page for an instruction.
  * We allocate an executable page if there's no room on existing ones.
  */
-static kprobe_opcode_t __kprobes *__get_insn_slot(void)
+static kprobe_opcode_t __kprobes *__get_insn_slot(struct kprobe_insn_cache *c)
 {
 	struct kprobe_insn_page *kip;

  retry:
-	list_for_each_entry(kip, &kprobe_insn_pages, list) {
-		if (kip->nused < INSNS_PER_PAGE) {
+	list_for_each_entry(kip, &c->pages, list) {
+		if (kip->nused < slots_per_page(c)) {
 			int i;
-			for (i = 0; i < INSNS_PER_PAGE; i++) {
+			for (i = 0; i < slots_per_page(c); i++) {
 				if (kip->slot_used[i] == SLOT_CLEAN) {
 					kip->slot_used[i] = SLOT_USED;
 					kip->nused++;
-					return kip->insns + (i * MAX_INSN_SIZE);
+					return kip->insns + (i * c->insn_size);
 				}
 			}
-			/* Surprise!  No unused slots.  Fix kip->nused. */
-			kip->nused = INSNS_PER_PAGE;
+			/* kip->nused is broken. Fix it. */
+			kip->nused = slots_per_page(c);
+			WARN_ON(1);
 		}
 	}

 	/* If there are any garbage slots, collect it and try again. */
-	if (kprobe_garbage_slots && collect_garbage_slots() == 0) {
+	if (c->nr_garbage && collect_garbage_slots(c) == 0)
 		goto retry;
-	}
-	/* All out of space.  Need to allocate a new page. Use slot 0. */
-	kip = kmalloc(sizeof(struct kprobe_insn_page), GFP_KERNEL);
+
+	/* All out of space.  Need to allocate a new page. */
+	kip = kmalloc(KPROBE_INSN_PAGE_SIZE(slots_per_page(c)), GFP_KERNEL);
 	if (!kip)
 		return NULL;

@@ -170,20 +189,23 @@ static kprobe_opcode_t __kprobes *__get_insn_slot(void)
 		return NULL;
 	}
 	INIT_LIST_HEAD(&kip->list);
-	list_add(&kip->list, &kprobe_insn_pages);
-	memset(kip->slot_used, SLOT_CLEAN, INSNS_PER_PAGE);
+	memset(kip->slot_used, SLOT_CLEAN, slots_per_page(c));
 	kip->slot_used[0] = SLOT_USED;
 	kip->nused = 1;
 	kip->ngarbage = 0;
+	list_add(&kip->list, &c->pages);
 	return kip->insns;
 }

+
 kprobe_opcode_t __kprobes *get_insn_slot(void)
 {
-	kprobe_opcode_t *ret;
+	kprobe_opcode_t *ret = NULL;
+
 	mutex_lock(&kprobe_insn_mutex);
-	ret = __get_insn_slot();
+	ret = __get_insn_slot(&kprobe_insn_slots);
 	mutex_unlock(&kprobe_insn_mutex);
+
 	return ret;
 }

@@ -199,7 +221,7 @@ static int __kprobes collect_one_slot(struct kprobe_insn_page *kip, int idx)
 		 * so as not to have to set it up again the
 		 * next time somebody inserts a probe.
 		 */
-		if (!list_is_singular(&kprobe_insn_pages)) {
+		if (!list_is_singular(&kip->list)) {
 			list_del(&kip->list);
 			module_free(NULL, kip->insns);
 			kfree(kip);
@@ -209,51 +231,84 @@ static int __kprobes collect_one_slot(struct kprobe_insn_page *kip, int idx)
 	return 0;
 }

-static int __kprobes collect_garbage_slots(void)
+static int __kprobes collect_garbage_slots(struct kprobe_insn_cache *c)
 {
 	struct kprobe_insn_page *kip, *next;

 	/* Ensure no-one is interrupted on the garbages */
 	synchronize_sched();

-	list_for_each_entry_safe(kip, next, &kprobe_insn_pages, list) {
+	list_for_each_entry_safe(kip, next, &c->pages, list) {
 		int i;
 		if (kip->ngarbage == 0)
 			continue;
 		kip->ngarbage = 0;	/* we will collect all garbages */
-		for (i = 0; i < INSNS_PER_PAGE; i++) {
+		for (i = 0; i < slots_per_page(c); i++) {
 			if (kip->slot_used[i] == SLOT_DIRTY &&
 			    collect_one_slot(kip, i))
 				break;
 		}
 	}
-	kprobe_garbage_slots = 0;
+	c->nr_garbage = 0;
 	return 0;
 }

-void __kprobes free_insn_slot(kprobe_opcode_t * slot, int dirty)
+static void __kprobes __free_insn_slot(struct kprobe_insn_cache *c,
+				       kprobe_opcode_t *slot, int dirty)
 {
 	struct kprobe_insn_page *kip;

-	mutex_lock(&kprobe_insn_mutex);
-	list_for_each_entry(kip, &kprobe_insn_pages, list) {
-		if (kip->insns <= slot &&
-		    slot < kip->insns + (INSNS_PER_PAGE * MAX_INSN_SIZE)) {
-			int i = (slot - kip->insns) / MAX_INSN_SIZE;
+	list_for_each_entry(kip, &c->pages, list) {
+		long idx = ((long)slot - (long)kip->insns) / c->insn_size;
+		if (idx >= 0 && idx < slots_per_page(c)) {
+			WARN_ON(kip->slot_used[idx] != SLOT_USED);
 			if (dirty) {
-				kip->slot_used[i] = SLOT_DIRTY;
+				kip->slot_used[idx] = SLOT_DIRTY;
 				kip->ngarbage++;
+				if (++c->nr_garbage > slots_per_page(c))
+					collect_garbage_slots(c);
 			} else
-				collect_one_slot(kip, i);
-			break;
+				collect_one_slot(kip, idx);
+			return;
 		}
 	}
+	/* Could not free this slot. */
+	WARN_ON(1);
+}

-	if (dirty && ++kprobe_garbage_slots > INSNS_PER_PAGE)
-		collect_garbage_slots();
-
+void __kprobes free_insn_slot(kprobe_opcode_t * slot, int dirty)
+{
+	mutex_lock(&kprobe_insn_mutex);
+	__free_insn_slot(&kprobe_insn_slots, slot, dirty);
 	mutex_unlock(&kprobe_insn_mutex);
 }
+#ifdef CONFIG_OPTPROBES
+/* For optimized_kprobe buffer */
+static DEFINE_MUTEX(kprobe_optinsn_mutex); /* Protects kprobe_optinsn_slots */
+static struct kprobe_insn_cache kprobe_optinsn_slots = {
+	.pages = LIST_HEAD_INIT(kprobe_optinsn_slots.pages),
+	/* .insn_size is initialized later */
+	.nr_garbage = 0,
+};
+/* Get a slot for optimized_kprobe buffer */
+kprobe_opcode_t __kprobes *get_optinsn_slot(void)
+{
+	kprobe_opcode_t *ret = NULL;
+
+	mutex_lock(&kprobe_optinsn_mutex);
+	ret = __get_insn_slot(&kprobe_optinsn_slots);
+	mutex_unlock(&kprobe_optinsn_mutex);
+
+	return ret;
+}
+
+void __kprobes free_optinsn_slot(kprobe_opcode_t * slot, int dirty)
+{
+	mutex_lock(&kprobe_optinsn_mutex);
+	__free_insn_slot(&kprobe_optinsn_slots, slot, dirty);
+	mutex_unlock(&kprobe_optinsn_mutex);
+}
+#endif
 #endif

 /* We have preemption disabled.. so it is safe to use __ versions */
@@ -284,23 +339,401 @@ struct kprobe __kprobes *get_kprobe(void *addr)
 		if (p->addr == addr)
 			return p;
 	}
+
+	return NULL;
+}
+
+static int __kprobes aggr_pre_handler(struct kprobe *p, struct pt_regs *regs);
+
+/* Return true if the kprobe is an aggregator */
+static inline int kprobe_aggrprobe(struct kprobe *p)
+{
+	return p->pre_handler == aggr_pre_handler;
+}
+
+/*
+ * Keep all fields in the kprobe consistent
+ */
+static inline void copy_kprobe(struct kprobe *old_p, struct kprobe *p)
+{
+	memcpy(&p->opcode, &old_p->opcode, sizeof(kprobe_opcode_t));
+	memcpy(&p->ainsn, &old_p->ainsn, sizeof(struct arch_specific_insn));
+}
+
+#ifdef CONFIG_OPTPROBES
+/* NOTE: change this value only with kprobe_mutex held */
+static bool kprobes_allow_optimization;
+
+/*
+ * Call all pre_handlers on the list, but ignore their return values.
+ * This must be called from the arch-dependent optimized caller.
+ */
+void __kprobes opt_pre_handler(struct kprobe *p, struct pt_regs *regs)
+{
+	struct kprobe *kp;
+
+	list_for_each_entry_rcu(kp, &p->list, list) {
+		if (kp->pre_handler && likely(!kprobe_disabled(kp))) {
+			set_kprobe_instance(kp);
+			kp->pre_handler(kp, regs);
+		}
+		reset_kprobe_instance();
+	}
+}
+
+/* Return true(!0) if the kprobe is ready for optimization. */
+static inline int kprobe_optready(struct kprobe *p)
+{
+	struct optimized_kprobe *op;
+
+	if (kprobe_aggrprobe(p)) {
+		op = container_of(p, struct optimized_kprobe, kp);
+		return arch_prepared_optinsn(&op->optinsn);
+	}
+
+	return 0;
+}
+
+/*
+ * Return an optimized kprobe whose optimizing code replaces
+ * instructions including addr (exclude breakpoint).
+ */
+struct kprobe *__kprobes get_optimized_kprobe(unsigned long addr)
+{
+	int i;
+	struct kprobe *p = NULL;
+	struct optimized_kprobe *op;
+
+	/* Don't check i == 0, since that is a breakpoint case. */
+	for (i = 1; !p && i < MAX_OPTIMIZED_LENGTH; i++)
+		p = get_kprobe((void *)(addr - i));
+
+	if (p && kprobe_optready(p)) {
+		op = container_of(p, struct optimized_kprobe, kp);
+		if (arch_within_optimized_kprobe(op, addr))
+			return p;
+	}
+
 	return NULL;
 }

+/* Optimization staging list, protected by kprobe_mutex */
+static LIST_HEAD(optimizing_list);
+
+static void kprobe_optimizer(struct work_struct *work);
+static DECLARE_DELAYED_WORK(optimizing_work, kprobe_optimizer);
+#define OPTIMIZE_DELAY 5
+
+/* Kprobe jump optimizer */
+static __kprobes void kprobe_optimizer(struct work_struct *work)
+{
+	struct optimized_kprobe *op, *tmp;
+
+	/* Lock modules while optimizing kprobes */
+	mutex_lock(&module_mutex);
+	mutex_lock(&kprobe_mutex);
+	if (kprobes_all_disarmed || !kprobes_allow_optimization)
+		goto end;
+
+	/*
+	 * Wait for the quiescence period to ensure all running interrupts
+	 * are done. Because an optprobe may modify multiple instructions,
+	 * there is a chance that the Nth instruction is interrupted. In that
+	 * case, a running interrupt can return to the 2nd-Nth byte of the
+	 * jump instruction. This wait avoids that.
+	 */
+	synchronize_sched();
+
+	/*
+	 * The optimization/unoptimization refers to online_cpus via
+	 * stop_machine() and cpu-hotplug modifies online_cpus.
+	 * At the same time, text_mutex will be held in cpu-hotplug and here.
+	 * This combination can cause a deadlock (cpu-hotplug tries to lock
+	 * text_mutex but stop_machine cannot be done because online_cpus
+	 * has been changed).
+	 * To avoid this deadlock, we need to call get_online_cpus()
+	 * for preventing cpu-hotplug outside of text_mutex locking.
+	 */
+	get_online_cpus();
+	mutex_lock(&text_mutex);
+	list_for_each_entry_safe(op, tmp, &optimizing_list, list) {
+		WARN_ON(kprobe_disabled(&op->kp));
+		if (arch_optimize_kprobe(op) < 0)
+			op->kp.flags &= ~KPROBE_FLAG_OPTIMIZED;
+		list_del_init(&op->list);
+	}
+	mutex_unlock(&text_mutex);
+	put_online_cpus();
+end:
+	mutex_unlock(&kprobe_mutex);
+	mutex_unlock(&module_mutex);
+}
+
+/* Optimize kprobe if p is ready to be optimized */
+static __kprobes void optimize_kprobe(struct kprobe *p)
+{
+	struct optimized_kprobe *op;
+
+	/* Check if the kprobe is disabled or not ready for optimization. */
+	if (!kprobe_optready(p) || !kprobes_allow_optimization ||
+	    (kprobe_disabled(p) || kprobes_all_disarmed))
+		return;
+
+	/* Neither break_handler nor post_handler is supported. */
+	if (p->break_handler || p->post_handler)
+		return;
+
+	op = container_of(p, struct optimized_kprobe, kp);
+
+	/* Check that there are no other kprobes at the optimized instructions */
+	if (arch_check_optimized_kprobe(op) < 0)
+		return;
+
+	/* Check if it is already optimized. */
+	if (op->kp.flags & KPROBE_FLAG_OPTIMIZED)
+		return;
+
+	op->kp.flags |= KPROBE_FLAG_OPTIMIZED;
+	list_add(&op->list, &optimizing_list);
+	if (!delayed_work_pending(&optimizing_work))
+		schedule_delayed_work(&optimizing_work, OPTIMIZE_DELAY);
+}
+
+/* Unoptimize a kprobe if p is optimized */
+static __kprobes void unoptimize_kprobe(struct kprobe *p)
+{
+	struct optimized_kprobe *op;
+
+	if ((p->flags & KPROBE_FLAG_OPTIMIZED) && kprobe_aggrprobe(p)) {
+		op = container_of(p, struct optimized_kprobe, kp);
+		if (!list_empty(&op->list))
+			/* Dequeue from the optimization queue */
+			list_del_init(&op->list);
+		else
+			/* Replace jump with break */
+			arch_unoptimize_kprobe(op);
+		op->kp.flags &= ~KPROBE_FLAG_OPTIMIZED;
+	}
+}
+
+/* Remove optimized instructions */
+static void __kprobes kill_optimized_kprobe(struct kprobe *p)
+{
+	struct optimized_kprobe *op;
+
+	op = container_of(p, struct optimized_kprobe, kp);
+	if (!list_empty(&op->list)) {
+		/* Dequeue from the optimization queue */
+		list_del_init(&op->list);
+		op->kp.flags &= ~KPROBE_FLAG_OPTIMIZED;
+	}
+	/* Don't unoptimize, because the target code will be freed. */
+	arch_remove_optimized_kprobe(op);
+}
+
+/* Try to prepare optimized instructions */
+static __kprobes void prepare_optimized_kprobe(struct kprobe *p)
+{
+	struct optimized_kprobe *op;
+
+	op = container_of(p, struct optimized_kprobe, kp);
+	arch_prepare_optimized_kprobe(op);
+}
+
+/* Free optimized instructions and optimized_kprobe */
+static __kprobes void free_aggr_kprobe(struct kprobe *p)
+{
+	struct optimized_kprobe *op;
+
+	op = container_of(p, struct optimized_kprobe, kp);
+	arch_remove_optimized_kprobe(op);
+	kfree(op);
+}
+
+/* Allocate a new optimized_kprobe and try to prepare optimized instructions */
+static __kprobes struct kprobe *alloc_aggr_kprobe(struct kprobe *p)
+{
+	struct optimized_kprobe *op;
+
+	op = kzalloc(sizeof(struct optimized_kprobe), GFP_KERNEL);
+	if (!op)
+		return NULL;
+
+	INIT_LIST_HEAD(&op->list);
+	op->kp.addr = p->addr;
+	arch_prepare_optimized_kprobe(op);
+
+	return &op->kp;
+}
+
+static void __kprobes init_aggr_kprobe(struct kprobe *ap, struct kprobe *p);
+
+/*
+ * Prepare an optimized_kprobe and optimize it
+ * NOTE: p must be a normal registered kprobe
+ */
+static __kprobes void try_to_optimize_kprobe(struct kprobe *p)
+{
+	struct kprobe *ap;
+	struct optimized_kprobe *op;
+
+	ap = alloc_aggr_kprobe(p);
+	if (!ap)
+		return;
+
+	op = container_of(ap, struct optimized_kprobe, kp);
+	if (!arch_prepared_optinsn(&op->optinsn)) {
+		/* If we failed to set up optimizing, fall back to a plain kprobe */
+		free_aggr_kprobe(ap);
+		return;
+	}
+
+	init_aggr_kprobe(ap, p);
+	optimize_kprobe(ap);
+}
+
+#ifdef CONFIG_SYSCTL
+static void __kprobes optimize_all_kprobes(void)
+{
+	struct hlist_head *head;
+	struct hlist_node *node;
+	struct kprobe *p;
+	unsigned int i;
+
+	/* If optimization is already allowed, just return */
+	if (kprobes_allow_optimization)
+		return;
+
+	kprobes_allow_optimization = true;
+	mutex_lock(&text_mutex);
+	for (i = 0; i < KPROBE_TABLE_SIZE; i++) {
+		head = &kprobe_table[i];
+		hlist_for_each_entry_rcu(p, node, head, hlist)
+			if (!kprobe_disabled(p))
+				optimize_kprobe(p);
+	}
+	mutex_unlock(&text_mutex);
+	printk(KERN_INFO "Kprobes globally optimized\n");
+}
+
+static void __kprobes unoptimize_all_kprobes(void)
+{
+	struct hlist_head *head;
+	struct hlist_node *node;
+	struct kprobe *p;
+	unsigned int i;
+
+	/* If optimization is already prohibited, just return */
+	if (!kprobes_allow_optimization)
+		return;
+
+	kprobes_allow_optimization = false;
+	printk(KERN_INFO "Kprobes globally unoptimized\n");
+	get_online_cpus();	/* For avoiding text_mutex deadlock */
+	mutex_lock(&text_mutex);
+	for (i = 0; i < KPROBE_TABLE_SIZE; i++) {
+		head = &kprobe_table[i];
+		hlist_for_each_entry_rcu(p, node, head, hlist) {
+			if (!kprobe_disabled(p))
+				unoptimize_kprobe(p);
+		}
+	}
+
+	mutex_unlock(&text_mutex);
+	put_online_cpus();
+	/* Allow all currently running kprobes to complete */
+	synchronize_sched();
+}
+
+int sysctl_kprobes_optimization;
+int proc_kprobes_optimization_handler(struct ctl_table *table, int write,
+				      void __user *buffer, size_t *length,
+				      loff_t *ppos)
+{
+	int ret;
+
+	mutex_lock(&kprobe_mutex);
+	sysctl_kprobes_optimization = kprobes_allow_optimization ? 1 : 0;
+	ret = proc_dointvec_minmax(table, write, buffer, length, ppos);
+
+	if (sysctl_kprobes_optimization)
+		optimize_all_kprobes();
+	else
+		unoptimize_all_kprobes();
+	mutex_unlock(&kprobe_mutex);
+
+	return ret;
+}
+#endif /* CONFIG_SYSCTL */
+
+static void __kprobes __arm_kprobe(struct kprobe *p)
+{
+	struct kprobe *old_p;
+
+	/* Check collision with other optimized kprobes */
+	old_p = get_optimized_kprobe((unsigned long)p->addr);
+	if (unlikely(old_p))
+		unoptimize_kprobe(old_p);	/* Fall back to unoptimized kprobe */
+
+	arch_arm_kprobe(p);
+	optimize_kprobe(p);	/* Try to optimize (add kprobe to a list) */
+}
+
+static void __kprobes __disarm_kprobe(struct kprobe *p)
+{
+	struct kprobe *old_p;
+
+	unoptimize_kprobe(p);	/* Try to unoptimize */
+	arch_disarm_kprobe(p);
+
+	/* If another kprobe was blocked, optimize it. */
+	old_p = get_optimized_kprobe((unsigned long)p->addr);
+	if (unlikely(old_p))
+		optimize_kprobe(old_p);
+}
+
+#else /* !CONFIG_OPTPROBES */
+
+#define optimize_kprobe(p)			do {} while (0)
+#define unoptimize_kprobe(p)			do {} while (0)
+#define kill_optimized_kprobe(p)		do {} while (0)
+#define prepare_optimized_kprobe(p)		do {} while (0)
+#define try_to_optimize_kprobe(p)		do {} while (0)
+#define __arm_kprobe(p)				arch_arm_kprobe(p)
+#define __disarm_kprobe(p)			arch_disarm_kprobe(p)
+
+static __kprobes void free_aggr_kprobe(struct kprobe *p)
+{
+	kfree(p);
+}
+
+static __kprobes struct kprobe *alloc_aggr_kprobe(struct kprobe *p)
+{
+	return kzalloc(sizeof(struct kprobe), GFP_KERNEL);
+}
+#endif /* CONFIG_OPTPROBES */
+
 /* Arm a kprobe with text_mutex */
 static void __kprobes arm_kprobe(struct kprobe *kp)
 {
+	/*
+	 * Here, since __arm_kprobe() doesn't use stop_machine(),
+	 * this doesn't cause deadlock on text_mutex. So, we don't
+	 * need get_online_cpus().
+	 */
 	mutex_lock(&text_mutex);
-	arch_arm_kprobe(kp);
+	__arm_kprobe(kp);
 	mutex_unlock(&text_mutex);
 }

 /* Disarm a kprobe with text_mutex */
 static void __kprobes disarm_kprobe(struct kprobe *kp)
 {
+	get_online_cpus();	/* For avoiding text_mutex deadlock */
 	mutex_lock(&text_mutex);
-	arch_disarm_kprobe(kp);
+	__disarm_kprobe(kp);
 	mutex_unlock(&text_mutex);
+	put_online_cpus();
 }

 /*
@@ -369,7 +802,7 @@ static int __kprobes aggr_break_handler(struct kprobe *p, struct pt_regs *regs)
 void __kprobes kprobes_inc_nmissed_count(struct kprobe *p)
 {
 	struct kprobe *kp;
-	if (p->pre_handler != aggr_pre_handler) {
+	if (!kprobe_aggrprobe(p)) {
 		p->nmissed++;
 	} else {
 		list_for_each_entry_rcu(kp, &p->list, list)
@@ -492,15 +925,6 @@ static void __kprobes cleanup_rp_inst(struct kretprobe *rp)
 	free_rp_inst(rp);
 }

-/*
- * Keep all fields in the kprobe consistent
- */
-static inline void copy_kprobe(struct kprobe *old_p, struct kprobe *p)
-{
-	memcpy(&p->opcode, &old_p->opcode, sizeof(kprobe_opcode_t));
-	memcpy(&p->ainsn, &old_p->ainsn, sizeof(struct arch_specific_insn));
-}
-
 /*
  * Add the new probe to ap->list. Fail if this is the
  * second jprobe at the address - two jprobes can't coexist
@@ -508,6 +932,10 @@ static inline void copy_kprobe(struct kprobe *old_p, struct kprobe *p)
 static int __kprobes add_new_kprobe(struct kprobe *ap, struct kprobe *p)
 {
 	BUG_ON(kprobe_gone(ap) || kprobe_gone(p));
+
+	if (p->break_handler || p->post_handler)
+		unoptimize_kprobe(ap);	/* Fall back to normal kprobe */
+
 	if (p->break_handler) {
 		if (ap->break_handler)
 			return -EEXIST;
@@ -522,7 +950,7 @@ static int __kprobes add_new_kprobe(struct kprobe *ap, struct kprobe *p)
 		ap->flags &= ~KPROBE_FLAG_DISABLED;
 		if (!kprobes_all_disarmed)
 			/* Arm the breakpoint again. */
-			arm_kprobe(ap);
+			__arm_kprobe(ap);
 	}
 	return 0;
 }
@@ -531,12 +959,13 @@ static int __kprobes add_new_kprobe(struct kprobe *ap, struct kprobe *p)
  * Fill in the required fields of the "manager kprobe". Replace the
  * earlier kprobe in the hlist with the manager kprobe
  */
-static inline void add_aggr_kprobe(struct kprobe *ap, struct kprobe *p)
+static void __kprobes init_aggr_kprobe(struct kprobe *ap, struct kprobe *p)
 {
+	/* Copy p's insn slot to ap */
 	copy_kprobe(p, ap);
 	flush_insn_slot(ap);
 	ap->addr = p->addr;
-	ap->flags = p->flags;
+	ap->flags = p->flags & ~KPROBE_FLAG_OPTIMIZED;
 	ap->pre_handler = aggr_pre_handler;
 	ap->fault_handler = aggr_fault_handler;
 	/* We don't care the kprobe which has gone. */
@@ -546,8 +975,9 @@ static inline void add_aggr_kprobe(struct kprobe *ap, struct kprobe *p)
 		ap->break_handler = aggr_break_handler;

 	INIT_LIST_HEAD(&ap->list);
-	list_add_rcu(&p->list, &ap->list);
+	INIT_HLIST_NODE(&ap->hlist);

+	list_add_rcu(&p->list, &ap->list);
 	hlist_replace_rcu(&p->hlist, &ap->hlist);
 }

@@ -561,12 +991,12 @@ static int __kprobes register_aggr_kprobe(struct kprobe *old_p,
 	int ret = 0;
 	struct kprobe *ap = old_p;

-	if (old_p->pre_handler != aggr_pre_handler) {
-		/* If old_p is not an aggr_probe, create new aggr_kprobe. */
-		ap = kzalloc(sizeof(struct kprobe), GFP_KERNEL);
+	if (!kprobe_aggrprobe(old_p)) {
+		/* If old_p is not an aggr_kprobe, create new aggr_kprobe. */
+		ap = alloc_aggr_kprobe(old_p);
 		if (!ap)
 			return -ENOMEM;
-		add_aggr_kprobe(ap, old_p);
+		init_aggr_kprobe(ap, old_p);
 	}

 	if (kprobe_gone(ap)) {
@@ -585,6 +1015,9 @@ static int __kprobes register_aggr_kprobe(struct kprobe *old_p,
 			 */
 			return ret;

+		/* Prepare optimized instructions if possible. */
+		prepare_optimized_kprobe(ap);
+
 		/*
 		 * Clear gone flag to prevent allocating new slot again, and
 		 * set disabled flag because it is not armed yet.
@@ -593,6 +1026,7 @@ static int __kprobes register_aggr_kprobe(struct kprobe *old_p,
 			    | KPROBE_FLAG_DISABLED;
 	}

+	/* Copy ap's insn slot to p */
 	copy_kprobe(ap, p);
 	return add_new_kprobe(ap, p);
 }
@@ -743,27 +1177,34 @@ int __kprobes register_kprobe(struct kprobe *p)
 	p->nmissed = 0;
 	INIT_LIST_HEAD(&p->list);
 	mutex_lock(&kprobe_mutex);
+
+	get_online_cpus();	/* For avoiding text_mutex deadlock. */
+	mutex_lock(&text_mutex);
+
 	old_p = get_kprobe(p->addr);
 	if (old_p) {
+		/* Since this may unoptimize old_p, we lock text_mutex. */
 		ret = register_aggr_kprobe(old_p, p);
 		goto out;
 	}

-	mutex_lock(&text_mutex);
 	ret = arch_prepare_kprobe(p);
 	if (ret)
-		goto out_unlock_text;
+		goto out;

 	INIT_HLIST_NODE(&p->hlist);
 	hlist_add_head_rcu(&p->hlist,
 		       &kprobe_table[hash_ptr(p->addr, KPROBE_HASH_BITS)]);

 	if (!kprobes_all_disarmed && !kprobe_disabled(p))
-		arch_arm_kprobe(p);
+		__arm_kprobe(p);
+
+	/* Try to optimize kprobe */
+	try_to_optimize_kprobe(p);

-out_unlock_text:
-	mutex_unlock(&text_mutex);
 out:
+	mutex_unlock(&text_mutex);
+	put_online_cpus();
 	mutex_unlock(&kprobe_mutex);

 	if (probed_mod)
@@ -785,7 +1226,7 @@ static int __kprobes __unregister_kprobe_top(struct kprobe *p)
 		return -EINVAL;

 	if (old_p == p ||
-	    (old_p->pre_handler == aggr_pre_handler &&
+	    (kprobe_aggrprobe(old_p) &&
 	     list_is_singular(&old_p->list))) {
 		/*
 		 * Only probe on the hash list. Disarm only if kprobes are
@@ -793,7 +1234,7 @@ static int __kprobes __unregister_kprobe_top(struct kprobe *p)
 		 * already have been removed. We save on flushing icache.
 		 */
 		if (!kprobes_all_disarmed && !kprobe_disabled(old_p))
-			disarm_kprobe(p);
+			disarm_kprobe(old_p);
 		hlist_del_rcu(&old_p->hlist);
 	} else {
 		if (p->break_handler && !kprobe_gone(p))
@@ -809,8 +1250,13 @@ noclean:
 		list_del_rcu(&p->list);
 		if (!kprobe_disabled(old_p)) {
 			try_to_disable_aggr_kprobe(old_p);
-			if (!kprobes_all_disarmed && kprobe_disabled(old_p))
-				disarm_kprobe(old_p);
+			if (!kprobes_all_disarmed) {
+				if (kprobe_disabled(old_p))
+					disarm_kprobe(old_p);
+				else
+					/* Try to optimize this probe again */
+					optimize_kprobe(old_p);
+			}
 		}
 	}
 	return 0;
@@ -827,7 +1273,7 @@ static void __kprobes __unregister_kprobe_bottom(struct kprobe *p)
 		old_p = list_entry(p->list.next, struct kprobe, list);
 		list_del(&p->list);
 		arch_remove_kprobe(old_p);
-		kfree(old_p);
+		free_aggr_kprobe(old_p);
 	}
 }

@@ -1123,7 +1569,7 @@ static void __kprobes kill_kprobe(struct kprobe *p)
 	struct kprobe *kp;

 	p->flags |= KPROBE_FLAG_GONE;
-	if (p->pre_handler == aggr_pre_handler) {
+	if (kprobe_aggrprobe(p)) {
 		/*
 		 * If this is an aggr_kprobe, we have to list all the
 		 * chained probes and mark them GONE.
@@ -1132,6 +1578,7 @@ static void __kprobes kill_kprobe(struct kprobe *p)
 			kp->flags |= KPROBE_FLAG_GONE;
 		p->post_handler = NULL;
 		p->break_handler = NULL;
+		kill_optimized_kprobe(p);
 	}
 	/*
 	 * Here, we can remove insn_slot safely, because no thread calls
@@ -1241,6 +1688,15 @@ static int __init init_kprobes(void)
 		}
 	}

+#if defined(CONFIG_OPTPROBES)
+#if defined(__ARCH_WANT_KPROBES_INSN_SLOT)
+	/* Init kprobe_optinsn_slots */
+	kprobe_optinsn_slots.insn_size = MAX_OPTINSN_SIZE;
+#endif
+	/* By default, kprobes can be optimized */
+	kprobes_allow_optimization = true;
+#endif
+
 	/* By default, kprobes are armed */
 	kprobes_all_disarmed = false;

@@ -1259,7 +1715,7 @@ static int __init init_kprobes(void)

 #ifdef CONFIG_DEBUG_FS
 static void __kprobes report_probe(struct seq_file *pi, struct kprobe *p,
-		const char *sym, int offset,char *modname)
+		const char *sym, int offset, char *modname, struct kprobe *pp)
 {
 	char *kprobe_type;

@@ -1269,19 +1725,21 @@ static void __kprobes report_probe(struct seq_file *pi, struct kprobe *p,
 		kprobe_type = "j";
 	else
 		kprobe_type = "k";
+
 	if (sym)
-		seq_printf(pi, "%p  %s  %s+0x%x  %s %s%s\n",
+		seq_printf(pi, "%p  %s  %s+0x%x  %s ",
 			p->addr, kprobe_type, sym, offset,
-			(modname ? modname : " "),
-			(kprobe_gone(p) ? "[GONE]" : ""),
-			((kprobe_disabled(p) && !kprobe_gone(p)) ?
-			 "[DISABLED]" : ""));
+			(modname ? modname : " "));
 	else
-		seq_printf(pi, "%p  %s  %p %s%s\n",
-			p->addr, kprobe_type, p->addr,
-			(kprobe_gone(p) ? "[GONE]" : ""),
-			((kprobe_disabled(p) && !kprobe_gone(p)) ?
-			 "[DISABLED]" : ""));
+		seq_printf(pi, "%p  %s  %p ",
+			p->addr, kprobe_type, p->addr);
+
+	if (!pp)
+		pp = p;
+	seq_printf(pi, "%s%s%s\n",
+		(kprobe_gone(p) ? "[GONE]" : ""),
+		((kprobe_disabled(p) && !kprobe_gone(p)) ? "[DISABLED]" : ""),
+		(kprobe_optimized(pp) ? "[OPTIMIZED]" : ""));
 }

 static void __kprobes *kprobe_seq_start(struct seq_file *f, loff_t *pos)
@@ -1317,11 +1775,11 @@ static int __kprobes show_kprobe_addr(struct seq_file *pi, void *v)
 	hlist_for_each_entry_rcu(p, node, head, hlist) {
 		sym = kallsyms_lookup((unsigned long)p->addr, NULL,
 					&offset, &modname, namebuf);
-		if (p->pre_handler == aggr_pre_handler) {
+		if (kprobe_aggrprobe(p)) {
 			list_for_each_entry_rcu(kp, &p->list, list)
-				report_probe(pi, kp, sym, offset, modname);
+				report_probe(pi, kp, sym, offset, modname, p);
 		} else
-			report_probe(pi, p, sym, offset, modname);
+			report_probe(pi, p, sym, offset, modname, NULL);
 	}
 	preempt_enable();
 	return 0;
@@ -1399,12 +1857,13 @@ int __kprobes enable_kprobe(struct kprobe *kp)
 		goto out;
 	}

-	if (!kprobes_all_disarmed && kprobe_disabled(p))
-		arm_kprobe(p);
-
-	p->flags &= ~KPROBE_FLAG_DISABLED;
 	if (p != kp)
 		kp->flags &= ~KPROBE_FLAG_DISABLED;
+
+	if (!kprobes_all_disarmed && kprobe_disabled(p)) {
+		p->flags &= ~KPROBE_FLAG_DISABLED;
+		arm_kprobe(p);
+	}
 out:
 	mutex_unlock(&kprobe_mutex);
 	return ret;
@@ -1424,12 +1883,13 @@ static void __kprobes arm_all_kprobes(void)
 	if (!kprobes_all_disarmed)
 		goto already_enabled;

+	/* Arming kprobes doesn't optimize kprobe itself */
 	mutex_lock(&text_mutex);
 	for (i = 0; i < KPROBE_TABLE_SIZE; i++) {
 		head = &kprobe_table[i];
 		hlist_for_each_entry_rcu(p, node, head, hlist)
 			if (!kprobe_disabled(p))
-				arch_arm_kprobe(p);
+				__arm_kprobe(p);
 	}
 	mutex_unlock(&text_mutex);

@@ -1456,16 +1916,23 @@ static void __kprobes disarm_all_kprobes(void)

 	kprobes_all_disarmed = true;
 	printk(KERN_INFO "Kprobes globally disabled\n");
+
+	/*
+	 * Here we call get_online_cpus() for avoiding text_mutex deadlock,
+	 * because disarming may also unoptimize kprobes.
+	 */
+	get_online_cpus();
 	mutex_lock(&text_mutex);
 	for (i = 0; i < KPROBE_TABLE_SIZE; i++) {
 		head = &kprobe_table[i];
 		hlist_for_each_entry_rcu(p, node, head, hlist) {
 			if (!arch_trampoline_kprobe(p) && !kprobe_disabled(p))
-				arch_disarm_kprobe(p);
+				__disarm_kprobe(p);
 		}
 	}

 	mutex_unlock(&text_mutex);
+	put_online_cpus();
 	mutex_unlock(&kprobe_mutex);
 	/* Allow all currently running kprobes to complete */
 	synchronize_sched();