@@ -467,6 +467,7 @@ static struct kprobe *__kprobes get_optimized_kprobe(unsigned long addr)
 /* Optimization staging list, protected by kprobe_mutex */
 static LIST_HEAD(optimizing_list);
 static LIST_HEAD(unoptimizing_list);
+static LIST_HEAD(freeing_list);
 
 static void kprobe_optimizer(struct work_struct *work);
 static DECLARE_DELAYED_WORK(optimizing_work, kprobe_optimizer);
@@ -504,7 +505,7 @@ static __kprobes void do_optimize_kprobes(void)
  * Unoptimize (replace a jump with a breakpoint and remove the breakpoint
  * if need) kprobes listed on unoptimizing_list.
  */
-static __kprobes void do_unoptimize_kprobes(struct list_head *free_list)
+static __kprobes void do_unoptimize_kprobes(void)
 {
 	struct optimized_kprobe *op, *tmp;
 
@@ -515,9 +516,9 @@ static __kprobes void do_unoptimize_kprobes(struct list_head *free_list)
 	/* Ditto to do_optimize_kprobes */
 	get_online_cpus();
 	mutex_lock(&text_mutex);
-	arch_unoptimize_kprobes(&unoptimizing_list, free_list);
+	arch_unoptimize_kprobes(&unoptimizing_list, &freeing_list);
 	/* Loop free_list for disarming */
-	list_for_each_entry_safe(op, tmp, free_list, list) {
+	list_for_each_entry_safe(op, tmp, &freeing_list, list) {
 		/* Disarm probes if marked disabled */
 		if (kprobe_disabled(&op->kp))
 			arch_disarm_kprobe(&op->kp);
@@ -536,11 +537,11 @@ static __kprobes void do_unoptimize_kprobes(struct list_head *free_list)
 }
 
 /* Reclaim all kprobes on the free_list */
-static __kprobes void do_free_cleaned_kprobes(struct list_head *free_list)
+static __kprobes void do_free_cleaned_kprobes(void)
 {
 	struct optimized_kprobe *op, *tmp;
 
-	list_for_each_entry_safe(op, tmp, free_list, list) {
+	list_for_each_entry_safe(op, tmp, &freeing_list, list) {
 		BUG_ON(!kprobe_unused(&op->kp));
 		list_del_init(&op->list);
 		free_aggr_kprobe(&op->kp);
@@ -556,8 +557,6 @@ static __kprobes void kick_kprobe_optimizer(void)
 /* Kprobe jump optimizer */
 static __kprobes void kprobe_optimizer(struct work_struct *work)
 {
-	LIST_HEAD(free_list);
-
 	mutex_lock(&kprobe_mutex);
 	/* Lock modules while optimizing kprobes */
 	mutex_lock(&module_mutex);
@@ -566,7 +565,7 @@ static __kprobes void kprobe_optimizer(struct work_struct *work)
 	 * Step 1: Unoptimize kprobes and collect cleaned (unused and disarmed)
 	 * kprobes before waiting for quiesence period.
 	 */
-	do_unoptimize_kprobes(&free_list);
+	do_unoptimize_kprobes();
 
 	/*
 	 * Step 2: Wait for quiesence period to ensure all running interrupts
@@ -581,7 +580,7 @@ static __kprobes void kprobe_optimizer(struct work_struct *work)
 	do_optimize_kprobes();
 
 	/* Step 4: Free cleaned kprobes after quiesence period */
-	do_free_cleaned_kprobes(&free_list);
+	do_free_cleaned_kprobes();
 
 	mutex_unlock(&module_mutex);
 	mutex_unlock(&kprobe_mutex);
@@ -723,8 +722,19 @@ static void __kprobes kill_optimized_kprobe(struct kprobe *p)
 	if (!list_empty(&op->list))
 		/* Dequeue from the (un)optimization queue */
 		list_del_init(&op->list);
-
 	op->kp.flags &= ~KPROBE_FLAG_OPTIMIZED;
+
+	if (kprobe_unused(p)) {
+		/* Enqueue if it is unused */
+		list_add(&op->list, &freeing_list);
+		/*
+		 * Remove unused probes from the hash list. After waiting
+		 * for synchronization, these probes are reclaimed.
+		 * (reclaiming is done by do_free_cleaned_kprobes().)
+		 */
+		hlist_del_rcu(&op->kp.hlist);
+	}
+
 	/* Don't touch the code, because it is already freed. */
 	arch_remove_optimized_kprobe(op);
 }
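
For readers who want the shape of the change outside the kernel tree: the patch drops the per-invocation free_list that kprobe_optimizer() used to pass around and replaces it with a single static freeing_list, so kill_optimized_kprobe() can also stage unused probes for reclaim and do_free_cleaned_kprobes() frees whatever has been staged once the quiescence step has passed. The following is a minimal userspace sketch of that staging pattern, not kernel code; the probe struct and the kill_probe()/reclaim_all() helpers are hypothetical stand-ins for optimized_kprobe, kill_optimized_kprobe() and do_free_cleaned_kprobes().

/*
 * Illustrative sketch only: stage entries on a global freeing list,
 * then reclaim them in a later pass (the "quiescence period" in the
 * patch is reduced to a comment here).
 */
#include <stdio.h>
#include <stdlib.h>

struct probe {
	int id;
	int unused;		/* analogous to kprobe_unused() */
	struct probe *next;
};

/* Single staging list, analogous to the new static freeing_list */
static struct probe *freeing_list;

/* Analogous to kill_optimized_kprobe(): stage unused entries for freeing */
static void kill_probe(struct probe *p)
{
	if (p->unused) {
		p->next = freeing_list;
		freeing_list = p;
	}
}

/* Analogous to do_free_cleaned_kprobes(): reclaim everything staged */
static void reclaim_all(void)
{
	struct probe *p, *tmp;

	for (p = freeing_list; p; p = tmp) {
		tmp = p->next;
		printf("freeing probe %d\n", p->id);
		free(p);
	}
	freeing_list = NULL;
}

int main(void)
{
	struct probe *p = malloc(sizeof(*p));

	p->id = 1;
	p->unused = 1;
	p->next = NULL;

	kill_probe(p);		/* stage the unused probe */
	/* ... quiescence period would elapse here ... */
	reclaim_all();		/* reclaim after quiescence */
	return 0;
}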