@@ -121,12 +121,6 @@ struct kprobe_insn_page {
 	(offsetof(struct kprobe_insn_page, slot_used) +	\
 	 (sizeof(char) * (slots)))
 
-struct kprobe_insn_cache {
-	struct list_head pages;	/* list of kprobe_insn_page */
-	size_t insn_size;	/* size of instruction slot */
-	int nr_garbage;
-};
-
 static int slots_per_page(struct kprobe_insn_cache *c)
 {
 	return PAGE_SIZE/(c->insn_size * sizeof(kprobe_opcode_t));
@@ -138,8 +132,8 @@ enum kprobe_slot_state {
 	SLOT_USED = 2,
 };
 
-static DEFINE_MUTEX(kprobe_insn_mutex);	/* Protects kprobe_insn_slots */
-static struct kprobe_insn_cache kprobe_insn_slots = {
+struct kprobe_insn_cache kprobe_insn_slots = {
+	.mutex = __MUTEX_INITIALIZER(kprobe_insn_slots.mutex),
 	.pages = LIST_HEAD_INIT(kprobe_insn_slots.pages),
 	.insn_size = MAX_INSN_SIZE,
 	.nr_garbage = 0,
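Note: the struct kprobe_insn_cache definition deleted in the first hunk is not re-added anywhere in this file, yet the initializer above references a new .mutex member, so the descriptor presumably moves into the kprobes header and gains the mutex there. A minimal sketch of that assumed shared definition, with field names taken from the initializers in this patch (the exact header location is an assumption):

/* Assumed header-side definition (e.g. include/linux/kprobes.h); not part of this hunk. */
struct kprobe_insn_cache {
	struct mutex mutex;		/* serializes slot get/free for this cache */
	struct list_head pages;		/* list of kprobe_insn_page */
	size_t insn_size;		/* size of one instruction slot */
	int nr_garbage;			/* dirty slots waiting for collection */
};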
@@ -150,10 +144,12 @@ static int __kprobes collect_garbage_slots(struct kprobe_insn_cache *c);
  * __get_insn_slot() - Find a slot on an executable page for an instruction.
  * We allocate an executable page if there's no room on existing ones.
  */
-static kprobe_opcode_t __kprobes *__get_insn_slot(struct kprobe_insn_cache *c)
+kprobe_opcode_t __kprobes *__get_insn_slot(struct kprobe_insn_cache *c)
 {
 	struct kprobe_insn_page *kip;
+	kprobe_opcode_t *slot = NULL;
 
+	mutex_lock(&c->mutex);
  retry:
 	list_for_each_entry(kip, &c->pages, list) {
 		if (kip->nused < slots_per_page(c)) {
@@ -162,7 +158,8 @@ static kprobe_opcode_t __kprobes *__get_insn_slot(struct kprobe_insn_cache *c)
 			if (kip->slot_used[i] == SLOT_CLEAN) {
 				kip->slot_used[i] = SLOT_USED;
 				kip->nused++;
-				return kip->insns + (i * c->insn_size);
+				slot = kip->insns + (i * c->insn_size);
+				goto out;
 			}
 		}
 		/* kip->nused is broken. Fix it. */
@@ -178,7 +175,7 @@ static kprobe_opcode_t __kprobes *__get_insn_slot(struct kprobe_insn_cache *c)
 	/* All out of space. Need to allocate a new page. */
 	kip = kmalloc(KPROBE_INSN_PAGE_SIZE(slots_per_page(c)), GFP_KERNEL);
 	if (!kip)
-		return NULL;
+		goto out;
 
 	/*
 	 * Use module_alloc so this page is within +/- 2GB of where the
@@ -188,7 +185,7 @@ static kprobe_opcode_t __kprobes *__get_insn_slot(struct kprobe_insn_cache *c)
 	kip->insns = module_alloc(PAGE_SIZE);
 	if (!kip->insns) {
 		kfree(kip);
-		return NULL;
+		goto out;
 	}
 	INIT_LIST_HEAD(&kip->list);
 	memset(kip->slot_used, SLOT_CLEAN, slots_per_page(c));
@@ -196,19 +193,10 @@ static kprobe_opcode_t __kprobes *__get_insn_slot(struct kprobe_insn_cache *c)
 	kip->nused = 1;
 	kip->ngarbage = 0;
 	list_add(&kip->list, &c->pages);
-	return kip->insns;
-}
-
-
-kprobe_opcode_t __kprobes *get_insn_slot(void)
-{
-	kprobe_opcode_t *ret = NULL;
-
-	mutex_lock(&kprobe_insn_mutex);
-	ret = __get_insn_slot(&kprobe_insn_slots);
-	mutex_unlock(&kprobe_insn_mutex);
-
-	return ret;
+	slot = kip->insns;
+out:
+	mutex_unlock(&c->mutex);
+	return slot;
 }
 
 /* Return 1 if all garbages are collected, otherwise 0. */
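Note: with the cache's own mutex taken at entry and dropped on the common out: path, __get_insn_slot() is now self-locking, and a caller simply names the cache it wants a slot from instead of wrapping the call in kprobe_insn_mutex. A hypothetical caller, sketched only to illustrate the new calling convention (example_copy_probed_insn() and the x86-style p->ainsn.insn field are illustrative, not part of this patch):

/* Hypothetical caller: no external locking; __get_insn_slot() serializes
 * on c->mutex internally, and the matching release goes through
 * __free_insn_slot(&kprobe_insn_slots, ...), exported in the next hunk.
 */
static int example_copy_probed_insn(struct kprobe *p)
{
	p->ainsn.insn = __get_insn_slot(&kprobe_insn_slots);
	if (!p->ainsn.insn)
		return -ENOMEM;
	/* ... copy and fix up the probed instruction into the slot ... */
	return 0;
}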
@@ -255,11 +243,12 @@ static int __kprobes collect_garbage_slots(struct kprobe_insn_cache *c)
 	return 0;
 }
 
-static void __kprobes __free_insn_slot(struct kprobe_insn_cache *c,
-				       kprobe_opcode_t *slot, int dirty)
+void __kprobes __free_insn_slot(struct kprobe_insn_cache *c,
+				kprobe_opcode_t *slot, int dirty)
 {
 	struct kprobe_insn_page *kip;
 
+	mutex_lock(&c->mutex);
 	list_for_each_entry(kip, &c->pages, list) {
 		long idx = ((long)slot - (long)kip->insns) /
 			   (c->insn_size * sizeof(kprobe_opcode_t));
@@ -272,45 +261,23 @@ static void __kprobes __free_insn_slot(struct kprobe_insn_cache *c,
 				collect_garbage_slots(c);
 			} else
 				collect_one_slot(kip, idx);
-			return;
+			goto out;
 		}
 	}
 	/* Could not free this slot. */
 	WARN_ON(1);
+out:
+	mutex_unlock(&c->mutex);
 }
 
-void __kprobes free_insn_slot(kprobe_opcode_t * slot, int dirty)
-{
-	mutex_lock(&kprobe_insn_mutex);
-	__free_insn_slot(&kprobe_insn_slots, slot, dirty);
-	mutex_unlock(&kprobe_insn_mutex);
-}
 #ifdef CONFIG_OPTPROBES
 /* For optimized_kprobe buffer */
-static DEFINE_MUTEX(kprobe_optinsn_mutex); /* Protects kprobe_optinsn_slots */
-static struct kprobe_insn_cache kprobe_optinsn_slots = {
+struct kprobe_insn_cache kprobe_optinsn_slots = {
+	.mutex = __MUTEX_INITIALIZER(kprobe_optinsn_slots.mutex),
 	.pages = LIST_HEAD_INIT(kprobe_optinsn_slots.pages),
 	/* .insn_size is initialized later */
 	.nr_garbage = 0,
 };
-/* Get a slot for optimized_kprobe buffer */
-kprobe_opcode_t __kprobes *get_optinsn_slot(void)
-{
-	kprobe_opcode_t *ret = NULL;
-
-	mutex_lock(&kprobe_optinsn_mutex);
-	ret = __get_insn_slot(&kprobe_optinsn_slots);
-	mutex_unlock(&kprobe_optinsn_mutex);
-
-	return ret;
-}
-
-void __kprobes free_optinsn_slot(kprobe_opcode_t * slot, int dirty)
-{
-	mutex_lock(&kprobe_optinsn_mutex);
-	__free_insn_slot(&kprobe_optinsn_slots, slot, dirty);
-	mutex_unlock(&kprobe_optinsn_mutex);
-}
 #endif
 #endif
 
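Note: the open-coded wrappers get_insn_slot(), free_insn_slot(), get_optinsn_slot() and free_optinsn_slot() are removed here while the two caches and the double-underscore helpers become global, so arch callers presumably keep the old names via thin wrappers in the kprobes header. A sketch under that assumption; whether the real header uses open-coded inlines or a generating macro is not shown by this patch:

/* Assumed header-side wrappers replacing the functions removed above. */
extern struct kprobe_insn_cache kprobe_insn_slots;

static inline kprobe_opcode_t *get_insn_slot(void)
{
	return __get_insn_slot(&kprobe_insn_slots);
}

static inline void free_insn_slot(kprobe_opcode_t *slot, int dirty)
{
	__free_insn_slot(&kprobe_insn_slots, slot, dirty);
}

#ifdef CONFIG_OPTPROBES
extern struct kprobe_insn_cache kprobe_optinsn_slots;

static inline kprobe_opcode_t *get_optinsn_slot(void)
{
	return __get_insn_slot(&kprobe_optinsn_slots);
}

static inline void free_optinsn_slot(kprobe_opcode_t *slot, int dirty)
{
	__free_insn_slot(&kprobe_optinsn_slots, slot, dirty);
}
#endif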