x86: kprobes leftover cleanups

Eliminate __always_inline; all of these static functions are
only called once.  Minor whitespace cleanup.  Eliminate one
superfluous return at the end of a void function.  Change the one
#ifndef to #ifdef to match the sense of the rest of the config
tests.

Signed-off-by: Harvey Harrison <harvey.harrison@gmail.com>
Acked-by: Masami Hiramatsu <mhiramat@redhat.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
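
The __always_inline -> __kprobes swap is more than style: in kernels of
this era, __kprobes places a function in the .kprobes.text section, and
register_kprobe() refuses to plant a probe inside that section, so these
helpers can never themselves be probed and recurse. A minimal userspace
sketch of the mechanism; the section name "kprobes_text_demo" and the
demo helpers are made up for illustration (GNU ld only auto-generates
__start_/__stop_ symbols for C-identifier section names, whereas the
kernel's dotted .kprobes.text uses linker-script symbols instead):

#include <stdio.h>

/* The kernel macro of this era (include/linux/kprobes.h) is roughly:
 *   #define __kprobes __attribute__((__section__(".kprobes.text")))
 * This demo uses an undotted section so ld emits start/stop symbols.
 */
#define __kprobes_demo __attribute__((__section__("kprobes_text_demo")))

/* Provided automatically by GNU ld for orphan C-identifier sections. */
extern char __start_kprobes_text_demo[], __stop_kprobes_text_demo[];

static void __kprobes_demo protected_helper(void) { }

static int in_demo_kprobes_section(unsigned long addr)
{
	return addr >= (unsigned long)__start_kprobes_text_demo &&
	       addr <  (unsigned long)__stop_kprobes_text_demo;
}

int main(void)
{
	/* Mimics the blacklist check register_kprobe() performs. */
	printf("probe allowed on protected_helper? %s\n",
	       in_demo_kprobes_section((unsigned long)protected_helper)
	       ? "no" : "yes");
	return 0;
}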
Harvey Harrison, 17 years ago
commit e7b5e11eaa

1 changed file with 6 additions and 8 deletions:

arch/x86/kernel/kprobes.c (+6 −8)

@@ -159,7 +159,7 @@ struct kretprobe_blackpoint kretprobe_blacklist[] = {
 const int kretprobe_blacklist_size = ARRAY_SIZE(kretprobe_blacklist);
 
 /* Insert a jump instruction at address 'from', which jumps to address 'to'.*/
-static __always_inline void set_jmp_op(void *from, void *to)
+static void __kprobes set_jmp_op(void *from, void *to)
 {
 	struct __arch_jmp_op {
 		char op;
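
The hunk above cuts off mid-struct. For orientation, what set_jmp_op
implements is a 5-byte x86 "jmp rel32": opcode 0xe9 followed by a signed
32-bit displacement measured from the end of the jump instruction. A
hedged reconstruction of the full helper, close to but not guaranteed
identical to the era's source:

typedef int s32;	/* kernel's signed 32-bit type, for self-containment */

#define RELATIVEJUMP_INSTRUCTION 0xe9	/* x86 "jmp rel32" opcode */

static void __kprobes set_jmp_op(void *from, void *to)
{
	struct __arch_jmp_op {
		char op;	/* 0xe9 */
		s32 raddr;	/* displacement, relative to from + 5 */
	} __attribute__((packed)) *jop;

	jop = (struct __arch_jmp_op *)from;
	jop->raddr = (s32)((long)(to) - ((long)(from) + 5));
	jop->op = RELATIVEJUMP_INSTRUCTION;
}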
@@ -174,7 +174,7 @@ static __always_inline void set_jmp_op(void *from, void *to)
  * Returns non-zero if opcode is boostable.
  * RIP relative instructions are adjusted at copying time in 64 bits mode
  */
-static __always_inline int can_boost(kprobe_opcode_t *opcodes)
+static int __kprobes can_boost(kprobe_opcode_t *opcodes)
 {
 	kprobe_opcode_t opcode;
 	kprobe_opcode_t *orig_opcodes = opcodes;
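
"Boostable" means the instruction copied into the out-of-line slot can
simply be followed by a set_jmp_op() jump back into the original stream,
skipping the single-step trap entirely. The real can_boost() walks full
opcode tables; the toy check below covers only a few opcodes to
illustrate the idea and is not the kernel's logic:

/* Toy illustration only: real can_boost() uses exhaustive opcode tables. */
int can_boost_toy(const unsigned char *insn)
{
	switch (insn[0]) {
	case 0xe8:		/* call rel32: pushes an IP, needs fixup */
	case 0xe9:		/* jmp rel32: IP-relative, can't run from a copy */
		return 0;
	case 0x50 ... 0x57:	/* push reg: position-independent */
	case 0x90:		/* nop: position-independent */
		return 1;
	default:
		return 0;	/* when unsure, refuse the boost */
	}
}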
@@ -392,13 +392,13 @@ static void __kprobes set_current_kprobe(struct kprobe *p, struct pt_regs *regs,
 		kcb->kprobe_saved_flags &= ~X86_EFLAGS_IF;
 }
 
-static __always_inline void clear_btf(void)
+static void __kprobes clear_btf(void)
 {
 	if (test_thread_flag(TIF_DEBUGCTLMSR))
 		wrmsr(MSR_IA32_DEBUGCTLMSR, 0, 0);
 }
 
-static __always_inline void restore_btf(void)
+static void __kprobes restore_btf(void)
 {
 	if (test_thread_flag(TIF_DEBUGCTLMSR))
 		wrmsr(MSR_IA32_DEBUGCTLMSR, current->thread.debugctlmsr, 0);
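
clear_btf()/restore_btf() matter because the single-step set up in
prepare_singlestep() relies on EFLAGS.TF trapping after every
instruction. With BTF (branch trap flag) set in IA32_DEBUGCTL, TF would
instead trap only on branches, missing the #DB kprobes expects. A sketch
of the relevant bit layout; the constants match the well-known MSR
definitions, but the commentary is an annotation, not part of the patch:

/* IA32_DEBUGCTL is MSR 0x1d9; the two low bits are the ones in play:
 *   bit 0 (LBR): last-branch recording
 *   bit 1 (BTF): with EFLAGS.TF set, trap on branches, not instructions
 * clear_btf() writes 0 so the stepped copy traps after one instruction;
 * restore_btf() then restores the task's saved debugctlmsr value.
 */
#define MSR_IA32_DEBUGCTLMSR	0x000001d9
#define DEBUGCTLMSR_LBR		(1UL << 0)
#define DEBUGCTLMSR_BTF		(1UL << 1)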
@@ -409,7 +409,7 @@ static void __kprobes prepare_singlestep(struct kprobe *p, struct pt_regs *regs)
 	clear_btf();
 	regs->flags |= X86_EFLAGS_TF;
 	regs->flags &= ~X86_EFLAGS_IF;
-	/*single step inline if the instruction is an int3*/
+	/* single step inline if the instruction is an int3 */
 	if (p->opcode == BREAKPOINT_INSTRUCTION)
 		regs->ip = (unsigned long)p->addr;
 	else
@@ -767,7 +767,7 @@ static void __kprobes resume_execution(struct kprobe *p,
 	case 0xe8:	/* call relative - Fix return addr */
 		*tos = orig_ip + (*tos - copy_ip);
 		break;
-#ifndef CONFIG_X86_64
+#ifdef CONFIG_X86_32
 	case 0x9a:	/* call absolute -- same as call absolute, indirect */
 		*tos = orig_ip + (*tos - copy_ip);
 		goto no_change;
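
The *tos = orig_ip + (*tos - copy_ip) fixup in this hunk rebases a
pushed return address: the call executed from the out-of-line copy at
copy_ip, so the CPU pushed an address inside the copy buffer, and the
offset within the copy carries over to the original location. A worked
example with made-up addresses:

#include <stdio.h>

int main(void)
{
	unsigned long orig_ip = 0x81000100UL;	/* hypothetical probed call site */
	unsigned long copy_ip = 0x82345678UL;	/* hypothetical insn copy slot */
	unsigned long tos = copy_ip + 5;	/* what a 5-byte call pushed */

	tos = orig_ip + (tos - copy_ip);	/* the fixup from the hunk */
	printf("fixed return address: %#lx\n", tos);	/* orig_ip + 5 */
	return 0;
}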
@@ -813,8 +813,6 @@ static void __kprobes resume_execution(struct kprobe *p,
 
 no_change:
 	restore_btf();
-
-	return;
 }
 
 /*