x86: entry_64.S: remove whitespace at end of lines

Impact: cleanup

All blame goes to: color white,red "[^[:graph:]]+$"
in .nanorc ;).

Signed-off-by: Alexander van Heukelum <heukelum@fastmail.fm>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
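
The .nanorc rule quoted above paints any trailing run of non-printing characters in red, which is how these stray blanks were spotted. A minimal sketch of the same check from the shell, assuming GNU grep is available (-n prints line numbers):

    grep -nE '[[:space:]]+$' arch/x86/kernel/entry_64.S

git diff --check reports the same trailing-whitespace errors when a patch like this one is applied or staged.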
Alexander van Heukelum, 16 years ago
commit 0bd7b79851
1 changed file with 95 additions and 95 deletions
--- a/arch/x86/kernel/entry_64.S
+++ b/arch/x86/kernel/entry_64.S
@@ -11,15 +11,15 @@
  *
  * NOTE: This code handles signal-recognition, which happens every time
  * after an interrupt and after each system call.
- * 
- * Normal syscalls and interrupts don't save a full stack frame, this is 
+ *
+ * Normal syscalls and interrupts don't save a full stack frame, this is
  * only done for syscall tracing, signals or fork/exec et.al.
- * 
- * A note on terminology:	 
- * - top of stack: Architecture defined interrupt frame from SS to RIP 
- * at the top of the kernel process stack.	
+ *
+ * A note on terminology:
+ * - top of stack: Architecture defined interrupt frame from SS to RIP
+ * at the top of the kernel process stack.
  * - partial stack frame: partially saved registers upto R11.
- * - full stack frame: Like partial stack frame, but all register saved. 
+ * - full stack frame: Like partial stack frame, but all register saved.
  *
  * Some macro usage:
  * - CFI macros are used to generate dwarf2 unwind information for better
@@ -142,7 +142,7 @@ END(mcount)
 
 #ifndef CONFIG_PREEMPT
 #define retint_kernel retint_restore_args
-#endif	
+#endif
 
 #ifdef CONFIG_PARAVIRT
 ENTRY(native_usergs_sysret64)
@@ -161,14 +161,14 @@ ENTRY(native_usergs_sysret64)
 .endm
 
 /*
- * C code is not supposed to know about undefined top of stack. Every time 
- * a C function with an pt_regs argument is called from the SYSCALL based 
+ * C code is not supposed to know about undefined top of stack. Every time
+ * a C function with an pt_regs argument is called from the SYSCALL based
  * fast path FIXUP_TOP_OF_STACK is needed.
  * RESTORE_TOP_OF_STACK syncs the syscall state after any possible ptregs
  * manipulation.
- */        	
-		
-	/* %rsp:at FRAMEEND */ 
+ */
+
+	/* %rsp:at FRAMEEND */
 	.macro FIXUP_TOP_OF_STACK tmp
 	movq	%gs:pda_oldrsp,\tmp
 	movq  	\tmp,RSP(%rsp)
@@ -244,8 +244,8 @@ ENTRY(native_usergs_sysret64)
 	.endm
 /*
  * A newly forked process directly context switches into this.
- */ 	
-/* rdi:	prev */	
+ */
+/* rdi:	prev */
 ENTRY(ret_from_fork)
 	CFI_DEFAULT_STACK
 	push kernel_eflags(%rip)
@@ -256,7 +256,7 @@ ENTRY(ret_from_fork)
 	GET_THREAD_INFO(%rcx)
 	testl $(_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT),TI_flags(%rcx)
 	jnz rff_trace
-rff_action:	
+rff_action:
 	RESTORE_REST
 	testl $3,CS-ARGOFFSET(%rsp)	# from kernel_thread?
 	je   int_ret_from_sys_call
@@ -267,7 +267,7 @@ rff_action:
 rff_trace:
 	movq %rsp,%rdi
 	call syscall_trace_leave
-	GET_THREAD_INFO(%rcx)	
+	GET_THREAD_INFO(%rcx)
 	jmp rff_action
 	CFI_ENDPROC
 END(ret_from_fork)
@@ -278,20 +278,20 @@ END(ret_from_fork)
  * SYSCALL does not save anything on the stack and does not change the
  * stack pointer.
  */
-		
+
 /*
- * Register setup:	
+ * Register setup:
  * rax  system call number
  * rdi  arg0
- * rcx  return address for syscall/sysret, C arg3 
+ * rcx  return address for syscall/sysret, C arg3
  * rsi  arg1
- * rdx  arg2	
+ * rdx  arg2
  * r10  arg3 	(--> moved to rcx for C)
  * r8   arg4
  * r9   arg5
  * r11  eflags for syscall/sysret, temporary for C
- * r12-r15,rbp,rbx saved by C code, not touched. 		
- * 
+ * r12-r15,rbp,rbx saved by C code, not touched.
+ *
  * Interrupts are off on entry.
  * Only called from user space.
  *
@@ -301,7 +301,7 @@ END(ret_from_fork)
  * When user can change the frames always force IRET. That is because
  * it deals with uncanonical addresses better. SYSRET has trouble
  * with them due to bugs in both AMD and Intel CPUs.
- */ 			 		
+ */
 
 ENTRY(system_call)
 	CFI_STARTPROC	simple
@@ -317,7 +317,7 @@ ENTRY(system_call)
 	 */
 ENTRY(system_call_after_swapgs)
 
-	movq	%rsp,%gs:pda_oldrsp 
+	movq	%rsp,%gs:pda_oldrsp
 	movq	%gs:pda_kernelstack,%rsp
 	/*
 	 * No need to follow this irqs off/on section - it's straight
@@ -325,7 +325,7 @@ ENTRY(system_call_after_swapgs)
 	 */
 	ENABLE_INTERRUPTS(CLBR_NONE)
 	SAVE_ARGS 8,1
-	movq  %rax,ORIG_RAX-ARGOFFSET(%rsp) 
+	movq  %rax,ORIG_RAX-ARGOFFSET(%rsp)
 	movq  %rcx,RIP-ARGOFFSET(%rsp)
 	CFI_REL_OFFSET rip,RIP-ARGOFFSET
 	GET_THREAD_INFO(%rcx)
@@ -339,19 +339,19 @@ system_call_fastpath:
 	movq %rax,RAX-ARGOFFSET(%rsp)
 /*
  * Syscall return path ending with SYSRET (fast path)
- * Has incomplete stack frame and undefined top of stack. 
- */		
+ * Has incomplete stack frame and undefined top of stack.
+ */
 ret_from_sys_call:
 	movl $_TIF_ALLWORK_MASK,%edi
 	/* edi:	flagmask */
-sysret_check:		
+sysret_check:
 	LOCKDEP_SYS_EXIT
 	GET_THREAD_INFO(%rcx)
 	DISABLE_INTERRUPTS(CLBR_NONE)
 	TRACE_IRQS_OFF
 	movl TI_flags(%rcx),%edx
 	andl %edi,%edx
-	jnz  sysret_careful 
+	jnz  sysret_careful
 	CFI_REMEMBER_STATE
 	/*
 	 * sysretq will re-enable interrupts:
@@ -366,7 +366,7 @@ sysret_check:
 
 	CFI_RESTORE_STATE
 	/* Handle reschedules */
-	/* edx:	work, edi: workmask */	
+	/* edx:	work, edi: workmask */
 sysret_careful:
 	bt $TIF_NEED_RESCHED,%edx
 	jnc sysret_signal
@@ -379,7 +379,7 @@ sysret_careful:
 	CFI_ADJUST_CFA_OFFSET -8
 	jmp sysret_check
 
-	/* Handle a signal */ 
+	/* Handle a signal */
 sysret_signal:
 	TRACE_IRQS_ON
 	ENABLE_INTERRUPTS(CLBR_NONE)
@@ -398,7 +398,7 @@ sysret_signal:
 	DISABLE_INTERRUPTS(CLBR_NONE)
 	TRACE_IRQS_OFF
 	jmp int_with_check
-	
+
 badsys:
 	movq $-ENOSYS,RAX-ARGOFFSET(%rsp)
 	jmp ret_from_sys_call
@@ -437,7 +437,7 @@ sysret_audit:
 #endif	/* CONFIG_AUDITSYSCALL */
 
 	/* Do syscall tracing */
-tracesys:			 
+tracesys:
 #ifdef CONFIG_AUDITSYSCALL
 	testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags(%rcx)
 	jz auditsys
@@ -460,8 +460,8 @@ tracesys:
 	call *sys_call_table(,%rax,8)
 	movq %rax,RAX-ARGOFFSET(%rsp)
 	/* Use IRET because user could have changed frame */
-		
-/* 
+
+/*
  * Syscall return path ending with IRET.
  * Has correct top of stack, but partial stack frame.
  */
@@ -505,18 +505,18 @@ int_very_careful:
 	TRACE_IRQS_ON
 	ENABLE_INTERRUPTS(CLBR_NONE)
 	SAVE_REST
-	/* Check for syscall exit trace */	
+	/* Check for syscall exit trace */
 	testl $_TIF_WORK_SYSCALL_EXIT,%edx
 	jz int_signal
 	pushq %rdi
 	CFI_ADJUST_CFA_OFFSET 8
-	leaq 8(%rsp),%rdi	# &ptregs -> arg1	
+	leaq 8(%rsp),%rdi	# &ptregs -> arg1
 	call syscall_trace_leave
 	popq %rdi
 	CFI_ADJUST_CFA_OFFSET -8
 	andl $~(_TIF_WORK_SYSCALL_EXIT|_TIF_SYSCALL_EMU),%edi
 	jmp int_restore_rest
-	
+
 int_signal:
 	testl $_TIF_DO_NOTIFY_MASK,%edx
 	jz 1f
@@ -531,11 +531,11 @@ int_restore_rest:
 	jmp int_with_check
 	CFI_ENDPROC
 END(system_call)
-		
-/* 
+
+/*
  * Certain special system calls that need to save a complete full stack frame.
- */ 								
-	
+ */
+
 	.macro PTREGSCALL label,func,arg
 	.globl \label
 \label:
@@ -572,7 +572,7 @@ ENTRY(ptregscall_common)
 	ret
 	CFI_ENDPROC
 END(ptregscall_common)
-	
+
 ENTRY(stub_execve)
 	CFI_STARTPROC
 	popq %r11
@@ -588,11 +588,11 @@ ENTRY(stub_execve)
 	jmp int_ret_from_sys_call
 	CFI_ENDPROC
 END(stub_execve)
-	
+
 /*
  * sigreturn is special because it needs to restore all registers on return.
  * This cannot be done with SYSRET, so use the IRET return path instead.
- */                
+ */
 ENTRY(stub_rt_sigreturn)
 	CFI_STARTPROC
 	addq $8, %rsp
@@ -685,12 +685,12 @@ exit_intr:
 	GET_THREAD_INFO(%rcx)
 	testl $3,CS-ARGOFFSET(%rsp)
 	je retint_kernel
-	
+
 	/* Interrupt came from user space */
 	/*
 	 * Has a correct top of stack, but a partial stack frame
 	 * %rcx: thread info. Interrupts off.
-	 */		
+	 */
 retint_with_reschedule:
 	movl $_TIF_WORK_MASK,%edi
 retint_check:
@@ -763,20 +763,20 @@ retint_careful:
 	pushq %rdi
 	CFI_ADJUST_CFA_OFFSET	8
 	call  schedule
-	popq %rdi		
+	popq %rdi
 	CFI_ADJUST_CFA_OFFSET	-8
 	GET_THREAD_INFO(%rcx)
 	DISABLE_INTERRUPTS(CLBR_NONE)
 	TRACE_IRQS_OFF
 	jmp retint_check
-	
+
 retint_signal:
 	testl $_TIF_DO_NOTIFY_MASK,%edx
 	jz    retint_swapgs
 	TRACE_IRQS_ON
 	ENABLE_INTERRUPTS(CLBR_NONE)
 	SAVE_REST
-	movq $-1,ORIG_RAX(%rsp) 			
+	movq $-1,ORIG_RAX(%rsp)
 	xorl %esi,%esi		# oldset
 	movq %rsp,%rdi		# &pt_regs
 	call do_notify_resume
@@ -798,14 +798,14 @@ ENTRY(retint_kernel)
 	jnc  retint_restore_args
 	call preempt_schedule_irq
 	jmp exit_intr
-#endif	
+#endif
 
 	CFI_ENDPROC
 END(common_interrupt)
-	
+
 /*
  * APIC interrupts.
- */		
+ */
 	.macro apicinterrupt num,func
 	INTR_FRAME
 	pushq $~(\num)
@@ -823,14 +823,14 @@ ENTRY(threshold_interrupt)
 	apicinterrupt THRESHOLD_APIC_VECTOR,mce_threshold_interrupt
 END(threshold_interrupt)
 
-#ifdef CONFIG_SMP	
+#ifdef CONFIG_SMP
 ENTRY(reschedule_interrupt)
 	apicinterrupt RESCHEDULE_VECTOR,smp_reschedule_interrupt
 END(reschedule_interrupt)
 
 	.macro INVALIDATE_ENTRY num
 ENTRY(invalidate_interrupt\num)
-	apicinterrupt INVALIDATE_TLB_VECTOR_START+\num,smp_invalidate_interrupt	
+	apicinterrupt INVALIDATE_TLB_VECTOR_START+\num,smp_invalidate_interrupt
 END(invalidate_interrupt\num)
 	.endm
 
@@ -869,22 +869,22 @@ END(error_interrupt)
 ENTRY(spurious_interrupt)
 	apicinterrupt SPURIOUS_APIC_VECTOR,smp_spurious_interrupt
 END(spurious_interrupt)
-				
+
 /*
  * Exception entry points.
- */ 		
+ */
 	.macro zeroentry sym
 	INTR_FRAME
 	PARAVIRT_ADJUST_EXCEPTION_FRAME
-	pushq $0	/* push error code/oldrax */ 
+	pushq $0	/* push error code/oldrax */
 	CFI_ADJUST_CFA_OFFSET 8
-	pushq %rax	/* push real oldrax to the rdi slot */ 
+	pushq %rax	/* push real oldrax to the rdi slot */
 	CFI_ADJUST_CFA_OFFSET 8
 	CFI_REL_OFFSET rax,0
 	leaq  \sym(%rip),%rax
 	jmp error_entry
 	CFI_ENDPROC
-	.endm	
+	.endm
 
 	.macro errorentry sym
 	XCPT_FRAME
@@ -998,13 +998,13 @@ paranoid_schedule\trace:
 
 /*
  * Exception entry point. This expects an error code/orig_rax on the stack
- * and the exception handler in %rax.	
- */ 		  				
+ * and the exception handler in %rax.
+ */
 KPROBE_ENTRY(error_entry)
 	_frame RDI
 	CFI_REL_OFFSET rax,0
 	/* rdi slot contains rax, oldrax contains error code */
-	cld	
+	cld
 	subq  $14*8,%rsp
 	CFI_ADJUST_CFA_OFFSET	(14*8)
 	movq %rsi,13*8(%rsp)
@@ -1015,7 +1015,7 @@ KPROBE_ENTRY(error_entry)
 	CFI_REL_OFFSET	rdx,RDX
 	movq %rcx,11*8(%rsp)
 	CFI_REL_OFFSET	rcx,RCX
-	movq %rsi,10*8(%rsp)	/* store rax */ 
+	movq %rsi,10*8(%rsp)	/* store rax */
 	CFI_REL_OFFSET	rax,RAX
 	movq %r8, 9*8(%rsp)
 	CFI_REL_OFFSET	r8,R8
@@ -1025,29 +1025,29 @@ KPROBE_ENTRY(error_entry)
 	CFI_REL_OFFSET	r10,R10
 	movq %r11,6*8(%rsp)
 	CFI_REL_OFFSET	r11,R11
-	movq %rbx,5*8(%rsp) 
+	movq %rbx,5*8(%rsp)
 	CFI_REL_OFFSET	rbx,RBX
-	movq %rbp,4*8(%rsp) 
+	movq %rbp,4*8(%rsp)
 	CFI_REL_OFFSET	rbp,RBP
-	movq %r12,3*8(%rsp) 
+	movq %r12,3*8(%rsp)
 	CFI_REL_OFFSET	r12,R12
-	movq %r13,2*8(%rsp) 
+	movq %r13,2*8(%rsp)
 	CFI_REL_OFFSET	r13,R13
-	movq %r14,1*8(%rsp) 
+	movq %r14,1*8(%rsp)
 	CFI_REL_OFFSET	r14,R14
-	movq %r15,(%rsp) 
+	movq %r15,(%rsp)
 	CFI_REL_OFFSET	r15,R15
-	xorl %ebx,%ebx	
+	xorl %ebx,%ebx
 	testl $3,CS(%rsp)
 	je  error_kernelspace
-error_swapgs:	
+error_swapgs:
 	SWAPGS
 error_sti:
 	TRACE_IRQS_OFF
-	movq %rdi,RDI(%rsp) 	
+	movq %rdi,RDI(%rsp)
 	CFI_REL_OFFSET	rdi,RDI
 	movq %rsp,%rdi
-	movq ORIG_RAX(%rsp),%rsi	/* get error code */ 
+	movq ORIG_RAX(%rsp),%rsi	/* get error code */
 	movq $-1,ORIG_RAX(%rsp)
 	call *%rax
 	/* ebx:	no swapgs flag (1: don't need swapgs, 0: need it) */
@@ -1056,7 +1056,7 @@ error_exit:
 	RESTORE_REST
 	DISABLE_INTERRUPTS(CLBR_NONE)
 	TRACE_IRQS_OFF
-	GET_THREAD_INFO(%rcx)	
+	GET_THREAD_INFO(%rcx)
 	testl %eax,%eax
 	jne  retint_kernel
 	LOCKDEP_SYS_EXIT_IRQ
@@ -1072,7 +1072,7 @@ error_kernelspace:
        /* There are two places in the kernel that can potentially fault with
           usergs. Handle them here. The exception handlers after
 	   iret run with kernel gs again, so don't set the user space flag.
-	   B stepping K8s sometimes report an truncated RIP for IRET 
+	   B stepping K8s sometimes report an truncated RIP for IRET
 	   exceptions returning to compat mode. Check for these here too. */
 	leaq irq_return(%rip),%rcx
 	cmpq %rcx,RIP(%rsp)
@@ -1084,17 +1084,17 @@ error_kernelspace:
         je   error_swapgs
 	jmp  error_sti
 KPROBE_END(error_entry)
-	
+
        /* Reload gs selector with exception handling */
-       /* edi:  new selector */ 
+       /* edi:  new selector */
 ENTRY(native_load_gs_index)
 	CFI_STARTPROC
 	pushf
 	CFI_ADJUST_CFA_OFFSET 8
 	DISABLE_INTERRUPTS(CLBR_ANY | ~(CLBR_RDI))
         SWAPGS
-gs_change:     
-        movl %edi,%gs   
+gs_change:
+        movl %edi,%gs
 2:	mfence		/* workaround */
 	SWAPGS
         popf
@@ -1102,20 +1102,20 @@ gs_change:
         ret
 	CFI_ENDPROC
 ENDPROC(native_load_gs_index)
-       
+
         .section __ex_table,"a"
         .align 8
         .quad gs_change,bad_gs
         .previous
         .section .fixup,"ax"
 	/* running with kernelgs */
-bad_gs: 
+bad_gs:
 	SWAPGS			/* switch back to user gs */
 	xorl %eax,%eax
         movl %eax,%gs
         jmp  2b
-        .previous       
-	
+        .previous
+
 /*
  * Create a kernel thread.
  *
@@ -1138,7 +1138,7 @@ ENTRY(kernel_thread)
 
 	xorl %r8d,%r8d
 	xorl %r9d,%r9d
-	
+
 	# clone now
 	call do_fork
 	movq %rax,RAX(%rsp)
@@ -1149,14 +1149,14 @@ ENTRY(kernel_thread)
 	 * so internally to the x86_64 port you can rely on kernel_thread()
 	 * not to reschedule the child before returning, this avoids the need
 	 * of hacks for example to fork off the per-CPU idle tasks.
-         * [Hopefully no generic code relies on the reschedule -AK]	
+         * [Hopefully no generic code relies on the reschedule -AK]
 	 */
 	RESTORE_ALL
 	UNFAKE_STACK_FRAME
 	ret
 	CFI_ENDPROC
 ENDPROC(kernel_thread)
-	
+
 child_rip:
 	pushq $0		# fake return address
 	CFI_STARTPROC
@@ -1191,10 +1191,10 @@ ENDPROC(child_rip)
 ENTRY(kernel_execve)
 	CFI_STARTPROC
 	FAKE_STACK_FRAME $0
-	SAVE_ALL	
+	SAVE_ALL
 	movq %rsp,%rcx
 	call sys_execve
-	movq %rax, RAX(%rsp)	
+	movq %rax, RAX(%rsp)
 	RESTORE_REST
 	testq %rax,%rax
 	je int_ret_from_sys_call
@@ -1213,7 +1213,7 @@ ENTRY(coprocessor_error)
 END(coprocessor_error)
 
 ENTRY(simd_coprocessor_error)
-	zeroentry do_simd_coprocessor_error	
+	zeroentry do_simd_coprocessor_error
 END(simd_coprocessor_error)
 
 ENTRY(device_not_available)
@@ -1225,12 +1225,12 @@ KPROBE_ENTRY(debug)
  	INTR_FRAME
 	PARAVIRT_ADJUST_EXCEPTION_FRAME
 	pushq $0
-	CFI_ADJUST_CFA_OFFSET 8		
+	CFI_ADJUST_CFA_OFFSET 8
 	paranoidentry do_debug, DEBUG_STACK
 	paranoidexit
 KPROBE_END(debug)
 
-	/* runs on exception stack */	
+	/* runs on exception stack */
 KPROBE_ENTRY(nmi)
 	INTR_FRAME
 	PARAVIRT_ADJUST_EXCEPTION_FRAME
@@ -1264,7 +1264,7 @@ ENTRY(bounds)
 END(bounds)
 
 ENTRY(invalid_op)
-	zeroentry do_invalid_op	
+	zeroentry do_invalid_op
 END(invalid_op)
 
 ENTRY(coprocessor_segment_overrun)
@@ -1319,7 +1319,7 @@ ENTRY(machine_check)
 	INTR_FRAME
 	PARAVIRT_ADJUST_EXCEPTION_FRAME
 	pushq $0
-	CFI_ADJUST_CFA_OFFSET 8	
+	CFI_ADJUST_CFA_OFFSET 8
 	paranoidentry do_machine_check
 	jmp paranoid_exit1
 	CFI_ENDPROC