
[PARISC] Replace uses of __LP64__ with CONFIG_64BIT

2.6.12-rc4-pa3 s/__LP64__/CONFIG_64BIT/ and fixup config.h usage

Signed-off-by: Grant Grundler <grundler@parisc-linux.org>

Signed-off-by: Kyle McMartin <kyle@parisc-linux.org>
Grant Grundler authored 19 years ago
commit 413059f28e
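
For context, a minimal sketch (not part of the commit) of the difference between the two guards. __LP64__ is defined by the compiler whenever it targets the LP64 data model, so the test silently depends on the compiler flags in effect; CONFIG_64BIT is a Kconfig symbol that reaches the preprocessor through the kernel configuration header, so the wide/narrow choice follows the kernel .config in C and assembly alike. The instruction and register choices below are illustrative only:

	#include <linux/config.h>	/* 2.6-era wrapper exposing the CONFIG_* symbols */

	#ifdef CONFIG_64BIT		/* was: #ifdef __LP64__ */
		ldd	0(%r26), %r1	/* wide kernel: 64-bit load */
	#else
		ldw	0(%r26), %r1	/* narrow kernel: 32-bit load */
	#endif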

+ 49 - 49
arch/parisc/kernel/entry.S

@@ -37,7 +37,7 @@
 #include <asm/unistd.h>
 #include <asm/thread_info.h>
 
-#ifdef __LP64__
+#ifdef CONFIG_64BIT
 #define CMPIB           cmpib,*
 #define CMPB            cmpb,*
 #define COND(x)		*x
@@ -217,7 +217,7 @@
 	va  = r8	/* virtual address for which the trap occured */
 	spc = r24	/* space for which the trap occured */
 
-#ifndef __LP64__
+#ifndef CONFIG_64BIT
 
 	/*
 	 * itlb miss interruption handler (parisc 1.1 - 32 bit)
@@ -239,7 +239,7 @@
 
 	.macro	itlb_20 code
 	mfctl	%pcsq, spc
-#ifdef __LP64__
+#ifdef CONFIG_64BIT
 	b       itlb_miss_20w
 #else
 	b	itlb_miss_20
@@ -249,7 +249,7 @@
 	.align		32
 	.endm
 	
-#ifndef __LP64__
+#ifndef CONFIG_64BIT
 	/*
 	 * naitlb miss interruption handler (parisc 1.1 - 32 bit)
 	 *
@@ -286,7 +286,7 @@
 	.macro	naitlb_20 code
 
 	mfctl	%isr,spc
-#ifdef __LP64__
+#ifdef CONFIG_64BIT
 	b       itlb_miss_20w
 #else
 	b	itlb_miss_20
@@ -299,7 +299,7 @@
 	.align		32
 	.endm
 	
-#ifndef __LP64__
+#ifndef CONFIG_64BIT
 	/*
 	 * dtlb miss interruption handler (parisc 1.1 - 32 bit)
 	 */
@@ -321,7 +321,7 @@
 	.macro	dtlb_20 code
 
 	mfctl	%isr, spc
-#ifdef __LP64__
+#ifdef CONFIG_64BIT
 	b       dtlb_miss_20w
 #else
 	b	dtlb_miss_20
@@ -331,7 +331,7 @@
 	.align		32
 	.endm
 	
-#ifndef __LP64__
+#ifndef CONFIG_64BIT
 	/* nadtlb miss interruption handler (parisc 1.1 - 32 bit) */
 
 	.macro	nadtlb_11 code
@@ -349,7 +349,7 @@
 	.macro	nadtlb_20 code
 
 	mfctl	%isr,spc
-#ifdef __LP64__
+#ifdef CONFIG_64BIT
 	b       nadtlb_miss_20w
 #else
 	b       nadtlb_miss_20
@@ -359,7 +359,7 @@
 	.align		32
 	.endm
 	
-#ifndef __LP64__
+#ifndef CONFIG_64BIT
 	/*
 	 * dirty bit trap interruption handler (parisc 1.1 - 32 bit)
 	 */
@@ -381,7 +381,7 @@
 	.macro	dbit_20 code
 
 	mfctl	%isr,spc
-#ifdef __LP64__
+#ifdef CONFIG_64BIT
 	b       dbit_trap_20w
 #else
 	b	dbit_trap_20
@@ -394,7 +394,7 @@
 	/* The following are simple 32 vs 64 bit instruction
 	 * abstractions for the macros */
 	.macro		EXTR	reg1,start,length,reg2
-#ifdef __LP64__
+#ifdef CONFIG_64BIT
 	extrd,u		\reg1,32+\start,\length,\reg2
 #else
 	extrw,u		\reg1,\start,\length,\reg2
@@ -402,7 +402,7 @@
 	.endm
 
 	.macro		DEP	reg1,start,length,reg2
-#ifdef __LP64__
+#ifdef CONFIG_64BIT
 	depd		\reg1,32+\start,\length,\reg2
 #else
 	depw		\reg1,\start,\length,\reg2
@@ -410,7 +410,7 @@
 	.endm
 
 	.macro		DEPI	val,start,length,reg
-#ifdef __LP64__
+#ifdef CONFIG_64BIT
 	depdi		\val,32+\start,\length,\reg
 #else
 	depwi		\val,\start,\length,\reg
@@ -421,7 +421,7 @@
 	 * fault.  We have to extract this and place it in the va,
 	 * zeroing the corresponding bits in the space register */
 	.macro		space_adjust	spc,va,tmp
-#ifdef __LP64__
+#ifdef CONFIG_64BIT
 	extrd,u		\spc,63,SPACEID_SHIFT,\tmp
 	depd		%r0,63,SPACEID_SHIFT,\spc
 	depd		\tmp,31,SPACEID_SHIFT,\va
@@ -479,7 +479,7 @@
 	bb,>=,n		\pmd,_PxD_PRESENT_BIT,\fault
 	DEP		%r0,31,PxD_FLAG_SHIFT,\pmd /* clear flags */
 	copy		\pmd,%r9
-#ifdef __LP64__
+#ifdef CONFIG_64BIT
 	shld		%r9,PxD_VALUE_SHIFT,\pmd
 #else
 	shlw		%r9,PxD_VALUE_SHIFT,\pmd
@@ -610,7 +610,7 @@
 	.macro		do_alias	spc,tmp,tmp1,va,pte,prot,fault
 	cmpib,COND(<>),n 0,\spc,\fault
 	ldil		L%(TMPALIAS_MAP_START),\tmp
-#if defined(__LP64__) && (TMPALIAS_MAP_START >= 0x80000000)
+#if defined(CONFIG_64BIT) && (TMPALIAS_MAP_START >= 0x80000000)
 	/* on LP64, ldi will sign extend into the upper 32 bits,
 	 * which is behaviour we don't want */
 	depdi		0,31,32,\tmp
@@ -624,7 +624,7 @@
 	 * OK, it is in the temp alias region, check whether "from" or "to".
 	 * Check "subtle" note in pacache.S re: r23/r26.
 	 */
-#ifdef __LP64__
+#ifdef CONFIG_64BIT
 	extrd,u,*=	\va,41,1,%r0
 #else
 	extrw,u,=	\va,9,1,%r0
@@ -691,7 +691,7 @@ fault_vector_20:
 	def		30
 	def		31
 
-#ifndef __LP64__
+#ifndef CONFIG_64BIT
 
 	.export fault_vector_11
 	
@@ -764,7 +764,7 @@ __kernel_thread:
 
 	copy	%r30, %r1
 	ldo	PT_SZ_ALGN(%r30),%r30
-#ifdef __LP64__
+#ifdef CONFIG_64BIT
 	/* Yo, function pointers in wide mode are little structs... -PB */
 	ldd	24(%r26), %r2
 	STREG	%r2, PT_GR27(%r1)	/* Store childs %dp */
@@ -780,7 +780,7 @@ __kernel_thread:
 	or	%r26, %r24, %r26      /* will have kernel mappings.	 */
 	ldi	1, %r25			/* stack_start, signals kernel thread */
 	stw	%r0, -52(%r30)	     	/* user_tid */
-#ifdef __LP64__
+#ifdef CONFIG_64BIT
 	ldo	-16(%r30),%r29		/* Reference param save area */
 #endif
 	BL	do_fork, %r2
@@ -809,7 +809,7 @@ ret_from_kernel_thread:
 
 	LDREG	TI_TASK-THREAD_SZ_ALGN(%r30), %r1
 	LDREG	TASK_PT_GR25(%r1), %r26
-#ifdef __LP64__
+#ifdef CONFIG_64BIT
 	LDREG	TASK_PT_GR27(%r1), %r27
 	LDREG	TASK_PT_GR22(%r1), %r22
 #endif
@@ -817,7 +817,7 @@ ret_from_kernel_thread:
 	ble	0(%sr7, %r1)
 	copy	%r31, %r2
 
-#ifdef __LP64__
+#ifdef CONFIG_64BIT
 	ldo	-16(%r30),%r29		/* Reference param save area */
 	loadgp				/* Thread could have been in a module */
 #endif
@@ -838,7 +838,7 @@ __execve:
 	STREG	%r26, PT_GR26(%r16)
 	STREG	%r25, PT_GR25(%r16)
 	STREG	%r24, PT_GR24(%r16)
-#ifdef __LP64__
+#ifdef CONFIG_64BIT
 	ldo	-16(%r30),%r29		/* Reference param save area */
 #endif
 	BL	sys_execve, %r2
@@ -916,7 +916,7 @@ syscall_exit_rfi:
 	STREG	%r19,PT_IAOQ1(%r16)
 	LDREG   PT_PSW(%r16),%r19
 	load32	USER_PSW_MASK,%r1
-#ifdef __LP64__
+#ifdef CONFIG_64BIT
 	load32	USER_PSW_HI_MASK,%r20
 	depd    %r20,31,32,%r1
 #endif
@@ -960,7 +960,7 @@ intr_return:
 	/* shift left ____cacheline_aligned (aka L1_CACHE_BYTES) amount
 	** irq_stat[] is defined using ____cacheline_aligned.
 	*/
-#ifdef __LP64__
+#ifdef CONFIG_64BIT
 	shld	%r1, 6, %r20
 #else
 	shlw	%r1, 5, %r20
@@ -1018,7 +1018,7 @@ intr_restore:
 	.import do_softirq,code
 intr_do_softirq:
 	BL      do_softirq,%r2
-#ifdef __LP64__
+#ifdef CONFIG_64BIT
 	ldo	-16(%r30),%r29		/* Reference param save area */
 #else
 	nop
@@ -1036,7 +1036,7 @@ intr_do_resched:
 	CMPIB= 0,%r20,intr_restore /* backward */
 	nop
 
-#ifdef __LP64__
+#ifdef CONFIG_64BIT
 	ldo	-16(%r30),%r29		/* Reference param save area */
 #endif
 
@@ -1069,7 +1069,7 @@ intr_do_signal:
 
 	copy	%r0, %r24			/* unsigned long in_syscall */
 	copy	%r16, %r25			/* struct pt_regs *regs */
-#ifdef __LP64__
+#ifdef CONFIG_64BIT
 	ldo	-16(%r30),%r29			/* Reference param save area */
 #endif
 
@@ -1093,7 +1093,7 @@ intr_extint:
 	mfctl	%cr31,%r1
 	copy	%r30,%r17
 	/* FIXME! depi below has hardcoded idea of interrupt stack size (32k)*/
-#ifdef __LP64__
+#ifdef CONFIG_64BIT
 	depdi	0,63,15,%r17
 #else
 	depi	0,31,15,%r17
@@ -1120,7 +1120,7 @@ intr_extint:
 
 	ldil	L%intr_return, %r2
 
-#ifdef __LP64__
+#ifdef CONFIG_64BIT
 	ldo	-16(%r30),%r29	/* Reference param save area */
 #endif
 
@@ -1164,7 +1164,7 @@ intr_save:
 	mfctl           %cr21, %r17 /* ior */
 
 
-#ifdef __LP64__
+#ifdef CONFIG_64BIT
 	/*
 	 * If the interrupted code was running with W bit off (32 bit),
 	 * clear the b bits (bits 0 & 1) in the ior.
@@ -1199,7 +1199,7 @@ skip_save_ior:
 	loadgp
 
 	copy		%r29, %r25	/* arg1 is pt_regs */
-#ifdef __LP64__
+#ifdef CONFIG_64BIT
 	ldo		-16(%r30),%r29	/* Reference param save area */
 #endif
 
@@ -1237,7 +1237,7 @@ skip_save_ior:
 	spc  = r24	/* space for which the trap occured */
 	ptp = r25	/* page directory/page table pointer */
 
-#ifdef __LP64__
+#ifdef CONFIG_64BIT
 
 dtlb_miss_20w:
 	space_adjust	spc,va,t0
@@ -1528,7 +1528,7 @@ nadtlb_probe_check:
 	nop
 
 
-#ifdef __LP64__
+#ifdef CONFIG_64BIT
 itlb_miss_20w:
 
 	/*
@@ -1595,7 +1595,7 @@ itlb_miss_20:
 
 #endif
 
-#ifdef __LP64__
+#ifdef CONFIG_64BIT
 
 dbit_trap_20w:
 	space_adjust	spc,va,t0
@@ -1804,7 +1804,7 @@ sys_fork_wrapper:
 
 	STREG	%r2,-RP_OFFSET(%r30)
 	ldo	FRAME_SIZE(%r30),%r30
-#ifdef __LP64__
+#ifdef CONFIG_64BIT
 	ldo	-16(%r30),%r29		/* Reference param save area */
 #endif
 
@@ -1854,7 +1854,7 @@ sys_clone_wrapper:
 
 	STREG	%r2,-RP_OFFSET(%r30)
 	ldo	FRAME_SIZE(%r30),%r30
-#ifdef __LP64__
+#ifdef CONFIG_64BIT
 	ldo	-16(%r30),%r29		/* Reference param save area */
 #endif
 
@@ -1876,7 +1876,7 @@ sys_vfork_wrapper:
 
 	STREG	%r2,-RP_OFFSET(%r30)
 	ldo	FRAME_SIZE(%r30),%r30
-#ifdef __LP64__
+#ifdef CONFIG_64BIT
 	ldo	-16(%r30),%r29		/* Reference param save area */
 #endif
 
@@ -1904,7 +1904,7 @@ sys_vfork_wrapper:
 
 	STREG %r2,-RP_OFFSET(%r30)
 	ldo FRAME_SIZE(%r30),%r30
-#ifdef __LP64__
+#ifdef CONFIG_64BIT
 	ldo	-16(%r30),%r29		/* Reference param save area */
 #endif
 	BL \execve,%r2
@@ -1930,7 +1930,7 @@ error_\execve:
 sys_execve_wrapper:
 	execve_wrapper sys_execve
 
-#ifdef __LP64__
+#ifdef CONFIG_64BIT
 	.export sys32_execve_wrapper
 	.import sys32_execve
 
@@ -1944,7 +1944,7 @@ sys_rt_sigreturn_wrapper:
 	ldo	TASK_REGS(%r26),%r26	/* get pt regs */
 	/* Don't save regs, we are going to restore them from sigcontext. */
 	STREG	%r2, -RP_OFFSET(%r30)
-#ifdef __LP64__
+#ifdef CONFIG_64BIT
 	ldo	FRAME_SIZE(%r30), %r30
 	BL	sys_rt_sigreturn,%r2
 	ldo	-16(%r30),%r29		/* Reference param save area */
@@ -1975,7 +1975,7 @@ sys_sigaltstack_wrapper:
 	ldo	TASK_REGS(%r1),%r24	/* get pt regs */
 	LDREG	TASK_PT_GR30(%r24),%r24
 	STREG	%r2, -RP_OFFSET(%r30)
-#ifdef __LP64__
+#ifdef CONFIG_64BIT
 	ldo	FRAME_SIZE(%r30), %r30
 	b,l	do_sigaltstack,%r2
 	ldo	-16(%r30),%r29		/* Reference param save area */
@@ -1989,7 +1989,7 @@ sys_sigaltstack_wrapper:
 	bv	%r0(%r2)
 	nop
 
-#ifdef __LP64__
+#ifdef CONFIG_64BIT
 	.export sys32_sigaltstack_wrapper
 sys32_sigaltstack_wrapper:
 	/* Get the user stack pointer */
@@ -2013,7 +2013,7 @@ sys_rt_sigsuspend_wrapper:
 	reg_save %r24
 
 	STREG	%r2, -RP_OFFSET(%r30)
-#ifdef __LP64__
+#ifdef CONFIG_64BIT
 	ldo	FRAME_SIZE(%r30), %r30
 	b,l	sys_rt_sigsuspend,%r2
 	ldo	-16(%r30),%r29		/* Reference param save area */
@@ -2086,7 +2086,7 @@ syscall_check_bh:
 	ldw     TI_CPU-THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r26 /* cpu # */
 
 	/* shift left ____cacheline_aligned (aka L1_CACHE_BYTES) bits */
-#ifdef __LP64__
+#ifdef CONFIG_64BIT
 	shld	%r26, 6, %r20
 #else
 	shlw	%r26, 5, %r20
@@ -2151,7 +2151,7 @@ syscall_restore:
 
 	depi	3,31,2,%r31			   /* ensure return to user mode. */
 
-#ifdef __LP64__
+#ifdef CONFIG_64BIT
 	/* decide whether to reset the wide mode bit
 	 *
 	 * For a syscall, the W bit is stored in the lowest bit
@@ -2247,7 +2247,7 @@ syscall_do_softirq:
 	.import schedule,code
 syscall_do_resched:
 	BL	schedule,%r2
-#ifdef __LP64__
+#ifdef CONFIG_64BIT
 	ldo	-16(%r30),%r29		/* Reference param save area */
 #else
 	nop
@@ -2267,7 +2267,7 @@ syscall_do_signal:
 
 	ldi	1, %r24				/* unsigned long in_syscall */
 
-#ifdef __LP64__
+#ifdef CONFIG_64BIT
 	ldo	-16(%r30),%r29			/* Reference param save area */
 #endif
 	BL	do_signal,%r2
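
A note on the ldo -16(%r30),%r29 lines that recur throughout entry.S: on a wide (64-bit) kernel the PA-RISC calling convention expects %r29 to point at a 16-byte reference parameter save area carved off the stack pointer before any call into C, while a narrow kernel has no such requirement, hence the bare nop in the #else branches. A hedged sketch of the idiom (the callee name is hypothetical):

	#ifdef CONFIG_64BIT
		ldo	-16(%r30),%r29		/* %r29 = %sp - 16: reference param save area */
	#else
		nop				/* narrow kernel: nothing to set up */
	#endif
		BL	some_c_function,%r2	/* hypothetical C callee */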

+ 13 - 13
arch/parisc/kernel/head.S

@@ -12,7 +12,7 @@
  * Initial Version 04-23-1999 by Helge Deller <deller@gmx.de>
  */
 
-#include <linux/autoconf.h>	/* for CONFIG_SMP */
+#include <linux/config.h>	/* for CONFIG_SMP */
 
 #include <asm/asm-offsets.h>
 #include <asm/psw.h>
@@ -36,10 +36,10 @@ boot_args:
 	.align	4
 	.import init_thread_union,data
 	.import fault_vector_20,code    /* IVA parisc 2.0 32 bit */
-#ifndef __LP64__
+#ifndef CONFIG_64BIT
         .import fault_vector_11,code    /* IVA parisc 1.1 32 bit */
 	.import	$global$		/* forward declaration */
-#endif /*!LP64*/
+#endif /*!CONFIG_64BIT*/
 	.export stext
 	.export _stext,data		/* Kernel want it this way! */
 _stext:
@@ -76,7 +76,7 @@ $bss_loop:
 	mtctl		%r4,%cr24	/* Initialize kernel root pointer */
 	mtctl		%r4,%cr25	/* Initialize user root pointer */
 
-#ifdef __LP64__
+#ifdef CONFIG_64BIT
 	/* Set pmd in pgd */
 	load32		PA(pmd0),%r5
 	shrd            %r5,PxD_VALUE_SHIFT,%r3	
@@ -99,7 +99,7 @@ $bss_loop:
 	stw		%r3,0(%r4)
 	ldo		(ASM_PAGE_SIZE >> PxD_VALUE_SHIFT)(%r3),%r3
 	addib,>		-1,%r1,1b
-#ifdef __LP64__
+#ifdef CONFIG_64BIT
 	ldo             ASM_PMD_ENTRY_SIZE(%r4),%r4
 #else
 	ldo             ASM_PGD_ENTRY_SIZE(%r4),%r4
@@ -170,7 +170,7 @@ common_stext:
 	stw		%r0,0x28(%r0)	/* MEM_RENDEZ_HI */
 #endif /*CONFIG_SMP*/
 
-#ifdef __LP64__
+#ifdef CONFIG_64BIT
 	tophys_r1	%sp
 
 	/* Save the rfi target address */
@@ -233,7 +233,7 @@ stext_pdc_ret:
 	 * following short sequence of instructions can determine this
 	 * (without being illegal on a PA1.1 machine).
 	 */
-#ifndef __LP64__
+#ifndef CONFIG_64BIT
 	ldi		32,%r10
 	mtctl		%r10,%cr11
 	.level 2.0
@@ -246,7 +246,7 @@ stext_pdc_ret:
 
 $is_pa20:
 	.level		LEVEL /* restore 1.1 || 2.0w */
-#endif /*!LP64*/
+#endif /*!CONFIG_64BIT*/
 	load32		PA(fault_vector_20),%r10
 
 $install_iva:
@@ -284,7 +284,7 @@ aligned_rfi:
 	.import smp_init_current_idle_task,data
 	.import	smp_callin,code
 
-#ifndef __LP64__
+#ifndef CONFIG_64BIT
 smp_callin_rtn:
         .proc
 	.callinfo
@@ -292,7 +292,7 @@ smp_callin_rtn:
 	nop
 	nop
         .procend
-#endif /*!LP64*/
+#endif /*!CONFIG_64BIT*/
 
 /***************************************************************************
 * smp_slave_stext is executed by all non-monarch Processors when the Monarch
@@ -327,7 +327,7 @@ smp_slave_stext:
 	mtctl		%r4,%cr24	/* Initialize kernel root pointer */
 	mtctl		%r4,%cr25	/* Initialize user root pointer */
 
-#ifdef __LP64__
+#ifdef CONFIG_64BIT
 	/* Setup PDCE_PROC entry */
 	copy            %arg0,%r3
 #else
@@ -344,7 +344,7 @@ smp_slave_stext:
 
 	.procend
 #endif /* CONFIG_SMP */
-#ifndef __LP64__
+#ifndef CONFIG_64BIT
 	.data
 
 	.align	4
@@ -354,4 +354,4 @@ smp_slave_stext:
 	.size	$global$,4
 $global$:	
 	.word 0
-#endif /*!LP64*/
+#endif /*!CONFIG_64BIT*/
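
The <linux/autoconf.h> to <linux/config.h> swap above is the "fixup config.h usage" half of the commit message: in 2.6-era kernels <linux/config.h> was the sanctioned wrapper for pulling the generated CONFIG_* definitions into C and assembly sources alike. The include must be seen before the first CONFIG_* test, since an undefined macro silently selects the #else branch. A sketch of the resulting pattern, with the .level directives borrowed from the syscall.S hunk further down:

	#include <linux/config.h>	/* must precede any CONFIG_* test */

	#ifdef CONFIG_64BIT
		.level	2.0w		/* wide (64-bit) PA-RISC */
	#else
		.level	1.1		/* narrow (32-bit) PA-RISC */
	#endif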

+ 16 - 14
arch/parisc/kernel/pacache.S

@@ -26,7 +26,7 @@
  *       can be used.
  */
 
-#ifdef __LP64__
+#ifdef CONFIG_64BIT
 #define ADDIB	addib,*
 #define CMPB	cmpb,*
 #define ANDCM	andcm,*
@@ -40,6 +40,8 @@
 	.level	2.0
 #endif
 
+#include <linux/config.h>
+
 #include <asm/psw.h>
 #include <asm/assembly.h>
 #include <asm/pgtable.h>
@@ -294,7 +296,7 @@ copy_user_page_asm:
 	.callinfo NO_CALLS
 	.entry
 
-#ifdef __LP64__
+#ifdef CONFIG_64BIT
 	/* PA8x00 CPUs can consume 2 loads or 1 store per cycle.
 	 * Unroll the loop by hand and arrange insn appropriately.
 	 * GCC probably can do this just as well.
@@ -454,7 +456,7 @@ copy_user_page_asm:
 	sub		%r25, %r1, %r23		/* move physical addr into non shadowed reg */
 
 	ldil		L%(TMPALIAS_MAP_START), %r28
-#ifdef __LP64__
+#ifdef CONFIG_64BIT
 	extrd,u		%r26,56,32, %r26		/* convert phys addr to tlb insert format */
 	extrd,u		%r23,56,32, %r23		/* convert phys addr to tlb insert format */
 	depd		%r24,63,22, %r28		/* Form aliased virtual address 'to' */
@@ -541,7 +543,7 @@ __clear_user_page_asm:
 	tophys_r1	%r26
 
 	ldil		L%(TMPALIAS_MAP_START), %r28
-#ifdef __LP64__
+#ifdef CONFIG_64BIT
 #if (TMPALIAS_MAP_START >= 0x80000000)
 	depdi		0, 31,32, %r28		/* clear any sign extension */
 #endif
@@ -558,7 +560,7 @@ __clear_user_page_asm:
 
 	pdtlb		0(%r28)
 
-#ifdef __LP64__
+#ifdef CONFIG_64BIT
 	ldi		32, %r1			/* PAGE_SIZE/128 == 32 */
 
 	/* PREFETCH (Write) has not (yet) been proven to help here */
@@ -583,7 +585,7 @@ __clear_user_page_asm:
 	ADDIB>		-1, %r1, 1b
 	ldo		128(%r28), %r28
 
-#else	/* ! __LP64 */
+#else	/* ! CONFIG_64BIT */
 
 	ldi		64, %r1			/* PAGE_SIZE/64 == 64 */
 
@@ -606,7 +608,7 @@ __clear_user_page_asm:
 	stw		%r0, 60(%r28)
 	ADDIB>		-1, %r1, 1b
 	ldo		64(%r28), %r28
-#endif	/* __LP64 */
+#endif	/* CONFIG_64BIT */
 
 	bv		%r0(%r2)
 	nop
@@ -624,7 +626,7 @@ flush_kernel_dcache_page:
 	ldil		L%dcache_stride, %r1
 	ldw		R%dcache_stride(%r1), %r23
 
-#ifdef __LP64__
+#ifdef CONFIG_64BIT
 	depdi,z		1, 63-PAGE_SHIFT,1, %r25
 #else
 	depwi,z		1, 31-PAGE_SHIFT,1, %r25
@@ -668,7 +670,7 @@ flush_user_dcache_page:
 	ldil		L%dcache_stride, %r1
 	ldw		R%dcache_stride(%r1), %r23
 
-#ifdef __LP64__
+#ifdef CONFIG_64BIT
 	depdi,z		1,63-PAGE_SHIFT,1, %r25
 #else
 	depwi,z		1,31-PAGE_SHIFT,1, %r25
@@ -712,7 +714,7 @@ flush_user_icache_page:
 	ldil		L%dcache_stride, %r1
 	ldw		R%dcache_stride(%r1), %r23
 
-#ifdef __LP64__
+#ifdef CONFIG_64BIT
 	depdi,z		1, 63-PAGE_SHIFT,1, %r25
 #else
 	depwi,z		1, 31-PAGE_SHIFT,1, %r25
@@ -757,7 +759,7 @@ purge_kernel_dcache_page:
 	ldil		L%dcache_stride, %r1
 	ldw		R%dcache_stride(%r1), %r23
 
-#ifdef __LP64__
+#ifdef CONFIG_64BIT
 	depdi,z		1, 63-PAGE_SHIFT,1, %r25
 #else
 	depwi,z		1, 31-PAGE_SHIFT,1, %r25
@@ -805,7 +807,7 @@ flush_alias_page:
 	tophys_r1		%r26
 
 	ldil		L%(TMPALIAS_MAP_START), %r28
-#ifdef __LP64__
+#ifdef CONFIG_64BIT
 	extrd,u		%r26, 56,32, %r26	/* convert phys addr to tlb insert format */
 	depd		%r25, 63,22, %r28	/* Form aliased virtual address 'to' */
 	depdi		0, 63,12, %r28		/* Clear any offset bits */
@@ -822,7 +824,7 @@ flush_alias_page:
 	ldil		L%dcache_stride, %r1
 	ldw		R%dcache_stride(%r1), %r23
 
-#ifdef __LP64__
+#ifdef CONFIG_64BIT
 	depdi,z		1, 63-PAGE_SHIFT,1, %r29
 #else
 	depwi,z		1, 31-PAGE_SHIFT,1, %r29
@@ -933,7 +935,7 @@ flush_kernel_icache_page:
 	ldil		L%icache_stride, %r1
 	ldw		R%icache_stride(%r1), %r23
 
-#ifdef __LP64__
+#ifdef CONFIG_64BIT
 	depdi,z		1, 63-PAGE_SHIFT,1, %r25
 #else
 	depwi,z		1, 31-PAGE_SHIFT,1, %r25

+ 9 - 7
arch/parisc/kernel/real2.S

@@ -7,6 +7,8 @@
  * Copyright (C) 2000 Hewlett Packard (Paul Bame bame@puffin.external.hp.com)
  *
  */
+#include <linux/config.h>
+
 #include <asm/psw.h>
 #include <asm/assembly.h>
 
@@ -20,7 +22,7 @@ real32_stack:
 real64_stack:
 	.block	8192
 
-#ifdef __LP64__
+#ifdef CONFIG_64BIT
 #  define REG_SZ 8
 #else
 #  define REG_SZ 4
@@ -50,7 +52,7 @@ save_cr_end:
 
 real32_call_asm:
 	STREG	%rp, -RP_OFFSET(%sp)	/* save RP */
-#ifdef __LP64__
+#ifdef CONFIG_64BIT
 	callee_save
 	ldo	2*REG_SZ(%sp), %sp	/* room for a couple more saves */
 	STREG	%r27, -1*REG_SZ(%sp)
@@ -77,7 +79,7 @@ real32_call_asm:
 	b,l	save_control_regs,%r2		/* modifies r1, r2, r28 */
 	nop
 
-#ifdef __LP64__
+#ifdef CONFIG_64BIT
 	rsm	PSW_SM_W, %r0		/* go narrow */
 #endif
 
@@ -85,7 +87,7 @@ real32_call_asm:
 	bv	0(%r31)
 	nop
 ric_ret:
-#ifdef __LP64__
+#ifdef CONFIG_64BIT
 	ssm	PSW_SM_W, %r0		/* go wide */
 #endif
 	/* restore CRs before going virtual in case we page fault */
@@ -97,7 +99,7 @@ ric_ret:
 
 	tovirt_r1 %sp
 	LDREG	-REG_SZ(%sp), %sp	/* restore SP */
-#ifdef __LP64__
+#ifdef CONFIG_64BIT
 	LDREG	-1*REG_SZ(%sp), %r27
 	LDREG	-2*REG_SZ(%sp), %r29
 	ldo	-2*REG_SZ(%sp), %sp
@@ -212,7 +214,7 @@ rfi_r2v_1:
 	bv	0(%r2)
 	nop
 
-#ifdef __LP64__
+#ifdef CONFIG_64BIT
 
 /************************ 64-bit real-mode calls ***********************/
 /* This is only usable in wide kernels right now and will probably stay so */
@@ -290,7 +292,7 @@ pc_in_user_space:
 	**	comparing function pointers.
 	*/
 __canonicalize_funcptr_for_compare:
-#ifdef __LP64__
+#ifdef CONFIG_64BIT
 	bve (%r2)
 #else
 	bv %r0(%r2)
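
The rsm/ssm pair in real32_call_asm above is the wide/narrow mode switch: a 64-bit kernel clears the PSW W bit before branching into 32-bit firmware code and sets it again on return. A condensed, hedged sketch (the firmware call itself is elided):

	#ifdef CONFIG_64BIT
		rsm	PSW_SM_W, %r0	/* clear PSW W bit: go narrow */
	#endif
		/* ... 32-bit real-mode firmware call ... */
	#ifdef CONFIG_64BIT
		ssm	PSW_SM_W, %r0	/* set PSW W bit: go wide again */
	#endif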

+ 1 - 1
arch/parisc/kernel/smp.c

@@ -18,7 +18,7 @@
 */
 #undef ENTRY_SYS_CPUS	/* syscall support for iCOD-like functionality */
 
-#include <linux/autoconf.h>
+#include <linux/config.h>
 
 #include <linux/types.h>
 #include <linux/spinlock.h>

+ 18 - 19
arch/parisc/kernel/syscall.S

@@ -6,6 +6,7 @@
  * thanks to Philipp Rumpf, Mike Shaver and various others
  * sorry about the wall, puffin..
  */
+#include <linux/config.h> /* for CONFIG_SMP */
 
 #include <asm/asm-offsets.h>
 #include <asm/unistd.h>
@@ -22,15 +23,13 @@
 	 */
 #define KILL_INSN	break	0,0
 
-#include <linux/config.h> /* for CONFIG_SMP */
-
-#ifdef __LP64__
+#ifdef CONFIG_64BIT
 	.level          2.0w
 #else
 	.level		1.1
 #endif
 
-#ifndef __LP64__
+#ifndef CONFIG_64BIT
 	.macro fixup_branch,lbl
 	b	    \lbl
 	.endm
@@ -103,7 +102,7 @@ linux_gateway_entry:
 	mfsp    %sr7,%r1                        /* save user sr7 */
 	mtsp    %r1,%sr3                        /* and store it in sr3 */
 
-#ifdef __LP64__
+#ifdef CONFIG_64BIT
 	/* for now we can *always* set the W bit on entry to the syscall
 	 * since we don't support wide userland processes.  We could
 	 * also save the current SM other than in r0 and restore it on
@@ -155,7 +154,7 @@ linux_gateway_entry:
 	STREG	%r19, TASK_PT_GR19(%r1)
 
 	LDREGM	-FRAME_SIZE(%r30), %r2		/* get users sp back */
-#ifdef __LP64__
+#ifdef CONFIG_64BIT
 	extrd,u	%r2,63,1,%r19			/* W hidden in bottom bit */
 #if 0
 	xor	%r19,%r2,%r2			/* clear bottom bit */
@@ -186,7 +185,7 @@ linux_gateway_entry:
 
 	loadgp
 
-#ifdef __LP64__
+#ifdef CONFIG_64BIT
 	ldo	-16(%r30),%r29			/* Reference param save area */
 	copy	%r19,%r2			/* W bit back to r2 */
 #else
@@ -205,7 +204,7 @@ linux_gateway_entry:
 	/* Note!  We cannot use the syscall table that is mapped
 	nearby since the gateway page is mapped execute-only. */
 
-#ifdef __LP64__
+#ifdef CONFIG_64BIT
 	ldil	L%sys_call_table, %r1
 	or,=	%r2,%r2,%r2
 	addil	L%(sys_call_table64-sys_call_table), %r1
@@ -321,7 +320,7 @@ tracesys_next:
 	LDREG   TASK_PT_GR25(%r1), %r25
 	LDREG   TASK_PT_GR24(%r1), %r24
 	LDREG   TASK_PT_GR23(%r1), %r23
-#ifdef __LP64__
+#ifdef CONFIG_64BIT
 	LDREG   TASK_PT_GR22(%r1), %r22
 	LDREG   TASK_PT_GR21(%r1), %r21
 	ldo	-16(%r30),%r29			/* Reference param save area */
@@ -350,7 +349,7 @@ tracesys_next:
 tracesys_exit:
 	ldo     -THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r1      /* get task ptr */
 	LDREG	TI_TASK(%r1), %r1
-#ifdef __LP64__
+#ifdef CONFIG_64BIT
 	ldo	-16(%r30),%r29			/* Reference param save area */
 #endif
 	bl	syscall_trace, %r2
@@ -371,7 +370,7 @@ tracesys_exit:
 tracesys_sigexit:
 	ldo     -THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r1      /* get task ptr */
 	LDREG	0(%r1), %r1
-#ifdef __LP64__
+#ifdef CONFIG_64BIT
 	ldo	-16(%r30),%r29			/* Reference param save area */
 #endif
 	bl	syscall_trace, %r2
@@ -404,7 +403,7 @@ lws_start:
 	gate	.+8, %r0
 	depi	3, 31, 2, %r31	/* Ensure we return to userspace */
 
-#ifdef __LP64__
+#ifdef CONFIG_64BIT
 	/* FIXME: If we are a 64-bit kernel just
 	 *        turn this on unconditionally.
 	 */
@@ -440,7 +439,7 @@ lws_exit_nosys:
 	/* Fall through: Return to userspace */
 
 lws_exit:
-#ifdef __LP64__
+#ifdef CONFIG_64BIT
 	/* decide whether to reset the wide mode bit
 	 *
 	 * For a syscall, the W bit is stored in the lowest bit
@@ -486,7 +485,7 @@ lws_exit:
 
 	/* ELF64 Process entry path */
 lws_compare_and_swap64:
-#ifdef __LP64__
+#ifdef CONFIG_64BIT
 	b,n	lws_compare_and_swap
 #else
 	/* If we are not a 64-bit kernel, then we don't
@@ -497,7 +496,7 @@ lws_compare_and_swap64:
 
 	/* ELF32 Process entry path */
 lws_compare_and_swap32:
-#ifdef __LP64__
+#ifdef CONFIG_64BIT
 	/* Clip all the input registers */
 	depdi	0, 31, 32, %r26
 	depdi	0, 31, 32, %r25
@@ -608,7 +607,7 @@ cas_action:
 	   the other for the store. Either return -EFAULT.
 	   Each of the entries must be relocated. */
 	.section __ex_table,"aw"
-#ifdef __LP64__
+#ifdef CONFIG_64BIT
 	/* Pad the address calculation */
 	.word	0,(2b - linux_gateway_page)
 	.word	0,(3b - linux_gateway_page)
@@ -619,7 +618,7 @@ cas_action:
 	.previous
 
 	.section __ex_table,"aw"
-#ifdef __LP64__
+#ifdef CONFIG_64BIT
 	/* Pad the address calculation */
 	.word	0,(1b - linux_gateway_page)
 	.word	0,(3b - linux_gateway_page)
@@ -638,7 +637,7 @@ end_linux_gateway_page:
 
 	/* Relocate symbols assuming linux_gateway_page is mapped
 	   to virtual address 0x0 */
-#ifdef __LP64__
+#ifdef CONFIG_64BIT
 	/* FIXME: The code will always be on the gateay page
 		  and thus it will be on the first 4k, the
 		  assembler seems to think that the final
@@ -666,7 +665,7 @@ lws_table:
 sys_call_table:
 #include "syscall_table.S"
 
-#ifdef __LP64__
+#ifdef CONFIG_64BIT
 	.align 4096
 	.export sys_call_table64
 .Lsys_call_table64:

+ 2 - 2
arch/parisc/kernel/syscall_table.S

@@ -35,7 +35,7 @@
 #undef ENTRY_UHOH
 #undef ENTRY_COMP
 #undef ENTRY_OURS
-#if defined(__LP64__) && !defined(SYSCALL_TABLE_64BIT)
+#if defined(CONFIG_64BIT) && !defined(SYSCALL_TABLE_64BIT)
 /* Use ENTRY_SAME for 32-bit syscalls which are the same on wide and
  * narrow palinux.  Use ENTRY_DIFF for those where a 32-bit specific
  * implementation is required on wide palinux.  Use ENTRY_COMP where
@@ -46,7 +46,7 @@
 #define ENTRY_UHOH(_name_) .dword sys32_##unimplemented
 #define ENTRY_OURS(_name_) .dword parisc_##_name_
 #define ENTRY_COMP(_name_) .dword compat_sys_##_name_
-#elif defined(__LP64__) && defined(SYSCALL_TABLE_64BIT)
+#elif defined(CONFIG_64BIT) && defined(SYSCALL_TABLE_64BIT)
 #define ENTRY_SAME(_name_) .dword sys_##_name_
 #define ENTRY_DIFF(_name_) .dword sys_##_name_
 #define ENTRY_UHOH(_name_) .dword sys_##_name_
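
For illustration, a hedged sketch of how the ENTRY_* macros above would be consumed in a table body assembled on a CONFIG_64BIT kernel; the syscall slots are hypothetical, not taken from this patch, and each line expands to one .dword per the definitions in the hunk:

		ENTRY_SAME(getpid)	/* -> .dword sys_getpid: identical on wide and narrow */
		ENTRY_COMP(readv)	/* -> .dword compat_sys_readv in the 32-bit table */
		ENTRY_OURS(fork)	/* -> .dword parisc_fork: arch-private wrapper */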