
Merge branch 'upstream' of git://ftp.linux-mips.org/pub/scm/upstream-linus

* 'upstream' of git://ftp.linux-mips.org/pub/scm/upstream-linus:
  [MIPS] IP27: Build fix
  [MIPS] Wire up ioprio_set and ioprio_get.
  [MIPS] Fix __raw_read_trylock() to allow multiple readers
  [MIPS] Export __copy_user_inatomic.
  [MIPS] R2 bitops compile fix for gcc < 4.0.
  [MIPS] TX39: Remove redundant tx39_blast_icache() calls
  [MIPS] Cobalt: Fix early printk
  [MIPS] SMTC: De-obscure Malta hooks.
  [MIPS] SMTC: Add forward declarations for mm_struct and task_struct.
  [MIPS] SMTC: <asm/mips_mt.h> must include <linux/cpumask.h>
  [MIPS] SMTC: <asm/smtc_ipi.h> must include <linux/spinlock.h>
  [MIPS] Atlas, Malta: Fix build warning.
Linus Torvalds, 18 years ago
parent
commit
185d84b4e1

+ 1 - 1
arch/mips/Kconfig

@@ -167,6 +167,7 @@ config MIPS_COBALT
 	select IRQ_CPU
 	select MIPS_GT64111
 	select SYS_HAS_CPU_NEVADA
+	select SYS_HAS_EARLY_PRINTK
 	select SYS_SUPPORTS_32BIT_KERNEL
 	select SYS_SUPPORTS_64BIT_KERNEL if EXPERIMENTAL
 	select SYS_SUPPORTS_LITTLE_ENDIAN
@@ -837,7 +838,6 @@ source "arch/mips/tx4927/Kconfig"
 source "arch/mips/tx4938/Kconfig"
 source "arch/mips/vr41xx/Kconfig"
 source "arch/mips/philips/pnx8550/common/Kconfig"
-source "arch/mips/cobalt/Kconfig"
 
 endmenu
 

+ 0 - 7
arch/mips/cobalt/Kconfig

@@ -1,7 +0,0 @@
-config EARLY_PRINTK
-	bool "Early console support"
-	depends on MIPS_COBALT
-	help
-	  Provide early console support by direct access to the
-	  on board UART. The UART must have been previously
-	  initialised by the boot loader.

+ 1 - 4
arch/mips/cobalt/console.c

@@ -9,11 +9,8 @@
 #include <asm/addrspace.h>
 #include <asm/mach-cobalt/cobalt.h>
 
-static void putchar(int c)
+void prom_putchar(char c)
 {
-	if(c == '\n')
-		putchar('\r');
-
 	while(!(COBALT_UART[UART_LSR] & UART_LSR_THRE))
 		;
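
Editorial note: the '\n' -> "\r\n" expansion was dropped from the board code because, once the routine is exposed as prom_putchar(), the shared MIPS early-printk layer that calls it is the natural place to do that translation once for all boards. A minimal sketch of such a caller, assuming a free-standing helper rather than the tree's actual console hook:

/* Sketch only: drive the board-specific prom_putchar() exported above,
 * doing the LF -> CRLF expansion in one place instead of per board. */
extern void prom_putchar(char c);

static void early_console_write(const char *s, unsigned int n)
{
	while (n-- && *s) {
		if (*s == '\n')
			prom_putchar('\r');
		prom_putchar(*s++);
	}
}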
 

+ 1 - 0
arch/mips/kernel/mips_ksyms.c

@@ -37,6 +37,7 @@ EXPORT_SYMBOL(kernel_thread);
  * Userspace access stuff.
  */
 EXPORT_SYMBOL(__copy_user);
+EXPORT_SYMBOL(__copy_user_inatomic);
 EXPORT_SYMBOL(__bzero);
 EXPORT_SYMBOL(__strncpy_from_user_nocheck_asm);
 EXPORT_SYMBOL(__strncpy_from_user_asm);

+ 2 - 0
arch/mips/kernel/scall32-o32.S

@@ -656,6 +656,8 @@ einval:	li	v0, -EINVAL
 	sys	sys_kexec_load		4
 	sys	sys_getcpu		3
 	sys	sys_epoll_pwait		6
+	sys	sys_ioprio_set		3
+	sys	sys_ioprio_get		2
 	.endm
 
 	/* We pre-compute the number of _instruction_ bytes needed to

+ 3 - 0
arch/mips/kernel/scall64-64.S

@@ -471,3 +471,6 @@ sys_call_table:
 	PTR	sys_kexec_load			/* 5270 */
 	PTR	sys_getcpu
 	PTR	sys_epoll_pwait
+	PTR	sys_ioprio_set
+	PTR	sys_ioprio_get
+	.size	sys_call_table,.-sys_call_table

+ 4 - 1
arch/mips/kernel/scall64-n32.S

@@ -395,5 +395,8 @@ EXPORT(sysn32_call_table)
 	PTR	compat_sys_set_robust_list
 	PTR	compat_sys_get_robust_list
 	PTR	compat_sys_kexec_load
-	PTR	sys_getcpu
+	PTR	sys_getcpu			/* 6275 */
 	PTR	compat_sys_epoll_pwait
+	PTR	sys_ioprio_set
+	PTR	sys_ioprio_get
+	.size	sysn32_call_table,.-sysn32_call_table

+ 2 - 0
arch/mips/kernel/scall64-o32.S

@@ -519,4 +519,6 @@ sys_call_table:
 	PTR	compat_sys_kexec_load
 	PTR	sys_getcpu
 	PTR	compat_sys_epoll_pwait
+	PTR	sys_ioprio_set
+	PTR	sys_ioprio_get			/* 4315 */
 	.size	sys_call_table,.-sys_call_table

+ 2 - 2
arch/mips/mips-boards/generic/init.c

@@ -251,8 +251,6 @@ void __init mips_ejtag_setup (void)
 
 void __init prom_init(void)
 {
-	u32 start, map, mask, data;
-
 	prom_argc = fw_arg0;
 	_prom_argv = (int *) fw_arg1;
 	_prom_envp = (int *) fw_arg2;
@@ -278,6 +276,8 @@ void __init prom_init(void)
 			mips_revision_corid = MIPS_REVISION_CORID_CORE_EMUL_MSC;
 	}
 	switch(mips_revision_corid) {
+		u32 start, map, mask, data;
+
 	case MIPS_REVISION_CORID_QED_RM5261:
 	case MIPS_REVISION_CORID_CORE_LV:
 	case MIPS_REVISION_CORID_CORE_FPGA:

+ 1 - 1
arch/mips/mips-boards/malta/Makefile

@@ -21,4 +21,4 @@
 
 obj-y := malta_int.o malta_setup.o
 obj-$(CONFIG_MTD) += malta_mtd.o
-obj-$(CONFIG_SMP) += malta_smp.o
+obj-$(CONFIG_MIPS_MT_SMTC) += malta_smtc.o

+ 5 - 26
arch/mips/mips-boards/malta/malta_smp.c → arch/mips/mips-boards/malta/malta_smtc.c

@@ -1,25 +1,14 @@
 /*
  * Malta Platform-specific hooks for SMP operation
  */
+#include <linux/init.h>
 
-#include <linux/kernel.h>
-#include <linux/sched.h>
-#include <linux/cpumask.h>
-#include <linux/interrupt.h>
-
-#include <asm/atomic.h>
-#include <asm/cpu.h>
-#include <asm/processor.h>
-#include <asm/system.h>
-#include <asm/hardirq.h>
-#include <asm/mmu_context.h>
-#include <asm/smp.h>
-#ifdef CONFIG_MIPS_MT_SMTC
+#include <asm/mipsregs.h>
+#include <asm/mipsmtregs.h>
+#include <asm/smtc.h>
 #include <asm/smtc_ipi.h>
-#endif /* CONFIG_MIPS_MT_SMTC */
 
 /* VPE/SMP Prototype implements platform interfaces directly */
-#if !defined(CONFIG_MIPS_MT_SMP)
 
 /*
  * Cause the specified action to be performed on a targeted "CPU"
@@ -27,10 +16,8 @@
 
 void core_send_ipi(int cpu, unsigned int action)
 {
-/* "CPU" may be TC of same VPE, VPE of same CPU, or different CPU */
-#ifdef CONFIG_MIPS_MT_SMTC
+	/* "CPU" may be TC of same VPE, VPE of same CPU, or different CPU */
 	smtc_send_ipi(cpu, LINUX_SMP_IPI, action);
-#endif /* CONFIG_MIPS_MT_SMTC */
 }
 
 /*
@@ -39,9 +26,7 @@ void core_send_ipi(int cpu, unsigned int action)
 
 void prom_boot_secondary(int cpu, struct task_struct *idle)
 {
-#ifdef CONFIG_MIPS_MT_SMTC
 	smtc_boot_secondary(cpu, idle);
-#endif /* CONFIG_MIPS_MT_SMTC */
 }
 
 /*
@@ -50,7 +35,6 @@ void prom_boot_secondary(int cpu, struct task_struct *idle)
 
 void prom_init_secondary(void)
 {
-#ifdef CONFIG_MIPS_MT_SMTC
         void smtc_init_secondary(void);
 	int myvpe;
 
@@ -65,7 +49,6 @@ void prom_init_secondary(void)
 	}
 
         smtc_init_secondary();
-#endif /* CONFIG_MIPS_MT_SMTC */
 }
 
 /*
@@ -93,9 +76,7 @@ void __init plat_prepare_cpus(unsigned int max_cpus)
 
 void prom_smp_finish(void)
 {
-#ifdef CONFIG_MIPS_MT_SMTC
 	smtc_smp_finish();
-#endif /* CONFIG_MIPS_MT_SMTC */
 }
 
 /*
@@ -105,5 +86,3 @@ void prom_smp_finish(void)
 void prom_cpus_done(void)
 {
 }
-
-#endif /* CONFIG_MIPS32R2_MT_SMP */

+ 6 - 12
arch/mips/mm/c-tx39.c

@@ -128,7 +128,6 @@ static inline void tx39_flush_cache_all(void)
 		return;
 
 	tx39_blast_dcache();
-	tx39_blast_icache();
 }
 
 static inline void tx39___flush_cache_all(void)
@@ -142,24 +141,19 @@ static void tx39_flush_cache_mm(struct mm_struct *mm)
 	if (!cpu_has_dc_aliases)
 		return;
 
-	if (cpu_context(smp_processor_id(), mm) != 0) {
-		tx39_flush_cache_all();
-	}
+	if (cpu_context(smp_processor_id(), mm) != 0)
+		tx39_blast_dcache();
 }
 
 static void tx39_flush_cache_range(struct vm_area_struct *vma,
 	unsigned long start, unsigned long end)
 {
-	int exec;
-
+	if (!cpu_has_dc_aliases)
+		return;
 	if (!(cpu_context(smp_processor_id(), vma->vm_mm)))
 		return;
 
-	exec = vma->vm_flags & VM_EXEC;
-	if (cpu_has_dc_aliases || exec)
-		tx39_blast_dcache();
-	if (exec)
-		tx39_blast_icache();
+	tx39_blast_dcache();
 }
 
 static void tx39_flush_cache_page(struct vm_area_struct *vma, unsigned long page, unsigned long pfn)
@@ -218,7 +212,7 @@ static void tx39_flush_cache_page(struct vm_area_struct *vma, unsigned long page
 
 static void local_tx39_flush_data_cache_page(void * addr)
 {
-	tx39_blast_dcache_page(addr);
+	tx39_blast_dcache_page((unsigned long)addr);
 }
 
 static void tx39_flush_data_cache_page(unsigned long addr)

+ 0 - 2
arch/mips/sgi-ip27/ip27-init.c

@@ -191,7 +191,6 @@ static inline void ioc3_eth_init(void)
 	ioc3->eier = 0;
 }
 
-extern void ip27_setup_console(void);
 extern void ip27_time_init(void);
 extern void ip27_reboot_setup(void);
 
@@ -200,7 +199,6 @@ void __init plat_mem_setup(void)
 	hubreg_t p, e, n_mode;
 	nasid_t nid;
 
-	ip27_setup_console();
 	ip27_reboot_setup();
 
 	/*

+ 33 - 23
include/asm-mips/bitops.h

@@ -54,6 +54,7 @@
 static inline void set_bit(unsigned long nr, volatile unsigned long *addr)
 {
 	unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
+	unsigned short bit = nr & SZLONG_MASK;
 	unsigned long temp;
 
 	if (cpu_has_llsc && R10000_LLSC_WAR) {
@@ -65,9 +66,9 @@ static inline void set_bit(unsigned long nr, volatile unsigned long *addr)
 		"	beqzl	%0, 1b					\n"
 		"	.set	mips0					\n"
 		: "=&r" (temp), "=m" (*m)
-		: "ir" (1UL << (nr & SZLONG_MASK)), "m" (*m));
+		: "ir" (1UL << bit), "m" (*m));
 #ifdef CONFIG_CPU_MIPSR2
-	} else if (__builtin_constant_p(nr)) {
+	} else if (__builtin_constant_p(bit)) {
 		__asm__ __volatile__(
 		"1:	" __LL "%0, %1			# set_bit	\n"
 		"	" __INS "%0, %4, %2, 1				\n"
@@ -77,7 +78,7 @@ static inline void set_bit(unsigned long nr, volatile unsigned long *addr)
 		"2:	b	1b					\n"
 		"	.previous					\n"
 		: "=&r" (temp), "=m" (*m)
-		: "ir" (nr & SZLONG_MASK), "m" (*m), "r" (~0));
+		: "ir" (bit), "m" (*m), "r" (~0));
 #endif /* CONFIG_CPU_MIPSR2 */
 	} else if (cpu_has_llsc) {
 		__asm__ __volatile__(
@@ -91,14 +92,14 @@ static inline void set_bit(unsigned long nr, volatile unsigned long *addr)
 		"	.previous					\n"
 		"	.set	mips0					\n"
 		: "=&r" (temp), "=m" (*m)
-		: "ir" (1UL << (nr & SZLONG_MASK)), "m" (*m));
+		: "ir" (1UL << bit), "m" (*m));
 	} else {
 		volatile unsigned long *a = addr;
 		unsigned long mask;
 		unsigned long flags;
 
 		a += nr >> SZLONG_LOG;
-		mask = 1UL << (nr & SZLONG_MASK);
+		mask = 1UL << bit;
 		local_irq_save(flags);
 		*a |= mask;
 		local_irq_restore(flags);
@@ -118,6 +119,7 @@ static inline void set_bit(unsigned long nr, volatile unsigned long *addr)
 static inline void clear_bit(unsigned long nr, volatile unsigned long *addr)
 {
 	unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
+	unsigned short bit = nr & SZLONG_MASK;
 	unsigned long temp;
 
 	if (cpu_has_llsc && R10000_LLSC_WAR) {
@@ -129,9 +131,9 @@ static inline void clear_bit(unsigned long nr, volatile unsigned long *addr)
 		"	beqzl	%0, 1b					\n"
 		"	.set	mips0					\n"
 		: "=&r" (temp), "=m" (*m)
-		: "ir" (~(1UL << (nr & SZLONG_MASK))), "m" (*m));
+		: "ir" (~(1UL << bit)), "m" (*m));
 #ifdef CONFIG_CPU_MIPSR2
-	} else if (__builtin_constant_p(nr)) {
+	} else if (__builtin_constant_p(bit)) {
 		__asm__ __volatile__(
 		"1:	" __LL "%0, %1			# clear_bit	\n"
 		"	" __INS "%0, $0, %2, 1				\n"
@@ -141,7 +143,7 @@ static inline void clear_bit(unsigned long nr, volatile unsigned long *addr)
 		"2:	b	1b					\n"
 		"	.previous					\n"
 		: "=&r" (temp), "=m" (*m)
-		: "ir" (nr & SZLONG_MASK), "m" (*m));
+		: "ir" (bit), "m" (*m));
 #endif /* CONFIG_CPU_MIPSR2 */
 	} else if (cpu_has_llsc) {
 		__asm__ __volatile__(
@@ -155,14 +157,14 @@ static inline void clear_bit(unsigned long nr, volatile unsigned long *addr)
 		"	.previous					\n"
 		"	.set	mips0					\n"
 		: "=&r" (temp), "=m" (*m)
-		: "ir" (~(1UL << (nr & SZLONG_MASK))), "m" (*m));
+		: "ir" (~(1UL << bit)), "m" (*m));
 	} else {
 		volatile unsigned long *a = addr;
 		unsigned long mask;
 		unsigned long flags;
 
 		a += nr >> SZLONG_LOG;
-		mask = 1UL << (nr & SZLONG_MASK);
+		mask = 1UL << bit;
 		local_irq_save(flags);
 		*a &= ~mask;
 		local_irq_restore(flags);
@@ -180,6 +182,8 @@ static inline void clear_bit(unsigned long nr, volatile unsigned long *addr)
  */
 static inline void change_bit(unsigned long nr, volatile unsigned long *addr)
 {
+	unsigned short bit = nr & SZLONG_MASK;
+
 	if (cpu_has_llsc && R10000_LLSC_WAR) {
 		unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
 		unsigned long temp;
@@ -192,7 +196,7 @@ static inline void change_bit(unsigned long nr, volatile unsigned long *addr)
 		"	beqzl	%0, 1b				\n"
 		"	.set	mips0				\n"
 		: "=&r" (temp), "=m" (*m)
-		: "ir" (1UL << (nr & SZLONG_MASK)), "m" (*m));
+		: "ir" (1UL << bit), "m" (*m));
 	} else if (cpu_has_llsc) {
 		unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
 		unsigned long temp;
@@ -208,14 +212,14 @@ static inline void change_bit(unsigned long nr, volatile unsigned long *addr)
 		"	.previous				\n"
 		"	.set	mips0				\n"
 		: "=&r" (temp), "=m" (*m)
-		: "ir" (1UL << (nr & SZLONG_MASK)), "m" (*m));
+		: "ir" (1UL << bit), "m" (*m));
 	} else {
 		volatile unsigned long *a = addr;
 		unsigned long mask;
 		unsigned long flags;
 
 		a += nr >> SZLONG_LOG;
-		mask = 1UL << (nr & SZLONG_MASK);
+		mask = 1UL << bit;
 		local_irq_save(flags);
 		*a ^= mask;
 		local_irq_restore(flags);
@@ -233,6 +237,8 @@ static inline void change_bit(unsigned long nr, volatile unsigned long *addr)
 static inline int test_and_set_bit(unsigned long nr,
 	volatile unsigned long *addr)
 {
+	unsigned short bit = nr & SZLONG_MASK;
+
 	if (cpu_has_llsc && R10000_LLSC_WAR) {
 		unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
 		unsigned long temp, res;
@@ -246,7 +252,7 @@ static inline int test_and_set_bit(unsigned long nr,
 		"	and	%2, %0, %3				\n"
 		"	.set	mips0					\n"
 		: "=&r" (temp), "=m" (*m), "=&r" (res)
-		: "r" (1UL << (nr & SZLONG_MASK)), "m" (*m)
+		: "r" (1UL << bit), "m" (*m)
 		: "memory");
 
 		return res != 0;
@@ -269,7 +275,7 @@ static inline int test_and_set_bit(unsigned long nr,
 		"	.previous					\n"
 		"	.set	pop					\n"
 		: "=&r" (temp), "=m" (*m), "=&r" (res)
-		: "r" (1UL << (nr & SZLONG_MASK)), "m" (*m)
+		: "r" (1UL << bit), "m" (*m)
 		: "memory");
 
 		return res != 0;
@@ -280,7 +286,7 @@ static inline int test_and_set_bit(unsigned long nr,
 		unsigned long flags;
 
 		a += nr >> SZLONG_LOG;
-		mask = 1UL << (nr & SZLONG_MASK);
+		mask = 1UL << bit;
 		local_irq_save(flags);
 		retval = (mask & *a) != 0;
 		*a |= mask;
@@ -303,6 +309,8 @@ static inline int test_and_set_bit(unsigned long nr,
 static inline int test_and_clear_bit(unsigned long nr,
 	volatile unsigned long *addr)
 {
+	unsigned short bit = nr & SZLONG_MASK;
+
 	if (cpu_has_llsc && R10000_LLSC_WAR) {
 		unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
 		unsigned long temp, res;
@@ -317,7 +325,7 @@ static inline int test_and_clear_bit(unsigned long nr,
 		"	and	%2, %0, %3				\n"
 		"	.set	mips0					\n"
 		: "=&r" (temp), "=m" (*m), "=&r" (res)
-		: "r" (1UL << (nr & SZLONG_MASK)), "m" (*m)
+		: "r" (1UL << bit), "m" (*m)
 		: "memory");
 
 		return res != 0;
@@ -336,7 +344,7 @@ static inline int test_and_clear_bit(unsigned long nr,
 		"2:	b	1b					\n"
 		"	.previous					\n"
 		: "=&r" (temp), "=m" (*m), "=&r" (res)
-		: "ri" (nr & SZLONG_MASK), "m" (*m)
+		: "ri" (bit), "m" (*m)
 		: "memory");
 
 		return res;
@@ -361,7 +369,7 @@ static inline int test_and_clear_bit(unsigned long nr,
 		"	.previous					\n"
 		"	.set	pop					\n"
 		: "=&r" (temp), "=m" (*m), "=&r" (res)
-		: "r" (1UL << (nr & SZLONG_MASK)), "m" (*m)
+		: "r" (1UL << bit), "m" (*m)
 		: "memory");
 
 		return res != 0;
@@ -372,7 +380,7 @@ static inline int test_and_clear_bit(unsigned long nr,
 		unsigned long flags;
 
 		a += nr >> SZLONG_LOG;
-		mask = 1UL << (nr & SZLONG_MASK);
+		mask = 1UL << bit;
 		local_irq_save(flags);
 		retval = (mask & *a) != 0;
 		*a &= ~mask;
@@ -395,6 +403,8 @@ static inline int test_and_clear_bit(unsigned long nr,
 static inline int test_and_change_bit(unsigned long nr,
 	volatile unsigned long *addr)
 {
+	unsigned short bit = nr & SZLONG_MASK;
+
 	if (cpu_has_llsc && R10000_LLSC_WAR) {
 		unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
 		unsigned long temp, res;
@@ -408,7 +418,7 @@ static inline int test_and_change_bit(unsigned long nr,
 		"	and	%2, %0, %3				\n"
 		"	.set	mips0					\n"
 		: "=&r" (temp), "=m" (*m), "=&r" (res)
-		: "r" (1UL << (nr & SZLONG_MASK)), "m" (*m)
+		: "r" (1UL << bit), "m" (*m)
 		: "memory");
 
 		return res != 0;
@@ -431,7 +441,7 @@ static inline int test_and_change_bit(unsigned long nr,
 		"	.previous					\n"
 		"	.set	pop					\n"
 		: "=&r" (temp), "=m" (*m), "=&r" (res)
-		: "r" (1UL << (nr & SZLONG_MASK)), "m" (*m)
+		: "r" (1UL << bit), "m" (*m)
 		: "memory");
 
 		return res != 0;
@@ -441,7 +451,7 @@ static inline int test_and_change_bit(unsigned long nr,
 		unsigned long flags;
 
 		a += nr >> SZLONG_LOG;
-		mask = 1UL << (nr & SZLONG_MASK);
+		mask = 1UL << bit;
 		local_irq_save(flags);
 		retval = (mask & *a) != 0;
 		*a ^= mask;

+ 2 - 0
include/asm-mips/mips_mt.h

@@ -6,6 +6,8 @@
 #ifndef __ASM_MIPS_MT_H
 #define __ASM_MIPS_MT_H
 
+#include <linux/cpumask.h>
+
 extern cpumask_t mt_fpu_cpumask;
 extern unsigned long mt_fpemul_threshold;
 

+ 3 - 0
include/asm-mips/smtc.h

@@ -34,6 +34,9 @@ typedef long asiduse;
 
 extern asiduse smtc_live_asid[MAX_SMTC_TLBS][MAX_SMTC_ASIDS];
 
+struct mm_struct;
+struct task_struct;
+
 void smtc_get_new_mmu_context(struct mm_struct *mm, unsigned long cpu);
 
 void smtc_flush_tlb_asid(unsigned long asid);

+ 2 - 0
include/asm-mips/smtc_ipi.h

@@ -4,6 +4,8 @@
 #ifndef __ASM_SMTC_IPI_H
 #define __ASM_SMTC_IPI_H
 
+#include <linux/spinlock.h>
+
 //#define SMTC_IPI_DEBUG
 
 #ifdef SMTC_IPI_DEBUG

+ 2 - 2
include/asm-mips/spinlock.h

@@ -287,7 +287,7 @@ static inline int __raw_read_trylock(raw_rwlock_t *rw)
 		"	.set	noreorder	# __raw_read_trylock	\n"
 		"	li	%2, 0					\n"
 		"1:	ll	%1, %3					\n"
-		"	bnez	%1, 2f					\n"
+		"	bltz	%1, 2f					\n"
 		"	 addu	%1, 1					\n"
 		"	sc	%1, %0					\n"
 		"	.set	reorder					\n"
@@ -304,7 +304,7 @@ static inline int __raw_read_trylock(raw_rwlock_t *rw)
 		"	.set	noreorder	# __raw_read_trylock	\n"
 		"	li	%2, 0					\n"
 		"1:	ll	%1, %3					\n"
-		"	bnez	%1, 2f					\n"
+		"	bltz	%1, 2f					\n"
 		"	 addu	%1, 1					\n"
 		"	sc	%1, %0					\n"
 		"	beqz	%1, 1b					\n"
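
Editorial note: in this rwlock encoding the lock word goes negative only while a writer holds it (write_lock sets the sign bit), and each reader simply increments the count. Branching with bnez therefore made the trylock fail whenever the word was non-zero, i.e. whenever any other reader was already inside, which is the bug the bltz change fixes. A plain-C sketch of the intended semantics, with the LL/SC retry loop omitted:

/* Illustration only, not the kernel code: a read trylock must fail only
 * when a writer holds the lock (count < 0), not when other readers do
 * (count > 0). */
static int read_trylock_sketch(volatile long *count)
{
	long old = *count;	/* the real code redoes this in an ll/sc loop */

	if (old < 0)		/* writer active: give up */
		return 0;
	*count = old + 1;	/* join as one more reader */
	return 1;
}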

+ 2 - 0
include/asm-mips/uaccess.h

@@ -435,6 +435,8 @@ extern size_t __copy_user(void *__to, const void *__from, size_t __n);
 	__cu_len;							\
 })
 
+extern size_t __copy_user_inatomic(void *__to, const void *__from, size_t __n);
+
 #define __copy_to_user_inatomic(to,from,n)				\
 ({									\
 	void __user *__cu_to;						\

+ 12 - 6
include/asm-mips/unistd.h

@@ -334,16 +334,18 @@
 #define __NR_kexec_load			(__NR_Linux + 311)
 #define __NR_getcpu			(__NR_Linux + 312)
 #define __NR_epoll_pwait		(__NR_Linux + 313)
+#define __NR_ioprio_set			(__NR_Linux + 314)
+#define __NR_ioprio_get			(__NR_Linux + 315)
 
 /*
  * Offset of the last Linux o32 flavoured syscall
  */
-#define __NR_Linux_syscalls		313
+#define __NR_Linux_syscalls		315
 
 #endif /* _MIPS_SIM == _MIPS_SIM_ABI32 */
 
 #define __NR_O32_Linux			4000
-#define __NR_O32_Linux_syscalls		313
+#define __NR_O32_Linux_syscalls		315
 
 #if _MIPS_SIM == _MIPS_SIM_ABI64
 
@@ -624,16 +626,18 @@
 #define __NR_kexec_load			(__NR_Linux + 270)
 #define __NR_getcpu			(__NR_Linux + 271)
 #define __NR_epoll_pwait		(__NR_Linux + 272)
+#define __NR_ioprio_set			(__NR_Linux + 273)
+#define __NR_ioprio_get			(__NR_Linux + 274)
 
 /*
  * Offset of the last Linux 64-bit flavoured syscall
  */
-#define __NR_Linux_syscalls		272
+#define __NR_Linux_syscalls		274
 
 #endif /* _MIPS_SIM == _MIPS_SIM_ABI64 */
 
 #define __NR_64_Linux			5000
-#define __NR_64_Linux_syscalls		272
+#define __NR_64_Linux_syscalls		274
 
 #if _MIPS_SIM == _MIPS_SIM_NABI32
 
@@ -918,16 +922,18 @@
 #define __NR_kexec_load			(__NR_Linux + 274)
 #define __NR_getcpu			(__NR_Linux + 275)
 #define __NR_epoll_pwait		(__NR_Linux + 276)
+#define __NR_ioprio_set			(__NR_Linux + 277)
+#define __NR_ioprio_get			(__NR_Linux + 278)
 
 /*
  * Offset of the last N32 flavoured syscall
  */
-#define __NR_Linux_syscalls		276
+#define __NR_Linux_syscalls		278
 
 #endif /* _MIPS_SIM == _MIPS_SIM_NABI32 */
 
 #define __NR_N32_Linux			6000
-#define __NR_N32_Linux_syscalls		276
+#define __NR_N32_Linux_syscalls		278
 
 #ifdef __KERNEL__
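
Editorial note: with the table entries and __NR_* numbers above in place, userspace reaches the I/O priority calls through the generic syscall() wrapper. A hedged usage sketch follows; the IOPRIO_* constants mirror the generic Linux I/O priority encoding and are not defined by this patch, and SYS_ioprio_set/SYS_ioprio_get are assumed to be provided by the libc headers once they pick up these numbers.

#include <stdio.h>
#include <unistd.h>
#include <sys/syscall.h>

/* Generic Linux I/O priority encoding (not part of this patch). */
#define IOPRIO_WHO_PROCESS	1
#define IOPRIO_CLASS_BE		2
#define IOPRIO_CLASS_SHIFT	13
#define IOPRIO_PRIO_VALUE(cls, data)	(((cls) << IOPRIO_CLASS_SHIFT) | (data))

int main(void)
{
	/* Put the current process in the best-effort class, level 4. */
	if (syscall(SYS_ioprio_set, IOPRIO_WHO_PROCESS, 0,
		    IOPRIO_PRIO_VALUE(IOPRIO_CLASS_BE, 4)) < 0) {
		perror("ioprio_set");
		return 1;
	}
	printf("ioprio: %ld\n",
	       (long) syscall(SYS_ioprio_get, IOPRIO_WHO_PROCESS, 0));
	return 0;
}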