Merge branch 'master' of git://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux-2.6

Felix Blyakher 16 years ago
parent
commit
47d1ff6e1a

+ 1 - 1
Makefile

@@ -1,7 +1,7 @@
 VERSION = 2
 PATCHLEVEL = 6
 SUBLEVEL = 29
-EXTRAVERSION = -rc6
+EXTRAVERSION = -rc7
 NAME = Erotic Pickled Herring
 
 # *DOCUMENTATION*

+ 7 - 6
arch/arm/kernel/setup.c

@@ -233,12 +233,13 @@ static void __init cacheid_init(void)
 	unsigned int cachetype = read_cpuid_cachetype();
 	unsigned int arch = cpu_architecture();
 
-	if (arch >= CPU_ARCH_ARMv7) {
-		cacheid = CACHEID_VIPT_NONALIASING;
-		if ((cachetype & (3 << 14)) == 1 << 14)
-			cacheid |= CACHEID_ASID_TAGGED;
-	} else if (arch >= CPU_ARCH_ARMv6) {
-		if (cachetype & (1 << 23))
+	if (arch >= CPU_ARCH_ARMv6) {
+		if ((cachetype & (7 << 29)) == 4 << 29) {
+			/* ARMv7 register format */
+			cacheid = CACHEID_VIPT_NONALIASING;
+			if ((cachetype & (3 << 14)) == 1 << 14)
+				cacheid |= CACHEID_ASID_TAGGED;
+		} else if (cachetype & (1 << 23))
 			cacheid = CACHEID_VIPT_ALIASING;
 		else
 			cacheid = CACHEID_VIPT_NONALIASING;

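The rewritten cacheid_init() above stops keying the decision solely off the CPU architecture constant: any v6-or-later core is classified by the cache type register itself, whose bits [31:29] equal 0b100 when the register uses the ARMv7 layout. A standalone C sketch of that decision order follows; cache_kind() and the sample register value are illustrative, not kernel code.

#include <stdio.h>

/* Classify a cache from a raw cache type register value, mirroring the
 * order of tests the hunk above introduces for ARMv6+ cores. */
static const char *cache_kind(unsigned int cachetype)
{
	if ((cachetype & (7u << 29)) == 4u << 29) {
		/* ARMv7 register format */
		if ((cachetype & (3u << 14)) == 1u << 14)
			return "VIPT non-aliasing, ASID-tagged";
		return "VIPT non-aliasing";
	}
	if (cachetype & (1u << 23))		/* pre-v7 format */
		return "VIPT aliasing";
	return "VIPT non-aliasing";
}

int main(void)
{
	/* 0x80000000 has bits [31:29] == 0b100: ARMv7-format example */
	printf("%s\n", cache_kind(0x80000000u));
	return 0;
}
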
+ 0 - 1
arch/arm/mach-at91/pm.c

@@ -332,7 +332,6 @@ static int at91_pm_enter(suspend_state_t state)
 			at91_sys_read(AT91_AIC_IPR) & at91_sys_read(AT91_AIC_IMR));
 
 error:
-	sdram_selfrefresh_disable();
 	target_state = PM_SUSPEND_ON;
 	at91_irq_resume();
 	at91_gpio_resume();

+ 2 - 1
arch/arm/mm/abort-ev6.S

@@ -23,7 +23,8 @@ ENTRY(v6_early_abort)
 #ifdef CONFIG_CPU_32v6K
 	clrex
 #else
-	strex	r0, r1, [sp]			@ Clear the exclusive monitor
+	sub	r1, sp, #4			@ Get unused stack location
+	strex	r0, r1, [r1]			@ Clear the exclusive monitor
 #endif
 	mrc	p15, 0, r1, c5, c0, 0		@ get FSR
 	mrc	p15, 0, r0, c6, c0, 0		@ get FAR

+ 1 - 1
arch/arm/plat-s3c64xx/irq-eint.c

@@ -55,7 +55,7 @@ static void s3c_irq_eint_unmask(unsigned int irq)
 	u32 mask;
 
 	mask = __raw_readl(S3C64XX_EINT0MASK);
-	mask |= eint_irq_to_bit(irq);
+	mask &= ~eint_irq_to_bit(irq);
 	__raw_writel(mask, S3C64XX_EINT0MASK);
 }
 

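The one-line fix above corrects an inverted mask polarity: in EINT0MASK a set bit disables the line, so unmasking must clear the bit, while the old code OR'ed it in and thereby masked the very interrupt it was asked to enable. A minimal userspace sketch of the intended register manipulation; the plain variable standing in for S3C64XX_EINT0MASK is an illustrative assumption.

#include <stdint.h>
#include <stdio.h>

static uint32_t eint0mask;		/* stands in for S3C64XX_EINT0MASK */

static void eint_mask(uint32_t bit)
{
	eint0mask |= bit;		/* set bit: interrupt disabled */
}

static void eint_unmask(uint32_t bit)
{
	eint0mask &= ~bit;		/* clear bit: interrupt enabled */
}

int main(void)
{
	eint_mask(1u << 3);
	eint_unmask(1u << 3);
	printf("mask = %#x\n", eint0mask);	/* 0: line 3 enabled again */
	return 0;
}
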
+ 4 - 0
arch/powerpc/platforms/86xx/gef_sbc610.c

@@ -142,6 +142,10 @@ static void __init gef_sbc610_nec_fixup(struct pci_dev *pdev)
 {
 	unsigned int val;
 
+	/* Do not do the fixup on other platforms! */
+	if (!machine_is(gef_sbc610))
+		return;
+
 	printk(KERN_INFO "Running NEC uPD720101 Fixup\n");
 
 	/* Ensure ports 1, 2, 3, 4 & 5 are enabled */

+ 1 - 1
arch/s390/crypto/aes_s390.c

@@ -556,7 +556,7 @@ static void __exit aes_s390_fini(void)
 module_init(aes_s390_init);
 module_exit(aes_s390_fini);
 
-MODULE_ALIAS("aes");
+MODULE_ALIAS("aes-all");
 
 MODULE_DESCRIPTION("Rijndael (AES) Cipher Algorithm");
 MODULE_LICENSE("GPL");

+ 2 - 0
arch/x86/mm/init_64.c

@@ -714,6 +714,8 @@ unsigned long __init_refok init_memory_mapping(unsigned long start,
 	pos = start_pfn << PAGE_SHIFT;
 	end_pfn = ((pos + (PMD_SIZE - 1)) >> PMD_SHIFT)
 			<< (PMD_SHIFT - PAGE_SHIFT);
+	if (end_pfn > (end >> PAGE_SHIFT))
+		end_pfn = end >> PAGE_SHIFT;
 	if (start_pfn < end_pfn) {
 		nr_range = save_mr(mr, nr_range, start_pfn, end_pfn, 0);
 		pos = end_pfn << PAGE_SHIFT;

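The two added lines clamp a rounding overshoot: aligning pos up to the next 2 MiB (PMD) boundary can push end_pfn past the requested end, which would make init_memory_mapping() map beyond the caller's range. A small standalone program working through one such case; the constants mirror x86-64's PAGE_SHIFT=12 and PMD_SHIFT=21.

#include <stdio.h>

#define PAGE_SHIFT	12
#define PMD_SHIFT	21
#define PMD_SIZE	(1UL << PMD_SHIFT)

int main(void)
{
	unsigned long pos = 0x100000;	/* 1 MiB */
	unsigned long end = 0x180000;	/* 1.5 MiB */
	unsigned long end_pfn = ((pos + (PMD_SIZE - 1)) >> PMD_SHIFT)
				<< (PMD_SHIFT - PAGE_SHIFT);

	/* rounding up yields pfn 0x200 (2 MiB), past end's pfn 0x180 */
	printf("rounded end_pfn = %#lx, limit = %#lx\n",
	       end_pfn, end >> PAGE_SHIFT);
	if (end_pfn > (end >> PAGE_SHIFT))	/* the added clamp */
		end_pfn = end >> PAGE_SHIFT;
	printf("clamped end_pfn = %#lx\n", end_pfn);
	return 0;
}
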
+ 96 - 53
arch/x86/mm/kmmio.c

@@ -32,11 +32,14 @@ struct kmmio_fault_page {
 	struct list_head list;
 	struct kmmio_fault_page *release_next;
 	unsigned long page; /* location of the fault page */
+	bool old_presence; /* page presence prior to arming */
+	bool armed;
 
 	/*
 	 * Number of times this page has been registered as a part
 	 * of a probe. If zero, page is disarmed and this may be freed.
-	 * Used only by writers (RCU).
+	 * Used only by writers (RCU) and post_kmmio_handler().
+	 * Protected by kmmio_lock, when linked into kmmio_page_table.
 	 */
 	int count;
 };
@@ -105,57 +108,85 @@ static struct kmmio_fault_page *get_kmmio_fault_page(unsigned long page)
 	return NULL;
 }
 
-static void set_page_present(unsigned long addr, bool present,
-							unsigned int *pglevel)
+static void set_pmd_presence(pmd_t *pmd, bool present, bool *old)
+{
+	pmdval_t v = pmd_val(*pmd);
+	*old = !!(v & _PAGE_PRESENT);
+	v &= ~_PAGE_PRESENT;
+	if (present)
+		v |= _PAGE_PRESENT;
+	set_pmd(pmd, __pmd(v));
+}
+
+static void set_pte_presence(pte_t *pte, bool present, bool *old)
+{
+	pteval_t v = pte_val(*pte);
+	*old = !!(v & _PAGE_PRESENT);
+	v &= ~_PAGE_PRESENT;
+	if (present)
+		v |= _PAGE_PRESENT;
+	set_pte_atomic(pte, __pte(v));
+}
+
+static int set_page_presence(unsigned long addr, bool present, bool *old)
 {
-	pteval_t pteval;
-	pmdval_t pmdval;
 	unsigned int level;
-	pmd_t *pmd;
 	pte_t *pte = lookup_address(addr, &level);
 
 	if (!pte) {
 		pr_err("kmmio: no pte for page 0x%08lx\n", addr);
-		return;
+		return -1;
 	}
 
-	if (pglevel)
-		*pglevel = level;
-
 	switch (level) {
 	case PG_LEVEL_2M:
-		pmd = (pmd_t *)pte;
-		pmdval = pmd_val(*pmd) & ~_PAGE_PRESENT;
-		if (present)
-			pmdval |= _PAGE_PRESENT;
-		set_pmd(pmd, __pmd(pmdval));
+		set_pmd_presence((pmd_t *)pte, present, old);
 		break;
-
 	case PG_LEVEL_4K:
-		pteval = pte_val(*pte) & ~_PAGE_PRESENT;
-		if (present)
-			pteval |= _PAGE_PRESENT;
-		set_pte_atomic(pte, __pte(pteval));
+		set_pte_presence(pte, present, old);
 		break;
-
 	default:
 		pr_err("kmmio: unexpected page level 0x%x.\n", level);
-		return;
+		return -1;
 	}
 
 	__flush_tlb_one(addr);
+	return 0;
 }
 
-/** Mark the given page as not present. Access to it will trigger a fault. */
-static void arm_kmmio_fault_page(unsigned long page, unsigned int *pglevel)
+/*
+ * Mark the given page as not present. Access to it will trigger a fault.
+ *
+ * Struct kmmio_fault_page is protected by RCU and kmmio_lock, but the
+ * protection is ignored here. RCU read lock is assumed held, so the struct
+ * will not disappear unexpectedly. Furthermore, the caller must guarantee,
+ * that double arming the same virtual address (page) cannot occur.
+ *
+ * Double disarming on the other hand is allowed, and may occur when a fault
+ * and mmiotrace shutdown happen simultaneously.
+ */
+static int arm_kmmio_fault_page(struct kmmio_fault_page *f)
 {
-	set_page_present(page & PAGE_MASK, false, pglevel);
+	int ret;
+	WARN_ONCE(f->armed, KERN_ERR "kmmio page already armed.\n");
+	if (f->armed) {
+		pr_warning("kmmio double-arm: page 0x%08lx, ref %d, old %d\n",
+					f->page, f->count, f->old_presence);
+	}
+	ret = set_page_presence(f->page, false, &f->old_presence);
+	WARN_ONCE(ret < 0, KERN_ERR "kmmio arming 0x%08lx failed.\n", f->page);
+	f->armed = true;
+	return ret;
 }
 
-/** Mark the given page as present. */
-static void disarm_kmmio_fault_page(unsigned long page, unsigned int *pglevel)
+/** Restore the given page to saved presence state. */
+static void disarm_kmmio_fault_page(struct kmmio_fault_page *f)
 {
-	set_page_present(page & PAGE_MASK, true, pglevel);
+	bool tmp;
+	int ret = set_page_presence(f->page, f->old_presence, &tmp);
+	WARN_ONCE(ret < 0,
+			KERN_ERR "kmmio disarming 0x%08lx failed.\n", f->page);
+	f->armed = false;
 }
 
 /*
@@ -202,28 +233,32 @@ int kmmio_handler(struct pt_regs *regs, unsigned long addr)
 
 	ctx = &get_cpu_var(kmmio_ctx);
 	if (ctx->active) {
-		disarm_kmmio_fault_page(faultpage->page, NULL);
 		if (addr == ctx->addr) {
 			/*
-			 * On SMP we sometimes get recursive probe hits on the
-			 * same address. Context is already saved, fall out.
+			 * A second fault on the same page means some other
+			 * condition needs handling by do_page_fault(), the
+			 * page really not being present is the most common.
 			 */
-			pr_debug("kmmio: duplicate probe hit on CPU %d, for "
-						"address 0x%08lx.\n",
-						smp_processor_id(), addr);
-			ret = 1;
-			goto no_kmmio_ctx;
-		}
-		/*
-		 * Prevent overwriting already in-flight context.
-		 * This should not happen, let's hope disarming at least
-		 * prevents a panic.
-		 */
-		pr_emerg("kmmio: recursive probe hit on CPU %d, "
+			pr_debug("kmmio: secondary hit for 0x%08lx CPU %d.\n",
+					addr, smp_processor_id());
+
+			if (!faultpage->old_presence)
+				pr_info("kmmio: unexpected secondary hit for "
+					"address 0x%08lx on CPU %d.\n", addr,
+					smp_processor_id());
+		} else {
+			/*
+			 * Prevent overwriting already in-flight context.
+			 * This should not happen, let's hope disarming at
+			 * least prevents a panic.
+			 */
+			pr_emerg("kmmio: recursive probe hit on CPU %d, "
					"for address 0x%08lx. Ignoring.\n",
					smp_processor_id(), addr);
-		pr_emerg("kmmio: previous hit was at 0x%08lx.\n",
-					ctx->addr);
+			pr_emerg("kmmio: previous hit was at 0x%08lx.\n",
+						ctx->addr);
+			disarm_kmmio_fault_page(faultpage);
+		}
 		goto no_kmmio_ctx;
 	}
 	ctx->active++;
@@ -244,7 +279,7 @@ int kmmio_handler(struct pt_regs *regs, unsigned long addr)
 	regs->flags &= ~X86_EFLAGS_IF;
 
 	/* Now we set present bit in PTE and single step. */
-	disarm_kmmio_fault_page(ctx->fpage->page, NULL);
+	disarm_kmmio_fault_page(ctx->fpage);
 
 	/*
 	 * If another cpu accesses the same page while we are stepping,
@@ -275,7 +310,7 @@ static int post_kmmio_handler(unsigned long condition, struct pt_regs *regs)
 	struct kmmio_context *ctx = &get_cpu_var(kmmio_ctx);
 
 	if (!ctx->active) {
-		pr_debug("kmmio: spurious debug trap on CPU %d.\n",
+		pr_warning("kmmio: spurious debug trap on CPU %d.\n",
 							smp_processor_id());
 		goto out;
 	}
@@ -283,7 +318,11 @@ static int post_kmmio_handler(unsigned long condition, struct pt_regs *regs)
 	if (ctx->probe && ctx->probe->post_handler)
 		ctx->probe->post_handler(ctx->probe, condition, regs);
 
-	arm_kmmio_fault_page(ctx->fpage->page, NULL);
+	/* Prevent racing against release_kmmio_fault_page(). */
+	spin_lock(&kmmio_lock);
+	if (ctx->fpage->count)
+		arm_kmmio_fault_page(ctx->fpage);
+	spin_unlock(&kmmio_lock);
 
 	regs->flags &= ~X86_EFLAGS_TF;
 	regs->flags |= ctx->saved_flags;
@@ -315,20 +354,24 @@ static int add_kmmio_fault_page(unsigned long page)
 	f = get_kmmio_fault_page(page);
 	if (f) {
 		if (!f->count)
-			arm_kmmio_fault_page(f->page, NULL);
+			arm_kmmio_fault_page(f);
 		f->count++;
 		return 0;
 	}
 
-	f = kmalloc(sizeof(*f), GFP_ATOMIC);
+	f = kzalloc(sizeof(*f), GFP_ATOMIC);
 	if (!f)
 		return -1;
 
 	f->count = 1;
 	f->page = page;
-	list_add_rcu(&f->list, kmmio_page_list(f->page));
 
-	arm_kmmio_fault_page(f->page, NULL);
+	if (arm_kmmio_fault_page(f)) {
+		kfree(f);
+		return -1;
+	}
+
+	list_add_rcu(&f->list, kmmio_page_list(f->page));
 
 	return 0;
 }
@@ -347,7 +390,7 @@ static void release_kmmio_fault_page(unsigned long page,
 	f->count--;
 	BUG_ON(f->count < 0);
 	if (!f->count) {
-		disarm_kmmio_fault_page(f->page, NULL);
+		disarm_kmmio_fault_page(f);
 		f->release_next = *release_list;
 		*release_list = f;
 	}

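The heart of this kmmio rework is that arming a fault page now records the page's prior presence in old_presence, and disarming restores that recorded state rather than unconditionally setting _PAGE_PRESENT; that is what makes a "secondary hit" on a genuinely non-present page safe to hand back to do_page_fault(). A simplified userspace sketch of the save-and-restore pattern; PAGE_PRESENT and the demo PTE value are illustrative.

#include <stdbool.h>
#include <stdio.h>

#define PAGE_PRESENT	0x1UL

/* Flip the presence bit while reporting what it was before. */
static void set_presence(unsigned long *pte, bool present, bool *old)
{
	*old = *pte & PAGE_PRESENT;
	*pte &= ~PAGE_PRESENT;
	if (present)
		*pte |= PAGE_PRESENT;
}

int main(void)
{
	unsigned long pte = 0x1000;	/* starts out non-present */
	bool old_presence;

	set_presence(&pte, false, &old_presence);	 /* arm */
	set_presence(&pte, old_presence, &old_presence); /* disarm: restore */
	printf("pte = %#lx (still non-present)\n", pte);
	return 0;
}
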
+ 57 - 13
arch/x86/mm/testmmiotrace.c

@@ -1,5 +1,5 @@
 /*
- * Written by Pekka Paalanen, 2008 <pq@iki.fi>
+ * Written by Pekka Paalanen, 2008-2009 <pq@iki.fi>
  */
 #include <linux/module.h>
 #include <linux/io.h>
@@ -9,35 +9,74 @@
 
 static unsigned long mmio_address;
 module_param(mmio_address, ulong, 0);
-MODULE_PARM_DESC(mmio_address, "Start address of the mapping of 16 kB.");
+MODULE_PARM_DESC(mmio_address, " Start address of the mapping of 16 kB "
+				"(or 8 MB if read_far is non-zero).");
+
+static unsigned long read_far = 0x400100;
+module_param(read_far, ulong, 0);
+MODULE_PARM_DESC(read_far, " Offset of a 32-bit read within 8 MB "
+				"(default: 0x400100).");
+
+static unsigned v16(unsigned i)
+{
+	return i * 12 + 7;
+}
+
+static unsigned v32(unsigned i)
+{
+	return i * 212371 + 13;
+}
 
 static void do_write_test(void __iomem *p)
 {
 	unsigned int i;
+	pr_info(MODULE_NAME ": write test.\n");
 	mmiotrace_printk("Write test.\n");
+
 	for (i = 0; i < 256; i++)
 		iowrite8(i, p + i);
+
 	for (i = 1024; i < (5 * 1024); i += 2)
-		iowrite16(i * 12 + 7, p + i);
+		iowrite16(v16(i), p + i);
+
 	for (i = (5 * 1024); i < (16 * 1024); i += 4)
-		iowrite32(i * 212371 + 13, p + i);
+		iowrite32(v32(i), p + i);
 }
 
 static void do_read_test(void __iomem *p)
 {
 	unsigned int i;
+	unsigned errs[3] = { 0 };
+	pr_info(MODULE_NAME ": read test.\n");
 	mmiotrace_printk("Read test.\n");
+
 	for (i = 0; i < 256; i++)
-		ioread8(p + i);
+		if (ioread8(p + i) != i)
+			++errs[0];
+
 	for (i = 1024; i < (5 * 1024); i += 2)
-		ioread16(p + i);
+		if (ioread16(p + i) != v16(i))
+			++errs[1];
+
 	for (i = (5 * 1024); i < (16 * 1024); i += 4)
-		ioread32(p + i);
+		if (ioread32(p + i) != v32(i))
+			++errs[2];
+
+	mmiotrace_printk("Read errors: 8-bit %d, 16-bit %d, 32-bit %d.\n",
+						errs[0], errs[1], errs[2]);
 }
 
-static void do_test(void)
+static void do_read_far_test(void __iomem *p)
 {
-	void __iomem *p = ioremap_nocache(mmio_address, 0x4000);
+	pr_info(MODULE_NAME ": read far test.\n");
+	mmiotrace_printk("Read far test.\n");
+
+	ioread32(p + read_far);
+}
+
+static void do_test(unsigned long size)
+{
+	void __iomem *p = ioremap_nocache(mmio_address, size);
 	if (!p) {
 		pr_err(MODULE_NAME ": could not ioremap, aborting.\n");
 		return;
@@ -45,11 +84,15 @@ static void do_test(void)
 	mmiotrace_printk("ioremap returned %p.\n", p);
 	do_write_test(p);
 	do_read_test(p);
+	if (read_far && read_far < size - 4)
+		do_read_far_test(p);
 	iounmap(p);
 }
 
 static int __init init(void)
 {
+	unsigned long size = (read_far) ? (8 << 20) : (16 << 10);
+
 	if (mmio_address == 0) {
 		pr_err(MODULE_NAME ": you have to use the module argument "
 							"mmio_address.\n");
@@ -58,10 +101,11 @@ static int __init init(void)
 		return -ENXIO;
 	}
 
-	pr_warning(MODULE_NAME ": WARNING: mapping 16 kB @ 0x%08lx "
-					"in PCI address space, and writing "
-					"rubbish in there.\n", mmio_address);
-	do_test();
+	pr_warning(MODULE_NAME ": WARNING: mapping %lu kB @ 0x%08lx in PCI "
+		"address space, and writing 16 kB of rubbish in there.\n",
+		 size >> 10, mmio_address);
+	do_test(size);
+	pr_info(MODULE_NAME ": All done.\n");
 	return 0;
 }
 

+ 12 - 2
arch/x86/oprofile/op_model_ppro.c

@@ -78,8 +78,18 @@ static void ppro_setup_ctrs(struct op_msrs const * const msrs)
 	if (cpu_has_arch_perfmon) {
 		union cpuid10_eax eax;
 		eax.full = cpuid_eax(0xa);
-		if (counter_width < eax.split.bit_width)
-			counter_width = eax.split.bit_width;
+
+		/*
+		 * For Core2 (family 6, model 15), don't reset the
+		 * counter width:
+		 */
+		if (!(eax.split.version_id == 0 &&
+			current_cpu_data.x86 == 6 &&
+				current_cpu_data.x86_model == 15)) {
+
+			if (counter_width < eax.split.bit_width)
+				counter_width = eax.split.bit_width;
+		}
 	}
 
 	/* clear all counters */

+ 13 - 2
crypto/api.c

@@ -215,8 +215,19 @@ struct crypto_alg *crypto_larval_lookup(const char *name, u32 type, u32 mask)
 	mask &= ~(CRYPTO_ALG_LARVAL | CRYPTO_ALG_DEAD);
 	type &= mask;
 
-	alg = try_then_request_module(crypto_alg_lookup(name, type, mask),
-				      name);
+	alg = crypto_alg_lookup(name, type, mask);
+	if (!alg) {
+		char tmp[CRYPTO_MAX_ALG_NAME];
+
+		request_module(name);
+
+		if (!((type ^ CRYPTO_ALG_NEED_FALLBACK) & mask) &&
+		    snprintf(tmp, sizeof(tmp), "%s-all", name) < sizeof(tmp))
+			request_module(tmp);
+
+		alg = crypto_alg_lookup(name, type, mask);
+	}
+
 	if (alg)
 		return crypto_is_larval(alg) ? crypto_larval_wait(alg) : alg;
 

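This crypto/api.c change unrolls try_then_request_module() into an explicit two-step lookup: request a module named after the algorithm, and, when the caller did not set CRYPTO_ALG_NEED_FALLBACK, also request "<name>-all" so that a self-contained implementation (see the aes_s390 and padlock MODULE_ALIAS hunks in this merge) can be loaded instead of a driver that itself needs a fallback. A userspace sketch of just the alias construction; CRYPTO_MAX_ALG_NAME = 64 is an assumption taken from kernel headers of this era.

#include <stdio.h>

#define CRYPTO_MAX_ALG_NAME	64

int main(void)
{
	const char *name = "aes";
	char tmp[CRYPTO_MAX_ALG_NAME];

	/* snprintf returns the would-be length, so this only accepts the
	 * alias when "<name>-all" fits in the buffer, as in the hunk. */
	if (snprintf(tmp, sizeof(tmp), "%s-all", name) < (int)sizeof(tmp))
		printf("would request_module(\"%s\")\n", tmp);
	return 0;
}
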
+ 4 - 2
drivers/crypto/ixp4xx_crypto.c

@@ -457,10 +457,12 @@ static int init_ixp_crypto(void)
 	if (!ctx_pool) {
 		goto err;
 	}
-	ret = qmgr_request_queue(SEND_QID, NPE_QLEN_TOTAL, 0, 0);
+	ret = qmgr_request_queue(SEND_QID, NPE_QLEN_TOTAL, 0, 0,
+				 "ixp_crypto:out", NULL);
 	if (ret)
 		goto err;
-	ret = qmgr_request_queue(RECV_QID, NPE_QLEN, 0, 0);
+	ret = qmgr_request_queue(RECV_QID, NPE_QLEN, 0, 0,
+				 "ixp_crypto:in", NULL);
 	if (ret) {
 		qmgr_release_queue(SEND_QID);
 		goto err;

+ 1 - 1
drivers/crypto/padlock-aes.c

@@ -489,4 +489,4 @@ MODULE_DESCRIPTION("VIA PadLock AES algorithm support");
 MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Michal Ludvig");
 
-MODULE_ALIAS("aes");
+MODULE_ALIAS("aes-all");

+ 2 - 2
drivers/crypto/padlock-sha.c

@@ -304,7 +304,7 @@ MODULE_DESCRIPTION("VIA PadLock SHA1/SHA256 algorithms support.");
 MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Michal Ludvig");
 
-MODULE_ALIAS("sha1");
-MODULE_ALIAS("sha256");
+MODULE_ALIAS("sha1-all");
+MODULE_ALIAS("sha256-all");
 MODULE_ALIAS("sha1-padlock");
 MODULE_ALIAS("sha256-padlock");

+ 1 - 1
drivers/dma/iop-adma.c

@@ -1401,7 +1401,7 @@ MODULE_ALIAS("platform:iop-adma");
 
 static struct platform_driver iop_adma_driver = {
 	.probe		= iop_adma_probe,
-	.remove		= iop_adma_remove,
+	.remove		= __devexit_p(iop_adma_remove),
 	.driver		= {
 		.owner	= THIS_MODULE,
 		.name	= "iop-adma",

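This hunk and the mv_xor, i2c-mv64xxx, orion_nand and pxafb hunks below apply the same fix: a remove handler marked __devexit can be discarded at link time when hotplug support is compiled out, so its address must be taken through __devexit_p(), which collapses to NULL in that configuration instead of leaving a dangling pointer. A simplified sketch of the definition, paraphrased from include/linux/init.h of this kernel generation rather than copied verbatim:

#if defined(MODULE) || defined(CONFIG_HOTPLUG)
#define __devexit_p(x)	x	/* handler is kept: reference it */
#else
#define __devexit_p(x)	NULL	/* handler is discarded: no reference */
#endif
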
+ 1 - 1
drivers/dma/mv_xor.c

@@ -1287,7 +1287,7 @@ mv_xor_conf_mbus_windows(struct mv_xor_shared_private *msp,
 
 static struct platform_driver mv_xor_driver = {
 	.probe		= mv_xor_probe,
-	.remove		= mv_xor_remove,
+	.remove		= __devexit_p(mv_xor_remove),
 	.driver		= {
 		.owner	= THIS_MODULE,
 		.name	= MV_XOR_NAME,

+ 1 - 1
drivers/gpu/drm/drm_stub.c

@@ -168,7 +168,7 @@ int drm_setmaster_ioctl(struct drm_device *dev, void *data,
 	    file_priv->minor->master != file_priv->master) {
 		mutex_lock(&dev->struct_mutex);
 		file_priv->minor->master = drm_master_get(file_priv->master);
-		mutex_lock(&dev->struct_mutex);
+		mutex_unlock(&dev->struct_mutex);
 	}
 
 	return 0;

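The drm_stub.c hunk replaces a copy-pasted second mutex_lock() with the intended mutex_unlock(); on a non-recursive mutex the second acquisition blocks forever, deadlocking the ioctl against itself. A trivial userspace analogue with pthreads, for illustration only:

#include <pthread.h>

int main(void)
{
	pthread_mutex_t m = PTHREAD_MUTEX_INITIALIZER;

	pthread_mutex_lock(&m);
	/* pthread_mutex_lock(&m);  -- would block forever, like the bug */
	pthread_mutex_unlock(&m);	/* the corrected call */
	return 0;
}
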
+ 2 - 2
drivers/i2c/busses/i2c-mv64xxx.c

@@ -482,7 +482,7 @@ mv64xxx_i2c_map_regs(struct platform_device *pd,
 	return 0;
 }
 
-static void __devexit
+static void
 mv64xxx_i2c_unmap_regs(struct mv64xxx_i2c_data *drv_data)
 {
 	if (drv_data->reg_base) {
@@ -577,7 +577,7 @@ mv64xxx_i2c_remove(struct platform_device *dev)
 
 static struct platform_driver mv64xxx_i2c_driver = {
 	.probe	= mv64xxx_i2c_probe,
-	.remove	= mv64xxx_i2c_remove,
+	.remove	= __devexit_p(mv64xxx_i2c_remove),
 	.driver	= {
 		.owner	= THIS_MODULE,
 		.name	= MV64XXX_I2C_CTLR_NAME,

+ 1 - 1
drivers/mtd/nand/orion_nand.c

@@ -149,7 +149,7 @@ static int __devexit orion_nand_remove(struct platform_device *pdev)
 
 static struct platform_driver orion_nand_driver = {
 	.probe		= orion_nand_probe,
-	.remove		= orion_nand_remove,
+	.remove		= __devexit_p(orion_nand_remove),
 	.driver		= {
 		.name	= "orion_nand",
 		.owner	= THIS_MODULE,

+ 1 - 1
drivers/net/arm/Makefile

@@ -4,7 +4,7 @@
 #
 
 obj-$(CONFIG_ARM_AM79C961A)	+= am79c961a.o
-obj-$(CONFIG_ARM_ETHERH)	+= etherh.o ../8390.o
+obj-$(CONFIG_ARM_ETHERH)	+= etherh.o
 obj-$(CONFIG_ARM_ETHER3)	+= ether3.o
 obj-$(CONFIG_ARM_ETHER1)	+= ether1.o
 obj-$(CONFIG_ARM_AT91_ETHER)	+= at91_ether.o

+ 5 - 5
drivers/net/arm/etherh.c

@@ -641,15 +641,15 @@ static const struct net_device_ops etherh_netdev_ops = {
 	.ndo_open		= etherh_open,
 	.ndo_stop		= etherh_close,
 	.ndo_set_config		= etherh_set_config,
-	.ndo_start_xmit		= ei_start_xmit,
-	.ndo_tx_timeout		= ei_tx_timeout,
-	.ndo_get_stats		= ei_get_stats,
-	.ndo_set_multicast_list = ei_set_multicast_list,
+	.ndo_start_xmit		= __ei_start_xmit,
+	.ndo_tx_timeout		= __ei_tx_timeout,
+	.ndo_get_stats		= __ei_get_stats,
+	.ndo_set_multicast_list = __ei_set_multicast_list,
 	.ndo_validate_addr	= eth_validate_addr,
 	.ndo_set_mac_address	= eth_mac_addr,
 	.ndo_change_mtu		= eth_change_mtu,
 #ifdef CONFIG_NET_POLL_CONTROLLER
-	.ndo_poll_controller	= ei_poll,
+	.ndo_poll_controller	= __ei_poll,
 #endif
 };
 

+ 1 - 1
drivers/video/pxafb.c

@@ -2230,7 +2230,7 @@ static int __devexit pxafb_remove(struct platform_device *dev)
 
 static struct platform_driver pxafb_driver = {
 	.probe		= pxafb_probe,
-	.remove 	= pxafb_remove,
+	.remove 	= __devexit_p(pxafb_remove),
 	.suspend	= pxafb_suspend,
 	.resume		= pxafb_resume,
 	.driver		= {

+ 6 - 0
include/linux/rcuclassic.h

@@ -181,4 +181,10 @@ extern long rcu_batches_completed_bh(void);
 #define rcu_enter_nohz()	do { } while (0)
 #define rcu_exit_nohz()		do { } while (0)
 
+/* A context switch is a grace period for rcuclassic. */
+static inline int rcu_blocking_is_gp(void)
+{
+	return num_online_cpus() == 1;
+}
+
 #endif /* __LINUX_RCUCLASSIC_H */

+ 4 - 0
include/linux/rcupdate.h

@@ -52,6 +52,9 @@ struct rcu_head {
 	void (*func)(struct rcu_head *head);
 };
 
+/* Internal to kernel, but needed by rcupreempt.h. */
+extern int rcu_scheduler_active;
+
 #if defined(CONFIG_CLASSIC_RCU)
 #include <linux/rcuclassic.h>
 #elif defined(CONFIG_TREE_RCU)
@@ -265,6 +268,7 @@ extern void rcu_barrier_sched(void);
 
 /* Internal to kernel */
 extern void rcu_init(void);
+extern void rcu_scheduler_starting(void);
 extern int rcu_needs_cpu(int cpu);
 
 #endif /* __LINUX_RCUPDATE_H */

+ 15 - 0
include/linux/rcupreempt.h

@@ -142,4 +142,19 @@ static inline void rcu_exit_nohz(void)
 #define rcu_exit_nohz()		do { } while (0)
 #endif /* CONFIG_NO_HZ */
 
+/*
+ * A context switch is a grace period for rcupreempt synchronize_rcu()
+ * only during early boot, before the scheduler has been initialized.
+ * So, how the heck do we get a context switch?  Well, if the caller
+ * invokes synchronize_rcu(), they are willing to accept a context
+ * switch, so we simply pretend that one happened.
+ *
+ * After boot, there might be a blocked or preempted task in an RCU
+ * read-side critical section, so we cannot then take the fastpath.
+ */
+static inline int rcu_blocking_is_gp(void)
+{
+	return num_online_cpus() == 1 && !rcu_scheduler_active;
+}
+
 #endif /* __LINUX_RCUPREEMPT_H */

+ 6 - 0
include/linux/rcutree.h

@@ -326,4 +326,10 @@ static inline void rcu_exit_nohz(void)
 }
 #endif /* CONFIG_NO_HZ */
 
+/* A context switch is a grace period for rcutree. */
+static inline int rcu_blocking_is_gp(void)
+{
+	return num_online_cpus() == 1;
+}
+
 #endif /* __LINUX_RCUTREE_H */

+ 4 - 0
include/linux/sched.h

@@ -2291,9 +2291,13 @@ extern long sched_group_rt_runtime(struct task_group *tg);
 extern int sched_group_set_rt_period(struct task_group *tg,
 				      long rt_period_us);
 extern long sched_group_rt_period(struct task_group *tg);
+extern int sched_rt_can_attach(struct task_group *tg, struct task_struct *tsk);
 #endif
 #endif
 
+extern int task_can_switch_user(struct user_struct *up,
+					struct task_struct *tsk);
+
 #ifdef CONFIG_TASK_XACCT
 static inline void add_rchar(struct task_struct *tsk, ssize_t amt)
 {

+ 2 - 1
init/main.c

@@ -97,7 +97,7 @@ static inline void mark_rodata_ro(void) { }
 extern void tc_init(void);
 #endif
 
-enum system_states system_state;
+enum system_states system_state __read_mostly;
 EXPORT_SYMBOL(system_state);
 
 /*
@@ -463,6 +463,7 @@ static noinline void __init_refok rest_init(void)
 	 * at least once to get things moving:
 	 */
 	init_idle_bootup_task(current);
+	rcu_scheduler_starting();
 	preempt_enable_no_resched();
 	schedule();
 	preempt_disable();

+ 2 - 2
kernel/rcuclassic.c

@@ -679,8 +679,8 @@ int rcu_needs_cpu(int cpu)
 void rcu_check_callbacks(int cpu, int user)
 {
 	if (user ||
-	    (idle_cpu(cpu) && !in_softirq() &&
-				hardirq_count() <= (1 << HARDIRQ_SHIFT))) {
+	    (idle_cpu(cpu) && rcu_scheduler_active &&
+	     !in_softirq() && hardirq_count() <= (1 << HARDIRQ_SHIFT))) {
 
 		/*
 		 * Get here if this CPU took its interrupt from user

+ 12 - 0
kernel/rcupdate.c

@@ -44,6 +44,7 @@
 #include <linux/cpu.h>
 #include <linux/mutex.h>
 #include <linux/module.h>
+#include <linux/kernel_stat.h>
 
 enum rcu_barrier {
 	RCU_BARRIER_STD,
@@ -55,6 +56,7 @@ static DEFINE_PER_CPU(struct rcu_head, rcu_barrier_head) = {NULL};
 static atomic_t rcu_barrier_cpu_count;
 static DEFINE_MUTEX(rcu_barrier_mutex);
 static struct completion rcu_barrier_completion;
+int rcu_scheduler_active __read_mostly;
 
 /*
  * Awaken the corresponding synchronize_rcu() instance now that a
@@ -80,6 +82,10 @@ void wakeme_after_rcu(struct rcu_head  *head)
 void synchronize_rcu(void)
 {
 	struct rcu_synchronize rcu;
+
+	if (rcu_blocking_is_gp())
+		return;
+
 	init_completion(&rcu.completion);
 	/* Will wake me after RCU finished. */
 	call_rcu(&rcu.head, wakeme_after_rcu);
@@ -175,3 +181,9 @@ void __init rcu_init(void)
 	__rcu_init();
 }
 
+void rcu_scheduler_starting(void)
+{
+	WARN_ON(num_online_cpus() != 1);
+	WARN_ON(nr_context_switches() > 0);
+	rcu_scheduler_active = 1;
+}

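Taken together, the RCU hunks add an early-boot fast path: until rest_init() calls rcu_scheduler_starting() there is one CPU and no context switching, so no reader can be preempted inside a read-side critical section and synchronize_rcu() may return at once. A condensed, userspace-compilable sketch of how the pieces fit; the stubs stand in for the real kernel code, and the check shown is the rcupreempt flavour (the classic and tree variants test only num_online_cpus() == 1).

#include <stdio.h>

static int rcu_scheduler_active;		/* 0 until "rest_init" */

static int num_online_cpus(void) { return 1; }	/* stub: early boot */

static int rcu_blocking_is_gp(void)
{
	return num_online_cpus() == 1 && !rcu_scheduler_active;
}

static void synchronize_rcu(void)
{
	if (rcu_blocking_is_gp()) {
		printf("early boot: grace period is free\n");
		return;
	}
	printf("would queue a callback and wait\n");
}

int main(void)
{
	synchronize_rcu();		/* fast path */
	rcu_scheduler_active = 1;	/* rcu_scheduler_starting() */
	synchronize_rcu();		/* slow path from now on */
	return 0;
}
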
+ 3 - 0
kernel/rcupreempt.c

@@ -1181,6 +1181,9 @@ void __synchronize_sched(void)
 {
 	struct rcu_synchronize rcu;
 
+	if (num_online_cpus() == 1)
+		return;  /* blocking is gp if only one CPU! */
+
 	init_completion(&rcu.completion);
 	/* Will wake me after RCU finished. */
 	call_rcu_sched(&rcu.head, wakeme_after_rcu);

+ 2 - 2
kernel/rcutree.c

@@ -948,8 +948,8 @@ static void rcu_do_batch(struct rcu_data *rdp)
 void rcu_check_callbacks(int cpu, int user)
 {
 	if (user ||
-	    (idle_cpu(cpu) && !in_softirq() &&
-				hardirq_count() <= (1 << HARDIRQ_SHIFT))) {
+	    (idle_cpu(cpu) && rcu_scheduler_active &&
+	     !in_softirq() && hardirq_count() <= (1 << HARDIRQ_SHIFT))) {
 
 		/*
 		 * Get here if this CPU took its interrupt from user

+ 12 - 3
kernel/sched.c

@@ -223,7 +223,7 @@ static void start_rt_bandwidth(struct rt_bandwidth *rt_b)
 {
 	ktime_t now;
 
-	if (rt_bandwidth_enabled() && rt_b->rt_runtime == RUNTIME_INF)
+	if (!rt_bandwidth_enabled() || rt_b->rt_runtime == RUNTIME_INF)
 		return;
 
 	if (hrtimer_active(&rt_b->rt_period_timer))
@@ -9224,6 +9224,16 @@ static int sched_rt_global_constraints(void)
 
 	return ret;
 }
+
+int sched_rt_can_attach(struct task_group *tg, struct task_struct *tsk)
+{
+	/* Don't accept realtime tasks when there is no way for them to run */
+	if (rt_task(tsk) && tg->rt_bandwidth.rt_runtime == 0)
+		return 0;
+
+	return 1;
+}
+
 #else /* !CONFIG_RT_GROUP_SCHED */
 static int sched_rt_global_constraints(void)
 {
@@ -9317,8 +9327,7 @@ cpu_cgroup_can_attach(struct cgroup_subsys *ss, struct cgroup *cgrp,
 		      struct task_struct *tsk)
 {
 #ifdef CONFIG_RT_GROUP_SCHED
-	/* Don't accept realtime tasks when there is no way for them to run */
-	if (rt_task(tsk) && cgroup_tg(cgrp)->rt_bandwidth.rt_runtime == 0)
+	if (!sched_rt_can_attach(cgroup_tg(cgrp), tsk))
 		return -EINVAL;
 #else
 	/* We don't support RT-tasks being in separate groups */

+ 20 - 11
kernel/sys.c

@@ -559,7 +559,7 @@ error:
 	abort_creds(new);
 	return retval;
 }
-  
+
 /*
  * change the user struct in a credentials set to match the new UID
  */
@@ -571,6 +571,11 @@ static int set_user(struct cred *new)
 	if (!new_user)
 		return -EAGAIN;
 
+	if (!task_can_switch_user(new_user, current)) {
+		free_uid(new_user);
+		return -EINVAL;
+	}
+
 	if (atomic_read(&new_user->processes) >=
 				current->signal->rlim[RLIMIT_NPROC].rlim_cur &&
 			new_user != INIT_USER) {
@@ -631,10 +636,11 @@ SYSCALL_DEFINE2(setreuid, uid_t, ruid, uid_t, euid)
 			goto error;
 	}
 
-	retval = -EAGAIN;
-	if (new->uid != old->uid && set_user(new) < 0)
-		goto error;
-
+	if (new->uid != old->uid) {
+		retval = set_user(new);
+		if (retval < 0)
+			goto error;
+	}
 	if (ruid != (uid_t) -1 ||
 	    (euid != (uid_t) -1 && euid != old->uid))
 		new->suid = new->euid;
@@ -680,9 +686,10 @@ SYSCALL_DEFINE1(setuid, uid_t, uid)
 	retval = -EPERM;
 	if (capable(CAP_SETUID)) {
 		new->suid = new->uid = uid;
-		if (uid != old->uid && set_user(new) < 0) {
-			retval = -EAGAIN;
-			goto error;
+		if (uid != old->uid) {
+			retval = set_user(new);
+			if (retval < 0)
+				goto error;
 		}
 	} else if (uid != old->uid && uid != new->suid) {
 		goto error;
@@ -734,11 +741,13 @@ SYSCALL_DEFINE3(setresuid, uid_t, ruid, uid_t, euid, uid_t, suid)
 			goto error;
 	}
 
-	retval = -EAGAIN;
 	if (ruid != (uid_t) -1) {
 		new->uid = ruid;
-		if (ruid != old->uid && set_user(new) < 0)
-			goto error;
+		if (ruid != old->uid) {
+			retval = set_user(new);
+			if (retval < 0)
+				goto error;
+		}
 	}
 	if (euid != (uid_t) -1)
 		new->euid = euid;

+ 18 - 0
kernel/user.c

@@ -362,6 +362,24 @@ static void free_user(struct user_struct *up, unsigned long flags)
 
 #endif
 
+#if defined(CONFIG_RT_GROUP_SCHED) && defined(CONFIG_USER_SCHED)
+/*
+ * We need to check if a setuid can take place. This function should be called
+ * before successfully completing the setuid.
+ */
+int task_can_switch_user(struct user_struct *up, struct task_struct *tsk)
+{
+
+	return sched_rt_can_attach(up->tg, tsk);
+
+}
+#else
+int task_can_switch_user(struct user_struct *up, struct task_struct *tsk)
+{
+	return 1;
+}
+#endif
+
 /*
  * Locate the user_struct for the passed UID.  If found, take a ref on it.  The
  * caller must undo that ref with free_uid().
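The sched.c, sys.c and user.c hunks close a single hole: with CONFIG_RT_GROUP_SCHED and CONFIG_USER_SCHED, a realtime task could setuid() into a user whose task group has rt_runtime == 0 and become permanently unrunnable, so set_user() now rejects the switch with -EINVAL via task_can_switch_user() and sched_rt_can_attach(). A hedged userspace demonstration of the failure mode being closed; it needs root and a kernel with the configs above, and uid 1000 is an arbitrary example.

#include <sched.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	struct sched_param sp = { .sched_priority = 1 };

	if (sched_setscheduler(0, SCHED_FIFO, &sp))
		perror("sched_setscheduler");
	if (setuid(1000))		/* now fails with EINVAL when the */
		perror("setuid");	/* target group has rt_runtime == 0 */
	return 0;
}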