Browse code

Merge branch 'linux-2.6' into merge

Paul Mackerras 17 years ago
Parent
Commit
16fddf5457

+ 8 - 3
arch/powerpc/mm/hash_utils_64.c

@@ -351,9 +351,14 @@ static void __init htab_init_page_sizes(void)
 		mmu_vmalloc_psize = MMU_PAGE_64K;
 		if (mmu_linear_psize == MMU_PAGE_4K)
 			mmu_linear_psize = MMU_PAGE_64K;
-		if (cpu_has_feature(CPU_FTR_CI_LARGE_PAGE))
-			mmu_io_psize = MMU_PAGE_64K;
-		else
+		if (cpu_has_feature(CPU_FTR_CI_LARGE_PAGE)) {
+			/*
+			 * Don't use 64k pages for ioremap on pSeries, since
+			 * that would stop us accessing the HEA ethernet.
+			 */
+			if (!machine_is(pseries))
+				mmu_io_psize = MMU_PAGE_64K;
+		} else
 			mmu_ci_restrictions = 1;
 	}
 #endif /* CONFIG_PPC_64K_PAGES */

+ 6 - 2
arch/powerpc/sysdev/bestcomm/bestcomm.c

@@ -52,6 +52,10 @@ bcom_task_alloc(int bd_count, int bd_size, int priv_size)
 	int i, tasknum = -1;
 	struct bcom_task *tsk;
 
+	/* Don't try to do anything if bestcomm init failed */
+	if (!bcom_eng)
+		return NULL;
+
 	/* Get and reserve a task num */
 	spin_lock(&bcom_eng->lock);
 
@@ -484,8 +488,8 @@ mpc52xx_bcom_remove(struct of_device *op)
 }
 
 static struct of_device_id mpc52xx_bcom_of_match[] = {
-	{ .type = "dma-controller", .compatible = "fsl,mpc5200-bestcomm", },
-	{ .type = "dma-controller", .compatible = "mpc5200-bestcomm", },
+	{ .compatible = "fsl,mpc5200-bestcomm", },
+	{ .compatible = "mpc5200-bestcomm", },
 	{},
 };
 

+ 1 - 1
arch/powerpc/sysdev/ipic.c

@@ -906,7 +906,7 @@ static int __init init_ipic_sysfs(void)
 {
 	int rc;
 
-	if (!primary_ipic->regs)
+	if (!primary_ipic || !primary_ipic->regs)
 		return -ENODEV;
 	printk(KERN_DEBUG "Registering ipic with sysfs...\n");
 

+ 1 - 2
arch/sparc64/kernel/ds.c

@@ -972,8 +972,7 @@ static void process_ds_work(void)
 	LIST_HEAD(todo);
 
 	spin_lock_irqsave(&ds_lock, flags);
-	list_splice(&ds_work_list, &todo);
-	INIT_LIST_HEAD(&ds_work_list);
+	list_splice_init(&ds_work_list, &todo);
 	spin_unlock_irqrestore(&ds_lock, flags);
 
 	list_for_each_entry_safe(qp, tmp, &todo, list) {
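
list_splice_init() folds the old two-step splice-and-reinitialize into a single call under the lock. A minimal standalone sketch of what it does, using a simplified list_head rather than the kernel's <linux/list.h>:

struct list_head { struct list_head *next, *prev; };

static void INIT_LIST_HEAD(struct list_head *h) { h->next = h->prev = h; }

/* Move every node of 'list' onto the front of 'head' and leave 'list'
 * empty; that is, list_splice() followed by INIT_LIST_HEAD(). */
static void list_splice_init(struct list_head *list, struct list_head *head)
{
	if (list->next != list) {		/* source not empty */
		struct list_head *first = list->next;
		struct list_head *last = list->prev;
		struct list_head *at = head->next;

		first->prev = head;
		head->next = first;
		last->next = at;
		at->prev = last;
		INIT_LIST_HEAD(list);		/* source is empty again */
	}
}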

+ 6 - 2
arch/sparc64/kernel/head.S

@@ -288,8 +288,12 @@ sun4v_chip_type:
 	/* Leave arg2 as-is, prom_mmu_ihandle_cache */
 	mov	-1, %l3
 	stx	%l3, [%sp + 2047 + 128 + 0x28]	! arg3: mode (-1 default)
-	sethi	%hi(8 * 1024 * 1024), %l3
-	stx	%l3, [%sp + 2047 + 128 + 0x30]	! arg4: size (8MB)
+	/* 4MB align the kernel image size. */
+	set	(_end - KERNBASE), %l3
+	set	((4 * 1024 * 1024) - 1), %l4
+	add	%l3, %l4, %l3
+	andn	%l3, %l4, %l3
+	stx	%l3, [%sp + 2047 + 128 + 0x30]	! arg4: roundup(ksize, 4MB)
 	sethi	%hi(KERNBASE), %l3
 	stx	%l3, [%sp + 2047 + 128 + 0x38]	! arg5: vaddr (KERNBASE)
 	stx	%g0, [%sp + 2047 + 128 + 0x40]	! arg6: empty
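
The new sequence rounds the kernel image size up to a 4MB boundary with the usual add-then-mask trick (add the mask, then andn it off). The same computation in C, as a standalone sketch:

#include <stdio.h>

int main(void)
{
	unsigned long ksize = 5UL * 1024 * 1024 + 123;	/* example image size */
	unsigned long mask = (4UL * 1024 * 1024) - 1;	/* 4MB - 1 */

	/* add %l3, %l4, %l3 ; andn %l3, %l4, %l3 */
	unsigned long rounded = (ksize + mask) & ~mask;

	printf("%lu rounds up to %lu\n", ksize, rounded);	/* 8MB */
	return 0;
}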

+ 0 - 3
arch/sparc64/kernel/process.c

@@ -731,9 +731,6 @@ asmlinkage int sparc_execve(struct pt_regs *regs)
 		current_thread_info()->xfsr[0] = 0;
 		current_thread_info()->fpsaved[0] = 0;
 		regs->tstate &= ~TSTATE_PEF;
-		task_lock(current);
-		current->ptrace &= ~PT_DTRACE;
-		task_unlock(current);
 	}
 out:
 	return error;

+ 9 - 8
arch/sparc64/kernel/smp.c

@@ -284,14 +284,17 @@ static void ldom_startcpu_cpuid(unsigned int cpu, unsigned long thread_reg)
 {
 	extern unsigned long sparc64_ttable_tl0;
 	extern unsigned long kern_locked_tte_data;
-	extern int bigkernel;
 	struct hvtramp_descr *hdesc;
 	unsigned long trampoline_ra;
 	struct trap_per_cpu *tb;
 	u64 tte_vaddr, tte_data;
 	unsigned long hv_err;
+	int i;
 
-	hdesc = kzalloc(sizeof(*hdesc), GFP_KERNEL);
+	hdesc = kzalloc(sizeof(*hdesc) +
+			(sizeof(struct hvtramp_mapping) *
+			 num_kernel_image_mappings - 1),
+			GFP_KERNEL);
 	if (!hdesc) {
 		printk(KERN_ERR "ldom_startcpu_cpuid: Cannot allocate "
 		       "hvtramp_descr.\n");
@@ -299,7 +302,7 @@ static void ldom_startcpu_cpuid(unsigned int cpu, unsigned long thread_reg)
 	}
 
 	hdesc->cpu = cpu;
-	hdesc->num_mappings = (bigkernel ? 2 : 1);
+	hdesc->num_mappings = num_kernel_image_mappings;
 
 	tb = &trap_block[cpu];
 	tb->hdesc = hdesc;
@@ -312,13 +315,11 @@ static void ldom_startcpu_cpuid(unsigned int cpu, unsigned long thread_reg)
 	tte_vaddr = (unsigned long) KERNBASE;
 	tte_data = kern_locked_tte_data;
 
-	hdesc->maps[0].vaddr = tte_vaddr;
-	hdesc->maps[0].tte   = tte_data;
-	if (bigkernel) {
+	for (i = 0; i < hdesc->num_mappings; i++) {
+		hdesc->maps[i].vaddr = tte_vaddr;
+		hdesc->maps[i].tte   = tte_data;
 		tte_vaddr += 0x400000;
 		tte_data  += 0x400000;
-		hdesc->maps[1].vaddr = tte_vaddr;
-		hdesc->maps[1].tte   = tte_data;
 	}
 
 	trampoline_ra = kimage_addr_to_ra(hv_cpu_startup);
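
The kzalloc() sizing uses the pre-C99 one-element-array idiom: struct hvtramp_descr ends in maps[1] (see the include/asm-sparc64/hvtramp.h hunk below), so one element is already counted inside sizeof(*hdesc) and only n - 1 extras are added. A standalone sketch of the same sizing, with struct names mirroring the kernel's:

#include <stdlib.h>

struct hvtramp_mapping { unsigned long vaddr, tte; };

struct hvtramp_descr {
	unsigned long cpu;
	unsigned long num_mappings;
	struct hvtramp_mapping maps[1];	/* really num_mappings entries */
};

static struct hvtramp_descr *descr_alloc(unsigned long n)
{
	/* maps[0] lives inside sizeof(*h), so add space for n - 1 more */
	struct hvtramp_descr *h =
		calloc(1, sizeof(*h) + sizeof(struct hvtramp_mapping) * (n - 1));

	if (h)
		h->num_mappings = n;
	return h;
}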

+ 0 - 3
arch/sparc64/kernel/sys_sparc32.c

@@ -678,9 +678,6 @@ asmlinkage long sparc32_execve(struct pt_regs *regs)
 		current_thread_info()->xfsr[0] = 0;
 		current_thread_info()->fpsaved[0] = 0;
 		regs->tstate &= ~TSTATE_PEF;
-		task_lock(current);
-		current->ptrace &= ~PT_DTRACE;
-		task_unlock(current);
 	}
 out:
 	return error;

+ 64 - 124
arch/sparc64/kernel/trampoline.S

@@ -105,7 +105,7 @@ startup_continue:
 	wr		%g2, 0, %tick_cmpr
 
 	/* Call OBP by hand to lock KERNBASE into i/d tlbs.
-	 * We lock 2 consequetive entries if we are 'bigkernel'.
+	 * We lock 'num_kernel_image_mappings' consequetive entries.
 	 */
 	sethi		%hi(prom_entry_lock), %g2
 1:	ldstub		[%g2 + %lo(prom_entry_lock)], %g1
@@ -119,6 +119,29 @@ startup_continue:
 	add		%l2, -(192 + 128), %sp
 	flushw
 
+	/* Setup the loop variables:
+	 * %l3: VADDR base
+	 * %l4: TTE base
+	 * %l5: Loop iterator, iterates from 0 to 'num_kernel_image_mappings'
+	 * %l6: Number of TTE entries to map
+	 * %l7: Highest TTE entry number, we count down
+	 */
+	sethi		%hi(KERNBASE), %l3
+	sethi		%hi(kern_locked_tte_data), %l4
+	ldx		[%l4 + %lo(kern_locked_tte_data)], %l4
+	clr		%l5
+	sethi		%hi(num_kernel_image_mappings), %l6
+	lduw		[%l6 + %lo(num_kernel_image_mappings)], %l6
+	add		%l6, 1, %l6
+
+	mov		15, %l7
+	BRANCH_IF_ANY_CHEETAH(g1,g5,2f)
+
+	mov		63, %l7
+2:
+
+3:
+	/* Lock into I-MMU */
 	sethi		%hi(call_method), %g2
 	or		%g2, %lo(call_method), %g2
 	stx		%g2, [%sp + 2047 + 128 + 0x00]
@@ -132,63 +155,26 @@ startup_continue:
 	sethi		%hi(prom_mmu_ihandle_cache), %g2
 	lduw		[%g2 + %lo(prom_mmu_ihandle_cache)], %g2
 	stx		%g2, [%sp + 2047 + 128 + 0x20]
-	sethi		%hi(KERNBASE), %g2
-	stx		%g2, [%sp + 2047 + 128 + 0x28]
-	sethi		%hi(kern_locked_tte_data), %g2
-	ldx		[%g2 + %lo(kern_locked_tte_data)], %g2
-	stx		%g2, [%sp + 2047 + 128 + 0x30]
-
-	mov		15, %g2
-	BRANCH_IF_ANY_CHEETAH(g1,g5,1f)
 
-	mov		63, %g2
-1:
-	stx		%g2, [%sp + 2047 + 128 + 0x38]
-	sethi		%hi(p1275buf), %g2
-	or		%g2, %lo(p1275buf), %g2
-	ldx		[%g2 + 0x08], %o1
-	call		%o1
-	 add		%sp, (2047 + 128), %o0
+	/* Each TTE maps 4MB, convert index to offset.  */
+	sllx		%l5, 22, %g1
 
-	sethi		%hi(bigkernel), %g2
-	lduw		[%g2 + %lo(bigkernel)], %g2
-	brz,pt		%g2, do_dtlb
-	 nop
+	add		%l3, %g1, %g2
+	stx		%g2, [%sp + 2047 + 128 + 0x28]	! VADDR
+	add		%l4, %g1, %g2
+	stx		%g2, [%sp + 2047 + 128 + 0x30]	! TTE
 
-	sethi		%hi(call_method), %g2
-	or		%g2, %lo(call_method), %g2
-	stx		%g2, [%sp + 2047 + 128 + 0x00]
-	mov		5, %g2
-	stx		%g2, [%sp + 2047 + 128 + 0x08]
-	mov		1, %g2
-	stx		%g2, [%sp + 2047 + 128 + 0x10]
-	sethi		%hi(itlb_load), %g2
-	or		%g2, %lo(itlb_load), %g2
-	stx		%g2, [%sp + 2047 + 128 + 0x18]
-	sethi		%hi(prom_mmu_ihandle_cache), %g2
-	lduw		[%g2 + %lo(prom_mmu_ihandle_cache)], %g2
-	stx		%g2, [%sp + 2047 + 128 + 0x20]
-	sethi		%hi(KERNBASE + 0x400000), %g2
-	stx		%g2, [%sp + 2047 + 128 + 0x28]
-	sethi		%hi(kern_locked_tte_data), %g2
-	ldx		[%g2 + %lo(kern_locked_tte_data)], %g2
-	sethi		%hi(0x400000), %g1
-	add		%g2, %g1, %g2
-	stx		%g2, [%sp + 2047 + 128 + 0x30]
-
-	mov		14, %g2
-	BRANCH_IF_ANY_CHEETAH(g1,g5,1f)
-
-	mov		62, %g2
-1:
+	/* TTE index is highest minus loop index.  */
+	sub		%l7, %l5, %g2
 	stx		%g2, [%sp + 2047 + 128 + 0x38]
+
 	sethi		%hi(p1275buf), %g2
 	or		%g2, %lo(p1275buf), %g2
 	ldx		[%g2 + 0x08], %o1
 	call		%o1
 	 add		%sp, (2047 + 128), %o0
 
-do_dtlb:
+	/* Lock into D-MMU */
 	sethi		%hi(call_method), %g2
 	or		%g2, %lo(call_method), %g2
 	stx		%g2, [%sp + 2047 + 128 + 0x00]
@@ -202,65 +188,30 @@ do_dtlb:
 	sethi		%hi(prom_mmu_ihandle_cache), %g2
 	lduw		[%g2 + %lo(prom_mmu_ihandle_cache)], %g2
 	stx		%g2, [%sp + 2047 + 128 + 0x20]
-	sethi		%hi(KERNBASE), %g2
-	stx		%g2, [%sp + 2047 + 128 + 0x28]
-	sethi		%hi(kern_locked_tte_data), %g2
-	ldx		[%g2 + %lo(kern_locked_tte_data)], %g2
-	stx		%g2, [%sp + 2047 + 128 + 0x30]
 
-	mov		15, %g2
-	BRANCH_IF_ANY_CHEETAH(g1,g5,1f)
+	/* Each TTE maps 4MB, convert index to offset.  */
+	sllx		%l5, 22, %g1
 
-	mov		63, %g2
-1:
+	add		%l3, %g1, %g2
+	stx		%g2, [%sp + 2047 + 128 + 0x28]	! VADDR
+	add		%l4, %g1, %g2
+	stx		%g2, [%sp + 2047 + 128 + 0x30]	! TTE
 
+	/* TTE index is highest minus loop index.  */
+	sub		%l7, %l5, %g2
 	stx		%g2, [%sp + 2047 + 128 + 0x38]
+
 	sethi		%hi(p1275buf), %g2
 	or		%g2, %lo(p1275buf), %g2
 	ldx		[%g2 + 0x08], %o1
 	call		%o1
 	 add		%sp, (2047 + 128), %o0
 
-	sethi		%hi(bigkernel), %g2
-	lduw		[%g2 + %lo(bigkernel)], %g2
-	brz,pt		%g2, do_unlock
+	add		%l5, 1, %l5
+	cmp		%l5, %l6
+	bne,pt		%xcc, 3b
 	 nop
 
-	sethi		%hi(call_method), %g2
-	or		%g2, %lo(call_method), %g2
-	stx		%g2, [%sp + 2047 + 128 + 0x00]
-	mov		5, %g2
-	stx		%g2, [%sp + 2047 + 128 + 0x08]
-	mov		1, %g2
-	stx		%g2, [%sp + 2047 + 128 + 0x10]
-	sethi		%hi(dtlb_load), %g2
-	or		%g2, %lo(dtlb_load), %g2
-	stx		%g2, [%sp + 2047 + 128 + 0x18]
-	sethi		%hi(prom_mmu_ihandle_cache), %g2
-	lduw		[%g2 + %lo(prom_mmu_ihandle_cache)], %g2
-	stx		%g2, [%sp + 2047 + 128 + 0x20]
-	sethi		%hi(KERNBASE + 0x400000), %g2
-	stx		%g2, [%sp + 2047 + 128 + 0x28]
-	sethi		%hi(kern_locked_tte_data), %g2
-	ldx		[%g2 + %lo(kern_locked_tte_data)], %g2
-	sethi		%hi(0x400000), %g1
-	add		%g2, %g1, %g2
-	stx		%g2, [%sp + 2047 + 128 + 0x30]
-
-	mov		14, %g2
-	BRANCH_IF_ANY_CHEETAH(g1,g5,1f)
-
-	mov		62, %g2
-1:
-
-	stx		%g2, [%sp + 2047 + 128 + 0x38]
-	sethi		%hi(p1275buf), %g2
-	or		%g2, %lo(p1275buf), %g2
-	ldx		[%g2 + 0x08], %o1
-	call		%o1
-	 add		%sp, (2047 + 128), %o0
-
-do_unlock:
 	sethi		%hi(prom_entry_lock), %g2
 	stb		%g0, [%g2 + %lo(prom_entry_lock)]
 	membar		#StoreStore | #StoreLoad
@@ -269,47 +220,36 @@ do_unlock:
 	 nop
 
 niagara_lock_tlb:
+	sethi		%hi(KERNBASE), %l3
+	sethi		%hi(kern_locked_tte_data), %l4
+	ldx		[%l4 + %lo(kern_locked_tte_data)], %l4
+	clr		%l5
+	sethi		%hi(num_kernel_image_mappings), %l6
+	lduw		[%l6 + %lo(num_kernel_image_mappings)], %l6
+	add		%l6, 1, %l6
+
+1:
 	mov		HV_FAST_MMU_MAP_PERM_ADDR, %o5
-	sethi		%hi(KERNBASE), %o0
+	sllx		%l5, 22, %g2
+	add		%l3, %g2, %o0
 	clr		%o1
-	sethi		%hi(kern_locked_tte_data), %o2
-	ldx		[%o2 + %lo(kern_locked_tte_data)], %o2
+	add		%l4, %g2, %o2
 	mov		HV_MMU_IMMU, %o3
 	ta		HV_FAST_TRAP
 
 	mov		HV_FAST_MMU_MAP_PERM_ADDR, %o5
-	sethi		%hi(KERNBASE), %o0
+	sllx		%l5, 22, %g2
+	add		%l3, %g2, %o0
 	clr		%o1
-	sethi		%hi(kern_locked_tte_data), %o2
-	ldx		[%o2 + %lo(kern_locked_tte_data)], %o2
+	add		%l4, %g2, %o2
 	mov		HV_MMU_DMMU, %o3
 	ta		HV_FAST_TRAP
 
-	sethi		%hi(bigkernel), %g2
-	lduw		[%g2 + %lo(bigkernel)], %g2
-	brz,pt		%g2, after_lock_tlb
+	add		%l5, 1, %l5
+	cmp		%l5, %l6
+	bne,pt		%xcc, 1b
 	 nop
 
-	mov		HV_FAST_MMU_MAP_PERM_ADDR, %o5
-	sethi		%hi(KERNBASE + 0x400000), %o0
-	clr		%o1
-	sethi		%hi(kern_locked_tte_data), %o2
-	ldx		[%o2 + %lo(kern_locked_tte_data)], %o2
-	sethi		%hi(0x400000), %o3
-	add		%o2, %o3, %o2
-	mov		HV_MMU_IMMU, %o3
-	ta		HV_FAST_TRAP
-
-	mov		HV_FAST_MMU_MAP_PERM_ADDR, %o5
-	sethi		%hi(KERNBASE + 0x400000), %o0
-	clr		%o1
-	sethi		%hi(kern_locked_tte_data), %o2
-	ldx		[%o2 + %lo(kern_locked_tte_data)], %o2
-	sethi		%hi(0x400000), %o3
-	add		%o2, %o3, %o2
-	mov		HV_MMU_DMMU, %o3
-	ta		HV_FAST_TRAP
-
 after_lock_tlb:
 	wrpr		%g0, (PSTATE_PRIV | PSTATE_PEF), %pstate
 	wr		%g0, 0, %fprs

+ 14 - 24
arch/sparc64/mm/init.c

@@ -166,7 +166,7 @@ unsigned long sparc64_kern_pri_context __read_mostly;
 unsigned long sparc64_kern_pri_nuc_bits __read_mostly;
 unsigned long sparc64_kern_sec_context __read_mostly;
 
-int bigkernel = 0;
+int num_kernel_image_mappings;
 
 #ifdef CONFIG_DEBUG_DCFLUSH
 atomic_t dcpage_flushes = ATOMIC_INIT(0);
@@ -572,7 +572,7 @@ static unsigned long kern_large_tte(unsigned long paddr);
 static void __init remap_kernel(void)
 {
 	unsigned long phys_page, tte_vaddr, tte_data;
-	int tlb_ent = sparc64_highest_locked_tlbent();
+	int i, tlb_ent = sparc64_highest_locked_tlbent();
 
 	tte_vaddr = (unsigned long) KERNBASE;
 	phys_page = (prom_boot_mapping_phys_low >> 22UL) << 22UL;
@@ -582,27 +582,20 @@ static void __init remap_kernel(void)
 
 	/* Now lock us into the TLBs via Hypervisor or OBP. */
 	if (tlb_type == hypervisor) {
-		hypervisor_tlb_lock(tte_vaddr, tte_data, HV_MMU_DMMU);
-		hypervisor_tlb_lock(tte_vaddr, tte_data, HV_MMU_IMMU);
-		if (bigkernel) {
-			tte_vaddr += 0x400000;
-			tte_data += 0x400000;
+		for (i = 0; i < num_kernel_image_mappings; i++) {
 			hypervisor_tlb_lock(tte_vaddr, tte_data, HV_MMU_DMMU);
 			hypervisor_tlb_lock(tte_vaddr, tte_data, HV_MMU_IMMU);
+			tte_vaddr += 0x400000;
+			tte_data += 0x400000;
 		}
 	} else {
-		prom_dtlb_load(tlb_ent, tte_data, tte_vaddr);
-		prom_itlb_load(tlb_ent, tte_data, tte_vaddr);
-		if (bigkernel) {
-			tlb_ent -= 1;
-			prom_dtlb_load(tlb_ent,
-				       tte_data + 0x400000, 
-				       tte_vaddr + 0x400000);
-			prom_itlb_load(tlb_ent,
-				       tte_data + 0x400000, 
-				       tte_vaddr + 0x400000);
+		for (i = 0; i < num_kernel_image_mappings; i++) {
+			prom_dtlb_load(tlb_ent - i, tte_data, tte_vaddr);
+			prom_itlb_load(tlb_ent - i, tte_data, tte_vaddr);
+			tte_vaddr += 0x400000;
+			tte_data += 0x400000;
 		}
 		}
+		sparc64_highest_unlocked_tlb_ent = tlb_ent - i;
 	}
 	}
 	if (tlb_type == cheetah_plus) {
 		sparc64_kern_pri_context = (CTX_CHEETAH_PLUS_CTX0 |
 	shift = kern_base + PAGE_OFFSET - ((unsigned long)KERNBASE);
 	shift = kern_base + PAGE_OFFSET - ((unsigned long)KERNBASE);
 
 	real_end = (unsigned long)_end;
-		bigkernel = 1;
-	if ((real_end > ((unsigned long)KERNBASE + 0x800000))) {
-		prom_printf("paging_init: Kernel > 8MB, too large.\n");
-		prom_halt();
-	}
+	num_kernel_image_mappings = DIV_ROUND_UP(real_end - KERNBASE, 1 << 22);
+	printk("Kernel: Using %d locked TLB entries for main kernel image.\n",
+	       num_kernel_image_mappings);
 
 	/* Set kernel pgd to upper alias so physical page computations
 	 * work.
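
DIV_ROUND_UP() is the kernel's ceiling division, and each locked TLB entry covers one 4MB (1 << 22) slice of the image, so the count is ceil(image_size / 4MB). A sketch of the arithmetic:

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

/* A 9MB image needs ceil(9 / 4) = 3 locked 4MB entries. */
static int image_mappings(unsigned long real_end, unsigned long kernbase)
{
	return DIV_ROUND_UP(real_end - kernbase, 1UL << 22);
}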

+ 3 - 3
arch/x86/mm/ioremap.c

@@ -106,7 +106,7 @@ static int ioremap_change_attr(unsigned long vaddr, unsigned long size,
  * have to convert them into an offset in a page-aligned mapping, but the
  * caller shouldn't need to know that small detail.
  */
-static void __iomem *__ioremap(unsigned long phys_addr, unsigned long size,
+static void __iomem *__ioremap(resource_size_t phys_addr, unsigned long size,
 			       enum ioremap_mode mode)
 {
 	unsigned long pfn, offset, last_addr, vaddr;
@@ -193,13 +193,13 @@ static void __iomem *__ioremap(unsigned long phys_addr, unsigned long size,
  *
  * Must be freed with iounmap.
  */
-void __iomem *ioremap_nocache(unsigned long phys_addr, unsigned long size)
+void __iomem *ioremap_nocache(resource_size_t phys_addr, unsigned long size)
 {
 	return __ioremap(phys_addr, size, IOR_MODE_UNCACHED);
 }
 EXPORT_SYMBOL(ioremap_nocache);
 
-void __iomem *ioremap_cache(unsigned long phys_addr, unsigned long size)
+void __iomem *ioremap_cache(resource_size_t phys_addr, unsigned long size)
 {
 	return __ioremap(phys_addr, size, IOR_MODE_CACHED);
 }
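
resource_size_t tracks the configured width of physical resource addresses; with 64-bit resources on a 32-bit kernel it is a u64, so a PCI BAR above 4GB is no longer truncated by the old unsigned long prototype. A standalone sketch of the failure mode (the typedef here is an assumption standing in for the kernel's):

#include <stdint.h>
#include <stdio.h>

typedef uint64_t resource_size_t;	/* assumed 64-bit, as with 64-bit resources */

int main(void)
{
	resource_size_t bar = 0x100000000ULL;	/* BAR just above 4GB */
	unsigned long old_arg = (unsigned long)bar;

	/* on a 32-bit build old_arg is 0: the address was silently lost */
	printf("full %llx, truncated %lx\n",
	       (unsigned long long)bar, old_arg);
	return 0;
}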

+ 1 - 1
drivers/connector/cn_queue.c

@@ -146,7 +146,7 @@ struct cn_queue_dev *cn_queue_alloc_dev(char *name, struct sock *nls)
 
 	dev->nls = nls;
 
-	dev->cn_queue = create_workqueue(dev->name);
+	dev->cn_queue = create_singlethread_workqueue(dev->name);
 	if (!dev->cn_queue) {
 		kfree(dev);
 		return NULL;

+ 2 - 34
drivers/net/bnx2x.c

@@ -63,8 +63,8 @@
 #include "bnx2x.h"
 #include "bnx2x_init.h"
 
-#define DRV_MODULE_VERSION      "1.40.22"
-#define DRV_MODULE_RELDATE      "2007/11/27"
+#define DRV_MODULE_VERSION      "1.42.3"
+#define DRV_MODULE_RELDATE      "2008/3/9"
 #define BNX2X_BC_VER    	0x040200
 
 /* Time in jiffies before concluding the transmitter is hung. */
@@ -8008,38 +8008,6 @@ static int bnx2x_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
 	   cmd->duplex, cmd->port, cmd->phy_address, cmd->transceiver,
 	   cmd->autoneg, cmd->maxtxpkt, cmd->maxrxpkt);
 
-	switch (cmd->port) {
-	case PORT_TP:
-		if (!(bp->supported & SUPPORTED_TP)) {
-			DP(NETIF_MSG_LINK, "TP not supported\n");
-			return -EINVAL;
-		}
-
-		if (bp->phy_flags & PHY_XGXS_FLAG) {
-			bnx2x_link_reset(bp);
-			bnx2x_link_settings_supported(bp, SWITCH_CFG_1G);
-			bnx2x_phy_deassert(bp);
-		}
-		break;
-
-	case PORT_FIBRE:
-		if (!(bp->supported & SUPPORTED_FIBRE)) {
-			DP(NETIF_MSG_LINK, "FIBRE not supported\n");
-			return -EINVAL;
-		}
-
-		if (!(bp->phy_flags & PHY_XGXS_FLAG)) {
-			bnx2x_link_reset(bp);
-			bnx2x_link_settings_supported(bp, SWITCH_CFG_10G);
-			bnx2x_phy_deassert(bp);
-		}
-		break;
-
-	default:
-		DP(NETIF_MSG_LINK, "Unknown port type\n");
-		return -EINVAL;
-	}
-
 	if (cmd->autoneg == AUTONEG_ENABLE) {
 		if (!(bp->supported & SUPPORTED_Autoneg)) {
 			DP(NETIF_MSG_LINK, "Aotoneg not supported\n");

+ 2 - 1
drivers/net/fec_mpc52xx_phy.c

@@ -109,7 +109,8 @@ static int mpc52xx_fec_mdio_probe(struct of_device *of, const struct of_device_i
 		int irq = irq_of_parse_and_map(child, 0);
 		if (irq != NO_IRQ) {
 			const u32 *id = of_get_property(child, "reg", NULL);
-			bus->irq[*id] = irq;
+			if (id)
+				bus->irq[*id] = irq;
 		}
 	}
 

+ 1 - 1
drivers/net/sungem.c

@@ -912,7 +912,7 @@ static int gem_poll(struct napi_struct *napi, int budget)
 		 * rx ring - must call napi_disable(), which
 		 * schedule_timeout()'s if polling is already disabled.
 		 */
-		work_done += gem_rx(gp, budget);
+		work_done += gem_rx(gp, budget - work_done);
 
 		if (work_done >= budget)
 			return work_done;
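
The fix matters on a second pass through gem_poll()'s loop: work_done already counts packets from the first pass, so handing gem_rx() the full budget again could overrun the NAPI quota. A sketch of the accounting (rx_clean() and more_rx_pending() are hypothetical stand-ins):

static int rx_clean(int quota);		/* processes at most 'quota' packets */
static int more_rx_pending(void);

static int poll_sketch(int budget)
{
	int work_done = 0;

	for (;;) {
		/* only ever pass the quota that is still left */
		work_done += rx_clean(budget - work_done);

		if (work_done >= budget)
			return work_done;	/* quota used up, poll again later */
		if (!more_rx_pending())
			return work_done;	/* done; napi_complete() goes here */
	}
}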

+ 1 - 1
include/asm-sparc64/hvtramp.h

@@ -16,7 +16,7 @@ struct hvtramp_descr {
 	__u64			fault_info_va;
 	__u64			fault_info_pa;
 	__u64			thread_reg;
-	struct hvtramp_mapping	maps[2];
+	struct hvtramp_mapping	maps[1];
 };
 
 extern void hv_cpu_startup(unsigned long hvdescr_pa);

+ 2 - 0
include/asm-sparc64/spitfire.h

@@ -63,6 +63,8 @@ extern void cheetah_enable_pcache(void);
 	 SPITFIRE_HIGHEST_LOCKED_TLBENT : \
 	 CHEETAH_HIGHEST_LOCKED_TLBENT)
 
+extern int num_kernel_image_mappings;
+
 /* The data cache is write through, so this just invalidates the
  * specified line.
  */

+ 3 - 3
include/asm-x86/io_32.h

@@ -114,13 +114,13 @@ static inline void * phys_to_virt(unsigned long address)
  * If the area you are trying to map is a PCI BAR you should have a
  * look at pci_iomap().
  */
-extern void __iomem *ioremap_nocache(unsigned long offset, unsigned long size);
-extern void __iomem *ioremap_cache(unsigned long offset, unsigned long size);
+extern void __iomem *ioremap_nocache(resource_size_t offset, unsigned long size);
+extern void __iomem *ioremap_cache(resource_size_t offset, unsigned long size);
 
 /*
  * The default ioremap() behavior is non-cached:
  */
-static inline void __iomem *ioremap(unsigned long offset, unsigned long size)
+static inline void __iomem *ioremap(resource_size_t offset, unsigned long size)
 {
 	return ioremap_nocache(offset, size);
 }

+ 3 - 3
include/asm-x86/io_64.h

@@ -158,13 +158,13 @@ extern void early_iounmap(void *addr, unsigned long size);
  * it's useful if some control registers are in such an area and write combining
  * or read caching is not desirable:
  */
-extern void __iomem *ioremap_nocache(unsigned long offset, unsigned long size);
-extern void __iomem *ioremap_cache(unsigned long offset, unsigned long size);
+extern void __iomem *ioremap_nocache(resource_size_t offset, unsigned long size);
+extern void __iomem *ioremap_cache(resource_size_t offset, unsigned long size);
 
 /*
  * The default ioremap() behavior is non-cached:
  */
-static inline void __iomem *ioremap(unsigned long offset, unsigned long size)
+static inline void __iomem *ioremap(resource_size_t offset, unsigned long size)
 {
 	return ioremap_nocache(offset, size);
 }

+ 1 - 1
include/net/sctp/sctp.h

@@ -389,7 +389,7 @@ void sctp_v6_del_protocol(void);
 
 #else /* #ifdef defined(CONFIG_IPV6) */
 
-static inline void sctp_v6_pf_init(void) { return 0; }
+static inline void sctp_v6_pf_init(void) { return; }
 static inline void sctp_v6_pf_exit(void) { return; }
 static inline int sctp_v6_protosw_init(void) { return 0; }
 static inline void sctp_v6_protosw_exit(void) { return; }

+ 4 - 0
kernel/time/timekeeping.c

@@ -191,8 +191,12 @@ static void change_clocksource(void)
 
 	tick_clock_notify();
 
+	/*
+	 * We're holding xtime lock and waking up klogd would deadlock
+	 * us on enqueue.  So no printing!
 	printk(KERN_INFO "Time: %s clocksource has been installed.\n",
 	       clock->name);
+	 */
 }
 #else
 static inline void change_clocksource(void) { }

+ 1 - 1
lib/iomap.c

@@ -256,7 +256,7 @@ EXPORT_SYMBOL(ioport_unmap);
  * */
 void __iomem *pci_iomap(struct pci_dev *dev, int bar, unsigned long maxlen)
 {
-	unsigned long start = pci_resource_start(dev, bar);
+	resource_size_t start = pci_resource_start(dev, bar);
 	unsigned long len = pci_resource_len(dev, bar);
 	unsigned long flags = pci_resource_flags(dev, bar);
 

+ 0 - 2
net/9p/trans_fd.c

@@ -861,7 +861,6 @@ static void p9_mux_free_request(struct p9_conn *m, struct p9_req *req)
 
 static void p9_mux_flush_cb(struct p9_req *freq, void *a)
 {
-	p9_conn_req_callback cb;
 	int tag;
 	struct p9_conn *m;
 	struct p9_req *req, *rreq, *rptr;
@@ -872,7 +871,6 @@ static void p9_mux_flush_cb(struct p9_req *freq, void *a)
 		freq->tcall->params.tflush.oldtag);
 
 	spin_lock(&m->lock);
-	cb = NULL;
 	tag = freq->tcall->params.tflush.oldtag;
 	req = NULL;
 	list_for_each_entry_safe(rreq, rptr, &m->req_list, req_list) {

+ 16 - 3
net/atm/clip.c

@@ -947,6 +947,8 @@ static const struct file_operations arp_seq_fops = {
 };
 #endif
 
+static void atm_clip_exit_noproc(void);
+
 static int __init atm_clip_init(void)
 {
 	neigh_table_init_no_netlink(&clip_tbl);
@@ -963,18 +965,22 @@ static int __init atm_clip_init(void)
 		struct proc_dir_entry *p;
 
 		p = proc_create("arp", S_IRUGO, atm_proc_root, &arp_seq_fops);
+		if (!p) {
+			printk(KERN_ERR "Unable to initialize "
+			       "/proc/net/atm/arp\n");
+			atm_clip_exit_noproc();
+			return -ENOMEM;
+		}
 	}
 #endif
 
 	return 0;
 }
 
-static void __exit atm_clip_exit(void)
+static void atm_clip_exit_noproc(void)
 {
 	struct net_device *dev, *next;
 
-	remove_proc_entry("arp", atm_proc_root);
-
 	unregister_inetaddr_notifier(&clip_inet_notifier);
 	unregister_netdevice_notifier(&clip_dev_notifier);
 
@@ -1005,6 +1011,13 @@ static void __exit atm_clip_exit(void)
 	clip_tbl_hook = NULL;
 }
 
+static void __exit atm_clip_exit(void)
+{
+	remove_proc_entry("arp", atm_proc_root);
+
+	atm_clip_exit_noproc();
+}
+
 module_init(atm_clip_init);
 module_exit(atm_clip_exit);
 MODULE_AUTHOR("Werner Almesberger");

+ 4 - 0
net/atm/lec.c

@@ -1250,6 +1250,10 @@ static int __init lane_module_init(void)
 	struct proc_dir_entry *p;
 
 	p = proc_create("lec", S_IRUGO, atm_proc_root, &lec_seq_fops);
+	if (!p) {
+		printk(KERN_ERR "Unable to initialize /proc/net/atm/lec\n");
+		return -ENOMEM;
+	}
 #endif
 
 	register_atm_ioctl(&lane_ioctl_ops);

+ 5 - 2
net/ipv4/fib_trie.c

@@ -177,10 +177,13 @@ static inline struct tnode *node_parent_rcu(struct node *node)
 	return rcu_dereference(ret);
 }
 
+/* Same as rcu_assign_pointer
+ * but that macro() assumes that value is a pointer.
+ */
 static inline void node_set_parent(struct node *node, struct tnode *ptr)
 {
-	rcu_assign_pointer(node->parent,
-			   (unsigned long)ptr | NODE_TYPE(node));
+	smp_wmb();
+	node->parent = (unsigned long)ptr | NODE_TYPE(node);
 }
 
 static inline struct node *tnode_get_child(struct tnode *tn, unsigned int i)
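
node->parent is an unsigned long carrying type bits, not a plain pointer, so the patch open-codes the barrier that rcu_assign_pointer() would otherwise supply: publish the value only after a write barrier has ordered all prior initialization stores. The pattern, sketched in kernel style:

/* Publish an initialized object through a non-pointer word.  The
 * smp_wmb() orders every store that set the object up before the
 * store that makes it reachable, just as rcu_assign_pointer() does
 * for real pointers. */
static inline void publish_word(unsigned long *slot, unsigned long val)
{
	smp_wmb();
	*slot = val;
}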

+ 1 - 1
net/ipv4/ip_fragment.c

@@ -568,7 +568,7 @@ int ip_defrag(struct sk_buff *skb, u32 user)
 
 	IP_INC_STATS_BH(IPSTATS_MIB_REASMREQDS);
 
-	net = skb->dev->nd_net;
+	net = skb->dev ? skb->dev->nd_net : skb->dst->dev->nd_net;
 	/* Start by cleaning up the memory. */
 	if (atomic_read(&net->ipv4.frags.mem) > net->ipv4.frags.high_thresh)
 		ip_evictor(net);

+ 2 - 2
net/ipv4/tcp.c

@@ -735,7 +735,7 @@ new_segment:
 		if (!(psize -= copy))
 			goto out;
 
-		if (skb->len < mss_now || (flags & MSG_OOB))
+		if (skb->len < size_goal || (flags & MSG_OOB))
 			continue;
 
 		if (forced_push(tp)) {
@@ -981,7 +981,7 @@ new_segment:
 			if ((seglen -= copy) == 0 && iovlen == 0)
 				goto out;
 
-			if (skb->len < mss_now || (flags & MSG_OOB))
+			if (skb->len < size_goal || (flags & MSG_OOB))
 				continue;
 
 			if (forced_push(tp)) {

+ 0 - 2
net/ipv6/ndisc.c

@@ -1420,7 +1420,6 @@ void ndisc_send_redirect(struct sk_buff *skb, struct neighbour *neigh,
 	u8 *opt;
 	int rd_len;
 	int err;
-	int hlen;
 	u8 ha_buf[MAX_ADDR_LEN], *ha = NULL;
 
 	dev = skb->dev;
@@ -1491,7 +1490,6 @@ void ndisc_send_redirect(struct sk_buff *skb, struct neighbour *neigh,
 		return;
 	}
 
-	hlen = 0;
 
 	skb_reserve(buff, LL_RESERVED_SPACE(dev));
 	ip6_nd_hdr(sk, buff, dev, &saddr_buf, &ipv6_hdr(skb)->saddr,

+ 7 - 6
net/sched/sch_htb.c

@@ -711,9 +711,11 @@ static void htb_charge_class(struct htb_sched *q, struct htb_class *cl,
  */
 static psched_time_t htb_do_events(struct htb_sched *q, int level)
 {
-	int i;
-
-	for (i = 0; i < 500; i++) {
+	/* don't run for longer than 2 jiffies; 2 is used instead of
+	   1 to simplify things when jiffy is going to be incremented
+	   too soon */
+	unsigned long stop_at = jiffies + 2;
+	while (time_before(jiffies, stop_at)) {
 		struct htb_class *cl;
 		long diff;
 		struct rb_node *p = rb_first(&q->wait_pq[level]);
@@ -731,9 +733,8 @@ static psched_time_t htb_do_events(struct htb_sched *q, int level)
 		if (cl->cmode != HTB_CAN_SEND)
 			htb_add_to_wait_tree(q, cl, diff);
 	}
-	if (net_ratelimit())
-		printk(KERN_WARNING "htb: too many events !\n");
-	return q->now + PSCHED_TICKS_PER_SEC / 10;
+	/* too much load - let's continue on next jiffie */
+	return q->now + PSCHED_TICKS_PER_SEC / HZ;
 }
 
 /* Returns class->node+prio from id-tree where classe's id is >= id. NULL
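
time_before() compares jiffies values with a signed subtraction, which stays correct across counter wraparound, so the loop is now bounded by elapsed ticks rather than an arbitrary 500 iterations. A standalone sketch of why the comparison survives a wrap:

#include <stdio.h>

#define time_before(a, b) ((long)((a) - (b)) < 0)

int main(void)
{
	unsigned long jiffies = ~0UL;		/* counter about to wrap */
	unsigned long stop_at = jiffies + 2;	/* wraps around to 1 */

	/* unsigned subtraction keeps the small signed distance: -2 < 0 */
	printf("%d\n", time_before(jiffies, stop_at));	/* prints 1 */
	return 0;
}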

+ 3 - 4
net/socket.c

@@ -909,11 +909,10 @@ static long sock_ioctl(struct file *file, unsigned cmd, unsigned long arg)
 			if (!dlci_ioctl_hook)
 				request_module("dlci");
 
-			if (dlci_ioctl_hook) {
-				mutex_lock(&dlci_ioctl_mutex);
+			mutex_lock(&dlci_ioctl_mutex);
+			if (dlci_ioctl_hook)
 				err = dlci_ioctl_hook(cmd, argp);
-				mutex_unlock(&dlci_ioctl_mutex);
-			}
+			mutex_unlock(&dlci_ioctl_mutex);
 			break;
 		default:
 			err = sock->ops->ioctl(sock, cmd, arg);

+ 0 - 2
net/sunrpc/xprtrdma/svc_rdma_recvfrom.c

@@ -237,14 +237,12 @@ static void rdma_set_ctxt_sge(struct svc_rdma_op_ctxt *ctxt,
 
 static int rdma_read_max_sge(struct svcxprt_rdma *xprt, int sge_count)
 {
-#ifdef RDMA_TRANSPORT_IWARP
 	if ((RDMA_TRANSPORT_IWARP ==
 	     rdma_node_get_transport(xprt->sc_cm_id->
 				     device->node_type))
 	    && sge_count > 1)
 		return 1;
 	else
-#endif
 		return min_t(int, sge_count, xprt->sc_max_sge);
 }