
merge linus head to drm-mm branch

Dave Airlie, 19 years ago
Parent
Current commit
23bfc1a339

+ 8 - 7
arch/arm/mach-integrator/impd1.c

@@ -67,7 +67,7 @@ static void impd1_setvco(struct clk *clk, struct icst525_vco vco)
 	}
 	writel(0, impd1->base + IMPD1_LOCK);
 
-#if DEBUG
+#ifdef DEBUG
 	vco.v = val & 0x1ff;
 	vco.r = (val >> 9) & 0x7f;
 	vco.s = (val >> 16) & 7;
@@ -427,17 +427,18 @@ static int impd1_probe(struct lm_device *dev)
 	return ret;
 }
 
+static int impd1_remove_one(struct device *dev, void *data)
+{
+	device_unregister(dev);
+	return 0;
+}
+
 static void impd1_remove(struct lm_device *dev)
 {
 	struct impd1_module *impd1 = lm_get_drvdata(dev);
-	struct list_head *l, *n;
 	int i;
 
-	list_for_each_safe(l, n, &dev->dev.children) {
-		struct device *d = list_to_dev(l);
-
-		device_unregister(d);
-	}
+	device_for_each_child(&dev->dev, NULL, impd1_remove_one);
 
 	for (i = 0; i < ARRAY_SIZE(impd1->vcos); i++)
 		clk_unregister(&impd1->vcos[i]);

+ 20 - 0
arch/arm/mach-pxa/generic.c

@@ -250,6 +250,25 @@ void __init pxa_set_i2c_info(struct i2c_pxa_platform_data *info)
 	i2c_device.dev.platform_data = info;
 }
 
+static struct resource i2s_resources[] = {
+	{
+		.start	= 0x40400000,
+		.end	= 0x40400083,
+		.flags	= IORESOURCE_MEM,
+	}, {
+		.start	= IRQ_I2S,
+		.end	= IRQ_I2S,
+		.flags	= IORESOURCE_IRQ,
+	},
+};
+
+static struct platform_device i2s_device = {
+	.name		= "pxa2xx-i2s",
+	.id		= -1,
+	.resource	= i2s_resources,
+	.num_resources	= ARRAY_SIZE(i2s_resources),
+};
+
 static struct platform_device *devices[] __initdata = {
 	&pxamci_device,
 	&udc_device,
@@ -258,6 +277,7 @@ static struct platform_device *devices[] __initdata = {
 	&btuart_device,
 	&stuart_device,
 	&i2c_device,
+	&i2s_device,
 };
 
 static int __init pxa_init(void)

+ 3 - 3
arch/arm/mach-s3c2410/mach-bast.c

@@ -307,9 +307,9 @@ static void bast_nand_select(struct s3c2410_nand_set *set, int slot)
 }
 
 static struct s3c2410_platform_nand bast_nand_info = {
-	.tacls		= 40,
-	.twrph0		= 80,
-	.twrph1		= 80,
+	.tacls		= 30,
+	.twrph0		= 60,
+	.twrph1		= 60,
 	.nr_sets	= ARRAY_SIZE(bast_nand_sets),
 	.sets		= bast_nand_sets,
 	.select_chip	= bast_nand_select,

+ 19 - 11
arch/i386/kernel/cpu/cpufreq/powernow-k8.c

@@ -44,7 +44,7 @@
 
 #define PFX "powernow-k8: "
 #define BFX PFX "BIOS error: "
-#define VERSION "version 1.50.3"
+#define VERSION "version 1.50.4"
 #include "powernow-k8.h"
 
 /* serialize freq changes  */
@@ -111,8 +111,8 @@ static int query_current_values_with_pending_wait(struct powernow_k8_data *data)
 	u32 i = 0;
 
 	do {
-		if (i++ > 0x1000000) {
-			printk(KERN_ERR PFX "detected change pending stuck\n");
+		if (i++ > 10000) {
+			dprintk("detected change pending stuck\n");
 			return 1;
 		}
 		rdmsr(MSR_FIDVID_STATUS, lo, hi);
@@ -159,6 +159,7 @@ static int write_new_fid(struct powernow_k8_data *data, u32 fid)
 {
 	u32 lo;
 	u32 savevid = data->currvid;
+	u32 i = 0;
 
 	if ((fid & INVALID_FID_MASK) || (data->currvid & INVALID_VID_MASK)) {
 		printk(KERN_ERR PFX "internal error - overflow on fid write\n");
@@ -170,10 +171,13 @@ static int write_new_fid(struct powernow_k8_data *data, u32 fid)
 	dprintk("writing fid 0x%x, lo 0x%x, hi 0x%x\n",
 		fid, lo, data->plllock * PLL_LOCK_CONVERSION);
 
-	wrmsr(MSR_FIDVID_CTL, lo, data->plllock * PLL_LOCK_CONVERSION);
-
-	if (query_current_values_with_pending_wait(data))
-		return 1;
+	do {
+		wrmsr(MSR_FIDVID_CTL, lo, data->plllock * PLL_LOCK_CONVERSION);
+		if (i++ > 100) {
+			printk(KERN_ERR PFX "internal error - pending bit very stuck - no further pstate changes possible\n");
+			return 1;
+		}
+	} while (query_current_values_with_pending_wait(data));
 
 	count_off_irt(data);
 
@@ -197,6 +201,7 @@ static int write_new_vid(struct powernow_k8_data *data, u32 vid)
 {
 	u32 lo;
 	u32 savefid = data->currfid;
+	int i = 0;
 
 	if ((data->currfid & INVALID_FID_MASK) || (vid & INVALID_VID_MASK)) {
 		printk(KERN_ERR PFX "internal error - overflow on vid write\n");
@@ -208,10 +213,13 @@ static int write_new_vid(struct powernow_k8_data *data, u32 vid)
 	dprintk("writing vid 0x%x, lo 0x%x, hi 0x%x\n",
 		vid, lo, STOP_GRANT_5NS);
 
-	wrmsr(MSR_FIDVID_CTL, lo, STOP_GRANT_5NS);
-
-	if (query_current_values_with_pending_wait(data))
-		return 1;
+	do {
+		wrmsr(MSR_FIDVID_CTL, lo, STOP_GRANT_5NS);
+		if (i++ > 100) {
+			printk(KERN_ERR PFX "internal error - pending bit very stuck - no further pstate changes possible\n");
+			return 1;
+		}
+	} while (query_current_values_with_pending_wait(data));
 
 	if (savefid != data->currfid) {
 		printk(KERN_ERR PFX "fid changed on vid trans, old 0x%x new 0x%x\n",
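
The two hunks above turn a single wrmsr() plus one status check into a bounded retry loop. A minimal, self-contained sketch of that shape (the do_write/still_pending callbacks are hypothetical stand-ins for the MSR write and query_current_values_with_pending_wait(), not kernel APIs):

/* Bounded-retry pattern: re-issue the write while the status still reports
 * the change as pending, and give up after a fixed number of attempts
 * instead of spinning forever on stuck hardware.
 */
#include <stdio.h>

static int fake_pending = 3;		/* pretend hardware: clears after a few writes */

static void do_write(void) { }
static int still_pending(void) { return --fake_pending > 0; }

static int write_until_not_pending(void)
{
	int i = 0;

	do {
		do_write();
		if (i++ > 100)		/* same bound the patch uses */
			return 1;	/* pending bit stuck: no further changes possible */
	} while (still_pending());

	return 0;			/* value latched, pending bit cleared */
}

int main(void)
{
	printf("write_until_not_pending() = %d\n", write_until_not_pending());
	return 0;
}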

+ 1 - 1
arch/ppc64/kernel/iSeries_htab.c

@@ -66,7 +66,7 @@ static long iSeries_hpte_insert(unsigned long hpte_group, unsigned long va,
 	}
 
 	if (slot < 0) {		/* MSB set means secondary group */
-		vflags |= HPTE_V_VALID;
+		vflags |= HPTE_V_SECONDARY;
 		secondary = 1;
 		slot &= 0x7fffffffffffffff;
 	}

+ 1 - 1
arch/ppc64/kernel/time.c

@@ -870,7 +870,7 @@ void div128_by_32( unsigned long dividend_high, unsigned long dividend_low,
 	rb = ((ra + b) - (x * divisor)) << 32;
 
 	y = (rb + c)/divisor;
-	rc = ((rb + b) - (y * divisor)) << 32;
+	rc = ((rb + c) - (y * divisor)) << 32;
 
 	z = (rc + d)/divisor;
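
For context on the one-character fix above: div128_by_32() does schoolbook long division in base 2^32, where each step divides the previous remainder (shifted up by 32 bits) plus the next 32-bit digit; the old code built the third remainder from digit b instead of digit c. A scaled-down, userspace analogue dividing a 64-bit value by a 16-bit divisor in base-2^16 digits, for illustration only (not the kernel routine):

/* Long division in base 2^16: digits a,b,c,d, quotient digits w,x,y,z.
 * Each remainder is shifted up and combined with the *next* digit before
 * the following divide -- the pattern the fix restores for digit c.
 * divisor must be nonzero.
 */
#include <stdint.h>

static uint64_t div64_by_16(uint64_t dividend, uint16_t divisor)
{
	uint32_t a = (dividend >> 48) & 0xffff;
	uint32_t b = (dividend >> 32) & 0xffff;
	uint32_t c = (dividend >> 16) & 0xffff;
	uint32_t d = dividend & 0xffff;
	uint32_t w, x, y, z, ra, rb, rc;

	w  = a / divisor;
	ra = (a - (w * divisor)) << 16;

	x  = (ra + b) / divisor;
	rb = ((ra + b) - (x * divisor)) << 16;

	y  = (rb + c) / divisor;
	rc = ((rb + c) - (y * divisor)) << 16;	/* "+ c" here, matching y's step */

	z  = (rc + d) / divisor;

	return ((uint64_t)w << 48) | ((uint64_t)x << 32) |
	       ((uint64_t)y << 16) | z;
}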
 

+ 1 - 2
arch/ppc64/mm/init.c

@@ -799,8 +799,7 @@ void update_mmu_cache(struct vm_area_struct *vma, unsigned long ea,
 	if (cpus_equal(vma->vm_mm->cpu_vm_mask, tmp))
 		local = 1;
 
-	__hash_page(ea, pte_val(pte) & (_PAGE_USER|_PAGE_RW), vsid, ptep,
-		    0x300, local);
+	__hash_page(ea, 0, vsid, ptep, 0x300, local);
 	local_irq_restore(flags);
 }
 

+ 1 - 1
drivers/char/drm/mga_drv.h

@@ -228,7 +228,7 @@ static inline u32 _MGA_READ(u32 * addr)
 #define MGA_EMIT_STATE( dev_priv, dirty )				\
 do {									\
 	if ( (dirty) & ~MGA_UPLOAD_CLIPRECTS ) {			\
-		if ( dev_priv->chipset == MGA_CARD_TYPE_G400 ) {	\
+		if ( dev_priv->chipset >= MGA_CARD_TYPE_G400 ) {	\
 			mga_g400_emit_state( dev_priv );		\
 		} else {						\
 			mga_g200_emit_state( dev_priv );		\

+ 1 - 1
drivers/char/drm/mga_state.c

@@ -53,7 +53,7 @@ static void mga_emit_clip_rect(drm_mga_private_t * dev_priv,
 
 	/* Force reset of DWGCTL on G400 (eliminates clip disable bit).
 	 */
-	if (dev_priv->chipset == MGA_CARD_TYPE_G400) {
+	if (dev_priv->chipset >= MGA_CARD_TYPE_G400) {
 		DMA_BLOCK(MGA_DWGCTL, ctx->dwgctl,
 			  MGA_LEN + MGA_EXEC, 0x80000000,
 			  MGA_DWGCTL, ctx->dwgctl,

+ 8 - 4
drivers/message/fusion/mptsas.c

@@ -257,8 +257,8 @@ static void mptsas_print_device_pg0(SasDevicePage0_t *pg0)
 	printk("SAS Address=0x%llX\n", le64_to_cpu(sas_address));
 	printk("Target ID=0x%X\n", pg0->TargetID);
 	printk("Bus=0x%X\n", pg0->Bus);
-	printk("PhyNum=0x%X\n", pg0->PhyNum);
-	printk("AccessStatus=0x%X\n", le16_to_cpu(pg0->AccessStatus));
+	printk("Parent Phy Num=0x%X\n", pg0->PhyNum);
+	printk("Access Status=0x%X\n", le16_to_cpu(pg0->AccessStatus));
 	printk("Device Info=0x%X\n", le32_to_cpu(pg0->DeviceInfo));
 	printk("Flags=0x%X\n", le16_to_cpu(pg0->Flags));
 	printk("Physical Port=0x%X\n", pg0->PhysicalPort);
@@ -270,7 +270,7 @@ static void mptsas_print_expander_pg1(SasExpanderPage1_t *pg1)
 	printk("---- SAS EXPANDER PAGE 1 ------------\n");
 
 	printk("Physical Port=0x%X\n", pg1->PhysicalPort);
-	printk("PHY Identifier=0x%X\n", pg1->Phy);
+	printk("PHY Identifier=0x%X\n", pg1->PhyIdentifier);
 	printk("Negotiated Link Rate=0x%X\n", pg1->NegotiatedLinkRate);
 	printk("Programmed Link Rate=0x%X\n", pg1->ProgrammedLinkRate);
 	printk("Hardware Link Rate=0x%X\n", pg1->HwLinkRate);
@@ -604,7 +604,7 @@ mptsas_sas_expander_pg1(MPT_ADAPTER *ioc, struct mptsas_phyinfo *phy_info,
 	mptsas_print_expander_pg1(buffer);
 
 	/* save config data */
-	phy_info->phy_id = buffer->Phy;
+	phy_info->phy_id = buffer->PhyIdentifier;
 	phy_info->port_id = buffer->PhysicalPort;
 	phy_info->negotiated_link_rate = buffer->NegotiatedLinkRate;
 	phy_info->programmed_link_rate = buffer->ProgrammedLinkRate;
@@ -825,6 +825,8 @@ mptsas_probe_hba_phys(MPT_ADAPTER *ioc, int *index)
 		mptsas_sas_device_pg0(ioc, &port_info->phy_info[i].identify,
 			(MPI_SAS_DEVICE_PGAD_FORM_GET_NEXT_HANDLE <<
 			 MPI_SAS_DEVICE_PGAD_FORM_SHIFT), handle);
+		port_info->phy_info[i].identify.phy_id =
+		    port_info->phy_info[i].phy_id;
 		handle = port_info->phy_info[i].identify.handle;
 
 		if (port_info->phy_info[i].attached.handle) {
@@ -881,6 +883,8 @@ mptsas_probe_expander_phys(MPT_ADAPTER *ioc, u32 *handle, int *index)
 				(MPI_SAS_DEVICE_PGAD_FORM_HANDLE <<
 				 MPI_SAS_DEVICE_PGAD_FORM_SHIFT),
 				port_info->phy_info[i].identify.handle);
+			port_info->phy_info[i].identify.phy_id =
+			    port_info->phy_info[i].phy_id;
 		}
 
 		if (port_info->phy_info[i].attached.handle) {

+ 17 - 4
include/asm-arm/arch-s3c2410/regs-clock.h

@@ -18,7 +18,9 @@
  *    10-Feb-2005 Ben Dooks	    Fixed CAMDIVN address (Guillaume Gourat)
  *    10-Mar-2005 Lucas Villa Real  Changed S3C2410_VA to S3C24XX_VA
  *    27-Aug-2005 Ben Dooks	    Add clock-slow info
- */
+ *    20-Oct-2005 Ben Dooks	    Fixed overflow in PLL (Guillaume Gourat)
+ *    20-Oct-2005 Ben Dooks	    Add masks for DCLK (Guillaume Gourat)
+*/
 
 #ifndef __ASM_ARM_REGS_CLOCK
 #define __ASM_ARM_REGS_CLOCK "$Id: clock.h,v 1.4 2003/04/30 14:50:51 ben Exp $"
@@ -66,11 +68,16 @@
 #define S3C2410_DCLKCON_DCLK0_UCLK   (1<<1)
 #define S3C2410_DCLKCON_DCLK0_DIV(x) (((x) - 1 )<<4)
 #define S3C2410_DCLKCON_DCLK0_CMP(x) (((x) - 1 )<<8)
+#define S3C2410_DCLKCON_DCLK0_DIV_MASK ((0xf)<<4)
+#define S3C2410_DCLKCON_DCLK0_CMP_MASK ((0xf)<<8)
 
 #define S3C2410_DCLKCON_DCLK1EN	     (1<<16)
 #define S3C2410_DCLKCON_DCLK1_PCLK   (0<<17)
 #define S3C2410_DCLKCON_DCLK1_UCLK   (1<<17)
 #define S3C2410_DCLKCON_DCLK1_DIV(x) (((x) - 1) <<20)
+#define S3C2410_DCLKCON_DCLK1_CMP(x) (((x) - 1) <<24)
+#define S3C2410_DCLKCON_DCLK1_DIV_MASK ((0xf) <<20)
+#define S3C2410_DCLKCON_DCLK1_CMP_MASK ((0xf) <<24)
 
 #define S3C2410_CLKDIVN_PDIVN	     (1<<0)
 #define S3C2410_CLKDIVN_HDIVN	     (1<<1)
@@ -83,10 +90,13 @@
 
 #ifndef __ASSEMBLY__
 
+#include <asm/div64.h>
+
 static inline unsigned int
-s3c2410_get_pll(int pllval, int baseclk)
+s3c2410_get_pll(unsigned int pllval, unsigned int baseclk)
 {
-	int mdiv, pdiv, sdiv;
+	unsigned int mdiv, pdiv, sdiv;
+	uint64_t fvco;
 
 	mdiv = pllval >> S3C2410_PLLCON_MDIVSHIFT;
 	pdiv = pllval >> S3C2410_PLLCON_PDIVSHIFT;
@@ -96,7 +106,10 @@ s3c2410_get_pll(int pllval, int baseclk)
 	pdiv &= S3C2410_PLLCON_PDIVMASK;
 	sdiv &= S3C2410_PLLCON_SDIVMASK;
 
-	return (baseclk * (mdiv + 8)) / ((pdiv + 2) << sdiv);
+	fvco = (uint64_t)baseclk * (mdiv + 8);
+	do_div(fvco, (pdiv + 2) << sdiv);
+
+	return (unsigned int)fvco;
 }
 
 #endif /* __ASSEMBLY__ */
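
The switch to uint64_t and do_div() matters because the intermediate VCO product baseclk * (mdiv + 8) can exceed what the old signed int arithmetic holds. A standalone illustration with hypothetical register values (not taken from the patch; a plain 64-bit divide stands in for do_div() so it compiles in userspace):

/* Example: a 16.9344 MHz reference with mdiv = 150 gives
 * 16,934,400 * 158 = 2,675,635,200, which already overflows a signed
 * 32-bit int even though the final PLL output is well in range.
 */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	unsigned int baseclk = 16934400;	/* hypothetical reference clock */
	unsigned int mdiv = 150, pdiv = 2, sdiv = 1;
	uint64_t fvco;

	fvco = (uint64_t)baseclk * (mdiv + 8);	/* widened multiply, as in the patch */
	fvco /= (pdiv + 2) << sdiv;		/* userspace stand-in for do_div() */

	printf("pll output: %llu Hz\n", (unsigned long long)fvco);
	return 0;
}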

+ 3 - 13
include/linux/hugetlb.h

@@ -25,6 +25,8 @@ int is_hugepage_mem_enough(size_t);
 unsigned long hugetlb_total_pages(void);
 struct page *alloc_huge_page(void);
 void free_huge_page(struct page *);
+int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
+			unsigned long address, int write_access);
 
 extern unsigned long max_huge_pages;
 extern const unsigned long hugetlb_zero, hugetlb_infinity;
@@ -99,6 +101,7 @@ static inline unsigned long hugetlb_total_pages(void)
 						do { } while (0)
 #define alloc_huge_page()			({ NULL; })
 #define free_huge_page(p)			({ (void)(p); BUG(); })
+#define hugetlb_fault(mm, vma, addr, write)	({ BUG(); 0; })
 
 #ifndef HPAGE_MASK
 #define HPAGE_MASK	0		/* Keep the compiler happy */
@@ -155,24 +158,11 @@ static inline void set_file_hugepages(struct file *file)
 {
 	file->f_op = &hugetlbfs_file_operations;
 }
-
-static inline int valid_hugetlb_file_off(struct vm_area_struct *vma, 
-					  unsigned long address) 
-{
-	struct inode *inode = vma->vm_file->f_dentry->d_inode;
-	loff_t file_off = address - vma->vm_start;
-	
-	file_off += (vma->vm_pgoff << PAGE_SHIFT);
-	
-	return (file_off < inode->i_size);
-}
-
 #else /* !CONFIG_HUGETLBFS */
 
 #define is_file_hugepages(file)		0
 #define set_file_hugepages(file)	BUG()
 #define hugetlb_zero_setup(size)	ERR_PTR(-ENOSYS)
-#define valid_hugetlb_file_off(vma, address) 	0
 
 #endif /* !CONFIG_HUGETLBFS */
 

+ 1 - 0
kernel/exit.c

@@ -843,6 +843,7 @@ fastcall NORET_TYPE void do_exit(long code)
 	group_dead = atomic_dec_and_test(&tsk->signal->live);
 	if (group_dead) {
  		del_timer_sync(&tsk->signal->real_timer);
+		exit_itimers(tsk->signal);
 		acct_process(code);
 	}
 	exit_mm(tsk);

+ 17 - 11
kernel/posix-cpu-timers.c

@@ -387,19 +387,25 @@ int posix_cpu_timer_del(struct k_itimer *timer)
 	if (unlikely(p == NULL))
 		return 0;
 
-	spin_lock(&p->sighand->siglock);
 	if (!list_empty(&timer->it.cpu.entry)) {
-		/*
-		 * Take us off the task's timer list.  We don't need to
-		 * take tasklist_lock and check for the task being reaped.
-		 * If it was reaped, it already called posix_cpu_timers_exit
-		 * and posix_cpu_timers_exit_group to clear all the timers
-		 * that pointed to it.
-		 */
-		list_del(&timer->it.cpu.entry);
-		put_task_struct(p);
+		read_lock(&tasklist_lock);
+		if (unlikely(p->signal == NULL)) {
+			/*
+			 * We raced with the reaping of the task.
+			 * The deletion should have cleared us off the list.
+			 */
+			BUG_ON(!list_empty(&timer->it.cpu.entry));
+		} else {
+			/*
+			 * Take us off the task's timer list.
+			 */
+			spin_lock(&p->sighand->siglock);
+			list_del(&timer->it.cpu.entry);
+			spin_unlock(&p->sighand->siglock);
+		}
+		read_unlock(&tasklist_lock);
 	}
-	spin_unlock(&p->sighand->siglock);
+	put_task_struct(p);
 
 	return 0;
 }

+ 1 - 1
kernel/posix-timers.c

@@ -1157,7 +1157,7 @@ retry_delete:
 }
 
 /*
- * This is called by __exit_signal, only when there are no more
+ * This is called by do_exit or de_thread, only when there are no more
  * references to the shared signal_struct.
  */
 void exit_itimers(struct signal_struct *sig)

+ 1 - 13
kernel/signal.c

@@ -397,20 +397,8 @@ void __exit_signal(struct task_struct *tsk)
 	flush_sigqueue(&tsk->pending);
 	if (sig) {
 		/*
-		 * We are cleaning up the signal_struct here.  We delayed
-		 * calling exit_itimers until after flush_sigqueue, just in
-		 * case our thread-local pending queue contained a queued
-		 * timer signal that would have been cleared in
-		 * exit_itimers.  When that called sigqueue_free, it would
-		 * attempt to re-take the tasklist_lock and deadlock.  This
-		 * can never happen if we ensure that all queues the
-		 * timer's signal might be queued on have been flushed
-		 * first.  The shared_pending queue, and our own pending
-		 * queue are the only queues the timer could be on, since
-		 * there are no other threads left in the group and timer
-		 * signals are constrained to threads inside the group.
+		 * We are cleaning up the signal_struct here.
 		 */
-		exit_itimers(sig);
 		exit_thread_group_keys(sig);
 		kmem_cache_free(signal_cachep, sig);
 	}

+ 22 - 0
mm/hugetlb.c

@@ -394,6 +394,28 @@ out:
 	return ret;
 }
 
+/*
+ * On ia64 at least, it is possible to receive a hugetlb fault from a
+ * stale zero entry left in the TLB from earlier hardware prefetching.
+ * Low-level arch code should already have flushed the stale entry as
+ * part of its fault handling, but we do need to accept this minor fault
+ * and return successfully.  Whereas the "normal" case is that this is
+ * an access to a hugetlb page which has been truncated off since mmap.
+ */
+int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
+			unsigned long address, int write_access)
+{
+	int ret = VM_FAULT_SIGBUS;
+	pte_t *pte;
+
+	spin_lock(&mm->page_table_lock);
+	pte = huge_pte_offset(mm, address);
+	if (pte && !pte_none(*pte))
+		ret = VM_FAULT_MINOR;
+	spin_unlock(&mm->page_table_lock);
+	return ret;
+}
+
 int follow_hugetlb_page(struct mm_struct *mm, struct vm_area_struct *vma,
 			struct page **pages, struct vm_area_struct **vmas,
 			unsigned long *position, int *length, int i)

+ 2 - 12
mm/memory.c

@@ -2045,18 +2045,8 @@ int __handle_mm_fault(struct mm_struct *mm, struct vm_area_struct * vma,
 
 	inc_page_state(pgfault);
 
-	if (unlikely(is_vm_hugetlb_page(vma))) {
-		if (valid_hugetlb_file_off(vma, address))
-			/* We get here only if there was a stale(zero) TLB entry 
-			 * (because of  HW prefetching). 
-			 * Low-level arch code (if needed) should have already
-			 * purged the stale entry as part of this fault handling.  
-			 * Here we just return.
-			 */
-			return VM_FAULT_MINOR; 
-		else
-			return VM_FAULT_SIGBUS;	/* mapping truncation does this. */
-	}
+	if (unlikely(is_vm_hugetlb_page(vma)))
+		return hugetlb_fault(mm, vma, address, write_access);
 
 	/*
 	 * We need the page table lock to synchronize with kswapd

+ 2 - 0
net/dccp/ipv4.c

@@ -463,6 +463,7 @@ static int dccp_v4_send_response(struct sock *sk, struct request_sock *req,
 	if (skb != NULL) {
 		const struct inet_request_sock *ireq = inet_rsk(req);
 
+		memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt));
 		err = ip_build_and_send_pkt(skb, sk, ireq->loc_addr,
 					    ireq->rmt_addr,
 					    ireq->opt);
@@ -647,6 +648,7 @@ int dccp_v4_send_reset(struct sock *sk, enum dccp_reset_codes code)
 	if (skb != NULL) {
 		const struct inet_sock *inet = inet_sk(sk);
 
+		memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt));
 		err = ip_build_and_send_pkt(skb, sk,
 					    inet->saddr, inet->daddr, NULL);
 		if (err == NET_XMIT_CN)

+ 5 - 5
net/dccp/output.c

@@ -62,10 +62,8 @@ int dccp_transmit_skb(struct sock *sk, struct sk_buff *skb)
 		
 		skb->h.raw = skb_push(skb, dccp_header_size);
 		dh = dccp_hdr(skb);
-		/*
-		 * Data packets are not cloned as they are never retransmitted
-		 */
-		if (skb_cloned(skb))
+
+		if (!skb->sk)
 			skb_set_owner_w(skb, sk);
 
 		/* Build DCCP header and checksum it. */
@@ -102,6 +100,7 @@ int dccp_transmit_skb(struct sock *sk, struct sk_buff *skb)
 
 		DCCP_INC_STATS(DCCP_MIB_OUTSEGS);
 
+		memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt));
 		err = ip_queue_xmit(skb, 0);
 		if (err <= 0)
 			return err;
@@ -243,7 +242,8 @@ int dccp_write_xmit(struct sock *sk, struct sk_buff *skb, long *timeo)
 
 		err = dccp_transmit_skb(sk, skb);
 		ccid_hc_tx_packet_sent(dp->dccps_hc_tx_ccid, sk, 0, len);
-	}
+	} else
+		kfree_skb(skb);
 
 	return err;
 }

+ 0 - 2
net/dccp/proto.c

@@ -402,8 +402,6 @@ int dccp_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
 	 *     This bug was _quickly_ found & fixed by just looking at an OSTRA
 	 *     generated callgraph 8) -acme
 	 */
-	if (rc != 0)
-		goto out_discard;
 out_release:
 	release_sock(sk);
 	return rc ? : len;

+ 1 - 11
net/ipv4/tcp_output.c

@@ -435,17 +435,7 @@ int tcp_fragment(struct sock *sk, struct sk_buff *skb, u32 len, unsigned int mss
 	int nsize, old_factor;
 	u16 flags;
 
-	if (unlikely(len >= skb->len)) {
-		if (net_ratelimit()) {
-			printk(KERN_DEBUG "TCP: seg_size=%u, mss=%u, seq=%u, "
-			       "end_seq=%u, skb->len=%u.\n", len, mss_now,
-			       TCP_SKB_CB(skb)->seq, TCP_SKB_CB(skb)->end_seq,
-			       skb->len);
-			WARN_ON(1);
-		}
-		return 0;
-	}
-
+	BUG_ON(len > skb->len);
 	nsize = skb_headlen(skb) - len;
 	if (nsize < 0)
 		nsize = 0;