Browse Source

Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-2.6

* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-2.6: (21 commits)
  dca: disable dca on IOAT ver.3.0 multiple-IOH platforms
  netpoll: Disable IRQ around RCU dereference in netpoll_rx
  sctp: Do not reset the packet during sctp_packet_config().
  net/llc: storing negative error codes in unsigned short
  MAINTAINERS: move atlx discussions to netdev
  drivers/net/cxgb3/cxgb3_main.c: prevent reading uninitialized stack memory
  drivers/net/eql.c: prevent reading uninitialized stack memory
  drivers/net/usb/hso.c: prevent reading uninitialized memory
  xfrm: dont assume rcu_read_lock in xfrm_output_one()
  r8169: Handle rxfifo errors on 8168 chips
  3c59x: Remove atomic context inside vortex_{set|get}_wol
  tcp: Prevent overzealous packetization by SWS logic.
  net: RPS needs to depend upon USE_GENERIC_SMP_HELPERS
  phylib: fix PAL state machine restart on resume
  net: use rcu_barrier() in rollback_registered_many
  bonding: correctly process non-linear skbs
  ipv4: enable getsockopt() for IP_NODEFRAG
  ipv4: force_igmp_version ignored when a IGMPv3 query received
  ppp: potential NULL dereference in ppp_mp_explode()
  net/llc: make opt unsigned in llc_ui_setsockopt()
  ...
Linus Torvalds 14 years ago
parent
commit
7d7dee96e1

+ 1 - 1
MAINTAINERS

@@ -1135,7 +1135,7 @@ ATLX ETHERNET DRIVERS
 M:	Jay Cliburn <jcliburn@gmail.com>
 M:	Jay Cliburn <jcliburn@gmail.com>
 M:	Chris Snook <chris.snook@gmail.com>
 M:	Chris Snook <chris.snook@gmail.com>
 M:	Jie Yang <jie.yang@atheros.com>
 M:	Jie Yang <jie.yang@atheros.com>
-L:	atl1-devel@lists.sourceforge.net
+L:	netdev@vger.kernel.org
 W:	http://sourceforge.net/projects/atl1
 W:	http://sourceforge.net/projects/atl1
 W:	http://atl1.sourceforge.net
 W:	http://atl1.sourceforge.net
 S:	Maintained
 S:	Maintained

+ 79 - 6
drivers/dca/dca-core.c

@@ -39,6 +39,10 @@ static DEFINE_SPINLOCK(dca_lock);
 
 
 static LIST_HEAD(dca_domains);
 static LIST_HEAD(dca_domains);
 
 
+static BLOCKING_NOTIFIER_HEAD(dca_provider_chain);
+
+static int dca_providers_blocked;
+
 static struct pci_bus *dca_pci_rc_from_dev(struct device *dev)
 static struct pci_bus *dca_pci_rc_from_dev(struct device *dev)
 {
 {
 	struct pci_dev *pdev = to_pci_dev(dev);
 	struct pci_dev *pdev = to_pci_dev(dev);
@@ -70,6 +74,60 @@ static void dca_free_domain(struct dca_domain *domain)
 	kfree(domain);
 	kfree(domain);
 }
 }
 
 
+/*
+ * dca_provider_ioat_ver_3_0 - report whether @dev is an Intel IOAT
+ * ver.3.0 (TBG) DCA provider.
+ *
+ * Returns 1 for a match, 0 otherwise.
+ */
+static int dca_provider_ioat_ver_3_0(struct device *dev)
+{
+	struct pci_dev *pdev = to_pci_dev(dev);
+
+	if (pdev->vendor != PCI_VENDOR_ID_INTEL)
+		return 0;
+
+	switch (pdev->device) {
+	case PCI_DEVICE_ID_INTEL_IOAT_TBG0:
+	case PCI_DEVICE_ID_INTEL_IOAT_TBG1:
+	case PCI_DEVICE_ID_INTEL_IOAT_TBG2:
+	case PCI_DEVICE_ID_INTEL_IOAT_TBG3:
+	case PCI_DEVICE_ID_INTEL_IOAT_TBG4:
+	case PCI_DEVICE_ID_INTEL_IOAT_TBG5:
+	case PCI_DEVICE_ID_INTEL_IOAT_TBG6:
+	case PCI_DEVICE_ID_INTEL_IOAT_TBG7:
+		return 1;
+	default:
+		return 0;
+	}
+}
+
+/*
+ * unregister_dca_providers - tear down every registered DCA provider
+ *
+ * Used when a blocked topology (IOAT ver.3.0 on a multiple-IOH platform)
+ * is detected: consumers are notified via DCA_PROVIDER_REMOVE, then all
+ * providers are detached from the (single expected) domain under dca_lock,
+ * and finally their sysfs entries are removed outside the lock, since
+ * sysfs removal may sleep.
+ */
+static void unregister_dca_providers(void)
+{
+	struct dca_provider *dca, *_dca;
+	struct list_head unregistered_providers;
+	struct dca_domain *domain;
+	unsigned long flags;
+
+	blocking_notifier_call_chain(&dca_provider_chain,
+				     DCA_PROVIDER_REMOVE, NULL);
+
+	INIT_LIST_HEAD(&unregistered_providers);
+
+	spin_lock_irqsave(&dca_lock, flags);
+
+	if (list_empty(&dca_domains)) {
+		spin_unlock_irqrestore(&dca_lock, flags);
+		return;
+	}
+
+	/*
+	 * At this point exactly one domain is expected on the list.
+	 * list_first_entry() is plain container_of() arithmetic and can
+	 * never return NULL here, so no NULL check: an early return at
+	 * this point would leak dca_lock with IRQs disabled.
+	 */
+	domain = list_first_entry(&dca_domains, struct dca_domain, node);
+
+	/* Move providers off the domain so sysfs teardown can run unlocked. */
+	list_for_each_entry_safe(dca, _dca, &domain->dca_providers, node)
+		list_move(&dca->node, &unregistered_providers);
+
+	dca_free_domain(domain);
+
+	spin_unlock_irqrestore(&dca_lock, flags);
+
+	list_for_each_entry_safe(dca, _dca, &unregistered_providers, node) {
+		dca_sysfs_remove_provider(dca);
+		list_del(&dca->node);
+	}
+}
+
 static struct dca_domain *dca_find_domain(struct pci_bus *rc)
 static struct dca_domain *dca_find_domain(struct pci_bus *rc)
 {
 {
 	struct dca_domain *domain;
 	struct dca_domain *domain;
@@ -90,9 +148,13 @@ static struct dca_domain *dca_get_domain(struct device *dev)
 	domain = dca_find_domain(rc);
 	domain = dca_find_domain(rc);
 
 
 	if (!domain) {
 	if (!domain) {
-		domain = dca_allocate_domain(rc);
-		if (domain)
-			list_add(&domain->node, &dca_domains);
+		if (dca_provider_ioat_ver_3_0(dev) && !list_empty(&dca_domains)) {
+			dca_providers_blocked = 1;
+		} else {
+			domain = dca_allocate_domain(rc);
+			if (domain)
+				list_add(&domain->node, &dca_domains);
+		}
 	}
 	}
 
 
 	return domain;
 	return domain;
@@ -293,8 +355,6 @@ void free_dca_provider(struct dca_provider *dca)
 }
 }
 EXPORT_SYMBOL_GPL(free_dca_provider);
 EXPORT_SYMBOL_GPL(free_dca_provider);
 
 
-static BLOCKING_NOTIFIER_HEAD(dca_provider_chain);
-
 /**
 /**
  * register_dca_provider - register a dca provider
  * register_dca_provider - register a dca provider
  * @dca - struct created by alloc_dca_provider()
  * @dca - struct created by alloc_dca_provider()
@@ -306,6 +366,13 @@ int register_dca_provider(struct dca_provider *dca, struct device *dev)
 	unsigned long flags;
 	unsigned long flags;
 	struct dca_domain *domain;
 	struct dca_domain *domain;
 
 
+	spin_lock_irqsave(&dca_lock, flags);
+	if (dca_providers_blocked) {
+		spin_unlock_irqrestore(&dca_lock, flags);
+		return -ENODEV;
+	}
+	spin_unlock_irqrestore(&dca_lock, flags);
+
 	err = dca_sysfs_add_provider(dca, dev);
 	err = dca_sysfs_add_provider(dca, dev);
 	if (err)
 	if (err)
 		return err;
 		return err;
@@ -313,7 +380,13 @@ int register_dca_provider(struct dca_provider *dca, struct device *dev)
 	spin_lock_irqsave(&dca_lock, flags);
 	spin_lock_irqsave(&dca_lock, flags);
 	domain = dca_get_domain(dev);
 	domain = dca_get_domain(dev);
 	if (!domain) {
 	if (!domain) {
-		spin_unlock_irqrestore(&dca_lock, flags);
+		if (dca_providers_blocked) {
+			spin_unlock_irqrestore(&dca_lock, flags);
+			dca_sysfs_remove_provider(dca);
+			unregister_dca_providers();
+		} else {
+			spin_unlock_irqrestore(&dca_lock, flags);
+		}
 		return -ENODEV;
 		return -ENODEV;
 	}
 	}
 	list_add(&dca->node, &domain->dca_providers);
 	list_add(&dca->node, &domain->dca_providers);

+ 3 - 4
drivers/net/3c59x.c

@@ -635,6 +635,9 @@ struct vortex_private {
 		must_free_region:1,				/* Flag: if zero, Cardbus owns the I/O region */
 		must_free_region:1,				/* Flag: if zero, Cardbus owns the I/O region */
 		large_frames:1,			/* accept large frames */
 		large_frames:1,			/* accept large frames */
 		handling_irq:1;			/* private in_irq indicator */
 		handling_irq:1;			/* private in_irq indicator */
+	/* The {get|set}_wol operations are already serialized by rtnl.
+	 * No additional locking is required for the enable_wol flag or
+	 * the acpi_set_WOL() call.
+	 */
 	int drv_flags;
 	int drv_flags;
 	u16 status_enable;
 	u16 status_enable;
 	u16 intr_enable;
 	u16 intr_enable;
@@ -2939,13 +2942,11 @@ static void vortex_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
 {
 {
 	struct vortex_private *vp = netdev_priv(dev);
 	struct vortex_private *vp = netdev_priv(dev);
 
 
-	spin_lock_irq(&vp->lock);
 	wol->supported = WAKE_MAGIC;
 	wol->supported = WAKE_MAGIC;
 
 
 	wol->wolopts = 0;
 	wol->wolopts = 0;
 	if (vp->enable_wol)
 	if (vp->enable_wol)
 		wol->wolopts |= WAKE_MAGIC;
 		wol->wolopts |= WAKE_MAGIC;
-	spin_unlock_irq(&vp->lock);
 }
 }
 
 
 static int vortex_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
 static int vortex_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
@@ -2954,13 +2955,11 @@ static int vortex_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
 	if (wol->wolopts & ~WAKE_MAGIC)
 	if (wol->wolopts & ~WAKE_MAGIC)
 		return -EINVAL;
 		return -EINVAL;
 
 
-	spin_lock_irq(&vp->lock);
 	if (wol->wolopts & WAKE_MAGIC)
 	if (wol->wolopts & WAKE_MAGIC)
 		vp->enable_wol = 1;
 		vp->enable_wol = 1;
 	else
 	else
 		vp->enable_wol = 0;
 		vp->enable_wol = 0;
 	acpi_set_WOL(dev);
 	acpi_set_WOL(dev);
-	spin_unlock_irq(&vp->lock);
 
 
 	return 0;
 	return 0;
 }
 }

+ 3 - 0
drivers/net/bonding/bond_3ad.c

@@ -2466,6 +2466,9 @@ int bond_3ad_lacpdu_recv(struct sk_buff *skb, struct net_device *dev, struct pac
 	if (!(dev->flags & IFF_MASTER))
 	if (!(dev->flags & IFF_MASTER))
 		goto out;
 		goto out;
 
 
+	if (!pskb_may_pull(skb, sizeof(struct lacpdu)))
+		goto out;
+
 	read_lock(&bond->lock);
 	read_lock(&bond->lock);
 	slave = bond_get_slave_by_dev((struct bonding *)netdev_priv(dev),
 	slave = bond_get_slave_by_dev((struct bonding *)netdev_priv(dev),
 					orig_dev);
 					orig_dev);

+ 3 - 0
drivers/net/bonding/bond_alb.c

@@ -362,6 +362,9 @@ static int rlb_arp_recv(struct sk_buff *skb, struct net_device *bond_dev, struct
 		goto out;
 		goto out;
 	}
 	}
 
 
+	if (!pskb_may_pull(skb, arp_hdr_len(bond_dev)))
+		goto out;
+
 	if (skb->len < sizeof(struct arp_pkt)) {
 	if (skb->len < sizeof(struct arp_pkt)) {
 		pr_debug("Packet is too small to be an ARP\n");
 		pr_debug("Packet is too small to be an ARP\n");
 		goto out;
 		goto out;

+ 2 - 0
drivers/net/cxgb3/cxgb3_main.c

@@ -2296,6 +2296,8 @@ static int cxgb_extension_ioctl(struct net_device *dev, void __user *useraddr)
 	case CHELSIO_GET_QSET_NUM:{
 	case CHELSIO_GET_QSET_NUM:{
 		struct ch_reg edata;
 		struct ch_reg edata;
 
 
+		memset(&edata, 0, sizeof(struct ch_reg));
+
 		edata.cmd = CHELSIO_GET_QSET_NUM;
 		edata.cmd = CHELSIO_GET_QSET_NUM;
 		edata.val = pi->nqsets;
 		edata.val = pi->nqsets;
 		if (copy_to_user(useraddr, &edata, sizeof(edata)))
 		if (copy_to_user(useraddr, &edata, sizeof(edata)))

+ 2 - 0
drivers/net/eql.c

@@ -555,6 +555,8 @@ static int eql_g_master_cfg(struct net_device *dev, master_config_t __user *mcp)
 	equalizer_t *eql;
 	equalizer_t *eql;
 	master_config_t mc;
 	master_config_t mc;
 
 
+	memset(&mc, 0, sizeof(master_config_t));
+
 	if (eql_is_master(dev)) {
 	if (eql_is_master(dev)) {
 		eql = netdev_priv(dev);
 		eql = netdev_priv(dev);
 		mc.max_slaves = eql->max_slaves;
 		mc.max_slaves = eql->max_slaves;

+ 2 - 2
drivers/net/phy/mdio_bus.c

@@ -308,7 +308,7 @@ static int mdio_bus_suspend(struct device *dev)
 	 * may call phy routines that try to grab the same lock, and that may
 	 * may call phy routines that try to grab the same lock, and that may
 	 * lead to a deadlock.
 	 * lead to a deadlock.
 	 */
 	 */
-	if (phydev->attached_dev)
+	if (phydev->attached_dev && phydev->adjust_link)
 		phy_stop_machine(phydev);
 		phy_stop_machine(phydev);
 
 
 	if (!mdio_bus_phy_may_suspend(phydev))
 	if (!mdio_bus_phy_may_suspend(phydev))
@@ -331,7 +331,7 @@ static int mdio_bus_resume(struct device *dev)
 		return ret;
 		return ret;
 
 
 no_resume:
 no_resume:
-	if (phydev->attached_dev)
+	if (phydev->attached_dev && phydev->adjust_link)
 		phy_start_machine(phydev, NULL);
 		phy_start_machine(phydev, NULL);
 
 
 	return 0;
 	return 0;

+ 7 - 2
drivers/net/ppp_generic.c

@@ -1314,8 +1314,13 @@ static int ppp_mp_explode(struct ppp *ppp, struct sk_buff *skb)
 	hdrlen = (ppp->flags & SC_MP_XSHORTSEQ)? MPHDRLEN_SSN: MPHDRLEN;
 	hdrlen = (ppp->flags & SC_MP_XSHORTSEQ)? MPHDRLEN_SSN: MPHDRLEN;
 	i = 0;
 	i = 0;
 	list_for_each_entry(pch, &ppp->channels, clist) {
 	list_for_each_entry(pch, &ppp->channels, clist) {
-		navail += pch->avail = (pch->chan != NULL);
-		pch->speed = pch->chan->speed;
+		if (pch->chan) {
+			pch->avail = 1;
+			navail++;
+			pch->speed = pch->chan->speed;
+		} else {
+			pch->avail = 0;
+		}
 		if (pch->avail) {
 		if (pch->avail) {
 			if (skb_queue_empty(&pch->file.xq) ||
 			if (skb_queue_empty(&pch->file.xq) ||
 				!pch->had_frag) {
 				!pch->had_frag) {

+ 2 - 3
drivers/net/r8169.c

@@ -2934,7 +2934,7 @@ static const struct rtl_cfg_info {
 		.hw_start	= rtl_hw_start_8168,
 		.hw_start	= rtl_hw_start_8168,
 		.region		= 2,
 		.region		= 2,
 		.align		= 8,
 		.align		= 8,
-		.intr_event	= SYSErr | LinkChg | RxOverflow |
+		.intr_event	= SYSErr | RxFIFOOver | LinkChg | RxOverflow |
 				  TxErr | TxOK | RxOK | RxErr,
 				  TxErr | TxOK | RxOK | RxErr,
 		.napi_event	= TxErr | TxOK | RxOK | RxOverflow,
 		.napi_event	= TxErr | TxOK | RxOK | RxOverflow,
 		.features	= RTL_FEATURE_GMII | RTL_FEATURE_MSI,
 		.features	= RTL_FEATURE_GMII | RTL_FEATURE_MSI,
@@ -4625,8 +4625,7 @@ static irqreturn_t rtl8169_interrupt(int irq, void *dev_instance)
 		}
 		}
 
 
 		/* Work around for rx fifo overflow */
 		/* Work around for rx fifo overflow */
-		if (unlikely(status & RxFIFOOver) &&
-		(tp->mac_version == RTL_GIGA_MAC_VER_11)) {
+		if (unlikely(status & RxFIFOOver)) {
 			netif_stop_queue(dev);
 			netif_stop_queue(dev);
 			rtl8169_tx_timeout(dev);
 			rtl8169_tx_timeout(dev);
 			break;
 			break;

+ 2 - 0
drivers/net/usb/hso.c

@@ -1652,6 +1652,8 @@ static int hso_get_count(struct hso_serial *serial,
 	struct uart_icount cnow;
 	struct uart_icount cnow;
 	struct hso_tiocmget  *tiocmget = serial->tiocmget;
 	struct hso_tiocmget  *tiocmget = serial->tiocmget;
 
 
+	memset(&icount, 0, sizeof(struct serial_icounter_struct));
+
 	if (!tiocmget)
 	if (!tiocmget)
 		 return -ENOENT;
 		 return -ENOENT;
 	spin_lock_irq(&serial->serial_lock);
 	spin_lock_irq(&serial->serial_lock);

+ 4 - 4
include/linux/netpoll.h

@@ -63,20 +63,20 @@ static inline bool netpoll_rx(struct sk_buff *skb)
 	unsigned long flags;
 	unsigned long flags;
 	bool ret = false;
 	bool ret = false;
 
 
-	rcu_read_lock_bh();
+	local_irq_save(flags);
 	npinfo = rcu_dereference_bh(skb->dev->npinfo);
 	npinfo = rcu_dereference_bh(skb->dev->npinfo);
 
 
 	if (!npinfo || (list_empty(&npinfo->rx_np) && !npinfo->rx_flags))
 	if (!npinfo || (list_empty(&npinfo->rx_np) && !npinfo->rx_flags))
 		goto out;
 		goto out;
 
 
-	spin_lock_irqsave(&npinfo->rx_lock, flags);
+	spin_lock(&npinfo->rx_lock);
 	/* check rx_flags again with the lock held */
 	/* check rx_flags again with the lock held */
 	if (npinfo->rx_flags && __netpoll_rx(skb))
 	if (npinfo->rx_flags && __netpoll_rx(skb))
 		ret = true;
 		ret = true;
-	spin_unlock_irqrestore(&npinfo->rx_lock, flags);
+	spin_unlock(&npinfo->rx_lock);
 
 
 out:
 out:
-	rcu_read_unlock_bh();
+	local_irq_restore(flags);
 	return ret;
 	return ret;
 }
 }
 
 

+ 16 - 2
include/net/tcp.h

@@ -475,8 +475,22 @@ extern unsigned int tcp_current_mss(struct sock *sk);
 /* Bound MSS / TSO packet size with the half of the window */
 /* Bound MSS / TSO packet size with the half of the window */
 static inline int tcp_bound_to_half_wnd(struct tcp_sock *tp, int pktsize)
 static inline int tcp_bound_to_half_wnd(struct tcp_sock *tp, int pktsize)
 {
 {
-	if (tp->max_window && pktsize > (tp->max_window >> 1))
-		return max(tp->max_window >> 1, 68U - tp->tcp_header_len);
+	int cutoff;
+
+	/* When peer uses tiny windows, there is no use in packetizing
+	 * to sub-MSS pieces for the sake of SWS or making sure there
+	 * are enough packets in the pipe for fast recovery.
+	 *
+	 * On the other hand, for extremely large MSS devices, handling
+	 * smaller than MSS windows in this way does make sense.
+	 */
+	if (tp->max_window >= 512)
+		cutoff = (tp->max_window >> 1);
+	else
+		cutoff = tp->max_window;
+
+	if (cutoff && pktsize > cutoff)
+		return max_t(int, cutoff, 68U - tp->tcp_header_len);
 	else
 	else
 		return pktsize;
 		return pktsize;
 }
 }

+ 1 - 1
net/Kconfig

@@ -217,7 +217,7 @@ source "net/dns_resolver/Kconfig"
 
 
 config RPS
 config RPS
 	boolean
 	boolean
-	depends on SMP && SYSFS
+	depends on SMP && SYSFS && USE_GENERIC_SMP_HELPERS
 	default y
 	default y
 
 
 menu "Network testing"
 menu "Network testing"

+ 1 - 1
net/core/dev.c

@@ -4845,7 +4845,7 @@ static void rollback_registered_many(struct list_head *head)
 	dev = list_first_entry(head, struct net_device, unreg_list);
 	dev = list_first_entry(head, struct net_device, unreg_list);
 	call_netdevice_notifiers(NETDEV_UNREGISTER_BATCH, dev);
 	call_netdevice_notifiers(NETDEV_UNREGISTER_BATCH, dev);
 
 
-	synchronize_net();
+	rcu_barrier();
 
 
 	list_for_each_entry(dev, head, unreg_list)
 	list_for_each_entry(dev, head, unreg_list)
 		dev_put(dev);
 		dev_put(dev);

+ 1 - 1
net/ipv4/igmp.c

@@ -834,7 +834,7 @@ static void igmp_heard_query(struct in_device *in_dev, struct sk_buff *skb,
 	int			mark = 0;
 	int			mark = 0;
 
 
 
 
-	if (len == 8) {
+	if (len == 8 || IGMP_V2_SEEN(in_dev)) {
 		if (ih->code == 0) {
 		if (ih->code == 0) {
 			/* Alas, old v1 router presents here. */
 			/* Alas, old v1 router presents here. */
 
 

+ 3 - 0
net/ipv4/ip_sockglue.c

@@ -1129,6 +1129,9 @@ static int do_ip_getsockopt(struct sock *sk, int level, int optname,
 	case IP_HDRINCL:
 	case IP_HDRINCL:
 		val = inet->hdrincl;
 		val = inet->hdrincl;
 		break;
 		break;
+	case IP_NODEFRAG:
+		val = inet->nodefrag;
+		break;
 	case IP_MTU_DISCOVER:
 	case IP_MTU_DISCOVER:
 		val = inet->pmtudisc;
 		val = inet->pmtudisc;
 		break;
 		break;

+ 2 - 1
net/llc/af_llc.c

@@ -1024,7 +1024,8 @@ static int llc_ui_setsockopt(struct socket *sock, int level, int optname,
 {
 {
 	struct sock *sk = sock->sk;
 	struct sock *sk = sock->sk;
 	struct llc_sock *llc = llc_sk(sk);
 	struct llc_sock *llc = llc_sk(sk);
-	int rc = -EINVAL, opt;
+	unsigned int opt;
+	int rc = -EINVAL;
 
 
 	lock_sock(sk);
 	lock_sock(sk);
 	if (unlikely(level != SOL_LLC || optlen != sizeof(int)))
 	if (unlikely(level != SOL_LLC || optlen != sizeof(int)))

+ 1 - 1
net/llc/llc_station.c

@@ -689,7 +689,7 @@ static void llc_station_rcv(struct sk_buff *skb)
 
 
 int __init llc_station_init(void)
 int __init llc_station_init(void)
 {
 {
-	u16 rc = -ENOBUFS;
+	int rc = -ENOBUFS;
 	struct sk_buff *skb;
 	struct sk_buff *skb;
 	struct llc_station_state_ev *ev;
 	struct llc_station_state_ev *ev;
 
 

+ 0 - 4
net/sched/sch_atm.c

@@ -255,10 +255,6 @@ static int atm_tc_change(struct Qdisc *sch, u32 classid, u32 parent,
 			error = -EINVAL;
 			error = -EINVAL;
 			goto err_out;
 			goto err_out;
 		}
 		}
-		if (!list_empty(&flow->list)) {
-			error = -EEXIST;
-			goto err_out;
-		}
 	} else {
 	} else {
 		int i;
 		int i;
 		unsigned long cl;
 		unsigned long cl;

+ 0 - 1
net/sctp/output.c

@@ -92,7 +92,6 @@ struct sctp_packet *sctp_packet_config(struct sctp_packet *packet,
 	SCTP_DEBUG_PRINTK("%s: packet:%p vtag:0x%x\n", __func__,
 	SCTP_DEBUG_PRINTK("%s: packet:%p vtag:0x%x\n", __func__,
 			  packet, vtag);
 			  packet, vtag);
 
 
-	sctp_packet_reset(packet);
 	packet->vtag = vtag;
 	packet->vtag = vtag;
 
 
 	if (ecn_capable && sctp_packet_empty(packet)) {
 	if (ecn_capable && sctp_packet_empty(packet)) {

+ 1 - 1
net/xfrm/xfrm_output.c

@@ -101,7 +101,7 @@ resume:
 			err = -EHOSTUNREACH;
 			err = -EHOSTUNREACH;
 			goto error_nolock;
 			goto error_nolock;
 		}
 		}
-		skb_dst_set_noref(skb, dst);
+		skb_dst_set(skb, dst_clone(dst));
 		x = dst->xfrm;
 		x = dst->xfrm;
 	} while (x && !(x->outer_mode->flags & XFRM_MODE_FLAG_TUNNEL));
 	} while (x && !(x->outer_mode->flags & XFRM_MODE_FLAG_TUNNEL));