Browse Source

Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-2.6

* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-2.6:
  ipv6: protocol for address routes
  icmp: icmp_sk() should not use smp_processor_id() in preemptible code
  pkt_sched: Fix qdisc list locking
  pkt_sched: Fix qdisc_watchdog() vs. dev_deactivate() race
  sctp: fix potential panics in the SCTP-AUTH API.
Linus Torvalds 16 years ago
parent
commit
6450f65168

+ 1 - 0
include/net/pkt_sched.h

@@ -78,6 +78,7 @@ extern struct Qdisc *fifo_create_dflt(struct Qdisc *sch, struct Qdisc_ops *ops,
 
 extern int register_qdisc(struct Qdisc_ops *qops);
 extern int unregister_qdisc(struct Qdisc_ops *qops);
+extern void qdisc_list_del(struct Qdisc *q);
 extern struct Qdisc *qdisc_lookup(struct net_device *dev, u32 handle);
 extern struct Qdisc *qdisc_lookup_class(struct net_device *dev, u32 handle);
 extern struct qdisc_rate_table *qdisc_get_rtab(struct tc_ratespec *r,

+ 5 - 0
include/net/sch_generic.h

@@ -193,6 +193,11 @@ static inline struct Qdisc *qdisc_root(struct Qdisc *qdisc)
 	return qdisc->dev_queue->qdisc;
 }
 
+static inline struct Qdisc *qdisc_root_sleeping(struct Qdisc *qdisc)
+{
+	return qdisc->dev_queue->qdisc_sleeping;
+}
+
 /* The qdisc root lock is a mechanism by which to top level
  * of a qdisc tree can be locked from any qdisc node in the
  * forest.  This allows changing the configuration of some

+ 14 - 8
net/ipv4/icmp.c

@@ -204,18 +204,22 @@ static struct sock *icmp_sk(struct net *net)
 	return net->ipv4.icmp_sk[smp_processor_id()];
 }
 
-static inline int icmp_xmit_lock(struct sock *sk)
+static inline struct sock *icmp_xmit_lock(struct net *net)
 {
+	struct sock *sk;
+
 	local_bh_disable();
 
+	sk = icmp_sk(net);
+
 	if (unlikely(!spin_trylock(&sk->sk_lock.slock))) {
 		/* This can happen if the output path signals a
 		 * dst_link_failure() for an outgoing ICMP packet.
 		 */
 		local_bh_enable();
-		return 1;
+		return NULL;
 	}
-	return 0;
+	return sk;
 }
 
 static inline void icmp_xmit_unlock(struct sock *sk)
@@ -354,15 +358,17 @@ static void icmp_reply(struct icmp_bxm *icmp_param, struct sk_buff *skb)
 	struct ipcm_cookie ipc;
 	struct rtable *rt = skb->rtable;
 	struct net *net = dev_net(rt->u.dst.dev);
-	struct sock *sk = icmp_sk(net);
-	struct inet_sock *inet = inet_sk(sk);
+	struct sock *sk;
+	struct inet_sock *inet;
 	__be32 daddr;
 
 	if (ip_options_echo(&icmp_param->replyopts, skb))
 		return;
 
-	if (icmp_xmit_lock(sk))
+	sk = icmp_xmit_lock(net);
+	if (sk == NULL)
 		return;
+	inet = inet_sk(sk);
 
 	icmp_param->data.icmph.checksum = 0;
 
@@ -419,7 +425,6 @@ void icmp_send(struct sk_buff *skb_in, int type, int code, __be32 info)
 	if (!rt)
 		goto out;
 	net = dev_net(rt->u.dst.dev);
-	sk = icmp_sk(net);
 
 	/*
 	 *	Find the original header. It is expected to be valid, of course.
@@ -483,7 +488,8 @@ void icmp_send(struct sk_buff *skb_in, int type, int code, __be32 info)
 		}
 	}
 
-	if (icmp_xmit_lock(sk))
+	sk = icmp_xmit_lock(net);
+	if (sk == NULL)
 		return;
 
 	/*

+ 1 - 0
net/ipv6/addrconf.c

@@ -1688,6 +1688,7 @@ addrconf_prefix_route(struct in6_addr *pfx, int plen, struct net_device *dev,
 		.fc_dst_len = plen,
 		.fc_flags = RTF_UP | flags,
 		.fc_nlinfo.nl_net = dev_net(dev),
+		.fc_protocol = RTPROT_KERNEL,
 	};
 
 	ipv6_addr_copy(&cfg.fc_dst, pfx);

+ 12 - 11
net/ipv6/icmp.c

@@ -91,19 +91,22 @@ static struct inet6_protocol icmpv6_protocol = {
 	.flags		=	INET6_PROTO_NOPOLICY|INET6_PROTO_FINAL,
 };
 
-static __inline__ int icmpv6_xmit_lock(struct sock *sk)
+static __inline__ struct sock *icmpv6_xmit_lock(struct net *net)
 {
+	struct sock *sk;
+
 	local_bh_disable();
 
+	sk = icmpv6_sk(net);
 	if (unlikely(!spin_trylock(&sk->sk_lock.slock))) {
 		/* This can happen if the output path (f.e. SIT or
 		 * ip6ip6 tunnel) signals dst_link_failure() for an
 		 * outgoing ICMP6 packet.
 		 */
 		local_bh_enable();
-		return 1;
+		return NULL;
 	}
-	return 0;
+	return sk;
 }
 
 static __inline__ void icmpv6_xmit_unlock(struct sock *sk)
@@ -392,11 +395,10 @@ void icmpv6_send(struct sk_buff *skb, int type, int code, __u32 info,
 	fl.fl_icmp_code = code;
 	security_skb_classify_flow(skb, &fl);
 
-	sk = icmpv6_sk(net);
-	np = inet6_sk(sk);
-
-	if (icmpv6_xmit_lock(sk))
+	sk = icmpv6_xmit_lock(net);
+	if (sk == NULL)
 		return;
+	np = inet6_sk(sk);
 
 	if (!icmpv6_xrlim_allow(sk, type, &fl))
 		goto out;
@@ -539,11 +541,10 @@ static void icmpv6_echo_reply(struct sk_buff *skb)
 	fl.fl_icmp_type = ICMPV6_ECHO_REPLY;
 	security_skb_classify_flow(skb, &fl);
 
-	sk = icmpv6_sk(net);
-	np = inet6_sk(sk);
-
-	if (icmpv6_xmit_lock(sk))
+	sk = icmpv6_xmit_lock(net);
+	if (sk == NULL)
 		return;
+	np = inet6_sk(sk);
 
 	if (!fl.oif && ipv6_addr_is_multicast(&fl.fl6_dst))
 		fl.oif = np->mcast_oif;

+ 43 - 5
net/sched/sch_api.c

@@ -199,19 +199,53 @@ struct Qdisc *qdisc_match_from_root(struct Qdisc *root, u32 handle)
 	return NULL;
 }
 
+/*
+ * This lock is needed until some qdiscs stop calling qdisc_tree_decrease_qlen()
+ * without rtnl_lock(); currently hfsc_dequeue(), netem_dequeue(), tbf_dequeue()
+ */
+static DEFINE_SPINLOCK(qdisc_list_lock);
+
+static void qdisc_list_add(struct Qdisc *q)
+{
+	if ((q->parent != TC_H_ROOT) && !(q->flags & TCQ_F_INGRESS)) {
+		spin_lock_bh(&qdisc_list_lock);
+		list_add_tail(&q->list, &qdisc_root_sleeping(q)->list);
+		spin_unlock_bh(&qdisc_list_lock);
+	}
+}
+
+void qdisc_list_del(struct Qdisc *q)
+{
+	if ((q->parent != TC_H_ROOT) && !(q->flags & TCQ_F_INGRESS)) {
+		spin_lock_bh(&qdisc_list_lock);
+		list_del(&q->list);
+		spin_unlock_bh(&qdisc_list_lock);
+	}
+}
+EXPORT_SYMBOL(qdisc_list_del);
+
 struct Qdisc *qdisc_lookup(struct net_device *dev, u32 handle)
 {
 	unsigned int i;
+	struct Qdisc *q;
+
+	spin_lock_bh(&qdisc_list_lock);
 
 	for (i = 0; i < dev->num_tx_queues; i++) {
 		struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
-		struct Qdisc *q, *txq_root = txq->qdisc_sleeping;
+		struct Qdisc *txq_root = txq->qdisc_sleeping;
 
 		q = qdisc_match_from_root(txq_root, handle);
 		if (q)
-			return q;
+			goto unlock;
 	}
-	return qdisc_match_from_root(dev->rx_queue.qdisc_sleeping, handle);
+
+	q = qdisc_match_from_root(dev->rx_queue.qdisc_sleeping, handle);
+
+unlock:
+	spin_unlock_bh(&qdisc_list_lock);
+
+	return q;
 }
 
 static struct Qdisc *qdisc_leaf(struct Qdisc *p, u32 classid)
@@ -444,6 +478,10 @@ void qdisc_watchdog_schedule(struct qdisc_watchdog *wd, psched_time_t expires)
 {
 	ktime_t time;
 
+	if (test_bit(__QDISC_STATE_DEACTIVATED,
+		     &qdisc_root_sleeping(wd->qdisc)->state))
+		return;
+
 	wd->qdisc->flags |= TCQ_F_THROTTLED;
 	time = ktime_set(0, 0);
 	time = ktime_add_ns(time, PSCHED_US2NS(expires));
@@ -806,8 +844,8 @@ qdisc_create(struct net_device *dev, struct netdev_queue *dev_queue,
 				goto err_out3;
 			}
 		}
-		if ((parent != TC_H_ROOT) && !(sch->flags & TCQ_F_INGRESS))
-			list_add_tail(&sch->list, &dev_queue->qdisc_sleeping->list);
+
+		qdisc_list_add(sch);
 
 		return sch;
 	}

+ 4 - 0
net/sched/sch_cbq.c

@@ -521,6 +521,10 @@ static void cbq_ovl_delay(struct cbq_class *cl)
 	struct cbq_sched_data *q = qdisc_priv(cl->qdisc);
 	psched_tdiff_t delay = cl->undertime - q->now;
 
+	if (test_bit(__QDISC_STATE_DEACTIVATED,
+		     &qdisc_root_sleeping(cl->qdisc)->state))
+		return;
+
 	if (!cl->delayed) {
 		psched_time_t sched = q->now;
 		ktime_t expires;

+ 2 - 3
net/sched/sch_generic.c

@@ -526,10 +526,9 @@ void qdisc_destroy(struct Qdisc *qdisc)
 	    !atomic_dec_and_test(&qdisc->refcnt))
 		return;
 
-	if (qdisc->parent)
-		list_del(&qdisc->list);
-
 #ifdef CONFIG_NET_SCHED
+	qdisc_list_del(qdisc);
+
 	qdisc_put_stab(qdisc->stab);
 #endif
 	gen_kill_estimator(&qdisc->bstats, &qdisc->rate_est);

+ 2 - 2
net/sctp/endpointola.c

@@ -103,6 +103,7 @@ static struct sctp_endpoint *sctp_endpoint_init(struct sctp_endpoint *ep,
 
 		/* Initialize the CHUNKS parameter */
 		auth_chunks->param_hdr.type = SCTP_PARAM_CHUNKS;
+		auth_chunks->param_hdr.length = htons(sizeof(sctp_paramhdr_t));
 
 		/* If the Add-IP functionality is enabled, we must
 		 * authenticate, ASCONF and ASCONF-ACK chunks
@@ -110,8 +111,7 @@ static struct sctp_endpoint *sctp_endpoint_init(struct sctp_endpoint *ep,
 		if (sctp_addip_enable) {
 			auth_chunks->chunks[0] = SCTP_CID_ASCONF;
 			auth_chunks->chunks[1] = SCTP_CID_ASCONF_ACK;
-			auth_chunks->param_hdr.length =
-					htons(sizeof(sctp_paramhdr_t) + 2);
+			auth_chunks->param_hdr.length += htons(2);
 		}
 	}
 

+ 65 - 20
net/sctp/socket.c

@@ -3055,6 +3055,9 @@ static int sctp_setsockopt_auth_chunk(struct sock *sk,
 {
 	struct sctp_authchunk val;
 
+	if (!sctp_auth_enable)
+		return -EACCES;
+
 	if (optlen != sizeof(struct sctp_authchunk))
 		return -EINVAL;
 	if (copy_from_user(&val, optval, optlen))
@@ -3085,6 +3088,9 @@ static int sctp_setsockopt_hmac_ident(struct sock *sk,
 	struct sctp_hmacalgo *hmacs;
 	int err;
 
+	if (!sctp_auth_enable)
+		return -EACCES;
+
 	if (optlen < sizeof(struct sctp_hmacalgo))
 		return -EINVAL;
 
@@ -3123,6 +3129,9 @@ static int sctp_setsockopt_auth_key(struct sock *sk,
 	struct sctp_association *asoc;
 	int ret;
 
+	if (!sctp_auth_enable)
+		return -EACCES;
+
 	if (optlen <= sizeof(struct sctp_authkey))
 		return -EINVAL;
 
@@ -3160,6 +3169,9 @@ static int sctp_setsockopt_active_key(struct sock *sk,
 	struct sctp_authkeyid val;
 	struct sctp_association *asoc;
 
+	if (!sctp_auth_enable)
+		return -EACCES;
+
 	if (optlen != sizeof(struct sctp_authkeyid))
 		return -EINVAL;
 	if (copy_from_user(&val, optval, optlen))
@@ -3185,6 +3197,9 @@ static int sctp_setsockopt_del_key(struct sock *sk,
 	struct sctp_authkeyid val;
 	struct sctp_association *asoc;
 
+	if (!sctp_auth_enable)
+		return -EACCES;
+
 	if (optlen != sizeof(struct sctp_authkeyid))
 		return -EINVAL;
 	if (copy_from_user(&val, optval, optlen))
@@ -5197,19 +5212,29 @@ static int sctp_getsockopt_maxburst(struct sock *sk, int len,
 static int sctp_getsockopt_hmac_ident(struct sock *sk, int len,
 				    char __user *optval, int __user *optlen)
 {
+	struct sctp_hmacalgo  __user *p = (void __user *)optval;
 	struct sctp_hmac_algo_param *hmacs;
-	__u16 param_len;
+	__u16 data_len = 0;
+	u32 num_idents;
+
+	if (!sctp_auth_enable)
+		return -EACCES;
 
 	hmacs = sctp_sk(sk)->ep->auth_hmacs_list;
-	param_len = ntohs(hmacs->param_hdr.length);
+	data_len = ntohs(hmacs->param_hdr.length) - sizeof(sctp_paramhdr_t);
 
-	if (len < param_len)
+	if (len < sizeof(struct sctp_hmacalgo) + data_len)
 		return -EINVAL;
+
+	len = sizeof(struct sctp_hmacalgo) + data_len;
+	num_idents = data_len / sizeof(u16);
+
 	if (put_user(len, optlen))
 		return -EFAULT;
-	if (copy_to_user(optval, hmacs->hmac_ids, len))
+	if (put_user(num_idents, &p->shmac_num_idents))
+		return -EFAULT;
+	if (copy_to_user(p->shmac_idents, hmacs->hmac_ids, data_len))
 		return -EFAULT;
-
 	return 0;
 }
 
@@ -5219,6 +5244,9 @@ static int sctp_getsockopt_active_key(struct sock *sk, int len,
 	struct sctp_authkeyid val;
 	struct sctp_association *asoc;
 
+	if (!sctp_auth_enable)
+		return -EACCES;
+
 	if (len < sizeof(struct sctp_authkeyid))
 		return -EINVAL;
 	if (copy_from_user(&val, optval, sizeof(struct sctp_authkeyid)))
@@ -5233,6 +5261,12 @@ static int sctp_getsockopt_active_key(struct sock *sk, int len,
 	else
 		val.scact_keynumber = sctp_sk(sk)->ep->active_key_id;
 
+	len = sizeof(struct sctp_authkeyid);
+	if (put_user(len, optlen))
+		return -EFAULT;
+	if (copy_to_user(optval, &val, len))
+		return -EFAULT;
+
 	return 0;
 }
 
@@ -5243,13 +5277,16 @@ static int sctp_getsockopt_peer_auth_chunks(struct sock *sk, int len,
 	struct sctp_authchunks val;
 	struct sctp_association *asoc;
 	struct sctp_chunks_param *ch;
-	u32    num_chunks;
+	u32    num_chunks = 0;
 	char __user *to;
 
-	if (len <= sizeof(struct sctp_authchunks))
+	if (!sctp_auth_enable)
+		return -EACCES;
+
+	if (len < sizeof(struct sctp_authchunks))
 		return -EINVAL;
 
-	if (copy_from_user(&val, p, sizeof(struct sctp_authchunks)))
+	if (copy_from_user(&val, optval, sizeof(struct sctp_authchunks)))
 		return -EFAULT;
 
 	to = p->gauth_chunks;
@@ -5258,20 +5295,21 @@ static int sctp_getsockopt_peer_auth_chunks(struct sock *sk, int len,
 		return -EINVAL;
 
 	ch = asoc->peer.peer_chunks;
+	if (!ch)
+		goto num;
 
 	/* See if the user provided enough room for all the data */
 	num_chunks = ntohs(ch->param_hdr.length) - sizeof(sctp_paramhdr_t);
 	if (len < num_chunks)
 		return -EINVAL;
 
-	len = num_chunks;
-	if (put_user(len, optlen))
+	if (copy_to_user(to, ch->chunks, num_chunks))
 		return -EFAULT;
+num:
+	len = sizeof(struct sctp_authchunks) + num_chunks;
+	if (put_user(len, optlen)) return -EFAULT;
 	if (put_user(num_chunks, &p->gauth_number_of_chunks))
 		return -EFAULT;
-	if (copy_to_user(to, ch->chunks, len))
-		return -EFAULT;
-
 	return 0;
 }
 
@@ -5282,13 +5320,16 @@ static int sctp_getsockopt_local_auth_chunks(struct sock *sk, int len,
 	struct sctp_authchunks val;
 	struct sctp_association *asoc;
 	struct sctp_chunks_param *ch;
-	u32    num_chunks;
+	u32    num_chunks = 0;
 	char __user *to;
 
-	if (len <= sizeof(struct sctp_authchunks))
+	if (!sctp_auth_enable)
+		return -EACCES;
+
+	if (len < sizeof(struct sctp_authchunks))
 		return -EINVAL;
 
-	if (copy_from_user(&val, p, sizeof(struct sctp_authchunks)))
+	if (copy_from_user(&val, optval, sizeof(struct sctp_authchunks)))
 		return -EFAULT;
 
 	to = p->gauth_chunks;
@@ -5301,17 +5342,21 @@ static int sctp_getsockopt_local_auth_chunks(struct sock *sk, int len,
 	else
 		ch = sctp_sk(sk)->ep->auth_chunk_list;
 
+	if (!ch)
+		goto num;
+
 	num_chunks = ntohs(ch->param_hdr.length) - sizeof(sctp_paramhdr_t);
-	if (len < num_chunks)
+	if (len < sizeof(struct sctp_authchunks) + num_chunks)
 		return -EINVAL;
 
-	len = num_chunks;
+	if (copy_to_user(to, ch->chunks, num_chunks))
+		return -EFAULT;
+num:
+	len = sizeof(struct sctp_authchunks) + num_chunks;
 	if (put_user(len, optlen))
 		return -EFAULT;
 	if (put_user(num_chunks, &p->gauth_number_of_chunks))
 		return -EFAULT;
-	if (copy_to_user(to, ch->chunks, len))
-		return -EFAULT;
 
 	return 0;
 }
 }