udp: Drop socket lock for encapsulated packets

The socket lock is there to protect the normal UDP receive path.
UDP encapsulation sockets don't need that protection.  In fact
the locking is deadly for them: the encapsulated payload may itself
be a UDP packet, possibly with the same addresses, so delivering it
re-enters the receive path and tries to take the same lock again.
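
To make the failure mode concrete, here is a minimal userspace sketch
of the deadlock and of the fix applied in the patch below.  This is a
hedged illustration only: the names (rcv, encap_rcv, sock_lock) and
the use of pthread_spinlock_t are stand-ins for the kernel's receive
path and sk->sk_lock.slock, not kernel code.

/* Build: cc -pthread -o demo demo.c */
#include <pthread.h>
#include <stdio.h>

static pthread_spinlock_t sock_lock;	/* stands in for sk->sk_lock.slock */
static int depth;

static void rcv(void);

/* Decapsulation callback: the inner payload is itself a UDP packet
 * for the same addresses, so delivery re-enters rcv(). */
static void encap_rcv(void)
{
	if (++depth < 3)
		rcv();
}

static void rcv(void)
{
	pthread_spin_lock(&sock_lock);
	printf("receive at depth %d\n", depth);
	/* The fix: drop the lock across the callback.  Without this
	 * unlock/relock pair, the recursive rcv() call made by
	 * encap_rcv() would spin forever on sock_lock, since the
	 * spinlock is not recursive. */
	pthread_spin_unlock(&sock_lock);
	encap_rcv();
	pthread_spin_lock(&sock_lock);
	pthread_spin_unlock(&sock_lock);
}

int main(void)
{
	pthread_spin_init(&sock_lock, PTHREAD_PROCESS_PRIVATE);
	rcv();
	return 0;
}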

Also, the nested-lock annotation was copied over from TCP.  TCP
needs it because accept(2) spawns child sockets whose locks can be
taken while the listening socket's lock is already held.  That
situation simply doesn't arise for UDP, so I've removed it.
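
For reference, bh_lock_sock() and bh_lock_sock_nested() differ only
in a lockdep annotation.  In include/net/sock.h of this era they are
defined roughly as follows (quoted approximately; check the tree for
the exact text):

#define bh_lock_sock(__sk)	spin_lock(&((__sk)->sk_lock.slock))
#define bh_lock_sock_nested(__sk) \
				spin_lock_nested(&((__sk)->sk_lock.slock), \
						 SINGLE_DEPTH_NESTING)
#define bh_unlock_sock(__sk)	spin_unlock(&((__sk)->sk_lock.slock))

Note that spin_lock_nested() only annotates the acquisition for
lockdep; it does not make the lock recursive.  That is why the
encapsulation deadlock above needs the explicit unlock/relock fix
rather than a nesting annotation.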

Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
Signed-off-by: David S. Miller <davem@davemloft.net>
Herbert Xu, 16 years ago
Parent commit: d97106ea52
2 changed files with 7 additions and 5 deletions
  1. net/ipv4/udp.c (+4 −2)
  2. net/ipv6/udp.c (+3 −3)

net/ipv4/udp.c (+4 −2)

@@ -989,7 +989,9 @@ int udp_queue_rcv_skb(struct sock * sk, struct sk_buff *skb)
 		    up->encap_rcv != NULL) {
 			int ret;
 
+			bh_unlock_sock(sk);
 			ret = (*up->encap_rcv)(sk, skb);
+			bh_lock_sock(sk);
 			if (ret <= 0) {
 				UDP_INC_STATS_BH(sock_net(sk),
 						 UDP_MIB_INDATAGRAMS,
@@ -1092,7 +1094,7 @@ static int __udp4_lib_mcast_deliver(struct net *net, struct sk_buff *skb,
 			if (skb1) {
 				int ret = 0;
 
-				bh_lock_sock_nested(sk);
+				bh_lock_sock(sk);
 				if (!sock_owned_by_user(sk))
 					ret = udp_queue_rcv_skb(sk, skb1);
 				else
@@ -1194,7 +1196,7 @@ int __udp4_lib_rcv(struct sk_buff *skb, struct hlist_head udptable[],
 
 	if (sk != NULL) {
 		int ret = 0;
-		bh_lock_sock_nested(sk);
+		bh_lock_sock(sk);
 		if (!sock_owned_by_user(sk))
 			ret = udp_queue_rcv_skb(sk, skb);
 		else

net/ipv6/udp.c (+3 −3)

@@ -379,7 +379,7 @@ static int __udp6_lib_mcast_deliver(struct net *net, struct sk_buff *skb,
 					uh->source, saddr, dif))) {
 		struct sk_buff *buff = skb_clone(skb, GFP_ATOMIC);
 		if (buff) {
-			bh_lock_sock_nested(sk2);
+			bh_lock_sock(sk2);
 			if (!sock_owned_by_user(sk2))
 				udpv6_queue_rcv_skb(sk2, buff);
 			else
@@ -387,7 +387,7 @@ static int __udp6_lib_mcast_deliver(struct net *net, struct sk_buff *skb,
 			bh_unlock_sock(sk2);
 		}
 	}
-	bh_lock_sock_nested(sk);
+	bh_lock_sock(sk);
 	if (!sock_owned_by_user(sk))
 		udpv6_queue_rcv_skb(sk, skb);
 	else
@@ -508,7 +508,7 @@ int __udp6_lib_rcv(struct sk_buff *skb, struct hlist_head udptable[],
 
 	/* deliver */
 
-	bh_lock_sock_nested(sk);
+	bh_lock_sock(sk);
 	if (!sock_owned_by_user(sk))
 		udpv6_queue_rcv_skb(sk, skb);
 	else