|
@@ -1063,10 +1063,11 @@ static unsigned int first_packet_length(struct sock *sk)
 	spin_unlock_bh(&rcvq->lock);
 
 	if (!skb_queue_empty(&list_kill)) {
-		lock_sock_bh(sk);
+		bool slow = lock_sock_fast(sk);
+
 		__skb_queue_purge(&list_kill);
 		sk_mem_reclaim_partial(sk);
-		unlock_sock_bh(sk);
+		unlock_sock_fast(sk, slow);
 	}
 	return res;
 }
|
@@ -1123,6 +1124,7 @@ int udp_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
 	int peeked;
 	int err;
 	int is_udplite = IS_UDPLITE(sk);
+	bool slow;
 
 	/*
 	 *	Check any passed addresses
|
@@ -1197,10 +1199,10 @@ out:
 	return err;
 
 csum_copy_err:
-	lock_sock_bh(sk);
+	slow = lock_sock_fast(sk);
 	if (!skb_kill_datagram(sk, skb, flags))
 		UDP_INC_STATS_USER(sock_net(sk), UDP_MIB_INERRORS, is_udplite);
-	unlock_sock_bh(sk);
+	unlock_sock_fast(sk, slow);
 
 	if (noblock)
 		return -EAGAIN;
|
@@ -1625,9 +1627,9 @@ int udp_rcv(struct sk_buff *skb)
 
 void udp_destroy_sock(struct sock *sk)
 {
-	lock_sock_bh(sk);
+	bool slow = lock_sock_fast(sk);
 	udp_flush_pending_frames(sk);
-	unlock_sock_bh(sk);
+	unlock_sock_fast(sk, slow);
 }
 
 /*