@@ -524,31 +524,6 @@ static inline unsigned int run_filter(struct sk_buff *skb, struct sock *sk,
 	return res;
 }
 
-/*
- * If we've lost frames since the last time we queued one to the
- * sk_receive_queue, we need to record it here.
- * This must be called under the protection of the socket lock
- * to prevent racing with other softirqs and user space
- */
-static inline void record_packet_gap(struct sk_buff *skb,
-					struct packet_sock *po)
-{
-	/*
-	 * We overload the mark field here, since we're about
-	 * to enqueue to a receive queue and no body else will
-	 * use this field at this point
-	 */
-	skb->mark = po->stats.tp_gap;
-	po->stats.tp_gap = 0;
-	return;
-
-}
-
-static inline __u32 check_packet_gap(struct sk_buff *skb)
-{
-	return skb->mark;
-}
-
 /*
    This function makes lazy skb cloning in hope that most of packets
    are discarded by BPF.
@@ -652,7 +627,6 @@ static int packet_rcv(struct sk_buff *skb, struct net_device *dev,
 
 	spin_lock(&sk->sk_receive_queue.lock);
 	po->stats.tp_packets++;
-	record_packet_gap(skb, po);
 	__skb_queue_tail(&sk->sk_receive_queue, skb);
 	spin_unlock(&sk->sk_receive_queue.lock);
 	sk->sk_data_ready(sk, skb->len);
@@ -661,7 +635,6 @@ static int packet_rcv(struct sk_buff *skb, struct net_device *dev,
 drop_n_acct:
 	spin_lock(&sk->sk_receive_queue.lock);
 	po->stats.tp_drops++;
-	po->stats.tp_gap++;
 	spin_unlock(&sk->sk_receive_queue.lock);
 
 drop_n_restore:
@@ -839,7 +812,6 @@ drop:
 
 ring_is_full:
 	po->stats.tp_drops++;
-	po->stats.tp_gap++;
 	spin_unlock(&sk->sk_receive_queue.lock);
 
 	sk->sk_data_ready(sk, 0);
@@ -1449,7 +1421,6 @@ static int packet_recvmsg(struct kiocb *iocb, struct socket *sock,
 	struct sk_buff *skb;
 	int copied, err;
 	struct sockaddr_ll *sll;
-	__u32 gap;
 
 	err = -EINVAL;
 	if (flags & ~(MSG_PEEK|MSG_DONTWAIT|MSG_TRUNC|MSG_CMSG_COMPAT))
@@ -1528,10 +1499,6 @@ static int packet_recvmsg(struct kiocb *iocb, struct socket *sock,
 		put_cmsg(msg, SOL_PACKET, PACKET_AUXDATA, sizeof(aux), &aux);
 	}
 
-	gap = check_packet_gap(skb);
-	if (gap)
-		put_cmsg(msg, SOL_PACKET, PACKET_GAPDATA, sizeof(__u32), &gap);
-
 	/*
 	 * Free or return the buffer as appropriate. Again this
 	 * hides all the races and re-entrancy issues from us.
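
For reference, the userspace-visible interface removed here is the gap count
delivered to applications as a SOL_PACKET/PACKET_GAPDATA control message on
recvmsg(). A minimal illustrative sketch of a consumer follows; it assumes
pre-revert kernel headers that still define PACKET_GAPDATA, and it is not
part of the patch itself.

	#include <stdio.h>
	#include <string.h>
	#include <sys/socket.h>
	#include <linux/if_packet.h>

	/*
	 * Read one frame from a PF_PACKET socket (created elsewhere, e.g.
	 * with socket(PF_PACKET, SOCK_RAW, htons(ETH_P_ALL))) and report
	 * any gap (frames dropped since the previous delivered frame)
	 * carried in the PACKET_GAPDATA control message.
	 */
	static void recv_one_with_gap(int fd)
	{
		char frame[2048];
		char cbuf[CMSG_SPACE(sizeof(__u32))];
		struct iovec iov = { .iov_base = frame, .iov_len = sizeof(frame) };
		struct msghdr msg = {
			.msg_iov	= &iov,
			.msg_iovlen	= 1,
			.msg_control	= cbuf,
			.msg_controllen	= sizeof(cbuf),
		};
		struct cmsghdr *cmsg;

		if (recvmsg(fd, &msg, 0) < 0)
			return;

		for (cmsg = CMSG_FIRSTHDR(&msg); cmsg;
		     cmsg = CMSG_NXTHDR(&msg, cmsg)) {
			if (cmsg->cmsg_level == SOL_PACKET &&
			    cmsg->cmsg_type == PACKET_GAPDATA) {
				__u32 gap;

				memcpy(&gap, CMSG_DATA(cmsg), sizeof(gap));
				printf("%u frame(s) lost before this one\n", gap);
			}
		}
	}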