@@ -39,6 +39,8 @@ static struct sk_buff_head skb_pool;
 
 static atomic_t trapped;
 
 #define USEC_PER_POLL	50
+#define NETPOLL_RX_ENABLED  1
+#define NETPOLL_RX_DROP     2
 
 #define MAX_SKB_SIZE \
 	(MAX_UDP_CHUNK + sizeof(struct udphdr) + \
@@ -126,11 +128,13 @@ static int poll_one_napi(struct netpoll_info *npinfo,
 	if (!test_bit(NAPI_STATE_SCHED, &napi->state))
 		return budget;
 
+	npinfo->rx_flags |= NETPOLL_RX_DROP;
 	atomic_inc(&trapped);
 
 	work = napi->poll(napi, budget);
 
 	atomic_dec(&trapped);
+	npinfo->rx_flags &= ~NETPOLL_RX_DROP;
 
 	return budget - work;
 }
@@ -472,7 +476,7 @@ int __netpoll_rx(struct sk_buff *skb)
 	if (skb->dev->type != ARPHRD_ETHER)
 		goto out;
 
-	/* if receive ARP during middle of NAPI poll, then queue */
+	/* check if netpoll clients need ARP */
 	if (skb->protocol == htons(ETH_P_ARP) &&
 	    atomic_read(&trapped)) {
 		skb_queue_tail(&npi->arp_tx, skb);
@@ -534,9 +538,6 @@ int __netpoll_rx(struct sk_buff *skb)
 	return 1;
 
 out:
-	/* If packet received while already in poll then just
-	 * silently drop.
-	 */
 	if (atomic_read(&trapped)) {
 		kfree_skb(skb);
 		return 1;
@@ -675,6 +676,7 @@ int netpoll_setup(struct netpoll *np)
 			goto release;
 		}
 
+		npinfo->rx_flags = 0;
 		npinfo->rx_np = NULL;
 
 		spin_lock_init(&npinfo->rx_lock);
@@ -756,6 +758,7 @@ int netpoll_setup(struct netpoll *np)
 
 	if (np->rx_hook) {
 		spin_lock_irqsave(&npinfo->rx_lock, flags);
+		npinfo->rx_flags |= NETPOLL_RX_ENABLED;
 		npinfo->rx_np = np;
 		spin_unlock_irqrestore(&npinfo->rx_lock, flags);
 	}
@@ -797,6 +800,7 @@ void netpoll_cleanup(struct netpoll *np)
 		if (npinfo->rx_np == np) {
 			spin_lock_irqsave(&npinfo->rx_lock, flags);
 			npinfo->rx_np = NULL;
+			npinfo->rx_flags &= ~NETPOLL_RX_ENABLED;
 			spin_unlock_irqrestore(&npinfo->rx_lock, flags);
 		}