@@ -888,6 +888,13 @@ static void netbk_tx_err(struct xenvif *vif,
 	xenvif_put(vif);
 }
 
+static void netbk_fatal_tx_err(struct xenvif *vif)
+{
+	netdev_err(vif->dev, "fatal error; disabling device\n");
+	xenvif_carrier_off(vif);
+	xenvif_put(vif);
+}
+
 static int netbk_count_requests(struct xenvif *vif,
 				struct xen_netif_tx_request *first,
 				struct xen_netif_tx_request *txp,
@@ -901,19 +908,22 @@ static int netbk_count_requests(struct xenvif *vif,
 
 	do {
 		if (frags >= work_to_do) {
-			netdev_dbg(vif->dev, "Need more frags\n");
+			netdev_err(vif->dev, "Need more frags\n");
+			netbk_fatal_tx_err(vif);
 			return -frags;
 		}
 
 		if (unlikely(frags >= MAX_SKB_FRAGS)) {
-			netdev_dbg(vif->dev, "Too many frags\n");
+			netdev_err(vif->dev, "Too many frags\n");
+			netbk_fatal_tx_err(vif);
 			return -frags;
 		}
 
 		memcpy(txp, RING_GET_REQUEST(&vif->tx, cons + frags),
 		       sizeof(*txp));
 		if (txp->size > first->size) {
-			netdev_dbg(vif->dev, "Frags galore\n");
+			netdev_err(vif->dev, "Frag is bigger than frame.\n");
+			netbk_fatal_tx_err(vif);
 			return -frags;
 		}
 
@@ -921,8 +931,9 @@ static int netbk_count_requests(struct xenvif *vif,
 		frags++;
 
 		if (unlikely((txp->offset + txp->size) > PAGE_SIZE)) {
-			netdev_dbg(vif->dev, "txp->offset: %x, size: %u\n",
+			netdev_err(vif->dev, "txp->offset: %x, size: %u\n",
 				   txp->offset, txp->size);
+			netbk_fatal_tx_err(vif);
 			return -frags;
 		}
 	} while ((txp++)->flags & XEN_NETTXF_more_data);
@@ -1095,7 +1106,8 @@ static int xen_netbk_get_extras(struct xenvif *vif,
 
 	do {
 		if (unlikely(work_to_do-- <= 0)) {
-			netdev_dbg(vif->dev, "Missing extra info\n");
+			netdev_err(vif->dev, "Missing extra info\n");
+			netbk_fatal_tx_err(vif);
 			return -EBADR;
 		}
 
@@ -1104,8 +1116,9 @@ static int xen_netbk_get_extras(struct xenvif *vif,
 		if (unlikely(!extra.type ||
 			     extra.type >= XEN_NETIF_EXTRA_TYPE_MAX)) {
 			vif->tx.req_cons = ++cons;
-			netdev_dbg(vif->dev,
+			netdev_err(vif->dev,
 				   "Invalid extra type: %d\n", extra.type);
+			netbk_fatal_tx_err(vif);
 			return -EINVAL;
 		}
 
@@ -1121,13 +1134,15 @@ static int netbk_set_skb_gso(struct xenvif *vif,
 			     struct xen_netif_extra_info *gso)
 {
 	if (!gso->u.gso.size) {
-		netdev_dbg(vif->dev, "GSO size must not be zero.\n");
+		netdev_err(vif->dev, "GSO size must not be zero.\n");
+		netbk_fatal_tx_err(vif);
 		return -EINVAL;
 	}
 
 	/* Currently only TCPv4 S.O. is supported. */
 	if (gso->u.gso.type != XEN_NETIF_GSO_TYPE_TCPV4) {
-		netdev_dbg(vif->dev, "Bad GSO type %d.\n", gso->u.gso.type);
+		netdev_err(vif->dev, "Bad GSO type %d.\n", gso->u.gso.type);
+		netbk_fatal_tx_err(vif);
 		return -EINVAL;
 	}
 
@@ -1264,9 +1279,25 @@ static unsigned xen_netbk_tx_build_gops(struct xen_netbk *netbk)
 
 		/* Get a netif from the list with work to do. */
 		vif = poll_net_schedule_list(netbk);
+		/* This can sometimes happen because the test of
+		 * list_empty(net_schedule_list) at the top of the
+		 * loop is unlocked. Just go back and have another
+		 * look.
+		 */
 		if (!vif)
 			continue;
 
+		if (vif->tx.sring->req_prod - vif->tx.req_cons >
+		    XEN_NETIF_TX_RING_SIZE) {
+			netdev_err(vif->dev,
+				   "Impossible number of requests. "
+				   "req_prod %d, req_cons %d, size %ld\n",
+				   vif->tx.sring->req_prod, vif->tx.req_cons,
+				   XEN_NETIF_TX_RING_SIZE);
+			netbk_fatal_tx_err(vif);
+			continue;
+		}
+
 		RING_FINAL_CHECK_FOR_REQUESTS(&vif->tx, work_to_do);
 		if (!work_to_do) {
 			xenvif_put(vif);
@@ -1294,17 +1325,14 @@ static unsigned xen_netbk_tx_build_gops(struct xen_netbk *netbk)
 			work_to_do = xen_netbk_get_extras(vif, extras,
 							  work_to_do);
 			idx = vif->tx.req_cons;
-			if (unlikely(work_to_do < 0)) {
-				netbk_tx_err(vif, &txreq, idx);
+			if (unlikely(work_to_do < 0))
 				continue;
-			}
 		}
 
 		ret = netbk_count_requests(vif, &txreq, txfrags, work_to_do);
-		if (unlikely(ret < 0)) {
-			netbk_tx_err(vif, &txreq, idx - ret);
+		if (unlikely(ret < 0))
 			continue;
-		}
+
 		idx += ret;
 
 		if (unlikely(txreq.size < ETH_HLEN)) {
@@ -1316,11 +1344,11 @@ static unsigned xen_netbk_tx_build_gops(struct xen_netbk *netbk)
 
 		/* No crossing a page as the payload mustn't fragment. */
 		if (unlikely((txreq.offset + txreq.size) > PAGE_SIZE)) {
-			netdev_dbg(vif->dev,
+			netdev_err(vif->dev,
 				   "txreq.offset: %x, size: %u, end: %lu\n",
 				   txreq.offset, txreq.size,
 				   (txreq.offset&~PAGE_MASK) + txreq.size);
-			netbk_tx_err(vif, &txreq, idx);
+			netbk_fatal_tx_err(vif);
 			continue;
 		}
 
@@ -1348,8 +1376,8 @@ static unsigned xen_netbk_tx_build_gops(struct xen_netbk *netbk)
 			gso = &extras[XEN_NETIF_EXTRA_TYPE_GSO - 1];
 
 			if (netbk_set_skb_gso(vif, skb, gso)) {
+				/* Failure in netbk_set_skb_gso is fatal. */
 				kfree_skb(skb);
-				netbk_tx_err(vif, &txreq, idx);
 				continue;
 			}
 		}
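
The new req_prod/req_cons sanity check depends on unsigned index arithmetic, so it stays correct even when the 32-bit ring counters wrap. Below is a minimal standalone sketch of that wrap-safe test, not part of the patch; RING_SIZE and ring_is_bogus are illustrative names standing in for XEN_NETIF_TX_RING_SIZE and the inline check in xen_netbk_tx_build_gops.

#include <stdint.h>
#include <stdio.h>

/* Illustrative ring size; the driver uses XEN_NETIF_TX_RING_SIZE. */
#define RING_SIZE 256u

/*
 * A well-behaved frontend can never have more than RING_SIZE requests
 * outstanding, so (req_prod - req_cons) > RING_SIZE means the shared
 * indices are garbage.  Unsigned subtraction keeps the comparison
 * correct even after the counters wrap around zero.
 */
static int ring_is_bogus(uint32_t req_prod, uint32_t req_cons)
{
	return (uint32_t)(req_prod - req_cons) > RING_SIZE;
}

int main(void)
{
	printf("%d\n", ring_is_bogus(103, 100));       /* 0: 3 requests outstanding */
	printf("%d\n", ring_is_bogus(1, 0xfffffffeu)); /* 0: still 3, across the wrap */
	printf("%d\n", ring_is_bogus(100000, 100));    /* 1: impossible, disable the vif */
	return 0;
}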