@@ -96,8 +96,10 @@ static DEFINE_PCI_DEVICE_TABLE(qlge_pci_tbl) = {
MODULE_DEVICE_TABLE(pci, qlge_pci_tbl);
-static int ql_wol(struct ql_adapter *qdev);
-static void qlge_set_multicast_list(struct net_device *ndev);
+static int ql_wol(struct ql_adapter *);
+static void qlge_set_multicast_list(struct net_device *);
+static int ql_adapter_down(struct ql_adapter *);
+static int ql_adapter_up(struct ql_adapter *);
/* This hardware semaphore causes exclusive access to
* resources shared between the NIC driver, MPI firmware,
@@ -1464,6 +1466,29 @@ static void ql_categorize_rx_err(struct ql_adapter *qdev, u8 rx_err,
}
}
+/**
+ * ql_update_mac_hdr_len - helper routine to update the mac header length
+ * based on vlan tags if present
+ */
+static void ql_update_mac_hdr_len(struct ql_adapter *qdev,
+ struct ib_mac_iocb_rsp *ib_mac_rsp,
+ void *page, size_t *len)
+{
+ u16 *tags;
+
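+ /* When HW tag stripping is enabled the VLAN tag never reaches the
+  * buffer, so the MAC header length needs no adjustment. */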
+ if (qdev->ndev->features & NETIF_F_HW_VLAN_CTAG_RX)
+ return;
+ if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V) {
+ tags = (u16 *)page;
+ /* Look for stacked vlan tags in ethertype field */
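+ /* tags[6] is the outer ethertype at byte offset 12; tags[8] is the
+  * ethertype that follows a single VLAN tag, at byte offset 16. */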
+ if (tags[6] == ETH_P_8021Q &&
+ tags[8] == ETH_P_8021Q)
+ *len += 2 * VLAN_HLEN;
+ else
+ *len += VLAN_HLEN;
+ }
+}
+
/* Process an inbound completion from an rx ring. */
static void ql_process_mac_rx_gro_page(struct ql_adapter *qdev,
struct rx_ring *rx_ring,
@@ -1523,6 +1548,7 @@ static void ql_process_mac_rx_page(struct ql_adapter *qdev,
void *addr;
struct bq_desc *lbq_desc = ql_get_curr_lchunk(qdev, rx_ring);
struct napi_struct *napi = &rx_ring->napi;
+ size_t hlen = ETH_HLEN;
skb = netdev_alloc_skb(ndev, length);
if (!skb) {
@@ -1540,25 +1566,28 @@ static void ql_process_mac_rx_page(struct ql_adapter *qdev,
goto err_out;
}
+ /* Update the MAC header length */
+ ql_update_mac_hdr_len(qdev, ib_mac_rsp, addr, &hlen);
+
/* The max framesize filter on this chip is set higher than
* MTU since FCoE uses 2k frames.
*/
- if (skb->len > ndev->mtu + ETH_HLEN) {
+ if (skb->len > ndev->mtu + hlen) {
netif_err(qdev, drv, qdev->ndev,
"Segment too small, dropping.\n");
rx_ring->rx_dropped++;
goto err_out;
}
- memcpy(skb_put(skb, ETH_HLEN), addr, ETH_HLEN);
+ memcpy(skb_put(skb, hlen), addr, hlen);
netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
"%d bytes of headers and data in large. Chain page to new skb and pull tail.\n",
length);
skb_fill_page_desc(skb, 0, lbq_desc->p.pg_chunk.page,
- lbq_desc->p.pg_chunk.offset+ETH_HLEN,
- length-ETH_HLEN);
- skb->len += length-ETH_HLEN;
- skb->data_len += length-ETH_HLEN;
- skb->truesize += length-ETH_HLEN;
+ lbq_desc->p.pg_chunk.offset + hlen,
+ length - hlen);
+ skb->len += length - hlen;
+ skb->data_len += length - hlen;
+ skb->truesize += length - hlen;
rx_ring->rx_packets++;
rx_ring->rx_bytes += skb->len;
@@ -1576,7 +1605,7 @@ static void ql_process_mac_rx_page(struct ql_adapter *qdev,
(ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_V4)) {
/* Unfragmented ipv4 UDP frame. */
struct iphdr *iph =
- (struct iphdr *) ((u8 *)addr + ETH_HLEN);
+ (struct iphdr *)((u8 *)addr + hlen);
if (!(iph->frag_off &
htons(IP_MF|IP_OFFSET))) {
skb->ip_summed = CHECKSUM_UNNECESSARY;
@@ -1726,7 +1755,8 @@ static struct sk_buff *ql_build_rx_skb(struct ql_adapter *qdev,
struct bq_desc *sbq_desc;
struct sk_buff *skb = NULL;
u32 length = le32_to_cpu(ib_mac_rsp->data_len);
- u32 hdr_len = le32_to_cpu(ib_mac_rsp->hdr_len);
+ u32 hdr_len = le32_to_cpu(ib_mac_rsp->hdr_len);
+ size_t hlen = ETH_HLEN;
/*
* Handle the header buffer if present.
@@ -1853,9 +1883,10 @@ static struct sk_buff *ql_build_rx_skb(struct ql_adapter *qdev,
skb->data_len += length;
skb->truesize += length;
length -= length;
- __pskb_pull_tail(skb,
- (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V) ?
- VLAN_ETH_HLEN : ETH_HLEN);
+ ql_update_mac_hdr_len(qdev, ib_mac_rsp,
+ lbq_desc->p.pg_chunk.va,
+ &hlen);
+ __pskb_pull_tail(skb, hlen);
}
} else {
/*
@@ -1910,8 +1941,9 @@ static struct sk_buff *ql_build_rx_skb(struct ql_adapter *qdev,
length -= size;
i++;
}
- __pskb_pull_tail(skb, (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V) ?
- VLAN_ETH_HLEN : ETH_HLEN);
+ ql_update_mac_hdr_len(qdev, ib_mac_rsp, lbq_desc->p.pg_chunk.va,
+ &hlen);
+ __pskb_pull_tail(skb, hlen);
}
return skb;
}
@@ -2003,7 +2035,7 @@ static void ql_process_mac_split_rx_intr(struct ql_adapter *qdev,
rx_ring->rx_packets++;
rx_ring->rx_bytes += skb->len;
skb_record_rx_queue(skb, rx_ring->cq_id);
- if ((ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V) && (vlan_id != 0))
+ if (vlan_id != 0xffff)
__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_id);
if (skb->ip_summed == CHECKSUM_UNNECESSARY)
napi_gro_receive(&rx_ring->napi, skb);
@@ -2017,7 +2049,8 @@ static unsigned long ql_process_mac_rx_intr(struct ql_adapter *qdev,
struct ib_mac_iocb_rsp *ib_mac_rsp)
{
u32 length = le32_to_cpu(ib_mac_rsp->data_len);
- u16 vlan_id = (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V) ?
+ u16 vlan_id = ((ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V) &&
+ (qdev->ndev->features & NETIF_F_HW_VLAN_CTAG_RX)) ?
((le16_to_cpu(ib_mac_rsp->vlan_id) &
IB_MAC_IOCB_RSP_VLAN_MASK)) : 0xffff;
@@ -2310,9 +2343,39 @@ static void qlge_vlan_mode(struct net_device *ndev, netdev_features_t features)
}
}
+/**
+ * qlge_update_hw_vlan_features - helper routine to reinitialize the adapter
+ * based on the features to enable/disable hardware vlan accel
+ */
+static int qlge_update_hw_vlan_features(struct net_device *ndev,
+ netdev_features_t features)
+{
+ struct ql_adapter *qdev = netdev_priv(ndev);
+ int status = 0;
+
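+ /* The VLAN behavior is programmed in ql_adapter_initialize(), so
+  * cycle the adapter down and back up to apply the new features. */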
+ status = ql_adapter_down(qdev);
+ if (status) {
+ netif_err(qdev, link, qdev->ndev,
+ "Failed to bring down the adapter\n");
+ return status;
+ }
+
+ /* update the features with the recent change */
+ ndev->features = features;
+
+ status = ql_adapter_up(qdev);
+ if (status) {
+ netif_err(qdev, link, qdev->ndev,
+ "Failed to bring up the adapter\n");
+ return status;
+ }
+ return status;
+}
+
static netdev_features_t qlge_fix_features(struct net_device *ndev,
netdev_features_t features)
{
+ int err;
/*
* Since there is no support for separate rx/tx vlan accel
* enable/disable make sure tx flag is always in same state as rx.
@@ -2322,6 +2385,11 @@ static netdev_features_t qlge_fix_features(struct net_device *ndev,
else
features &= ~NETIF_F_HW_VLAN_CTAG_TX;
+ /* Update the behavior of vlan accel in the adapter */
+ err = qlge_update_hw_vlan_features(ndev, features);
+ if (err)
+ return err;
+
return features;
}
@@ -3704,8 +3772,12 @@ static int ql_adapter_initialize(struct ql_adapter *qdev)
ql_write32(qdev, SYS, mask | value);
/* Set the default queue, and VLAN behavior. */
- value = NIC_RCV_CFG_DFQ | NIC_RCV_CFG_RV;
- mask = NIC_RCV_CFG_DFQ_MASK | (NIC_RCV_CFG_RV << 16);
+ value = NIC_RCV_CFG_DFQ;
+ mask = NIC_RCV_CFG_DFQ_MASK;
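+ /* Request HW VLAN handling only when tag stripping (CTAG_RX) is enabled. */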
+ if (qdev->ndev->features & NETIF_F_HW_VLAN_CTAG_RX) {
+ value |= NIC_RCV_CFG_RV;
+ mask |= (NIC_RCV_CFG_RV << 16);
+ }
ql_write32(qdev, NIC_RCV_CFG, (mask | value));
/* Set the MPI interrupt to enabled. */
@@ -4692,11 +4764,15 @@ static int qlge_probe(struct pci_dev *pdev,
qdev = netdev_priv(ndev);
SET_NETDEV_DEV(ndev, &pdev->dev);
- ndev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM |
- NETIF_F_TSO | NETIF_F_TSO_ECN |
- NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_RXCSUM;
- ndev->features = ndev->hw_features |
- NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_FILTER;
+ ndev->hw_features = NETIF_F_SG |
+ NETIF_F_IP_CSUM |
+ NETIF_F_TSO |
+ NETIF_F_TSO_ECN |
+ NETIF_F_HW_VLAN_CTAG_TX |
+ NETIF_F_HW_VLAN_CTAG_RX |
+ NETIF_F_HW_VLAN_CTAG_FILTER |
+ NETIF_F_RXCSUM;
+ ndev->features = ndev->hw_features;
ndev->vlan_features = ndev->hw_features;
if (test_bit(QL_DMA64, &qdev->flags))