@@ -128,10 +128,10 @@ static int enic_wq_service(struct vnic_dev *vdev, struct cq_desc *cq_desc,
 		completed_index, enic_wq_free_buf,
 		opaque);
 
-	if (netif_queue_stopped(enic->netdev) &&
+	if (netif_tx_queue_stopped(netdev_get_tx_queue(enic->netdev, q_number)) &&
 	    vnic_wq_desc_avail(&enic->wq[q_number]) >=
 	    (MAX_SKB_FRAGS + ENIC_DESC_MAX_SPLITS))
-		netif_wake_queue(enic->netdev);
+		netif_wake_subqueue(enic->netdev, q_number);
 
 	spin_unlock(&enic->wq_lock[q_number]);
 
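Note on the hunk above: the single-queue netif_queue_stopped()/netif_wake_queue()
pair becomes its per-queue equivalent, keyed by the q_number the completion
queue reports. For reference, a minimal sketch of the wake-on-completion idiom,
assuming a hypothetical driver (struct my_priv, my_desc_avail() and
MY_WAKE_THRESH are illustrative names, not from this patch):

#include <linux/netdevice.h>
#include <linux/skbuff.h>

#define MY_WAKE_THRESH	(MAX_SKB_FRAGS + 1)	/* room for a worst-case skb */

struct my_priv {
	struct net_device *netdev;
	unsigned int first_wq_vector;	/* MSI-X vector of WQ 0 */
	/* ... per-queue ring state ... */
};

/* Hypothetical: how many descriptors queue qidx has free. */
static unsigned int my_desc_avail(struct my_priv *priv, unsigned int qidx);

/* TX completion path for queue qidx: wake only the subqueue that was
 * stopped, and only once a worst-case skb fits again.
 */
static void my_tx_complete(struct my_priv *priv, unsigned int qidx)
{
	struct netdev_queue *txq = netdev_get_tx_queue(priv->netdev, qidx);

	if (netif_tx_queue_stopped(txq) &&
	    my_desc_avail(priv, qidx) >= MY_WAKE_THRESH)
		netif_wake_subqueue(priv->netdev, qidx);
}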
@@ -292,10 +292,15 @@ static irqreturn_t enic_isr_msix_rq(int irq, void *data)
 static irqreturn_t enic_isr_msix_wq(int irq, void *data)
 {
 	struct enic *enic = data;
-	unsigned int cq = enic_cq_wq(enic, 0);
-	unsigned int intr = enic_msix_wq_intr(enic, 0);
+	unsigned int cq;
+	unsigned int intr;
 	unsigned int wq_work_to_do = -1; /* no limit */
 	unsigned int wq_work_done;
+	unsigned int wq_irq;
+
+	wq_irq = (u32)irq - enic->msix_entry[enic_msix_wq_intr(enic, 0)].vector;
+	cq = enic_cq_wq(enic, wq_irq);
+	intr = enic_msix_wq_intr(enic, wq_irq);
 
 	wq_work_done = vnic_cq_service(&enic->cq[cq],
 		wq_work_to_do, enic_wq_service, NULL);
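The handler recovers its queue index arithmetically: irq minus the MSI-X
vector assigned to WQ 0. This relies on the WQ vectors having been requested
as one contiguous block; drivers that cannot guarantee that usually carry
per-queue context through the handler's void *data cookie instead. The
inversion, sketched with the hypothetical my_priv from above:

#include <linux/interrupt.h>

/* WQ n is assumed to interrupt on first_wq_vector + n, so the queue
 * index can be recovered from the irq number.
 */
static unsigned int my_wq_from_irq(const struct my_priv *priv, int irq)
{
	return (unsigned int)irq - priv->first_wq_vector;
}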
@@ -511,14 +516,18 @@ static netdev_tx_t enic_hard_start_xmit(struct sk_buff *skb,
 	struct net_device *netdev)
 {
 	struct enic *enic = netdev_priv(netdev);
-	struct vnic_wq *wq = &enic->wq[0];
+	struct vnic_wq *wq;
 	unsigned long flags;
+	unsigned int txq_map;
 
 	if (skb->len <= 0) {
 		dev_kfree_skb(skb);
 		return NETDEV_TX_OK;
 	}
 
+	txq_map = skb_get_queue_mapping(skb) % enic->wq_count;
+	wq = &enic->wq[txq_map];
+
 	/* Non-TSO sends must fit within ENIC_NON_TSO_MAX_DESC descs,
 	 * which is very likely. In the off chance it's going to take
 	 * more than * ENIC_NON_TSO_MAX_DESC, linearize the skb.
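skb_get_queue_mapping() reads back the TX queue the core selected (via
ndo_select_queue if the driver supplies one, otherwise the stack's default
flow hash) before calling ndo_start_xmit(); the modulo clamps that mapping
to the number of work queues actually configured. As a sketch, with
hypothetical my_wq/num_wq fields:

#include <linux/skbuff.h>

/* Map the stack's queue choice onto a configured hardware WQ. */
static struct my_wq *my_pick_wq(struct my_priv *priv, struct sk_buff *skb)
{
	/* Clamp in case the mapping exceeds the configured ring count. */
	u16 qidx = skb_get_queue_mapping(skb) % priv->num_wq;

	return &priv->wq[qidx];
}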
@@ -531,23 +540,23 @@ static netdev_tx_t enic_hard_start_xmit(struct sk_buff *skb,
 		return NETDEV_TX_OK;
 	}
 
-	spin_lock_irqsave(&enic->wq_lock[0], flags);
+	spin_lock_irqsave(&enic->wq_lock[txq_map], flags);
 
 	if (vnic_wq_desc_avail(wq) <
 	    skb_shinfo(skb)->nr_frags + ENIC_DESC_MAX_SPLITS) {
-		netif_stop_queue(netdev);
+		netif_tx_stop_queue(netdev_get_tx_queue(netdev, txq_map));
 		/* This is a hard error, log it */
 		netdev_err(netdev, "BUG! Tx ring full when queue awake!\n");
-		spin_unlock_irqrestore(&enic->wq_lock[0], flags);
+		spin_unlock_irqrestore(&enic->wq_lock[txq_map], flags);
 		return NETDEV_TX_BUSY;
 	}
 
 	enic_queue_wq_skb(enic, wq, skb);
 
 	if (vnic_wq_desc_avail(wq) < MAX_SKB_FRAGS + ENIC_DESC_MAX_SPLITS)
-		netif_stop_queue(netdev);
+		netif_tx_stop_queue(netdev_get_tx_queue(netdev, txq_map));
 
-	spin_unlock_irqrestore(&enic->wq_lock[0], flags);
+	spin_unlock_irqrestore(&enic->wq_lock[txq_map], flags);
 
 	return NETDEV_TX_OK;
 }
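The subqueue is stopped under the same per-queue wq_lock the completion
path holds when it wakes the queue (see the first hunk), so a wake cannot
be lost between the availability check and the stop. The tail of a
hypothetical xmit path showing that pairing:

/* After posting the skb's descriptors, with the per-queue lock held:
 * stop this subqueue if another worst-case skb would no longer fit.
 * my_tx_complete() (sketched earlier) takes the same lock to wake it.
 */
static void my_xmit_tail(struct my_priv *priv, unsigned int qidx)
{
	if (my_desc_avail(priv, qidx) < MY_WAKE_THRESH)
		netif_tx_stop_queue(netdev_get_tx_queue(priv->netdev, qidx));
}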
@@ -1025,6 +1034,14 @@ static void enic_rq_indicate_buf(struct vnic_rq *rq,
 
 		skb_put(skb, bytes_written);
 		skb->protocol = eth_type_trans(skb, netdev);
+		skb_record_rx_queue(skb, q_number);
+		if (netdev->features & NETIF_F_RXHASH) {
+			skb->rxhash = rss_hash;
+			if (rss_type & (NIC_CFG_RSS_HASH_TYPE_TCP_IPV6_EX |
+					NIC_CFG_RSS_HASH_TYPE_TCP_IPV6 |
+					NIC_CFG_RSS_HASH_TYPE_TCP_IPV4))
+				skb->l4_rxhash = true;
+		}
 
 		if ((netdev->features & NETIF_F_RXCSUM) && !csum_not_calc) {
 			skb->csum = htons(checksum);
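skb->rxhash and skb->l4_rxhash are the receive-hash fields of this kernel
generation; kernels from v3.14 onward fold both into skb_set_hash() with an
explicit hash type. The equivalent on that later API, for comparison only
(not part of this patch):

#include <linux/skbuff.h>

/* Report the NIC's RSS hash, marking it L4 for TCP hash types. */
static void my_report_rxhash(struct sk_buff *skb, u32 hash, bool l4)
{
	skb_set_hash(skb, hash, l4 ? PKT_HASH_TYPE_L4 : PKT_HASH_TYPE_L3);
}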
@@ -1369,7 +1386,7 @@ static int enic_open(struct net_device *netdev)
 
 	enic_set_rx_mode(netdev);
 
-	netif_wake_queue(netdev);
+	netif_tx_wake_all_queues(netdev);
 
 	for (i = 0; i < enic->rq_count; i++)
 		napi_enable(&enic->napi[i]);
@@ -2032,7 +2049,8 @@ static int enic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 	 * instance data is initialized to zero.
 	 */
 
-	netdev = alloc_etherdev(sizeof(struct enic));
+	netdev = alloc_etherdev_mqs(sizeof(struct enic),
+				    ENIC_RQ_MAX, ENIC_WQ_MAX);
 	if (!netdev)
 		return -ENOMEM;
 
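alloc_etherdev_mqs() sizes the net_device for the hardware maxima up front;
the counts actually configured are announced later via
netif_set_real_num_tx_queues()/netif_set_real_num_rx_queues() (the -2198
hunk below). One review note: the prototype is
alloc_etherdev_mqs(sizeof_priv, txqs, rxqs), TX count first, so passing
ENIC_RQ_MAX first is only harmless if ENIC_RQ_MAX and ENIC_WQ_MAX are equal;
otherwise the two counts would be transposed. A sketch of the
allocate-max-then-trim pattern, with illustrative constants:

#include <linux/etherdevice.h>

#define MY_MAX_TXQ	8
#define MY_MAX_RXQ	8

/* Reserve queue structures for the hardware maximum now; the counts
 * really in use are reported once probing has negotiated them.
 */
static struct net_device *my_alloc_netdev(void)
{
	return alloc_etherdev_mqs(sizeof(struct my_priv),
				  MY_MAX_TXQ, MY_MAX_RXQ);	/* txqs, rxqs */
}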
@@ -2062,11 +2080,11 @@
 	pci_set_master(pdev);
 
 	/* Query PCI controller on system for DMA addressing
-	 * limitation for the device. Try 40-bit first, and
+	 * limitation for the device. Try 64-bit first, and
 	 * fail to 32-bit.
 	 */
 
-	err = pci_set_dma_mask(pdev, DMA_BIT_MASK(40));
+	err = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
 	if (err) {
 		err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
 		if (err) {
@@ -2080,10 +2098,10 @@ static int enic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 			goto err_out_release_regions;
 		}
 	} else {
-		err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(40));
+		err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
 		if (err) {
 			dev_err(dev, "Unable to obtain %u-bit DMA "
-				"for consistent allocations, aborting\n", 40);
+				"for consistent allocations, aborting\n", 64);
 			goto err_out_release_regions;
 		}
 		using_dac = 1;
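These two hunks lift the streaming and coherent DMA masks from 40 to 64 bits
while keeping the 32-bit fallback. Later kernels express the same dance in a
single call per attempt; a sketch on that API (dma_set_mask_and_coherent(),
not used by this patch):

#include <linux/dma-mapping.h>
#include <linux/pci.h>

/* Try 64-bit DMA first and fall back to 32-bit, setting the streaming
 * and coherent masks together.
 */
static int my_set_dma(struct pci_dev *pdev)
{
	int err;

	err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
	if (err)
		err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
	return err;
}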
@@ -2198,6 +2216,9 @@ static int enic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 		goto err_out_dev_close;
 	}
 
+	netif_set_real_num_tx_queues(netdev, enic->wq_count);
+	netif_set_real_num_rx_queues(netdev, enic->rq_count);
+
 	/* Setup notification timer, HW reset task, and wq locks
 	 */
 
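netif_set_real_num_tx_queues() and netif_set_real_num_rx_queues() both
return int and can fail (for example when the requested count exceeds what
was allocated); the hunk above discards those return values. A stricter
variant, sketched:

#include <linux/netdevice.h>

/* Announce the configured queue counts, propagating any failure. */
static int my_set_queue_counts(struct net_device *dev,
			       unsigned int ntx, unsigned int nrx)
{
	int err;

	err = netif_set_real_num_tx_queues(dev, ntx);
	if (err)
		return err;

	return netif_set_real_num_rx_queues(dev, nrx);
}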
@@ -2246,6 +2267,8 @@ static int enic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 	if (ENIC_SETTING(enic, TSO))
 		netdev->hw_features |= NETIF_F_TSO |
 			NETIF_F_TSO6 | NETIF_F_TSO_ECN;
+	if (ENIC_SETTING(enic, RSS))
+		netdev->hw_features |= NETIF_F_RXHASH;
 	if (ENIC_SETTING(enic, RXCSUM))
 		netdev->hw_features |= NETIF_F_RXCSUM;
 