|
@@ -1440,7 +1440,7 @@ static int ixgbe_setup_desc_rings(struct ixgbe_adapter *adapter)
|
|
|
goto err_nomem;
|
|
|
}
|
|
|
|
|
|
- tx_ring->size = tx_ring->count * sizeof(struct ixgbe_legacy_tx_desc);
|
|
|
+ tx_ring->size = tx_ring->count * sizeof(union ixgbe_adv_tx_desc);
|
|
|
tx_ring->size = ALIGN(tx_ring->size, 4096);
|
|
|
if (!(tx_ring->desc = pci_alloc_consistent(pdev, tx_ring->size,
|
|
|
&tx_ring->dma))) {
|
|
@@ -1454,7 +1454,7 @@ static int ixgbe_setup_desc_rings(struct ixgbe_adapter *adapter)
|
|
|
IXGBE_WRITE_REG(&adapter->hw, IXGBE_TDBAH(0),
|
|
|
((u64) tx_ring->dma >> 32));
|
|
|
IXGBE_WRITE_REG(&adapter->hw, IXGBE_TDLEN(0),
|
|
|
- tx_ring->count * sizeof(struct ixgbe_legacy_tx_desc));
|
|
|
+ tx_ring->count * sizeof(union ixgbe_adv_tx_desc));
|
|
|
IXGBE_WRITE_REG(&adapter->hw, IXGBE_TDH(0), 0);
|
|
|
IXGBE_WRITE_REG(&adapter->hw, IXGBE_TDT(0), 0);
|
|
|
|
|
@@ -1472,7 +1472,7 @@ static int ixgbe_setup_desc_rings(struct ixgbe_adapter *adapter)
|
|
|
IXGBE_WRITE_REG(&adapter->hw, IXGBE_TXDCTL(0), reg_data);
|
|
|
|
|
|
for (i = 0; i < tx_ring->count; i++) {
|
|
|
- struct ixgbe_legacy_tx_desc *desc = IXGBE_TX_DESC(*tx_ring, i);
|
|
|
+ union ixgbe_adv_tx_desc *desc = IXGBE_TX_DESC_ADV(*tx_ring, i);
|
|
|
struct sk_buff *skb;
|
|
|
unsigned int size = 1024;
|
|
|
|
|
@@ -1486,13 +1486,18 @@ static int ixgbe_setup_desc_rings(struct ixgbe_adapter *adapter)
|
|
|
tx_ring->tx_buffer_info[i].length = skb->len;
|
|
|
tx_ring->tx_buffer_info[i].dma =
|
|
|
pci_map_single(pdev, skb->data, skb->len,
|
|
|
- PCI_DMA_TODEVICE);
|
|
|
- desc->buffer_addr = cpu_to_le64(tx_ring->tx_buffer_info[i].dma);
|
|
|
- desc->lower.data = cpu_to_le32(skb->len);
|
|
|
- desc->lower.data |= cpu_to_le32(IXGBE_TXD_CMD_EOP |
|
|
|
- IXGBE_TXD_CMD_IFCS |
|
|
|
- IXGBE_TXD_CMD_RS);
|
|
|
- desc->upper.data = 0;
|
|
|
+ PCI_DMA_TODEVICE);
|
|
|
+ desc->read.buffer_addr =
|
|
|
+ cpu_to_le64(tx_ring->tx_buffer_info[i].dma);
|
|
|
+ desc->read.cmd_type_len = cpu_to_le32(skb->len);
|
|
|
+ desc->read.cmd_type_len |= cpu_to_le32(IXGBE_TXD_CMD_EOP |
|
|
|
+ IXGBE_TXD_CMD_IFCS |
|
|
|
+ IXGBE_TXD_CMD_RS);
|
|
|
+ desc->read.olinfo_status = 0;
|
|
|
+ if (adapter->hw.mac.type == ixgbe_mac_82599EB)
|
|
|
+ desc->read.olinfo_status |=
|
|
|
+ cpu_to_le32(skb->len << IXGBE_ADVTXD_PAYLEN_SHIFT);
|
|
|
+
|
|
|
}
|
|
|
|
|
|
/* Setup Rx Descriptor ring and Rx buffers */
|
|
@@ -1508,7 +1513,7 @@ static int ixgbe_setup_desc_rings(struct ixgbe_adapter *adapter)
|
|
|
goto err_nomem;
|
|
|
}
|
|
|
|
|
|
- rx_ring->size = rx_ring->count * sizeof(struct ixgbe_legacy_rx_desc);
|
|
|
+ rx_ring->size = rx_ring->count * sizeof(union ixgbe_adv_rx_desc);
|
|
|
rx_ring->size = ALIGN(rx_ring->size, 4096);
|
|
|
if (!(rx_ring->desc = pci_alloc_consistent(pdev, rx_ring->size,
|
|
|
&rx_ring->dma))) {
|
|
@@ -1566,8 +1571,8 @@ static int ixgbe_setup_desc_rings(struct ixgbe_adapter *adapter)
|
|
|
IXGBE_WRITE_REG(&adapter->hw, IXGBE_RXCTRL, rctl);
|
|
|
|
|
|
for (i = 0; i < rx_ring->count; i++) {
|
|
|
- struct ixgbe_legacy_rx_desc *rx_desc =
|
|
|
- IXGBE_RX_DESC(*rx_ring, i);
|
|
|
+ union ixgbe_adv_rx_desc *rx_desc =
|
|
|
+ IXGBE_RX_DESC_ADV(*rx_ring, i);
|
|
|
struct sk_buff *skb;
|
|
|
|
|
|
skb = alloc_skb(IXGBE_RXBUFFER_2048 + NET_IP_ALIGN, GFP_KERNEL);
|
|
@@ -1580,7 +1585,7 @@ static int ixgbe_setup_desc_rings(struct ixgbe_adapter *adapter)
|
|
|
rx_ring->rx_buffer_info[i].dma =
|
|
|
pci_map_single(pdev, skb->data, IXGBE_RXBUFFER_2048,
|
|
|
PCI_DMA_FROMDEVICE);
|
|
|
- rx_desc->buffer_addr =
|
|
|
+ rx_desc->read.pkt_addr =
|
|
|
cpu_to_le64(rx_ring->rx_buffer_info[i].dma);
|
|
|
memset(skb->data, 0x00, skb->len);
|
|
|
}
|