@@ -562,7 +562,7 @@ rx_status_loop:
 
 		skb_reserve(new_skb, RX_OFFSET);
 
-		pci_unmap_single(cp->pdev, mapping,
+		dma_unmap_single(&cp->pdev->dev, mapping,
 				 buflen, PCI_DMA_FROMDEVICE);
 
 		/* Handle checksum offloading for incoming packets. */
@@ -573,7 +573,7 @@ rx_status_loop:
 
 		skb_put(skb, len);
 
-		mapping = pci_map_single(cp->pdev, new_skb->data, buflen,
+		mapping = dma_map_single(&cp->pdev->dev, new_skb->data, buflen,
 					 PCI_DMA_FROMDEVICE);
 		cp->rx_skb[rx_tail] = new_skb;
 
@@ -701,7 +701,7 @@ static void cp_tx (struct cp_private *cp)
 		skb = cp->tx_skb[tx_tail];
 		BUG_ON(!skb);
 
-		pci_unmap_single(cp->pdev, le64_to_cpu(txd->addr),
+		dma_unmap_single(&cp->pdev->dev, le64_to_cpu(txd->addr),
 				 le32_to_cpu(txd->opts1) & 0xffff,
 				 PCI_DMA_TODEVICE);
 
@@ -779,7 +779,7 @@ static int cp_start_xmit (struct sk_buff *skb, struct net_device *dev)
 		dma_addr_t mapping;
 
 		len = skb->len;
-		mapping = pci_map_single(cp->pdev, skb->data, len, PCI_DMA_TODEVICE);
+		mapping = dma_map_single(&cp->pdev->dev, skb->data, len, PCI_DMA_TODEVICE);
 		CP_VLAN_TX_TAG(txd, vlan_tag);
 		txd->addr = cpu_to_le64(mapping);
 		wmb();
@@ -815,7 +815,7 @@ static int cp_start_xmit (struct sk_buff *skb, struct net_device *dev)
 		 */
 		first_eor = eor;
 		first_len = skb_headlen(skb);
-		first_mapping = pci_map_single(cp->pdev, skb->data,
+		first_mapping = dma_map_single(&cp->pdev->dev, skb->data,
 					       first_len, PCI_DMA_TODEVICE);
 		cp->tx_skb[entry] = skb;
 		entry = NEXT_TX(entry);
@@ -827,7 +827,7 @@ static int cp_start_xmit (struct sk_buff *skb, struct net_device *dev)
 			dma_addr_t mapping;
 
 			len = this_frag->size;
-			mapping = pci_map_single(cp->pdev,
+			mapping = dma_map_single(&cp->pdev->dev,
 						 ((void *) page_address(this_frag->page) +
 						  this_frag->page_offset),
 						 len, PCI_DMA_TODEVICE);
@@ -1066,8 +1066,8 @@ static int cp_refill_rx (struct cp_private *cp)
 
 		skb_reserve(skb, RX_OFFSET);
 
-		mapping = pci_map_single(cp->pdev, skb->data, cp->rx_buf_sz,
-					 PCI_DMA_FROMDEVICE);
+		mapping = dma_map_single(&cp->pdev->dev, skb->data,
+					 cp->rx_buf_sz, PCI_DMA_FROMDEVICE);
 		cp->rx_skb[i] = skb;
 
 		cp->rx_ring[i].opts2 = 0;
@@ -1107,7 +1107,8 @@ static int cp_alloc_rings (struct cp_private *cp)
 {
 	void *mem;
 
-	mem = pci_alloc_consistent(cp->pdev, CP_RING_BYTES, &cp->ring_dma);
+	mem = dma_alloc_coherent(&cp->pdev->dev, CP_RING_BYTES,
+				 &cp->ring_dma, GFP_KERNEL);
 	if (!mem)
 		return -ENOMEM;
 
@@ -1125,7 +1126,7 @@ static void cp_clean_rings (struct cp_private *cp)
 	for (i = 0; i < CP_RX_RING_SIZE; i++) {
 		if (cp->rx_skb[i]) {
 			desc = cp->rx_ring + i;
-			pci_unmap_single(cp->pdev, le64_to_cpu(desc->addr),
+			dma_unmap_single(&cp->pdev->dev, le64_to_cpu(desc->addr),
 					 cp->rx_buf_sz, PCI_DMA_FROMDEVICE);
 			dev_kfree_skb(cp->rx_skb[i]);
 		}
@@ -1136,7 +1137,7 @@ static void cp_clean_rings (struct cp_private *cp)
 			struct sk_buff *skb = cp->tx_skb[i];
 
 			desc = cp->tx_ring + i;
-			pci_unmap_single(cp->pdev, le64_to_cpu(desc->addr),
+			dma_unmap_single(&cp->pdev->dev, le64_to_cpu(desc->addr),
 					 le32_to_cpu(desc->opts1) & 0xffff,
 					 PCI_DMA_TODEVICE);
 			if (le32_to_cpu(desc->opts1) & LastFrag)
@@ -1155,7 +1156,8 @@ static void cp_clean_rings (struct cp_private *cp)
 static void cp_free_rings (struct cp_private *cp)
 {
 	cp_clean_rings(cp);
-	pci_free_consistent(cp->pdev, CP_RING_BYTES, cp->rx_ring, cp->ring_dma);
+	dma_free_coherent(&cp->pdev->dev, CP_RING_BYTES, cp->rx_ring,
+			  cp->ring_dma);
 	cp->rx_ring = NULL;
 	cp->tx_ring = NULL;
 }
@@ -1519,7 +1521,8 @@ static void cp_get_ethtool_stats (struct net_device *dev,
 	dma_addr_t dma;
 	int i;
 
-	nic_stats = pci_alloc_consistent(cp->pdev, sizeof(*nic_stats), &dma);
+	nic_stats = dma_alloc_coherent(&cp->pdev->dev, sizeof(*nic_stats),
+				       &dma, GFP_KERNEL);
 	if (!nic_stats)
 		return;
 
@@ -1554,7 +1557,7 @@ static void cp_get_ethtool_stats (struct net_device *dev,
 	tmp_stats[i++] = cp->cp_stats.rx_frags;
 	BUG_ON(i != CP_NUM_STATS);
 
-	pci_free_consistent(cp->pdev, sizeof(*nic_stats), nic_stats, dma);
+	dma_free_coherent(&cp->pdev->dev, sizeof(*nic_stats), nic_stats, dma);
 }
 
 static const struct ethtool_ops cp_ethtool_ops = {
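
Note (not part of the patch): the conversion above is mechanical. The generic DMA API takes the underlying struct device (&pdev->dev) instead of the struct pci_dev, dma_alloc_coherent() takes an explicit gfp_t where pci_alloc_consistent() implied GFP_ATOMIC, and the PCI_DMA_* direction constants kept here are aliases for the generic DMA_* ones. A minimal illustrative sketch of the same calls, using hypothetical helper names (example_map_rx_buf, example_alloc_ring):

#include <linux/dma-mapping.h>
#include <linux/gfp.h>
#include <linux/pci.h>

/* Hypothetical helper, for illustration only: map an RX buffer through the
 * generic DMA API names directly.  PCI_DMA_FROMDEVICE, as used in the patch,
 * is an alias for DMA_FROM_DEVICE.
 */
static int example_map_rx_buf(struct pci_dev *pdev, void *buf, size_t len,
			      dma_addr_t *mapping)
{
	/* old: *mapping = pci_map_single(pdev, buf, len, PCI_DMA_FROMDEVICE); */
	*mapping = dma_map_single(&pdev->dev, buf, len, DMA_FROM_DEVICE);
	if (dma_mapping_error(&pdev->dev, *mapping))
		return -ENOMEM;
	return 0;
}

/* Hypothetical helper: allocate a descriptor ring.  dma_alloc_coherent()
 * takes an explicit gfp_t, so sleeping contexts can pass GFP_KERNEL.
 */
static void *example_alloc_ring(struct pci_dev *pdev, size_t bytes,
				dma_addr_t *ring_dma)
{
	/* old: return pci_alloc_consistent(pdev, bytes, ring_dma); */
	return dma_alloc_coherent(&pdev->dev, bytes, ring_dma, GFP_KERNEL);
}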