@@ -48,7 +48,6 @@ static int mlx4_en_alloc_frag(struct mlx4_en_priv *priv,
 				   struct mlx4_en_rx_alloc *ring_alloc,
 				   int i)
 {
-	struct mlx4_en_dev *mdev = priv->mdev;
 	struct mlx4_en_frag_info *frag_info = &priv->frag_info[i];
 	struct mlx4_en_rx_alloc *page_alloc = &ring_alloc[i];
 	struct page *page;
@@ -72,7 +71,7 @@ static int mlx4_en_alloc_frag(struct mlx4_en_priv *priv,
 		skb_frags[i].offset = page_alloc->offset;
 		page_alloc->offset += frag_info->frag_stride;
 	}
-	dma = pci_map_single(mdev->pdev, page_address(skb_frags[i].page) +
+	dma = dma_map_single(priv->ddev, page_address(skb_frags[i].page) +
 			     skb_frags[i].offset, frag_info->frag_size,
 			     PCI_DMA_FROMDEVICE);
 	rx_desc->data[i].addr = cpu_to_be64(dma);
@@ -186,7 +185,6 @@ static void mlx4_en_free_rx_desc(struct mlx4_en_priv *priv,
 				 struct mlx4_en_rx_ring *ring,
 				 int index)
 {
-	struct mlx4_en_dev *mdev = priv->mdev;
 	struct page_frag *skb_frags;
 	struct mlx4_en_rx_desc *rx_desc = ring->buf + (index << ring->log_stride);
 	dma_addr_t dma;
@@ -198,7 +196,7 @@ static void mlx4_en_free_rx_desc(struct mlx4_en_priv *priv,
 		dma = be64_to_cpu(rx_desc->data[nr].addr);
 
 		en_dbg(DRV, priv, "Unmapping buffer at dma:0x%llx\n", (u64) dma);
-		pci_unmap_single(mdev->pdev, dma, skb_frags[nr].size,
+		dma_unmap_single(priv->ddev, dma, skb_frags[nr].size,
 				 PCI_DMA_FROMDEVICE);
 		put_page(skb_frags[nr].page);
 	}
@@ -412,7 +410,6 @@ static int mlx4_en_complete_rx_desc(struct mlx4_en_priv *priv,
 				    int length)
 {
 	struct skb_frag_struct *skb_frags_rx = skb_shinfo(skb)->frags;
-	struct mlx4_en_dev *mdev = priv->mdev;
 	struct mlx4_en_frag_info *frag_info;
 	int nr;
 	dma_addr_t dma;
@@ -435,7 +432,7 @@ static int mlx4_en_complete_rx_desc(struct mlx4_en_priv *priv,
 			goto fail;
 
 		/* Unmap buffer */
-		pci_unmap_single(mdev->pdev, dma, skb_frag_size(&skb_frags_rx[nr]),
+		dma_unmap_single(priv->ddev, dma, skb_frag_size(&skb_frags_rx[nr]),
 				 PCI_DMA_FROMDEVICE);
 	}
 	/* Adjust size of last fragment to match actual length */
@@ -461,7 +458,6 @@ static struct sk_buff *mlx4_en_rx_skb(struct mlx4_en_priv *priv,
 				      struct mlx4_en_rx_alloc *page_alloc,
 				      unsigned int length)
 {
-	struct mlx4_en_dev *mdev = priv->mdev;
 	struct sk_buff *skb;
 	void *va;
 	int used_frags;
@@ -483,10 +479,10 @@ static struct sk_buff *mlx4_en_rx_skb(struct mlx4_en_priv *priv,
 		/* We are copying all relevant data to the skb - temporarily
 		 * synch buffers for the copy */
 		dma = be64_to_cpu(rx_desc->data[0].addr);
-		dma_sync_single_for_cpu(&mdev->pdev->dev, dma, length,
+		dma_sync_single_for_cpu(priv->ddev, dma, length,
 					DMA_FROM_DEVICE);
 		skb_copy_to_linear_data(skb, va, length);
-		dma_sync_single_for_device(&mdev->pdev->dev, dma, length,
+		dma_sync_single_for_device(priv->ddev, dma, length,
 					   DMA_FROM_DEVICE);
 		skb->tail += length;
 	} else {