@@ -243,7 +243,7 @@ netdev_tx_t efx_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb)
 		/* Map for DMA */
 		unmap_single = false;
 		dma_addr = skb_frag_dma_map(&pci_dev->dev, fragment, 0, len,
-					    PCI_DMA_TODEVICE);
+					    DMA_TO_DEVICE);
 	}
 
 	/* Transfer ownership of the skb to the final buffer */
@@ -926,8 +926,8 @@ static int tso_get_fragment(struct tso_state *st, struct efx_nic *efx,
 			    skb_frag_t *frag)
 {
 	st->unmap_addr = skb_frag_dma_map(&efx->pci_dev->dev, frag, 0,
-					  frag->size, PCI_DMA_TODEVICE);
-	if (likely(!pci_dma_mapping_error(efx->pci_dev, st->unmap_addr))) {
+					  frag->size, DMA_TO_DEVICE);
+	if (likely(!dma_mapping_error(&efx->pci_dev->dev, st->unmap_addr))) {
 		st->unmap_single = false;
 		st->unmap_len = frag->size;
 		st->in_len = frag->size;