|
@@ -118,7 +118,7 @@ struct rx_sw_desc { /* SW state per Rx descriptor */
|
|
|
struct sk_buff *skb;
|
|
|
struct fl_pg_chunk pg_chunk;
|
|
|
};
|
|
|
- DECLARE_PCI_UNMAP_ADDR(dma_addr);
|
|
|
+ DEFINE_DMA_UNMAP_ADDR(dma_addr);
|
|
|
};
|
|
|
|
|
|
struct rsp_desc { /* response queue descriptor */
|
|
@@ -208,7 +208,7 @@ static inline int need_skb_unmap(void)
|
|
|
- * unmapping by checking if DECLARE_PCI_UNMAP_ADDR defines anything.
+ * unmapping by checking if DEFINE_DMA_UNMAP_ADDR defines anything.
|
|
|
*/
|
|
|
struct dummy {
|
|
|
- DECLARE_PCI_UNMAP_ADDR(addr);
|
|
|
+ DEFINE_DMA_UNMAP_ADDR(addr);
|
|
|
};
|
|
|
|
|
|
return sizeof(struct dummy) != 0;
|
|
@@ -363,7 +363,7 @@ static void clear_rx_desc(struct pci_dev *pdev, const struct sge_fl *q,
|
|
|
put_page(d->pg_chunk.page);
|
|
|
d->pg_chunk.page = NULL;
|
|
|
} else {
|
|
|
- pci_unmap_single(pdev, pci_unmap_addr(d, dma_addr),
|
|
|
+ pci_unmap_single(pdev, dma_unmap_addr(d, dma_addr),
|
|
|
q->buf_size, PCI_DMA_FROMDEVICE);
|
|
|
kfree_skb(d->skb);
|
|
|
d->skb = NULL;
|
|
@@ -419,7 +419,7 @@ static inline int add_one_rx_buf(void *va, unsigned int len,
|
|
|
if (unlikely(pci_dma_mapping_error(pdev, mapping)))
|
|
|
return -ENOMEM;
|
|
|
|
|
|
- pci_unmap_addr_set(sd, dma_addr, mapping);
|
|
|
+ dma_unmap_addr_set(sd, dma_addr, mapping);
|
|
|
|
|
|
d->addr_lo = cpu_to_be32(mapping);
|
|
|
d->addr_hi = cpu_to_be32((u64) mapping >> 32);
|
|
@@ -515,7 +515,7 @@ nomem: q->alloc_failed++;
|
|
|
break;
|
|
|
}
|
|
|
mapping = sd->pg_chunk.mapping + sd->pg_chunk.offset;
|
|
|
- pci_unmap_addr_set(sd, dma_addr, mapping);
|
|
|
+ dma_unmap_addr_set(sd, dma_addr, mapping);
|
|
|
|
|
|
add_one_rx_chunk(mapping, d, q->gen);
|
|
|
pci_dma_sync_single_for_device(adap->pdev, mapping,
|
|
@@ -791,11 +791,11 @@ static struct sk_buff *get_packet(struct adapter *adap, struct sge_fl *fl,
|
|
|
if (likely(skb != NULL)) {
|
|
|
__skb_put(skb, len);
|
|
|
pci_dma_sync_single_for_cpu(adap->pdev,
|
|
|
- pci_unmap_addr(sd, dma_addr), len,
|
|
|
+ dma_unmap_addr(sd, dma_addr), len,
|
|
|
PCI_DMA_FROMDEVICE);
|
|
|
memcpy(skb->data, sd->skb->data, len);
|
|
|
pci_dma_sync_single_for_device(adap->pdev,
|
|
|
- pci_unmap_addr(sd, dma_addr), len,
|
|
|
+ dma_unmap_addr(sd, dma_addr), len,
|
|
|
PCI_DMA_FROMDEVICE);
|
|
|
} else if (!drop_thres)
|
|
|
goto use_orig_buf;
|
|
@@ -810,7 +810,7 @@ recycle:
|
|
|
goto recycle;
|
|
|
|
|
|
use_orig_buf:
|
|
|
- pci_unmap_single(adap->pdev, pci_unmap_addr(sd, dma_addr),
|
|
|
+ pci_unmap_single(adap->pdev, dma_unmap_addr(sd, dma_addr),
|
|
|
fl->buf_size, PCI_DMA_FROMDEVICE);
|
|
|
skb = sd->skb;
|
|
|
skb_put(skb, len);
|
|
@@ -843,7 +843,7 @@ static struct sk_buff *get_packet_pg(struct adapter *adap, struct sge_fl *fl,
|
|
|
struct sk_buff *newskb, *skb;
|
|
|
struct rx_sw_desc *sd = &fl->sdesc[fl->cidx];
|
|
|
|
|
|
- dma_addr_t dma_addr = pci_unmap_addr(sd, dma_addr);
|
|
|
+ dma_addr_t dma_addr = dma_unmap_addr(sd, dma_addr);
|
|
|
|
|
|
newskb = skb = q->pg_skb;
|
|
|
if (!skb && (len <= SGE_RX_COPY_THRES)) {
|
|
@@ -2097,7 +2097,7 @@ static void lro_add_page(struct adapter *adap, struct sge_qset *qs,
|
|
|
fl->credits--;
|
|
|
|
|
|
pci_dma_sync_single_for_cpu(adap->pdev,
|
|
|
- pci_unmap_addr(sd, dma_addr),
|
|
|
+ dma_unmap_addr(sd, dma_addr),
|
|
|
fl->buf_size - SGE_PG_RSVD,
|
|
|
PCI_DMA_FROMDEVICE);
|
|
|
|