@@ -313,10 +313,10 @@ vmxnet3_unmap_tx_buf(struct vmxnet3_tx_buf_info *tbi,
 		     struct pci_dev *pdev)
 {
 	if (tbi->map_type == VMXNET3_MAP_SINGLE)
-		pci_unmap_single(pdev, tbi->dma_addr, tbi->len,
+		dma_unmap_single(&pdev->dev, tbi->dma_addr, tbi->len,
 				 PCI_DMA_TODEVICE);
 	else if (tbi->map_type == VMXNET3_MAP_PAGE)
-		pci_unmap_page(pdev, tbi->dma_addr, tbi->len,
+		dma_unmap_page(&pdev->dev, tbi->dma_addr, tbi->len,
 			       PCI_DMA_TODEVICE);
 	else
 		BUG_ON(tbi->map_type != VMXNET3_MAP_NONE);
@@ -429,25 +429,29 @@ vmxnet3_tq_destroy(struct vmxnet3_tx_queue *tq,
 		   struct vmxnet3_adapter *adapter)
 {
 	if (tq->tx_ring.base) {
-		pci_free_consistent(adapter->pdev, tq->tx_ring.size *
-				    sizeof(struct Vmxnet3_TxDesc),
-				    tq->tx_ring.base, tq->tx_ring.basePA);
+		dma_free_coherent(&adapter->pdev->dev, tq->tx_ring.size *
+				  sizeof(struct Vmxnet3_TxDesc),
+				  tq->tx_ring.base, tq->tx_ring.basePA);
 		tq->tx_ring.base = NULL;
 	}
 	if (tq->data_ring.base) {
-		pci_free_consistent(adapter->pdev, tq->data_ring.size *
-				    sizeof(struct Vmxnet3_TxDataDesc),
-				    tq->data_ring.base, tq->data_ring.basePA);
+		dma_free_coherent(&adapter->pdev->dev, tq->data_ring.size *
+				  sizeof(struct Vmxnet3_TxDataDesc),
+				  tq->data_ring.base, tq->data_ring.basePA);
 		tq->data_ring.base = NULL;
 	}
 	if (tq->comp_ring.base) {
-		pci_free_consistent(adapter->pdev, tq->comp_ring.size *
-				    sizeof(struct Vmxnet3_TxCompDesc),
-				    tq->comp_ring.base, tq->comp_ring.basePA);
+		dma_free_coherent(&adapter->pdev->dev, tq->comp_ring.size *
+				  sizeof(struct Vmxnet3_TxCompDesc),
+				  tq->comp_ring.base, tq->comp_ring.basePA);
 		tq->comp_ring.base = NULL;
 	}
-	kfree(tq->buf_info);
-	tq->buf_info = NULL;
+	if (tq->buf_info) {
+		dma_free_coherent(&adapter->pdev->dev,
+				  tq->tx_ring.size * sizeof(tq->buf_info[0]),
+				  tq->buf_info, tq->buf_info_pa);
+		tq->buf_info = NULL;
+	}
 }
 
 
@@ -496,37 +500,38 @@ static int
 vmxnet3_tq_create(struct vmxnet3_tx_queue *tq,
 		  struct vmxnet3_adapter *adapter)
 {
+	size_t sz;
+
 	BUG_ON(tq->tx_ring.base || tq->data_ring.base ||
 	       tq->comp_ring.base || tq->buf_info);
 
-	tq->tx_ring.base = pci_alloc_consistent(adapter->pdev, tq->tx_ring.size
-			   * sizeof(struct Vmxnet3_TxDesc),
-			   &tq->tx_ring.basePA);
+	tq->tx_ring.base = dma_alloc_coherent(&adapter->pdev->dev,
+			   tq->tx_ring.size * sizeof(struct Vmxnet3_TxDesc),
+			   &tq->tx_ring.basePA, GFP_KERNEL);
 	if (!tq->tx_ring.base) {
 		netdev_err(adapter->netdev, "failed to allocate tx ring\n");
 		goto err;
 	}
 
-	tq->data_ring.base = pci_alloc_consistent(adapter->pdev,
-			     tq->data_ring.size *
-			     sizeof(struct Vmxnet3_TxDataDesc),
-			     &tq->data_ring.basePA);
+	tq->data_ring.base = dma_alloc_coherent(&adapter->pdev->dev,
+			     tq->data_ring.size * sizeof(struct Vmxnet3_TxDataDesc),
+			     &tq->data_ring.basePA, GFP_KERNEL);
 	if (!tq->data_ring.base) {
 		netdev_err(adapter->netdev, "failed to allocate data ring\n");
 		goto err;
 	}
 
-	tq->comp_ring.base = pci_alloc_consistent(adapter->pdev,
-			     tq->comp_ring.size *
-			     sizeof(struct Vmxnet3_TxCompDesc),
-			     &tq->comp_ring.basePA);
+	tq->comp_ring.base = dma_alloc_coherent(&adapter->pdev->dev,
+			     tq->comp_ring.size * sizeof(struct Vmxnet3_TxCompDesc),
+			     &tq->comp_ring.basePA, GFP_KERNEL);
 	if (!tq->comp_ring.base) {
 		netdev_err(adapter->netdev, "failed to allocate tx comp ring\n");
 		goto err;
 	}
 
-	tq->buf_info = kcalloc(tq->tx_ring.size, sizeof(tq->buf_info[0]),
-			       GFP_KERNEL);
+	sz = tq->tx_ring.size * sizeof(tq->buf_info[0]);
+	tq->buf_info = dma_zalloc_coherent(&adapter->pdev->dev, sz,
+					   &tq->buf_info_pa, GFP_KERNEL);
 	if (!tq->buf_info)
 		goto err;
 
@@ -578,7 +583,8 @@ vmxnet3_rq_alloc_rx_buf(struct vmxnet3_rx_queue *rq, u32 ring_idx,
 					break;
 				}
 
-				rbi->dma_addr = pci_map_single(adapter->pdev,
+				rbi->dma_addr = dma_map_single(
+						&adapter->pdev->dev,
 						rbi->skb->data, rbi->len,
 						PCI_DMA_FROMDEVICE);
 			} else {
@@ -595,7 +601,8 @@ vmxnet3_rq_alloc_rx_buf(struct vmxnet3_rx_queue *rq, u32 ring_idx,
 					rq->stats.rx_buf_alloc_failure++;
 					break;
 				}
-				rbi->dma_addr = pci_map_page(adapter->pdev,
+				rbi->dma_addr = dma_map_page(
+						&adapter->pdev->dev,
 						rbi->page, 0, PAGE_SIZE,
 						PCI_DMA_FROMDEVICE);
 			} else {
@@ -705,7 +712,7 @@ vmxnet3_map_pkt(struct sk_buff *skb, struct vmxnet3_tx_ctx *ctx,
 
 		tbi = tq->buf_info + tq->tx_ring.next2fill;
 		tbi->map_type = VMXNET3_MAP_SINGLE;
-		tbi->dma_addr = pci_map_single(adapter->pdev,
+		tbi->dma_addr = dma_map_single(&adapter->pdev->dev,
 				skb->data + buf_offset, buf_size,
 				PCI_DMA_TODEVICE);
 
@@ -1221,7 +1228,8 @@ vmxnet3_rq_rx_complete(struct vmxnet3_rx_queue *rq,
 				goto rcd_done;
 			}
 
-			pci_unmap_single(adapter->pdev, rbi->dma_addr, rbi->len,
+			dma_unmap_single(&adapter->pdev->dev, rbi->dma_addr,
+					 rbi->len,
 					 PCI_DMA_FROMDEVICE);
 
 #ifdef VMXNET3_RSS
@@ -1233,7 +1241,7 @@ vmxnet3_rq_rx_complete(struct vmxnet3_rx_queue *rq,
 
 			/* Immediate refill */
 			rbi->skb = new_skb;
-			rbi->dma_addr = pci_map_single(adapter->pdev,
+			rbi->dma_addr = dma_map_single(&adapter->pdev->dev,
 						       rbi->skb->data, rbi->len,
 						       PCI_DMA_FROMDEVICE);
 			rxd->addr = cpu_to_le64(rbi->dma_addr);
@@ -1267,7 +1275,7 @@ vmxnet3_rq_rx_complete(struct vmxnet3_rx_queue *rq,
 			}
 
 			if (rcd->len) {
-				pci_unmap_page(adapter->pdev,
+				dma_unmap_page(&adapter->pdev->dev,
 					       rbi->dma_addr, rbi->len,
 					       PCI_DMA_FROMDEVICE);
 
@@ -1276,7 +1284,8 @@ vmxnet3_rq_rx_complete(struct vmxnet3_rx_queue *rq,
 
 			/* Immediate refill */
 			rbi->page = new_page;
-			rbi->dma_addr = pci_map_page(adapter->pdev, rbi->page,
+			rbi->dma_addr = dma_map_page(&adapter->pdev->dev,
+						     rbi->page,
 						     0, PAGE_SIZE,
 						     PCI_DMA_FROMDEVICE);
 			rxd->addr = cpu_to_le64(rbi->dma_addr);
@@ -1352,13 +1361,13 @@ vmxnet3_rq_cleanup(struct vmxnet3_rx_queue *rq,
 
 			if (rxd->btype == VMXNET3_RXD_BTYPE_HEAD &&
 					rq->buf_info[ring_idx][i].skb) {
-				pci_unmap_single(adapter->pdev, rxd->addr,
+				dma_unmap_single(&adapter->pdev->dev, rxd->addr,
 						 rxd->len, PCI_DMA_FROMDEVICE);
 				dev_kfree_skb(rq->buf_info[ring_idx][i].skb);
 				rq->buf_info[ring_idx][i].skb = NULL;
 			} else if (rxd->btype == VMXNET3_RXD_BTYPE_BODY &&
 					rq->buf_info[ring_idx][i].page) {
-				pci_unmap_page(adapter->pdev, rxd->addr,
+				dma_unmap_page(&adapter->pdev->dev, rxd->addr,
 					       rxd->len, PCI_DMA_FROMDEVICE);
 				put_page(rq->buf_info[ring_idx][i].page);
 				rq->buf_info[ring_idx][i].page = NULL;
@@ -1400,25 +1409,31 @@ static void vmxnet3_rq_destroy(struct vmxnet3_rx_queue *rq,
 	}
 
 
-	kfree(rq->buf_info[0]);
-
 	for (i = 0; i < 2; i++) {
 		if (rq->rx_ring[i].base) {
-			pci_free_consistent(adapter->pdev, rq->rx_ring[i].size
-					    * sizeof(struct Vmxnet3_RxDesc),
-					    rq->rx_ring[i].base,
-					    rq->rx_ring[i].basePA);
+			dma_free_coherent(&adapter->pdev->dev,
+					  rq->rx_ring[i].size
+					  * sizeof(struct Vmxnet3_RxDesc),
+					  rq->rx_ring[i].base,
+					  rq->rx_ring[i].basePA);
 			rq->rx_ring[i].base = NULL;
 		}
 		rq->buf_info[i] = NULL;
 	}
 
 	if (rq->comp_ring.base) {
-		pci_free_consistent(adapter->pdev, rq->comp_ring.size *
-				    sizeof(struct Vmxnet3_RxCompDesc),
-				    rq->comp_ring.base, rq->comp_ring.basePA);
+		dma_free_coherent(&adapter->pdev->dev, rq->comp_ring.size
+				  * sizeof(struct Vmxnet3_RxCompDesc),
+				  rq->comp_ring.base, rq->comp_ring.basePA);
 		rq->comp_ring.base = NULL;
 	}
+
+	if (rq->buf_info[0]) {
+		size_t sz = sizeof(struct vmxnet3_rx_buf_info) *
+			(rq->rx_ring[0].size + rq->rx_ring[1].size);
+		dma_free_coherent(&adapter->pdev->dev, sz, rq->buf_info[0],
+				  rq->buf_info_pa);
+	}
 }
 
 
@@ -1503,8 +1518,10 @@ vmxnet3_rq_create(struct vmxnet3_rx_queue *rq, struct vmxnet3_adapter *adapter)
 	for (i = 0; i < 2; i++) {
 
 		sz = rq->rx_ring[i].size * sizeof(struct Vmxnet3_RxDesc);
-		rq->rx_ring[i].base = pci_alloc_consistent(adapter->pdev, sz,
-							   &rq->rx_ring[i].basePA);
+		rq->rx_ring[i].base = dma_alloc_coherent(
+						&adapter->pdev->dev, sz,
+						&rq->rx_ring[i].basePA,
+						GFP_KERNEL);
 		if (!rq->rx_ring[i].base) {
 			netdev_err(adapter->netdev,
 				   "failed to allocate rx ring %d\n", i);
@@ -1513,8 +1530,9 @@ vmxnet3_rq_create(struct vmxnet3_rx_queue *rq, struct vmxnet3_adapter *adapter)
 	}
 
 	sz = rq->comp_ring.size * sizeof(struct Vmxnet3_RxCompDesc);
-	rq->comp_ring.base = pci_alloc_consistent(adapter->pdev, sz,
-						  &rq->comp_ring.basePA);
+	rq->comp_ring.base = dma_alloc_coherent(&adapter->pdev->dev, sz,
+						&rq->comp_ring.basePA,
+						GFP_KERNEL);
 	if (!rq->comp_ring.base) {
 		netdev_err(adapter->netdev, "failed to allocate rx comp ring\n");
 		goto err;
@@ -1522,7 +1540,8 @@ vmxnet3_rq_create(struct vmxnet3_rx_queue *rq, struct vmxnet3_adapter *adapter)
 
 	sz = sizeof(struct vmxnet3_rx_buf_info) * (rq->rx_ring[0].size +
 						   rq->rx_ring[1].size);
-	bi = kzalloc(sz, GFP_KERNEL);
+	bi = dma_zalloc_coherent(&adapter->pdev->dev, sz, &rq->buf_info_pa,
+				 GFP_KERNEL);
 	if (!bi)
 		goto err;
 
@@ -2005,6 +2024,7 @@ vmxnet3_set_mc(struct net_device *netdev)
 	struct Vmxnet3_RxFilterConf *rxConf =
 					&adapter->shared->devRead.rxFilterConf;
 	u8 *new_table = NULL;
+	dma_addr_t new_table_pa = 0;
 	u32 new_mode = VMXNET3_RXM_UCAST;
 
 	if (netdev->flags & IFF_PROMISC) {
@@ -2028,8 +2048,12 @@ vmxnet3_set_mc(struct net_device *netdev)
 				new_mode |= VMXNET3_RXM_MCAST;
 				rxConf->mfTableLen = cpu_to_le16(
 					netdev_mc_count(netdev) * ETH_ALEN);
-				rxConf->mfTablePA = cpu_to_le64(virt_to_phys(
-						    new_table));
+				new_table_pa = dma_map_single(
+							&adapter->pdev->dev,
+							new_table,
+							rxConf->mfTableLen,
+							PCI_DMA_TODEVICE);
+				rxConf->mfTablePA = cpu_to_le64(new_table_pa);
 			} else {
 				netdev_info(netdev, "failed to copy mcast list"
 					    ", setting ALL_MULTI\n");
@@ -2056,7 +2080,11 @@ vmxnet3_set_mc(struct net_device *netdev)
 				       VMXNET3_CMD_UPDATE_MAC_FILTERS);
 	spin_unlock_irqrestore(&adapter->cmd_lock, flags);
 
-	kfree(new_table);
+	if (new_table) {
+		dma_unmap_single(&adapter->pdev->dev, new_table_pa,
+				 rxConf->mfTableLen, PCI_DMA_TODEVICE);
+		kfree(new_table);
+	}
 }
 
 void
@@ -2096,7 +2124,7 @@ vmxnet3_setup_driver_shared(struct vmxnet3_adapter *adapter)
 	devRead->misc.driverInfo.vmxnet3RevSpt = cpu_to_le32(1);
 	devRead->misc.driverInfo.uptVerSpt = cpu_to_le32(1);
 
-	devRead->misc.ddPA = cpu_to_le64(virt_to_phys(adapter));
+	devRead->misc.ddPA = cpu_to_le64(adapter->adapter_pa);
 	devRead->misc.ddLen = cpu_to_le32(sizeof(struct vmxnet3_adapter));
 
 	/* set up feature flags */
@@ -2125,7 +2153,7 @@ vmxnet3_setup_driver_shared(struct vmxnet3_adapter *adapter)
 		tqc->txRingBasePA   = cpu_to_le64(tq->tx_ring.basePA);
 		tqc->dataRingBasePA = cpu_to_le64(tq->data_ring.basePA);
 		tqc->compRingBasePA = cpu_to_le64(tq->comp_ring.basePA);
-		tqc->ddPA           = cpu_to_le64(virt_to_phys(tq->buf_info));
+		tqc->ddPA           = cpu_to_le64(tq->buf_info_pa);
 		tqc->txRingSize     = cpu_to_le32(tq->tx_ring.size);
 		tqc->dataRingSize   = cpu_to_le32(tq->data_ring.size);
 		tqc->compRingSize   = cpu_to_le32(tq->comp_ring.size);
@@ -2143,8 +2171,7 @@ vmxnet3_setup_driver_shared(struct vmxnet3_adapter *adapter)
 		rqc->rxRingBasePA[0] = cpu_to_le64(rq->rx_ring[0].basePA);
 		rqc->rxRingBasePA[1] = cpu_to_le64(rq->rx_ring[1].basePA);
 		rqc->compRingBasePA  = cpu_to_le64(rq->comp_ring.basePA);
-		rqc->ddPA            = cpu_to_le64(virt_to_phys(
-							rq->buf_info));
+		rqc->ddPA            = cpu_to_le64(rq->buf_info_pa);
 		rqc->rxRingSize[0]   = cpu_to_le32(rq->rx_ring[0].size);
 		rqc->rxRingSize[1]   = cpu_to_le32(rq->rx_ring[1].size);
 		rqc->compRingSize    = cpu_to_le32(rq->comp_ring.size);
@@ -2184,8 +2211,9 @@ vmxnet3_setup_driver_shared(struct vmxnet3_adapter *adapter)
 				i, adapter->num_rx_queues);
 
 		devRead->rssConfDesc.confVer = 1;
-		devRead->rssConfDesc.confLen = sizeof(*rssConf);
-		devRead->rssConfDesc.confPA  = virt_to_phys(rssConf);
+		devRead->rssConfDesc.confLen = cpu_to_le32(sizeof(*rssConf));
+		devRead->rssConfDesc.confPA =
+			cpu_to_le64(adapter->rss_conf_pa);
 	}
 
 #endif /* VMXNET3_RSS */
@@ -2948,9 +2976,13 @@ vmxnet3_probe_device(struct pci_dev *pdev,
 	adapter->pdev = pdev;
 
 	spin_lock_init(&adapter->cmd_lock);
-	adapter->shared = pci_alloc_consistent(adapter->pdev,
-			  sizeof(struct Vmxnet3_DriverShared),
-			  &adapter->shared_pa);
+	adapter->adapter_pa = dma_map_single(&adapter->pdev->dev, adapter,
+					     sizeof(struct vmxnet3_adapter),
+					     PCI_DMA_TODEVICE);
+	adapter->shared = dma_alloc_coherent(
+				&adapter->pdev->dev,
+				sizeof(struct Vmxnet3_DriverShared),
+				&adapter->shared_pa, GFP_KERNEL);
 	if (!adapter->shared) {
 		dev_err(&pdev->dev, "Failed to allocate memory\n");
 		err = -ENOMEM;
@@ -2963,8 +2995,9 @@ vmxnet3_probe_device(struct pci_dev *pdev,
 
 	size = sizeof(struct Vmxnet3_TxQueueDesc) * adapter->num_tx_queues;
 	size += sizeof(struct Vmxnet3_RxQueueDesc) * adapter->num_rx_queues;
-	adapter->tqd_start = pci_alloc_consistent(adapter->pdev, size,
-			     &adapter->queue_desc_pa);
+	adapter->tqd_start = dma_alloc_coherent(&adapter->pdev->dev, size,
+						&adapter->queue_desc_pa,
+						GFP_KERNEL);
 
 	if (!adapter->tqd_start) {
 		dev_err(&pdev->dev, "Failed to allocate memory\n");
@@ -2974,7 +3007,10 @@ vmxnet3_probe_device(struct pci_dev *pdev,
 	adapter->rqd_start = (struct Vmxnet3_RxQueueDesc *)(adapter->tqd_start +
 							    adapter->num_tx_queues);
 
-	adapter->pm_conf = kmalloc(sizeof(struct Vmxnet3_PMConf), GFP_KERNEL);
+	adapter->pm_conf = dma_alloc_coherent(&adapter->pdev->dev,
+					      sizeof(struct Vmxnet3_PMConf),
+					      &adapter->pm_conf_pa,
+					      GFP_KERNEL);
 	if (adapter->pm_conf == NULL) {
 		err = -ENOMEM;
 		goto err_alloc_pm;
@@ -2982,7 +3018,10 @@ vmxnet3_probe_device(struct pci_dev *pdev,
 
 #ifdef VMXNET3_RSS
 
-	adapter->rss_conf = kmalloc(sizeof(struct UPT1_RSSConf), GFP_KERNEL);
+	adapter->rss_conf = dma_alloc_coherent(&adapter->pdev->dev,
+					       sizeof(struct UPT1_RSSConf),
+					       &adapter->rss_conf_pa,
+					       GFP_KERNEL);
 	if (adapter->rss_conf == NULL) {
 		err = -ENOMEM;
 		goto err_alloc_rss;
@@ -3077,17 +3116,22 @@ err_ver:
 	vmxnet3_free_pci_resources(adapter);
 err_alloc_pci:
 #ifdef VMXNET3_RSS
-	kfree(adapter->rss_conf);
+	dma_free_coherent(&adapter->pdev->dev, sizeof(struct UPT1_RSSConf),
+			  adapter->rss_conf, adapter->rss_conf_pa);
 err_alloc_rss:
 #endif
-	kfree(adapter->pm_conf);
+	dma_free_coherent(&adapter->pdev->dev, sizeof(struct Vmxnet3_PMConf),
+			  adapter->pm_conf, adapter->pm_conf_pa);
 err_alloc_pm:
-	pci_free_consistent(adapter->pdev, size, adapter->tqd_start,
-			    adapter->queue_desc_pa);
+	dma_free_coherent(&adapter->pdev->dev, size, adapter->tqd_start,
+			  adapter->queue_desc_pa);
 err_alloc_queue_desc:
-	pci_free_consistent(adapter->pdev, sizeof(struct Vmxnet3_DriverShared),
-			    adapter->shared, adapter->shared_pa);
+	dma_free_coherent(&adapter->pdev->dev,
+			  sizeof(struct Vmxnet3_DriverShared),
+			  adapter->shared, adapter->shared_pa);
 err_alloc_shared:
+	dma_unmap_single(&adapter->pdev->dev, adapter->adapter_pa,
+			 sizeof(struct vmxnet3_adapter), PCI_DMA_TODEVICE);
 	pci_set_drvdata(pdev, NULL);
 	free_netdev(netdev);
 	return err;
@@ -3118,16 +3162,21 @@ vmxnet3_remove_device(struct pci_dev *pdev)
 	vmxnet3_free_intr_resources(adapter);
 	vmxnet3_free_pci_resources(adapter);
 #ifdef VMXNET3_RSS
-	kfree(adapter->rss_conf);
+	dma_free_coherent(&adapter->pdev->dev, sizeof(struct UPT1_RSSConf),
+			  adapter->rss_conf, adapter->rss_conf_pa);
 #endif
-	kfree(adapter->pm_conf);
+	dma_free_coherent(&adapter->pdev->dev, sizeof(struct Vmxnet3_PMConf),
+			  adapter->pm_conf, adapter->pm_conf_pa);
 
 	size = sizeof(struct Vmxnet3_TxQueueDesc) * adapter->num_tx_queues;
 	size += sizeof(struct Vmxnet3_RxQueueDesc) * num_rx_queues;
-	pci_free_consistent(adapter->pdev, size, adapter->tqd_start,
-			    adapter->queue_desc_pa);
-	pci_free_consistent(adapter->pdev, sizeof(struct Vmxnet3_DriverShared),
-			    adapter->shared, adapter->shared_pa);
+	dma_free_coherent(&adapter->pdev->dev, size, adapter->tqd_start,
+			  adapter->queue_desc_pa);
+	dma_free_coherent(&adapter->pdev->dev,
+			  sizeof(struct Vmxnet3_DriverShared),
+			  adapter->shared, adapter->shared_pa);
+	dma_unmap_single(&adapter->pdev->dev, adapter->adapter_pa,
+			 sizeof(struct vmxnet3_adapter), PCI_DMA_TODEVICE);
 	free_netdev(netdev);
 }
 
@@ -3227,8 +3276,8 @@ skip_arp:
 	adapter->shared->devRead.pmConfDesc.confVer = cpu_to_le32(1);
 	adapter->shared->devRead.pmConfDesc.confLen = cpu_to_le32(sizeof(
 								  *pmConf));
-	adapter->shared->devRead.pmConfDesc.confPA = cpu_to_le64(virt_to_phys(
-								 pmConf));
+	adapter->shared->devRead.pmConfDesc.confPA =
+		cpu_to_le64(adapter->pm_conf_pa);
 
 	spin_lock_irqsave(&adapter->cmd_lock, flags);
 	VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
@@ -3265,8 +3314,8 @@ vmxnet3_resume(struct device *device)
 	adapter->shared->devRead.pmConfDesc.confVer = cpu_to_le32(1);
 	adapter->shared->devRead.pmConfDesc.confLen = cpu_to_le32(sizeof(
 								  *pmConf));
-	adapter->shared->devRead.pmConfDesc.confPA = cpu_to_le64(virt_to_phys(
-								 pmConf));
+	adapter->shared->devRead.pmConfDesc.confPA =
+		cpu_to_le64(adapter->pm_conf_pa);
 
 	netif_device_attach(netdev);
 	pci_set_power_state(pdev, PCI_D0);