|
@@ -517,14 +517,12 @@ int c2_rnic_init(struct c2_dev *c2dev)
|
|
|
/* Initialize the Verbs Reply Queue */
|
|
|
qsize = be32_to_cpu(readl(mmio_regs + C2_REGS_Q1_QSIZE));
|
|
|
msgsize = be32_to_cpu(readl(mmio_regs + C2_REGS_Q1_MSGSIZE));
|
|
|
- q1_pages = kmalloc(qsize * msgsize, GFP_KERNEL);
|
|
|
+ q1_pages = dma_alloc_coherent(&c2dev->pcidev->dev, qsize * msgsize,
|
|
|
+ &c2dev->rep_vq.host_dma, GFP_KERNEL);
|
|
|
if (!q1_pages) {
|
|
|
err = -ENOMEM;
|
|
|
goto bail1;
|
|
|
}
|
|
|
- c2dev->rep_vq.host_dma = dma_map_single(c2dev->ibdev.dma_device,
|
|
|
- (void *)q1_pages, qsize * msgsize,
|
|
|
- DMA_FROM_DEVICE);
|
|
|
pci_unmap_addr_set(&c2dev->rep_vq, mapping, c2dev->rep_vq.host_dma);
|
|
|
pr_debug("%s rep_vq va %p dma %llx\n", __FUNCTION__, q1_pages,
|
|
|
(unsigned long long) c2dev->rep_vq.host_dma);
|
|
@@ -540,14 +538,12 @@ int c2_rnic_init(struct c2_dev *c2dev)
|
|
|
/* Initialize the Asynchronus Event Queue */
|
|
|
qsize = be32_to_cpu(readl(mmio_regs + C2_REGS_Q2_QSIZE));
|
|
|
msgsize = be32_to_cpu(readl(mmio_regs + C2_REGS_Q2_MSGSIZE));
|
|
|
- q2_pages = kmalloc(qsize * msgsize, GFP_KERNEL);
|
|
|
+ q2_pages = dma_alloc_coherent(&c2dev->pcidev->dev, qsize * msgsize,
|
|
|
+ &c2dev->aeq.host_dma, GFP_KERNEL);
|
|
|
if (!q2_pages) {
|
|
|
err = -ENOMEM;
|
|
|
goto bail2;
|
|
|
}
|
|
|
- c2dev->aeq.host_dma = dma_map_single(c2dev->ibdev.dma_device,
|
|
|
- (void *)q2_pages, qsize * msgsize,
|
|
|
- DMA_FROM_DEVICE);
|
|
|
pci_unmap_addr_set(&c2dev->aeq, mapping, c2dev->aeq.host_dma);
|
|
|
pr_debug("%s aeq va %p dma %llx\n", __FUNCTION__, q1_pages,
|
|
|
(unsigned long long) c2dev->rep_vq.host_dma);
|
|
@@ -597,17 +593,13 @@ int c2_rnic_init(struct c2_dev *c2dev)
|
|
|
bail4:
|
|
|
vq_term(c2dev);
|
|
|
bail3:
|
|
|
- dma_unmap_single(c2dev->ibdev.dma_device,
|
|
|
- pci_unmap_addr(&c2dev->aeq, mapping),
|
|
|
- c2dev->aeq.q_size * c2dev->aeq.msg_size,
|
|
|
- DMA_FROM_DEVICE);
|
|
|
- kfree(q2_pages);
|
|
|
+ dma_free_coherent(&c2dev->pcidev->dev,
|
|
|
+ c2dev->aeq.q_size * c2dev->aeq.msg_size,
|
|
|
+ q2_pages, pci_unmap_addr(&c2dev->aeq, mapping));
|
|
|
bail2:
|
|
|
- dma_unmap_single(c2dev->ibdev.dma_device,
|
|
|
- pci_unmap_addr(&c2dev->rep_vq, mapping),
|
|
|
- c2dev->rep_vq.q_size * c2dev->rep_vq.msg_size,
|
|
|
- DMA_FROM_DEVICE);
|
|
|
- kfree(q1_pages);
|
|
|
+ dma_free_coherent(&c2dev->pcidev->dev,
|
|
|
+ c2dev->rep_vq.q_size * c2dev->rep_vq.msg_size,
|
|
|
+ q1_pages, pci_unmap_addr(&c2dev->rep_vq, mapping));
|
|
|
bail1:
|
|
|
c2_free_mqsp_pool(c2dev, c2dev->kern_mqsp_pool);
|
|
|
bail0:
|
|
@@ -640,19 +632,17 @@ void c2_rnic_term(struct c2_dev *c2dev)
|
|
|
/* Free the verbs request allocator */
|
|
|
vq_term(c2dev);
|
|
|
|
|
|
- /* Unmap and free the asynchronus event queue */
|
|
|
- dma_unmap_single(c2dev->ibdev.dma_device,
|
|
|
- pci_unmap_addr(&c2dev->aeq, mapping),
|
|
|
- c2dev->aeq.q_size * c2dev->aeq.msg_size,
|
|
|
- DMA_FROM_DEVICE);
|
|
|
- kfree(c2dev->aeq.msg_pool.host);
|
|
|
-
|
|
|
- /* Unmap and free the verbs reply queue */
|
|
|
- dma_unmap_single(c2dev->ibdev.dma_device,
|
|
|
- pci_unmap_addr(&c2dev->rep_vq, mapping),
|
|
|
- c2dev->rep_vq.q_size * c2dev->rep_vq.msg_size,
|
|
|
- DMA_FROM_DEVICE);
|
|
|
- kfree(c2dev->rep_vq.msg_pool.host);
|
|
|
+ /* Free the asynchronous event queue */
|
|
|
+ dma_free_coherent(&c2dev->pcidev->dev,
|
|
|
+ c2dev->aeq.q_size * c2dev->aeq.msg_size,
|
|
|
+ c2dev->aeq.msg_pool.host,
|
|
|
+ pci_unmap_addr(&c2dev->aeq, mapping));
|
|
|
+
|
|
|
+ /* Free the verbs reply queue */
|
|
|
+ dma_free_coherent(&c2dev->pcidev->dev,
|
|
|
+ c2dev->rep_vq.q_size * c2dev->rep_vq.msg_size,
|
|
|
+ c2dev->rep_vq.msg_pool.host,
|
|
|
+ pci_unmap_addr(&c2dev->rep_vq, mapping));
|
|
|
|
|
|
/* Free the MQ shared pointer pool */
|
|
|
c2_free_mqsp_pool(c2dev, c2dev->kern_mqsp_pool);
|