@@ -333,10 +333,10 @@ static inline
         dma_addr_t dmaaddr;
 
         if (tx) {
-                dmaaddr = dma_map_single(ring->dev->sdev->dma_dev,
+                dmaaddr = dma_map_single(ring->dev->dev->dma_dev,
                                          buf, len, DMA_TO_DEVICE);
         } else {
-                dmaaddr = dma_map_single(ring->dev->sdev->dma_dev,
+                dmaaddr = dma_map_single(ring->dev->dev->dma_dev,
                                          buf, len, DMA_FROM_DEVICE);
         }
 
@@ -348,10 +348,10 @@ static inline
                           dma_addr_t addr, size_t len, int tx)
 {
         if (tx) {
-                dma_unmap_single(ring->dev->sdev->dma_dev,
+                dma_unmap_single(ring->dev->dev->dma_dev,
                                  addr, len, DMA_TO_DEVICE);
         } else {
-                dma_unmap_single(ring->dev->sdev->dma_dev,
+                dma_unmap_single(ring->dev->dev->dma_dev,
                                  addr, len, DMA_FROM_DEVICE);
         }
 }
@@ -361,7 +361,7 @@ static inline
                                  dma_addr_t addr, size_t len)
 {
         B43_WARN_ON(ring->tx);
-        dma_sync_single_for_cpu(ring->dev->sdev->dma_dev,
+        dma_sync_single_for_cpu(ring->dev->dev->dma_dev,
                                 addr, len, DMA_FROM_DEVICE);
 }
 
@@ -370,7 +370,7 @@ static inline
                                     dma_addr_t addr, size_t len)
 {
         B43_WARN_ON(ring->tx);
-        dma_sync_single_for_device(ring->dev->sdev->dma_dev,
+        dma_sync_single_for_device(ring->dev->dev->dma_dev,
                                    addr, len, DMA_FROM_DEVICE);
 }
 
@@ -401,7 +401,7 @@ static int alloc_ringmemory(struct b43_dmaring *ring)
          */
         if (ring->type == B43_DMA_64BIT)
                 flags |= GFP_DMA;
-        ring->descbase = dma_alloc_coherent(ring->dev->sdev->dma_dev,
+        ring->descbase = dma_alloc_coherent(ring->dev->dev->dma_dev,
                                             B43_DMA_RINGMEMSIZE,
                                             &(ring->dmabase), flags);
         if (!ring->descbase) {
@@ -415,7 +415,7 @@ static int alloc_ringmemory(struct b43_dmaring *ring)
 
 static void free_ringmemory(struct b43_dmaring *ring)
 {
-        dma_free_coherent(ring->dev->sdev->dma_dev, B43_DMA_RINGMEMSIZE,
+        dma_free_coherent(ring->dev->dev->dma_dev, B43_DMA_RINGMEMSIZE,
                           ring->descbase, ring->dmabase);
 }
 
@@ -523,7 +523,7 @@ static bool b43_dma_mapping_error(struct b43_dmaring *ring,
                                   dma_addr_t addr,
                                   size_t buffersize, bool dma_to_device)
 {
-        if (unlikely(dma_mapping_error(ring->dev->sdev->dma_dev, addr)))
+        if (unlikely(dma_mapping_error(ring->dev->dev->dma_dev, addr)))
                 return 1;
 
         switch (ring->type) {
@@ -869,7 +869,7 @@ struct b43_dmaring *b43_setup_dmaring(struct b43_wldev *dev,
                         goto err_kfree_meta;
 
                 /* test for ability to dma to txhdr_cache */
-                dma_test = dma_map_single(dev->sdev->dma_dev,
+                dma_test = dma_map_single(dev->dev->dma_dev,
                                           ring->txhdr_cache,
                                           b43_txhdr_size(dev),
                                           DMA_TO_DEVICE);
@@ -884,7 +884,7 @@ struct b43_dmaring *b43_setup_dmaring(struct b43_wldev *dev,
                         if (!ring->txhdr_cache)
                                 goto err_kfree_meta;
 
-                        dma_test = dma_map_single(dev->sdev->dma_dev,
+                        dma_test = dma_map_single(dev->dev->dma_dev,
                                                   ring->txhdr_cache,
                                                   b43_txhdr_size(dev),
                                                   DMA_TO_DEVICE);
@@ -898,7 +898,7 @@ struct b43_dmaring *b43_setup_dmaring(struct b43_wldev *dev,
                         }
                 }
 
-                dma_unmap_single(dev->sdev->dma_dev,
+                dma_unmap_single(dev->dev->dma_dev,
                                  dma_test, b43_txhdr_size(dev),
                                  DMA_TO_DEVICE);
         }
@@ -1013,9 +1013,9 @@ static int b43_dma_set_mask(struct b43_wldev *dev, u64 mask)
         /* Try to set the DMA mask. If it fails, try falling back to a
          * lower mask, as we can always also support a lower one. */
         while (1) {
-                err = dma_set_mask(dev->sdev->dma_dev, mask);
+                err = dma_set_mask(dev->dev->dma_dev, mask);
                 if (!err) {
-                        err = dma_set_coherent_mask(dev->sdev->dma_dev, mask);
+                        err = dma_set_coherent_mask(dev->dev->dma_dev, mask);
                         if (!err)
                                 break;
                 }