@@ -337,7 +337,7 @@ static inline int txring_to_priority(struct b43_dmaring *ring)
 	return idx_to_prio[index];
 }
 
-u16 b43_dmacontroller_base(int dma64bit, int controller_idx)
+static u16 b43_dmacontroller_base(enum b43_dmatype type, int controller_idx)
 {
 	static const u16 map64[] = {
 		B43_MMIO_DMA64_BASE0,
@@ -356,7 +356,7 @@ u16 b43_dmacontroller_base(int dma64bit, int controller_idx)
 		B43_MMIO_DMA32_BASE5,
 	};
 
-	if (dma64bit) {
+	if (type == B43_DMA_64BIT) {
 		B43_WARN_ON(!(controller_idx >= 0 &&
 			      controller_idx < ARRAY_SIZE(map64)));
 		return map64[controller_idx];
@@ -437,7 +437,7 @@ static int alloc_ringmemory(struct b43_dmaring *ring)
 	 * 02, which uses 64-bit DMA, needs the ring buffer in very low memory,
 	 * which accounts for the GFP_DMA flag below.
 	 */
-	if (ring->dma64)
+	if (ring->type == B43_DMA_64BIT)
 		flags |= GFP_DMA;
 	ring->descbase = dma_alloc_coherent(dev, B43_DMA_RINGMEMSIZE,
 					    &(ring->dmabase), flags);
@@ -459,7 +459,8 @@ static void free_ringmemory(struct b43_dmaring *ring)
 }
 
 /* Reset the RX DMA channel */
-int b43_dmacontroller_rx_reset(struct b43_wldev *dev, u16 mmio_base, int dma64)
+static int b43_dmacontroller_rx_reset(struct b43_wldev *dev, u16 mmio_base,
+				      enum b43_dmatype type)
 {
 	int i;
 	u32 value;
@@ -467,12 +468,13 @@ int b43_dmacontroller_rx_reset(struct b43_wldev *dev, u16 mmio_base, int dma64)
 
 	might_sleep();
 
-	offset = dma64 ? B43_DMA64_RXCTL : B43_DMA32_RXCTL;
+	offset = (type == B43_DMA_64BIT) ? B43_DMA64_RXCTL : B43_DMA32_RXCTL;
 	b43_write32(dev, mmio_base + offset, 0);
 	for (i = 0; i < 10; i++) {
-		offset = dma64 ? B43_DMA64_RXSTATUS : B43_DMA32_RXSTATUS;
+		offset = (type == B43_DMA_64BIT) ? B43_DMA64_RXSTATUS :
+						   B43_DMA32_RXSTATUS;
 		value = b43_read32(dev, mmio_base + offset);
-		if (dma64) {
+		if (type == B43_DMA_64BIT) {
 			value &= B43_DMA64_RXSTAT;
 			if (value == B43_DMA64_RXSTAT_DISABLED) {
 				i = -1;
@@ -496,7 +498,8 @@ int b43_dmacontroller_rx_reset(struct b43_wldev *dev, u16 mmio_base, int dma64)
 }
 
 /* Reset the TX DMA channel */
-int b43_dmacontroller_tx_reset(struct b43_wldev *dev, u16 mmio_base, int dma64)
+static int b43_dmacontroller_tx_reset(struct b43_wldev *dev, u16 mmio_base,
+				      enum b43_dmatype type)
 {
 	int i;
 	u32 value;
@@ -505,9 +508,10 @@ int b43_dmacontroller_tx_reset(struct b43_wldev *dev, u16 mmio_base, int dma64)
 	might_sleep();
 
 	for (i = 0; i < 10; i++) {
-		offset = dma64 ? B43_DMA64_TXSTATUS : B43_DMA32_TXSTATUS;
+		offset = (type == B43_DMA_64BIT) ? B43_DMA64_TXSTATUS :
+						   B43_DMA32_TXSTATUS;
 		value = b43_read32(dev, mmio_base + offset);
-		if (dma64) {
+		if (type == B43_DMA_64BIT) {
 			value &= B43_DMA64_TXSTAT;
 			if (value == B43_DMA64_TXSTAT_DISABLED ||
 			    value == B43_DMA64_TXSTAT_IDLEWAIT ||
@@ -522,12 +526,13 @@ int b43_dmacontroller_tx_reset(struct b43_wldev *dev, u16 mmio_base, int dma64)
 		}
 		msleep(1);
 	}
-	offset = dma64 ? B43_DMA64_TXCTL : B43_DMA32_TXCTL;
+	offset = (type == B43_DMA_64BIT) ? B43_DMA64_TXCTL : B43_DMA32_TXCTL;
 	b43_write32(dev, mmio_base + offset, 0);
 	for (i = 0; i < 10; i++) {
-		offset = dma64 ? B43_DMA64_TXSTATUS : B43_DMA32_TXSTATUS;
+		offset = (type == B43_DMA_64BIT) ? B43_DMA64_TXSTATUS :
+						   B43_DMA32_TXSTATUS;
 		value = b43_read32(dev, mmio_base + offset);
-		if (dma64) {
+		if (type == B43_DMA_64BIT) {
 			value &= B43_DMA64_TXSTAT;
 			if (value == B43_DMA64_TXSTAT_DISABLED) {
 				i = -1;
@@ -552,6 +557,33 @@ int b43_dmacontroller_tx_reset(struct b43_wldev *dev, u16 mmio_base, int dma64)
 	return 0;
 }
 
+/* Check if a DMA mapping address is invalid. */
+static bool b43_dma_mapping_error(struct b43_dmaring *ring,
+				  dma_addr_t addr,
+				  size_t buffersize)
+{
+	if (unlikely(dma_mapping_error(addr)))
+		return 1;
+
+	switch (ring->type) {
+	case B43_DMA_30BIT:
+		if ((u64)addr + buffersize > (1ULL << 30))
+			return 1;
+		break;
+	case B43_DMA_32BIT:
+		if ((u64)addr + buffersize > (1ULL << 32))
+			return 1;
+		break;
+	case B43_DMA_64BIT:
+		/* Currently we can't have addresses beyond
+		 * 64bit in the kernel. */
+		break;
+	}
+
+	/* The address is OK. */
+	return 0;
+}
+
 static int setup_rx_descbuffer(struct b43_dmaring *ring,
 			       struct b43_dmadesc_generic *desc,
 			       struct b43_dmadesc_meta *meta, gfp_t gfp_flags)
@@ -567,7 +599,7 @@ static int setup_rx_descbuffer(struct b43_dmaring *ring,
 	if (unlikely(!skb))
 		return -ENOMEM;
 	dmaaddr = map_descbuffer(ring, skb->data, ring->rx_buffersize, 0);
-	if (dma_mapping_error(dmaaddr)) {
+	if (b43_dma_mapping_error(ring, dmaaddr, ring->rx_buffersize)) {
 		/* ugh. try to realloc in zone_dma */
 		gfp_flags |= GFP_DMA;
 
@@ -580,7 +612,7 @@ static int setup_rx_descbuffer(struct b43_dmaring *ring,
 					  ring->rx_buffersize, 0);
 	}
 
-	if (dma_mapping_error(dmaaddr)) {
+	if (b43_dma_mapping_error(ring, dmaaddr, ring->rx_buffersize)) {
 		dev_kfree_skb_any(skb);
 		return -EIO;
 	}
@@ -645,7 +677,7 @@ static int dmacontroller_setup(struct b43_dmaring *ring)
 	u32 trans = ssb_dma_translation(ring->dev->dev);
 
 	if (ring->tx) {
-		if (ring->dma64) {
+		if (ring->type == B43_DMA_64BIT) {
 			u64 ringbase = (u64) (ring->dmabase);
 
 			addrext = ((ringbase >> 32) & SSB_DMA_TRANSLATION_MASK)
@@ -677,7 +709,7 @@ static int dmacontroller_setup(struct b43_dmaring *ring)
 		err = alloc_initial_descbuffers(ring);
 		if (err)
 			goto out;
-		if (ring->dma64) {
+		if (ring->type == B43_DMA_64BIT) {
 			u64 ringbase = (u64) (ring->dmabase);
 
 			addrext = ((ringbase >> 32) & SSB_DMA_TRANSLATION_MASK)
@@ -722,16 +754,16 @@ static void dmacontroller_cleanup(struct b43_dmaring *ring)
 {
 	if (ring->tx) {
 		b43_dmacontroller_tx_reset(ring->dev, ring->mmio_base,
-					   ring->dma64);
-		if (ring->dma64) {
+					   ring->type);
+		if (ring->type == B43_DMA_64BIT) {
 			b43_dma_write(ring, B43_DMA64_TXRINGLO, 0);
 			b43_dma_write(ring, B43_DMA64_TXRINGHI, 0);
 		} else
 			b43_dma_write(ring, B43_DMA32_TXRING, 0);
 	} else {
 		b43_dmacontroller_rx_reset(ring->dev, ring->mmio_base,
-					   ring->dma64);
-		if (ring->dma64) {
+					   ring->type);
+		if (ring->type == B43_DMA_64BIT) {
 			b43_dma_write(ring, B43_DMA64_RXRINGLO, 0);
 			b43_dma_write(ring, B43_DMA64_RXRINGHI, 0);
 		} else
@@ -786,7 +818,8 @@ static u64 supported_dma_mask(struct b43_wldev *dev)
 static
 struct b43_dmaring *b43_setup_dmaring(struct b43_wldev *dev,
 				      int controller_index,
-				      int for_tx, int dma64)
+				      int for_tx,
+				      enum b43_dmatype type)
 {
 	struct b43_dmaring *ring;
 	int err;
@@ -796,6 +829,7 @@ struct b43_dmaring *b43_setup_dmaring(struct b43_wldev *dev,
 	ring = kzalloc(sizeof(*ring), GFP_KERNEL);
 	if (!ring)
 		goto out;
+	ring->type = type;
 
 	nr_slots = B43_RXRING_SLOTS;
 	if (for_tx)
@@ -818,7 +852,7 @@ struct b43_dmaring *b43_setup_dmaring(struct b43_wldev *dev,
 					      b43_txhdr_size(dev),
 					      DMA_TO_DEVICE);
 
-		if (dma_mapping_error(dma_test)) {
+		if (b43_dma_mapping_error(ring, dma_test, b43_txhdr_size(dev))) {
 			/* ugh realloc */
 			kfree(ring->txhdr_cache);
 			ring->txhdr_cache = kcalloc(nr_slots,
@@ -832,7 +866,8 @@ struct b43_dmaring *b43_setup_dmaring(struct b43_wldev *dev,
 						  b43_txhdr_size(dev),
 						  DMA_TO_DEVICE);
 
-			if (dma_mapping_error(dma_test))
+			if (b43_dma_mapping_error(ring, dma_test,
+						  b43_txhdr_size(dev)))
 				goto err_kfree_txhdr_cache;
 	}
 
@@ -843,10 +878,9 @@ struct b43_dmaring *b43_setup_dmaring(struct b43_wldev *dev,
 
 	ring->dev = dev;
 	ring->nr_slots = nr_slots;
-	ring->mmio_base = b43_dmacontroller_base(dma64, controller_index);
+	ring->mmio_base = b43_dmacontroller_base(type, controller_index);
 	ring->index = controller_index;
-	ring->dma64 = !!dma64;
-	if (dma64)
+	if (type == B43_DMA_64BIT)
 		ring->ops = &dma64_ops;
 	else
 		ring->ops = &dma32_ops;
@@ -896,8 +930,8 @@ static void b43_destroy_dmaring(struct b43_dmaring *ring)
 	if (!ring)
 		return;
 
-	b43dbg(ring->dev->wl, "DMA-%s 0x%04X (%s) max used slots: %d/%d\n",
-	       (ring->dma64) ? "64" : "32",
+	b43dbg(ring->dev->wl, "DMA-%u 0x%04X (%s) max used slots: %d/%d\n",
+	       (unsigned int)(ring->type),
 	       ring->mmio_base,
 	       (ring->tx) ? "TX" : "RX", ring->max_used_slots, ring->nr_slots);
 	/* Device IRQs are disabled prior entering this function,
@@ -941,12 +975,22 @@ int b43_dma_init(struct b43_wldev *dev)
 	struct b43_dmaring *ring;
 	int err;
 	u64 dmamask;
-	int dma64 = 0;
+	enum b43_dmatype type;
 
 	dmamask = supported_dma_mask(dev);
-	if (dmamask == DMA_64BIT_MASK)
-		dma64 = 1;
-
+	switch (dmamask) {
+	default:
+		B43_WARN_ON(1);
+	case DMA_30BIT_MASK:
+		type = B43_DMA_30BIT;
+		break;
+	case DMA_32BIT_MASK:
+		type = B43_DMA_32BIT;
+		break;
+	case DMA_64BIT_MASK:
+		type = B43_DMA_64BIT;
+		break;
+	}
 	err = ssb_dma_set_mask(dev->dev, dmamask);
 	if (err) {
 		b43err(dev->wl, "The machine/kernel does not support "
@@ -958,52 +1002,51 @@ int b43_dma_init(struct b43_wldev *dev)
 
 	err = -ENOMEM;
 	/* setup TX DMA channels. */
-	ring = b43_setup_dmaring(dev, 0, 1, dma64);
+	ring = b43_setup_dmaring(dev, 0, 1, type);
 	if (!ring)
 		goto out;
 	dma->tx_ring0 = ring;
 
-	ring = b43_setup_dmaring(dev, 1, 1, dma64);
+	ring = b43_setup_dmaring(dev, 1, 1, type);
 	if (!ring)
 		goto err_destroy_tx0;
 	dma->tx_ring1 = ring;
 
-	ring = b43_setup_dmaring(dev, 2, 1, dma64);
+	ring = b43_setup_dmaring(dev, 2, 1, type);
 	if (!ring)
 		goto err_destroy_tx1;
 	dma->tx_ring2 = ring;
 
-	ring = b43_setup_dmaring(dev, 3, 1, dma64);
+	ring = b43_setup_dmaring(dev, 3, 1, type);
 	if (!ring)
 		goto err_destroy_tx2;
 	dma->tx_ring3 = ring;
 
-	ring = b43_setup_dmaring(dev, 4, 1, dma64);
+	ring = b43_setup_dmaring(dev, 4, 1, type);
 	if (!ring)
 		goto err_destroy_tx3;
 	dma->tx_ring4 = ring;
 
-	ring = b43_setup_dmaring(dev, 5, 1, dma64);
+	ring = b43_setup_dmaring(dev, 5, 1, type);
 	if (!ring)
 		goto err_destroy_tx4;
 	dma->tx_ring5 = ring;
 
 	/* setup RX DMA channels. */
-	ring = b43_setup_dmaring(dev, 0, 0, dma64);
+	ring = b43_setup_dmaring(dev, 0, 0, type);
 	if (!ring)
 		goto err_destroy_tx5;
 	dma->rx_ring0 = ring;
 
 	if (dev->dev->id.revision < 5) {
-		ring = b43_setup_dmaring(dev, 3, 0, dma64);
+		ring = b43_setup_dmaring(dev, 3, 0, type);
 		if (!ring)
 			goto err_destroy_rx0;
 		dma->rx_ring3 = ring;
 	}
 
-	b43dbg(dev->wl, "%d-bit DMA initialized\n",
-	       (dmamask == DMA_64BIT_MASK) ? 64 :
-	       (dmamask == DMA_32BIT_MASK) ? 32 : 30);
+	b43dbg(dev->wl, "%u-bit DMA initialized\n",
+	       (unsigned int)type);
 	err = 0;
 out:
 	return err;
@@ -1146,7 +1189,7 @@ static int dma_tx_fragment(struct b43_dmaring *ring,
 
 	meta_hdr->dmaaddr = map_descbuffer(ring, (unsigned char *)header,
 					   hdrsize, 1);
-	if (dma_mapping_error(meta_hdr->dmaaddr)) {
+	if (b43_dma_mapping_error(ring, meta_hdr->dmaaddr, hdrsize)) {
		ring->current_slot = old_top_slot;
 		ring->used_slots = old_used_slots;
 		return -EIO;
@@ -1165,7 +1208,7 @@ static int dma_tx_fragment(struct b43_dmaring *ring,
 
 	meta->dmaaddr = map_descbuffer(ring, skb->data, skb->len, 1);
 	/* create a bounce buffer in zone_dma on mapping failure. */
-	if (dma_mapping_error(meta->dmaaddr)) {
+	if (b43_dma_mapping_error(ring, meta->dmaaddr, skb->len)) {
 		bounce_skb = __dev_alloc_skb(skb->len, GFP_ATOMIC | GFP_DMA);
 		if (!bounce_skb) {
 			ring->current_slot = old_top_slot;
@@ -1179,7 +1222,7 @@ static int dma_tx_fragment(struct b43_dmaring *ring,
 		skb = bounce_skb;
 		meta->skb = skb;
 		meta->dmaaddr = map_descbuffer(ring, skb->data, skb->len, 1);
-		if (dma_mapping_error(meta->dmaaddr)) {
+		if (b43_dma_mapping_error(ring, meta->dmaaddr, skb->len)) {
 			ring->current_slot = old_top_slot;
 			ring->used_slots = old_used_slots;
 			err = -EIO;
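
Note: the hunks above assign and compare enum b43_dmatype values (B43_DMA_30BIT, B43_DMA_32BIT, B43_DMA_64BIT), but this per-file diff does not show the enum's definition, which belongs in the driver's dma.h. A minimal sketch of that definition, assuming the numeric values implied by the "DMA-%u" and "%u-bit DMA initialized" debug prints above, which cast the type straight to unsigned int:

/* Sketch (assumed, not part of this diff): the enum values double as the
 * DMA address width in bits, so b43dbg() can print them with %u directly. */
enum b43_dmatype {
	B43_DMA_30BIT = 30,
	B43_DMA_32BIT = 32,
	B43_DMA_64BIT = 64,
};

With these values, the range checks in b43_dma_mapping_error() -- (u64)addr + buffersize against (1ULL << 30) or (1ULL << 32) -- correspond directly to the addressable window of the selected engine type.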