@@ -555,11 +555,32 @@ address_error:
 	return 1;
 }
 
+static bool b43_rx_buffer_is_poisoned(struct b43_dmaring *ring, struct sk_buff *skb)
+{
+	unsigned char *f = skb->data + ring->frameoffset;
+
+	return ((f[0] & f[1] & f[2] & f[3] & f[4] & f[5] & f[6] & f[7]) == 0xFF);
+}
+
+static void b43_poison_rx_buffer(struct b43_dmaring *ring, struct sk_buff *skb)
+{
+	struct b43_rxhdr_fw4 *rxhdr;
+	unsigned char *frame;
+
+	/* This poisons the RX buffer to detect DMA failures. */
+
+	rxhdr = (struct b43_rxhdr_fw4 *)(skb->data);
+	rxhdr->frame_len = 0;
+
+	B43_WARN_ON(ring->rx_buffersize < ring->frameoffset + sizeof(struct b43_plcp_hdr6) + 2);
+	frame = skb->data + ring->frameoffset;
+	memset(frame, 0xFF, sizeof(struct b43_plcp_hdr6) + 2 /* padding */);
+}
+
 static int setup_rx_descbuffer(struct b43_dmaring *ring,
 			       struct b43_dmadesc_generic *desc,
 			       struct b43_dmadesc_meta *meta, gfp_t gfp_flags)
 {
-	struct b43_rxhdr_fw4 *rxhdr;
 	dma_addr_t dmaaddr;
 	struct sk_buff *skb;
 
@@ -568,6 +589,7 @@ static int setup_rx_descbuffer(struct b43_dmaring *ring,
 	skb = __dev_alloc_skb(ring->rx_buffersize, gfp_flags);
 	if (unlikely(!skb))
 		return -ENOMEM;
+	b43_poison_rx_buffer(ring, skb);
 	dmaaddr = map_descbuffer(ring, skb->data, ring->rx_buffersize, 0);
 	if (b43_dma_mapping_error(ring, dmaaddr, ring->rx_buffersize, 0)) {
 		/* ugh. try to realloc in zone_dma */
@@ -578,6 +600,7 @@ static int setup_rx_descbuffer(struct b43_dmaring *ring,
 		skb = __dev_alloc_skb(ring->rx_buffersize, gfp_flags);
 		if (unlikely(!skb))
 			return -ENOMEM;
+		b43_poison_rx_buffer(ring, skb);
 		dmaaddr = map_descbuffer(ring, skb->data,
 					 ring->rx_buffersize, 0);
 		if (b43_dma_mapping_error(ring, dmaaddr, ring->rx_buffersize, 0)) {
@@ -592,9 +615,6 @@ static int setup_rx_descbuffer(struct b43_dmaring *ring,
 	ring->ops->fill_descriptor(ring, desc, dmaaddr,
 				   ring->rx_buffersize, 0, 0, 0);
 
-	rxhdr = (struct b43_rxhdr_fw4 *)(skb->data);
-	rxhdr->frame_len = 0;
-
 	return 0;
 }
 
@@ -1483,12 +1503,17 @@ static void dma_rx(struct b43_dmaring *ring, int *slot)
 			len = le16_to_cpu(rxhdr->frame_len);
 		} while (len == 0 && i++ < 5);
 		if (unlikely(len == 0)) {
-			/* recycle the descriptor buffer. */
-			sync_descbuffer_for_device(ring, meta->dmaaddr,
-						   ring->rx_buffersize);
-			goto drop;
+			dmaaddr = meta->dmaaddr;
+			goto drop_recycle_buffer;
 		}
 	}
+	if (unlikely(b43_rx_buffer_is_poisoned(ring, skb))) {
+		/* Something went wrong with the DMA.
+		 * The device did not touch the buffer and did not overwrite the poison. */
+		b43dbg(ring->dev->wl, "DMA RX: Dropping poisoned buffer.\n");
+		dmaaddr = meta->dmaaddr;
+		goto drop_recycle_buffer;
+	}
 	if (unlikely(len > ring->rx_buffersize)) {
 		/* The data did not fit into one descriptor buffer
 		 * and is split over multiple buffers.
@@ -1501,6 +1526,7 @@ static void dma_rx(struct b43_dmaring *ring, int *slot)
 		while (1) {
 			desc = ops->idx2desc(ring, *slot, &meta);
 			/* recycle the descriptor buffer. */
+			b43_poison_rx_buffer(ring, meta->skb);
 			sync_descbuffer_for_device(ring, meta->dmaaddr,
 						   ring->rx_buffersize);
 			*slot = next_slot(ring, *slot);
@@ -1519,8 +1545,7 @@ static void dma_rx(struct b43_dmaring *ring, int *slot)
 	err = setup_rx_descbuffer(ring, desc, meta, GFP_ATOMIC);
 	if (unlikely(err)) {
 		b43dbg(ring->dev->wl, "DMA RX: setup_rx_descbuffer() failed\n");
-		sync_descbuffer_for_device(ring, dmaaddr, ring->rx_buffersize);
-		goto drop;
+		goto drop_recycle_buffer;
 	}
 
 	unmap_descbuffer(ring, dmaaddr, ring->rx_buffersize, 0);
@@ -1530,6 +1555,11 @@ static void dma_rx(struct b43_dmaring *ring, int *slot)
 	b43_rx(ring->dev, skb, rxhdr);
 drop:
 	return;
+
+drop_recycle_buffer:
+	/* Poison and recycle the RX buffer. */
+	b43_poison_rx_buffer(ring, skb);
+	sync_descbuffer_for_device(ring, dmaaddr, ring->rx_buffersize);
 }
 
 void b43_dma_rx(struct b43_dmaring *ring)