@@ -1157,18 +1157,17 @@ struct b43_dmaring *parse_cookie(struct b43_wldev *dev, u16 cookie, int *slot)
 }
 
 static int dma_tx_fragment(struct b43_dmaring *ring,
-			   struct sk_buff **in_skb)
+			   struct sk_buff *skb)
 {
-	struct sk_buff *skb = *in_skb;
 	const struct b43_dma_ops *ops = ring->ops;
 	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+	struct b43_private_tx_info *priv_info = b43_get_priv_tx_info(info);
 	u8 *header;
 	int slot, old_top_slot, old_used_slots;
 	int err;
 	struct b43_dmadesc_generic *desc;
 	struct b43_dmadesc_meta *meta;
 	struct b43_dmadesc_meta *meta_hdr;
-	struct sk_buff *bounce_skb;
 	u16 cookie;
 	size_t hdrsize = b43_txhdr_size(ring->dev);
 
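The double-pointer parameter is gone: dma_tx_fragment() no longer needs to hand a replacement skb back to its caller, because the bounce buffer is now tracked per packet instead of replacing the packet. The new priv_info pointer refers to driver-private state carried inside the ieee80211_tx_info control block. The definitions live in b43.h rather than in this diff; a sketch of what this hunk assumes, roughly along these lines:

	/* Sketch (from b43.h, not shown in this diff; details may differ).
	 * The pointer is stashed in the rate_driver_data scratch area of
	 * the tx_info, so it travels with the skb without reallocating
	 * or copying the skb itself. */
	struct b43_private_tx_info {
		void *bouncebuffer;
	};

	static inline struct b43_private_tx_info *
	b43_get_priv_tx_info(struct ieee80211_tx_info *info)
	{
		BUILD_BUG_ON(sizeof(struct b43_private_tx_info) >
			     sizeof(info->rate_driver_data));
		return (struct b43_private_tx_info *)info->rate_driver_data;
	}
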
@@ -1212,34 +1211,28 @@ static int dma_tx_fragment(struct b43_dmaring *ring,
 
 	meta->skb = skb;
 	meta->is_last_fragment = 1;
+	priv_info->bouncebuffer = NULL;
 
 	meta->dmaaddr = map_descbuffer(ring, skb->data, skb->len, 1);
 	/* create a bounce buffer in zone_dma on mapping failure. */
 	if (b43_dma_mapping_error(ring, meta->dmaaddr, skb->len, 1)) {
-		bounce_skb = __dev_alloc_skb(skb->len, GFP_ATOMIC | GFP_DMA);
-		if (!bounce_skb) {
+		priv_info->bouncebuffer = kmalloc(skb->len, GFP_ATOMIC | GFP_DMA);
+		if (!priv_info->bouncebuffer) {
 			ring->current_slot = old_top_slot;
 			ring->used_slots = old_used_slots;
 			err = -ENOMEM;
 			goto out_unmap_hdr;
 		}
+		memcpy(priv_info->bouncebuffer, skb->data, skb->len);
 
-		memcpy(skb_put(bounce_skb, skb->len), skb->data, skb->len);
-		memcpy(bounce_skb->cb, skb->cb, sizeof(skb->cb));
-		bounce_skb->dev = skb->dev;
-		skb_set_queue_mapping(bounce_skb, skb_get_queue_mapping(skb));
-		info = IEEE80211_SKB_CB(bounce_skb);
-
-		dev_kfree_skb_any(skb);
-		skb = bounce_skb;
-		*in_skb = bounce_skb;
-		meta->skb = skb;
-		meta->dmaaddr = map_descbuffer(ring, skb->data, skb->len, 1);
+		meta->dmaaddr = map_descbuffer(ring, priv_info->bouncebuffer, skb->len, 1);
 		if (b43_dma_mapping_error(ring, meta->dmaaddr, skb->len, 1)) {
+			kfree(priv_info->bouncebuffer);
+			priv_info->bouncebuffer = NULL;
 			ring->current_slot = old_top_slot;
 			ring->used_slots = old_used_slots;
 			err = -EIO;
-			goto out_free_bounce;
+			goto out_unmap_hdr;
 		}
 	}
 
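On a mapping failure the old code allocated a whole replacement skb, copied the payload, cb and queue mapping across, freed the original, and pushed the new skb back through *in_skb; a second mapping failure then needed the separate out_free_bounce unwind. The new code copies only the payload into a plain GFP_DMA kmalloc buffer and maps that, while the original skb (and everything mac80211 stored in it) stays untouched, so both failure paths can share out_unmap_hdr. The resulting pattern, pulled out as a hypothetical standalone helper for clarity (not part of the patch):

	/* Try to DMA-map the skb data in place; if the ring cannot address
	 * it, fall back to a copy in ZONE_DMA. Returns 0 on success. */
	static int map_or_bounce(struct b43_dmaring *ring,
				 struct sk_buff *skb,
				 struct b43_private_tx_info *priv_info,
				 dma_addr_t *dmaaddr)
	{
		*dmaaddr = map_descbuffer(ring, skb->data, skb->len, 1);
		if (!b43_dma_mapping_error(ring, *dmaaddr, skb->len, 1))
			return 0;

		priv_info->bouncebuffer = kmalloc(skb->len,
						  GFP_ATOMIC | GFP_DMA);
		if (!priv_info->bouncebuffer)
			return -ENOMEM;
		memcpy(priv_info->bouncebuffer, skb->data, skb->len);

		*dmaaddr = map_descbuffer(ring, priv_info->bouncebuffer,
					  skb->len, 1);
		if (b43_dma_mapping_error(ring, *dmaaddr, skb->len, 1)) {
			kfree(priv_info->bouncebuffer);
			priv_info->bouncebuffer = NULL;
			return -EIO;
		}
		return 0;
	}
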
@@ -1256,8 +1249,6 @@ static int dma_tx_fragment(struct b43_dmaring *ring,
 	ops->poke_tx(ring, next_slot(ring, slot));
 	return 0;
 
-out_free_bounce:
-	dev_kfree_skb_any(skb);
 out_unmap_hdr:
 	unmap_descbuffer(ring, meta_hdr->dmaaddr,
 			 hdrsize, 1);
@@ -1362,11 +1353,7 @@ int b43_dma_tx(struct b43_wldev *dev, struct sk_buff *skb)
 	 * static, so we don't need to store it per frame. */
 	ring->queue_prio = skb_get_queue_mapping(skb);
 
-	/* dma_tx_fragment might reallocate the skb, so invalidate pointers pointing
-	 * into the skb data or cb now. */
-	hdr = NULL;
-	info = NULL;
-	err = dma_tx_fragment(ring, &skb);
+	err = dma_tx_fragment(ring, skb);
 	if (unlikely(err == -ENOKEY)) {
 		/* Drop this packet, as we don't have the encryption key
 		 * anymore and must not transmit it unencrypted. */
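Since dma_tx_fragment() can no longer free or replace the skb behind the caller's back, the defensive invalidation of hdr and info before the call is obsolete; pointers derived from the skb stay valid across it. A hypothetical caller fragment, for illustration:

	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);

	err = dma_tx_fragment(ring, skb);
	/* skb, skb->data, skb->cb and info still refer to the same,
	 * unreplaced packet here, whether or not a bounce buffer was
	 * allocated internally. */
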
@@ -1413,12 +1400,17 @@ void b43_dma_handle_txstatus(struct b43_wldev *dev,
 		B43_WARN_ON(!(slot >= 0 && slot < ring->nr_slots));
 		desc = ops->idx2desc(ring, slot, &meta);
 
-		if (meta->skb)
-			unmap_descbuffer(ring, meta->dmaaddr, meta->skb->len,
-					 1);
-		else
+		if (meta->skb) {
+			struct b43_private_tx_info *priv_info =
+				b43_get_priv_tx_info(IEEE80211_SKB_CB(meta->skb));
+
+			unmap_descbuffer(ring, meta->dmaaddr, meta->skb->len, 1);
+			kfree(priv_info->bouncebuffer);
+			priv_info->bouncebuffer = NULL;
+		} else {
 			unmap_descbuffer(ring, meta->dmaaddr,
 					 b43_txhdr_size(dev), 1);
+		}
 
 		if (meta->is_last_fragment) {
 			struct ieee80211_tx_info *info;
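The free of the bounce buffer moves to TX-status handling, after the descriptor is unmapped, so the device is guaranteed to be done with the memory. kfree(NULL) is a no-op, so frames that were mapped in place need no special case, and clearing the pointer keeps the slot safe for reuse. The resulting lifecycle, as a sketch:

	/*
	 * dma_tx_fragment():               b43_dma_handle_txstatus():
	 *   map skb->data                    unmap meta->dmaaddr
	 *   if mapping unusable:             kfree(priv_info->bouncebuffer)
	 *     kmalloc(GFP_ATOMIC|GFP_DMA)    priv_info->bouncebuffer = NULL
	 *     memcpy + map the copy
	 */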