@@ -214,7 +214,9 @@ static int alloc_ringmemory(struct bcm43xx_dmaring *ring)
 		return -ENOMEM;
 	}
 	if (ring->dmabase + BCM43xx_DMA_RINGMEMSIZE > BCM43xx_DMA_BUSADDRMAX) {
-		printk(KERN_ERR PFX ">>>FATAL ERROR<<< DMA RINGMEMORY >1G\n");
+		printk(KERN_ERR PFX ">>>FATAL ERROR<<< DMA RINGMEMORY >1G "
+		       "(0x%08x, len: %lu)\n",
+		       ring->dmabase, BCM43xx_DMA_RINGMEMSIZE);
 		dma_free_coherent(dev, BCM43xx_DMA_RINGMEMSIZE,
 				  ring->vbase, ring->dmabase);
 		return -ENOMEM;
@@ -261,13 +263,6 @@ int bcm43xx_dmacontroller_rx_reset(struct bcm43xx_private *bcm,
 	return 0;
 }
 
-static inline int dmacontroller_rx_reset(struct bcm43xx_dmaring *ring)
-{
-	assert(!ring->tx);
-
-	return bcm43xx_dmacontroller_rx_reset(ring->bcm, ring->mmio_base);
-}
-
 /* Reset the RX DMA channel */
 int bcm43xx_dmacontroller_tx_reset(struct bcm43xx_private *bcm,
 				   u16 mmio_base)
@@ -308,13 +303,6 @@ int bcm43xx_dmacontroller_tx_reset(struct bcm43xx_private *bcm,
 	return 0;
 }
 
-static inline int dmacontroller_tx_reset(struct bcm43xx_dmaring *ring)
-{
-	assert(ring->tx);
-
-	return bcm43xx_dmacontroller_tx_reset(ring->bcm, ring->mmio_base);
-}
-
 static int setup_rx_descbuffer(struct bcm43xx_dmaring *ring,
 			       struct bcm43xx_dmadesc *desc,
 			       struct bcm43xx_dmadesc_meta *meta,
@@ -337,7 +325,9 @@ static int setup_rx_descbuffer(struct bcm43xx_dmaring *ring,
 	if (unlikely(dmaaddr + ring->rx_buffersize > BCM43xx_DMA_BUSADDRMAX)) {
 		unmap_descbuffer(ring, dmaaddr, ring->rx_buffersize, 0);
 		dev_kfree_skb_any(skb);
-		printk(KERN_ERR PFX ">>>FATAL ERROR<<< DMA RX SKB >1G\n");
+		printk(KERN_ERR PFX ">>>FATAL ERROR<<< DMA RX SKB >1G "
+		       "(0x%08x, len: %u)\n",
+		       dmaaddr, ring->rx_buffersize);
 		return -ENOMEM;
 	}
 	meta->skb = skb;
@@ -365,7 +355,7 @@ static int setup_rx_descbuffer(struct bcm43xx_dmaring *ring,
 static int alloc_initial_descbuffers(struct bcm43xx_dmaring *ring)
 {
 	int i, err = -ENOMEM;
-	struct bcm43xx_dmadesc *desc = NULL;
+	struct bcm43xx_dmadesc *desc;
 	struct bcm43xx_dmadesc_meta *meta;
 
 	for (i = 0; i < ring->nr_slots; i++) {
@@ -375,24 +365,20 @@ static int alloc_initial_descbuffers(struct bcm43xx_dmaring *ring)
 		err = setup_rx_descbuffer(ring, desc, meta, GFP_KERNEL);
 		if (err)
 			goto err_unwind;
-
-		assert(ring->used_slots <= ring->nr_slots);
 	}
 	ring->used_slots = ring->nr_slots;
-
 	err = 0;
 out:
 	return err;
 
 err_unwind:
-	for ( ; i >= 0; i--) {
+	for (i--; i >= 0; i--) {
 		desc = ring->vbase + i;
 		meta = ring->meta + i;
 
 		unmap_descbuffer(ring, meta->dmaaddr, ring->rx_buffersize, 0);
 		dev_kfree_skb(meta->skb);
 	}
-	ring->used_slots = 0;
 	goto out;
 }
 
@@ -442,13 +428,13 @@ out:
 static void dmacontroller_cleanup(struct bcm43xx_dmaring *ring)
 {
 	if (ring->tx) {
-		dmacontroller_tx_reset(ring);
+		bcm43xx_dmacontroller_tx_reset(ring->bcm, ring->mmio_base);
 		/* Zero out Transmit Descriptor ring address. */
 		bcm43xx_write32(ring->bcm,
 				ring->mmio_base + BCM43xx_DMA_TX_DESC_RING,
 				0x00000000);
 	} else {
-		dmacontroller_rx_reset(ring);
+		bcm43xx_dmacontroller_rx_reset(ring->bcm, ring->mmio_base);
 		/* Zero out Receive Descriptor ring address. */
 		bcm43xx_write32(ring->bcm,
 				ring->mmio_base + BCM43xx_DMA_RX_DESC_RING,
@@ -508,9 +494,7 @@ struct bcm43xx_dmaring * bcm43xx_setup_dmaring(struct bcm43xx_private *bcm,
 	if (bcm->pci_dev->bus->number == 0)
 		ring->memoffset = 0;
 #endif
-
-
-	spin_lock_init(&ring->lock);
+
 	ring->bcm = bcm;
 	ring->nr_slots = nr_descriptor_slots;
 	ring->suspend_mark = ring->nr_slots * BCM43xx_TXSUSPEND_PERCENT / 100;
@@ -578,22 +562,25 @@ static void bcm43xx_destroy_dmaring(struct bcm43xx_dmaring *ring)
 
 void bcm43xx_dma_free(struct bcm43xx_private *bcm)
 {
-	bcm43xx_destroy_dmaring(bcm->current_core->dma->rx_ring1);
-	bcm->current_core->dma->rx_ring1 = NULL;
-	bcm43xx_destroy_dmaring(bcm->current_core->dma->rx_ring0);
-	bcm->current_core->dma->rx_ring0 = NULL;
-	bcm43xx_destroy_dmaring(bcm->current_core->dma->tx_ring3);
-	bcm->current_core->dma->tx_ring3 = NULL;
-	bcm43xx_destroy_dmaring(bcm->current_core->dma->tx_ring2);
-	bcm->current_core->dma->tx_ring2 = NULL;
-	bcm43xx_destroy_dmaring(bcm->current_core->dma->tx_ring1);
-	bcm->current_core->dma->tx_ring1 = NULL;
-	bcm43xx_destroy_dmaring(bcm->current_core->dma->tx_ring0);
-	bcm->current_core->dma->tx_ring0 = NULL;
+	struct bcm43xx_dma *dma = bcm->current_core->dma;
+
+	bcm43xx_destroy_dmaring(dma->rx_ring1);
+	dma->rx_ring1 = NULL;
+	bcm43xx_destroy_dmaring(dma->rx_ring0);
+	dma->rx_ring0 = NULL;
+	bcm43xx_destroy_dmaring(dma->tx_ring3);
+	dma->tx_ring3 = NULL;
+	bcm43xx_destroy_dmaring(dma->tx_ring2);
+	dma->tx_ring2 = NULL;
+	bcm43xx_destroy_dmaring(dma->tx_ring1);
+	dma->tx_ring1 = NULL;
+	bcm43xx_destroy_dmaring(dma->tx_ring0);
+	dma->tx_ring0 = NULL;
 }
 
 int bcm43xx_dma_init(struct bcm43xx_private *bcm)
 {
+	struct bcm43xx_dma *dma = bcm->current_core->dma;
 	struct bcm43xx_dmaring *ring;
 	int err = -ENOMEM;
 
@@ -602,39 +589,39 @@ int bcm43xx_dma_init(struct bcm43xx_private *bcm)
 				     BCM43xx_TXRING_SLOTS, 1);
 	if (!ring)
 		goto out;
-	bcm->current_core->dma->tx_ring0 = ring;
+	dma->tx_ring0 = ring;
 
 	ring = bcm43xx_setup_dmaring(bcm, BCM43xx_MMIO_DMA2_BASE,
 				     BCM43xx_TXRING_SLOTS, 1);
 	if (!ring)
 		goto err_destroy_tx0;
-	bcm->current_core->dma->tx_ring1 = ring;
+	dma->tx_ring1 = ring;
 
 	ring = bcm43xx_setup_dmaring(bcm, BCM43xx_MMIO_DMA3_BASE,
 				     BCM43xx_TXRING_SLOTS, 1);
 	if (!ring)
 		goto err_destroy_tx1;
-	bcm->current_core->dma->tx_ring2 = ring;
+	dma->tx_ring2 = ring;
 
 	ring = bcm43xx_setup_dmaring(bcm, BCM43xx_MMIO_DMA4_BASE,
 				     BCM43xx_TXRING_SLOTS, 1);
 	if (!ring)
 		goto err_destroy_tx2;
-	bcm->current_core->dma->tx_ring3 = ring;
+	dma->tx_ring3 = ring;
 
 	/* setup RX DMA channels. */
 	ring = bcm43xx_setup_dmaring(bcm, BCM43xx_MMIO_DMA1_BASE,
 				     BCM43xx_RXRING_SLOTS, 0);
 	if (!ring)
 		goto err_destroy_tx3;
-	bcm->current_core->dma->rx_ring0 = ring;
+	dma->rx_ring0 = ring;
 
 	if (bcm->current_core->rev < 5) {
 		ring = bcm43xx_setup_dmaring(bcm, BCM43xx_MMIO_DMA4_BASE,
 					     BCM43xx_RXRING_SLOTS, 0);
 		if (!ring)
 			goto err_destroy_rx0;
-		bcm->current_core->dma->rx_ring1 = ring;
+		dma->rx_ring1 = ring;
 	}
 
 	dprintk(KERN_INFO PFX "DMA initialized\n");
@@ -643,27 +630,26 @@ out:
 	return err;
 
 err_destroy_rx0:
-	bcm43xx_destroy_dmaring(bcm->current_core->dma->rx_ring0);
-	bcm->current_core->dma->rx_ring0 = NULL;
+	bcm43xx_destroy_dmaring(dma->rx_ring0);
+	dma->rx_ring0 = NULL;
 err_destroy_tx3:
-	bcm43xx_destroy_dmaring(bcm->current_core->dma->tx_ring3);
-	bcm->current_core->dma->tx_ring3 = NULL;
+	bcm43xx_destroy_dmaring(dma->tx_ring3);
+	dma->tx_ring3 = NULL;
 err_destroy_tx2:
-	bcm43xx_destroy_dmaring(bcm->current_core->dma->tx_ring2);
-	bcm->current_core->dma->tx_ring2 = NULL;
+	bcm43xx_destroy_dmaring(dma->tx_ring2);
+	dma->tx_ring2 = NULL;
 err_destroy_tx1:
-	bcm43xx_destroy_dmaring(bcm->current_core->dma->tx_ring1);
-	bcm->current_core->dma->tx_ring1 = NULL;
+	bcm43xx_destroy_dmaring(dma->tx_ring1);
+	dma->tx_ring1 = NULL;
 err_destroy_tx0:
-	bcm43xx_destroy_dmaring(bcm->current_core->dma->tx_ring0);
-	bcm->current_core->dma->tx_ring0 = NULL;
+	bcm43xx_destroy_dmaring(dma->tx_ring0);
+	dma->tx_ring0 = NULL;
 	goto out;
 }
 
 /* Generate a cookie for the TX header. */
-static inline
-u16 generate_cookie(struct bcm43xx_dmaring *ring,
-		    int slot)
+static u16 generate_cookie(struct bcm43xx_dmaring *ring,
+			   int slot)
 {
 	u16 cookie = 0x0000;
 
@@ -693,24 +679,25 @@ u16 generate_cookie(struct bcm43xx_dmaring *ring,
 }
 
 /* Inspect a cookie and find out to which controller/slot it belongs. */
-static inline
+static
 struct bcm43xx_dmaring * parse_cookie(struct bcm43xx_private *bcm,
 				      u16 cookie, int *slot)
 {
+	struct bcm43xx_dma *dma = bcm->current_core->dma;
 	struct bcm43xx_dmaring *ring = NULL;
 
 	switch (cookie & 0xF000) {
 	case 0x0000:
-		ring = bcm->current_core->dma->tx_ring0;
+		ring = dma->tx_ring0;
 		break;
 	case 0x1000:
-		ring = bcm->current_core->dma->tx_ring1;
+		ring = dma->tx_ring1;
 		break;
 	case 0x2000:
-		ring = bcm->current_core->dma->tx_ring2;
+		ring = dma->tx_ring2;
 		break;
 	case 0x3000:
-		ring = bcm->current_core->dma->tx_ring3;
+		ring = dma->tx_ring3;
 		break;
 	default:
 		assert(0);
@@ -721,8 +708,8 @@ struct bcm43xx_dmaring * parse_cookie(struct bcm43xx_private *bcm,
 	return ring;
 }
 
-static inline void dmacontroller_poke_tx(struct bcm43xx_dmaring *ring,
-					 int slot)
+static void dmacontroller_poke_tx(struct bcm43xx_dmaring *ring,
+				  int slot)
 {
 	/* Everything is ready to start. Buffers are DMA mapped and
 	 * associated with slots.
@@ -736,11 +723,10 @@ static inline void dmacontroller_poke_tx(struct bcm43xx_dmaring *ring,
 			(u32)(slot * sizeof(struct bcm43xx_dmadesc)));
 }
 
-static inline
-int dma_tx_fragment(struct bcm43xx_dmaring *ring,
-		    struct sk_buff *skb,
-		    struct ieee80211_txb *txb,
-		    u8 cur_frag)
+static int dma_tx_fragment(struct bcm43xx_dmaring *ring,
+			   struct sk_buff *skb,
+			   struct ieee80211_txb *txb,
+			   u8 cur_frag)
 {
 	int slot;
 	struct bcm43xx_dmadesc *desc;
@@ -777,7 +763,9 @@ int dma_tx_fragment(struct bcm43xx_dmaring *ring,
 	meta->dmaaddr = map_descbuffer(ring, skb->data, skb->len, 1);
 	if (unlikely(meta->dmaaddr + skb->len > BCM43xx_DMA_BUSADDRMAX)) {
 		return_slot(ring, slot);
-		printk(KERN_ERR PFX ">>>FATAL ERROR<<< DMA TX SKB >1G\n");
+		printk(KERN_ERR PFX ">>>FATAL ERROR<<< DMA TX SKB >1G "
+		       "(0x%08x, len: %u)\n",
+		       meta->dmaaddr, skb->len);
 		return -ENOMEM;
 	}
 
@@ -797,14 +785,15 @@ int dma_tx_fragment(struct bcm43xx_dmaring *ring,
 	return 0;
 }
 
-static inline int dma_transfer_txb(struct bcm43xx_dmaring *ring,
-				   struct ieee80211_txb *txb)
+int bcm43xx_dma_tx(struct bcm43xx_private *bcm,
+		   struct ieee80211_txb *txb)
 {
 	/* We just received a packet from the kernel network subsystem.
 	 * Add headers and DMA map the memory. Poke
 	 * the device to send the stuff.
 	 * Note that this is called from atomic context.
 	 */
+	struct bcm43xx_dmaring *ring = bcm->current_core->dma->tx_ring1;
 	u8 i;
 	struct sk_buff *skb;
 
@@ -818,8 +807,6 @@ static inline int dma_transfer_txb(struct bcm43xx_dmaring *ring,
 		return -ENOMEM;
 	}
 
-	assert(irqs_disabled());
-	spin_lock(&ring->lock);
 	for (i = 0; i < txb->nr_frags; i++) {
 		skb = txb->fragments[i];
 		/* We do not free the skb, as it is freed as
@@ -829,22 +816,12 @@ static inline int dma_transfer_txb(struct bcm43xx_dmaring *ring,
 		dma_tx_fragment(ring, skb, txb, i);
 		//TODO: handle failure of dma_tx_fragment
 	}
-	spin_unlock(&ring->lock);
 
 	return 0;
 }
 
-int fastcall
-bcm43xx_dma_transfer_txb(struct bcm43xx_private *bcm,
-			 struct ieee80211_txb *txb)
-{
-	return dma_transfer_txb(bcm->current_core->dma->tx_ring1,
-				txb);
-}
-
-void fastcall
-bcm43xx_dma_handle_xmitstatus(struct bcm43xx_private *bcm,
-			      struct bcm43xx_xmitstatus *status)
+void bcm43xx_dma_handle_xmitstatus(struct bcm43xx_private *bcm,
+				   struct bcm43xx_xmitstatus *status)
 {
 	struct bcm43xx_dmaring *ring;
 	struct bcm43xx_dmadesc *desc;
@@ -855,9 +832,6 @@ bcm43xx_dma_handle_xmitstatus(struct bcm43xx_private *bcm,
 	ring = parse_cookie(bcm, status->cookie, &slot);
 	assert(ring);
 	assert(ring->tx);
-	assert(irqs_disabled());
-	spin_lock(&ring->lock);
-
 	assert(get_desc_ctl(ring->vbase + slot) & BCM43xx_DMADTOR_FRAMESTART);
 	while (1) {
 		assert(slot >= 0 && slot < ring->nr_slots);
@@ -877,13 +851,10 @@ bcm43xx_dma_handle_xmitstatus(struct bcm43xx_private *bcm,
 		slot = next_slot(ring, slot);
 	}
 	bcm->stats.last_tx = jiffies;
-
-	spin_unlock(&ring->lock);
 }
 
-static inline
-void dma_rx(struct bcm43xx_dmaring *ring,
-	    int *slot)
+static void dma_rx(struct bcm43xx_dmaring *ring,
+		   int *slot)
 {
 	struct bcm43xx_dmadesc *desc;
 	struct bcm43xx_dmadesc_meta *meta;
@@ -928,8 +899,12 @@ void dma_rx(struct bcm43xx_dmaring *ring,
 			barrier();
 			len = le16_to_cpu(rxhdr->frame_length);
 		} while (len == 0 && i++ < 5);
-		if (len == 0)
+		if (unlikely(len == 0)) {
+			/* recycle the descriptor buffer. */
+			sync_descbuffer_for_device(ring, meta->dmaaddr,
+						   ring->rx_buffersize);
 			goto drop;
+		}
 	}
 	if (unlikely(len > ring->rx_buffersize)) {
 		/* The data did not fit into one descriptor buffer
@@ -937,15 +912,24 @@ void dma_rx(struct bcm43xx_dmaring *ring,
 		 * This should never happen, as we try to allocate buffers
 		 * big enough. So simply ignore this packet.
 		 */
-		int cnt = 1;
-		s32 tmp = len - ring->rx_buffersize;
-
-		for ( ; tmp > 0; tmp -= ring->rx_buffersize) {
+		int cnt = 0;
+		s32 tmp = len;
+
+		while (1) {
+			desc = ring->vbase + *slot;
+			meta = ring->meta + *slot;
+			/* recycle the descriptor buffer. */
+			sync_descbuffer_for_device(ring, meta->dmaaddr,
+						   ring->rx_buffersize);
 			*slot = next_slot(ring, *slot);
 			cnt++;
+			tmp -= ring->rx_buffersize;
+			if (tmp <= 0)
+				break;
 		}
-		printkl(KERN_ERR PFX "DMA RX buffer too small. %d dropped.\n",
-			cnt);
+		printkl(KERN_ERR PFX "DMA RX buffer too small "
+			"(len: %u, buffer: %u, nr-dropped: %d)\n",
+			len, ring->rx_buffersize, cnt);
 		goto drop;
 	}
 	len -= IEEE80211_FCS_LEN;
@@ -954,6 +938,8 @@ void dma_rx(struct bcm43xx_dmaring *ring,
 	err = setup_rx_descbuffer(ring, desc, meta, GFP_ATOMIC);
 	if (unlikely(err)) {
 		dprintkl(KERN_ERR PFX "DMA RX: setup_rx_descbuffer() failed\n");
+		sync_descbuffer_for_device(ring, dmaaddr,
+					   ring->rx_buffersize);
 		goto drop;
 	}
 
@@ -971,8 +957,7 @@ drop:
 	return;
 }
 
-void fastcall
-bcm43xx_dma_rx(struct bcm43xx_dmaring *ring)
+void bcm43xx_dma_rx(struct bcm43xx_dmaring *ring)
 {
 	u32 status;
 	u16 descptr;
@@ -982,9 +967,6 @@ bcm43xx_dma_rx(struct bcm43xx_dmaring *ring)
 #endif
 
 	assert(!ring->tx);
-	assert(irqs_disabled());
-	spin_lock(&ring->lock);
-
 	status = bcm43xx_read32(ring->bcm, ring->mmio_base + BCM43xx_DMA_RX_STATUS);
 	descptr = (status & BCM43xx_DMA_RXSTAT_DPTR_MASK);
 	current_slot = descptr / sizeof(struct bcm43xx_dmadesc);
@@ -1002,8 +984,6 @@ bcm43xx_dma_rx(struct bcm43xx_dmaring *ring)
 			ring->mmio_base + BCM43xx_DMA_RX_DESC_INDEX,
 			(u32)(slot * sizeof(struct bcm43xx_dmadesc)));
 	ring->current_slot = slot;
-
-	spin_unlock(&ring->lock);
 }
 
 /* vim: set ts=8 sw=8 sts=8: */