@@ -522,12 +522,8 @@ jme_setup_tx_resources(struct jme_adapter *jme)
 					   &(txring->dmaalloc),
 					   GFP_ATOMIC);
 
-	if (!txring->alloc) {
-		txring->desc = NULL;
-		txring->dmaalloc = 0;
-		txring->dma = 0;
-		return -ENOMEM;
-	}
+	if (!txring->alloc)
+		goto err_set_null;
 
 	/*
 	 * 16 Bytes align
@@ -539,6 +535,11 @@ jme_setup_tx_resources(struct jme_adapter *jme)
 	atomic_set(&txring->next_to_clean, 0);
 	atomic_set(&txring->nr_free, jme->tx_ring_size);
 
+	txring->bufinf = kmalloc(sizeof(struct jme_buffer_info) *
+				 jme->tx_ring_size, GFP_ATOMIC);
+	if (unlikely(!(txring->bufinf)))
+		goto err_free_txring;
+
 	/*
 	 * Initialize Transmit Descriptors
 	 */
@@ -547,6 +548,20 @@ jme_setup_tx_resources(struct jme_adapter *jme)
 	       sizeof(struct jme_buffer_info) * jme->tx_ring_size);
 
 	return 0;
+
+err_free_txring:
+	dma_free_coherent(&(jme->pdev->dev),
+			  TX_RING_ALLOC_SIZE(jme->tx_ring_size),
+			  txring->alloc,
+			  txring->dmaalloc);
+
+err_set_null:
+	txring->desc = NULL;
+	txring->dmaalloc = 0;
+	txring->dma = 0;
+	txring->bufinf = NULL;
+
+	return -ENOMEM;
 }
 
 static void
@@ -557,16 +572,19 @@ jme_free_tx_resources(struct jme_adapter *jme)
 	struct jme_buffer_info *txbi;
 
 	if (txring->alloc) {
-		for (i = 0 ; i < jme->tx_ring_size ; ++i) {
-			txbi = txring->bufinf + i;
-			if (txbi->skb) {
-				dev_kfree_skb(txbi->skb);
-				txbi->skb = NULL;
+		if (txring->bufinf) {
+			for (i = 0 ; i < jme->tx_ring_size ; ++i) {
+				txbi = txring->bufinf + i;
+				if (txbi->skb) {
+					dev_kfree_skb(txbi->skb);
+					txbi->skb = NULL;
+				}
+				txbi->mapping = 0;
+				txbi->len = 0;
+				txbi->nr_desc = 0;
+				txbi->start_xmit = 0;
 			}
-			txbi->mapping = 0;
-			txbi->len = 0;
-			txbi->nr_desc = 0;
-			txbi->start_xmit = 0;
+			kfree(txring->bufinf);
 		}
 
 		dma_free_coherent(&(jme->pdev->dev),
@@ -578,11 +596,11 @@ jme_free_tx_resources(struct jme_adapter *jme)
 		txring->desc = NULL;
 		txring->dmaalloc = 0;
 		txring->dma = 0;
+		txring->bufinf = NULL;
 	}
 	txring->next_to_use = 0;
 	atomic_set(&txring->next_to_clean, 0);
 	atomic_set(&txring->nr_free, 0);
-
 }
 
 static inline void
@@ -720,8 +738,11 @@ jme_free_rx_resources(struct jme_adapter *jme)
 	struct jme_ring *rxring = &(jme->rxring[0]);
 
 	if (rxring->alloc) {
-		for (i = 0 ; i < jme->rx_ring_size ; ++i)
-			jme_free_rx_buf(jme, i);
+		if (rxring->bufinf) {
+			for (i = 0 ; i < jme->rx_ring_size ; ++i)
+				jme_free_rx_buf(jme, i);
+			kfree(rxring->bufinf);
+		}
 
 		dma_free_coherent(&(jme->pdev->dev),
 				  RX_RING_ALLOC_SIZE(jme->rx_ring_size),
@@ -731,6 +752,7 @@ jme_free_rx_resources(struct jme_adapter *jme)
 		rxring->desc = NULL;
 		rxring->dmaalloc = 0;
 		rxring->dma = 0;
+		rxring->bufinf = NULL;
 	}
 	rxring->next_to_use = 0;
 	atomic_set(&rxring->next_to_clean, 0);
@@ -746,12 +768,8 @@ jme_setup_rx_resources(struct jme_adapter *jme)
 				   RX_RING_ALLOC_SIZE(jme->rx_ring_size),
 				   &(rxring->dmaalloc),
 				   GFP_ATOMIC);
-	if (!rxring->alloc) {
-		rxring->desc = NULL;
-		rxring->dmaalloc = 0;
-		rxring->dma = 0;
-		return -ENOMEM;
-	}
+	if (!rxring->alloc)
+		goto err_set_null;
 
 	/*
 	 * 16 Bytes align
@@ -762,9 +780,16 @@ jme_setup_rx_resources(struct jme_adapter *jme)
 	rxring->next_to_use = 0;
 	atomic_set(&rxring->next_to_clean, 0);
 
+	rxring->bufinf = kmalloc(sizeof(struct jme_buffer_info) *
+				 jme->rx_ring_size, GFP_ATOMIC);
+	if (unlikely(!(rxring->bufinf)))
+		goto err_free_rxring;
+
 	/*
 	 * Initiallize Receive Descriptors
 	 */
+	memset(rxring->bufinf, 0,
+	       sizeof(struct jme_buffer_info) * jme->rx_ring_size);
 	for (i = 0 ; i < jme->rx_ring_size ; ++i) {
 		if (unlikely(jme_make_new_rx_buf(jme, i))) {
 			jme_free_rx_resources(jme);
@@ -775,6 +800,19 @@ jme_setup_rx_resources(struct jme_adapter *jme)
 	}
 
 	return 0;
+
+err_free_rxring:
+	dma_free_coherent(&(jme->pdev->dev),
+			  RX_RING_ALLOC_SIZE(jme->rx_ring_size),
+			  rxring->alloc,
+			  rxring->dmaalloc);
+err_set_null:
+	rxring->desc = NULL;
+	rxring->dmaalloc = 0;
+	rxring->dma = 0;
+	rxring->bufinf = NULL;
+
+	return -ENOMEM;
 }
 
 static inline void
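
For readers less familiar with this unwind style, here is a minimal userspace sketch of the pattern the patch introduces in jme_setup_tx_resources()/jme_setup_rx_resources() and their free counterparts: a descriptor area plus a separately allocated buffer-info array, goto labels that release only what was actually obtained, and a free path that touches bufinf only when it is non-NULL. All names below (demo_ring, demo_setup_ring, demo_free_ring) are hypothetical, and plain malloc()/calloc()/free() stand in for dma_alloc_coherent(), kmalloc() and their release calls.

#include <stdio.h>
#include <stdlib.h>

/* Hypothetical stand-ins for the driver's ring and buffer-info types. */
struct demo_buf_info {
	void	*skb;		/* would be a struct sk_buff * in the driver */
	size_t	len;
};

struct demo_ring {
	void			*alloc;		/* descriptor area (dma_alloc_coherent() in the driver) */
	struct demo_buf_info	*bufinf;	/* per-descriptor info (kmalloc() in the driver) */
	size_t			ring_size;
};

/* Mirrors the setup path: allocate, then unwind on partial failure. */
static int demo_setup_ring(struct demo_ring *ring, size_t ring_size, size_t desc_size)
{
	ring->alloc = malloc(ring_size * desc_size);
	if (!ring->alloc)
		goto err_set_null;

	/* Second allocation: the buffer-info array, zeroed like the memset() in the patch. */
	ring->bufinf = calloc(ring_size, sizeof(*ring->bufinf));
	if (!ring->bufinf)
		goto err_free_ring;

	ring->ring_size = ring_size;
	return 0;

err_free_ring:
	/* Only the descriptor area was obtained; release it before clearing state. */
	free(ring->alloc);
err_set_null:
	/* Leave the ring in a state the free path can safely inspect. */
	ring->alloc = NULL;
	ring->bufinf = NULL;
	ring->ring_size = 0;
	return -1;
}

/* Mirrors the free path: only walk bufinf if it was actually allocated. */
static void demo_free_ring(struct demo_ring *ring)
{
	if (ring->alloc) {
		if (ring->bufinf) {
			for (size_t i = 0; i < ring->ring_size; ++i)
				free(ring->bufinf[i].skb);
			free(ring->bufinf);
		}
		free(ring->alloc);
		ring->alloc = NULL;
		ring->bufinf = NULL;
	}
	ring->ring_size = 0;
}

int main(void)
{
	struct demo_ring ring = { 0 };

	if (demo_setup_ring(&ring, 128, 16) == 0) {
		printf("ring set up with %zu descriptors\n", ring.ring_size);
		demo_free_ring(&ring);
	}
	return 0;
}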