@@ -70,6 +70,8 @@ do { \
 		(sizeof(struct bnad_skb_unmap) * ((_depth) - 1)); \
 } while (0)
 
+#define BNAD_TXRX_SYNC_MDELAY	250	/* 250 msecs */
+
 /*
  * Reinitialize completions in CQ, once Rx is taken down
  */
@@ -130,7 +132,9 @@ bnad_free_all_txbufs(struct bnad *bnad,
 				 PCI_DMA_TODEVICE);
 
 		pci_unmap_addr_set(&unmap_array[unmap_cons], dma_addr, 0);
-		unmap_cons++;
+		if (++unmap_cons >= unmap_q->q_depth)
+			break;
+
 		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
 			pci_unmap_page(bnad->pcidev,
 				pci_unmap_addr(&unmap_array[unmap_cons],
@@ -139,7 +143,8 @@ bnad_free_all_txbufs(struct bnad *bnad,
 					PCI_DMA_TODEVICE);
 			pci_unmap_addr_set(&unmap_array[unmap_cons], dma_addr,
 					   0);
-			unmap_cons++;
+			if (++unmap_cons >= unmap_q->q_depth)
+				break;
 		}
 		dev_kfree_skb_any(skb);
 	}
@@ -167,11 +172,11 @@ bnad_free_txbufs(struct bnad *bnad,
 	/*
 	 * Just return if TX is stopped. This check is useful
 	 * when bnad_free_txbufs() runs out of a tasklet scheduled
-	 * before bnad_cb_tx_cleanup() cleared BNAD_RF_TX_STARTED bit
+	 * before bnad_cb_tx_cleanup() cleared BNAD_TXQ_TX_STARTED bit
 	 * but this routine runs actually after the cleanup has been
 	 * executed.
 	 */
-	if (!test_bit(BNAD_RF_TX_STARTED, &bnad->run_flags))
+	if (!test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags))
 		return 0;
 
 	updated_hw_cons = *(tcb->hw_consumer_index);
@@ -252,7 +257,9 @@ bnad_tx_free_tasklet(unsigned long bnad_ptr)
 				(!test_and_set_bit(BNAD_TXQ_FREE_SENT,
 						   &tcb->flags))) {
 				acked = bnad_free_txbufs(bnad, tcb);
-				bna_ib_ack(tcb->i_dbell, acked);
+				if (likely(test_bit(BNAD_TXQ_TX_STARTED,
+					&tcb->flags)))
+					bna_ib_ack(tcb->i_dbell, acked);
 				smp_mb__before_clear_bit();
 				clear_bit(BNAD_TXQ_FREE_SENT, &tcb->flags);
 			}
@@ -264,7 +271,7 @@ static u32
 bnad_tx(struct bnad *bnad, struct bna_tcb *tcb)
 {
 	struct net_device *netdev = bnad->netdev;
-	u32 sent;
+	u32 sent = 0;
 
 	if (test_and_set_bit(BNAD_TXQ_FREE_SENT, &tcb->flags))
 		return 0;
@@ -275,12 +282,15 @@ bnad_tx(struct bnad *bnad, struct bna_tcb *tcb)
 		    netif_carrier_ok(netdev) &&
 		    BNA_QE_FREE_CNT(tcb, tcb->q_depth) >=
 				    BNAD_NETIF_WAKE_THRESHOLD) {
-			netif_wake_queue(netdev);
-			BNAD_UPDATE_CTR(bnad, netif_queue_wakeup);
+			if (test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags)) {
+				netif_wake_queue(netdev);
+				BNAD_UPDATE_CTR(bnad, netif_queue_wakeup);
+			}
 		}
+	}
+
+	if (likely(test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags)))
 		bna_ib_ack(tcb->i_dbell, sent);
-	} else
-		bna_ib_ack(tcb->i_dbell, 0);
 
 	smp_mb__before_clear_bit();
 	clear_bit(BNAD_TXQ_FREE_SENT, &tcb->flags);
@@ -313,25 +323,26 @@ bnad_reset_rcb(struct bnad *bnad, struct bna_rcb *rcb)
 }
 
 static void
-bnad_free_rxbufs(struct bnad *bnad, struct bna_rcb *rcb)
+bnad_free_all_rxbufs(struct bnad *bnad, struct bna_rcb *rcb)
 {
 	struct bnad_unmap_q *unmap_q;
 	struct sk_buff *skb;
+	int unmap_cons;
 
 	unmap_q = rcb->unmap_q;
-	while (BNA_QE_IN_USE_CNT(unmap_q, unmap_q->q_depth)) {
-		skb = unmap_q->unmap_array[unmap_q->consumer_index].skb;
-		BUG_ON(!(skb));
-		unmap_q->unmap_array[unmap_q->consumer_index].skb = NULL;
+	for (unmap_cons = 0; unmap_cons < unmap_q->q_depth; unmap_cons++) {
+		skb = unmap_q->unmap_array[unmap_cons].skb;
+		if (!skb)
+			continue;
+		BUG_ON(!(pci_unmap_addr(
+			&unmap_q->unmap_array[unmap_cons], dma_addr)));
+		unmap_q->unmap_array[unmap_cons].skb = NULL;
 		pci_unmap_single(bnad->pcidev, pci_unmap_addr(&unmap_q->
-					unmap_array[unmap_q->consumer_index],
-					dma_addr), rcb->rxq->buffer_size +
-					NET_IP_ALIGN, PCI_DMA_FROMDEVICE);
+					unmap_array[unmap_cons],
+					dma_addr), rcb->rxq->buffer_size,
+					PCI_DMA_FROMDEVICE);
 		dev_kfree_skb(skb);
-		BNA_QE_INDX_ADD(unmap_q->consumer_index, 1, unmap_q->q_depth);
-		BNA_QE_INDX_ADD(rcb->consumer_index, 1, rcb->q_depth);
 	}
-
 	bnad_reset_rcb(bnad, rcb);
 }
 
@@ -385,41 +396,9 @@ finishing:
 		unmap_q->producer_index = unmap_prod;
 		rcb->producer_index = unmap_prod;
 		smp_mb();
-		bna_rxq_prod_indx_doorbell(rcb);
-	}
-}
-
-/*
- * Locking is required in the enable path
- * because it is called from a napi poll
- * context, where the bna_lock is not held
- * unlike the IRQ context.
- */
-static void
-bnad_enable_txrx_irqs(struct bnad *bnad)
-{
-	struct bna_tcb *tcb;
-	struct bna_ccb *ccb;
-	int i, j;
-	unsigned long flags;
-
-	spin_lock_irqsave(&bnad->bna_lock, flags);
-	for (i = 0; i < bnad->num_tx; i++) {
-		for (j = 0; j < bnad->num_txq_per_tx; j++) {
-			tcb = bnad->tx_info[i].tcb[j];
-			bna_ib_coalescing_timer_set(tcb->i_dbell,
-				tcb->txq->ib->ib_config.coalescing_timeo);
-			bna_ib_ack(tcb->i_dbell, 0);
-		}
-	}
-
-	for (i = 0; i < bnad->num_rx; i++) {
-		for (j = 0; j < bnad->num_rxp_per_rx; j++) {
-			ccb = bnad->rx_info[i].rx_ctrl[j].ccb;
-			bnad_enable_rx_irq_unsafe(ccb);
-		}
+		if (likely(test_bit(BNAD_RXQ_STARTED, &rcb->flags)))
+			bna_rxq_prod_indx_doorbell(rcb);
 	}
 }
-	spin_unlock_irqrestore(&bnad->bna_lock, flags);
-}
 
 static inline void
@@ -448,6 +427,9 @@ bnad_poll_cq(struct bnad *bnad, struct bna_ccb *ccb, int budget)
 	u32 qid0 = ccb->rcb[0]->rxq->rxq_id;
 	struct bna_pkt_rate *pkt_rt = &ccb->pkt_rate;
 
+	if (!test_bit(BNAD_RXQ_STARTED, &ccb->rcb[0]->flags))
+		return 0;
+
 	prefetch(bnad->netdev);
 	BNA_CQ_QPGE_PTR_GET(ccb->producer_index, ccb->sw_qpt, cmpl,
 			    wi_range);
@@ -544,12 +526,15 @@ next:
 	BNA_QE_INDX_ADD(ccb->producer_index, wis, ccb->q_depth);
 
 	if (likely(ccb)) {
-		bna_ib_ack(ccb->i_dbell, packets);
+		if (likely(test_bit(BNAD_RXQ_STARTED, &ccb->rcb[0]->flags)))
+			bna_ib_ack(ccb->i_dbell, packets);
 		bnad_refill_rxq(bnad, ccb->rcb[0]);
 		if (ccb->rcb[1])
 			bnad_refill_rxq(bnad, ccb->rcb[1]);
-	} else
-		bna_ib_ack(ccb->i_dbell, 0);
+	} else {
+		if (likely(test_bit(BNAD_RXQ_STARTED, &ccb->rcb[0]->flags)))
+			bna_ib_ack(ccb->i_dbell, 0);
+	}
 
 	return packets;
 }
@@ -557,6 +542,9 @@ next:
 static void
 bnad_disable_rx_irq(struct bnad *bnad, struct bna_ccb *ccb)
 {
+	if (unlikely(!test_bit(BNAD_RXQ_STARTED, &ccb->rcb[0]->flags)))
+		return;
+
 	bna_ib_coalescing_timer_set(ccb->i_dbell, 0);
 	bna_ib_ack(ccb->i_dbell, 0);
 }
@@ -575,9 +563,11 @@ static void
 bnad_netif_rx_schedule_poll(struct bnad *bnad, struct bna_ccb *ccb)
 {
 	struct bnad_rx_ctrl *rx_ctrl = (struct bnad_rx_ctrl *)(ccb->ctrl);
-	if (likely(napi_schedule_prep((&rx_ctrl->napi)))) {
+	struct napi_struct *napi = &rx_ctrl->napi;
+
+	if (likely(napi_schedule_prep(napi))) {
 		bnad_disable_rx_irq(bnad, ccb);
-		__napi_schedule((&rx_ctrl->napi));
+		__napi_schedule(napi);
 	}
 	BNAD_UPDATE_CTR(bnad, netif_rx_schedule);
 }
@@ -602,12 +592,11 @@ bnad_msix_mbox_handler(int irq, void *data)
 {
 	u32 intr_status;
 	unsigned long flags;
-	struct net_device *netdev = data;
-	struct bnad *bnad;
+	struct bnad *bnad = (struct bnad *)data;
 
-	bnad = netdev_priv(netdev);
+	if (unlikely(test_bit(BNAD_RF_MBOX_IRQ_DISABLED, &bnad->run_flags)))
+		return IRQ_HANDLED;
 
-	/* BNA_ISR_GET(bnad); Inc Ref count */
 	spin_lock_irqsave(&bnad->bna_lock, flags);
 
 	bna_intr_status_get(&bnad->bna, intr_status);
@@ -617,7 +606,6 @@ bnad_msix_mbox_handler(int irq, void *data)
 
 	spin_unlock_irqrestore(&bnad->bna_lock, flags);
 
-	/* BNAD_ISR_PUT(bnad); Dec Ref count */
 	return IRQ_HANDLED;
 }
 
@@ -627,8 +615,7 @@ bnad_isr(int irq, void *data)
 	int i, j;
 	u32 intr_status;
 	unsigned long flags;
-	struct net_device *netdev = data;
-	struct bnad *bnad = netdev_priv(netdev);
+	struct bnad *bnad = (struct bnad *)data;
 	struct bnad_rx_info *rx_info;
 	struct bnad_rx_ctrl *rx_ctrl;
 
@@ -642,16 +629,21 @@ bnad_isr(int irq, void *data)
 
 	spin_lock_irqsave(&bnad->bna_lock, flags);
 
-	if (BNA_IS_MBOX_ERR_INTR(intr_status)) {
+	if (BNA_IS_MBOX_ERR_INTR(intr_status))
 		bna_mbox_handler(&bnad->bna, intr_status);
-		if (!BNA_IS_INTX_DATA_INTR(intr_status)) {
-			spin_unlock_irqrestore(&bnad->bna_lock, flags);
-			goto done;
-		}
-	}
+
 	spin_unlock_irqrestore(&bnad->bna_lock, flags);
 
+	if (!BNA_IS_INTX_DATA_INTR(intr_status))
+		return IRQ_HANDLED;
+
 	/* Process data interrupts */
+	/* Tx processing */
+	for (i = 0; i < bnad->num_tx; i++) {
+		for (j = 0; j < bnad->num_txq_per_tx; j++)
+			bnad_tx(bnad, bnad->tx_info[i].tcb[j]);
+	}
+	/* Rx processing */
 	for (i = 0; i < bnad->num_rx; i++) {
 		rx_info = &bnad->rx_info[i];
 		if (!rx_info->rx)
@@ -663,7 +655,6 @@ bnad_isr(int irq, void *data)
 							    rx_ctrl->ccb);
 		}
 	}
-done:
 	return IRQ_HANDLED;
 }
 
@@ -674,11 +665,7 @@ done:
 static void
 bnad_enable_mbox_irq(struct bnad *bnad)
 {
-	int irq = BNAD_GET_MBOX_IRQ(bnad);
-
-	if (test_and_clear_bit(BNAD_RF_MBOX_IRQ_DISABLED, &bnad->run_flags))
-		if (bnad->cfg_flags & BNAD_CF_MSIX)
-			enable_irq(irq);
+	clear_bit(BNAD_RF_MBOX_IRQ_DISABLED, &bnad->run_flags);
 
 	BNAD_UPDATE_CTR(bnad, mbox_intr_enabled);
 }
@@ -690,14 +677,19 @@ bnad_enable_mbox_irq(struct bnad *bnad)
 static void
 bnad_disable_mbox_irq(struct bnad *bnad)
 {
-	int irq = BNAD_GET_MBOX_IRQ(bnad);
+	set_bit(BNAD_RF_MBOX_IRQ_DISABLED, &bnad->run_flags);
 
+	BNAD_UPDATE_CTR(bnad, mbox_intr_disabled);
+}
 
-	if (!test_and_set_bit(BNAD_RF_MBOX_IRQ_DISABLED, &bnad->run_flags))
-		if (bnad->cfg_flags & BNAD_CF_MSIX)
-			disable_irq_nosync(irq);
+static void
+bnad_set_netdev_perm_addr(struct bnad *bnad)
+{
+	struct net_device *netdev = bnad->netdev;
 
-	BNAD_UPDATE_CTR(bnad, mbox_intr_disabled);
+	memcpy(netdev->perm_addr, &bnad->perm_addr, netdev->addr_len);
+	if (is_zero_ether_addr(netdev->dev_addr))
+		memcpy(netdev->dev_addr, &bnad->perm_addr, netdev->addr_len);
 }
 
 /* Control Path Handlers */
@@ -755,11 +747,14 @@ bnad_cb_port_link_status(struct bnad *bnad,
 
 	if (link_up) {
 		if (!netif_carrier_ok(bnad->netdev)) {
+			struct bna_tcb *tcb = bnad->tx_info[0].tcb[0];
+			if (!tcb)
+				return;
 			pr_warn("bna: %s link up\n",
 				bnad->netdev->name);
 			netif_carrier_on(bnad->netdev);
 			BNAD_UPDATE_CTR(bnad, link_toggle);
-			if (test_bit(BNAD_RF_TX_STARTED, &bnad->run_flags)) {
+			if (test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags)) {
 				/* Force an immediate Transmit Schedule */
 				pr_info("bna: %s TX_STARTED\n",
 					bnad->netdev->name);
@@ -807,6 +802,18 @@ bnad_cb_tcb_destroy(struct bnad *bnad, struct bna_tcb *tcb)
 {
 	struct bnad_tx_info *tx_info =
 			(struct bnad_tx_info *)tcb->txq->tx->priv;
+	struct bnad_unmap_q *unmap_q = tcb->unmap_q;
+
+	while (test_and_set_bit(BNAD_TXQ_FREE_SENT, &tcb->flags))
+		cpu_relax();
+
+	bnad_free_all_txbufs(bnad, tcb);
+
+	unmap_q->producer_index = 0;
+	unmap_q->consumer_index = 0;
+
+	smp_mb__before_clear_bit();
+	clear_bit(BNAD_TXQ_FREE_SENT, &tcb->flags);
 
 	tx_info->tcb[tcb->id] = NULL;
 }
@@ -821,6 +828,12 @@ bnad_cb_rcb_setup(struct bnad *bnad, struct bna_rcb *rcb)
 	unmap_q->q_depth = BNAD_RX_UNMAPQ_DEPTH;
 }
 
+static void
+bnad_cb_rcb_destroy(struct bnad *bnad, struct bna_rcb *rcb)
+{
+	bnad_free_all_rxbufs(bnad, rcb);
+}
+
 static void
 bnad_cb_ccb_setup(struct bnad *bnad, struct bna_ccb *ccb)
 {
@@ -849,7 +862,7 @@ bnad_cb_tx_stall(struct bnad *bnad, struct bna_tcb *tcb)
 	if (tx_info != &bnad->tx_info[0])
 		return;
 
-	clear_bit(BNAD_RF_TX_STARTED, &bnad->run_flags);
+	clear_bit(BNAD_TXQ_TX_STARTED, &tcb->flags);
 	netif_stop_queue(bnad->netdev);
 	pr_info("bna: %s TX_STOPPED\n", bnad->netdev->name);
 }
@@ -857,30 +870,15 @@ bnad_cb_tx_stall(struct bnad *bnad, struct bna_tcb *tcb)
 static void
 bnad_cb_tx_resume(struct bnad *bnad, struct bna_tcb *tcb)
 {
-	if (test_and_set_bit(BNAD_RF_TX_STARTED, &bnad->run_flags))
-		return;
-
-	if (netif_carrier_ok(bnad->netdev)) {
-		pr_info("bna: %s TX_STARTED\n", bnad->netdev->name);
-		netif_wake_queue(bnad->netdev);
-		BNAD_UPDATE_CTR(bnad, netif_queue_wakeup);
-	}
-}
-
-static void
-bnad_cb_tx_cleanup(struct bnad *bnad, struct bna_tcb *tcb)
-{
-	struct bnad_unmap_q *unmap_q;
+	struct bnad_unmap_q *unmap_q = tcb->unmap_q;
 
-	if (!tcb || (!tcb->unmap_q))
+	if (test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags))
 		return;
 
-	unmap_q = tcb->unmap_q;
-	if (!unmap_q->unmap_array)
-		return;
+	clear_bit(BNAD_RF_TX_SHUTDOWN_DELAYED, &bnad->run_flags);
 
-	if (test_and_set_bit(BNAD_TXQ_FREE_SENT, &tcb->flags))
-		return;
+	while (test_and_set_bit(BNAD_TXQ_FREE_SENT, &tcb->flags))
+		cpu_relax();
 
 	bnad_free_all_txbufs(bnad, tcb);
 
@@ -889,21 +887,45 @@ bnad_cb_tx_cleanup(struct bnad *bnad, struct bna_tcb *tcb)
 
 	smp_mb__before_clear_bit();
 	clear_bit(BNAD_TXQ_FREE_SENT, &tcb->flags);
+
+	/*
+	 * Workaround for first device enable failure & we
+	 * get a 0 MAC address. We try to get the MAC address
+	 * again here.
+	 */
+	if (is_zero_ether_addr(&bnad->perm_addr.mac[0])) {
+		bna_port_mac_get(&bnad->bna.port, &bnad->perm_addr);
+		bnad_set_netdev_perm_addr(bnad);
+	}
+
+	set_bit(BNAD_TXQ_TX_STARTED, &tcb->flags);
+
+	if (netif_carrier_ok(bnad->netdev)) {
+		pr_info("bna: %s TX_STARTED\n", bnad->netdev->name);
+		netif_wake_queue(bnad->netdev);
+		BNAD_UPDATE_CTR(bnad, netif_queue_wakeup);
+	}
+}
+
+static void
+bnad_cb_tx_cleanup(struct bnad *bnad, struct bna_tcb *tcb)
+{
+	/* Delay only once for the whole Tx Path Shutdown */
+	if (!test_and_set_bit(BNAD_RF_TX_SHUTDOWN_DELAYED, &bnad->run_flags))
+		mdelay(BNAD_TXRX_SYNC_MDELAY);
 }
 
 static void
 bnad_cb_rx_cleanup(struct bnad *bnad,
 			struct bna_ccb *ccb)
 {
-	bnad_cq_cmpl_init(bnad, ccb);
-
-	bnad_free_rxbufs(bnad, ccb->rcb[0]);
 	clear_bit(BNAD_RXQ_STARTED, &ccb->rcb[0]->flags);
 
-	if (ccb->rcb[1]) {
-		bnad_free_rxbufs(bnad, ccb->rcb[1]);
+	if (ccb->rcb[1])
 		clear_bit(BNAD_RXQ_STARTED, &ccb->rcb[1]->flags);
-	}
+
+	if (!test_and_set_bit(BNAD_RF_RX_SHUTDOWN_DELAYED, &bnad->run_flags))
+		mdelay(BNAD_TXRX_SYNC_MDELAY);
 }
 
 static void
@@ -911,6 +933,13 @@ bnad_cb_rx_post(struct bnad *bnad, struct bna_rcb *rcb)
 {
 	struct bnad_unmap_q *unmap_q = rcb->unmap_q;
 
+	clear_bit(BNAD_RF_RX_SHUTDOWN_DELAYED, &bnad->run_flags);
+
+	if (rcb == rcb->cq->ccb->rcb[0])
+		bnad_cq_cmpl_init(bnad, rcb->cq->ccb);
+
+	bnad_free_all_rxbufs(bnad, rcb);
+
 	set_bit(BNAD_RXQ_STARTED, &rcb->flags);
 
 	/* Now allocate & post buffers for this RCB */
@@ -1047,7 +1076,7 @@ bnad_mbox_irq_free(struct bnad *bnad,
 	spin_unlock_irqrestore(&bnad->bna_lock, flags);
 
 	irq = BNAD_GET_MBOX_IRQ(bnad);
-	free_irq(irq, bnad->netdev);
+	free_irq(irq, bnad);
 
 	kfree(intr_info->idl);
 }
@@ -1061,7 +1090,7 @@ static int
 bnad_mbox_irq_alloc(struct bnad *bnad,
 		    struct bna_intr_info *intr_info)
 {
-	int err;
+	int err = 0;
 	unsigned long flags;
 	u32 irq;
 	irq_handler_t irq_handler;
@@ -1096,22 +1125,17 @@ bnad_mbox_irq_alloc(struct bnad *bnad,
 	 */
 	set_bit(BNAD_RF_MBOX_IRQ_DISABLED, &bnad->run_flags);
 
+	BNAD_UPDATE_CTR(bnad, mbox_intr_disabled);
+
 	err = request_irq(irq, irq_handler, flags,
-			  bnad->mbox_irq_name, bnad->netdev);
+			  bnad->mbox_irq_name, bnad);
 
 	if (err) {
 		kfree(intr_info->idl);
 		intr_info->idl = NULL;
-		return err;
 	}
 
-	spin_lock_irqsave(&bnad->bna_lock, flags);
-
-	if (bnad->cfg_flags & BNAD_CF_MSIX)
-		disable_irq_nosync(irq);
-
-	spin_unlock_irqrestore(&bnad->bna_lock, flags);
-	return 0;
+	return err;
 }
 
 static void
@@ -1555,62 +1579,19 @@ poll_exit:
 	return rcvd;
 }
 
-static int
-bnad_napi_poll_txrx(struct napi_struct *napi, int budget)
-{
-	struct bnad_rx_ctrl *rx_ctrl =
-		container_of(napi, struct bnad_rx_ctrl, napi);
-	struct bna_ccb *ccb;
-	struct bnad *bnad;
-	int rcvd = 0;
-	int i, j;
-
-	ccb = rx_ctrl->ccb;
-
-	bnad = ccb->bnad;
-
-	if (!netif_carrier_ok(bnad->netdev))
-		goto poll_exit;
-
-	/* Handle Tx Completions, if any */
-	for (i = 0; i < bnad->num_tx; i++) {
-		for (j = 0; j < bnad->num_txq_per_tx; j++)
-			bnad_tx(bnad, bnad->tx_info[i].tcb[j]);
-	}
-
-	/* Handle Rx Completions */
-	rcvd = bnad_poll_cq(bnad, ccb, budget);
-	if (rcvd == budget)
-		return rcvd;
-poll_exit:
-	napi_complete((napi));
-
-	BNAD_UPDATE_CTR(bnad, netif_rx_complete);
-
-	bnad_enable_txrx_irqs(bnad);
-	return rcvd;
-}
-
 static void
 bnad_napi_enable(struct bnad *bnad, u32 rx_id)
 {
-	int (*napi_poll) (struct napi_struct *, int);
 	struct bnad_rx_ctrl *rx_ctrl;
 	int i;
-	unsigned long flags;
-
-	spin_lock_irqsave(&bnad->bna_lock, flags);
-	if (bnad->cfg_flags & BNAD_CF_MSIX)
-		napi_poll = bnad_napi_poll_rx;
-	else
-		napi_poll = bnad_napi_poll_txrx;
-	spin_unlock_irqrestore(&bnad->bna_lock, flags);
 
 	/* Initialize & enable NAPI */
 	for (i = 0; i < bnad->num_rxp_per_rx; i++) {
 		rx_ctrl = &bnad->rx_info[rx_id].rx_ctrl[i];
+
 		netif_napi_add(bnad->netdev, &rx_ctrl->napi,
-			       napi_poll, 64);
+			       bnad_napi_poll_rx, 64);
+
 		napi_enable(&rx_ctrl->napi);
 	}
 }
@@ -1825,6 +1806,7 @@ bnad_setup_rx(struct bnad *bnad, uint rx_id)
 
 	/* Initialize the Rx event handlers */
 	rx_cbfn.rcb_setup_cbfn = bnad_cb_rcb_setup;
+	rx_cbfn.rcb_destroy_cbfn = bnad_cb_rcb_destroy;
 	rx_cbfn.rcb_destroy_cbfn = NULL;
 	rx_cbfn.ccb_setup_cbfn = bnad_cb_ccb_setup;
 	rx_cbfn.ccb_destroy_cbfn = bnad_cb_ccb_destroy;
@@ -2152,16 +2134,6 @@ bnad_q_num_adjust(struct bnad *bnad, int msix_vectors)
 		bnad->num_rxp_per_rx = 1;
 }
 
-static void
-bnad_set_netdev_perm_addr(struct bnad *bnad)
-{
-	struct net_device *netdev = bnad->netdev;
-
-	memcpy(netdev->perm_addr, &bnad->perm_addr, netdev->addr_len);
-	if (is_zero_ether_addr(netdev->dev_addr))
-		memcpy(netdev->dev_addr, &bnad->perm_addr, netdev->addr_len);
-}
-
 /* Enable / disable device */
 static void
 bnad_device_disable(struct bnad *bnad)
@@ -2433,21 +2405,21 @@ bnad_start_xmit(struct sk_buff *skb, struct net_device *netdev)
 		return NETDEV_TX_OK;
 	}
 
+	tx_id = 0;
+
+	tx_info = &bnad->tx_info[tx_id];
+	tcb = tx_info->tcb[tx_id];
+	unmap_q = tcb->unmap_q;
+
 	/*
 	 * Takes care of the Tx that is scheduled between clearing the flag
 	 * and the netif_stop_queue() call.
 	 */
-	if (unlikely(!test_bit(BNAD_RF_TX_STARTED, &bnad->run_flags))) {
+	if (unlikely(!test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags))) {
 		dev_kfree_skb(skb);
 		return NETDEV_TX_OK;
 	}
 
-	tx_id = 0;
-
-	tx_info = &bnad->tx_info[tx_id];
-	tcb = tx_info->tcb[tx_id];
-	unmap_q = tcb->unmap_q;
-
 	vectors = 1 + skb_shinfo(skb)->nr_frags;
 	if (vectors > BFI_TX_MAX_VECTORS_PER_PKT) {
 		dev_kfree_skb(skb);
@@ -2462,7 +2434,8 @@ bnad_start_xmit(struct sk_buff *skb, struct net_device *netdev)
 				tcb->consumer_index &&
 			    !test_and_set_bit(BNAD_TXQ_FREE_SENT, &tcb->flags)) {
 			acked = bnad_free_txbufs(bnad, tcb);
-			bna_ib_ack(tcb->i_dbell, acked);
+			if (likely(test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags)))
+				bna_ib_ack(tcb->i_dbell, acked);
 			smp_mb__before_clear_bit();
 			clear_bit(BNAD_TXQ_FREE_SENT, &tcb->flags);
 		} else {
@@ -2624,6 +2597,10 @@ bnad_start_xmit(struct sk_buff *skb, struct net_device *netdev)
 	tcb->producer_index = txq_prod;
 
 	smp_mb();
+
+	if (unlikely(!test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags)))
+		return NETDEV_TX_OK;
+
 	bna_txq_prod_indx_doorbell(tcb);
 
 	if ((u16) (*tcb->hw_consumer_index) != tcb->consumer_index)
@@ -3066,7 +3043,7 @@ bnad_pci_probe(struct pci_dev *pdev,
 	/*
 	 * PCI initialization
 	 *	Output : using_dac = 1 for 64 bit DMA
-	 * 			   = 0 for 32 bit DMA
+	 *			   = 0 for 32 bit DMA
 	 */
 	err = bnad_pci_init(bnad, pdev, &using_dac);
 	if (err)