@@ -73,6 +73,8 @@
 	_EFX_CHANNEL_MAGIC(_EFX_CHANNEL_MAGIC_TX_DRAIN,			\
 			   (_tx_queue)->queue)
 
+static void efx_magic_event(struct efx_channel *channel, u32 magic);
+
 /**************************************************************************
  *
  * Solarstorm hardware access
@@ -255,9 +257,6 @@ static int efx_alloc_special_buffer(struct efx_nic *efx,
 	buffer->entries = len / EFX_BUF_SIZE;
 	BUG_ON(buffer->dma_addr & (EFX_BUF_SIZE - 1));
 
-	/* All zeros is a potentially valid event so memset to 0xff */
-	memset(buffer->addr, 0xff, len);
-
 	/* Select new buffer ID */
 	buffer->index = efx->next_buffer_table;
 	efx->next_buffer_table += buffer->entries;
@@ -494,6 +493,9 @@ static void efx_flush_tx_queue(struct efx_tx_queue *tx_queue)
 	struct efx_nic *efx = tx_queue->efx;
 	efx_oword_t tx_flush_descq;
 
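+	/* flush_outstanding is set while a flush completion event is
+	 * still expected for this queue; it must have been cleared
+	 * before another flush can be requested.
+	 */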
+	WARN_ON(atomic_read(&tx_queue->flush_outstanding));
+	atomic_set(&tx_queue->flush_outstanding, 1);
+
 	EFX_POPULATE_OWORD_2(tx_flush_descq,
 			     FRF_AZ_TX_FLUSH_DESCQ_CMD, 1,
 			     FRF_AZ_TX_FLUSH_DESCQ, tx_queue->queue);
@@ -669,6 +671,47 @@ static bool efx_flush_wake(struct efx_nic *efx)
 		 && atomic_read(&efx->rxq_flush_pending) > 0));
 }
 
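+/* Check the hardware's view of each TX queue: if a queue has finished
+ * flushing but its completion event was never received, fake the drain
+ * event that the event path would have generated.
+ */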
+static bool efx_check_tx_flush_complete(struct efx_nic *efx)
+{
+	bool i = true;
+	efx_oword_t txd_ptr_tbl;
+	struct efx_channel *channel;
+	struct efx_tx_queue *tx_queue;
+
+	efx_for_each_channel(channel, efx) {
+		efx_for_each_channel_tx_queue(tx_queue, channel) {
+			efx_reado_table(efx, &txd_ptr_tbl,
+					FR_BZ_TX_DESC_PTR_TBL, tx_queue->queue);
+			if (EFX_OWORD_FIELD(txd_ptr_tbl,
+					    FRF_AZ_TX_DESCQ_FLUSH) ||
+			    EFX_OWORD_FIELD(txd_ptr_tbl,
+					    FRF_AZ_TX_DESCQ_EN)) {
+				netif_dbg(efx, hw, efx->net_dev,
+					  "flush did not complete on TXQ %d\n",
+					  tx_queue->queue);
+				i = false;
+			} else if (atomic_cmpxchg(&tx_queue->flush_outstanding,
+						  1, 0)) {
+				/* The flush is complete, but we didn't
+				 * receive a flush completion event
+				 */
+				netif_dbg(efx, hw, efx->net_dev,
+					  "flush complete on TXQ %d, so drain "
+					  "the queue\n", tx_queue->queue);
+				/* Don't need to increment drain_pending as it
+				 * has already been incremented for the queues
+				 * which did not drain
+				 */
+				efx_magic_event(channel,
+						EFX_CHANNEL_MAGIC_TX_DRAIN(
+							tx_queue));
+			}
+		}
+	}
+
+	return i;
+}
+
 /* Flush all the transmit queues, and continue flushing receive queues until
  * they're all flushed. Wait for the DRAIN events to be received so that there
  * are no more RX and TX events left on any channel. */
@@ -680,7 +723,6 @@ int efx_nic_flush_queues(struct efx_nic *efx)
 	struct efx_tx_queue *tx_queue;
 	int rc = 0;
 
-	efx->fc_disable++;
 	efx->type->prepare_flush(efx);
 
 	efx_for_each_channel(channel, efx) {
@@ -730,7 +772,8 @@ int efx_nic_flush_queues(struct efx_nic *efx)
 					timeout);
 	}
 
-	if (atomic_read(&efx->drain_pending)) {
+	if (atomic_read(&efx->drain_pending) &&
+	    !efx_check_tx_flush_complete(efx)) {
 		netif_err(efx, hw, efx->net_dev, "failed to flush %d queues "
 			  "(rx %d+%d)\n", atomic_read(&efx->drain_pending),
 			  atomic_read(&efx->rxq_flush_outstanding),
@@ -742,7 +785,7 @@ int efx_nic_flush_queues(struct efx_nic *efx)
 		atomic_set(&efx->rxq_flush_outstanding, 0);
 	}
 
-	efx->fc_disable--;
+	efx->type->finish_flush(efx);
 
 	return rc;
 }
@@ -766,8 +809,13 @@ void efx_nic_eventq_read_ack(struct efx_channel *channel)
 
 	EFX_POPULATE_DWORD_1(reg, FRF_AZ_EVQ_RPTR,
 			     channel->eventq_read_ptr & channel->eventq_mask);
-	efx_writed_table(efx, &reg, efx->type->evq_rptr_tbl_base,
-			 channel->channel);
+
+	/* For Falcon A1, EVQ_RPTR_KER is documented as having a step size
+	 * of 4 bytes, but it is really 16 bytes just like later revisions.
+	 */
+	efx_writed(efx, &reg,
+		   efx->type->evq_rptr_tbl_base +
+		   FR_BZ_EVQ_RPTR_STEP * channel->channel);
 }
 
 /* Use HW to insert a SW defined event */
@@ -1017,9 +1065,10 @@ efx_handle_tx_flush_done(struct efx_nic *efx, efx_qword_t *event)
 	if (qid < EFX_TXQ_TYPES * efx->n_tx_channels) {
 		tx_queue = efx_get_tx_queue(efx, qid / EFX_TXQ_TYPES,
 					    qid % EFX_TXQ_TYPES);
-
-		efx_magic_event(tx_queue->channel,
-				EFX_CHANNEL_MAGIC_TX_DRAIN(tx_queue));
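+		/* Only fake the drain event if the flush was still
+		 * outstanding; otherwise it has already been faked by
+		 * efx_check_tx_flush_complete().
+		 */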
+		if (atomic_cmpxchg(&tx_queue->flush_outstanding, 1, 0)) {
+			efx_magic_event(tx_queue->channel,
+					EFX_CHANNEL_MAGIC_TX_DRAIN(tx_queue));
+		}
 	}
 }
 
@@ -1565,7 +1614,9 @@ void efx_nic_push_rx_indir_table(struct efx_nic *efx)
 	for (i = 0; i < FR_BZ_RX_INDIRECTION_TBL_ROWS; i++) {
 		EFX_POPULATE_DWORD_1(dword, FRF_BZ_IT_QUEUE,
 				     efx->rx_indir_table[i]);
-		efx_writed_table(efx, &dword, FR_BZ_RX_INDIRECTION_TBL, i);
+		efx_writed(efx, &dword,
+			   FR_BZ_RX_INDIRECTION_TBL +
+			   FR_BZ_RX_INDIRECTION_TBL_STEP * i);
 	}
 }
 
@@ -2029,15 +2080,15 @@ void efx_nic_get_regs(struct efx_nic *efx, void *buf)
 
 		for (i = 0; i < table->rows; i++) {
 			switch (table->step) {
-			case 4: /* 32-bit register or SRAM */
-				efx_readd_table(efx, buf, table->offset, i);
+			case 4: /* 32-bit SRAM */
+				efx_readd(efx, buf, table->offset + 4 * i);
 				break;
 			case 8: /* 64-bit SRAM */
 				efx_sram_readq(efx,
 					       efx->membase + table->offset,
 					       buf, i);
 				break;
-			case 16: /* 128-bit register */
+			case 16: /* 128-bit-readable register */
 				efx_reado_table(efx, buf, table->offset, i);
 				break;
 			case 32: /* 128-bit register, interleaved */