@@ -433,6 +433,26 @@ out_free_arrays:
 }
 EXPORT_SYMBOL(iwl_tx_queue_init);
 
+void iwl_tx_queue_reset(struct iwl_priv *priv, struct iwl_tx_queue *txq,
+			int slots_num, u32 txq_id)
+{
+	int actual_slots = slots_num;
+
+	if (txq_id == IWL_CMD_QUEUE_NUM)
+		actual_slots++;
+
+	memset(txq->meta, 0, sizeof(struct iwl_cmd_meta) * actual_slots);
+
+	txq->need_update = 0;
+
+	/* Initialize queue's high/low-water marks, and head/tail indexes */
+	iwl_queue_init(priv, &txq->q, TFD_QUEUE_SIZE_MAX, slots_num, txq_id);
+
+	/* Tell device where to find queue */
+	priv->cfg->ops->lib->txq_init(priv, txq);
+}
+EXPORT_SYMBOL(iwl_tx_queue_reset);
+
 /**
  * iwl_hw_txq_ctx_free - Free TXQ Context
  *
@@ -444,8 +464,7 @@ void iwl_hw_txq_ctx_free(struct iwl_priv *priv)
 
 	/* Tx queues */
 	if (priv->txq) {
-		for (txq_id = 0; txq_id < priv->hw_params.max_txq_num;
-		     txq_id++)
+		for (txq_id = 0; txq_id < priv->hw_params.max_txq_num; txq_id++)
 			if (txq_id == IWL_CMD_QUEUE_NUM)
 				iwl_cmd_queue_free(priv);
 			else
@@ -461,15 +480,15 @@ void iwl_hw_txq_ctx_free(struct iwl_priv *priv)
 EXPORT_SYMBOL(iwl_hw_txq_ctx_free);
 
 /**
- * iwl_txq_ctx_reset - Reset TX queue context
- * Destroys all DMA structures and initialize them again
+ * iwl_txq_ctx_alloc - allocate TX queue context
+ * Allocate all Tx DMA structures and initialize them
  *
  * @param priv
  * @return error code
  */
-int iwl_txq_ctx_reset(struct iwl_priv *priv)
+int iwl_txq_ctx_alloc(struct iwl_priv *priv)
 {
-	int ret = 0;
+	int ret;
 	int txq_id, slots_num;
 	unsigned long flags;
 
@@ -527,8 +546,31 @@ int iwl_txq_ctx_reset(struct iwl_priv *priv)
 	return ret;
 }
 
+void iwl_txq_ctx_reset(struct iwl_priv *priv)
+{
+	int txq_id, slots_num;
+	unsigned long flags;
+
+	spin_lock_irqsave(&priv->lock, flags);
+
+	/* Turn off all Tx DMA fifos */
+	priv->cfg->ops->lib->txq_set_sched(priv, 0);
+
+	/* Tell NIC where to find the "keep warm" buffer */
+	iwl_write_direct32(priv, FH_KW_MEM_ADDR_REG, priv->kw.dma >> 4);
+
+	spin_unlock_irqrestore(&priv->lock, flags);
+
+	/* Alloc and init all Tx queues, including the command queue (#4) */
+	for (txq_id = 0; txq_id < priv->hw_params.max_txq_num; txq_id++) {
+		slots_num = txq_id == IWL_CMD_QUEUE_NUM ?
+			    TFD_CMD_SLOTS : TFD_TX_CMD_SLOTS;
+		iwl_tx_queue_reset(priv, &priv->txq[txq_id], slots_num, txq_id);
+	}
+}
+
 /**
- * iwl_txq_ctx_stop - Stop all Tx DMA channels, free Tx queue memory
+ * iwl_txq_ctx_stop - Stop all Tx DMA channels
  */
 void iwl_txq_ctx_stop(struct iwl_priv *priv)
 {
@@ -548,9 +590,6 @@ void iwl_txq_ctx_stop(struct iwl_priv *priv)
 				    1000);
 	}
 	spin_unlock_irqrestore(&priv->lock, flags);
-
-	/* Deallocate memory for all Tx queues */
-	iwl_hw_txq_ctx_free(priv);
 }
 EXPORT_SYMBOL(iwl_txq_ctx_stop);
 