|
@@ -560,32 +560,92 @@ int iwl_trans_pcie_tx_agg_alloc(struct iwl_trans *trans,
|
|
|
|
|
|
return 0;
|
|
|
}
|
|
|
-int iwl_trans_pcie_txq_agg_disable(struct iwl_priv *priv, u16 txq_id)
|
|
|
+
|
|
|
+void iwl_trans_pcie_txq_agg_disable(struct iwl_trans *trans, int txq_id)
|
|
|
{
|
|
|
- struct iwl_trans *trans = trans(priv);
|
|
|
+ iwlagn_tx_queue_stop_scheduler(trans, txq_id);
|
|
|
+
|
|
|
+ iwl_clear_bits_prph(bus(trans), SCD_AGGR_SEL, (1 << txq_id));
|
|
|
+
|
|
|
+ priv(trans)->txq[txq_id].q.read_ptr = 0;
|
|
|
+ priv(trans)->txq[txq_id].q.write_ptr = 0;
|
|
|
+ /* supposes that ssn_idx is valid (!= 0xFFF) */
|
|
|
+ iwl_trans_set_wr_ptrs(trans, txq_id, 0);
|
|
|
+
|
|
|
+ iwl_clear_bits_prph(bus(trans), SCD_INTERRUPT_MASK, (1 << txq_id));
|
|
|
+ iwl_txq_ctx_deactivate(priv(trans), txq_id);
|
|
|
+ iwl_trans_tx_queue_set_status(priv(trans),
|
|
|
+ &priv(trans)->txq[txq_id], 0, 0);
|
|
|
+}
|
|
|
+
|
|
|
+int iwl_trans_pcie_tx_agg_disable(struct iwl_trans *trans,
|
|
|
+ enum iwl_rxon_context_id ctx, int sta_id,
|
|
|
+ int tid)
|
|
|
+{
|
|
|
+ unsigned long flags;
|
|
|
+ int read_ptr, write_ptr;
|
|
|
+ struct iwl_tid_data *tid_data;
|
|
|
+ int txq_id;
|
|
|
+
|
|
|
+ spin_lock_irqsave(&trans->shrd->sta_lock, flags);
|
|
|
+
|
|
|
+ tid_data = &trans->shrd->tid_data[sta_id][tid];
|
|
|
+ txq_id = tid_data->agg.txq_id;
|
|
|
+
|
|
|
if ((IWLAGN_FIRST_AMPDU_QUEUE > txq_id) ||
|
|
|
(IWLAGN_FIRST_AMPDU_QUEUE +
|
|
|
- hw_params(priv).num_ampdu_queues <= txq_id)) {
|
|
|
- IWL_ERR(priv,
|
|
|
+ hw_params(trans).num_ampdu_queues <= txq_id)) {
|
|
|
+ IWL_ERR(trans,
|
|
|
"queue number out of range: %d, must be %d to %d\n",
|
|
|
txq_id, IWLAGN_FIRST_AMPDU_QUEUE,
|
|
|
IWLAGN_FIRST_AMPDU_QUEUE +
|
|
|
- hw_params(priv).num_ampdu_queues - 1);
|
|
|
+ hw_params(trans).num_ampdu_queues - 1);
|
|
|
+ spin_unlock_irqrestore(&trans->shrd->sta_lock, flags);
|
|
|
return -EINVAL;
|
|
|
}
|
|
|
|
|
|
- iwlagn_tx_queue_stop_scheduler(trans, txq_id);
|
|
|
+ switch (trans->shrd->tid_data[sta_id][tid].agg.state) {
|
|
|
+ case IWL_EMPTYING_HW_QUEUE_ADDBA:
|
|
|
+ /*
|
|
|
+ * This can happen if the peer stops aggregation
|
|
|
+ * again before we've had a chance to drain the
|
|
|
+ * queue we selected previously, i.e. before the
|
|
|
+ * session was really started completely.
|
|
|
+ */
|
|
|
+ IWL_DEBUG_HT(trans, "AGG stop before setup done\n");
|
|
|
+ goto turn_off;
|
|
|
+ case IWL_AGG_ON:
|
|
|
+ break;
|
|
|
+ default:
|
|
|
+ IWL_WARN(trans, "Stopping AGG while state not ON "
|
|
|
+ "or starting\n");
|
|
|
+ }
|
|
|
|
|
|
- iwl_clear_bits_prph(bus(priv), SCD_AGGR_SEL, (1 << txq_id));
|
|
|
+ write_ptr = priv(trans)->txq[txq_id].q.write_ptr;
|
|
|
+ read_ptr = priv(trans)->txq[txq_id].q.read_ptr;
|
|
|
|
|
|
- priv->txq[txq_id].q.read_ptr = 0;
|
|
|
- priv->txq[txq_id].q.write_ptr = 0;
|
|
|
- /* supposes that ssn_idx is valid (!= 0xFFF) */
|
|
|
- iwl_trans_set_wr_ptrs(trans, txq_id, 0);
|
|
|
+ /* The queue is not empty */
|
|
|
+ if (write_ptr != read_ptr) {
|
|
|
+ IWL_DEBUG_HT(trans, "Stopping a non empty AGG HW QUEUE\n");
|
|
|
+ trans->shrd->tid_data[sta_id][tid].agg.state =
|
|
|
+ IWL_EMPTYING_HW_QUEUE_DELBA;
|
|
|
+ spin_unlock_irqrestore(&trans->shrd->sta_lock, flags);
|
|
|
+ return 0;
|
|
|
+ }
|
|
|
+
|
|
|
+ IWL_DEBUG_HT(trans, "HW queue is empty\n");
|
|
|
+turn_off:
|
|
|
+ trans->shrd->tid_data[sta_id][tid].agg.state = IWL_AGG_OFF;
|
|
|
+
|
|
|
+ /* do not restore/save irqs */
|
|
|
+ spin_unlock(&trans->shrd->sta_lock);
|
|
|
+ spin_lock(&trans->shrd->lock);
|
|
|
+
|
|
|
+ iwl_trans_pcie_txq_agg_disable(trans, txq_id);
|
|
|
+
|
|
|
+ spin_unlock_irqrestore(&trans->shrd->lock, flags);
|
|
|
|
|
|
- iwl_clear_bits_prph(bus(priv), SCD_INTERRUPT_MASK, (1 << txq_id));
|
|
|
- iwl_txq_ctx_deactivate(priv, txq_id);
|
|
|
- iwl_trans_tx_queue_set_status(priv, &priv->txq[txq_id], 0, 0);
|
|
|
+ iwl_stop_tx_ba_trans_ready(priv(trans), ctx, sta_id, tid);
|
|
|
|
|
|
return 0;
|
|
|
}
|