@@ -348,6 +348,10 @@ int iwl_tx_queue_init(struct iwl_priv *priv, struct iwl_tx_queue *txq,
 
 	txq->need_update = 0;
 
+	/* aggregation TX queues will get their ID when aggregation begins */
+	if (txq_id <= IWL_TX_FIFO_AC3)
+		txq->swq_id = txq_id;
+
 	/* TFD_QUEUE_SIZE_MAX must be power-of-two size, otherwise
 	 * iwl_queue_inc_wrap and iwl_queue_dec_wrap are broken. */
 	BUILD_BUG_ON(TFD_QUEUE_SIZE_MAX & (TFD_QUEUE_SIZE_MAX - 1));
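
At queue setup, only the four AC queues get a software queue ID up front; aggregation queues are numbered later, in iwl_tx_agg_start() below. A minimal sketch of the resulting mapping, assuming the usual iwlwifi FIFO numbering (AC0..AC3 == 0..3); the enum values here are an assumption, not quoted driver code:

	/* Assumed numbering; with it, "txq_id <= IWL_TX_FIFO_AC3" selects
	 * exactly the four AC queues. */
	enum { IWL_TX_FIFO_AC0 = 0, IWL_TX_FIFO_AC1, IWL_TX_FIFO_AC2, IWL_TX_FIFO_AC3 };

	/* txq_id 0..3 -> swq_id = txq_id (AC queues, fixed 1:1 mapping)
	 * txq_id 4..  -> swq_id assigned when aggregation starts       */
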
@@ -734,8 +738,7 @@ int iwl_tx_skb(struct iwl_priv *priv, struct sk_buff *skb)
 
 	IWL_DEBUG_TX(priv, "station Id %d\n", sta_id);
 
-	swq_id = skb_get_queue_mapping(skb);
-	txq_id = swq_id;
+	txq_id = skb_get_queue_mapping(skb);
 	if (ieee80211_is_data_qos(fc)) {
 		qc = ieee80211_get_qos_ctl(hdr);
 		tid = qc[0] & IEEE80211_QOS_CTL_TID_MASK;
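
With swq_id stored on the TX queue itself, the fast path no longer needs separate software-queue bookkeeping here: the skb's queue mapping is used directly as the TX queue index, and swq_id is picked up from the queue structure further down.
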
@@ -746,16 +749,14 @@
 		hdr->seq_ctrl |= cpu_to_le16(seq_number);
 		seq_number += 0x10;
 		/* aggregation is on for this <sta,tid> */
-		if (info->flags & IEEE80211_TX_CTL_AMPDU) {
+		if (info->flags & IEEE80211_TX_CTL_AMPDU)
 			txq_id = priv->stations[sta_id].tid[tid].agg.txq_id;
-			swq_id = iwl_virtual_agg_queue_num(swq_id, txq_id);
-		}
 		priv->stations[sta_id].tid[tid].tfds_in_queue++;
 	}
 
 	txq = &priv->txq[txq_id];
+	swq_id = txq->swq_id;
 	q = &txq->q;
-	txq->swq_id = swq_id;
 
 	spin_lock_irqsave(&priv->lock, flags);
 
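
This is the core of the change: previously every aggregated frame recomputed the virtual queue number and wrote it back into the queue (txq->swq_id = swq_id) inside the TX hot path. Now swq_id is a read-only property of the queue, set once at queue init or at aggregation start, and simply read here.
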
@@ -1186,6 +1187,7 @@ int iwl_tx_agg_start(struct iwl_priv *priv, const u8 *ra, u16 tid, u16 *ssn)
 	tid_data = &priv->stations[sta_id].tid[tid];
 	*ssn = SEQ_TO_SN(tid_data->seq_number);
 	tid_data->agg.txq_id = txq_id;
+	priv->txq[txq_id].swq_id = iwl_virtual_agg_queue_num(tx_fifo, txq_id);
 	spin_unlock_irqrestore(&priv->sta_lock, flags);
 
 	ret = priv->cfg->ops->lib->txq_agg_enable(priv, txq_id, tx_fifo,
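
For reference, a sketch of what the virtual aggregation queue number presumably encodes. The exact bit layout is an assumption (low two bits for the mac80211 AC, upper bits for the hardware queue index), chosen to mirror how the driver's queue stop/wake helpers appear to decode swq_id; the decode helpers below are hypothetical and only illustrate the round trip:

	#include <linux/types.h>

	/* Sketch, not verbatim driver code: pack AC and HW queue into one id.
	 * Assumed layout: bits 0-1 = AC, bits 2-6 = HW queue index. */
	static inline u8 iwl_virtual_agg_queue_num(u8 ac, u8 hwq)
	{
		return (hwq << 2) | ac;	/* bit layout is an assumption */
	}

	/* Hypothetical decode helpers, mirroring the assumed encoding: */
	static inline u8 swq_id_to_ac(u8 swq_id)  { return swq_id & 3; }
	static inline u8 swq_id_to_hwq(u8 swq_id) { return (swq_id >> 2) & 0x1f; }

With that encoding, the swq_id assigned here for the aggregation queue still yields the original AC when masked with 3, so queue wake/stop decisions keep targeting the right mac80211 software queue.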