
ath9k: fix tx aggregation flush on AR9003

Completing aggregate frames can lead to new buffers being pushed into
the tid queues due to software retransmission.
When the tx queues are being drained, all pending aggregates must be
completed before the tid queues get drained, otherwise buffers might be
leaked.
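
In outline, the corrected ordering in ath_draintxq() looks like the sketch
below (condensed from the diff that follows; drain_order_sketch is only an
illustrative name, the fields, flags and helpers are the driver's own, and
the completion loop body is elided):

  /*
   * Sketch of the corrected drain order, condensed from the diff below.
   */
  static void drain_order_sketch(struct ath_softc *sc, struct ath_txq *txq,
  				 bool retry_tx)
  {
  	/* Step 1: complete every pending aggregate first.  On AR9003
  	 * (EDMA) these sit on txq->txq_fifo_pending; completing them
  	 * may requeue software-retried subframes onto the tid queues.
  	 */
  	if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
  		spin_lock_bh(&txq->axq_lock);
  		/* complete all frames on txq->txq_fifo_pending
  		 * (loop body elided -- see the hunk below) */
  		spin_unlock_bh(&txq->axq_lock);
  	}

  	/* Step 2: only now drain the tid queues, so buffers requeued
  	 * in step 1 are not left behind and leaked.
  	 */
  	if ((sc->sc_flags & SC_OP_TXAGGR) && !retry_tx) {
  		spin_lock_bh(&txq->axq_lock);
  		ath_txq_drain_pending_buffers(sc, txq);
  		spin_unlock_bh(&txq->axq_lock);
  	}
  }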

Signed-off-by: Felix Fietkau <nbd@openwrt.org>
Cc: stable@kernel.org
Signed-off-by: John W. Linville <linville@tuxdriver.com>
Felix Fietkau committed 14 years ago
commit e609e2ea2c
1 file changed, 9 insertions(+), 9 deletions(-)
  drivers/net/wireless/ath/ath9k/xmit.c

--- a/drivers/net/wireless/ath/ath9k/xmit.c
+++ b/drivers/net/wireless/ath/ath9k/xmit.c
@@ -1089,15 +1089,6 @@ void ath_draintxq(struct ath_softc *sc, struct ath_txq *txq, bool retry_tx)
 	txq->axq_tx_inprogress = false;
 	spin_unlock_bh(&txq->axq_lock);
 
-	/* flush any pending frames if aggregation is enabled */
-	if (sc->sc_flags & SC_OP_TXAGGR) {
-		if (!retry_tx) {
-			spin_lock_bh(&txq->axq_lock);
-			ath_txq_drain_pending_buffers(sc, txq);
-			spin_unlock_bh(&txq->axq_lock);
-		}
-	}
-
 	if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
 		spin_lock_bh(&txq->axq_lock);
 		while (!list_empty(&txq->txq_fifo_pending)) {
@@ -1118,6 +1109,15 @@ void ath_draintxq(struct ath_softc *sc, struct ath_txq *txq, bool retry_tx)
 		}
 		spin_unlock_bh(&txq->axq_lock);
 	}
+
+	/* flush any pending frames if aggregation is enabled */
+	if (sc->sc_flags & SC_OP_TXAGGR) {
+		if (!retry_tx) {
+			spin_lock_bh(&txq->axq_lock);
+			ath_txq_drain_pending_buffers(sc, txq);
+			spin_unlock_bh(&txq->axq_lock);
+		}
+	}
 }
 
 void ath_drain_all_txq(struct ath_softc *sc, bool retry_tx)