@@ -286,17 +286,17 @@ static struct ath_buf *ath_tx_get_buffer(struct ath_softc *sc)
 {
 	struct ath_buf *bf = NULL;
 
-	spin_lock_bh(&sc->sc_txbuflock);
+	spin_lock_bh(&sc->tx.txbuflock);
 
-	if (unlikely(list_empty(&sc->sc_txbuf))) {
-		spin_unlock_bh(&sc->sc_txbuflock);
+	if (unlikely(list_empty(&sc->tx.txbuf))) {
+		spin_unlock_bh(&sc->tx.txbuflock);
 		return NULL;
 	}
 
-	bf = list_first_entry(&sc->sc_txbuf, struct ath_buf, list);
+	bf = list_first_entry(&sc->tx.txbuf, struct ath_buf, list);
 	list_del(&bf->list);
 
-	spin_unlock_bh(&sc->sc_txbuflock);
+	spin_unlock_bh(&sc->tx.txbuflock);
 
 	return bf;
 }
@@ -341,9 +341,9 @@ static void ath_tx_complete_buf(struct ath_softc *sc,
 	/*
 	 * Return the list of ath_buf of this mpdu to free queue
 	 */
-	spin_lock_irqsave(&sc->sc_txbuflock, flags);
-	list_splice_tail_init(bf_q, &sc->sc_txbuf);
-	spin_unlock_irqrestore(&sc->sc_txbuflock, flags);
+	spin_lock_irqsave(&sc->tx.txbuflock, flags);
+	list_splice_tail_init(bf_q, &sc->tx.txbuf);
+	spin_unlock_irqrestore(&sc->tx.txbuflock, flags);
 }
 
 /*
@@ -384,7 +384,7 @@ static void ath_tx_queue_tid(struct ath_txq *txq, struct ath_atx_tid *tid)
 
 static void ath_tx_pause_tid(struct ath_softc *sc, struct ath_atx_tid *tid)
 {
-	struct ath_txq *txq = &sc->sc_txq[tid->ac->qnum];
+	struct ath_txq *txq = &sc->tx.txq[tid->ac->qnum];
 
 	spin_lock_bh(&txq->axq_lock);
 
@@ -397,7 +397,7 @@ static void ath_tx_pause_tid(struct ath_softc *sc, struct ath_atx_tid *tid)
 
 void ath_tx_resume_tid(struct ath_softc *sc, struct ath_atx_tid *tid)
 {
-	struct ath_txq *txq = &sc->sc_txq[tid->ac->qnum];
+	struct ath_txq *txq = &sc->tx.txq[tid->ac->qnum];
 
 	ASSERT(tid->paused > 0);
 	spin_lock_bh(&txq->axq_lock);
@@ -686,7 +686,7 @@ static int ath_tx_send_normal(struct ath_softc *sc,
 
 static void ath_tx_flush_tid(struct ath_softc *sc, struct ath_atx_tid *tid)
 {
-	struct ath_txq *txq = &sc->sc_txq[tid->ac->qnum];
+	struct ath_txq *txq = &sc->tx.txq[tid->ac->qnum];
 	struct ath_buf *bf;
 	struct list_head bf_head;
 	INIT_LIST_HEAD(&bf_head);
@@ -861,12 +861,12 @@ static void ath_tx_complete_aggr_rifs(struct ath_softc *sc,
 			struct ath_buf *tbf;
 
 			/* allocate new descriptor */
-			spin_lock_bh(&sc->sc_txbuflock);
-			ASSERT(!list_empty((&sc->sc_txbuf)));
-			tbf = list_first_entry(&sc->sc_txbuf,
+			spin_lock_bh(&sc->tx.txbuflock);
+			ASSERT(!list_empty((&sc->tx.txbuf)));
+			tbf = list_first_entry(&sc->tx.txbuf,
 					struct ath_buf, list);
 			list_del(&tbf->list);
-			spin_unlock_bh(&sc->sc_txbuflock);
+			spin_unlock_bh(&sc->tx.txbuflock);
 
 			ATH_TXBUF_RESET(tbf);
 
@@ -1058,9 +1058,9 @@ static void ath_tx_processq(struct ath_softc *sc, struct ath_txq *txq)
 
 		if (bf_held) {
 			list_del(&bf_held->list);
-			spin_lock_bh(&sc->sc_txbuflock);
-			list_add_tail(&bf_held->list, &sc->sc_txbuf);
-			spin_unlock_bh(&sc->sc_txbuflock);
+			spin_lock_bh(&sc->tx.txbuflock);
+			list_add_tail(&bf_held->list, &sc->tx.txbuf);
+			spin_unlock_bh(&sc->tx.txbuflock);
 		}
 
 		if (!bf_isampdu(bf)) {
@@ -1129,11 +1129,11 @@ static void ath_drain_txdataq(struct ath_softc *sc, bool retry_tx)
 	if (!(sc->sc_flags & SC_OP_INVALID)) {
 		for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) {
 			if (ATH_TXQ_SETUP(sc, i)) {
-				ath_tx_stopdma(sc, &sc->sc_txq[i]);
+				ath_tx_stopdma(sc, &sc->tx.txq[i]);
 				/* The TxDMA may not really be stopped.
 				 * Double check the hal tx pending count */
 				npend += ath9k_hw_numtxpending(ah,
-					sc->sc_txq[i].axq_qnum);
+					sc->tx.txq[i].axq_qnum);
 			}
 		}
 	}
@@ -1158,7 +1158,7 @@ static void ath_drain_txdataq(struct ath_softc *sc, bool retry_tx)
 
 	for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) {
 		if (ATH_TXQ_SETUP(sc, i))
-			ath_tx_draintxq(sc, &sc->sc_txq[i], retry_tx);
+			ath_tx_draintxq(sc, &sc->tx.txq[i], retry_tx);
 	}
 }
 
@@ -1820,9 +1820,9 @@ int ath_tx_start(struct ath_softc *sc, struct sk_buff *skb,
 	}
 	spin_unlock_bh(&txq->axq_lock);
 
-	spin_lock_bh(&sc->sc_txbuflock);
-	list_add_tail(&bf->list, &sc->sc_txbuf);
-	spin_unlock_bh(&sc->sc_txbuflock);
+	spin_lock_bh(&sc->tx.txbuflock);
+	list_add_tail(&bf->list, &sc->tx.txbuf);
+	spin_unlock_bh(&sc->tx.txbuflock);
 
 	return r;
 }
@@ -1839,10 +1839,10 @@ int ath_tx_init(struct ath_softc *sc, int nbufs)
 	int error = 0;
 
 	do {
-		spin_lock_init(&sc->sc_txbuflock);
+		spin_lock_init(&sc->tx.txbuflock);
 
 		/* Setup tx descriptors */
-		error = ath_descdma_setup(sc, &sc->sc_txdma, &sc->sc_txbuf,
+		error = ath_descdma_setup(sc, &sc->tx.txdma, &sc->tx.txbuf,
 					  "tx", nbufs, 1);
 		if (error != 0) {
 			DPRINTF(sc, ATH_DBG_FATAL,
@@ -1852,7 +1852,7 @@ int ath_tx_init(struct ath_softc *sc, int nbufs)
 		}
 
 		/* XXX allocate beacon state together with vap */
-		error = ath_descdma_setup(sc, &sc->sc_bdma, &sc->sc_bbuf,
+		error = ath_descdma_setup(sc, &sc->beacon.bdma, &sc->beacon.bbuf,
 					  "beacon", ATH_BCBUF, 1);
 		if (error != 0) {
 			DPRINTF(sc, ATH_DBG_FATAL,
@@ -1874,12 +1874,12 @@ int ath_tx_init(struct ath_softc *sc, int nbufs)
 int ath_tx_cleanup(struct ath_softc *sc)
 {
 	/* cleanup beacon descriptors */
-	if (sc->sc_bdma.dd_desc_len != 0)
-		ath_descdma_cleanup(sc, &sc->sc_bdma, &sc->sc_bbuf);
+	if (sc->beacon.bdma.dd_desc_len != 0)
+		ath_descdma_cleanup(sc, &sc->beacon.bdma, &sc->beacon.bbuf);
 
 	/* cleanup tx descriptors */
-	if (sc->sc_txdma.dd_desc_len != 0)
-		ath_descdma_cleanup(sc, &sc->sc_txdma, &sc->sc_txbuf);
+	if (sc->tx.txdma.dd_desc_len != 0)
+		ath_descdma_cleanup(sc, &sc->tx.txdma, &sc->tx.txbuf);
 
 	return 0;
 }
@@ -1927,15 +1927,15 @@ struct ath_txq *ath_txq_setup(struct ath_softc *sc, int qtype, int subtype)
 		 */
 		return NULL;
 	}
-	if (qnum >= ARRAY_SIZE(sc->sc_txq)) {
+	if (qnum >= ARRAY_SIZE(sc->tx.txq)) {
 		DPRINTF(sc, ATH_DBG_FATAL,
 			"qnum %u out of range, max %u!\n",
-			qnum, (unsigned int)ARRAY_SIZE(sc->sc_txq));
+			qnum, (unsigned int)ARRAY_SIZE(sc->tx.txq));
 		ath9k_hw_releasetxqueue(ah, qnum);
 		return NULL;
 	}
 	if (!ATH_TXQ_SETUP(sc, qnum)) {
-		struct ath_txq *txq = &sc->sc_txq[qnum];
+		struct ath_txq *txq = &sc->tx.txq[qnum];
 
 		txq->axq_qnum = qnum;
 		txq->axq_link = NULL;
@@ -1946,9 +1946,9 @@ struct ath_txq *ath_txq_setup(struct ath_softc *sc, int qtype, int subtype)
 		txq->axq_aggr_depth = 0;
 		txq->axq_totalqueued = 0;
 		txq->axq_linkbuf = NULL;
-		sc->sc_txqsetup |= 1<<qnum;
+		sc->tx.txqsetup |= 1<<qnum;
 	}
-	return &sc->sc_txq[qnum];
+	return &sc->tx.txq[qnum];
 }
 
 /* Reclaim resources for a setup queue */
@@ -1956,7 +1956,7 @@ struct ath_txq *ath_txq_setup(struct ath_softc *sc, int qtype, int subtype)
 void ath_tx_cleanupq(struct ath_softc *sc, struct ath_txq *txq)
 {
 	ath9k_hw_releasetxqueue(sc->sc_ah, txq->axq_qnum);
-	sc->sc_txqsetup &= ~(1<<txq->axq_qnum);
+	sc->tx.txqsetup &= ~(1<<txq->axq_qnum);
 }
 
 /*
@@ -1973,15 +1973,15 @@ int ath_tx_setup(struct ath_softc *sc, int haltype)
 {
 	struct ath_txq *txq;
 
-	if (haltype >= ARRAY_SIZE(sc->sc_haltype2q)) {
+	if (haltype >= ARRAY_SIZE(sc->tx.hwq_map)) {
 		DPRINTF(sc, ATH_DBG_FATAL,
 			"HAL AC %u out of range, max %zu!\n",
-			haltype, ARRAY_SIZE(sc->sc_haltype2q));
+			haltype, ARRAY_SIZE(sc->tx.hwq_map));
 		return 0;
 	}
 	txq = ath_txq_setup(sc, ATH9K_TX_QUEUE_DATA, haltype);
 	if (txq != NULL) {
-		sc->sc_haltype2q[haltype] = txq->axq_qnum;
+		sc->tx.hwq_map[haltype] = txq->axq_qnum;
 		return 1;
 	} else
 		return 0;
@@ -1993,19 +1993,19 @@ int ath_tx_get_qnum(struct ath_softc *sc, int qtype, int haltype)
 
 	switch (qtype) {
 	case ATH9K_TX_QUEUE_DATA:
-		if (haltype >= ARRAY_SIZE(sc->sc_haltype2q)) {
+		if (haltype >= ARRAY_SIZE(sc->tx.hwq_map)) {
 			DPRINTF(sc, ATH_DBG_FATAL,
 				"HAL AC %u out of range, max %zu!\n",
-				haltype, ARRAY_SIZE(sc->sc_haltype2q));
+				haltype, ARRAY_SIZE(sc->tx.hwq_map));
 			return -1;
 		}
-		qnum = sc->sc_haltype2q[haltype];
+		qnum = sc->tx.hwq_map[haltype];
 		break;
 	case ATH9K_TX_QUEUE_BEACON:
-		qnum = sc->sc_bhalq;
+		qnum = sc->beacon.beaconq;
 		break;
 	case ATH9K_TX_QUEUE_CAB:
-		qnum = sc->sc_cabq->axq_qnum;
+		qnum = sc->beacon.cabq->axq_qnum;
 		break;
 	default:
 		qnum = -1;
@@ -2021,7 +2021,7 @@ struct ath_txq *ath_test_get_txq(struct ath_softc *sc, struct sk_buff *skb)
 	int qnum;
 
 	qnum = ath_get_hal_qnum(skb_get_queue_mapping(skb), sc);
-	txq = &sc->sc_txq[qnum];
+	txq = &sc->tx.txq[qnum];
 
 	spin_lock_bh(&txq->axq_lock);
 
@@ -2050,17 +2050,17 @@ int ath_txq_update(struct ath_softc *sc, int qnum,
 	int error = 0;
 	struct ath9k_tx_queue_info qi;
 
-	if (qnum == sc->sc_bhalq) {
+	if (qnum == sc->beacon.beaconq) {
 		/*
 		 * XXX: for beacon queue, we just save the parameter.
 		 * It will be picked up by ath_beaconq_config when
 		 * it's necessary.
 		 */
-		sc->sc_beacon_qi = *qinfo;
+		sc->beacon.beacon_qi = *qinfo;
 		return 0;
 	}
 
-	ASSERT(sc->sc_txq[qnum].axq_qnum == qnum);
+	ASSERT(sc->tx.txq[qnum].axq_qnum == qnum);
 
 	ath9k_hw_get_txq_props(ah, qnum, &qi);
 	qi.tqi_aifs = qinfo->tqi_aifs;
@@ -2083,7 +2083,7 @@ int ath_txq_update(struct ath_softc *sc, int qnum,
 int ath_cabq_update(struct ath_softc *sc)
 {
 	struct ath9k_tx_queue_info qi;
-	int qnum = sc->sc_cabq->axq_qnum;
+	int qnum = sc->beacon.cabq->axq_qnum;
 	struct ath_beacon_config conf;
 
 	ath9k_hw_get_txq_props(sc->sc_ah, qnum, &qi);
@@ -2117,7 +2117,7 @@ void ath_tx_tasklet(struct ath_softc *sc)
 	 */
 	for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) {
 		if (ATH_TXQ_SETUP(sc, i) && (qcumask & (1 << i)))
-			ath_tx_processq(sc, &sc->sc_txq[i]);
+			ath_tx_processq(sc, &sc->tx.txq[i]);
 	}
 }
 
@@ -2149,9 +2149,9 @@ void ath_tx_draintxq(struct ath_softc *sc,
 			list_del(&bf->list);
 			spin_unlock_bh(&txq->axq_lock);
 
-			spin_lock_bh(&sc->sc_txbuflock);
-			list_add_tail(&bf->list, &sc->sc_txbuf);
-			spin_unlock_bh(&sc->sc_txbuflock);
+			spin_lock_bh(&sc->tx.txbuflock);
+			list_add_tail(&bf->list, &sc->tx.txbuf);
+			spin_unlock_bh(&sc->tx.txbuflock);
 			continue;
 		}
 
@@ -2189,9 +2189,9 @@ void ath_draintxq(struct ath_softc *sc, bool retry_tx)
 	/* stop beacon queue. The beacon will be freed when
 	 * we go to INIT state */
 	if (!(sc->sc_flags & SC_OP_INVALID)) {
-		(void) ath9k_hw_stoptxdma(sc->sc_ah, sc->sc_bhalq);
+		(void) ath9k_hw_stoptxdma(sc->sc_ah, sc->beacon.beaconq);
 		DPRINTF(sc, ATH_DBG_XMIT, "beacon queue %x\n",
-			ath9k_hw_gettxbuf(sc->sc_ah, sc->sc_bhalq));
+			ath9k_hw_gettxbuf(sc->sc_ah, sc->beacon.beaconq));
 	}
 
 	ath_drain_txdataq(sc, retry_tx);
@@ -2199,12 +2199,12 @@ void ath_draintxq(struct ath_softc *sc, bool retry_tx)
 
 u32 ath_txq_depth(struct ath_softc *sc, int qnum)
 {
-	return sc->sc_txq[qnum].axq_depth;
+	return sc->tx.txq[qnum].axq_depth;
 }
 
 u32 ath_txq_aggr_depth(struct ath_softc *sc, int qnum)
 {
-	return sc->sc_txq[qnum].axq_aggr_depth;
+	return sc->tx.txq[qnum].axq_aggr_depth;
 }
 
 bool ath_tx_aggr_check(struct ath_softc *sc, struct ath_node *an, u8 tidno)
@@ -2285,7 +2285,7 @@ void ath_tx_aggr_resume(struct ath_softc *sc, struct ieee80211_sta *sta, u16 tid
 void ath_tx_aggr_teardown(struct ath_softc *sc, struct ath_node *an, u8 tid)
 {
 	struct ath_atx_tid *txtid = ATH_AN_2_TID(an, tid);
-	struct ath_txq *txq = &sc->sc_txq[txtid->ac->qnum];
+	struct ath_txq *txq = &sc->tx.txq[txtid->ac->qnum];
 	struct ath_buf *bf;
 	struct list_head bf_head;
 	INIT_LIST_HEAD(&bf_head);
@@ -2467,7 +2467,7 @@ void ath_tx_node_cleanup(struct ath_softc *sc, struct ath_node *an)
 	struct ath_txq *txq;
 	for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) {
 		if (ATH_TXQ_SETUP(sc, i)) {
-			txq = &sc->sc_txq[i];
+			txq = &sc->tx.txq[i];
 
 			spin_lock(&txq->axq_lock);
 
@@ -2512,9 +2512,9 @@ void ath_tx_cabq(struct ath_softc *sc, struct sk_buff *skb)
 	if (info->flags & IEEE80211_TX_CTL_ASSIGN_SEQ) {
 		struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
 		if (info->flags & IEEE80211_TX_CTL_FIRST_FRAGMENT)
-			sc->seq_no += 0x10;
+			sc->tx.seq_no += 0x10;
 		hdr->seq_ctrl &= cpu_to_le16(IEEE80211_SCTL_FRAG);
-		hdr->seq_ctrl |= cpu_to_le16(sc->seq_no);
+		hdr->seq_ctrl |= cpu_to_le16(sc->tx.seq_no);
 	}
 
 	/* Add the padding after the header if this is not already done */
@@ -2530,7 +2530,7 @@ void ath_tx_cabq(struct ath_softc *sc, struct sk_buff *skb)
 		memmove(skb->data, skb->data + padsize, hdrlen);
 	}
 
-	txctl.txq = sc->sc_cabq;
+	txctl.txq = sc->beacon.cabq;
 
 	DPRINTF(sc, ATH_DBG_XMIT, "transmitting CABQ packet, skb: %p\n", skb);
 