@@ -2946,378 +2946,6 @@ void iwl4965_set_rxon_chain(struct iwl4965_priv *priv)
 	IWL_DEBUG_ASSOC("rx chain %X\n", priv->staging_rxon.rx_chain);
 }
 
-#ifdef CONFIG_IWL4965_HT
-#ifdef CONFIG_IWL4965_HT_AGG
-/*
-	get the traffic load value for tid
-*/
-static u32 iwl4965_tl_get_load(struct iwl4965_priv *priv, u8 tid)
-{
-	u32 load = 0;
-	u32 current_time = jiffies_to_msecs(jiffies);
-	u32 time_diff;
-	s32 index;
-	unsigned long flags;
-	struct iwl4965_traffic_load *tid_ptr = NULL;
-
-	if (tid >= TID_MAX_LOAD_COUNT)
-		return 0;
-
-	tid_ptr = &(priv->lq_mngr.agg_ctrl.traffic_load[tid]);
-
-	current_time -= current_time % TID_ROUND_VALUE;
-
-	spin_lock_irqsave(&priv->lq_mngr.lock, flags);
-	if (!(tid_ptr->queue_count))
-		goto out;
-
-	time_diff = TIME_WRAP_AROUND(tid_ptr->time_stamp, current_time);
-	index = time_diff / TID_QUEUE_CELL_SPACING;
-
-	if (index >= TID_QUEUE_MAX_SIZE) {
-		u32 oldest_time = current_time - TID_MAX_TIME_DIFF;
-
-		while (tid_ptr->queue_count &&
-		       (tid_ptr->time_stamp < oldest_time)) {
-			tid_ptr->total -= tid_ptr->packet_count[tid_ptr->head];
-			tid_ptr->packet_count[tid_ptr->head] = 0;
-			tid_ptr->time_stamp += TID_QUEUE_CELL_SPACING;
-			tid_ptr->queue_count--;
-			tid_ptr->head++;
-			if (tid_ptr->head >= TID_QUEUE_MAX_SIZE)
-				tid_ptr->head = 0;
-		}
-	}
-	load = tid_ptr->total;
-
- out:
-	spin_unlock_irqrestore(&priv->lq_mngr.lock, flags);
-	return load;
-}
-
-/*
-	increment traffic load value for tid and also remove
-	any old values if passed the certian time period
-*/
-static void iwl4965_tl_add_packet(struct iwl4965_priv *priv, u8 tid)
-{
-	u32 current_time = jiffies_to_msecs(jiffies);
-	u32 time_diff;
-	s32 index;
-	unsigned long flags;
-	struct iwl4965_traffic_load *tid_ptr = NULL;
-
-	if (tid >= TID_MAX_LOAD_COUNT)
-		return;
-
-	tid_ptr = &(priv->lq_mngr.agg_ctrl.traffic_load[tid]);
-
-	current_time -= current_time % TID_ROUND_VALUE;
-
-	spin_lock_irqsave(&priv->lq_mngr.lock, flags);
-	if (!(tid_ptr->queue_count)) {
-		tid_ptr->total = 1;
-		tid_ptr->time_stamp = current_time;
-		tid_ptr->queue_count = 1;
-		tid_ptr->head = 0;
-		tid_ptr->packet_count[0] = 1;
-		goto out;
-	}
-
-	time_diff = TIME_WRAP_AROUND(tid_ptr->time_stamp, current_time);
-	index = time_diff / TID_QUEUE_CELL_SPACING;
-
-	if (index >= TID_QUEUE_MAX_SIZE) {
-		u32 oldest_time = current_time - TID_MAX_TIME_DIFF;
-
-		while (tid_ptr->queue_count &&
-		       (tid_ptr->time_stamp < oldest_time)) {
-			tid_ptr->total -= tid_ptr->packet_count[tid_ptr->head];
-			tid_ptr->packet_count[tid_ptr->head] = 0;
-			tid_ptr->time_stamp += TID_QUEUE_CELL_SPACING;
-			tid_ptr->queue_count--;
-			tid_ptr->head++;
-			if (tid_ptr->head >= TID_QUEUE_MAX_SIZE)
-				tid_ptr->head = 0;
-		}
-	}
-
-	index = (tid_ptr->head + index) % TID_QUEUE_MAX_SIZE;
-	tid_ptr->packet_count[index] = tid_ptr->packet_count[index] + 1;
-	tid_ptr->total = tid_ptr->total + 1;
-
-	if ((index + 1) > tid_ptr->queue_count)
-		tid_ptr->queue_count = index + 1;
- out:
-	spin_unlock_irqrestore(&priv->lq_mngr.lock, flags);
-
-}
-
-#define MMAC_SCHED_MAX_NUMBER_OF_HT_BACK_FLOWS 7
-enum HT_STATUS {
-	BA_STATUS_FAILURE = 0,
-	BA_STATUS_INITIATOR_DELBA,
-	BA_STATUS_RECIPIENT_DELBA,
-	BA_STATUS_RENEW_ADDBA_REQUEST,
-	BA_STATUS_ACTIVE,
-};
-
-/**
- * iwl4964_tl_ba_avail - Find out if an unused aggregation queue is available
- */
-static u8 iwl4964_tl_ba_avail(struct iwl4965_priv *priv)
-{
-	int i;
-	struct iwl4965_lq_mngr *lq;
-	u8 count = 0;
-	u16 msk;
-
-	lq = (struct iwl4965_lq_mngr *)&(priv->lq_mngr);
-
-	/* Find out how many agg queues are in use */
-	for (i = 0; i < TID_MAX_LOAD_COUNT ; i++) {
-		msk = 1 << i;
-		if ((lq->agg_ctrl.granted_ba & msk) ||
-		    (lq->agg_ctrl.wait_for_agg_status & msk))
-			count++;
-	}
-
-	if (count < MMAC_SCHED_MAX_NUMBER_OF_HT_BACK_FLOWS)
-		return 1;
-
-	return 0;
-}
-
-static void iwl4965_ba_status(struct iwl4965_priv *priv,
-			      u8 tid, enum HT_STATUS status);
-
-static int iwl4965_perform_addba(struct iwl4965_priv *priv, u8 tid, u32 length,
-				 u32 ba_timeout)
-{
-	int rc;
-
-	rc = ieee80211_start_BA_session(priv->hw, priv->bssid, tid);
-	if (rc)
-		iwl4965_ba_status(priv, tid, BA_STATUS_FAILURE);
-
-	return rc;
-}
-
-static int iwl4965_perform_delba(struct iwl4965_priv *priv, u8 tid)
-{
-	int rc;
-
-	rc = ieee80211_stop_BA_session(priv->hw, priv->bssid, tid);
-	if (rc)
-		iwl4965_ba_status(priv, tid, BA_STATUS_FAILURE);
-
-	return rc;
-}
-
-static void iwl4965_turn_on_agg_for_tid(struct iwl4965_priv *priv,
-					struct iwl4965_lq_mngr *lq,
-					u8 auto_agg, u8 tid)
-{
-	u32 tid_msk = (1 << tid);
-	unsigned long flags;
-
-	spin_lock_irqsave(&priv->lq_mngr.lock, flags);
-/*
-	if ((auto_agg) && (!lq->enable_counter)){
-		lq->agg_ctrl.next_retry = 0;
-		lq->agg_ctrl.tid_retry = 0;
-		spin_unlock_irqrestore(&priv->lq_mngr.lock, flags);
-		return;
-	}
-*/
-	if (!(lq->agg_ctrl.granted_ba & tid_msk) &&
-	    (lq->agg_ctrl.requested_ba & tid_msk)) {
-		u8 available_queues;
-		u32 load;
-
-		spin_unlock_irqrestore(&priv->lq_mngr.lock, flags);
-		available_queues = iwl4964_tl_ba_avail(priv);
-		load = iwl4965_tl_get_load(priv, tid);
-
-		spin_lock_irqsave(&priv->lq_mngr.lock, flags);
-		if (!available_queues) {
-			if (auto_agg)
-				lq->agg_ctrl.tid_retry |= tid_msk;
-			else {
-				lq->agg_ctrl.requested_ba &= ~tid_msk;
-				lq->agg_ctrl.wait_for_agg_status &= ~tid_msk;
-			}
-		} else if ((auto_agg) &&
-			   ((load <= lq->agg_ctrl.tid_traffic_load_threshold) ||
-			    ((lq->agg_ctrl.wait_for_agg_status & tid_msk))))
-			lq->agg_ctrl.tid_retry |= tid_msk;
-		else {
-			lq->agg_ctrl.wait_for_agg_status |= tid_msk;
-			spin_unlock_irqrestore(&priv->lq_mngr.lock, flags);
-			iwl4965_perform_addba(priv, tid, 0x40,
-					      lq->agg_ctrl.ba_timeout);
-			spin_lock_irqsave(&priv->lq_mngr.lock, flags);
-		}
-	}
-	spin_unlock_irqrestore(&priv->lq_mngr.lock, flags);
-}
-
-static void iwl4965_turn_on_agg(struct iwl4965_priv *priv, u8 tid)
-{
-	struct iwl4965_lq_mngr *lq;
-	unsigned long flags;
-
-	lq = (struct iwl4965_lq_mngr *)&(priv->lq_mngr);
-
-	if ((tid < TID_MAX_LOAD_COUNT))
-		iwl4965_turn_on_agg_for_tid(priv, lq, lq->agg_ctrl.auto_agg,
-					    tid);
-	else if (tid == TID_ALL_SPECIFIED) {
-		if (lq->agg_ctrl.requested_ba) {
-			for (tid = 0; tid < TID_MAX_LOAD_COUNT; tid++)
-				iwl4965_turn_on_agg_for_tid(priv, lq,
-					lq->agg_ctrl.auto_agg, tid);
-		} else {
-			spin_lock_irqsave(&priv->lq_mngr.lock, flags);
-			lq->agg_ctrl.tid_retry = 0;
-			lq->agg_ctrl.next_retry = 0;
-			spin_unlock_irqrestore(&priv->lq_mngr.lock, flags);
-		}
-	}
-
-}
-
-void iwl4965_turn_off_agg(struct iwl4965_priv *priv, u8 tid)
-{
-	u32 tid_msk;
-	struct iwl4965_lq_mngr *lq;
-	unsigned long flags;
-
-	lq = (struct iwl4965_lq_mngr *)&(priv->lq_mngr);
-
-	if ((tid < TID_MAX_LOAD_COUNT)) {
-		tid_msk = 1 << tid;
-		spin_lock_irqsave(&priv->lq_mngr.lock, flags);
-		lq->agg_ctrl.wait_for_agg_status |= tid_msk;
-		lq->agg_ctrl.requested_ba &= ~tid_msk;
-		spin_unlock_irqrestore(&priv->lq_mngr.lock, flags);
-		iwl4965_perform_delba(priv, tid);
-	} else if (tid == TID_ALL_SPECIFIED) {
-		spin_lock_irqsave(&priv->lq_mngr.lock, flags);
-		for (tid = 0; tid < TID_MAX_LOAD_COUNT; tid++) {
-			tid_msk = 1 << tid;
-			lq->agg_ctrl.wait_for_agg_status |= tid_msk;
-			spin_unlock_irqrestore(&priv->lq_mngr.lock, flags);
-			iwl4965_perform_delba(priv, tid);
-			spin_lock_irqsave(&priv->lq_mngr.lock, flags);
-		}
-		lq->agg_ctrl.requested_ba = 0;
-		spin_unlock_irqrestore(&priv->lq_mngr.lock, flags);
-	}
-}
-
-/**
- * iwl4965_ba_status - Update driver's link quality mgr with tid's HT status
- */
-static void iwl4965_ba_status(struct iwl4965_priv *priv,
-			      u8 tid, enum HT_STATUS status)
-{
-	struct iwl4965_lq_mngr *lq;
-	u32 tid_msk = (1 << tid);
-	unsigned long flags;
-
-	lq = (struct iwl4965_lq_mngr *)&(priv->lq_mngr);
-
-	if ((tid >= TID_MAX_LOAD_COUNT))
-		goto out;
-
-	spin_lock_irqsave(&priv->lq_mngr.lock, flags);
-	switch (status) {
-	case BA_STATUS_ACTIVE:
-		if (!(lq->agg_ctrl.granted_ba & tid_msk))
-			lq->agg_ctrl.granted_ba |= tid_msk;
-		break;
-	default:
-		if ((lq->agg_ctrl.granted_ba & tid_msk))
-			lq->agg_ctrl.granted_ba &= ~tid_msk;
-		break;
-	}
-
-	lq->agg_ctrl.wait_for_agg_status &= ~tid_msk;
-	if (status != BA_STATUS_ACTIVE) {
-		if (lq->agg_ctrl.auto_agg) {
-			lq->agg_ctrl.tid_retry |= tid_msk;
-			lq->agg_ctrl.next_retry =
-				jiffies + msecs_to_jiffies(500);
-		} else
-			lq->agg_ctrl.requested_ba &= ~tid_msk;
-	}
-	spin_unlock_irqrestore(&priv->lq_mngr.lock, flags);
- out:
-	return;
-}
-
-static void iwl4965_bg_agg_work(struct work_struct *work)
-{
-	struct iwl4965_priv *priv = container_of(work, struct iwl4965_priv,
-					agg_work);
-
-	u32 tid;
-	u32 retry_tid;
-	u32 tid_msk;
-	unsigned long flags;
-	struct iwl4965_lq_mngr *lq = (struct iwl4965_lq_mngr *)&(priv->lq_mngr);
-
-	spin_lock_irqsave(&priv->lq_mngr.lock, flags);
-	retry_tid = lq->agg_ctrl.tid_retry;
-	lq->agg_ctrl.tid_retry = 0;
-	spin_unlock_irqrestore(&priv->lq_mngr.lock, flags);
-
-	if (retry_tid == TID_ALL_SPECIFIED)
-		iwl4965_turn_on_agg(priv, TID_ALL_SPECIFIED);
-	else {
-		for (tid = 0; tid < TID_MAX_LOAD_COUNT; tid++) {
-			tid_msk = (1 << tid);
-			if (retry_tid & tid_msk)
-				iwl4965_turn_on_agg(priv, tid);
-		}
-	}
-
-	spin_lock_irqsave(&priv->lq_mngr.lock, flags);
-	if (lq->agg_ctrl.tid_retry)
-		lq->agg_ctrl.next_retry = jiffies + msecs_to_jiffies(500);
-	spin_unlock_irqrestore(&priv->lq_mngr.lock, flags);
-	return;
-}
-
-/* TODO: move this functionality to rate scaling */
-void iwl4965_tl_get_stats(struct iwl4965_priv *priv,
-			  struct ieee80211_hdr *hdr)
-{
-	__le16 *qc = ieee80211_get_qos_ctrl(hdr);
-
-	if (qc &&
-	    (priv->iw_mode != IEEE80211_IF_TYPE_IBSS)) {
-		u8 tid = 0;
-		tid = (u8) (le16_to_cpu(*qc) & 0xF);
-		if (tid < TID_MAX_LOAD_COUNT)
-			iwl4965_tl_add_packet(priv, tid);
-	}
-
-	if (priv->lq_mngr.agg_ctrl.next_retry &&
-	    (time_after(priv->lq_mngr.agg_ctrl.next_retry, jiffies))) {
-		unsigned long flags;
-
-		spin_lock_irqsave(&priv->lq_mngr.lock, flags);
-		priv->lq_mngr.agg_ctrl.next_retry = 0;
-		spin_unlock_irqrestore(&priv->lq_mngr.lock, flags);
-		schedule_work(&priv->agg_work);
-	}
-}
-
-#endif /*CONFIG_IWL4965_HT_AGG */
-#endif /* CONFIG_IWL4965_HT */
-
 /**
  * sign_extend - Sign extend a value using specified bit as sign-bit
  *
@@ -4191,25 +3819,6 @@ static void iwl4965_rx_missed_beacon_notif(struct iwl4965_priv *priv,
 }
 
 #ifdef CONFIG_IWL4965_HT
-#ifdef CONFIG_IWL4965_HT_AGG
-
-/**
- * iwl4965_set_tx_status - Update driver's record of one Tx frame's status
- *
- * This will get sent to mac80211.
- */
-static void iwl4965_set_tx_status(struct iwl4965_priv *priv, int txq_id, int idx,
-				  u32 status, u32 retry_count, u32 rate)
-{
-	struct ieee80211_tx_status *tx_status =
-		&(priv->txq[txq_id].txb[idx].status);
-
-	tx_status->flags = status ? IEEE80211_TX_STATUS_ACK : 0;
-	tx_status->retry_count += retry_count;
-	tx_status->control.tx_rate = rate;
-}
-
-#endif/* CONFIG_IWL4965_HT_AGG */
 
 /**
  * iwl4965_sta_modify_enable_tid_tx - Enable Tx for this TID in station table
@@ -4984,11 +4593,6 @@ void iwl4965_hw_setup_deferred_work(struct iwl4965_priv *priv)
 #ifdef CONFIG_IWL4965_SENSITIVITY
 	INIT_WORK(&priv->sensitivity_work, iwl4965_bg_sensitivity_work);
 #endif
-#ifdef CONFIG_IWL4965_HT
-#ifdef CONFIG_IWL4965_HT_AGG
-	INIT_WORK(&priv->agg_work, iwl4965_bg_agg_work);
-#endif /* CONFIG_IWL4965_HT_AGG */
-#endif /* CONFIG_IWL4965_HT */
 	init_timer(&priv->statistics_periodic);
 	priv->statistics_periodic.data = (unsigned long)priv;
 	priv->statistics_periodic.function = iwl4965_bg_statistics_periodic;