tx.c

/******************************************************************************
 *
 * GPL LICENSE SUMMARY
 *
 * Copyright(c) 2008 - 2013 Intel Corporation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
 * USA
 *
 * The full GNU General Public License is included in this distribution
 * in the file called COPYING.
 *
 * Contact Information:
 *  Intel Linux Wireless <ilw@linux.intel.com>
 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
 *
 *****************************************************************************/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/sched.h>
#include <linux/ieee80211.h>
#include "iwl-io.h"
#include "iwl-trans.h"
#include "iwl-agn-hw.h"
#include "dev.h"
#include "agn.h"

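/* QoS TID (0..7) to access category mapping, per the 802.11 WMM spec */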
static const u8 tid_to_ac[] = {
	IEEE80211_AC_BE,
	IEEE80211_AC_BK,
	IEEE80211_AC_BK,
	IEEE80211_AC_BE,
	IEEE80211_AC_VI,
	IEEE80211_AC_VI,
	IEEE80211_AC_VO,
	IEEE80211_AC_VO,
};

static void iwlagn_tx_cmd_protection(struct iwl_priv *priv,
				     struct ieee80211_tx_info *info,
				     __le16 fc, __le32 *tx_flags)
{
	if (info->control.rates[0].flags & IEEE80211_TX_RC_USE_RTS_CTS ||
	    info->control.rates[0].flags & IEEE80211_TX_RC_USE_CTS_PROTECT ||
	    info->flags & IEEE80211_TX_CTL_AMPDU)
		*tx_flags |= TX_CMD_FLG_PROT_REQUIRE_MSK;
}

/*
 * handle build REPLY_TX command notification.
 */
static void iwlagn_tx_cmd_build_basic(struct iwl_priv *priv,
				      struct sk_buff *skb,
				      struct iwl_tx_cmd *tx_cmd,
				      struct ieee80211_tx_info *info,
				      struct ieee80211_hdr *hdr, u8 sta_id)
{
	__le16 fc = hdr->frame_control;
	__le32 tx_flags = tx_cmd->tx_flags;

	tx_cmd->stop_time.life_time = TX_CMD_LIFE_TIME_INFINITE;

	if (!(info->flags & IEEE80211_TX_CTL_NO_ACK))
		tx_flags |= TX_CMD_FLG_ACK_MSK;
	else
		tx_flags &= ~TX_CMD_FLG_ACK_MSK;

	if (ieee80211_is_probe_resp(fc))
		tx_flags |= TX_CMD_FLG_TSF_MSK;
	else if (ieee80211_is_back_req(fc))
		tx_flags |= TX_CMD_FLG_ACK_MSK | TX_CMD_FLG_IMM_BA_RSP_MASK;
	else if (info->band == IEEE80211_BAND_2GHZ &&
		 priv->cfg->bt_params &&
		 priv->cfg->bt_params->advanced_bt_coexist &&
		 (ieee80211_is_auth(fc) || ieee80211_is_assoc_req(fc) ||
		  ieee80211_is_reassoc_req(fc) ||
		  skb->protocol == cpu_to_be16(ETH_P_PAE)))
		tx_flags |= TX_CMD_FLG_IGNORE_BT;

	tx_cmd->sta_id = sta_id;
	if (ieee80211_has_morefrags(fc))
		tx_flags |= TX_CMD_FLG_MORE_FRAG_MSK;

	if (ieee80211_is_data_qos(fc)) {
		u8 *qc = ieee80211_get_qos_ctl(hdr);
		tx_cmd->tid_tspec = qc[0] & 0xf;
		tx_flags &= ~TX_CMD_FLG_SEQ_CTL_MSK;
	} else {
		tx_cmd->tid_tspec = IWL_TID_NON_QOS;
		if (info->flags & IEEE80211_TX_CTL_ASSIGN_SEQ)
			tx_flags |= TX_CMD_FLG_SEQ_CTL_MSK;
		else
			tx_flags &= ~TX_CMD_FLG_SEQ_CTL_MSK;
	}

	iwlagn_tx_cmd_protection(priv, info, fc, &tx_flags);

	tx_flags &= ~(TX_CMD_FLG_ANT_SEL_MSK);
	if (ieee80211_is_mgmt(fc)) {
		if (ieee80211_is_assoc_req(fc) || ieee80211_is_reassoc_req(fc))
			tx_cmd->timeout.pm_frame_timeout = cpu_to_le16(3);
		else
			tx_cmd->timeout.pm_frame_timeout = cpu_to_le16(2);
	} else {
		tx_cmd->timeout.pm_frame_timeout = 0;
	}

	tx_cmd->driver_txop = 0;
	tx_cmd->tx_flags = tx_flags;
	tx_cmd->next_frame_len = 0;
}

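/*
 * Fill the rate and retry-limit fields of the TX command: data frames
 * defer rate selection to the uCode station table, while management
 * frames get an explicit legacy rate and antenna selection here.
 */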
static void iwlagn_tx_cmd_build_rate(struct iwl_priv *priv,
				     struct iwl_tx_cmd *tx_cmd,
				     struct ieee80211_tx_info *info,
				     struct ieee80211_sta *sta,
				     __le16 fc)
{
	u32 rate_flags;
	int rate_idx;
	u8 rts_retry_limit;
	u8 data_retry_limit;
	u8 rate_plcp;

	if (priv->wowlan) {
		rts_retry_limit = IWLAGN_LOW_RETRY_LIMIT;
		data_retry_limit = IWLAGN_LOW_RETRY_LIMIT;
	} else {
		/* Set retry limit on RTS packets */
		rts_retry_limit = IWLAGN_RTS_DFAULT_RETRY_LIMIT;

		/* Set retry limit on DATA packets and Probe Responses */
		if (ieee80211_is_probe_resp(fc)) {
			data_retry_limit = IWLAGN_MGMT_DFAULT_RETRY_LIMIT;
			rts_retry_limit =
				min(data_retry_limit, rts_retry_limit);
		} else if (ieee80211_is_back_req(fc))
			data_retry_limit = IWLAGN_BAR_DFAULT_RETRY_LIMIT;
		else
			data_retry_limit = IWLAGN_DEFAULT_TX_RETRY;
	}

	tx_cmd->data_retry_limit = data_retry_limit;
	tx_cmd->rts_retry_limit = rts_retry_limit;

	/* DATA packets will use the uCode station table for rate/antenna
	 * selection */
	if (ieee80211_is_data(fc)) {
		tx_cmd->initial_rate_index = 0;
		tx_cmd->tx_flags |= TX_CMD_FLG_STA_RATE_MSK;
#ifdef CONFIG_IWLWIFI_DEVICE_TESTMODE
		if (priv->tm_fixed_rate) {
			/*
			 * Rate overridden by testmode: we don't only send
			 * the LQ command to change the rate, we also
			 * re-enforce it on a per data packet basis.
			 */
			tx_cmd->tx_flags &= ~TX_CMD_FLG_STA_RATE_MSK;
			memcpy(&tx_cmd->rate_n_flags, &priv->tm_fixed_rate,
			       sizeof(tx_cmd->rate_n_flags));
		}
#endif
		return;
	} else if (ieee80211_is_back_req(fc))
		tx_cmd->tx_flags |= TX_CMD_FLG_STA_RATE_MSK;

	/*
	 * If the current TX rate stored in mac80211 has the MCS bit set, it's
	 * not really a TX rate. Thus, we use the lowest supported rate for
	 * this band. Also use the lowest supported rate if the stored rate
	 * index is invalid.
	 */
	rate_idx = info->control.rates[0].idx;
	if (info->control.rates[0].flags & IEEE80211_TX_RC_MCS ||
	    (rate_idx < 0) || (rate_idx > IWL_RATE_COUNT_LEGACY))
		rate_idx = rate_lowest_index(
				&priv->nvm_data->bands[info->band], sta);
	/* For 5 GHZ band, remap mac80211 rate indices into driver indices */
	if (info->band == IEEE80211_BAND_5GHZ)
		rate_idx += IWL_FIRST_OFDM_RATE;
	/* Get PLCP rate for tx_cmd->rate_n_flags */
	rate_plcp = iwl_rates[rate_idx].plcp;
	/* Zero out flags for this packet */
	rate_flags = 0;

	/* Set CCK flag as needed */
	if ((rate_idx >= IWL_FIRST_CCK_RATE) && (rate_idx <= IWL_LAST_CCK_RATE))
		rate_flags |= RATE_MCS_CCK_MSK;

	/* Set up antennas */
	if (priv->cfg->bt_params &&
	    priv->cfg->bt_params->advanced_bt_coexist &&
	    priv->bt_full_concurrent) {
		/* operated as 1x1 in full concurrency mode */
		priv->mgmt_tx_ant = iwl_toggle_tx_ant(priv, priv->mgmt_tx_ant,
				first_antenna(priv->nvm_data->valid_tx_ant));
	} else
		priv->mgmt_tx_ant = iwl_toggle_tx_ant(
					priv, priv->mgmt_tx_ant,
					priv->nvm_data->valid_tx_ant);
	rate_flags |= iwl_ant_idx_to_flags(priv->mgmt_tx_ant);

	/* Set the rate in the TX cmd */
	tx_cmd->rate_n_flags = iwl_hw_set_rate_n_flags(rate_plcp, rate_flags);
}

static void iwlagn_tx_cmd_build_hwcrypto(struct iwl_priv *priv,
					 struct ieee80211_tx_info *info,
					 struct iwl_tx_cmd *tx_cmd,
					 struct sk_buff *skb_frag)
{
	struct ieee80211_key_conf *keyconf = info->control.hw_key;

	switch (keyconf->cipher) {
	case WLAN_CIPHER_SUITE_CCMP:
		tx_cmd->sec_ctl = TX_CMD_SEC_CCM;
		memcpy(tx_cmd->key, keyconf->key, keyconf->keylen);
		if (info->flags & IEEE80211_TX_CTL_AMPDU)
			tx_cmd->tx_flags |= TX_CMD_FLG_AGG_CCMP_MSK;
		break;

	case WLAN_CIPHER_SUITE_TKIP:
		tx_cmd->sec_ctl = TX_CMD_SEC_TKIP;
		ieee80211_get_tkip_p2k(keyconf, skb_frag, tx_cmd->key);
		break;

	case WLAN_CIPHER_SUITE_WEP104:
		tx_cmd->sec_ctl |= TX_CMD_SEC_KEY128;
		/* fall through */
	case WLAN_CIPHER_SUITE_WEP40:
		tx_cmd->sec_ctl |= (TX_CMD_SEC_WEP |
			(keyconf->keyidx & TX_CMD_SEC_MSK) << TX_CMD_SEC_SHIFT);

		memcpy(&tx_cmd->key[3], keyconf->key, keyconf->keylen);

		IWL_DEBUG_TX(priv, "Configuring packet for WEP encryption "
			     "with key %d\n", keyconf->keyidx);
		break;

	default:
		IWL_ERR(priv, "Unknown encode cipher %x\n", keyconf->cipher);
		break;
	}
}

/**
 * iwl_sta_id_or_broadcast - return sta_id or broadcast sta
 * @context: the current context
 * @sta: mac80211 station
 *
 * In certain circumstances mac80211 passes a station pointer
 * that may be %NULL, for example during TX or key setup. In
 * that case, we need to use the broadcast station, so this
 * inline wraps that pattern.
 */
static int iwl_sta_id_or_broadcast(struct iwl_rxon_context *context,
				   struct ieee80211_sta *sta)
{
	int sta_id;

	if (!sta)
		return context->bcast_sta_id;

	sta_id = iwl_sta_id(sta);

	/*
	 * mac80211 should not be passing a partially
	 * initialised station!
	 */
	WARN_ON(sta_id == IWL_INVALID_STATION);

	return sta_id;
}

/*
 * start REPLY_TX command process
 */
int iwlagn_tx_skb(struct iwl_priv *priv,
		  struct ieee80211_sta *sta,
		  struct sk_buff *skb)
{
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
	struct iwl_station_priv *sta_priv = NULL;
	struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
	struct iwl_device_cmd *dev_cmd;
	struct iwl_tx_cmd *tx_cmd;
	__le16 fc;
	u8 hdr_len;
	u16 len, seq_number = 0;
	u8 sta_id, tid = IWL_MAX_TID_COUNT;
	bool is_agg = false, is_data_qos = false;
	int txq_id;

	if (info->control.vif)
		ctx = iwl_rxon_ctx_from_vif(info->control.vif);

	if (iwl_is_rfkill(priv)) {
		IWL_DEBUG_DROP(priv, "Dropping - RF KILL\n");
		goto drop_unlock_priv;
	}

	fc = hdr->frame_control;

#ifdef CONFIG_IWLWIFI_DEBUG
	if (ieee80211_is_auth(fc))
		IWL_DEBUG_TX(priv, "Sending AUTH frame\n");
	else if (ieee80211_is_assoc_req(fc))
		IWL_DEBUG_TX(priv, "Sending ASSOC frame\n");
	else if (ieee80211_is_reassoc_req(fc))
		IWL_DEBUG_TX(priv, "Sending REASSOC frame\n");
#endif

	if (unlikely(ieee80211_is_probe_resp(fc))) {
		struct iwl_wipan_noa_data *noa_data =
			rcu_dereference(priv->noa_data);

		if (noa_data &&
		    pskb_expand_head(skb, 0, noa_data->length,
				     GFP_ATOMIC) == 0) {
			memcpy(skb_put(skb, noa_data->length),
			       noa_data->data, noa_data->length);
			hdr = (struct ieee80211_hdr *)skb->data;
		}
	}

	hdr_len = ieee80211_hdrlen(fc);

	/* For management frames use broadcast id so as not to break
	 * aggregation */
	if (!ieee80211_is_data(fc))
		sta_id = ctx->bcast_sta_id;
	else {
		/* Find index into station table for destination station */
		sta_id = iwl_sta_id_or_broadcast(ctx, sta);
		if (sta_id == IWL_INVALID_STATION) {
			IWL_DEBUG_DROP(priv, "Dropping - INVALID STATION: %pM\n",
				       hdr->addr1);
			goto drop_unlock_priv;
		}
	}

	if (sta)
		sta_priv = (void *)sta->drv_priv;

	if (sta_priv && sta_priv->asleep &&
	    (info->flags & IEEE80211_TX_CTL_NO_PS_BUFFER)) {
		/*
		 * This sends an asynchronous command to the device,
		 * but we can rely on it being processed before the
		 * next frame is processed -- and the next frame to
		 * this station is the one that will consume this
		 * counter.
		 * For now set the counter to just 1 since we do not
		 * support uAPSD yet.
		 *
		 * FIXME: If we get two non-bufferable frames one
		 * after the other, we might only send out one of
		 * them because this is racy.
		 */
		iwl_sta_modify_sleep_tx_count(priv, sta_id, 1);
	}

	dev_cmd = iwl_trans_alloc_tx_cmd(priv->trans);

	if (unlikely(!dev_cmd))
		goto drop_unlock_priv;

	memset(dev_cmd, 0, sizeof(*dev_cmd));
	tx_cmd = (struct iwl_tx_cmd *) dev_cmd->payload;

	/* Total # bytes to be transmitted */
	len = (u16)skb->len;
	tx_cmd->len = cpu_to_le16(len);

	if (info->control.hw_key)
		iwlagn_tx_cmd_build_hwcrypto(priv, info, tx_cmd, skb);

	/* TODO need this for burst mode later on */
	iwlagn_tx_cmd_build_basic(priv, skb, tx_cmd, info, hdr, sta_id);

	iwlagn_tx_cmd_build_rate(priv, tx_cmd, info, sta, fc);

	memset(&info->status, 0, sizeof(info->status));

	info->driver_data[0] = ctx;
	info->driver_data[1] = dev_cmd;
	/* From now on, we cannot access info->control */

	spin_lock(&priv->sta_lock);

	if (ieee80211_is_data_qos(fc) && !ieee80211_is_qos_nullfunc(fc)) {
		u8 *qc = NULL;
		struct iwl_tid_data *tid_data;
		qc = ieee80211_get_qos_ctl(hdr);
		tid = qc[0] & IEEE80211_QOS_CTL_TID_MASK;
		if (WARN_ON_ONCE(tid >= IWL_MAX_TID_COUNT))
			goto drop_unlock_sta;
		tid_data = &priv->tid_data[sta_id][tid];

		/* aggregation is on for this <sta,tid> */
		if (info->flags & IEEE80211_TX_CTL_AMPDU &&
		    tid_data->agg.state != IWL_AGG_ON) {
			IWL_ERR(priv, "TX_CTL_AMPDU while not in AGG:"
				" Tx flags = 0x%08x, agg.state = %d",
				info->flags, tid_data->agg.state);
			IWL_ERR(priv, "sta_id = %d, tid = %d seq_num = %d",
				sta_id, tid,
				IEEE80211_SEQ_TO_SN(tid_data->seq_number));
			goto drop_unlock_sta;
		}

		/* We can receive packets from the stack in IWL_AGG_{ON,OFF}
		 * only. Check this here.
		 */
		if (WARN_ONCE(tid_data->agg.state != IWL_AGG_ON &&
			      tid_data->agg.state != IWL_AGG_OFF,
			      "Tx while agg.state = %d", tid_data->agg.state))
			goto drop_unlock_sta;

		seq_number = tid_data->seq_number;
		seq_number &= IEEE80211_SCTL_SEQ;
		hdr->seq_ctrl &= cpu_to_le16(IEEE80211_SCTL_FRAG);
		hdr->seq_ctrl |= cpu_to_le16(seq_number);
		seq_number += 0x10;

		if (info->flags & IEEE80211_TX_CTL_AMPDU)
			is_agg = true;
		is_data_qos = true;
	}

	/* Copy MAC header from skb into command buffer */
	memcpy(tx_cmd->hdr, hdr, hdr_len);

	if (is_agg)
		txq_id = priv->tid_data[sta_id][tid].agg.txq_id;
	else if (info->flags & IEEE80211_TX_CTL_SEND_AFTER_DTIM) {
		/*
		 * Send this frame after DTIM -- there's a special queue
		 * reserved for this for contexts that support AP mode.
		 */
		txq_id = ctx->mcast_queue;

		/*
		 * The microcode will clear the more data
		 * bit in the last frame it transmits.
		 */
		hdr->frame_control |=
			cpu_to_le16(IEEE80211_FCTL_MOREDATA);
	} else if (info->flags & IEEE80211_TX_CTL_TX_OFFCHAN)
		txq_id = IWL_AUX_QUEUE;
	else
		txq_id = ctx->ac_to_queue[skb_get_queue_mapping(skb)];

	WARN_ON_ONCE(!is_agg && txq_id != info->hw_queue);
	WARN_ON_ONCE(is_agg &&
		     priv->queue_to_mac80211[txq_id] != info->hw_queue);

	IWL_DEBUG_TX(priv, "TX to [%d|%d] Q:%d - seq: 0x%x\n", sta_id, tid,
		     txq_id, seq_number);

	if (iwl_trans_tx(priv->trans, skb, dev_cmd, txq_id))
		goto drop_unlock_sta;

	if (is_data_qos && !ieee80211_has_morefrags(fc))
		priv->tid_data[sta_id][tid].seq_number = seq_number;

	spin_unlock(&priv->sta_lock);

	/*
	 * Avoid atomic ops if it isn't an associated client.
	 * Also, if this is a packet for aggregation, don't
	 * increase the counter because the ucode will stop
	 * aggregation queues when their respective station
	 * goes to sleep.
	 */
	if (sta_priv && sta_priv->client && !is_agg)
		atomic_inc(&sta_priv->pending_frames);

	if (info->flags & IEEE80211_TX_CTL_TX_OFFCHAN)
		iwl_scan_offchannel_skb(priv);

	return 0;

drop_unlock_sta:
	if (dev_cmd)
		iwl_trans_free_tx_cmd(priv->trans, dev_cmd);
	spin_unlock(&priv->sta_lock);
drop_unlock_priv:
	return -1;
}

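/*
 * Pick a free HW queue for a new aggregation session and remember which
 * mac80211 queue it serves; returns -ENOSPC when all aggregation queues
 * are in use.
 */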
static int iwlagn_alloc_agg_txq(struct iwl_priv *priv, int mq)
{
	int q;

	for (q = IWLAGN_FIRST_AMPDU_QUEUE;
	     q < priv->cfg->base_params->num_of_queues; q++) {
		if (!test_and_set_bit(q, priv->agg_q_alloc)) {
			priv->queue_to_mac80211[q] = mq;
			return q;
		}
	}

	return -ENOSPC;
}

static void iwlagn_dealloc_agg_txq(struct iwl_priv *priv, int q)
{
	clear_bit(q, priv->agg_q_alloc);
	priv->queue_to_mac80211[q] = IWL_INVALID_MAC80211_QUEUE;
}

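/*
 * Stop an aggregation session. Depending on the current state this
 * either tears the session down immediately or marks the queue as
 * emptying (IWL_EMPTYING_HW_QUEUE_DELBA) so the teardown completes
 * once all outstanding frames have been reclaimed.
 */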
int iwlagn_tx_agg_stop(struct iwl_priv *priv, struct ieee80211_vif *vif,
		       struct ieee80211_sta *sta, u16 tid)
{
	struct iwl_tid_data *tid_data;
	int sta_id, txq_id;
	enum iwl_agg_state agg_state;

	sta_id = iwl_sta_id(sta);

	if (sta_id == IWL_INVALID_STATION) {
		IWL_ERR(priv, "Invalid station for AGG tid %d\n", tid);
		return -ENXIO;
	}

	spin_lock_bh(&priv->sta_lock);

	tid_data = &priv->tid_data[sta_id][tid];
	txq_id = tid_data->agg.txq_id;

	switch (tid_data->agg.state) {
	case IWL_EMPTYING_HW_QUEUE_ADDBA:
		/*
		 * This can happen if the peer stops aggregation
		 * again before we've had a chance to drain the
		 * queue we selected previously, i.e. before the
		 * session was really started completely.
		 */
		IWL_DEBUG_HT(priv, "AGG stop before setup done\n");
		goto turn_off;
	case IWL_AGG_STARTING:
		/*
		 * This can happen when the session is stopped before
		 * we receive ADDBA response
		 */
		IWL_DEBUG_HT(priv, "AGG stop before AGG became operational\n");
		goto turn_off;
	case IWL_AGG_ON:
		break;
	default:
		IWL_WARN(priv,
			 "Stopping AGG while state not ON or starting for %d on %d (%d)\n",
			 sta_id, tid, tid_data->agg.state);
		spin_unlock_bh(&priv->sta_lock);
		return 0;
	}

	tid_data->agg.ssn = IEEE80211_SEQ_TO_SN(tid_data->seq_number);

	/* There are still packets for this RA / TID in the HW */
	if (!test_bit(txq_id, priv->agg_q_alloc)) {
		IWL_DEBUG_TX_QUEUES(priv,
			"stopping AGG on STA/TID %d/%d but hwq %d not used\n",
			sta_id, tid, txq_id);
	} else if (tid_data->agg.ssn != tid_data->next_reclaimed) {
		IWL_DEBUG_TX_QUEUES(priv,
				    "Can't proceed: ssn %d, next_recl = %d\n",
				    tid_data->agg.ssn,
				    tid_data->next_reclaimed);
		tid_data->agg.state = IWL_EMPTYING_HW_QUEUE_DELBA;
		spin_unlock_bh(&priv->sta_lock);
		return 0;
	}

	IWL_DEBUG_TX_QUEUES(priv, "Can proceed: ssn = next_recl = %d\n",
			    tid_data->agg.ssn);
turn_off:
	agg_state = tid_data->agg.state;
	tid_data->agg.state = IWL_AGG_OFF;

	spin_unlock_bh(&priv->sta_lock);

	if (test_bit(txq_id, priv->agg_q_alloc)) {
		/*
		 * If the transport didn't know that we wanted to start
		 * aggregation, don't tell it to stop. This can happen
		 * when we don't get the addBA response in time, or didn't
		 * have time to drain the AC queues.
		 */
		if (agg_state == IWL_AGG_ON)
			iwl_trans_txq_disable(priv->trans, txq_id);
		else
			IWL_DEBUG_TX_QUEUES(priv, "Don't disable tx agg: %d\n",
					    agg_state);

		iwlagn_dealloc_agg_txq(priv, txq_id);
	}

	ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid);

	return 0;
}

int iwlagn_tx_agg_start(struct iwl_priv *priv, struct ieee80211_vif *vif,
			struct ieee80211_sta *sta, u16 tid, u16 *ssn)
{
	struct iwl_rxon_context *ctx = iwl_rxon_ctx_from_vif(vif);
	struct iwl_tid_data *tid_data;
	int sta_id, txq_id, ret;

	IWL_DEBUG_HT(priv, "TX AGG request on ra = %pM tid = %d\n",
		     sta->addr, tid);

	sta_id = iwl_sta_id(sta);
	if (sta_id == IWL_INVALID_STATION) {
		IWL_ERR(priv, "Start AGG on invalid station\n");
		return -ENXIO;
	}
	if (unlikely(tid >= IWL_MAX_TID_COUNT))
		return -EINVAL;

	if (priv->tid_data[sta_id][tid].agg.state != IWL_AGG_OFF) {
		IWL_ERR(priv, "Start AGG when state is not IWL_AGG_OFF !\n");
		return -ENXIO;
	}

	txq_id = iwlagn_alloc_agg_txq(priv, ctx->ac_to_queue[tid_to_ac[tid]]);
	if (txq_id < 0) {
		IWL_DEBUG_TX_QUEUES(priv,
			"No free aggregation queue for %pM/%d\n",
			sta->addr, tid);
		return txq_id;
	}

	ret = iwl_sta_tx_modify_enable_tid(priv, sta_id, tid);
	if (ret)
		return ret;

	spin_lock_bh(&priv->sta_lock);
	tid_data = &priv->tid_data[sta_id][tid];
	tid_data->agg.ssn = IEEE80211_SEQ_TO_SN(tid_data->seq_number);
	tid_data->agg.txq_id = txq_id;

	*ssn = tid_data->agg.ssn;

	if (*ssn == tid_data->next_reclaimed) {
		IWL_DEBUG_TX_QUEUES(priv, "Can proceed: ssn = next_recl = %d\n",
				    tid_data->agg.ssn);
		tid_data->agg.state = IWL_AGG_STARTING;
		ieee80211_start_tx_ba_cb_irqsafe(vif, sta->addr, tid);
	} else {
		IWL_DEBUG_TX_QUEUES(priv, "Can't proceed: ssn %d, "
				    "next_reclaimed = %d\n",
				    tid_data->agg.ssn,
				    tid_data->next_reclaimed);
		tid_data->agg.state = IWL_EMPTYING_HW_QUEUE_ADDBA;
	}
	spin_unlock_bh(&priv->sta_lock);

	return ret;
}

int iwlagn_tx_agg_flush(struct iwl_priv *priv, struct ieee80211_vif *vif,
			struct ieee80211_sta *sta, u16 tid)
{
	struct iwl_tid_data *tid_data;
	enum iwl_agg_state agg_state;
	int sta_id, txq_id;

	sta_id = iwl_sta_id(sta);

	/*
	 * First set the agg state to OFF to avoid calling
	 * ieee80211_stop_tx_ba_cb in iwlagn_check_ratid_empty.
	 */
	spin_lock_bh(&priv->sta_lock);

	tid_data = &priv->tid_data[sta_id][tid];
	txq_id = tid_data->agg.txq_id;
	agg_state = tid_data->agg.state;
	IWL_DEBUG_TX_QUEUES(priv, "Flush AGG: sta %d tid %d q %d state %d\n",
			    sta_id, tid, txq_id, tid_data->agg.state);

	tid_data->agg.state = IWL_AGG_OFF;

	spin_unlock_bh(&priv->sta_lock);

	if (iwlagn_txfifo_flush(priv, BIT(txq_id)))
		IWL_ERR(priv, "Couldn't flush the AGG queue\n");

	if (test_bit(txq_id, priv->agg_q_alloc)) {
		/*
		 * If the transport didn't know that we wanted to start
		 * aggregation, don't tell it to stop. This can happen
		 * when we don't get the addBA response in time, or didn't
		 * have time to drain the AC queues.
		 */
		if (agg_state == IWL_AGG_ON)
			iwl_trans_txq_disable(priv->trans, txq_id);
		else
			IWL_DEBUG_TX_QUEUES(priv, "Don't disable tx agg: %d\n",
					    agg_state);

		iwlagn_dealloc_agg_txq(priv, txq_id);
	}

	return 0;
}

int iwlagn_tx_agg_oper(struct iwl_priv *priv, struct ieee80211_vif *vif,
		       struct ieee80211_sta *sta, u16 tid, u8 buf_size)
{
	struct iwl_station_priv *sta_priv = (void *) sta->drv_priv;
	struct iwl_rxon_context *ctx = iwl_rxon_ctx_from_vif(vif);
	int q, fifo;
	u16 ssn;

	buf_size = min_t(int, buf_size, LINK_QUAL_AGG_FRAME_LIMIT_DEF);

	spin_lock_bh(&priv->sta_lock);
	ssn = priv->tid_data[sta_priv->sta_id][tid].agg.ssn;
	q = priv->tid_data[sta_priv->sta_id][tid].agg.txq_id;
	priv->tid_data[sta_priv->sta_id][tid].agg.state = IWL_AGG_ON;
	spin_unlock_bh(&priv->sta_lock);

	fifo = ctx->ac_to_fifo[tid_to_ac[tid]];

	iwl_trans_txq_enable(priv->trans, q, fifo, sta_priv->sta_id, tid,
			     buf_size, ssn);

	/*
	 * If the limit is 0, then it wasn't initialised yet,
	 * use the default. We can do that since we take the
	 * minimum below, and we don't want to go above our
	 * default due to hardware restrictions.
	 */
	if (sta_priv->max_agg_bufsize == 0)
		sta_priv->max_agg_bufsize =
			LINK_QUAL_AGG_FRAME_LIMIT_DEF;

	/*
	 * Even though in theory the peer could have different
	 * aggregation reorder buffer sizes for different sessions,
	 * our ucode doesn't allow for that and has a global limit
	 * for each station. Therefore, use the minimum of all the
	 * aggregation sessions and our default value.
	 */
	sta_priv->max_agg_bufsize =
		min(sta_priv->max_agg_bufsize, buf_size);

	if (priv->hw_params.use_rts_for_aggregation) {
		/*
		 * switch to RTS/CTS if it is the preferred protection
		 * method for HT traffic
		 */
		sta_priv->lq_sta.lq.general_params.flags |=
			LINK_QUAL_FLAGS_SET_STA_TLC_RTS_MSK;
	}
	priv->agg_tids_count++;
	IWL_DEBUG_HT(priv, "priv->agg_tids_count = %u\n",
		     priv->agg_tids_count);

	sta_priv->lq_sta.lq.agg_params.agg_frame_cnt_limit =
		sta_priv->max_agg_bufsize;

	IWL_DEBUG_HT(priv, "Tx aggregation enabled on ra = %pM tid = %d\n",
		     sta->addr, tid);

	return iwl_send_lq_cmd(priv, ctx,
			       &sta_priv->lq_sta.lq, CMD_ASYNC, false);
}

static void iwlagn_check_ratid_empty(struct iwl_priv *priv, int sta_id, u8 tid)
{
	struct iwl_tid_data *tid_data = &priv->tid_data[sta_id][tid];
	enum iwl_rxon_context_id ctx;
	struct ieee80211_vif *vif;
	u8 *addr;

	lockdep_assert_held(&priv->sta_lock);

	addr = priv->stations[sta_id].sta.sta.addr;
	ctx = priv->stations[sta_id].ctxid;
	vif = priv->contexts[ctx].vif;

	switch (priv->tid_data[sta_id][tid].agg.state) {
	case IWL_EMPTYING_HW_QUEUE_DELBA:
		/* There are no packets for this RA / TID in the HW any more */
		if (tid_data->agg.ssn == tid_data->next_reclaimed) {
			IWL_DEBUG_TX_QUEUES(priv,
				"Can continue DELBA flow ssn = next_recl ="
				" %d", tid_data->next_reclaimed);
			iwl_trans_txq_disable(priv->trans,
					      tid_data->agg.txq_id);
			iwlagn_dealloc_agg_txq(priv, tid_data->agg.txq_id);
			tid_data->agg.state = IWL_AGG_OFF;
			ieee80211_stop_tx_ba_cb_irqsafe(vif, addr, tid);
		}
		break;
	case IWL_EMPTYING_HW_QUEUE_ADDBA:
		/* There are no packets for this RA / TID in the HW any more */
		if (tid_data->agg.ssn == tid_data->next_reclaimed) {
			IWL_DEBUG_TX_QUEUES(priv,
				"Can continue ADDBA flow ssn = next_recl ="
				" %d", tid_data->next_reclaimed);
			tid_data->agg.state = IWL_AGG_STARTING;
			ieee80211_start_tx_ba_cb_irqsafe(vif, addr, tid);
		}
		break;
	default:
		break;
	}
}

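/*
 * For non-aggregated frames: drop the per-station pending-frames count
 * and let mac80211 wake the station once it reaches zero.
 */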
static void iwlagn_non_agg_tx_status(struct iwl_priv *priv,
				     struct iwl_rxon_context *ctx,
				     const u8 *addr1)
{
	struct ieee80211_sta *sta;
	struct iwl_station_priv *sta_priv;

	rcu_read_lock();
	sta = ieee80211_find_sta(ctx->vif, addr1);
	if (sta) {
		sta_priv = (void *)sta->drv_priv;
		/* avoid atomic ops if this isn't a client */
		if (sta_priv->client &&
		    atomic_dec_return(&sta_priv->pending_frames) == 0)
			ieee80211_sta_block_awake(priv->hw, sta, false);
	}
	rcu_read_unlock();
}

/*
 * translate ucode response to mac80211 tx status control values
 */
static void iwlagn_hwrate_to_tx_control(struct iwl_priv *priv, u32 rate_n_flags,
					struct ieee80211_tx_info *info)
{
	struct ieee80211_tx_rate *r = &info->status.rates[0];

	info->status.antenna =
		((rate_n_flags & RATE_MCS_ANT_ABC_MSK) >> RATE_MCS_ANT_POS);
	if (rate_n_flags & RATE_MCS_HT_MSK)
		r->flags |= IEEE80211_TX_RC_MCS;
	if (rate_n_flags & RATE_MCS_GF_MSK)
		r->flags |= IEEE80211_TX_RC_GREEN_FIELD;
	if (rate_n_flags & RATE_MCS_HT40_MSK)
		r->flags |= IEEE80211_TX_RC_40_MHZ_WIDTH;
	if (rate_n_flags & RATE_MCS_DUP_MSK)
		r->flags |= IEEE80211_TX_RC_DUP_DATA;
	if (rate_n_flags & RATE_MCS_SGI_MSK)
		r->flags |= IEEE80211_TX_RC_SHORT_GI;
	r->idx = iwlagn_hwrate_to_mac80211_idx(rate_n_flags, info->band);
}

#ifdef CONFIG_IWLWIFI_DEBUG
const char *iwl_get_tx_fail_reason(u32 status)
{
#define TX_STATUS_FAIL(x) case TX_STATUS_FAIL_ ## x: return #x
#define TX_STATUS_POSTPONE(x) case TX_STATUS_POSTPONE_ ## x: return #x

	switch (status & TX_STATUS_MSK) {
	case TX_STATUS_SUCCESS:
		return "SUCCESS";
	TX_STATUS_POSTPONE(DELAY);
	TX_STATUS_POSTPONE(FEW_BYTES);
	TX_STATUS_POSTPONE(BT_PRIO);
	TX_STATUS_POSTPONE(QUIET_PERIOD);
	TX_STATUS_POSTPONE(CALC_TTAK);
	TX_STATUS_FAIL(INTERNAL_CROSSED_RETRY);
	TX_STATUS_FAIL(SHORT_LIMIT);
	TX_STATUS_FAIL(LONG_LIMIT);
	TX_STATUS_FAIL(FIFO_UNDERRUN);
	TX_STATUS_FAIL(DRAIN_FLOW);
	TX_STATUS_FAIL(RFKILL_FLUSH);
	TX_STATUS_FAIL(LIFE_EXPIRE);
	TX_STATUS_FAIL(DEST_PS);
	TX_STATUS_FAIL(HOST_ABORTED);
	TX_STATUS_FAIL(BT_RETRY);
	TX_STATUS_FAIL(STA_INVALID);
	TX_STATUS_FAIL(FRAG_DROPPED);
	TX_STATUS_FAIL(TID_DISABLE);
	TX_STATUS_FAIL(FIFO_FLUSHED);
	TX_STATUS_FAIL(INSUFFICIENT_CF_POLL);
	TX_STATUS_FAIL(PASSIVE_NO_RX);
	TX_STATUS_FAIL(NO_BEACON_ON_RADAR);
	}

	return "UNKNOWN";

#undef TX_STATUS_FAIL
#undef TX_STATUS_POSTPONE
}
#endif /* CONFIG_IWLWIFI_DEBUG */

static void iwlagn_count_agg_tx_err_status(struct iwl_priv *priv, u16 status)
{
	status &= AGG_TX_STATUS_MSK;

	switch (status) {
	case AGG_TX_STATE_UNDERRUN_MSK:
		priv->reply_agg_tx_stats.underrun++;
		break;
	case AGG_TX_STATE_BT_PRIO_MSK:
		priv->reply_agg_tx_stats.bt_prio++;
		break;
	case AGG_TX_STATE_FEW_BYTES_MSK:
		priv->reply_agg_tx_stats.few_bytes++;
		break;
	case AGG_TX_STATE_ABORT_MSK:
		priv->reply_agg_tx_stats.abort++;
		break;
	case AGG_TX_STATE_LAST_SENT_TTL_MSK:
		priv->reply_agg_tx_stats.last_sent_ttl++;
		break;
	case AGG_TX_STATE_LAST_SENT_TRY_CNT_MSK:
		priv->reply_agg_tx_stats.last_sent_try++;
		break;
	case AGG_TX_STATE_LAST_SENT_BT_KILL_MSK:
		priv->reply_agg_tx_stats.last_sent_bt_kill++;
		break;
	case AGG_TX_STATE_SCD_QUERY_MSK:
		priv->reply_agg_tx_stats.scd_query++;
		break;
	case AGG_TX_STATE_TEST_BAD_CRC32_MSK:
		priv->reply_agg_tx_stats.bad_crc32++;
		break;
	case AGG_TX_STATE_RESPONSE_MSK:
		priv->reply_agg_tx_stats.response++;
		break;
	case AGG_TX_STATE_DUMP_TX_MSK:
		priv->reply_agg_tx_stats.dump_tx++;
		break;
	case AGG_TX_STATE_DELAY_TX_MSK:
		priv->reply_agg_tx_stats.delay_tx++;
		break;
	default:
		priv->reply_agg_tx_stats.unknown++;
		break;
	}
}

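/*
 * The scheduler SSN follows the per-frame status array in the TX
 * response: read the 32-bit word at index frame_count and mask it down
 * to a 12-bit sequence number.
 */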
static inline u32 iwlagn_get_scd_ssn(struct iwlagn_tx_resp *tx_resp)
{
	return le32_to_cpup((__le32 *)&tx_resp->status +
			    tx_resp->frame_count) & IEEE80211_MAX_SN;
}

static void iwl_rx_reply_tx_agg(struct iwl_priv *priv,
				struct iwlagn_tx_resp *tx_resp)
{
	struct agg_tx_status *frame_status = &tx_resp->status;
	int tid = (tx_resp->ra_tid & IWLAGN_TX_RES_TID_MSK) >>
		IWLAGN_TX_RES_TID_POS;
	int sta_id = (tx_resp->ra_tid & IWLAGN_TX_RES_RA_MSK) >>
		IWLAGN_TX_RES_RA_POS;
	struct iwl_ht_agg *agg = &priv->tid_data[sta_id][tid].agg;
	u32 status = le16_to_cpu(tx_resp->status.status);
	int i;

	WARN_ON(tid == IWL_TID_NON_QOS);

	if (agg->wait_for_ba)
		IWL_DEBUG_TX_REPLY(priv,
				   "got tx response w/o block-ack\n");

	agg->rate_n_flags = le32_to_cpu(tx_resp->rate_n_flags);
	agg->wait_for_ba = (tx_resp->frame_count > 1);

	/*
	 * If the BT kill count is non-zero, we'll get this
	 * notification again.
	 */
	if (tx_resp->bt_kill_count && tx_resp->frame_count == 1 &&
	    priv->cfg->bt_params &&
	    priv->cfg->bt_params->advanced_bt_coexist) {
		IWL_DEBUG_COEX(priv, "receive reply tx w/ bt_kill\n");
	}

	if (tx_resp->frame_count == 1)
		return;

	IWL_DEBUG_TX_REPLY(priv, "TXQ %d initial_rate 0x%x ssn %d frm_cnt %d\n",
			   agg->txq_id,
			   le32_to_cpu(tx_resp->rate_n_flags),
			   iwlagn_get_scd_ssn(tx_resp), tx_resp->frame_count);

	/* Construct bit-map of pending frames within Tx window */
	for (i = 0; i < tx_resp->frame_count; i++) {
		u16 fstatus = le16_to_cpu(frame_status[i].status);
		u8 retry_cnt = (fstatus & AGG_TX_TRY_MSK) >> AGG_TX_TRY_POS;

		if (status & AGG_TX_STATUS_MSK)
			iwlagn_count_agg_tx_err_status(priv, fstatus);

		if (status & (AGG_TX_STATE_FEW_BYTES_MSK |
			      AGG_TX_STATE_ABORT_MSK))
			continue;

		if (status & AGG_TX_STATUS_MSK || retry_cnt > 1)
			IWL_DEBUG_TX_REPLY(priv,
					   "%d: status %s (0x%04x), try-count (0x%01x)\n",
					   i,
					   iwl_get_agg_tx_fail_reason(fstatus),
					   fstatus & AGG_TX_STATUS_MSK,
					   retry_cnt);
	}
}

#ifdef CONFIG_IWLWIFI_DEBUG
#define AGG_TX_STATE_FAIL(x) case AGG_TX_STATE_ ## x: return #x

const char *iwl_get_agg_tx_fail_reason(u16 status)
{
	status &= AGG_TX_STATUS_MSK;
	switch (status) {
	case AGG_TX_STATE_TRANSMITTED:
		return "SUCCESS";
		AGG_TX_STATE_FAIL(UNDERRUN_MSK);
		AGG_TX_STATE_FAIL(BT_PRIO_MSK);
		AGG_TX_STATE_FAIL(FEW_BYTES_MSK);
		AGG_TX_STATE_FAIL(ABORT_MSK);
		AGG_TX_STATE_FAIL(LAST_SENT_TTL_MSK);
		AGG_TX_STATE_FAIL(LAST_SENT_TRY_CNT_MSK);
		AGG_TX_STATE_FAIL(LAST_SENT_BT_KILL_MSK);
		AGG_TX_STATE_FAIL(SCD_QUERY_MSK);
		AGG_TX_STATE_FAIL(TEST_BAD_CRC32_MSK);
		AGG_TX_STATE_FAIL(RESPONSE_MSK);
		AGG_TX_STATE_FAIL(DUMP_TX_MSK);
		AGG_TX_STATE_FAIL(DELAY_TX_MSK);
	}

	return "UNKNOWN";
}
#endif /* CONFIG_IWLWIFI_DEBUG */

static void iwlagn_count_tx_err_status(struct iwl_priv *priv, u16 status)
{
	status &= TX_STATUS_MSK;

	switch (status) {
	case TX_STATUS_POSTPONE_DELAY:
		priv->reply_tx_stats.pp_delay++;
		break;
	case TX_STATUS_POSTPONE_FEW_BYTES:
		priv->reply_tx_stats.pp_few_bytes++;
		break;
	case TX_STATUS_POSTPONE_BT_PRIO:
		priv->reply_tx_stats.pp_bt_prio++;
		break;
	case TX_STATUS_POSTPONE_QUIET_PERIOD:
		priv->reply_tx_stats.pp_quiet_period++;
		break;
	case TX_STATUS_POSTPONE_CALC_TTAK:
		priv->reply_tx_stats.pp_calc_ttak++;
		break;
	case TX_STATUS_FAIL_INTERNAL_CROSSED_RETRY:
		priv->reply_tx_stats.int_crossed_retry++;
		break;
	case TX_STATUS_FAIL_SHORT_LIMIT:
		priv->reply_tx_stats.short_limit++;
		break;
	case TX_STATUS_FAIL_LONG_LIMIT:
		priv->reply_tx_stats.long_limit++;
		break;
	case TX_STATUS_FAIL_FIFO_UNDERRUN:
		priv->reply_tx_stats.fifo_underrun++;
		break;
	case TX_STATUS_FAIL_DRAIN_FLOW:
		priv->reply_tx_stats.drain_flow++;
		break;
	case TX_STATUS_FAIL_RFKILL_FLUSH:
		priv->reply_tx_stats.rfkill_flush++;
		break;
	case TX_STATUS_FAIL_LIFE_EXPIRE:
		priv->reply_tx_stats.life_expire++;
		break;
	case TX_STATUS_FAIL_DEST_PS:
		priv->reply_tx_stats.dest_ps++;
		break;
	case TX_STATUS_FAIL_HOST_ABORTED:
		priv->reply_tx_stats.host_abort++;
		break;
	case TX_STATUS_FAIL_BT_RETRY:
		priv->reply_tx_stats.bt_retry++;
		break;
	case TX_STATUS_FAIL_STA_INVALID:
		priv->reply_tx_stats.sta_invalid++;
		break;
	case TX_STATUS_FAIL_FRAG_DROPPED:
		priv->reply_tx_stats.frag_drop++;
		break;
	case TX_STATUS_FAIL_TID_DISABLE:
		priv->reply_tx_stats.tid_disable++;
		break;
	case TX_STATUS_FAIL_FIFO_FLUSHED:
		priv->reply_tx_stats.fifo_flush++;
		break;
	case TX_STATUS_FAIL_INSUFFICIENT_CF_POLL:
		priv->reply_tx_stats.insuff_cf_poll++;
		break;
	case TX_STATUS_FAIL_PASSIVE_NO_RX:
		priv->reply_tx_stats.fail_hw_drop++;
		break;
	case TX_STATUS_FAIL_NO_BEACON_ON_RADAR:
		priv->reply_tx_stats.sta_color_mismatch++;
		break;
	default:
		priv->reply_tx_stats.unknown++;
		break;
	}
}

static void iwlagn_set_tx_status(struct iwl_priv *priv,
				 struct ieee80211_tx_info *info,
				 struct iwlagn_tx_resp *tx_resp)
{
	u16 status = le16_to_cpu(tx_resp->status.status);

	info->flags &= ~IEEE80211_TX_CTL_AMPDU;

	info->status.rates[0].count = tx_resp->failure_frame + 1;
	info->flags |= iwl_tx_status_to_mac80211(status);
	iwlagn_hwrate_to_tx_control(priv, le32_to_cpu(tx_resp->rate_n_flags),
				    info);
	if (!iwl_is_tx_success(status))
		iwlagn_count_tx_err_status(priv, status);
}

static void iwl_check_abort_status(struct iwl_priv *priv,
				   u8 frame_count, u32 status)
{
	if (frame_count == 1 && status == TX_STATUS_FAIL_RFKILL_FLUSH) {
		IWL_ERR(priv, "Tx flush command to flush out all frames\n");
		if (!test_bit(STATUS_EXIT_PENDING, &priv->status))
			queue_work(priv->workqueue, &priv->tx_flush);
	}
}

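/*
 * Handler for REPLY_TX: reclaims the transmitted frames from the
 * transport, translates the uCode status into mac80211 TX status and
 * hands the skbs back to mac80211.
 */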
int iwlagn_rx_reply_tx(struct iwl_priv *priv, struct iwl_rx_cmd_buffer *rxb,
		       struct iwl_device_cmd *cmd)
{
	struct iwl_rx_packet *pkt = rxb_addr(rxb);
	u16 sequence = le16_to_cpu(pkt->hdr.sequence);
	int txq_id = SEQ_TO_QUEUE(sequence);
	int cmd_index __maybe_unused = SEQ_TO_INDEX(sequence);
	struct iwlagn_tx_resp *tx_resp = (void *)pkt->data;
	struct ieee80211_hdr *hdr;
	u32 status = le16_to_cpu(tx_resp->status.status);
	u16 ssn = iwlagn_get_scd_ssn(tx_resp);
	int tid;
	int sta_id;
	int freed;
	struct ieee80211_tx_info *info;
	struct sk_buff_head skbs;
	struct sk_buff *skb;
	struct iwl_rxon_context *ctx;
	bool is_agg = (txq_id >= IWLAGN_FIRST_AMPDU_QUEUE);
	bool is_offchannel_skb;

	tid = (tx_resp->ra_tid & IWLAGN_TX_RES_TID_MSK) >>
		IWLAGN_TX_RES_TID_POS;
	sta_id = (tx_resp->ra_tid & IWLAGN_TX_RES_RA_MSK) >>
		IWLAGN_TX_RES_RA_POS;

	spin_lock_bh(&priv->sta_lock);

	if (is_agg) {
		WARN_ON_ONCE(sta_id >= IWLAGN_STATION_COUNT ||
			     tid >= IWL_MAX_TID_COUNT);
		if (txq_id != priv->tid_data[sta_id][tid].agg.txq_id)
			IWL_ERR(priv, "txq_id mismatch: %d %d\n", txq_id,
				priv->tid_data[sta_id][tid].agg.txq_id);
		iwl_rx_reply_tx_agg(priv, tx_resp);
	}

	__skb_queue_head_init(&skbs);

	is_offchannel_skb = false;

	if (tx_resp->frame_count == 1) {
		u16 next_reclaimed = le16_to_cpu(tx_resp->seq_ctl);
		next_reclaimed = IEEE80211_SEQ_TO_SN(next_reclaimed + 0x10);

		if (is_agg) {
			/* If this is an aggregation queue, we can rely on the
			 * ssn since the wifi sequence number corresponds to
			 * the index in the TFD ring (%256).
			 * The seq_ctl is the sequence control of the packet
			 * to which this Tx response relates. But if there is a
			 * hole in the bitmap of the BA we received, this Tx
			 * response may allow to reclaim the hole and all the
			 * subsequent packets that were already acked.
			 * In that case, seq_ctl != ssn, and the next packet
			 * to be reclaimed will be ssn and not seq_ctl.
			 */
			next_reclaimed = ssn;
		}

		if (tid != IWL_TID_NON_QOS) {
			priv->tid_data[sta_id][tid].next_reclaimed =
				next_reclaimed;
			IWL_DEBUG_TX_REPLY(priv, "Next reclaimed packet:%d\n",
					   next_reclaimed);
		}

		iwl_trans_reclaim(priv->trans, txq_id, ssn, &skbs);

		iwlagn_check_ratid_empty(priv, sta_id, tid);

		freed = 0;

		/* process frames */
		skb_queue_walk(&skbs, skb) {
			hdr = (struct ieee80211_hdr *)skb->data;

			if (!ieee80211_is_data_qos(hdr->frame_control))
				priv->last_seq_ctl = tx_resp->seq_ctl;

			info = IEEE80211_SKB_CB(skb);
			ctx = info->driver_data[0];
			iwl_trans_free_tx_cmd(priv->trans,
					      info->driver_data[1]);

			memset(&info->status, 0, sizeof(info->status));

			if (status == TX_STATUS_FAIL_PASSIVE_NO_RX &&
			    ctx->vif &&
			    ctx->vif->type == NL80211_IFTYPE_STATION) {
				/* block and stop all queues */
				priv->passive_no_rx = true;
				IWL_DEBUG_TX_QUEUES(priv, "stop all queues: "
						    "passive channel");
				ieee80211_stop_queues(priv->hw);

				IWL_DEBUG_TX_REPLY(priv,
					   "TXQ %d status %s (0x%08x) "
					   "rate_n_flags 0x%x retries %d\n",
					   txq_id,
					   iwl_get_tx_fail_reason(status),
					   status,
					   le32_to_cpu(tx_resp->rate_n_flags),
					   tx_resp->failure_frame);

				IWL_DEBUG_TX_REPLY(priv,
					   "FrameCnt = %d, idx=%d\n",
					   tx_resp->frame_count, cmd_index);
			}

			/* check if BAR is needed */
			if (is_agg && !iwl_is_tx_success(status))
				info->flags |= IEEE80211_TX_STAT_AMPDU_NO_BACK;
			iwlagn_set_tx_status(priv, IEEE80211_SKB_CB(skb),
					     tx_resp);
			if (!is_agg)
				iwlagn_non_agg_tx_status(priv, ctx, hdr->addr1);

			is_offchannel_skb =
				(info->flags & IEEE80211_TX_CTL_TX_OFFCHAN);
			freed++;
		}

		if (tid != IWL_TID_NON_QOS) {
			priv->tid_data[sta_id][tid].next_reclaimed =
				next_reclaimed;
			IWL_DEBUG_TX_REPLY(priv, "Next reclaimed packet:%d\n",
					   next_reclaimed);
		}

		if (!is_agg && freed != 1)
			IWL_ERR(priv, "Q: %d, freed %d\n", txq_id, freed);

		/*
		 * An offchannel frame can be sent only on the AUX queue, where
		 * there is no aggregation (and reordering), so only a single
		 * skb is expected to be processed.
		 */
		if (is_offchannel_skb && freed != 1)
			IWL_ERR(priv, "OFFCHANNEL SKB freed %d\n", freed);

		IWL_DEBUG_TX_REPLY(priv, "TXQ %d status %s (0x%08x)\n", txq_id,
				   iwl_get_tx_fail_reason(status), status);

		IWL_DEBUG_TX_REPLY(priv,
				   "\t\t\t\tinitial_rate 0x%x retries %d, idx=%d ssn=%d seq_ctl=0x%x\n",
				   le32_to_cpu(tx_resp->rate_n_flags),
				   tx_resp->failure_frame,
				   SEQ_TO_INDEX(sequence), ssn,
				   le16_to_cpu(tx_resp->seq_ctl));
	}

	iwl_check_abort_status(priv, tx_resp->frame_count, status);

	spin_unlock_bh(&priv->sta_lock);

	while (!skb_queue_empty(&skbs)) {
		skb = __skb_dequeue(&skbs);
		ieee80211_tx_status_ni(priv->hw, skb);
	}

	if (is_offchannel_skb)
		iwl_scan_offchannel_skb_status(priv);

	return 0;
}

/**
 * iwlagn_rx_reply_compressed_ba - Handler for REPLY_COMPRESSED_BA
 *
 * Handles block-acknowledge notification from device, which reports success
 * of frames sent via aggregation.
 */
int iwlagn_rx_reply_compressed_ba(struct iwl_priv *priv,
				  struct iwl_rx_cmd_buffer *rxb,
				  struct iwl_device_cmd *cmd)
{
	struct iwl_rx_packet *pkt = rxb_addr(rxb);
	struct iwl_compressed_ba_resp *ba_resp = (void *)pkt->data;
	struct iwl_ht_agg *agg;
	struct sk_buff_head reclaimed_skbs;
	struct ieee80211_tx_info *info;
	struct ieee80211_hdr *hdr;
	struct sk_buff *skb;
	int sta_id;
	int tid;
	int freed;

	/* "flow" corresponds to Tx queue */
	u16 scd_flow = le16_to_cpu(ba_resp->scd_flow);

	/* "ssn" is start of block-ack Tx window, corresponds to index
	 * (in Tx queue's circular buffer) of first TFD/frame in window */
	u16 ba_resp_scd_ssn = le16_to_cpu(ba_resp->scd_ssn);

	if (scd_flow >= priv->cfg->base_params->num_of_queues) {
		IWL_ERR(priv,
			"BUG_ON scd_flow is bigger than number of queues\n");
		return 0;
	}

	sta_id = ba_resp->sta_id;
	tid = ba_resp->tid;
	agg = &priv->tid_data[sta_id][tid].agg;

	spin_lock_bh(&priv->sta_lock);

	if (unlikely(!agg->wait_for_ba)) {
		if (unlikely(ba_resp->bitmap))
			IWL_ERR(priv, "Received BA when not expected\n");
		spin_unlock_bh(&priv->sta_lock);
		return 0;
	}

	if (unlikely(scd_flow != agg->txq_id)) {
		/*
		 * FIXME: this is a uCode bug which needs to be addressed;
		 * log the information and return for now.
		 * Since it can possibly happen very often, and in order
		 * not to fill the syslog, don't use IWL_ERR or IWL_WARN
		 */
		IWL_DEBUG_TX_QUEUES(priv,
				    "Bad queue mapping txq_id=%d, agg_txq[sta:%d,tid:%d]=%d\n",
				    scd_flow, sta_id, tid, agg->txq_id);
		spin_unlock_bh(&priv->sta_lock);
		return 0;
	}

	__skb_queue_head_init(&reclaimed_skbs);

	/* Release all TFDs before the SSN, i.e. all TFDs in front of
	 * block-ack window (we assume that they've been successfully
	 * transmitted ... if not, it's too late anyway). */
	iwl_trans_reclaim(priv->trans, scd_flow, ba_resp_scd_ssn,
			  &reclaimed_skbs);

	IWL_DEBUG_TX_REPLY(priv, "REPLY_COMPRESSED_BA [%d] Received from %pM, "
			   "sta_id = %d\n",
			   agg->wait_for_ba,
			   (u8 *) &ba_resp->sta_addr_lo32,
			   ba_resp->sta_id);
	IWL_DEBUG_TX_REPLY(priv, "TID = %d, SeqCtl = %d, bitmap = 0x%llx, "
			   "scd_flow = %d, scd_ssn = %d sent:%d, acked:%d\n",
			   ba_resp->tid, le16_to_cpu(ba_resp->seq_ctl),
			   (unsigned long long)le64_to_cpu(ba_resp->bitmap),
			   scd_flow, ba_resp_scd_ssn, ba_resp->txed,
			   ba_resp->txed_2_done);

	/* Mark that the expected block-ack response arrived */
	agg->wait_for_ba = false;

	/* Sanity check values reported by uCode */
	if (ba_resp->txed_2_done > ba_resp->txed) {
		IWL_DEBUG_TX_REPLY(priv,
				   "bogus sent(%d) and ack(%d) count\n",
				   ba_resp->txed, ba_resp->txed_2_done);
		/*
		 * set txed_2_done = txed,
		 * so it won't impact rate scale
		 */
		ba_resp->txed = ba_resp->txed_2_done;
	}

	priv->tid_data[sta_id][tid].next_reclaimed = ba_resp_scd_ssn;

	iwlagn_check_ratid_empty(priv, sta_id, tid);

	freed = 0;

	skb_queue_walk(&reclaimed_skbs, skb) {
		hdr = (struct ieee80211_hdr *)skb->data;

		if (ieee80211_is_data_qos(hdr->frame_control))
			freed++;
		else
			WARN_ON_ONCE(1);

		info = IEEE80211_SKB_CB(skb);
		iwl_trans_free_tx_cmd(priv->trans, info->driver_data[1]);

		if (freed == 1) {
			/* this is the first skb we deliver in this batch */
			/* put the rate scaling data there */
			info = IEEE80211_SKB_CB(skb);
			memset(&info->status, 0, sizeof(info->status));
			info->flags |= IEEE80211_TX_STAT_ACK;
			info->flags |= IEEE80211_TX_STAT_AMPDU;
			info->status.ampdu_ack_len = ba_resp->txed_2_done;
			info->status.ampdu_len = ba_resp->txed;
			iwlagn_hwrate_to_tx_control(priv, agg->rate_n_flags,
						    info);
		}
	}

	spin_unlock_bh(&priv->sta_lock);

	while (!skb_queue_empty(&reclaimed_skbs)) {
		skb = __skb_dequeue(&reclaimed_skbs);
		ieee80211_tx_status_ni(priv->hw, skb);
	}

	return 0;
}