iwl-agn-lib.c

  1. /******************************************************************************
  2. *
  3. * GPL LICENSE SUMMARY
  4. *
  5. * Copyright(c) 2008 - 2010 Intel Corporation. All rights reserved.
  6. *
  7. * This program is free software; you can redistribute it and/or modify
  8. * it under the terms of version 2 of the GNU General Public License as
  9. * published by the Free Software Foundation.
  10. *
  11. * This program is distributed in the hope that it will be useful, but
  12. * WITHOUT ANY WARRANTY; without even the implied warranty of
  13. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
  14. * General Public License for more details.
  15. *
  16. * You should have received a copy of the GNU General Public License
  17. * along with this program; if not, write to the Free Software
  18. * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
  19. * USA
  20. *
  21. * The full GNU General Public License is included in this distribution
  22. * in the file called LICENSE.GPL.
  23. *
  24. * Contact Information:
  25. * Intel Linux Wireless <ilw@linux.intel.com>
  26. * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
  27. *
  28. *****************************************************************************/
  29. #include <linux/etherdevice.h>
  30. #include <linux/kernel.h>
  31. #include <linux/module.h>
  32. #include <linux/init.h>
  33. #include <linux/sched.h>
  34. #include "iwl-dev.h"
  35. #include "iwl-core.h"
  36. #include "iwl-io.h"
  37. #include "iwl-helpers.h"
  38. #include "iwl-agn-hw.h"
  39. #include "iwl-agn.h"
  40. #include "iwl-sta.h"
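/*
 * Editorial note (not part of the original source): in the Tx response the
 * scheduler SSN word follows the per-frame aggregation status entries, which
 * is why it is read at (&tx_resp->status + frame_count) below and masked
 * with MAX_SN to keep only the sequence-number bits.
 */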
  41. static inline u32 iwlagn_get_scd_ssn(struct iwlagn_tx_resp *tx_resp)
  42. {
  43. return le32_to_cpup((__le32 *)&tx_resp->status +
  44. tx_resp->frame_count) & MAX_SN;
  45. }
  46. static void iwlagn_count_tx_err_status(struct iwl_priv *priv, u16 status)
  47. {
  48. status &= TX_STATUS_MSK;
  49. switch (status) {
  50. case TX_STATUS_POSTPONE_DELAY:
  51. priv->_agn.reply_tx_stats.pp_delay++;
  52. break;
  53. case TX_STATUS_POSTPONE_FEW_BYTES:
  54. priv->_agn.reply_tx_stats.pp_few_bytes++;
  55. break;
  56. case TX_STATUS_POSTPONE_BT_PRIO:
  57. priv->_agn.reply_tx_stats.pp_bt_prio++;
  58. break;
  59. case TX_STATUS_POSTPONE_QUIET_PERIOD:
  60. priv->_agn.reply_tx_stats.pp_quiet_period++;
  61. break;
  62. case TX_STATUS_POSTPONE_CALC_TTAK:
  63. priv->_agn.reply_tx_stats.pp_calc_ttak++;
  64. break;
  65. case TX_STATUS_FAIL_INTERNAL_CROSSED_RETRY:
  66. priv->_agn.reply_tx_stats.int_crossed_retry++;
  67. break;
  68. case TX_STATUS_FAIL_SHORT_LIMIT:
  69. priv->_agn.reply_tx_stats.short_limit++;
  70. break;
  71. case TX_STATUS_FAIL_LONG_LIMIT:
  72. priv->_agn.reply_tx_stats.long_limit++;
  73. break;
  74. case TX_STATUS_FAIL_FIFO_UNDERRUN:
  75. priv->_agn.reply_tx_stats.fifo_underrun++;
  76. break;
  77. case TX_STATUS_FAIL_DRAIN_FLOW:
  78. priv->_agn.reply_tx_stats.drain_flow++;
  79. break;
  80. case TX_STATUS_FAIL_RFKILL_FLUSH:
  81. priv->_agn.reply_tx_stats.rfkill_flush++;
  82. break;
  83. case TX_STATUS_FAIL_LIFE_EXPIRE:
  84. priv->_agn.reply_tx_stats.life_expire++;
  85. break;
  86. case TX_STATUS_FAIL_DEST_PS:
  87. priv->_agn.reply_tx_stats.dest_ps++;
  88. break;
  89. case TX_STATUS_FAIL_HOST_ABORTED:
  90. priv->_agn.reply_tx_stats.host_abort++;
  91. break;
  92. case TX_STATUS_FAIL_BT_RETRY:
  93. priv->_agn.reply_tx_stats.bt_retry++;
  94. break;
  95. case TX_STATUS_FAIL_STA_INVALID:
  96. priv->_agn.reply_tx_stats.sta_invalid++;
  97. break;
  98. case TX_STATUS_FAIL_FRAG_DROPPED:
  99. priv->_agn.reply_tx_stats.frag_drop++;
  100. break;
  101. case TX_STATUS_FAIL_TID_DISABLE:
  102. priv->_agn.reply_tx_stats.tid_disable++;
  103. break;
  104. case TX_STATUS_FAIL_FIFO_FLUSHED:
  105. priv->_agn.reply_tx_stats.fifo_flush++;
  106. break;
  107. case TX_STATUS_FAIL_INSUFFICIENT_CF_POLL:
  108. priv->_agn.reply_tx_stats.insuff_cf_poll++;
  109. break;
  110. case TX_STATUS_FAIL_PASSIVE_NO_RX:
  111. priv->_agn.reply_tx_stats.fail_hw_drop++;
  112. break;
  113. case TX_STATUS_FAIL_NO_BEACON_ON_RADAR:
  114. priv->_agn.reply_tx_stats.sta_color_mismatch++;
  115. break;
  116. default:
  117. priv->_agn.reply_tx_stats.unknown++;
  118. break;
  119. }
  120. }
  121. static void iwlagn_count_agg_tx_err_status(struct iwl_priv *priv, u16 status)
  122. {
  123. status &= AGG_TX_STATUS_MSK;
  124. switch (status) {
  125. case AGG_TX_STATE_UNDERRUN_MSK:
  126. priv->_agn.reply_agg_tx_stats.underrun++;
  127. break;
  128. case AGG_TX_STATE_BT_PRIO_MSK:
  129. priv->_agn.reply_agg_tx_stats.bt_prio++;
  130. break;
  131. case AGG_TX_STATE_FEW_BYTES_MSK:
  132. priv->_agn.reply_agg_tx_stats.few_bytes++;
  133. break;
  134. case AGG_TX_STATE_ABORT_MSK:
  135. priv->_agn.reply_agg_tx_stats.abort++;
  136. break;
  137. case AGG_TX_STATE_LAST_SENT_TTL_MSK:
  138. priv->_agn.reply_agg_tx_stats.last_sent_ttl++;
  139. break;
  140. case AGG_TX_STATE_LAST_SENT_TRY_CNT_MSK:
  141. priv->_agn.reply_agg_tx_stats.last_sent_try++;
  142. break;
  143. case AGG_TX_STATE_LAST_SENT_BT_KILL_MSK:
  144. priv->_agn.reply_agg_tx_stats.last_sent_bt_kill++;
  145. break;
  146. case AGG_TX_STATE_SCD_QUERY_MSK:
  147. priv->_agn.reply_agg_tx_stats.scd_query++;
  148. break;
  149. case AGG_TX_STATE_TEST_BAD_CRC32_MSK:
  150. priv->_agn.reply_agg_tx_stats.bad_crc32++;
  151. break;
  152. case AGG_TX_STATE_RESPONSE_MSK:
  153. priv->_agn.reply_agg_tx_stats.response++;
  154. break;
  155. case AGG_TX_STATE_DUMP_TX_MSK:
  156. priv->_agn.reply_agg_tx_stats.dump_tx++;
  157. break;
  158. case AGG_TX_STATE_DELAY_TX_MSK:
  159. priv->_agn.reply_agg_tx_stats.delay_tx++;
  160. break;
  161. default:
  162. priv->_agn.reply_agg_tx_stats.unknown++;
  163. break;
  164. }
  165. }
  166. static void iwlagn_set_tx_status(struct iwl_priv *priv,
  167. struct ieee80211_tx_info *info,
  168. struct iwlagn_tx_resp *tx_resp,
  169. int txq_id, bool is_agg)
  170. {
  171. u16 status = le16_to_cpu(tx_resp->status.status);
  172. info->status.rates[0].count = tx_resp->failure_frame + 1;
  173. if (is_agg)
  174. info->flags &= ~IEEE80211_TX_CTL_AMPDU;
  175. info->flags |= iwl_tx_status_to_mac80211(status);
  176. iwlagn_hwrate_to_tx_control(priv, le32_to_cpu(tx_resp->rate_n_flags),
  177. info);
  178. if (!iwl_is_tx_success(status))
  179. iwlagn_count_tx_err_status(priv, status);
  180. IWL_DEBUG_TX_REPLY(priv, "TXQ %d status %s (0x%08x) rate_n_flags "
  181. "0x%x retries %d\n",
  182. txq_id,
  183. iwl_get_tx_fail_reason(status), status,
  184. le32_to_cpu(tx_resp->rate_n_flags),
  185. tx_resp->failure_frame);
  186. }
  187. #ifdef CONFIG_IWLWIFI_DEBUG
  188. #define AGG_TX_STATE_FAIL(x) case AGG_TX_STATE_ ## x: return #x
  189. const char *iwl_get_agg_tx_fail_reason(u16 status)
  190. {
  191. status &= AGG_TX_STATUS_MSK;
  192. switch (status) {
  193. case AGG_TX_STATE_TRANSMITTED:
  194. return "SUCCESS";
  195. AGG_TX_STATE_FAIL(UNDERRUN_MSK);
  196. AGG_TX_STATE_FAIL(BT_PRIO_MSK);
  197. AGG_TX_STATE_FAIL(FEW_BYTES_MSK);
  198. AGG_TX_STATE_FAIL(ABORT_MSK);
  199. AGG_TX_STATE_FAIL(LAST_SENT_TTL_MSK);
  200. AGG_TX_STATE_FAIL(LAST_SENT_TRY_CNT_MSK);
  201. AGG_TX_STATE_FAIL(LAST_SENT_BT_KILL_MSK);
  202. AGG_TX_STATE_FAIL(SCD_QUERY_MSK);
  203. AGG_TX_STATE_FAIL(TEST_BAD_CRC32_MSK);
  204. AGG_TX_STATE_FAIL(RESPONSE_MSK);
  205. AGG_TX_STATE_FAIL(DUMP_TX_MSK);
  206. AGG_TX_STATE_FAIL(DELAY_TX_MSK);
  207. }
  208. return "UNKNOWN";
  209. }
  210. #endif /* CONFIG_IWLWIFI_DEBUG */
  211. static int iwlagn_tx_status_reply_tx(struct iwl_priv *priv,
  212. struct iwl_ht_agg *agg,
  213. struct iwlagn_tx_resp *tx_resp,
  214. int txq_id, u16 start_idx)
  215. {
  216. u16 status;
  217. struct agg_tx_status *frame_status = &tx_resp->status;
  218. struct ieee80211_hdr *hdr = NULL;
  219. int i, sh, idx;
  220. u16 seq;
  221. if (agg->wait_for_ba)
  222. IWL_DEBUG_TX_REPLY(priv, "got tx response w/o block-ack\n");
  223. agg->frame_count = tx_resp->frame_count;
  224. agg->start_idx = start_idx;
  225. agg->rate_n_flags = le32_to_cpu(tx_resp->rate_n_flags);
  226. agg->bitmap = 0;
  227. /* # frames attempted by Tx command */
  228. if (agg->frame_count == 1) {
  229. /* Only one frame was attempted; no block-ack will arrive */
  230. idx = start_idx;
  231. IWL_DEBUG_TX_REPLY(priv, "FrameCnt = %d, StartIdx=%d idx=%d\n",
  232. agg->frame_count, agg->start_idx, idx);
  233. iwlagn_set_tx_status(priv,
  234. IEEE80211_SKB_CB(
  235. priv->txq[txq_id].txb[idx].skb),
  236. tx_resp, txq_id, true);
  237. agg->wait_for_ba = 0;
  238. } else {
  239. /* Two or more frames were attempted; expect block-ack */
  240. u64 bitmap = 0;
  241. /*
  242. * Start is the lowest frame sent. It may not be the first
  243. * frame in the batch; we figure this out dynamically during
  244. * the following loop.
  245. */
  246. int start = agg->start_idx;
  247. /* Construct bit-map of pending frames within Tx window */
  248. for (i = 0; i < agg->frame_count; i++) {
  249. u16 sc;
  250. status = le16_to_cpu(frame_status[i].status);
  251. seq = le16_to_cpu(frame_status[i].sequence);
  252. idx = SEQ_TO_INDEX(seq);
  253. txq_id = SEQ_TO_QUEUE(seq);
  254. if (status & AGG_TX_STATUS_MSK)
  255. iwlagn_count_agg_tx_err_status(priv, status);
  256. if (status & (AGG_TX_STATE_FEW_BYTES_MSK |
  257. AGG_TX_STATE_ABORT_MSK))
  258. continue;
  259. IWL_DEBUG_TX_REPLY(priv, "FrameCnt = %d, txq_id=%d idx=%d\n",
  260. agg->frame_count, txq_id, idx);
  261. IWL_DEBUG_TX_REPLY(priv, "status %s (0x%08x), "
  262. "try-count (0x%08x)\n",
  263. iwl_get_agg_tx_fail_reason(status),
  264. status & AGG_TX_STATUS_MSK,
  265. status & AGG_TX_TRY_MSK);
  266. hdr = iwl_tx_queue_get_hdr(priv, txq_id, idx);
  267. if (!hdr) {
  268. IWL_ERR(priv,
  269. "BUG_ON idx doesn't point to valid skb"
  270. " idx=%d, txq_id=%d\n", idx, txq_id);
  271. return -1;
  272. }
  273. sc = le16_to_cpu(hdr->seq_ctrl);
  274. if (idx != (SEQ_TO_SN(sc) & 0xff)) {
  275. IWL_ERR(priv,
  276. "BUG_ON idx doesn't match seq control"
  277. " idx=%d, seq_idx=%d, seq=%d\n",
  278. idx, SEQ_TO_SN(sc),
  279. hdr->seq_ctrl);
  280. return -1;
  281. }
  282. IWL_DEBUG_TX_REPLY(priv, "AGG Frame i=%d idx %d seq=%d\n",
  283. i, idx, SEQ_TO_SN(sc));
  284. /*
  285. * sh -> how many frames ahead of the starting frame is
  286. * the current one?
  287. *
  288. * Note that all frames sent in the batch must be in a
  289. * 64-frame window, so this number should be in [0,63].
  290. * If outside of this window, then we've found a new
  291. * "first" frame in the batch and need to change start.
  292. */
  293. sh = idx - start;
  294. /*
  295. * If >= 64, out of window. start must be at the front
  296. * of the circular buffer, idx must be near the end of
  297. * the buffer, and idx is the new "first" frame. Shift
  298. * the indices around.
  299. */
  300. if (sh >= 64) {
  301. /* Shift bitmap by start - idx, wrapped */
  302. sh = 0x100 - idx + start;
  303. bitmap = bitmap << sh;
  304. /* Now idx is the new start so sh = 0 */
  305. sh = 0;
  306. start = idx;
  307. /*
  308. * If <= -64 then wraps the 256-pkt circular buffer
  309. * (e.g., start = 255 and idx = 0, sh should be 1)
  310. */
  311. } else if (sh <= -64) {
  312. sh = 0x100 - start + idx;
  313. /*
  314. * If < 0 but > -64, out of window. idx is before start
  315. * but not wrapped. Shift the indices around.
  316. */
  317. } else if (sh < 0) {
  318. /* Shift by how far start is ahead of idx */
  319. sh = start - idx;
  320. bitmap = bitmap << sh;
  321. /* Now idx is the new start so sh = 0 */
  322. start = idx;
  323. sh = 0;
  324. }
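/*
 * Editorial worked example (illustrative values, not from the original
 * source): with the 256-entry circular buffer, start = 255 and idx = 0
 * means the window wrapped, so sh = 0x100 - start + idx = 1 and bit 1 is
 * set below. With start = 10 and idx = 5, sh = start - idx = 5, the bitmap
 * is shifted left by 5, idx becomes the new start and bit 0 is set.
 */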
  325. /* Sequence number start + sh was sent in this batch */
  326. bitmap |= 1ULL << sh;
  327. IWL_DEBUG_TX_REPLY(priv, "start=%d bitmap=0x%llx\n",
  328. start, (unsigned long long)bitmap);
  329. }
  330. /*
  331. * Store the bitmap and possibly the new start, if we wrapped
  332. * the buffer above
  333. */
  334. agg->bitmap = bitmap;
  335. agg->start_idx = start;
  336. IWL_DEBUG_TX_REPLY(priv, "Frames %d start_idx=%d bitmap=0x%llx\n",
  337. agg->frame_count, agg->start_idx,
  338. (unsigned long long)agg->bitmap);
  339. if (bitmap)
  340. agg->wait_for_ba = 1;
  341. }
  342. return 0;
  343. }
  344. void iwl_check_abort_status(struct iwl_priv *priv,
  345. u8 frame_count, u32 status)
  346. {
  347. if (frame_count == 1 && status == TX_STATUS_FAIL_RFKILL_FLUSH) {
  348. IWL_ERR(priv, "Tx flush command to flush out all frames\n");
  349. if (!test_bit(STATUS_EXIT_PENDING, &priv->status))
  350. queue_work(priv->workqueue, &priv->tx_flush);
  351. }
  352. }
  353. static void iwlagn_rx_reply_tx(struct iwl_priv *priv,
  354. struct iwl_rx_mem_buffer *rxb)
  355. {
  356. struct iwl_rx_packet *pkt = rxb_addr(rxb);
  357. u16 sequence = le16_to_cpu(pkt->hdr.sequence);
  358. int txq_id = SEQ_TO_QUEUE(sequence);
  359. int index = SEQ_TO_INDEX(sequence);
  360. struct iwl_tx_queue *txq = &priv->txq[txq_id];
  361. struct ieee80211_tx_info *info;
  362. struct iwlagn_tx_resp *tx_resp = (void *)&pkt->u.raw[0];
  363. u32 status = le16_to_cpu(tx_resp->status.status);
  364. int tid;
  365. int sta_id;
  366. int freed;
  367. unsigned long flags;
  368. if ((index >= txq->q.n_bd) || (iwl_queue_used(&txq->q, index) == 0)) {
  369. IWL_ERR(priv, "Read index for DMA queue txq_id (%d) index %d "
  370. "is out of range [0-%d] %d %d\n", txq_id,
  371. index, txq->q.n_bd, txq->q.write_ptr,
  372. txq->q.read_ptr);
  373. return;
  374. }
  375. txq->time_stamp = jiffies;
  376. info = IEEE80211_SKB_CB(txq->txb[txq->q.read_ptr].skb);
  377. memset(&info->status, 0, sizeof(info->status));
  378. tid = (tx_resp->ra_tid & IWLAGN_TX_RES_TID_MSK) >>
  379. IWLAGN_TX_RES_TID_POS;
  380. sta_id = (tx_resp->ra_tid & IWLAGN_TX_RES_RA_MSK) >>
  381. IWLAGN_TX_RES_RA_POS;
  382. spin_lock_irqsave(&priv->sta_lock, flags);
  383. if (txq->sched_retry) {
  384. const u32 scd_ssn = iwlagn_get_scd_ssn(tx_resp);
  385. struct iwl_ht_agg *agg;
  386. agg = &priv->stations[sta_id].tid[tid].agg;
  387. /*
  388. * If the BT kill count is non-zero, we'll get this
  389. * notification again.
  390. */
  391. if (tx_resp->bt_kill_count && tx_resp->frame_count == 1 &&
  392. priv->cfg->bt_params &&
  393. priv->cfg->bt_params->advanced_bt_coexist) {
  394. IWL_WARN(priv, "receive reply tx with bt_kill\n");
  395. }
  396. iwlagn_tx_status_reply_tx(priv, agg, tx_resp, txq_id, index);
  397. /* check if BAR is needed */
  398. if ((tx_resp->frame_count == 1) && !iwl_is_tx_success(status))
  399. info->flags |= IEEE80211_TX_STAT_AMPDU_NO_BACK;
  400. if (txq->q.read_ptr != (scd_ssn & 0xff)) {
  401. index = iwl_queue_dec_wrap(scd_ssn & 0xff, txq->q.n_bd);
  402. IWL_DEBUG_TX_REPLY(priv, "Retry scheduler reclaim "
  403. "scd_ssn=%d idx=%d txq=%d swq=%d\n",
  404. scd_ssn , index, txq_id, txq->swq_id);
  405. freed = iwlagn_tx_queue_reclaim(priv, txq_id, index);
  406. iwl_free_tfds_in_queue(priv, sta_id, tid, freed);
  407. if (priv->mac80211_registered &&
  408. (iwl_queue_space(&txq->q) > txq->q.low_mark) &&
  409. (agg->state != IWL_EMPTYING_HW_QUEUE_DELBA))
  410. iwl_wake_queue(priv, txq);
  411. }
  412. } else {
  413. iwlagn_set_tx_status(priv, info, tx_resp, txq_id, false);
  414. freed = iwlagn_tx_queue_reclaim(priv, txq_id, index);
  415. iwl_free_tfds_in_queue(priv, sta_id, tid, freed);
  416. if (priv->mac80211_registered &&
  417. (iwl_queue_space(&txq->q) > txq->q.low_mark))
  418. iwl_wake_queue(priv, txq);
  419. }
  420. iwlagn_txq_check_empty(priv, sta_id, tid, txq_id);
  421. iwl_check_abort_status(priv, tx_resp->frame_count, status);
  422. spin_unlock_irqrestore(&priv->sta_lock, flags);
  423. }
  424. void iwlagn_rx_handler_setup(struct iwl_priv *priv)
  425. {
  426. /* init calibration handlers */
  427. priv->rx_handlers[CALIBRATION_RES_NOTIFICATION] =
  428. iwlagn_rx_calib_result;
  429. priv->rx_handlers[CALIBRATION_COMPLETE_NOTIFICATION] =
  430. iwlagn_rx_calib_complete;
  431. priv->rx_handlers[REPLY_TX] = iwlagn_rx_reply_tx;
  432. /* set up notification wait support */
  433. spin_lock_init(&priv->_agn.notif_wait_lock);
  434. INIT_LIST_HEAD(&priv->_agn.notif_waits);
  435. init_waitqueue_head(&priv->_agn.notif_waitq);
  436. }
  437. void iwlagn_setup_deferred_work(struct iwl_priv *priv)
  438. {
  439. /* in agn, the tx power calibration is done in uCode */
  440. priv->disable_tx_power_cal = 1;
  441. }
  442. int iwlagn_hw_valid_rtc_data_addr(u32 addr)
  443. {
  444. return (addr >= IWLAGN_RTC_DATA_LOWER_BOUND) &&
  445. (addr < IWLAGN_RTC_DATA_UPPER_BOUND);
  446. }
  447. int iwlagn_send_tx_power(struct iwl_priv *priv)
  448. {
  449. struct iwlagn_tx_power_dbm_cmd tx_power_cmd;
  450. u8 tx_ant_cfg_cmd;
  451. if (WARN_ONCE(test_bit(STATUS_SCAN_HW, &priv->status),
  452. "TX Power requested while scanning!\n"))
  453. return -EAGAIN;
  454. /* half dBm need to multiply */
  455. tx_power_cmd.global_lmt = (s8)(2 * priv->tx_power_user_lmt);
  456. if (priv->tx_power_lmt_in_half_dbm &&
  457. priv->tx_power_lmt_in_half_dbm < tx_power_cmd.global_lmt) {
  458. /*
  459. * Newer devices use the enhanced/extended tx power table in
  460. * EEPROM, whose entries are in half-dBm units. The driver has
  461. * to convert those to dBm before reporting to mac80211, which
  462. * can lose 1/2 dBm of resolution. Rounding the value up before
  463. * reporting would push the reported tx power 1/2 dBm over the
  464. * regulatory limit, so perform the check here instead: if
  465. * "tx_power_user_lmt" is higher than the EEPROM value (which
  466. * is in half-dBm units), lower the tx power to the EEPROM
  467. * limit.
  468. */
  469. tx_power_cmd.global_lmt = priv->tx_power_lmt_in_half_dbm;
  470. }
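/*
 * Editorial worked example (illustrative values): a user limit of 15 dBm
 * becomes global_lmt = 30 half-dBm; if the EEPROM enhanced table caps the
 * device at 29 half-dBm (14.5 dBm), the block above lowers global_lmt to 29.
 */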
  471. tx_power_cmd.flags = IWLAGN_TX_POWER_NO_CLOSED;
  472. tx_power_cmd.srv_chan_lmt = IWLAGN_TX_POWER_AUTO;
  473. if (IWL_UCODE_API(priv->ucode_ver) == 1)
  474. tx_ant_cfg_cmd = REPLY_TX_POWER_DBM_CMD_V1;
  475. else
  476. tx_ant_cfg_cmd = REPLY_TX_POWER_DBM_CMD;
  477. return iwl_send_cmd_pdu(priv, tx_ant_cfg_cmd, sizeof(tx_power_cmd),
  478. &tx_power_cmd);
  479. }
  480. void iwlagn_temperature(struct iwl_priv *priv)
  481. {
  482. /* store temperature from statistics (in Celsius) */
  483. priv->temperature =
  484. le32_to_cpu(priv->_agn.statistics.general.common.temperature);
  485. iwl_tt_handler(priv);
  486. }
  487. u16 iwlagn_eeprom_calib_version(struct iwl_priv *priv)
  488. {
  489. struct iwl_eeprom_calib_hdr {
  490. u8 version;
  491. u8 pa_type;
  492. u16 voltage;
  493. } *hdr;
  494. hdr = (struct iwl_eeprom_calib_hdr *)iwl_eeprom_query_addr(priv,
  495. EEPROM_CALIB_ALL);
  496. return hdr->version;
  497. }
  498. /*
  499. * EEPROM
  500. */
  501. static u32 eeprom_indirect_address(const struct iwl_priv *priv, u32 address)
  502. {
  503. u16 offset = 0;
  504. if ((address & INDIRECT_ADDRESS) == 0)
  505. return address;
  506. switch (address & INDIRECT_TYPE_MSK) {
  507. case INDIRECT_HOST:
  508. offset = iwl_eeprom_query16(priv, EEPROM_LINK_HOST);
  509. break;
  510. case INDIRECT_GENERAL:
  511. offset = iwl_eeprom_query16(priv, EEPROM_LINK_GENERAL);
  512. break;
  513. case INDIRECT_REGULATORY:
  514. offset = iwl_eeprom_query16(priv, EEPROM_LINK_REGULATORY);
  515. break;
  516. case INDIRECT_TXP_LIMIT:
  517. offset = iwl_eeprom_query16(priv, EEPROM_LINK_TXP_LIMIT);
  518. break;
  519. case INDIRECT_TXP_LIMIT_SIZE:
  520. offset = iwl_eeprom_query16(priv, EEPROM_LINK_TXP_LIMIT_SIZE);
  521. break;
  522. case INDIRECT_CALIBRATION:
  523. offset = iwl_eeprom_query16(priv, EEPROM_LINK_CALIBRATION);
  524. break;
  525. case INDIRECT_PROCESS_ADJST:
  526. offset = iwl_eeprom_query16(priv, EEPROM_LINK_PROCESS_ADJST);
  527. break;
  528. case INDIRECT_OTHERS:
  529. offset = iwl_eeprom_query16(priv, EEPROM_LINK_OTHERS);
  530. break;
  531. default:
  532. IWL_ERR(priv, "illegal indirect type: 0x%X\n",
  533. address & INDIRECT_TYPE_MSK);
  534. break;
  535. }
  536. /* translate the offset from words to bytes */
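/* (editorial note: the offset stored in the EEPROM link cell is in 16-bit
 * words, so e.g. a stored offset of 0x40 contributes 0x80 bytes to the
 * resolved address below) */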
  537. return (address & ADDRESS_MSK) + (offset << 1);
  538. }
  539. const u8 *iwlagn_eeprom_query_addr(const struct iwl_priv *priv,
  540. size_t offset)
  541. {
  542. u32 address = eeprom_indirect_address(priv, offset);
  543. BUG_ON(address >= priv->cfg->base_params->eeprom_size);
  544. return &priv->eeprom[address];
  545. }
  546. struct iwl_mod_params iwlagn_mod_params = {
  547. .amsdu_size_8K = 1,
  548. .restart_fw = 1,
  549. .plcp_check = true,
  550. /* the rest are 0 by default */
  551. };
  552. void iwlagn_rx_queue_reset(struct iwl_priv *priv, struct iwl_rx_queue *rxq)
  553. {
  554. unsigned long flags;
  555. int i;
  556. spin_lock_irqsave(&rxq->lock, flags);
  557. INIT_LIST_HEAD(&rxq->rx_free);
  558. INIT_LIST_HEAD(&rxq->rx_used);
  559. /* Fill the rx_used queue with _all_ of the Rx buffers */
  560. for (i = 0; i < RX_FREE_BUFFERS + RX_QUEUE_SIZE; i++) {
  561. /* In the reset function, these buffers may have been allocated
  562. * to an SKB, so we need to unmap and free potential storage */
  563. if (rxq->pool[i].page != NULL) {
  564. pci_unmap_page(priv->pci_dev, rxq->pool[i].page_dma,
  565. PAGE_SIZE << priv->hw_params.rx_page_order,
  566. PCI_DMA_FROMDEVICE);
  567. __iwl_free_pages(priv, rxq->pool[i].page);
  568. rxq->pool[i].page = NULL;
  569. }
  570. list_add_tail(&rxq->pool[i].list, &rxq->rx_used);
  571. }
  572. for (i = 0; i < RX_QUEUE_SIZE; i++)
  573. rxq->queue[i] = NULL;
  574. /* Set us so that we have processed and used all buffers, but have
  575. * not restocked the Rx queue with fresh buffers */
  576. rxq->read = rxq->write = 0;
  577. rxq->write_actual = 0;
  578. rxq->free_count = 0;
  579. spin_unlock_irqrestore(&rxq->lock, flags);
  580. }
  581. int iwlagn_rx_init(struct iwl_priv *priv, struct iwl_rx_queue *rxq)
  582. {
  583. u32 rb_size;
  584. const u32 rfdnlog = RX_QUEUE_SIZE_LOG; /* 256 RBDs */
  585. u32 rb_timeout = 0; /* FIXME: RX_RB_TIMEOUT for all devices? */
  586. if (!priv->cfg->base_params->use_isr_legacy)
  587. rb_timeout = RX_RB_TIMEOUT;
  588. if (priv->cfg->mod_params->amsdu_size_8K)
  589. rb_size = FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_8K;
  590. else
  591. rb_size = FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_4K;
  592. /* Stop Rx DMA */
  593. iwl_write_direct32(priv, FH_MEM_RCSR_CHNL0_CONFIG_REG, 0);
  594. /* Reset driver's Rx queue write index */
  595. iwl_write_direct32(priv, FH_RSCSR_CHNL0_RBDCB_WPTR_REG, 0);
  596. /* Tell device where to find RBD circular buffer in DRAM */
  597. iwl_write_direct32(priv, FH_RSCSR_CHNL0_RBDCB_BASE_REG,
  598. (u32)(rxq->bd_dma >> 8));
  599. /* Tell device where in DRAM to update its Rx status */
  600. iwl_write_direct32(priv, FH_RSCSR_CHNL0_STTS_WPTR_REG,
  601. rxq->rb_stts_dma >> 4);
  602. /* Enable Rx DMA
  603. * FH_RCSR_CHNL0_RX_IGNORE_RXF_EMPTY is set because of HW bug in
  604. * the credit mechanism in 5000 HW RX FIFO
  605. * Direct rx interrupts to hosts
  606. * Rx buffer size 4 or 8k
  607. * RB timeout 0x10
  608. * 256 RBDs
  609. */
  610. iwl_write_direct32(priv, FH_MEM_RCSR_CHNL0_CONFIG_REG,
  611. FH_RCSR_RX_CONFIG_CHNL_EN_ENABLE_VAL |
  612. FH_RCSR_CHNL0_RX_IGNORE_RXF_EMPTY |
  613. FH_RCSR_CHNL0_RX_CONFIG_IRQ_DEST_INT_HOST_VAL |
  614. FH_RCSR_CHNL0_RX_CONFIG_SINGLE_FRAME_MSK |
  615. rb_size|
  616. (rb_timeout << FH_RCSR_RX_CONFIG_REG_IRQ_RBTH_POS)|
  617. (rfdnlog << FH_RCSR_RX_CONFIG_RBDCB_SIZE_POS));
  618. /* Set interrupt coalescing timer to default (2048 usecs) */
  619. iwl_write8(priv, CSR_INT_COALESCING, IWL_HOST_INT_TIMEOUT_DEF);
  620. return 0;
  621. }
  622. static void iwlagn_set_pwr_vmain(struct iwl_priv *priv)
  623. {
  624. /*
  625. * (for documentation purposes)
  626. * to set power to V_AUX, do:
  627. if (pci_pme_capable(priv->pci_dev, PCI_D3cold))
  628. iwl_set_bits_mask_prph(priv, APMG_PS_CTRL_REG,
  629. APMG_PS_CTRL_VAL_PWR_SRC_VAUX,
  630. ~APMG_PS_CTRL_MSK_PWR_SRC);
  631. */
  632. iwl_set_bits_mask_prph(priv, APMG_PS_CTRL_REG,
  633. APMG_PS_CTRL_VAL_PWR_SRC_VMAIN,
  634. ~APMG_PS_CTRL_MSK_PWR_SRC);
  635. }
  636. int iwlagn_hw_nic_init(struct iwl_priv *priv)
  637. {
  638. unsigned long flags;
  639. struct iwl_rx_queue *rxq = &priv->rxq;
  640. int ret;
  641. /* nic_init */
  642. spin_lock_irqsave(&priv->lock, flags);
  643. priv->cfg->ops->lib->apm_ops.init(priv);
  644. /* Set interrupt coalescing calibration timer to default (512 usecs) */
  645. iwl_write8(priv, CSR_INT_COALESCING, IWL_HOST_INT_CALIB_TIMEOUT_DEF);
  646. spin_unlock_irqrestore(&priv->lock, flags);
  647. iwlagn_set_pwr_vmain(priv);
  648. priv->cfg->ops->lib->apm_ops.config(priv);
  649. /* Allocate the RX queue, or reset if it is already allocated */
  650. if (!rxq->bd) {
  651. ret = iwl_rx_queue_alloc(priv);
  652. if (ret) {
  653. IWL_ERR(priv, "Unable to initialize Rx queue\n");
  654. return -ENOMEM;
  655. }
  656. } else
  657. iwlagn_rx_queue_reset(priv, rxq);
  658. iwlagn_rx_replenish(priv);
  659. iwlagn_rx_init(priv, rxq);
  660. spin_lock_irqsave(&priv->lock, flags);
  661. rxq->need_update = 1;
  662. iwl_rx_queue_update_write_ptr(priv, rxq);
  663. spin_unlock_irqrestore(&priv->lock, flags);
  664. /* Allocate or reset and init all Tx and Command queues */
  665. if (!priv->txq) {
  666. ret = iwlagn_txq_ctx_alloc(priv);
  667. if (ret)
  668. return ret;
  669. } else
  670. iwlagn_txq_ctx_reset(priv);
  671. if (priv->cfg->base_params->shadow_reg_enable) {
  672. /* enable shadow regs in HW */
  673. iwl_set_bit(priv, CSR_MAC_SHADOW_REG_CTRL,
  674. 0x800FFFFF);
  675. }
  676. set_bit(STATUS_INIT, &priv->status);
  677. return 0;
  678. }
  679. /**
  680. * iwlagn_dma_addr2rbd_ptr - convert a DMA address to a uCode read buffer ptr
  681. */
  682. static inline __le32 iwlagn_dma_addr2rbd_ptr(struct iwl_priv *priv,
  683. dma_addr_t dma_addr)
  684. {
  685. return cpu_to_le32((u32)(dma_addr >> 8));
  686. }
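/*
 * Editorial note: the device addresses receive buffers in 256-byte units,
 * hence the >> 8 above; iwlagn_rx_allocate() below checks that every RB DMA
 * address is 256-byte aligned, so no information is lost by the shift.
 */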
  687. /**
  688. * iwlagn_rx_queue_restock - refill RX queue from pre-allocated pool
  689. *
  690. * If there are slots in the RX queue that need to be restocked,
  691. * and we have free pre-allocated buffers, fill the ranks as much
  692. * as we can, pulling from rx_free.
  693. *
  694. * This moves the 'write' index forward to catch up with 'processed', and
  695. * also updates the memory address in the firmware to reference the new
  696. * target buffer.
  697. */
  698. void iwlagn_rx_queue_restock(struct iwl_priv *priv)
  699. {
  700. struct iwl_rx_queue *rxq = &priv->rxq;
  701. struct list_head *element;
  702. struct iwl_rx_mem_buffer *rxb;
  703. unsigned long flags;
  704. spin_lock_irqsave(&rxq->lock, flags);
  705. while ((iwl_rx_queue_space(rxq) > 0) && (rxq->free_count)) {
  706. /* The overwritten rxb must be a used one */
  707. rxb = rxq->queue[rxq->write];
  708. BUG_ON(rxb && rxb->page);
  709. /* Get next free Rx buffer, remove from free list */
  710. element = rxq->rx_free.next;
  711. rxb = list_entry(element, struct iwl_rx_mem_buffer, list);
  712. list_del(element);
  713. /* Point to Rx buffer via next RBD in circular buffer */
  714. rxq->bd[rxq->write] = iwlagn_dma_addr2rbd_ptr(priv,
  715. rxb->page_dma);
  716. rxq->queue[rxq->write] = rxb;
  717. rxq->write = (rxq->write + 1) & RX_QUEUE_MASK;
  718. rxq->free_count--;
  719. }
  720. spin_unlock_irqrestore(&rxq->lock, flags);
  721. /* If the pre-allocated buffer pool is dropping low, schedule to
  722. * refill it */
  723. if (rxq->free_count <= RX_LOW_WATERMARK)
  724. queue_work(priv->workqueue, &priv->rx_replenish);
  725. /* If we've added more space for the firmware to place data, tell it.
  726. * Increment device's write pointer in multiples of 8. */
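/* Editorial example (illustrative values): if write has advanced to 13 while
 * write_actual is 8, 13 & ~0x7 is still 8 and nothing is written; once write
 * reaches 16 the device write pointer is advanced to 16. */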
  727. if (rxq->write_actual != (rxq->write & ~0x7)) {
  728. spin_lock_irqsave(&rxq->lock, flags);
  729. rxq->need_update = 1;
  730. spin_unlock_irqrestore(&rxq->lock, flags);
  731. iwl_rx_queue_update_write_ptr(priv, rxq);
  732. }
  733. }
  734. /**
  735. * iwlagn_rx_allocate - Move all used buffers from rx_used to rx_free
  736. *
  737. * When moving to rx_free a page is allocated for the slot.
  738. *
  739. * The caller (iwlagn_rx_replenish) then restocks the Rx queue.
  740. * This is called as a scheduled work item (except during initialization).
  741. */
  742. void iwlagn_rx_allocate(struct iwl_priv *priv, gfp_t priority)
  743. {
  744. struct iwl_rx_queue *rxq = &priv->rxq;
  745. struct list_head *element;
  746. struct iwl_rx_mem_buffer *rxb;
  747. struct page *page;
  748. unsigned long flags;
  749. gfp_t gfp_mask = priority;
  750. while (1) {
  751. spin_lock_irqsave(&rxq->lock, flags);
  752. if (list_empty(&rxq->rx_used)) {
  753. spin_unlock_irqrestore(&rxq->lock, flags);
  754. return;
  755. }
  756. spin_unlock_irqrestore(&rxq->lock, flags);
  757. if (rxq->free_count > RX_LOW_WATERMARK)
  758. gfp_mask |= __GFP_NOWARN;
  759. if (priv->hw_params.rx_page_order > 0)
  760. gfp_mask |= __GFP_COMP;
  761. /* Alloc a new receive buffer */
  762. page = alloc_pages(gfp_mask, priv->hw_params.rx_page_order);
  763. if (!page) {
  764. if (net_ratelimit())
  765. IWL_DEBUG_INFO(priv, "alloc_pages failed, "
  766. "order: %d\n",
  767. priv->hw_params.rx_page_order);
  768. if ((rxq->free_count <= RX_LOW_WATERMARK) &&
  769. net_ratelimit())
  770. IWL_CRIT(priv, "Failed to alloc_pages with %s. Only %u free buffers remaining.\n",
  771. priority == GFP_ATOMIC ? "GFP_ATOMIC" : "GFP_KERNEL",
  772. rxq->free_count);
  773. /* We don't reschedule replenish work here -- we will
  774. * call the restock method and if it still needs
  775. * more buffers it will schedule replenish */
  776. return;
  777. }
  778. spin_lock_irqsave(&rxq->lock, flags);
  779. if (list_empty(&rxq->rx_used)) {
  780. spin_unlock_irqrestore(&rxq->lock, flags);
  781. __free_pages(page, priv->hw_params.rx_page_order);
  782. return;
  783. }
  784. element = rxq->rx_used.next;
  785. rxb = list_entry(element, struct iwl_rx_mem_buffer, list);
  786. list_del(element);
  787. spin_unlock_irqrestore(&rxq->lock, flags);
  788. BUG_ON(rxb->page);
  789. rxb->page = page;
  790. /* Get physical address of the RB */
  791. rxb->page_dma = pci_map_page(priv->pci_dev, page, 0,
  792. PAGE_SIZE << priv->hw_params.rx_page_order,
  793. PCI_DMA_FROMDEVICE);
  794. /* dma address must be no more than 36 bits */
  795. BUG_ON(rxb->page_dma & ~DMA_BIT_MASK(36));
  796. /* and also 256 byte aligned! */
  797. BUG_ON(rxb->page_dma & DMA_BIT_MASK(8));
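/* (editorial note: DMA_BIT_MASK(8) is 0xff, so this asserts the low 8
 * address bits are zero, i.e. 256-byte alignment, matching the >> 8
 * encoding used in iwlagn_dma_addr2rbd_ptr) */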
  798. spin_lock_irqsave(&rxq->lock, flags);
  799. list_add_tail(&rxb->list, &rxq->rx_free);
  800. rxq->free_count++;
  801. priv->alloc_rxb_page++;
  802. spin_unlock_irqrestore(&rxq->lock, flags);
  803. }
  804. }
  805. void iwlagn_rx_replenish(struct iwl_priv *priv)
  806. {
  807. unsigned long flags;
  808. iwlagn_rx_allocate(priv, GFP_KERNEL);
  809. spin_lock_irqsave(&priv->lock, flags);
  810. iwlagn_rx_queue_restock(priv);
  811. spin_unlock_irqrestore(&priv->lock, flags);
  812. }
  813. void iwlagn_rx_replenish_now(struct iwl_priv *priv)
  814. {
  815. iwlagn_rx_allocate(priv, GFP_ATOMIC);
  816. iwlagn_rx_queue_restock(priv);
  817. }
  818. /* Assumes that the page field of the buffers in 'pool' is kept accurate.
  819. * If a page has been detached, the pool entry must have its page set to NULL.
  820. * This free routine walks the list of pool entries and, if the page is
  821. * non-NULL, unmaps and frees it.
  822. */
  823. void iwlagn_rx_queue_free(struct iwl_priv *priv, struct iwl_rx_queue *rxq)
  824. {
  825. int i;
  826. for (i = 0; i < RX_QUEUE_SIZE + RX_FREE_BUFFERS; i++) {
  827. if (rxq->pool[i].page != NULL) {
  828. pci_unmap_page(priv->pci_dev, rxq->pool[i].page_dma,
  829. PAGE_SIZE << priv->hw_params.rx_page_order,
  830. PCI_DMA_FROMDEVICE);
  831. __iwl_free_pages(priv, rxq->pool[i].page);
  832. rxq->pool[i].page = NULL;
  833. }
  834. }
  835. dma_free_coherent(&priv->pci_dev->dev, 4 * RX_QUEUE_SIZE, rxq->bd,
  836. rxq->bd_dma);
  837. dma_free_coherent(&priv->pci_dev->dev, sizeof(struct iwl_rb_status),
  838. rxq->rb_stts, rxq->rb_stts_dma);
  839. rxq->bd = NULL;
  840. rxq->rb_stts = NULL;
  841. }
  842. int iwlagn_rxq_stop(struct iwl_priv *priv)
  843. {
  844. /* stop Rx DMA */
  845. iwl_write_direct32(priv, FH_MEM_RCSR_CHNL0_CONFIG_REG, 0);
  846. iwl_poll_direct_bit(priv, FH_MEM_RSSR_RX_STATUS_REG,
  847. FH_RSSR_CHNL0_RX_STATUS_CHNL_IDLE, 1000);
  848. return 0;
  849. }
  850. int iwlagn_hwrate_to_mac80211_idx(u32 rate_n_flags, enum ieee80211_band band)
  851. {
  852. int idx = 0;
  853. int band_offset = 0;
  854. /* HT rate format: mac80211 wants an MCS number, which is just LSB */
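/* (editorial example: an HT rate_n_flags value whose low byte is 7 is
 * reported to mac80211 as MCS index 7) */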
  855. if (rate_n_flags & RATE_MCS_HT_MSK) {
  856. idx = (rate_n_flags & 0xff);
  857. return idx;
  858. /* Legacy rate format, search for match in table */
  859. } else {
  860. if (band == IEEE80211_BAND_5GHZ)
  861. band_offset = IWL_FIRST_OFDM_RATE;
  862. for (idx = band_offset; idx < IWL_RATE_COUNT_LEGACY; idx++)
  863. if (iwl_rates[idx].plcp == (rate_n_flags & 0xFF))
  864. return idx - band_offset;
  865. }
  866. return -1;
  867. }
  868. /* Calc max signal level (dBm) among 3 possible receivers */
  869. static inline int iwlagn_calc_rssi(struct iwl_priv *priv,
  870. struct iwl_rx_phy_res *rx_resp)
  871. {
  872. return priv->cfg->ops->utils->calc_rssi(priv, rx_resp);
  873. }
  874. static u32 iwlagn_translate_rx_status(struct iwl_priv *priv, u32 decrypt_in)
  875. {
  876. u32 decrypt_out = 0;
  877. if ((decrypt_in & RX_RES_STATUS_STATION_FOUND) ==
  878. RX_RES_STATUS_STATION_FOUND)
  879. decrypt_out |= (RX_RES_STATUS_STATION_FOUND |
  880. RX_RES_STATUS_NO_STATION_INFO_MISMATCH);
  881. decrypt_out |= (decrypt_in & RX_RES_STATUS_SEC_TYPE_MSK);
  882. /* packet was not encrypted */
  883. if ((decrypt_in & RX_RES_STATUS_SEC_TYPE_MSK) ==
  884. RX_RES_STATUS_SEC_TYPE_NONE)
  885. return decrypt_out;
  886. /* packet was encrypted with unknown alg */
  887. if ((decrypt_in & RX_RES_STATUS_SEC_TYPE_MSK) ==
  888. RX_RES_STATUS_SEC_TYPE_ERR)
  889. return decrypt_out;
  890. /* decryption was not done in HW */
  891. if ((decrypt_in & RX_MPDU_RES_STATUS_DEC_DONE_MSK) !=
  892. RX_MPDU_RES_STATUS_DEC_DONE_MSK)
  893. return decrypt_out;
  894. switch (decrypt_in & RX_RES_STATUS_SEC_TYPE_MSK) {
  895. case RX_RES_STATUS_SEC_TYPE_CCMP:
  896. /* alg is CCM: check MIC only */
  897. if (!(decrypt_in & RX_MPDU_RES_STATUS_MIC_OK))
  898. /* Bad MIC */
  899. decrypt_out |= RX_RES_STATUS_BAD_ICV_MIC;
  900. else
  901. decrypt_out |= RX_RES_STATUS_DECRYPT_OK;
  902. break;
  903. case RX_RES_STATUS_SEC_TYPE_TKIP:
  904. if (!(decrypt_in & RX_MPDU_RES_STATUS_TTAK_OK)) {
  905. /* Bad TTAK */
  906. decrypt_out |= RX_RES_STATUS_BAD_KEY_TTAK;
  907. break;
  908. }
  909. /* fall through if TTAK OK */
  910. default:
  911. if (!(decrypt_in & RX_MPDU_RES_STATUS_ICV_OK))
  912. decrypt_out |= RX_RES_STATUS_BAD_ICV_MIC;
  913. else
  914. decrypt_out |= RX_RES_STATUS_DECRYPT_OK;
  915. break;
  916. }
  917. IWL_DEBUG_RX(priv, "decrypt_in:0x%x decrypt_out = 0x%x\n",
  918. decrypt_in, decrypt_out);
  919. return decrypt_out;
  920. }
  921. static void iwlagn_pass_packet_to_mac80211(struct iwl_priv *priv,
  922. struct ieee80211_hdr *hdr,
  923. u16 len,
  924. u32 ampdu_status,
  925. struct iwl_rx_mem_buffer *rxb,
  926. struct ieee80211_rx_status *stats)
  927. {
  928. struct sk_buff *skb;
  929. __le16 fc = hdr->frame_control;
  930. /* We only process data packets if the interface is open */
  931. if (unlikely(!priv->is_open)) {
  932. IWL_DEBUG_DROP_LIMIT(priv,
  933. "Dropping packet while interface is not open.\n");
  934. return;
  935. }
  936. /* In case of HW accelerated crypto and bad decryption, drop */
  937. if (!priv->cfg->mod_params->sw_crypto &&
  938. iwl_set_decrypted_flag(priv, hdr, ampdu_status, stats))
  939. return;
  940. skb = dev_alloc_skb(128);
  941. if (!skb) {
  942. IWL_ERR(priv, "dev_alloc_skb failed\n");
  943. return;
  944. }
  945. skb_add_rx_frag(skb, 0, rxb->page, (void *)hdr - rxb_addr(rxb), len);
  946. iwl_update_stats(priv, false, fc, len);
  947. memcpy(IEEE80211_SKB_RXCB(skb), stats, sizeof(*stats));
  948. ieee80211_rx(priv->hw, skb);
  949. priv->alloc_rxb_page--;
  950. rxb->page = NULL;
  951. }
  952. /* Called for REPLY_RX (legacy ABG frames), or
  953. * REPLY_RX_MPDU_CMD (HT high-throughput N frames). */
  954. void iwlagn_rx_reply_rx(struct iwl_priv *priv,
  955. struct iwl_rx_mem_buffer *rxb)
  956. {
  957. struct ieee80211_hdr *header;
  958. struct ieee80211_rx_status rx_status;
  959. struct iwl_rx_packet *pkt = rxb_addr(rxb);
  960. struct iwl_rx_phy_res *phy_res;
  961. __le32 rx_pkt_status;
  962. struct iwl_rx_mpdu_res_start *amsdu;
  963. u32 len;
  964. u32 ampdu_status;
  965. u32 rate_n_flags;
  966. /**
  967. * REPLY_RX and REPLY_RX_MPDU_CMD are handled differently.
  968. * REPLY_RX: physical layer info is in this buffer
  969. * REPLY_RX_MPDU_CMD: physical layer info was sent in separate
  970. * command and cached in priv->last_phy_res
  971. *
  972. * Here we set up local variables depending on which command is
  973. * received.
  974. */
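/*
 * Editorial sketch of the two buffer layouts parsed below (field order
 * only, not byte-accurate):
 *   REPLY_RX:          [iwl_rx_phy_res][cfg_phy data][802.11 frame][rx_pkt_status]
 *   REPLY_RX_MPDU_CMD: [iwl_rx_mpdu_res_start][802.11 frame][rx_pkt_status]
 */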
  975. if (pkt->hdr.cmd == REPLY_RX) {
  976. phy_res = (struct iwl_rx_phy_res *)pkt->u.raw;
  977. header = (struct ieee80211_hdr *)(pkt->u.raw + sizeof(*phy_res)
  978. + phy_res->cfg_phy_cnt);
  979. len = le16_to_cpu(phy_res->byte_count);
  980. rx_pkt_status = *(__le32 *)(pkt->u.raw + sizeof(*phy_res) +
  981. phy_res->cfg_phy_cnt + len);
  982. ampdu_status = le32_to_cpu(rx_pkt_status);
  983. } else {
  984. if (!priv->_agn.last_phy_res_valid) {
  985. IWL_ERR(priv, "MPDU frame without cached PHY data\n");
  986. return;
  987. }
  988. phy_res = &priv->_agn.last_phy_res;
  989. amsdu = (struct iwl_rx_mpdu_res_start *)pkt->u.raw;
  990. header = (struct ieee80211_hdr *)(pkt->u.raw + sizeof(*amsdu));
  991. len = le16_to_cpu(amsdu->byte_count);
  992. rx_pkt_status = *(__le32 *)(pkt->u.raw + sizeof(*amsdu) + len);
  993. ampdu_status = iwlagn_translate_rx_status(priv,
  994. le32_to_cpu(rx_pkt_status));
  995. }
  996. if ((unlikely(phy_res->cfg_phy_cnt > 20))) {
  997. IWL_DEBUG_DROP(priv, "dsp size out of range [0,20]: %d\n",
  998. phy_res->cfg_phy_cnt);
  999. return;
  1000. }
  1001. if (!(rx_pkt_status & RX_RES_STATUS_NO_CRC32_ERROR) ||
  1002. !(rx_pkt_status & RX_RES_STATUS_NO_RXE_OVERFLOW)) {
  1003. IWL_DEBUG_RX(priv, "Bad CRC or FIFO: 0x%08X.\n",
  1004. le32_to_cpu(rx_pkt_status));
  1005. return;
  1006. }
  1007. /* This will be used in several places later */
  1008. rate_n_flags = le32_to_cpu(phy_res->rate_n_flags);
  1009. /* rx_status carries information about the packet to mac80211 */
  1010. rx_status.mactime = le64_to_cpu(phy_res->timestamp);
  1011. rx_status.band = (phy_res->phy_flags & RX_RES_PHY_FLAGS_BAND_24_MSK) ?
  1012. IEEE80211_BAND_2GHZ : IEEE80211_BAND_5GHZ;
  1013. rx_status.freq =
  1014. ieee80211_channel_to_frequency(le16_to_cpu(phy_res->channel),
  1015. rx_status.band);
  1016. rx_status.rate_idx =
  1017. iwlagn_hwrate_to_mac80211_idx(rate_n_flags, rx_status.band);
  1018. rx_status.flag = 0;
  1019. /* TSF isn't reliable. In order to allow smooth user experience,
  1020. * this W/A doesn't propagate it to the mac80211 */
  1021. /*rx_status.flag |= RX_FLAG_MACTIME_MPDU;*/
  1022. priv->ucode_beacon_time = le32_to_cpu(phy_res->beacon_time_stamp);
  1023. /* Find max signal strength (dBm) among 3 antenna/receiver chains */
  1024. rx_status.signal = iwlagn_calc_rssi(priv, phy_res);
  1025. iwl_dbg_log_rx_data_frame(priv, len, header);
  1026. IWL_DEBUG_STATS_LIMIT(priv, "Rssi %d, TSF %llu\n",
  1027. rx_status.signal, (unsigned long long)rx_status.mactime);
  1028. /*
  1029. * "antenna number"
  1030. *
  1031. * It seems that the antenna field in the phy flags value
  1032. * is actually a bit field. This is undefined by radiotap,
  1033. * it wants an actual antenna number but I always get "7"
  1034. * for most legacy frames I receive indicating that the
  1035. * same frame was received on all three RX chains.
  1036. *
  1037. * I think this field should be removed in favor of a
  1038. * new 802.11n radiotap field "RX chains" that is defined
  1039. * as a bitmask.
  1040. */
  1041. rx_status.antenna =
  1042. (le16_to_cpu(phy_res->phy_flags) & RX_RES_PHY_FLAGS_ANTENNA_MSK)
  1043. >> RX_RES_PHY_FLAGS_ANTENNA_POS;
  1044. /* set the preamble flag if appropriate */
  1045. if (phy_res->phy_flags & RX_RES_PHY_FLAGS_SHORT_PREAMBLE_MSK)
  1046. rx_status.flag |= RX_FLAG_SHORTPRE;
  1047. /* Set up the HT phy flags */
  1048. if (rate_n_flags & RATE_MCS_HT_MSK)
  1049. rx_status.flag |= RX_FLAG_HT;
  1050. if (rate_n_flags & RATE_MCS_HT40_MSK)
  1051. rx_status.flag |= RX_FLAG_40MHZ;
  1052. if (rate_n_flags & RATE_MCS_SGI_MSK)
  1053. rx_status.flag |= RX_FLAG_SHORT_GI;
  1054. iwlagn_pass_packet_to_mac80211(priv, header, len, ampdu_status,
  1055. rxb, &rx_status);
  1056. }
  1057. /* Cache phy data (Rx signal strength, etc) for HT frame (REPLY_RX_PHY_CMD).
  1058. * This will be used later in iwl_rx_reply_rx() for REPLY_RX_MPDU_CMD. */
  1059. void iwlagn_rx_reply_rx_phy(struct iwl_priv *priv,
  1060. struct iwl_rx_mem_buffer *rxb)
  1061. {
  1062. struct iwl_rx_packet *pkt = rxb_addr(rxb);
  1063. priv->_agn.last_phy_res_valid = true;
  1064. memcpy(&priv->_agn.last_phy_res, pkt->u.raw,
  1065. sizeof(struct iwl_rx_phy_res));
  1066. }
  1067. static int iwl_get_single_channel_for_scan(struct iwl_priv *priv,
  1068. struct ieee80211_vif *vif,
  1069. enum ieee80211_band band,
  1070. struct iwl_scan_channel *scan_ch)
  1071. {
  1072. const struct ieee80211_supported_band *sband;
  1073. u16 passive_dwell = 0;
  1074. u16 active_dwell = 0;
  1075. int added = 0;
  1076. u16 channel = 0;
  1077. sband = iwl_get_hw_mode(priv, band);
  1078. if (!sband) {
  1079. IWL_ERR(priv, "invalid band\n");
  1080. return added;
  1081. }
  1082. active_dwell = iwl_get_active_dwell_time(priv, band, 0);
  1083. passive_dwell = iwl_get_passive_dwell_time(priv, band, vif);
  1084. if (passive_dwell <= active_dwell)
  1085. passive_dwell = active_dwell + 1;
  1086. channel = iwl_get_single_channel_number(priv, band);
  1087. if (channel) {
  1088. scan_ch->channel = cpu_to_le16(channel);
  1089. scan_ch->type = SCAN_CHANNEL_TYPE_PASSIVE;
  1090. scan_ch->active_dwell = cpu_to_le16(active_dwell);
  1091. scan_ch->passive_dwell = cpu_to_le16(passive_dwell);
  1092. /* Set txpower levels to defaults */
  1093. scan_ch->dsp_atten = 110;
  1094. if (band == IEEE80211_BAND_5GHZ)
  1095. scan_ch->tx_gain = ((1 << 5) | (3 << 3)) | 3;
  1096. else
  1097. scan_ch->tx_gain = ((1 << 5) | (5 << 3));
  1098. added++;
  1099. } else
  1100. IWL_ERR(priv, "no valid channel found\n");
  1101. return added;
  1102. }
  1103. static int iwl_get_channels_for_scan(struct iwl_priv *priv,
  1104. struct ieee80211_vif *vif,
  1105. enum ieee80211_band band,
  1106. u8 is_active, u8 n_probes,
  1107. struct iwl_scan_channel *scan_ch)
  1108. {
  1109. struct ieee80211_channel *chan;
  1110. const struct ieee80211_supported_band *sband;
  1111. const struct iwl_channel_info *ch_info;
  1112. u16 passive_dwell = 0;
  1113. u16 active_dwell = 0;
  1114. int added, i;
  1115. u16 channel;
  1116. sband = iwl_get_hw_mode(priv, band);
  1117. if (!sband)
  1118. return 0;
  1119. active_dwell = iwl_get_active_dwell_time(priv, band, n_probes);
  1120. passive_dwell = iwl_get_passive_dwell_time(priv, band, vif);
  1121. if (passive_dwell <= active_dwell)
  1122. passive_dwell = active_dwell + 1;
  1123. for (i = 0, added = 0; i < priv->scan_request->n_channels; i++) {
  1124. chan = priv->scan_request->channels[i];
  1125. if (chan->band != band)
  1126. continue;
  1127. channel = chan->hw_value;
  1128. scan_ch->channel = cpu_to_le16(channel);
  1129. ch_info = iwl_get_channel_info(priv, band, channel);
  1130. if (!is_channel_valid(ch_info)) {
  1131. IWL_DEBUG_SCAN(priv, "Channel %d is INVALID for this band.\n",
  1132. channel);
  1133. continue;
  1134. }
  1135. if (!is_active || is_channel_passive(ch_info) ||
  1136. (chan->flags & IEEE80211_CHAN_PASSIVE_SCAN))
  1137. scan_ch->type = SCAN_CHANNEL_TYPE_PASSIVE;
  1138. else
  1139. scan_ch->type = SCAN_CHANNEL_TYPE_ACTIVE;
  1140. if (n_probes)
  1141. scan_ch->type |= IWL_SCAN_PROBE_MASK(n_probes);
  1142. scan_ch->active_dwell = cpu_to_le16(active_dwell);
  1143. scan_ch->passive_dwell = cpu_to_le16(passive_dwell);
  1144. /* Set txpower levels to defaults */
  1145. scan_ch->dsp_atten = 110;
  1146. /* NOTE: if we were doing 6Mb OFDM for scans we'd use
  1147. * power level:
  1148. * scan_ch->tx_gain = ((1 << 5) | (2 << 3)) | 3;
  1149. */
  1150. if (band == IEEE80211_BAND_5GHZ)
  1151. scan_ch->tx_gain = ((1 << 5) | (3 << 3)) | 3;
  1152. else
  1153. scan_ch->tx_gain = ((1 << 5) | (5 << 3));
  1154. IWL_DEBUG_SCAN(priv, "Scanning ch=%d prob=0x%X [%s %d]\n",
  1155. channel, le32_to_cpu(scan_ch->type),
  1156. (scan_ch->type & SCAN_CHANNEL_TYPE_ACTIVE) ?
  1157. "ACTIVE" : "PASSIVE",
  1158. (scan_ch->type & SCAN_CHANNEL_TYPE_ACTIVE) ?
  1159. active_dwell : passive_dwell);
  1160. scan_ch++;
  1161. added++;
  1162. }
  1163. IWL_DEBUG_SCAN(priv, "total channels to scan %d\n", added);
  1164. return added;
  1165. }
  1166. int iwlagn_request_scan(struct iwl_priv *priv, struct ieee80211_vif *vif)
  1167. {
  1168. struct iwl_host_cmd cmd = {
  1169. .id = REPLY_SCAN_CMD,
  1170. .len = sizeof(struct iwl_scan_cmd),
  1171. .flags = CMD_SIZE_HUGE,
  1172. };
  1173. struct iwl_scan_cmd *scan;
  1174. struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
  1175. u32 rate_flags = 0;
  1176. u16 cmd_len;
  1177. u16 rx_chain = 0;
  1178. enum ieee80211_band band;
  1179. u8 n_probes = 0;
  1180. u8 rx_ant = priv->hw_params.valid_rx_ant;
  1181. u8 rate;
  1182. bool is_active = false;
  1183. int chan_mod;
  1184. u8 active_chains;
  1185. u8 scan_tx_antennas = priv->hw_params.valid_tx_ant;
  1186. int ret;
  1187. lockdep_assert_held(&priv->mutex);
  1188. if (vif)
  1189. ctx = iwl_rxon_ctx_from_vif(vif);
  1190. if (!priv->scan_cmd) {
  1191. priv->scan_cmd = kmalloc(sizeof(struct iwl_scan_cmd) +
  1192. IWL_MAX_SCAN_SIZE, GFP_KERNEL);
  1193. if (!priv->scan_cmd) {
  1194. IWL_DEBUG_SCAN(priv,
  1195. "fail to allocate memory for scan\n");
  1196. return -ENOMEM;
  1197. }
  1198. }
  1199. scan = priv->scan_cmd;
  1200. memset(scan, 0, sizeof(struct iwl_scan_cmd) + IWL_MAX_SCAN_SIZE);
  1201. scan->quiet_plcp_th = IWL_PLCP_QUIET_THRESH;
  1202. scan->quiet_time = IWL_ACTIVE_QUIET_TIME;
  1203. if (iwl_is_any_associated(priv)) {
  1204. u16 interval = 0;
  1205. u32 extra;
  1206. u32 suspend_time = 100;
  1207. u32 scan_suspend_time = 100;
  1208. IWL_DEBUG_INFO(priv, "Scanning while associated...\n");
  1209. if (priv->is_internal_short_scan)
  1210. interval = 0;
  1211. else
  1212. interval = vif->bss_conf.beacon_int;
  1213. scan->suspend_time = 0;
  1214. scan->max_out_time = cpu_to_le32(200 * 1024);
  1215. if (!interval)
  1216. interval = suspend_time;
  1217. extra = (suspend_time / interval) << 22;
  1218. scan_suspend_time = (extra |
  1219. ((suspend_time % interval) * 1024));
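/*
 * Editorial worked example (illustrative values; the low field is assumed
 * to be in usec, i.e. TU * 1024): with suspend_time = 100 and a beacon
 * interval of 100, the quotient 1 goes into bits 22+ and the remainder 0
 * into the low field, giving scan_suspend_time = 0x00400000.
 */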
		scan->suspend_time = cpu_to_le32(scan_suspend_time);
		IWL_DEBUG_SCAN(priv, "suspend_time 0x%X beacon interval %d\n",
			       scan_suspend_time, interval);
	}

	if (priv->is_internal_short_scan) {
		IWL_DEBUG_SCAN(priv, "Start internal passive scan.\n");
	} else if (priv->scan_request->n_ssids) {
		int i, p = 0;
		IWL_DEBUG_SCAN(priv, "Kicking off active scan\n");
		for (i = 0; i < priv->scan_request->n_ssids; i++) {
			/* always does wildcard anyway */
			if (!priv->scan_request->ssids[i].ssid_len)
				continue;
			scan->direct_scan[p].id = WLAN_EID_SSID;
			scan->direct_scan[p].len =
				priv->scan_request->ssids[i].ssid_len;
			memcpy(scan->direct_scan[p].ssid,
			       priv->scan_request->ssids[i].ssid,
			       priv->scan_request->ssids[i].ssid_len);
			n_probes++;
			p++;
		}
		is_active = true;
	} else
		IWL_DEBUG_SCAN(priv, "Start passive scan.\n");

	scan->tx_cmd.tx_flags = TX_CMD_FLG_SEQ_CTL_MSK;
	scan->tx_cmd.sta_id = ctx->bcast_sta_id;
	scan->tx_cmd.stop_time.life_time = TX_CMD_LIFE_TIME_INFINITE;

	switch (priv->scan_band) {
	case IEEE80211_BAND_2GHZ:
		scan->flags = RXON_FLG_BAND_24G_MSK | RXON_FLG_AUTO_DETECT_MSK;
		chan_mod = le32_to_cpu(
			priv->contexts[IWL_RXON_CTX_BSS].active.flags &
			RXON_FLG_CHANNEL_MODE_MSK)
			>> RXON_FLG_CHANNEL_MODE_POS;
		if (chan_mod == CHANNEL_MODE_PURE_40) {
			rate = IWL_RATE_6M_PLCP;
		} else {
			rate = IWL_RATE_1M_PLCP;
			rate_flags = RATE_MCS_CCK_MSK;
		}
		/*
		 * Internal scans are passive, so we can indiscriminately set
		 * the BT ignore flag on 2.4 GHz since it applies to TX only.
		 */
		if (priv->cfg->bt_params &&
		    priv->cfg->bt_params->advanced_bt_coexist)
			scan->tx_cmd.tx_flags |= TX_CMD_FLG_IGNORE_BT;
		break;
	case IEEE80211_BAND_5GHZ:
		rate = IWL_RATE_6M_PLCP;
		break;
	default:
		IWL_WARN(priv, "Invalid scan band\n");
		return -EIO;
	}

	/*
	 * If active scanning is requested but a certain channel is
	 * marked passive, we can do active scanning if we detect
	 * transmissions.
	 *
	 * There is an issue with some firmware versions that triggers
	 * a sysassert on a "good CRC threshold" of zero (== disabled),
	 * on a radar channel even though this means that we should NOT
	 * send probes.
	 *
	 * The "good CRC threshold" is the number of frames that we
	 * need to receive during our dwell time on a channel before
	 * sending out probes -- setting this to a huge value will
	 * mean we never reach it, but at the same time work around
	 * the aforementioned issue. Thus use IWL_GOOD_CRC_TH_NEVER
	 * here instead of IWL_GOOD_CRC_TH_DISABLED.
	 */
	scan->good_CRC_th = is_active ? IWL_GOOD_CRC_TH_DEFAULT :
					IWL_GOOD_CRC_TH_NEVER;

	band = priv->scan_band;

	if (priv->cfg->scan_rx_antennas[band])
		rx_ant = priv->cfg->scan_rx_antennas[band];

	if (band == IEEE80211_BAND_2GHZ &&
	    priv->cfg->bt_params &&
	    priv->cfg->bt_params->advanced_bt_coexist) {
		/* transmit 2.4 GHz probes only on first antenna */
		scan_tx_antennas = first_antenna(scan_tx_antennas);
	}

	priv->scan_tx_ant[band] = iwl_toggle_tx_ant(priv, priv->scan_tx_ant[band],
						    scan_tx_antennas);
	rate_flags |= iwl_ant_idx_to_flags(priv->scan_tx_ant[band]);
	scan->tx_cmd.rate_n_flags = iwl_hw_set_rate_n_flags(rate, rate_flags);

	/* In power save mode use one chain, otherwise use all chains */
	if (test_bit(STATUS_POWER_PMI, &priv->status)) {
		/* rx_ant has been set to all valid chains previously */
		active_chains = rx_ant &
				((u8)(priv->chain_noise_data.active_chains));
		if (!active_chains)
			active_chains = rx_ant;

		IWL_DEBUG_SCAN(priv, "chain_noise_data.active_chains: %u\n",
			       priv->chain_noise_data.active_chains);

		rx_ant = first_antenna(active_chains);
	}
	if (priv->cfg->bt_params &&
	    priv->cfg->bt_params->advanced_bt_coexist &&
	    priv->bt_full_concurrent) {
		/* operated as 1x1 in full concurrency mode */
		rx_ant = first_antenna(rx_ant);
	}

	/* MIMO is not used here, but value is required */
	rx_chain |= priv->hw_params.valid_rx_ant << RXON_RX_CHAIN_VALID_POS;
	rx_chain |= rx_ant << RXON_RX_CHAIN_FORCE_MIMO_SEL_POS;
	rx_chain |= rx_ant << RXON_RX_CHAIN_FORCE_SEL_POS;
	rx_chain |= 0x1 << RXON_RX_CHAIN_DRIVER_FORCE_POS;
	scan->rx_chain = cpu_to_le16(rx_chain);

	if (!priv->is_internal_short_scan) {
		cmd_len = iwl_fill_probe_req(priv,
					(struct ieee80211_mgmt *)scan->data,
					vif->addr,
					priv->scan_request->ie,
					priv->scan_request->ie_len,
					IWL_MAX_SCAN_SIZE - sizeof(*scan));
	} else {
		/* use bcast addr, will not be transmitted but must be valid */
		cmd_len = iwl_fill_probe_req(priv,
					(struct ieee80211_mgmt *)scan->data,
					iwl_bcast_addr, NULL, 0,
					IWL_MAX_SCAN_SIZE - sizeof(*scan));
	}
	scan->tx_cmd.len = cpu_to_le16(cmd_len);

	scan->filter_flags |= (RXON_FILTER_ACCEPT_GRP_MSK |
			       RXON_FILTER_BCON_AWARE_MSK);

	if (priv->is_internal_short_scan) {
		scan->channel_count =
			iwl_get_single_channel_for_scan(priv, vif, band,
				(void *)&scan->data[le16_to_cpu(
				scan->tx_cmd.len)]);
	} else {
		scan->channel_count =
			iwl_get_channels_for_scan(priv, vif, band,
				is_active, n_probes,
				(void *)&scan->data[le16_to_cpu(
				scan->tx_cmd.len)]);
	}
	if (scan->channel_count == 0) {
		IWL_DEBUG_SCAN(priv, "channel count %d\n", scan->channel_count);
		return -EIO;
	}

	cmd.len += le16_to_cpu(scan->tx_cmd.len) +
	    scan->channel_count * sizeof(struct iwl_scan_channel);
	cmd.data = scan;
	scan->len = cpu_to_le16(cmd.len);

	/* set scan bit here for PAN params */
	set_bit(STATUS_SCAN_HW, &priv->status);

	if (priv->cfg->ops->hcmd->set_pan_params) {
		ret = priv->cfg->ops->hcmd->set_pan_params(priv);
		if (ret)
			return ret;
	}

	ret = iwl_send_cmd_sync(priv, &cmd);
	if (ret) {
		clear_bit(STATUS_SCAN_HW, &priv->status);
		if (priv->cfg->ops->hcmd->set_pan_params)
			priv->cfg->ops->hcmd->set_pan_params(priv);
	}

	return ret;
}

int iwlagn_manage_ibss_station(struct iwl_priv *priv,
			       struct ieee80211_vif *vif, bool add)
{
	struct iwl_vif_priv *vif_priv = (void *)vif->drv_priv;

	if (add)
		return iwlagn_add_bssid_station(priv, vif_priv->ctx,
						vif->bss_conf.bssid,
						&vif_priv->ibss_bssid_sta_id);
	return iwl_remove_station(priv, vif_priv->ibss_bssid_sta_id,
				  vif->bss_conf.bssid);
}

void iwl_free_tfds_in_queue(struct iwl_priv *priv,
			    int sta_id, int tid, int freed)
{
	lockdep_assert_held(&priv->sta_lock);

	if (priv->stations[sta_id].tid[tid].tfds_in_queue >= freed)
		priv->stations[sta_id].tid[tid].tfds_in_queue -= freed;
	else {
		IWL_DEBUG_TX(priv, "free more than tfds_in_queue (%u:%d)\n",
			priv->stations[sta_id].tid[tid].tfds_in_queue,
			freed);
		priv->stations[sta_id].tid[tid].tfds_in_queue = 0;
	}
}

#define IWL_FLUSH_WAIT_MS	2000

int iwlagn_wait_tx_queue_empty(struct iwl_priv *priv)
{
	struct iwl_tx_queue *txq;
	struct iwl_queue *q;
	int cnt;
	unsigned long now = jiffies;
	int ret = 0;

	/* waiting for all the tx frames complete might take a while */
	for (cnt = 0; cnt < priv->hw_params.max_txq_num; cnt++) {
		if (cnt == priv->cmd_queue)
			continue;
		txq = &priv->txq[cnt];
		q = &txq->q;
		while (q->read_ptr != q->write_ptr && !time_after(jiffies,
		       now + msecs_to_jiffies(IWL_FLUSH_WAIT_MS)))
			msleep(1);

		if (q->read_ptr != q->write_ptr) {
			IWL_ERR(priv, "fail to flush all tx fifo queues\n");
			ret = -ETIMEDOUT;
			break;
		}
	}
	return ret;
}

#define IWL_TX_QUEUE_MSK	0xfffff

/**
 * iwlagn_txfifo_flush: send REPLY_TXFIFO_FLUSH command to uCode
 *
 * pre-requirements:
 *	1. acquire mutex before calling
 *	2. make sure rf is on and not in exit state
 */
int iwlagn_txfifo_flush(struct iwl_priv *priv, u16 flush_control)
{
	struct iwl_txfifo_flush_cmd flush_cmd;
	struct iwl_host_cmd cmd = {
		.id = REPLY_TXFIFO_FLUSH,
		.len = sizeof(struct iwl_txfifo_flush_cmd),
		.flags = CMD_SYNC,
		.data = &flush_cmd,
	};

	might_sleep();

	memset(&flush_cmd, 0, sizeof(flush_cmd));
	flush_cmd.fifo_control = IWL_TX_FIFO_VO_MSK | IWL_TX_FIFO_VI_MSK |
				 IWL_TX_FIFO_BE_MSK | IWL_TX_FIFO_BK_MSK;
	if (priv->cfg->sku & IWL_SKU_N)
		flush_cmd.fifo_control |= IWL_AGG_TX_QUEUE_MSK;

	IWL_DEBUG_INFO(priv, "fifo queue control: 0X%x\n",
		       flush_cmd.fifo_control);
	flush_cmd.flush_control = cpu_to_le16(flush_control);

	return iwl_send_cmd(priv, &cmd);
}

void iwlagn_dev_txfifo_flush(struct iwl_priv *priv, u16 flush_control)
{
	mutex_lock(&priv->mutex);
	ieee80211_stop_queues(priv->hw);
	if (priv->cfg->ops->lib->txfifo_flush(priv, IWL_DROP_ALL)) {
		IWL_ERR(priv, "flush request fail\n");
		goto done;
	}
	IWL_DEBUG_INFO(priv, "wait transmit/flush all frames\n");
	iwlagn_wait_tx_queue_empty(priv);
done:
	ieee80211_wake_queues(priv->hw);
	mutex_unlock(&priv->mutex);
}

/*
 * BT coex
 */
/*
 * Macros to access the lookup table.
 *
 * The lookup table has 7 inputs: bt3_prio, bt3_txrx, bt_rf_act, wifi_req,
 * wifi_prio, wifi_txrx and wifi_sh_ant_req.
 *
 * It has three outputs: WLAN_ACTIVE, WLAN_KILL and ANT_SWITCH
 *
 * The format is that "registers" 8 through 11 contain the WLAN_ACTIVE bits
 * one after another in 32-bit registers, and "registers" 0 through 7 contain
 * the WLAN_KILL and ANT_SWITCH bits interleaved (in that order).
 *
 * These macros encode that format.
 */
#define LUT_VALUE(bt3_prio, bt3_txrx, bt_rf_act, wifi_req, wifi_prio, \
		  wifi_txrx, wifi_sh_ant_req) \
	(bt3_prio | (bt3_txrx << 1) | (bt_rf_act << 2) | (wifi_req << 3) | \
	(wifi_prio << 4) | (wifi_txrx << 5) | (wifi_sh_ant_req << 6))

#define LUT_PTA_WLAN_ACTIVE_OP(lut, op, val) \
	lut[8 + ((val) >> 5)] op (cpu_to_le32(BIT((val) & 0x1f)))
#define LUT_TEST_PTA_WLAN_ACTIVE(lut, bt3_prio, bt3_txrx, bt_rf_act, wifi_req, \
				 wifi_prio, wifi_txrx, wifi_sh_ant_req) \
	(!!(LUT_PTA_WLAN_ACTIVE_OP(lut, &, LUT_VALUE(bt3_prio, bt3_txrx, \
				   bt_rf_act, wifi_req, wifi_prio, wifi_txrx, \
				   wifi_sh_ant_req))))
#define LUT_SET_PTA_WLAN_ACTIVE(lut, bt3_prio, bt3_txrx, bt_rf_act, wifi_req, \
				wifi_prio, wifi_txrx, wifi_sh_ant_req) \
	LUT_PTA_WLAN_ACTIVE_OP(lut, |=, LUT_VALUE(bt3_prio, bt3_txrx, \
			       bt_rf_act, wifi_req, wifi_prio, wifi_txrx, \
			       wifi_sh_ant_req))
#define LUT_CLEAR_PTA_WLAN_ACTIVE(lut, bt3_prio, bt3_txrx, bt_rf_act, \
				  wifi_req, wifi_prio, wifi_txrx, \
				  wifi_sh_ant_req) \
	LUT_PTA_WLAN_ACTIVE_OP(lut, &= ~, LUT_VALUE(bt3_prio, bt3_txrx, \
			       bt_rf_act, wifi_req, wifi_prio, wifi_txrx, \
			       wifi_sh_ant_req))

#define LUT_WLAN_KILL_OP(lut, op, val) \
	lut[(val) >> 4] op (cpu_to_le32(BIT(((val) << 1) & 0x1e)))
#define LUT_TEST_WLAN_KILL(lut, bt3_prio, bt3_txrx, bt_rf_act, wifi_req, \
			   wifi_prio, wifi_txrx, wifi_sh_ant_req) \
	(!!(LUT_WLAN_KILL_OP(lut, &, LUT_VALUE(bt3_prio, bt3_txrx, bt_rf_act, \
			     wifi_req, wifi_prio, wifi_txrx, wifi_sh_ant_req))))
#define LUT_SET_WLAN_KILL(lut, bt3_prio, bt3_txrx, bt_rf_act, wifi_req, \
			  wifi_prio, wifi_txrx, wifi_sh_ant_req) \
	LUT_WLAN_KILL_OP(lut, |=, LUT_VALUE(bt3_prio, bt3_txrx, bt_rf_act, \
			 wifi_req, wifi_prio, wifi_txrx, wifi_sh_ant_req))
#define LUT_CLEAR_WLAN_KILL(lut, bt3_prio, bt3_txrx, bt_rf_act, wifi_req, \
			    wifi_prio, wifi_txrx, wifi_sh_ant_req) \
	LUT_WLAN_KILL_OP(lut, &= ~, LUT_VALUE(bt3_prio, bt3_txrx, bt_rf_act, \
			 wifi_req, wifi_prio, wifi_txrx, wifi_sh_ant_req))

#define LUT_ANT_SWITCH_OP(lut, op, val) \
	lut[(val) >> 4] op (cpu_to_le32(BIT((((val) << 1) & 0x1e) + 1)))
#define LUT_TEST_ANT_SWITCH(lut, bt3_prio, bt3_txrx, bt_rf_act, wifi_req, \
			    wifi_prio, wifi_txrx, wifi_sh_ant_req) \
	(!!(LUT_ANT_SWITCH_OP(lut, &, LUT_VALUE(bt3_prio, bt3_txrx, bt_rf_act, \
			      wifi_req, wifi_prio, wifi_txrx, \
			      wifi_sh_ant_req))))
#define LUT_SET_ANT_SWITCH(lut, bt3_prio, bt3_txrx, bt_rf_act, wifi_req, \
			   wifi_prio, wifi_txrx, wifi_sh_ant_req) \
	LUT_ANT_SWITCH_OP(lut, |=, LUT_VALUE(bt3_prio, bt3_txrx, bt_rf_act, \
			  wifi_req, wifi_prio, wifi_txrx, wifi_sh_ant_req))
#define LUT_CLEAR_ANT_SWITCH(lut, bt3_prio, bt3_txrx, bt_rf_act, wifi_req, \
			     wifi_prio, wifi_txrx, wifi_sh_ant_req) \
	LUT_ANT_SWITCH_OP(lut, &= ~, LUT_VALUE(bt3_prio, bt3_txrx, bt_rf_act, \
			  wifi_req, wifi_prio, wifi_txrx, wifi_sh_ant_req))
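
/*
 * Worked example (illustrative, not part of the original macros): for the
 * input tuple bt3_prio = 1, bt_rf_act = 1 and all other inputs 0,
 * LUT_VALUE() yields 1 | (1 << 2) = 5, so
 *	- the WLAN_ACTIVE bit lives in lut[8 + (5 >> 5)] = lut[8],
 *	  at bit (5 & 0x1f) = 5,
 *	- the WLAN_KILL bit lives in lut[5 >> 4] = lut[0],
 *	  at bit ((5 << 1) & 0x1e) = 10,
 *	- the ANT_SWITCH bit lives in lut[0], at bit 11 (one above WLAN_KILL),
 * which matches the layout described above: registers 8-11 hold WLAN_ACTIVE,
 * registers 0-7 hold WLAN_KILL and ANT_SWITCH interleaved.
 */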
static const __le32 iwlagn_def_3w_lookup[12] = {
	cpu_to_le32(0xaaaaaaaa),
	cpu_to_le32(0xaaaaaaaa),
	cpu_to_le32(0xaeaaaaaa),
	cpu_to_le32(0xaaaaaaaa),
	cpu_to_le32(0xcc00ff28),
	cpu_to_le32(0x0000aaaa),
	cpu_to_le32(0xcc00aaaa),
	cpu_to_le32(0x0000aaaa),
	cpu_to_le32(0xc0004000),
	cpu_to_le32(0x00004000),
	cpu_to_le32(0xf0005000),
	cpu_to_le32(0xf0005000),
};

static const __le32 iwlagn_concurrent_lookup[12] = {
	cpu_to_le32(0xaaaaaaaa),
	cpu_to_le32(0xaaaaaaaa),
	cpu_to_le32(0xaaaaaaaa),
	cpu_to_le32(0xaaaaaaaa),
	cpu_to_le32(0xaaaaaaaa),
	cpu_to_le32(0xaaaaaaaa),
	cpu_to_le32(0xaaaaaaaa),
	cpu_to_le32(0xaaaaaaaa),
	cpu_to_le32(0x00000000),
	cpu_to_le32(0x00000000),
	cpu_to_le32(0x00000000),
	cpu_to_le32(0x00000000),
};

void iwlagn_send_advance_bt_config(struct iwl_priv *priv)
{
	struct iwl_basic_bt_cmd basic = {
		.max_kill = IWLAGN_BT_MAX_KILL_DEFAULT,
		.bt3_timer_t7_value = IWLAGN_BT3_T7_DEFAULT,
		.bt3_prio_sample_time = IWLAGN_BT3_PRIO_SAMPLE_DEFAULT,
		.bt3_timer_t2_value = IWLAGN_BT3_T2_DEFAULT,
	};
	struct iwl6000_bt_cmd bt_cmd_6000;
	struct iwl2000_bt_cmd bt_cmd_2000;
	int ret;

	BUILD_BUG_ON(sizeof(iwlagn_def_3w_lookup) !=
			sizeof(basic.bt3_lookup_table));

	if (priv->cfg->bt_params) {
		if (priv->cfg->bt_params->bt_session_2) {
			bt_cmd_2000.prio_boost = cpu_to_le32(
				priv->cfg->bt_params->bt_prio_boost);
			bt_cmd_2000.tx_prio_boost = 0;
			bt_cmd_2000.rx_prio_boost = 0;
		} else {
			bt_cmd_6000.prio_boost =
				priv->cfg->bt_params->bt_prio_boost;
			bt_cmd_6000.tx_prio_boost = 0;
			bt_cmd_6000.rx_prio_boost = 0;
		}
	} else {
		IWL_ERR(priv, "failed to construct BT Coex Config\n");
		return;
	}

	basic.kill_ack_mask = priv->kill_ack_mask;
	basic.kill_cts_mask = priv->kill_cts_mask;
	basic.valid = priv->bt_valid;

	/*
	 * Configure BT coex mode to "no coexistence" when the
	 * user disabled BT coexistence, we have no interface
	 * (might be in monitor mode), or the interface is in
	 * IBSS mode (no proper uCode support for coex then).
	 */
	if (!bt_coex_active || priv->iw_mode == NL80211_IFTYPE_ADHOC) {
		basic.flags = IWLAGN_BT_FLAG_COEX_MODE_DISABLED;
	} else {
		basic.flags = IWLAGN_BT_FLAG_COEX_MODE_3W <<
					IWLAGN_BT_FLAG_COEX_MODE_SHIFT;
		if (priv->cfg->bt_params &&
		    priv->cfg->bt_params->bt_sco_disable)
			basic.flags |= IWLAGN_BT_FLAG_SYNC_2_BT_DISABLE;

		if (priv->bt_ch_announce)
			basic.flags |= IWLAGN_BT_FLAG_CHANNEL_INHIBITION;
		IWL_DEBUG_INFO(priv, "BT coex flag: 0X%x\n", basic.flags);
	}
	priv->bt_enable_flag = basic.flags;
	if (priv->bt_full_concurrent)
		memcpy(basic.bt3_lookup_table, iwlagn_concurrent_lookup,
			sizeof(iwlagn_concurrent_lookup));
	else
		memcpy(basic.bt3_lookup_table, iwlagn_def_3w_lookup,
			sizeof(iwlagn_def_3w_lookup));

	IWL_DEBUG_INFO(priv, "BT coex %s in %s mode\n",
		       basic.flags ? "active" : "disabled",
		       priv->bt_full_concurrent ?
		       "full concurrency" : "3-wire");

	if (priv->cfg->bt_params->bt_session_2) {
		memcpy(&bt_cmd_2000.basic, &basic,
			sizeof(basic));
		ret = iwl_send_cmd_pdu(priv, REPLY_BT_CONFIG,
			sizeof(bt_cmd_2000), &bt_cmd_2000);
	} else {
		memcpy(&bt_cmd_6000.basic, &basic,
			sizeof(basic));
		ret = iwl_send_cmd_pdu(priv, REPLY_BT_CONFIG,
			sizeof(bt_cmd_6000), &bt_cmd_6000);
	}
	if (ret)
		IWL_ERR(priv, "failed to send BT Coex Config\n");
}

static void iwlagn_bt_traffic_change_work(struct work_struct *work)
{
	struct iwl_priv *priv =
		container_of(work, struct iwl_priv, bt_traffic_change_work);
	struct iwl_rxon_context *ctx;
	int smps_request = -1;

	if (priv->bt_enable_flag == IWLAGN_BT_FLAG_COEX_MODE_DISABLED) {
		/* bt coex disabled */
		return;
	}

	/*
	 * Note: bt_traffic_load can be overridden by scan complete and
	 * coex profile notifications. Ignore that since only bad consequence
	 * can be not matching debug print with actual state.
	 */
	IWL_DEBUG_INFO(priv, "BT traffic load changes: %d\n",
		       priv->bt_traffic_load);

	switch (priv->bt_traffic_load) {
	case IWL_BT_COEX_TRAFFIC_LOAD_NONE:
		if (priv->bt_status)
			smps_request = IEEE80211_SMPS_DYNAMIC;
		else
			smps_request = IEEE80211_SMPS_AUTOMATIC;
		break;
	case IWL_BT_COEX_TRAFFIC_LOAD_LOW:
		smps_request = IEEE80211_SMPS_DYNAMIC;
		break;
	case IWL_BT_COEX_TRAFFIC_LOAD_HIGH:
	case IWL_BT_COEX_TRAFFIC_LOAD_CONTINUOUS:
		smps_request = IEEE80211_SMPS_STATIC;
		break;
	default:
		IWL_ERR(priv, "Invalid BT traffic load: %d\n",
			priv->bt_traffic_load);
		break;
	}

	mutex_lock(&priv->mutex);

	/*
	 * We can not send command to firmware while scanning. When the scan
	 * complete we will schedule this work again. We do check with mutex
	 * locked to prevent new scan request to arrive. We do not check
	 * STATUS_SCANNING to avoid race when queue_work two times from
	 * different notifications, but quit and not perform any work at all.
	 */
	if (test_bit(STATUS_SCAN_HW, &priv->status))
		goto out;

	if (priv->cfg->ops->lib->update_chain_flags)
		priv->cfg->ops->lib->update_chain_flags(priv);

	if (smps_request != -1) {
		for_each_context(priv, ctx) {
			if (ctx->vif && ctx->vif->type == NL80211_IFTYPE_STATION)
				ieee80211_request_smps(ctx->vif, smps_request);
		}
	}
out:
	mutex_unlock(&priv->mutex);
}

static void iwlagn_print_uartmsg(struct iwl_priv *priv,
				 struct iwl_bt_uart_msg *uart_msg)
{
	IWL_DEBUG_NOTIF(priv, "Message Type = 0x%X, SSN = 0x%X, "
			"Update Req = 0x%X",
		(BT_UART_MSG_FRAME1MSGTYPE_MSK & uart_msg->frame1) >>
			BT_UART_MSG_FRAME1MSGTYPE_POS,
		(BT_UART_MSG_FRAME1SSN_MSK & uart_msg->frame1) >>
			BT_UART_MSG_FRAME1SSN_POS,
		(BT_UART_MSG_FRAME1UPDATEREQ_MSK & uart_msg->frame1) >>
			BT_UART_MSG_FRAME1UPDATEREQ_POS);

	IWL_DEBUG_NOTIF(priv, "Open connections = 0x%X, Traffic load = 0x%X, "
			"Chl_SeqN = 0x%X, In band = 0x%X",
		(BT_UART_MSG_FRAME2OPENCONNECTIONS_MSK & uart_msg->frame2) >>
			BT_UART_MSG_FRAME2OPENCONNECTIONS_POS,
		(BT_UART_MSG_FRAME2TRAFFICLOAD_MSK & uart_msg->frame2) >>
			BT_UART_MSG_FRAME2TRAFFICLOAD_POS,
		(BT_UART_MSG_FRAME2CHLSEQN_MSK & uart_msg->frame2) >>
			BT_UART_MSG_FRAME2CHLSEQN_POS,
		(BT_UART_MSG_FRAME2INBAND_MSK & uart_msg->frame2) >>
			BT_UART_MSG_FRAME2INBAND_POS);

	IWL_DEBUG_NOTIF(priv, "SCO/eSCO = 0x%X, Sniff = 0x%X, A2DP = 0x%X, "
			"ACL = 0x%X, Master = 0x%X, OBEX = 0x%X",
		(BT_UART_MSG_FRAME3SCOESCO_MSK & uart_msg->frame3) >>
			BT_UART_MSG_FRAME3SCOESCO_POS,
		(BT_UART_MSG_FRAME3SNIFF_MSK & uart_msg->frame3) >>
			BT_UART_MSG_FRAME3SNIFF_POS,
		(BT_UART_MSG_FRAME3A2DP_MSK & uart_msg->frame3) >>
			BT_UART_MSG_FRAME3A2DP_POS,
		(BT_UART_MSG_FRAME3ACL_MSK & uart_msg->frame3) >>
			BT_UART_MSG_FRAME3ACL_POS,
		(BT_UART_MSG_FRAME3MASTER_MSK & uart_msg->frame3) >>
			BT_UART_MSG_FRAME3MASTER_POS,
		(BT_UART_MSG_FRAME3OBEX_MSK & uart_msg->frame3) >>
			BT_UART_MSG_FRAME3OBEX_POS);

	IWL_DEBUG_NOTIF(priv, "Idle duration = 0x%X",
		(BT_UART_MSG_FRAME4IDLEDURATION_MSK & uart_msg->frame4) >>
			BT_UART_MSG_FRAME4IDLEDURATION_POS);

	IWL_DEBUG_NOTIF(priv, "Tx Activity = 0x%X, Rx Activity = 0x%X, "
			"eSCO Retransmissions = 0x%X",
		(BT_UART_MSG_FRAME5TXACTIVITY_MSK & uart_msg->frame5) >>
			BT_UART_MSG_FRAME5TXACTIVITY_POS,
		(BT_UART_MSG_FRAME5RXACTIVITY_MSK & uart_msg->frame5) >>
			BT_UART_MSG_FRAME5RXACTIVITY_POS,
		(BT_UART_MSG_FRAME5ESCORETRANSMIT_MSK & uart_msg->frame5) >>
			BT_UART_MSG_FRAME5ESCORETRANSMIT_POS);

	IWL_DEBUG_NOTIF(priv, "Sniff Interval = 0x%X, Discoverable = 0x%X",
		(BT_UART_MSG_FRAME6SNIFFINTERVAL_MSK & uart_msg->frame6) >>
			BT_UART_MSG_FRAME6SNIFFINTERVAL_POS,
		(BT_UART_MSG_FRAME6DISCOVERABLE_MSK & uart_msg->frame6) >>
			BT_UART_MSG_FRAME6DISCOVERABLE_POS);

	IWL_DEBUG_NOTIF(priv, "Sniff Activity = 0x%X, Page = "
			"0x%X, Inquiry = 0x%X, Connectable = 0x%X",
		(BT_UART_MSG_FRAME7SNIFFACTIVITY_MSK & uart_msg->frame7) >>
			BT_UART_MSG_FRAME7SNIFFACTIVITY_POS,
		(BT_UART_MSG_FRAME7PAGE_MSK & uart_msg->frame7) >>
			BT_UART_MSG_FRAME7PAGE_POS,
		(BT_UART_MSG_FRAME7INQUIRY_MSK & uart_msg->frame7) >>
			BT_UART_MSG_FRAME7INQUIRY_POS,
		(BT_UART_MSG_FRAME7CONNECTABLE_MSK & uart_msg->frame7) >>
			BT_UART_MSG_FRAME7CONNECTABLE_POS);
}

static void iwlagn_set_kill_msk(struct iwl_priv *priv,
				struct iwl_bt_uart_msg *uart_msg)
{
	u8 kill_msk;
	static const __le32 bt_kill_ack_msg[2] = {
		IWLAGN_BT_KILL_ACK_MASK_DEFAULT,
		IWLAGN_BT_KILL_ACK_CTS_MASK_SCO };
	static const __le32 bt_kill_cts_msg[2] = {
		IWLAGN_BT_KILL_CTS_MASK_DEFAULT,
		IWLAGN_BT_KILL_ACK_CTS_MASK_SCO };

	kill_msk = (BT_UART_MSG_FRAME3SCOESCO_MSK & uart_msg->frame3)
		? 1 : 0;
	if (priv->kill_ack_mask != bt_kill_ack_msg[kill_msk] ||
	    priv->kill_cts_mask != bt_kill_cts_msg[kill_msk]) {
		priv->bt_valid |= IWLAGN_BT_VALID_KILL_ACK_MASK;
		priv->kill_ack_mask = bt_kill_ack_msg[kill_msk];
		priv->bt_valid |= IWLAGN_BT_VALID_KILL_CTS_MASK;
		priv->kill_cts_mask = bt_kill_cts_msg[kill_msk];
		/* schedule to send runtime bt_config */
		queue_work(priv->workqueue, &priv->bt_runtime_config);
	}
}

void iwlagn_bt_coex_profile_notif(struct iwl_priv *priv,
				  struct iwl_rx_mem_buffer *rxb)
{
	unsigned long flags;
	struct iwl_rx_packet *pkt = rxb_addr(rxb);
	struct iwl_bt_coex_profile_notif *coex = &pkt->u.bt_coex_profile_notif;
	struct iwl_bt_uart_msg *uart_msg = &coex->last_bt_uart_msg;

	if (priv->bt_enable_flag == IWLAGN_BT_FLAG_COEX_MODE_DISABLED) {
		/* bt coex disabled */
		return;
	}

	IWL_DEBUG_NOTIF(priv, "BT Coex notification:\n");
	IWL_DEBUG_NOTIF(priv, " status: %d\n", coex->bt_status);
	IWL_DEBUG_NOTIF(priv, " traffic load: %d\n", coex->bt_traffic_load);
	IWL_DEBUG_NOTIF(priv, " CI compliance: %d\n",
			coex->bt_ci_compliance);
	iwlagn_print_uartmsg(priv, uart_msg);

	priv->last_bt_traffic_load = priv->bt_traffic_load;
	if (priv->iw_mode != NL80211_IFTYPE_ADHOC) {
		if (priv->bt_status != coex->bt_status ||
		    priv->last_bt_traffic_load != coex->bt_traffic_load) {
			if (coex->bt_status) {
				/* BT on */
				if (!priv->bt_ch_announce)
					priv->bt_traffic_load =
						IWL_BT_COEX_TRAFFIC_LOAD_HIGH;
				else
					priv->bt_traffic_load =
						coex->bt_traffic_load;
			} else {
				/* BT off */
				priv->bt_traffic_load =
					IWL_BT_COEX_TRAFFIC_LOAD_NONE;
			}
			priv->bt_status = coex->bt_status;
			queue_work(priv->workqueue,
				   &priv->bt_traffic_change_work);
		}
	}

	iwlagn_set_kill_msk(priv, uart_msg);

	/* FIXME: based on notification, adjust the prio_boost */

	spin_lock_irqsave(&priv->lock, flags);
	priv->bt_ci_compliance = coex->bt_ci_compliance;
	spin_unlock_irqrestore(&priv->lock, flags);
}

void iwlagn_bt_rx_handler_setup(struct iwl_priv *priv)
{
	iwlagn_rx_handler_setup(priv);
	priv->rx_handlers[REPLY_BT_COEX_PROFILE_NOTIF] =
		iwlagn_bt_coex_profile_notif;
}

void iwlagn_bt_setup_deferred_work(struct iwl_priv *priv)
{
	iwlagn_setup_deferred_work(priv);

	INIT_WORK(&priv->bt_traffic_change_work,
		  iwlagn_bt_traffic_change_work);
}

void iwlagn_bt_cancel_deferred_work(struct iwl_priv *priv)
{
	cancel_work_sync(&priv->bt_traffic_change_work);
}

static bool is_single_rx_stream(struct iwl_priv *priv)
{
	return priv->current_ht_config.smps == IEEE80211_SMPS_STATIC ||
	       priv->current_ht_config.single_chain_sufficient;
}

#define IWL_NUM_RX_CHAINS_MULTIPLE	3
#define IWL_NUM_RX_CHAINS_SINGLE	2
#define IWL_NUM_IDLE_CHAINS_DUAL	2
#define IWL_NUM_IDLE_CHAINS_SINGLE	1

/*
 * Determine how many receiver/antenna chains to use.
 *
 * More provides better reception via diversity. Fewer saves power
 * at the expense of throughput, but only when not in powersave to
 * start with.
 *
 * MIMO (dual stream) requires at least 2, but works better with 3.
 * This does not determine *which* chains to use, just how many.
 */
static int iwl_get_active_rx_chain_count(struct iwl_priv *priv)
{
	if (priv->cfg->bt_params &&
	    priv->cfg->bt_params->advanced_bt_coexist &&
	    (priv->bt_full_concurrent ||
	     priv->bt_traffic_load >= IWL_BT_COEX_TRAFFIC_LOAD_HIGH)) {
		/*
		 * only use chain 'A' in bt high traffic load or
		 * full concurrency mode
		 */
		return IWL_NUM_RX_CHAINS_SINGLE;
	}
	/* # of Rx chains to use when expecting MIMO. */
	if (is_single_rx_stream(priv))
		return IWL_NUM_RX_CHAINS_SINGLE;
	else
		return IWL_NUM_RX_CHAINS_MULTIPLE;
}

/*
 * When we are in power saving mode, unless device support spatial
 * multiplexing power save, use the active count for rx chain count.
 */
static int iwl_get_idle_rx_chain_count(struct iwl_priv *priv, int active_cnt)
{
	/* # Rx chains when idling, depending on SMPS mode */
	switch (priv->current_ht_config.smps) {
	case IEEE80211_SMPS_STATIC:
	case IEEE80211_SMPS_DYNAMIC:
		return IWL_NUM_IDLE_CHAINS_SINGLE;
	case IEEE80211_SMPS_OFF:
		return active_cnt;
	default:
		WARN(1, "invalid SMPS mode %d",
		     priv->current_ht_config.smps);
		return active_cnt;
	}
}

/* up to 4 chains */
static u8 iwl_count_chain_bitmap(u32 chain_bitmap)
{
	u8 res;
	res = (chain_bitmap & BIT(0)) >> 0;
	res += (chain_bitmap & BIT(1)) >> 1;
	res += (chain_bitmap & BIT(2)) >> 2;
	res += (chain_bitmap & BIT(3)) >> 3;
	return res;
}

/**
 * iwlagn_set_rxon_chain - Set up Rx chain usage in "staging" RXON image
 *
 * Selects how many and which Rx receivers/antennas/chains to use.
 * This should not be used for scan command ... it puts data in wrong place.
 */
void iwlagn_set_rxon_chain(struct iwl_priv *priv, struct iwl_rxon_context *ctx)
{
	bool is_single = is_single_rx_stream(priv);
	bool is_cam = !test_bit(STATUS_POWER_PMI, &priv->status);
	u8 idle_rx_cnt, active_rx_cnt, valid_rx_cnt;
	u32 active_chains;
	u16 rx_chain;

	/* Tell uCode which antennas are actually connected.
	 * Before first association, we assume all antennas are connected.
	 * Just after first association, iwl_chain_noise_calibration()
	 * checks which antennas actually *are* connected. */
	if (priv->chain_noise_data.active_chains)
		active_chains = priv->chain_noise_data.active_chains;
	else
		active_chains = priv->hw_params.valid_rx_ant;

	if (priv->cfg->bt_params &&
	    priv->cfg->bt_params->advanced_bt_coexist &&
	    (priv->bt_full_concurrent ||
	     priv->bt_traffic_load >= IWL_BT_COEX_TRAFFIC_LOAD_HIGH)) {
		/*
		 * only use chain 'A' in bt high traffic load or
		 * full concurrency mode
		 */
		active_chains = first_antenna(active_chains);
	}

	rx_chain = active_chains << RXON_RX_CHAIN_VALID_POS;

	/* How many receivers should we use? */
	active_rx_cnt = iwl_get_active_rx_chain_count(priv);
	idle_rx_cnt = iwl_get_idle_rx_chain_count(priv, active_rx_cnt);

	/* correct rx chain count according hw settings
	 * and chain noise calibration
	 */
	valid_rx_cnt = iwl_count_chain_bitmap(active_chains);
	if (valid_rx_cnt < active_rx_cnt)
		active_rx_cnt = valid_rx_cnt;

	if (valid_rx_cnt < idle_rx_cnt)
		idle_rx_cnt = valid_rx_cnt;

	rx_chain |= active_rx_cnt << RXON_RX_CHAIN_MIMO_CNT_POS;
	rx_chain |= idle_rx_cnt << RXON_RX_CHAIN_CNT_POS;

	ctx->staging.rx_chain = cpu_to_le16(rx_chain);

	if (!is_single && (active_rx_cnt >= IWL_NUM_RX_CHAINS_SINGLE) && is_cam)
		ctx->staging.rx_chain |= RXON_RX_CHAIN_MIMO_FORCE_MSK;
	else
		ctx->staging.rx_chain &= ~RXON_RX_CHAIN_MIMO_FORCE_MSK;

	IWL_DEBUG_ASSOC(priv, "rx_chain=0x%X active=%d idle=%d\n",
			ctx->staging.rx_chain,
			active_rx_cnt, idle_rx_cnt);

	WARN_ON(active_rx_cnt == 0 || idle_rx_cnt == 0 ||
		active_rx_cnt < idle_rx_cnt);
}

u8 iwl_toggle_tx_ant(struct iwl_priv *priv, u8 ant, u8 valid)
{
	int i;
	u8 ind = ant;

	if (priv->band == IEEE80211_BAND_2GHZ &&
	    priv->bt_traffic_load >= IWL_BT_COEX_TRAFFIC_LOAD_HIGH)
		return 0;

	for (i = 0; i < RATE_ANT_NUM - 1; i++) {
		ind = (ind + 1) < RATE_ANT_NUM ? ind + 1 : 0;
		if (valid & BIT(ind))
			return ind;
	}
	return ant;
}

static const char *get_csr_string(int cmd)
{
	switch (cmd) {
	IWL_CMD(CSR_HW_IF_CONFIG_REG);
	IWL_CMD(CSR_INT_COALESCING);
	IWL_CMD(CSR_INT);
	IWL_CMD(CSR_INT_MASK);
	IWL_CMD(CSR_FH_INT_STATUS);
	IWL_CMD(CSR_GPIO_IN);
	IWL_CMD(CSR_RESET);
	IWL_CMD(CSR_GP_CNTRL);
	IWL_CMD(CSR_HW_REV);
	IWL_CMD(CSR_EEPROM_REG);
	IWL_CMD(CSR_EEPROM_GP);
	IWL_CMD(CSR_OTP_GP_REG);
	IWL_CMD(CSR_GIO_REG);
	IWL_CMD(CSR_GP_UCODE_REG);
	IWL_CMD(CSR_GP_DRIVER_REG);
	IWL_CMD(CSR_UCODE_DRV_GP1);
	IWL_CMD(CSR_UCODE_DRV_GP2);
	IWL_CMD(CSR_LED_REG);
	IWL_CMD(CSR_DRAM_INT_TBL_REG);
	IWL_CMD(CSR_GIO_CHICKEN_BITS);
	IWL_CMD(CSR_ANA_PLL_CFG);
	IWL_CMD(CSR_HW_REV_WA_REG);
	IWL_CMD(CSR_DBG_HPET_MEM_REG);
	default:
		return "UNKNOWN";
	}
}

void iwl_dump_csr(struct iwl_priv *priv)
{
	int i;
	static const u32 csr_tbl[] = {
		CSR_HW_IF_CONFIG_REG,
		CSR_INT_COALESCING,
		CSR_INT,
		CSR_INT_MASK,
		CSR_FH_INT_STATUS,
		CSR_GPIO_IN,
		CSR_RESET,
		CSR_GP_CNTRL,
		CSR_HW_REV,
		CSR_EEPROM_REG,
		CSR_EEPROM_GP,
		CSR_OTP_GP_REG,
		CSR_GIO_REG,
		CSR_GP_UCODE_REG,
		CSR_GP_DRIVER_REG,
		CSR_UCODE_DRV_GP1,
		CSR_UCODE_DRV_GP2,
		CSR_LED_REG,
		CSR_DRAM_INT_TBL_REG,
		CSR_GIO_CHICKEN_BITS,
		CSR_ANA_PLL_CFG,
		CSR_HW_REV_WA_REG,
		CSR_DBG_HPET_MEM_REG
	};
	IWL_ERR(priv, "CSR values:\n");
	IWL_ERR(priv, "(2nd byte of CSR_INT_COALESCING is "
		"CSR_INT_PERIODIC_REG)\n");
	for (i = 0; i < ARRAY_SIZE(csr_tbl); i++) {
		IWL_ERR(priv, " %25s: 0X%08x\n",
			get_csr_string(csr_tbl[i]),
			iwl_read32(priv, csr_tbl[i]));
	}
}

static const char *get_fh_string(int cmd)
{
	switch (cmd) {
	IWL_CMD(FH_RSCSR_CHNL0_STTS_WPTR_REG);
	IWL_CMD(FH_RSCSR_CHNL0_RBDCB_BASE_REG);
	IWL_CMD(FH_RSCSR_CHNL0_WPTR);
	IWL_CMD(FH_MEM_RCSR_CHNL0_CONFIG_REG);
	IWL_CMD(FH_MEM_RSSR_SHARED_CTRL_REG);
	IWL_CMD(FH_MEM_RSSR_RX_STATUS_REG);
	IWL_CMD(FH_MEM_RSSR_RX_ENABLE_ERR_IRQ2DRV);
	IWL_CMD(FH_TSSR_TX_STATUS_REG);
	IWL_CMD(FH_TSSR_TX_ERROR_REG);
	default:
		return "UNKNOWN";
	}
}

int iwl_dump_fh(struct iwl_priv *priv, char **buf, bool display)
{
	int i;
#ifdef CONFIG_IWLWIFI_DEBUG
	int pos = 0;
	size_t bufsz = 0;
#endif
	static const u32 fh_tbl[] = {
		FH_RSCSR_CHNL0_STTS_WPTR_REG,
		FH_RSCSR_CHNL0_RBDCB_BASE_REG,
		FH_RSCSR_CHNL0_WPTR,
		FH_MEM_RCSR_CHNL0_CONFIG_REG,
		FH_MEM_RSSR_SHARED_CTRL_REG,
		FH_MEM_RSSR_RX_STATUS_REG,
		FH_MEM_RSSR_RX_ENABLE_ERR_IRQ2DRV,
		FH_TSSR_TX_STATUS_REG,
		FH_TSSR_TX_ERROR_REG
	};
#ifdef CONFIG_IWLWIFI_DEBUG
	if (display) {
		bufsz = ARRAY_SIZE(fh_tbl) * 48 + 40;
		*buf = kmalloc(bufsz, GFP_KERNEL);
		if (!*buf)
			return -ENOMEM;
		pos += scnprintf(*buf + pos, bufsz - pos,
				 "FH register values:\n");
		for (i = 0; i < ARRAY_SIZE(fh_tbl); i++) {
			pos += scnprintf(*buf + pos, bufsz - pos,
					 " %34s: 0X%08x\n",
					 get_fh_string(fh_tbl[i]),
					 iwl_read_direct32(priv, fh_tbl[i]));
		}
		return pos;
	}
#endif
	IWL_ERR(priv, "FH register values:\n");
	for (i = 0; i < ARRAY_SIZE(fh_tbl); i++) {
		IWL_ERR(priv, " %34s: 0X%08x\n",
			get_fh_string(fh_tbl[i]),
			iwl_read_direct32(priv, fh_tbl[i]));
	}
	return 0;
}

/* notification wait support */
void iwlagn_init_notification_wait(struct iwl_priv *priv,
				   struct iwl_notification_wait *wait_entry,
				   void (*fn)(struct iwl_priv *priv,
					      struct iwl_rx_packet *pkt),
				   u8 cmd)
{
	wait_entry->fn = fn;
	wait_entry->cmd = cmd;
	wait_entry->triggered = false;

	spin_lock_bh(&priv->_agn.notif_wait_lock);
	list_add(&wait_entry->list, &priv->_agn.notif_waits);
	spin_unlock_bh(&priv->_agn.notif_wait_lock);
}

signed long iwlagn_wait_notification(struct iwl_priv *priv,
				     struct iwl_notification_wait *wait_entry,
				     unsigned long timeout)
{
	long ret;
	ret = wait_event_timeout(priv->_agn.notif_waitq,
				 wait_entry->triggered,
				 timeout);

	spin_lock_bh(&priv->_agn.notif_wait_lock);
	list_del(&wait_entry->list);
	spin_unlock_bh(&priv->_agn.notif_wait_lock);

	return ret;
}

void iwlagn_remove_notification(struct iwl_priv *priv,
				struct iwl_notification_wait *wait_entry)
{
	spin_lock_bh(&priv->_agn.notif_wait_lock);
	list_del(&wait_entry->list);
	spin_unlock_bh(&priv->_agn.notif_wait_lock);
}