/*
 * Copyright (c) 2008-2011 Atheros Communications Inc.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include <linux/nl80211.h>
#include <linux/delay.h>
#include "ath9k.h"
#include "btcoex.h"

static void ath9k_set_assoc_state(struct ath_softc *sc,
				  struct ieee80211_vif *vif);

u8 ath9k_parse_mpdudensity(u8 mpdudensity)
{
	/*
	 * 802.11n D2.0 defined values for "Minimum MPDU Start Spacing":
	 *   0 for no restriction
	 *   1 for 1/4 us
	 *   2 for 1/2 us
	 *   3 for 1 us
	 *   4 for 2 us
	 *   5 for 4 us
	 *   6 for 8 us
	 *   7 for 16 us
	 */
	switch (mpdudensity) {
	case 0:
		return 0;
	case 1:
	case 2:
	case 3:
		/* Our lower layer calculations limit our precision to
		   1 microsecond */
		return 1;
	case 4:
		return 2;
	case 5:
		return 4;
	case 6:
		return 8;
	case 7:
		return 16;
	default:
		return 0;
	}
}
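
/* Check, under the queue lock, whether a TX queue still holds queued
 * frames or has a non-empty pending-AC list. */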
static bool ath9k_has_pending_frames(struct ath_softc *sc, struct ath_txq *txq)
{
	bool pending = false;

	spin_lock_bh(&txq->axq_lock);

	if (txq->axq_depth || !list_empty(&txq->axq_acq))
		pending = true;

	spin_unlock_bh(&txq->axq_lock);
	return pending;
}
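
/* Change the chip power mode while holding the power-management lock. */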
static bool ath9k_setpower(struct ath_softc *sc, enum ath9k_power_mode mode)
{
	unsigned long flags;
	bool ret;

	spin_lock_irqsave(&sc->sc_pm_lock, flags);
	ret = ath9k_hw_setpower(sc->sc_ah, mode);
	spin_unlock_irqrestore(&sc->sc_pm_lock, flags);

	return ret;
}
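
/*
 * Take a power-save reference; on the first reference force the chip
 * awake and clear the cycle counters, which hold no useful data while
 * the hardware was asleep.
 */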
void ath9k_ps_wakeup(struct ath_softc *sc)
{
	struct ath_common *common = ath9k_hw_common(sc->sc_ah);
	unsigned long flags;
	enum ath9k_power_mode power_mode;

	spin_lock_irqsave(&sc->sc_pm_lock, flags);
	if (++sc->ps_usecount != 1)
		goto unlock;

	power_mode = sc->sc_ah->power_mode;
	ath9k_hw_setpower(sc->sc_ah, ATH9K_PM_AWAKE);

	/*
	 * While the hardware is asleep, the cycle counters contain no
	 * useful data. Better clear them now so that they don't mess up
	 * survey data results.
	 */
	if (power_mode != ATH9K_PM_AWAKE) {
		spin_lock(&common->cc_lock);
		ath_hw_cycle_counters_update(common);
		memset(&common->cc_survey, 0, sizeof(common->cc_survey));
		memset(&common->cc_ani, 0, sizeof(common->cc_ani));
		spin_unlock(&common->cc_lock);
	}

 unlock:
	spin_unlock_irqrestore(&sc->sc_pm_lock, flags);
}
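
/*
 * Drop a power-save reference; when the last reference is released,
 * pick the sleep state that matches the current idle and PS flags.
 */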
void ath9k_ps_restore(struct ath_softc *sc)
{
	struct ath_common *common = ath9k_hw_common(sc->sc_ah);
	enum ath9k_power_mode mode;
	unsigned long flags;
	bool reset;

	spin_lock_irqsave(&sc->sc_pm_lock, flags);
	if (--sc->ps_usecount != 0)
		goto unlock;

	if (sc->ps_idle) {
		ath9k_hw_setrxabort(sc->sc_ah, 1);
		ath9k_hw_stopdmarecv(sc->sc_ah, &reset);
		mode = ATH9K_PM_FULL_SLEEP;
	} else if (sc->ps_enabled &&
		   !(sc->ps_flags & (PS_WAIT_FOR_BEACON |
				     PS_WAIT_FOR_CAB |
				     PS_WAIT_FOR_PSPOLL_DATA |
				     PS_WAIT_FOR_TX_ACK |
				     PS_WAIT_FOR_ANI))) {
		mode = ATH9K_PM_NETWORK_SLEEP;
		if (ath9k_hw_btcoex_is_enabled(sc->sc_ah))
			ath9k_btcoex_stop_gen_timer(sc);
	} else {
		goto unlock;
	}

	spin_lock(&common->cc_lock);
	ath_hw_cycle_counters_update(common);
	spin_unlock(&common->cc_lock);

	ath9k_hw_setpower(sc->sc_ah, mode);

 unlock:
	spin_unlock_irqrestore(&sc->sc_pm_lock, flags);
}

static void __ath_cancel_work(struct ath_softc *sc)
{
	cancel_work_sync(&sc->paprd_work);
	cancel_work_sync(&sc->hw_check_work);
	cancel_delayed_work_sync(&sc->tx_complete_work);
	cancel_delayed_work_sync(&sc->hw_pll_work);

#ifdef CONFIG_ATH9K_BTCOEX_SUPPORT
	if (ath9k_hw_mci_is_enabled(sc->sc_ah))
		cancel_work_sync(&sc->mci_work);
#endif
}

static void ath_cancel_work(struct ath_softc *sc)
{
	__ath_cancel_work(sc);
	cancel_work_sync(&sc->hw_reset_work);
}

static void ath_restart_work(struct ath_softc *sc)
{
	ieee80211_queue_delayed_work(sc->hw, &sc->tx_complete_work, 0);

	if (AR_SREV_9340(sc->sc_ah) || AR_SREV_9330(sc->sc_ah))
		ieee80211_queue_delayed_work(sc->hw, &sc->hw_pll_work,
					     msecs_to_jiffies(ATH_PLL_WORK_INTERVAL));

	ath_start_rx_poll(sc, 3);
	ath_start_ani(sc);
}
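
/*
 * Quiesce the hardware before a reset: stop the mac80211 queues, ANI
 * and the RX poll timer, disable interrupts and drain the TX and RX
 * paths.
 */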
static bool ath_prepare_reset(struct ath_softc *sc)
{
	struct ath_hw *ah = sc->sc_ah;
	bool ret = true;

	ieee80211_stop_queues(sc->hw);

	sc->hw_busy_count = 0;
	ath_stop_ani(sc);
	del_timer_sync(&sc->rx_poll_timer);

	ath9k_hw_disable_interrupts(ah);

	if (!ath_drain_all_txq(sc))
		ret = false;

	if (!ath_stoprecv(sc))
		ret = false;

	return ret;
}
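
/*
 * Bring the hardware back up after a reset: restart RX, restore TX
 * power, re-enable interrupts and, unless this is an off-channel
 * operation, resume beaconing and TX queue scheduling.
 */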
static bool ath_complete_reset(struct ath_softc *sc, bool start)
{
	struct ath_hw *ah = sc->sc_ah;
	struct ath_common *common = ath9k_hw_common(ah);
	unsigned long flags;
	int i;

	if (ath_startrecv(sc) != 0) {
		ath_err(common, "Unable to restart recv logic\n");
		return false;
	}

	ath9k_cmn_update_txpow(ah, sc->curtxpow,
			       sc->config.txpowlimit, &sc->curtxpow);

	clear_bit(SC_OP_HW_RESET, &sc->sc_flags);
	ath9k_hw_set_interrupts(ah);
	ath9k_hw_enable_interrupts(ah);

	if (!(sc->hw->conf.flags & IEEE80211_CONF_OFFCHANNEL) && start) {
		if (!test_bit(SC_OP_BEACONS, &sc->sc_flags))
			goto work;

		if (ah->opmode == NL80211_IFTYPE_STATION &&
		    test_bit(SC_OP_PRIM_STA_VIF, &sc->sc_flags)) {
			spin_lock_irqsave(&sc->sc_pm_lock, flags);
			sc->ps_flags |= PS_BEACON_SYNC | PS_WAIT_FOR_BEACON;
			spin_unlock_irqrestore(&sc->sc_pm_lock, flags);
		} else {
			ath9k_set_beacon(sc);
		}
	work:
		ath_restart_work(sc);

		for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) {
			if (!ATH_TXQ_SETUP(sc, i))
				continue;

			spin_lock_bh(&sc->tx.txq[i].axq_lock);
			ath_txq_schedule(sc, &sc->tx.txq[i]);
			spin_unlock_bh(&sc->tx.txq[i].axq_lock);
		}
	}

	ieee80211_wake_queues(sc->hw);

	return true;
}

static int ath_reset_internal(struct ath_softc *sc, struct ath9k_channel *hchan)
{
	struct ath_hw *ah = sc->sc_ah;
	struct ath_common *common = ath9k_hw_common(ah);
	struct ath9k_hw_cal_data *caldata = NULL;
	bool fastcc = true;
	int r;

	__ath_cancel_work(sc);

	tasklet_disable(&sc->intr_tq);
	spin_lock_bh(&sc->sc_pcu_lock);

	if (!(sc->hw->conf.flags & IEEE80211_CONF_OFFCHANNEL)) {
		fastcc = false;
		caldata = &sc->caldata;
	}

	if (!hchan) {
		fastcc = false;
		hchan = ah->curchan;
	}

	if (!ath_prepare_reset(sc))
		fastcc = false;

	ath_dbg(common, CONFIG, "Reset to %u MHz, HT40: %d fastcc: %d\n",
		hchan->channel, IS_CHAN_HT40(hchan), fastcc);

	r = ath9k_hw_reset(ah, hchan, caldata, fastcc);
	if (r) {
		ath_err(common,
			"Unable to reset channel, reset status %d\n", r);

		ath9k_hw_enable_interrupts(ah);
		ath9k_queue_reset(sc, RESET_TYPE_BB_HANG);

		goto out;
	}

	if (ath9k_hw_mci_is_enabled(sc->sc_ah) &&
	    (sc->hw->conf.flags & IEEE80211_CONF_OFFCHANNEL))
		ath9k_mci_set_txpower(sc, true, false);

	if (!ath_complete_reset(sc, true))
		r = -EIO;

out:
	spin_unlock_bh(&sc->sc_pcu_lock);
	tasklet_enable(&sc->intr_tq);

	return r;
}

/*
 * Set/change channels. If the channel is really being changed, it's done
 * by resetting the chip. To accomplish this we must first cleanup any
 * pending DMA, then restart stuff.
 */
static int ath_set_channel(struct ath_softc *sc, struct cfg80211_chan_def *chandef)
{
	struct ath_hw *ah = sc->sc_ah;
	struct ath_common *common = ath9k_hw_common(ah);
	struct ieee80211_hw *hw = sc->hw;
	struct ath9k_channel *hchan;
	struct ieee80211_channel *chan = chandef->chan;
	unsigned long flags;
	bool offchannel;
	int pos = chan->hw_value;
	int old_pos = -1;
	int r;

	if (test_bit(SC_OP_INVALID, &sc->sc_flags))
		return -EIO;

	offchannel = !!(hw->conf.flags & IEEE80211_CONF_OFFCHANNEL);

	if (ah->curchan)
		old_pos = ah->curchan - &ah->channels[0];

	ath_dbg(common, CONFIG, "Set channel: %d MHz width: %d\n",
		chan->center_freq, chandef->width);

	/* update survey stats for the old channel before switching */
	spin_lock_irqsave(&common->cc_lock, flags);
	ath_update_survey_stats(sc);
	spin_unlock_irqrestore(&common->cc_lock, flags);

	ath9k_cmn_get_channel(hw, ah, chandef);

	/*
	 * If the operating channel changes, change the survey in-use flags
	 * along with it.
	 * Reset the survey data for the new channel, unless we're switching
	 * back to the operating channel from an off-channel operation.
	 */
	if (!offchannel && sc->cur_survey != &sc->survey[pos]) {
		if (sc->cur_survey)
			sc->cur_survey->filled &= ~SURVEY_INFO_IN_USE;

		sc->cur_survey = &sc->survey[pos];

		memset(sc->cur_survey, 0, sizeof(struct survey_info));
		sc->cur_survey->filled |= SURVEY_INFO_IN_USE;
	} else if (!(sc->survey[pos].filled & SURVEY_INFO_IN_USE)) {
		memset(&sc->survey[pos], 0, sizeof(struct survey_info));
	}

	hchan = &sc->sc_ah->channels[pos];
	r = ath_reset_internal(sc, hchan);
	if (r)
		return r;

	/*
	 * The most recent snapshot of channel->noisefloor for the old
	 * channel is only available after the hardware reset. Copy it to
	 * the survey stats now.
	 */
	if (old_pos >= 0)
		ath_update_survey_nf(sc, old_pos);

	/*
	 * Enable radar pulse detection if on a DFS channel. Spectral
	 * scanning and radar detection cannot be used concurrently.
	 */
	if (hw->conf.radar_enabled) {
		u32 rxfilter;

		/* set HW specific DFS configuration */
		ath9k_hw_set_radar_params(ah);
		rxfilter = ath9k_hw_getrxfilter(ah);
		rxfilter |= ATH9K_RX_FILTER_PHYRADAR |
			    ATH9K_RX_FILTER_PHYERR;
		ath9k_hw_setrxfilter(ah, rxfilter);
		ath_dbg(common, DFS, "DFS enabled at freq %d\n",
			chan->center_freq);
	} else {
		/* perform spectral scan if requested. */
		if (test_bit(SC_OP_SCANNING, &sc->sc_flags) &&
		    sc->spectral_mode == SPECTRAL_CHANSCAN)
			ath9k_spectral_scan_trigger(hw);
	}

	return 0;
}
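
/* Initialize per-station driver state and, for HT stations, the
 * A-MPDU spacing limits. */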
static void ath_node_attach(struct ath_softc *sc, struct ieee80211_sta *sta,
			    struct ieee80211_vif *vif)
{
	struct ath_node *an;
	an = (struct ath_node *)sta->drv_priv;
	an->sc = sc;
	an->sta = sta;
	an->vif = vif;

	ath_tx_node_init(sc, an);

	if (sta->ht_cap.ht_supported) {
		an->maxampdu = 1 << (IEEE80211_HT_MAX_AMPDU_FACTOR +
				     sta->ht_cap.ampdu_factor);
		an->mpdudensity = ath9k_parse_mpdudensity(sta->ht_cap.ampdu_density);
	}
}

static void ath_node_detach(struct ath_softc *sc, struct ieee80211_sta *sta)
{
	struct ath_node *an = (struct ath_node *)sta->drv_priv;
	ath_tx_node_cleanup(sc, an);
}
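
/*
 * Bottom half for ath_isr(): handles fatal/BB-watchdog errors, TSF
 * out-of-range events and the bulk of RX/TX completion processing,
 * then re-enables the hardware interrupt.
 */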
void ath9k_tasklet(unsigned long data)
{
	struct ath_softc *sc = (struct ath_softc *)data;
	struct ath_hw *ah = sc->sc_ah;
	struct ath_common *common = ath9k_hw_common(ah);
	enum ath_reset_type type;
	unsigned long flags;
	u32 status = sc->intrstatus;
	u32 rxmask;

	ath9k_ps_wakeup(sc);
	spin_lock(&sc->sc_pcu_lock);

	if ((status & ATH9K_INT_FATAL) ||
	    (status & ATH9K_INT_BB_WATCHDOG)) {

		if (status & ATH9K_INT_FATAL)
			type = RESET_TYPE_FATAL_INT;
		else
			type = RESET_TYPE_BB_WATCHDOG;

		ath9k_queue_reset(sc, type);

		/*
		 * Increment the ref. counter here so that
		 * interrupts are enabled in the reset routine.
		 */
		atomic_inc(&ah->intr_ref_cnt);
		ath_dbg(common, ANY, "FATAL: Skipping interrupts\n");
		goto out;
	}

	spin_lock_irqsave(&sc->sc_pm_lock, flags);
	if ((status & ATH9K_INT_TSFOOR) && sc->ps_enabled) {
		/*
		 * TSF sync does not look correct; remain awake to sync with
		 * the next Beacon.
		 */
		ath_dbg(common, PS, "TSFOOR - Sync with next Beacon\n");
		sc->ps_flags |= PS_WAIT_FOR_BEACON | PS_BEACON_SYNC;
	}
	spin_unlock_irqrestore(&sc->sc_pm_lock, flags);

	if (ah->caps.hw_caps & ATH9K_HW_CAP_EDMA)
		rxmask = (ATH9K_INT_RXHP | ATH9K_INT_RXLP | ATH9K_INT_RXEOL |
			  ATH9K_INT_RXORN);
	else
		rxmask = (ATH9K_INT_RX | ATH9K_INT_RXEOL | ATH9K_INT_RXORN);

	if (status & rxmask) {
		/* Check for high priority Rx first */
		if ((ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) &&
		    (status & ATH9K_INT_RXHP))
			ath_rx_tasklet(sc, 0, true);

		ath_rx_tasklet(sc, 0, false);
	}

	if (status & ATH9K_INT_TX) {
		if (ah->caps.hw_caps & ATH9K_HW_CAP_EDMA)
			ath_tx_edma_tasklet(sc);
		else
			ath_tx_tasklet(sc);
	}

	ath9k_btcoex_handle_interrupt(sc, status);

	/* re-enable hardware interrupt */
	ath9k_hw_enable_interrupts(ah);
out:
	spin_unlock(&sc->sc_pcu_lock);
	ath9k_ps_restore(sc);
}
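
/*
 * Top-half interrupt handler: read and mask the ISR, cache the status
 * for the tasklet and, for scheduled interrupts, disable hardware
 * interrupts until the tasklet has run.
 */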
irqreturn_t ath_isr(int irq, void *dev)
{
#define SCHED_INTR (				\
		ATH9K_INT_FATAL |		\
		ATH9K_INT_BB_WATCHDOG |		\
		ATH9K_INT_RXORN |		\
		ATH9K_INT_RXEOL |		\
		ATH9K_INT_RX |			\
		ATH9K_INT_RXLP |		\
		ATH9K_INT_RXHP |		\
		ATH9K_INT_TX |			\
		ATH9K_INT_BMISS |		\
		ATH9K_INT_CST |			\
		ATH9K_INT_TSFOOR |		\
		ATH9K_INT_GENTIMER |		\
		ATH9K_INT_MCI)

	struct ath_softc *sc = dev;
	struct ath_hw *ah = sc->sc_ah;
	struct ath_common *common = ath9k_hw_common(ah);
	enum ath9k_int status;
	bool sched = false;

	/*
	 * The hardware is not ready/present, don't
	 * touch anything. Note this can happen early
	 * on if the IRQ is shared.
	 */
	if (test_bit(SC_OP_INVALID, &sc->sc_flags))
		return IRQ_NONE;

	/* shared irq, not for us */
	if (!ath9k_hw_intrpend(ah))
		return IRQ_NONE;

	if (test_bit(SC_OP_HW_RESET, &sc->sc_flags)) {
		ath9k_hw_kill_interrupts(ah);
		return IRQ_HANDLED;
	}

	/*
	 * Figure out the reason(s) for the interrupt. Note
	 * that the hal returns a pseudo-ISR that may include
	 * bits we haven't explicitly enabled so we mask the
	 * value to ensure we only process bits we requested.
	 */
	ath9k_hw_getisr(ah, &status); /* NB: clears ISR too */
	status &= ah->imask;	/* discard unasked-for bits */

	/*
	 * If there are no status bits set, then this interrupt was not
	 * for us (should have been caught above).
	 */
	if (!status)
		return IRQ_NONE;

	/* Cache the status */
	sc->intrstatus = status;

	if (status & SCHED_INTR)
		sched = true;

	/*
	 * If a FATAL or RXORN interrupt is received, we have to reset the
	 * chip immediately.
	 */
	if ((status & ATH9K_INT_FATAL) || ((status & ATH9K_INT_RXORN) &&
	    !(ah->caps.hw_caps & ATH9K_HW_CAP_EDMA)))
		goto chip_reset;

	if ((ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) &&
	    (status & ATH9K_INT_BB_WATCHDOG)) {

		spin_lock(&common->cc_lock);
		ath_hw_cycle_counters_update(common);
		ar9003_hw_bb_watchdog_dbg_info(ah);
		spin_unlock(&common->cc_lock);

		goto chip_reset;
	}

#ifdef CONFIG_PM_SLEEP
	if (status & ATH9K_INT_BMISS) {
		if (atomic_read(&sc->wow_sleep_proc_intr) == 0) {
			ath_dbg(common, ANY, "during WoW we got a BMISS\n");
			atomic_inc(&sc->wow_got_bmiss_intr);
			atomic_dec(&sc->wow_sleep_proc_intr);
		}
	}
#endif

	if (status & ATH9K_INT_SWBA)
		tasklet_schedule(&sc->bcon_tasklet);

	if (status & ATH9K_INT_TXURN)
		ath9k_hw_updatetxtriglevel(ah, true);

	if (status & ATH9K_INT_RXEOL) {
		ah->imask &= ~(ATH9K_INT_RXEOL | ATH9K_INT_RXORN);
		ath9k_hw_set_interrupts(ah);
	}

	if (!(ah->caps.hw_caps & ATH9K_HW_CAP_AUTOSLEEP))
		if (status & ATH9K_INT_TIM_TIMER) {
			if (ATH_DBG_WARN_ON_ONCE(sc->ps_idle))
				goto chip_reset;
			/* Clear RxAbort bit so that we can
			 * receive frames */
			ath9k_setpower(sc, ATH9K_PM_AWAKE);
			spin_lock(&sc->sc_pm_lock);
			ath9k_hw_setrxabort(sc->sc_ah, 0);
			sc->ps_flags |= PS_WAIT_FOR_BEACON;
			spin_unlock(&sc->sc_pm_lock);
		}

chip_reset:

	ath_debug_stat_interrupt(sc, status);

	if (sched) {
		/* turn off every interrupt */
		ath9k_hw_disable_interrupts(ah);
		tasklet_schedule(&sc->intr_tq);
	}

	return IRQ_HANDLED;

#undef SCHED_INTR
}

static int ath_reset(struct ath_softc *sc)
{
	int r;

	ath9k_ps_wakeup(sc);
	r = ath_reset_internal(sc, NULL);
	ath9k_ps_restore(sc);

	return r;
}

void ath9k_queue_reset(struct ath_softc *sc, enum ath_reset_type type)
{
#ifdef CONFIG_ATH9K_DEBUGFS
	RESET_STAT_INC(sc, type);
#endif
	set_bit(SC_OP_HW_RESET, &sc->sc_flags);
	ieee80211_queue_work(sc->hw, &sc->hw_reset_work);
}

void ath_reset_work(struct work_struct *work)
{
	struct ath_softc *sc = container_of(work, struct ath_softc, hw_reset_work);

	ath_reset(sc);
}

/**********************/
/* mac80211 callbacks */
/**********************/

static int ath9k_start(struct ieee80211_hw *hw)
{
	struct ath_softc *sc = hw->priv;
	struct ath_hw *ah = sc->sc_ah;
	struct ath_common *common = ath9k_hw_common(ah);
	struct ieee80211_channel *curchan = hw->conf.chandef.chan;
	struct ath9k_channel *init_channel;
	int r;

	ath_dbg(common, CONFIG,
		"Starting driver with initial channel: %d MHz\n",
		curchan->center_freq);

	ath9k_ps_wakeup(sc);
	mutex_lock(&sc->mutex);

	init_channel = ath9k_cmn_get_channel(hw, ah, &hw->conf.chandef);

	/* Reset SERDES registers */
	ath9k_hw_configpcipowersave(ah, false);

	/*
	 * The basic interface to setting the hardware in a good
	 * state is ``reset''. On return the hardware is known to
	 * be powered up and with interrupts disabled. This must
	 * be followed by initialization of the appropriate bits
	 * and then setup of the interrupt mask.
	 */
	spin_lock_bh(&sc->sc_pcu_lock);

	atomic_set(&ah->intr_ref_cnt, -1);

	r = ath9k_hw_reset(ah, init_channel, ah->caldata, false);
	if (r) {
		ath_err(common,
			"Unable to reset hardware; reset status %d (freq %u MHz)\n",
			r, curchan->center_freq);
		ah->reset_power_on = false;
	}

	/* Setup our intr mask. */
	ah->imask = ATH9K_INT_TX | ATH9K_INT_RXEOL |
		    ATH9K_INT_RXORN | ATH9K_INT_FATAL |
		    ATH9K_INT_GLOBAL;

	if (ah->caps.hw_caps & ATH9K_HW_CAP_EDMA)
		ah->imask |= ATH9K_INT_RXHP |
			     ATH9K_INT_RXLP |
			     ATH9K_INT_BB_WATCHDOG;
	else
		ah->imask |= ATH9K_INT_RX;

	ah->imask |= ATH9K_INT_GTT;

	if (ah->caps.hw_caps & ATH9K_HW_CAP_HT)
		ah->imask |= ATH9K_INT_CST;

	ath_mci_enable(sc);

	clear_bit(SC_OP_INVALID, &sc->sc_flags);
	sc->sc_ah->is_monitoring = false;

	if (!ath_complete_reset(sc, false))
		ah->reset_power_on = false;

	if (ah->led_pin >= 0) {
		ath9k_hw_cfg_output(ah, ah->led_pin,
				    AR_GPIO_OUTPUT_MUX_AS_OUTPUT);
		ath9k_hw_set_gpio(ah, ah->led_pin, 0);
	}

	/*
	 * Reset key cache to sane defaults (all entries cleared) instead of
	 * semi-random values after suspend/resume.
	 */
	ath9k_cmn_init_crypto(sc->sc_ah);

	spin_unlock_bh(&sc->sc_pcu_lock);

	mutex_unlock(&sc->mutex);

	ath9k_ps_restore(sc);

	return 0;
}

static void ath9k_tx(struct ieee80211_hw *hw,
		     struct ieee80211_tx_control *control,
		     struct sk_buff *skb)
{
	struct ath_softc *sc = hw->priv;
	struct ath_common *common = ath9k_hw_common(sc->sc_ah);
	struct ath_tx_control txctl;
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
	unsigned long flags;

	if (sc->ps_enabled) {
		/*
		 * mac80211 does not set PM field for normal data frames, so we
		 * need to update that based on the current PS mode.
		 */
		if (ieee80211_is_data(hdr->frame_control) &&
		    !ieee80211_is_nullfunc(hdr->frame_control) &&
		    !ieee80211_has_pm(hdr->frame_control)) {
			ath_dbg(common, PS,
				"Add PM=1 for a TX frame while in PS mode\n");
			hdr->frame_control |= cpu_to_le16(IEEE80211_FCTL_PM);
		}
	}

	if (unlikely(sc->sc_ah->power_mode == ATH9K_PM_NETWORK_SLEEP)) {
		/*
		 * We are using PS-Poll and mac80211 can request TX while in
		 * power save mode. Need to wake up hardware for the TX to be
		 * completed and if needed, also for RX of buffered frames.
		 */
		ath9k_ps_wakeup(sc);
		spin_lock_irqsave(&sc->sc_pm_lock, flags);
		if (!(sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_AUTOSLEEP))
			ath9k_hw_setrxabort(sc->sc_ah, 0);
		if (ieee80211_is_pspoll(hdr->frame_control)) {
			ath_dbg(common, PS,
				"Sending PS-Poll to pick a buffered frame\n");
			sc->ps_flags |= PS_WAIT_FOR_PSPOLL_DATA;
		} else {
			ath_dbg(common, PS, "Wake up to complete TX\n");
			sc->ps_flags |= PS_WAIT_FOR_TX_ACK;
		}
		/*
		 * The actual restore operation will happen only after
		 * the ps_flags bit is cleared. We are just dropping
		 * the ps_usecount here.
		 */
		spin_unlock_irqrestore(&sc->sc_pm_lock, flags);
		ath9k_ps_restore(sc);
	}

	/*
	 * Cannot tx while the hardware is in full sleep; it first needs a
	 * full chip reset to recover from that.
	 */
	if (unlikely(sc->sc_ah->power_mode == ATH9K_PM_FULL_SLEEP)) {
		ath_err(common, "TX while HW is in FULL_SLEEP mode\n");
		goto exit;
	}

	memset(&txctl, 0, sizeof(struct ath_tx_control));
	txctl.txq = sc->tx.txq_map[skb_get_queue_mapping(skb)];
	txctl.sta = control->sta;

	ath_dbg(common, XMIT, "transmitting packet, skb: %p\n", skb);

	if (ath_tx_start(hw, skb, &txctl) != 0) {
		ath_dbg(common, XMIT, "TX failed\n");
		TX_STAT_INC(txctl.txq->axq_qnum, txfailed);
		goto exit;
	}

	return;
exit:
	ieee80211_free_txskb(hw, skb);
}

static void ath9k_stop(struct ieee80211_hw *hw)
{
	struct ath_softc *sc = hw->priv;
	struct ath_hw *ah = sc->sc_ah;
	struct ath_common *common = ath9k_hw_common(ah);
	bool prev_idle;

	mutex_lock(&sc->mutex);

	ath_cancel_work(sc);
	del_timer_sync(&sc->rx_poll_timer);

	if (test_bit(SC_OP_INVALID, &sc->sc_flags)) {
		ath_dbg(common, ANY, "Device not present\n");
		mutex_unlock(&sc->mutex);
		return;
	}

	/* Ensure HW is awake when we try to shut it down. */
	ath9k_ps_wakeup(sc);

	spin_lock_bh(&sc->sc_pcu_lock);

	/* prevent tasklets from enabling interrupts once we disable them */
	ah->imask &= ~ATH9K_INT_GLOBAL;

	/* make sure h/w will not generate any interrupt
	 * before setting the invalid flag. */
	ath9k_hw_disable_interrupts(ah);

	spin_unlock_bh(&sc->sc_pcu_lock);

	/* we can now sync irq and kill any running tasklets, since we already
	 * disabled interrupts and are not holding a spin lock */
	synchronize_irq(sc->irq);
	tasklet_kill(&sc->intr_tq);
	tasklet_kill(&sc->bcon_tasklet);

	prev_idle = sc->ps_idle;
	sc->ps_idle = true;

	spin_lock_bh(&sc->sc_pcu_lock);

	if (ah->led_pin >= 0) {
		ath9k_hw_set_gpio(ah, ah->led_pin, 1);
		ath9k_hw_cfg_gpio_input(ah, ah->led_pin);
	}

	ath_prepare_reset(sc);

	if (sc->rx.frag) {
		dev_kfree_skb_any(sc->rx.frag);
		sc->rx.frag = NULL;
	}

	if (!ah->curchan)
		ah->curchan = ath9k_cmn_get_channel(hw, ah, &hw->conf.chandef);

	ath9k_hw_reset(ah, ah->curchan, ah->caldata, false);
	ath9k_hw_phy_disable(ah);

	ath9k_hw_configpcipowersave(ah, true);

	spin_unlock_bh(&sc->sc_pcu_lock);

	ath9k_ps_restore(sc);

	set_bit(SC_OP_INVALID, &sc->sc_flags);
	sc->ps_idle = prev_idle;

	mutex_unlock(&sc->mutex);

	ath_dbg(common, CONFIG, "Driver halt\n");
}

static bool ath9k_uses_beacons(int type)
{
	switch (type) {
	case NL80211_IFTYPE_AP:
	case NL80211_IFTYPE_ADHOC:
	case NL80211_IFTYPE_MESH_POINT:
		return true;
	default:
		return false;
	}
}
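
/*
 * Interface iterator: accumulate the BSSID mask from each vif's MAC
 * address and count the active interfaces per type.
 */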
static void ath9k_vif_iter(void *data, u8 *mac, struct ieee80211_vif *vif)
{
	struct ath9k_vif_iter_data *iter_data = data;
	int i;

	if (iter_data->has_hw_macaddr) {
		for (i = 0; i < ETH_ALEN; i++)
			iter_data->mask[i] &=
				~(iter_data->hw_macaddr[i] ^ mac[i]);
	} else {
		memcpy(iter_data->hw_macaddr, mac, ETH_ALEN);
		iter_data->has_hw_macaddr = true;
	}

	switch (vif->type) {
	case NL80211_IFTYPE_AP:
		iter_data->naps++;
		break;
	case NL80211_IFTYPE_STATION:
		iter_data->nstations++;
		break;
	case NL80211_IFTYPE_ADHOC:
		iter_data->nadhocs++;
		break;
	case NL80211_IFTYPE_MESH_POINT:
		iter_data->nmeshes++;
		break;
	case NL80211_IFTYPE_WDS:
		iter_data->nwds++;
		break;
	default:
		break;
	}
}

static void ath9k_sta_vif_iter(void *data, u8 *mac, struct ieee80211_vif *vif)
{
	struct ath_softc *sc = data;
	struct ath_vif *avp = (void *)vif->drv_priv;

	if (vif->type != NL80211_IFTYPE_STATION)
		return;

	if (avp->primary_sta_vif)
		ath9k_set_assoc_state(sc, vif);
}

/* Called with sc->mutex held. */
void ath9k_calculate_iter_data(struct ieee80211_hw *hw,
			       struct ieee80211_vif *vif,
			       struct ath9k_vif_iter_data *iter_data)
{
	struct ath_softc *sc = hw->priv;
	struct ath_hw *ah = sc->sc_ah;
	struct ath_common *common = ath9k_hw_common(ah);

	/*
	 * Use the hardware MAC address as reference, the hardware uses it
	 * together with the BSSID mask when matching addresses.
	 */
	memset(iter_data, 0, sizeof(*iter_data));
	memset(&iter_data->mask, 0xff, ETH_ALEN);

	if (vif)
		ath9k_vif_iter(iter_data, vif->addr, vif);

	/* Get list of all active MAC addresses */
	ieee80211_iterate_active_interfaces_atomic(
		sc->hw, IEEE80211_IFACE_ITER_RESUME_ALL,
		ath9k_vif_iter, iter_data);

	memcpy(common->macaddr, iter_data->hw_macaddr, ETH_ALEN);
}

/* Called with sc->mutex held. */
static void ath9k_calculate_summary_state(struct ieee80211_hw *hw,
					  struct ieee80211_vif *vif)
{
	struct ath_softc *sc = hw->priv;
	struct ath_hw *ah = sc->sc_ah;
	struct ath_common *common = ath9k_hw_common(ah);
	struct ath9k_vif_iter_data iter_data;
	enum nl80211_iftype old_opmode = ah->opmode;

	ath9k_calculate_iter_data(hw, vif, &iter_data);

	memcpy(common->bssidmask, iter_data.mask, ETH_ALEN);
	ath_hw_setbssidmask(common);

	if (iter_data.naps > 0) {
		ath9k_hw_set_tsfadjust(ah, true);
		ah->opmode = NL80211_IFTYPE_AP;
	} else {
		ath9k_hw_set_tsfadjust(ah, false);

		if (iter_data.nmeshes)
			ah->opmode = NL80211_IFTYPE_MESH_POINT;
		else if (iter_data.nwds)
			ah->opmode = NL80211_IFTYPE_AP;
		else if (iter_data.nadhocs)
			ah->opmode = NL80211_IFTYPE_ADHOC;
		else
			ah->opmode = NL80211_IFTYPE_STATION;
	}

	ath9k_hw_setopmode(ah);

	if ((iter_data.nstations + iter_data.nadhocs + iter_data.nmeshes) > 0)
		ah->imask |= ATH9K_INT_TSFOOR;
	else
		ah->imask &= ~ATH9K_INT_TSFOOR;

	ath9k_hw_set_interrupts(ah);

	/*
	 * If we are changing the opmode to STATION,
	 * a beacon sync needs to be done.
	 */
	if (ah->opmode == NL80211_IFTYPE_STATION &&
	    old_opmode == NL80211_IFTYPE_AP &&
	    test_bit(SC_OP_PRIM_STA_VIF, &sc->sc_flags)) {
		ieee80211_iterate_active_interfaces_atomic(
			sc->hw, IEEE80211_IFACE_ITER_RESUME_ALL,
			ath9k_sta_vif_iter, sc);
	}
}

static int ath9k_add_interface(struct ieee80211_hw *hw,
			       struct ieee80211_vif *vif)
{
	struct ath_softc *sc = hw->priv;
	struct ath_hw *ah = sc->sc_ah;
	struct ath_common *common = ath9k_hw_common(ah);
	struct ath_vif *avp = (void *)vif->drv_priv;
	struct ath_node *an = &avp->mcast_node;

	mutex_lock(&sc->mutex);

	if (config_enabled(CONFIG_ATH9K_TX99)) {
		if (sc->nvifs >= 1) {
			mutex_unlock(&sc->mutex);
			return -EOPNOTSUPP;
		}
		sc->tx99_vif = vif;
	}

	ath_dbg(common, CONFIG, "Attach a VIF of type: %d\n", vif->type);
	sc->nvifs++;

	ath9k_ps_wakeup(sc);
	ath9k_calculate_summary_state(hw, vif);
	ath9k_ps_restore(sc);

	if (ath9k_uses_beacons(vif->type))
		ath9k_beacon_assign_slot(sc, vif);

	an->sc = sc;
	an->sta = NULL;
	an->vif = vif;
	an->no_ps_filter = true;
	ath_tx_node_init(sc, an);

	mutex_unlock(&sc->mutex);
	return 0;
}

static int ath9k_change_interface(struct ieee80211_hw *hw,
				  struct ieee80211_vif *vif,
				  enum nl80211_iftype new_type,
				  bool p2p)
{
	struct ath_softc *sc = hw->priv;
	struct ath_common *common = ath9k_hw_common(sc->sc_ah);

	mutex_lock(&sc->mutex);

	if (config_enabled(CONFIG_ATH9K_TX99)) {
		mutex_unlock(&sc->mutex);
		return -EOPNOTSUPP;
	}

	ath_dbg(common, CONFIG, "Change Interface\n");

	if (ath9k_uses_beacons(vif->type))
		ath9k_beacon_remove_slot(sc, vif);

	vif->type = new_type;
	vif->p2p = p2p;

	ath9k_ps_wakeup(sc);
	ath9k_calculate_summary_state(hw, vif);
	ath9k_ps_restore(sc);

	if (ath9k_uses_beacons(vif->type))
		ath9k_beacon_assign_slot(sc, vif);

	mutex_unlock(&sc->mutex);
	return 0;
}

static void ath9k_remove_interface(struct ieee80211_hw *hw,
				   struct ieee80211_vif *vif)
{
	struct ath_softc *sc = hw->priv;
	struct ath_common *common = ath9k_hw_common(sc->sc_ah);
	struct ath_vif *avp = (void *)vif->drv_priv;

	ath_dbg(common, CONFIG, "Detach Interface\n");

	mutex_lock(&sc->mutex);

	sc->nvifs--;
	sc->tx99_vif = NULL;

	if (ath9k_uses_beacons(vif->type))
		ath9k_beacon_remove_slot(sc, vif);

	if (sc->csa_vif == vif)
		sc->csa_vif = NULL;

	ath9k_ps_wakeup(sc);
	ath9k_calculate_summary_state(hw, NULL);
	ath9k_ps_restore(sc);

	ath_tx_node_cleanup(sc, &avp->mcast_node);

	mutex_unlock(&sc->mutex);
}
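
/*
 * Enable/disable driver power save. When the chip does not support
 * AUTOSLEEP, the TIM timer interrupt and RxAbort are toggled so the
 * hardware can still wake up for beacons.
 */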
static void ath9k_enable_ps(struct ath_softc *sc)
{
	struct ath_hw *ah = sc->sc_ah;
	struct ath_common *common = ath9k_hw_common(ah);

	if (config_enabled(CONFIG_ATH9K_TX99))
		return;

	sc->ps_enabled = true;
	if (!(ah->caps.hw_caps & ATH9K_HW_CAP_AUTOSLEEP)) {
		if ((ah->imask & ATH9K_INT_TIM_TIMER) == 0) {
			ah->imask |= ATH9K_INT_TIM_TIMER;
			ath9k_hw_set_interrupts(ah);
		}
		ath9k_hw_setrxabort(ah, 1);
	}
	ath_dbg(common, PS, "PowerSave enabled\n");
}

static void ath9k_disable_ps(struct ath_softc *sc)
{
	struct ath_hw *ah = sc->sc_ah;
	struct ath_common *common = ath9k_hw_common(ah);

	if (config_enabled(CONFIG_ATH9K_TX99))
		return;

	sc->ps_enabled = false;
	ath9k_hw_setpower(ah, ATH9K_PM_AWAKE);
	if (!(ah->caps.hw_caps & ATH9K_HW_CAP_AUTOSLEEP)) {
		ath9k_hw_setrxabort(ah, 0);
		sc->ps_flags &= ~(PS_WAIT_FOR_BEACON |
				  PS_WAIT_FOR_CAB |
				  PS_WAIT_FOR_PSPOLL_DATA |
				  PS_WAIT_FOR_TX_ACK);
		if (ah->imask & ATH9K_INT_TIM_TIMER) {
			ah->imask &= ~ATH9K_INT_TIM_TIMER;
			ath9k_hw_set_interrupts(ah);
		}
	}
	ath_dbg(common, PS, "PowerSave disabled\n");
}

void ath9k_spectral_scan_trigger(struct ieee80211_hw *hw)
{
	struct ath_softc *sc = hw->priv;
	struct ath_hw *ah = sc->sc_ah;
	struct ath_common *common = ath9k_hw_common(ah);
	u32 rxfilter;

	if (config_enabled(CONFIG_ATH9K_TX99))
		return;

	if (!ath9k_hw_ops(ah)->spectral_scan_trigger) {
		ath_err(common, "spectrum analyzer not implemented on this hardware\n");
		return;
	}

	ath9k_ps_wakeup(sc);
	rxfilter = ath9k_hw_getrxfilter(ah);
	ath9k_hw_setrxfilter(ah, rxfilter |
			     ATH9K_RX_FILTER_PHYRADAR |
			     ATH9K_RX_FILTER_PHYERR);

	/* TODO: usually this should not be necessary, but for some reason
	 * (or in some mode?) the trigger must be called after the
	 * configuration, otherwise the register will have its values reset
	 * (on my ar9220 to value 0x01002310)
	 */
	ath9k_spectral_scan_config(hw, sc->spectral_mode);
	ath9k_hw_ops(ah)->spectral_scan_trigger(ah);
	ath9k_ps_restore(sc);
}

int ath9k_spectral_scan_config(struct ieee80211_hw *hw,
			       enum spectral_mode spectral_mode)
{
	struct ath_softc *sc = hw->priv;
	struct ath_hw *ah = sc->sc_ah;
	struct ath_common *common = ath9k_hw_common(ah);

	if (!ath9k_hw_ops(ah)->spectral_scan_trigger) {
		ath_err(common, "spectrum analyzer not implemented on this hardware\n");
		return -1;
	}

	switch (spectral_mode) {
	case SPECTRAL_DISABLED:
		sc->spec_config.enabled = 0;
		break;
	case SPECTRAL_BACKGROUND:
		/* send endless samples.
		 * TODO: is this really useful for "background"?
		 */
		sc->spec_config.endless = 1;
		sc->spec_config.enabled = 1;
		break;
	case SPECTRAL_CHANSCAN:
	case SPECTRAL_MANUAL:
		sc->spec_config.endless = 0;
		sc->spec_config.enabled = 1;
		break;
	default:
		return -1;
	}

	ath9k_ps_wakeup(sc);
	ath9k_hw_ops(ah)->spectral_scan_config(ah, &sc->spec_config);
	ath9k_ps_restore(sc);

	sc->spectral_mode = spectral_mode;

	return 0;
}
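
/*
 * mac80211 config callback: handles idle transitions, power-save
 * on/off, monitor mode, channel changes and TX power updates.
 */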
static int ath9k_config(struct ieee80211_hw *hw, u32 changed)
{
	struct ath_softc *sc = hw->priv;
	struct ath_hw *ah = sc->sc_ah;
	struct ath_common *common = ath9k_hw_common(ah);
	struct ieee80211_conf *conf = &hw->conf;
	bool reset_channel = false;

	ath9k_ps_wakeup(sc);
	mutex_lock(&sc->mutex);

	if (changed & IEEE80211_CONF_CHANGE_IDLE) {
		sc->ps_idle = !!(conf->flags & IEEE80211_CONF_IDLE);
		if (sc->ps_idle) {
			ath_cancel_work(sc);
			ath9k_stop_btcoex(sc);
		} else {
			ath9k_start_btcoex(sc);
			/*
			 * The chip needs a reset to properly wake up from
			 * full sleep
			 */
			reset_channel = ah->chip_fullsleep;
		}
	}

	/*
	 * We just prepare to enable PS. We have to wait until our AP has
	 * ACK'd our null data frame to disable RX, otherwise we'll ignore
	 * those ACKs and end up retransmitting the same null data frames.
	 * IEEE80211_CONF_CHANGE_PS is only passed by mac80211 for STA mode.
	 */
	if (changed & IEEE80211_CONF_CHANGE_PS) {
		unsigned long flags;
		spin_lock_irqsave(&sc->sc_pm_lock, flags);
		if (conf->flags & IEEE80211_CONF_PS)
			ath9k_enable_ps(sc);
		else
			ath9k_disable_ps(sc);
		spin_unlock_irqrestore(&sc->sc_pm_lock, flags);
	}

	if (changed & IEEE80211_CONF_CHANGE_MONITOR) {
		if (conf->flags & IEEE80211_CONF_MONITOR) {
			ath_dbg(common, CONFIG, "Monitor mode is enabled\n");
			sc->sc_ah->is_monitoring = true;
		} else {
			ath_dbg(common, CONFIG, "Monitor mode is disabled\n");
			sc->sc_ah->is_monitoring = false;
		}
	}

	if ((changed & IEEE80211_CONF_CHANGE_CHANNEL) || reset_channel) {
		if (ath_set_channel(sc, &hw->conf.chandef) < 0) {
			ath_err(common, "Unable to set channel\n");
			mutex_unlock(&sc->mutex);
			ath9k_ps_restore(sc);
			return -EINVAL;
		}
	}

	if (changed & IEEE80211_CONF_CHANGE_POWER) {
		ath_dbg(common, CONFIG, "Set power: %d\n", conf->power_level);
		sc->config.txpowlimit = 2 * conf->power_level;
		ath9k_cmn_update_txpow(ah, sc->curtxpow,
				       sc->config.txpowlimit, &sc->curtxpow);
	}

	mutex_unlock(&sc->mutex);
	ath9k_ps_restore(sc);

	return 0;
}

#define SUPPORTED_FILTERS			\
	(FIF_PROMISC_IN_BSS |			\
	FIF_ALLMULTI |				\
	FIF_CONTROL |				\
	FIF_PSPOLL |				\
	FIF_OTHER_BSS |				\
	FIF_BCN_PRBRESP_PROMISC |		\
	FIF_PROBE_REQ |				\
	FIF_FCSFAIL)

/* FIXME: sc->sc_full_reset ? */
static void ath9k_configure_filter(struct ieee80211_hw *hw,
				   unsigned int changed_flags,
				   unsigned int *total_flags,
				   u64 multicast)
{
	struct ath_softc *sc = hw->priv;
	u32 rfilt;

	changed_flags &= SUPPORTED_FILTERS;
	*total_flags &= SUPPORTED_FILTERS;

	sc->rx.rxfilter = *total_flags;
	ath9k_ps_wakeup(sc);
	rfilt = ath_calcrxfilter(sc);
	ath9k_hw_setrxfilter(sc->sc_ah, rfilt);
	ath9k_ps_restore(sc);

	ath_dbg(ath9k_hw_common(sc->sc_ah), CONFIG, "Set HW RX filter: 0x%x\n",
		rfilt);
}

static int ath9k_sta_add(struct ieee80211_hw *hw,
			 struct ieee80211_vif *vif,
			 struct ieee80211_sta *sta)
{
	struct ath_softc *sc = hw->priv;
	struct ath_common *common = ath9k_hw_common(sc->sc_ah);
	struct ath_node *an = (struct ath_node *) sta->drv_priv;
	struct ieee80211_key_conf ps_key = { };
	int key;

	ath_node_attach(sc, sta, vif);

	if (vif->type != NL80211_IFTYPE_AP &&
	    vif->type != NL80211_IFTYPE_AP_VLAN)
		return 0;

	key = ath_key_config(common, vif, sta, &ps_key);
	if (key > 0)
		an->ps_key = key;

	return 0;
}

static void ath9k_del_ps_key(struct ath_softc *sc,
			     struct ieee80211_vif *vif,
			     struct ieee80211_sta *sta)
{
	struct ath_common *common = ath9k_hw_common(sc->sc_ah);
	struct ath_node *an = (struct ath_node *) sta->drv_priv;
	struct ieee80211_key_conf ps_key = { .hw_key_idx = an->ps_key };

	if (!an->ps_key)
		return;

	ath_key_delete(common, &ps_key);
	an->ps_key = 0;
}

static int ath9k_sta_remove(struct ieee80211_hw *hw,
			    struct ieee80211_vif *vif,
			    struct ieee80211_sta *sta)
{
	struct ath_softc *sc = hw->priv;

	ath9k_del_ps_key(sc, vif, sta);
	ath_node_detach(sc, sta);

	return 0;
}

static void ath9k_sta_notify(struct ieee80211_hw *hw,
			     struct ieee80211_vif *vif,
			     enum sta_notify_cmd cmd,
			     struct ieee80211_sta *sta)
{
	struct ath_softc *sc = hw->priv;
	struct ath_node *an = (struct ath_node *) sta->drv_priv;

	switch (cmd) {
	case STA_NOTIFY_SLEEP:
		an->sleeping = true;
		ath_tx_aggr_sleep(sta, sc, an);
		break;
	case STA_NOTIFY_AWAKE:
		an->sleeping = false;
		ath_tx_aggr_wakeup(sc, an);
		break;
	}
}

static int ath9k_conf_tx(struct ieee80211_hw *hw,
			 struct ieee80211_vif *vif, u16 queue,
			 const struct ieee80211_tx_queue_params *params)
{
	struct ath_softc *sc = hw->priv;
	struct ath_common *common = ath9k_hw_common(sc->sc_ah);
	struct ath_txq *txq;
	struct ath9k_tx_queue_info qi;
	int ret = 0;

	if (queue >= IEEE80211_NUM_ACS)
		return 0;

	txq = sc->tx.txq_map[queue];

	ath9k_ps_wakeup(sc);
	mutex_lock(&sc->mutex);

	memset(&qi, 0, sizeof(struct ath9k_tx_queue_info));

	qi.tqi_aifs = params->aifs;
	qi.tqi_cwmin = params->cw_min;
	qi.tqi_cwmax = params->cw_max;
	qi.tqi_burstTime = params->txop * 32;

	ath_dbg(common, CONFIG,
		"Configure tx [queue/halq] [%d/%d], aifs: %d, cw_min: %d, cw_max: %d, txop: %d\n",
		queue, txq->axq_qnum, params->aifs, params->cw_min,
		params->cw_max, params->txop);

	ath_update_max_aggr_framelen(sc, queue, qi.tqi_burstTime);
	ret = ath_txq_update(sc, txq->axq_qnum, &qi);
	if (ret)
		ath_err(common, "TXQ Update failed\n");

	mutex_unlock(&sc->mutex);
	ath9k_ps_restore(sc);

	return ret;
}

static int ath9k_set_key(struct ieee80211_hw *hw,
			 enum set_key_cmd cmd,
			 struct ieee80211_vif *vif,
			 struct ieee80211_sta *sta,
			 struct ieee80211_key_conf *key)
{
	struct ath_softc *sc = hw->priv;
	struct ath_common *common = ath9k_hw_common(sc->sc_ah);
	int ret = 0;

	if (ath9k_modparam_nohwcrypt)
		return -ENOSPC;

	if ((vif->type == NL80211_IFTYPE_ADHOC ||
	     vif->type == NL80211_IFTYPE_MESH_POINT) &&
	    (key->cipher == WLAN_CIPHER_SUITE_TKIP ||
	     key->cipher == WLAN_CIPHER_SUITE_CCMP) &&
	    !(key->flags & IEEE80211_KEY_FLAG_PAIRWISE)) {
		/*
		 * For now, disable hw crypto for the RSN IBSS group keys. This
		 * could be optimized in the future to use a modified key cache
		 * design to support per-STA RX GTK, but until that gets
		 * implemented, use of software crypto for group addressed
		 * frames is acceptable to allow RSN IBSS to be used.
		 */
		return -EOPNOTSUPP;
	}

	mutex_lock(&sc->mutex);
	ath9k_ps_wakeup(sc);
	ath_dbg(common, CONFIG, "Set HW Key\n");

	switch (cmd) {
	case SET_KEY:
		if (sta)
			ath9k_del_ps_key(sc, vif, sta);

		ret = ath_key_config(common, vif, sta, key);
		if (ret >= 0) {
			key->hw_key_idx = ret;
			/* push IV and Michael MIC generation to stack */
			key->flags |= IEEE80211_KEY_FLAG_GENERATE_IV;
			if (key->cipher == WLAN_CIPHER_SUITE_TKIP)
				key->flags |= IEEE80211_KEY_FLAG_GENERATE_MMIC;
			if (sc->sc_ah->sw_mgmt_crypto &&
			    key->cipher == WLAN_CIPHER_SUITE_CCMP)
				key->flags |= IEEE80211_KEY_FLAG_SW_MGMT_TX;
			ret = 0;
		}
		break;
	case DISABLE_KEY:
		ath_key_delete(common, key);
		break;
	default:
		ret = -EINVAL;
	}

	ath9k_ps_restore(sc);
	mutex_unlock(&sc->mutex);

	return ret;
}

static void ath9k_set_assoc_state(struct ath_softc *sc,
				  struct ieee80211_vif *vif)
{
	struct ath_common *common = ath9k_hw_common(sc->sc_ah);
	struct ath_vif *avp = (void *)vif->drv_priv;
	struct ieee80211_bss_conf *bss_conf = &vif->bss_conf;
	unsigned long flags;

	set_bit(SC_OP_PRIM_STA_VIF, &sc->sc_flags);
	avp->primary_sta_vif = true;

	/*
	 * Set the AID, BSSID and do beacon-sync only when
	 * the HW opmode is STATION.
	 *
	 * But the primary bit is set above in any case.
	 */
	if (sc->sc_ah->opmode != NL80211_IFTYPE_STATION)
		return;

	memcpy(common->curbssid, bss_conf->bssid, ETH_ALEN);
	common->curaid = bss_conf->aid;
	ath9k_hw_write_associd(sc->sc_ah);

	sc->last_rssi = ATH_RSSI_DUMMY_MARKER;
	sc->sc_ah->stats.avgbrssi = ATH_RSSI_DUMMY_MARKER;

	spin_lock_irqsave(&sc->sc_pm_lock, flags);
	sc->ps_flags |= PS_BEACON_SYNC | PS_WAIT_FOR_BEACON;
	spin_unlock_irqrestore(&sc->sc_pm_lock, flags);

	if (ath9k_hw_mci_is_enabled(sc->sc_ah))
		ath9k_mci_update_wlan_channels(sc, false);

	ath_dbg(common, CONFIG,
		"Primary Station interface: %pM, BSSID: %pM\n",
		vif->addr, common->curbssid);
}

static void ath9k_bss_assoc_iter(void *data, u8 *mac, struct ieee80211_vif *vif)
{
	struct ath_softc *sc = data;
	struct ieee80211_bss_conf *bss_conf = &vif->bss_conf;

	if (test_bit(SC_OP_PRIM_STA_VIF, &sc->sc_flags))
		return;

	if (bss_conf->assoc)
		ath9k_set_assoc_state(sc, vif);
}

static void ath9k_bss_info_changed(struct ieee80211_hw *hw,
				   struct ieee80211_vif *vif,
				   struct ieee80211_bss_conf *bss_conf,
				   u32 changed)
{
#define CHECK_ANI				\
	(BSS_CHANGED_ASSOC |			\
	 BSS_CHANGED_IBSS |			\
	 BSS_CHANGED_BEACON_ENABLED)

	struct ath_softc *sc = hw->priv;
	struct ath_hw *ah = sc->sc_ah;
	struct ath_common *common = ath9k_hw_common(ah);
	struct ath_vif *avp = (void *)vif->drv_priv;
	int slottime;

	ath9k_ps_wakeup(sc);
	mutex_lock(&sc->mutex);

	if (changed & BSS_CHANGED_ASSOC) {
		ath_dbg(common, CONFIG, "BSSID %pM Changed ASSOC %d\n",
			bss_conf->bssid, bss_conf->assoc);

		if (avp->primary_sta_vif && !bss_conf->assoc) {
			clear_bit(SC_OP_PRIM_STA_VIF, &sc->sc_flags);
			avp->primary_sta_vif = false;

			if (ah->opmode == NL80211_IFTYPE_STATION)
				clear_bit(SC_OP_BEACONS, &sc->sc_flags);
		}

		ieee80211_iterate_active_interfaces_atomic(
			sc->hw, IEEE80211_IFACE_ITER_RESUME_ALL,
			ath9k_bss_assoc_iter, sc);

		if (!test_bit(SC_OP_PRIM_STA_VIF, &sc->sc_flags) &&
		    ah->opmode == NL80211_IFTYPE_STATION) {
			memset(common->curbssid, 0, ETH_ALEN);
			common->curaid = 0;
			ath9k_hw_write_associd(sc->sc_ah);
			if (ath9k_hw_mci_is_enabled(sc->sc_ah))
				ath9k_mci_update_wlan_channels(sc, true);
		}
	}

	if (changed & BSS_CHANGED_IBSS) {
		memcpy(common->curbssid, bss_conf->bssid, ETH_ALEN);
		common->curaid = bss_conf->aid;
		ath9k_hw_write_associd(sc->sc_ah);
	}

	if ((changed & BSS_CHANGED_BEACON_ENABLED) ||
	    (changed & BSS_CHANGED_BEACON_INT)) {
		if (ah->opmode == NL80211_IFTYPE_AP &&
		    bss_conf->enable_beacon)
			ath9k_set_tsfadjust(sc, vif);
		if (ath9k_allow_beacon_config(sc, vif))
			ath9k_beacon_config(sc, vif, changed);
	}

	if (changed & BSS_CHANGED_ERP_SLOT) {
		if (bss_conf->use_short_slot)
			slottime = 9;
		else
			slottime = 20;
		if (vif->type == NL80211_IFTYPE_AP) {
			/*
			 * Defer update, so that connected stations can adjust
			 * their settings at the same time.
			 * See beacon.c for more details
			 */
			sc->beacon.slottime = slottime;
			sc->beacon.updateslot = UPDATE;
		} else {
			ah->slottime = slottime;
			ath9k_hw_init_global_settings(ah);
		}
	}

	if (changed & CHECK_ANI)
		ath_check_ani(sc);

	mutex_unlock(&sc->mutex);
	ath9k_ps_restore(sc);

#undef CHECK_ANI
}
static u64 ath9k_get_tsf(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
{
	struct ath_softc *sc = hw->priv;
	u64 tsf;

	mutex_lock(&sc->mutex);
	ath9k_ps_wakeup(sc);
	tsf = ath9k_hw_gettsf64(sc->sc_ah);
	ath9k_ps_restore(sc);
	mutex_unlock(&sc->mutex);

	return tsf;
}

static void ath9k_set_tsf(struct ieee80211_hw *hw,
			  struct ieee80211_vif *vif,
			  u64 tsf)
{
	struct ath_softc *sc = hw->priv;

	mutex_lock(&sc->mutex);
	ath9k_ps_wakeup(sc);
	ath9k_hw_settsf64(sc->sc_ah, tsf);
	ath9k_ps_restore(sc);
	mutex_unlock(&sc->mutex);
}

static void ath9k_reset_tsf(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
{
	struct ath_softc *sc = hw->priv;

	mutex_lock(&sc->mutex);
	ath9k_ps_wakeup(sc);
	ath9k_hw_reset_tsf(sc->sc_ah);
	ath9k_ps_restore(sc);
	mutex_unlock(&sc->mutex);
}

static int ath9k_ampdu_action(struct ieee80211_hw *hw,
			      struct ieee80211_vif *vif,
			      enum ieee80211_ampdu_mlme_action action,
			      struct ieee80211_sta *sta,
			      u16 tid, u16 *ssn, u8 buf_size)
{
	struct ath_softc *sc = hw->priv;
	bool flush = false;
	int ret = 0;

	mutex_lock(&sc->mutex);

	switch (action) {
	case IEEE80211_AMPDU_RX_START:
		break;
	case IEEE80211_AMPDU_RX_STOP:
		break;
	case IEEE80211_AMPDU_TX_START:
		ath9k_ps_wakeup(sc);
		ret = ath_tx_aggr_start(sc, sta, tid, ssn);
		if (!ret)
			ieee80211_start_tx_ba_cb_irqsafe(vif, sta->addr, tid);
		ath9k_ps_restore(sc);
		break;
	case IEEE80211_AMPDU_TX_STOP_FLUSH:
	case IEEE80211_AMPDU_TX_STOP_FLUSH_CONT:
		flush = true;
		/* fall through */
	case IEEE80211_AMPDU_TX_STOP_CONT:
		ath9k_ps_wakeup(sc);
		ath_tx_aggr_stop(sc, sta, tid);
		if (!flush)
			ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid);
		ath9k_ps_restore(sc);
		break;
	case IEEE80211_AMPDU_TX_OPERATIONAL:
		ath9k_ps_wakeup(sc);
		ath_tx_aggr_resume(sc, sta, tid);
		ath9k_ps_restore(sc);
		break;
	default:
		ath_err(ath9k_hw_common(sc->sc_ah), "Unknown AMPDU action\n");
	}

	mutex_unlock(&sc->mutex);

	return ret;
}

static int ath9k_get_survey(struct ieee80211_hw *hw, int idx,
			    struct survey_info *survey)
{
	struct ath_softc *sc = hw->priv;
	struct ath_common *common = ath9k_hw_common(sc->sc_ah);
	struct ieee80211_supported_band *sband;
	struct ieee80211_channel *chan;
	unsigned long flags;
	int pos;

	if (config_enabled(CONFIG_ATH9K_TX99))
		return -EOPNOTSUPP;

	spin_lock_irqsave(&common->cc_lock, flags);
	if (idx == 0)
		ath_update_survey_stats(sc);

	sband = hw->wiphy->bands[IEEE80211_BAND_2GHZ];
	if (sband && idx >= sband->n_channels) {
		idx -= sband->n_channels;
		sband = NULL;
	}

	if (!sband)
		sband = hw->wiphy->bands[IEEE80211_BAND_5GHZ];

	if (!sband || idx >= sband->n_channels) {
		spin_unlock_irqrestore(&common->cc_lock, flags);
		return -ENOENT;
	}

	chan = &sband->channels[idx];
	pos = chan->hw_value;
	memcpy(survey, &sc->survey[pos], sizeof(*survey));
	survey->channel = chan;
	spin_unlock_irqrestore(&common->cc_lock, flags);

	return 0;
}

static void ath9k_set_coverage_class(struct ieee80211_hw *hw, u8 coverage_class)
{
	struct ath_softc *sc = hw->priv;
	struct ath_hw *ah = sc->sc_ah;

	if (config_enabled(CONFIG_ATH9K_TX99))
		return;

	mutex_lock(&sc->mutex);
	ah->coverage_class = coverage_class;

	ath9k_ps_wakeup(sc);
	ath9k_hw_init_global_settings(ah);
	ath9k_ps_restore(sc);

	mutex_unlock(&sc->mutex);
}

static void ath9k_flush(struct ieee80211_hw *hw, u32 queues, bool drop)
{
	struct ath_softc *sc = hw->priv;
	struct ath_hw *ah = sc->sc_ah;
	struct ath_common *common = ath9k_hw_common(ah);
	int timeout = 200; /* ms */
	int i, j;
	bool drain_txq;

	mutex_lock(&sc->mutex);
	cancel_delayed_work_sync(&sc->tx_complete_work);

	if (ah->ah_flags & AH_UNPLUGGED) {
		ath_dbg(common, ANY, "Device has been unplugged!\n");
		mutex_unlock(&sc->mutex);
		return;
	}

	if (test_bit(SC_OP_INVALID, &sc->sc_flags)) {
		ath_dbg(common, ANY, "Device not present\n");
		mutex_unlock(&sc->mutex);
		return;
	}

	for (j = 0; j < timeout; j++) {
		bool npend = false;

		if (j)
			usleep_range(1000, 2000);

		for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) {
			if (!ATH_TXQ_SETUP(sc, i))
				continue;

			npend = ath9k_has_pending_frames(sc, &sc->tx.txq[i]);
			if (npend)
				break;
		}

		if (!npend)
			break;
	}

	if (drop) {
		ath9k_ps_wakeup(sc);
		spin_lock_bh(&sc->sc_pcu_lock);
		drain_txq = ath_drain_all_txq(sc);
		spin_unlock_bh(&sc->sc_pcu_lock);

		if (!drain_txq)
			ath_reset(sc);

		ath9k_ps_restore(sc);
		ieee80211_wake_queues(hw);
	}

	ieee80211_queue_delayed_work(hw, &sc->tx_complete_work, 0);
	mutex_unlock(&sc->mutex);
}

static bool ath9k_tx_frames_pending(struct ieee80211_hw *hw)
{
	struct ath_softc *sc = hw->priv;
	int i;

	for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) {
		if (!ATH_TXQ_SETUP(sc, i))
			continue;

		if (ath9k_has_pending_frames(sc, &sc->tx.txq[i]))
			return true;
	}
	return false;
}

static int ath9k_tx_last_beacon(struct ieee80211_hw *hw)
{
	struct ath_softc *sc = hw->priv;
	struct ath_hw *ah = sc->sc_ah;
	struct ieee80211_vif *vif;
	struct ath_vif *avp;
	struct ath_buf *bf;
	struct ath_tx_status ts;
	bool edma = !!(ah->caps.hw_caps & ATH9K_HW_CAP_EDMA);
	int status;

	vif = sc->beacon.bslot[0];
	if (!vif)
		return 0;

	if (!vif->bss_conf.enable_beacon)
		return 0;

	avp = (void *)vif->drv_priv;

	if (!sc->beacon.tx_processed && !edma) {
		tasklet_disable(&sc->bcon_tasklet);

		bf = avp->av_bcbuf;
		if (!bf || !bf->bf_mpdu)
			goto skip;

		status = ath9k_hw_txprocdesc(ah, bf->bf_desc, &ts);
		if (status == -EINPROGRESS)
			goto skip;

		sc->beacon.tx_processed = true;
		sc->beacon.tx_last = !(ts.ts_status & ATH9K_TXERR_MASK);

skip:
		tasklet_enable(&sc->bcon_tasklet);
	}

	return sc->beacon.tx_last;
}

static int ath9k_get_stats(struct ieee80211_hw *hw,
			   struct ieee80211_low_level_stats *stats)
{
	struct ath_softc *sc = hw->priv;
	struct ath_hw *ah = sc->sc_ah;
	struct ath9k_mib_stats *mib_stats = &ah->ah_mibStats;

	stats->dot11ACKFailureCount = mib_stats->ackrcv_bad;
	stats->dot11RTSFailureCount = mib_stats->rts_bad;
	stats->dot11FCSErrorCount = mib_stats->fcs_bad;
	stats->dot11RTSSuccessCount = mib_stats->rts_good;
	return 0;
}

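/*
 * Map a logical antenna selection bitmask onto the chains actually present
 * in hardware: each bit of 'new' is assigned, in order, to the next chain
 * whose bit is set in 'cap'.
 */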
static u32 fill_chainmask(u32 cap, u32 new)
{
	u32 filled = 0;
	int i;

	for (i = 0; cap && new; i++, cap >>= 1) {
		if (!(cap & BIT(0)))
			continue;

		if (new & BIT(0))
			filled |= BIT(i);

		new >>= 1;
	}

	return filled;
}

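/*
 * AR9300 and later accept any antenna combination; older chips only support
 * rx masks that include chain 0 (0x1, 0x3, 0x7), or a mask of 0x2 on
 * single-chain devices.
 */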
static bool validate_antenna_mask(struct ath_hw *ah, u32 val)
{
	if (AR_SREV_9300_20_OR_LATER(ah))
		return true;

	switch (val & 0x7) {
	case 0x1:
	case 0x3:
	case 0x7:
		return true;
	case 0x2:
		return (ah->caps.rx_chainmask == 1);
	default:
		return false;
	}
}

static int ath9k_set_antenna(struct ieee80211_hw *hw, u32 tx_ant, u32 rx_ant)
{
	struct ath_softc *sc = hw->priv;
	struct ath_hw *ah = sc->sc_ah;

	if (ah->caps.rx_chainmask != 1)
		rx_ant |= tx_ant;

	if (!validate_antenna_mask(ah, rx_ant) || !tx_ant)
		return -EINVAL;

	sc->ant_rx = rx_ant;
	sc->ant_tx = tx_ant;

	if (ah->caps.rx_chainmask == 1)
		return 0;

	/* AR9100 runs into calibration issues if not all rx chains are enabled */
	if (AR_SREV_9100(ah))
		ah->rxchainmask = 0x7;
	else
		ah->rxchainmask = fill_chainmask(ah->caps.rx_chainmask, rx_ant);

	ah->txchainmask = fill_chainmask(ah->caps.tx_chainmask, tx_ant);
	ath9k_reload_chainmask_settings(sc);

	return 0;
}

static int ath9k_get_antenna(struct ieee80211_hw *hw, u32 *tx_ant, u32 *rx_ant)
{
	struct ath_softc *sc = hw->priv;

	*tx_ant = sc->ant_tx;
	*rx_ant = sc->ant_rx;
	return 0;
}

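/* Wake-on-Wireless (WoW) suspend/resume support */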
#ifdef CONFIG_PM_SLEEP

static void ath9k_wow_map_triggers(struct ath_softc *sc,
				   struct cfg80211_wowlan *wowlan,
				   u32 *wow_triggers)
{
	if (wowlan->disconnect)
		*wow_triggers |= AH_WOW_LINK_CHANGE |
				 AH_WOW_BEACON_MISS;
	if (wowlan->magic_pkt)
		*wow_triggers |= AH_WOW_MAGIC_PATTERN_EN;

	if (wowlan->n_patterns)
		*wow_triggers |= AH_WOW_USER_PATTERN_EN;

	sc->wow_enabled = *wow_triggers;
}

static void ath9k_wow_add_disassoc_deauth_pattern(struct ath_softc *sc)
{
	struct ath_hw *ah = sc->sc_ah;
	struct ath_common *common = ath9k_hw_common(ah);
	int pattern_count = 0;
	int i, byte_cnt;
	u8 dis_deauth_pattern[MAX_PATTERN_SIZE];
	u8 dis_deauth_mask[MAX_PATTERN_SIZE];

	memset(dis_deauth_pattern, 0, MAX_PATTERN_SIZE);
	memset(dis_deauth_mask, 0, MAX_PATTERN_SIZE);

	/*
	 * Create the Disassociate / Deauthenticate packet filter
	 *
	 *    2 bytes        2 bytes    6 bytes   6 bytes  6 bytes
	 * +--------------+----------+---------+--------+--------+----
	 * + Frame Control+ Duration +   DA    +   SA   + BSSID  +
	 * +--------------+----------+---------+--------+--------+----
	 *
	 * The above is the management frame format for the disassociate/
	 * deauthenticate pattern. From this we need to match the first byte
	 * of 'Frame Control' and the DA, SA and BSSID fields
	 * (skipping the 2nd byte of FC and the Duration field).
	 *
	 * Disassociate pattern
	 * --------------------
	 * Frame control = 00 00 1010
	 * DA, SA, BSSID = x:x:x:x:x:x
	 * Pattern will be A0000000 | x:x:x:x:x:x | x:x:x:x:x:x
	 *			    | x:x:x:x:x:x  -- 22 bytes
	 *
	 * Deauthenticate pattern
	 * ----------------------
	 * Frame control = 00 00 1100
	 * DA, SA, BSSID = x:x:x:x:x:x
	 * Pattern will be C0000000 | x:x:x:x:x:x | x:x:x:x:x:x
	 *			    | x:x:x:x:x:x  -- 22 bytes
	 */

	/* Create the Disassociate pattern first */
	byte_cnt = 0;

	/* Fill out the mask with all FF's */
	for (i = 0; i < MAX_PATTERN_MASK_SIZE; i++)
		dis_deauth_mask[i] = 0xff;

	/* copy the first byte of the frame control field */
	dis_deauth_pattern[byte_cnt] = 0xa0;
	byte_cnt++;

	/* skip the 2nd byte of frame control and the Duration field */
	byte_cnt += 3;

	/*
	 * need not match the destination mac address, it can be a broadcast
	 * mac address or a unicast to this station
	 */
	byte_cnt += 6;

	/* copy the source mac address */
	memcpy((dis_deauth_pattern + byte_cnt), common->curbssid, ETH_ALEN);
	byte_cnt += 6;

	/* copy the bssid, it's the same as the source mac address */
	memcpy((dis_deauth_pattern + byte_cnt), common->curbssid, ETH_ALEN);

	/* Create the Disassociate pattern mask */
	dis_deauth_mask[0] = 0xfe;
	dis_deauth_mask[1] = 0x03;
	dis_deauth_mask[2] = 0xc0;

	ath_dbg(common, WOW, "Adding disassoc/deauth patterns for WoW\n");

	ath9k_hw_wow_apply_pattern(ah, dis_deauth_pattern, dis_deauth_mask,
				   pattern_count, byte_cnt);

	pattern_count++;
	/*
	 * for the deauthenticate pattern, only the first byte of the frame
	 * control field gets changed from 0xA0 to 0xC0
	 */
	dis_deauth_pattern[0] = 0xC0;

	ath9k_hw_wow_apply_pattern(ah, dis_deauth_pattern, dis_deauth_mask,
				   pattern_count, byte_cnt);
}

static void ath9k_wow_add_pattern(struct ath_softc *sc,
				  struct cfg80211_wowlan *wowlan)
{
	struct ath_hw *ah = sc->sc_ah;
	struct ath9k_wow_pattern *wow_pattern = NULL;
	struct cfg80211_pkt_pattern *patterns = wowlan->patterns;
	int mask_len;
	s8 i = 0;

	if (!wowlan->n_patterns)
		return;

	/*
	 * Add the new user configured patterns
	 */
	for (i = 0; i < wowlan->n_patterns; i++) {
		wow_pattern = kzalloc(sizeof(*wow_pattern), GFP_KERNEL);
		if (!wow_pattern)
			return;

		/*
		 * TODO: convert the generic user space pattern to
		 * appropriate chip specific/802.11 pattern.
		 */
		mask_len = DIV_ROUND_UP(wowlan->patterns[i].pattern_len, 8);
		memset(wow_pattern->pattern_bytes, 0, MAX_PATTERN_SIZE);
		memset(wow_pattern->mask_bytes, 0, MAX_PATTERN_SIZE);
		memcpy(wow_pattern->pattern_bytes, patterns[i].pattern,
		       patterns[i].pattern_len);
		memcpy(wow_pattern->mask_bytes, patterns[i].mask, mask_len);
		wow_pattern->pattern_len = patterns[i].pattern_len;

		/*
		 * Just need to take care of the deauth and disassoc patterns,
		 * make sure we don't overwrite them.
		 */
		ath9k_hw_wow_apply_pattern(ah, wow_pattern->pattern_bytes,
					   wow_pattern->mask_bytes,
					   i + 2,
					   wow_pattern->pattern_len);
		kfree(wow_pattern);
	}
}

static int ath9k_suspend(struct ieee80211_hw *hw,
			 struct cfg80211_wowlan *wowlan)
{
	struct ath_softc *sc = hw->priv;
	struct ath_hw *ah = sc->sc_ah;
	struct ath_common *common = ath9k_hw_common(ah);
	u32 wow_triggers_enabled = 0;
	int ret = 0;

	mutex_lock(&sc->mutex);

	ath_cancel_work(sc);
	ath_stop_ani(sc);
	del_timer_sync(&sc->rx_poll_timer);

	if (test_bit(SC_OP_INVALID, &sc->sc_flags)) {
		ath_dbg(common, ANY, "Device not present\n");
		ret = -EINVAL;
		goto fail_wow;
	}

	if (WARN_ON(!wowlan)) {
		ath_dbg(common, WOW, "None of the WoW triggers enabled\n");
		ret = -EINVAL;
		goto fail_wow;
	}

	if (!device_can_wakeup(sc->dev)) {
		ath_dbg(common, WOW, "device_can_wakeup failed, WoW is not enabled\n");
		ret = 1;
		goto fail_wow;
	}

	/*
	 * None of the sta vifs are associated, and we are not currently
	 * handling multi-vif cases; for instance, the 'keep alive frame'
	 * would have to be configured separately for each STA.
	 */
	if (!test_bit(SC_OP_PRIM_STA_VIF, &sc->sc_flags)) {
		ath_dbg(common, WOW, "None of the STA vifs are associated\n");
		ret = 1;
		goto fail_wow;
	}

	if (sc->nvifs > 1) {
		ath_dbg(common, WOW, "WoW for multivif is not yet supported\n");
		ret = 1;
		goto fail_wow;
	}

	ath9k_wow_map_triggers(sc, wowlan, &wow_triggers_enabled);

	ath_dbg(common, WOW, "WoW triggers enabled 0x%x\n",
		wow_triggers_enabled);

	ath9k_ps_wakeup(sc);

	ath9k_stop_btcoex(sc);

	/*
	 * Enable wake-up on receiving a disassoc/deauth
	 * frame by default.
	 */
	ath9k_wow_add_disassoc_deauth_pattern(sc);

	if (wow_triggers_enabled & AH_WOW_USER_PATTERN_EN)
		ath9k_wow_add_pattern(sc, wowlan);

	spin_lock_bh(&sc->sc_pcu_lock);
	/*
	 * To avoid false wake, we enable beacon miss interrupt only
	 * when we go to sleep. We save the current interrupt mask
	 * so we can restore it after the system wakes up
	 */
	sc->wow_intr_before_sleep = ah->imask;
	ah->imask &= ~ATH9K_INT_GLOBAL;
	ath9k_hw_disable_interrupts(ah);
	ah->imask = ATH9K_INT_BMISS | ATH9K_INT_GLOBAL;
	ath9k_hw_set_interrupts(ah);
	ath9k_hw_enable_interrupts(ah);

	spin_unlock_bh(&sc->sc_pcu_lock);

	/*
	 * we can now sync irq and kill any running tasklets, since we already
	 * disabled interrupts and are not holding a spin lock
	 */
	synchronize_irq(sc->irq);
	tasklet_kill(&sc->intr_tq);

	ath9k_hw_wow_enable(ah, wow_triggers_enabled);

	ath9k_ps_restore(sc);
	ath_dbg(common, ANY, "WoW enabled in ath9k\n");
	atomic_inc(&sc->wow_sleep_proc_intr);

fail_wow:
	mutex_unlock(&sc->mutex);
	return ret;
}

static int ath9k_resume(struct ieee80211_hw *hw)
{
	struct ath_softc *sc = hw->priv;
	struct ath_hw *ah = sc->sc_ah;
	struct ath_common *common = ath9k_hw_common(ah);
	u32 wow_status;

	mutex_lock(&sc->mutex);

	ath9k_ps_wakeup(sc);

	spin_lock_bh(&sc->sc_pcu_lock);

	ath9k_hw_disable_interrupts(ah);
	ah->imask = sc->wow_intr_before_sleep;
	ath9k_hw_set_interrupts(ah);
	ath9k_hw_enable_interrupts(ah);

	spin_unlock_bh(&sc->sc_pcu_lock);

	wow_status = ath9k_hw_wow_wakeup(ah);

	if (atomic_read(&sc->wow_got_bmiss_intr) == 0) {
		/*
		 * Some devices may not report beacon miss as the reason
		 * they woke up, so add it here to cover that shortcoming.
		 */
		wow_status |= AH_WOW_BEACON_MISS;
		atomic_dec(&sc->wow_got_bmiss_intr);
		ath_dbg(common, ANY, "Beacon miss interrupt picked up during WoW sleep\n");
	}

	atomic_dec(&sc->wow_sleep_proc_intr);

	if (wow_status) {
		ath_dbg(common, ANY, "Waking up due to WoW triggers %s with WoW status = %x\n",
			ath9k_hw_wow_event_to_string(wow_status), wow_status);
	}

	ath_restart_work(sc);
	ath9k_start_btcoex(sc);

	ath9k_ps_restore(sc);
	mutex_unlock(&sc->mutex);

	return 0;
}

static void ath9k_set_wakeup(struct ieee80211_hw *hw, bool enabled)
{
	struct ath_softc *sc = hw->priv;

	mutex_lock(&sc->mutex);
	device_init_wakeup(sc->dev, 1);
	device_set_wakeup_enable(sc->dev, enabled);
	mutex_unlock(&sc->mutex);
}

#endif

static void ath9k_sw_scan_start(struct ieee80211_hw *hw)
{
	struct ath_softc *sc = hw->priv;

	set_bit(SC_OP_SCANNING, &sc->sc_flags);
}

static void ath9k_sw_scan_complete(struct ieee80211_hw *hw)
{
	struct ath_softc *sc = hw->priv;

	clear_bit(SC_OP_SCANNING, &sc->sc_flags);
}

static void ath9k_channel_switch_beacon(struct ieee80211_hw *hw,
					struct ieee80211_vif *vif,
					struct cfg80211_chan_def *chandef)
{
	struct ath_softc *sc = hw->priv;

	/* mac80211 does not support CSA in multi-if cases (yet) */
	if (WARN_ON(sc->csa_vif))
		return;

	sc->csa_vif = vif;
}

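/* TX99: continuous-transmit (RF test) mode */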
static void ath9k_tx99_stop(struct ath_softc *sc)
{
	struct ath_hw *ah = sc->sc_ah;
	struct ath_common *common = ath9k_hw_common(ah);

	ath_drain_all_txq(sc);
	ath_startrecv(sc);

	ath9k_hw_set_interrupts(ah);
	ath9k_hw_enable_interrupts(ah);

	ieee80211_wake_queues(sc->hw);

	kfree_skb(sc->tx99_skb);
	sc->tx99_skb = NULL;
	sc->tx99_state = false;

	ath9k_hw_tx99_stop(sc->sc_ah);
	ath_dbg(common, XMIT, "TX99 stopped\n");
}

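/*
 * Build the 1200-byte data frame transmitted continuously in TX99 mode;
 * the payload begins with a PN9 pseudo-random sequence.
 */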
static struct sk_buff *ath9k_build_tx99_skb(struct ath_softc *sc)
{
	static u8 PN9Data[] = {0xff, 0x87, 0xb8, 0x59, 0xb7, 0xa1, 0xcc, 0x24,
			       0x57, 0x5e, 0x4b, 0x9c, 0x0e, 0xe9, 0xea, 0x50,
			       0x2a, 0xbe, 0xb4, 0x1b, 0xb6, 0xb0, 0x5d, 0xf1,
			       0xe6, 0x9a, 0xe3, 0x45, 0xfd, 0x2c, 0x53, 0x18,
			       0x0c, 0xca, 0xc9, 0xfb, 0x49, 0x37, 0xe5, 0xa8,
			       0x51, 0x3b, 0x2f, 0x61, 0xaa, 0x72, 0x18, 0x84,
			       0x02, 0x23, 0x23, 0xab, 0x63, 0x89, 0x51, 0xb3,
			       0xe7, 0x8b, 0x72, 0x90, 0x4c, 0xe8, 0xfb, 0xc0};
	u32 len = 1200;
	struct ieee80211_hw *hw = sc->hw;
	struct ieee80211_hdr *hdr;
	struct ieee80211_tx_info *tx_info;
	struct sk_buff *skb;

	skb = alloc_skb(len, GFP_KERNEL);
	if (!skb)
		return NULL;

	skb_put(skb, len);

	memset(skb->data, 0, len);

	hdr = (struct ieee80211_hdr *)skb->data;
	hdr->frame_control = cpu_to_le16(IEEE80211_FTYPE_DATA);
	hdr->duration_id = 0;

	memcpy(hdr->addr1, hw->wiphy->perm_addr, ETH_ALEN);
	memcpy(hdr->addr2, hw->wiphy->perm_addr, ETH_ALEN);
	memcpy(hdr->addr3, hw->wiphy->perm_addr, ETH_ALEN);

	hdr->seq_ctrl |= cpu_to_le16(sc->tx.seq_no);

	tx_info = IEEE80211_SKB_CB(skb);
	memset(tx_info, 0, sizeof(*tx_info));
	tx_info->band = hw->conf.chandef.chan->band;
	tx_info->flags = IEEE80211_TX_CTL_NO_ACK;
	tx_info->control.vif = sc->tx99_vif;

	memcpy(skb->data + sizeof(*hdr), PN9Data, sizeof(PN9Data));

	return skb;
}

void ath9k_tx99_deinit(struct ath_softc *sc)
{
	ath_reset(sc);

	ath9k_ps_wakeup(sc);
	ath9k_tx99_stop(sc);
	ath9k_ps_restore(sc);
}

int ath9k_tx99_init(struct ath_softc *sc)
{
	struct ieee80211_hw *hw = sc->hw;
	struct ath_hw *ah = sc->sc_ah;
	struct ath_common *common = ath9k_hw_common(ah);
	struct ath_tx_control txctl;
	int r;

	if (sc->sc_flags & SC_OP_INVALID) {
		ath_err(common,
			"driver is in invalid state unable to use TX99");
		return -EINVAL;
	}

	sc->tx99_skb = ath9k_build_tx99_skb(sc);
	if (!sc->tx99_skb)
		return -ENOMEM;

	memset(&txctl, 0, sizeof(txctl));
	txctl.txq = sc->tx.txq_map[IEEE80211_AC_VO];

	ath_reset(sc);

	ath9k_ps_wakeup(sc);

	ath9k_hw_disable_interrupts(ah);
	atomic_set(&ah->intr_ref_cnt, -1);
	ath_drain_all_txq(sc);
	ath_stoprecv(sc);

	sc->tx99_state = true;

	ieee80211_stop_queues(hw);

	if (sc->tx99_power == MAX_RATE_POWER + 1)
		sc->tx99_power = MAX_RATE_POWER;

	ath9k_hw_tx99_set_txpower(ah, sc->tx99_power);
	r = ath9k_tx99_send(sc, sc->tx99_skb, &txctl);
	if (r) {
		ath_dbg(common, XMIT, "Failed to xmit TX99 skb\n");
		return r;
	}

	ath_dbg(common, XMIT, "TX99 xmit started using %d ( %ddBm)\n",
		sc->tx99_power,
		sc->tx99_power / 2);

	/* We leave the hardware awake as it will be chugging on */

	return 0;
}

struct ieee80211_ops ath9k_ops = {
	.tx			= ath9k_tx,
	.start			= ath9k_start,
	.stop			= ath9k_stop,
	.add_interface		= ath9k_add_interface,
	.change_interface	= ath9k_change_interface,
	.remove_interface	= ath9k_remove_interface,
	.config			= ath9k_config,
	.configure_filter	= ath9k_configure_filter,
	.sta_add		= ath9k_sta_add,
	.sta_remove		= ath9k_sta_remove,
	.sta_notify		= ath9k_sta_notify,
	.conf_tx		= ath9k_conf_tx,
	.bss_info_changed	= ath9k_bss_info_changed,
	.set_key		= ath9k_set_key,
	.get_tsf		= ath9k_get_tsf,
	.set_tsf		= ath9k_set_tsf,
	.reset_tsf		= ath9k_reset_tsf,
	.ampdu_action		= ath9k_ampdu_action,
	.get_survey		= ath9k_get_survey,
	.rfkill_poll		= ath9k_rfkill_poll_state,
	.set_coverage_class	= ath9k_set_coverage_class,
	.flush			= ath9k_flush,
	.tx_frames_pending	= ath9k_tx_frames_pending,
	.tx_last_beacon		= ath9k_tx_last_beacon,
	.release_buffered_frames = ath9k_release_buffered_frames,
	.get_stats		= ath9k_get_stats,
	.set_antenna		= ath9k_set_antenna,
	.get_antenna		= ath9k_get_antenna,

#ifdef CONFIG_PM_SLEEP
	.suspend		= ath9k_suspend,
	.resume			= ath9k_resume,
	.set_wakeup		= ath9k_set_wakeup,
#endif

#ifdef CONFIG_ATH9K_DEBUGFS
	.get_et_sset_count	= ath9k_get_et_sset_count,
	.get_et_stats		= ath9k_get_et_stats,
	.get_et_strings		= ath9k_get_et_strings,
#endif

#if defined(CONFIG_MAC80211_DEBUGFS) && defined(CONFIG_ATH9K_DEBUGFS)
	.sta_add_debugfs	= ath9k_sta_add_debugfs,
#endif
	.sw_scan_start		= ath9k_sw_scan_start,
	.sw_scan_complete	= ath9k_sw_scan_complete,
	.channel_switch_beacon	= ath9k_channel_switch_beacon,
};