
/*
 * Copyright (c) 2008, Atheros Communications Inc.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

/* Implementation of the main "ATH" layer. */

#include "core.h"
#include "regd.h"

static int ath_outdoor;		/* enable outdoor use */

static u32 ath_chainmask_sel_up_rssi_thres =
	ATH_CHAINMASK_SEL_UP_RSSI_THRES;
static u32 ath_chainmask_sel_down_rssi_thres =
	ATH_CHAINMASK_SEL_DOWN_RSSI_THRES;
static u32 ath_chainmask_sel_period =
	ATH_CHAINMASK_SEL_TIMEOUT;

/* return bus cachesize in 4B word units */
static void bus_read_cachesize(struct ath_softc *sc, int *csz)
{
	u8 u8tmp;

	pci_read_config_byte(sc->pdev, PCI_CACHE_LINE_SIZE, &u8tmp);
	*csz = (int)u8tmp;

	/*
	 * This check was put in to avoid "unpleasant" consequences if
	 * the bootrom has not fully initialized all PCI devices.
	 * Sometimes the cache line size register is not set.
	 */
	if (*csz == 0)
		*csz = DEFAULT_CACHELINE >> 2;	/* use the default size */
}
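
/*
 * NB: the PCI Cache Line Size config register is specified in units of
 * 32-bit words, so the value read above can be used directly;
 * DEFAULT_CACHELINE is assumed here to be in bytes, hence the >> 2
 * conversion to the same 4-byte-word units.
 */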

/*
 * Set the current operating mode
 *
 * This function initializes and fills the rate table in the ATH object
 * based on the operating mode.
 */
static void ath_setcurmode(struct ath_softc *sc, enum wireless_mode mode)
{
	const struct ath9k_rate_table *rt;
	int i;

	memset(sc->sc_rixmap, 0xff, sizeof(sc->sc_rixmap));
	rt = ath9k_hw_getratetable(sc->sc_ah, mode);
	BUG_ON(!rt);

	for (i = 0; i < rt->rateCount; i++)
		sc->sc_rixmap[rt->info[i].rateCode] = (u8) i;

	memzero(sc->sc_hwmap, sizeof(sc->sc_hwmap));
	for (i = 0; i < 256; i++) {
		u8 ix = rt->rateCodeToIndex[i];

		if (ix == 0xff)
			continue;

		sc->sc_hwmap[i].ieeerate =
			rt->info[ix].dot11Rate & IEEE80211_RATE_VAL;
		sc->sc_hwmap[i].rateKbps = rt->info[ix].rateKbps;

		if (rt->info[ix].shortPreamble ||
		    rt->info[ix].phy == PHY_OFDM) {
			/* XXX: Handle this */
		}

		/* NB: this uses the last entry if the rate isn't found */
		/* XXX beware of overflow */
	}

	sc->sc_currates = rt;
	sc->sc_curmode = mode;

	/*
	 * All protection frames are transmitted at 2 Mb/s for
	 * 11g, otherwise at 1 Mb/s.
	 * XXX select protection rate index from the rate table.
	 */
	sc->sc_protrix = (mode == ATH9K_MODE_11G ? 1 : 0);
}
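
/*
 * For illustration (values are hypothetical, not from a real rate table):
 * if the entry at index 3 has rateCode 0x0b, then after ath_setcurmode()
 * sc_rixmap[0x0b] == 3, so a rate code reported by the hardware in a rx/tx
 * descriptor maps straight back to its rate-table index, and sc_hwmap[0x0b]
 * caches the matching IEEE rate and kbps value for the same lookup.
 */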

/*
 * Set up the rate table (legacy rates)
 */
static void ath_setup_rates(struct ath_softc *sc, enum ieee80211_band band)
{
	struct ath_hal *ah = sc->sc_ah;
	const struct ath9k_rate_table *rt = NULL;
	struct ieee80211_supported_band *sband;
	struct ieee80211_rate *rate;
	int i, maxrates;

	switch (band) {
	case IEEE80211_BAND_2GHZ:
		rt = ath9k_hw_getratetable(ah, ATH9K_MODE_11G);
		break;
	case IEEE80211_BAND_5GHZ:
		rt = ath9k_hw_getratetable(ah, ATH9K_MODE_11A);
		break;
	default:
		break;
	}

	if (rt == NULL)
		return;

	sband = &sc->sbands[band];
	rate = sc->rates[band];

	if (rt->rateCount > ATH_RATE_MAX)
		maxrates = ATH_RATE_MAX;
	else
		maxrates = rt->rateCount;

	for (i = 0; i < maxrates; i++) {
		rate[i].bitrate = rt->info[i].rateKbps / 100;
		rate[i].hw_value = rt->info[i].rateCode;
		sband->n_bitrates++;

		DPRINTF(sc, ATH_DBG_CONFIG,
			"%s: Rate: %2dMbps, ratecode: %2d\n",
			__func__,
			rate[i].bitrate / 10,
			rate[i].hw_value);
	}
}
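
/*
 * NB: struct ieee80211_rate.bitrate is in units of 100 kb/s, which is why
 * rateKbps is divided by 100 when filling the table and by a further 10
 * when the debug output above prints the rate in Mb/s.
 */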

/*
 * Set up the channel list
 */
static int ath_setup_channels(struct ath_softc *sc)
{
	struct ath_hal *ah = sc->sc_ah;
	int nchan, i, a = 0, b = 0;
	u8 regclassids[ATH_REGCLASSIDS_MAX];
	u32 nregclass = 0;
	struct ieee80211_supported_band *band_2ghz;
	struct ieee80211_supported_band *band_5ghz;
	struct ieee80211_channel *chan_2ghz;
	struct ieee80211_channel *chan_5ghz;
	struct ath9k_channel *c;

	/* Fill in ah->ah_channels */
	if (!ath9k_regd_init_channels(ah,
				      ATH_CHAN_MAX,
				      (u32 *)&nchan,
				      regclassids,
				      ATH_REGCLASSIDS_MAX,
				      &nregclass,
				      CTRY_DEFAULT,
				      false,
				      1)) {
		u32 rd = ah->ah_currentRD;

		DPRINTF(sc, ATH_DBG_FATAL,
			"%s: unable to collect channel list; "
			"regdomain likely %u country code %u\n",
			__func__, rd, CTRY_DEFAULT);
		return -EINVAL;
	}

	band_2ghz = &sc->sbands[IEEE80211_BAND_2GHZ];
	band_5ghz = &sc->sbands[IEEE80211_BAND_5GHZ];
	chan_2ghz = sc->channels[IEEE80211_BAND_2GHZ];
	chan_5ghz = sc->channels[IEEE80211_BAND_5GHZ];

	for (i = 0; i < nchan; i++) {
		c = &ah->ah_channels[i];
		if (IS_CHAN_2GHZ(c)) {
			chan_2ghz[a].band = IEEE80211_BAND_2GHZ;
			chan_2ghz[a].center_freq = c->channel;
			chan_2ghz[a].max_power = c->maxTxPower;

			if (c->privFlags & CHANNEL_DISALLOW_ADHOC)
				chan_2ghz[a].flags |= IEEE80211_CHAN_NO_IBSS;
			if (c->channelFlags & CHANNEL_PASSIVE)
				chan_2ghz[a].flags |=
					IEEE80211_CHAN_PASSIVE_SCAN;

			band_2ghz->n_channels = ++a;

			DPRINTF(sc, ATH_DBG_CONFIG,
				"%s: 2GHz channel: %d, "
				"channelFlags: 0x%x\n",
				__func__, c->channel, c->channelFlags);
		} else if (IS_CHAN_5GHZ(c)) {
			chan_5ghz[b].band = IEEE80211_BAND_5GHZ;
			chan_5ghz[b].center_freq = c->channel;
			chan_5ghz[b].max_power = c->maxTxPower;

			if (c->privFlags & CHANNEL_DISALLOW_ADHOC)
				chan_5ghz[b].flags |= IEEE80211_CHAN_NO_IBSS;
			if (c->channelFlags & CHANNEL_PASSIVE)
				chan_5ghz[b].flags |=
					IEEE80211_CHAN_PASSIVE_SCAN;

			band_5ghz->n_channels = ++b;

			DPRINTF(sc, ATH_DBG_CONFIG,
				"%s: 5GHz channel: %d, "
				"channelFlags: 0x%x\n",
				__func__, c->channel, c->channelFlags);
		}
	}

	return 0;
}

/*
 * Determine mode from channel flags
 *
 * This routine returns the enumerated WIRELESS_MODE value based
 * on the settings of the channel flags. If no valid set of flags
 * exists, the lowest mode (11b) is selected.
 */
static enum wireless_mode ath_chan2mode(struct ath9k_channel *chan)
{
	switch (chan->chanmode) {
	case CHANNEL_A:
		return ATH9K_MODE_11A;
	case CHANNEL_G:
		return ATH9K_MODE_11G;
	case CHANNEL_B:
		return ATH9K_MODE_11B;
	case CHANNEL_A_HT20:
		return ATH9K_MODE_11NA_HT20;
	case CHANNEL_G_HT20:
		return ATH9K_MODE_11NG_HT20;
	case CHANNEL_A_HT40PLUS:
		return ATH9K_MODE_11NA_HT40PLUS;
	case CHANNEL_A_HT40MINUS:
		return ATH9K_MODE_11NA_HT40MINUS;
	case CHANNEL_G_HT40PLUS:
		return ATH9K_MODE_11NG_HT40PLUS;
	case CHANNEL_G_HT40MINUS:
		return ATH9K_MODE_11NG_HT40MINUS;
	}

	WARN_ON(1); /* should not get here */
	return ATH9K_MODE_11B;
}

/*
 * Stop the device, grabbing the top-level lock to protect
 * against concurrent entry through ath_init (which can happen
 * if another thread does a system call and the thread doing the
 * stop is preempted).
 */
static int ath_stop(struct ath_softc *sc)
{
	struct ath_hal *ah = sc->sc_ah;

	DPRINTF(sc, ATH_DBG_CONFIG, "%s: invalid %ld\n",
		__func__, sc->sc_flags & SC_OP_INVALID);

	/*
	 * Shutdown the hardware and driver:
	 *    stop output from above
	 *    turn off timers
	 *    disable interrupts
	 *    clear transmit machinery
	 *    clear receive machinery
	 *    turn off the radio
	 *    reclaim beacon resources
	 *
	 * Note that some of this work is not possible if the
	 * hardware is gone (invalid).
	 */
	if (!(sc->sc_flags & SC_OP_INVALID))
		ath9k_hw_set_interrupts(ah, 0);
	ath_draintxq(sc, false);
	if (!(sc->sc_flags & SC_OP_INVALID)) {
		ath_stoprecv(sc);
		ath9k_hw_phy_disable(ah);
	} else
		sc->sc_rxlink = NULL;

	return 0;
}

/*
 * Set the current channel
 *
 * Set/change channels. If the channel is really being changed, it is done
 * by resetting the chip. To accomplish this we must first clean up any
 * pending DMA, then restart things as in ath_init.
 */
int ath_set_channel(struct ath_softc *sc, struct ath9k_channel *hchan)
{
	struct ath_hal *ah = sc->sc_ah;
	bool fastcc = true, stopped;

	if (sc->sc_flags & SC_OP_INVALID) /* the device is invalid or removed */
		return -EIO;

	DPRINTF(sc, ATH_DBG_CONFIG,
		"%s: %u (%u MHz) -> %u (%u MHz), cflags:%x\n",
		__func__,
		ath9k_hw_mhz2ieee(ah, sc->sc_ah->ah_curchan->channel,
				  sc->sc_ah->ah_curchan->channelFlags),
		sc->sc_ah->ah_curchan->channel,
		ath9k_hw_mhz2ieee(ah, hchan->channel, hchan->channelFlags),
		hchan->channel, hchan->channelFlags);

	if (hchan->channel != sc->sc_ah->ah_curchan->channel ||
	    hchan->channelFlags != sc->sc_ah->ah_curchan->channelFlags ||
	    (sc->sc_flags & SC_OP_CHAINMASK_UPDATE) ||
	    (sc->sc_flags & SC_OP_FULL_RESET)) {
		int status;
		/*
		 * This is only performed if the channel settings have
		 * actually changed.
		 *
		 * To switch channels clear any pending DMA operations;
		 * wait long enough for the RX fifo to drain, reset the
		 * hardware at the new frequency, and then re-enable
		 * the relevant bits of the h/w.
		 */
		ath9k_hw_set_interrupts(ah, 0);	/* disable interrupts */
		ath_draintxq(sc, false);	/* clear pending tx frames */
		stopped = ath_stoprecv(sc);	/* turn off frame recv */

		/* XXX: do not flush receive queue here. We don't want
		 * to flush data frames already in queue because of
		 * changing channel. */

		if (!stopped || (sc->sc_flags & SC_OP_FULL_RESET))
			fastcc = false;

		spin_lock_bh(&sc->sc_resetlock);
		if (!ath9k_hw_reset(ah, hchan,
				    sc->sc_ht_info.tx_chan_width,
				    sc->sc_tx_chainmask,
				    sc->sc_rx_chainmask,
				    sc->sc_ht_extprotspacing,
				    fastcc, &status)) {
			DPRINTF(sc, ATH_DBG_FATAL,
				"%s: unable to reset channel %u (%u MHz) "
				"flags 0x%x hal status %u\n", __func__,
				ath9k_hw_mhz2ieee(ah, hchan->channel,
						  hchan->channelFlags),
				hchan->channel, hchan->channelFlags, status);
			spin_unlock_bh(&sc->sc_resetlock);
			return -EIO;
		}
		spin_unlock_bh(&sc->sc_resetlock);

		sc->sc_flags &= ~SC_OP_CHAINMASK_UPDATE;
		sc->sc_flags &= ~SC_OP_FULL_RESET;

		/* Re-enable rx framework */
		if (ath_startrecv(sc) != 0) {
			DPRINTF(sc, ATH_DBG_FATAL,
				"%s: unable to restart recv logic\n",
				__func__);
			return -EIO;
		}

		/*
		 * Change channels and update the h/w rate map
		 * if we're switching; e.g. 11a to 11b/g.
		 */
		ath_setcurmode(sc, ath_chan2mode(hchan));

		ath_update_txpow(sc);	/* update tx power state */

		/* Re-enable interrupts */
		ath9k_hw_set_interrupts(ah, sc->sc_imask);
	}

	return 0;
}
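
/*
 * NB on fastcc: "fast channel change" asks the HAL to retune without a
 * full chip reset and recalibration. The code above falls back to a full
 * reset (fastcc = false) whenever the RX engine could not be cleanly
 * stopped or a full reset was explicitly requested via SC_OP_FULL_RESET.
 */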

/**********************/
/* Chainmask Handling */
/**********************/

static void ath_chainmask_sel_timertimeout(unsigned long data)
{
	struct ath_chainmask_sel *cm = (struct ath_chainmask_sel *)data;
	cm->switch_allowed = 1;
}

/* Start chainmask select timer */
static void ath_chainmask_sel_timerstart(struct ath_chainmask_sel *cm)
{
	cm->switch_allowed = 0;
	mod_timer(&cm->timer,
		  jiffies + msecs_to_jiffies(ath_chainmask_sel_period));
}

/* Stop chainmask select timer */
static void ath_chainmask_sel_timerstop(struct ath_chainmask_sel *cm)
{
	cm->switch_allowed = 0;
	del_timer_sync(&cm->timer);
}

static void ath_chainmask_sel_init(struct ath_softc *sc, struct ath_node *an)
{
	struct ath_chainmask_sel *cm = &an->an_chainmask_sel;

	memzero(cm, sizeof(struct ath_chainmask_sel));

	cm->cur_tx_mask = sc->sc_tx_chainmask;
	cm->cur_rx_mask = sc->sc_rx_chainmask;
	cm->tx_avgrssi = ATH_RSSI_DUMMY_MARKER;
	setup_timer(&cm->timer,
		    ath_chainmask_sel_timertimeout, (unsigned long) cm);
}

int ath_chainmask_sel_logic(struct ath_softc *sc, struct ath_node *an)
{
	struct ath_chainmask_sel *cm = &an->an_chainmask_sel;

	/*
	 * Disable auto-switching in any of the following cases.
	 * sc_chainmask_auto_sel is the internal global setting for
	 * enabling/disabling auto-switching.
	 */
	if (sc->sc_ah->ah_caps.tx_chainmask != ATH_CHAINMASK_SEL_3X3) {
		cm->cur_tx_mask = sc->sc_tx_chainmask;
		return cm->cur_tx_mask;
	}

	if (cm->tx_avgrssi == ATH_RSSI_DUMMY_MARKER)
		return cm->cur_tx_mask;

	if (cm->switch_allowed) {
		/* Switch down from tx 3 to tx 2. */
		if (cm->cur_tx_mask == ATH_CHAINMASK_SEL_3X3 &&
		    ATH_RSSI_OUT(cm->tx_avgrssi) >=
		    ath_chainmask_sel_down_rssi_thres) {
			cm->cur_tx_mask = sc->sc_tx_chainmask;

			/* Don't let another switch happen until
			 * this timer expires */
			ath_chainmask_sel_timerstart(cm);
		}
		/* Switch up from tx 2 to 3. */
		else if (cm->cur_tx_mask == sc->sc_tx_chainmask &&
			 ATH_RSSI_OUT(cm->tx_avgrssi) <=
			 ath_chainmask_sel_up_rssi_thres) {
			cm->cur_tx_mask = ATH_CHAINMASK_SEL_3X3;

			/* Don't let another switch happen
			 * until this timer expires */
			ath_chainmask_sel_timerstart(cm);
		}
	}

	return cm->cur_tx_mask;
}
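
/*
 * NB: the two thresholds implement simple hysteresis: with a strong
 * signal (average RSSI at or above the "down" threshold) two TX chains
 * are enough, so the mask drops from 3x3; with a weak signal (at or
 * below the "up" threshold) all three chains are used again. The
 * chainmask select timer spaces out consecutive switches so the mask
 * cannot flap on every RSSI update.
 */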

/*
 * Update tx/rx chainmask. For legacy association,
 * hard code the chainmask to 1x1; for 11n association,
 * use the chainmask configuration.
 */
void ath_update_chainmask(struct ath_softc *sc, int is_ht)
{
	sc->sc_flags |= SC_OP_CHAINMASK_UPDATE;
	if (is_ht) {
		sc->sc_tx_chainmask = sc->sc_ah->ah_caps.tx_chainmask;
		sc->sc_rx_chainmask = sc->sc_ah->ah_caps.rx_chainmask;
	} else {
		sc->sc_tx_chainmask = 1;
		sc->sc_rx_chainmask = 1;
	}

	DPRINTF(sc, ATH_DBG_CONFIG, "%s: tx chmask: %d, rx chmask: %d\n",
		__func__, sc->sc_tx_chainmask, sc->sc_rx_chainmask);
}

/******************/
/* VAP management */
/******************/

int ath_vap_attach(struct ath_softc *sc,
		   int if_id,
		   struct ieee80211_vif *if_data,
		   enum ath9k_opmode opmode)
{
	struct ath_vap *avp;

	if (if_id >= ATH_BCBUF || sc->sc_vaps[if_id] != NULL) {
		DPRINTF(sc, ATH_DBG_FATAL,
			"%s: Invalid interface id = %u\n", __func__, if_id);
		return -EINVAL;
	}

	switch (opmode) {
	case ATH9K_M_STA:
	case ATH9K_M_IBSS:
	case ATH9K_M_MONITOR:
		break;
	case ATH9K_M_HOSTAP:
		/* XXX not right, beacon buffer is allocated on RUN trans */
		if (list_empty(&sc->sc_bbuf))
			return -ENOMEM;
		break;
	default:
		return -EINVAL;
	}

	/* create ath_vap */
	avp = kmalloc(sizeof(struct ath_vap), GFP_KERNEL);
	if (avp == NULL)
		return -ENOMEM;

	memzero(avp, sizeof(struct ath_vap));
	avp->av_if_data = if_data;
	/* Set the VAP opmode */
	avp->av_opmode = opmode;
	avp->av_bslot = -1;
	ath9k_hw_set_tsfadjust(sc->sc_ah, 1);

	sc->sc_vaps[if_id] = avp;
	sc->sc_nvaps++;
	/* Set the device opmode */
	sc->sc_ah->ah_opmode = opmode;

	/* default VAP configuration */
	avp->av_config.av_fixed_rateset = IEEE80211_FIXED_RATE_NONE;
	avp->av_config.av_fixed_retryset = 0x03030303;

	return 0;
}
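
/*
 * NB on the default retryset: 0x03030303 reads naturally as four packed
 * 8-bit per-rate-series retry counts (three retries for each of four
 * series). That interpretation is an assumption based on the packed
 * layout; this file does not spell out the encoding.
 */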

int ath_vap_detach(struct ath_softc *sc, int if_id)
{
	struct ath_hal *ah = sc->sc_ah;
	struct ath_vap *avp;

	avp = sc->sc_vaps[if_id];
	if (avp == NULL) {
		DPRINTF(sc, ATH_DBG_FATAL, "%s: invalid interface id %u\n",
			__func__, if_id);
		return -EINVAL;
	}

	/*
	 * Quiesce the hardware while we remove the vap. In
	 * particular we need to reclaim all references to the
	 * vap state by any frames pending on the tx queues.
	 *
	 * XXX can we do this w/o affecting other vap's?
	 */
	ath9k_hw_set_interrupts(ah, 0);	/* disable interrupts */
	ath_draintxq(sc, false);	/* stop xmit side */
	ath_stoprecv(sc);		/* stop recv side */
	ath_flushrecv(sc);		/* flush recv queue */

	kfree(avp);
	sc->sc_vaps[if_id] = NULL;
	sc->sc_nvaps--;

	return 0;
}

int ath_vap_config(struct ath_softc *sc,
		   int if_id, struct ath_vap_config *if_config)
{
	struct ath_vap *avp;

	if (if_id >= ATH_BCBUF) {
		DPRINTF(sc, ATH_DBG_FATAL,
			"%s: Invalid interface id = %u\n", __func__, if_id);
		return -EINVAL;
	}

	avp = sc->sc_vaps[if_id];
	ASSERT(avp != NULL);

	if (avp)
		memcpy(&avp->av_config, if_config, sizeof(avp->av_config));

	return 0;
}

/********/
/* Core */
/********/

int ath_open(struct ath_softc *sc, struct ath9k_channel *initial_chan)
{
	struct ath_hal *ah = sc->sc_ah;
	int status;
	int error = 0;

	DPRINTF(sc, ATH_DBG_CONFIG, "%s: mode %d\n",
		__func__, sc->sc_ah->ah_opmode);

	/*
	 * Stop anything previously setup. This is safe
	 * whether this is the first time through or not.
	 */
	ath_stop(sc);

	/* Initialize chainmask selection */
	sc->sc_tx_chainmask = ah->ah_caps.tx_chainmask;
	sc->sc_rx_chainmask = ah->ah_caps.rx_chainmask;

	/* Reset SERDES registers */
	ath9k_hw_configpcipowersave(ah, 0);

	/*
	 * The basic interface to setting the hardware in a good
	 * state is ``reset''. On return the hardware is known to
	 * be powered up and with interrupts disabled. This must
	 * be followed by initialization of the appropriate bits
	 * and then setup of the interrupt mask.
	 */
	spin_lock_bh(&sc->sc_resetlock);
	if (!ath9k_hw_reset(ah, initial_chan,
			    sc->sc_ht_info.tx_chan_width,
			    sc->sc_tx_chainmask, sc->sc_rx_chainmask,
			    sc->sc_ht_extprotspacing, false, &status)) {
		DPRINTF(sc, ATH_DBG_FATAL,
			"%s: unable to reset hardware; hal status %u "
			"(freq %u flags 0x%x)\n", __func__, status,
			initial_chan->channel, initial_chan->channelFlags);
		error = -EIO;
		spin_unlock_bh(&sc->sc_resetlock);
		goto done;
	}
	spin_unlock_bh(&sc->sc_resetlock);

	/*
	 * This is needed only to setup initial state
	 * but it's best done after a reset.
	 */
	ath_update_txpow(sc);

	/*
	 * Setup the hardware after reset:
	 * The receive engine is set going.
	 * Frame transmit is handled entirely
	 * in the frame output path; there's nothing to do
	 * here except setup the interrupt mask.
	 */
	if (ath_startrecv(sc) != 0) {
		DPRINTF(sc, ATH_DBG_FATAL,
			"%s: unable to start recv logic\n", __func__);
		error = -EIO;
		goto done;
	}

	/* Setup our intr mask. */
	sc->sc_imask = ATH9K_INT_RX | ATH9K_INT_TX
		| ATH9K_INT_RXEOL | ATH9K_INT_RXORN
		| ATH9K_INT_FATAL | ATH9K_INT_GLOBAL;

	if (ah->ah_caps.hw_caps & ATH9K_HW_CAP_GTT)
		sc->sc_imask |= ATH9K_INT_GTT;

	if (ah->ah_caps.hw_caps & ATH9K_HW_CAP_HT)
		sc->sc_imask |= ATH9K_INT_CST;

	/*
	 * Enable MIB interrupts when there are hardware phy counters.
	 * Note we only do this (at the moment) for station mode.
	 */
	if (ath9k_hw_phycounters(ah) &&
	    ((sc->sc_ah->ah_opmode == ATH9K_M_STA) ||
	     (sc->sc_ah->ah_opmode == ATH9K_M_IBSS)))
		sc->sc_imask |= ATH9K_INT_MIB;
	/*
	 * Some hardware processes the TIM IE and fires an
	 * interrupt when the TIM bit is set. For hardware
	 * that does, if not overridden by configuration,
	 * enable the TIM interrupt when operating as station.
	 */
	if ((ah->ah_caps.hw_caps & ATH9K_HW_CAP_ENHANCEDPM) &&
	    (sc->sc_ah->ah_opmode == ATH9K_M_STA) &&
	    !sc->sc_config.swBeaconProcess)
		sc->sc_imask |= ATH9K_INT_TIM;

	/*
	 * Don't enable interrupts here as we've not yet built our
	 * vap and node data structures, which will be needed as soon
	 * as we start receiving.
	 */
	ath_setcurmode(sc, ath_chan2mode(initial_chan));

	/* XXX: we must make sure h/w is ready and clear invalid flag
	 * before turning on interrupt. */
	sc->sc_flags &= ~SC_OP_INVALID;
done:
	return error;
}

int ath_reset(struct ath_softc *sc, bool retry_tx)
{
	struct ath_hal *ah = sc->sc_ah;
	int status;
	int error = 0;

	ath9k_hw_set_interrupts(ah, 0);	/* disable interrupts */
	ath_draintxq(sc, retry_tx);	/* stop xmit */
	ath_stoprecv(sc);		/* stop recv */
	ath_flushrecv(sc);		/* flush recv queue */

	/* Reset chip */
	spin_lock_bh(&sc->sc_resetlock);
	if (!ath9k_hw_reset(ah, sc->sc_ah->ah_curchan,
			    sc->sc_ht_info.tx_chan_width,
			    sc->sc_tx_chainmask, sc->sc_rx_chainmask,
			    sc->sc_ht_extprotspacing, false, &status)) {
		DPRINTF(sc, ATH_DBG_FATAL,
			"%s: unable to reset hardware; hal status %u\n",
			__func__, status);
		error = -EIO;
	}
	spin_unlock_bh(&sc->sc_resetlock);

	if (ath_startrecv(sc) != 0)	/* restart recv */
		DPRINTF(sc, ATH_DBG_FATAL,
			"%s: unable to start recv logic\n", __func__);

	/*
	 * We may be doing a reset in response to a request
	 * that changes the channel so update any state that
	 * might change as a result.
	 */
	ath_setcurmode(sc, ath_chan2mode(sc->sc_ah->ah_curchan));

	ath_update_txpow(sc);

	if (sc->sc_flags & SC_OP_BEACONS)
		ath_beacon_config(sc, ATH_IF_ID_ANY);	/* restart beacons */

	ath9k_hw_set_interrupts(ah, sc->sc_imask);

	/* Restart the txq */
	if (retry_tx) {
		int i;

		for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) {
			if (ATH_TXQ_SETUP(sc, i)) {
				spin_lock_bh(&sc->sc_txq[i].axq_lock);
				ath_txq_schedule(sc, &sc->sc_txq[i]);
				spin_unlock_bh(&sc->sc_txq[i].axq_lock);
			}
		}
	}

	return error;
}

int ath_suspend(struct ath_softc *sc)
{
	struct ath_hal *ah = sc->sc_ah;

	/* No I/O if device has been surprise removed */
	if (sc->sc_flags & SC_OP_INVALID)
		return -EIO;

	/* Shut off the interrupt before setting the SC_OP_INVALID flag */
	ath9k_hw_set_interrupts(ah, 0);

	/* XXX: we must make sure h/w will not generate any interrupt
	 * before setting the invalid flag. */
	sc->sc_flags |= SC_OP_INVALID;

	/* disable HAL and put h/w to sleep */
	ath9k_hw_disable(sc->sc_ah);
	ath9k_hw_configpcipowersave(sc->sc_ah, 1);

	return 0;
}

/* Interrupt handler. Most of the actual processing is deferred.
 * It's the caller's responsibility to ensure the chip is awake. */
irqreturn_t ath_isr(int irq, void *dev)
{
	struct ath_softc *sc = dev;
	struct ath_hal *ah = sc->sc_ah;
	enum ath9k_int status;
	bool sched = false;

	do {
		if (sc->sc_flags & SC_OP_INVALID) {
			/*
			 * The hardware is not ready/present, don't
			 * touch anything. Note this can happen early
			 * on if the IRQ is shared.
			 */
			return IRQ_NONE;
		}
		if (!ath9k_hw_intrpend(ah)) {	/* shared irq, not for us */
			return IRQ_NONE;
		}

		/*
		 * Figure out the reason(s) for the interrupt. Note
		 * that the hal returns a pseudo-ISR that may include
		 * bits we haven't explicitly enabled so we mask the
		 * value to ensure we only process bits we requested.
		 */
		ath9k_hw_getisr(ah, &status);	/* NB: clears ISR too */

		status &= sc->sc_imask;	/* discard unasked-for bits */

		/*
		 * If there are no status bits set, then this interrupt was not
		 * for me (should have been caught above).
		 */
		if (!status)
			return IRQ_NONE;

		sc->sc_intrstatus = status;

		if (status & ATH9K_INT_FATAL) {
			/* need a chip reset */
			sched = true;
		} else if (status & ATH9K_INT_RXORN) {
			/* need a chip reset */
			sched = true;
		} else {
			if (status & ATH9K_INT_SWBA) {
				/* schedule a tasklet for beacon handling */
				tasklet_schedule(&sc->bcon_tasklet);
			}
			if (status & ATH9K_INT_RXEOL) {
				/*
				 * NB: the hardware should re-read the link when
				 * RXE bit is written, but it doesn't work
				 * at least on older hardware revs.
				 */
				sched = true;
			}
			if (status & ATH9K_INT_TXURN)
				/* bump tx trigger level */
				ath9k_hw_updatetxtriglevel(ah, true);
			/* XXX: optimize this */
			if (status & ATH9K_INT_RX)
				sched = true;
			if (status & ATH9K_INT_TX)
				sched = true;
			if (status & ATH9K_INT_BMISS)
				sched = true;
			/* carrier sense timeout */
			if (status & ATH9K_INT_CST)
				sched = true;
			if (status & ATH9K_INT_MIB) {
				/*
				 * Disable interrupts until we service the MIB
				 * interrupt; otherwise it will continue to
				 * fire.
				 */
				ath9k_hw_set_interrupts(ah, 0);
				/*
				 * Let the hal handle the event. We assume
				 * it will clear whatever condition caused
				 * the interrupt.
				 */
				ath9k_hw_procmibevent(ah, &sc->sc_halstats);
				ath9k_hw_set_interrupts(ah, sc->sc_imask);
			}
			if (status & ATH9K_INT_TIM_TIMER) {
				if (!(ah->ah_caps.hw_caps &
				      ATH9K_HW_CAP_AUTOSLEEP)) {
					/* Clear RxAbort bit so that we can
					 * receive frames */
					ath9k_hw_setrxabort(ah, 0);
					sched = true;
				}
			}
		}
	} while (0);

	if (sched) {
		/* turn off every interrupt except SWBA */
		ath9k_hw_set_interrupts(ah, (sc->sc_imask & ATH9K_INT_SWBA));
		tasklet_schedule(&sc->intr_tq);
	}

	return IRQ_HANDLED;
}
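
/*
 * NB: the split above is the usual top-half/bottom-half pattern: the hard
 * IRQ handler only latches sc_intrstatus, masks everything except SWBA,
 * and schedules ath9k_tasklet(); the tasklet then does the real RX/TX
 * work and restores the full interrupt mask when it finishes.
 */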

/* Deferred interrupt processing */
static void ath9k_tasklet(unsigned long data)
{
	struct ath_softc *sc = (struct ath_softc *)data;
	u32 status = sc->sc_intrstatus;

	if (status & ATH9K_INT_FATAL) {
		/* need a chip reset */
		ath_reset(sc, false);
		return;
	} else {
		if (status &
		    (ATH9K_INT_RX | ATH9K_INT_RXEOL | ATH9K_INT_RXORN)) {
			/* XXX: fill me in */
			/*
			if (status & ATH9K_INT_RXORN) {
			}
			if (status & ATH9K_INT_RXEOL) {
			}
			*/
			spin_lock_bh(&sc->sc_rxflushlock);
			ath_rx_tasklet(sc, 0);
			spin_unlock_bh(&sc->sc_rxflushlock);
		}
		/* XXX: optimize this */
		if (status & ATH9K_INT_TX)
			ath_tx_tasklet(sc);
		/* XXX: fill me in */
		/*
		if (status & ATH9K_INT_BMISS) {
		}
		if (status & (ATH9K_INT_TIM | ATH9K_INT_DTIMSYNC)) {
			if (status & ATH9K_INT_TIM) {
			}
			if (status & ATH9K_INT_DTIMSYNC) {
			}
		}
		*/
	}

	/* re-enable hardware interrupt */
	ath9k_hw_set_interrupts(sc->sc_ah, sc->sc_imask);
}

int ath_init(u16 devid, struct ath_softc *sc)
{
	struct ath_hal *ah = NULL;
	int status;
	int error = 0, i;
	int csz = 0;

	/* XXX: hardware will not be ready until ath_open() being called */
	sc->sc_flags |= SC_OP_INVALID;

	sc->sc_debug = DBG_DEFAULT;
	DPRINTF(sc, ATH_DBG_CONFIG, "%s: devid 0x%x\n", __func__, devid);

	/* Initialize tasklet */
	tasklet_init(&sc->intr_tq, ath9k_tasklet, (unsigned long)sc);
	tasklet_init(&sc->bcon_tasklet, ath9k_beacon_tasklet,
		     (unsigned long)sc);

	/*
	 * Cache line size is used to size and align various
	 * structures used to communicate with the hardware.
	 */
	bus_read_cachesize(sc, &csz);
	/* XXX assert csz is non-zero */
	sc->sc_cachelsz = csz << 2;	/* convert to bytes */

	spin_lock_init(&sc->sc_resetlock);

	ah = ath9k_hw_attach(devid, sc, sc->mem, &status);
	if (ah == NULL) {
		DPRINTF(sc, ATH_DBG_FATAL,
			"%s: unable to attach hardware; HAL status %u\n",
			__func__, status);
		error = -ENXIO;
		goto bad;
	}
	sc->sc_ah = ah;

	/* Get the hardware key cache size. */
	sc->sc_keymax = ah->ah_caps.keycache_size;
	if (sc->sc_keymax > ATH_KEYMAX) {
		DPRINTF(sc, ATH_DBG_KEYCACHE,
			"%s: Warning, using only %u entries in %u key cache\n",
			__func__, ATH_KEYMAX, sc->sc_keymax);
		sc->sc_keymax = ATH_KEYMAX;
	}

	/*
	 * Reset the key cache since some parts do not
	 * reset the contents on initial power up.
	 */
	for (i = 0; i < sc->sc_keymax; i++)
		ath9k_hw_keyreset(ah, (u16) i);
	/*
	 * Mark key cache slots associated with global keys
	 * as in use. If we knew TKIP was not to be used we
	 * could leave the +32, +64, and +32+64 slots free.
	 * XXX only for splitmic.
	 */
	for (i = 0; i < IEEE80211_WEP_NKID; i++) {
		set_bit(i, sc->sc_keymap);
		set_bit(i + 32, sc->sc_keymap);
		set_bit(i + 64, sc->sc_keymap);
		set_bit(i + 32 + 64, sc->sc_keymap);
	}
	/*
	 * Collect the channel list using the default country
	 * code and including outdoor channels. The 802.11 layer
	 * is responsible for filtering this list based on settings
	 * like the phy mode.
	 */
	error = ath_setup_channels(sc);
	if (error)
		goto bad;

	/* default to MONITOR mode */
	sc->sc_ah->ah_opmode = ATH9K_M_MONITOR;

	/* Setup rate tables */
	ath_setup_rates(sc, IEEE80211_BAND_2GHZ);
	ath_setup_rates(sc, IEEE80211_BAND_5GHZ);

	/* NB: setup here so ath_rate_update is happy */
	ath_setcurmode(sc, ATH9K_MODE_11A);

	/*
	 * Allocate hardware transmit queues: one queue for
	 * beacon frames and one data queue for each QoS
	 * priority. Note that the hal handles resetting
	 * these queues at the needed time.
	 */
	sc->sc_bhalq = ath_beaconq_setup(ah);
	if (sc->sc_bhalq == -1) {
		DPRINTF(sc, ATH_DBG_FATAL,
			"%s: unable to setup a beacon xmit queue\n", __func__);
		error = -EIO;
		goto bad2;
	}
	sc->sc_cabq = ath_txq_setup(sc, ATH9K_TX_QUEUE_CAB, 0);
	if (sc->sc_cabq == NULL) {
		DPRINTF(sc, ATH_DBG_FATAL,
			"%s: unable to setup CAB xmit queue\n", __func__);
		error = -EIO;
		goto bad2;
	}

	sc->sc_config.cabqReadytime = ATH_CABQ_READY_TIME;
	ath_cabq_update(sc);

	for (i = 0; i < ARRAY_SIZE(sc->sc_haltype2q); i++)
		sc->sc_haltype2q[i] = -1;

	/* Setup data queues */
	/* NB: ensure BK queue is the lowest priority h/w queue */
	if (!ath_tx_setup(sc, ATH9K_WME_AC_BK)) {
		DPRINTF(sc, ATH_DBG_FATAL,
			"%s: unable to setup xmit queue for BK traffic\n",
			__func__);
		error = -EIO;
		goto bad2;
	}

	if (!ath_tx_setup(sc, ATH9K_WME_AC_BE)) {
		DPRINTF(sc, ATH_DBG_FATAL,
			"%s: unable to setup xmit queue for BE traffic\n",
			__func__);
		error = -EIO;
		goto bad2;
	}
	if (!ath_tx_setup(sc, ATH9K_WME_AC_VI)) {
		DPRINTF(sc, ATH_DBG_FATAL,
			"%s: unable to setup xmit queue for VI traffic\n",
			__func__);
		error = -EIO;
		goto bad2;
	}
	if (!ath_tx_setup(sc, ATH9K_WME_AC_VO)) {
		DPRINTF(sc, ATH_DBG_FATAL,
			"%s: unable to setup xmit queue for VO traffic\n",
			__func__);
		error = -EIO;
		goto bad2;
	}

	sc->sc_rc = ath_rate_attach(ah);
	if (sc->sc_rc == NULL) {
		error = -EIO;
		goto bad2;
	}

	if (ath9k_hw_getcapability(ah, ATH9K_CAP_CIPHER,
				   ATH9K_CIPHER_TKIP, NULL)) {
		/*
		 * Whether we should enable h/w TKIP MIC.
		 * XXX: if we don't support WME TKIP MIC, then we wouldn't
		 * report WMM capable, so it's always safe to turn on
		 * TKIP MIC in this case.
		 */
		ath9k_hw_setcapability(sc->sc_ah, ATH9K_CAP_TKIP_MIC,
				       0, 1, NULL);
	}

	/*
	 * Check whether separate key cache entries
	 * are required to handle both tx+rx MIC keys.
	 * With split mic keys the number of stations is limited
	 * to 27 otherwise 59.
	 */
	if (ath9k_hw_getcapability(ah, ATH9K_CAP_CIPHER,
				   ATH9K_CIPHER_TKIP, NULL)
	    && ath9k_hw_getcapability(ah, ATH9K_CAP_CIPHER,
				      ATH9K_CIPHER_MIC, NULL)
	    && ath9k_hw_getcapability(ah, ATH9K_CAP_TKIP_SPLIT,
				      0, NULL))
		sc->sc_splitmic = 1;

	/* turn on mcast key search if possible */
	if (!ath9k_hw_getcapability(ah, ATH9K_CAP_MCAST_KEYSRCH, 0, NULL))
		(void)ath9k_hw_setcapability(ah, ATH9K_CAP_MCAST_KEYSRCH, 1,
					     1, NULL);

	sc->sc_config.txpowlimit = ATH_TXPOWER_MAX;
	sc->sc_config.txpowlimit_override = 0;

	/* 11n Capabilities */
	if (ah->ah_caps.hw_caps & ATH9K_HW_CAP_HT) {
		sc->sc_flags |= SC_OP_TXAGGR;
		sc->sc_flags |= SC_OP_RXAGGR;
	}

	sc->sc_tx_chainmask = ah->ah_caps.tx_chainmask;
	sc->sc_rx_chainmask = ah->ah_caps.rx_chainmask;

	ath9k_hw_setcapability(ah, ATH9K_CAP_DIVERSITY, 1, true, NULL);
	sc->sc_defant = ath9k_hw_getdefantenna(ah);

	ath9k_hw_getmac(ah, sc->sc_myaddr);
	if (ah->ah_caps.hw_caps & ATH9K_HW_CAP_BSSIDMASK) {
		ath9k_hw_getbssidmask(ah, sc->sc_bssidmask);
		ATH_SET_VAP_BSSID_MASK(sc->sc_bssidmask);
		ath9k_hw_setbssidmask(ah, sc->sc_bssidmask);
	}
	sc->sc_slottime = ATH9K_SLOT_TIME_9;	/* default to short slot time */

	/* initialize beacon slots */
	for (i = 0; i < ARRAY_SIZE(sc->sc_bslot); i++)
		sc->sc_bslot[i] = ATH_IF_ID_ANY;

	/* save MISC configurations */
	sc->sc_config.swBeaconProcess = 1;

#ifdef CONFIG_SLOW_ANT_DIV
	/* range is 40 - 255, we use something in the middle */
	ath_slow_ant_div_init(&sc->sc_antdiv, sc, 127);
#endif

	return 0;
bad2:
	/* cleanup tx queues */
	for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++)
		if (ATH_TXQ_SETUP(sc, i))
			ath_tx_cleanupq(sc, &sc->sc_txq[i]);
bad:
	if (ah)
		ath9k_hw_detach(ah);

	return error;
}

void ath_deinit(struct ath_softc *sc)
{
	struct ath_hal *ah = sc->sc_ah;
	int i;

	DPRINTF(sc, ATH_DBG_CONFIG, "%s\n", __func__);

	ath_stop(sc);
	if (!(sc->sc_flags & SC_OP_INVALID))
		ath9k_hw_setpower(sc->sc_ah, ATH9K_PM_AWAKE);
	ath_rate_detach(sc->sc_rc);
	/* cleanup tx queues */
	for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++)
		if (ATH_TXQ_SETUP(sc, i))
			ath_tx_cleanupq(sc, &sc->sc_txq[i]);
	ath9k_hw_detach(ah);
}

/*******************/
/* Node Management */
/*******************/

struct ath_node *ath_node_attach(struct ath_softc *sc, u8 *addr, int if_id)
{
	struct ath_vap *avp;
	struct ath_node *an;
	DECLARE_MAC_BUF(mac);

	avp = sc->sc_vaps[if_id];
	ASSERT(avp != NULL);

	/* mac80211 sta_notify callback is from an IRQ context, so no sleep */
	an = kmalloc(sizeof(struct ath_node), GFP_ATOMIC);
	if (an == NULL)
		return NULL;
	memzero(an, sizeof(*an));

	an->an_sc = sc;
	memcpy(an->an_addr, addr, ETH_ALEN);
	atomic_set(&an->an_refcnt, 1);

	/* set up per-node tx/rx state */
	ath_tx_node_init(sc, an);
	ath_rx_node_init(sc, an);

	ath_chainmask_sel_init(sc, an);
	ath_chainmask_sel_timerstart(&an->an_chainmask_sel);

	list_add(&an->list, &sc->node_list);

	return an;
}

void ath_node_detach(struct ath_softc *sc, struct ath_node *an, bool bh_flag)
{
	unsigned long flags;
	DECLARE_MAC_BUF(mac);

	ath_chainmask_sel_timerstop(&an->an_chainmask_sel);
	an->an_flags |= ATH_NODE_CLEAN;
	ath_tx_node_cleanup(sc, an, bh_flag);
	ath_rx_node_cleanup(sc, an);

	ath_tx_node_free(sc, an);
	ath_rx_node_free(sc, an);

	spin_lock_irqsave(&sc->node_lock, flags);
	list_del(&an->list);
	spin_unlock_irqrestore(&sc->node_lock, flags);

	kfree(an);
}

/* Finds a node and increases the refcnt if found */
struct ath_node *ath_node_get(struct ath_softc *sc, u8 *addr)
{
	struct ath_node *an = NULL, *an_found = NULL;

	if (list_empty(&sc->node_list))	/* FIXME */
		goto out;
	list_for_each_entry(an, &sc->node_list, list) {
		if (!compare_ether_addr(an->an_addr, addr)) {
			atomic_inc(&an->an_refcnt);
			an_found = an;
			break;
		}
	}
out:
	return an_found;
}

/* Decrements the refcnt and if it drops to zero, detach the node */
void ath_node_put(struct ath_softc *sc, struct ath_node *an, bool bh_flag)
{
	if (atomic_dec_and_test(&an->an_refcnt))
		ath_node_detach(sc, an, bh_flag);
}
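
/*
 * Typical refcounting pattern for the helpers above (illustrative only):
 *
 *	struct ath_node *an = ath_node_get(sc, addr);	// refcnt++
 *	if (an) {
 *		... use an ...
 *		ath_node_put(sc, an, false);	// refcnt--, detach at zero
 *	}
 */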

/* Finds a node, doesn't increment refcnt. Caller must hold sc->node_lock */
struct ath_node *ath_node_find(struct ath_softc *sc, u8 *addr)
{
	struct ath_node *an = NULL, *an_found = NULL;

	if (list_empty(&sc->node_list))
		return NULL;

	list_for_each_entry(an, &sc->node_list, list)
		if (!compare_ether_addr(an->an_addr, addr)) {
			an_found = an;
			break;
		}

	return an_found;
}

/*
 * Set up a new node
 *
 * Setup driver-specific state for a newly associated node. If the
 * station is reassociating, any existing aggregation state is torn
 * down first.
 */
void ath_newassoc(struct ath_softc *sc,
		  struct ath_node *an, int isnew, int isuapsd)
{
	int tidno;

	/* if station reassociates, tear down the aggregation state. */
	if (!isnew) {
		for (tidno = 0; tidno < WME_NUM_TID; tidno++) {
			if (sc->sc_flags & SC_OP_TXAGGR)
				ath_tx_aggr_teardown(sc, an, tidno);
			if (sc->sc_flags & SC_OP_RXAGGR)
				ath_rx_aggr_teardown(sc, an, tidno);
		}
	}
	an->an_flags = 0;
}

/**************/
/* Encryption */
/**************/

void ath_key_reset(struct ath_softc *sc, u16 keyix, int freeslot)
{
	ath9k_hw_keyreset(sc->sc_ah, keyix);
	if (freeslot)
		clear_bit(keyix, sc->sc_keymap);
}

int ath_keyset(struct ath_softc *sc,
	       u16 keyix,
	       struct ath9k_keyval *hk,
	       const u8 mac[ETH_ALEN])
{
	bool status;

	status = ath9k_hw_set_keycache_entry(sc->sc_ah,
					     keyix, hk, mac, false);

	return status;
}

/***********************/
/* TX Power/Regulatory */
/***********************/

/*
 * Set Transmit power in HAL
 *
 * This routine makes the actual HAL calls to set the new transmit power
 * limit.
 */
void ath_update_txpow(struct ath_softc *sc)
{
	struct ath_hal *ah = sc->sc_ah;
	u32 txpow;

	if (sc->sc_curtxpow != sc->sc_config.txpowlimit) {
		ath9k_hw_set_txpowerlimit(ah, sc->sc_config.txpowlimit);
		/* read back in case value is clamped */
		ath9k_hw_getcapability(ah, ATH9K_CAP_TXPOW, 1, &txpow);
		sc->sc_curtxpow = txpow;
	}
}

/* Return the current country and domain information */
void ath_get_currentCountry(struct ath_softc *sc,
			    struct ath9k_country_entry *ctry)
{
	ath9k_regd_get_current_country(sc->sc_ah, ctry);

	/* If the HAL hasn't determined the country yet (it is band
	 * dependent), use the one we passed in. */
	if (ctry->countryCode == CTRY_DEFAULT) {
		ctry->iso[0] = 0;
		ctry->iso[1] = 0;
	} else if (ctry->iso[0] && ctry->iso[1]) {
		if (!ctry->iso[2]) {
			if (ath_outdoor)
				ctry->iso[2] = 'O';
			else
				ctry->iso[2] = 'I';
		}
	}
}

/**************************/
/* Slow Antenna Diversity */
/**************************/

void ath_slow_ant_div_init(struct ath_antdiv *antdiv,
			   struct ath_softc *sc,
			   int32_t rssitrig)
{
	int trig;

	/* antdivf_rssitrig can range from 40 - 0xff */
	trig = (rssitrig > 0xff) ? 0xff : rssitrig;
	trig = (trig < 40) ? 40 : trig;

	antdiv->antdiv_sc = sc;
	antdiv->antdivf_rssitrig = trig;
}

void ath_slow_ant_div_start(struct ath_antdiv *antdiv,
			    u8 num_antcfg,
			    const u8 *bssid)
{
	antdiv->antdiv_num_antcfg =
		num_antcfg < ATH_ANT_DIV_MAX_CFG ?
		num_antcfg : ATH_ANT_DIV_MAX_CFG;
	antdiv->antdiv_state = ATH_ANT_DIV_IDLE;
	antdiv->antdiv_curcfg = 0;
	antdiv->antdiv_bestcfg = 0;
	antdiv->antdiv_laststatetsf = 0;

	memcpy(antdiv->antdiv_bssid, bssid, sizeof(antdiv->antdiv_bssid));

	antdiv->antdiv_start = 1;
}

void ath_slow_ant_div_stop(struct ath_antdiv *antdiv)
{
	antdiv->antdiv_start = 0;
}

static int32_t ath_find_max_val(int32_t *val,
				u8 num_val, u8 *max_index)
{
	int32_t max_val = *val++;
	u32 cur_index = 0;

	*max_index = 0;
	while (++cur_index < num_val) {
		if (*val > max_val) {
			max_val = *val;
			*max_index = cur_index;
		}
		val++;
	}

	return max_val;
}

void ath_slow_ant_div(struct ath_antdiv *antdiv,
		      struct ieee80211_hdr *hdr,
		      struct ath_rx_status *rx_stats)
{
	struct ath_softc *sc = antdiv->antdiv_sc;
	struct ath_hal *ah = sc->sc_ah;
	u64 curtsf = 0;
	u8 bestcfg, curcfg = antdiv->antdiv_curcfg;
	__le16 fc = hdr->frame_control;

	if (antdiv->antdiv_start && ieee80211_is_beacon(fc)
	    && !compare_ether_addr(hdr->addr3, antdiv->antdiv_bssid)) {
		antdiv->antdiv_lastbrssi[curcfg] = rx_stats->rs_rssi;
		antdiv->antdiv_lastbtsf[curcfg] = ath9k_hw_gettsf64(sc->sc_ah);
		curtsf = antdiv->antdiv_lastbtsf[curcfg];
	} else {
		return;
	}

	switch (antdiv->antdiv_state) {
	case ATH_ANT_DIV_IDLE:
		if ((antdiv->antdiv_lastbrssi[curcfg] <
		     antdiv->antdivf_rssitrig)
		    && ((curtsf - antdiv->antdiv_laststatetsf) >
			ATH_ANT_DIV_MIN_IDLE_US)) {
			curcfg++;
			if (curcfg == antdiv->antdiv_num_antcfg)
				curcfg = 0;

			if (!ath9k_hw_select_antconfig(ah, curcfg)) {
				antdiv->antdiv_bestcfg = antdiv->antdiv_curcfg;
				antdiv->antdiv_curcfg = curcfg;
				antdiv->antdiv_laststatetsf = curtsf;
				antdiv->antdiv_state = ATH_ANT_DIV_SCAN;
			}
		}
		break;

	case ATH_ANT_DIV_SCAN:
		if ((curtsf - antdiv->antdiv_laststatetsf) <
		    ATH_ANT_DIV_MIN_SCAN_US)
			break;

		curcfg++;
		if (curcfg == antdiv->antdiv_num_antcfg)
			curcfg = 0;

		if (curcfg == antdiv->antdiv_bestcfg) {
			ath_find_max_val(antdiv->antdiv_lastbrssi,
					 antdiv->antdiv_num_antcfg, &bestcfg);
			if (!ath9k_hw_select_antconfig(ah, bestcfg)) {
				antdiv->antdiv_bestcfg = bestcfg;
				antdiv->antdiv_curcfg = bestcfg;
				antdiv->antdiv_laststatetsf = curtsf;
				antdiv->antdiv_state = ATH_ANT_DIV_IDLE;
			}
		} else {
			if (!ath9k_hw_select_antconfig(ah, curcfg)) {
				antdiv->antdiv_curcfg = curcfg;
				antdiv->antdiv_laststatetsf = curtsf;
				antdiv->antdiv_state = ATH_ANT_DIV_SCAN;
			}
		}

		break;
	}
}
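
/*
 * NB: the state machine above samples RSSI only from beacons of the
 * associated BSSID. In IDLE, a beacon below the trigger RSSI starts a
 * SCAN that steps through every antenna configuration, recording the
 * beacon RSSI seen on each; once the scan wraps back to the starting
 * configuration, the one with the best recorded RSSI is selected and
 * the machine returns to IDLE.
 */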

/***********************/
/* Descriptor Handling */
/***********************/

/*
 * Set up DMA descriptors
 *
 * This function will allocate both the DMA descriptor structure and the
 * buffers it points to. These are used by the hardware to process
 * transmit and receive frames.
 */
int ath_descdma_setup(struct ath_softc *sc,
		      struct ath_descdma *dd,
		      struct list_head *head,
		      const char *name,
		      int nbuf,
		      int ndesc)
{
#define	DS2PHYS(_dd, _ds)						\
	((_dd)->dd_desc_paddr + ((caddr_t)(_ds) - (caddr_t)(_dd)->dd_desc))
#define ATH_DESC_4KB_BOUND_CHECK(_daddr) ((((_daddr) & 0xFFF) > 0xF7F) ? 1 : 0)
#define ATH_DESC_4KB_BOUND_NUM_SKIPPED(_len) ((_len) / 4096)

	struct ath_desc *ds;
	struct ath_buf *bf;
	int i, bsize, error;

	DPRINTF(sc, ATH_DBG_CONFIG, "%s: %s DMA: %u buffers %u desc/buf\n",
		__func__, name, nbuf, ndesc);

	/* ath_desc must be a multiple of DWORDs */
	if ((sizeof(struct ath_desc) % 4) != 0) {
		DPRINTF(sc, ATH_DBG_FATAL, "%s: ath_desc not DWORD aligned\n",
			__func__);
		ASSERT((sizeof(struct ath_desc) % 4) == 0);
		error = -ENOMEM;
		goto fail;
	}

	dd->dd_name = name;
	dd->dd_desc_len = sizeof(struct ath_desc) * nbuf * ndesc;

	/*
	 * Need additional DMA memory because we can't use
	 * descriptors that cross the 4K page boundary. Assume
	 * one skipped descriptor per 4K page.
	 */
	if (!(sc->sc_ah->ah_caps.hw_caps & ATH9K_HW_CAP_4KB_SPLITTRANS)) {
		u32 ndesc_skipped =
			ATH_DESC_4KB_BOUND_NUM_SKIPPED(dd->dd_desc_len);
		u32 dma_len;

		while (ndesc_skipped) {
			dma_len = ndesc_skipped * sizeof(struct ath_desc);
			dd->dd_desc_len += dma_len;

			ndesc_skipped = ATH_DESC_4KB_BOUND_NUM_SKIPPED(dma_len);
		}
	}

	/* allocate descriptors */
	dd->dd_desc = pci_alloc_consistent(sc->pdev,
					   dd->dd_desc_len,
					   &dd->dd_desc_paddr);
	if (dd->dd_desc == NULL) {
		error = -ENOMEM;
		goto fail;
	}
	ds = dd->dd_desc;
	DPRINTF(sc, ATH_DBG_CONFIG, "%s: %s DMA map: %p (%u) -> %llx (%u)\n",
		__func__, dd->dd_name, ds, (u32) dd->dd_desc_len,
		ito64(dd->dd_desc_paddr), /*XXX*/(u32) dd->dd_desc_len);

	/* allocate buffers */
	bsize = sizeof(struct ath_buf) * nbuf;
	bf = kmalloc(bsize, GFP_KERNEL);
	if (bf == NULL) {
		error = -ENOMEM;
		goto fail2;
	}
	memzero(bf, bsize);
	dd->dd_bufptr = bf;

	INIT_LIST_HEAD(head);
	for (i = 0; i < nbuf; i++, bf++, ds += ndesc) {
		bf->bf_desc = ds;
		bf->bf_daddr = DS2PHYS(dd, ds);

		if (!(sc->sc_ah->ah_caps.hw_caps &
		      ATH9K_HW_CAP_4KB_SPLITTRANS)) {
			/*
			 * Skip descriptor addresses which can cause 4KB
			 * boundary crossing (addr + length) with a 32 dword
			 * descriptor fetch.
			 */
			while (ATH_DESC_4KB_BOUND_CHECK(bf->bf_daddr)) {
				ASSERT((caddr_t) bf->bf_desc <
				       ((caddr_t) dd->dd_desc +
					dd->dd_desc_len));

				ds += ndesc;
				bf->bf_desc = ds;
				bf->bf_daddr = DS2PHYS(dd, ds);
			}
		}
		list_add_tail(&bf->list, head);
	}
	return 0;
fail2:
	pci_free_consistent(sc->pdev,
			    dd->dd_desc_len, dd->dd_desc, dd->dd_desc_paddr);
fail:
	memzero(dd, sizeof(*dd));
	return error;
#undef ATH_DESC_4KB_BOUND_CHECK
#undef ATH_DESC_4KB_BOUND_NUM_SKIPPED
#undef DS2PHYS
}
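
/*
 * Worked example of the boundary check above (values illustrative): with
 * a 32-dword (128-byte) descriptor fetch, a descriptor whose offset
 * within a 4 KB page is above 0xF7F is treated as potentially reaching
 * past the page end (0xF7F + 0x80 == 0xFFF, the last in-page byte), so
 * such slots are skipped; the padding loop that grows dd_desc_len up
 * front is what reserves room for these skipped slots.
 */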

/*
 * Cleanup DMA descriptors
 *
 * This function will free the DMA block that was allocated for the
 * descriptor pool. Since this was allocated as one "chunk", it is freed
 * in the same manner.
 */
void ath_descdma_cleanup(struct ath_softc *sc,
			 struct ath_descdma *dd,
			 struct list_head *head)
{
	/* Free memory associated with descriptors */
	pci_free_consistent(sc->pdev,
			    dd->dd_desc_len, dd->dd_desc, dd->dd_desc_paddr);

	INIT_LIST_HEAD(head);
	kfree(dd->dd_bufptr);
	memzero(dd, sizeof(*dd));
}

/*************/
/* Utilities */
/*************/

int ath_get_hal_qnum(u16 queue, struct ath_softc *sc)
{
	int qnum;

	switch (queue) {
	case 0:
		qnum = sc->sc_haltype2q[ATH9K_WME_AC_VO];
		break;
	case 1:
		qnum = sc->sc_haltype2q[ATH9K_WME_AC_VI];
		break;
	case 2:
		qnum = sc->sc_haltype2q[ATH9K_WME_AC_BE];
		break;
	case 3:
		qnum = sc->sc_haltype2q[ATH9K_WME_AC_BK];
		break;
	default:
		qnum = sc->sc_haltype2q[ATH9K_WME_AC_BE];
		break;
	}

	return qnum;
}

int ath_get_mac80211_qnum(u32 queue, struct ath_softc *sc)
{
	int qnum;

	switch (queue) {
	case ATH9K_WME_AC_VO:
		qnum = 0;
		break;
	case ATH9K_WME_AC_VI:
		qnum = 1;
		break;
	case ATH9K_WME_AC_BE:
		qnum = 2;
		break;
	case ATH9K_WME_AC_BK:
		qnum = 3;
		break;
	default:
		qnum = -1;
		break;
	}

	return qnum;
}
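
/*
 * NB: mac80211 numbers its queues by priority, 0 (VO, highest) through
 * 3 (BK, lowest); the two helpers above implement that mapping in
 * opposite directions: mac80211 queue index <-> WME access category.
 */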

/*
 * Expand time stamp to TSF
 *
 * Extend 15-bit time stamp from rx descriptor to
 * a full 64-bit TSF using the current h/w TSF.
 */
u64 ath_extend_tsf(struct ath_softc *sc, u32 rstamp)
{
	u64 tsf;

	tsf = ath9k_hw_gettsf64(sc->sc_ah);
	if ((tsf & 0x7fff) < rstamp)
		tsf -= 0x8000;

	return (tsf & ~0x7fff) | rstamp;
}
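
/*
 * Worked example (illustrative): if the current TSF is 0x1_0000_1234 and
 * the descriptor stamp is 0x7f00, then (tsf & 0x7fff) == 0x1234 < 0x7f00,
 * meaning the frame arrived before the low 15 bits last wrapped; backing
 * tsf up by 0x8000 and splicing in the stamp yields 0x0_FFFF_FF00, the
 * most recent TSF whose low 15 bits equal the stamp.
 */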

/*
 * Set Default Antenna
 *
 * Call into the HAL to set the default antenna to use. Not really valid
 * for MIMO technology.
 */
void ath_setdefantenna(void *context, u32 antenna)
{
	struct ath_softc *sc = (struct ath_softc *)context;
	struct ath_hal *ah = sc->sc_ah;

	/* XXX block beacon interrupts */
	ath9k_hw_setantenna(ah, antenna);
	sc->sc_defant = antenna;
	sc->sc_rxotherant = 0;
}

/*
 * Set Slot Time
 *
 * This will wake up the chip if required, and set the slot time for the
 * frame (maximum transmit time). Slot time is assumed to be already set
 * in the ATH object member sc_slottime.
 */
void ath_setslottime(struct ath_softc *sc)
{
	ath9k_hw_setslottime(sc->sc_ah, sc->sc_slottime);
	sc->sc_updateslot = OK;
}