  1. /*
  2. * Copyright (c) 2008, Atheros Communications Inc.
  3. *
  4. * Permission to use, copy, modify, and/or distribute this software for any
  5. * purpose with or without fee is hereby granted, provided that the above
  6. * copyright notice and this permission notice appear in all copies.
  7. *
  8. * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
  9. * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
  10. * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
  11. * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
  12. * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
  13. * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
  14. * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
  15. */
  16. /* Implementation of the main "ATH" layer. */
  17. #include "core.h"
  18. #include "regd.h"
static int ath_outdoor;		/* enable outdoor use */

/* Link-level broadcast MAC address (all ones); written as the BSSID
 * while scanning / before association. */
static const u8 ath_bcast_mac[ETH_ALEN] =
	{ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };

/*
 * Chainmask auto-selection tunables (used by ath_chainmask_sel_logic()):
 * at or above the "down" threshold the tx mask drops from 3x3 to the
 * configured mask, at or below the "up" threshold it returns to 3x3,
 * and the period is the hold-off between consecutive switches.
 */
static u32 ath_chainmask_sel_up_rssi_thres =
	ATH_CHAINMASK_SEL_UP_RSSI_THRES;
static u32 ath_chainmask_sel_down_rssi_thres =
	ATH_CHAINMASK_SEL_DOWN_RSSI_THRES;
static u32 ath_chainmask_sel_period =
	ATH_CHAINMASK_SEL_TIMEOUT;
  28. /* return bus cachesize in 4B word units */
  29. static void bus_read_cachesize(struct ath_softc *sc, int *csz)
  30. {
  31. u8 u8tmp;
  32. pci_read_config_byte(sc->pdev, PCI_CACHE_LINE_SIZE, (u8 *)&u8tmp);
  33. *csz = (int)u8tmp;
  34. /*
  35. * This check was put in to avoid "unplesant" consequences if
  36. * the bootrom has not fully initialized all PCI devices.
  37. * Sometimes the cache line size register is not set
  38. */
  39. if (*csz == 0)
  40. *csz = DEFAULT_CACHELINE >> 2; /* Use the default size */
  41. }
/*
 * Set current operating mode
 *
 * This function initializes and fills the rate table in the ATH object based
 * on the operating mode. The blink rates are also set up here, although
 * they have been superseded by the ath_led module.
 */
  49. static void ath_setcurmode(struct ath_softc *sc, enum wireless_mode mode)
  50. {
  51. const struct ath9k_rate_table *rt;
  52. int i;
  53. memset(sc->sc_rixmap, 0xff, sizeof(sc->sc_rixmap));
  54. rt = ath9k_hw_getratetable(sc->sc_ah, mode);
  55. BUG_ON(!rt);
  56. for (i = 0; i < rt->rateCount; i++)
  57. sc->sc_rixmap[rt->info[i].rateCode] = (u8) i;
  58. memzero(sc->sc_hwmap, sizeof(sc->sc_hwmap));
  59. for (i = 0; i < 256; i++) {
  60. u8 ix = rt->rateCodeToIndex[i];
  61. if (ix == 0xff)
  62. continue;
  63. sc->sc_hwmap[i].ieeerate =
  64. rt->info[ix].dot11Rate & IEEE80211_RATE_VAL;
  65. sc->sc_hwmap[i].rateKbps = rt->info[ix].rateKbps;
  66. if (rt->info[ix].shortPreamble ||
  67. rt->info[ix].phy == PHY_OFDM) {
  68. /* XXX: Handle this */
  69. }
  70. /* NB: this uses the last entry if the rate isn't found */
  71. /* XXX beware of overlow */
  72. }
  73. sc->sc_currates = rt;
  74. sc->sc_curmode = mode;
  75. /*
  76. * All protection frames are transmited at 2Mb/s for
  77. * 11g, otherwise at 1Mb/s.
  78. * XXX select protection rate index from rate table.
  79. */
  80. sc->sc_protrix = (mode == ATH9K_MODE_11G ? 1 : 0);
  81. }
  82. /*
  83. * Set up rate table (legacy rates)
  84. */
  85. static void ath_setup_rates(struct ath_softc *sc, enum ieee80211_band band)
  86. {
  87. struct ath_hal *ah = sc->sc_ah;
  88. const struct ath9k_rate_table *rt = NULL;
  89. struct ieee80211_supported_band *sband;
  90. struct ieee80211_rate *rate;
  91. int i, maxrates;
  92. switch (band) {
  93. case IEEE80211_BAND_2GHZ:
  94. rt = ath9k_hw_getratetable(ah, ATH9K_MODE_11G);
  95. break;
  96. case IEEE80211_BAND_5GHZ:
  97. rt = ath9k_hw_getratetable(ah, ATH9K_MODE_11A);
  98. break;
  99. default:
  100. break;
  101. }
  102. if (rt == NULL)
  103. return;
  104. sband = &sc->sbands[band];
  105. rate = sc->rates[band];
  106. if (rt->rateCount > ATH_RATE_MAX)
  107. maxrates = ATH_RATE_MAX;
  108. else
  109. maxrates = rt->rateCount;
  110. for (i = 0; i < maxrates; i++) {
  111. rate[i].bitrate = rt->info[i].rateKbps / 100;
  112. rate[i].hw_value = rt->info[i].rateCode;
  113. sband->n_bitrates++;
  114. DPRINTF(sc, ATH_DBG_CONFIG,
  115. "%s: Rate: %2dMbps, ratecode: %2d\n",
  116. __func__,
  117. rate[i].bitrate / 10,
  118. rate[i].hw_value);
  119. }
  120. }
  121. /*
  122. * Set up channel list
  123. */
/*
 * Build the mac80211 channel lists for both bands from the regulatory
 * channel set collected by the HAL.  Returns 0 on success, -EINVAL if
 * the regulatory code could not produce a channel list.
 */
static int ath_setup_channels(struct ath_softc *sc)
{
	struct ath_hal *ah = sc->sc_ah;
	int nchan, i, a = 0, b = 0;	/* a/b: next slot in 2GHz/5GHz arrays */
	u8 regclassids[ATH_REGCLASSIDS_MAX];
	u32 nregclass = 0;
	struct ieee80211_supported_band *band_2ghz;
	struct ieee80211_supported_band *band_5ghz;
	struct ieee80211_channel *chan_2ghz;
	struct ieee80211_channel *chan_5ghz;
	struct ath9k_channel *c;

	/* Fill in ah->ah_channels */
	if (!ath9k_regd_init_channels(ah,
				      ATH_CHAN_MAX,
				      (u32 *)&nchan,
				      regclassids,
				      ATH_REGCLASSIDS_MAX,
				      &nregclass,
				      CTRY_DEFAULT,
				      false,
				      1)) {
		u32 rd = ah->ah_currentRD;

		DPRINTF(sc, ATH_DBG_FATAL,
			"%s: unable to collect channel list; "
			"regdomain likely %u country code %u\n",
			__func__, rd, CTRY_DEFAULT);
		return -EINVAL;
	}

	band_2ghz = &sc->sbands[IEEE80211_BAND_2GHZ];
	band_5ghz = &sc->sbands[IEEE80211_BAND_5GHZ];
	chan_2ghz = sc->channels[IEEE80211_BAND_2GHZ];
	chan_5ghz = sc->channels[IEEE80211_BAND_5GHZ];

	/* Split the HAL channel list into per-band mac80211 channels,
	 * translating the private channel flags to mac80211 flags. */
	for (i = 0; i < nchan; i++) {
		c = &ah->ah_channels[i];
		if (IS_CHAN_2GHZ(c)) {
			chan_2ghz[a].band = IEEE80211_BAND_2GHZ;
			chan_2ghz[a].center_freq = c->channel;
			chan_2ghz[a].max_power = c->maxTxPower;
			if (c->privFlags & CHANNEL_DISALLOW_ADHOC)
				chan_2ghz[a].flags |=
					IEEE80211_CHAN_NO_IBSS;
			if (c->channelFlags & CHANNEL_PASSIVE)
				chan_2ghz[a].flags |=
					IEEE80211_CHAN_PASSIVE_SCAN;
			/* Count is updated as we go: ++a is both the new
			 * channel total and the next free slot. */
			band_2ghz->n_channels = ++a;
			DPRINTF(sc, ATH_DBG_CONFIG,
				"%s: 2MHz channel: %d, "
				"channelFlags: 0x%x\n",
				__func__,
				c->channel,
				c->channelFlags);
		} else if (IS_CHAN_5GHZ(c)) {
			chan_5ghz[b].band = IEEE80211_BAND_5GHZ;
			chan_5ghz[b].center_freq = c->channel;
			chan_5ghz[b].max_power = c->maxTxPower;
			if (c->privFlags & CHANNEL_DISALLOW_ADHOC)
				chan_5ghz[b].flags |=
					IEEE80211_CHAN_NO_IBSS;
			if (c->channelFlags & CHANNEL_PASSIVE)
				chan_5ghz[b].flags |=
					IEEE80211_CHAN_PASSIVE_SCAN;
			band_5ghz->n_channels = ++b;
			DPRINTF(sc, ATH_DBG_CONFIG,
				"%s: 5MHz channel: %d, "
				"channelFlags: 0x%x\n",
				__func__,
				c->channel,
				c->channelFlags);
		}
	}

	return 0;
}
/*
 * Determine mode from channel flags
 *
 * This routine will provide the enumerated WIRELESS_MODE value based
 * on the settings of the channel flags. If no valid set of flags
 * exists, the lowest mode (11b) is selected.
 */
  203. static enum wireless_mode ath_chan2mode(struct ath9k_channel *chan)
  204. {
  205. if (chan->chanmode == CHANNEL_A)
  206. return ATH9K_MODE_11A;
  207. else if (chan->chanmode == CHANNEL_G)
  208. return ATH9K_MODE_11G;
  209. else if (chan->chanmode == CHANNEL_B)
  210. return ATH9K_MODE_11B;
  211. else if (chan->chanmode == CHANNEL_A_HT20)
  212. return ATH9K_MODE_11NA_HT20;
  213. else if (chan->chanmode == CHANNEL_G_HT20)
  214. return ATH9K_MODE_11NG_HT20;
  215. else if (chan->chanmode == CHANNEL_A_HT40PLUS)
  216. return ATH9K_MODE_11NA_HT40PLUS;
  217. else if (chan->chanmode == CHANNEL_A_HT40MINUS)
  218. return ATH9K_MODE_11NA_HT40MINUS;
  219. else if (chan->chanmode == CHANNEL_G_HT40PLUS)
  220. return ATH9K_MODE_11NG_HT40PLUS;
  221. else if (chan->chanmode == CHANNEL_G_HT40MINUS)
  222. return ATH9K_MODE_11NG_HT40MINUS;
  223. /* NB: should not get here */
  224. return ATH9K_MODE_11B;
  225. }
  226. /*
  227. * Stop the device, grabbing the top-level lock to protect
  228. * against concurrent entry through ath_init (which can happen
  229. * if another thread does a system call and the thread doing the
  230. * stop is preempted).
  231. */
  232. static int ath_stop(struct ath_softc *sc)
  233. {
  234. struct ath_hal *ah = sc->sc_ah;
  235. DPRINTF(sc, ATH_DBG_CONFIG, "%s: invalid %u\n",
  236. __func__, sc->sc_invalid);
  237. /*
  238. * Shutdown the hardware and driver:
  239. * stop output from above
  240. * reset 802.11 state machine
  241. * (sends station deassoc/deauth frames)
  242. * turn off timers
  243. * disable interrupts
  244. * clear transmit machinery
  245. * clear receive machinery
  246. * turn off the radio
  247. * reclaim beacon resources
  248. *
  249. * Note that some of this work is not possible if the
  250. * hardware is gone (invalid).
  251. */
  252. if (!sc->sc_invalid)
  253. ath9k_hw_set_interrupts(ah, 0);
  254. ath_draintxq(sc, false);
  255. if (!sc->sc_invalid) {
  256. ath_stoprecv(sc);
  257. ath9k_hw_phy_disable(ah);
  258. } else
  259. sc->sc_rxlink = NULL;
  260. return 0;
  261. }
  262. /*
  263. * Start Scan
  264. *
  265. * This function is called when starting a channel scan. It will perform
  266. * power save wakeup processing, set the filter for the scan, and get the
  267. * chip ready to send broadcast packets out during the scan.
  268. */
  269. void ath_scan_start(struct ath_softc *sc)
  270. {
  271. struct ath_hal *ah = sc->sc_ah;
  272. u32 rfilt;
  273. u32 now = (u32) jiffies_to_msecs(get_timestamp());
  274. sc->sc_scanning = 1;
  275. rfilt = ath_calcrxfilter(sc);
  276. ath9k_hw_setrxfilter(ah, rfilt);
  277. ath9k_hw_write_associd(ah, ath_bcast_mac, 0);
  278. /* Restore previous power management state. */
  279. DPRINTF(sc, ATH_DBG_CONFIG, "%d.%03d | %s: RX filter 0x%x aid 0\n",
  280. now / 1000, now % 1000, __func__, rfilt);
  281. }
  282. /*
  283. * Scan End
  284. *
  285. * This routine is called by the upper layer when the scan is completed. This
  286. * will set the filters back to normal operating mode, set the BSSID to the
  287. * correct value, and restore the power save state.
  288. */
  289. void ath_scan_end(struct ath_softc *sc)
  290. {
  291. struct ath_hal *ah = sc->sc_ah;
  292. u32 rfilt;
  293. u32 now = (u32) jiffies_to_msecs(get_timestamp());
  294. sc->sc_scanning = 0;
  295. /* Request for a full reset due to rx packet filter changes */
  296. sc->sc_full_reset = 1;
  297. rfilt = ath_calcrxfilter(sc);
  298. ath9k_hw_setrxfilter(ah, rfilt);
  299. ath9k_hw_write_associd(ah, sc->sc_curbssid, sc->sc_curaid);
  300. DPRINTF(sc, ATH_DBG_CONFIG, "%d.%03d | %s: RX filter 0x%x aid 0x%x\n",
  301. now / 1000, now % 1000, __func__, rfilt, sc->sc_curaid);
  302. }
/*
 * Set the current channel
 *
 * Set/change channels. If the channel is really being changed, it's done
 * by resetting the chip. To accomplish this we must first clean up any
 * pending DMA, then restart things after the manner of ath_init.
 */
int ath_set_channel(struct ath_softc *sc, struct ath9k_channel *hchan)
{
	struct ath_hal *ah = sc->sc_ah;
	bool fastcc = true, stopped;
	enum ath9k_ht_macmode ht_macmode;

	if (sc->sc_invalid)	/* if the device is invalid or removed */
		return -EIO;

	DPRINTF(sc, ATH_DBG_CONFIG,
		"%s: %u (%u MHz) -> %u (%u MHz), cflags:%x\n",
		__func__,
		ath9k_hw_mhz2ieee(ah, sc->sc_curchan.channel,
				  sc->sc_curchan.channelFlags),
		sc->sc_curchan.channel,
		ath9k_hw_mhz2ieee(ah, hchan->channel, hchan->channelFlags),
		hchan->channel, hchan->channelFlags);

	ht_macmode = ath_cwm_macmode(sc);

	/* The reset is skipped entirely unless the channel actually changed
	 * or a chainmask update / full reset was explicitly requested. */
	if (hchan->channel != sc->sc_curchan.channel ||
	    hchan->channelFlags != sc->sc_curchan.channelFlags ||
	    sc->sc_update_chainmask || sc->sc_full_reset) {
		int status;
		/*
		 * This is only performed if the channel settings have
		 * actually changed.
		 *
		 * To switch channels clear any pending DMA operations;
		 * wait long enough for the RX fifo to drain, reset the
		 * hardware at the new frequency, and then re-enable
		 * the relevant bits of the h/w.
		 */
		ath9k_hw_set_interrupts(ah, 0);	/* disable interrupts */
		ath_draintxq(sc, false);	/* clear pending tx frames */
		stopped = ath_stoprecv(sc);	/* turn off frame recv */

		/* XXX: do not flush receive queue here. We don't want
		 * to flush data frames already in queue because of
		 * changing channel. */

		/* A fast channel change is only safe when rx stopped
		 * cleanly and no full reset was requested. */
		if (!stopped || sc->sc_full_reset)
			fastcc = false;

		spin_lock_bh(&sc->sc_resetlock);
		if (!ath9k_hw_reset(ah, sc->sc_opmode, hchan,
				    ht_macmode, sc->sc_tx_chainmask,
				    sc->sc_rx_chainmask,
				    sc->sc_ht_extprotspacing,
				    fastcc, &status)) {
			DPRINTF(sc, ATH_DBG_FATAL,
				"%s: unable to reset channel %u (%uMhz) "
				"flags 0x%x hal status %u\n", __func__,
				ath9k_hw_mhz2ieee(ah, hchan->channel,
						  hchan->channelFlags),
				hchan->channel, hchan->channelFlags, status);
			spin_unlock_bh(&sc->sc_resetlock);
			return -EIO;
		}
		spin_unlock_bh(&sc->sc_resetlock);

		/* Reset succeeded: commit the new channel and clear the
		 * one-shot reset/chainmask-update requests. */
		sc->sc_curchan = *hchan;
		sc->sc_update_chainmask = 0;
		sc->sc_full_reset = 0;

		/* Re-enable rx framework */
		if (ath_startrecv(sc) != 0) {
			DPRINTF(sc, ATH_DBG_FATAL,
				"%s: unable to restart recv logic\n",
				__func__);
			return -EIO;
		}

		/*
		 * Change channels and update the h/w rate map
		 * if we're switching; e.g. 11a to 11b/g.
		 */
		ath_setcurmode(sc, ath_chan2mode(hchan));

		ath_update_txpow(sc);	/* update tx power state */

		/*
		 * Re-enable interrupts.
		 */
		ath9k_hw_set_interrupts(ah, sc->sc_imask);
	}
	return 0;
}
  385. /**********************/
  386. /* Chainmask Handling */
  387. /**********************/
  388. static void ath_chainmask_sel_timertimeout(unsigned long data)
  389. {
  390. struct ath_chainmask_sel *cm = (struct ath_chainmask_sel *)data;
  391. cm->switch_allowed = 1;
  392. }
  393. /* Start chainmask select timer */
  394. static void ath_chainmask_sel_timerstart(struct ath_chainmask_sel *cm)
  395. {
  396. cm->switch_allowed = 0;
  397. mod_timer(&cm->timer, ath_chainmask_sel_period);
  398. }
  399. /* Stop chainmask select timer */
  400. static void ath_chainmask_sel_timerstop(struct ath_chainmask_sel *cm)
  401. {
  402. cm->switch_allowed = 0;
  403. del_timer_sync(&cm->timer);
  404. }
  405. static void ath_chainmask_sel_init(struct ath_softc *sc, struct ath_node *an)
  406. {
  407. struct ath_chainmask_sel *cm = &an->an_chainmask_sel;
  408. memzero(cm, sizeof(struct ath_chainmask_sel));
  409. cm->cur_tx_mask = sc->sc_tx_chainmask;
  410. cm->cur_rx_mask = sc->sc_rx_chainmask;
  411. cm->tx_avgrssi = ATH_RSSI_DUMMY_MARKER;
  412. setup_timer(&cm->timer,
  413. ath_chainmask_sel_timertimeout, (unsigned long) cm);
  414. }
  415. int ath_chainmask_sel_logic(struct ath_softc *sc, struct ath_node *an)
  416. {
  417. struct ath_chainmask_sel *cm = &an->an_chainmask_sel;
  418. /*
  419. * Disable auto-swtiching in one of the following if conditions.
  420. * sc_chainmask_auto_sel is used for internal global auto-switching
  421. * enabled/disabled setting
  422. */
  423. if (sc->sc_ah->ah_caps.tx_chainmask != ATH_CHAINMASK_SEL_3X3) {
  424. cm->cur_tx_mask = sc->sc_tx_chainmask;
  425. return cm->cur_tx_mask;
  426. }
  427. if (cm->tx_avgrssi == ATH_RSSI_DUMMY_MARKER)
  428. return cm->cur_tx_mask;
  429. if (cm->switch_allowed) {
  430. /* Switch down from tx 3 to tx 2. */
  431. if (cm->cur_tx_mask == ATH_CHAINMASK_SEL_3X3 &&
  432. ATH_RSSI_OUT(cm->tx_avgrssi) >=
  433. ath_chainmask_sel_down_rssi_thres) {
  434. cm->cur_tx_mask = sc->sc_tx_chainmask;
  435. /* Don't let another switch happen until
  436. * this timer expires */
  437. ath_chainmask_sel_timerstart(cm);
  438. }
  439. /* Switch up from tx 2 to 3. */
  440. else if (cm->cur_tx_mask == sc->sc_tx_chainmask &&
  441. ATH_RSSI_OUT(cm->tx_avgrssi) <=
  442. ath_chainmask_sel_up_rssi_thres) {
  443. cm->cur_tx_mask = ATH_CHAINMASK_SEL_3X3;
  444. /* Don't let another switch happen
  445. * until this timer expires */
  446. ath_chainmask_sel_timerstart(cm);
  447. }
  448. }
  449. return cm->cur_tx_mask;
  450. }
  451. /*
  452. * Update tx/rx chainmask. For legacy association,
  453. * hard code chainmask to 1x1, for 11n association, use
  454. * the chainmask configuration.
  455. */
  456. void ath_update_chainmask(struct ath_softc *sc, int is_ht)
  457. {
  458. sc->sc_update_chainmask = 1;
  459. if (is_ht) {
  460. sc->sc_tx_chainmask = sc->sc_ah->ah_caps.tx_chainmask;
  461. sc->sc_rx_chainmask = sc->sc_ah->ah_caps.rx_chainmask;
  462. } else {
  463. sc->sc_tx_chainmask = 1;
  464. sc->sc_rx_chainmask = 1;
  465. }
  466. DPRINTF(sc, ATH_DBG_CONFIG, "%s: tx chmask: %d, rx chmask: %d\n",
  467. __func__, sc->sc_tx_chainmask, sc->sc_rx_chainmask);
  468. }
  469. /******************/
  470. /* VAP management */
  471. /******************/
  472. /*
  473. * VAP in Listen mode
  474. *
  475. * This routine brings the VAP out of the down state into a "listen" state
  476. * where it waits for association requests. This is used in AP and AdHoc
  477. * modes.
  478. */
int ath_vap_listen(struct ath_softc *sc, int if_id)
{
	struct ath_hal *ah = sc->sc_ah;
	struct ath_vap *avp;
	u32 rfilt = 0;
	DECLARE_MAC_BUF(mac);

	avp = sc->sc_vaps[if_id];
	if (avp == NULL) {
		DPRINTF(sc, ATH_DBG_FATAL, "%s: invalid interface id %u\n",
			__func__, if_id);
		return -EINVAL;
	}

#ifdef CONFIG_SLOW_ANT_DIV
	ath_slow_ant_div_stop(&sc->sc_antdiv);
#endif

	/* update ratectrl about the new state */
	ath_rate_newstate(sc, avp);

	rfilt = ath_calcrxfilter(sc);
	ath9k_hw_setrxfilter(ah, rfilt);

	/* STA/IBSS: listen on the broadcast BSSID while unassociated;
	 * other modes just clear the association id. */
	if (sc->sc_opmode == ATH9K_M_STA || sc->sc_opmode == ATH9K_M_IBSS) {
		memcpy(sc->sc_curbssid, ath_bcast_mac, ETH_ALEN);
		ath9k_hw_write_associd(ah, sc->sc_curbssid, sc->sc_curaid);
	} else
		sc->sc_curaid = 0;

	DPRINTF(sc, ATH_DBG_CONFIG,
		"%s: RX filter 0x%x bssid %s aid 0x%x\n",
		__func__, rfilt, print_mac(mac,
			sc->sc_curbssid), sc->sc_curaid);

	/*
	 * XXXX
	 * Disable BMISS interrupt when we're not associated
	 */
	ath9k_hw_set_interrupts(ah,
		sc->sc_imask & ~(ATH9K_INT_SWBA | ATH9K_INT_BMISS));
	sc->sc_imask &= ~(ATH9K_INT_SWBA | ATH9K_INT_BMISS);

	/* need to reconfigure the beacons when it moves to RUN */
	sc->sc_beacons = 0;

	return 0;
}
  518. int ath_vap_attach(struct ath_softc *sc,
  519. int if_id,
  520. struct ieee80211_vif *if_data,
  521. enum ath9k_opmode opmode)
  522. {
  523. struct ath_vap *avp;
  524. if (if_id >= ATH_BCBUF || sc->sc_vaps[if_id] != NULL) {
  525. DPRINTF(sc, ATH_DBG_FATAL,
  526. "%s: Invalid interface id = %u\n", __func__, if_id);
  527. return -EINVAL;
  528. }
  529. switch (opmode) {
  530. case ATH9K_M_STA:
  531. case ATH9K_M_IBSS:
  532. case ATH9K_M_MONITOR:
  533. break;
  534. case ATH9K_M_HOSTAP:
  535. /* XXX not right, beacon buffer is allocated on RUN trans */
  536. if (list_empty(&sc->sc_bbuf))
  537. return -ENOMEM;
  538. break;
  539. default:
  540. return -EINVAL;
  541. }
  542. /* create ath_vap */
  543. avp = kmalloc(sizeof(struct ath_vap), GFP_KERNEL);
  544. if (avp == NULL)
  545. return -ENOMEM;
  546. memzero(avp, sizeof(struct ath_vap));
  547. avp->av_if_data = if_data;
  548. /* Set the VAP opmode */
  549. avp->av_opmode = opmode;
  550. avp->av_bslot = -1;
  551. INIT_LIST_HEAD(&avp->av_mcastq.axq_q);
  552. INIT_LIST_HEAD(&avp->av_mcastq.axq_acq);
  553. spin_lock_init(&avp->av_mcastq.axq_lock);
  554. ath9k_hw_set_tsfadjust(sc->sc_ah, 1);
  555. sc->sc_vaps[if_id] = avp;
  556. sc->sc_nvaps++;
  557. /* Set the device opmode */
  558. sc->sc_opmode = opmode;
  559. /* default VAP configuration */
  560. avp->av_config.av_fixed_rateset = IEEE80211_FIXED_RATE_NONE;
  561. avp->av_config.av_fixed_retryset = 0x03030303;
  562. return 0;
  563. }
  564. int ath_vap_detach(struct ath_softc *sc, int if_id)
  565. {
  566. struct ath_hal *ah = sc->sc_ah;
  567. struct ath_vap *avp;
  568. avp = sc->sc_vaps[if_id];
  569. if (avp == NULL) {
  570. DPRINTF(sc, ATH_DBG_FATAL, "%s: invalid interface id %u\n",
  571. __func__, if_id);
  572. return -EINVAL;
  573. }
  574. /*
  575. * Quiesce the hardware while we remove the vap. In
  576. * particular we need to reclaim all references to the
  577. * vap state by any frames pending on the tx queues.
  578. *
  579. * XXX can we do this w/o affecting other vap's?
  580. */
  581. ath9k_hw_set_interrupts(ah, 0); /* disable interrupts */
  582. ath_draintxq(sc, false); /* stop xmit side */
  583. ath_stoprecv(sc); /* stop recv side */
  584. ath_flushrecv(sc); /* flush recv queue */
  585. /* Reclaim any pending mcast bufs on the vap. */
  586. ath_tx_draintxq(sc, &avp->av_mcastq, false);
  587. kfree(avp);
  588. sc->sc_vaps[if_id] = NULL;
  589. sc->sc_nvaps--;
  590. return 0;
  591. }
  592. int ath_vap_config(struct ath_softc *sc,
  593. int if_id, struct ath_vap_config *if_config)
  594. {
  595. struct ath_vap *avp;
  596. if (if_id >= ATH_BCBUF) {
  597. DPRINTF(sc, ATH_DBG_FATAL,
  598. "%s: Invalid interface id = %u\n", __func__, if_id);
  599. return -EINVAL;
  600. }
  601. avp = sc->sc_vaps[if_id];
  602. ASSERT(avp != NULL);
  603. if (avp)
  604. memcpy(&avp->av_config, if_config, sizeof(avp->av_config));
  605. return 0;
  606. }
  607. /********/
  608. /* Core */
  609. /********/
/*
 * Bring the device up on the given initial channel: stop any previous
 * state, reset the hardware, start the receive path, and compute (but
 * do not yet enable) the interrupt mask.  Returns 0 or -EIO.
 */
int ath_open(struct ath_softc *sc, struct ath9k_channel *initial_chan)
{
	struct ath_hal *ah = sc->sc_ah;
	int status;
	int error = 0;
	enum ath9k_ht_macmode ht_macmode = ath_cwm_macmode(sc);

	DPRINTF(sc, ATH_DBG_CONFIG, "%s: mode %d\n", __func__, sc->sc_opmode);

	/*
	 * Stop anything previously setup. This is safe
	 * whether this is the first time through or not.
	 */
	ath_stop(sc);

	/* Initialize chanmask selection */
	sc->sc_tx_chainmask = ah->ah_caps.tx_chainmask;
	sc->sc_rx_chainmask = ah->ah_caps.rx_chainmask;

	/* Reset SERDES registers */
	ath9k_hw_configpcipowersave(ah, 0);

	/*
	 * The basic interface to setting the hardware in a good
	 * state is ``reset''. On return the hardware is known to
	 * be powered up and with interrupts disabled. This must
	 * be followed by initialization of the appropriate bits
	 * and then setup of the interrupt mask.
	 */
	sc->sc_curchan = *initial_chan;

	spin_lock_bh(&sc->sc_resetlock);
	if (!ath9k_hw_reset(ah, sc->sc_opmode, &sc->sc_curchan, ht_macmode,
			    sc->sc_tx_chainmask, sc->sc_rx_chainmask,
			    sc->sc_ht_extprotspacing, false, &status)) {
		DPRINTF(sc, ATH_DBG_FATAL,
			"%s: unable to reset hardware; hal status %u "
			"(freq %u flags 0x%x)\n", __func__, status,
			sc->sc_curchan.channel, sc->sc_curchan.channelFlags);
		error = -EIO;
		spin_unlock_bh(&sc->sc_resetlock);
		goto done;
	}
	spin_unlock_bh(&sc->sc_resetlock);

	/*
	 * This is needed only to setup initial state
	 * but it's best done after a reset.
	 */
	ath_update_txpow(sc);

	/*
	 * Setup the hardware after reset:
	 * The receive engine is set going.
	 * Frame transmit is handled entirely
	 * in the frame output path; there's nothing to do
	 * here except setup the interrupt mask.
	 */
	if (ath_startrecv(sc) != 0) {
		DPRINTF(sc, ATH_DBG_FATAL,
			"%s: unable to start recv logic\n", __func__);
		error = -EIO;
		goto done;
	}

	/* Setup our intr mask. */
	sc->sc_imask = ATH9K_INT_RX | ATH9K_INT_TX
		| ATH9K_INT_RXEOL | ATH9K_INT_RXORN
		| ATH9K_INT_FATAL | ATH9K_INT_GLOBAL;

	/* Optional interrupts, gated on hardware capabilities. */
	if (ah->ah_caps.hw_caps & ATH9K_HW_CAP_GTT)
		sc->sc_imask |= ATH9K_INT_GTT;

	if (ah->ah_caps.hw_caps & ATH9K_HW_CAP_HT)
		sc->sc_imask |= ATH9K_INT_CST;

	/*
	 * Enable MIB interrupts when there are hardware phy counters.
	 * Note we only do this (at the moment) for station mode.
	 */
	if (ath9k_hw_phycounters(ah) &&
	    ((sc->sc_opmode == ATH9K_M_STA) ||
	     (sc->sc_opmode == ATH9K_M_IBSS)))
		sc->sc_imask |= ATH9K_INT_MIB;

	/*
	 * Some hardware processes the TIM IE and fires an
	 * interrupt when the TIM bit is set. For hardware
	 * that does, if not overridden by configuration,
	 * enable the TIM interrupt when operating as station.
	 */
	if ((ah->ah_caps.hw_caps & ATH9K_HW_CAP_ENHANCEDPM) &&
	    (sc->sc_opmode == ATH9K_M_STA) &&
	    !sc->sc_config.swBeaconProcess)
		sc->sc_imask |= ATH9K_INT_TIM;

	/*
	 * Don't enable interrupts here as we've not yet built our
	 * vap and node data structures, which will be needed as soon
	 * as we start receiving.
	 */
	ath_setcurmode(sc, ath_chan2mode(initial_chan));

	/* XXX: we must make sure h/w is ready and clear invalid flag
	 * before turning on interrupt. */
	sc->sc_invalid = 0;
done:
	return error;
}
/*
 * Reset the hardware w/o losing operational state. This is
 * basically a more efficient way of doing ath_stop, ath_init,
 * followed by state transitions to the current 802.11
 * operational state. Used to recover from errors rx overrun
 * and to reset the hardware when rf gain settings must be reset.
 */

/*
 * First half of the split reset: quiesce the chip so ath_reset()
 * can safely reprogram it.  Order matters: interrupts are masked
 * first so no new work arrives, then tx is drained, then rx is
 * stopped and its pending queue flushed.
 *
 * @flag: RESET_RETRY_TXQ bit is forwarded to ath_draintxq();
 *        exact drain semantics live in that helper.
 *
 * Always returns 0; counterpart is ath_reset_end().
 */
static int ath_reset_start(struct ath_softc *sc, u32 flag)
{
	struct ath_hal *ah = sc->sc_ah;

	ath9k_hw_set_interrupts(ah, 0);	/* disable interrupts */
	ath_draintxq(sc, flag & RESET_RETRY_TXQ);	/* stop xmit side */
	ath_stoprecv(sc);	/* stop recv side */
	ath_flushrecv(sc);	/* flush recv queue */

	return 0;
}
/*
 * Second half of the split reset: bring rx back up, restore mode,
 * tx power and beacon state, unmask interrupts and re-run the tx
 * queue scheduler.  Counterpart of ath_reset_start().
 *
 * Always returns 0; an rx restart failure is only logged.
 */
static int ath_reset_end(struct ath_softc *sc, u32 flag)
{
	struct ath_hal *ah = sc->sc_ah;

	if (ath_startrecv(sc) != 0)	/* restart recv */
		DPRINTF(sc, ATH_DBG_FATAL,
			"%s: unable to start recv logic\n", __func__);

	/*
	 * We may be doing a reset in response to a request
	 * that changes the channel so update any state that
	 * might change as a result.
	 */
	ath_setcurmode(sc, ath_chan2mode(&sc->sc_curchan));

	ath_update_txpow(sc);	/* update tx power state */

	if (sc->sc_beacons)
		ath_beacon_config(sc, ATH_IF_ID_ANY);	/* restart beacons */

	ath9k_hw_set_interrupts(ah, sc->sc_imask);

	/* Restart the txq: only when the caller asked for the retry
	 * queues (same flag ath_reset_start() passed to the drain). */
	if (flag & RESET_RETRY_TXQ) {
		int i;

		for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) {
			if (ATH_TXQ_SETUP(sc, i)) {
				spin_lock_bh(&sc->sc_txq[i].axq_lock);
				ath_txq_schedule(sc, &sc->sc_txq[i]);
				spin_unlock_bh(&sc->sc_txq[i].axq_lock);
			}
		}
	}

	return 0;
}
  748. int ath_reset(struct ath_softc *sc)
  749. {
  750. struct ath_hal *ah = sc->sc_ah;
  751. int status;
  752. int error = 0;
  753. enum ath9k_ht_macmode ht_macmode = ath_cwm_macmode(sc);
  754. /* NB: indicate channel change so we do a full reset */
  755. spin_lock_bh(&sc->sc_resetlock);
  756. if (!ath9k_hw_reset(ah, sc->sc_opmode, &sc->sc_curchan,
  757. ht_macmode,
  758. sc->sc_tx_chainmask, sc->sc_rx_chainmask,
  759. sc->sc_ht_extprotspacing, false, &status)) {
  760. DPRINTF(sc, ATH_DBG_FATAL,
  761. "%s: unable to reset hardware; hal status %u\n",
  762. __func__, status);
  763. error = -EIO;
  764. }
  765. spin_unlock_bh(&sc->sc_resetlock);
  766. return error;
  767. }
  768. int ath_suspend(struct ath_softc *sc)
  769. {
  770. struct ath_hal *ah = sc->sc_ah;
  771. /* No I/O if device has been surprise removed */
  772. if (sc->sc_invalid)
  773. return -EIO;
  774. /* Shut off the interrupt before setting sc->sc_invalid to '1' */
  775. ath9k_hw_set_interrupts(ah, 0);
  776. /* XXX: we must make sure h/w will not generate any interrupt
  777. * before setting the invalid flag. */
  778. sc->sc_invalid = 1;
  779. /* disable HAL and put h/w to sleep */
  780. ath9k_hw_disable(sc->sc_ah);
  781. ath9k_hw_configpcipowersave(sc->sc_ah, 1);
  782. return 0;
  783. }
/* Interrupt handler. Most of the actual processing is deferred.
 * It's the caller's responsibility to ensure the chip is awake.
 *
 * Returns IRQ_NONE when the interrupt is not ours (device invalid,
 * no pending interrupt, or no enabled status bit set); otherwise
 * records the status in sc_intrstatus and, for most bits, masks
 * everything but SWBA and defers the work to ath9k_tasklet().
 */
irqreturn_t ath_isr(int irq, void *dev)
{
	struct ath_softc *sc = dev;
	struct ath_hal *ah = sc->sc_ah;
	enum ath9k_int status;
	bool sched = false;

	/* NOTE(review): the do { } while (0) wrapper carries no loop
	 * semantics; all early exits below use return directly. */
	do {
		if (sc->sc_invalid) {
			/*
			 * The hardware is not ready/present, don't
			 * touch anything. Note this can happen early
			 * on if the IRQ is shared.
			 */
			return IRQ_NONE;
		}
		if (!ath9k_hw_intrpend(ah)) {	/* shared irq, not for us */
			return IRQ_NONE;
		}
		/*
		 * Figure out the reason(s) for the interrupt. Note
		 * that the hal returns a pseudo-ISR that may include
		 * bits we haven't explicitly enabled so we mask the
		 * value to insure we only process bits we requested.
		 */
		ath9k_hw_getisr(ah, &status);	/* NB: clears ISR too */
		status &= sc->sc_imask;	/* discard unasked-for bits */
		/*
		 * If there are no status bits set, then this interrupt was not
		 * for me (should have been caught above).
		 */
		if (!status)
			return IRQ_NONE;

		/* Stash the raw status for ath9k_tasklet() to act on. */
		sc->sc_intrstatus = status;

		if (status & ATH9K_INT_FATAL) {
			/* need a chip reset */
			sched = true;
		} else if (status & ATH9K_INT_RXORN) {
			/* need a chip reset */
			sched = true;
		} else {
			if (status & ATH9K_INT_SWBA) {
				/* schedule a tasklet for beacon handling */
				tasklet_schedule(&sc->bcon_tasklet);
			}
			if (status & ATH9K_INT_RXEOL) {
				/*
				 * NB: the hardware should re-read the link when
				 * RXE bit is written, but it doesn't work
				 * at least on older hardware revs.
				 */
				sched = true;
			}
			if (status & ATH9K_INT_TXURN)
				/* bump tx trigger level */
				ath9k_hw_updatetxtriglevel(ah, true);
			/* XXX: optimize this */
			if (status & ATH9K_INT_RX)
				sched = true;
			if (status & ATH9K_INT_TX)
				sched = true;
			if (status & ATH9K_INT_BMISS)
				sched = true;
			/* carrier sense timeout */
			if (status & ATH9K_INT_CST)
				sched = true;
			if (status & ATH9K_INT_MIB) {
				/*
				 * Disable interrupts until we service the MIB
				 * interrupt; otherwise it will continue to
				 * fire.
				 */
				ath9k_hw_set_interrupts(ah, 0);
				/*
				 * Let the hal handle the event. We assume
				 * it will clear whatever condition caused
				 * the interrupt.
				 */
				ath9k_hw_procmibevent(ah, &sc->sc_halstats);
				ath9k_hw_set_interrupts(ah, sc->sc_imask);
			}
			if (status & ATH9K_INT_TIM_TIMER) {
				if (!(ah->ah_caps.hw_caps &
				      ATH9K_HW_CAP_AUTOSLEEP)) {
					/* Clear RxAbort bit so that we can
					 * receive frames */
					ath9k_hw_setrxabort(ah, 0);
					sched = true;
				}
			}
		}
	} while (0);

	if (sched) {
		/* turn off every interrupt except SWBA; ath9k_tasklet()
		 * restores sc_imask when it finishes */
		ath9k_hw_set_interrupts(ah, (sc->sc_imask & ATH9K_INT_SWBA));
		tasklet_schedule(&sc->intr_tq);
	}

	return IRQ_HANDLED;
}
  884. /* Deferred interrupt processing */
  885. static void ath9k_tasklet(unsigned long data)
  886. {
  887. struct ath_softc *sc = (struct ath_softc *)data;
  888. u32 status = sc->sc_intrstatus;
  889. if (status & ATH9K_INT_FATAL) {
  890. /* need a chip reset */
  891. ath_internal_reset(sc);
  892. return;
  893. } else {
  894. if (status &
  895. (ATH9K_INT_RX | ATH9K_INT_RXEOL | ATH9K_INT_RXORN)) {
  896. /* XXX: fill me in */
  897. /*
  898. if (status & ATH9K_INT_RXORN) {
  899. }
  900. if (status & ATH9K_INT_RXEOL) {
  901. }
  902. */
  903. spin_lock_bh(&sc->sc_rxflushlock);
  904. ath_rx_tasklet(sc, 0);
  905. spin_unlock_bh(&sc->sc_rxflushlock);
  906. }
  907. /* XXX: optimize this */
  908. if (status & ATH9K_INT_TX)
  909. ath_tx_tasklet(sc);
  910. /* XXX: fill me in */
  911. /*
  912. if (status & ATH9K_INT_BMISS) {
  913. }
  914. if (status & (ATH9K_INT_TIM | ATH9K_INT_DTIMSYNC)) {
  915. if (status & ATH9K_INT_TIM) {
  916. }
  917. if (status & ATH9K_INT_DTIMSYNC) {
  918. }
  919. }
  920. */
  921. }
  922. /* re-enable hardware interrupt */
  923. ath9k_hw_set_interrupts(sc->sc_ah, sc->sc_imask);
  924. }
  925. int ath_init(u16 devid, struct ath_softc *sc)
  926. {
  927. struct ath_hal *ah = NULL;
  928. int status;
  929. int error = 0, i;
  930. int csz = 0;
  931. u32 rd;
  932. /* XXX: hardware will not be ready until ath_open() being called */
  933. sc->sc_invalid = 1;
  934. sc->sc_debug = DBG_DEFAULT;
  935. DPRINTF(sc, ATH_DBG_CONFIG, "%s: devid 0x%x\n", __func__, devid);
  936. /* Initialize tasklet */
  937. tasklet_init(&sc->intr_tq, ath9k_tasklet, (unsigned long)sc);
  938. tasklet_init(&sc->bcon_tasklet, ath9k_beacon_tasklet,
  939. (unsigned long)sc);
  940. /*
  941. * Cache line size is used to size and align various
  942. * structures used to communicate with the hardware.
  943. */
  944. bus_read_cachesize(sc, &csz);
  945. /* XXX assert csz is non-zero */
  946. sc->sc_cachelsz = csz << 2; /* convert to bytes */
  947. spin_lock_init(&sc->sc_resetlock);
  948. ah = ath9k_hw_attach(devid, sc, sc->mem, &status);
  949. if (ah == NULL) {
  950. DPRINTF(sc, ATH_DBG_FATAL,
  951. "%s: unable to attach hardware; HAL status %u\n",
  952. __func__, status);
  953. error = -ENXIO;
  954. goto bad;
  955. }
  956. sc->sc_ah = ah;
  957. /* Get the chipset-specific aggr limit. */
  958. sc->sc_rtsaggrlimit = ah->ah_caps.rts_aggr_limit;
  959. /* Get the hardware key cache size. */
  960. sc->sc_keymax = ah->ah_caps.keycache_size;
  961. if (sc->sc_keymax > ATH_KEYMAX) {
  962. DPRINTF(sc, ATH_DBG_KEYCACHE,
  963. "%s: Warning, using only %u entries in %u key cache\n",
  964. __func__, ATH_KEYMAX, sc->sc_keymax);
  965. sc->sc_keymax = ATH_KEYMAX;
  966. }
  967. /*
  968. * Reset the key cache since some parts do not
  969. * reset the contents on initial power up.
  970. */
  971. for (i = 0; i < sc->sc_keymax; i++)
  972. ath9k_hw_keyreset(ah, (u16) i);
  973. /*
  974. * Mark key cache slots associated with global keys
  975. * as in use. If we knew TKIP was not to be used we
  976. * could leave the +32, +64, and +32+64 slots free.
  977. * XXX only for splitmic.
  978. */
  979. for (i = 0; i < IEEE80211_WEP_NKID; i++) {
  980. set_bit(i, sc->sc_keymap);
  981. set_bit(i + 32, sc->sc_keymap);
  982. set_bit(i + 64, sc->sc_keymap);
  983. set_bit(i + 32 + 64, sc->sc_keymap);
  984. }
  985. /*
  986. * Collect the channel list using the default country
  987. * code and including outdoor channels. The 802.11 layer
  988. * is resposible for filtering this list based on settings
  989. * like the phy mode.
  990. */
  991. rd = ah->ah_currentRD;
  992. error = ath_setup_channels(sc);
  993. if (error)
  994. goto bad;
  995. /* default to STA mode */
  996. sc->sc_opmode = ATH9K_M_MONITOR;
  997. /* Setup rate tables */
  998. ath_setup_rates(sc, IEEE80211_BAND_2GHZ);
  999. ath_setup_rates(sc, IEEE80211_BAND_5GHZ);
  1000. /* NB: setup here so ath_rate_update is happy */
  1001. ath_setcurmode(sc, ATH9K_MODE_11A);
  1002. /*
  1003. * Allocate hardware transmit queues: one queue for
  1004. * beacon frames and one data queue for each QoS
  1005. * priority. Note that the hal handles reseting
  1006. * these queues at the needed time.
  1007. */
  1008. sc->sc_bhalq = ath_beaconq_setup(ah);
  1009. if (sc->sc_bhalq == -1) {
  1010. DPRINTF(sc, ATH_DBG_FATAL,
  1011. "%s: unable to setup a beacon xmit queue\n", __func__);
  1012. error = -EIO;
  1013. goto bad2;
  1014. }
  1015. sc->sc_cabq = ath_txq_setup(sc, ATH9K_TX_QUEUE_CAB, 0);
  1016. if (sc->sc_cabq == NULL) {
  1017. DPRINTF(sc, ATH_DBG_FATAL,
  1018. "%s: unable to setup CAB xmit queue\n", __func__);
  1019. error = -EIO;
  1020. goto bad2;
  1021. }
  1022. sc->sc_config.cabqReadytime = ATH_CABQ_READY_TIME;
  1023. ath_cabq_update(sc);
  1024. for (i = 0; i < ARRAY_SIZE(sc->sc_haltype2q); i++)
  1025. sc->sc_haltype2q[i] = -1;
  1026. /* Setup data queues */
  1027. /* NB: ensure BK queue is the lowest priority h/w queue */
  1028. if (!ath_tx_setup(sc, ATH9K_WME_AC_BK)) {
  1029. DPRINTF(sc, ATH_DBG_FATAL,
  1030. "%s: unable to setup xmit queue for BK traffic\n",
  1031. __func__);
  1032. error = -EIO;
  1033. goto bad2;
  1034. }
  1035. if (!ath_tx_setup(sc, ATH9K_WME_AC_BE)) {
  1036. DPRINTF(sc, ATH_DBG_FATAL,
  1037. "%s: unable to setup xmit queue for BE traffic\n",
  1038. __func__);
  1039. error = -EIO;
  1040. goto bad2;
  1041. }
  1042. if (!ath_tx_setup(sc, ATH9K_WME_AC_VI)) {
  1043. DPRINTF(sc, ATH_DBG_FATAL,
  1044. "%s: unable to setup xmit queue for VI traffic\n",
  1045. __func__);
  1046. error = -EIO;
  1047. goto bad2;
  1048. }
  1049. if (!ath_tx_setup(sc, ATH9K_WME_AC_VO)) {
  1050. DPRINTF(sc, ATH_DBG_FATAL,
  1051. "%s: unable to setup xmit queue for VO traffic\n",
  1052. __func__);
  1053. error = -EIO;
  1054. goto bad2;
  1055. }
  1056. sc->sc_rc = ath_rate_attach(ah);
  1057. if (sc->sc_rc == NULL) {
  1058. error = EIO;
  1059. goto bad2;
  1060. }
  1061. if (ath9k_hw_getcapability(ah, ATH9K_CAP_CIPHER,
  1062. ATH9K_CIPHER_TKIP, NULL)) {
  1063. /*
  1064. * Whether we should enable h/w TKIP MIC.
  1065. * XXX: if we don't support WME TKIP MIC, then we wouldn't
  1066. * report WMM capable, so it's always safe to turn on
  1067. * TKIP MIC in this case.
  1068. */
  1069. ath9k_hw_setcapability(sc->sc_ah, ATH9K_CAP_TKIP_MIC,
  1070. 0, 1, NULL);
  1071. }
  1072. /*
  1073. * Check whether the separate key cache entries
  1074. * are required to handle both tx+rx MIC keys.
  1075. * With split mic keys the number of stations is limited
  1076. * to 27 otherwise 59.
  1077. */
  1078. if (ath9k_hw_getcapability(ah, ATH9K_CAP_CIPHER,
  1079. ATH9K_CIPHER_TKIP, NULL)
  1080. && ath9k_hw_getcapability(ah, ATH9K_CAP_CIPHER,
  1081. ATH9K_CIPHER_MIC, NULL)
  1082. && ath9k_hw_getcapability(ah, ATH9K_CAP_TKIP_SPLIT,
  1083. 0, NULL))
  1084. sc->sc_splitmic = 1;
  1085. /* turn on mcast key search if possible */
  1086. if (!ath9k_hw_getcapability(ah, ATH9K_CAP_MCAST_KEYSRCH, 0, NULL))
  1087. (void)ath9k_hw_setcapability(ah, ATH9K_CAP_MCAST_KEYSRCH, 1,
  1088. 1, NULL);
  1089. sc->sc_config.txpowlimit = ATH_TXPOWER_MAX;
  1090. sc->sc_config.txpowlimit_override = 0;
  1091. /* 11n Capabilities */
  1092. if (ah->ah_caps.hw_caps & ATH9K_HW_CAP_HT) {
  1093. sc->sc_txaggr = 1;
  1094. sc->sc_rxaggr = 1;
  1095. }
  1096. sc->sc_tx_chainmask = ah->ah_caps.tx_chainmask;
  1097. sc->sc_rx_chainmask = ah->ah_caps.rx_chainmask;
  1098. /* Configuration for rx chain detection */
  1099. sc->sc_rxchaindetect_ref = 0;
  1100. sc->sc_rxchaindetect_thresh5GHz = 35;
  1101. sc->sc_rxchaindetect_thresh2GHz = 35;
  1102. sc->sc_rxchaindetect_delta5GHz = 30;
  1103. sc->sc_rxchaindetect_delta2GHz = 30;
  1104. ath9k_hw_setcapability(ah, ATH9K_CAP_DIVERSITY, 1, true, NULL);
  1105. sc->sc_defant = ath9k_hw_getdefantenna(ah);
  1106. ath9k_hw_getmac(ah, sc->sc_myaddr);
  1107. if (ah->ah_caps.hw_caps & ATH9K_HW_CAP_BSSIDMASK) {
  1108. ath9k_hw_getbssidmask(ah, sc->sc_bssidmask);
  1109. ATH_SET_VAP_BSSID_MASK(sc->sc_bssidmask);
  1110. ath9k_hw_setbssidmask(ah, sc->sc_bssidmask);
  1111. }
  1112. sc->sc_slottime = ATH9K_SLOT_TIME_9; /* default to short slot time */
  1113. /* initialize beacon slots */
  1114. for (i = 0; i < ARRAY_SIZE(sc->sc_bslot); i++)
  1115. sc->sc_bslot[i] = ATH_IF_ID_ANY;
  1116. /* save MISC configurations */
  1117. sc->sc_config.swBeaconProcess = 1;
  1118. #ifdef CONFIG_SLOW_ANT_DIV
  1119. /* range is 40 - 255, we use something in the middle */
  1120. ath_slow_ant_div_init(&sc->sc_antdiv, sc, 0x127);
  1121. #endif
  1122. return 0;
  1123. bad2:
  1124. /* cleanup tx queues */
  1125. for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++)
  1126. if (ATH_TXQ_SETUP(sc, i))
  1127. ath_tx_cleanupq(sc, &sc->sc_txq[i]);
  1128. bad:
  1129. if (ah)
  1130. ath9k_hw_detach(ah);
  1131. return error;
  1132. }
/*
 * Tear down everything ath_init() built: stop the device, wake the
 * chip (unless already invalid), detach rate control, free all tx
 * queues and finally detach the HAL.
 */
void ath_deinit(struct ath_softc *sc)
{
	struct ath_hal *ah = sc->sc_ah;
	int i;

	DPRINTF(sc, ATH_DBG_CONFIG, "%s\n", __func__);

	ath_stop(sc);
	/* only touch the chip if it has not been surprise-removed */
	if (!sc->sc_invalid)
		ath9k_hw_setpower(sc->sc_ah, ATH9K_PM_AWAKE);
	ath_rate_detach(sc->sc_rc);
	/* cleanup tx queues */
	for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++)
		if (ATH_TXQ_SETUP(sc, i))
			ath_tx_cleanupq(sc, &sc->sc_txq[i]);
	ath9k_hw_detach(ah);
}
  1148. /*******************/
  1149. /* Node Management */
  1150. /*******************/
  1151. struct ath_node *ath_node_attach(struct ath_softc *sc, u8 *addr, int if_id)
  1152. {
  1153. struct ath_vap *avp;
  1154. struct ath_node *an;
  1155. DECLARE_MAC_BUF(mac);
  1156. avp = sc->sc_vaps[if_id];
  1157. ASSERT(avp != NULL);
  1158. /* mac80211 sta_notify callback is from an IRQ context, so no sleep */
  1159. an = kmalloc(sizeof(struct ath_node), GFP_ATOMIC);
  1160. if (an == NULL)
  1161. return NULL;
  1162. memzero(an, sizeof(*an));
  1163. an->an_sc = sc;
  1164. memcpy(an->an_addr, addr, ETH_ALEN);
  1165. atomic_set(&an->an_refcnt, 1);
  1166. /* set up per-node tx/rx state */
  1167. ath_tx_node_init(sc, an);
  1168. ath_rx_node_init(sc, an);
  1169. ath_chainmask_sel_init(sc, an);
  1170. ath_chainmask_sel_timerstart(&an->an_chainmask_sel);
  1171. list_add(&an->list, &sc->node_list);
  1172. return an;
  1173. }
/*
 * Tear down and free per-station state.  Marks the node CLEAN first
 * so tx/rx cleanup paths see the flag, then removes it from the node
 * list under node_lock and frees it.
 *
 * @bh_flag: forwarded to ath_tx_node_cleanup() — presumably selects
 *           bh-safe locking; TODO confirm against that helper.
 */
void ath_node_detach(struct ath_softc *sc, struct ath_node *an, bool bh_flag)
{
	unsigned long flags;

	DECLARE_MAC_BUF(mac);	/* NOTE(review): unused in this function */

	ath_chainmask_sel_timerstop(&an->an_chainmask_sel);
	an->an_flags |= ATH_NODE_CLEAN;
	ath_tx_node_cleanup(sc, an, bh_flag);
	ath_rx_node_cleanup(sc, an);

	ath_tx_node_free(sc, an);
	ath_rx_node_free(sc, an);

	spin_lock_irqsave(&sc->node_lock, flags);
	list_del(&an->list);
	spin_unlock_irqrestore(&sc->node_lock, flags);

	kfree(an);
}
/* Finds a node and increases the refcnt if found.
 *
 * Returns the node with its refcount incremented (caller must drop it
 * with ath_node_put()), or NULL when no node matches @addr.
 *
 * NOTE(review): the list is walked without sc->node_lock (hence the
 * FIXME) — this can race with attach/detach; verify caller contexts.
 */
struct ath_node *ath_node_get(struct ath_softc *sc, u8 *addr)
{
	struct ath_node *an = NULL, *an_found = NULL;

	if (list_empty(&sc->node_list))	/* FIXME */
		goto out;
	list_for_each_entry(an, &sc->node_list, list) {
		if (!compare_ether_addr(an->an_addr, addr)) {
			atomic_inc(&an->an_refcnt);
			an_found = an;
			break;
		}
	}
out:
	return an_found;
}
/* Decrements the refcnt and if it drops to zero, detach the node.
 *
 * @bh_flag is forwarded unchanged to ath_node_detach().
 */
void ath_node_put(struct ath_softc *sc, struct ath_node *an, bool bh_flag)
{
	if (atomic_dec_and_test(&an->an_refcnt))
		ath_node_detach(sc, an, bh_flag);
}
/* Finds a node, doesn't increment refcnt. Caller must hold sc->node_lock.
 *
 * Returns the matching node or NULL; the returned pointer is only
 * valid while the caller keeps holding node_lock (no refcount taken).
 */
struct ath_node *ath_node_find(struct ath_softc *sc, u8 *addr)
{
	struct ath_node *an = NULL, *an_found = NULL;

	if (list_empty(&sc->node_list))
		return NULL;

	list_for_each_entry(an, &sc->node_list, list)
		if (!compare_ether_addr(an->an_addr, addr)) {
			an_found = an;
			break;
		}

	return an_found;
}
  1224. /*
  1225. * Set up New Node
  1226. *
  1227. * Setup driver-specific state for a newly associated node. This routine
  1228. * really only applies if compression or XR are enabled, there is no code
  1229. * covering any other cases.
  1230. */
  1231. void ath_newassoc(struct ath_softc *sc,
  1232. struct ath_node *an, int isnew, int isuapsd)
  1233. {
  1234. int tidno;
  1235. /* if station reassociates, tear down the aggregation state. */
  1236. if (!isnew) {
  1237. for (tidno = 0; tidno < WME_NUM_TID; tidno++) {
  1238. if (sc->sc_txaggr)
  1239. ath_tx_aggr_teardown(sc, an, tidno);
  1240. if (sc->sc_rxaggr)
  1241. ath_rx_aggr_teardown(sc, an, tidno);
  1242. }
  1243. }
  1244. an->an_flags = 0;
  1245. }
/**************/
/* Encryption */
/**************/

/*
 * Clear hardware key cache entry @keyix; when @freeslot is set the
 * slot is also released in the software keymap for reuse.
 */
void ath_key_reset(struct ath_softc *sc, u16 keyix, int freeslot)
{
	ath9k_hw_keyreset(sc->sc_ah, keyix);
	if (freeslot)
		clear_bit(keyix, sc->sc_keymap);
}
  1255. int ath_keyset(struct ath_softc *sc,
  1256. u16 keyix,
  1257. struct ath9k_keyval *hk,
  1258. const u8 mac[ETH_ALEN])
  1259. {
  1260. bool status;
  1261. status = ath9k_hw_set_keycache_entry(sc->sc_ah,
  1262. keyix, hk, mac, false);
  1263. return status != false;
  1264. }
/***********************/
/* TX Power/Regulatory */
/***********************/

/*
 * Set Transmit power in HAL
 *
 * This routine makes the actual HAL calls to set the new transmit power
 * limit.  Does nothing when the cached value already equals the
 * configured limit.
 */
void ath_update_txpow(struct ath_softc *sc)
{
	struct ath_hal *ah = sc->sc_ah;
	u32 txpow;

	if (sc->sc_curtxpow != sc->sc_config.txpowlimit) {
		ath9k_hw_set_txpowerlimit(ah, sc->sc_config.txpowlimit);
		/* read back in case value is clamped */
		/* NOTE(review): assumes the getcapability call always fills
		 * in txpow; otherwise it is read uninitialized — confirm. */
		ath9k_hw_getcapability(ah, ATH9K_CAP_TXPOW, 1, &txpow);
		sc->sc_curtxpow = txpow;
	}
}
  1285. /* Return the current country and domain information */
  1286. void ath_get_currentCountry(struct ath_softc *sc,
  1287. struct ath9k_country_entry *ctry)
  1288. {
  1289. ath9k_regd_get_current_country(sc->sc_ah, ctry);
  1290. /* If HAL not specific yet, since it is band dependent,
  1291. * use the one we passed in. */
  1292. if (ctry->countryCode == CTRY_DEFAULT) {
  1293. ctry->iso[0] = 0;
  1294. ctry->iso[1] = 0;
  1295. } else if (ctry->iso[0] && ctry->iso[1]) {
  1296. if (!ctry->iso[2]) {
  1297. if (ath_outdoor)
  1298. ctry->iso[2] = 'O';
  1299. else
  1300. ctry->iso[2] = 'I';
  1301. }
  1302. }
  1303. }
  1304. /**************************/
  1305. /* Slow Antenna Diversity */
  1306. /**************************/
  1307. void ath_slow_ant_div_init(struct ath_antdiv *antdiv,
  1308. struct ath_softc *sc,
  1309. int32_t rssitrig)
  1310. {
  1311. int trig;
  1312. /* antdivf_rssitrig can range from 40 - 0xff */
  1313. trig = (rssitrig > 0xff) ? 0xff : rssitrig;
  1314. trig = (rssitrig < 40) ? 40 : rssitrig;
  1315. antdiv->antdiv_sc = sc;
  1316. antdiv->antdivf_rssitrig = trig;
  1317. }
  1318. void ath_slow_ant_div_start(struct ath_antdiv *antdiv,
  1319. u8 num_antcfg,
  1320. const u8 *bssid)
  1321. {
  1322. antdiv->antdiv_num_antcfg =
  1323. num_antcfg < ATH_ANT_DIV_MAX_CFG ?
  1324. num_antcfg : ATH_ANT_DIV_MAX_CFG;
  1325. antdiv->antdiv_state = ATH_ANT_DIV_IDLE;
  1326. antdiv->antdiv_curcfg = 0;
  1327. antdiv->antdiv_bestcfg = 0;
  1328. antdiv->antdiv_laststatetsf = 0;
  1329. memcpy(antdiv->antdiv_bssid, bssid, sizeof(antdiv->antdiv_bssid));
  1330. antdiv->antdiv_start = 1;
  1331. }
/* Disarm slow antenna diversity; ath_slow_ant_div() becomes a no-op. */
void ath_slow_ant_div_stop(struct ath_antdiv *antdiv)
{
	antdiv->antdiv_start = 0;
}
  1336. static int32_t ath_find_max_val(int32_t *val,
  1337. u8 num_val, u8 *max_index)
  1338. {
  1339. u32 MaxVal = *val++;
  1340. u32 cur_index = 0;
  1341. *max_index = 0;
  1342. while (++cur_index < num_val) {
  1343. if (*val > MaxVal) {
  1344. MaxVal = *val;
  1345. *max_index = cur_index;
  1346. }
  1347. val++;
  1348. }
  1349. return MaxVal;
  1350. }
/*
 * Slow antenna diversity state machine, driven by received beacons
 * from our BSS.  In IDLE, when beacon RSSI drops below the trigger
 * for long enough, it starts cycling antenna configurations (SCAN);
 * after sampling every configuration it selects the one with the best
 * recorded beacon RSSI and returns to IDLE.
 *
 * Non-beacon frames, foreign BSSIDs, and calls while diversity is
 * stopped are ignored.
 */
void ath_slow_ant_div(struct ath_antdiv *antdiv,
		      struct ieee80211_hdr *hdr,
		      struct ath_rx_status *rx_stats)
{
	struct ath_softc *sc = antdiv->antdiv_sc;
	struct ath_hal *ah = sc->sc_ah;
	u64 curtsf = 0;
	u8 bestcfg, curcfg = antdiv->antdiv_curcfg;
	__le16 fc = hdr->frame_control;

	/* record beacon RSSI/TSF for the current antenna configuration */
	if (antdiv->antdiv_start && ieee80211_is_beacon(fc)
	    && !compare_ether_addr(hdr->addr3, antdiv->antdiv_bssid)) {
		antdiv->antdiv_lastbrssi[curcfg] = rx_stats->rs_rssi;
		antdiv->antdiv_lastbtsf[curcfg] = ath9k_hw_gettsf64(sc->sc_ah);
		curtsf = antdiv->antdiv_lastbtsf[curcfg];
	} else {
		return;
	}

	switch (antdiv->antdiv_state) {
	case ATH_ANT_DIV_IDLE:
		/* weak beacon and idle long enough: try the next config */
		if ((antdiv->antdiv_lastbrssi[curcfg] <
		     antdiv->antdivf_rssitrig)
		    && ((curtsf - antdiv->antdiv_laststatetsf) >
			ATH_ANT_DIV_MIN_IDLE_US)) {

			curcfg++;
			if (curcfg == antdiv->antdiv_num_antcfg)
				curcfg = 0;

			if (!ath9k_hw_select_antconfig(ah, curcfg)) {
				antdiv->antdiv_bestcfg = antdiv->antdiv_curcfg;
				antdiv->antdiv_curcfg = curcfg;
				antdiv->antdiv_laststatetsf = curtsf;
				antdiv->antdiv_state = ATH_ANT_DIV_SCAN;
			}
		}
		break;

	case ATH_ANT_DIV_SCAN:
		/* dwell on each configuration for at least MIN_SCAN_US */
		if ((curtsf - antdiv->antdiv_laststatetsf) <
		    ATH_ANT_DIV_MIN_SCAN_US)
			break;

		curcfg++;
		if (curcfg == antdiv->antdiv_num_antcfg)
			curcfg = 0;

		if (curcfg == antdiv->antdiv_bestcfg) {
			/* wrapped around: pick the best-RSSI config and idle */
			ath_find_max_val(antdiv->antdiv_lastbrssi,
					 antdiv->antdiv_num_antcfg, &bestcfg);
			if (!ath9k_hw_select_antconfig(ah, bestcfg)) {
				antdiv->antdiv_bestcfg = bestcfg;
				antdiv->antdiv_curcfg = bestcfg;
				antdiv->antdiv_laststatetsf = curtsf;
				antdiv->antdiv_state = ATH_ANT_DIV_IDLE;
			}
		} else {
			if (!ath9k_hw_select_antconfig(ah, curcfg)) {
				antdiv->antdiv_curcfg = curcfg;
				antdiv->antdiv_laststatetsf = curtsf;
				antdiv->antdiv_state = ATH_ANT_DIV_SCAN;
			}
		}

		break;
	}
}
/***********************/
/* Descriptor Handling */
/***********************/

/*
 * Set up DMA descriptors
 *
 * This function will allocate both the DMA descriptor structure, and the
 * buffers it contains. These are used to contain the descriptors used
 * by the system.
 *
 * @dd:    descriptor-DMA bookkeeping to fill in
 * @head:  list that receives the nbuf ath_buf entries
 * @name:  label used in diagnostics
 * @nbuf:  number of buffers
 * @ndesc: descriptors per buffer
 *
 * Returns 0 on success or a negative errno; on failure @dd is zeroed
 * and any consistent memory already allocated is freed.
 */
int ath_descdma_setup(struct ath_softc *sc,
		      struct ath_descdma *dd,
		      struct list_head *head,
		      const char *name,
		      int nbuf,
		      int ndesc)
{
/* virtual descriptor address -> bus address, via the block base */
#define DS2PHYS(_dd, _ds)						\
	((_dd)->dd_desc_paddr + ((caddr_t)(_ds) - (caddr_t)(_dd)->dd_desc))
/* true when a 32-dword fetch starting at _daddr would cross a 4KB page */
#define ATH_DESC_4KB_BOUND_CHECK(_daddr) ((((_daddr) & 0xFFF) > 0xF7F) ? 1 : 0)
#define ATH_DESC_4KB_BOUND_NUM_SKIPPED(_len) ((_len) / 4096)

	struct ath_desc *ds;
	struct ath_buf *bf;
	int i, bsize, error;

	DPRINTF(sc, ATH_DBG_CONFIG, "%s: %s DMA: %u buffers %u desc/buf\n",
		__func__, name, nbuf, ndesc);

	/* ath_desc must be a multiple of DWORDs */
	if ((sizeof(struct ath_desc) % 4) != 0) {
		DPRINTF(sc, ATH_DBG_FATAL, "%s: ath_desc not DWORD aligned\n",
			__func__);
		ASSERT((sizeof(struct ath_desc) % 4) == 0);
		error = -ENOMEM;
		goto fail;
	}

	dd->dd_name = name;
	dd->dd_desc_len = sizeof(struct ath_desc) * nbuf * ndesc;

	/*
	 * Need additional DMA memory because we can't use
	 * descriptors that cross the 4K page boundary. Assume
	 * one skipped descriptor per 4K page.
	 */
	if (!(sc->sc_ah->ah_caps.hw_caps & ATH9K_HW_CAP_4KB_SPLITTRANS)) {
		u32 ndesc_skipped =
			ATH_DESC_4KB_BOUND_NUM_SKIPPED(dd->dd_desc_len);
		u32 dma_len;

		/* each round of padding may itself span more pages */
		while (ndesc_skipped) {
			dma_len = ndesc_skipped * sizeof(struct ath_desc);
			dd->dd_desc_len += dma_len;

			ndesc_skipped = ATH_DESC_4KB_BOUND_NUM_SKIPPED(dma_len);
		};
	}

	/* allocate descriptors */
	dd->dd_desc = pci_alloc_consistent(sc->pdev,
					   dd->dd_desc_len,
					   &dd->dd_desc_paddr);
	if (dd->dd_desc == NULL) {
		error = -ENOMEM;
		goto fail;
	}
	ds = dd->dd_desc;
	DPRINTF(sc, ATH_DBG_CONFIG, "%s: %s DMA map: %p (%u) -> %llx (%u)\n",
		__func__, dd->dd_name, ds, (u32) dd->dd_desc_len,
		ito64(dd->dd_desc_paddr), /*XXX*/(u32) dd->dd_desc_len);

	/* allocate buffers */
	bsize = sizeof(struct ath_buf) * nbuf;
	bf = kmalloc(bsize, GFP_KERNEL);
	if (bf == NULL) {
		error = -ENOMEM;
		goto fail2;
	}
	memzero(bf, bsize);
	dd->dd_bufptr = bf;

	/* carve the descriptor block into nbuf buffers of ndesc each */
	INIT_LIST_HEAD(head);
	for (i = 0; i < nbuf; i++, bf++, ds += ndesc) {
		bf->bf_desc = ds;
		bf->bf_daddr = DS2PHYS(dd, ds);

		if (!(sc->sc_ah->ah_caps.hw_caps &
		      ATH9K_HW_CAP_4KB_SPLITTRANS)) {
			/*
			 * Skip descriptor addresses which can cause 4KB
			 * boundary crossing (addr + length) with a 32 dword
			 * descriptor fetch.
			 */
			while (ATH_DESC_4KB_BOUND_CHECK(bf->bf_daddr)) {
				ASSERT((caddr_t) bf->bf_desc <
				       ((caddr_t) dd->dd_desc +
					dd->dd_desc_len));

				ds += ndesc;
				bf->bf_desc = ds;
				bf->bf_daddr = DS2PHYS(dd, ds);
			}
		}
		list_add_tail(&bf->list, head);
	}
	return 0;
fail2:
	pci_free_consistent(sc->pdev,
			    dd->dd_desc_len, dd->dd_desc, dd->dd_desc_paddr);
fail:
	memzero(dd, sizeof(*dd));
	return error;
#undef ATH_DESC_4KB_BOUND_CHECK
#undef ATH_DESC_4KB_BOUND_NUM_SKIPPED
#undef DS2PHYS
}
/*
 * Cleanup DMA descriptors
 *
 * This function will free the DMA block that was allocated for the descriptor
 * pool. Since this was allocated as one "chunk", it is freed in the same
 * manner.
 *
 * Counterpart of ath_descdma_setup(); also frees the ath_buf array,
 * reinitialises @head and zeroes @dd so a double cleanup is harmless.
 */
void ath_descdma_cleanup(struct ath_softc *sc,
			 struct ath_descdma *dd,
			 struct list_head *head)
{
	/* Free memory associated with descriptors */
	pci_free_consistent(sc->pdev,
			    dd->dd_desc_len, dd->dd_desc, dd->dd_desc_paddr);

	INIT_LIST_HEAD(head);
	kfree(dd->dd_bufptr);
	memzero(dd, sizeof(*dd));
}
/*************/
/* Utilities */
/*************/

/*
 * Full internal reset: quiesce (ath_reset_start), reset the chip
 * (ath_reset), then restore operation and re-enable interrupts
 * (ath_reset_end).  Flag 0 = do not touch the retry tx queues.
 */
void ath_internal_reset(struct ath_softc *sc)
{
	ath_reset_start(sc, 0);
	ath_reset(sc);
	ath_reset_end(sc, 0);
}
  1543. int ath_get_hal_qnum(u16 queue, struct ath_softc *sc)
  1544. {
  1545. int qnum;
  1546. switch (queue) {
  1547. case 0:
  1548. qnum = sc->sc_haltype2q[ATH9K_WME_AC_VO];
  1549. break;
  1550. case 1:
  1551. qnum = sc->sc_haltype2q[ATH9K_WME_AC_VI];
  1552. break;
  1553. case 2:
  1554. qnum = sc->sc_haltype2q[ATH9K_WME_AC_BE];
  1555. break;
  1556. case 3:
  1557. qnum = sc->sc_haltype2q[ATH9K_WME_AC_BK];
  1558. break;
  1559. default:
  1560. qnum = sc->sc_haltype2q[ATH9K_WME_AC_BE];
  1561. break;
  1562. }
  1563. return qnum;
  1564. }
  1565. int ath_get_mac80211_qnum(u32 queue, struct ath_softc *sc)
  1566. {
  1567. int qnum;
  1568. switch (queue) {
  1569. case ATH9K_WME_AC_VO:
  1570. qnum = 0;
  1571. break;
  1572. case ATH9K_WME_AC_VI:
  1573. qnum = 1;
  1574. break;
  1575. case ATH9K_WME_AC_BE:
  1576. qnum = 2;
  1577. break;
  1578. case ATH9K_WME_AC_BK:
  1579. qnum = 3;
  1580. break;
  1581. default:
  1582. qnum = -1;
  1583. break;
  1584. }
  1585. return qnum;
  1586. }
  1587. /*
  1588. * Expand time stamp to TSF
  1589. *
  1590. * Extend 15-bit time stamp from rx descriptor to
  1591. * a full 64-bit TSF using the current h/w TSF.
  1592. */
  1593. u64 ath_extend_tsf(struct ath_softc *sc, u32 rstamp)
  1594. {
  1595. u64 tsf;
  1596. tsf = ath9k_hw_gettsf64(sc->sc_ah);
  1597. if ((tsf & 0x7fff) < rstamp)
  1598. tsf -= 0x8000;
  1599. return (tsf & ~0x7fff) | rstamp;
  1600. }
/*
 * Set Default Antenna
 *
 * Call into the HAL to set the default antenna to use. Not really valid for
 * MIMO technology.
 *
 * Also resets the "other antenna" rx counter so antenna switching
 * statistics restart from the new default.
 */
void ath_setdefantenna(void *context, u32 antenna)
{
	struct ath_softc *sc = (struct ath_softc *)context;
	struct ath_hal *ah = sc->sc_ah;

	/* XXX block beacon interrupts */
	ath9k_hw_setantenna(ah, antenna);
	sc->sc_defant = antenna;
	sc->sc_rxotherant = 0;
}
/*
 * Set Slot Time
 *
 * This will wake up the chip if required, and set the slot time for the
 * frame (maximum transmit time). Slot time is assumed to be already set
 * in the ATH object member sc_slottime
 */
void ath_setslottime(struct ath_softc *sc)
{
	ath9k_hw_setslottime(sc->sc_ah, sc->sc_slottime);
	/* mark the pending slot-time update as completed */
	sc->sc_updateslot = OK;
}