core.c
/*
 * Copyright (c) 2008, Atheros Communications Inc.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include "core.h"
#include "regd.h"

static u32 ath_chainmask_sel_up_rssi_thres =
        ATH_CHAINMASK_SEL_UP_RSSI_THRES;
static u32 ath_chainmask_sel_down_rssi_thres =
        ATH_CHAINMASK_SEL_DOWN_RSSI_THRES;
static u32 ath_chainmask_sel_period =
        ATH_CHAINMASK_SEL_TIMEOUT;

/* return bus cachesize in 4B word units */
static void bus_read_cachesize(struct ath_softc *sc, int *csz)
{
        u8 u8tmp;

        pci_read_config_byte(sc->pdev, PCI_CACHE_LINE_SIZE, (u8 *)&u8tmp);
        *csz = (int)u8tmp;

        /*
         * This check was put in to avoid "unpleasant" consequences if
         * the bootrom has not fully initialized all PCI devices.
         * Sometimes the cache line size register is not set.
         */
        if (*csz == 0)
                *csz = DEFAULT_CACHELINE >> 2;  /* Use the default size */
}
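/*
 * Worked example, assuming DEFAULT_CACHELINE is 32 (bytes): a zeroed
 * PCI_CACHE_LINE_SIZE register yields *csz = 32 >> 2 = 8 words, and
 * ath_init() later converts back to bytes with sc_cachelsz = csz << 2.
 */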
static u8 parse_mpdudensity(u8 mpdudensity)
{
        /*
         * 802.11n D2.0 defined values for "Minimum MPDU Start Spacing":
         *   0 for no restriction
         *   1 for 1/4 us
         *   2 for 1/2 us
         *   3 for 1 us
         *   4 for 2 us
         *   5 for 4 us
         *   6 for 8 us
         *   7 for 16 us
         */
        switch (mpdudensity) {
        case 0:
                return 0;
        case 1:
        case 2:
        case 3:
                /* Our lower layer calculations limit our
                 * precision to 1 microsecond */
                return 1;
        case 4:
                return 2;
        case 5:
                return 4;
        case 6:
                return 8;
        case 7:
                return 16;
        default:
                return 0;
        }
}
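/*
 * Example: a peer advertising ampdu_density == 5 (4 us minimum MPDU
 * start spacing) maps to parse_mpdudensity(5) == 4; ath_node_attach()
 * below stores this result in an->mpdudensity.
 */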
/*
 * Set current operating mode
 */
static void ath_setcurmode(struct ath_softc *sc, enum wireless_mode mode)
{
        sc->sc_curmode = mode;
        /*
         * All protection frames are transmitted at 2 Mb/s for
         * 11g, otherwise at 1 Mb/s.
         * XXX select protection rate index from rate table.
         */
        sc->sc_protrix = (mode == ATH9K_MODE_11G ? 1 : 0);
}

/*
 * Set up rate table (legacy rates)
 */
static void ath_setup_rates(struct ath_softc *sc, enum ieee80211_band band)
{
        struct ath_rate_table *rate_table = NULL;
        struct ieee80211_supported_band *sband;
        struct ieee80211_rate *rate;
        int i, maxrates;

        switch (band) {
        case IEEE80211_BAND_2GHZ:
                rate_table = sc->hw_rate_table[ATH9K_MODE_11G];
                break;
        case IEEE80211_BAND_5GHZ:
                rate_table = sc->hw_rate_table[ATH9K_MODE_11A];
                break;
        default:
                break;
        }

        if (rate_table == NULL)
                return;

        sband = &sc->sbands[band];
        rate = sc->rates[band];

        if (rate_table->rate_cnt > ATH_RATE_MAX)
                maxrates = ATH_RATE_MAX;
        else
                maxrates = rate_table->rate_cnt;

        for (i = 0; i < maxrates; i++) {
                rate[i].bitrate = rate_table->info[i].ratekbps / 100;
                rate[i].hw_value = rate_table->info[i].ratecode;
                sband->n_bitrates++;
                DPRINTF(sc, ATH_DBG_CONFIG,
                        "%s: Rate: %2dMbps, ratecode: %2d\n",
                        __func__,
                        rate[i].bitrate / 10,
                        rate[i].hw_value);
        }
}
/*
 * Set up channel list
 */
static int ath_setup_channels(struct ath_softc *sc)
{
        struct ath_hal *ah = sc->sc_ah;
        int nchan, i, a = 0, b = 0;
        u8 regclassids[ATH_REGCLASSIDS_MAX];
        u32 nregclass = 0;
        struct ieee80211_supported_band *band_2ghz;
        struct ieee80211_supported_band *band_5ghz;
        struct ieee80211_channel *chan_2ghz;
        struct ieee80211_channel *chan_5ghz;
        struct ath9k_channel *c;

        /* Fill in ah->ah_channels */
        if (!ath9k_regd_init_channels(ah, ATH_CHAN_MAX, (u32 *)&nchan,
                                      regclassids, ATH_REGCLASSIDS_MAX,
                                      &nregclass, CTRY_DEFAULT, false, 1)) {
                u32 rd = ah->ah_currentRD;

                DPRINTF(sc, ATH_DBG_FATAL,
                        "%s: unable to collect channel list; "
                        "regdomain likely %u country code %u\n",
                        __func__, rd, CTRY_DEFAULT);
                return -EINVAL;
        }

        band_2ghz = &sc->sbands[IEEE80211_BAND_2GHZ];
        band_5ghz = &sc->sbands[IEEE80211_BAND_5GHZ];
        chan_2ghz = sc->channels[IEEE80211_BAND_2GHZ];
        chan_5ghz = sc->channels[IEEE80211_BAND_5GHZ];

        for (i = 0; i < nchan; i++) {
                c = &ah->ah_channels[i];
                if (IS_CHAN_2GHZ(c)) {
                        chan_2ghz[a].band = IEEE80211_BAND_2GHZ;
                        chan_2ghz[a].center_freq = c->channel;
                        chan_2ghz[a].max_power = c->maxTxPower;

                        if (c->privFlags & CHANNEL_DISALLOW_ADHOC)
                                chan_2ghz[a].flags |= IEEE80211_CHAN_NO_IBSS;
                        if (c->channelFlags & CHANNEL_PASSIVE)
                                chan_2ghz[a].flags |=
                                        IEEE80211_CHAN_PASSIVE_SCAN;

                        band_2ghz->n_channels = ++a;

                        DPRINTF(sc, ATH_DBG_CONFIG,
                                "%s: 2GHz channel: %d, "
                                "channelFlags: 0x%x\n",
                                __func__, c->channel, c->channelFlags);
                } else if (IS_CHAN_5GHZ(c)) {
                        chan_5ghz[b].band = IEEE80211_BAND_5GHZ;
                        chan_5ghz[b].center_freq = c->channel;
                        chan_5ghz[b].max_power = c->maxTxPower;

                        if (c->privFlags & CHANNEL_DISALLOW_ADHOC)
                                chan_5ghz[b].flags |= IEEE80211_CHAN_NO_IBSS;
                        if (c->channelFlags & CHANNEL_PASSIVE)
                                chan_5ghz[b].flags |=
                                        IEEE80211_CHAN_PASSIVE_SCAN;

                        band_5ghz->n_channels = ++b;

                        DPRINTF(sc, ATH_DBG_CONFIG,
                                "%s: 5GHz channel: %d, "
                                "channelFlags: 0x%x\n",
                                __func__, c->channel, c->channelFlags);
                }
        }

        return 0;
}
/*
 * Determine mode from channel flags
 *
 * This routine will provide the enumerated wireless_mode value based
 * on the settings of the channel flags. If no valid set of flags
 * exists, the lowest mode (11b) is selected.
 */
static enum wireless_mode ath_chan2mode(struct ath9k_channel *chan)
{
        if (chan->chanmode == CHANNEL_A)
                return ATH9K_MODE_11A;
        else if (chan->chanmode == CHANNEL_G)
                return ATH9K_MODE_11G;
        else if (chan->chanmode == CHANNEL_B)
                return ATH9K_MODE_11B;
        else if (chan->chanmode == CHANNEL_A_HT20)
                return ATH9K_MODE_11NA_HT20;
        else if (chan->chanmode == CHANNEL_G_HT20)
                return ATH9K_MODE_11NG_HT20;
        else if (chan->chanmode == CHANNEL_A_HT40PLUS)
                return ATH9K_MODE_11NA_HT40PLUS;
        else if (chan->chanmode == CHANNEL_A_HT40MINUS)
                return ATH9K_MODE_11NA_HT40MINUS;
        else if (chan->chanmode == CHANNEL_G_HT40PLUS)
                return ATH9K_MODE_11NG_HT40PLUS;
        else if (chan->chanmode == CHANNEL_G_HT40MINUS)
                return ATH9K_MODE_11NG_HT40MINUS;

        WARN_ON(1); /* should not get here */
        return ATH9K_MODE_11B;
}
/*
 * Set the current channel
 *
 * Set/change channels. If the channel is really being changed, it's done
 * by resetting the chip. To accomplish this we must first cleanup any
 * pending DMA, then restart things as in ath_init.
 */
int ath_set_channel(struct ath_softc *sc, struct ath9k_channel *hchan)
{
        struct ath_hal *ah = sc->sc_ah;
        bool fastcc = true, stopped;

        if (sc->sc_flags & SC_OP_INVALID) /* the device is invalid or removed */
                return -EIO;

        DPRINTF(sc, ATH_DBG_CONFIG,
                "%s: %u (%u MHz) -> %u (%u MHz), cflags:%x\n",
                __func__,
                ath9k_hw_mhz2ieee(ah, sc->sc_ah->ah_curchan->channel,
                                  sc->sc_ah->ah_curchan->channelFlags),
                sc->sc_ah->ah_curchan->channel,
                ath9k_hw_mhz2ieee(ah, hchan->channel, hchan->channelFlags),
                hchan->channel, hchan->channelFlags);

        if (hchan->channel != sc->sc_ah->ah_curchan->channel ||
            hchan->channelFlags != sc->sc_ah->ah_curchan->channelFlags ||
            (sc->sc_flags & SC_OP_CHAINMASK_UPDATE) ||
            (sc->sc_flags & SC_OP_FULL_RESET)) {
                int status;
                /*
                 * This is only performed if the channel settings have
                 * actually changed.
                 *
                 * To switch channels clear any pending DMA operations;
                 * wait long enough for the RX fifo to drain, reset the
                 * hardware at the new frequency, and then re-enable
                 * the relevant bits of the h/w.
                 */
                ath9k_hw_set_interrupts(ah, 0); /* disable interrupts */
                ath_draintxq(sc, false);        /* clear pending tx frames */
                stopped = ath_stoprecv(sc);     /* turn off frame recv */

                /* XXX: do not flush receive queue here. We don't want
                 * to flush data frames already in queue because of
                 * changing channel. */

                if (!stopped || (sc->sc_flags & SC_OP_FULL_RESET))
                        fastcc = false;

                spin_lock_bh(&sc->sc_resetlock);
                if (!ath9k_hw_reset(ah, hchan,
                                    sc->sc_ht_info.tx_chan_width,
                                    sc->sc_tx_chainmask,
                                    sc->sc_rx_chainmask,
                                    sc->sc_ht_extprotspacing,
                                    fastcc, &status)) {
                        DPRINTF(sc, ATH_DBG_FATAL,
                                "%s: unable to reset channel %u (%u MHz) "
                                "flags 0x%x hal status %u\n", __func__,
                                ath9k_hw_mhz2ieee(ah, hchan->channel,
                                                  hchan->channelFlags),
                                hchan->channel, hchan->channelFlags, status);
                        spin_unlock_bh(&sc->sc_resetlock);
                        return -EIO;
                }
                spin_unlock_bh(&sc->sc_resetlock);

                sc->sc_flags &= ~SC_OP_CHAINMASK_UPDATE;
                sc->sc_flags &= ~SC_OP_FULL_RESET;

                /* Re-enable rx framework */
                if (ath_startrecv(sc) != 0) {
                        DPRINTF(sc, ATH_DBG_FATAL,
                                "%s: unable to restart recv logic\n",
                                __func__);
                        return -EIO;
                }

                /*
                 * Change channels and update the h/w rate map
                 * if we're switching; e.g. 11a to 11b/g.
                 */
                ath_setcurmode(sc, ath_chan2mode(hchan));

                ath_update_txpow(sc);   /* update tx power state */

                /*
                 * Re-enable interrupts.
                 */
                ath9k_hw_set_interrupts(ah, sc->sc_imask);
        }
        return 0;
}
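/*
 * Note on fastcc: ath9k_hw_reset() is asked for a fast channel change
 * only when the RX engine stopped cleanly and no full reset was
 * requested; otherwise fastcc is forced to false above and the chip
 * goes through the full reset path.
 */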
/**********************/
/* Chainmask Handling */
/**********************/

static void ath_chainmask_sel_timertimeout(unsigned long data)
{
        struct ath_chainmask_sel *cm = (struct ath_chainmask_sel *)data;
        cm->switch_allowed = 1;
}

/* Start chainmask select timer */
static void ath_chainmask_sel_timerstart(struct ath_chainmask_sel *cm)
{
        cm->switch_allowed = 0;
        /* mod_timer() expects an absolute expiry, so arm the timer
         * one select period from now */
        mod_timer(&cm->timer, jiffies + ath_chainmask_sel_period);
}

/* Stop chainmask select timer */
static void ath_chainmask_sel_timerstop(struct ath_chainmask_sel *cm)
{
        cm->switch_allowed = 0;
        del_timer_sync(&cm->timer);
}

static void ath_chainmask_sel_init(struct ath_softc *sc, struct ath_node *an)
{
        struct ath_chainmask_sel *cm = &an->an_chainmask_sel;

        memset(cm, 0, sizeof(struct ath_chainmask_sel));

        cm->cur_tx_mask = sc->sc_tx_chainmask;
        cm->cur_rx_mask = sc->sc_rx_chainmask;
        cm->tx_avgrssi = ATH_RSSI_DUMMY_MARKER;
        setup_timer(&cm->timer,
                    ath_chainmask_sel_timertimeout, (unsigned long) cm);
}

int ath_chainmask_sel_logic(struct ath_softc *sc, struct ath_node *an)
{
        struct ath_chainmask_sel *cm = &an->an_chainmask_sel;

        /*
         * Disable auto-switching in one of the following if conditions.
         * sc_chainmask_auto_sel is used for internal global auto-switching
         * enabled/disabled setting
         */
        if (sc->sc_ah->ah_caps.tx_chainmask != ATH_CHAINMASK_SEL_3X3) {
                cm->cur_tx_mask = sc->sc_tx_chainmask;
                return cm->cur_tx_mask;
        }

        if (cm->tx_avgrssi == ATH_RSSI_DUMMY_MARKER)
                return cm->cur_tx_mask;

        if (cm->switch_allowed) {
                /* Switch down from tx 3 to tx 2. */
                if (cm->cur_tx_mask == ATH_CHAINMASK_SEL_3X3 &&
                    ATH_RSSI_OUT(cm->tx_avgrssi) >=
                    ath_chainmask_sel_down_rssi_thres) {
                        cm->cur_tx_mask = sc->sc_tx_chainmask;

                        /* Don't let another switch happen until
                         * this timer expires */
                        ath_chainmask_sel_timerstart(cm);
                }
                /* Switch up from tx 2 to 3. */
                else if (cm->cur_tx_mask == sc->sc_tx_chainmask &&
                         ATH_RSSI_OUT(cm->tx_avgrssi) <=
                         ath_chainmask_sel_up_rssi_thres) {
                        cm->cur_tx_mask = ATH_CHAINMASK_SEL_3X3;

                        /* Don't let another switch happen
                         * until this timer expires */
                        ath_chainmask_sel_timerstart(cm);
                }
        }

        return cm->cur_tx_mask;
}
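/*
 * Illustration of the switch hysteresis above, assuming the default
 * thresholds satisfy ATH_CHAINMASK_SEL_UP_RSSI_THRES <
 * ATH_CHAINMASK_SEL_DOWN_RSSI_THRES: a strong link (avg RSSI at or above
 * the down threshold) drops TX from 3x3 to the configured mask, a weak
 * link (avg RSSI at or below the up threshold) raises it back to 3x3,
 * and each switch re-arms the timer so no further switch can happen
 * until ath_chainmask_sel_timertimeout() sets switch_allowed again.
 */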
/*
 * Update tx/rx chainmask. For legacy association,
 * hard code chainmask to 1x1; for 11n association, use
 * the chainmask configuration.
 */
void ath_update_chainmask(struct ath_softc *sc, int is_ht)
{
        sc->sc_flags |= SC_OP_CHAINMASK_UPDATE;
        if (is_ht) {
                sc->sc_tx_chainmask = sc->sc_ah->ah_caps.tx_chainmask;
                sc->sc_rx_chainmask = sc->sc_ah->ah_caps.rx_chainmask;
        } else {
                sc->sc_tx_chainmask = 1;
                sc->sc_rx_chainmask = 1;
        }

        DPRINTF(sc, ATH_DBG_CONFIG, "%s: tx chmask: %d, rx chmask: %d\n",
                __func__, sc->sc_tx_chainmask, sc->sc_rx_chainmask);
}
/*******/
/* ANI */
/*******/

/*
 * This routine performs the periodic noise floor calibration function
 * that is used to adjust and optimize the chip performance. This
 * takes environmental changes (location, temperature) into account.
 * When the task is complete, it reschedules itself at the appropriate
 * interval that was calculated.
 */
static void ath_ani_calibrate(unsigned long data)
{
        struct ath_softc *sc;
        struct ath_hal *ah;
        bool longcal = false;
        bool shortcal = false;
        bool aniflag = false;
        unsigned int timestamp = jiffies_to_msecs(jiffies);
        u32 cal_interval;

        sc = (struct ath_softc *)data;
        ah = sc->sc_ah;

        /*
         * Don't calibrate when we're scanning.
         * We are most likely not on our home channel.
         */
        if (sc->rx_filter & FIF_BCN_PRBRESP_PROMISC)
                return;

        /* Long calibration runs independently of short calibration. */
        if ((timestamp - sc->sc_ani.sc_longcal_timer) >=
            ATH_LONG_CALINTERVAL) {
                longcal = true;
                DPRINTF(sc, ATH_DBG_ANI, "%s: longcal @%lu\n",
                        __func__, jiffies);
                sc->sc_ani.sc_longcal_timer = timestamp;
        }

        /* Short calibration applies only while sc_caldone is false */
        if (!sc->sc_ani.sc_caldone) {
                if ((timestamp - sc->sc_ani.sc_shortcal_timer) >=
                    ATH_SHORT_CALINTERVAL) {
                        shortcal = true;
                        DPRINTF(sc, ATH_DBG_ANI, "%s: shortcal @%lu\n",
                                __func__, jiffies);
                        sc->sc_ani.sc_shortcal_timer = timestamp;
                        sc->sc_ani.sc_resetcal_timer = timestamp;
                }
        } else {
                if ((timestamp - sc->sc_ani.sc_resetcal_timer) >=
                    ATH_RESTART_CALINTERVAL) {
                        ath9k_hw_reset_calvalid(ah, ah->ah_curchan,
                                                &sc->sc_ani.sc_caldone);
                        if (sc->sc_ani.sc_caldone)
                                sc->sc_ani.sc_resetcal_timer = timestamp;
                }
        }

        /* Verify whether we must check ANI */
        if ((timestamp - sc->sc_ani.sc_checkani_timer) >=
            ATH_ANI_POLLINTERVAL) {
                aniflag = true;
                sc->sc_ani.sc_checkani_timer = timestamp;
        }

        /* Skip all processing if there's nothing to do. */
        if (longcal || shortcal || aniflag) {
                /* Call ANI routine if necessary */
                if (aniflag)
                        ath9k_hw_ani_monitor(ah, &sc->sc_halstats,
                                             ah->ah_curchan);

                /* Perform calibration if necessary */
                if (longcal || shortcal) {
                        bool iscaldone = false;

                        if (ath9k_hw_calibrate(ah, ah->ah_curchan,
                                               sc->sc_rx_chainmask, longcal,
                                               &iscaldone)) {
                                if (longcal)
                                        sc->sc_ani.sc_noise_floor =
                                                ath9k_hw_getchan_noise(ah,
                                                        ah->ah_curchan);

                                DPRINTF(sc, ATH_DBG_ANI,
                                        "%s: calibrate chan %u/%x nf: %d\n",
                                        __func__,
                                        ah->ah_curchan->channel,
                                        ah->ah_curchan->channelFlags,
                                        sc->sc_ani.sc_noise_floor);
                        } else {
                                DPRINTF(sc, ATH_DBG_ANY,
                                        "%s: calibrate chan %u/%x failed\n",
                                        __func__,
                                        ah->ah_curchan->channel,
                                        ah->ah_curchan->channelFlags);
                        }
                        sc->sc_ani.sc_caldone = iscaldone;
                }
        }

        /*
         * Set timer interval based on previous results.
         * The interval must be the shortest necessary to satisfy ANI,
         * short calibration and long calibration.
         */
        cal_interval = ATH_ANI_POLLINTERVAL;
        if (!sc->sc_ani.sc_caldone)
                cal_interval = min(cal_interval, (u32)ATH_SHORT_CALINTERVAL);

        mod_timer(&sc->sc_ani.timer, jiffies + msecs_to_jiffies(cal_interval));
}
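/*
 * Rescheduling example, assuming ATH_ANI_POLLINTERVAL is 100 ms and
 * ATH_SHORT_CALINTERVAL is 1000 ms: cal_interval = min(100, 1000) =
 * 100 ms while calibration is pending, and the plain 100 ms poll
 * interval once sc_caldone is true, so the ANI poll interval sets the
 * effective floor for how often this routine runs.
 */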
/********/
/* Core */
/********/

int ath_open(struct ath_softc *sc, struct ath9k_channel *initial_chan)
{
        struct ath_hal *ah = sc->sc_ah;
        int status;
        int error = 0;

        DPRINTF(sc, ATH_DBG_CONFIG, "%s: mode %d\n",
                __func__, sc->sc_ah->ah_opmode);

        /* Reset SERDES registers */
        ath9k_hw_configpcipowersave(ah, 0);

        /*
         * The basic interface to setting the hardware in a good
         * state is ``reset''. On return the hardware is known to
         * be powered up and with interrupts disabled. This must
         * be followed by initialization of the appropriate bits
         * and then setup of the interrupt mask.
         */
        spin_lock_bh(&sc->sc_resetlock);
        if (!ath9k_hw_reset(ah, initial_chan,
                            sc->sc_ht_info.tx_chan_width,
                            sc->sc_tx_chainmask, sc->sc_rx_chainmask,
                            sc->sc_ht_extprotspacing, false, &status)) {
                DPRINTF(sc, ATH_DBG_FATAL,
                        "%s: unable to reset hardware; hal status %u "
                        "(freq %u flags 0x%x)\n", __func__, status,
                        initial_chan->channel, initial_chan->channelFlags);
                error = -EIO;
                spin_unlock_bh(&sc->sc_resetlock);
                goto done;
        }
        spin_unlock_bh(&sc->sc_resetlock);

        /*
         * This is needed only to setup initial state
         * but it's best done after a reset.
         */
        ath_update_txpow(sc);

        /*
         * Setup the hardware after reset:
         * The receive engine is set going.
         * Frame transmit is handled entirely
         * in the frame output path; there's nothing to do
         * here except setup the interrupt mask.
         */
        if (ath_startrecv(sc) != 0) {
                DPRINTF(sc, ATH_DBG_FATAL,
                        "%s: unable to start recv logic\n", __func__);
                error = -EIO;
                goto done;
        }

        /* Setup our intr mask. */
        sc->sc_imask = ATH9K_INT_RX | ATH9K_INT_TX
                | ATH9K_INT_RXEOL | ATH9K_INT_RXORN
                | ATH9K_INT_FATAL | ATH9K_INT_GLOBAL;

        if (ah->ah_caps.hw_caps & ATH9K_HW_CAP_GTT)
                sc->sc_imask |= ATH9K_INT_GTT;

        if (ah->ah_caps.hw_caps & ATH9K_HW_CAP_HT)
                sc->sc_imask |= ATH9K_INT_CST;

        /*
         * Enable MIB interrupts when there are hardware phy counters.
         * Note we only do this (at the moment) for station mode.
         */
        if (ath9k_hw_phycounters(ah) &&
            ((sc->sc_ah->ah_opmode == ATH9K_M_STA) ||
             (sc->sc_ah->ah_opmode == ATH9K_M_IBSS)))
                sc->sc_imask |= ATH9K_INT_MIB;

        /*
         * Some hardware processes the TIM IE and fires an
         * interrupt when the TIM bit is set. For hardware
         * that does, if not overridden by configuration,
         * enable the TIM interrupt when operating as station.
         */
        if ((ah->ah_caps.hw_caps & ATH9K_HW_CAP_ENHANCEDPM) &&
            (sc->sc_ah->ah_opmode == ATH9K_M_STA) &&
            !sc->sc_config.swBeaconProcess)
                sc->sc_imask |= ATH9K_INT_TIM;

        ath_setcurmode(sc, ath_chan2mode(initial_chan));

        sc->sc_flags &= ~SC_OP_INVALID;

        /* Disable BMISS interrupt when we're not associated */
        sc->sc_imask &= ~(ATH9K_INT_SWBA | ATH9K_INT_BMISS);
        ath9k_hw_set_interrupts(sc->sc_ah, sc->sc_imask);

        ieee80211_wake_queues(sc->hw);
done:
        return error;
}
void ath_stop(struct ath_softc *sc)
{
        struct ath_hal *ah = sc->sc_ah;

        DPRINTF(sc, ATH_DBG_CONFIG, "%s: Cleaning up\n", __func__);

        ieee80211_stop_queues(sc->hw);

        /* make sure h/w will not generate any interrupt
         * before setting the invalid flag. */
        ath9k_hw_set_interrupts(ah, 0);

        if (!(sc->sc_flags & SC_OP_INVALID)) {
                ath_draintxq(sc, false);
                ath_stoprecv(sc);
                ath9k_hw_phy_disable(ah);
        } else
                sc->sc_rxlink = NULL;

#if defined(CONFIG_RFKILL) || defined(CONFIG_RFKILL_MODULE)
        if (sc->sc_ah->ah_caps.hw_caps & ATH9K_HW_CAP_RFSILENT)
                cancel_delayed_work_sync(&sc->rf_kill.rfkill_poll);
#endif
        /* disable HAL and put h/w to sleep */
        ath9k_hw_disable(sc->sc_ah);
        ath9k_hw_configpcipowersave(sc->sc_ah, 1);

        sc->sc_flags |= SC_OP_INVALID;
}
int ath_reset(struct ath_softc *sc, bool retry_tx)
{
        struct ath_hal *ah = sc->sc_ah;
        int status;
        int error = 0;

        ath9k_hw_set_interrupts(ah, 0);
        ath_draintxq(sc, retry_tx);
        ath_stoprecv(sc);
        ath_flushrecv(sc);

        /* Reset chip */
        spin_lock_bh(&sc->sc_resetlock);
        if (!ath9k_hw_reset(ah, sc->sc_ah->ah_curchan,
                            sc->sc_ht_info.tx_chan_width,
                            sc->sc_tx_chainmask, sc->sc_rx_chainmask,
                            sc->sc_ht_extprotspacing, false, &status)) {
                DPRINTF(sc, ATH_DBG_FATAL,
                        "%s: unable to reset hardware; hal status %u\n",
                        __func__, status);
                error = -EIO;
        }
        spin_unlock_bh(&sc->sc_resetlock);

        if (ath_startrecv(sc) != 0)
                DPRINTF(sc, ATH_DBG_FATAL,
                        "%s: unable to start recv logic\n", __func__);

        /*
         * We may be doing a reset in response to a request
         * that changes the channel so update any state that
         * might change as a result.
         */
        ath_setcurmode(sc, ath_chan2mode(sc->sc_ah->ah_curchan));

        ath_update_txpow(sc);

        if (sc->sc_flags & SC_OP_BEACONS)
                ath_beacon_config(sc, ATH_IF_ID_ANY);   /* restart beacons */

        ath9k_hw_set_interrupts(ah, sc->sc_imask);

        /* Restart the txq */
        if (retry_tx) {
                int i;
                for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) {
                        if (ATH_TXQ_SETUP(sc, i)) {
                                spin_lock_bh(&sc->sc_txq[i].axq_lock);
                                ath_txq_schedule(sc, &sc->sc_txq[i]);
                                spin_unlock_bh(&sc->sc_txq[i].axq_lock);
                        }
                }
        }

        return error;
}
/* Interrupt handler. Most of the actual processing is deferred.
 * It's the caller's responsibility to ensure the chip is awake. */
irqreturn_t ath_isr(int irq, void *dev)
{
        struct ath_softc *sc = dev;
        struct ath_hal *ah = sc->sc_ah;
        enum ath9k_int status;
        bool sched = false;

        do {
                if (sc->sc_flags & SC_OP_INVALID) {
                        /*
                         * The hardware is not ready/present, don't
                         * touch anything. Note this can happen early
                         * on if the IRQ is shared.
                         */
                        return IRQ_NONE;
                }
                if (!ath9k_hw_intrpend(ah)) {   /* shared irq, not for us */
                        return IRQ_NONE;
                }

                /*
                 * Figure out the reason(s) for the interrupt. Note
                 * that the hal returns a pseudo-ISR that may include
                 * bits we haven't explicitly enabled, so we mask the
                 * value to ensure we only process bits we requested.
                 */
                ath9k_hw_getisr(ah, &status);   /* NB: clears ISR too */

                status &= sc->sc_imask; /* discard unasked-for bits */

                /*
                 * If there are no status bits set, then this interrupt
                 * was not for me (should have been caught above).
                 */
                if (!status)
                        return IRQ_NONE;

                sc->sc_intrstatus = status;

                if (status & ATH9K_INT_FATAL) {
                        /* need a chip reset */
                        sched = true;
                } else if (status & ATH9K_INT_RXORN) {
                        /* need a chip reset */
                        sched = true;
                } else {
                        if (status & ATH9K_INT_SWBA) {
                                /* schedule a tasklet for beacon handling */
                                tasklet_schedule(&sc->bcon_tasklet);
                        }
                        if (status & ATH9K_INT_RXEOL) {
                                /*
                                 * NB: the hardware should re-read the link
                                 * when the RXE bit is written, but it doesn't
                                 * work at least on older hardware revs.
                                 */
                                sched = true;
                        }
                        if (status & ATH9K_INT_TXURN)
                                /* bump tx trigger level */
                                ath9k_hw_updatetxtriglevel(ah, true);
                        /* XXX: optimize this */
                        if (status & ATH9K_INT_RX)
                                sched = true;
                        if (status & ATH9K_INT_TX)
                                sched = true;
                        if (status & ATH9K_INT_BMISS)
                                sched = true;
                        /* carrier sense timeout */
                        if (status & ATH9K_INT_CST)
                                sched = true;
                        if (status & ATH9K_INT_MIB) {
                                /*
                                 * Disable interrupts until we service the MIB
                                 * interrupt; otherwise it will continue to
                                 * fire.
                                 */
                                ath9k_hw_set_interrupts(ah, 0);
                                /*
                                 * Let the hal handle the event. We assume
                                 * it will clear whatever condition caused
                                 * the interrupt.
                                 */
                                ath9k_hw_procmibevent(ah, &sc->sc_halstats);
                                ath9k_hw_set_interrupts(ah, sc->sc_imask);
                        }
                        if (status & ATH9K_INT_TIM_TIMER) {
                                if (!(ah->ah_caps.hw_caps &
                                      ATH9K_HW_CAP_AUTOSLEEP)) {
                                        /* Clear RxAbort bit so that we can
                                         * receive frames */
                                        ath9k_hw_setrxabort(ah, 0);
                                        sched = true;
                                }
                        }
                }
        } while (0);

        if (sched) {
                /* turn off every interrupt except SWBA */
                ath9k_hw_set_interrupts(ah, (sc->sc_imask & ATH9K_INT_SWBA));
                tasklet_schedule(&sc->intr_tq);
        }

        return IRQ_HANDLED;
}
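/*
 * Masking down to (sc_imask & ATH9K_INT_SWBA) above keeps beacon
 * interrupts alive while everything else stays disabled until
 * ath9k_tasklet() below finishes and restores sc_imask.
 */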
/* Deferred interrupt processing */
static void ath9k_tasklet(unsigned long data)
{
        struct ath_softc *sc = (struct ath_softc *)data;
        u32 status = sc->sc_intrstatus;

        if (status & ATH9K_INT_FATAL) {
                /* need a chip reset */
                ath_reset(sc, false);
                return;
        } else {
                if (status &
                    (ATH9K_INT_RX | ATH9K_INT_RXEOL | ATH9K_INT_RXORN)) {
                        /* XXX: fill me in */
                        /*
                        if (status & ATH9K_INT_RXORN) {
                        }
                        if (status & ATH9K_INT_RXEOL) {
                        }
                        */
                        spin_lock_bh(&sc->sc_rxflushlock);
                        ath_rx_tasklet(sc, 0);
                        spin_unlock_bh(&sc->sc_rxflushlock);
                }
                /* XXX: optimize this */
                if (status & ATH9K_INT_TX)
                        ath_tx_tasklet(sc);
                /* XXX: fill me in */
                /*
                if (status & ATH9K_INT_BMISS) {
                }
                if (status & (ATH9K_INT_TIM | ATH9K_INT_DTIMSYNC)) {
                        if (status & ATH9K_INT_TIM) {
                        }
                        if (status & ATH9K_INT_DTIMSYNC) {
                        }
                }
                */
        }

        /* re-enable hardware interrupt */
        ath9k_hw_set_interrupts(sc->sc_ah, sc->sc_imask);
}
int ath_init(u16 devid, struct ath_softc *sc)
{
        struct ath_hal *ah = NULL;
        int status;
        int error = 0, i;
        int csz = 0;

        /* XXX: hardware will not be ready until ath_open() is called */
        sc->sc_flags |= SC_OP_INVALID;

        sc->sc_debug = DBG_DEFAULT;
        spin_lock_init(&sc->sc_resetlock);

        tasklet_init(&sc->intr_tq, ath9k_tasklet, (unsigned long)sc);
        tasklet_init(&sc->bcon_tasklet, ath9k_beacon_tasklet,
                     (unsigned long)sc);

        /*
         * Cache line size is used to size and align various
         * structures used to communicate with the hardware.
         */
        bus_read_cachesize(sc, &csz);
        /* XXX assert csz is non-zero */
        sc->sc_cachelsz = csz << 2;     /* convert to bytes */

        ah = ath9k_hw_attach(devid, sc, sc->mem, &status);
        if (ah == NULL) {
                DPRINTF(sc, ATH_DBG_FATAL,
                        "%s: unable to attach hardware; HAL status %u\n",
                        __func__, status);
                error = -ENXIO;
                goto bad;
        }
        sc->sc_ah = ah;

        /* Get the hardware key cache size. */
        sc->sc_keymax = ah->ah_caps.keycache_size;
        if (sc->sc_keymax > ATH_KEYMAX) {
                DPRINTF(sc, ATH_DBG_KEYCACHE,
                        "%s: Warning, using only %u entries in %u key cache\n",
                        __func__, ATH_KEYMAX, sc->sc_keymax);
                sc->sc_keymax = ATH_KEYMAX;
        }

        /*
         * Reset the key cache since some parts do not
         * reset the contents on initial power up.
         */
        for (i = 0; i < sc->sc_keymax; i++)
                ath9k_hw_keyreset(ah, (u16) i);
        /*
         * Mark key cache slots associated with global keys
         * as in use. If we knew TKIP was not to be used we
         * could leave the +32, +64, and +32+64 slots free.
         * XXX only for splitmic.
         */
        for (i = 0; i < IEEE80211_WEP_NKID; i++) {
                set_bit(i, sc->sc_keymap);
                set_bit(i + 32, sc->sc_keymap);
                set_bit(i + 64, sc->sc_keymap);
                set_bit(i + 32 + 64, sc->sc_keymap);
        }

        /* Collect the channel list using the default country code */
        error = ath_setup_channels(sc);
        if (error)
                goto bad;

        /* default to MONITOR mode */
        sc->sc_ah->ah_opmode = ATH9K_M_MONITOR;

        /* Setup rate tables */
        ath_rate_attach(sc);
        ath_setup_rates(sc, IEEE80211_BAND_2GHZ);
        ath_setup_rates(sc, IEEE80211_BAND_5GHZ);

        /*
         * Allocate hardware transmit queues: one queue for
         * beacon frames and one data queue for each QoS
         * priority. Note that the hal handles resetting
         * these queues at the needed time.
         */
        sc->sc_bhalq = ath_beaconq_setup(ah);
        if (sc->sc_bhalq == -1) {
                DPRINTF(sc, ATH_DBG_FATAL,
                        "%s: unable to setup a beacon xmit queue\n",
                        __func__);
                error = -EIO;
                goto bad2;
        }
        sc->sc_cabq = ath_txq_setup(sc, ATH9K_TX_QUEUE_CAB, 0);
        if (sc->sc_cabq == NULL) {
                DPRINTF(sc, ATH_DBG_FATAL,
                        "%s: unable to setup CAB xmit queue\n", __func__);
                error = -EIO;
                goto bad2;
        }

        sc->sc_config.cabqReadytime = ATH_CABQ_READY_TIME;
        ath_cabq_update(sc);

        for (i = 0; i < ARRAY_SIZE(sc->sc_haltype2q); i++)
                sc->sc_haltype2q[i] = -1;

        /* Setup data queues */
        /* NB: ensure BK queue is the lowest priority h/w queue */
        if (!ath_tx_setup(sc, ATH9K_WME_AC_BK)) {
                DPRINTF(sc, ATH_DBG_FATAL,
                        "%s: unable to setup xmit queue for BK traffic\n",
                        __func__);
                error = -EIO;
                goto bad2;
        }

        if (!ath_tx_setup(sc, ATH9K_WME_AC_BE)) {
                DPRINTF(sc, ATH_DBG_FATAL,
                        "%s: unable to setup xmit queue for BE traffic\n",
                        __func__);
                error = -EIO;
                goto bad2;
        }
        if (!ath_tx_setup(sc, ATH9K_WME_AC_VI)) {
                DPRINTF(sc, ATH_DBG_FATAL,
                        "%s: unable to setup xmit queue for VI traffic\n",
                        __func__);
                error = -EIO;
                goto bad2;
        }
        if (!ath_tx_setup(sc, ATH9K_WME_AC_VO)) {
                DPRINTF(sc, ATH_DBG_FATAL,
                        "%s: unable to setup xmit queue for VO traffic\n",
                        __func__);
                error = -EIO;
                goto bad2;
        }

        /* Initialize the noise floor to a reasonable default value.
         * Later on this will be updated during ANI processing. */
        sc->sc_ani.sc_noise_floor = ATH_DEFAULT_NOISE_FLOOR;
        setup_timer(&sc->sc_ani.timer, ath_ani_calibrate, (unsigned long)sc);

        if (ath9k_hw_getcapability(ah, ATH9K_CAP_CIPHER,
                                   ATH9K_CIPHER_TKIP, NULL)) {
                /*
                 * Whether we should enable h/w TKIP MIC.
                 * XXX: if we don't support WME TKIP MIC, then we wouldn't
                 * report WMM capable, so it's always safe to turn on
                 * TKIP MIC in this case.
                 */
                ath9k_hw_setcapability(sc->sc_ah, ATH9K_CAP_TKIP_MIC,
                                       0, 1, NULL);
        }

        /*
         * Check whether separate key cache entries
         * are required to handle both tx+rx MIC keys.
         * With split mic keys the number of stations is limited
         * to 27, otherwise 59.
         */
        if (ath9k_hw_getcapability(ah, ATH9K_CAP_CIPHER,
                                   ATH9K_CIPHER_TKIP, NULL)
            && ath9k_hw_getcapability(ah, ATH9K_CAP_CIPHER,
                                      ATH9K_CIPHER_MIC, NULL)
            && ath9k_hw_getcapability(ah, ATH9K_CAP_TKIP_SPLIT,
                                      0, NULL))
                sc->sc_splitmic = 1;

        /* turn on mcast key search if possible */
        if (!ath9k_hw_getcapability(ah, ATH9K_CAP_MCAST_KEYSRCH, 0, NULL))
                (void)ath9k_hw_setcapability(ah, ATH9K_CAP_MCAST_KEYSRCH, 1,
                                             1, NULL);

        sc->sc_config.txpowlimit = ATH_TXPOWER_MAX;
        sc->sc_config.txpowlimit_override = 0;

        /* 11n Capabilities */
        if (ah->ah_caps.hw_caps & ATH9K_HW_CAP_HT) {
                sc->sc_flags |= SC_OP_TXAGGR;
                sc->sc_flags |= SC_OP_RXAGGR;
        }

        sc->sc_tx_chainmask = ah->ah_caps.tx_chainmask;
        sc->sc_rx_chainmask = ah->ah_caps.rx_chainmask;

        ath9k_hw_setcapability(ah, ATH9K_CAP_DIVERSITY, 1, true, NULL);
        sc->sc_defant = ath9k_hw_getdefantenna(ah);

        ath9k_hw_getmac(ah, sc->sc_myaddr);
        if (ah->ah_caps.hw_caps & ATH9K_HW_CAP_BSSIDMASK) {
                ath9k_hw_getbssidmask(ah, sc->sc_bssidmask);
                ATH_SET_VAP_BSSID_MASK(sc->sc_bssidmask);
                ath9k_hw_setbssidmask(ah, sc->sc_bssidmask);
        }

        sc->sc_slottime = ATH9K_SLOT_TIME_9;    /* default to short slot time */

        /* initialize beacon slots */
        for (i = 0; i < ARRAY_SIZE(sc->sc_bslot); i++)
                sc->sc_bslot[i] = ATH_IF_ID_ANY;

        /* save MISC configurations */
        sc->sc_config.swBeaconProcess = 1;

#ifdef CONFIG_SLOW_ANT_DIV
        /* range is 40 - 255, we use something in the middle */
        ath_slow_ant_div_init(&sc->sc_antdiv, sc, 0x127);
#endif

        /* setup channels and rates */
        sc->sbands[IEEE80211_BAND_2GHZ].channels =
                sc->channels[IEEE80211_BAND_2GHZ];
        sc->sbands[IEEE80211_BAND_2GHZ].bitrates =
                sc->rates[IEEE80211_BAND_2GHZ];
        sc->sbands[IEEE80211_BAND_2GHZ].band = IEEE80211_BAND_2GHZ;

        if (test_bit(ATH9K_MODE_11A, sc->sc_ah->ah_caps.wireless_modes)) {
                sc->sbands[IEEE80211_BAND_5GHZ].channels =
                        sc->channels[IEEE80211_BAND_5GHZ];
                sc->sbands[IEEE80211_BAND_5GHZ].bitrates =
                        sc->rates[IEEE80211_BAND_5GHZ];
                sc->sbands[IEEE80211_BAND_5GHZ].band = IEEE80211_BAND_5GHZ;
        }

        return 0;
bad2:
        /* cleanup tx queues */
        for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++)
                if (ATH_TXQ_SETUP(sc, i))
                        ath_tx_cleanupq(sc, &sc->sc_txq[i]);
bad:
        if (ah)
                ath9k_hw_detach(ah);

        return error;
}
/*******************/
/* Node Management */
/*******************/

void ath_node_attach(struct ath_softc *sc, struct ieee80211_sta *sta)
{
        struct ath_node *an;

        an = (struct ath_node *)sta->drv_priv;

        if (sc->sc_flags & SC_OP_TXAGGR)
                ath_tx_node_init(sc, an);

        an->maxampdu = 1 << (IEEE80211_HTCAP_MAXRXAMPDU_FACTOR +
                             sta->ht_cap.ampdu_factor);
        an->mpdudensity = parse_mpdudensity(sta->ht_cap.ampdu_density);

        ath_chainmask_sel_init(sc, an);
        ath_chainmask_sel_timerstart(&an->an_chainmask_sel);
}

void ath_node_detach(struct ath_softc *sc, struct ieee80211_sta *sta)
{
        struct ath_node *an = (struct ath_node *)sta->drv_priv;

        ath_chainmask_sel_timerstop(&an->an_chainmask_sel);

        if (sc->sc_flags & SC_OP_TXAGGR)
                ath_tx_node_cleanup(sc, an);
}
/*
 * Set up New Node
 *
 * Setup driver-specific state for a newly associated node. This routine
 * really only applies if compression or XR are enabled; there is no code
 * covering any other cases.
 */
void ath_newassoc(struct ath_softc *sc,
                  struct ath_node *an, int isnew, int isuapsd)
{
        int tidno;

        /* if station reassociates, tear down the aggregation state. */
        if (!isnew) {
                for (tidno = 0; tidno < WME_NUM_TID; tidno++) {
                        if (sc->sc_flags & SC_OP_TXAGGR)
                                ath_tx_aggr_teardown(sc, an, tidno);
                }
        }
}
/**************/
/* Encryption */
/**************/

void ath_key_reset(struct ath_softc *sc, u16 keyix, int freeslot)
{
        ath9k_hw_keyreset(sc->sc_ah, keyix);
        if (freeslot)
                clear_bit(keyix, sc->sc_keymap);
}

int ath_keyset(struct ath_softc *sc,
               u16 keyix,
               struct ath9k_keyval *hk,
               const u8 mac[ETH_ALEN])
{
        bool status;

        status = ath9k_hw_set_keycache_entry(sc->sc_ah,
                                             keyix, hk, mac, false);

        return status != false;
}
/***********************/
/* TX Power/Regulatory */
/***********************/

/*
 * Set Transmit power in HAL
 *
 * This routine makes the actual HAL calls to set the new transmit power
 * limit.
 */
void ath_update_txpow(struct ath_softc *sc)
{
        struct ath_hal *ah = sc->sc_ah;
        u32 txpow;

        if (sc->sc_curtxpow != sc->sc_config.txpowlimit) {
                ath9k_hw_set_txpowerlimit(ah, sc->sc_config.txpowlimit);
                /* read back in case value is clamped */
                ath9k_hw_getcapability(ah, ATH9K_CAP_TXPOW, 1, &txpow);
                sc->sc_curtxpow = txpow;
        }
}
/**************************/
/* Slow Antenna Diversity */
/**************************/

void ath_slow_ant_div_init(struct ath_antdiv *antdiv,
                           struct ath_softc *sc,
                           int32_t rssitrig)
{
        int trig;

        /* antdivf_rssitrig can range from 40 - 0xff */
        trig = (rssitrig > 0xff) ? 0xff : rssitrig;
        trig = (trig < 40) ? 40 : trig;

        antdiv->antdiv_sc = sc;
        antdiv->antdivf_rssitrig = trig;
}

void ath_slow_ant_div_start(struct ath_antdiv *antdiv,
                            u8 num_antcfg,
                            const u8 *bssid)
{
        antdiv->antdiv_num_antcfg =
                num_antcfg < ATH_ANT_DIV_MAX_CFG ?
                num_antcfg : ATH_ANT_DIV_MAX_CFG;
        antdiv->antdiv_state = ATH_ANT_DIV_IDLE;
        antdiv->antdiv_curcfg = 0;
        antdiv->antdiv_bestcfg = 0;
        antdiv->antdiv_laststatetsf = 0;

        memcpy(antdiv->antdiv_bssid, bssid, sizeof(antdiv->antdiv_bssid));

        antdiv->antdiv_start = 1;
}

void ath_slow_ant_div_stop(struct ath_antdiv *antdiv)
{
        antdiv->antdiv_start = 0;
}
static int32_t ath_find_max_val(int32_t *val,
                                u8 num_val, u8 *max_index)
{
        int32_t MaxVal = *val++;
        u32 cur_index = 0;

        *max_index = 0;
        while (++cur_index < num_val) {
                if (*val > MaxVal) {
                        MaxVal = *val;
                        *max_index = cur_index;
                }
                val++;
        }

        return MaxVal;
}
void ath_slow_ant_div(struct ath_antdiv *antdiv,
                      struct ieee80211_hdr *hdr,
                      struct ath_rx_status *rx_stats)
{
        struct ath_softc *sc = antdiv->antdiv_sc;
        struct ath_hal *ah = sc->sc_ah;
        u64 curtsf = 0;
        u8 bestcfg, curcfg = antdiv->antdiv_curcfg;
        __le16 fc = hdr->frame_control;

        if (antdiv->antdiv_start && ieee80211_is_beacon(fc)
            && !compare_ether_addr(hdr->addr3, antdiv->antdiv_bssid)) {
                antdiv->antdiv_lastbrssi[curcfg] = rx_stats->rs_rssi;
                antdiv->antdiv_lastbtsf[curcfg] =
                        ath9k_hw_gettsf64(sc->sc_ah);
                curtsf = antdiv->antdiv_lastbtsf[curcfg];
        } else {
                return;
        }

        switch (antdiv->antdiv_state) {
        case ATH_ANT_DIV_IDLE:
                if ((antdiv->antdiv_lastbrssi[curcfg] <
                     antdiv->antdivf_rssitrig)
                    && ((curtsf - antdiv->antdiv_laststatetsf) >
                        ATH_ANT_DIV_MIN_IDLE_US)) {
                        curcfg++;
                        if (curcfg == antdiv->antdiv_num_antcfg)
                                curcfg = 0;

                        if (!ath9k_hw_select_antconfig(ah, curcfg)) {
                                antdiv->antdiv_bestcfg =
                                        antdiv->antdiv_curcfg;
                                antdiv->antdiv_curcfg = curcfg;
                                antdiv->antdiv_laststatetsf = curtsf;
                                antdiv->antdiv_state = ATH_ANT_DIV_SCAN;
                        }
                }
                break;

        case ATH_ANT_DIV_SCAN:
                if ((curtsf - antdiv->antdiv_laststatetsf) <
                    ATH_ANT_DIV_MIN_SCAN_US)
                        break;

                curcfg++;
                if (curcfg == antdiv->antdiv_num_antcfg)
                        curcfg = 0;

                if (curcfg == antdiv->antdiv_bestcfg) {
                        ath_find_max_val(antdiv->antdiv_lastbrssi,
                                         antdiv->antdiv_num_antcfg,
                                         &bestcfg);
                        if (!ath9k_hw_select_antconfig(ah, bestcfg)) {
                                antdiv->antdiv_bestcfg = bestcfg;
                                antdiv->antdiv_curcfg = bestcfg;
                                antdiv->antdiv_laststatetsf = curtsf;
                                antdiv->antdiv_state = ATH_ANT_DIV_IDLE;
                        }
                } else {
                        if (!ath9k_hw_select_antconfig(ah, curcfg)) {
                                antdiv->antdiv_curcfg = curcfg;
                                antdiv->antdiv_laststatetsf = curtsf;
                                antdiv->antdiv_state = ATH_ANT_DIV_SCAN;
                        }
                }
                break;
        }
}
/***********************/
/* Descriptor Handling */
/***********************/

/*
 * Set up DMA descriptors
 *
 * This function will allocate both the DMA descriptor structure and the
 * buffers it contains. These are used to contain the descriptors used
 * by the system.
 */
int ath_descdma_setup(struct ath_softc *sc,
                      struct ath_descdma *dd,
                      struct list_head *head,
                      const char *name,
                      int nbuf,
                      int ndesc)
{
#define DS2PHYS(_dd, _ds)                                               \
        ((_dd)->dd_desc_paddr + ((caddr_t)(_ds) - (caddr_t)(_dd)->dd_desc))
#define ATH_DESC_4KB_BOUND_CHECK(_daddr) ((((_daddr) & 0xFFF) > 0xF7F) ? 1 : 0)
#define ATH_DESC_4KB_BOUND_NUM_SKIPPED(_len) ((_len) / 4096)

        struct ath_desc *ds;
        struct ath_buf *bf;
        int i, bsize, error;

        DPRINTF(sc, ATH_DBG_CONFIG, "%s: %s DMA: %u buffers %u desc/buf\n",
                __func__, name, nbuf, ndesc);

        /* ath_desc must be a multiple of DWORDs */
        if ((sizeof(struct ath_desc) % 4) != 0) {
                DPRINTF(sc, ATH_DBG_FATAL,
                        "%s: ath_desc not DWORD aligned\n", __func__);
                ASSERT((sizeof(struct ath_desc) % 4) == 0);
                error = -ENOMEM;
                goto fail;
        }

        dd->dd_name = name;
        dd->dd_desc_len = sizeof(struct ath_desc) * nbuf * ndesc;

        /*
         * Need additional DMA memory because we can't use
         * descriptors that cross the 4K page boundary. Assume
         * one skipped descriptor per 4K page.
         */
        if (!(sc->sc_ah->ah_caps.hw_caps & ATH9K_HW_CAP_4KB_SPLITTRANS)) {
                u32 ndesc_skipped =
                        ATH_DESC_4KB_BOUND_NUM_SKIPPED(dd->dd_desc_len);
                u32 dma_len;

                while (ndesc_skipped) {
                        dma_len = ndesc_skipped * sizeof(struct ath_desc);
                        dd->dd_desc_len += dma_len;

                        ndesc_skipped =
                                ATH_DESC_4KB_BOUND_NUM_SKIPPED(dma_len);
                }
        }

        /* allocate descriptors */
        dd->dd_desc = pci_alloc_consistent(sc->pdev,
                                           dd->dd_desc_len,
                                           &dd->dd_desc_paddr);
        if (dd->dd_desc == NULL) {
                error = -ENOMEM;
                goto fail;
        }
        ds = dd->dd_desc;
        DPRINTF(sc, ATH_DBG_CONFIG, "%s: %s DMA map: %p (%u) -> %llx (%u)\n",
                __func__, dd->dd_name, ds, (u32) dd->dd_desc_len,
                ito64(dd->dd_desc_paddr), /*XXX*/(u32) dd->dd_desc_len);

        /* allocate buffers */
        bsize = sizeof(struct ath_buf) * nbuf;
        bf = kmalloc(bsize, GFP_KERNEL);
        if (bf == NULL) {
                error = -ENOMEM;
                goto fail2;
        }
        memset(bf, 0, bsize);
        dd->dd_bufptr = bf;

        INIT_LIST_HEAD(head);
        for (i = 0; i < nbuf; i++, bf++, ds += ndesc) {
                bf->bf_desc = ds;
                bf->bf_daddr = DS2PHYS(dd, ds);

                if (!(sc->sc_ah->ah_caps.hw_caps &
                      ATH9K_HW_CAP_4KB_SPLITTRANS)) {
                        /*
                         * Skip descriptor addresses which can cause 4KB
                         * boundary crossing (addr + length) with a 32 dword
                         * descriptor fetch.
                         */
                        while (ATH_DESC_4KB_BOUND_CHECK(bf->bf_daddr)) {
                                ASSERT((caddr_t) bf->bf_desc <
                                       ((caddr_t) dd->dd_desc +
                                        dd->dd_desc_len));

                                ds += ndesc;
                                bf->bf_desc = ds;
                                bf->bf_daddr = DS2PHYS(dd, ds);
                        }
                }
                list_add_tail(&bf->list, head);
        }
        return 0;
fail2:
        pci_free_consistent(sc->pdev,
                            dd->dd_desc_len, dd->dd_desc, dd->dd_desc_paddr);
fail:
        memset(dd, 0, sizeof(*dd));
        return error;
#undef ATH_DESC_4KB_BOUND_CHECK
#undef ATH_DESC_4KB_BOUND_NUM_SKIPPED
#undef DS2PHYS
}
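/*
 * Boundary-check example: a descriptor whose bus address ends in 0xFC0
 * would, with a 32-dword (128-byte) fetch, span 0xFC0..0x103F and cross
 * a 4 KB page. Since (0xFC0 & 0xFFF) > 0xF7F, ATH_DESC_4KB_BOUND_CHECK()
 * flags it and the setup loop advances ds by ndesc to the next safe
 * descriptor address instead.
 */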
/*
 * Cleanup DMA descriptors
 *
 * This function will free the DMA block that was allocated for the
 * descriptor pool. Since this was allocated as one "chunk", it is freed
 * in the same manner.
 */
void ath_descdma_cleanup(struct ath_softc *sc,
                         struct ath_descdma *dd,
                         struct list_head *head)
{
        /* Free memory associated with descriptors */
        pci_free_consistent(sc->pdev,
                            dd->dd_desc_len, dd->dd_desc, dd->dd_desc_paddr);

        INIT_LIST_HEAD(head);
        kfree(dd->dd_bufptr);
        memset(dd, 0, sizeof(*dd));
}
/*************/
/* Utilities */
/*************/

int ath_get_hal_qnum(u16 queue, struct ath_softc *sc)
{
        int qnum;

        switch (queue) {
        case 0:
                qnum = sc->sc_haltype2q[ATH9K_WME_AC_VO];
                break;
        case 1:
                qnum = sc->sc_haltype2q[ATH9K_WME_AC_VI];
                break;
        case 2:
                qnum = sc->sc_haltype2q[ATH9K_WME_AC_BE];
                break;
        case 3:
                qnum = sc->sc_haltype2q[ATH9K_WME_AC_BK];
                break;
        default:
                qnum = sc->sc_haltype2q[ATH9K_WME_AC_BE];
                break;
        }

        return qnum;
}

int ath_get_mac80211_qnum(u32 queue, struct ath_softc *sc)
{
        int qnum;

        switch (queue) {
        case ATH9K_WME_AC_VO:
                qnum = 0;
                break;
        case ATH9K_WME_AC_VI:
                qnum = 1;
                break;
        case ATH9K_WME_AC_BE:
                qnum = 2;
                break;
        case ATH9K_WME_AC_BK:
                qnum = 3;
                break;
        default:
                qnum = -1;
                break;
        }

        return qnum;
}
/*
 * Expand time stamp to TSF
 *
 * Extend 15-bit time stamp from rx descriptor to
 * a full 64-bit TSF using the current h/w TSF.
 */
u64 ath_extend_tsf(struct ath_softc *sc, u32 rstamp)
{
        u64 tsf;

        tsf = ath9k_hw_gettsf64(sc->sc_ah);
        if ((tsf & 0x7fff) < rstamp)
                tsf -= 0x8000;

        return (tsf & ~0x7fff) | rstamp;
}
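/*
 * Worked example: if the h/w TSF reads 0x12348003 and the RX stamp is
 * 0x7ff0, then (tsf & 0x7fff) == 0x0003 < 0x7ff0, meaning the low 15
 * bits wrapped after the frame arrived; subtracting 0x8000 gives
 * 0x12340003, and (0x12340000 | 0x7ff0) == 0x12347ff0 is the frame's
 * full 64-bit TSF.
 */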
/*
 * Set Default Antenna
 *
 * Call into the HAL to set the default antenna to use. Not really valid
 * for MIMO technology.
 */
void ath_setdefantenna(void *context, u32 antenna)
{
        struct ath_softc *sc = (struct ath_softc *)context;
        struct ath_hal *ah = sc->sc_ah;

        /* XXX block beacon interrupts */
        ath9k_hw_setantenna(ah, antenna);
        sc->sc_defant = antenna;
        sc->sc_rxotherant = 0;
}

/*
 * Set Slot Time
 *
 * This will wake up the chip if required, and set the slot time for the
 * frame (maximum transmit time). Slot time is assumed to be already set
 * in the ATH object member sc_slottime.
 */
void ath_setslottime(struct ath_softc *sc)
{
        ath9k_hw_setslottime(sc->sc_ah, sc->sc_slottime);
        sc->sc_updateslot = OK;
}