mac.c

/*
 * Copyright (c) 2008-2009 Atheros Communications Inc.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include "hw.h"

static void ar9002_hw_rx_enable(struct ath_hw *ah)
{
	REG_WRITE(ah, AR_CR, AR_CR_RXE);
}

static void ar9002_hw_set_desc_link(void *ds, u32 ds_link)
{
	((struct ath_desc *) ds)->ds_link = ds_link;
}

static void ar9002_hw_get_desc_link(void *ds, u32 **ds_link)
{
	*ds_link = &((struct ath_desc *)ds)->ds_link;
}

static bool ar9002_hw_get_isr(struct ath_hw *ah, enum ath9k_int *masked)
{
	u32 isr = 0;
	u32 mask2 = 0;
	struct ath9k_hw_capabilities *pCap = &ah->caps;
	u32 sync_cause = 0;
	bool fatal_int = false;
	struct ath_common *common = ath9k_hw_common(ah);

	if (!AR_SREV_9100(ah)) {
		if (REG_READ(ah, AR_INTR_ASYNC_CAUSE) & AR_INTR_MAC_IRQ) {
			if ((REG_READ(ah, AR_RTC_STATUS) & AR_RTC_STATUS_M)
			    == AR_RTC_STATUS_ON) {
				isr = REG_READ(ah, AR_ISR);
			}
		}

		sync_cause = REG_READ(ah, AR_INTR_SYNC_CAUSE) &
			AR_INTR_SYNC_DEFAULT;

		*masked = 0;

		if (!isr && !sync_cause)
			return false;
	} else {
		*masked = 0;
		isr = REG_READ(ah, AR_ISR);
	}

	if (isr) {
		if (isr & AR_ISR_BCNMISC) {
			u32 isr2;
			isr2 = REG_READ(ah, AR_ISR_S2);
			if (isr2 & AR_ISR_S2_TIM)
				mask2 |= ATH9K_INT_TIM;
			if (isr2 & AR_ISR_S2_DTIM)
				mask2 |= ATH9K_INT_DTIM;
			if (isr2 & AR_ISR_S2_DTIMSYNC)
				mask2 |= ATH9K_INT_DTIMSYNC;
			if (isr2 & (AR_ISR_S2_CABEND))
				mask2 |= ATH9K_INT_CABEND;
			if (isr2 & AR_ISR_S2_GTT)
				mask2 |= ATH9K_INT_GTT;
			if (isr2 & AR_ISR_S2_CST)
				mask2 |= ATH9K_INT_CST;
			if (isr2 & AR_ISR_S2_TSFOOR)
				mask2 |= ATH9K_INT_TSFOOR;
		}

		isr = REG_READ(ah, AR_ISR_RAC);
		if (isr == 0xffffffff) {
			*masked = 0;
			return false;
		}

		*masked = isr & ATH9K_INT_COMMON;

		if (ah->config.rx_intr_mitigation) {
			if (isr & (AR_ISR_RXMINTR | AR_ISR_RXINTM))
				*masked |= ATH9K_INT_RX;
		}

		if (isr & (AR_ISR_RXOK | AR_ISR_RXERR))
			*masked |= ATH9K_INT_RX;
		if (isr &
		    (AR_ISR_TXOK | AR_ISR_TXDESC | AR_ISR_TXERR |
		     AR_ISR_TXEOL)) {
			u32 s0_s, s1_s;

			*masked |= ATH9K_INT_TX;

			s0_s = REG_READ(ah, AR_ISR_S0_S);
			ah->intr_txqs |= MS(s0_s, AR_ISR_S0_QCU_TXOK);
			ah->intr_txqs |= MS(s0_s, AR_ISR_S0_QCU_TXDESC);

			s1_s = REG_READ(ah, AR_ISR_S1_S);
			ah->intr_txqs |= MS(s1_s, AR_ISR_S1_QCU_TXERR);
			ah->intr_txqs |= MS(s1_s, AR_ISR_S1_QCU_TXEOL);
		}

		if (isr & AR_ISR_RXORN) {
			ath_print(common, ATH_DBG_INTERRUPT,
				  "receive FIFO overrun interrupt\n");
		}

		if (!AR_SREV_9100(ah)) {
			if (!(pCap->hw_caps & ATH9K_HW_CAP_AUTOSLEEP)) {
				u32 isr5 = REG_READ(ah, AR_ISR_S5_S);
				if (isr5 & AR_ISR_S5_TIM_TIMER)
					*masked |= ATH9K_INT_TIM_TIMER;
			}
		}

		*masked |= mask2;
	}

	if (AR_SREV_9100(ah))
		return true;

	if (isr & AR_ISR_GENTMR) {
		u32 s5_s;

		s5_s = REG_READ(ah, AR_ISR_S5_S);
		if (isr & AR_ISR_GENTMR) {
			ah->intr_gen_timer_trigger =
				MS(s5_s, AR_ISR_S5_GENTIMER_TRIG);

			ah->intr_gen_timer_thresh =
				MS(s5_s, AR_ISR_S5_GENTIMER_THRESH);

			if (ah->intr_gen_timer_trigger)
				*masked |= ATH9K_INT_GENTIMER;
		}
	}

	if (sync_cause) {
		fatal_int =
			(sync_cause &
			 (AR_INTR_SYNC_HOST1_FATAL | AR_INTR_SYNC_HOST1_PERR))
			? true : false;

		if (fatal_int) {
			if (sync_cause & AR_INTR_SYNC_HOST1_FATAL) {
				ath_print(common, ATH_DBG_ANY,
					  "received PCI FATAL interrupt\n");
			}
			if (sync_cause & AR_INTR_SYNC_HOST1_PERR) {
				ath_print(common, ATH_DBG_ANY,
					  "received PCI PERR interrupt\n");
			}
			*masked |= ATH9K_INT_FATAL;
		}
		if (sync_cause & AR_INTR_SYNC_RADM_CPL_TIMEOUT) {
			ath_print(common, ATH_DBG_INTERRUPT,
				  "AR_INTR_SYNC_RADM_CPL_TIMEOUT\n");
			REG_WRITE(ah, AR_RC, AR_RC_HOSTIF);
			REG_WRITE(ah, AR_RC, 0);
			*masked |= ATH9K_INT_FATAL;
		}
		if (sync_cause & AR_INTR_SYNC_LOCAL_TIMEOUT) {
			ath_print(common, ATH_DBG_INTERRUPT,
				  "AR_INTR_SYNC_LOCAL_TIMEOUT\n");
		}

		REG_WRITE(ah, AR_INTR_SYNC_CAUSE_CLR, sync_cause);
		(void) REG_READ(ah, AR_INTR_SYNC_CAUSE_CLR);
	}

	return true;
}
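
/*
 * Usage sketch (illustrative, not part of the original file): a top-level
 * interrupt handler would typically gate on ath9k_hw_intrpend() (defined
 * later in this file) before paying for the register reads in get_isr, and
 * dispatch through the ops table filled in by ar9002_hw_attach_mac_ops().
 * The handler name and the reset helper below are hypothetical.
 *
 *	irqreturn_t example_isr(int irq, void *data)
 *	{
 *		struct ath_hw *ah = data;
 *		enum ath9k_int status;
 *
 *		if (!ath9k_hw_intrpend(ah))
 *			return IRQ_NONE;
 *		if (!ath9k_hw_ops(ah)->get_isr(ah, &status))
 *			return IRQ_NONE;
 *		if (status & ATH9K_INT_FATAL)
 *			example_queue_reset(ah);	(hypothetical helper)
 *		return IRQ_HANDLED;
 *	}
 */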

static void ar9002_hw_fill_txdesc(struct ath_hw *ah, void *ds, u32 seglen,
				  bool is_firstseg, bool is_lastseg,
				  const void *ds0, dma_addr_t buf_addr,
				  unsigned int qcu)
{
	struct ar5416_desc *ads = AR5416DESC(ds);

	ads->ds_data = buf_addr;

	if (is_firstseg) {
		ads->ds_ctl1 |= seglen | (is_lastseg ? 0 : AR_TxMore);
	} else if (is_lastseg) {
		ads->ds_ctl0 = 0;
		ads->ds_ctl1 = seglen;
		ads->ds_ctl2 = AR5416DESC_CONST(ds0)->ds_ctl2;
		ads->ds_ctl3 = AR5416DESC_CONST(ds0)->ds_ctl3;
	} else {
		ads->ds_ctl0 = 0;
		ads->ds_ctl1 = seglen | AR_TxMore;
		ads->ds_ctl2 = 0;
		ads->ds_ctl3 = 0;
	}
	ads->ds_txstatus0 = ads->ds_txstatus1 = 0;
	ads->ds_txstatus2 = ads->ds_txstatus3 = 0;
	ads->ds_txstatus4 = ads->ds_txstatus5 = 0;
	ads->ds_txstatus6 = ads->ds_txstatus7 = 0;
	ads->ds_txstatus8 = ads->ds_txstatus9 = 0;
}

static int ar9002_hw_proc_txdesc(struct ath_hw *ah, void *ds,
				 struct ath_tx_status *ts)
{
	struct ar5416_desc *ads = AR5416DESC(ds);

	if ((ads->ds_txstatus9 & AR_TxDone) == 0)
		return -EINPROGRESS;

	ts->ts_seqnum = MS(ads->ds_txstatus9, AR_SeqNum);
	ts->ts_tstamp = ads->AR_SendTimestamp;
	ts->ts_status = 0;
	ts->ts_flags = 0;

	if (ads->ds_txstatus1 & AR_FrmXmitOK)
		ts->ts_status |= ATH9K_TX_ACKED;
	if (ads->ds_txstatus1 & AR_ExcessiveRetries)
		ts->ts_status |= ATH9K_TXERR_XRETRY;
	if (ads->ds_txstatus1 & AR_Filtered)
		ts->ts_status |= ATH9K_TXERR_FILT;
	if (ads->ds_txstatus1 & AR_FIFOUnderrun) {
		ts->ts_status |= ATH9K_TXERR_FIFO;
		ath9k_hw_updatetxtriglevel(ah, true);
	}
	if (ads->ds_txstatus9 & AR_TxOpExceeded)
		ts->ts_status |= ATH9K_TXERR_XTXOP;
	if (ads->ds_txstatus1 & AR_TxTimerExpired)
		ts->ts_status |= ATH9K_TXERR_TIMER_EXPIRED;
	if (ads->ds_txstatus1 & AR_DescCfgErr)
		ts->ts_flags |= ATH9K_TX_DESC_CFG_ERR;
	if (ads->ds_txstatus1 & AR_TxDataUnderrun) {
		ts->ts_flags |= ATH9K_TX_DATA_UNDERRUN;
		ath9k_hw_updatetxtriglevel(ah, true);
	}
	if (ads->ds_txstatus1 & AR_TxDelimUnderrun) {
		ts->ts_flags |= ATH9K_TX_DELIM_UNDERRUN;
		ath9k_hw_updatetxtriglevel(ah, true);
	}
	if (ads->ds_txstatus0 & AR_TxBaStatus) {
		ts->ts_flags |= ATH9K_TX_BA;
		ts->ba_low = ads->AR_BaBitmapLow;
		ts->ba_high = ads->AR_BaBitmapHigh;
	}

	ts->ts_rateindex = MS(ads->ds_txstatus9, AR_FinalTxIdx);
	switch (ts->ts_rateindex) {
	case 0:
		ts->ts_ratecode = MS(ads->ds_ctl3, AR_XmitRate0);
		break;
	case 1:
		ts->ts_ratecode = MS(ads->ds_ctl3, AR_XmitRate1);
		break;
	case 2:
		ts->ts_ratecode = MS(ads->ds_ctl3, AR_XmitRate2);
		break;
	case 3:
		ts->ts_ratecode = MS(ads->ds_ctl3, AR_XmitRate3);
		break;
	}

	ts->ts_rssi = MS(ads->ds_txstatus5, AR_TxRSSICombined);
	ts->ts_rssi_ctl0 = MS(ads->ds_txstatus0, AR_TxRSSIAnt00);
	ts->ts_rssi_ctl1 = MS(ads->ds_txstatus0, AR_TxRSSIAnt01);
	ts->ts_rssi_ctl2 = MS(ads->ds_txstatus0, AR_TxRSSIAnt02);
	ts->ts_rssi_ext0 = MS(ads->ds_txstatus5, AR_TxRSSIAnt10);
	ts->ts_rssi_ext1 = MS(ads->ds_txstatus5, AR_TxRSSIAnt11);
	ts->ts_rssi_ext2 = MS(ads->ds_txstatus5, AR_TxRSSIAnt12);
	ts->evm0 = ads->AR_TxEVM0;
	ts->evm1 = ads->AR_TxEVM1;
	ts->evm2 = ads->AR_TxEVM2;
	ts->ts_shortretry = MS(ads->ds_txstatus1, AR_RTSFailCnt);
	ts->ts_longretry = MS(ads->ds_txstatus1, AR_DataFailCnt);
	ts->ts_virtcol = MS(ads->ds_txstatus1, AR_VirtRetryCnt);
	ts->ts_antenna = 0;

	return 0;
}
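
/*
 * Usage sketch (illustrative, not part of the original file): TX completion
 * code runs proc_txdesc against the last descriptor of the frame at the head
 * of the hardware queue and stops as soon as it returns -EINPROGRESS, i.e.
 * the DMA engine has not finished that descriptor yet. The queue structure,
 * buffer fields, and helper below are assumptions.
 *
 *	struct ath_tx_status ts;
 *	struct ath_buf *bf;
 *
 *	while (!list_empty(&txq->axq_q)) {
 *		bf = list_first_entry(&txq->axq_q, struct ath_buf, list);
 *		if (ath9k_hw_ops(ah)->proc_txdesc(ah, bf->bf_lastds, &ts) ==
 *		    -EINPROGRESS)
 *			break;
 *		example_complete_buf(sc, bf, &ts);	(hypothetical helper)
 *	}
 */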

static void ar9002_hw_set11n_txdesc(struct ath_hw *ah, void *ds,
				    u32 pktLen, enum ath9k_pkt_type type,
				    u32 txPower, u32 keyIx,
				    enum ath9k_key_type keyType, u32 flags)
{
	struct ar5416_desc *ads = AR5416DESC(ds);

	txPower += ah->txpower_indexoffset;
	if (txPower > 63)
		txPower = 63;

	ads->ds_ctl0 = (pktLen & AR_FrameLen)
		| (flags & ATH9K_TXDESC_VMF ? AR_VirtMoreFrag : 0)
		| SM(txPower, AR_XmitPower)
		| (flags & ATH9K_TXDESC_VEOL ? AR_VEOL : 0)
		| (flags & ATH9K_TXDESC_CLRDMASK ? AR_ClrDestMask : 0)
		| (flags & ATH9K_TXDESC_INTREQ ? AR_TxIntrReq : 0)
		| (keyIx != ATH9K_TXKEYIX_INVALID ? AR_DestIdxValid : 0);

	ads->ds_ctl1 =
		(keyIx != ATH9K_TXKEYIX_INVALID ? SM(keyIx, AR_DestIdx) : 0)
		| SM(type, AR_FrameType)
		| (flags & ATH9K_TXDESC_NOACK ? AR_NoAck : 0)
		| (flags & ATH9K_TXDESC_EXT_ONLY ? AR_ExtOnly : 0)
		| (flags & ATH9K_TXDESC_EXT_AND_CTL ? AR_ExtAndCtl : 0);

	ads->ds_ctl6 = SM(keyType, AR_EncrType);

	if (AR_SREV_9285(ah) || AR_SREV_9271(ah)) {
		ads->ds_ctl8 = 0;
		ads->ds_ctl9 = 0;
		ads->ds_ctl10 = 0;
		ads->ds_ctl11 = 0;
	}
}

static void ar9002_hw_set11n_ratescenario(struct ath_hw *ah, void *ds,
					  void *lastds,
					  u32 durUpdateEn, u32 rtsctsRate,
					  u32 rtsctsDuration,
					  struct ath9k_11n_rate_series series[],
					  u32 nseries, u32 flags)
{
	struct ar5416_desc *ads = AR5416DESC(ds);
	struct ar5416_desc *last_ads = AR5416DESC(lastds);
	u32 ds_ctl0;

	if (flags & (ATH9K_TXDESC_RTSENA | ATH9K_TXDESC_CTSENA)) {
		ds_ctl0 = ads->ds_ctl0;

		if (flags & ATH9K_TXDESC_RTSENA) {
			ds_ctl0 &= ~AR_CTSEnable;
			ds_ctl0 |= AR_RTSEnable;
		} else {
			ds_ctl0 &= ~AR_RTSEnable;
			ds_ctl0 |= AR_CTSEnable;
		}

		ads->ds_ctl0 = ds_ctl0;
	} else {
		ads->ds_ctl0 =
			(ads->ds_ctl0 & ~(AR_RTSEnable | AR_CTSEnable));
	}

	ads->ds_ctl2 = set11nTries(series, 0)
		| set11nTries(series, 1)
		| set11nTries(series, 2)
		| set11nTries(series, 3)
		| (durUpdateEn ? AR_DurUpdateEna : 0)
		| SM(0, AR_BurstDur);

	ads->ds_ctl3 = set11nRate(series, 0)
		| set11nRate(series, 1)
		| set11nRate(series, 2)
		| set11nRate(series, 3);

	ads->ds_ctl4 = set11nPktDurRTSCTS(series, 0)
		| set11nPktDurRTSCTS(series, 1);

	ads->ds_ctl5 = set11nPktDurRTSCTS(series, 2)
		| set11nPktDurRTSCTS(series, 3);

	ads->ds_ctl7 = set11nRateFlags(series, 0)
		| set11nRateFlags(series, 1)
		| set11nRateFlags(series, 2)
		| set11nRateFlags(series, 3)
		| SM(rtsctsRate, AR_RTSCTSRate);

	last_ads->ds_ctl2 = ads->ds_ctl2;
	last_ads->ds_ctl3 = ads->ds_ctl3;
}

static void ar9002_hw_set11n_aggr_first(struct ath_hw *ah, void *ds,
					u32 aggrLen)
{
	struct ar5416_desc *ads = AR5416DESC(ds);

	ads->ds_ctl1 |= (AR_IsAggr | AR_MoreAggr);
	ads->ds_ctl6 &= ~AR_AggrLen;
	ads->ds_ctl6 |= SM(aggrLen, AR_AggrLen);
}

static void ar9002_hw_set11n_aggr_middle(struct ath_hw *ah, void *ds,
					 u32 numDelims)
{
	struct ar5416_desc *ads = AR5416DESC(ds);
	unsigned int ctl6;

	ads->ds_ctl1 |= (AR_IsAggr | AR_MoreAggr);

	ctl6 = ads->ds_ctl6;
	ctl6 &= ~AR_PadDelim;
	ctl6 |= SM(numDelims, AR_PadDelim);
	ads->ds_ctl6 = ctl6;
}

static void ar9002_hw_set11n_aggr_last(struct ath_hw *ah, void *ds)
{
	struct ar5416_desc *ads = AR5416DESC(ds);

	ads->ds_ctl1 |= AR_IsAggr;
	ads->ds_ctl1 &= ~AR_MoreAggr;
	ads->ds_ctl6 &= ~AR_PadDelim;
}

static void ar9002_hw_clr11n_aggr(struct ath_hw *ah, void *ds)
{
	struct ar5416_desc *ads = AR5416DESC(ds);

	ads->ds_ctl1 &= (~AR_IsAggr & ~AR_MoreAggr);
}

static void ar9002_hw_set11n_burstduration(struct ath_hw *ah, void *ds,
					   u32 burstDuration)
{
	struct ar5416_desc *ads = AR5416DESC(ds);

	ads->ds_ctl2 &= ~AR_BurstDur;
	ads->ds_ctl2 |= SM(burstDuration, AR_BurstDur);
}

static void ar9002_hw_set11n_virtualmorefrag(struct ath_hw *ah, void *ds,
					     u32 vmf)
{
	struct ar5416_desc *ads = AR5416DESC(ds);

	if (vmf)
		ads->ds_ctl0 |= AR_VirtMoreFrag;
	else
		ads->ds_ctl0 &= ~AR_VirtMoreFrag;
}

void ar9002_hw_attach_mac_ops(struct ath_hw *ah)
{
	struct ath_hw_ops *ops = ath9k_hw_ops(ah);

	ops->rx_enable = ar9002_hw_rx_enable;
	ops->set_desc_link = ar9002_hw_set_desc_link;
	ops->get_desc_link = ar9002_hw_get_desc_link;
	ops->get_isr = ar9002_hw_get_isr;
	ops->fill_txdesc = ar9002_hw_fill_txdesc;
	ops->proc_txdesc = ar9002_hw_proc_txdesc;
	ops->set11n_txdesc = ar9002_hw_set11n_txdesc;
	ops->set11n_ratescenario = ar9002_hw_set11n_ratescenario;
	ops->set11n_aggr_first = ar9002_hw_set11n_aggr_first;
	ops->set11n_aggr_middle = ar9002_hw_set11n_aggr_middle;
	ops->set11n_aggr_last = ar9002_hw_set11n_aggr_last;
	ops->clr11n_aggr = ar9002_hw_clr11n_aggr;
	ops->set11n_burstduration = ar9002_hw_set11n_burstduration;
	ops->set11n_virtualmorefrag = ar9002_hw_set11n_virtualmorefrag;
}
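
/*
 * Note (not in the original file): ar9002_hw_attach_mac_ops() fills the
 * per-family function table, so common ath9k code never names the AR9002
 * routines directly. A call such as:
 *
 *	ath9k_hw_ops(ah)->set_desc_link(ds, link);
 *
 * dispatches to ar9002_hw_set_desc_link() on this hardware family, and
 * presumably to another family's variant when a different attach routine
 * populated the table instead.
 */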

static void ath9k_hw_set_txq_interrupts(struct ath_hw *ah,
					struct ath9k_tx_queue_info *qi)
{
	ath_print(ath9k_hw_common(ah), ATH_DBG_INTERRUPT,
		  "tx ok 0x%x err 0x%x desc 0x%x eol 0x%x urn 0x%x\n",
		  ah->txok_interrupt_mask, ah->txerr_interrupt_mask,
		  ah->txdesc_interrupt_mask, ah->txeol_interrupt_mask,
		  ah->txurn_interrupt_mask);

	REG_WRITE(ah, AR_IMR_S0,
		  SM(ah->txok_interrupt_mask, AR_IMR_S0_QCU_TXOK)
		  | SM(ah->txdesc_interrupt_mask, AR_IMR_S0_QCU_TXDESC));
	REG_WRITE(ah, AR_IMR_S1,
		  SM(ah->txerr_interrupt_mask, AR_IMR_S1_QCU_TXERR)
		  | SM(ah->txeol_interrupt_mask, AR_IMR_S1_QCU_TXEOL));

	ah->imrs2_reg &= ~AR_IMR_S2_QCU_TXURN;
	ah->imrs2_reg |= (ah->txurn_interrupt_mask & AR_IMR_S2_QCU_TXURN);
	REG_WRITE(ah, AR_IMR_S2, ah->imrs2_reg);
}

u32 ath9k_hw_gettxbuf(struct ath_hw *ah, u32 q)
{
	return REG_READ(ah, AR_QTXDP(q));
}
EXPORT_SYMBOL(ath9k_hw_gettxbuf);

void ath9k_hw_puttxbuf(struct ath_hw *ah, u32 q, u32 txdp)
{
	REG_WRITE(ah, AR_QTXDP(q), txdp);
}
EXPORT_SYMBOL(ath9k_hw_puttxbuf);

void ath9k_hw_txstart(struct ath_hw *ah, u32 q)
{
	ath_print(ath9k_hw_common(ah), ATH_DBG_QUEUE,
		  "Enable TXE on queue: %u\n", q);
	REG_WRITE(ah, AR_Q_TXE, 1 << q);
}
EXPORT_SYMBOL(ath9k_hw_txstart);

void ath9k_hw_cleartxdesc(struct ath_hw *ah, void *ds)
{
	struct ar5416_desc *ads = AR5416DESC(ds);

	ads->ds_txstatus0 = ads->ds_txstatus1 = 0;
	ads->ds_txstatus2 = ads->ds_txstatus3 = 0;
	ads->ds_txstatus4 = ads->ds_txstatus5 = 0;
	ads->ds_txstatus6 = ads->ds_txstatus7 = 0;
	ads->ds_txstatus8 = ads->ds_txstatus9 = 0;
}
EXPORT_SYMBOL(ath9k_hw_cleartxdesc);

u32 ath9k_hw_numtxpending(struct ath_hw *ah, u32 q)
{
	u32 npend;

	npend = REG_READ(ah, AR_QSTS(q)) & AR_Q_STS_PEND_FR_CNT;
	if (npend == 0) {
		if (REG_READ(ah, AR_Q_TXE) & (1 << q))
			npend = 1;
	}

	return npend;
}
EXPORT_SYMBOL(ath9k_hw_numtxpending);

/**
 * ath9k_hw_updatetxtriglevel - adjusts the frame trigger level
 *
 * @ah: atheros hardware struct
 * @bIncTrigLevel: whether or not the frame trigger level should be updated
 *
 * The frame trigger level specifies the minimum number of bytes,
 * in units of 64 bytes, that must be DMA'ed into the PCU TX FIFO
 * before the PCU will initiate sending the frame on the air. This can
 * mean we initiate transmit before a full frame is on the PCU TX FIFO.
 * Resets to 0x1 (meaning 64 bytes or a full frame, whichever occurs
 * first).
 *
 * Caution must be taken to set the frame trigger level based on the
 * DMA request size. For example, if the DMA request size is set to
 * 128 bytes the trigger level cannot exceed 6 * 64 = 384. This is because
 * there needs to be enough space in the TX FIFO for the requested transfer
 * size. Hence the TX FIFO will stop filling at 512 - 128 = 384 bytes. If we
 * set the threshold to a value beyond 6, then the transmit will hang.
 *
 * Current dual stream devices have a PCU TX FIFO size of 8 KB.
 * Current single stream devices have a PCU TX FIFO size of 4 KB, however,
 * there is a hardware issue which forces us to use 2 KB instead so the
 * frame trigger level must not exceed 2 KB for these chipsets.
 */
bool ath9k_hw_updatetxtriglevel(struct ath_hw *ah, bool bIncTrigLevel)
{
	u32 txcfg, curLevel, newLevel;
	enum ath9k_int omask;

	if (ah->tx_trig_level >= ah->config.max_txtrig_level)
		return false;

	omask = ath9k_hw_set_interrupts(ah, ah->imask & ~ATH9K_INT_GLOBAL);

	txcfg = REG_READ(ah, AR_TXCFG);
	curLevel = MS(txcfg, AR_FTRIG);
	newLevel = curLevel;
	if (bIncTrigLevel) {
		if (curLevel < ah->config.max_txtrig_level)
			newLevel++;
	} else if (curLevel > MIN_TX_FIFO_THRESHOLD)
		newLevel--;
	if (newLevel != curLevel)
		REG_WRITE(ah, AR_TXCFG,
			  (txcfg & ~AR_FTRIG) | SM(newLevel, AR_FTRIG));

	ath9k_hw_set_interrupts(ah, omask);

	ah->tx_trig_level = newLevel;

	return newLevel != curLevel;
}
EXPORT_SYMBOL(ath9k_hw_updatetxtriglevel);
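
/*
 * Worked example (not in the original file): AR_FTRIG is in units of
 * 64 bytes, so a trigger level of 6 means 6 * 64 = 384 bytes must be in
 * the PCU TX FIFO before transmission starts. With a 512-byte FIFO window
 * and a 128-byte DMA request size, the FIFO can only be observed to fill
 * up to 512 - 128 = 384 bytes, which is why, per the comment above, a
 * level beyond 6 would never be reached and the transmit would hang.
 */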

bool ath9k_hw_stoptxdma(struct ath_hw *ah, u32 q)
{
#define ATH9K_TX_STOP_DMA_TIMEOUT	4000	/* usec */
#define ATH9K_TIME_QUANTUM		100	/* usec */
	struct ath_common *common = ath9k_hw_common(ah);
	struct ath9k_hw_capabilities *pCap = &ah->caps;
	struct ath9k_tx_queue_info *qi;
	u32 tsfLow, j, wait;
	u32 wait_time = ATH9K_TX_STOP_DMA_TIMEOUT / ATH9K_TIME_QUANTUM;

	if (q >= pCap->total_queues) {
		ath_print(common, ATH_DBG_QUEUE, "Stopping TX DMA, "
			  "invalid queue: %u\n", q);
		return false;
	}

	qi = &ah->txq[q];
	if (qi->tqi_type == ATH9K_TX_QUEUE_INACTIVE) {
		ath_print(common, ATH_DBG_QUEUE, "Stopping TX DMA, "
			  "inactive queue: %u\n", q);
		return false;
	}

	REG_WRITE(ah, AR_Q_TXD, 1 << q);

	for (wait = wait_time; wait != 0; wait--) {
		if (ath9k_hw_numtxpending(ah, q) == 0)
			break;
		udelay(ATH9K_TIME_QUANTUM);
	}

	if (ath9k_hw_numtxpending(ah, q)) {
		ath_print(common, ATH_DBG_QUEUE,
			  "%s: Num of pending TX Frames %d on Q %d\n",
			  __func__, ath9k_hw_numtxpending(ah, q), q);

		for (j = 0; j < 2; j++) {
			tsfLow = REG_READ(ah, AR_TSF_L32);
			REG_WRITE(ah, AR_QUIET2,
				  SM(10, AR_QUIET2_QUIET_DUR));
			REG_WRITE(ah, AR_QUIET_PERIOD, 100);
			REG_WRITE(ah, AR_NEXT_QUIET_TIMER, tsfLow >> 10);
			REG_SET_BIT(ah, AR_TIMER_MODE,
				    AR_QUIET_TIMER_EN);

			if ((REG_READ(ah, AR_TSF_L32) >> 10) == (tsfLow >> 10))
				break;

			ath_print(common, ATH_DBG_QUEUE,
				  "TSF has moved while trying to set "
				  "quiet time TSF: 0x%08x\n", tsfLow);
		}

		REG_SET_BIT(ah, AR_DIAG_SW, AR_DIAG_FORCE_CH_IDLE_HIGH);

		udelay(200);
		REG_CLR_BIT(ah, AR_TIMER_MODE, AR_QUIET_TIMER_EN);

		wait = wait_time;
		while (ath9k_hw_numtxpending(ah, q)) {
			if ((--wait) == 0) {
				ath_print(common, ATH_DBG_FATAL,
					  "Failed to stop TX DMA in %d msec "
					  "after killing last frame\n",
					  ATH9K_TX_STOP_DMA_TIMEOUT / 1000);
				break;
			}
			udelay(ATH9K_TIME_QUANTUM);
		}

		REG_CLR_BIT(ah, AR_DIAG_SW, AR_DIAG_FORCE_CH_IDLE_HIGH);
	}

	REG_WRITE(ah, AR_Q_TXD, 0);
	return wait != 0;

#undef ATH9K_TX_STOP_DMA_TIMEOUT
#undef ATH9K_TIME_QUANTUM
}
EXPORT_SYMBOL(ath9k_hw_stoptxdma);

void ath9k_hw_gettxintrtxqs(struct ath_hw *ah, u32 *txqs)
{
	*txqs &= ah->intr_txqs;
	ah->intr_txqs &= ~(*txqs);
}
EXPORT_SYMBOL(ath9k_hw_gettxintrtxqs);

bool ath9k_hw_set_txq_props(struct ath_hw *ah, int q,
			    const struct ath9k_tx_queue_info *qinfo)
{
	u32 cw;
	struct ath_common *common = ath9k_hw_common(ah);
	struct ath9k_hw_capabilities *pCap = &ah->caps;
	struct ath9k_tx_queue_info *qi;

	if (q >= pCap->total_queues) {
		ath_print(common, ATH_DBG_QUEUE, "Set TXQ properties, "
			  "invalid queue: %u\n", q);
		return false;
	}

	qi = &ah->txq[q];
	if (qi->tqi_type == ATH9K_TX_QUEUE_INACTIVE) {
		ath_print(common, ATH_DBG_QUEUE, "Set TXQ properties, "
			  "inactive queue: %u\n", q);
		return false;
	}

	ath_print(common, ATH_DBG_QUEUE, "Set queue properties for: %u\n", q);

	qi->tqi_ver = qinfo->tqi_ver;
	qi->tqi_subtype = qinfo->tqi_subtype;
	qi->tqi_qflags = qinfo->tqi_qflags;
	qi->tqi_priority = qinfo->tqi_priority;
	if (qinfo->tqi_aifs != ATH9K_TXQ_USEDEFAULT)
		qi->tqi_aifs = min(qinfo->tqi_aifs, 255U);
	else
		qi->tqi_aifs = INIT_AIFS;
	if (qinfo->tqi_cwmin != ATH9K_TXQ_USEDEFAULT) {
		cw = min(qinfo->tqi_cwmin, 1024U);
		qi->tqi_cwmin = 1;
		while (qi->tqi_cwmin < cw)
			qi->tqi_cwmin = (qi->tqi_cwmin << 1) | 1;
	} else
		qi->tqi_cwmin = qinfo->tqi_cwmin;
	if (qinfo->tqi_cwmax != ATH9K_TXQ_USEDEFAULT) {
		cw = min(qinfo->tqi_cwmax, 1024U);
		qi->tqi_cwmax = 1;
		while (qi->tqi_cwmax < cw)
			qi->tqi_cwmax = (qi->tqi_cwmax << 1) | 1;
	} else
		qi->tqi_cwmax = INIT_CWMAX;

	if (qinfo->tqi_shretry != 0)
		qi->tqi_shretry = min((u32) qinfo->tqi_shretry, 15U);
	else
		qi->tqi_shretry = INIT_SH_RETRY;
	if (qinfo->tqi_lgretry != 0)
		qi->tqi_lgretry = min((u32) qinfo->tqi_lgretry, 15U);
	else
		qi->tqi_lgretry = INIT_LG_RETRY;
	qi->tqi_cbrPeriod = qinfo->tqi_cbrPeriod;
	qi->tqi_cbrOverflowLimit = qinfo->tqi_cbrOverflowLimit;
	qi->tqi_burstTime = qinfo->tqi_burstTime;
	qi->tqi_readyTime = qinfo->tqi_readyTime;

	switch (qinfo->tqi_subtype) {
	case ATH9K_WME_UPSD:
		if (qi->tqi_type == ATH9K_TX_QUEUE_DATA)
			qi->tqi_intFlags = ATH9K_TXQ_USE_LOCKOUT_BKOFF_DIS;
		break;
	default:
		break;
	}

	return true;
}
EXPORT_SYMBOL(ath9k_hw_set_txq_props);
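
/*
 * Worked example (not in the original file): the cwmin/cwmax loops above
 * round a requested contention window up to the next (2^n - 1) value, since
 * the hardware expects an all-ones CW mask. For qinfo->tqi_cwmin = 300:
 *
 *	cw = min(300, 1024) = 300
 *	tqi_cwmin: 1 -> 3 -> 7 -> 15 -> 31 -> 63 -> 127 -> 255 -> 511
 *
 * so the queue ends up programmed with a CWmin of 511.
 */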

bool ath9k_hw_get_txq_props(struct ath_hw *ah, int q,
			    struct ath9k_tx_queue_info *qinfo)
{
	struct ath_common *common = ath9k_hw_common(ah);
	struct ath9k_hw_capabilities *pCap = &ah->caps;
	struct ath9k_tx_queue_info *qi;

	if (q >= pCap->total_queues) {
		ath_print(common, ATH_DBG_QUEUE, "Get TXQ properties, "
			  "invalid queue: %u\n", q);
		return false;
	}

	qi = &ah->txq[q];
	if (qi->tqi_type == ATH9K_TX_QUEUE_INACTIVE) {
		ath_print(common, ATH_DBG_QUEUE, "Get TXQ properties, "
			  "inactive queue: %u\n", q);
		return false;
	}

	qinfo->tqi_ver = qi->tqi_ver;
	qinfo->tqi_subtype = qi->tqi_subtype;
	qinfo->tqi_qflags = qi->tqi_qflags;
	qinfo->tqi_priority = qi->tqi_priority;
	qinfo->tqi_aifs = qi->tqi_aifs;
	qinfo->tqi_cwmin = qi->tqi_cwmin;
	qinfo->tqi_cwmax = qi->tqi_cwmax;
	qinfo->tqi_shretry = qi->tqi_shretry;
	qinfo->tqi_lgretry = qi->tqi_lgretry;
	qinfo->tqi_cbrPeriod = qi->tqi_cbrPeriod;
	qinfo->tqi_cbrOverflowLimit = qi->tqi_cbrOverflowLimit;
	qinfo->tqi_burstTime = qi->tqi_burstTime;
	qinfo->tqi_readyTime = qi->tqi_readyTime;

	return true;
}
EXPORT_SYMBOL(ath9k_hw_get_txq_props);

int ath9k_hw_setuptxqueue(struct ath_hw *ah, enum ath9k_tx_queue type,
			  const struct ath9k_tx_queue_info *qinfo)
{
	struct ath_common *common = ath9k_hw_common(ah);
	struct ath9k_tx_queue_info *qi;
	struct ath9k_hw_capabilities *pCap = &ah->caps;
	int q;

	switch (type) {
	case ATH9K_TX_QUEUE_BEACON:
		q = pCap->total_queues - 1;
		break;
	case ATH9K_TX_QUEUE_CAB:
		q = pCap->total_queues - 2;
		break;
	case ATH9K_TX_QUEUE_PSPOLL:
		q = 1;
		break;
	case ATH9K_TX_QUEUE_UAPSD:
		q = pCap->total_queues - 3;
		break;
	case ATH9K_TX_QUEUE_DATA:
		for (q = 0; q < pCap->total_queues; q++)
			if (ah->txq[q].tqi_type ==
			    ATH9K_TX_QUEUE_INACTIVE)
				break;
		if (q == pCap->total_queues) {
			ath_print(common, ATH_DBG_FATAL,
				  "No available TX queue\n");
			return -1;
		}
		break;
	default:
		ath_print(common, ATH_DBG_FATAL,
			  "Invalid TX queue type: %u\n", type);
		return -1;
	}

	ath_print(common, ATH_DBG_QUEUE, "Setup TX queue: %u\n", q);

	qi = &ah->txq[q];
	if (qi->tqi_type != ATH9K_TX_QUEUE_INACTIVE) {
		ath_print(common, ATH_DBG_FATAL,
			  "TX queue: %u already active\n", q);
		return -1;
	}
	memset(qi, 0, sizeof(struct ath9k_tx_queue_info));
	qi->tqi_type = type;
	if (qinfo == NULL) {
		qi->tqi_qflags =
			TXQ_FLAG_TXOKINT_ENABLE
			| TXQ_FLAG_TXERRINT_ENABLE
			| TXQ_FLAG_TXDESCINT_ENABLE | TXQ_FLAG_TXURNINT_ENABLE;
		qi->tqi_aifs = INIT_AIFS;
		qi->tqi_cwmin = ATH9K_TXQ_USEDEFAULT;
		qi->tqi_cwmax = INIT_CWMAX;
		qi->tqi_shretry = INIT_SH_RETRY;
		qi->tqi_lgretry = INIT_LG_RETRY;
		qi->tqi_physCompBuf = 0;
	} else {
		qi->tqi_physCompBuf = qinfo->tqi_physCompBuf;
		(void) ath9k_hw_set_txq_props(ah, q, qinfo);
	}

	return q;
}
EXPORT_SYMBOL(ath9k_hw_setuptxqueue);
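
/*
 * Usage sketch (illustrative, not part of the original file): a data queue
 * is typically allocated with default parameters (qinfo == NULL) and then
 * activated with ath9k_hw_resettxqueue() once the channel is known. Error
 * handling is abbreviated and the -ENOMEM mapping is an assumption.
 *
 *	int q = ath9k_hw_setuptxqueue(ah, ATH9K_TX_QUEUE_DATA, NULL);
 *
 *	if (q == -1)
 *		return -ENOMEM;
 *	ath9k_hw_resettxqueue(ah, q);
 */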

bool ath9k_hw_releasetxqueue(struct ath_hw *ah, u32 q)
{
	struct ath9k_hw_capabilities *pCap = &ah->caps;
	struct ath_common *common = ath9k_hw_common(ah);
	struct ath9k_tx_queue_info *qi;

	if (q >= pCap->total_queues) {
		ath_print(common, ATH_DBG_QUEUE, "Release TXQ, "
			  "invalid queue: %u\n", q);
		return false;
	}
	qi = &ah->txq[q];
	if (qi->tqi_type == ATH9K_TX_QUEUE_INACTIVE) {
		ath_print(common, ATH_DBG_QUEUE, "Release TXQ, "
			  "inactive queue: %u\n", q);
		return false;
	}

	ath_print(common, ATH_DBG_QUEUE, "Release TX queue: %u\n", q);

	qi->tqi_type = ATH9K_TX_QUEUE_INACTIVE;
	ah->txok_interrupt_mask &= ~(1 << q);
	ah->txerr_interrupt_mask &= ~(1 << q);
	ah->txdesc_interrupt_mask &= ~(1 << q);
	ah->txeol_interrupt_mask &= ~(1 << q);
	ah->txurn_interrupt_mask &= ~(1 << q);
	ath9k_hw_set_txq_interrupts(ah, qi);

	return true;
}
EXPORT_SYMBOL(ath9k_hw_releasetxqueue);

bool ath9k_hw_resettxqueue(struct ath_hw *ah, u32 q)
{
	struct ath9k_hw_capabilities *pCap = &ah->caps;
	struct ath_common *common = ath9k_hw_common(ah);
	struct ath9k_channel *chan = ah->curchan;
	struct ath9k_tx_queue_info *qi;
	u32 cwMin, chanCwMin, value;

	if (q >= pCap->total_queues) {
		ath_print(common, ATH_DBG_QUEUE, "Reset TXQ, "
			  "invalid queue: %u\n", q);
		return false;
	}

	qi = &ah->txq[q];
	if (qi->tqi_type == ATH9K_TX_QUEUE_INACTIVE) {
		ath_print(common, ATH_DBG_QUEUE, "Reset TXQ, "
			  "inactive queue: %u\n", q);
		return true;
	}

	ath_print(common, ATH_DBG_QUEUE, "Reset TX queue: %u\n", q);

	if (qi->tqi_cwmin == ATH9K_TXQ_USEDEFAULT) {
		if (chan && IS_CHAN_B(chan))
			chanCwMin = INIT_CWMIN_11B;
		else
			chanCwMin = INIT_CWMIN;

		for (cwMin = 1; cwMin < chanCwMin; cwMin = (cwMin << 1) | 1);
	} else
		cwMin = qi->tqi_cwmin;

	REG_WRITE(ah, AR_DLCL_IFS(q),
		  SM(cwMin, AR_D_LCL_IFS_CWMIN) |
		  SM(qi->tqi_cwmax, AR_D_LCL_IFS_CWMAX) |
		  SM(qi->tqi_aifs, AR_D_LCL_IFS_AIFS));

	REG_WRITE(ah, AR_DRETRY_LIMIT(q),
		  SM(INIT_SSH_RETRY, AR_D_RETRY_LIMIT_STA_SH) |
		  SM(INIT_SLG_RETRY, AR_D_RETRY_LIMIT_STA_LG) |
		  SM(qi->tqi_shretry, AR_D_RETRY_LIMIT_FR_SH));

	REG_WRITE(ah, AR_QMISC(q), AR_Q_MISC_DCU_EARLY_TERM_REQ);
	REG_WRITE(ah, AR_DMISC(q),
		  AR_D_MISC_CW_BKOFF_EN | AR_D_MISC_FRAG_WAIT_EN | 0x2);

	if (qi->tqi_cbrPeriod) {
		REG_WRITE(ah, AR_QCBRCFG(q),
			  SM(qi->tqi_cbrPeriod, AR_Q_CBRCFG_INTERVAL) |
			  SM(qi->tqi_cbrOverflowLimit, AR_Q_CBRCFG_OVF_THRESH));
		REG_WRITE(ah, AR_QMISC(q),
			  REG_READ(ah, AR_QMISC(q)) | AR_Q_MISC_FSP_CBR |
			  (qi->tqi_cbrOverflowLimit ?
			   AR_Q_MISC_CBR_EXP_CNTR_LIMIT_EN : 0));
	}
	if (qi->tqi_readyTime && (qi->tqi_type != ATH9K_TX_QUEUE_CAB)) {
		REG_WRITE(ah, AR_QRDYTIMECFG(q),
			  SM(qi->tqi_readyTime, AR_Q_RDYTIMECFG_DURATION) |
			  AR_Q_RDYTIMECFG_EN);
	}

	REG_WRITE(ah, AR_DCHNTIME(q),
		  SM(qi->tqi_burstTime, AR_D_CHNTIME_DUR) |
		  (qi->tqi_burstTime ? AR_D_CHNTIME_EN : 0));

	if (qi->tqi_burstTime
	    && (qi->tqi_qflags & TXQ_FLAG_RDYTIME_EXP_POLICY_ENABLE)) {
		REG_WRITE(ah, AR_QMISC(q),
			  REG_READ(ah, AR_QMISC(q)) |
			  AR_Q_MISC_RDYTIME_EXP_POLICY);
	}

	if (qi->tqi_qflags & TXQ_FLAG_BACKOFF_DISABLE) {
		REG_WRITE(ah, AR_DMISC(q),
			  REG_READ(ah, AR_DMISC(q)) |
			  AR_D_MISC_POST_FR_BKOFF_DIS);
	}
	if (qi->tqi_qflags & TXQ_FLAG_FRAG_BURST_BACKOFF_ENABLE) {
		REG_WRITE(ah, AR_DMISC(q),
			  REG_READ(ah, AR_DMISC(q)) |
			  AR_D_MISC_FRAG_BKOFF_EN);
	}
	switch (qi->tqi_type) {
	case ATH9K_TX_QUEUE_BEACON:
		REG_WRITE(ah, AR_QMISC(q), REG_READ(ah, AR_QMISC(q))
			  | AR_Q_MISC_FSP_DBA_GATED
			  | AR_Q_MISC_BEACON_USE
			  | AR_Q_MISC_CBR_INCR_DIS1);

		REG_WRITE(ah, AR_DMISC(q), REG_READ(ah, AR_DMISC(q))
			  | (AR_D_MISC_ARB_LOCKOUT_CNTRL_GLOBAL <<
			     AR_D_MISC_ARB_LOCKOUT_CNTRL_S)
			  | AR_D_MISC_BEACON_USE
			  | AR_D_MISC_POST_FR_BKOFF_DIS);
		break;
	case ATH9K_TX_QUEUE_CAB:
		REG_WRITE(ah, AR_QMISC(q), REG_READ(ah, AR_QMISC(q))
			  | AR_Q_MISC_FSP_DBA_GATED
			  | AR_Q_MISC_CBR_INCR_DIS1
			  | AR_Q_MISC_CBR_INCR_DIS0);
		value = (qi->tqi_readyTime -
			 (ah->config.sw_beacon_response_time -
			  ah->config.dma_beacon_response_time) -
			 ah->config.additional_swba_backoff) * 1024;
		REG_WRITE(ah, AR_QRDYTIMECFG(q),
			  value | AR_Q_RDYTIMECFG_EN);
		REG_WRITE(ah, AR_DMISC(q), REG_READ(ah, AR_DMISC(q))
			  | (AR_D_MISC_ARB_LOCKOUT_CNTRL_GLOBAL <<
			     AR_D_MISC_ARB_LOCKOUT_CNTRL_S));
		break;
	case ATH9K_TX_QUEUE_PSPOLL:
		REG_WRITE(ah, AR_QMISC(q),
			  REG_READ(ah, AR_QMISC(q)) | AR_Q_MISC_CBR_INCR_DIS1);
		break;
	case ATH9K_TX_QUEUE_UAPSD:
		REG_WRITE(ah, AR_DMISC(q), REG_READ(ah, AR_DMISC(q)) |
			  AR_D_MISC_POST_FR_BKOFF_DIS);
		break;
	default:
		break;
	}

	if (qi->tqi_intFlags & ATH9K_TXQ_USE_LOCKOUT_BKOFF_DIS) {
		REG_WRITE(ah, AR_DMISC(q),
			  REG_READ(ah, AR_DMISC(q)) |
			  SM(AR_D_MISC_ARB_LOCKOUT_CNTRL_GLOBAL,
			     AR_D_MISC_ARB_LOCKOUT_CNTRL) |
			  AR_D_MISC_POST_FR_BKOFF_DIS);
	}

	if (qi->tqi_qflags & TXQ_FLAG_TXOKINT_ENABLE)
		ah->txok_interrupt_mask |= 1 << q;
	else
		ah->txok_interrupt_mask &= ~(1 << q);
	if (qi->tqi_qflags & TXQ_FLAG_TXERRINT_ENABLE)
		ah->txerr_interrupt_mask |= 1 << q;
	else
		ah->txerr_interrupt_mask &= ~(1 << q);
	if (qi->tqi_qflags & TXQ_FLAG_TXDESCINT_ENABLE)
		ah->txdesc_interrupt_mask |= 1 << q;
	else
		ah->txdesc_interrupt_mask &= ~(1 << q);
	if (qi->tqi_qflags & TXQ_FLAG_TXEOLINT_ENABLE)
		ah->txeol_interrupt_mask |= 1 << q;
	else
		ah->txeol_interrupt_mask &= ~(1 << q);
	if (qi->tqi_qflags & TXQ_FLAG_TXURNINT_ENABLE)
		ah->txurn_interrupt_mask |= 1 << q;
	else
		ah->txurn_interrupt_mask &= ~(1 << q);
	ath9k_hw_set_txq_interrupts(ah, qi);

	return true;
}
EXPORT_SYMBOL(ath9k_hw_resettxqueue);

int ath9k_hw_rxprocdesc(struct ath_hw *ah, struct ath_desc *ds,
			struct ath_rx_status *rs, u64 tsf)
{
	struct ar5416_desc ads;
	struct ar5416_desc *adsp = AR5416DESC(ds);
	u32 phyerr;

	if ((adsp->ds_rxstatus8 & AR_RxDone) == 0)
		return -EINPROGRESS;

	ads.u.rx = adsp->u.rx;

	rs->rs_status = 0;
	rs->rs_flags = 0;

	rs->rs_datalen = ads.ds_rxstatus1 & AR_DataLen;
	rs->rs_tstamp = ads.AR_RcvTimestamp;

	if (ads.ds_rxstatus8 & AR_PostDelimCRCErr) {
		rs->rs_rssi = ATH9K_RSSI_BAD;
		rs->rs_rssi_ctl0 = ATH9K_RSSI_BAD;
		rs->rs_rssi_ctl1 = ATH9K_RSSI_BAD;
		rs->rs_rssi_ctl2 = ATH9K_RSSI_BAD;
		rs->rs_rssi_ext0 = ATH9K_RSSI_BAD;
		rs->rs_rssi_ext1 = ATH9K_RSSI_BAD;
		rs->rs_rssi_ext2 = ATH9K_RSSI_BAD;
	} else {
		rs->rs_rssi = MS(ads.ds_rxstatus4, AR_RxRSSICombined);
		rs->rs_rssi_ctl0 = MS(ads.ds_rxstatus0,
				      AR_RxRSSIAnt00);
		rs->rs_rssi_ctl1 = MS(ads.ds_rxstatus0,
				      AR_RxRSSIAnt01);
		rs->rs_rssi_ctl2 = MS(ads.ds_rxstatus0,
				      AR_RxRSSIAnt02);
		rs->rs_rssi_ext0 = MS(ads.ds_rxstatus4,
				      AR_RxRSSIAnt10);
		rs->rs_rssi_ext1 = MS(ads.ds_rxstatus4,
				      AR_RxRSSIAnt11);
		rs->rs_rssi_ext2 = MS(ads.ds_rxstatus4,
				      AR_RxRSSIAnt12);
	}
	if (ads.ds_rxstatus8 & AR_RxKeyIdxValid)
		rs->rs_keyix = MS(ads.ds_rxstatus8, AR_KeyIdx);
	else
		rs->rs_keyix = ATH9K_RXKEYIX_INVALID;

	rs->rs_rate = RXSTATUS_RATE(ah, (&ads));
	rs->rs_more = (ads.ds_rxstatus1 & AR_RxMore) ? 1 : 0;

	rs->rs_isaggr = (ads.ds_rxstatus8 & AR_RxAggr) ? 1 : 0;
	rs->rs_moreaggr =
		(ads.ds_rxstatus8 & AR_RxMoreAggr) ? 1 : 0;
	rs->rs_antenna = MS(ads.ds_rxstatus3, AR_RxAntenna);
	rs->rs_flags =
		(ads.ds_rxstatus3 & AR_GI) ? ATH9K_RX_GI : 0;
	rs->rs_flags |=
		(ads.ds_rxstatus3 & AR_2040) ? ATH9K_RX_2040 : 0;

	if (ads.ds_rxstatus8 & AR_PreDelimCRCErr)
		rs->rs_flags |= ATH9K_RX_DELIM_CRC_PRE;
	if (ads.ds_rxstatus8 & AR_PostDelimCRCErr)
		rs->rs_flags |= ATH9K_RX_DELIM_CRC_POST;
	if (ads.ds_rxstatus8 & AR_DecryptBusyErr)
		rs->rs_flags |= ATH9K_RX_DECRYPT_BUSY;

	if ((ads.ds_rxstatus8 & AR_RxFrameOK) == 0) {
		if (ads.ds_rxstatus8 & AR_CRCErr)
			rs->rs_status |= ATH9K_RXERR_CRC;
		else if (ads.ds_rxstatus8 & AR_PHYErr) {
			rs->rs_status |= ATH9K_RXERR_PHY;
			phyerr = MS(ads.ds_rxstatus8, AR_PHYErrCode);
			rs->rs_phyerr = phyerr;
		} else if (ads.ds_rxstatus8 & AR_DecryptCRCErr)
			rs->rs_status |= ATH9K_RXERR_DECRYPT;
		else if (ads.ds_rxstatus8 & AR_MichaelErr)
			rs->rs_status |= ATH9K_RXERR_MIC;
	}

	return 0;
}
EXPORT_SYMBOL(ath9k_hw_rxprocdesc);
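
/*
 * Usage sketch (illustrative, not part of the original file): RX processing
 * walks the descriptor chain and stops on -EINPROGRESS, which means the
 * hardware still owns that descriptor. Everything except the exported
 * helpers is an assumption; the two example_* helpers are hypothetical.
 *
 *	struct ath_rx_status rs;
 *
 *	for (;;) {
 *		memset(&rs, 0, sizeof(rs));
 *		if (ath9k_hw_rxprocdesc(ah, ds, &rs, tsf) == -EINPROGRESS)
 *			break;
 *		example_deliver_frame(sc, ds, &rs);	(hypothetical)
 *		ds = example_next_desc(ds);		(hypothetical)
 *	}
 */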

void ath9k_hw_setuprxdesc(struct ath_hw *ah, struct ath_desc *ds,
			  u32 size, u32 flags)
{
	struct ar5416_desc *ads = AR5416DESC(ds);
	struct ath9k_hw_capabilities *pCap = &ah->caps;

	ads->ds_ctl1 = size & AR_BufLen;
	if (flags & ATH9K_RXDESC_INTREQ)
		ads->ds_ctl1 |= AR_RxIntrReq;

	ads->ds_rxstatus8 &= ~AR_RxDone;
	if (!(pCap->hw_caps & ATH9K_HW_CAP_AUTOSLEEP))
		memset(&(ads->u), 0, sizeof(ads->u));
}
EXPORT_SYMBOL(ath9k_hw_setuprxdesc);

/*
 * This can stop or re-enable RX.
 *
 * If "set" is true, this will kill any frame which is currently being
 * transferred between the MAC and baseband and also prevent any new
 * frames from getting started.
 */
bool ath9k_hw_setrxabort(struct ath_hw *ah, bool set)
{
	u32 reg;

	if (set) {
		REG_SET_BIT(ah, AR_DIAG_SW,
			    (AR_DIAG_RX_DIS | AR_DIAG_RX_ABORT));

		if (!ath9k_hw_wait(ah, AR_OBS_BUS_1, AR_OBS_BUS_1_RX_STATE,
				   0, AH_WAIT_TIMEOUT)) {
			REG_CLR_BIT(ah, AR_DIAG_SW,
				    (AR_DIAG_RX_DIS |
				     AR_DIAG_RX_ABORT));

			reg = REG_READ(ah, AR_OBS_BUS_1);
			ath_print(ath9k_hw_common(ah), ATH_DBG_FATAL,
				  "RX failed to go idle in 10 ms RXSM=0x%x\n",
				  reg);

			return false;
		}
	} else {
		REG_CLR_BIT(ah, AR_DIAG_SW,
			    (AR_DIAG_RX_DIS | AR_DIAG_RX_ABORT));
	}

	return true;
}
EXPORT_SYMBOL(ath9k_hw_setrxabort);

void ath9k_hw_putrxbuf(struct ath_hw *ah, u32 rxdp)
{
	REG_WRITE(ah, AR_RXDP, rxdp);
}
EXPORT_SYMBOL(ath9k_hw_putrxbuf);

void ath9k_hw_startpcureceive(struct ath_hw *ah)
{
	ath9k_enable_mib_counters(ah);

	ath9k_ani_reset(ah);

	REG_CLR_BIT(ah, AR_DIAG_SW, (AR_DIAG_RX_DIS | AR_DIAG_RX_ABORT));
}
EXPORT_SYMBOL(ath9k_hw_startpcureceive);

void ath9k_hw_stoppcurecv(struct ath_hw *ah)
{
	REG_SET_BIT(ah, AR_DIAG_SW, AR_DIAG_RX_DIS);

	ath9k_hw_disable_mib_counters(ah);
}
EXPORT_SYMBOL(ath9k_hw_stoppcurecv);

bool ath9k_hw_stopdmarecv(struct ath_hw *ah)
{
#define AH_RX_STOP_DMA_TIMEOUT	10000	/* usec */
#define AH_RX_TIME_QUANTUM	100	/* usec */
	struct ath_common *common = ath9k_hw_common(ah);
	int i;

	REG_WRITE(ah, AR_CR, AR_CR_RXD);

	/* Wait for rx enable bit to go low */
	for (i = AH_RX_STOP_DMA_TIMEOUT / AH_RX_TIME_QUANTUM; i != 0; i--) {
		if ((REG_READ(ah, AR_CR) & AR_CR_RXE) == 0)
			break;
		udelay(AH_RX_TIME_QUANTUM);
	}

	if (i == 0) {
		ath_print(common, ATH_DBG_FATAL,
			  "DMA failed to stop in %d ms "
			  "AR_CR=0x%08x AR_DIAG_SW=0x%08x\n",
			  AH_RX_STOP_DMA_TIMEOUT / 1000,
			  REG_READ(ah, AR_CR),
			  REG_READ(ah, AR_DIAG_SW));
		return false;
	} else {
		return true;
	}

#undef AH_RX_TIME_QUANTUM
#undef AH_RX_STOP_DMA_TIMEOUT
}
EXPORT_SYMBOL(ath9k_hw_stopdmarecv);
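
/*
 * Note (not in the original file): the usual RX teardown order in the
 * driver is believed to be PCU first, then DMA, so no new frames start
 * while the DMA engine drains. A sketch of that ordering:
 *
 *	ath9k_hw_stoppcurecv(ah);		PCU stops accepting frames
 *	ath9k_hw_setrxabort(ah, true);		abort any frame in flight
 *	stopped = ath9k_hw_stopdmarecv(ah);	wait for AR_CR_RXE to clear
 */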

int ath9k_hw_beaconq_setup(struct ath_hw *ah)
{
	struct ath9k_tx_queue_info qi;

	memset(&qi, 0, sizeof(qi));
	qi.tqi_aifs = 1;
	qi.tqi_cwmin = 0;
	qi.tqi_cwmax = 0;
	/* NB: don't enable any interrupts */
	return ath9k_hw_setuptxqueue(ah, ATH9K_TX_QUEUE_BEACON, &qi);
}
EXPORT_SYMBOL(ath9k_hw_beaconq_setup);

bool ath9k_hw_intrpend(struct ath_hw *ah)
{
	u32 host_isr;

	if (AR_SREV_9100(ah))
		return true;

	host_isr = REG_READ(ah, AR_INTR_ASYNC_CAUSE);
	if ((host_isr & AR_INTR_MAC_IRQ) && (host_isr != AR_INTR_SPURIOUS))
		return true;

	host_isr = REG_READ(ah, AR_INTR_SYNC_CAUSE);
	if ((host_isr & AR_INTR_SYNC_DEFAULT)
	    && (host_isr != AR_INTR_SPURIOUS))
		return true;

	return false;
}
EXPORT_SYMBOL(ath9k_hw_intrpend);

enum ath9k_int ath9k_hw_set_interrupts(struct ath_hw *ah,
				       enum ath9k_int ints)
{
	enum ath9k_int omask = ah->imask;
	u32 mask, mask2;
	struct ath9k_hw_capabilities *pCap = &ah->caps;
	struct ath_common *common = ath9k_hw_common(ah);

	ath_print(common, ATH_DBG_INTERRUPT, "0x%x => 0x%x\n", omask, ints);

	if (omask & ATH9K_INT_GLOBAL) {
		ath_print(common, ATH_DBG_INTERRUPT, "disable IER\n");
		REG_WRITE(ah, AR_IER, AR_IER_DISABLE);
		(void) REG_READ(ah, AR_IER);
		if (!AR_SREV_9100(ah)) {
			REG_WRITE(ah, AR_INTR_ASYNC_ENABLE, 0);
			(void) REG_READ(ah, AR_INTR_ASYNC_ENABLE);

			REG_WRITE(ah, AR_INTR_SYNC_ENABLE, 0);
			(void) REG_READ(ah, AR_INTR_SYNC_ENABLE);
		}
	}

	/* TODO: global int Ref count */
	mask = ints & ATH9K_INT_COMMON;
	mask2 = 0;

	if (ints & ATH9K_INT_TX) {
		if (ah->config.tx_intr_mitigation)
			mask |= AR_IMR_TXMINTR | AR_IMR_TXINTM;
		if (ah->txok_interrupt_mask)
			mask |= AR_IMR_TXOK;
		if (ah->txdesc_interrupt_mask)
			mask |= AR_IMR_TXDESC;
		if (ah->txerr_interrupt_mask)
			mask |= AR_IMR_TXERR;
		if (ah->txeol_interrupt_mask)
			mask |= AR_IMR_TXEOL;
	}
	if (ints & ATH9K_INT_RX) {
		if (AR_SREV_9300_20_OR_LATER(ah)) {
			mask |= AR_IMR_RXERR | AR_IMR_RXOK_HP;
			if (ah->config.rx_intr_mitigation) {
				mask &= ~AR_IMR_RXOK_LP;
				mask |= AR_IMR_RXMINTR | AR_IMR_RXINTM;
			} else {
				mask |= AR_IMR_RXOK_LP;
			}
		} else {
			if (ah->config.rx_intr_mitigation)
				mask |= AR_IMR_RXMINTR | AR_IMR_RXINTM;
			else
				mask |= AR_IMR_RXOK | AR_IMR_RXDESC;
		}
		if (!(pCap->hw_caps & ATH9K_HW_CAP_AUTOSLEEP))
			mask |= AR_IMR_GENTMR;
	}

	if (ints & (ATH9K_INT_BMISC)) {
		mask |= AR_IMR_BCNMISC;
		if (ints & ATH9K_INT_TIM)
			mask2 |= AR_IMR_S2_TIM;
		if (ints & ATH9K_INT_DTIM)
			mask2 |= AR_IMR_S2_DTIM;
		if (ints & ATH9K_INT_DTIMSYNC)
			mask2 |= AR_IMR_S2_DTIMSYNC;
		if (ints & ATH9K_INT_CABEND)
			mask2 |= AR_IMR_S2_CABEND;
		if (ints & ATH9K_INT_TSFOOR)
			mask2 |= AR_IMR_S2_TSFOOR;
	}

	if (ints & (ATH9K_INT_GTT | ATH9K_INT_CST)) {
		mask |= AR_IMR_BCNMISC;
		if (ints & ATH9K_INT_GTT)
			mask2 |= AR_IMR_S2_GTT;
		if (ints & ATH9K_INT_CST)
			mask2 |= AR_IMR_S2_CST;
	}

	ath_print(common, ATH_DBG_INTERRUPT, "new IMR 0x%x\n", mask);
	REG_WRITE(ah, AR_IMR, mask);
	ah->imrs2_reg &= ~(AR_IMR_S2_TIM | AR_IMR_S2_DTIM | AR_IMR_S2_DTIMSYNC |
			   AR_IMR_S2_CABEND | AR_IMR_S2_CABTO |
			   AR_IMR_S2_TSFOOR | AR_IMR_S2_GTT | AR_IMR_S2_CST);
	ah->imrs2_reg |= mask2;
	REG_WRITE(ah, AR_IMR_S2, ah->imrs2_reg);

	if (!(pCap->hw_caps & ATH9K_HW_CAP_AUTOSLEEP)) {
		if (ints & ATH9K_INT_TIM_TIMER)
			REG_SET_BIT(ah, AR_IMR_S5, AR_IMR_S5_TIM_TIMER);
		else
			REG_CLR_BIT(ah, AR_IMR_S5, AR_IMR_S5_TIM_TIMER);
	}

	if (ints & ATH9K_INT_GLOBAL) {
		ath_print(common, ATH_DBG_INTERRUPT, "enable IER\n");
		REG_WRITE(ah, AR_IER, AR_IER_ENABLE);
		if (!AR_SREV_9100(ah)) {
			REG_WRITE(ah, AR_INTR_ASYNC_ENABLE,
				  AR_INTR_MAC_IRQ);
			REG_WRITE(ah, AR_INTR_ASYNC_MASK, AR_INTR_MAC_IRQ);

			REG_WRITE(ah, AR_INTR_SYNC_ENABLE,
				  AR_INTR_SYNC_DEFAULT);
			REG_WRITE(ah, AR_INTR_SYNC_MASK,
				  AR_INTR_SYNC_DEFAULT);
		}
		ath_print(common, ATH_DBG_INTERRUPT, "AR_IMR 0x%x IER 0x%x\n",
			  REG_READ(ah, AR_IMR), REG_READ(ah, AR_IER));
	}

	return omask;
}
EXPORT_SYMBOL(ath9k_hw_set_interrupts);
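
/*
 * Usage sketch (illustrative, not part of the original file): because
 * ath9k_hw_set_interrupts() returns the previous mask, callers can bracket
 * a critical register sequence the same way ath9k_hw_updatetxtriglevel()
 * does above, masking the global interrupt and restoring the old state
 * afterwards:
 *
 *	enum ath9k_int omask;
 *
 *	omask = ath9k_hw_set_interrupts(ah, ah->imask & ~ATH9K_INT_GLOBAL);
 *	example_touch_hardware(ah);		(hypothetical critical section)
 *	ath9k_hw_set_interrupts(ah, omask);
 */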