mac.c

/*
 * Copyright (c) 2008 Atheros Communications Inc.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include "core.h"
#include "hw.h"
#include "reg.h"
#include "phy.h"

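/*
 * Update the hardware interrupt mask registers (AR_IMR_S0/S1/S2) from the
 * per-queue TX OK/ERR/DESC/EOL/URN interrupt masks cached in the HAL.
 */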
static void ath9k_hw_set_txq_interrupts(struct ath_hal *ah,
					struct ath9k_tx_queue_info *qi)
{
	struct ath_hal_5416 *ahp = AH5416(ah);

	DPRINTF(ah->ah_sc, ATH_DBG_INTERRUPT,
		"%s: tx ok 0x%x err 0x%x desc 0x%x eol 0x%x urn 0x%x\n",
		__func__, ahp->ah_txOkInterruptMask,
		ahp->ah_txErrInterruptMask, ahp->ah_txDescInterruptMask,
		ahp->ah_txEolInterruptMask, ahp->ah_txUrnInterruptMask);

	REG_WRITE(ah, AR_IMR_S0,
		  SM(ahp->ah_txOkInterruptMask, AR_IMR_S0_QCU_TXOK)
		  | SM(ahp->ah_txDescInterruptMask, AR_IMR_S0_QCU_TXDESC));
	REG_WRITE(ah, AR_IMR_S1,
		  SM(ahp->ah_txErrInterruptMask, AR_IMR_S1_QCU_TXERR)
		  | SM(ahp->ah_txEolInterruptMask, AR_IMR_S1_QCU_TXEOL));
	REG_RMW_FIELD(ah, AR_IMR_S2,
		      AR_IMR_S2_QCU_TXURN, ahp->ah_txUrnInterruptMask);
}

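/* Dump the raw DMA observation/debug registers and decode the QCU/DCU state. */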
void ath9k_hw_dmaRegDump(struct ath_hal *ah)
{
	u32 val[ATH9K_NUM_DMA_DEBUG_REGS];
	int qcuOffset = 0, dcuOffset = 0;
	u32 *qcuBase = &val[0], *dcuBase = &val[4];
	int i;

	REG_WRITE(ah, AR_MACMISC,
		  ((AR_MACMISC_DMA_OBS_LINE_8 << AR_MACMISC_DMA_OBS_S) |
		   (AR_MACMISC_MISC_OBS_BUS_1 <<
		    AR_MACMISC_MISC_OBS_BUS_MSB_S)));

	DPRINTF(ah->ah_sc, ATH_DBG_REG_IO, "Raw DMA Debug values:\n");
	for (i = 0; i < ATH9K_NUM_DMA_DEBUG_REGS; i++) {
		if (i % 4 == 0)
			DPRINTF(ah->ah_sc, ATH_DBG_REG_IO, "\n");
		val[i] = REG_READ(ah, AR_DMADBG_0 + (i * sizeof(u32)));
		DPRINTF(ah->ah_sc, ATH_DBG_REG_IO, "%d: %08x ", i, val[i]);
	}

	DPRINTF(ah->ah_sc, ATH_DBG_REG_IO, "\n\n");
	DPRINTF(ah->ah_sc, ATH_DBG_REG_IO,
		"Num QCU: chain_st fsp_ok fsp_st DCU: chain_st\n");
	for (i = 0; i < ATH9K_NUM_QUEUES;
	     i++, qcuOffset += 4, dcuOffset += 5) {
		if (i == 8) {
			qcuOffset = 0;
			qcuBase++;
		}
		if (i == 6) {
			dcuOffset = 0;
			dcuBase++;
		}
		DPRINTF(ah->ah_sc, ATH_DBG_REG_IO,
			"%2d %2x %1x %2x %2x\n",
			i, (*qcuBase & (0x7 << qcuOffset)) >> qcuOffset,
			(*qcuBase & (0x8 << qcuOffset)) >> (qcuOffset + 3),
			(val[2] & (0x7 << (i * 3))) >> (i * 3),
			(*dcuBase & (0x1f << dcuOffset)) >> dcuOffset);
	}

	DPRINTF(ah->ah_sc, ATH_DBG_REG_IO, "\n");
	DPRINTF(ah->ah_sc, ATH_DBG_REG_IO,
		"qcu_stitch state: %2x qcu_fetch state: %2x\n",
		(val[3] & 0x003c0000) >> 18, (val[3] & 0x03c00000) >> 22);
	DPRINTF(ah->ah_sc, ATH_DBG_REG_IO,
		"qcu_complete state: %2x dcu_complete state: %2x\n",
		(val[3] & 0x1c000000) >> 26, (val[6] & 0x3));
	DPRINTF(ah->ah_sc, ATH_DBG_REG_IO,
		"dcu_arb state: %2x dcu_fp state: %2x\n",
		(val[5] & 0x06000000) >> 25, (val[5] & 0x38000000) >> 27);
	DPRINTF(ah->ah_sc, ATH_DBG_REG_IO,
		"chan_idle_dur: %3d chan_idle_dur_valid: %1d\n",
		(val[6] & 0x000003fc) >> 2, (val[6] & 0x00000400) >> 10);
	DPRINTF(ah->ah_sc, ATH_DBG_REG_IO,
		"txfifo_valid_0: %1d txfifo_valid_1: %1d\n",
		(val[6] & 0x00000800) >> 11, (val[6] & 0x00001000) >> 12);
	DPRINTF(ah->ah_sc, ATH_DBG_REG_IO,
		"txfifo_dcu_num_0: %2d txfifo_dcu_num_1: %2d\n",
		(val[6] & 0x0001e000) >> 13, (val[6] & 0x001e0000) >> 17);
	DPRINTF(ah->ah_sc, ATH_DBG_REG_IO, "pcu observe 0x%x \n",
		REG_READ(ah, AR_OBS_BUS_1));
	DPRINTF(ah->ah_sc, ATH_DBG_REG_IO,
		"AR_CR 0x%x \n", REG_READ(ah, AR_CR));
}

u32 ath9k_hw_gettxbuf(struct ath_hal *ah, u32 q)
{
	return REG_READ(ah, AR_QTXDP(q));
}

bool ath9k_hw_puttxbuf(struct ath_hal *ah, u32 q, u32 txdp)
{
	REG_WRITE(ah, AR_QTXDP(q), txdp);
	return true;
}

bool ath9k_hw_txstart(struct ath_hal *ah, u32 q)
{
	DPRINTF(ah->ah_sc, ATH_DBG_QUEUE, "%s: queue %u\n", __func__, q);

	REG_WRITE(ah, AR_Q_TXE, 1 << q);
	return true;
}

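/*
 * Return the number of frames pending on queue q. A queue whose pending
 * frame count reads zero but whose TXE bit is still set is reported as
 * having one pending frame.
 */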
u32 ath9k_hw_numtxpending(struct ath_hal *ah, u32 q)
{
	u32 npend;

	npend = REG_READ(ah, AR_QSTS(q)) & AR_Q_STS_PEND_FR_CNT;
	if (npend == 0) {
		if (REG_READ(ah, AR_Q_TXE) & (1 << q))
			npend = 1;
	}
	return npend;
}

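/*
 * Raise or lower the TX FIFO trigger level in AR_TXCFG by one step, with
 * global interrupts masked around the read-modify-write. Returns true if
 * the level actually changed.
 */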
bool ath9k_hw_updatetxtriglevel(struct ath_hal *ah, bool bIncTrigLevel)
{
	struct ath_hal_5416 *ahp = AH5416(ah);
	u32 txcfg, curLevel, newLevel;
	enum ath9k_int omask;

	if (ah->ah_txTrigLevel >= MAX_TX_FIFO_THRESHOLD)
		return false;

	omask = ath9k_hw_set_interrupts(ah, ahp->ah_maskReg & ~ATH9K_INT_GLOBAL);

	txcfg = REG_READ(ah, AR_TXCFG);
	curLevel = MS(txcfg, AR_FTRIG);
	newLevel = curLevel;
	if (bIncTrigLevel) {
		if (curLevel < MAX_TX_FIFO_THRESHOLD)
			newLevel++;
	} else if (curLevel > MIN_TX_FIFO_THRESHOLD)
		newLevel--;
	if (newLevel != curLevel)
		REG_WRITE(ah, AR_TXCFG,
			  (txcfg & ~AR_FTRIG) | SM(newLevel, AR_FTRIG));

	ath9k_hw_set_interrupts(ah, omask);

	ah->ah_txTrigLevel = newLevel;

	return newLevel != curLevel;
}

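/*
 * Disable DMA on TX queue q and wait for it to drain. If frames are still
 * pending after the first wait, program a short quiet period to stop
 * transmission and force the channel-idle state so the last frame can be
 * killed, then wait again.
 */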
bool ath9k_hw_stoptxdma(struct ath_hal *ah, u32 q)
{
	u32 tsfLow, j, wait;

	REG_WRITE(ah, AR_Q_TXD, 1 << q);

	for (wait = 1000; wait != 0; wait--) {
		if (ath9k_hw_numtxpending(ah, q) == 0)
			break;
		udelay(100);
	}

	if (ath9k_hw_numtxpending(ah, q)) {
		DPRINTF(ah->ah_sc, ATH_DBG_QUEUE,
			"%s: Num of pending TX Frames %d on Q %d\n",
			__func__, ath9k_hw_numtxpending(ah, q), q);

		for (j = 0; j < 2; j++) {
			tsfLow = REG_READ(ah, AR_TSF_L32);
			REG_WRITE(ah, AR_QUIET2,
				  SM(10, AR_QUIET2_QUIET_DUR));
			REG_WRITE(ah, AR_QUIET_PERIOD, 100);
			REG_WRITE(ah, AR_NEXT_QUIET_TIMER, tsfLow >> 10);
			REG_SET_BIT(ah, AR_TIMER_MODE,
				    AR_QUIET_TIMER_EN);

			if ((REG_READ(ah, AR_TSF_L32) >> 10) == (tsfLow >> 10))
				break;

			DPRINTF(ah->ah_sc, ATH_DBG_QUEUE,
				"%s: TSF have moved while trying to set "
				"quiet time TSF: 0x%08x\n",
				__func__, tsfLow);
		}

		REG_SET_BIT(ah, AR_DIAG_SW, AR_DIAG_FORCE_CH_IDLE_HIGH);

		udelay(200);
		REG_CLR_BIT(ah, AR_TIMER_MODE, AR_QUIET_TIMER_EN);

		wait = 1000;

		while (ath9k_hw_numtxpending(ah, q)) {
			if ((--wait) == 0) {
				DPRINTF(ah->ah_sc, ATH_DBG_XMIT,
					"%s: Failed to stop Tx DMA in 100 "
					"msec after killing last frame\n",
					__func__);
				break;
			}
			udelay(100);
		}

		REG_CLR_BIT(ah, AR_DIAG_SW, AR_DIAG_FORCE_CH_IDLE_HIGH);
	}

	REG_WRITE(ah, AR_Q_TXD, 0);
	return wait != 0;
}

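/*
 * Fill in the length/linkage fields of a TX descriptor for one segment of a
 * frame and clear its status words. For the last segment, ctl2/ctl3 are
 * copied from ds0, the first descriptor of the frame.
 */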
bool ath9k_hw_filltxdesc(struct ath_hal *ah, struct ath_desc *ds,
			 u32 segLen, bool firstSeg,
			 bool lastSeg, const struct ath_desc *ds0)
{
	struct ar5416_desc *ads = AR5416DESC(ds);

	if (firstSeg) {
		ads->ds_ctl1 |= segLen | (lastSeg ? 0 : AR_TxMore);
	} else if (lastSeg) {
		ads->ds_ctl0 = 0;
		ads->ds_ctl1 = segLen;
		ads->ds_ctl2 = AR5416DESC_CONST(ds0)->ds_ctl2;
		ads->ds_ctl3 = AR5416DESC_CONST(ds0)->ds_ctl3;
	} else {
		ads->ds_ctl0 = 0;
		ads->ds_ctl1 = segLen | AR_TxMore;
		ads->ds_ctl2 = 0;
		ads->ds_ctl3 = 0;
	}
	ads->ds_txstatus0 = ads->ds_txstatus1 = 0;
	ads->ds_txstatus2 = ads->ds_txstatus3 = 0;
	ads->ds_txstatus4 = ads->ds_txstatus5 = 0;
	ads->ds_txstatus6 = ads->ds_txstatus7 = 0;
	ads->ds_txstatus8 = ads->ds_txstatus9 = 0;
	return true;
}

void ath9k_hw_cleartxdesc(struct ath_hal *ah, struct ath_desc *ds)
{
	struct ar5416_desc *ads = AR5416DESC(ds);

	ads->ds_txstatus0 = ads->ds_txstatus1 = 0;
	ads->ds_txstatus2 = ads->ds_txstatus3 = 0;
	ads->ds_txstatus4 = ads->ds_txstatus5 = 0;
	ads->ds_txstatus6 = ads->ds_txstatus7 = 0;
	ads->ds_txstatus8 = ads->ds_txstatus9 = 0;
}

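/*
 * Process a completed TX descriptor: translate the hardware status words
 * into the generic ds_txstat fields (error and flag bits, final rate index,
 * RSSI, EVM and retry counts). Returns -EINPROGRESS if the hardware has not
 * finished with the descriptor yet.
 */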
int ath9k_hw_txprocdesc(struct ath_hal *ah, struct ath_desc *ds)
{
	struct ar5416_desc *ads = AR5416DESC(ds);

	if ((ads->ds_txstatus9 & AR_TxDone) == 0)
		return -EINPROGRESS;

	ds->ds_txstat.ts_seqnum = MS(ads->ds_txstatus9, AR_SeqNum);
	ds->ds_txstat.ts_tstamp = ads->AR_SendTimestamp;
	ds->ds_txstat.ts_status = 0;
	ds->ds_txstat.ts_flags = 0;

	if (ads->ds_txstatus1 & AR_ExcessiveRetries)
		ds->ds_txstat.ts_status |= ATH9K_TXERR_XRETRY;
	if (ads->ds_txstatus1 & AR_Filtered)
		ds->ds_txstat.ts_status |= ATH9K_TXERR_FILT;
	if (ads->ds_txstatus1 & AR_FIFOUnderrun) {
		ds->ds_txstat.ts_status |= ATH9K_TXERR_FIFO;
		ath9k_hw_updatetxtriglevel(ah, true);
	}
	if (ads->ds_txstatus9 & AR_TxOpExceeded)
		ds->ds_txstat.ts_status |= ATH9K_TXERR_XTXOP;
	if (ads->ds_txstatus1 & AR_TxTimerExpired)
		ds->ds_txstat.ts_status |= ATH9K_TXERR_TIMER_EXPIRED;
	if (ads->ds_txstatus1 & AR_DescCfgErr)
		ds->ds_txstat.ts_flags |= ATH9K_TX_DESC_CFG_ERR;
	if (ads->ds_txstatus1 & AR_TxDataUnderrun) {
		ds->ds_txstat.ts_flags |= ATH9K_TX_DATA_UNDERRUN;
		ath9k_hw_updatetxtriglevel(ah, true);
	}
	if (ads->ds_txstatus1 & AR_TxDelimUnderrun) {
		ds->ds_txstat.ts_flags |= ATH9K_TX_DELIM_UNDERRUN;
		ath9k_hw_updatetxtriglevel(ah, true);
	}
	if (ads->ds_txstatus0 & AR_TxBaStatus) {
		ds->ds_txstat.ts_flags |= ATH9K_TX_BA;
		ds->ds_txstat.ba_low = ads->AR_BaBitmapLow;
		ds->ds_txstat.ba_high = ads->AR_BaBitmapHigh;
	}

	ds->ds_txstat.ts_rateindex = MS(ads->ds_txstatus9, AR_FinalTxIdx);
	switch (ds->ds_txstat.ts_rateindex) {
	case 0:
		ds->ds_txstat.ts_ratecode = MS(ads->ds_ctl3, AR_XmitRate0);
		break;
	case 1:
		ds->ds_txstat.ts_ratecode = MS(ads->ds_ctl3, AR_XmitRate1);
		break;
	case 2:
		ds->ds_txstat.ts_ratecode = MS(ads->ds_ctl3, AR_XmitRate2);
		break;
	case 3:
		ds->ds_txstat.ts_ratecode = MS(ads->ds_ctl3, AR_XmitRate3);
		break;
	}

	ds->ds_txstat.ts_rssi = MS(ads->ds_txstatus5, AR_TxRSSICombined);
	ds->ds_txstat.ts_rssi_ctl0 = MS(ads->ds_txstatus0, AR_TxRSSIAnt00);
	ds->ds_txstat.ts_rssi_ctl1 = MS(ads->ds_txstatus0, AR_TxRSSIAnt01);
	ds->ds_txstat.ts_rssi_ctl2 = MS(ads->ds_txstatus0, AR_TxRSSIAnt02);
	ds->ds_txstat.ts_rssi_ext0 = MS(ads->ds_txstatus5, AR_TxRSSIAnt10);
	ds->ds_txstat.ts_rssi_ext1 = MS(ads->ds_txstatus5, AR_TxRSSIAnt11);
	ds->ds_txstat.ts_rssi_ext2 = MS(ads->ds_txstatus5, AR_TxRSSIAnt12);
	ds->ds_txstat.evm0 = ads->AR_TxEVM0;
	ds->ds_txstat.evm1 = ads->AR_TxEVM1;
	ds->ds_txstat.evm2 = ads->AR_TxEVM2;
	ds->ds_txstat.ts_shortretry = MS(ads->ds_txstatus1, AR_RTSFailCnt);
	ds->ds_txstat.ts_longretry = MS(ads->ds_txstatus1, AR_DataFailCnt);
	ds->ds_txstat.ts_virtcol = MS(ads->ds_txstatus1, AR_VirtRetryCnt);
	ds->ds_txstat.ts_antenna = 1;

	return 0;
}

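/*
 * Set up the basic control words of an 11n TX descriptor: frame length,
 * clamped transmit power, destination key index, frame type, encryption
 * type and the assorted per-frame flags.
 */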
void ath9k_hw_set11n_txdesc(struct ath_hal *ah, struct ath_desc *ds,
			    u32 pktLen, enum ath9k_pkt_type type, u32 txPower,
			    u32 keyIx, enum ath9k_key_type keyType, u32 flags)
{
	struct ar5416_desc *ads = AR5416DESC(ds);
	struct ath_hal_5416 *ahp = AH5416(ah);

	txPower += ahp->ah_txPowerIndexOffset;
	if (txPower > 63)
		txPower = 63;

	ads->ds_ctl0 = (pktLen & AR_FrameLen)
		| (flags & ATH9K_TXDESC_VMF ? AR_VirtMoreFrag : 0)
		| SM(txPower, AR_XmitPower)
		| (flags & ATH9K_TXDESC_VEOL ? AR_VEOL : 0)
		| (flags & ATH9K_TXDESC_CLRDMASK ? AR_ClrDestMask : 0)
		| (flags & ATH9K_TXDESC_INTREQ ? AR_TxIntrReq : 0)
		| (keyIx != ATH9K_TXKEYIX_INVALID ? AR_DestIdxValid : 0);

	ads->ds_ctl1 =
		(keyIx != ATH9K_TXKEYIX_INVALID ? SM(keyIx, AR_DestIdx) : 0)
		| SM(type, AR_FrameType)
		| (flags & ATH9K_TXDESC_NOACK ? AR_NoAck : 0)
		| (flags & ATH9K_TXDESC_EXT_ONLY ? AR_ExtOnly : 0)
		| (flags & ATH9K_TXDESC_EXT_AND_CTL ? AR_ExtAndCtl : 0);

	ads->ds_ctl6 = SM(keyType, AR_EncrType);

	if (AR_SREV_9285(ah)) {
		ads->ds_ctl8 = 0;
		ads->ds_ctl9 = 0;
		ads->ds_ctl10 = 0;
		ads->ds_ctl11 = 0;
	}
}

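/*
 * Program the multi-rate retry scenario into the descriptor: per-series try
 * counts, rates, packet/RTS-CTS durations and rate flags, plus RTS/CTS
 * protection. ctl2/ctl3 are mirrored into the last descriptor of the frame.
 */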
void ath9k_hw_set11n_ratescenario(struct ath_hal *ah, struct ath_desc *ds,
				  struct ath_desc *lastds,
				  u32 durUpdateEn, u32 rtsctsRate,
				  u32 rtsctsDuration,
				  struct ath9k_11n_rate_series series[],
				  u32 nseries, u32 flags)
{
	struct ar5416_desc *ads = AR5416DESC(ds);
	struct ar5416_desc *last_ads = AR5416DESC(lastds);
	u32 ds_ctl0;

	(void) nseries;
	(void) rtsctsDuration;

	if (flags & (ATH9K_TXDESC_RTSENA | ATH9K_TXDESC_CTSENA)) {
		ds_ctl0 = ads->ds_ctl0;

		if (flags & ATH9K_TXDESC_RTSENA) {
			ds_ctl0 &= ~AR_CTSEnable;
			ds_ctl0 |= AR_RTSEnable;
		} else {
			ds_ctl0 &= ~AR_RTSEnable;
			ds_ctl0 |= AR_CTSEnable;
		}

		ads->ds_ctl0 = ds_ctl0;
	} else {
		ads->ds_ctl0 =
			(ads->ds_ctl0 & ~(AR_RTSEnable | AR_CTSEnable));
	}

	ads->ds_ctl2 = set11nTries(series, 0)
		| set11nTries(series, 1)
		| set11nTries(series, 2)
		| set11nTries(series, 3)
		| (durUpdateEn ? AR_DurUpdateEna : 0)
		| SM(0, AR_BurstDur);

	ads->ds_ctl3 = set11nRate(series, 0)
		| set11nRate(series, 1)
		| set11nRate(series, 2)
		| set11nRate(series, 3);

	ads->ds_ctl4 = set11nPktDurRTSCTS(series, 0)
		| set11nPktDurRTSCTS(series, 1);

	ads->ds_ctl5 = set11nPktDurRTSCTS(series, 2)
		| set11nPktDurRTSCTS(series, 3);

	ads->ds_ctl7 = set11nRateFlags(series, 0)
		| set11nRateFlags(series, 1)
		| set11nRateFlags(series, 2)
		| set11nRateFlags(series, 3)
		| SM(rtsctsRate, AR_RTSCTSRate);

	last_ads->ds_ctl2 = ads->ds_ctl2;
	last_ads->ds_ctl3 = ads->ds_ctl3;
}

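/*
 * A-MPDU descriptor helpers: mark a descriptor as the first, middle or last
 * subframe of an aggregate, set the aggregate length and pad-delimiter
 * count, or clear the aggregation bits again.
 */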
void ath9k_hw_set11n_aggr_first(struct ath_hal *ah, struct ath_desc *ds,
				u32 aggrLen)
{
	struct ar5416_desc *ads = AR5416DESC(ds);

	ads->ds_ctl1 |= (AR_IsAggr | AR_MoreAggr);

	ads->ds_ctl6 &= ~AR_AggrLen;
	ads->ds_ctl6 |= SM(aggrLen, AR_AggrLen);
}

void ath9k_hw_set11n_aggr_middle(struct ath_hal *ah, struct ath_desc *ds,
				 u32 numDelims)
{
	struct ar5416_desc *ads = AR5416DESC(ds);
	unsigned int ctl6;

	ads->ds_ctl1 |= (AR_IsAggr | AR_MoreAggr);

	ctl6 = ads->ds_ctl6;
	ctl6 &= ~AR_PadDelim;
	ctl6 |= SM(numDelims, AR_PadDelim);
	ads->ds_ctl6 = ctl6;
}

void ath9k_hw_set11n_aggr_last(struct ath_hal *ah, struct ath_desc *ds)
{
	struct ar5416_desc *ads = AR5416DESC(ds);

	ads->ds_ctl1 |= AR_IsAggr;
	ads->ds_ctl1 &= ~AR_MoreAggr;
	ads->ds_ctl6 &= ~AR_PadDelim;
}

void ath9k_hw_clr11n_aggr(struct ath_hal *ah, struct ath_desc *ds)
{
	struct ar5416_desc *ads = AR5416DESC(ds);

	ads->ds_ctl1 &= (~AR_IsAggr & ~AR_MoreAggr);
}

void ath9k_hw_set11n_burstduration(struct ath_hal *ah, struct ath_desc *ds,
				   u32 burstDuration)
{
	struct ar5416_desc *ads = AR5416DESC(ds);

	ads->ds_ctl2 &= ~AR_BurstDur;
	ads->ds_ctl2 |= SM(burstDuration, AR_BurstDur);
}

void ath9k_hw_set11n_virtualmorefrag(struct ath_hal *ah, struct ath_desc *ds,
				     u32 vmf)
{
	struct ar5416_desc *ads = AR5416DESC(ds);

	if (vmf)
		ads->ds_ctl0 |= AR_VirtMoreFrag;
	else
		ads->ds_ctl0 &= ~AR_VirtMoreFrag;
}

void ath9k_hw_gettxintrtxqs(struct ath_hal *ah, u32 *txqs)
{
	struct ath_hal_5416 *ahp = AH5416(ah);

	*txqs &= ahp->ah_intrTxqs;
	ahp->ah_intrTxqs &= ~(*txqs);
}

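/*
 * Copy caller-supplied queue parameters into the HAL's per-queue state,
 * applying defaults and clamping AIFS, CWmin/CWmax (rounded up to a
 * power-of-two minus one) and the retry limits.
 */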
bool ath9k_hw_set_txq_props(struct ath_hal *ah, int q,
			    const struct ath9k_tx_queue_info *qinfo)
{
	u32 cw;
	struct ath_hal_5416 *ahp = AH5416(ah);
	struct ath9k_hw_capabilities *pCap = &ah->ah_caps;
	struct ath9k_tx_queue_info *qi;

	if (q >= pCap->total_queues) {
		DPRINTF(ah->ah_sc, ATH_DBG_QUEUE, "%s: invalid queue num %u\n",
			__func__, q);
		return false;
	}

	qi = &ahp->ah_txq[q];
	if (qi->tqi_type == ATH9K_TX_QUEUE_INACTIVE) {
		DPRINTF(ah->ah_sc, ATH_DBG_QUEUE, "%s: inactive queue\n",
			__func__);
		return false;
	}

	DPRINTF(ah->ah_sc, ATH_DBG_QUEUE, "%s: queue %p\n", __func__, qi);

	qi->tqi_ver = qinfo->tqi_ver;
	qi->tqi_subtype = qinfo->tqi_subtype;
	qi->tqi_qflags = qinfo->tqi_qflags;
	qi->tqi_priority = qinfo->tqi_priority;
	if (qinfo->tqi_aifs != ATH9K_TXQ_USEDEFAULT)
		qi->tqi_aifs = min(qinfo->tqi_aifs, 255U);
	else
		qi->tqi_aifs = INIT_AIFS;
	if (qinfo->tqi_cwmin != ATH9K_TXQ_USEDEFAULT) {
		cw = min(qinfo->tqi_cwmin, 1024U);
		qi->tqi_cwmin = 1;
		while (qi->tqi_cwmin < cw)
			qi->tqi_cwmin = (qi->tqi_cwmin << 1) | 1;
	} else
		qi->tqi_cwmin = qinfo->tqi_cwmin;
	if (qinfo->tqi_cwmax != ATH9K_TXQ_USEDEFAULT) {
		cw = min(qinfo->tqi_cwmax, 1024U);
		qi->tqi_cwmax = 1;
		while (qi->tqi_cwmax < cw)
			qi->tqi_cwmax = (qi->tqi_cwmax << 1) | 1;
	} else
		qi->tqi_cwmax = INIT_CWMAX;

	if (qinfo->tqi_shretry != 0)
		qi->tqi_shretry = min((u32) qinfo->tqi_shretry, 15U);
	else
		qi->tqi_shretry = INIT_SH_RETRY;
	if (qinfo->tqi_lgretry != 0)
		qi->tqi_lgretry = min((u32) qinfo->tqi_lgretry, 15U);
	else
		qi->tqi_lgretry = INIT_LG_RETRY;
	qi->tqi_cbrPeriod = qinfo->tqi_cbrPeriod;
	qi->tqi_cbrOverflowLimit = qinfo->tqi_cbrOverflowLimit;
	qi->tqi_burstTime = qinfo->tqi_burstTime;
	qi->tqi_readyTime = qinfo->tqi_readyTime;

	switch (qinfo->tqi_subtype) {
	case ATH9K_WME_UPSD:
		if (qi->tqi_type == ATH9K_TX_QUEUE_DATA)
			qi->tqi_intFlags = ATH9K_TXQ_USE_LOCKOUT_BKOFF_DIS;
		break;
	default:
		break;
	}
	return true;
}

bool ath9k_hw_get_txq_props(struct ath_hal *ah, int q,
			    struct ath9k_tx_queue_info *qinfo)
{
	struct ath_hal_5416 *ahp = AH5416(ah);
	struct ath9k_hw_capabilities *pCap = &ah->ah_caps;
	struct ath9k_tx_queue_info *qi;

	if (q >= pCap->total_queues) {
		DPRINTF(ah->ah_sc, ATH_DBG_QUEUE, "%s: invalid queue num %u\n",
			__func__, q);
		return false;
	}

	qi = &ahp->ah_txq[q];
	if (qi->tqi_type == ATH9K_TX_QUEUE_INACTIVE) {
		DPRINTF(ah->ah_sc, ATH_DBG_QUEUE, "%s: inactive queue\n",
			__func__);
		return false;
	}

	qinfo->tqi_qflags = qi->tqi_qflags;
	qinfo->tqi_ver = qi->tqi_ver;
	qinfo->tqi_subtype = qi->tqi_subtype;
	qinfo->tqi_priority = qi->tqi_priority;
	qinfo->tqi_aifs = qi->tqi_aifs;
	qinfo->tqi_cwmin = qi->tqi_cwmin;
	qinfo->tqi_cwmax = qi->tqi_cwmax;
	qinfo->tqi_shretry = qi->tqi_shretry;
	qinfo->tqi_lgretry = qi->tqi_lgretry;
	qinfo->tqi_cbrPeriod = qi->tqi_cbrPeriod;
	qinfo->tqi_cbrOverflowLimit = qi->tqi_cbrOverflowLimit;
	qinfo->tqi_burstTime = qi->tqi_burstTime;
	qinfo->tqi_readyTime = qi->tqi_readyTime;
	return true;
}

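/*
 * Allocate a hardware TX queue of the requested type. Fixed queue numbers
 * are used for beacon, CAB, PS-poll and UAPSD; data queues take the first
 * inactive slot. Returns the queue number, or -1 on failure.
 */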
int ath9k_hw_setuptxqueue(struct ath_hal *ah, enum ath9k_tx_queue type,
			  const struct ath9k_tx_queue_info *qinfo)
{
	struct ath_hal_5416 *ahp = AH5416(ah);
	struct ath9k_tx_queue_info *qi;
	struct ath9k_hw_capabilities *pCap = &ah->ah_caps;
	int q;

	switch (type) {
	case ATH9K_TX_QUEUE_BEACON:
		q = pCap->total_queues - 1;
		break;
	case ATH9K_TX_QUEUE_CAB:
		q = pCap->total_queues - 2;
		break;
	case ATH9K_TX_QUEUE_PSPOLL:
		q = 1;
		break;
	case ATH9K_TX_QUEUE_UAPSD:
		q = pCap->total_queues - 3;
		break;
	case ATH9K_TX_QUEUE_DATA:
		for (q = 0; q < pCap->total_queues; q++)
			if (ahp->ah_txq[q].tqi_type ==
			    ATH9K_TX_QUEUE_INACTIVE)
				break;
		if (q == pCap->total_queues) {
			DPRINTF(ah->ah_sc, ATH_DBG_QUEUE,
				"%s: no available tx queue\n", __func__);
			return -1;
		}
		break;
	default:
		DPRINTF(ah->ah_sc, ATH_DBG_QUEUE, "%s: bad tx queue type %u\n",
			__func__, type);
		return -1;
	}

	DPRINTF(ah->ah_sc, ATH_DBG_QUEUE, "%s: queue %u\n", __func__, q);

	qi = &ahp->ah_txq[q];
	if (qi->tqi_type != ATH9K_TX_QUEUE_INACTIVE) {
		DPRINTF(ah->ah_sc, ATH_DBG_QUEUE,
			"%s: tx queue %u already active\n", __func__, q);
		return -1;
	}
	memset(qi, 0, sizeof(struct ath9k_tx_queue_info));
	qi->tqi_type = type;
	if (qinfo == NULL) {
		qi->tqi_qflags =
			TXQ_FLAG_TXOKINT_ENABLE
			| TXQ_FLAG_TXERRINT_ENABLE
			| TXQ_FLAG_TXDESCINT_ENABLE | TXQ_FLAG_TXURNINT_ENABLE;
		qi->tqi_aifs = INIT_AIFS;
		qi->tqi_cwmin = ATH9K_TXQ_USEDEFAULT;
		qi->tqi_cwmax = INIT_CWMAX;
		qi->tqi_shretry = INIT_SH_RETRY;
		qi->tqi_lgretry = INIT_LG_RETRY;
		qi->tqi_physCompBuf = 0;
	} else {
		qi->tqi_physCompBuf = qinfo->tqi_physCompBuf;
		(void) ath9k_hw_set_txq_props(ah, q, qinfo);
	}

	return q;
}

bool ath9k_hw_releasetxqueue(struct ath_hal *ah, u32 q)
{
	struct ath_hal_5416 *ahp = AH5416(ah);
	struct ath9k_hw_capabilities *pCap = &ah->ah_caps;
	struct ath9k_tx_queue_info *qi;

	if (q >= pCap->total_queues) {
		DPRINTF(ah->ah_sc, ATH_DBG_QUEUE, "%s: invalid queue num %u\n",
			__func__, q);
		return false;
	}
	qi = &ahp->ah_txq[q];
	if (qi->tqi_type == ATH9K_TX_QUEUE_INACTIVE) {
		DPRINTF(ah->ah_sc, ATH_DBG_QUEUE, "%s: inactive queue %u\n",
			__func__, q);
		return false;
	}

	DPRINTF(ah->ah_sc, ATH_DBG_QUEUE, "%s: release queue %u\n",
		__func__, q);

	qi->tqi_type = ATH9K_TX_QUEUE_INACTIVE;
	ahp->ah_txOkInterruptMask &= ~(1 << q);
	ahp->ah_txErrInterruptMask &= ~(1 << q);
	ahp->ah_txDescInterruptMask &= ~(1 << q);
	ahp->ah_txEolInterruptMask &= ~(1 << q);
	ahp->ah_txUrnInterruptMask &= ~(1 << q);
	ath9k_hw_set_txq_interrupts(ah, qi);

	return true;
}

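/*
 * Program the hardware registers for queue q from its cached parameters:
 * IFS/CWmin/CWmax, retry limits, CBR and ready-time configuration, burst
 * duration, the type-specific QCU/DCU misc bits and the per-queue
 * interrupt masks.
 */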
bool ath9k_hw_resettxqueue(struct ath_hal *ah, u32 q)
{
	struct ath_hal_5416 *ahp = AH5416(ah);
	struct ath9k_hw_capabilities *pCap = &ah->ah_caps;
	struct ath9k_channel *chan = ah->ah_curchan;
	struct ath9k_tx_queue_info *qi;
	u32 cwMin, chanCwMin, value;

	if (q >= pCap->total_queues) {
		DPRINTF(ah->ah_sc, ATH_DBG_QUEUE, "%s: invalid queue num %u\n",
			__func__, q);
		return false;
	}

	qi = &ahp->ah_txq[q];
	if (qi->tqi_type == ATH9K_TX_QUEUE_INACTIVE) {
		DPRINTF(ah->ah_sc, ATH_DBG_QUEUE, "%s: inactive queue %u\n",
			__func__, q);
		return true;
	}

	DPRINTF(ah->ah_sc, ATH_DBG_QUEUE, "%s: reset queue %u\n", __func__, q);

	if (qi->tqi_cwmin == ATH9K_TXQ_USEDEFAULT) {
		if (chan && IS_CHAN_B(chan))
			chanCwMin = INIT_CWMIN_11B;
		else
			chanCwMin = INIT_CWMIN;

		for (cwMin = 1; cwMin < chanCwMin; cwMin = (cwMin << 1) | 1);
	} else
		cwMin = qi->tqi_cwmin;

	REG_WRITE(ah, AR_DLCL_IFS(q),
		  SM(cwMin, AR_D_LCL_IFS_CWMIN) |
		  SM(qi->tqi_cwmax, AR_D_LCL_IFS_CWMAX) |
		  SM(qi->tqi_aifs, AR_D_LCL_IFS_AIFS));

	REG_WRITE(ah, AR_DRETRY_LIMIT(q),
		  SM(INIT_SSH_RETRY, AR_D_RETRY_LIMIT_STA_SH) |
		  SM(INIT_SLG_RETRY, AR_D_RETRY_LIMIT_STA_LG) |
		  SM(qi->tqi_shretry, AR_D_RETRY_LIMIT_FR_SH));

	REG_WRITE(ah, AR_QMISC(q), AR_Q_MISC_DCU_EARLY_TERM_REQ);
	REG_WRITE(ah, AR_DMISC(q),
		  AR_D_MISC_CW_BKOFF_EN | AR_D_MISC_FRAG_WAIT_EN | 0x2);

	if (qi->tqi_cbrPeriod) {
		REG_WRITE(ah, AR_QCBRCFG(q),
			  SM(qi->tqi_cbrPeriod, AR_Q_CBRCFG_INTERVAL) |
			  SM(qi->tqi_cbrOverflowLimit, AR_Q_CBRCFG_OVF_THRESH));
		REG_WRITE(ah, AR_QMISC(q),
			  REG_READ(ah, AR_QMISC(q)) | AR_Q_MISC_FSP_CBR |
			  (qi->tqi_cbrOverflowLimit ?
			   AR_Q_MISC_CBR_EXP_CNTR_LIMIT_EN : 0));
	}
	if (qi->tqi_readyTime && (qi->tqi_type != ATH9K_TX_QUEUE_CAB)) {
		REG_WRITE(ah, AR_QRDYTIMECFG(q),
			  SM(qi->tqi_readyTime, AR_Q_RDYTIMECFG_DURATION) |
			  AR_Q_RDYTIMECFG_EN);
	}

	REG_WRITE(ah, AR_DCHNTIME(q),
		  SM(qi->tqi_burstTime, AR_D_CHNTIME_DUR) |
		  (qi->tqi_burstTime ? AR_D_CHNTIME_EN : 0));

	if (qi->tqi_burstTime
	    && (qi->tqi_qflags & TXQ_FLAG_RDYTIME_EXP_POLICY_ENABLE)) {
		REG_WRITE(ah, AR_QMISC(q),
			  REG_READ(ah, AR_QMISC(q)) |
			  AR_Q_MISC_RDYTIME_EXP_POLICY);
	}

	if (qi->tqi_qflags & TXQ_FLAG_BACKOFF_DISABLE) {
		REG_WRITE(ah, AR_DMISC(q),
			  REG_READ(ah, AR_DMISC(q)) |
			  AR_D_MISC_POST_FR_BKOFF_DIS);
	}
	if (qi->tqi_qflags & TXQ_FLAG_FRAG_BURST_BACKOFF_ENABLE) {
		REG_WRITE(ah, AR_DMISC(q),
			  REG_READ(ah, AR_DMISC(q)) |
			  AR_D_MISC_FRAG_BKOFF_EN);
	}
	switch (qi->tqi_type) {
	case ATH9K_TX_QUEUE_BEACON:
		REG_WRITE(ah, AR_QMISC(q), REG_READ(ah, AR_QMISC(q))
			  | AR_Q_MISC_FSP_DBA_GATED
			  | AR_Q_MISC_BEACON_USE
			  | AR_Q_MISC_CBR_INCR_DIS1);

		REG_WRITE(ah, AR_DMISC(q), REG_READ(ah, AR_DMISC(q))
			  | (AR_D_MISC_ARB_LOCKOUT_CNTRL_GLOBAL <<
			     AR_D_MISC_ARB_LOCKOUT_CNTRL_S)
			  | AR_D_MISC_BEACON_USE
			  | AR_D_MISC_POST_FR_BKOFF_DIS);
		break;
	case ATH9K_TX_QUEUE_CAB:
		REG_WRITE(ah, AR_QMISC(q), REG_READ(ah, AR_QMISC(q))
			  | AR_Q_MISC_FSP_DBA_GATED
			  | AR_Q_MISC_CBR_INCR_DIS1
			  | AR_Q_MISC_CBR_INCR_DIS0);
		value = (qi->tqi_readyTime -
			 (ah->ah_config.sw_beacon_response_time -
			  ah->ah_config.dma_beacon_response_time) -
			 ah->ah_config.additional_swba_backoff) * 1024;
		REG_WRITE(ah, AR_QRDYTIMECFG(q),
			  value | AR_Q_RDYTIMECFG_EN);
		REG_WRITE(ah, AR_DMISC(q), REG_READ(ah, AR_DMISC(q))
			  | (AR_D_MISC_ARB_LOCKOUT_CNTRL_GLOBAL <<
			     AR_D_MISC_ARB_LOCKOUT_CNTRL_S));
		break;
	case ATH9K_TX_QUEUE_PSPOLL:
		REG_WRITE(ah, AR_QMISC(q),
			  REG_READ(ah, AR_QMISC(q)) | AR_Q_MISC_CBR_INCR_DIS1);
		break;
	case ATH9K_TX_QUEUE_UAPSD:
		REG_WRITE(ah, AR_DMISC(q), REG_READ(ah, AR_DMISC(q)) |
			  AR_D_MISC_POST_FR_BKOFF_DIS);
		break;
	default:
		break;
	}

	if (qi->tqi_intFlags & ATH9K_TXQ_USE_LOCKOUT_BKOFF_DIS) {
		REG_WRITE(ah, AR_DMISC(q),
			  REG_READ(ah, AR_DMISC(q)) |
			  SM(AR_D_MISC_ARB_LOCKOUT_CNTRL_GLOBAL,
			     AR_D_MISC_ARB_LOCKOUT_CNTRL) |
			  AR_D_MISC_POST_FR_BKOFF_DIS);
	}

	if (qi->tqi_qflags & TXQ_FLAG_TXOKINT_ENABLE)
		ahp->ah_txOkInterruptMask |= 1 << q;
	else
		ahp->ah_txOkInterruptMask &= ~(1 << q);
	if (qi->tqi_qflags & TXQ_FLAG_TXERRINT_ENABLE)
		ahp->ah_txErrInterruptMask |= 1 << q;
	else
		ahp->ah_txErrInterruptMask &= ~(1 << q);
	if (qi->tqi_qflags & TXQ_FLAG_TXDESCINT_ENABLE)
		ahp->ah_txDescInterruptMask |= 1 << q;
	else
		ahp->ah_txDescInterruptMask &= ~(1 << q);
	if (qi->tqi_qflags & TXQ_FLAG_TXEOLINT_ENABLE)
		ahp->ah_txEolInterruptMask |= 1 << q;
	else
		ahp->ah_txEolInterruptMask &= ~(1 << q);
	if (qi->tqi_qflags & TXQ_FLAG_TXURNINT_ENABLE)
		ahp->ah_txUrnInterruptMask |= 1 << q;
	else
		ahp->ah_txUrnInterruptMask &= ~(1 << q);
	ath9k_hw_set_txq_interrupts(ah, qi);

	return true;
}

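/*
 * Process a completed RX descriptor: copy out length, timestamp, RSSI, key
 * index, rate and aggregation information, and translate the hardware error
 * bits into the generic rs_status and rs_flags fields. Returns -EINPROGRESS
 * if the descriptor is still owned by the hardware.
 */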
int ath9k_hw_rxprocdesc(struct ath_hal *ah, struct ath_desc *ds,
			u32 pa, struct ath_desc *nds, u64 tsf)
{
	struct ar5416_desc ads;
	struct ar5416_desc *adsp = AR5416DESC(ds);
	u32 phyerr;

	if ((adsp->ds_rxstatus8 & AR_RxDone) == 0)
		return -EINPROGRESS;

	ads.u.rx = adsp->u.rx;

	ds->ds_rxstat.rs_status = 0;
	ds->ds_rxstat.rs_flags = 0;

	ds->ds_rxstat.rs_datalen = ads.ds_rxstatus1 & AR_DataLen;
	ds->ds_rxstat.rs_tstamp = ads.AR_RcvTimestamp;

	ds->ds_rxstat.rs_rssi = MS(ads.ds_rxstatus4, AR_RxRSSICombined);
	ds->ds_rxstat.rs_rssi_ctl0 = MS(ads.ds_rxstatus0, AR_RxRSSIAnt00);
	ds->ds_rxstat.rs_rssi_ctl1 = MS(ads.ds_rxstatus0, AR_RxRSSIAnt01);
	ds->ds_rxstat.rs_rssi_ctl2 = MS(ads.ds_rxstatus0, AR_RxRSSIAnt02);
	ds->ds_rxstat.rs_rssi_ext0 = MS(ads.ds_rxstatus4, AR_RxRSSIAnt10);
	ds->ds_rxstat.rs_rssi_ext1 = MS(ads.ds_rxstatus4, AR_RxRSSIAnt11);
	ds->ds_rxstat.rs_rssi_ext2 = MS(ads.ds_rxstatus4, AR_RxRSSIAnt12);
	if (ads.ds_rxstatus8 & AR_RxKeyIdxValid)
		ds->ds_rxstat.rs_keyix = MS(ads.ds_rxstatus8, AR_KeyIdx);
	else
		ds->ds_rxstat.rs_keyix = ATH9K_RXKEYIX_INVALID;

	ds->ds_rxstat.rs_rate = RXSTATUS_RATE(ah, (&ads));
	ds->ds_rxstat.rs_more = (ads.ds_rxstatus1 & AR_RxMore) ? 1 : 0;

	ds->ds_rxstat.rs_isaggr = (ads.ds_rxstatus8 & AR_RxAggr) ? 1 : 0;
	ds->ds_rxstat.rs_moreaggr =
		(ads.ds_rxstatus8 & AR_RxMoreAggr) ? 1 : 0;
	ds->ds_rxstat.rs_antenna = MS(ads.ds_rxstatus3, AR_RxAntenna);
	ds->ds_rxstat.rs_flags =
		(ads.ds_rxstatus3 & AR_GI) ? ATH9K_RX_GI : 0;
	ds->ds_rxstat.rs_flags |=
		(ads.ds_rxstatus3 & AR_2040) ? ATH9K_RX_2040 : 0;

	if (ads.ds_rxstatus8 & AR_PreDelimCRCErr)
		ds->ds_rxstat.rs_flags |= ATH9K_RX_DELIM_CRC_PRE;
	if (ads.ds_rxstatus8 & AR_PostDelimCRCErr)
		ds->ds_rxstat.rs_flags |= ATH9K_RX_DELIM_CRC_POST;
	if (ads.ds_rxstatus8 & AR_DecryptBusyErr)
		ds->ds_rxstat.rs_flags |= ATH9K_RX_DECRYPT_BUSY;

	if ((ads.ds_rxstatus8 & AR_RxFrameOK) == 0) {
		if (ads.ds_rxstatus8 & AR_CRCErr)
			ds->ds_rxstat.rs_status |= ATH9K_RXERR_CRC;
		else if (ads.ds_rxstatus8 & AR_PHYErr) {
			ds->ds_rxstat.rs_status |= ATH9K_RXERR_PHY;
			phyerr = MS(ads.ds_rxstatus8, AR_PHYErrCode);
			ds->ds_rxstat.rs_phyerr = phyerr;
		} else if (ads.ds_rxstatus8 & AR_DecryptCRCErr)
			ds->ds_rxstat.rs_status |= ATH9K_RXERR_DECRYPT;
		else if (ads.ds_rxstatus8 & AR_MichaelErr)
			ds->ds_rxstat.rs_status |= ATH9K_RXERR_MIC;
	}

	return 0;
}

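/*
 * Initialize an RX descriptor: set the buffer size, optionally request an
 * interrupt on completion, clear the done bit and, on parts without the
 * autosleep capability, clear the whole status area.
 */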
bool ath9k_hw_setuprxdesc(struct ath_hal *ah, struct ath_desc *ds,
			  u32 size, u32 flags)
{
	struct ar5416_desc *ads = AR5416DESC(ds);
	struct ath9k_hw_capabilities *pCap = &ah->ah_caps;

	ads->ds_ctl1 = size & AR_BufLen;
	if (flags & ATH9K_RXDESC_INTREQ)
		ads->ds_ctl1 |= AR_RxIntrReq;

	ads->ds_rxstatus8 &= ~AR_RxDone;
	if (!(pCap->hw_caps & ATH9K_HW_CAP_AUTOSLEEP))
		memset(&(ads->u), 0, sizeof(ads->u));
	return true;
}

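/*
 * Abort or re-enable frame reception at the PCU. When aborting, wait for
 * the RX state machine to go idle; if it does not, undo the abort and
 * report failure.
 */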
bool ath9k_hw_setrxabort(struct ath_hal *ah, bool set)
{
	u32 reg;

	if (set) {
		REG_SET_BIT(ah, AR_DIAG_SW,
			    (AR_DIAG_RX_DIS | AR_DIAG_RX_ABORT));

		if (!ath9k_hw_wait(ah, AR_OBS_BUS_1, AR_OBS_BUS_1_RX_STATE, 0)) {
			REG_CLR_BIT(ah, AR_DIAG_SW,
				    (AR_DIAG_RX_DIS |
				     AR_DIAG_RX_ABORT));

			reg = REG_READ(ah, AR_OBS_BUS_1);
			DPRINTF(ah->ah_sc, ATH_DBG_FATAL,
				"%s: rx failed to go idle in 10 ms RXSM=0x%x\n",
				__func__, reg);

			return false;
		}
	} else {
		REG_CLR_BIT(ah, AR_DIAG_SW,
			    (AR_DIAG_RX_DIS | AR_DIAG_RX_ABORT));
	}

	return true;
}

void ath9k_hw_putrxbuf(struct ath_hal *ah, u32 rxdp)
{
	REG_WRITE(ah, AR_RXDP, rxdp);
}

void ath9k_hw_rxena(struct ath_hal *ah)
{
	REG_WRITE(ah, AR_CR, AR_CR_RXE);
}

void ath9k_hw_startpcureceive(struct ath_hal *ah)
{
	REG_CLR_BIT(ah, AR_DIAG_SW,
		    (AR_DIAG_RX_DIS | AR_DIAG_RX_ABORT));

	ath9k_enable_mib_counters(ah);

	ath9k_ani_reset(ah);
}

void ath9k_hw_stoppcurecv(struct ath_hal *ah)
{
	REG_SET_BIT(ah, AR_DIAG_SW, AR_DIAG_RX_DIS);

	ath9k_hw_disable_mib_counters(ah);
}

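/* Stop the RX DMA engine and wait for it to become idle. */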
bool ath9k_hw_stopdmarecv(struct ath_hal *ah)
{
	REG_WRITE(ah, AR_CR, AR_CR_RXD);

	if (!ath9k_hw_wait(ah, AR_CR, AR_CR_RXE, 0)) {
		DPRINTF(ah->ah_sc, ATH_DBG_QUEUE,
			"%s: dma failed to stop in 10ms\n"
			"AR_CR=0x%08x\nAR_DIAG_SW=0x%08x\n",
			__func__,
			REG_READ(ah, AR_CR), REG_READ(ah, AR_DIAG_SW));
		return false;
	} else {
		return true;
	}
}