mac.c

/*
 * Copyright (c) 2008 Atheros Communications Inc.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include "core.h"
#include "hw.h"
#include "reg.h"
#include "phy.h"
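
/*
 * Program the per-queue TX interrupt masks (TXOK, TXERR, TXDESC, TXEOL,
 * TXURN) cached in the HAL into the AR_IMR_S0/S1/S2 secondary interrupt
 * mask registers.
 */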
static void ath9k_hw_set_txq_interrupts(struct ath_hal *ah,
					struct ath9k_tx_queue_info *qi)
{
	struct ath_hal_5416 *ahp = AH5416(ah);

	DPRINTF(ah->ah_sc, ATH_DBG_INTERRUPT,
		"%s: tx ok 0x%x err 0x%x desc 0x%x eol 0x%x urn 0x%x\n",
		__func__, ahp->ah_txOkInterruptMask,
		ahp->ah_txErrInterruptMask, ahp->ah_txDescInterruptMask,
		ahp->ah_txEolInterruptMask, ahp->ah_txUrnInterruptMask);

	REG_WRITE(ah, AR_IMR_S0,
		SM(ahp->ah_txOkInterruptMask, AR_IMR_S0_QCU_TXOK)
		| SM(ahp->ah_txDescInterruptMask, AR_IMR_S0_QCU_TXDESC));
	REG_WRITE(ah, AR_IMR_S1,
		SM(ahp->ah_txErrInterruptMask, AR_IMR_S1_QCU_TXERR)
		| SM(ahp->ah_txEolInterruptMask, AR_IMR_S1_QCU_TXEOL));
	REG_RMW_FIELD(ah, AR_IMR_S2,
		AR_IMR_S2_QCU_TXURN, ahp->ah_txUrnInterruptMask);
}
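
/*
 * Dump the raw DMA observation registers (AR_DMADBG_0 onward) and decode
 * the per-queue QCU/DCU state fields; useful when TX DMA appears stuck.
 */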
void ath9k_hw_dmaRegDump(struct ath_hal *ah)
{
	u32 val[ATH9K_NUM_DMA_DEBUG_REGS];
	int qcuOffset = 0, dcuOffset = 0;
	u32 *qcuBase = &val[0], *dcuBase = &val[4];
	int i;

	REG_WRITE(ah, AR_MACMISC,
		((AR_MACMISC_DMA_OBS_LINE_8 << AR_MACMISC_DMA_OBS_S) |
		 (AR_MACMISC_MISC_OBS_BUS_1 <<
		  AR_MACMISC_MISC_OBS_BUS_MSB_S)));

	DPRINTF(ah->ah_sc, ATH_DBG_REG_IO, "Raw DMA Debug values:\n");

	for (i = 0; i < ATH9K_NUM_DMA_DEBUG_REGS; i++) {
		if (i % 4 == 0)
			DPRINTF(ah->ah_sc, ATH_DBG_REG_IO, "\n");

		val[i] = REG_READ(ah, AR_DMADBG_0 + (i * sizeof(u32)));
		DPRINTF(ah->ah_sc, ATH_DBG_REG_IO, "%d: %08x ", i, val[i]);
	}

	DPRINTF(ah->ah_sc, ATH_DBG_REG_IO, "\n\n");
	DPRINTF(ah->ah_sc, ATH_DBG_REG_IO,
		"Num QCU: chain_st fsp_ok fsp_st DCU: chain_st\n");

	for (i = 0; i < ATH9K_NUM_QUEUES;
	     i++, qcuOffset += 4, dcuOffset += 5) {
		if (i == 8) {
			qcuOffset = 0;
			qcuBase++;
		}

		if (i == 6) {
			dcuOffset = 0;
			dcuBase++;
		}

		DPRINTF(ah->ah_sc, ATH_DBG_REG_IO,
			"%2d %2x %1x %2x %2x\n",
			i, (*qcuBase & (0x7 << qcuOffset)) >> qcuOffset,
			(*qcuBase & (0x8 << qcuOffset)) >> (qcuOffset + 3),
			(val[2] & (0x7 << (i * 3))) >> (i * 3),
			(*dcuBase & (0x1f << dcuOffset)) >> dcuOffset);
	}

	DPRINTF(ah->ah_sc, ATH_DBG_REG_IO, "\n");
	DPRINTF(ah->ah_sc, ATH_DBG_REG_IO,
		"qcu_stitch state: %2x qcu_fetch state: %2x\n",
		(val[3] & 0x003c0000) >> 18, (val[3] & 0x03c00000) >> 22);
	DPRINTF(ah->ah_sc, ATH_DBG_REG_IO,
		"qcu_complete state: %2x dcu_complete state: %2x\n",
		(val[3] & 0x1c000000) >> 26, (val[6] & 0x3));
	DPRINTF(ah->ah_sc, ATH_DBG_REG_IO,
		"dcu_arb state: %2x dcu_fp state: %2x\n",
		(val[5] & 0x06000000) >> 25, (val[5] & 0x38000000) >> 27);
	DPRINTF(ah->ah_sc, ATH_DBG_REG_IO,
		"chan_idle_dur: %3d chan_idle_dur_valid: %1d\n",
		(val[6] & 0x000003fc) >> 2, (val[6] & 0x00000400) >> 10);
	DPRINTF(ah->ah_sc, ATH_DBG_REG_IO,
		"txfifo_valid_0: %1d txfifo_valid_1: %1d\n",
		(val[6] & 0x00000800) >> 11, (val[6] & 0x00001000) >> 12);
	DPRINTF(ah->ah_sc, ATH_DBG_REG_IO,
		"txfifo_dcu_num_0: %2d txfifo_dcu_num_1: %2d\n",
		(val[6] & 0x0001e000) >> 13, (val[6] & 0x001e0000) >> 17);
	DPRINTF(ah->ah_sc, ATH_DBG_REG_IO, "pcu observe 0x%x\n",
		REG_READ(ah, AR_OBS_BUS_1));
	DPRINTF(ah->ah_sc, ATH_DBG_REG_IO,
		"AR_CR 0x%x\n", REG_READ(ah, AR_CR));
}

u32 ath9k_hw_gettxbuf(struct ath_hal *ah, u32 q)
{
	return REG_READ(ah, AR_QTXDP(q));
}

bool ath9k_hw_puttxbuf(struct ath_hal *ah, u32 q, u32 txdp)
{
	REG_WRITE(ah, AR_QTXDP(q), txdp);
	return true;
}

bool ath9k_hw_txstart(struct ath_hal *ah, u32 q)
{
	DPRINTF(ah->ah_sc, ATH_DBG_QUEUE, "%s: queue %u\n", __func__, q);

	REG_WRITE(ah, AR_Q_TXE, 1 << q);

	return true;
}
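
/*
 * Return the number of frames pending on queue q.  If the pending-frame
 * counter reads zero but the queue's TXE bit is still set, report one
 * pending frame so callers keep polling until the queue is really idle.
 */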
u32 ath9k_hw_numtxpending(struct ath_hal *ah, u32 q)
{
	u32 npend;

	npend = REG_READ(ah, AR_QSTS(q)) & AR_Q_STS_PEND_FR_CNT;
	if (npend == 0) {
		if (REG_READ(ah, AR_Q_TXE) & (1 << q))
			npend = 1;
	}

	return npend;
}
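
/*
 * Raise or lower the TX FIFO trigger level (AR_FTRIG in AR_TXCFG) by one
 * step, with interrupts masked around the read-modify-write.  Returns
 * true only if the level actually changed.
 */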
bool ath9k_hw_updatetxtriglevel(struct ath_hal *ah, bool bIncTrigLevel)
{
	struct ath_hal_5416 *ahp = AH5416(ah);
	u32 txcfg, curLevel, newLevel;
	enum ath9k_int omask;

	if (ah->ah_txTrigLevel >= MAX_TX_FIFO_THRESHOLD)
		return false;

	omask = ath9k_hw_set_interrupts(ah, ahp->ah_maskReg & ~ATH9K_INT_GLOBAL);

	txcfg = REG_READ(ah, AR_TXCFG);
	curLevel = MS(txcfg, AR_FTRIG);
	newLevel = curLevel;
	if (bIncTrigLevel) {
		if (curLevel < MAX_TX_FIFO_THRESHOLD)
			newLevel++;
	} else if (curLevel > MIN_TX_FIFO_THRESHOLD)
		newLevel--;
	if (newLevel != curLevel)
		REG_WRITE(ah, AR_TXCFG,
			(txcfg & ~AR_FTRIG) | SM(newLevel, AR_FTRIG));

	ath9k_hw_set_interrupts(ah, omask);

	ah->ah_txTrigLevel = newLevel;

	return newLevel != curLevel;
}
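
/*
 * Stop TX DMA on queue q: disable the queue and poll for the pending-frame
 * count to drain.  If frames remain stuck, schedule a brief quiet period
 * and force channel-idle to kill whatever is on the air, then poll again
 * before releasing the queue-disable register.
 */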
bool ath9k_hw_stoptxdma(struct ath_hal *ah, u32 q)
{
	u32 tsfLow, j, wait;

	REG_WRITE(ah, AR_Q_TXD, 1 << q);

	for (wait = 1000; wait != 0; wait--) {
		if (ath9k_hw_numtxpending(ah, q) == 0)
			break;
		udelay(100);
	}

	if (ath9k_hw_numtxpending(ah, q)) {
		DPRINTF(ah->ah_sc, ATH_DBG_QUEUE,
			"%s: Num of pending TX Frames %d on Q %d\n",
			__func__, ath9k_hw_numtxpending(ah, q), q);

		for (j = 0; j < 2; j++) {
			tsfLow = REG_READ(ah, AR_TSF_L32);
			REG_WRITE(ah, AR_QUIET2,
				SM(10, AR_QUIET2_QUIET_DUR));
			REG_WRITE(ah, AR_QUIET_PERIOD, 100);
			REG_WRITE(ah, AR_NEXT_QUIET_TIMER, tsfLow >> 10);
			REG_SET_BIT(ah, AR_TIMER_MODE,
				AR_QUIET_TIMER_EN);

			if ((REG_READ(ah, AR_TSF_L32) >> 10) == (tsfLow >> 10))
				break;

			DPRINTF(ah->ah_sc, ATH_DBG_QUEUE,
				"%s: TSF has moved while trying to set "
				"quiet time TSF: 0x%08x\n",
				__func__, tsfLow);
		}

		REG_SET_BIT(ah, AR_DIAG_SW, AR_DIAG_FORCE_CH_IDLE_HIGH);

		udelay(200);
		REG_CLR_BIT(ah, AR_TIMER_MODE, AR_QUIET_TIMER_EN);

		wait = 1000;

		while (ath9k_hw_numtxpending(ah, q)) {
			if ((--wait) == 0) {
				DPRINTF(ah->ah_sc, ATH_DBG_XMIT,
					"%s: Failed to stop Tx DMA in 100 "
					"msec after killing last frame\n",
					__func__);
				break;
			}
			udelay(100);
		}

		REG_CLR_BIT(ah, AR_DIAG_SW, AR_DIAG_FORCE_CH_IDLE_HIGH);
	}

	REG_WRITE(ah, AR_Q_TXD, 0);

	return wait != 0;
}
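
/*
 * Fill in the buffer-segment fields of a TX descriptor and clear its
 * status words.  Any segment that is not the last gets AR_TxMore so the
 * hardware continues to the next buffer in the chain.
 */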
bool ath9k_hw_filltxdesc(struct ath_hal *ah, struct ath_desc *ds,
			 u32 segLen, bool firstSeg,
			 bool lastSeg, const struct ath_desc *ds0)
{
	struct ar5416_desc *ads = AR5416DESC(ds);

	if (firstSeg) {
		ads->ds_ctl1 |= segLen | (lastSeg ? 0 : AR_TxMore);
	} else if (lastSeg) {
		ads->ds_ctl0 = 0;
		ads->ds_ctl1 = segLen;
		ads->ds_ctl2 = AR5416DESC_CONST(ds0)->ds_ctl2;
		ads->ds_ctl3 = AR5416DESC_CONST(ds0)->ds_ctl3;
	} else {
		ads->ds_ctl0 = 0;
		ads->ds_ctl1 = segLen | AR_TxMore;
		ads->ds_ctl2 = 0;
		ads->ds_ctl3 = 0;
	}
	ads->ds_txstatus0 = ads->ds_txstatus1 = 0;
	ads->ds_txstatus2 = ads->ds_txstatus3 = 0;
	ads->ds_txstatus4 = ads->ds_txstatus5 = 0;
	ads->ds_txstatus6 = ads->ds_txstatus7 = 0;
	ads->ds_txstatus8 = ads->ds_txstatus9 = 0;
	return true;
}

void ath9k_hw_cleartxdesc(struct ath_hal *ah, struct ath_desc *ds)
{
	struct ar5416_desc *ads = AR5416DESC(ds);

	ads->ds_txstatus0 = ads->ds_txstatus1 = 0;
	ads->ds_txstatus2 = ads->ds_txstatus3 = 0;
	ads->ds_txstatus4 = ads->ds_txstatus5 = 0;
	ads->ds_txstatus6 = ads->ds_txstatus7 = 0;
	ads->ds_txstatus8 = ads->ds_txstatus9 = 0;
}
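
/*
 * Process a completed TX descriptor: return -EINPROGRESS until the
 * hardware sets AR_TxDone, then translate the raw status words into the
 * generic TX status (errors, retries, RSSI, final rate index, block-ack
 * bitmaps, EVM).
 */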
int ath9k_hw_txprocdesc(struct ath_hal *ah, struct ath_desc *ds)
{
	struct ar5416_desc *ads = AR5416DESC(ds);

	if ((ads->ds_txstatus9 & AR_TxDone) == 0)
		return -EINPROGRESS;

	ds->ds_txstat.ts_seqnum = MS(ads->ds_txstatus9, AR_SeqNum);
	ds->ds_txstat.ts_tstamp = ads->AR_SendTimestamp;
	ds->ds_txstat.ts_status = 0;
	ds->ds_txstat.ts_flags = 0;

	if (ads->ds_txstatus1 & AR_ExcessiveRetries)
		ds->ds_txstat.ts_status |= ATH9K_TXERR_XRETRY;
	if (ads->ds_txstatus1 & AR_Filtered)
		ds->ds_txstat.ts_status |= ATH9K_TXERR_FILT;
	if (ads->ds_txstatus1 & AR_FIFOUnderrun)
		ds->ds_txstat.ts_status |= ATH9K_TXERR_FIFO;
	if (ads->ds_txstatus9 & AR_TxOpExceeded)
		ds->ds_txstat.ts_status |= ATH9K_TXERR_XTXOP;
	if (ads->ds_txstatus1 & AR_TxTimerExpired)
		ds->ds_txstat.ts_status |= ATH9K_TXERR_TIMER_EXPIRED;

	if (ads->ds_txstatus1 & AR_DescCfgErr)
		ds->ds_txstat.ts_flags |= ATH9K_TX_DESC_CFG_ERR;
	if (ads->ds_txstatus1 & AR_TxDataUnderrun) {
		ds->ds_txstat.ts_flags |= ATH9K_TX_DATA_UNDERRUN;
		ath9k_hw_updatetxtriglevel(ah, true);
	}
	if (ads->ds_txstatus1 & AR_TxDelimUnderrun) {
		ds->ds_txstat.ts_flags |= ATH9K_TX_DELIM_UNDERRUN;
		ath9k_hw_updatetxtriglevel(ah, true);
	}
	if (ads->ds_txstatus0 & AR_TxBaStatus) {
		ds->ds_txstat.ts_flags |= ATH9K_TX_BA;
		ds->ds_txstat.ba_low = ads->AR_BaBitmapLow;
		ds->ds_txstat.ba_high = ads->AR_BaBitmapHigh;
	}

	ds->ds_txstat.ts_rateindex = MS(ads->ds_txstatus9, AR_FinalTxIdx);
	switch (ds->ds_txstat.ts_rateindex) {
	case 0:
		ds->ds_txstat.ts_ratecode = MS(ads->ds_ctl3, AR_XmitRate0);
		break;
	case 1:
		ds->ds_txstat.ts_ratecode = MS(ads->ds_ctl3, AR_XmitRate1);
		break;
	case 2:
		ds->ds_txstat.ts_ratecode = MS(ads->ds_ctl3, AR_XmitRate2);
		break;
	case 3:
		ds->ds_txstat.ts_ratecode = MS(ads->ds_ctl3, AR_XmitRate3);
		break;
	}

	ds->ds_txstat.ts_rssi = MS(ads->ds_txstatus5, AR_TxRSSICombined);
	ds->ds_txstat.ts_rssi_ctl0 = MS(ads->ds_txstatus0, AR_TxRSSIAnt00);
	ds->ds_txstat.ts_rssi_ctl1 = MS(ads->ds_txstatus0, AR_TxRSSIAnt01);
	ds->ds_txstat.ts_rssi_ctl2 = MS(ads->ds_txstatus0, AR_TxRSSIAnt02);
	ds->ds_txstat.ts_rssi_ext0 = MS(ads->ds_txstatus5, AR_TxRSSIAnt10);
	ds->ds_txstat.ts_rssi_ext1 = MS(ads->ds_txstatus5, AR_TxRSSIAnt11);
	ds->ds_txstat.ts_rssi_ext2 = MS(ads->ds_txstatus5, AR_TxRSSIAnt12);
	ds->ds_txstat.evm0 = ads->AR_TxEVM0;
	ds->ds_txstat.evm1 = ads->AR_TxEVM1;
	ds->ds_txstat.evm2 = ads->AR_TxEVM2;
	ds->ds_txstat.ts_shortretry = MS(ads->ds_txstatus1, AR_RTSFailCnt);
	ds->ds_txstat.ts_longretry = MS(ads->ds_txstatus1, AR_DataFailCnt);
	ds->ds_txstat.ts_virtcol = MS(ads->ds_txstatus1, AR_VirtRetryCnt);
	ds->ds_txstat.ts_antenna = 1;

	return 0;
}
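
/*
 * Build the first control words of an 11n TX descriptor: frame length,
 * clamped transmit power, key index/type, frame type and the assorted
 * per-frame flags.  AR9285 parts additionally zero the extra ds_ctl8 to
 * ds_ctl11 words.
 */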
void ath9k_hw_set11n_txdesc(struct ath_hal *ah, struct ath_desc *ds,
			    u32 pktLen, enum ath9k_pkt_type type, u32 txPower,
			    u32 keyIx, enum ath9k_key_type keyType, u32 flags)
{
	struct ar5416_desc *ads = AR5416DESC(ds);
	struct ath_hal_5416 *ahp = AH5416(ah);

	txPower += ahp->ah_txPowerIndexOffset;
	if (txPower > 63)
		txPower = 63;

	ads->ds_ctl0 = (pktLen & AR_FrameLen)
		| (flags & ATH9K_TXDESC_VMF ? AR_VirtMoreFrag : 0)
		| SM(txPower, AR_XmitPower)
		| (flags & ATH9K_TXDESC_VEOL ? AR_VEOL : 0)
		| (flags & ATH9K_TXDESC_CLRDMASK ? AR_ClrDestMask : 0)
		| (flags & ATH9K_TXDESC_INTREQ ? AR_TxIntrReq : 0)
		| (keyIx != ATH9K_TXKEYIX_INVALID ? AR_DestIdxValid : 0);

	ads->ds_ctl1 =
		(keyIx != ATH9K_TXKEYIX_INVALID ? SM(keyIx, AR_DestIdx) : 0)
		| SM(type, AR_FrameType)
		| (flags & ATH9K_TXDESC_NOACK ? AR_NoAck : 0)
		| (flags & ATH9K_TXDESC_EXT_ONLY ? AR_ExtOnly : 0)
		| (flags & ATH9K_TXDESC_EXT_AND_CTL ? AR_ExtAndCtl : 0);

	ads->ds_ctl6 = SM(keyType, AR_EncrType);

	if (AR_SREV_9285(ah)) {
		ads->ds_ctl8 = 0;
		ads->ds_ctl9 = 0;
		ads->ds_ctl10 = 0;
		ads->ds_ctl11 = 0;
	}
}
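
/*
 * Program the multi-rate retry series into ds_ctl2..ds_ctl7: per-series
 * try counts, rates, packet durations and rate flags, plus RTS/CTS
 * protection (RTS and CTS enables are mutually exclusive).  The tries and
 * rate words are mirrored into the last descriptor of the frame.
 */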
void ath9k_hw_set11n_ratescenario(struct ath_hal *ah, struct ath_desc *ds,
				  struct ath_desc *lastds,
				  u32 durUpdateEn, u32 rtsctsRate,
				  u32 rtsctsDuration,
				  struct ath9k_11n_rate_series series[],
				  u32 nseries, u32 flags)
{
	struct ar5416_desc *ads = AR5416DESC(ds);
	struct ar5416_desc *last_ads = AR5416DESC(lastds);
	u32 ds_ctl0;

	(void) nseries;
	(void) rtsctsDuration;

	if (flags & (ATH9K_TXDESC_RTSENA | ATH9K_TXDESC_CTSENA)) {
		ds_ctl0 = ads->ds_ctl0;

		if (flags & ATH9K_TXDESC_RTSENA) {
			ds_ctl0 &= ~AR_CTSEnable;
			ds_ctl0 |= AR_RTSEnable;
		} else {
			ds_ctl0 &= ~AR_RTSEnable;
			ds_ctl0 |= AR_CTSEnable;
		}

		ads->ds_ctl0 = ds_ctl0;
	} else {
		ads->ds_ctl0 =
			(ads->ds_ctl0 & ~(AR_RTSEnable | AR_CTSEnable));
	}

	ads->ds_ctl2 = set11nTries(series, 0)
		| set11nTries(series, 1)
		| set11nTries(series, 2)
		| set11nTries(series, 3)
		| (durUpdateEn ? AR_DurUpdateEna : 0)
		| SM(0, AR_BurstDur);

	ads->ds_ctl3 = set11nRate(series, 0)
		| set11nRate(series, 1)
		| set11nRate(series, 2)
		| set11nRate(series, 3);

	ads->ds_ctl4 = set11nPktDurRTSCTS(series, 0)
		| set11nPktDurRTSCTS(series, 1);

	ads->ds_ctl5 = set11nPktDurRTSCTS(series, 2)
		| set11nPktDurRTSCTS(series, 3);

	ads->ds_ctl7 = set11nRateFlags(series, 0)
		| set11nRateFlags(series, 1)
		| set11nRateFlags(series, 2)
		| set11nRateFlags(series, 3)
		| SM(rtsctsRate, AR_RTSCTSRate);

	last_ads->ds_ctl2 = ads->ds_ctl2;
	last_ads->ds_ctl3 = ads->ds_ctl3;
}

void ath9k_hw_set11n_aggr_first(struct ath_hal *ah, struct ath_desc *ds,
				u32 aggrLen)
{
	struct ar5416_desc *ads = AR5416DESC(ds);

	ads->ds_ctl1 |= (AR_IsAggr | AR_MoreAggr);
	ads->ds_ctl6 &= ~AR_AggrLen;
	ads->ds_ctl6 |= SM(aggrLen, AR_AggrLen);
}

void ath9k_hw_set11n_aggr_middle(struct ath_hal *ah, struct ath_desc *ds,
				 u32 numDelims)
{
	struct ar5416_desc *ads = AR5416DESC(ds);
	unsigned int ctl6;

	ads->ds_ctl1 |= (AR_IsAggr | AR_MoreAggr);

	ctl6 = ads->ds_ctl6;
	ctl6 &= ~AR_PadDelim;
	ctl6 |= SM(numDelims, AR_PadDelim);
	ads->ds_ctl6 = ctl6;
}

void ath9k_hw_set11n_aggr_last(struct ath_hal *ah, struct ath_desc *ds)
{
	struct ar5416_desc *ads = AR5416DESC(ds);

	ads->ds_ctl1 |= AR_IsAggr;
	ads->ds_ctl1 &= ~AR_MoreAggr;
	ads->ds_ctl6 &= ~AR_PadDelim;
}

void ath9k_hw_clr11n_aggr(struct ath_hal *ah, struct ath_desc *ds)
{
	struct ar5416_desc *ads = AR5416DESC(ds);

	ads->ds_ctl1 &= (~AR_IsAggr & ~AR_MoreAggr);
}

void ath9k_hw_set11n_burstduration(struct ath_hal *ah, struct ath_desc *ds,
				   u32 burstDuration)
{
	struct ar5416_desc *ads = AR5416DESC(ds);

	ads->ds_ctl2 &= ~AR_BurstDur;
	ads->ds_ctl2 |= SM(burstDuration, AR_BurstDur);
}

void ath9k_hw_set11n_virtualmorefrag(struct ath_hal *ah, struct ath_desc *ds,
				     u32 vmf)
{
	struct ar5416_desc *ads = AR5416DESC(ds);

	if (vmf)
		ads->ds_ctl0 |= AR_VirtMoreFrag;
	else
		ads->ds_ctl0 &= ~AR_VirtMoreFrag;
}

void ath9k_hw_gettxintrtxqs(struct ath_hal *ah, u32 *txqs)
{
	struct ath_hal_5416 *ahp = AH5416(ah);

	*txqs &= ahp->ah_intrTxqs;
	ahp->ah_intrTxqs &= ~(*txqs);
}
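
/*
 * Copy caller-supplied queue parameters into the HAL's shadow queue info,
 * applying defaults for ATH9K_TXQ_USEDEFAULT values, rounding cwmin/cwmax
 * up to the next (2^n - 1) and clamping AIFS and retry limits.
 */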
bool ath9k_hw_set_txq_props(struct ath_hal *ah, int q,
			    const struct ath9k_tx_queue_info *qinfo)
{
	u32 cw;
	struct ath_hal_5416 *ahp = AH5416(ah);
	struct ath9k_hw_capabilities *pCap = &ah->ah_caps;
	struct ath9k_tx_queue_info *qi;

	if (q >= pCap->total_queues) {
		DPRINTF(ah->ah_sc, ATH_DBG_QUEUE, "%s: invalid queue num %u\n",
			__func__, q);
		return false;
	}

	qi = &ahp->ah_txq[q];
	if (qi->tqi_type == ATH9K_TX_QUEUE_INACTIVE) {
		DPRINTF(ah->ah_sc, ATH_DBG_QUEUE, "%s: inactive queue\n",
			__func__);
		return false;
	}

	DPRINTF(ah->ah_sc, ATH_DBG_QUEUE, "%s: queue %p\n", __func__, qi);

	qi->tqi_ver = qinfo->tqi_ver;
	qi->tqi_subtype = qinfo->tqi_subtype;
	qi->tqi_qflags = qinfo->tqi_qflags;
	qi->tqi_priority = qinfo->tqi_priority;
	if (qinfo->tqi_aifs != ATH9K_TXQ_USEDEFAULT)
		qi->tqi_aifs = min(qinfo->tqi_aifs, 255U);
	else
		qi->tqi_aifs = INIT_AIFS;
	if (qinfo->tqi_cwmin != ATH9K_TXQ_USEDEFAULT) {
		cw = min(qinfo->tqi_cwmin, 1024U);
		qi->tqi_cwmin = 1;
		while (qi->tqi_cwmin < cw)
			qi->tqi_cwmin = (qi->tqi_cwmin << 1) | 1;
	} else
		qi->tqi_cwmin = qinfo->tqi_cwmin;
	if (qinfo->tqi_cwmax != ATH9K_TXQ_USEDEFAULT) {
		cw = min(qinfo->tqi_cwmax, 1024U);
		qi->tqi_cwmax = 1;
		while (qi->tqi_cwmax < cw)
			qi->tqi_cwmax = (qi->tqi_cwmax << 1) | 1;
	} else
		qi->tqi_cwmax = INIT_CWMAX;

	if (qinfo->tqi_shretry != 0)
		qi->tqi_shretry = min((u32) qinfo->tqi_shretry, 15U);
	else
		qi->tqi_shretry = INIT_SH_RETRY;
	if (qinfo->tqi_lgretry != 0)
		qi->tqi_lgretry = min((u32) qinfo->tqi_lgretry, 15U);
	else
		qi->tqi_lgretry = INIT_LG_RETRY;
	qi->tqi_cbrPeriod = qinfo->tqi_cbrPeriod;
	qi->tqi_cbrOverflowLimit = qinfo->tqi_cbrOverflowLimit;
	qi->tqi_burstTime = qinfo->tqi_burstTime;
	qi->tqi_readyTime = qinfo->tqi_readyTime;

	switch (qinfo->tqi_subtype) {
	case ATH9K_WME_UPSD:
		if (qi->tqi_type == ATH9K_TX_QUEUE_DATA)
			qi->tqi_intFlags = ATH9K_TXQ_USE_LOCKOUT_BKOFF_DIS;
		break;
	default:
		break;
	}

	return true;
}

bool ath9k_hw_get_txq_props(struct ath_hal *ah, int q,
			    struct ath9k_tx_queue_info *qinfo)
{
	struct ath_hal_5416 *ahp = AH5416(ah);
	struct ath9k_hw_capabilities *pCap = &ah->ah_caps;
	struct ath9k_tx_queue_info *qi;

	if (q >= pCap->total_queues) {
		DPRINTF(ah->ah_sc, ATH_DBG_QUEUE, "%s: invalid queue num %u\n",
			__func__, q);
		return false;
	}

	qi = &ahp->ah_txq[q];
	if (qi->tqi_type == ATH9K_TX_QUEUE_INACTIVE) {
		DPRINTF(ah->ah_sc, ATH_DBG_QUEUE, "%s: inactive queue\n",
			__func__);
		return false;
	}

	qinfo->tqi_ver = qi->tqi_ver;
	qinfo->tqi_subtype = qi->tqi_subtype;
	qinfo->tqi_qflags = qi->tqi_qflags;
	qinfo->tqi_priority = qi->tqi_priority;
	qinfo->tqi_aifs = qi->tqi_aifs;
	qinfo->tqi_cwmin = qi->tqi_cwmin;
	qinfo->tqi_cwmax = qi->tqi_cwmax;
	qinfo->tqi_shretry = qi->tqi_shretry;
	qinfo->tqi_lgretry = qi->tqi_lgretry;
	qinfo->tqi_cbrPeriod = qi->tqi_cbrPeriod;
	qinfo->tqi_cbrOverflowLimit = qi->tqi_cbrOverflowLimit;
	qinfo->tqi_burstTime = qi->tqi_burstTime;
	qinfo->tqi_readyTime = qi->tqi_readyTime;

	return true;
}
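
/*
 * Allocate a hardware TX queue of the requested type.  Fixed queue
 * numbers are used for beacon, CAB, PS-poll and UAPSD; data queues take
 * the first inactive slot.  Returns the queue number, or -1 on failure.
 */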
int ath9k_hw_setuptxqueue(struct ath_hal *ah, enum ath9k_tx_queue type,
			  const struct ath9k_tx_queue_info *qinfo)
{
	struct ath_hal_5416 *ahp = AH5416(ah);
	struct ath9k_tx_queue_info *qi;
	struct ath9k_hw_capabilities *pCap = &ah->ah_caps;
	int q;

	switch (type) {
	case ATH9K_TX_QUEUE_BEACON:
		q = pCap->total_queues - 1;
		break;
	case ATH9K_TX_QUEUE_CAB:
		q = pCap->total_queues - 2;
		break;
	case ATH9K_TX_QUEUE_PSPOLL:
		q = 1;
		break;
	case ATH9K_TX_QUEUE_UAPSD:
		q = pCap->total_queues - 3;
		break;
	case ATH9K_TX_QUEUE_DATA:
		for (q = 0; q < pCap->total_queues; q++)
			if (ahp->ah_txq[q].tqi_type ==
			    ATH9K_TX_QUEUE_INACTIVE)
				break;
		if (q == pCap->total_queues) {
			DPRINTF(ah->ah_sc, ATH_DBG_QUEUE,
				"%s: no available tx queue\n", __func__);
			return -1;
		}
		break;
	default:
		DPRINTF(ah->ah_sc, ATH_DBG_QUEUE, "%s: bad tx queue type %u\n",
			__func__, type);
		return -1;
	}

	DPRINTF(ah->ah_sc, ATH_DBG_QUEUE, "%s: queue %u\n", __func__, q);

	qi = &ahp->ah_txq[q];
	if (qi->tqi_type != ATH9K_TX_QUEUE_INACTIVE) {
		DPRINTF(ah->ah_sc, ATH_DBG_QUEUE,
			"%s: tx queue %u already active\n", __func__, q);
		return -1;
	}
	memset(qi, 0, sizeof(struct ath9k_tx_queue_info));
	qi->tqi_type = type;
	if (qinfo == NULL) {
		qi->tqi_qflags =
			TXQ_FLAG_TXOKINT_ENABLE
			| TXQ_FLAG_TXERRINT_ENABLE
			| TXQ_FLAG_TXDESCINT_ENABLE | TXQ_FLAG_TXURNINT_ENABLE;
		qi->tqi_aifs = INIT_AIFS;
		qi->tqi_cwmin = ATH9K_TXQ_USEDEFAULT;
		qi->tqi_cwmax = INIT_CWMAX;
		qi->tqi_shretry = INIT_SH_RETRY;
		qi->tqi_lgretry = INIT_LG_RETRY;
		qi->tqi_physCompBuf = 0;
	} else {
		qi->tqi_physCompBuf = qinfo->tqi_physCompBuf;
		(void) ath9k_hw_set_txq_props(ah, q, qinfo);
	}

	return q;
}
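
/*
 * Free a previously allocated TX queue: mark it inactive and clear its
 * bit from every per-queue interrupt mask.
 */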
bool ath9k_hw_releasetxqueue(struct ath_hal *ah, u32 q)
{
	struct ath_hal_5416 *ahp = AH5416(ah);
	struct ath9k_hw_capabilities *pCap = &ah->ah_caps;
	struct ath9k_tx_queue_info *qi;

	if (q >= pCap->total_queues) {
		DPRINTF(ah->ah_sc, ATH_DBG_QUEUE, "%s: invalid queue num %u\n",
			__func__, q);
		return false;
	}
	qi = &ahp->ah_txq[q];
	if (qi->tqi_type == ATH9K_TX_QUEUE_INACTIVE) {
		DPRINTF(ah->ah_sc, ATH_DBG_QUEUE, "%s: inactive queue %u\n",
			__func__, q);
		return false;
	}

	DPRINTF(ah->ah_sc, ATH_DBG_QUEUE, "%s: release queue %u\n",
		__func__, q);

	qi->tqi_type = ATH9K_TX_QUEUE_INACTIVE;
	ahp->ah_txOkInterruptMask &= ~(1 << q);
	ahp->ah_txErrInterruptMask &= ~(1 << q);
	ahp->ah_txDescInterruptMask &= ~(1 << q);
	ahp->ah_txEolInterruptMask &= ~(1 << q);
	ahp->ah_txUrnInterruptMask &= ~(1 << q);
	ath9k_hw_set_txq_interrupts(ah, qi);

	return true;
}
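
/*
 * Push the cached queue parameters into the hardware QCU/DCU registers
 * for queue q: contention window, retry limits, CBR and ready-time
 * settings, per-queue-type special cases (beacon, CAB, PS-poll, UAPSD),
 * and finally the per-queue interrupt masks.
 */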
bool ath9k_hw_resettxqueue(struct ath_hal *ah, u32 q)
{
	struct ath_hal_5416 *ahp = AH5416(ah);
	struct ath9k_hw_capabilities *pCap = &ah->ah_caps;
	struct ath9k_channel *chan = ah->ah_curchan;
	struct ath9k_tx_queue_info *qi;
	u32 cwMin, chanCwMin, value;

	if (q >= pCap->total_queues) {
		DPRINTF(ah->ah_sc, ATH_DBG_QUEUE, "%s: invalid queue num %u\n",
			__func__, q);
		return false;
	}
	qi = &ahp->ah_txq[q];
	if (qi->tqi_type == ATH9K_TX_QUEUE_INACTIVE) {
		DPRINTF(ah->ah_sc, ATH_DBG_QUEUE, "%s: inactive queue %u\n",
			__func__, q);
		return true;
	}

	DPRINTF(ah->ah_sc, ATH_DBG_QUEUE, "%s: reset queue %u\n", __func__, q);

	if (qi->tqi_cwmin == ATH9K_TXQ_USEDEFAULT) {
		if (chan && IS_CHAN_B(chan))
			chanCwMin = INIT_CWMIN_11B;
		else
			chanCwMin = INIT_CWMIN;

		for (cwMin = 1; cwMin < chanCwMin; cwMin = (cwMin << 1) | 1);
	} else
		cwMin = qi->tqi_cwmin;

	REG_WRITE(ah, AR_DLCL_IFS(q),
		SM(cwMin, AR_D_LCL_IFS_CWMIN) |
		SM(qi->tqi_cwmax, AR_D_LCL_IFS_CWMAX) |
		SM(qi->tqi_aifs, AR_D_LCL_IFS_AIFS));

	REG_WRITE(ah, AR_DRETRY_LIMIT(q),
		SM(INIT_SSH_RETRY, AR_D_RETRY_LIMIT_STA_SH) |
		SM(INIT_SLG_RETRY, AR_D_RETRY_LIMIT_STA_LG) |
		SM(qi->tqi_shretry, AR_D_RETRY_LIMIT_FR_SH));

	REG_WRITE(ah, AR_QMISC(q), AR_Q_MISC_DCU_EARLY_TERM_REQ);
	REG_WRITE(ah, AR_DMISC(q),
		AR_D_MISC_CW_BKOFF_EN | AR_D_MISC_FRAG_WAIT_EN | 0x2);

	if (qi->tqi_cbrPeriod) {
		REG_WRITE(ah, AR_QCBRCFG(q),
			SM(qi->tqi_cbrPeriod, AR_Q_CBRCFG_INTERVAL) |
			SM(qi->tqi_cbrOverflowLimit, AR_Q_CBRCFG_OVF_THRESH));
		REG_WRITE(ah, AR_QMISC(q),
			REG_READ(ah, AR_QMISC(q)) | AR_Q_MISC_FSP_CBR |
			(qi->tqi_cbrOverflowLimit ?
			 AR_Q_MISC_CBR_EXP_CNTR_LIMIT_EN : 0));
	}
	if (qi->tqi_readyTime && (qi->tqi_type != ATH9K_TX_QUEUE_CAB)) {
		REG_WRITE(ah, AR_QRDYTIMECFG(q),
			SM(qi->tqi_readyTime, AR_Q_RDYTIMECFG_DURATION) |
			AR_Q_RDYTIMECFG_EN);
	}

	REG_WRITE(ah, AR_DCHNTIME(q),
		SM(qi->tqi_burstTime, AR_D_CHNTIME_DUR) |
		(qi->tqi_burstTime ? AR_D_CHNTIME_EN : 0));

	if (qi->tqi_burstTime
	    && (qi->tqi_qflags & TXQ_FLAG_RDYTIME_EXP_POLICY_ENABLE)) {
		REG_WRITE(ah, AR_QMISC(q),
			REG_READ(ah, AR_QMISC(q)) |
			AR_Q_MISC_RDYTIME_EXP_POLICY);
	}

	if (qi->tqi_qflags & TXQ_FLAG_BACKOFF_DISABLE) {
		REG_WRITE(ah, AR_DMISC(q),
			REG_READ(ah, AR_DMISC(q)) |
			AR_D_MISC_POST_FR_BKOFF_DIS);
	}
	if (qi->tqi_qflags & TXQ_FLAG_FRAG_BURST_BACKOFF_ENABLE) {
		REG_WRITE(ah, AR_DMISC(q),
			REG_READ(ah, AR_DMISC(q)) |
			AR_D_MISC_FRAG_BKOFF_EN);
	}

	switch (qi->tqi_type) {
	case ATH9K_TX_QUEUE_BEACON:
		REG_WRITE(ah, AR_QMISC(q), REG_READ(ah, AR_QMISC(q))
			| AR_Q_MISC_FSP_DBA_GATED
			| AR_Q_MISC_BEACON_USE
			| AR_Q_MISC_CBR_INCR_DIS1);

		REG_WRITE(ah, AR_DMISC(q), REG_READ(ah, AR_DMISC(q))
			| (AR_D_MISC_ARB_LOCKOUT_CNTRL_GLOBAL <<
			   AR_D_MISC_ARB_LOCKOUT_CNTRL_S)
			| AR_D_MISC_BEACON_USE
			| AR_D_MISC_POST_FR_BKOFF_DIS);
		break;
	case ATH9K_TX_QUEUE_CAB:
		REG_WRITE(ah, AR_QMISC(q), REG_READ(ah, AR_QMISC(q))
			| AR_Q_MISC_FSP_DBA_GATED
			| AR_Q_MISC_CBR_INCR_DIS1
			| AR_Q_MISC_CBR_INCR_DIS0);
		value = (qi->tqi_readyTime -
			 (ah->ah_config.sw_beacon_response_time -
			  ah->ah_config.dma_beacon_response_time) -
			 ah->ah_config.additional_swba_backoff) * 1024;
		REG_WRITE(ah, AR_QRDYTIMECFG(q),
			value | AR_Q_RDYTIMECFG_EN);
		REG_WRITE(ah, AR_DMISC(q), REG_READ(ah, AR_DMISC(q))
			| (AR_D_MISC_ARB_LOCKOUT_CNTRL_GLOBAL <<
			   AR_D_MISC_ARB_LOCKOUT_CNTRL_S));
		break;
	case ATH9K_TX_QUEUE_PSPOLL:
		REG_WRITE(ah, AR_QMISC(q),
			REG_READ(ah, AR_QMISC(q)) | AR_Q_MISC_CBR_INCR_DIS1);
		break;
	case ATH9K_TX_QUEUE_UAPSD:
		REG_WRITE(ah, AR_DMISC(q), REG_READ(ah, AR_DMISC(q)) |
			AR_D_MISC_POST_FR_BKOFF_DIS);
		break;
	default:
		break;
	}

	if (qi->tqi_intFlags & ATH9K_TXQ_USE_LOCKOUT_BKOFF_DIS) {
		REG_WRITE(ah, AR_DMISC(q),
			REG_READ(ah, AR_DMISC(q)) |
			SM(AR_D_MISC_ARB_LOCKOUT_CNTRL_GLOBAL,
			   AR_D_MISC_ARB_LOCKOUT_CNTRL) |
			AR_D_MISC_POST_FR_BKOFF_DIS);
	}

	if (qi->tqi_qflags & TXQ_FLAG_TXOKINT_ENABLE)
		ahp->ah_txOkInterruptMask |= 1 << q;
	else
		ahp->ah_txOkInterruptMask &= ~(1 << q);
	if (qi->tqi_qflags & TXQ_FLAG_TXERRINT_ENABLE)
		ahp->ah_txErrInterruptMask |= 1 << q;
	else
		ahp->ah_txErrInterruptMask &= ~(1 << q);
	if (qi->tqi_qflags & TXQ_FLAG_TXDESCINT_ENABLE)
		ahp->ah_txDescInterruptMask |= 1 << q;
	else
		ahp->ah_txDescInterruptMask &= ~(1 << q);
	if (qi->tqi_qflags & TXQ_FLAG_TXEOLINT_ENABLE)
		ahp->ah_txEolInterruptMask |= 1 << q;
	else
		ahp->ah_txEolInterruptMask &= ~(1 << q);
	if (qi->tqi_qflags & TXQ_FLAG_TXURNINT_ENABLE)
		ahp->ah_txUrnInterruptMask |= 1 << q;
	else
		ahp->ah_txUrnInterruptMask &= ~(1 << q);
	ath9k_hw_set_txq_interrupts(ah, qi);

	return true;
}
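
/*
 * Process a completed RX descriptor: return -EINPROGRESS until AR_RxDone
 * is set, then decode length, timestamp, RSSI, rate, aggregation state
 * and error bits (CRC, PHY, decrypt, Michael MIC) into the generic RX
 * status.
 */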
int ath9k_hw_rxprocdesc(struct ath_hal *ah, struct ath_desc *ds,
			u32 pa, struct ath_desc *nds, u64 tsf)
{
	struct ar5416_desc ads;
	struct ar5416_desc *adsp = AR5416DESC(ds);
	u32 phyerr;

	if ((adsp->ds_rxstatus8 & AR_RxDone) == 0)
		return -EINPROGRESS;

	ads.u.rx = adsp->u.rx;

	ds->ds_rxstat.rs_status = 0;
	ds->ds_rxstat.rs_flags = 0;

	ds->ds_rxstat.rs_datalen = ads.ds_rxstatus1 & AR_DataLen;
	ds->ds_rxstat.rs_tstamp = ads.AR_RcvTimestamp;

	ds->ds_rxstat.rs_rssi = MS(ads.ds_rxstatus4, AR_RxRSSICombined);
	ds->ds_rxstat.rs_rssi_ctl0 = MS(ads.ds_rxstatus0, AR_RxRSSIAnt00);
	ds->ds_rxstat.rs_rssi_ctl1 = MS(ads.ds_rxstatus0, AR_RxRSSIAnt01);
	ds->ds_rxstat.rs_rssi_ctl2 = MS(ads.ds_rxstatus0, AR_RxRSSIAnt02);
	ds->ds_rxstat.rs_rssi_ext0 = MS(ads.ds_rxstatus4, AR_RxRSSIAnt10);
	ds->ds_rxstat.rs_rssi_ext1 = MS(ads.ds_rxstatus4, AR_RxRSSIAnt11);
	ds->ds_rxstat.rs_rssi_ext2 = MS(ads.ds_rxstatus4, AR_RxRSSIAnt12);
	if (ads.ds_rxstatus8 & AR_RxKeyIdxValid)
		ds->ds_rxstat.rs_keyix = MS(ads.ds_rxstatus8, AR_KeyIdx);
	else
		ds->ds_rxstat.rs_keyix = ATH9K_RXKEYIX_INVALID;

	ds->ds_rxstat.rs_rate = RXSTATUS_RATE(ah, (&ads));
	ds->ds_rxstat.rs_more = (ads.ds_rxstatus1 & AR_RxMore) ? 1 : 0;

	ds->ds_rxstat.rs_isaggr = (ads.ds_rxstatus8 & AR_RxAggr) ? 1 : 0;
	ds->ds_rxstat.rs_moreaggr =
		(ads.ds_rxstatus8 & AR_RxMoreAggr) ? 1 : 0;
	ds->ds_rxstat.rs_antenna = MS(ads.ds_rxstatus3, AR_RxAntenna);
	ds->ds_rxstat.rs_flags =
		(ads.ds_rxstatus3 & AR_GI) ? ATH9K_RX_GI : 0;
	ds->ds_rxstat.rs_flags |=
		(ads.ds_rxstatus3 & AR_2040) ? ATH9K_RX_2040 : 0;

	if (ads.ds_rxstatus8 & AR_PreDelimCRCErr)
		ds->ds_rxstat.rs_flags |= ATH9K_RX_DELIM_CRC_PRE;
	if (ads.ds_rxstatus8 & AR_PostDelimCRCErr)
		ds->ds_rxstat.rs_flags |= ATH9K_RX_DELIM_CRC_POST;
	if (ads.ds_rxstatus8 & AR_DecryptBusyErr)
		ds->ds_rxstat.rs_flags |= ATH9K_RX_DECRYPT_BUSY;

	if ((ads.ds_rxstatus8 & AR_RxFrameOK) == 0) {
		if (ads.ds_rxstatus8 & AR_CRCErr)
			ds->ds_rxstat.rs_status |= ATH9K_RXERR_CRC;
		else if (ads.ds_rxstatus8 & AR_PHYErr) {
			ds->ds_rxstat.rs_status |= ATH9K_RXERR_PHY;
			phyerr = MS(ads.ds_rxstatus8, AR_PHYErrCode);
			ds->ds_rxstat.rs_phyerr = phyerr;
		} else if (ads.ds_rxstatus8 & AR_DecryptCRCErr)
			ds->ds_rxstat.rs_status |= ATH9K_RXERR_DECRYPT;
		else if (ads.ds_rxstatus8 & AR_MichaelErr)
			ds->ds_rxstat.rs_status |= ATH9K_RXERR_MIC;
	}

	return 0;
}

bool ath9k_hw_setuprxdesc(struct ath_hal *ah, struct ath_desc *ds,
			  u32 size, u32 flags)
{
	struct ar5416_desc *ads = AR5416DESC(ds);
	struct ath9k_hw_capabilities *pCap = &ah->ah_caps;

	ads->ds_ctl1 = size & AR_BufLen;
	if (flags & ATH9K_RXDESC_INTREQ)
		ads->ds_ctl1 |= AR_RxIntrReq;

	ads->ds_rxstatus8 &= ~AR_RxDone;
	if (!(pCap->hw_caps & ATH9K_HW_CAP_AUTOSLEEP))
		memset(&(ads->u), 0, sizeof(ads->u));
	return true;
}
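
/*
 * Abort or re-enable PCU receive.  When aborting, wait for the RX state
 * machine to go idle; if it does not within the timeout, undo the abort
 * and report failure.
 */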
bool ath9k_hw_setrxabort(struct ath_hal *ah, bool set)
{
	u32 reg;

	if (set) {
		REG_SET_BIT(ah, AR_DIAG_SW,
			(AR_DIAG_RX_DIS | AR_DIAG_RX_ABORT));

		if (!ath9k_hw_wait(ah, AR_OBS_BUS_1, AR_OBS_BUS_1_RX_STATE, 0)) {
			REG_CLR_BIT(ah, AR_DIAG_SW,
				(AR_DIAG_RX_DIS |
				 AR_DIAG_RX_ABORT));

			reg = REG_READ(ah, AR_OBS_BUS_1);
			DPRINTF(ah->ah_sc, ATH_DBG_FATAL,
				"%s: rx failed to go idle in 10 ms RXSM=0x%x\n",
				__func__, reg);

			return false;
		}
	} else {
		REG_CLR_BIT(ah, AR_DIAG_SW,
			(AR_DIAG_RX_DIS | AR_DIAG_RX_ABORT));
	}

	return true;
}

void ath9k_hw_putrxbuf(struct ath_hal *ah, u32 rxdp)
{
	REG_WRITE(ah, AR_RXDP, rxdp);
}

void ath9k_hw_rxena(struct ath_hal *ah)
{
	REG_WRITE(ah, AR_CR, AR_CR_RXE);
}

void ath9k_hw_startpcureceive(struct ath_hal *ah)
{
	REG_CLR_BIT(ah, AR_DIAG_SW,
		(AR_DIAG_RX_DIS | AR_DIAG_RX_ABORT));

	ath9k_enable_mib_counters(ah);

	ath9k_ani_reset(ah);
}

void ath9k_hw_stoppcurecv(struct ath_hal *ah)
{
	REG_SET_BIT(ah, AR_DIAG_SW, AR_DIAG_RX_DIS);

	ath9k_hw_disable_mib_counters(ah);
}
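
/*
 * Stop RX DMA and wait for the receive engine to report idle; log the
 * control and diagnostic registers if it fails to stop in time.
 */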
bool ath9k_hw_stopdmarecv(struct ath_hal *ah)
{
	REG_WRITE(ah, AR_CR, AR_CR_RXD);

	if (!ath9k_hw_wait(ah, AR_CR, AR_CR_RXE, 0)) {
		DPRINTF(ah->ah_sc, ATH_DBG_QUEUE,
			"%s: dma failed to stop in 10ms\n"
			"AR_CR=0x%08x\nAR_DIAG_SW=0x%08x\n",
			__func__,
			REG_READ(ah, AR_CR), REG_READ(ah, AR_DIAG_SW));
		return false;
	} else {
		return true;
	}
}