mac.c 30 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
370470570670770870971071171271371471571671771871972072172272372472572672772872973073173273373473573673773873974074174274374474574674774874975075175275375475575675775875976076176276376476576676776876977077177277377477577677777877978078178278378478578678778878979079179279379479579679779879980080180280380480580680780880981081181281381481581681781881982082182282382482582682782882983083183283383483583683783883984084184284384484584684784884985085185285385485585685785885986086186286386486586686786886987087187287387487587687787887988088188288388488588688788888989089189289389489589689789889990090190290390490590690790890991091191291391491591691791891992092192292392492592692792892993093193293393493593693793893994094194294394494594694794894995095195295395495595695795895996096196296396496596696796896997097197297397497597697797897998098198298398498598698798898999099199299399499599699799899910001001100210031004100510061007100810091010101110121013101410151016101710181019102010211022102310241025102610271028102910301031103210331034103510361037103810391040104110421043104410451046104710481049105010511052105310541055105610571058105910601061106210631064106510661067
  1. /*
  2. * Copyright (c) 2008-2009 Atheros Communications Inc.
  3. *
  4. * Permission to use, copy, modify, and/or distribute this software for any
  5. * purpose with or without fee is hereby granted, provided that the above
  6. * copyright notice and this permission notice appear in all copies.
  7. *
  8. * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
  9. * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
  10. * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
  11. * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
  12. * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
  13. * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
  14. * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
  15. */
  16. #include "hw.h"
/*
 * Push the software-maintained per-queue TX interrupt masks into the
 * hardware secondary interrupt mask registers.
 *
 * TXOK/TXDESC masks go to AR_IMR_S0, TXERR/TXEOL masks to AR_IMR_S1,
 * and the TXURN mask is read-modify-written into its field of AR_IMR_S2.
 * @qi identifies the queue being (re)configured; the masks themselves
 * are taken from @ah, which callers update before invoking this helper.
 */
static void ath9k_hw_set_txq_interrupts(struct ath_hw *ah,
					struct ath9k_tx_queue_info *qi)
{
	ath_print(ath9k_hw_common(ah), ATH_DBG_INTERRUPT,
		  "tx ok 0x%x err 0x%x desc 0x%x eol 0x%x urn 0x%x\n",
		  ah->txok_interrupt_mask, ah->txerr_interrupt_mask,
		  ah->txdesc_interrupt_mask, ah->txeol_interrupt_mask,
		  ah->txurn_interrupt_mask);

	REG_WRITE(ah, AR_IMR_S0,
		  SM(ah->txok_interrupt_mask, AR_IMR_S0_QCU_TXOK)
		  | SM(ah->txdesc_interrupt_mask, AR_IMR_S0_QCU_TXDESC));
	REG_WRITE(ah, AR_IMR_S1,
		  SM(ah->txerr_interrupt_mask, AR_IMR_S1_QCU_TXERR)
		  | SM(ah->txeol_interrupt_mask, AR_IMR_S1_QCU_TXEOL));
	/* TXURN shares AR_IMR_S2 with other fields, so only its field
	 * is rewritten rather than the whole register. */
	REG_RMW_FIELD(ah, AR_IMR_S2,
		      AR_IMR_S2_QCU_TXURN, ah->txurn_interrupt_mask);
}
/* Read the current TX descriptor pointer (AR_QTXDP) of queue @q. */
u32 ath9k_hw_gettxbuf(struct ath_hw *ah, u32 q)
{
	return REG_READ(ah, AR_QTXDP(q));
}
EXPORT_SYMBOL(ath9k_hw_gettxbuf);
/*
 * Set the TX descriptor pointer (AR_QTXDP) of queue @q to the DMA
 * address @txdp of the first descriptor to process.
 */
void ath9k_hw_puttxbuf(struct ath_hw *ah, u32 q, u32 txdp)
{
	REG_WRITE(ah, AR_QTXDP(q), txdp);
}
EXPORT_SYMBOL(ath9k_hw_puttxbuf);
/*
 * Kick TX DMA on queue @q by setting its bit in the AR_Q_TXE
 * transmit-enable register.
 */
void ath9k_hw_txstart(struct ath_hw *ah, u32 q)
{
	ath_print(ath9k_hw_common(ah), ATH_DBG_QUEUE,
		  "Enable TXE on queue: %u\n", q);
	REG_WRITE(ah, AR_Q_TXE, 1 << q);
}
EXPORT_SYMBOL(ath9k_hw_txstart);
  51. u32 ath9k_hw_numtxpending(struct ath_hw *ah, u32 q)
  52. {
  53. u32 npend;
  54. npend = REG_READ(ah, AR_QSTS(q)) & AR_Q_STS_PEND_FR_CNT;
  55. if (npend == 0) {
  56. if (REG_READ(ah, AR_Q_TXE) & (1 << q))
  57. npend = 1;
  58. }
  59. return npend;
  60. }
  61. EXPORT_SYMBOL(ath9k_hw_numtxpending);
/**
 * ath9k_hw_updatetxtriglevel - adjusts the frame trigger level
 *
 * @ah: atheros hardware struct
 * @bIncTrigLevel: whether or not the frame trigger level should be updated
 *
 * The frame trigger level specifies the minimum number of bytes,
 * in units of 64 bytes, that must be DMA'ed into the PCU TX FIFO
 * before the PCU will initiate sending the frame on the air. This can
 * mean we initiate transmit before a full frame is on the PCU TX FIFO.
 * Resets to 0x1 (meaning 64 bytes or a full frame, whichever occurs
 * first)
 *
 * Caution must be taken to ensure to set the frame trigger level based
 * on the DMA request size. For example if the DMA request size is set to
 * 128 bytes the trigger level cannot exceed 6 * 64 = 384. This is because
 * there need to be enough space in the tx FIFO for the requested transfer
 * size. Hence the tx FIFO will stop with 512 - 128 = 384 bytes. If we set
 * the threshold to a value beyond 6, then the transmit will hang.
 *
 * Current dual stream devices have a PCU TX FIFO size of 8 KB.
 * Current single stream devices have a PCU TX FIFO size of 4 KB, however,
 * there is a hardware issue which forces us to use 2 KB instead so the
 * frame trigger level must not exceed 2 KB for these chipsets.
 *
 * Returns true when the trigger level was actually changed.
 */
bool ath9k_hw_updatetxtriglevel(struct ath_hw *ah, bool bIncTrigLevel)
{
	u32 txcfg, curLevel, newLevel;
	enum ath9k_int omask;

	/* Already at the chip's ceiling; nothing to raise. */
	if (ah->tx_trig_level >= ah->config.max_txtrig_level)
		return false;

	/* Mask all interrupts while AR_TXCFG is read-modify-written so an
	 * ISR cannot race the update; the previous mask is restored below. */
	omask = ath9k_hw_set_interrupts(ah, ah->mask_reg & ~ATH9K_INT_GLOBAL);

	txcfg = REG_READ(ah, AR_TXCFG);
	curLevel = MS(txcfg, AR_FTRIG);
	newLevel = curLevel;
	if (bIncTrigLevel) {
		if (curLevel < ah->config.max_txtrig_level)
			newLevel++;
	} else if (curLevel > MIN_TX_FIFO_THRESHOLD)
		newLevel--;
	if (newLevel != curLevel)
		REG_WRITE(ah, AR_TXCFG,
			  (txcfg & ~AR_FTRIG) | SM(newLevel, AR_FTRIG));

	ath9k_hw_set_interrupts(ah, omask);

	ah->tx_trig_level = newLevel;

	return newLevel != curLevel;
}
EXPORT_SYMBOL(ath9k_hw_updatetxtriglevel);
/*
 * Stop TX DMA on queue @q and wait for pending frames to drain.
 *
 * First requests a normal stop via AR_Q_TXD and polls up to
 * ATH9K_TX_STOP_DMA_TIMEOUT usec.  If frames are still pending, it
 * schedules a hardware "quiet time" interval aligned to the current
 * TSF and forces the channel-idle diagnostic bit so the MAC kills the
 * in-flight frame, then polls again.  Returns true if the queue
 * drained before the final poll timed out, false otherwise (or on an
 * invalid/inactive queue).
 */
bool ath9k_hw_stoptxdma(struct ath_hw *ah, u32 q)
{
#define ATH9K_TX_STOP_DMA_TIMEOUT 4000 /* usec */
#define ATH9K_TIME_QUANTUM 100 /* usec */
	struct ath_common *common = ath9k_hw_common(ah);
	struct ath9k_hw_capabilities *pCap = &ah->caps;
	struct ath9k_tx_queue_info *qi;
	u32 tsfLow, j, wait;
	u32 wait_time = ATH9K_TX_STOP_DMA_TIMEOUT / ATH9K_TIME_QUANTUM;

	if (q >= pCap->total_queues) {
		ath_print(common, ATH_DBG_QUEUE, "Stopping TX DMA, "
			  "invalid queue: %u\n", q);
		return false;
	}

	qi = &ah->txq[q];
	if (qi->tqi_type == ATH9K_TX_QUEUE_INACTIVE) {
		ath_print(common, ATH_DBG_QUEUE, "Stopping TX DMA, "
			  "inactive queue: %u\n", q);
		return false;
	}

	/* Request a graceful stop and poll the pending-frame count. */
	REG_WRITE(ah, AR_Q_TXD, 1 << q);

	for (wait = wait_time; wait != 0; wait--) {
		if (ath9k_hw_numtxpending(ah, q) == 0)
			break;
		udelay(ATH9K_TIME_QUANTUM);
	}

	if (ath9k_hw_numtxpending(ah, q)) {
		ath_print(common, ATH_DBG_QUEUE,
			  "%s: Num of pending TX Frames %d on Q %d\n",
			  __func__, ath9k_hw_numtxpending(ah, q), q);

		/* The graceful stop timed out: program a quiet-time
		 * interval starting (almost) immediately so the MAC
		 * aborts the frame on air.  Retry once if the TSF
		 * rolled past the programmed start before we finished. */
		for (j = 0; j < 2; j++) {
			tsfLow = REG_READ(ah, AR_TSF_L32);
			REG_WRITE(ah, AR_QUIET2,
				  SM(10, AR_QUIET2_QUIET_DUR));
			REG_WRITE(ah, AR_QUIET_PERIOD, 100);
			REG_WRITE(ah, AR_NEXT_QUIET_TIMER, tsfLow >> 10);
			REG_SET_BIT(ah, AR_TIMER_MODE,
				    AR_QUIET_TIMER_EN);

			if ((REG_READ(ah, AR_TSF_L32) >> 10) == (tsfLow >> 10))
				break;

			ath_print(common, ATH_DBG_QUEUE,
				  "TSF has moved while trying to set "
				  "quiet time TSF: 0x%08x\n", tsfLow);
		}

		/* Force the channel to look busy-free so the kill
		 * completes, give the hardware time, then undo it. */
		REG_SET_BIT(ah, AR_DIAG_SW, AR_DIAG_FORCE_CH_IDLE_HIGH);

		udelay(200);
		REG_CLR_BIT(ah, AR_TIMER_MODE, AR_QUIET_TIMER_EN);

		wait = wait_time;
		while (ath9k_hw_numtxpending(ah, q)) {
			if ((--wait) == 0) {
				ath_print(common, ATH_DBG_FATAL,
					  "Failed to stop TX DMA in 100 "
					  "msec after killing last frame\n");
				break;
			}
			udelay(ATH9K_TIME_QUANTUM);
		}

		REG_CLR_BIT(ah, AR_DIAG_SW, AR_DIAG_FORCE_CH_IDLE_HIGH);
	}

	REG_WRITE(ah, AR_Q_TXD, 0);
	return wait != 0;

#undef ATH9K_TX_STOP_DMA_TIMEOUT
#undef ATH9K_TIME_QUANTUM
}
EXPORT_SYMBOL(ath9k_hw_stoptxdma);
  175. void ath9k_hw_filltxdesc(struct ath_hw *ah, struct ath_desc *ds,
  176. u32 segLen, bool firstSeg,
  177. bool lastSeg, const struct ath_desc *ds0)
  178. {
  179. struct ar5416_desc *ads = AR5416DESC(ds);
  180. if (firstSeg) {
  181. ads->ds_ctl1 |= segLen | (lastSeg ? 0 : AR_TxMore);
  182. } else if (lastSeg) {
  183. ads->ds_ctl0 = 0;
  184. ads->ds_ctl1 = segLen;
  185. ads->ds_ctl2 = AR5416DESC_CONST(ds0)->ds_ctl2;
  186. ads->ds_ctl3 = AR5416DESC_CONST(ds0)->ds_ctl3;
  187. } else {
  188. ads->ds_ctl0 = 0;
  189. ads->ds_ctl1 = segLen | AR_TxMore;
  190. ads->ds_ctl2 = 0;
  191. ads->ds_ctl3 = 0;
  192. }
  193. ads->ds_txstatus0 = ads->ds_txstatus1 = 0;
  194. ads->ds_txstatus2 = ads->ds_txstatus3 = 0;
  195. ads->ds_txstatus4 = ads->ds_txstatus5 = 0;
  196. ads->ds_txstatus6 = ads->ds_txstatus7 = 0;
  197. ads->ds_txstatus8 = ads->ds_txstatus9 = 0;
  198. }
  199. EXPORT_SYMBOL(ath9k_hw_filltxdesc);
  200. void ath9k_hw_cleartxdesc(struct ath_hw *ah, struct ath_desc *ds)
  201. {
  202. struct ar5416_desc *ads = AR5416DESC(ds);
  203. ads->ds_txstatus0 = ads->ds_txstatus1 = 0;
  204. ads->ds_txstatus2 = ads->ds_txstatus3 = 0;
  205. ads->ds_txstatus4 = ads->ds_txstatus5 = 0;
  206. ads->ds_txstatus6 = ads->ds_txstatus7 = 0;
  207. ads->ds_txstatus8 = ads->ds_txstatus9 = 0;
  208. }
  209. EXPORT_SYMBOL(ath9k_hw_cleartxdesc);
/*
 * Decode the hardware TX completion status of descriptor @ds into the
 * software ds_txstat structure.
 *
 * Returns -EINPROGRESS if the hardware has not yet set AR_TxDone
 * (the descriptor is still owned by DMA), 0 once the status has been
 * fully decoded.  FIFO/data/delimiter underruns additionally bump the
 * PCU TX FIFO trigger level to reduce the chance of a repeat.
 */
int ath9k_hw_txprocdesc(struct ath_hw *ah, struct ath_desc *ds)
{
	struct ar5416_desc *ads = AR5416DESC(ds);

	if ((ads->ds_txstatus9 & AR_TxDone) == 0)
		return -EINPROGRESS;

	ds->ds_txstat.ts_seqnum = MS(ads->ds_txstatus9, AR_SeqNum);
	ds->ds_txstat.ts_tstamp = ads->AR_SendTimestamp;
	ds->ds_txstat.ts_status = 0;
	ds->ds_txstat.ts_flags = 0;

	/* Map the hardware status bits onto ts_status error/ack flags. */
	if (ads->ds_txstatus1 & AR_FrmXmitOK)
		ds->ds_txstat.ts_status |= ATH9K_TX_ACKED;
	if (ads->ds_txstatus1 & AR_ExcessiveRetries)
		ds->ds_txstat.ts_status |= ATH9K_TXERR_XRETRY;
	if (ads->ds_txstatus1 & AR_Filtered)
		ds->ds_txstat.ts_status |= ATH9K_TXERR_FILT;
	if (ads->ds_txstatus1 & AR_FIFOUnderrun) {
		ds->ds_txstat.ts_status |= ATH9K_TXERR_FIFO;
		/* Underrun: raise the trigger level before retrying. */
		ath9k_hw_updatetxtriglevel(ah, true);
	}
	if (ads->ds_txstatus9 & AR_TxOpExceeded)
		ds->ds_txstat.ts_status |= ATH9K_TXERR_XTXOP;
	if (ads->ds_txstatus1 & AR_TxTimerExpired)
		ds->ds_txstat.ts_status |= ATH9K_TXERR_TIMER_EXPIRED;

	if (ads->ds_txstatus1 & AR_DescCfgErr)
		ds->ds_txstat.ts_flags |= ATH9K_TX_DESC_CFG_ERR;
	if (ads->ds_txstatus1 & AR_TxDataUnderrun) {
		ds->ds_txstat.ts_flags |= ATH9K_TX_DATA_UNDERRUN;
		ath9k_hw_updatetxtriglevel(ah, true);
	}
	if (ads->ds_txstatus1 & AR_TxDelimUnderrun) {
		ds->ds_txstat.ts_flags |= ATH9K_TX_DELIM_UNDERRUN;
		ath9k_hw_updatetxtriglevel(ah, true);
	}
	if (ads->ds_txstatus0 & AR_TxBaStatus) {
		/* Block-ack received: capture the 64-bit BA bitmap. */
		ds->ds_txstat.ts_flags |= ATH9K_TX_BA;
		ds->ds_txstat.ba_low = ads->AR_BaBitmapLow;
		ds->ds_txstat.ba_high = ads->AR_BaBitmapHigh;
	}

	/* Recover the rate actually used from the final-rate index and
	 * the per-series rate fields programmed into ds_ctl3. */
	ds->ds_txstat.ts_rateindex = MS(ads->ds_txstatus9, AR_FinalTxIdx);
	switch (ds->ds_txstat.ts_rateindex) {
	case 0:
		ds->ds_txstat.ts_ratecode = MS(ads->ds_ctl3, AR_XmitRate0);
		break;
	case 1:
		ds->ds_txstat.ts_ratecode = MS(ads->ds_ctl3, AR_XmitRate1);
		break;
	case 2:
		ds->ds_txstat.ts_ratecode = MS(ads->ds_ctl3, AR_XmitRate2);
		break;
	case 3:
		ds->ds_txstat.ts_ratecode = MS(ads->ds_ctl3, AR_XmitRate3);
		break;
	}

	/* Per-chain RSSI (control + extension channels) and EVM. */
	ds->ds_txstat.ts_rssi = MS(ads->ds_txstatus5, AR_TxRSSICombined);
	ds->ds_txstat.ts_rssi_ctl0 = MS(ads->ds_txstatus0, AR_TxRSSIAnt00);
	ds->ds_txstat.ts_rssi_ctl1 = MS(ads->ds_txstatus0, AR_TxRSSIAnt01);
	ds->ds_txstat.ts_rssi_ctl2 = MS(ads->ds_txstatus0, AR_TxRSSIAnt02);
	ds->ds_txstat.ts_rssi_ext0 = MS(ads->ds_txstatus5, AR_TxRSSIAnt10);
	ds->ds_txstat.ts_rssi_ext1 = MS(ads->ds_txstatus5, AR_TxRSSIAnt11);
	ds->ds_txstat.ts_rssi_ext2 = MS(ads->ds_txstatus5, AR_TxRSSIAnt12);
	ds->ds_txstat.evm0 = ads->AR_TxEVM0;
	ds->ds_txstat.evm1 = ads->AR_TxEVM1;
	ds->ds_txstat.evm2 = ads->AR_TxEVM2;
	ds->ds_txstat.ts_shortretry = MS(ads->ds_txstatus1, AR_RTSFailCnt);
	ds->ds_txstat.ts_longretry = MS(ads->ds_txstatus1, AR_DataFailCnt);
	ds->ds_txstat.ts_virtcol = MS(ads->ds_txstatus1, AR_VirtRetryCnt);
	ds->ds_txstat.ts_antenna = 0;

	return 0;
}
EXPORT_SYMBOL(ath9k_hw_txprocdesc);
/*
 * Build the primary control words (ds_ctl0/ctl1/ctl6) of a TX
 * descriptor for an 802.11n frame.
 *
 * @pktLen:  frame length programmed into AR_FrameLen
 * @type:    hardware frame type (AR_FrameType field)
 * @txPower: requested power, offset-corrected and clamped to the
 *           6-bit AR_XmitPower field maximum (63)
 * @keyIx:   key-cache index, or ATH9K_TXKEYIX_INVALID for none
 * @keyType: encryption type programmed into AR_EncrType
 * @flags:   ATH9K_TXDESC_* option bits translated to descriptor bits
 */
void ath9k_hw_set11n_txdesc(struct ath_hw *ah, struct ath_desc *ds,
			    u32 pktLen, enum ath9k_pkt_type type, u32 txPower,
			    u32 keyIx, enum ath9k_key_type keyType, u32 flags)
{
	struct ar5416_desc *ads = AR5416DESC(ds);

	txPower += ah->txpower_indexoffset;
	if (txPower > 63)
		txPower = 63;

	ads->ds_ctl0 = (pktLen & AR_FrameLen)
		| (flags & ATH9K_TXDESC_VMF ? AR_VirtMoreFrag : 0)
		| SM(txPower, AR_XmitPower)
		| (flags & ATH9K_TXDESC_VEOL ? AR_VEOL : 0)
		| (flags & ATH9K_TXDESC_CLRDMASK ? AR_ClrDestMask : 0)
		| (flags & ATH9K_TXDESC_INTREQ ? AR_TxIntrReq : 0)
		| (keyIx != ATH9K_TXKEYIX_INVALID ? AR_DestIdxValid : 0);

	ads->ds_ctl1 =
		(keyIx != ATH9K_TXKEYIX_INVALID ? SM(keyIx, AR_DestIdx) : 0)
		| SM(type, AR_FrameType)
		| (flags & ATH9K_TXDESC_NOACK ? AR_NoAck : 0)
		| (flags & ATH9K_TXDESC_EXT_ONLY ? AR_ExtOnly : 0)
		| (flags & ATH9K_TXDESC_EXT_AND_CTL ? AR_ExtAndCtl : 0);

	ads->ds_ctl6 = SM(keyType, AR_EncrType);

	if (AR_SREV_9285(ah)) {
		/* AR9285 descriptors have extra control words which must
		 * be cleared explicitly. */
		ads->ds_ctl8 = 0;
		ads->ds_ctl9 = 0;
		ads->ds_ctl10 = 0;
		ads->ds_ctl11 = 0;
	}
}
EXPORT_SYMBOL(ath9k_hw_set11n_txdesc);
/*
 * Program the multi-rate-retry scenario into a TX descriptor chain.
 *
 * Fills ds_ctl2..ds_ctl5 and ds_ctl7 of the first descriptor with the
 * per-series try counts, rates, packet durations and rate flags from
 * @series (4 entries), plus the RTS/CTS rate and protection bits.
 * RTS and CTS protection are mutually exclusive, so enabling one
 * clears the other.  ds_ctl2/ds_ctl3 are mirrored onto the last
 * descriptor of the frame (@lastds) as the hardware requires.
 */
void ath9k_hw_set11n_ratescenario(struct ath_hw *ah, struct ath_desc *ds,
				  struct ath_desc *lastds,
				  u32 durUpdateEn, u32 rtsctsRate,
				  u32 rtsctsDuration,
				  struct ath9k_11n_rate_series series[],
				  u32 nseries, u32 flags)
{
	struct ar5416_desc *ads = AR5416DESC(ds);
	struct ar5416_desc *last_ads = AR5416DESC(lastds);
	u32 ds_ctl0;

	if (flags & (ATH9K_TXDESC_RTSENA | ATH9K_TXDESC_CTSENA)) {
		ds_ctl0 = ads->ds_ctl0;

		if (flags & ATH9K_TXDESC_RTSENA) {
			ds_ctl0 &= ~AR_CTSEnable;
			ds_ctl0 |= AR_RTSEnable;
		} else {
			ds_ctl0 &= ~AR_RTSEnable;
			ds_ctl0 |= AR_CTSEnable;
		}

		ads->ds_ctl0 = ds_ctl0;
	} else {
		/* No protection requested: clear both enables. */
		ads->ds_ctl0 =
			(ads->ds_ctl0 & ~(AR_RTSEnable | AR_CTSEnable));
	}

	ads->ds_ctl2 = set11nTries(series, 0)
		| set11nTries(series, 1)
		| set11nTries(series, 2)
		| set11nTries(series, 3)
		| (durUpdateEn ? AR_DurUpdateEna : 0)
		| SM(0, AR_BurstDur);

	ads->ds_ctl3 = set11nRate(series, 0)
		| set11nRate(series, 1)
		| set11nRate(series, 2)
		| set11nRate(series, 3);

	ads->ds_ctl4 = set11nPktDurRTSCTS(series, 0)
		| set11nPktDurRTSCTS(series, 1);

	ads->ds_ctl5 = set11nPktDurRTSCTS(series, 2)
		| set11nPktDurRTSCTS(series, 3);

	ads->ds_ctl7 = set11nRateFlags(series, 0)
		| set11nRateFlags(series, 1)
		| set11nRateFlags(series, 2)
		| set11nRateFlags(series, 3)
		| SM(rtsctsRate, AR_RTSCTSRate);

	/* The last descriptor must carry the same tries/rates words. */
	last_ads->ds_ctl2 = ads->ds_ctl2;
	last_ads->ds_ctl3 = ads->ds_ctl3;
}
EXPORT_SYMBOL(ath9k_hw_set11n_ratescenario);
  357. void ath9k_hw_set11n_aggr_first(struct ath_hw *ah, struct ath_desc *ds,
  358. u32 aggrLen)
  359. {
  360. struct ar5416_desc *ads = AR5416DESC(ds);
  361. ads->ds_ctl1 |= (AR_IsAggr | AR_MoreAggr);
  362. ads->ds_ctl6 &= ~AR_AggrLen;
  363. ads->ds_ctl6 |= SM(aggrLen, AR_AggrLen);
  364. }
  365. EXPORT_SYMBOL(ath9k_hw_set11n_aggr_first);
  366. void ath9k_hw_set11n_aggr_middle(struct ath_hw *ah, struct ath_desc *ds,
  367. u32 numDelims)
  368. {
  369. struct ar5416_desc *ads = AR5416DESC(ds);
  370. unsigned int ctl6;
  371. ads->ds_ctl1 |= (AR_IsAggr | AR_MoreAggr);
  372. ctl6 = ads->ds_ctl6;
  373. ctl6 &= ~AR_PadDelim;
  374. ctl6 |= SM(numDelims, AR_PadDelim);
  375. ads->ds_ctl6 = ctl6;
  376. }
  377. EXPORT_SYMBOL(ath9k_hw_set11n_aggr_middle);
  378. void ath9k_hw_set11n_aggr_last(struct ath_hw *ah, struct ath_desc *ds)
  379. {
  380. struct ar5416_desc *ads = AR5416DESC(ds);
  381. ads->ds_ctl1 |= AR_IsAggr;
  382. ads->ds_ctl1 &= ~AR_MoreAggr;
  383. ads->ds_ctl6 &= ~AR_PadDelim;
  384. }
  385. EXPORT_SYMBOL(ath9k_hw_set11n_aggr_last);
  386. void ath9k_hw_clr11n_aggr(struct ath_hw *ah, struct ath_desc *ds)
  387. {
  388. struct ar5416_desc *ads = AR5416DESC(ds);
  389. ads->ds_ctl1 &= (~AR_IsAggr & ~AR_MoreAggr);
  390. }
  391. EXPORT_SYMBOL(ath9k_hw_clr11n_aggr);
  392. void ath9k_hw_set11n_burstduration(struct ath_hw *ah, struct ath_desc *ds,
  393. u32 burstDuration)
  394. {
  395. struct ar5416_desc *ads = AR5416DESC(ds);
  396. ads->ds_ctl2 &= ~AR_BurstDur;
  397. ads->ds_ctl2 |= SM(burstDuration, AR_BurstDur);
  398. }
  399. EXPORT_SYMBOL(ath9k_hw_set11n_burstduration);
  400. void ath9k_hw_set11n_virtualmorefrag(struct ath_hw *ah, struct ath_desc *ds,
  401. u32 vmf)
  402. {
  403. struct ar5416_desc *ads = AR5416DESC(ds);
  404. if (vmf)
  405. ads->ds_ctl0 |= AR_VirtMoreFrag;
  406. else
  407. ads->ds_ctl0 &= ~AR_VirtMoreFrag;
  408. }
  409. void ath9k_hw_gettxintrtxqs(struct ath_hw *ah, u32 *txqs)
  410. {
  411. *txqs &= ah->intr_txqs;
  412. ah->intr_txqs &= ~(*txqs);
  413. }
  414. EXPORT_SYMBOL(ath9k_hw_gettxintrtxqs);
/*
 * Copy queue parameters from @qinfo into the software state of TX
 * queue @q, applying defaults, clamps and power-of-two rounding.
 *
 * Only software state is updated here; the values take effect in
 * hardware on the next ath9k_hw_resettxqueue() call.  Returns false
 * for an out-of-range or inactive queue, true on success.
 */
bool ath9k_hw_set_txq_props(struct ath_hw *ah, int q,
			    const struct ath9k_tx_queue_info *qinfo)
{
	u32 cw;
	struct ath_common *common = ath9k_hw_common(ah);
	struct ath9k_hw_capabilities *pCap = &ah->caps;
	struct ath9k_tx_queue_info *qi;

	if (q >= pCap->total_queues) {
		ath_print(common, ATH_DBG_QUEUE, "Set TXQ properties, "
			  "invalid queue: %u\n", q);
		return false;
	}

	qi = &ah->txq[q];
	if (qi->tqi_type == ATH9K_TX_QUEUE_INACTIVE) {
		ath_print(common, ATH_DBG_QUEUE, "Set TXQ properties, "
			  "inactive queue: %u\n", q);
		return false;
	}

	ath_print(common, ATH_DBG_QUEUE, "Set queue properties for: %u\n", q);

	qi->tqi_ver = qinfo->tqi_ver;
	qi->tqi_subtype = qinfo->tqi_subtype;
	qi->tqi_qflags = qinfo->tqi_qflags;
	qi->tqi_priority = qinfo->tqi_priority;
	/* AIFS: clamp to 255 slots, or fall back to the driver default. */
	if (qinfo->tqi_aifs != ATH9K_TXQ_USEDEFAULT)
		qi->tqi_aifs = min(qinfo->tqi_aifs, 255U);
	else
		qi->tqi_aifs = INIT_AIFS;
	/* cwmin: round up to (2^n - 1), capped at 1024.  When USEDEFAULT
	 * is requested the sentinel is kept and resolved per-channel in
	 * ath9k_hw_resettxqueue(). */
	if (qinfo->tqi_cwmin != ATH9K_TXQ_USEDEFAULT) {
		cw = min(qinfo->tqi_cwmin, 1024U);
		qi->tqi_cwmin = 1;
		while (qi->tqi_cwmin < cw)
			qi->tqi_cwmin = (qi->tqi_cwmin << 1) | 1;
	} else
		qi->tqi_cwmin = qinfo->tqi_cwmin;
	/* cwmax: same rounding; USEDEFAULT maps directly to INIT_CWMAX. */
	if (qinfo->tqi_cwmax != ATH9K_TXQ_USEDEFAULT) {
		cw = min(qinfo->tqi_cwmax, 1024U);
		qi->tqi_cwmax = 1;
		while (qi->tqi_cwmax < cw)
			qi->tqi_cwmax = (qi->tqi_cwmax << 1) | 1;
	} else
		qi->tqi_cwmax = INIT_CWMAX;

	/* Retry limits: 0 means "use default", otherwise clamp to 15. */
	if (qinfo->tqi_shretry != 0)
		qi->tqi_shretry = min((u32) qinfo->tqi_shretry, 15U);
	else
		qi->tqi_shretry = INIT_SH_RETRY;
	if (qinfo->tqi_lgretry != 0)
		qi->tqi_lgretry = min((u32) qinfo->tqi_lgretry, 15U);
	else
		qi->tqi_lgretry = INIT_LG_RETRY;
	qi->tqi_cbrPeriod = qinfo->tqi_cbrPeriod;
	qi->tqi_cbrOverflowLimit = qinfo->tqi_cbrOverflowLimit;
	qi->tqi_burstTime = qinfo->tqi_burstTime;
	qi->tqi_readyTime = qinfo->tqi_readyTime;

	switch (qinfo->tqi_subtype) {
	case ATH9K_WME_UPSD:
		if (qi->tqi_type == ATH9K_TX_QUEUE_DATA)
			qi->tqi_intFlags = ATH9K_TXQ_USE_LOCKOUT_BKOFF_DIS;
		break;
	default:
		break;
	}

	return true;
}
EXPORT_SYMBOL(ath9k_hw_set_txq_props);
  479. bool ath9k_hw_get_txq_props(struct ath_hw *ah, int q,
  480. struct ath9k_tx_queue_info *qinfo)
  481. {
  482. struct ath_common *common = ath9k_hw_common(ah);
  483. struct ath9k_hw_capabilities *pCap = &ah->caps;
  484. struct ath9k_tx_queue_info *qi;
  485. if (q >= pCap->total_queues) {
  486. ath_print(common, ATH_DBG_QUEUE, "Get TXQ properties, "
  487. "invalid queue: %u\n", q);
  488. return false;
  489. }
  490. qi = &ah->txq[q];
  491. if (qi->tqi_type == ATH9K_TX_QUEUE_INACTIVE) {
  492. ath_print(common, ATH_DBG_QUEUE, "Get TXQ properties, "
  493. "inactive queue: %u\n", q);
  494. return false;
  495. }
  496. qinfo->tqi_qflags = qi->tqi_qflags;
  497. qinfo->tqi_ver = qi->tqi_ver;
  498. qinfo->tqi_subtype = qi->tqi_subtype;
  499. qinfo->tqi_qflags = qi->tqi_qflags;
  500. qinfo->tqi_priority = qi->tqi_priority;
  501. qinfo->tqi_aifs = qi->tqi_aifs;
  502. qinfo->tqi_cwmin = qi->tqi_cwmin;
  503. qinfo->tqi_cwmax = qi->tqi_cwmax;
  504. qinfo->tqi_shretry = qi->tqi_shretry;
  505. qinfo->tqi_lgretry = qi->tqi_lgretry;
  506. qinfo->tqi_cbrPeriod = qi->tqi_cbrPeriod;
  507. qinfo->tqi_cbrOverflowLimit = qi->tqi_cbrOverflowLimit;
  508. qinfo->tqi_burstTime = qi->tqi_burstTime;
  509. qinfo->tqi_readyTime = qi->tqi_readyTime;
  510. return true;
  511. }
  512. EXPORT_SYMBOL(ath9k_hw_get_txq_props);
/*
 * Allocate and initialize a TX queue of the given @type.
 *
 * Fixed-function queues get reserved indices: beacon is the highest
 * queue, CAB the next, UAPSD the one below that, PS-poll is queue 1.
 * Data queues take the first inactive slot.  If @qinfo is NULL the
 * queue is initialized with driver defaults; otherwise the supplied
 * parameters are applied via ath9k_hw_set_txq_props().
 *
 * Returns the queue number, or -1 if no queue is available, the type
 * is invalid, or the chosen queue is already active.
 */
int ath9k_hw_setuptxqueue(struct ath_hw *ah, enum ath9k_tx_queue type,
			  const struct ath9k_tx_queue_info *qinfo)
{
	struct ath_common *common = ath9k_hw_common(ah);
	struct ath9k_tx_queue_info *qi;
	struct ath9k_hw_capabilities *pCap = &ah->caps;
	int q;

	switch (type) {
	case ATH9K_TX_QUEUE_BEACON:
		q = pCap->total_queues - 1;
		break;
	case ATH9K_TX_QUEUE_CAB:
		q = pCap->total_queues - 2;
		break;
	case ATH9K_TX_QUEUE_PSPOLL:
		q = 1;
		break;
	case ATH9K_TX_QUEUE_UAPSD:
		q = pCap->total_queues - 3;
		break;
	case ATH9K_TX_QUEUE_DATA:
		/* First-fit scan for an inactive data queue. */
		for (q = 0; q < pCap->total_queues; q++)
			if (ah->txq[q].tqi_type ==
			    ATH9K_TX_QUEUE_INACTIVE)
				break;
		if (q == pCap->total_queues) {
			ath_print(common, ATH_DBG_FATAL,
				  "No available TX queue\n");
			return -1;
		}
		break;
	default:
		ath_print(common, ATH_DBG_FATAL,
			  "Invalid TX queue type: %u\n", type);
		return -1;
	}

	ath_print(common, ATH_DBG_QUEUE, "Setup TX queue: %u\n", q);

	qi = &ah->txq[q];
	if (qi->tqi_type != ATH9K_TX_QUEUE_INACTIVE) {
		ath_print(common, ATH_DBG_FATAL,
			  "TX queue: %u already active\n", q);
		return -1;
	}
	memset(qi, 0, sizeof(struct ath9k_tx_queue_info));
	qi->tqi_type = type;
	if (qinfo == NULL) {
		/* Driver defaults: OK/ERR/DESC/URN interrupts enabled,
		 * standard AIFS/CW and retry limits. */
		qi->tqi_qflags =
			TXQ_FLAG_TXOKINT_ENABLE
			| TXQ_FLAG_TXERRINT_ENABLE
			| TXQ_FLAG_TXDESCINT_ENABLE | TXQ_FLAG_TXURNINT_ENABLE;
		qi->tqi_aifs = INIT_AIFS;
		qi->tqi_cwmin = ATH9K_TXQ_USEDEFAULT;
		qi->tqi_cwmax = INIT_CWMAX;
		qi->tqi_shretry = INIT_SH_RETRY;
		qi->tqi_lgretry = INIT_LG_RETRY;
		qi->tqi_physCompBuf = 0;
	} else {
		qi->tqi_physCompBuf = qinfo->tqi_physCompBuf;
		(void) ath9k_hw_set_txq_props(ah, q, qinfo);
	}

	return q;
}
EXPORT_SYMBOL(ath9k_hw_setuptxqueue);
  576. bool ath9k_hw_releasetxqueue(struct ath_hw *ah, u32 q)
  577. {
  578. struct ath9k_hw_capabilities *pCap = &ah->caps;
  579. struct ath_common *common = ath9k_hw_common(ah);
  580. struct ath9k_tx_queue_info *qi;
  581. if (q >= pCap->total_queues) {
  582. ath_print(common, ATH_DBG_QUEUE, "Release TXQ, "
  583. "invalid queue: %u\n", q);
  584. return false;
  585. }
  586. qi = &ah->txq[q];
  587. if (qi->tqi_type == ATH9K_TX_QUEUE_INACTIVE) {
  588. ath_print(common, ATH_DBG_QUEUE, "Release TXQ, "
  589. "inactive queue: %u\n", q);
  590. return false;
  591. }
  592. ath_print(common, ATH_DBG_QUEUE, "Release TX queue: %u\n", q);
  593. qi->tqi_type = ATH9K_TX_QUEUE_INACTIVE;
  594. ah->txok_interrupt_mask &= ~(1 << q);
  595. ah->txerr_interrupt_mask &= ~(1 << q);
  596. ah->txdesc_interrupt_mask &= ~(1 << q);
  597. ah->txeol_interrupt_mask &= ~(1 << q);
  598. ah->txurn_interrupt_mask &= ~(1 << q);
  599. ath9k_hw_set_txq_interrupts(ah, qi);
  600. return true;
  601. }
  602. EXPORT_SYMBOL(ath9k_hw_releasetxqueue);
/*
 * Program the hardware registers of TX queue @q from its software
 * state (DCU/QCU configuration, CW/AIFS, retry limits, CBR, ready
 * time, burst time, per-type special handling, interrupt masks).
 *
 * Returns false for an out-of-range queue; returns true (a no-op)
 * for an inactive queue, since there is nothing to program.
 */
bool ath9k_hw_resettxqueue(struct ath_hw *ah, u32 q)
{
	struct ath9k_hw_capabilities *pCap = &ah->caps;
	struct ath_common *common = ath9k_hw_common(ah);
	struct ath9k_channel *chan = ah->curchan;
	struct ath9k_tx_queue_info *qi;
	u32 cwMin, chanCwMin, value;

	if (q >= pCap->total_queues) {
		ath_print(common, ATH_DBG_QUEUE, "Reset TXQ, "
			  "invalid queue: %u\n", q);
		return false;
	}

	qi = &ah->txq[q];
	if (qi->tqi_type == ATH9K_TX_QUEUE_INACTIVE) {
		ath_print(common, ATH_DBG_QUEUE, "Reset TXQ, "
			  "inactive queue: %u\n", q);
		/* Not an error: an inactive queue simply has nothing
		 * to program. */
		return true;
	}

	ath_print(common, ATH_DBG_QUEUE, "Reset TX queue: %u\n", q);

	/* Resolve the USEDEFAULT cwmin sentinel against the current
	 * channel (11b uses a larger default), rounded up to 2^n - 1. */
	if (qi->tqi_cwmin == ATH9K_TXQ_USEDEFAULT) {
		if (chan && IS_CHAN_B(chan))
			chanCwMin = INIT_CWMIN_11B;
		else
			chanCwMin = INIT_CWMIN;

		for (cwMin = 1; cwMin < chanCwMin; cwMin = (cwMin << 1) | 1);
	} else
		cwMin = qi->tqi_cwmin;

	/* Local IFS: CWmin/CWmax/AIFS. */
	REG_WRITE(ah, AR_DLCL_IFS(q),
		  SM(cwMin, AR_D_LCL_IFS_CWMIN) |
		  SM(qi->tqi_cwmax, AR_D_LCL_IFS_CWMAX) |
		  SM(qi->tqi_aifs, AR_D_LCL_IFS_AIFS));

	/* Station and per-frame retry limits. */
	REG_WRITE(ah, AR_DRETRY_LIMIT(q),
		  SM(INIT_SSH_RETRY, AR_D_RETRY_LIMIT_STA_SH) |
		  SM(INIT_SLG_RETRY, AR_D_RETRY_LIMIT_STA_LG) |
		  SM(qi->tqi_shretry, AR_D_RETRY_LIMIT_FR_SH));

	REG_WRITE(ah, AR_QMISC(q), AR_Q_MISC_DCU_EARLY_TERM_REQ);
	REG_WRITE(ah, AR_DMISC(q),
		  AR_D_MISC_CW_BKOFF_EN | AR_D_MISC_FRAG_WAIT_EN | 0x2);

	/* Optional constant-bit-rate scheduling. */
	if (qi->tqi_cbrPeriod) {
		REG_WRITE(ah, AR_QCBRCFG(q),
			  SM(qi->tqi_cbrPeriod, AR_Q_CBRCFG_INTERVAL) |
			  SM(qi->tqi_cbrOverflowLimit, AR_Q_CBRCFG_OVF_THRESH));
		REG_WRITE(ah, AR_QMISC(q),
			  REG_READ(ah, AR_QMISC(q)) | AR_Q_MISC_FSP_CBR |
			  (qi->tqi_cbrOverflowLimit ?
			   AR_Q_MISC_CBR_EXP_CNTR_LIMIT_EN : 0));
	}
	/* Ready time (CAB computes its own value below). */
	if (qi->tqi_readyTime && (qi->tqi_type != ATH9K_TX_QUEUE_CAB)) {
		REG_WRITE(ah, AR_QRDYTIMECFG(q),
			  SM(qi->tqi_readyTime, AR_Q_RDYTIMECFG_DURATION) |
			  AR_Q_RDYTIMECFG_EN);
	}

	REG_WRITE(ah, AR_DCHNTIME(q),
		  SM(qi->tqi_burstTime, AR_D_CHNTIME_DUR) |
		  (qi->tqi_burstTime ? AR_D_CHNTIME_EN : 0));

	if (qi->tqi_burstTime
	    && (qi->tqi_qflags & TXQ_FLAG_RDYTIME_EXP_POLICY_ENABLE)) {
		REG_WRITE(ah, AR_QMISC(q),
			  REG_READ(ah, AR_QMISC(q)) |
			  AR_Q_MISC_RDYTIME_EXP_POLICY);
	}

	if (qi->tqi_qflags & TXQ_FLAG_BACKOFF_DISABLE) {
		REG_WRITE(ah, AR_DMISC(q),
			  REG_READ(ah, AR_DMISC(q)) |
			  AR_D_MISC_POST_FR_BKOFF_DIS);
	}
	if (qi->tqi_qflags & TXQ_FLAG_FRAG_BURST_BACKOFF_ENABLE) {
		REG_WRITE(ah, AR_DMISC(q),
			  REG_READ(ah, AR_DMISC(q)) |
			  AR_D_MISC_FRAG_BKOFF_EN);
	}

	/* Queue-type-specific gating and arbitration settings. */
	switch (qi->tqi_type) {
	case ATH9K_TX_QUEUE_BEACON:
		REG_WRITE(ah, AR_QMISC(q), REG_READ(ah, AR_QMISC(q))
			  | AR_Q_MISC_FSP_DBA_GATED
			  | AR_Q_MISC_BEACON_USE
			  | AR_Q_MISC_CBR_INCR_DIS1);

		REG_WRITE(ah, AR_DMISC(q), REG_READ(ah, AR_DMISC(q))
			  | (AR_D_MISC_ARB_LOCKOUT_CNTRL_GLOBAL <<
			     AR_D_MISC_ARB_LOCKOUT_CNTRL_S)
			  | AR_D_MISC_BEACON_USE
			  | AR_D_MISC_POST_FR_BKOFF_DIS);
		break;
	case ATH9K_TX_QUEUE_CAB:
		REG_WRITE(ah, AR_QMISC(q), REG_READ(ah, AR_QMISC(q))
			  | AR_Q_MISC_FSP_DBA_GATED
			  | AR_Q_MISC_CBR_INCR_DIS1
			  | AR_Q_MISC_CBR_INCR_DIS0);
		/* CAB ready time: shrink by the software/DMA beacon
		 * response gap and extra SWBA backoff; units are TU
		 * converted to usec (*1024). */
		value = (qi->tqi_readyTime -
			 (ah->config.sw_beacon_response_time -
			  ah->config.dma_beacon_response_time) -
			 ah->config.additional_swba_backoff) * 1024;
		REG_WRITE(ah, AR_QRDYTIMECFG(q),
			  value | AR_Q_RDYTIMECFG_EN);
		REG_WRITE(ah, AR_DMISC(q), REG_READ(ah, AR_DMISC(q))
			  | (AR_D_MISC_ARB_LOCKOUT_CNTRL_GLOBAL <<
			     AR_D_MISC_ARB_LOCKOUT_CNTRL_S));
		break;
	case ATH9K_TX_QUEUE_PSPOLL:
		REG_WRITE(ah, AR_QMISC(q),
			  REG_READ(ah, AR_QMISC(q)) | AR_Q_MISC_CBR_INCR_DIS1);
		break;
	case ATH9K_TX_QUEUE_UAPSD:
		REG_WRITE(ah, AR_DMISC(q), REG_READ(ah, AR_DMISC(q)) |
			  AR_D_MISC_POST_FR_BKOFF_DIS);
		break;
	default:
		break;
	}

	if (qi->tqi_intFlags & ATH9K_TXQ_USE_LOCKOUT_BKOFF_DIS) {
		REG_WRITE(ah, AR_DMISC(q),
			  REG_READ(ah, AR_DMISC(q)) |
			  SM(AR_D_MISC_ARB_LOCKOUT_CNTRL_GLOBAL,
			     AR_D_MISC_ARB_LOCKOUT_CNTRL) |
			  AR_D_MISC_POST_FR_BKOFF_DIS);
	}

	/* Rebuild this queue's bit in each software interrupt mask and
	 * push the masks to hardware. */
	if (qi->tqi_qflags & TXQ_FLAG_TXOKINT_ENABLE)
		ah->txok_interrupt_mask |= 1 << q;
	else
		ah->txok_interrupt_mask &= ~(1 << q);
	if (qi->tqi_qflags & TXQ_FLAG_TXERRINT_ENABLE)
		ah->txerr_interrupt_mask |= 1 << q;
	else
		ah->txerr_interrupt_mask &= ~(1 << q);
	if (qi->tqi_qflags & TXQ_FLAG_TXDESCINT_ENABLE)
		ah->txdesc_interrupt_mask |= 1 << q;
	else
		ah->txdesc_interrupt_mask &= ~(1 << q);
	if (qi->tqi_qflags & TXQ_FLAG_TXEOLINT_ENABLE)
		ah->txeol_interrupt_mask |= 1 << q;
	else
		ah->txeol_interrupt_mask &= ~(1 << q);
	if (qi->tqi_qflags & TXQ_FLAG_TXURNINT_ENABLE)
		ah->txurn_interrupt_mask |= 1 << q;
	else
		ah->txurn_interrupt_mask &= ~(1 << q);
	ath9k_hw_set_txq_interrupts(ah, qi);

	return true;
}
EXPORT_SYMBOL(ath9k_hw_resettxqueue);
  743. int ath9k_hw_rxprocdesc(struct ath_hw *ah, struct ath_desc *ds,
  744. u32 pa, struct ath_desc *nds, u64 tsf)
  745. {
  746. struct ar5416_desc ads;
  747. struct ar5416_desc *adsp = AR5416DESC(ds);
  748. u32 phyerr;
  749. if ((adsp->ds_rxstatus8 & AR_RxDone) == 0)
  750. return -EINPROGRESS;
  751. ads.u.rx = adsp->u.rx;
  752. ds->ds_rxstat.rs_status = 0;
  753. ds->ds_rxstat.rs_flags = 0;
  754. ds->ds_rxstat.rs_datalen = ads.ds_rxstatus1 & AR_DataLen;
  755. ds->ds_rxstat.rs_tstamp = ads.AR_RcvTimestamp;
  756. if (ads.ds_rxstatus8 & AR_PostDelimCRCErr) {
  757. ds->ds_rxstat.rs_rssi = ATH9K_RSSI_BAD;
  758. ds->ds_rxstat.rs_rssi_ctl0 = ATH9K_RSSI_BAD;
  759. ds->ds_rxstat.rs_rssi_ctl1 = ATH9K_RSSI_BAD;
  760. ds->ds_rxstat.rs_rssi_ctl2 = ATH9K_RSSI_BAD;
  761. ds->ds_rxstat.rs_rssi_ext0 = ATH9K_RSSI_BAD;
  762. ds->ds_rxstat.rs_rssi_ext1 = ATH9K_RSSI_BAD;
  763. ds->ds_rxstat.rs_rssi_ext2 = ATH9K_RSSI_BAD;
  764. } else {
  765. ds->ds_rxstat.rs_rssi = MS(ads.ds_rxstatus4, AR_RxRSSICombined);
  766. ds->ds_rxstat.rs_rssi_ctl0 = MS(ads.ds_rxstatus0,
  767. AR_RxRSSIAnt00);
  768. ds->ds_rxstat.rs_rssi_ctl1 = MS(ads.ds_rxstatus0,
  769. AR_RxRSSIAnt01);
  770. ds->ds_rxstat.rs_rssi_ctl2 = MS(ads.ds_rxstatus0,
  771. AR_RxRSSIAnt02);
  772. ds->ds_rxstat.rs_rssi_ext0 = MS(ads.ds_rxstatus4,
  773. AR_RxRSSIAnt10);
  774. ds->ds_rxstat.rs_rssi_ext1 = MS(ads.ds_rxstatus4,
  775. AR_RxRSSIAnt11);
  776. ds->ds_rxstat.rs_rssi_ext2 = MS(ads.ds_rxstatus4,
  777. AR_RxRSSIAnt12);
  778. }
  779. if (ads.ds_rxstatus8 & AR_RxKeyIdxValid)
  780. ds->ds_rxstat.rs_keyix = MS(ads.ds_rxstatus8, AR_KeyIdx);
  781. else
  782. ds->ds_rxstat.rs_keyix = ATH9K_RXKEYIX_INVALID;
  783. ds->ds_rxstat.rs_rate = RXSTATUS_RATE(ah, (&ads));
  784. ds->ds_rxstat.rs_more = (ads.ds_rxstatus1 & AR_RxMore) ? 1 : 0;
  785. ds->ds_rxstat.rs_isaggr = (ads.ds_rxstatus8 & AR_RxAggr) ? 1 : 0;
  786. ds->ds_rxstat.rs_moreaggr =
  787. (ads.ds_rxstatus8 & AR_RxMoreAggr) ? 1 : 0;
  788. ds->ds_rxstat.rs_antenna = MS(ads.ds_rxstatus3, AR_RxAntenna);
  789. ds->ds_rxstat.rs_flags =
  790. (ads.ds_rxstatus3 & AR_GI) ? ATH9K_RX_GI : 0;
  791. ds->ds_rxstat.rs_flags |=
  792. (ads.ds_rxstatus3 & AR_2040) ? ATH9K_RX_2040 : 0;
  793. if (ads.ds_rxstatus8 & AR_PreDelimCRCErr)
  794. ds->ds_rxstat.rs_flags |= ATH9K_RX_DELIM_CRC_PRE;
  795. if (ads.ds_rxstatus8 & AR_PostDelimCRCErr)
  796. ds->ds_rxstat.rs_flags |= ATH9K_RX_DELIM_CRC_POST;
  797. if (ads.ds_rxstatus8 & AR_DecryptBusyErr)
  798. ds->ds_rxstat.rs_flags |= ATH9K_RX_DECRYPT_BUSY;
  799. if ((ads.ds_rxstatus8 & AR_RxFrameOK) == 0) {
  800. if (ads.ds_rxstatus8 & AR_CRCErr)
  801. ds->ds_rxstat.rs_status |= ATH9K_RXERR_CRC;
  802. else if (ads.ds_rxstatus8 & AR_PHYErr) {
  803. ds->ds_rxstat.rs_status |= ATH9K_RXERR_PHY;
  804. phyerr = MS(ads.ds_rxstatus8, AR_PHYErrCode);
  805. ds->ds_rxstat.rs_phyerr = phyerr;
  806. } else if (ads.ds_rxstatus8 & AR_DecryptCRCErr)
  807. ds->ds_rxstat.rs_status |= ATH9K_RXERR_DECRYPT;
  808. else if (ads.ds_rxstatus8 & AR_MichaelErr)
  809. ds->ds_rxstat.rs_status |= ATH9K_RXERR_MIC;
  810. }
  811. return 0;
  812. }
  813. EXPORT_SYMBOL(ath9k_hw_rxprocdesc);
  814. void ath9k_hw_setuprxdesc(struct ath_hw *ah, struct ath_desc *ds,
  815. u32 size, u32 flags)
  816. {
  817. struct ar5416_desc *ads = AR5416DESC(ds);
  818. struct ath9k_hw_capabilities *pCap = &ah->caps;
  819. ads->ds_ctl1 = size & AR_BufLen;
  820. if (flags & ATH9K_RXDESC_INTREQ)
  821. ads->ds_ctl1 |= AR_RxIntrReq;
  822. ads->ds_rxstatus8 &= ~AR_RxDone;
  823. if (!(pCap->hw_caps & ATH9K_HW_CAP_AUTOSLEEP))
  824. memset(&(ads->u), 0, sizeof(ads->u));
  825. }
  826. EXPORT_SYMBOL(ath9k_hw_setuprxdesc);
/*
 * This can stop or re-enable RX.
 *
 * If 'set' is true this will kill any frame which is currently being
 * transferred between the MAC and baseband and also prevent any new
 * frames from getting started.
 */
  834. bool ath9k_hw_setrxabort(struct ath_hw *ah, bool set)
  835. {
  836. u32 reg;
  837. if (set) {
  838. REG_SET_BIT(ah, AR_DIAG_SW,
  839. (AR_DIAG_RX_DIS | AR_DIAG_RX_ABORT));
  840. if (!ath9k_hw_wait(ah, AR_OBS_BUS_1, AR_OBS_BUS_1_RX_STATE,
  841. 0, AH_WAIT_TIMEOUT)) {
  842. REG_CLR_BIT(ah, AR_DIAG_SW,
  843. (AR_DIAG_RX_DIS |
  844. AR_DIAG_RX_ABORT));
  845. reg = REG_READ(ah, AR_OBS_BUS_1);
  846. ath_print(ath9k_hw_common(ah), ATH_DBG_FATAL,
  847. "RX failed to go idle in 10 ms RXSM=0x%x\n",
  848. reg);
  849. return false;
  850. }
  851. } else {
  852. REG_CLR_BIT(ah, AR_DIAG_SW,
  853. (AR_DIAG_RX_DIS | AR_DIAG_RX_ABORT));
  854. }
  855. return true;
  856. }
  857. EXPORT_SYMBOL(ath9k_hw_setrxabort);
/* Program AR_RXDP with the physical address of the RX descriptor list. */
void ath9k_hw_putrxbuf(struct ath_hw *ah, u32 rxdp)
{
	REG_WRITE(ah, AR_RXDP, rxdp);
}
  862. EXPORT_SYMBOL(ath9k_hw_putrxbuf);
/* Enable receive DMA by setting the RX-enable bit in AR_CR. */
void ath9k_hw_rxena(struct ath_hw *ah)
{
	REG_WRITE(ah, AR_CR, AR_CR_RXE);
}
  867. EXPORT_SYMBOL(ath9k_hw_rxena);
/*
 * Start PCU frame reception: re-enable the MIB counters, reset ANI
 * state, then clear the RX disable/abort bits in AR_DIAG_SW so the
 * PCU accepts frames again.
 */
void ath9k_hw_startpcureceive(struct ath_hw *ah)
{
	ath9k_enable_mib_counters(ah);

	/* NOTE(review): ANI is reset before RX is unblocked — presumably
	 * because this path follows a chip reset/channel change; confirm
	 * against callers. */
	ath9k_ani_reset(ah);

	REG_CLR_BIT(ah, AR_DIAG_SW, (AR_DIAG_RX_DIS | AR_DIAG_RX_ABORT));
}
  874. EXPORT_SYMBOL(ath9k_hw_startpcureceive);
/*
 * Stop PCU frame reception: set the RX disable bit in AR_DIAG_SW and
 * then disable the MIB counters.
 */
void ath9k_hw_stoppcurecv(struct ath_hw *ah)
{
	REG_SET_BIT(ah, AR_DIAG_SW, AR_DIAG_RX_DIS);

	ath9k_hw_disable_mib_counters(ah);
}
  880. EXPORT_SYMBOL(ath9k_hw_stoppcurecv);
  881. bool ath9k_hw_stopdmarecv(struct ath_hw *ah)
  882. {
  883. #define AH_RX_STOP_DMA_TIMEOUT 10000 /* usec */
  884. #define AH_RX_TIME_QUANTUM 100 /* usec */
  885. struct ath_common *common = ath9k_hw_common(ah);
  886. int i;
  887. REG_WRITE(ah, AR_CR, AR_CR_RXD);
  888. /* Wait for rx enable bit to go low */
  889. for (i = AH_RX_STOP_DMA_TIMEOUT / AH_TIME_QUANTUM; i != 0; i--) {
  890. if ((REG_READ(ah, AR_CR) & AR_CR_RXE) == 0)
  891. break;
  892. udelay(AH_TIME_QUANTUM);
  893. }
  894. if (i == 0) {
  895. ath_print(common, ATH_DBG_FATAL,
  896. "DMA failed to stop in %d ms "
  897. "AR_CR=0x%08x AR_DIAG_SW=0x%08x\n",
  898. AH_RX_STOP_DMA_TIMEOUT / 1000,
  899. REG_READ(ah, AR_CR),
  900. REG_READ(ah, AR_DIAG_SW));
  901. return false;
  902. } else {
  903. return true;
  904. }
  905. #undef AH_RX_TIME_QUANTUM
  906. #undef AH_RX_STOP_DMA_TIMEOUT
  907. }
  908. EXPORT_SYMBOL(ath9k_hw_stopdmarecv);
  909. int ath9k_hw_beaconq_setup(struct ath_hw *ah)
  910. {
  911. struct ath9k_tx_queue_info qi;
  912. memset(&qi, 0, sizeof(qi));
  913. qi.tqi_aifs = 1;
  914. qi.tqi_cwmin = 0;
  915. qi.tqi_cwmax = 0;
  916. /* NB: don't enable any interrupts */
  917. return ath9k_hw_setuptxqueue(ah, ATH9K_TX_QUEUE_BEACON, &qi);
  918. }
  919. EXPORT_SYMBOL(ath9k_hw_beaconq_setup);