mac.c

/*
 * Copyright (c) 2008-2011 Atheros Communications Inc.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include "hw.h"
#include "hw-ops.h"

static void ath9k_hw_set_txq_interrupts(struct ath_hw *ah,
					struct ath9k_tx_queue_info *qi)
{
	ath_dbg(ath9k_hw_common(ah), ATH_DBG_INTERRUPT,
		"tx ok 0x%x err 0x%x desc 0x%x eol 0x%x urn 0x%x\n",
		ah->txok_interrupt_mask, ah->txerr_interrupt_mask,
		ah->txdesc_interrupt_mask, ah->txeol_interrupt_mask,
		ah->txurn_interrupt_mask);

	ENABLE_REGWRITE_BUFFER(ah);

	REG_WRITE(ah, AR_IMR_S0,
		  SM(ah->txok_interrupt_mask, AR_IMR_S0_QCU_TXOK)
		  | SM(ah->txdesc_interrupt_mask, AR_IMR_S0_QCU_TXDESC));
	REG_WRITE(ah, AR_IMR_S1,
		  SM(ah->txerr_interrupt_mask, AR_IMR_S1_QCU_TXERR)
		  | SM(ah->txeol_interrupt_mask, AR_IMR_S1_QCU_TXEOL));

	ah->imrs2_reg &= ~AR_IMR_S2_QCU_TXURN;
	ah->imrs2_reg |= (ah->txurn_interrupt_mask & AR_IMR_S2_QCU_TXURN);
	REG_WRITE(ah, AR_IMR_S2, ah->imrs2_reg);

	REGWRITE_BUFFER_FLUSH(ah);
}

u32 ath9k_hw_gettxbuf(struct ath_hw *ah, u32 q)
{
	return REG_READ(ah, AR_QTXDP(q));
}
EXPORT_SYMBOL(ath9k_hw_gettxbuf);

void ath9k_hw_puttxbuf(struct ath_hw *ah, u32 q, u32 txdp)
{
	REG_WRITE(ah, AR_QTXDP(q), txdp);
}
EXPORT_SYMBOL(ath9k_hw_puttxbuf);

void ath9k_hw_txstart(struct ath_hw *ah, u32 q)
{
	ath_dbg(ath9k_hw_common(ah), ATH_DBG_QUEUE,
		"Enable TXE on queue: %u\n", q);
	REG_WRITE(ah, AR_Q_TXE, 1 << q);
}
EXPORT_SYMBOL(ath9k_hw_txstart);

u32 ath9k_hw_numtxpending(struct ath_hw *ah, u32 q)
{
	u32 npend;

	npend = REG_READ(ah, AR_QSTS(q)) & AR_Q_STS_PEND_FR_CNT;
	if (npend == 0) {
		if (REG_READ(ah, AR_Q_TXE) & (1 << q))
			npend = 1;
	}

	return npend;
}
EXPORT_SYMBOL(ath9k_hw_numtxpending);
/**
 * ath9k_hw_updatetxtriglevel - adjusts the frame trigger level
 *
 * @ah: atheros hardware struct
 * @bIncTrigLevel: whether or not the frame trigger level should be updated
 *
 * The frame trigger level specifies the minimum number of bytes,
 * in units of 64 bytes, that must be DMA'ed into the PCU TX FIFO
 * before the PCU will initiate sending the frame on the air. This can
 * mean we initiate transmit before a full frame is on the PCU TX FIFO.
 * Resets to 0x1 (meaning 64 bytes or a full frame, whichever occurs
 * first).
 *
 * Caution must be taken to ensure the frame trigger level is set based
 * on the DMA request size. For example, if the DMA request size is set
 * to 128 bytes the trigger level cannot exceed 6 * 64 = 384. This is
 * because there needs to be enough space in the TX FIFO for the
 * requested transfer size; hence the TX FIFO will stop at
 * 512 - 128 = 384 bytes. If we set the threshold to a value beyond 6,
 * the transmit will hang.
 *
 * Current dual-stream devices have a PCU TX FIFO size of 8 KB.
 * Current single-stream devices have a PCU TX FIFO size of 4 KB;
 * however, a hardware issue forces us to use 2 KB instead, so the frame
 * trigger level must not exceed 2 KB for these chipsets.
 */
bool ath9k_hw_updatetxtriglevel(struct ath_hw *ah, bool bIncTrigLevel)
{
	u32 txcfg, curLevel, newLevel;

	if (ah->tx_trig_level >= ah->config.max_txtrig_level)
		return false;

	ath9k_hw_disable_interrupts(ah);

	txcfg = REG_READ(ah, AR_TXCFG);
	curLevel = MS(txcfg, AR_FTRIG);
	newLevel = curLevel;
	if (bIncTrigLevel) {
		if (curLevel < ah->config.max_txtrig_level)
			newLevel++;
	} else if (curLevel > MIN_TX_FIFO_THRESHOLD)
		newLevel--;
	if (newLevel != curLevel)
		REG_WRITE(ah, AR_TXCFG,
			  (txcfg & ~AR_FTRIG) | SM(newLevel, AR_FTRIG));

	ath9k_hw_enable_interrupts(ah);

	ah->tx_trig_level = newLevel;

	return newLevel != curLevel;
}
EXPORT_SYMBOL(ath9k_hw_updatetxtriglevel);
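
/*
 * Worked example of the arithmetic in the comment above (a sketch, not
 * driver code): the level is in 64-byte units, so a 128-byte DMA request
 * size caps it at (512 - 128) / 64 = 6. A typical caller raises the
 * level after a TX underrun interrupt; the ATH9K_INT_TXURN handling
 * shown here is an assumption about the caller, not code from this file:
 *
 *	if (status & ATH9K_INT_TXURN)
 *		ath9k_hw_updatetxtriglevel(ah, true);
 */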
void ath9k_hw_abort_tx_dma(struct ath_hw *ah)
{
	int i, q;

	REG_WRITE(ah, AR_Q_TXD, AR_Q_TXD_M);

	REG_SET_BIT(ah, AR_PCU_MISC, AR_PCU_FORCE_QUIET_COLL | AR_PCU_CLEAR_VMF);
	REG_SET_BIT(ah, AR_DIAG_SW, AR_DIAG_FORCE_CH_IDLE_HIGH);
	REG_SET_BIT(ah, AR_D_GBL_IFS_MISC, AR_D_GBL_IFS_MISC_IGNORE_BACKOFF);

	for (q = 0; q < AR_NUM_QCU; q++) {
		for (i = 0; i < 1000; i++) {
			if (i)
				udelay(5);

			if (!ath9k_hw_numtxpending(ah, q))
				break;
		}
	}

	REG_CLR_BIT(ah, AR_PCU_MISC, AR_PCU_FORCE_QUIET_COLL | AR_PCU_CLEAR_VMF);
	REG_CLR_BIT(ah, AR_DIAG_SW, AR_DIAG_FORCE_CH_IDLE_HIGH);
	REG_CLR_BIT(ah, AR_D_GBL_IFS_MISC, AR_D_GBL_IFS_MISC_IGNORE_BACKOFF);

	REG_WRITE(ah, AR_Q_TXD, 0);
}
EXPORT_SYMBOL(ath9k_hw_abort_tx_dma);

bool ath9k_hw_stop_dma_queue(struct ath_hw *ah, u32 q)
{
#define ATH9K_TX_STOP_DMA_TIMEOUT	1000	/* usec */
#define ATH9K_TIME_QUANTUM		100	/* usec */
	int wait_time = ATH9K_TX_STOP_DMA_TIMEOUT / ATH9K_TIME_QUANTUM;
	int wait;

	REG_WRITE(ah, AR_Q_TXD, 1 << q);

	for (wait = wait_time; wait != 0; wait--) {
		if (wait != wait_time)
			udelay(ATH9K_TIME_QUANTUM);

		if (ath9k_hw_numtxpending(ah, q) == 0)
			break;
	}

	REG_WRITE(ah, AR_Q_TXD, 0);

	return wait != 0;
#undef ATH9K_TX_STOP_DMA_TIMEOUT
#undef ATH9K_TIME_QUANTUM
}
EXPORT_SYMBOL(ath9k_hw_stop_dma_queue);
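
/*
 * A minimal usage sketch (an assumption about callers, not code from
 * this file): drain one queue politely within the 1 ms budget above,
 * then fall back to the global abort if frames are still pending:
 *
 *	if (!ath9k_hw_stop_dma_queue(ah, q))
 *		ath9k_hw_abort_tx_dma(ah);
 */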
void ath9k_hw_gettxintrtxqs(struct ath_hw *ah, u32 *txqs)
{
	*txqs &= ah->intr_txqs;
	ah->intr_txqs &= ~(*txqs);
}
EXPORT_SYMBOL(ath9k_hw_gettxintrtxqs);

bool ath9k_hw_set_txq_props(struct ath_hw *ah, int q,
			    const struct ath9k_tx_queue_info *qinfo)
{
	u32 cw;
	struct ath_common *common = ath9k_hw_common(ah);
	struct ath9k_tx_queue_info *qi;

	qi = &ah->txq[q];
	if (qi->tqi_type == ATH9K_TX_QUEUE_INACTIVE) {
		ath_dbg(common, ATH_DBG_QUEUE,
			"Set TXQ properties, inactive queue: %u\n", q);
		return false;
	}

	ath_dbg(common, ATH_DBG_QUEUE, "Set queue properties for: %u\n", q);

	qi->tqi_ver = qinfo->tqi_ver;
	qi->tqi_subtype = qinfo->tqi_subtype;
	qi->tqi_qflags = qinfo->tqi_qflags;
	qi->tqi_priority = qinfo->tqi_priority;
	if (qinfo->tqi_aifs != ATH9K_TXQ_USEDEFAULT)
		qi->tqi_aifs = min(qinfo->tqi_aifs, 255U);
	else
		qi->tqi_aifs = INIT_AIFS;
	if (qinfo->tqi_cwmin != ATH9K_TXQ_USEDEFAULT) {
		cw = min(qinfo->tqi_cwmin, 1024U);
		qi->tqi_cwmin = 1;
		while (qi->tqi_cwmin < cw)
			qi->tqi_cwmin = (qi->tqi_cwmin << 1) | 1;
	} else
		qi->tqi_cwmin = qinfo->tqi_cwmin;
	if (qinfo->tqi_cwmax != ATH9K_TXQ_USEDEFAULT) {
		cw = min(qinfo->tqi_cwmax, 1024U);
		qi->tqi_cwmax = 1;
		while (qi->tqi_cwmax < cw)
			qi->tqi_cwmax = (qi->tqi_cwmax << 1) | 1;
	} else
		qi->tqi_cwmax = INIT_CWMAX;

	if (qinfo->tqi_shretry != 0)
		qi->tqi_shretry = min((u32) qinfo->tqi_shretry, 15U);
	else
		qi->tqi_shretry = INIT_SH_RETRY;
	if (qinfo->tqi_lgretry != 0)
		qi->tqi_lgretry = min((u32) qinfo->tqi_lgretry, 15U);
	else
		qi->tqi_lgretry = INIT_LG_RETRY;
	qi->tqi_cbrPeriod = qinfo->tqi_cbrPeriod;
	qi->tqi_cbrOverflowLimit = qinfo->tqi_cbrOverflowLimit;
	qi->tqi_burstTime = qinfo->tqi_burstTime;
	qi->tqi_readyTime = qinfo->tqi_readyTime;

	switch (qinfo->tqi_subtype) {
	case ATH9K_WME_UPSD:
		if (qi->tqi_type == ATH9K_TX_QUEUE_DATA)
			qi->tqi_intFlags = ATH9K_TXQ_USE_LOCKOUT_BKOFF_DIS;
		break;
	default:
		break;
	}

	return true;
}
EXPORT_SYMBOL(ath9k_hw_set_txq_props);
bool ath9k_hw_get_txq_props(struct ath_hw *ah, int q,
			    struct ath9k_tx_queue_info *qinfo)
{
	struct ath_common *common = ath9k_hw_common(ah);
	struct ath9k_tx_queue_info *qi;

	qi = &ah->txq[q];
	if (qi->tqi_type == ATH9K_TX_QUEUE_INACTIVE) {
		ath_dbg(common, ATH_DBG_QUEUE,
			"Get TXQ properties, inactive queue: %u\n", q);
		return false;
	}

	qinfo->tqi_ver = qi->tqi_ver;
	qinfo->tqi_subtype = qi->tqi_subtype;
	qinfo->tqi_qflags = qi->tqi_qflags;
	qinfo->tqi_priority = qi->tqi_priority;
	qinfo->tqi_aifs = qi->tqi_aifs;
	qinfo->tqi_cwmin = qi->tqi_cwmin;
	qinfo->tqi_cwmax = qi->tqi_cwmax;
	qinfo->tqi_shretry = qi->tqi_shretry;
	qinfo->tqi_lgretry = qi->tqi_lgretry;
	qinfo->tqi_cbrPeriod = qi->tqi_cbrPeriod;
	qinfo->tqi_cbrOverflowLimit = qi->tqi_cbrOverflowLimit;
	qinfo->tqi_burstTime = qi->tqi_burstTime;
	qinfo->tqi_readyTime = qi->tqi_readyTime;

	return true;
}
EXPORT_SYMBOL(ath9k_hw_get_txq_props);
int ath9k_hw_setuptxqueue(struct ath_hw *ah, enum ath9k_tx_queue type,
			  const struct ath9k_tx_queue_info *qinfo)
{
	struct ath_common *common = ath9k_hw_common(ah);
	struct ath9k_tx_queue_info *qi;
	int q;

	switch (type) {
	case ATH9K_TX_QUEUE_BEACON:
		q = ATH9K_NUM_TX_QUEUES - 1;
		break;
	case ATH9K_TX_QUEUE_CAB:
		q = ATH9K_NUM_TX_QUEUES - 2;
		break;
	case ATH9K_TX_QUEUE_PSPOLL:
		q = 1;
		break;
	case ATH9K_TX_QUEUE_UAPSD:
		q = ATH9K_NUM_TX_QUEUES - 3;
		break;
	case ATH9K_TX_QUEUE_DATA:
		for (q = 0; q < ATH9K_NUM_TX_QUEUES; q++)
			if (ah->txq[q].tqi_type ==
			    ATH9K_TX_QUEUE_INACTIVE)
				break;
		if (q == ATH9K_NUM_TX_QUEUES) {
			ath_err(common, "No available TX queue\n");
			return -1;
		}
		break;
	default:
		ath_err(common, "Invalid TX queue type: %u\n", type);
		return -1;
	}

	ath_dbg(common, ATH_DBG_QUEUE, "Setup TX queue: %u\n", q);

	qi = &ah->txq[q];
	if (qi->tqi_type != ATH9K_TX_QUEUE_INACTIVE) {
		ath_err(common, "TX queue: %u already active\n", q);
		return -1;
	}
	memset(qi, 0, sizeof(struct ath9k_tx_queue_info));
	qi->tqi_type = type;
	qi->tqi_physCompBuf = qinfo->tqi_physCompBuf;
	(void) ath9k_hw_set_txq_props(ah, q, qinfo);

	return q;
}
EXPORT_SYMBOL(ath9k_hw_setuptxqueue);
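
/*
 * A minimal usage sketch (illustrative assumptions, not code from this
 * file): allocate a data queue with default EDCA parameters and check
 * the returned index, which is -1 on failure:
 *
 *	struct ath9k_tx_queue_info qi;
 *	int q;
 *
 *	memset(&qi, 0, sizeof(qi));
 *	qi.tqi_aifs = ATH9K_TXQ_USEDEFAULT;
 *	qi.tqi_cwmin = ATH9K_TXQ_USEDEFAULT;
 *	qi.tqi_cwmax = ATH9K_TXQ_USEDEFAULT;
 *	q = ath9k_hw_setuptxqueue(ah, ATH9K_TX_QUEUE_DATA, &qi);
 *	if (q < 0)
 *		(handle the error)
 */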
bool ath9k_hw_releasetxqueue(struct ath_hw *ah, u32 q)
{
	struct ath_common *common = ath9k_hw_common(ah);
	struct ath9k_tx_queue_info *qi;

	qi = &ah->txq[q];
	if (qi->tqi_type == ATH9K_TX_QUEUE_INACTIVE) {
		ath_dbg(common, ATH_DBG_QUEUE,
			"Release TXQ, inactive queue: %u\n", q);
		return false;
	}

	ath_dbg(common, ATH_DBG_QUEUE, "Release TX queue: %u\n", q);

	qi->tqi_type = ATH9K_TX_QUEUE_INACTIVE;
	ah->txok_interrupt_mask &= ~(1 << q);
	ah->txerr_interrupt_mask &= ~(1 << q);
	ah->txdesc_interrupt_mask &= ~(1 << q);
	ah->txeol_interrupt_mask &= ~(1 << q);
	ah->txurn_interrupt_mask &= ~(1 << q);
	ath9k_hw_set_txq_interrupts(ah, qi);

	return true;
}
EXPORT_SYMBOL(ath9k_hw_releasetxqueue);
bool ath9k_hw_resettxqueue(struct ath_hw *ah, u32 q)
{
	struct ath_common *common = ath9k_hw_common(ah);
	struct ath9k_channel *chan = ah->curchan;
	struct ath9k_tx_queue_info *qi;
	u32 cwMin, chanCwMin, value;

	qi = &ah->txq[q];
	if (qi->tqi_type == ATH9K_TX_QUEUE_INACTIVE) {
		ath_dbg(common, ATH_DBG_QUEUE,
			"Reset TXQ, inactive queue: %u\n", q);
		return true;
	}

	ath_dbg(common, ATH_DBG_QUEUE, "Reset TX queue: %u\n", q);

	if (qi->tqi_cwmin == ATH9K_TXQ_USEDEFAULT) {
		if (chan && IS_CHAN_B(chan))
			chanCwMin = INIT_CWMIN_11B;
		else
			chanCwMin = INIT_CWMIN;

		for (cwMin = 1; cwMin < chanCwMin; cwMin = (cwMin << 1) | 1);
	} else
		cwMin = qi->tqi_cwmin;

	ENABLE_REGWRITE_BUFFER(ah);

	REG_WRITE(ah, AR_DLCL_IFS(q),
		  SM(cwMin, AR_D_LCL_IFS_CWMIN) |
		  SM(qi->tqi_cwmax, AR_D_LCL_IFS_CWMAX) |
		  SM(qi->tqi_aifs, AR_D_LCL_IFS_AIFS));

	REG_WRITE(ah, AR_DRETRY_LIMIT(q),
		  SM(INIT_SSH_RETRY, AR_D_RETRY_LIMIT_STA_SH) |
		  SM(INIT_SLG_RETRY, AR_D_RETRY_LIMIT_STA_LG) |
		  SM(qi->tqi_shretry, AR_D_RETRY_LIMIT_FR_SH));

	REG_WRITE(ah, AR_QMISC(q), AR_Q_MISC_DCU_EARLY_TERM_REQ);

	if (AR_SREV_9340(ah))
		REG_WRITE(ah, AR_DMISC(q),
			  AR_D_MISC_CW_BKOFF_EN | AR_D_MISC_FRAG_WAIT_EN | 0x1);
	else
		REG_WRITE(ah, AR_DMISC(q),
			  AR_D_MISC_CW_BKOFF_EN | AR_D_MISC_FRAG_WAIT_EN | 0x2);

	if (qi->tqi_cbrPeriod) {
		REG_WRITE(ah, AR_QCBRCFG(q),
			  SM(qi->tqi_cbrPeriod, AR_Q_CBRCFG_INTERVAL) |
			  SM(qi->tqi_cbrOverflowLimit, AR_Q_CBRCFG_OVF_THRESH));
		REG_SET_BIT(ah, AR_QMISC(q), AR_Q_MISC_FSP_CBR |
			    (qi->tqi_cbrOverflowLimit ?
			     AR_Q_MISC_CBR_EXP_CNTR_LIMIT_EN : 0));
	}
	if (qi->tqi_readyTime && (qi->tqi_type != ATH9K_TX_QUEUE_CAB)) {
		REG_WRITE(ah, AR_QRDYTIMECFG(q),
			  SM(qi->tqi_readyTime, AR_Q_RDYTIMECFG_DURATION) |
			  AR_Q_RDYTIMECFG_EN);
	}

	REG_WRITE(ah, AR_DCHNTIME(q),
		  SM(qi->tqi_burstTime, AR_D_CHNTIME_DUR) |
		  (qi->tqi_burstTime ? AR_D_CHNTIME_EN : 0));

	if (qi->tqi_burstTime
	    && (qi->tqi_qflags & TXQ_FLAG_RDYTIME_EXP_POLICY_ENABLE))
		REG_SET_BIT(ah, AR_QMISC(q), AR_Q_MISC_RDYTIME_EXP_POLICY);

	if (qi->tqi_qflags & TXQ_FLAG_BACKOFF_DISABLE)
		REG_SET_BIT(ah, AR_DMISC(q), AR_D_MISC_POST_FR_BKOFF_DIS);

	REGWRITE_BUFFER_FLUSH(ah);

	if (qi->tqi_qflags & TXQ_FLAG_FRAG_BURST_BACKOFF_ENABLE)
		REG_SET_BIT(ah, AR_DMISC(q), AR_D_MISC_FRAG_BKOFF_EN);

	switch (qi->tqi_type) {
	case ATH9K_TX_QUEUE_BEACON:
		ENABLE_REGWRITE_BUFFER(ah);

		REG_SET_BIT(ah, AR_QMISC(q),
			    AR_Q_MISC_FSP_DBA_GATED
			    | AR_Q_MISC_BEACON_USE
			    | AR_Q_MISC_CBR_INCR_DIS1);

		REG_SET_BIT(ah, AR_DMISC(q),
			    (AR_D_MISC_ARB_LOCKOUT_CNTRL_GLOBAL <<
			     AR_D_MISC_ARB_LOCKOUT_CNTRL_S)
			    | AR_D_MISC_BEACON_USE
			    | AR_D_MISC_POST_FR_BKOFF_DIS);

		REGWRITE_BUFFER_FLUSH(ah);
		/*
		 * cwmin and cwmax should be 0 for the beacon queue, but
		 * not for IBSS: that would create an imbalance in
		 * beaconing fairness among participating nodes.
		 */
		if (AR_SREV_9300_20_OR_LATER(ah) &&
		    ah->opmode != NL80211_IFTYPE_ADHOC) {
			REG_WRITE(ah, AR_DLCL_IFS(q), SM(0, AR_D_LCL_IFS_CWMIN)
				  | SM(0, AR_D_LCL_IFS_CWMAX)
				  | SM(qi->tqi_aifs, AR_D_LCL_IFS_AIFS));
		}
		break;
	case ATH9K_TX_QUEUE_CAB:
		ENABLE_REGWRITE_BUFFER(ah);

		REG_SET_BIT(ah, AR_QMISC(q),
			    AR_Q_MISC_FSP_DBA_GATED
			    | AR_Q_MISC_CBR_INCR_DIS1
			    | AR_Q_MISC_CBR_INCR_DIS0);
		value = (qi->tqi_readyTime -
			 (ah->config.sw_beacon_response_time -
			  ah->config.dma_beacon_response_time) -
			 ah->config.additional_swba_backoff) * 1024;
		REG_WRITE(ah, AR_QRDYTIMECFG(q),
			  value | AR_Q_RDYTIMECFG_EN);
		REG_SET_BIT(ah, AR_DMISC(q),
			    (AR_D_MISC_ARB_LOCKOUT_CNTRL_GLOBAL <<
			     AR_D_MISC_ARB_LOCKOUT_CNTRL_S));

		REGWRITE_BUFFER_FLUSH(ah);
		break;
	case ATH9K_TX_QUEUE_PSPOLL:
		REG_SET_BIT(ah, AR_QMISC(q), AR_Q_MISC_CBR_INCR_DIS1);
		break;
	case ATH9K_TX_QUEUE_UAPSD:
		REG_SET_BIT(ah, AR_DMISC(q), AR_D_MISC_POST_FR_BKOFF_DIS);
		break;
	default:
		break;
	}

	if (qi->tqi_intFlags & ATH9K_TXQ_USE_LOCKOUT_BKOFF_DIS) {
		REG_SET_BIT(ah, AR_DMISC(q),
			    SM(AR_D_MISC_ARB_LOCKOUT_CNTRL_GLOBAL,
			       AR_D_MISC_ARB_LOCKOUT_CNTRL) |
			    AR_D_MISC_POST_FR_BKOFF_DIS);
	}

	if (AR_SREV_9300_20_OR_LATER(ah))
		REG_WRITE(ah, AR_Q_DESC_CRCCHK, AR_Q_DESC_CRCCHK_EN);

	if (qi->tqi_qflags & TXQ_FLAG_TXOKINT_ENABLE)
		ah->txok_interrupt_mask |= 1 << q;
	else
		ah->txok_interrupt_mask &= ~(1 << q);
	if (qi->tqi_qflags & TXQ_FLAG_TXERRINT_ENABLE)
		ah->txerr_interrupt_mask |= 1 << q;
	else
		ah->txerr_interrupt_mask &= ~(1 << q);
	if (qi->tqi_qflags & TXQ_FLAG_TXDESCINT_ENABLE)
		ah->txdesc_interrupt_mask |= 1 << q;
	else
		ah->txdesc_interrupt_mask &= ~(1 << q);
	if (qi->tqi_qflags & TXQ_FLAG_TXEOLINT_ENABLE)
		ah->txeol_interrupt_mask |= 1 << q;
	else
		ah->txeol_interrupt_mask &= ~(1 << q);
	if (qi->tqi_qflags & TXQ_FLAG_TXURNINT_ENABLE)
		ah->txurn_interrupt_mask |= 1 << q;
	else
		ah->txurn_interrupt_mask &= ~(1 << q);
	ath9k_hw_set_txq_interrupts(ah, qi);

	return true;
}
EXPORT_SYMBOL(ath9k_hw_resettxqueue);
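
/*
 * Usage sketch (illustrative): ath9k_hw_set_txq_props() only updates the
 * software ath9k_tx_queue_info; the values reach the QCU/DCU registers
 * when the queue is reset, so the two are normally called as a pair:
 *
 *	ath9k_hw_set_txq_props(ah, q, &qinfo);
 *	ath9k_hw_resettxqueue(ah, q);
 */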
int ath9k_hw_rxprocdesc(struct ath_hw *ah, struct ath_desc *ds,
			struct ath_rx_status *rs)
{
	struct ar5416_desc ads;
	struct ar5416_desc *adsp = AR5416DESC(ds);
	u32 phyerr;

	if ((adsp->ds_rxstatus8 & AR_RxDone) == 0)
		return -EINPROGRESS;

	ads.u.rx = adsp->u.rx;

	rs->rs_status = 0;
	rs->rs_flags = 0;

	rs->rs_datalen = ads.ds_rxstatus1 & AR_DataLen;
	rs->rs_tstamp = ads.AR_RcvTimestamp;

	if (ads.ds_rxstatus8 & AR_PostDelimCRCErr) {
		rs->rs_rssi = ATH9K_RSSI_BAD;
		rs->rs_rssi_ctl0 = ATH9K_RSSI_BAD;
		rs->rs_rssi_ctl1 = ATH9K_RSSI_BAD;
		rs->rs_rssi_ctl2 = ATH9K_RSSI_BAD;
		rs->rs_rssi_ext0 = ATH9K_RSSI_BAD;
		rs->rs_rssi_ext1 = ATH9K_RSSI_BAD;
		rs->rs_rssi_ext2 = ATH9K_RSSI_BAD;
	} else {
		rs->rs_rssi = MS(ads.ds_rxstatus4, AR_RxRSSICombined);
		rs->rs_rssi_ctl0 = MS(ads.ds_rxstatus0,
				      AR_RxRSSIAnt00);
		rs->rs_rssi_ctl1 = MS(ads.ds_rxstatus0,
				      AR_RxRSSIAnt01);
		rs->rs_rssi_ctl2 = MS(ads.ds_rxstatus0,
				      AR_RxRSSIAnt02);
		rs->rs_rssi_ext0 = MS(ads.ds_rxstatus4,
				      AR_RxRSSIAnt10);
		rs->rs_rssi_ext1 = MS(ads.ds_rxstatus4,
				      AR_RxRSSIAnt11);
		rs->rs_rssi_ext2 = MS(ads.ds_rxstatus4,
				      AR_RxRSSIAnt12);
	}
	if (ads.ds_rxstatus8 & AR_RxKeyIdxValid)
		rs->rs_keyix = MS(ads.ds_rxstatus8, AR_KeyIdx);
	else
		rs->rs_keyix = ATH9K_RXKEYIX_INVALID;

	rs->rs_rate = MS(ads.ds_rxstatus0, AR_RxRate);
	rs->rs_more = (ads.ds_rxstatus1 & AR_RxMore) ? 1 : 0;

	rs->rs_isaggr = (ads.ds_rxstatus8 & AR_RxAggr) ? 1 : 0;
	rs->rs_moreaggr =
		(ads.ds_rxstatus8 & AR_RxMoreAggr) ? 1 : 0;
	rs->rs_antenna = MS(ads.ds_rxstatus3, AR_RxAntenna);
	rs->rs_flags =
		(ads.ds_rxstatus3 & AR_GI) ? ATH9K_RX_GI : 0;
	rs->rs_flags |=
		(ads.ds_rxstatus3 & AR_2040) ? ATH9K_RX_2040 : 0;

	if (ads.ds_rxstatus8 & AR_PreDelimCRCErr)
		rs->rs_flags |= ATH9K_RX_DELIM_CRC_PRE;
	if (ads.ds_rxstatus8 & AR_PostDelimCRCErr)
		rs->rs_flags |= ATH9K_RX_DELIM_CRC_POST;
	if (ads.ds_rxstatus8 & AR_DecryptBusyErr)
		rs->rs_flags |= ATH9K_RX_DECRYPT_BUSY;

	if ((ads.ds_rxstatus8 & AR_RxFrameOK) == 0) {
		/*
		 * Treat these errors as mutually exclusive to avoid spurious
		 * extra error reports from the hardware. If a CRC error is
		 * reported, then decryption and MIC errors are irrelevant,
		 * the frame is going to be dropped either way.
		 */
		if (ads.ds_rxstatus8 & AR_CRCErr)
			rs->rs_status |= ATH9K_RXERR_CRC;
		else if (ads.ds_rxstatus8 & AR_PHYErr) {
			rs->rs_status |= ATH9K_RXERR_PHY;
			phyerr = MS(ads.ds_rxstatus8, AR_PHYErrCode);
			rs->rs_phyerr = phyerr;
		} else if (ads.ds_rxstatus8 & AR_DecryptCRCErr)
			rs->rs_status |= ATH9K_RXERR_DECRYPT;
		else if (ads.ds_rxstatus8 & AR_MichaelErr)
			rs->rs_status |= ATH9K_RXERR_MIC;
		if (ads.ds_rxstatus8 & AR_KeyMiss)
			rs->rs_status |= ATH9K_RXERR_KEYMISS;
	}

	return 0;
}
EXPORT_SYMBOL(ath9k_hw_rxprocdesc);
/*
 * This can stop or re-enable RX.
 *
 * If bool is set this will kill any frame which is currently being
 * transferred between the MAC and baseband and also prevent any new
 * frames from getting started.
 */
bool ath9k_hw_setrxabort(struct ath_hw *ah, bool set)
{
	u32 reg;

	if (set) {
		REG_SET_BIT(ah, AR_DIAG_SW,
			    (AR_DIAG_RX_DIS | AR_DIAG_RX_ABORT));

		if (!ath9k_hw_wait(ah, AR_OBS_BUS_1, AR_OBS_BUS_1_RX_STATE,
				   0, AH_WAIT_TIMEOUT)) {
			REG_CLR_BIT(ah, AR_DIAG_SW,
				    (AR_DIAG_RX_DIS |
				     AR_DIAG_RX_ABORT));

			reg = REG_READ(ah, AR_OBS_BUS_1);
			ath_err(ath9k_hw_common(ah),
				"RX failed to go idle in 10 ms RXSM=0x%x\n",
				reg);

			return false;
		}
	} else {
		REG_CLR_BIT(ah, AR_DIAG_SW,
			    (AR_DIAG_RX_DIS | AR_DIAG_RX_ABORT));
	}

	return true;
}
EXPORT_SYMBOL(ath9k_hw_setrxabort);
void ath9k_hw_putrxbuf(struct ath_hw *ah, u32 rxdp)
{
	REG_WRITE(ah, AR_RXDP, rxdp);
}
EXPORT_SYMBOL(ath9k_hw_putrxbuf);

void ath9k_hw_startpcureceive(struct ath_hw *ah, bool is_scanning)
{
	ath9k_enable_mib_counters(ah);

	ath9k_ani_reset(ah, is_scanning);

	REG_CLR_BIT(ah, AR_DIAG_SW, (AR_DIAG_RX_DIS | AR_DIAG_RX_ABORT));
}
EXPORT_SYMBOL(ath9k_hw_startpcureceive);

void ath9k_hw_abortpcurecv(struct ath_hw *ah)
{
	REG_SET_BIT(ah, AR_DIAG_SW, AR_DIAG_RX_ABORT | AR_DIAG_RX_DIS);

	ath9k_hw_disable_mib_counters(ah);
}
EXPORT_SYMBOL(ath9k_hw_abortpcurecv);

bool ath9k_hw_stopdmarecv(struct ath_hw *ah, bool *reset)
{
#define AH_RX_STOP_DMA_TIMEOUT 10000   /* usec */
	struct ath_common *common = ath9k_hw_common(ah);
	u32 mac_status, last_mac_status = 0;
	int i;

	/* Enable access to the DMA observation bus */
	REG_WRITE(ah, AR_MACMISC,
		  ((AR_MACMISC_DMA_OBS_LINE_8 << AR_MACMISC_DMA_OBS_S) |
		   (AR_MACMISC_MISC_OBS_BUS_1 <<
		    AR_MACMISC_MISC_OBS_BUS_MSB_S)));

	REG_WRITE(ah, AR_CR, AR_CR_RXD);

	/* Wait for rx enable bit to go low */
	for (i = AH_RX_STOP_DMA_TIMEOUT / AH_TIME_QUANTUM; i != 0; i--) {
		if ((REG_READ(ah, AR_CR) & AR_CR_RXE) == 0)
			break;

		if (!AR_SREV_9300_20_OR_LATER(ah)) {
			mac_status = REG_READ(ah, AR_DMADBG_7) & 0x7f0;
			if (mac_status == 0x1c0 && mac_status == last_mac_status) {
				*reset = true;
				break;
			}

			last_mac_status = mac_status;
		}

		udelay(AH_TIME_QUANTUM);
	}

	if (i == 0) {
		ath_err(common,
			"DMA failed to stop in %d ms AR_CR=0x%08x AR_DIAG_SW=0x%08x DMADBG_7=0x%08x\n",
			AH_RX_STOP_DMA_TIMEOUT / 1000,
			REG_READ(ah, AR_CR),
			REG_READ(ah, AR_DIAG_SW),
			REG_READ(ah, AR_DMADBG_7));
		return false;
	} else {
		return true;
	}

#undef AH_RX_STOP_DMA_TIMEOUT
}
EXPORT_SYMBOL(ath9k_hw_stopdmarecv);
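
/*
 * A sketch of the usual RX teardown order (an assumption about callers,
 * not code from this file): abort the PCU first so no new frames start,
 * then wait for the RX DMA engine to drain:
 *
 *	ath9k_hw_abortpcurecv(ah);
 *	stopped = ath9k_hw_stopdmarecv(ah, &reset);
 *	(if *reset was set, the caller performs a full chip reset)
 */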
int ath9k_hw_beaconq_setup(struct ath_hw *ah)
{
	struct ath9k_tx_queue_info qi;

	memset(&qi, 0, sizeof(qi));
	qi.tqi_aifs = 1;
	qi.tqi_cwmin = 0;
	qi.tqi_cwmax = 0;
	/* NB: don't enable any interrupts */
	return ath9k_hw_setuptxqueue(ah, ATH9K_TX_QUEUE_BEACON, &qi);
}
EXPORT_SYMBOL(ath9k_hw_beaconq_setup);

bool ath9k_hw_intrpend(struct ath_hw *ah)
{
	u32 host_isr;

	if (AR_SREV_9100(ah))
		return true;

	host_isr = REG_READ(ah, AR_INTR_ASYNC_CAUSE);
	if ((host_isr & AR_INTR_MAC_IRQ) && (host_isr != AR_INTR_SPURIOUS))
		return true;

	host_isr = REG_READ(ah, AR_INTR_SYNC_CAUSE);
	if ((host_isr & AR_INTR_SYNC_DEFAULT)
	    && (host_isr != AR_INTR_SPURIOUS))
		return true;

	return false;
}
EXPORT_SYMBOL(ath9k_hw_intrpend);
void ath9k_hw_disable_interrupts(struct ath_hw *ah)
{
	struct ath_common *common = ath9k_hw_common(ah);

	if (!(ah->imask & ATH9K_INT_GLOBAL))
		atomic_set(&ah->intr_ref_cnt, -1);
	else
		atomic_dec(&ah->intr_ref_cnt);

	ath_dbg(common, ATH_DBG_INTERRUPT, "disable IER\n");
	REG_WRITE(ah, AR_IER, AR_IER_DISABLE);
	(void) REG_READ(ah, AR_IER);
	if (!AR_SREV_9100(ah)) {
		REG_WRITE(ah, AR_INTR_ASYNC_ENABLE, 0);
		(void) REG_READ(ah, AR_INTR_ASYNC_ENABLE);

		REG_WRITE(ah, AR_INTR_SYNC_ENABLE, 0);
		(void) REG_READ(ah, AR_INTR_SYNC_ENABLE);
	}
}
EXPORT_SYMBOL(ath9k_hw_disable_interrupts);

void ath9k_hw_enable_interrupts(struct ath_hw *ah)
{
	struct ath_common *common = ath9k_hw_common(ah);
	u32 sync_default = AR_INTR_SYNC_DEFAULT;

	if (!(ah->imask & ATH9K_INT_GLOBAL))
		return;

	if (!atomic_inc_and_test(&ah->intr_ref_cnt)) {
		ath_dbg(common, ATH_DBG_INTERRUPT,
			"Do not enable IER ref count %d\n",
			atomic_read(&ah->intr_ref_cnt));
		return;
	}

	if (AR_SREV_9340(ah))
		sync_default &= ~AR_INTR_SYNC_HOST1_FATAL;

	ath_dbg(common, ATH_DBG_INTERRUPT, "enable IER\n");
	REG_WRITE(ah, AR_IER, AR_IER_ENABLE);
	if (!AR_SREV_9100(ah)) {
		REG_WRITE(ah, AR_INTR_ASYNC_ENABLE,
			  AR_INTR_MAC_IRQ);
		REG_WRITE(ah, AR_INTR_ASYNC_MASK, AR_INTR_MAC_IRQ);

		REG_WRITE(ah, AR_INTR_SYNC_ENABLE, sync_default);
		REG_WRITE(ah, AR_INTR_SYNC_MASK, sync_default);
	}
	ath_dbg(common, ATH_DBG_INTERRUPT, "AR_IMR 0x%x IER 0x%x\n",
		REG_READ(ah, AR_IMR), REG_READ(ah, AR_IER));
}
EXPORT_SYMBOL(ath9k_hw_enable_interrupts);
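
/*
 * Illustrative pairing (an assumption about callers): disable/enable
 * nest via intr_ref_cnt, so interrupts are only re-armed once every
 * disable has been matched by an enable:
 *
 *	ath9k_hw_disable_interrupts(ah);
 *	(critical section touching interrupt state)
 *	ath9k_hw_enable_interrupts(ah);
 */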
void ath9k_hw_set_interrupts(struct ath_hw *ah)
{
	enum ath9k_int ints = ah->imask;
	u32 mask, mask2;
	struct ath9k_hw_capabilities *pCap = &ah->caps;
	struct ath_common *common = ath9k_hw_common(ah);

	if (!(ints & ATH9K_INT_GLOBAL))
		ath9k_hw_disable_interrupts(ah);

	ath_dbg(common, ATH_DBG_INTERRUPT, "New interrupt mask 0x%x\n", ints);

	mask = ints & ATH9K_INT_COMMON;
	mask2 = 0;

	if (ints & ATH9K_INT_TX) {
		if (ah->config.tx_intr_mitigation)
			mask |= AR_IMR_TXMINTR | AR_IMR_TXINTM;
		else {
			if (ah->txok_interrupt_mask)
				mask |= AR_IMR_TXOK;
			if (ah->txdesc_interrupt_mask)
				mask |= AR_IMR_TXDESC;
		}
		if (ah->txerr_interrupt_mask)
			mask |= AR_IMR_TXERR;
		if (ah->txeol_interrupt_mask)
			mask |= AR_IMR_TXEOL;
	}
	if (ints & ATH9K_INT_RX) {
		if (AR_SREV_9300_20_OR_LATER(ah)) {
			mask |= AR_IMR_RXERR | AR_IMR_RXOK_HP;
			if (ah->config.rx_intr_mitigation) {
				mask &= ~AR_IMR_RXOK_LP;
				mask |= AR_IMR_RXMINTR | AR_IMR_RXINTM;
			} else {
				mask |= AR_IMR_RXOK_LP;
			}
		} else {
			if (ah->config.rx_intr_mitigation)
				mask |= AR_IMR_RXMINTR | AR_IMR_RXINTM;
			else
				mask |= AR_IMR_RXOK | AR_IMR_RXDESC;
		}
		if (!(pCap->hw_caps & ATH9K_HW_CAP_AUTOSLEEP))
			mask |= AR_IMR_GENTMR;
	}

	if (ints & ATH9K_INT_GENTIMER)
		mask |= AR_IMR_GENTMR;

	if (ints & (ATH9K_INT_BMISC)) {
		mask |= AR_IMR_BCNMISC;
		if (ints & ATH9K_INT_TIM)
			mask2 |= AR_IMR_S2_TIM;
		if (ints & ATH9K_INT_DTIM)
			mask2 |= AR_IMR_S2_DTIM;
		if (ints & ATH9K_INT_DTIMSYNC)
			mask2 |= AR_IMR_S2_DTIMSYNC;
		if (ints & ATH9K_INT_CABEND)
			mask2 |= AR_IMR_S2_CABEND;
		if (ints & ATH9K_INT_TSFOOR)
			mask2 |= AR_IMR_S2_TSFOOR;
	}

	if (ints & (ATH9K_INT_GTT | ATH9K_INT_CST)) {
		mask |= AR_IMR_BCNMISC;
		if (ints & ATH9K_INT_GTT)
			mask2 |= AR_IMR_S2_GTT;
		if (ints & ATH9K_INT_CST)
			mask2 |= AR_IMR_S2_CST;
	}

	ath_dbg(common, ATH_DBG_INTERRUPT, "new IMR 0x%x\n", mask);
	REG_WRITE(ah, AR_IMR, mask);
	ah->imrs2_reg &= ~(AR_IMR_S2_TIM | AR_IMR_S2_DTIM | AR_IMR_S2_DTIMSYNC |
			   AR_IMR_S2_CABEND | AR_IMR_S2_CABTO |
			   AR_IMR_S2_TSFOOR | AR_IMR_S2_GTT | AR_IMR_S2_CST);
	ah->imrs2_reg |= mask2;
	REG_WRITE(ah, AR_IMR_S2, ah->imrs2_reg);

	if (!(pCap->hw_caps & ATH9K_HW_CAP_AUTOSLEEP)) {
		if (ints & ATH9K_INT_TIM_TIMER)
			REG_SET_BIT(ah, AR_IMR_S5, AR_IMR_S5_TIM_TIMER);
		else
			REG_CLR_BIT(ah, AR_IMR_S5, AR_IMR_S5_TIM_TIMER);
	}

	return;
}
EXPORT_SYMBOL(ath9k_hw_set_interrupts);