qcu.c

/*
 * Copyright (c) 2004-2008 Reyk Floeter <reyk@openbsd.org>
 * Copyright (c) 2006-2008 Nick Kossifidis <mickflemm@gmail.com>
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 *
 */

/********************************************\
Queue Control Unit, DFS Control Unit Functions
\********************************************/

#include "ath5k.h"
#include "reg.h"
#include "debug.h"
#include "base.h"

/*
 * Get properties for a transmit queue
 */
int ath5k_hw_get_tx_queueprops(struct ath5k_hw *ah, int queue,
                struct ath5k_txq_info *queue_info)
{
        memcpy(queue_info, &ah->ah_txq[queue], sizeof(struct ath5k_txq_info));
        return 0;
}

/*
 * Make sure cw is a power of 2 minus 1 and smaller than 1024
 */
static u16 ath5k_cw_validate(u16 cw_req)
{
        u32 cw = 1;

        cw_req = min(cw_req, (u16)1023);
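        /* Round up to the nearest power of 2 minus 1 (e.g. a request of 5 becomes 7) */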
        while (cw < cw_req)
                cw = (cw << 1) | 1;

        return cw;
}

/*
 * Set properties for a transmit queue
 */
int ath5k_hw_set_tx_queueprops(struct ath5k_hw *ah, int queue,
                const struct ath5k_txq_info *qinfo)
{
        struct ath5k_txq_info *qi;

        AR5K_ASSERT_ENTRY(queue, ah->ah_capabilities.cap_queues.q_tx_num);

        qi = &ah->ah_txq[queue];

        if (qi->tqi_type == AR5K_TX_QUEUE_INACTIVE)
                return -EIO;

        /* copy and validate values */
        qi->tqi_type = qinfo->tqi_type;
        qi->tqi_subtype = qinfo->tqi_subtype;
        qi->tqi_flags = qinfo->tqi_flags;

        /*
         * According to the docs: Although the AIFS field is 8 bit wide,
         * the maximum supported value is 0xFC. Setting it higher than that
         * will cause the DCU to hang.
         */
        qi->tqi_aifs = min(qinfo->tqi_aifs, (u8)0xFC);

        qi->tqi_cw_min = ath5k_cw_validate(qinfo->tqi_cw_min);
        qi->tqi_cw_max = ath5k_cw_validate(qinfo->tqi_cw_max);

        qi->tqi_cbr_period = qinfo->tqi_cbr_period;
        qi->tqi_cbr_overflow_limit = qinfo->tqi_cbr_overflow_limit;
        qi->tqi_burst_time = qinfo->tqi_burst_time;
        qi->tqi_ready_time = qinfo->tqi_ready_time;

        /*XXX: Is this supported on 5210 ?*/
        /*XXX: Is this correct for AR5K_WME_AC_VI,VO ???*/
        if ((qinfo->tqi_type == AR5K_TX_QUEUE_DATA &&
                ((qinfo->tqi_subtype == AR5K_WME_AC_VI) ||
                 (qinfo->tqi_subtype == AR5K_WME_AC_VO))) ||
                qinfo->tqi_type == AR5K_TX_QUEUE_UAPSD)
                qi->tqi_flags |= AR5K_TXQ_FLAG_POST_FR_BKOFF_DIS;

        return 0;
}

/*
 * Initialize a transmit queue
 */
int ath5k_hw_setup_tx_queue(struct ath5k_hw *ah, enum ath5k_tx_queue queue_type,
                struct ath5k_txq_info *queue_info)
{
        unsigned int queue;
        int ret;

        /*
         * Get queue by type
         */
        /* 5210 only has 2 queues */
        if (ah->ah_version == AR5K_AR5210) {
                switch (queue_type) {
                case AR5K_TX_QUEUE_DATA:
                        queue = AR5K_TX_QUEUE_ID_NOQCU_DATA;
                        break;
                case AR5K_TX_QUEUE_BEACON:
                case AR5K_TX_QUEUE_CAB:
                        queue = AR5K_TX_QUEUE_ID_NOQCU_BEACON;
                        break;
                default:
                        return -EINVAL;
                }
        } else {
                switch (queue_type) {
                case AR5K_TX_QUEUE_DATA:
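                        /* Pick the first inactive data queue slot */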
                        for (queue = AR5K_TX_QUEUE_ID_DATA_MIN;
                                        ah->ah_txq[queue].tqi_type !=
                                        AR5K_TX_QUEUE_INACTIVE; queue++) {

                                if (queue > AR5K_TX_QUEUE_ID_DATA_MAX)
                                        return -EINVAL;
                        }
                        break;
                case AR5K_TX_QUEUE_UAPSD:
                        queue = AR5K_TX_QUEUE_ID_UAPSD;
                        break;
                case AR5K_TX_QUEUE_BEACON:
                        queue = AR5K_TX_QUEUE_ID_BEACON;
                        break;
                case AR5K_TX_QUEUE_CAB:
                        queue = AR5K_TX_QUEUE_ID_CAB;
                        break;
                case AR5K_TX_QUEUE_XR_DATA:
                        if (ah->ah_version != AR5K_AR5212)
                                ATH5K_ERR(ah->ah_sc,
                                        "XR data queues only supported in 5212!\n");
                        queue = AR5K_TX_QUEUE_ID_XR_DATA;
                        break;
                default:
                        return -EINVAL;
                }
        }

        /*
         * Setup internal queue structure
         */
        memset(&ah->ah_txq[queue], 0, sizeof(struct ath5k_txq_info));
        ah->ah_txq[queue].tqi_type = queue_type;

        if (queue_info != NULL) {
                queue_info->tqi_type = queue_type;
                ret = ath5k_hw_set_tx_queueprops(ah, queue, queue_info);
                if (ret)
                        return ret;
        }

        /*
         * We use ah_txq_status to hold a temp value for
         * the Secondary interrupt mask registers on 5211+,
         * check out ath5k_hw_reset_tx_queue
         */
        AR5K_Q_ENABLE_BITS(ah->ah_txq_status, queue);

        return queue;
}

/*
 * Get number of pending frames
 * for a specific queue [5211+]
 */
u32 ath5k_hw_num_tx_pending(struct ath5k_hw *ah, unsigned int queue)
{
        u32 pending;

        AR5K_ASSERT_ENTRY(queue, ah->ah_capabilities.cap_queues.q_tx_num);

        /* Return if queue is declared inactive */
        if (ah->ah_txq[queue].tqi_type == AR5K_TX_QUEUE_INACTIVE)
                return false;

        /* XXX: How about AR5K_CFG_TXCNT ? */
        if (ah->ah_version == AR5K_AR5210)
                return false;

        pending = ath5k_hw_reg_read(ah, AR5K_QUEUE_STATUS(queue));
        pending &= AR5K_QCU_STS_FRMPENDCNT;

        /* It's possible to have no frames pending even if TXE
         * is set. To indicate that the queue has not stopped,
         * return true */
        if (!pending && AR5K_REG_READ_Q(ah, AR5K_QCU_TXE, queue))
                return true;

        return pending;
}

/*
 * Set a transmit queue inactive
 */
void ath5k_hw_release_tx_queue(struct ath5k_hw *ah, unsigned int queue)
{
        if (WARN_ON(queue >= ah->ah_capabilities.cap_queues.q_tx_num))
                return;

        /* This queue will be skipped in further operations */
        ah->ah_txq[queue].tqi_type = AR5K_TX_QUEUE_INACTIVE;

        /* For SIMR setup */
        AR5K_Q_DISABLE_BITS(ah->ah_txq_status, queue);
}

/*
 * Set DFS properties for a transmit queue on DCU
 */
int ath5k_hw_reset_tx_queue(struct ath5k_hw *ah, unsigned int queue)
{
        u32 retry_lg, retry_sh;
        struct ath5k_txq_info *tq;

        AR5K_ASSERT_ENTRY(queue, ah->ah_capabilities.cap_queues.q_tx_num);

        tq = &ah->ah_txq[queue];
        if (tq->tqi_type == AR5K_TX_QUEUE_INACTIVE)
                return 0;

        if (ah->ah_version == AR5K_AR5210) {
                /* Only handle data queues, others will be ignored */
                if (tq->tqi_type != AR5K_TX_QUEUE_DATA)
                        return 0;

                /* Set Slot time */
                ath5k_hw_reg_write(ah, ah->ah_turbo ?
                        AR5K_INIT_SLOT_TIME_TURBO : AR5K_INIT_SLOT_TIME,
                        AR5K_SLOT_TIME);
                /* Set ACK_CTS timeout */
                ath5k_hw_reg_write(ah, ah->ah_turbo ?
                        AR5K_INIT_ACK_CTS_TIMEOUT_TURBO :
                        AR5K_INIT_ACK_CTS_TIMEOUT, AR5K_SLOT_TIME);
                /* Set Transmit Latency */
                ath5k_hw_reg_write(ah, ah->ah_turbo ?
                        AR5K_INIT_TRANSMIT_LATENCY_TURBO :
                        AR5K_INIT_TRANSMIT_LATENCY, AR5K_USEC_5210);

                /* Set IFS0 */
                if (ah->ah_turbo) {
                        ath5k_hw_reg_write(ah, ((AR5K_INIT_SIFS_TURBO +
                                tq->tqi_aifs * AR5K_INIT_SLOT_TIME_TURBO) <<
                                AR5K_IFS0_DIFS_S) | AR5K_INIT_SIFS_TURBO,
                                AR5K_IFS0);
                } else {
                        ath5k_hw_reg_write(ah, ((AR5K_INIT_SIFS +
                                tq->tqi_aifs * AR5K_INIT_SLOT_TIME) <<
                                AR5K_IFS0_DIFS_S) |
                                AR5K_INIT_SIFS, AR5K_IFS0);
                }

                /* Set IFS1 */
                ath5k_hw_reg_write(ah, ah->ah_turbo ?
                        AR5K_INIT_PROTO_TIME_CNTRL_TURBO :
                        AR5K_INIT_PROTO_TIME_CNTRL, AR5K_IFS1);
                /* Set AR5K_PHY_SETTLING */
                ath5k_hw_reg_write(ah, ah->ah_turbo ?
                        (ath5k_hw_reg_read(ah, AR5K_PHY_SETTLING) & ~0x7F)
                        | 0x38 :
                        (ath5k_hw_reg_read(ah, AR5K_PHY_SETTLING) & ~0x7F)
                        | 0x1C,
                        AR5K_PHY_SETTLING);
                /* Set Frame Control Register */
                ath5k_hw_reg_write(ah, ah->ah_turbo ?
                        (AR5K_PHY_FRAME_CTL_INI | AR5K_PHY_TURBO_MODE |
                        AR5K_PHY_TURBO_SHORT | 0x2020) :
                        (AR5K_PHY_FRAME_CTL_INI | 0x1020),
                        AR5K_PHY_FRAME_CTL_5210);
        }

        /*
         * Calculate and set retry limits
         */
        if (ah->ah_software_retry) {
                /* XXX Need to test this */
                retry_lg = ah->ah_limit_tx_retries;
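                /* Clamp both long and short limits to the DCU short retry maximum */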
                retry_sh = retry_lg = retry_lg > AR5K_DCU_RETRY_LMT_SH_RETRY ?
                        AR5K_DCU_RETRY_LMT_SH_RETRY : retry_lg;
        } else {
                retry_lg = AR5K_INIT_LG_RETRY;
                retry_sh = AR5K_INIT_SH_RETRY;
        }

        /* No QCU/DCU [5210] */
        if (ah->ah_version == AR5K_AR5210) {
                ath5k_hw_reg_write(ah,
                        (tq->tqi_cw_min << AR5K_NODCU_RETRY_LMT_CW_MIN_S)
                        | AR5K_REG_SM(AR5K_INIT_SLG_RETRY,
                                AR5K_NODCU_RETRY_LMT_SLG_RETRY)
                        | AR5K_REG_SM(AR5K_INIT_SSH_RETRY,
                                AR5K_NODCU_RETRY_LMT_SSH_RETRY)
                        | AR5K_REG_SM(retry_lg, AR5K_NODCU_RETRY_LMT_LG_RETRY)
                        | AR5K_REG_SM(retry_sh, AR5K_NODCU_RETRY_LMT_SH_RETRY),
                        AR5K_NODCU_RETRY_LMT);
        } else {
                /* QCU/DCU [5211+] */
                ath5k_hw_reg_write(ah,
                        AR5K_REG_SM(AR5K_INIT_SLG_RETRY,
                                AR5K_DCU_RETRY_LMT_SLG_RETRY) |
                        AR5K_REG_SM(AR5K_INIT_SSH_RETRY,
                                AR5K_DCU_RETRY_LMT_SSH_RETRY) |
                        AR5K_REG_SM(retry_lg, AR5K_DCU_RETRY_LMT_LG_RETRY) |
                        AR5K_REG_SM(retry_sh, AR5K_DCU_RETRY_LMT_SH_RETRY),
                        AR5K_QUEUE_DFS_RETRY_LIMIT(queue));

                /*=== Rest is also for QCU/DCU only [5211+] ===*/

                /*
                 * Set contention window (cw_min/cw_max)
                 * and arbitrated interframe space (aifs)...
                 */
                ath5k_hw_reg_write(ah,
                        AR5K_REG_SM(tq->tqi_cw_min, AR5K_DCU_LCL_IFS_CW_MIN) |
                        AR5K_REG_SM(tq->tqi_cw_max, AR5K_DCU_LCL_IFS_CW_MAX) |
                        AR5K_REG_SM(tq->tqi_aifs, AR5K_DCU_LCL_IFS_AIFS),
                        AR5K_QUEUE_DFS_LOCAL_IFS(queue));

                /*
                 * Set misc registers
                 */
                /* Enable DCU early termination for this queue */
                AR5K_REG_ENABLE_BITS(ah, AR5K_QUEUE_MISC(queue),
                        AR5K_QCU_MISC_DCU_EARLY);

                /* Enable DCU to wait for next fragment from QCU */
                AR5K_REG_ENABLE_BITS(ah, AR5K_QUEUE_DFS_MISC(queue),
                        AR5K_DCU_MISC_FRAG_WAIT);

                /* On Maui and Spirit use the global seqnum on DCU */
                if (ah->ah_mac_version < AR5K_SREV_AR5211)
                        AR5K_REG_ENABLE_BITS(ah, AR5K_QUEUE_DFS_MISC(queue),
                                AR5K_DCU_MISC_SEQNUM_CTL);
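                /* Constant bit rate period and overflow threshold for this queue */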
                if (tq->tqi_cbr_period) {
                        ath5k_hw_reg_write(ah, AR5K_REG_SM(tq->tqi_cbr_period,
                                AR5K_QCU_CBRCFG_INTVAL) |
                                AR5K_REG_SM(tq->tqi_cbr_overflow_limit,
                                AR5K_QCU_CBRCFG_ORN_THRES),
                                AR5K_QUEUE_CBRCFG(queue));

                        AR5K_REG_ENABLE_BITS(ah, AR5K_QUEUE_MISC(queue),
                                AR5K_QCU_MISC_FRSHED_CBR);

                        if (tq->tqi_cbr_overflow_limit)
                                AR5K_REG_ENABLE_BITS(ah,
                                        AR5K_QUEUE_MISC(queue),
                                        AR5K_QCU_MISC_CBR_THRES_ENABLE);
                }
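                /* Queue ready time; the CAB queue gets its own value below */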
                if (tq->tqi_ready_time &&
                                (tq->tqi_type != AR5K_TX_QUEUE_CAB))
                        ath5k_hw_reg_write(ah, AR5K_REG_SM(tq->tqi_ready_time,
                                AR5K_QCU_RDYTIMECFG_INTVAL) |
                                AR5K_QCU_RDYTIMECFG_ENABLE,
                                AR5K_QUEUE_RDYTIMECFG(queue));
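                /* Limit how long this queue may use the channel per burst */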
                if (tq->tqi_burst_time) {
                        ath5k_hw_reg_write(ah, AR5K_REG_SM(tq->tqi_burst_time,
                                AR5K_DCU_CHAN_TIME_DUR) |
                                AR5K_DCU_CHAN_TIME_ENABLE,
                                AR5K_QUEUE_DFS_CHANNEL_TIME(queue));

                        if (tq->tqi_flags
                                        & AR5K_TXQ_FLAG_RDYTIME_EXP_POLICY_ENABLE)
                                AR5K_REG_ENABLE_BITS(ah,
                                        AR5K_QUEUE_MISC(queue),
                                        AR5K_QCU_MISC_RDY_VEOL_POLICY);
                }

                if (tq->tqi_flags & AR5K_TXQ_FLAG_BACKOFF_DISABLE)
                        ath5k_hw_reg_write(ah, AR5K_DCU_MISC_POST_FR_BKOFF_DIS,
                                AR5K_QUEUE_DFS_MISC(queue));

                if (tq->tqi_flags & AR5K_TXQ_FLAG_FRAG_BURST_BACKOFF_ENABLE)
                        ath5k_hw_reg_write(ah, AR5K_DCU_MISC_BACKOFF_FRAG,
                                AR5K_QUEUE_DFS_MISC(queue));

                /*
                 * Set registers by queue type
                 */
                switch (tq->tqi_type) {
                case AR5K_TX_QUEUE_BEACON:
                        AR5K_REG_ENABLE_BITS(ah, AR5K_QUEUE_MISC(queue),
                                AR5K_QCU_MISC_FRSHED_DBA_GT |
                                AR5K_QCU_MISC_CBREXP_BCN_DIS |
                                AR5K_QCU_MISC_BCN_ENABLE);

                        AR5K_REG_ENABLE_BITS(ah, AR5K_QUEUE_DFS_MISC(queue),
                                (AR5K_DCU_MISC_ARBLOCK_CTL_GLOBAL <<
                                AR5K_DCU_MISC_ARBLOCK_CTL_S) |
                                AR5K_DCU_MISC_ARBLOCK_IGNORE |
                                AR5K_DCU_MISC_POST_FR_BKOFF_DIS |
                                AR5K_DCU_MISC_BCN_ENABLE);
                        break;

                case AR5K_TX_QUEUE_CAB:
                        /* XXX: use BCN_SENT_GT, if we can figure out how */
                        AR5K_REG_ENABLE_BITS(ah, AR5K_QUEUE_MISC(queue),
                                AR5K_QCU_MISC_FRSHED_DBA_GT |
                                AR5K_QCU_MISC_CBREXP_DIS |
                                AR5K_QCU_MISC_CBREXP_BCN_DIS);

                        ath5k_hw_reg_write(ah, ((tq->tqi_ready_time -
                                (AR5K_TUNE_SW_BEACON_RESP -
                                AR5K_TUNE_DMA_BEACON_RESP) -
                                AR5K_TUNE_ADDITIONAL_SWBA_BACKOFF) * 1024) |
                                AR5K_QCU_RDYTIMECFG_ENABLE,
                                AR5K_QUEUE_RDYTIMECFG(queue));

                        AR5K_REG_ENABLE_BITS(ah, AR5K_QUEUE_DFS_MISC(queue),
                                (AR5K_DCU_MISC_ARBLOCK_CTL_GLOBAL <<
                                AR5K_DCU_MISC_ARBLOCK_CTL_S));
                        break;

                case AR5K_TX_QUEUE_UAPSD:
                        AR5K_REG_ENABLE_BITS(ah, AR5K_QUEUE_MISC(queue),
                                AR5K_QCU_MISC_CBREXP_DIS);
                        break;

                case AR5K_TX_QUEUE_DATA:
                default:
                        break;
                }

                /* TODO: Handle frame compression */

                /*
                 * Enable interrupts for this tx queue
                 * in the secondary interrupt mask registers
                 */
                if (tq->tqi_flags & AR5K_TXQ_FLAG_TXOKINT_ENABLE)
                        AR5K_Q_ENABLE_BITS(ah->ah_txq_imr_txok, queue);

                if (tq->tqi_flags & AR5K_TXQ_FLAG_TXERRINT_ENABLE)
                        AR5K_Q_ENABLE_BITS(ah->ah_txq_imr_txerr, queue);

                if (tq->tqi_flags & AR5K_TXQ_FLAG_TXURNINT_ENABLE)
                        AR5K_Q_ENABLE_BITS(ah->ah_txq_imr_txurn, queue);

                if (tq->tqi_flags & AR5K_TXQ_FLAG_TXDESCINT_ENABLE)
                        AR5K_Q_ENABLE_BITS(ah->ah_txq_imr_txdesc, queue);

                if (tq->tqi_flags & AR5K_TXQ_FLAG_TXEOLINT_ENABLE)
                        AR5K_Q_ENABLE_BITS(ah->ah_txq_imr_txeol, queue);

                if (tq->tqi_flags & AR5K_TXQ_FLAG_CBRORNINT_ENABLE)
                        AR5K_Q_ENABLE_BITS(ah->ah_txq_imr_cbrorn, queue);

                if (tq->tqi_flags & AR5K_TXQ_FLAG_CBRURNINT_ENABLE)
                        AR5K_Q_ENABLE_BITS(ah->ah_txq_imr_cbrurn, queue);

                if (tq->tqi_flags & AR5K_TXQ_FLAG_QTRIGINT_ENABLE)
                        AR5K_Q_ENABLE_BITS(ah->ah_txq_imr_qtrig, queue);

                if (tq->tqi_flags & AR5K_TXQ_FLAG_TXNOFRMINT_ENABLE)
                        AR5K_Q_ENABLE_BITS(ah->ah_txq_imr_nofrm, queue);

                /* Update secondary interrupt mask registers */

                /* Filter out inactive queues */
                ah->ah_txq_imr_txok &= ah->ah_txq_status;
                ah->ah_txq_imr_txerr &= ah->ah_txq_status;
                ah->ah_txq_imr_txurn &= ah->ah_txq_status;
                ah->ah_txq_imr_txdesc &= ah->ah_txq_status;
                ah->ah_txq_imr_txeol &= ah->ah_txq_status;
                ah->ah_txq_imr_cbrorn &= ah->ah_txq_status;
                ah->ah_txq_imr_cbrurn &= ah->ah_txq_status;
                ah->ah_txq_imr_qtrig &= ah->ah_txq_status;
                ah->ah_txq_imr_nofrm &= ah->ah_txq_status;

                ath5k_hw_reg_write(ah, AR5K_REG_SM(ah->ah_txq_imr_txok,
                        AR5K_SIMR0_QCU_TXOK) |
                        AR5K_REG_SM(ah->ah_txq_imr_txdesc,
                        AR5K_SIMR0_QCU_TXDESC), AR5K_SIMR0);

                ath5k_hw_reg_write(ah, AR5K_REG_SM(ah->ah_txq_imr_txerr,
                        AR5K_SIMR1_QCU_TXERR) |
                        AR5K_REG_SM(ah->ah_txq_imr_txeol,
                        AR5K_SIMR1_QCU_TXEOL), AR5K_SIMR1);
                /* Update SIMR2, but preserve the rest of its settings */
                AR5K_REG_DISABLE_BITS(ah, AR5K_SIMR2, AR5K_SIMR2_QCU_TXURN);
                AR5K_REG_ENABLE_BITS(ah, AR5K_SIMR2,
                        AR5K_REG_SM(ah->ah_txq_imr_txurn,
                        AR5K_SIMR2_QCU_TXURN));

                ath5k_hw_reg_write(ah, AR5K_REG_SM(ah->ah_txq_imr_cbrorn,
                        AR5K_SIMR3_QCBRORN) |
                        AR5K_REG_SM(ah->ah_txq_imr_cbrurn,
                        AR5K_SIMR3_QCBRURN), AR5K_SIMR3);

                ath5k_hw_reg_write(ah, AR5K_REG_SM(ah->ah_txq_imr_qtrig,
                        AR5K_SIMR4_QTRIG), AR5K_SIMR4);

                /* Set TXNOFRM_QCU for the queues with TXNOFRM enabled */
                ath5k_hw_reg_write(ah, AR5K_REG_SM(ah->ah_txq_imr_nofrm,
                        AR5K_TXNOFRM_QCU), AR5K_TXNOFRM);
                /* If no queue has TXNOFRM enabled, disable the interrupt
                 * by setting AR5K_TXNOFRM to zero */
                if (ah->ah_txq_imr_nofrm == 0)
                        ath5k_hw_reg_write(ah, 0, AR5K_TXNOFRM);

                /* Set QCU mask for this DCU to save power */
                AR5K_REG_WRITE_Q(ah, AR5K_QUEUE_QCUMASK(queue), queue);
        }

        return 0;
}

/*
 * Set slot time on DCU
 */
int ath5k_hw_set_slot_time(struct ath5k_hw *ah, unsigned int slot_time)
{
        u32 slot_time_clock = ath5k_hw_htoclock(ah, slot_time);
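        /* Reject slot times shorter than 6 usec or, after conversion,
         * longer than the hardware maximum */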
        if (slot_time < 6 || slot_time_clock > AR5K_SLOT_TIME_MAX)
                return -EINVAL;

        if (ah->ah_version == AR5K_AR5210)
                ath5k_hw_reg_write(ah, slot_time_clock, AR5K_SLOT_TIME);
        else
                ath5k_hw_reg_write(ah, slot_time_clock, AR5K_DCU_GBL_IFS_SLOT);

        return 0;
}