qcu.c
/*
 * Copyright (c) 2004-2008 Reyk Floeter <reyk@openbsd.org>
 * Copyright (c) 2006-2008 Nick Kossifidis <mickflemm@gmail.com>
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 *
 */
/********************************************\
Queue Control Unit, DFS Control Unit Functions
\********************************************/

#include "ath5k.h"
#include "reg.h"
#include "debug.h"
#include "base.h"

/*
 * Get properties for a transmit queue
 */
int ath5k_hw_get_tx_queueprops(struct ath5k_hw *ah, int queue,
		struct ath5k_txq_info *queue_info)
{
	ATH5K_TRACE(ah->ah_sc);
	memcpy(queue_info, &ah->ah_txq[queue], sizeof(struct ath5k_txq_info));
	return 0;
}

/*
 * Set properties for a transmit queue
 */
int ath5k_hw_set_tx_queueprops(struct ath5k_hw *ah, int queue,
		const struct ath5k_txq_info *queue_info)
{
	ATH5K_TRACE(ah->ah_sc);
	AR5K_ASSERT_ENTRY(queue, ah->ah_capabilities.cap_queues.q_tx_num);

	if (ah->ah_txq[queue].tqi_type == AR5K_TX_QUEUE_INACTIVE)
		return -EIO;

	memcpy(&ah->ah_txq[queue], queue_info, sizeof(struct ath5k_txq_info));
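
	/*
	 * Disabling post-frame backoff for the high priority queues
	 * (WME VI/VO data and UAPSD) lets them transmit back-to-back
	 * frames, which appears to be the intent of the flag set below.
	 */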
	/*XXX: Is this supported on 5210 ?*/
	if ((queue_info->tqi_type == AR5K_TX_QUEUE_DATA &&
			((queue_info->tqi_subtype == AR5K_WME_AC_VI) ||
			(queue_info->tqi_subtype == AR5K_WME_AC_VO))) ||
			queue_info->tqi_type == AR5K_TX_QUEUE_UAPSD)
		ah->ah_txq[queue].tqi_flags |= AR5K_TXQ_FLAG_POST_FR_BKOFF_DIS;

	return 0;
}

/*
 * Initialize a transmit queue
 */
int ath5k_hw_setup_tx_queue(struct ath5k_hw *ah, enum ath5k_tx_queue queue_type,
		struct ath5k_txq_info *queue_info)
{
	unsigned int queue;
	int ret;

	ATH5K_TRACE(ah->ah_sc);

	/*
	 * Get queue by type
	 */
	/*5210 only has 2 queues*/
	if (ah->ah_version == AR5K_AR5210) {
		switch (queue_type) {
		case AR5K_TX_QUEUE_DATA:
			queue = AR5K_TX_QUEUE_ID_NOQCU_DATA;
			break;
		case AR5K_TX_QUEUE_BEACON:
		case AR5K_TX_QUEUE_CAB:
			queue = AR5K_TX_QUEUE_ID_NOQCU_BEACON;
			break;
		default:
			return -EINVAL;
		}
	} else {
		switch (queue_type) {
		case AR5K_TX_QUEUE_DATA:
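			/*
			 * Pick the first data queue slot that is still
			 * marked inactive; if the scan runs past
			 * AR5K_TX_QUEUE_ID_DATA_MAX all slots are in use.
			 */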
			for (queue = AR5K_TX_QUEUE_ID_DATA_MIN;
					ah->ah_txq[queue].tqi_type !=
					AR5K_TX_QUEUE_INACTIVE; queue++) {
				if (queue > AR5K_TX_QUEUE_ID_DATA_MAX)
					return -EINVAL;
			}
			break;
		case AR5K_TX_QUEUE_UAPSD:
			queue = AR5K_TX_QUEUE_ID_UAPSD;
			break;
		case AR5K_TX_QUEUE_BEACON:
			queue = AR5K_TX_QUEUE_ID_BEACON;
			break;
		case AR5K_TX_QUEUE_CAB:
			queue = AR5K_TX_QUEUE_ID_CAB;
			break;
		case AR5K_TX_QUEUE_XR_DATA:
			if (ah->ah_version != AR5K_AR5212)
				ATH5K_ERR(ah->ah_sc,
					"XR data queues only supported in"
					" 5212!\n");
			queue = AR5K_TX_QUEUE_ID_XR_DATA;
			break;
		default:
			return -EINVAL;
		}
	}

	/*
	 * Setup internal queue structure
	 */
	memset(&ah->ah_txq[queue], 0, sizeof(struct ath5k_txq_info));
	ah->ah_txq[queue].tqi_type = queue_type;

	if (queue_info != NULL) {
		queue_info->tqi_type = queue_type;
		ret = ath5k_hw_set_tx_queueprops(ah, queue, queue_info);
		if (ret)
			return ret;
	}

	/*
	 * We use ah_txq_status to hold a temp value for
	 * the Secondary interrupt mask registers on 5211+
	 * check out ath5k_hw_reset_tx_queue
	 */
	AR5K_Q_ENABLE_BITS(ah->ah_txq_status, queue);

	return queue;
}

/*
 * Get number of pending frames
 * for a specific queue [5211+]
 */
u32 ath5k_hw_num_tx_pending(struct ath5k_hw *ah, unsigned int queue)
{
	u32 pending;
	ATH5K_TRACE(ah->ah_sc);
	AR5K_ASSERT_ENTRY(queue, ah->ah_capabilities.cap_queues.q_tx_num);

	/* Return if queue is declared inactive */
	if (ah->ah_txq[queue].tqi_type == AR5K_TX_QUEUE_INACTIVE)
		return false;

	/* XXX: How about AR5K_CFG_TXCNT ? */
	if (ah->ah_version == AR5K_AR5210)
		return false;

	/* AR5K_QUEUE_STATUS() is a register offset, so the pending-frame
	 * count has to be read back from the hardware */
	pending = ath5k_hw_reg_read(ah, AR5K_QUEUE_STATUS(queue)) &
		AR5K_QCU_STS_FRMPENDCNT;

	/* It's possible to have no frames pending even if TXE
	 * is set. To indicate that q has not stopped return
	 * true */
	if (!pending && AR5K_REG_READ_Q(ah, AR5K_QCU_TXE, queue))
		return true;

	return pending;
}

/*
 * Set a transmit queue inactive
 */
void ath5k_hw_release_tx_queue(struct ath5k_hw *ah, unsigned int queue)
{
	ATH5K_TRACE(ah->ah_sc);
	if (WARN_ON(queue >= ah->ah_capabilities.cap_queues.q_tx_num))
		return;

	/* This queue will be skipped in further operations */
	ah->ah_txq[queue].tqi_type = AR5K_TX_QUEUE_INACTIVE;
	/*For SIMR setup*/
	AR5K_Q_DISABLE_BITS(ah->ah_txq_status, queue);
}

/*
 * Set DFS properties for a transmit queue on DCU
 */
int ath5k_hw_reset_tx_queue(struct ath5k_hw *ah, unsigned int queue)
{
	u32 cw_min, cw_max, retry_lg, retry_sh;
	struct ath5k_txq_info *tq = &ah->ah_txq[queue];

	ATH5K_TRACE(ah->ah_sc);
	AR5K_ASSERT_ENTRY(queue, ah->ah_capabilities.cap_queues.q_tx_num);

	tq = &ah->ah_txq[queue];

	if (tq->tqi_type == AR5K_TX_QUEUE_INACTIVE)
		return 0;

	if (ah->ah_version == AR5K_AR5210) {
		/* Only handle data queues, others will be ignored */
		if (tq->tqi_type != AR5K_TX_QUEUE_DATA)
			return 0;

		/* Set Slot time */
		ath5k_hw_reg_write(ah, ah->ah_turbo ?
			AR5K_INIT_SLOT_TIME_TURBO : AR5K_INIT_SLOT_TIME,
			AR5K_SLOT_TIME);
		/* Set ACK_CTS timeout */
		ath5k_hw_reg_write(ah, ah->ah_turbo ?
			AR5K_INIT_ACK_CTS_TIMEOUT_TURBO :
			AR5K_INIT_ACK_CTS_TIMEOUT, AR5K_SLOT_TIME);
		/* Set Transmit Latency */
		ath5k_hw_reg_write(ah, ah->ah_turbo ?
			AR5K_INIT_TRANSMIT_LATENCY_TURBO :
			AR5K_INIT_TRANSMIT_LATENCY, AR5K_USEC_5210);
		/* Set IFS0 */
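		/*
		 * IFS0 packs SIFS into the low bits and DIFS above it,
		 * where DIFS = SIFS + (AIFS + per-queue AIFS) * slot time.
		 */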
		if (ah->ah_turbo) {
			ath5k_hw_reg_write(ah, ((AR5K_INIT_SIFS_TURBO +
				(ah->ah_aifs + tq->tqi_aifs) *
				AR5K_INIT_SLOT_TIME_TURBO) <<
				AR5K_IFS0_DIFS_S) | AR5K_INIT_SIFS_TURBO,
				AR5K_IFS0);
		} else {
			ath5k_hw_reg_write(ah, ((AR5K_INIT_SIFS +
				(ah->ah_aifs + tq->tqi_aifs) *
				AR5K_INIT_SLOT_TIME) << AR5K_IFS0_DIFS_S) |
				AR5K_INIT_SIFS, AR5K_IFS0);
		}

		/* Set IFS1 */
		ath5k_hw_reg_write(ah, ah->ah_turbo ?
			AR5K_INIT_PROTO_TIME_CNTRL_TURBO :
			AR5K_INIT_PROTO_TIME_CNTRL, AR5K_IFS1);
		/* Set AR5K_PHY_SETTLING */
		ath5k_hw_reg_write(ah, ah->ah_turbo ?
			(ath5k_hw_reg_read(ah, AR5K_PHY_SETTLING) & ~0x7F)
			| 0x38 :
			(ath5k_hw_reg_read(ah, AR5K_PHY_SETTLING) & ~0x7F)
			| 0x1C,
			AR5K_PHY_SETTLING);
		/* Set Frame Control Register */
		ath5k_hw_reg_write(ah, ah->ah_turbo ?
			(AR5K_PHY_FRAME_CTL_INI | AR5K_PHY_TURBO_MODE |
			AR5K_PHY_TURBO_SHORT | 0x2020) :
			(AR5K_PHY_FRAME_CTL_INI | 0x1020),
			AR5K_PHY_FRAME_CTL_5210);
	}

	/*
	 * Calculate cwmin/max by channel mode
	 */
	cw_min = ah->ah_cw_min = AR5K_TUNE_CWMIN;
	cw_max = ah->ah_cw_max = AR5K_TUNE_CWMAX;
	ah->ah_aifs = AR5K_TUNE_AIFS;
	/*XR is only supported on 5212*/
	if (IS_CHAN_XR(ah->ah_current_channel) &&
			ah->ah_version == AR5K_AR5212) {
		cw_min = ah->ah_cw_min = AR5K_TUNE_CWMIN_XR;
		cw_max = ah->ah_cw_max = AR5K_TUNE_CWMAX_XR;
		ah->ah_aifs = AR5K_TUNE_AIFS_XR;
	/*B mode is not supported on 5210*/
	} else if (IS_CHAN_B(ah->ah_current_channel) &&
			ah->ah_version != AR5K_AR5210) {
		cw_min = ah->ah_cw_min = AR5K_TUNE_CWMIN_11B;
		cw_max = ah->ah_cw_max = AR5K_TUNE_CWMAX_11B;
		ah->ah_aifs = AR5K_TUNE_AIFS_11B;
	}
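
	/*
	 * Round the default cw_min up to the next (2^n - 1) value, then
	 * apply the per-queue adjustment: a negative tqi_cw_min/max shrinks
	 * the window by that many powers of two, a positive one grows it
	 * while keeping the all-ones bit pattern.
	 */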
	cw_min = 1;
	while (cw_min < ah->ah_cw_min)
		cw_min = (cw_min << 1) | 1;

	cw_min = tq->tqi_cw_min < 0 ? (cw_min >> (-tq->tqi_cw_min)) :
		((cw_min << tq->tqi_cw_min) + (1 << tq->tqi_cw_min) - 1);
	cw_max = tq->tqi_cw_max < 0 ? (cw_max >> (-tq->tqi_cw_max)) :
		((cw_max << tq->tqi_cw_max) + (1 << tq->tqi_cw_max) - 1);

	/*
	 * Calculate and set retry limits
	 */
	if (ah->ah_software_retry) {
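		/*
		 * When software retry is enabled the same (clamped) retry
		 * count is used for both the short and long retry limits.
		 */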
		/* XXX Need to test this */
		retry_lg = ah->ah_limit_tx_retries;
		retry_sh = retry_lg = retry_lg > AR5K_DCU_RETRY_LMT_SH_RETRY ?
			AR5K_DCU_RETRY_LMT_SH_RETRY : retry_lg;
	} else {
		retry_lg = AR5K_INIT_LG_RETRY;
		retry_sh = AR5K_INIT_SH_RETRY;
	}

	/*No QCU/DCU [5210]*/
	if (ah->ah_version == AR5K_AR5210) {
		ath5k_hw_reg_write(ah,
			(cw_min << AR5K_NODCU_RETRY_LMT_CW_MIN_S)
			| AR5K_REG_SM(AR5K_INIT_SLG_RETRY,
				AR5K_NODCU_RETRY_LMT_SLG_RETRY)
			| AR5K_REG_SM(AR5K_INIT_SSH_RETRY,
				AR5K_NODCU_RETRY_LMT_SSH_RETRY)
			| AR5K_REG_SM(retry_lg, AR5K_NODCU_RETRY_LMT_LG_RETRY)
			| AR5K_REG_SM(retry_sh, AR5K_NODCU_RETRY_LMT_SH_RETRY),
			AR5K_NODCU_RETRY_LMT);
	} else {
		/*QCU/DCU [5211+]*/
		ath5k_hw_reg_write(ah,
			AR5K_REG_SM(AR5K_INIT_SLG_RETRY,
				AR5K_DCU_RETRY_LMT_SLG_RETRY) |
			AR5K_REG_SM(AR5K_INIT_SSH_RETRY,
				AR5K_DCU_RETRY_LMT_SSH_RETRY) |
			AR5K_REG_SM(retry_lg, AR5K_DCU_RETRY_LMT_LG_RETRY) |
			AR5K_REG_SM(retry_sh, AR5K_DCU_RETRY_LMT_SH_RETRY),
			AR5K_QUEUE_DFS_RETRY_LIMIT(queue));

		/*===Rest is also for QCU/DCU only [5211+]===*/

		/*
		 * Set initial contention window (cw_min/cw_max)
		 * and arbitrated interframe space (aifs)...
		 */
		ath5k_hw_reg_write(ah,
			AR5K_REG_SM(cw_min, AR5K_DCU_LCL_IFS_CW_MIN) |
			AR5K_REG_SM(cw_max, AR5K_DCU_LCL_IFS_CW_MAX) |
			AR5K_REG_SM(ah->ah_aifs + tq->tqi_aifs,
				AR5K_DCU_LCL_IFS_AIFS),
			AR5K_QUEUE_DFS_LOCAL_IFS(queue));

		/*
		 * Set misc registers
		 */
		/* Enable DCU early termination for this queue */
		AR5K_REG_ENABLE_BITS(ah, AR5K_QUEUE_MISC(queue),
			AR5K_QCU_MISC_DCU_EARLY);

		/* Enable DCU to wait for next fragment from QCU */
		AR5K_REG_ENABLE_BITS(ah, AR5K_QUEUE_DFS_MISC(queue),
			AR5K_DCU_MISC_FRAG_WAIT);

		/* On Maui and Spirit use the global seqnum on DCU */
		if (ah->ah_mac_version < AR5K_SREV_AR5211)
			AR5K_REG_ENABLE_BITS(ah, AR5K_QUEUE_DFS_MISC(queue),
				AR5K_DCU_MISC_SEQNUM_CTL);

		if (tq->tqi_cbr_period) {
			ath5k_hw_reg_write(ah, AR5K_REG_SM(tq->tqi_cbr_period,
				AR5K_QCU_CBRCFG_INTVAL) |
				AR5K_REG_SM(tq->tqi_cbr_overflow_limit,
				AR5K_QCU_CBRCFG_ORN_THRES),
				AR5K_QUEUE_CBRCFG(queue));
			AR5K_REG_ENABLE_BITS(ah, AR5K_QUEUE_MISC(queue),
				AR5K_QCU_MISC_FRSHED_CBR);
			if (tq->tqi_cbr_overflow_limit)
				AR5K_REG_ENABLE_BITS(ah,
					AR5K_QUEUE_MISC(queue),
					AR5K_QCU_MISC_CBR_THRES_ENABLE);
		}

		/* tqi_type holds enum ath5k_tx_queue values, so compare
		 * against AR5K_TX_QUEUE_CAB rather than the queue index */
		if (tq->tqi_ready_time &&
				(tq->tqi_type != AR5K_TX_QUEUE_CAB))
			ath5k_hw_reg_write(ah, AR5K_REG_SM(tq->tqi_ready_time,
				AR5K_QCU_RDYTIMECFG_INTVAL) |
				AR5K_QCU_RDYTIMECFG_ENABLE,
				AR5K_QUEUE_RDYTIMECFG(queue));

		if (tq->tqi_burst_time) {
			ath5k_hw_reg_write(ah, AR5K_REG_SM(tq->tqi_burst_time,
				AR5K_DCU_CHAN_TIME_DUR) |
				AR5K_DCU_CHAN_TIME_ENABLE,
				AR5K_QUEUE_DFS_CHANNEL_TIME(queue));

			if (tq->tqi_flags
					& AR5K_TXQ_FLAG_RDYTIME_EXP_POLICY_ENABLE)
				AR5K_REG_ENABLE_BITS(ah,
					AR5K_QUEUE_MISC(queue),
					AR5K_QCU_MISC_RDY_VEOL_POLICY);
		}

		if (tq->tqi_flags & AR5K_TXQ_FLAG_BACKOFF_DISABLE)
			ath5k_hw_reg_write(ah, AR5K_DCU_MISC_POST_FR_BKOFF_DIS,
				AR5K_QUEUE_DFS_MISC(queue));

		if (tq->tqi_flags & AR5K_TXQ_FLAG_FRAG_BURST_BACKOFF_ENABLE)
			ath5k_hw_reg_write(ah, AR5K_DCU_MISC_BACKOFF_FRAG,
				AR5K_QUEUE_DFS_MISC(queue));

		/*
		 * Set registers by queue type
		 */
		switch (tq->tqi_type) {
		case AR5K_TX_QUEUE_BEACON:
			AR5K_REG_ENABLE_BITS(ah, AR5K_QUEUE_MISC(queue),
				AR5K_QCU_MISC_FRSHED_DBA_GT |
				AR5K_QCU_MISC_CBREXP_BCN_DIS |
				AR5K_QCU_MISC_BCN_ENABLE);

			AR5K_REG_ENABLE_BITS(ah, AR5K_QUEUE_DFS_MISC(queue),
				(AR5K_DCU_MISC_ARBLOCK_CTL_GLOBAL <<
				AR5K_DCU_MISC_ARBLOCK_CTL_S) |
				AR5K_DCU_MISC_POST_FR_BKOFF_DIS |
				AR5K_DCU_MISC_BCN_ENABLE);
			break;

		case AR5K_TX_QUEUE_CAB:
			AR5K_REG_ENABLE_BITS(ah, AR5K_QUEUE_MISC(queue),
				AR5K_QCU_MISC_FRSHED_DBA_GT |
				AR5K_QCU_MISC_CBREXP_DIS |
				AR5K_QCU_MISC_CBREXP_BCN_DIS);
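
			/*
			 * The ready time below is derived from the beacon
			 * interval; the tuning values are in TU and the
			 * multiplication by 1024 reads as a TU-to-microsecond
			 * conversion (1 TU = 1024 usec).
			 */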
			ath5k_hw_reg_write(ah, ((AR5K_TUNE_BEACON_INTERVAL -
				(AR5K_TUNE_SW_BEACON_RESP -
				AR5K_TUNE_DMA_BEACON_RESP) -
				AR5K_TUNE_ADDITIONAL_SWBA_BACKOFF) * 1024) |
				AR5K_QCU_RDYTIMECFG_ENABLE,
				AR5K_QUEUE_RDYTIMECFG(queue));

			AR5K_REG_ENABLE_BITS(ah, AR5K_QUEUE_DFS_MISC(queue),
				(AR5K_DCU_MISC_ARBLOCK_CTL_GLOBAL <<
				AR5K_DCU_MISC_ARBLOCK_CTL_S));
			break;

		case AR5K_TX_QUEUE_UAPSD:
			AR5K_REG_ENABLE_BITS(ah, AR5K_QUEUE_MISC(queue),
				AR5K_QCU_MISC_CBREXP_DIS);
			break;

		case AR5K_TX_QUEUE_DATA:
		default:
			break;
		}

		/* TODO: Handle frame compression */

		/*
		 * Enable interrupts for this tx queue
		 * in the secondary interrupt mask registers
		 */
		if (tq->tqi_flags & AR5K_TXQ_FLAG_TXOKINT_ENABLE)
			AR5K_Q_ENABLE_BITS(ah->ah_txq_imr_txok, queue);
		if (tq->tqi_flags & AR5K_TXQ_FLAG_TXERRINT_ENABLE)
			AR5K_Q_ENABLE_BITS(ah->ah_txq_imr_txerr, queue);
		if (tq->tqi_flags & AR5K_TXQ_FLAG_TXURNINT_ENABLE)
			AR5K_Q_ENABLE_BITS(ah->ah_txq_imr_txurn, queue);
		if (tq->tqi_flags & AR5K_TXQ_FLAG_TXDESCINT_ENABLE)
			AR5K_Q_ENABLE_BITS(ah->ah_txq_imr_txdesc, queue);
		if (tq->tqi_flags & AR5K_TXQ_FLAG_TXEOLINT_ENABLE)
			AR5K_Q_ENABLE_BITS(ah->ah_txq_imr_txeol, queue);
		if (tq->tqi_flags & AR5K_TXQ_FLAG_CBRORNINT_ENABLE)
			AR5K_Q_ENABLE_BITS(ah->ah_txq_imr_cbrorn, queue);
		if (tq->tqi_flags & AR5K_TXQ_FLAG_CBRURNINT_ENABLE)
			AR5K_Q_ENABLE_BITS(ah->ah_txq_imr_cbrurn, queue);
		if (tq->tqi_flags & AR5K_TXQ_FLAG_QTRIGINT_ENABLE)
			AR5K_Q_ENABLE_BITS(ah->ah_txq_imr_qtrig, queue);
		if (tq->tqi_flags & AR5K_TXQ_FLAG_TXNOFRMINT_ENABLE)
			AR5K_Q_ENABLE_BITS(ah->ah_txq_imr_nofrm, queue);

		/* Update secondary interrupt mask registers */

		/* Filter out inactive queues */
		ah->ah_txq_imr_txok &= ah->ah_txq_status;
		ah->ah_txq_imr_txerr &= ah->ah_txq_status;
		ah->ah_txq_imr_txurn &= ah->ah_txq_status;
		ah->ah_txq_imr_txdesc &= ah->ah_txq_status;
		ah->ah_txq_imr_txeol &= ah->ah_txq_status;
		ah->ah_txq_imr_cbrorn &= ah->ah_txq_status;
		ah->ah_txq_imr_cbrurn &= ah->ah_txq_status;
		ah->ah_txq_imr_qtrig &= ah->ah_txq_status;
		ah->ah_txq_imr_nofrm &= ah->ah_txq_status;
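
		/*
		 * The per-queue interrupt bits are grouped by cause:
		 * TXOK/TXDESC -> SIMR0, TXERR/TXEOL -> SIMR1, TXURN -> SIMR2,
		 * CBRORN/CBRURN -> SIMR3 and QTRIG -> SIMR4.
		 */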
		ath5k_hw_reg_write(ah, AR5K_REG_SM(ah->ah_txq_imr_txok,
			AR5K_SIMR0_QCU_TXOK) |
			AR5K_REG_SM(ah->ah_txq_imr_txdesc,
			AR5K_SIMR0_QCU_TXDESC), AR5K_SIMR0);
		ath5k_hw_reg_write(ah, AR5K_REG_SM(ah->ah_txq_imr_txerr,
			AR5K_SIMR1_QCU_TXERR) |
			AR5K_REG_SM(ah->ah_txq_imr_txeol,
			AR5K_SIMR1_QCU_TXEOL), AR5K_SIMR1);
		/* Update simr2 but don't overwrite rest simr2 settings */
		AR5K_REG_DISABLE_BITS(ah, AR5K_SIMR2, AR5K_SIMR2_QCU_TXURN);
		AR5K_REG_ENABLE_BITS(ah, AR5K_SIMR2,
			AR5K_REG_SM(ah->ah_txq_imr_txurn,
			AR5K_SIMR2_QCU_TXURN));
		ath5k_hw_reg_write(ah, AR5K_REG_SM(ah->ah_txq_imr_cbrorn,
			AR5K_SIMR3_QCBRORN) |
			AR5K_REG_SM(ah->ah_txq_imr_cbrurn,
			AR5K_SIMR3_QCBRURN), AR5K_SIMR3);
		ath5k_hw_reg_write(ah, AR5K_REG_SM(ah->ah_txq_imr_qtrig,
			AR5K_SIMR4_QTRIG), AR5K_SIMR4);
		/* Set TXNOFRM_QCU for the queues with TXNOFRM enabled */
		ath5k_hw_reg_write(ah, AR5K_REG_SM(ah->ah_txq_imr_nofrm,
			AR5K_TXNOFRM_QCU), AR5K_TXNOFRM);
		/* If no queue has TXNOFRM enabled, disable the interrupt
		 * by setting AR5K_TXNOFRM to zero */
		if (ah->ah_txq_imr_nofrm == 0)
			ath5k_hw_reg_write(ah, 0, AR5K_TXNOFRM);

		/* Set QCU mask for this DCU to save power */
		AR5K_REG_WRITE_Q(ah, AR5K_QUEUE_QCUMASK(queue), queue);
	}

	return 0;
}

/*
 * Get slot time from DCU
 */
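/*
 * On the 5210 the slot time register holds core clock units, hence the
 * ath5k_hw_clocktoh()/ath5k_hw_htoclock() conversions in the two functions
 * below; on 5211+ the value in AR5K_DCU_GBL_IFS_SLOT is used as-is.
 */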
unsigned int ath5k_hw_get_slot_time(struct ath5k_hw *ah)
{
	ATH5K_TRACE(ah->ah_sc);
	if (ah->ah_version == AR5K_AR5210)
		return ath5k_hw_clocktoh(ath5k_hw_reg_read(ah,
				AR5K_SLOT_TIME) & 0xffff, ah->ah_turbo);
	else
		return ath5k_hw_reg_read(ah, AR5K_DCU_GBL_IFS_SLOT) & 0xffff;
}

/*
 * Set slot time on DCU
 */
int ath5k_hw_set_slot_time(struct ath5k_hw *ah, unsigned int slot_time)
{
	ATH5K_TRACE(ah->ah_sc);
	if (slot_time < AR5K_SLOT_TIME_9 || slot_time > AR5K_SLOT_TIME_MAX)
		return -EINVAL;

	if (ah->ah_version == AR5K_AR5210)
		ath5k_hw_reg_write(ah, ath5k_hw_htoclock(slot_time,
				ah->ah_turbo), AR5K_SLOT_TIME);
	else
		ath5k_hw_reg_write(ah, slot_time, AR5K_DCU_GBL_IFS_SLOT);

	return 0;
}