/*
 * Copyright (c) 2004-2008 Reyk Floeter <reyk@openbsd.org>
 * Copyright (c) 2006-2008 Nick Kossifidis <mickflemm@gmail.com>
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 *
 */

/********************************************\
Queue Control Unit, DFS Control Unit Functions
\********************************************/

#include "ath5k.h"
#include "reg.h"
#include "debug.h"
#include "base.h"

/*
 * Get properties for a transmit queue
 */
int ath5k_hw_get_tx_queueprops(struct ath5k_hw *ah, int queue,
		struct ath5k_txq_info *queue_info)
{
        ATH5K_TRACE(ah->ah_sc);
        memcpy(queue_info, &ah->ah_txq[queue], sizeof(struct ath5k_txq_info));
        return 0;
}

/*
 * Set properties for a transmit queue
 */
int ath5k_hw_set_tx_queueprops(struct ath5k_hw *ah, int queue,
		const struct ath5k_txq_info *queue_info)
{
        ATH5K_TRACE(ah->ah_sc);
        AR5K_ASSERT_ENTRY(queue, ah->ah_capabilities.cap_queues.q_tx_num);

        if (ah->ah_txq[queue].tqi_type == AR5K_TX_QUEUE_INACTIVE)
                return -EIO;

        memcpy(&ah->ah_txq[queue], queue_info, sizeof(struct ath5k_txq_info));

        /* XXX: Is this supported on 5210? */
        if ((queue_info->tqi_type == AR5K_TX_QUEUE_DATA &&
                        ((queue_info->tqi_subtype == AR5K_WME_AC_VI) ||
                        (queue_info->tqi_subtype == AR5K_WME_AC_VO))) ||
                        queue_info->tqi_type == AR5K_TX_QUEUE_UAPSD)
                ah->ah_txq[queue].tqi_flags |= AR5K_TXQ_FLAG_POST_FR_BKOFF_DIS;

        return 0;
}
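
/*
 * Typical flow (sketch; "qnum" below is illustrative): a caller that wants
 * to retune an already active queue reads the current info, tweaks it and
 * writes it back, e.g.
 *
 *	struct ath5k_txq_info qi;
 *
 *	ath5k_hw_get_tx_queueprops(ah, qnum, &qi);
 *	qi.tqi_aifs = 2;
 *	ath5k_hw_set_tx_queueprops(ah, qnum, &qi);
 *	ath5k_hw_reset_tx_queue(ah, qnum);
 *
 * Note that ath5k_hw_set_tx_queueprops() only updates the software state
 * in ah->ah_txq[]; ath5k_hw_reset_tx_queue() below is what actually
 * programs the QCU/DCU registers.
 */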

/*
 * Initialize a transmit queue
 */
int ath5k_hw_setup_tx_queue(struct ath5k_hw *ah, enum ath5k_tx_queue queue_type,
		struct ath5k_txq_info *queue_info)
{
        unsigned int queue;
        int ret;

        ATH5K_TRACE(ah->ah_sc);

        /*
         * Get queue by type
         */
        /* 5210 only has 2 queues */
        if (ah->ah_version == AR5K_AR5210) {
                switch (queue_type) {
                case AR5K_TX_QUEUE_DATA:
                        queue = AR5K_TX_QUEUE_ID_NOQCU_DATA;
                        break;
                case AR5K_TX_QUEUE_BEACON:
                case AR5K_TX_QUEUE_CAB:
                        queue = AR5K_TX_QUEUE_ID_NOQCU_BEACON;
                        break;
                default:
                        return -EINVAL;
                }
        } else {
                switch (queue_type) {
                case AR5K_TX_QUEUE_DATA:
                        for (queue = AR5K_TX_QUEUE_ID_DATA_MIN;
                                ah->ah_txq[queue].tqi_type !=
                                AR5K_TX_QUEUE_INACTIVE; queue++) {

                                if (queue > AR5K_TX_QUEUE_ID_DATA_MAX)
                                        return -EINVAL;
                        }
                        break;
                case AR5K_TX_QUEUE_UAPSD:
                        queue = AR5K_TX_QUEUE_ID_UAPSD;
                        break;
                case AR5K_TX_QUEUE_BEACON:
                        queue = AR5K_TX_QUEUE_ID_BEACON;
                        break;
                case AR5K_TX_QUEUE_CAB:
                        queue = AR5K_TX_QUEUE_ID_CAB;
                        break;
                case AR5K_TX_QUEUE_XR_DATA:
                        if (ah->ah_version != AR5K_AR5212)
                                ATH5K_ERR(ah->ah_sc,
                                        "XR data queues only supported in"
                                        " 5212!\n");
                        queue = AR5K_TX_QUEUE_ID_XR_DATA;
                        break;
                default:
                        return -EINVAL;
                }
        }

        /*
         * Setup internal queue structure
         */
        memset(&ah->ah_txq[queue], 0, sizeof(struct ath5k_txq_info));
        ah->ah_txq[queue].tqi_type = queue_type;

        if (queue_info != NULL) {
                queue_info->tqi_type = queue_type;
                ret = ath5k_hw_set_tx_queueprops(ah, queue, queue_info);
                if (ret)
                        return ret;
        }

        /*
         * We use ah_txq_status to hold a temp value for
         * the Secondary interrupt mask registers on 5211+,
         * check out ath5k_hw_reset_tx_queue
         */
        AR5K_Q_ENABLE_BITS(ah->ah_txq_status, queue);

        return queue;
}
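
/*
 * e.g. (sketch; the flag selection is illustrative) the driver core would
 * bring up a best-effort data queue like this and keep the returned number
 * for the rest of the queue API:
 *
 *	struct ath5k_txq_info qi = {
 *		.tqi_subtype = AR5K_WME_AC_BE,
 *		.tqi_flags = AR5K_TXQ_FLAG_TXOKINT_ENABLE |
 *			     AR5K_TXQ_FLAG_TXERRINT_ENABLE,
 *	};
 *	int qnum = ath5k_hw_setup_tx_queue(ah, AR5K_TX_QUEUE_DATA, &qi);
 *
 *	if (qnum < 0)
 *		return qnum;
 *
 * On 5211+ this only reserves the queue and records its properties;
 * ath5k_hw_reset_tx_queue() still has to run to program the hardware.
 */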

/*
 * Get number of pending frames
 * for a specific queue [5211+]
 */
u32 ath5k_hw_num_tx_pending(struct ath5k_hw *ah, unsigned int queue)
{
        u32 pending;
        ATH5K_TRACE(ah->ah_sc);
        AR5K_ASSERT_ENTRY(queue, ah->ah_capabilities.cap_queues.q_tx_num);

        /* Return if queue is declared inactive */
        if (ah->ah_txq[queue].tqi_type == AR5K_TX_QUEUE_INACTIVE)
                return false;

        /* XXX: How about AR5K_CFG_TXCNT ? */
        if (ah->ah_version == AR5K_AR5210)
                return false;

        pending = ath5k_hw_reg_read(ah, AR5K_QUEUE_STATUS(queue));
        pending &= AR5K_QCU_STS_FRMPENDCNT;

        /* It's possible to have no frames pending even if TXE
         * is set. To indicate that q has not stopped return
         * true */
        if (!pending && AR5K_REG_READ_Q(ah, AR5K_QCU_TXE, queue))
                return true;

        return pending;
}
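
/*
 * e.g. a caller waiting for a queue to drain before stopping DMA might
 * poll this (sketch; the retry count and delay below are illustrative):
 *
 *	for (i = 40; i > 0 && ath5k_hw_num_tx_pending(ah, queue); i--)
 *		udelay(100);
 *
 * Any non-zero return, whether it is the pending-frame count or the
 * TXE-still-set case above, therefore means "not drained yet".
 */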

/*
 * Set a transmit queue inactive
 */
void ath5k_hw_release_tx_queue(struct ath5k_hw *ah, unsigned int queue)
{
        ATH5K_TRACE(ah->ah_sc);
        if (WARN_ON(queue >= ah->ah_capabilities.cap_queues.q_tx_num))
                return;

        /* This queue will be skipped in further operations */
        ah->ah_txq[queue].tqi_type = AR5K_TX_QUEUE_INACTIVE;

        /* For SIMR setup */
        AR5K_Q_DISABLE_BITS(ah->ah_txq_status, queue);
}

/*
 * Set DFS properties for a transmit queue on DCU
 */
int ath5k_hw_reset_tx_queue(struct ath5k_hw *ah, unsigned int queue)
{
        u32 cw_min, cw_max, retry_lg, retry_sh;
        struct ath5k_txq_info *tq = &ah->ah_txq[queue];

        ATH5K_TRACE(ah->ah_sc);
        AR5K_ASSERT_ENTRY(queue, ah->ah_capabilities.cap_queues.q_tx_num);

        tq = &ah->ah_txq[queue];

        if (tq->tqi_type == AR5K_TX_QUEUE_INACTIVE)
                return 0;

        if (ah->ah_version == AR5K_AR5210) {
                /* Only handle data queues, others will be ignored */
                if (tq->tqi_type != AR5K_TX_QUEUE_DATA)
                        return 0;

                /* Set Slot time */
                ath5k_hw_reg_write(ah, ah->ah_turbo ?
                        AR5K_INIT_SLOT_TIME_TURBO : AR5K_INIT_SLOT_TIME,
                        AR5K_SLOT_TIME);
                /* Set ACK_CTS timeout */
                ath5k_hw_reg_write(ah, ah->ah_turbo ?
                        AR5K_INIT_ACK_CTS_TIMEOUT_TURBO :
                        AR5K_INIT_ACK_CTS_TIMEOUT, AR5K_SLOT_TIME);
                /* Set Transmit Latency */
                ath5k_hw_reg_write(ah, ah->ah_turbo ?
                        AR5K_INIT_TRANSMIT_LATENCY_TURBO :
                        AR5K_INIT_TRANSMIT_LATENCY, AR5K_USEC_5210);

                /* Set IFS0 */
                if (ah->ah_turbo) {
                        ath5k_hw_reg_write(ah, ((AR5K_INIT_SIFS_TURBO +
                                (ah->ah_aifs + tq->tqi_aifs) *
                                AR5K_INIT_SLOT_TIME_TURBO) <<
                                AR5K_IFS0_DIFS_S) | AR5K_INIT_SIFS_TURBO,
                                AR5K_IFS0);
                } else {
                        ath5k_hw_reg_write(ah, ((AR5K_INIT_SIFS +
                                (ah->ah_aifs + tq->tqi_aifs) *
                                AR5K_INIT_SLOT_TIME) << AR5K_IFS0_DIFS_S) |
                                AR5K_INIT_SIFS, AR5K_IFS0);
                }
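
                /*
                 * IFS0 packs SIFS into the low bits and DIFS into the
                 * AR5K_IFS0_DIFS field, with DIFS built the usual 802.11
                 * way: DIFS = SIFS + AIFS * slot_time. e.g. with the
                 * default ah_aifs of 2 and no extra per-queue AIFS this
                 * is SIFS plus two slots (sketch; the AR5K_INIT_*
                 * constants are in hardware-specific units, not usec).
                 */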
                /* Set IFS1 */
                ath5k_hw_reg_write(ah, ah->ah_turbo ?
                        AR5K_INIT_PROTO_TIME_CNTRL_TURBO :
                        AR5K_INIT_PROTO_TIME_CNTRL, AR5K_IFS1);
                /* Set AR5K_PHY_SETTLING */
                ath5k_hw_reg_write(ah, ah->ah_turbo ?
                        (ath5k_hw_reg_read(ah, AR5K_PHY_SETTLING) & ~0x7F)
                        | 0x38 :
                        (ath5k_hw_reg_read(ah, AR5K_PHY_SETTLING) & ~0x7F)
                        | 0x1C,
                        AR5K_PHY_SETTLING);
                /* Set Frame Control Register */
                ath5k_hw_reg_write(ah, ah->ah_turbo ?
                        (AR5K_PHY_FRAME_CTL_INI | AR5K_PHY_TURBO_MODE |
                        AR5K_PHY_TURBO_SHORT | 0x2020) :
                        (AR5K_PHY_FRAME_CTL_INI | 0x1020),
                        AR5K_PHY_FRAME_CTL_5210);
        }

        /*
         * Calculate cwmin/max by channel mode
         */
        cw_min = ah->ah_cw_min = AR5K_TUNE_CWMIN;
        cw_max = ah->ah_cw_max = AR5K_TUNE_CWMAX;
        ah->ah_aifs = AR5K_TUNE_AIFS;
        /* XR is only supported on 5212 */
        if (IS_CHAN_XR(ah->ah_current_channel) &&
                        ah->ah_version == AR5K_AR5212) {
                cw_min = ah->ah_cw_min = AR5K_TUNE_CWMIN_XR;
                cw_max = ah->ah_cw_max = AR5K_TUNE_CWMAX_XR;
                ah->ah_aifs = AR5K_TUNE_AIFS_XR;
        /* B mode is not supported on 5210 */
        } else if (IS_CHAN_B(ah->ah_current_channel) &&
                        ah->ah_version != AR5K_AR5210) {
                cw_min = ah->ah_cw_min = AR5K_TUNE_CWMIN_11B;
                cw_max = ah->ah_cw_max = AR5K_TUNE_CWMAX_11B;
                ah->ah_aifs = AR5K_TUNE_AIFS_11B;
        }

        cw_min = 1;
        while (cw_min < ah->ah_cw_min)
                cw_min = (cw_min << 1) | 1;

        cw_min = tq->tqi_cw_min < 0 ? (cw_min >> (-tq->tqi_cw_min)) :
                ((cw_min << tq->tqi_cw_min) + (1 << tq->tqi_cw_min) - 1);
        cw_max = tq->tqi_cw_max < 0 ? (cw_max >> (-tq->tqi_cw_max)) :
                ((cw_max << tq->tqi_cw_max) + (1 << tq->tqi_cw_max) - 1);
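
        /*
         * e.g. (sketch, assuming AR5K_TUNE_CWMIN is 15): the loop above
         * rounds ah_cw_min up to a 2^n - 1 value (15 stays 15); a
         * per-queue tqi_cw_min of 0 then leaves it at 15, +1 grows it to
         * 31 ((15 << 1) + (1 << 1) - 1) and -1 shrinks it to 7 (15 >> 1),
         * so tqi_cw_min/tqi_cw_max act as relative adjustments here rather
         * than absolute window sizes.
         */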

        /*
         * Calculate and set retry limits
         */
        if (ah->ah_software_retry) {
                /* XXX Need to test this */
                retry_lg = ah->ah_limit_tx_retries;
                retry_sh = retry_lg = retry_lg > AR5K_DCU_RETRY_LMT_SH_RETRY ?
                        AR5K_DCU_RETRY_LMT_SH_RETRY : retry_lg;
        } else {
                retry_lg = AR5K_INIT_LG_RETRY;
                retry_sh = AR5K_INIT_SH_RETRY;
        }

        /* No QCU/DCU [5210] */
        if (ah->ah_version == AR5K_AR5210) {
                ath5k_hw_reg_write(ah,
                        (cw_min << AR5K_NODCU_RETRY_LMT_CW_MIN_S)
                        | AR5K_REG_SM(AR5K_INIT_SLG_RETRY,
                                AR5K_NODCU_RETRY_LMT_SLG_RETRY)
                        | AR5K_REG_SM(AR5K_INIT_SSH_RETRY,
                                AR5K_NODCU_RETRY_LMT_SSH_RETRY)
                        | AR5K_REG_SM(retry_lg, AR5K_NODCU_RETRY_LMT_LG_RETRY)
                        | AR5K_REG_SM(retry_sh, AR5K_NODCU_RETRY_LMT_SH_RETRY),
                        AR5K_NODCU_RETRY_LMT);
        } else {
                /* QCU/DCU [5211+] */
                ath5k_hw_reg_write(ah,
                        AR5K_REG_SM(AR5K_INIT_SLG_RETRY,
                                AR5K_DCU_RETRY_LMT_SLG_RETRY) |
                        AR5K_REG_SM(AR5K_INIT_SSH_RETRY,
                                AR5K_DCU_RETRY_LMT_SSH_RETRY) |
                        AR5K_REG_SM(retry_lg, AR5K_DCU_RETRY_LMT_LG_RETRY) |
                        AR5K_REG_SM(retry_sh, AR5K_DCU_RETRY_LMT_SH_RETRY),
                        AR5K_QUEUE_DFS_RETRY_LIMIT(queue));

                /*===Rest is also for QCU/DCU only [5211+]===*/

                /*
                 * Set initial contention window (cw_min/cw_max)
                 * and arbitration interframe space (AIFS)...
                 */
                ath5k_hw_reg_write(ah,
                        AR5K_REG_SM(cw_min, AR5K_DCU_LCL_IFS_CW_MIN) |
                        AR5K_REG_SM(cw_max, AR5K_DCU_LCL_IFS_CW_MAX) |
                        AR5K_REG_SM(ah->ah_aifs + tq->tqi_aifs,
                                AR5K_DCU_LCL_IFS_AIFS),
                        AR5K_QUEUE_DFS_LOCAL_IFS(queue));

                /*
                 * Set misc registers
                 */
                /* Enable DCU early termination for this queue */
                AR5K_REG_ENABLE_BITS(ah, AR5K_QUEUE_MISC(queue),
                        AR5K_QCU_MISC_DCU_EARLY);

                /* Enable DCU to wait for next fragment from QCU */
                AR5K_REG_ENABLE_BITS(ah, AR5K_QUEUE_DFS_MISC(queue),
                        AR5K_DCU_MISC_FRAG_WAIT);

                /* On Maui and Spirit use the global seqnum on DCU */
                if (ah->ah_mac_version < AR5K_SREV_AR5211)
                        AR5K_REG_ENABLE_BITS(ah, AR5K_QUEUE_DFS_MISC(queue),
                                AR5K_DCU_MISC_SEQNUM_CTL);

                if (tq->tqi_cbr_period) {
                        ath5k_hw_reg_write(ah, AR5K_REG_SM(tq->tqi_cbr_period,
                                AR5K_QCU_CBRCFG_INTVAL) |
                                AR5K_REG_SM(tq->tqi_cbr_overflow_limit,
                                AR5K_QCU_CBRCFG_ORN_THRES),
                                AR5K_QUEUE_CBRCFG(queue));
                        AR5K_REG_ENABLE_BITS(ah, AR5K_QUEUE_MISC(queue),
                                AR5K_QCU_MISC_FRSHED_CBR);
                        if (tq->tqi_cbr_overflow_limit)
                                AR5K_REG_ENABLE_BITS(ah,
                                        AR5K_QUEUE_MISC(queue),
                                        AR5K_QCU_MISC_CBR_THRES_ENABLE);
                }

                if (tq->tqi_ready_time &&
                                (tq->tqi_type != AR5K_TX_QUEUE_CAB))
                        ath5k_hw_reg_write(ah, AR5K_REG_SM(tq->tqi_ready_time,
                                AR5K_QCU_RDYTIMECFG_INTVAL) |
                                AR5K_QCU_RDYTIMECFG_ENABLE,
                                AR5K_QUEUE_RDYTIMECFG(queue));

                if (tq->tqi_burst_time) {
                        ath5k_hw_reg_write(ah, AR5K_REG_SM(tq->tqi_burst_time,
                                AR5K_DCU_CHAN_TIME_DUR) |
                                AR5K_DCU_CHAN_TIME_ENABLE,
                                AR5K_QUEUE_DFS_CHANNEL_TIME(queue));

                        if (tq->tqi_flags
                                        & AR5K_TXQ_FLAG_RDYTIME_EXP_POLICY_ENABLE)
                                AR5K_REG_ENABLE_BITS(ah,
                                        AR5K_QUEUE_MISC(queue),
                                        AR5K_QCU_MISC_RDY_VEOL_POLICY);
                }

                if (tq->tqi_flags & AR5K_TXQ_FLAG_BACKOFF_DISABLE)
                        ath5k_hw_reg_write(ah, AR5K_DCU_MISC_POST_FR_BKOFF_DIS,
                                AR5K_QUEUE_DFS_MISC(queue));

                if (tq->tqi_flags & AR5K_TXQ_FLAG_FRAG_BURST_BACKOFF_ENABLE)
                        ath5k_hw_reg_write(ah, AR5K_DCU_MISC_BACKOFF_FRAG,
                                AR5K_QUEUE_DFS_MISC(queue));

                /*
                 * Set registers by queue type
                 */
                switch (tq->tqi_type) {
                case AR5K_TX_QUEUE_BEACON:
                        AR5K_REG_ENABLE_BITS(ah, AR5K_QUEUE_MISC(queue),
                                AR5K_QCU_MISC_FRSHED_DBA_GT |
                                AR5K_QCU_MISC_CBREXP_BCN_DIS |
                                AR5K_QCU_MISC_BCN_ENABLE);

                        AR5K_REG_ENABLE_BITS(ah, AR5K_QUEUE_DFS_MISC(queue),
                                (AR5K_DCU_MISC_ARBLOCK_CTL_GLOBAL <<
                                AR5K_DCU_MISC_ARBLOCK_CTL_S) |
                                AR5K_DCU_MISC_ARBLOCK_IGNORE |
                                AR5K_DCU_MISC_POST_FR_BKOFF_DIS |
                                AR5K_DCU_MISC_BCN_ENABLE);
                        break;

                case AR5K_TX_QUEUE_CAB:
                        AR5K_REG_ENABLE_BITS(ah, AR5K_QUEUE_MISC(queue),
                                AR5K_QCU_MISC_FRSHED_BCN_SENT_GT |
                                AR5K_QCU_MISC_CBREXP_DIS |
                                AR5K_QCU_MISC_RDY_VEOL_POLICY |
                                AR5K_QCU_MISC_CBREXP_BCN_DIS);

                        ath5k_hw_reg_write(ah, ((AR5K_TUNE_BEACON_INTERVAL -
                                (AR5K_TUNE_SW_BEACON_RESP -
                                AR5K_TUNE_DMA_BEACON_RESP) -
                                AR5K_TUNE_ADDITIONAL_SWBA_BACKOFF) * 1024) |
                                AR5K_QCU_RDYTIMECFG_ENABLE,
                                AR5K_QUEUE_RDYTIMECFG(queue));
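
                        /*
                         * The ready time written above is in usec: the
                         * AR5K_TUNE_* intervals are in TU and
                         * 1 TU = 1024 usec, hence the "* 1024".
                         * (Sketch: with a 100 TU beacon interval and
                         * ~8 TU of combined response/backoff slack the
                         * CAB queue stays ready for roughly
                         * 92 * 1024 usec of each beacon period.)
                         */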

                        AR5K_REG_ENABLE_BITS(ah, AR5K_QUEUE_DFS_MISC(queue),
                                (AR5K_DCU_MISC_ARBLOCK_CTL_GLOBAL <<
                                AR5K_DCU_MISC_ARBLOCK_CTL_S));
                        break;

                case AR5K_TX_QUEUE_UAPSD:
                        AR5K_REG_ENABLE_BITS(ah, AR5K_QUEUE_MISC(queue),
                                AR5K_QCU_MISC_CBREXP_DIS);
                        break;

                case AR5K_TX_QUEUE_DATA:
                default:
                        break;
                }

                /* TODO: Handle frame compression */

                /*
                 * Enable interrupts for this tx queue
                 * in the secondary interrupt mask registers
                 */
                if (tq->tqi_flags & AR5K_TXQ_FLAG_TXOKINT_ENABLE)
                        AR5K_Q_ENABLE_BITS(ah->ah_txq_imr_txok, queue);

                if (tq->tqi_flags & AR5K_TXQ_FLAG_TXERRINT_ENABLE)
                        AR5K_Q_ENABLE_BITS(ah->ah_txq_imr_txerr, queue);

                if (tq->tqi_flags & AR5K_TXQ_FLAG_TXURNINT_ENABLE)
                        AR5K_Q_ENABLE_BITS(ah->ah_txq_imr_txurn, queue);

                if (tq->tqi_flags & AR5K_TXQ_FLAG_TXDESCINT_ENABLE)
                        AR5K_Q_ENABLE_BITS(ah->ah_txq_imr_txdesc, queue);

                if (tq->tqi_flags & AR5K_TXQ_FLAG_TXEOLINT_ENABLE)
                        AR5K_Q_ENABLE_BITS(ah->ah_txq_imr_txeol, queue);

                if (tq->tqi_flags & AR5K_TXQ_FLAG_CBRORNINT_ENABLE)
                        AR5K_Q_ENABLE_BITS(ah->ah_txq_imr_cbrorn, queue);

                if (tq->tqi_flags & AR5K_TXQ_FLAG_CBRURNINT_ENABLE)
                        AR5K_Q_ENABLE_BITS(ah->ah_txq_imr_cbrurn, queue);

                if (tq->tqi_flags & AR5K_TXQ_FLAG_QTRIGINT_ENABLE)
                        AR5K_Q_ENABLE_BITS(ah->ah_txq_imr_qtrig, queue);

                if (tq->tqi_flags & AR5K_TXQ_FLAG_TXNOFRMINT_ENABLE)
                        AR5K_Q_ENABLE_BITS(ah->ah_txq_imr_nofrm, queue);

                /* Update secondary interrupt mask registers */

                /* Filter out inactive queues */
                ah->ah_txq_imr_txok &= ah->ah_txq_status;
                ah->ah_txq_imr_txerr &= ah->ah_txq_status;
                ah->ah_txq_imr_txurn &= ah->ah_txq_status;
                ah->ah_txq_imr_txdesc &= ah->ah_txq_status;
                ah->ah_txq_imr_txeol &= ah->ah_txq_status;
                ah->ah_txq_imr_cbrorn &= ah->ah_txq_status;
                ah->ah_txq_imr_cbrurn &= ah->ah_txq_status;
                ah->ah_txq_imr_qtrig &= ah->ah_txq_status;
                ah->ah_txq_imr_nofrm &= ah->ah_txq_status;

                ath5k_hw_reg_write(ah, AR5K_REG_SM(ah->ah_txq_imr_txok,
                        AR5K_SIMR0_QCU_TXOK) |
                        AR5K_REG_SM(ah->ah_txq_imr_txdesc,
                        AR5K_SIMR0_QCU_TXDESC), AR5K_SIMR0);
                ath5k_hw_reg_write(ah, AR5K_REG_SM(ah->ah_txq_imr_txerr,
                        AR5K_SIMR1_QCU_TXERR) |
                        AR5K_REG_SM(ah->ah_txq_imr_txeol,
                        AR5K_SIMR1_QCU_TXEOL), AR5K_SIMR1);
                /* Update SIMR2 but don't overwrite the rest of its settings */
                AR5K_REG_DISABLE_BITS(ah, AR5K_SIMR2, AR5K_SIMR2_QCU_TXURN);
                AR5K_REG_ENABLE_BITS(ah, AR5K_SIMR2,
                        AR5K_REG_SM(ah->ah_txq_imr_txurn,
                        AR5K_SIMR2_QCU_TXURN));
                ath5k_hw_reg_write(ah, AR5K_REG_SM(ah->ah_txq_imr_cbrorn,
                        AR5K_SIMR3_QCBRORN) |
                        AR5K_REG_SM(ah->ah_txq_imr_cbrurn,
                        AR5K_SIMR3_QCBRURN), AR5K_SIMR3);
                ath5k_hw_reg_write(ah, AR5K_REG_SM(ah->ah_txq_imr_qtrig,
                        AR5K_SIMR4_QTRIG), AR5K_SIMR4);
                /* Set TXNOFRM_QCU for the queues with TXNOFRM enabled */
                ath5k_hw_reg_write(ah, AR5K_REG_SM(ah->ah_txq_imr_nofrm,
                        AR5K_TXNOFRM_QCU), AR5K_TXNOFRM);
                /* If no queue has TXNOFRM enabled, disable the interrupt
                 * by setting AR5K_TXNOFRM to zero */
                if (ah->ah_txq_imr_nofrm == 0)
                        ath5k_hw_reg_write(ah, 0, AR5K_TXNOFRM);

                /* Set QCU mask for this DCU to save power */
                AR5K_REG_WRITE_Q(ah, AR5K_QUEUE_QCUMASK(queue), queue);
        }

        return 0;
}

/*
 * Get slot time from DCU
 */
unsigned int ath5k_hw_get_slot_time(struct ath5k_hw *ah)
{
        ATH5K_TRACE(ah->ah_sc);
        if (ah->ah_version == AR5K_AR5210)
                return ath5k_hw_clocktoh(ath5k_hw_reg_read(ah,
                                AR5K_SLOT_TIME) & 0xffff, ah->ah_turbo);
        else
                return ath5k_hw_reg_read(ah, AR5K_DCU_GBL_IFS_SLOT) & 0xffff;
}

/*
 * Set slot time on DCU
 */
int ath5k_hw_set_slot_time(struct ath5k_hw *ah, unsigned int slot_time)
{
        ATH5K_TRACE(ah->ah_sc);
        if (slot_time < AR5K_SLOT_TIME_9 || slot_time > AR5K_SLOT_TIME_MAX)
                return -EINVAL;

        if (ah->ah_version == AR5K_AR5210)
                ath5k_hw_reg_write(ah, ath5k_hw_htoclock(slot_time,
                                ah->ah_turbo), AR5K_SLOT_TIME);
        else
                ath5k_hw_reg_write(ah, slot_time, AR5K_DCU_GBL_IFS_SLOT);

        return 0;
}
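
/*
 * Usage sketch (illustrative, not taken from this file): a caller reacting
 * to ERP short-slot changes would typically end up doing something like
 *
 *	ath5k_hw_set_slot_time(ah, short_slot ? AR5K_SLOT_TIME_9
 *					      : AR5K_SLOT_TIME_20);
 *
 * i.e. 9 usec slots for short-slot operation and 20 usec otherwise; on
 * 5210 the value is first converted with ath5k_hw_htoclock() as shown
 * above before it reaches the AR5K_SLOT_TIME register.
 */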