mci.c

/*
 * Copyright (c) 2010-2011 Atheros Communications Inc.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include <linux/dma-mapping.h>
#include <linux/slab.h>

#include "ath9k.h"
#include "mci.h"

static const u8 ath_mci_duty_cycle[] = { 55, 50, 60, 70, 80, 85, 90, 95, 98 };
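
/*
 * Look up a stored BT profile by connection handle. Returns the matching
 * entry from mci->info, or NULL if the list is empty or nothing matches.
 */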
static struct ath_mci_profile_info*
ath_mci_find_profile(struct ath_mci_profile *mci,
                     struct ath_mci_profile_info *info)
{
        struct ath_mci_profile_info *entry;

        if (list_empty(&mci->info))
                return NULL;

        list_for_each_entry(entry, &mci->info, list) {
                if (entry->conn_handle == info->conn_handle)
                        return entry;
        }

        return NULL;
}
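
/*
 * Add a new BT profile to the coex list, enforcing the per-type limits
 * (ATH_MCI_MAX_SCO_PROFILE voice links, ATH_MCI_MAX_ACL_PROFILE ACL links).
 * Returns false if a limit is reached or the allocation fails.
 */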
static bool ath_mci_add_profile(struct ath_common *common,
                                struct ath_mci_profile *mci,
                                struct ath_mci_profile_info *info)
{
        struct ath_mci_profile_info *entry;

        if ((mci->num_sco == ATH_MCI_MAX_SCO_PROFILE) &&
            (info->type == MCI_GPM_COEX_PROFILE_VOICE))
                return false;

        if (((NUM_PROF(mci) - mci->num_sco) == ATH_MCI_MAX_ACL_PROFILE) &&
            (info->type != MCI_GPM_COEX_PROFILE_VOICE))
                return false;

        entry = kzalloc(sizeof(*entry), GFP_ATOMIC);
        if (!entry)
                return false;

        memcpy(entry, info, 10);
        INC_PROF(mci, info);
        list_add_tail(&entry->list, &mci->info);

        return true;
}
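
/*
 * Remove a profile entry from the coex list, update the per-type counters
 * and free it. A NULL entry is ignored.
 */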
static void ath_mci_del_profile(struct ath_common *common,
                                struct ath_mci_profile *mci,
                                struct ath_mci_profile_info *entry)
{
        if (!entry)
                return;

        DEC_PROF(mci, entry);
        list_del(&entry->list);
        kfree(entry);
}
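
/*
 * Drop all tracked BT profiles and reset the aggregation limit and the
 * management (critical link) count.
 */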
void ath_mci_flush_profile(struct ath_mci_profile *mci)
{
        struct ath_mci_profile_info *info, *tinfo;

        mci->aggr_limit = 0;
        mci->num_mgmt = 0;

        if (list_empty(&mci->info))
                return;

        list_for_each_entry_safe(info, tinfo, &mci->info, list) {
                list_del(&info->list);
                DEC_PROF(mci, info);
                kfree(info);
        }
}
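
/*
 * Cap the aggregation limit when the WLAN share of the coex period is
 * short, so that an aggregate still fits into the WLAN slot.
 */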
static void ath_mci_adjust_aggr_limit(struct ath_btcoex *btcoex)
{
        struct ath_mci_profile *mci = &btcoex->mci;
        u32 wlan_airtime = btcoex->btcoex_period *
                           (100 - btcoex->duty_cycle) / 100;

        /*
         * Scale: wlan_airtime is in ms, aggr_limit is in 0.25 ms.
         * When wlan_airtime is less than 4 ms, the aggregation limit has to
         * be adjusted to half of wlan_airtime to ensure that the aggregation
         * can fit without colliding with BT traffic.
         */
        if ((wlan_airtime <= 4) &&
            (!mci->aggr_limit || (mci->aggr_limit > (2 * wlan_airtime))))
                mci->aggr_limit = 2 * wlan_airtime;
}
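
/*
 * Recompute the BT coex parameters (duty cycle, coex period, stomp type and
 * aggregation limit) from the current set of BT profiles, then restart the
 * coex timers with the new settings.
 */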
static void ath_mci_update_scheme(struct ath_softc *sc)
{
        struct ath_common *common = ath9k_hw_common(sc->sc_ah);
        struct ath_btcoex *btcoex = &sc->btcoex;
        struct ath_mci_profile *mci = &btcoex->mci;
        struct ath9k_hw_mci *mci_hw = &sc->sc_ah->btcoex_hw.mci;
        struct ath_mci_profile_info *info;
        u32 num_profile = NUM_PROF(mci);

        if (mci_hw->config & ATH_MCI_CONFIG_DISABLE_TUNING)
                goto skip_tuning;

        mci->aggr_limit = 0;
        btcoex->duty_cycle = ath_mci_duty_cycle[num_profile];
        btcoex->btcoex_period = ATH_MCI_DEF_BT_PERIOD;

        if (NUM_PROF(mci))
                btcoex->bt_stomp_type = ATH_BTCOEX_STOMP_LOW;
        else
                btcoex->bt_stomp_type = mci->num_mgmt ? ATH_BTCOEX_STOMP_ALL :
                                                        ATH_BTCOEX_STOMP_LOW;

        if (num_profile == 1) {
                info = list_first_entry(&mci->info,
                                        struct ath_mci_profile_info,
                                        list);

                if (mci->num_sco) {
                        if (info->T == 12)
                                mci->aggr_limit = 8;
                        else if (info->T == 6) {
                                mci->aggr_limit = 6;
                                btcoex->duty_cycle = 30;
                        } else
                                mci->aggr_limit = 6;

                        ath_dbg(common, MCI,
                                "Single SCO, aggregation limit %d 1/4 ms\n",
                                mci->aggr_limit);
                } else if (mci->num_pan || mci->num_other_acl) {
                        /*
                         * For a single PAN/FTP profile, allocate 35% for BT
                         * to improve WLAN throughput.
                         */
                        btcoex->duty_cycle = 35;
                        btcoex->btcoex_period = 53;
                        ath_dbg(common, MCI,
                                "Single PAN/FTP bt period %d ms dutycycle %d\n",
                                btcoex->btcoex_period, btcoex->duty_cycle);
                } else if (mci->num_hid) {
                        btcoex->duty_cycle = 30;
                        mci->aggr_limit = 6;
                        ath_dbg(common, MCI,
                                "Multiple attempt/timeout single HID "
                                "aggregation limit 1.5 ms dutycycle 30%%\n");
                }
        } else if (num_profile == 2) {
                if (mci->num_hid == 2)
                        btcoex->duty_cycle = 30;

                mci->aggr_limit = 6;
                ath_dbg(common, MCI,
                        "Two BT profiles aggr limit 1.5 ms dutycycle %d%%\n",
                        btcoex->duty_cycle);
        } else if (num_profile >= 3) {
                mci->aggr_limit = 4;
                ath_dbg(common, MCI,
                        "Three or more profiles aggregation limit 1 ms\n");
        }

skip_tuning:
        if (IS_CHAN_2GHZ(sc->sc_ah->curchan)) {
                if (IS_CHAN_HT(sc->sc_ah->curchan))
                        ath_mci_adjust_aggr_limit(btcoex);
                else
                        btcoex->btcoex_period >>= 1;
        }

        ath9k_btcoex_timer_pause(sc);
        ath9k_hw_btcoex_disable(sc->sc_ah);

        if (IS_CHAN_5GHZ(sc->sc_ah->curchan))
                return;

        btcoex->duty_cycle += (mci->num_bdr ? ATH_MCI_BDR_DUTY_CYCLE : 0);
        if (btcoex->duty_cycle > ATH_MCI_MAX_DUTY_CYCLE)
                btcoex->duty_cycle = ATH_MCI_MAX_DUTY_CYCLE;

        btcoex->btcoex_no_stomp = btcoex->btcoex_period * 1000 *
                                  (100 - btcoex->duty_cycle) / 100;

        ath9k_hw_btcoex_enable(sc->sc_ah);
        ath9k_btcoex_timer_resume(sc);
}
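
/*
 * Quiet the WLAN side (stop the mac80211 queues and RX, drain TX) while the
 * BT calibration completes, then restart RX and wake the queues.
 */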
static void ath_mci_wait_btcal_done(struct ath_softc *sc)
{
        struct ath_hw *ah = sc->sc_ah;

        /* Stop tx & rx */
        ieee80211_stop_queues(sc->hw);
        ath_stoprecv(sc);
        ath_drain_all_txq(sc, false);

        /* Wait for cal done */
        ar9003_mci_start_reset(ah, ah->curchan);

        /* Resume tx & rx */
        ath_startrecv(sc);
        ieee80211_wake_queues(sc->hw);
}
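
/*
 * Handle BT calibration GPM messages: pause WLAN while BT calibrates on a
 * CAL_REQ, and acknowledge a CAL_GRANT with WLAN_CAL_DONE.
 */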
static void ath_mci_cal_msg(struct ath_softc *sc, u8 opcode, u8 *rx_payload)
{
        struct ath_hw *ah = sc->sc_ah;
        struct ath_common *common = ath9k_hw_common(ah);
        struct ath9k_hw_mci *mci_hw = &ah->btcoex_hw.mci;
        u32 payload[4] = {0, 0, 0, 0};

        switch (opcode) {
        case MCI_GPM_BT_CAL_REQ:
                if (mci_hw->bt_state == MCI_BT_AWAKE) {
                        mci_hw->bt_state = MCI_BT_CAL_START;
                        ath_mci_wait_btcal_done(sc);
                }
                ath_dbg(common, MCI, "MCI State : %d\n", mci_hw->bt_state);
                break;
        case MCI_GPM_BT_CAL_GRANT:
                MCI_GPM_SET_CAL_TYPE(payload, MCI_GPM_WLAN_CAL_DONE);
                ar9003_mci_send_message(sc->sc_ah, MCI_GPM, 0, payload,
                                        16, false, true);
                break;
        default:
                ath_dbg(common, MCI, "Unknown GPM CAL message\n");
                break;
        }
}
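
/*
 * Deferred work: recompute the coex scheme outside of interrupt context.
 */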
static void ath9k_mci_work(struct work_struct *work)
{
        struct ath_softc *sc = container_of(work, struct ath_softc, mci_work);

        ath_mci_update_scheme(sc);
}
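
/*
 * Apply a BT profile update: add, replace or delete the corresponding
 * profile entry and schedule a coex scheme update.
 */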
static void ath_mci_process_profile(struct ath_softc *sc,
                                    struct ath_mci_profile_info *info)
{
        struct ath_common *common = ath9k_hw_common(sc->sc_ah);
        struct ath_btcoex *btcoex = &sc->btcoex;
        struct ath_mci_profile *mci = &btcoex->mci;
        struct ath_mci_profile_info *entry = NULL;

        entry = ath_mci_find_profile(mci, info);
        if (entry) {
                /*
                 * Two MCI interrupts are generated while connecting to
                 * headset and A2DP profile, but only one MCI interrupt
                 * is generated with the last added profile type while
                 * disconnecting both profiles.
                 * So while adding the second profile type, decrement
                 * the first one.
                 */
                if (entry->type != info->type) {
                        DEC_PROF(mci, entry);
                        INC_PROF(mci, info);
                }
                memcpy(entry, info, 10);
        }

        if (info->start) {
                if (!entry && !ath_mci_add_profile(common, mci, info))
                        return;
        } else
                ath_mci_del_profile(common, mci, entry);

        ieee80211_queue_work(sc->hw, &sc->mci_work);
}
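
/*
 * Track critical-link status updates in the mci->status bitmap, recount the
 * management links and schedule a scheme update when the count changes.
 */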
static void ath_mci_process_status(struct ath_softc *sc,
                                   struct ath_mci_profile_status *status)
{
        struct ath_btcoex *btcoex = &sc->btcoex;
        struct ath_mci_profile *mci = &btcoex->mci;
        struct ath_mci_profile_info info;
        int i = 0, old_num_mgmt = mci->num_mgmt;

        /* Link status types are not handled */
        if (status->is_link)
                return;

        info.conn_handle = status->conn_handle;
        if (ath_mci_find_profile(mci, &info))
                return;

        if (status->conn_handle >= ATH_MCI_MAX_PROFILE)
                return;

        if (status->is_critical)
                __set_bit(status->conn_handle, mci->status);
        else
                __clear_bit(status->conn_handle, mci->status);

        mci->num_mgmt = 0;
        do {
                if (test_bit(i, mci->status))
                        mci->num_mgmt++;
        } while (++i < ATH_MCI_MAX_PROFILE);

        if (old_num_mgmt != mci->num_mgmt)
                ieee80211_queue_work(sc->hw, &sc->mci_work);
}
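
/*
 * Dispatch a GPM coex message from BT: answer version/status queries and
 * feed profile and status updates into the coex state machine.
 */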
static void ath_mci_msg(struct ath_softc *sc, u8 opcode, u8 *rx_payload)
{
        struct ath_hw *ah = sc->sc_ah;
        struct ath_mci_profile_info profile_info;
        struct ath_mci_profile_status profile_status;
        struct ath_common *common = ath9k_hw_common(sc->sc_ah);
        u8 major, minor;
        u32 seq_num;

        if (ar9003_mci_state(ah, MCI_STATE_NEED_FLUSH_BT_INFO) &&
            ar9003_mci_state(ah, MCI_STATE_ENABLE)) {
                ath_dbg(common, MCI, "(MCI) Need to flush BT profiles\n");
                ath_mci_flush_profile(&sc->btcoex.mci);
                ar9003_mci_state(ah, MCI_STATE_SEND_STATUS_QUERY);
        }

        switch (opcode) {
        case MCI_GPM_COEX_VERSION_QUERY:
                ar9003_mci_state(ah, MCI_STATE_SEND_WLAN_COEX_VERSION);
                break;
        case MCI_GPM_COEX_VERSION_RESPONSE:
                major = *(rx_payload + MCI_GPM_COEX_B_MAJOR_VERSION);
                minor = *(rx_payload + MCI_GPM_COEX_B_MINOR_VERSION);
                ar9003_mci_set_bt_version(ah, major, minor);
                break;
        case MCI_GPM_COEX_STATUS_QUERY:
                ar9003_mci_send_wlan_channels(ah);
                break;
        case MCI_GPM_COEX_BT_PROFILE_INFO:
                memcpy(&profile_info,
                       (rx_payload + MCI_GPM_COEX_B_PROFILE_TYPE), 10);

                if ((profile_info.type == MCI_GPM_COEX_PROFILE_UNKNOWN) ||
                    (profile_info.type >= MCI_GPM_COEX_PROFILE_MAX)) {
                        ath_dbg(common, MCI,
                                "Illegal profile type = %d, state = %d\n",
                                profile_info.type,
                                profile_info.start);
                        break;
                }

                ath_mci_process_profile(sc, &profile_info);
                break;
        case MCI_GPM_COEX_BT_STATUS_UPDATE:
                profile_status.is_link = *(rx_payload +
                                           MCI_GPM_COEX_B_STATUS_TYPE);
                profile_status.conn_handle = *(rx_payload +
                                               MCI_GPM_COEX_B_STATUS_LINKID);
                profile_status.is_critical = *(rx_payload +
                                               MCI_GPM_COEX_B_STATUS_STATE);
                seq_num = *((u32 *)(rx_payload + 12));
                ath_dbg(common, MCI,
                        "BT_Status_Update: is_link=%d, linkId=%d, state=%d, SEQ=%u\n",
                        profile_status.is_link, profile_status.conn_handle,
                        profile_status.is_critical, seq_num);

                ath_mci_process_status(sc, &profile_status);
                break;
        default:
                ath_dbg(common, MCI, "Unknown GPM COEX message = 0x%02x\n",
                        opcode);
                break;
        }
}
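
/*
 * Allocate one DMA-coherent buffer that holds both the MCI scheduling
 * message area and the GPM ring, and hand it to the ar9003 MCI layer.
 */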
int ath_mci_setup(struct ath_softc *sc)
{
        struct ath_common *common = ath9k_hw_common(sc->sc_ah);
        struct ath_mci_coex *mci = &sc->mci_coex;
        struct ath_mci_buf *buf = &mci->sched_buf;

        buf->bf_addr = dma_alloc_coherent(sc->dev,
                                          ATH_MCI_SCHED_BUF_SIZE +
                                          ATH_MCI_GPM_BUF_SIZE,
                                          &buf->bf_paddr, GFP_KERNEL);
        if (buf->bf_addr == NULL) {
                ath_dbg(common, FATAL, "MCI buffer alloc failed\n");
                return -ENOMEM;
        }

        memset(buf->bf_addr, MCI_GPM_RSVD_PATTERN,
               ATH_MCI_SCHED_BUF_SIZE + ATH_MCI_GPM_BUF_SIZE);

        mci->sched_buf.bf_len = ATH_MCI_SCHED_BUF_SIZE;

        mci->gpm_buf.bf_len = ATH_MCI_GPM_BUF_SIZE;
        mci->gpm_buf.bf_addr = (u8 *)mci->sched_buf.bf_addr +
                               mci->sched_buf.bf_len;
        mci->gpm_buf.bf_paddr = mci->sched_buf.bf_paddr + mci->sched_buf.bf_len;

        ar9003_mci_setup(sc->sc_ah, mci->gpm_buf.bf_paddr,
                         mci->gpm_buf.bf_addr, (mci->gpm_buf.bf_len >> 4),
                         mci->sched_buf.bf_paddr);

        INIT_WORK(&sc->mci_work, ath9k_mci_work);

        ath_dbg(common, MCI, "MCI Initialized\n");

        return 0;
}
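
/*
 * Free the shared MCI DMA buffer and tear down the ar9003 MCI state.
 */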
void ath_mci_cleanup(struct ath_softc *sc)
{
        struct ath_common *common = ath9k_hw_common(sc->sc_ah);
        struct ath_hw *ah = sc->sc_ah;
        struct ath_mci_coex *mci = &sc->mci_coex;
        struct ath_mci_buf *buf = &mci->sched_buf;

        if (buf->bf_addr)
                dma_free_coherent(sc->dev,
                                  ATH_MCI_SCHED_BUF_SIZE + ATH_MCI_GPM_BUF_SIZE,
                                  buf->bf_addr, buf->bf_paddr);

        ar9003_mci_cleanup(ah);

        ath_dbg(common, MCI, "MCI De-Initialized\n");
}
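
/*
 * MCI interrupt handler: process wake/sleep handshakes, drain the GPM ring
 * and dispatch calibration and coex messages, and recover from RX errors.
 */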
void ath_mci_intr(struct ath_softc *sc)
{
        struct ath_mci_coex *mci = &sc->mci_coex;
        struct ath_hw *ah = sc->sc_ah;
        struct ath_common *common = ath9k_hw_common(ah);
        struct ath9k_hw_mci *mci_hw = &ah->btcoex_hw.mci;
        u32 mci_int, mci_int_rxmsg;
        u32 offset, subtype, opcode;
        u32 *pgpm;
        u32 more_data = MCI_GPM_MORE;
        bool skip_gpm = false;

        ar9003_mci_get_interrupt(sc->sc_ah, &mci_int, &mci_int_rxmsg);

        if (ar9003_mci_state(ah, MCI_STATE_ENABLE) == 0) {
                ar9003_mci_get_next_gpm_offset(ah, true, NULL);
                return;
        }

        if (mci_int_rxmsg & AR_MCI_INTERRUPT_RX_MSG_REQ_WAKE) {
                u32 payload[4] = { 0xffffffff, 0xffffffff,
                                   0xffffffff, 0xffffff00};

                /*
                 * The following REMOTE_RESET and SYS_WAKING used to be sent
                 * only when BT woke up. Now they are always sent, as a
                 * recovery method to reset BT MCI's RX alignment.
                 */
                ar9003_mci_send_message(ah, MCI_REMOTE_RESET, 0,
                                        payload, 16, true, false);
                ar9003_mci_send_message(ah, MCI_SYS_WAKING, 0,
                                        NULL, 0, true, false);

                mci_int_rxmsg &= ~AR_MCI_INTERRUPT_RX_MSG_REQ_WAKE;
                ar9003_mci_state(ah, MCI_STATE_RESET_REQ_WAKE);

                /*
                 * Always do this for recovery, 2G/5G toggling and LNA_TRANS.
                 */
                ar9003_mci_state(ah, MCI_STATE_SET_BT_AWAKE);
        }

        if (mci_int_rxmsg & AR_MCI_INTERRUPT_RX_MSG_SYS_WAKING) {
                mci_int_rxmsg &= ~AR_MCI_INTERRUPT_RX_MSG_SYS_WAKING;

                if ((mci_hw->bt_state == MCI_BT_SLEEP) &&
                    (ar9003_mci_state(ah, MCI_STATE_REMOTE_SLEEP) !=
                     MCI_BT_SLEEP))
                        ar9003_mci_state(ah, MCI_STATE_SET_BT_AWAKE);
        }

        if (mci_int_rxmsg & AR_MCI_INTERRUPT_RX_MSG_SYS_SLEEPING) {
                mci_int_rxmsg &= ~AR_MCI_INTERRUPT_RX_MSG_SYS_SLEEPING;

                if ((mci_hw->bt_state == MCI_BT_AWAKE) &&
                    (ar9003_mci_state(ah, MCI_STATE_REMOTE_SLEEP) !=
                     MCI_BT_AWAKE))
                        mci_hw->bt_state = MCI_BT_SLEEP;
        }

        if ((mci_int & AR_MCI_INTERRUPT_RX_INVALID_HDR) ||
            (mci_int & AR_MCI_INTERRUPT_CONT_INFO_TIMEOUT)) {
                ar9003_mci_state(ah, MCI_STATE_RECOVER_RX);
                skip_gpm = true;
        }

        if (mci_int_rxmsg & AR_MCI_INTERRUPT_RX_MSG_SCHD_INFO) {
                mci_int_rxmsg &= ~AR_MCI_INTERRUPT_RX_MSG_SCHD_INFO;
                offset = ar9003_mci_state(ah, MCI_STATE_LAST_SCHD_MSG_OFFSET);
        }

        if (mci_int_rxmsg & AR_MCI_INTERRUPT_RX_MSG_GPM) {
                mci_int_rxmsg &= ~AR_MCI_INTERRUPT_RX_MSG_GPM;

                while (more_data == MCI_GPM_MORE) {
                        pgpm = mci->gpm_buf.bf_addr;
                        offset = ar9003_mci_get_next_gpm_offset(ah, false,
                                                                &more_data);
                        if (offset == MCI_GPM_INVALID)
                                break;

                        pgpm += (offset >> 2);

                        /*
                         * The first dword is a timer.
                         * The real data starts from the second dword.
                         */
                        subtype = MCI_GPM_TYPE(pgpm);
                        opcode = MCI_GPM_OPCODE(pgpm);

                        if (skip_gpm)
                                goto recycle;

                        if (MCI_GPM_IS_CAL_TYPE(subtype)) {
                                ath_mci_cal_msg(sc, subtype, (u8 *)pgpm);
                        } else {
                                switch (subtype) {
                                case MCI_GPM_COEX_AGENT:
                                        ath_mci_msg(sc, opcode, (u8 *)pgpm);
                                        break;
                                default:
                                        break;
                                }
                        }
                recycle:
                        MCI_GPM_RECYCLE(pgpm);
                }
        }

        if (mci_int_rxmsg & AR_MCI_INTERRUPT_RX_HW_MSG_MASK) {
                if (mci_int_rxmsg & AR_MCI_INTERRUPT_RX_MSG_LNA_CONTROL)
                        mci_int_rxmsg &= ~AR_MCI_INTERRUPT_RX_MSG_LNA_CONTROL;

                if (mci_int_rxmsg & AR_MCI_INTERRUPT_RX_MSG_LNA_INFO)
                        mci_int_rxmsg &= ~AR_MCI_INTERRUPT_RX_MSG_LNA_INFO;

                if (mci_int_rxmsg & AR_MCI_INTERRUPT_RX_MSG_CONT_INFO) {
                        int value_dbm = MS(mci_hw->cont_status,
                                           AR_MCI_CONT_RSSI_POWER);

                        mci_int_rxmsg &= ~AR_MCI_INTERRUPT_RX_MSG_CONT_INFO;

                        ath_dbg(common, MCI,
                                "MCI CONT_INFO: (%s) pri = %d pwr = %d dBm\n",
                                MS(mci_hw->cont_status, AR_MCI_CONT_TXRX) ?
                                "tx" : "rx",
                                MS(mci_hw->cont_status, AR_MCI_CONT_PRIORITY),
                                value_dbm);
                }

                if (mci_int_rxmsg & AR_MCI_INTERRUPT_RX_MSG_CONT_NACK)
                        mci_int_rxmsg &= ~AR_MCI_INTERRUPT_RX_MSG_CONT_NACK;

                if (mci_int_rxmsg & AR_MCI_INTERRUPT_RX_MSG_CONT_RST)
                        mci_int_rxmsg &= ~AR_MCI_INTERRUPT_RX_MSG_CONT_RST;
        }

        if ((mci_int & AR_MCI_INTERRUPT_RX_INVALID_HDR) ||
            (mci_int & AR_MCI_INTERRUPT_CONT_INFO_TIMEOUT)) {
                mci_int &= ~(AR_MCI_INTERRUPT_RX_INVALID_HDR |
                             AR_MCI_INTERRUPT_CONT_INFO_TIMEOUT);
                ath_mci_msg(sc, MCI_GPM_COEX_NOOP, NULL);
        }
}
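
/*
 * Enable the MCI interrupt when BT coex is enabled and the hardware
 * supports MCI.
 */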
void ath_mci_enable(struct ath_softc *sc)
{
        struct ath_common *common = ath9k_hw_common(sc->sc_ah);

        if (!common->btcoex_enabled)
                return;

        if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_MCI)
                sc->sc_ah->imask |= ATH9K_INT_MCI;
}