htc.c

/*
 * Copyright (c) 2005-2011 Atheros Communications Inc.
 * Copyright (c) 2011-2013 Qualcomm Atheros, Inc.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include "core.h"
#include "hif.h"
#include "debug.h"

/********/
/* Send */
/********/

static inline void ath10k_htc_send_complete_check(struct ath10k_htc_ep *ep,
						  int force)
{
	/*
	 * Check whether HIF has any prior sends that have finished but have
	 * not yet had their post-processing done.
	 */
	ath10k_hif_send_complete_check(ep->htc->ar, ep->ul_pipe_id, force);
}

static void ath10k_htc_control_tx_complete(struct ath10k *ar,
					   struct sk_buff *skb)
{
	kfree_skb(skb);
}

static struct sk_buff *ath10k_htc_build_tx_ctrl_skb(void *ar)
{
	struct sk_buff *skb;
	struct ath10k_skb_cb *skb_cb;

	skb = dev_alloc_skb(ATH10K_HTC_CONTROL_BUFFER_SIZE);
	if (!skb) {
		ath10k_warn("Unable to allocate ctrl skb\n");
		return NULL;
	}

	skb_reserve(skb, 20); /* FIXME: why 20 bytes? */
	WARN_ONCE((unsigned long)skb->data & 3, "unaligned skb");

	skb_cb = ATH10K_SKB_CB(skb);
	memset(skb_cb, 0, sizeof(*skb_cb));

	ath10k_dbg(ATH10K_DBG_HTC, "%s: skb %p\n", __func__, skb);
	return skb;
}

static inline void ath10k_htc_restore_tx_skb(struct ath10k_htc *htc,
					     struct sk_buff *skb)
{
	ath10k_skb_unmap(htc->ar->dev, skb);
	skb_pull(skb, sizeof(struct ath10k_htc_hdr));
}

static void ath10k_htc_notify_tx_completion(struct ath10k_htc_ep *ep,
					    struct sk_buff *skb)
{
	ath10k_dbg(ATH10K_DBG_HTC, "%s: ep %d skb %p\n", __func__,
		   ep->eid, skb);

	ath10k_htc_restore_tx_skb(ep->htc, skb);

	if (!ep->ep_ops.ep_tx_complete) {
		ath10k_warn("no tx handler for eid %d\n", ep->eid);
		dev_kfree_skb_any(skb);
		return;
	}

	ep->ep_ops.ep_tx_complete(ep->htc->ar, skb);
}

/* assumes tx_lock is held */
static bool ath10k_htc_ep_need_credit_update(struct ath10k_htc_ep *ep)
{
	if (!ep->tx_credit_flow_enabled)
		return false;
	if (ep->tx_credits >= ep->tx_credits_per_max_message)
		return false;

	ath10k_dbg(ATH10K_DBG_HTC, "HTC: endpoint %d needs credit update\n",
		   ep->eid);
	return true;
}

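/*
 * Fill in the HTC header at the start of the skb: endpoint id, payload
 * length and a per-endpoint sequence number. The sequence number and the
 * credit-update flag are set under tx_lock so they stay consistent with
 * the endpoint's credit state.
 */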
static void ath10k_htc_prepare_tx_skb(struct ath10k_htc_ep *ep,
				      struct sk_buff *skb)
{
	struct ath10k_htc_hdr *hdr;

	hdr = (struct ath10k_htc_hdr *)skb->data;
	memset(hdr, 0, sizeof(*hdr));

	hdr->eid = ep->eid;
	hdr->len = __cpu_to_le16(skb->len - sizeof(*hdr));

	spin_lock_bh(&ep->htc->tx_lock);
	hdr->seq_no = ep->seq_no++;

	if (ath10k_htc_ep_need_credit_update(ep))
		hdr->flags |= ATH10K_HTC_FLAG_NEED_CREDIT_UPDATE;

	spin_unlock_bh(&ep->htc->tx_lock);
}

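/*
 * Hand one skb to HIF for transmission. On failure the credits reserved
 * for this skb are returned to the endpoint; -ENOSR means HIF is
 * temporarily out of resources, so the skb is requeued at the head of the
 * endpoint's tx queue instead of being completed as aborted.
 */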
static int ath10k_htc_issue_skb(struct ath10k_htc *htc,
				struct ath10k_htc_ep *ep,
				struct sk_buff *skb,
				u8 credits)
{
	struct ath10k_skb_cb *skb_cb = ATH10K_SKB_CB(skb);
	int ret;

	ath10k_dbg(ATH10K_DBG_HTC, "%s: ep %d skb %p\n", __func__,
		   ep->eid, skb);

	ath10k_htc_prepare_tx_skb(ep, skb);

	ret = ath10k_skb_map(htc->ar->dev, skb);
	if (ret)
		goto err;

	ret = ath10k_hif_send_head(htc->ar,
				   ep->ul_pipe_id,
				   ep->eid,
				   skb->len,
				   skb);
	if (unlikely(ret))
		goto err;

	return 0;
err:
	ath10k_warn("HTC issue failed: %d\n", ret);

	spin_lock_bh(&htc->tx_lock);
	ep->tx_credits += credits;
	spin_unlock_bh(&htc->tx_lock);

	/* this is the simplest way to handle out-of-resources for non-credit
	 * based endpoints. credit based endpoints can still get -ENOSR, but
	 * this is highly unlikely as credit reservation should prevent that */
	if (ret == -ENOSR) {
		spin_lock_bh(&htc->tx_lock);
		__skb_queue_head(&ep->tx_queue, skb);
		spin_unlock_bh(&htc->tx_lock);

		return ret;
	}

	skb_cb->is_aborted = true;
	ath10k_htc_notify_tx_completion(ep, skb);

	return ret;
}

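/*
 * Dequeue the next skb from a credit-flow-controlled endpoint. A message
 * consumes one credit per target_credit_size bytes, rounded up. If the
 * endpoint does not currently hold enough credits, the skb is put back at
 * the head of the queue and NULL is returned; otherwise the credits are
 * debited and the number used is reported through *credits.
 */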
static struct sk_buff *ath10k_htc_get_skb_credit_based(struct ath10k_htc *htc,
							struct ath10k_htc_ep *ep,
							u8 *credits)
{
	struct sk_buff *skb;
	struct ath10k_skb_cb *skb_cb;
	int credits_required;
	int remainder;
	unsigned int transfer_len;

	lockdep_assert_held(&htc->tx_lock);

	skb = __skb_dequeue(&ep->tx_queue);
	if (!skb)
		return NULL;

	skb_cb = ATH10K_SKB_CB(skb);
	transfer_len = skb->len;

	if (likely(transfer_len <= htc->target_credit_size)) {
		credits_required = 1;
	} else {
		/* figure out how many credits this message requires */
		credits_required = transfer_len / htc->target_credit_size;
		remainder = transfer_len % htc->target_credit_size;

		if (remainder)
			credits_required++;
	}

	ath10k_dbg(ATH10K_DBG_HTC, "Credits required %d got %d\n",
		   credits_required, ep->tx_credits);

	if (ep->tx_credits < credits_required) {
		__skb_queue_head(&ep->tx_queue, skb);
		return NULL;
	}

	ep->tx_credits -= credits_required;
	*credits = credits_required;
	return skb;
}

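/*
 * Per-endpoint transmit worker: drains the endpoint's tx queue, issuing
 * one skb at a time until the queue is empty, credits run out or HIF
 * reports -ENOSR. Scheduled from ath10k_htc_send(), from tx completion
 * for non-credit endpoints and whenever a credit report tops up credits.
 */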
static void ath10k_htc_send_work(struct work_struct *work)
{
	struct ath10k_htc_ep *ep = container_of(work,
						struct ath10k_htc_ep, send_work);
	struct ath10k_htc *htc = ep->htc;
	struct sk_buff *skb;
	u8 credits = 0;
	int ret;

	while (true) {
		if (ep->ul_is_polled)
			ath10k_htc_send_complete_check(ep, 0);

		spin_lock_bh(&htc->tx_lock);
		if (ep->tx_credit_flow_enabled)
			skb = ath10k_htc_get_skb_credit_based(htc, ep,
							      &credits);
		else
			skb = __skb_dequeue(&ep->tx_queue);
		spin_unlock_bh(&htc->tx_lock);

		if (!skb)
			break;

		ret = ath10k_htc_issue_skb(htc, ep, skb, credits);
		if (ret == -ENOSR)
			break;
	}
}

int ath10k_htc_send(struct ath10k_htc *htc,
		    enum ath10k_htc_ep_id eid,
		    struct sk_buff *skb)
{
	struct ath10k_htc_ep *ep = &htc->endpoint[eid];

	if (eid >= ATH10K_HTC_EP_COUNT) {
		ath10k_warn("Invalid endpoint id: %d\n", eid);
		return -ENOENT;
	}

	skb_push(skb, sizeof(struct ath10k_htc_hdr));

	spin_lock_bh(&htc->tx_lock);
	__skb_queue_tail(&ep->tx_queue, skb);
	spin_unlock_bh(&htc->tx_lock);

	queue_work(htc->ar->workqueue, &ep->send_work);
	return 0;
}

static int ath10k_htc_tx_completion_handler(struct ath10k *ar,
					     struct sk_buff *skb,
					     unsigned int eid)
{
	struct ath10k_htc *htc = &ar->htc;
	struct ath10k_htc_ep *ep = &htc->endpoint[eid];
	bool stopping;

	ath10k_htc_notify_tx_completion(ep, skb);
	/* the skb now belongs to the completion handler */

	spin_lock_bh(&htc->tx_lock);
	stopping = htc->stopping;
	spin_unlock_bh(&htc->tx_lock);

	if (!ep->tx_credit_flow_enabled && !stopping)
		/*
		 * note: when using TX credit flow, the re-checking of
		 * queues happens when credits flow back from the target.
		 * in the non-TX credit case, we recheck after the packet
		 * completes
		 */
		queue_work(ar->workqueue, &ep->send_work);

	return 0;
}

/* flush endpoint TX queue */
static void ath10k_htc_flush_endpoint_tx(struct ath10k_htc *htc,
					 struct ath10k_htc_ep *ep)
{
	struct sk_buff *skb;
	struct ath10k_skb_cb *skb_cb;

	spin_lock_bh(&htc->tx_lock);
	for (;;) {
		skb = __skb_dequeue(&ep->tx_queue);
		if (!skb)
			break;

		skb_cb = ATH10K_SKB_CB(skb);
		skb_cb->is_aborted = true;
		ath10k_htc_notify_tx_completion(ep, skb);
	}
	spin_unlock_bh(&htc->tx_lock);

	cancel_work_sync(&ep->send_work);
}

/***********/
/* Receive */
/***********/

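/*
 * Apply a credit report received from the target: each entry returns a
 * number of transmit credits to one endpoint. Endpoints that regain
 * credits and still have queued skbs get their send worker rescheduled.
 */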
static void
ath10k_htc_process_credit_report(struct ath10k_htc *htc,
				 const struct ath10k_htc_credit_report *report,
				 int len,
				 enum ath10k_htc_ep_id eid)
{
	struct ath10k_htc_ep *ep;
	int i, n_reports;

	if (len % sizeof(*report))
		ath10k_warn("Uneven credit report len %d", len);

	n_reports = len / sizeof(*report);

	spin_lock_bh(&htc->tx_lock);
	for (i = 0; i < n_reports; i++, report++) {
		if (report->eid >= ATH10K_HTC_EP_COUNT)
			break;

		ath10k_dbg(ATH10K_DBG_HTC, "ep %d got %d credits\n",
			   report->eid, report->credits);

		ep = &htc->endpoint[report->eid];
		ep->tx_credits += report->credits;

		if (ep->tx_credits && !skb_queue_empty(&ep->tx_queue))
			queue_work(htc->ar->workqueue, &ep->send_work);
	}
	spin_unlock_bh(&htc->tx_lock);
}

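/*
 * Walk the trailer that the target may append to an HTC message. The
 * trailer is a sequence of records, each with its own header; only credit
 * report records are handled here, anything else is logged and skipped.
 */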
static int ath10k_htc_process_trailer(struct ath10k_htc *htc,
				      u8 *buffer,
				      int length,
				      enum ath10k_htc_ep_id src_eid)
{
	int status = 0;
	struct ath10k_htc_record *record;
	u8 *orig_buffer;
	int orig_length;
	size_t len;

	orig_buffer = buffer;
	orig_length = length;

	while (length > 0) {
		record = (struct ath10k_htc_record *)buffer;

		if (length < sizeof(record->hdr)) {
			status = -EINVAL;
			break;
		}

		if (record->hdr.len > length) {
			/* no room left in buffer for record */
			ath10k_warn("Invalid record length: %d\n",
				    record->hdr.len);
			status = -EINVAL;
			break;
		}

		switch (record->hdr.id) {
		case ATH10K_HTC_RECORD_CREDITS:
			len = sizeof(struct ath10k_htc_credit_report);
			if (record->hdr.len < len) {
				ath10k_warn("Credit report too short\n");
				status = -EINVAL;
				break;
			}
			ath10k_htc_process_credit_report(htc,
							 record->credit_report,
							 record->hdr.len,
							 src_eid);
			break;
		default:
			ath10k_warn("Unhandled record: id:%d length:%d\n",
				    record->hdr.id, record->hdr.len);
			break;
		}

		if (status)
			break;

		/* multiple records may be present in a trailer */
		buffer += sizeof(record->hdr) + record->hdr.len;
		length -= sizeof(record->hdr) + record->hdr.len;
	}

	if (status)
		ath10k_dbg_dump(ATH10K_DBG_HTC, "htc rx bad trailer", "",
				orig_buffer, orig_length);

	return status;
}

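/*
 * HIF rx completion callback. Validates the HTC header, strips and
 * processes any trailer, then either consumes the message itself (HTC
 * control messages on endpoint 0) or passes the skb on to the endpoint's
 * rx completion callback. On any error, or after control handling, the
 * skb is freed here.
 */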
static int ath10k_htc_rx_completion_handler(struct ath10k *ar,
					    struct sk_buff *skb,
					    u8 pipe_id)
{
	int status = 0;
	struct ath10k_htc *htc = &ar->htc;
	struct ath10k_htc_hdr *hdr;
	struct ath10k_htc_ep *ep;
	u16 payload_len;
	u32 trailer_len = 0;
	size_t min_len;
	u8 eid;
	bool trailer_present;

	hdr = (struct ath10k_htc_hdr *)skb->data;
	skb_pull(skb, sizeof(*hdr));

	eid = hdr->eid;

	if (eid >= ATH10K_HTC_EP_COUNT) {
		ath10k_warn("HTC Rx: invalid eid %d\n", eid);
		ath10k_dbg_dump(ATH10K_DBG_HTC, "htc bad header", "",
				hdr, sizeof(*hdr));
		status = -EINVAL;
		goto out;
	}

	ep = &htc->endpoint[eid];

	/*
	 * If this endpoint that received a message from the target has
	 * a to-target HIF pipe whose send completions are polled rather
	 * than interrupt-driven, this is a good point to ask HIF to check
	 * whether it has any completed sends to handle.
	 */
	if (ep->ul_is_polled)
		ath10k_htc_send_complete_check(ep, 1);

	payload_len = __le16_to_cpu(hdr->len);

	if (payload_len + sizeof(*hdr) > ATH10K_HTC_MAX_LEN) {
		ath10k_warn("HTC rx frame too long, len: %zu\n",
			    payload_len + sizeof(*hdr));
		ath10k_dbg_dump(ATH10K_DBG_HTC, "htc bad rx pkt len", "",
				hdr, sizeof(*hdr));
		status = -EINVAL;
		goto out;
	}

	if (skb->len < payload_len) {
		ath10k_dbg(ATH10K_DBG_HTC,
			   "HTC Rx: insufficient length, got %d, expected %d\n",
			   skb->len, payload_len);
		ath10k_dbg_dump(ATH10K_DBG_HTC, "htc bad rx pkt len",
				"", hdr, sizeof(*hdr));
		status = -EINVAL;
		goto out;
	}

	/* get flags to check for trailer */
	trailer_present = hdr->flags & ATH10K_HTC_FLAG_TRAILER_PRESENT;
	if (trailer_present) {
		u8 *trailer;

		trailer_len = hdr->trailer_len;
		min_len = sizeof(struct ath10k_ath10k_htc_record_hdr);

		if ((trailer_len < min_len) ||
		    (trailer_len > payload_len)) {
			ath10k_warn("Invalid trailer length: %d\n",
				    trailer_len);
			status = -EPROTO;
			goto out;
		}

		trailer = (u8 *)hdr;
		trailer += sizeof(*hdr);
		trailer += payload_len;
		trailer -= trailer_len;
		status = ath10k_htc_process_trailer(htc, trailer,
						    trailer_len, hdr->eid);
		if (status)
			goto out;

		skb_trim(skb, skb->len - trailer_len);
	}

	if (((int)payload_len - (int)trailer_len) <= 0)
		/* zero length packet with trailer data, just drop these */
		goto out;

	if (eid == ATH10K_HTC_EP_0) {
		struct ath10k_htc_msg *msg = (struct ath10k_htc_msg *)skb->data;

		switch (__le16_to_cpu(msg->hdr.message_id)) {
		default:
			/* handle HTC control message */
			if (completion_done(&htc->ctl_resp)) {
				/*
				 * this is a fatal error, target should not be
				 * sending unsolicited messages on the ep 0
				 */
				ath10k_warn("HTC rx ctrl still processing\n");
				status = -EINVAL;
				complete(&htc->ctl_resp);
				goto out;
			}

			htc->control_resp_len =
				min_t(int, skb->len,
				      ATH10K_HTC_MAX_CTRL_MSG_LEN);

			memcpy(htc->control_resp_buffer, skb->data,
			       htc->control_resp_len);

			complete(&htc->ctl_resp);
			break;
		case ATH10K_HTC_MSG_SEND_SUSPEND_COMPLETE:
			htc->htc_ops.target_send_suspend_complete(ar);
		}
		goto out;
	}

	ath10k_dbg(ATH10K_DBG_HTC, "htc rx completion ep %d skb %p\n",
		   eid, skb);
	ep->ep_ops.ep_rx_complete(ar, skb);

	/* skb is now owned by the rx completion handler */
	skb = NULL;
out:
	kfree_skb(skb);
	return status;
}

static void ath10k_htc_control_rx_complete(struct ath10k *ar,
					   struct sk_buff *skb)
{
	/* This is unexpected. FW is not supposed to send regular rx on this
	 * endpoint. */
	ath10k_warn("unexpected htc rx\n");
	kfree_skb(skb);
}

/***************/
/* Init/Deinit */
/***************/

static const char *htc_service_name(enum ath10k_htc_svc_id id)
{
	switch (id) {
	case ATH10K_HTC_SVC_ID_RESERVED:
		return "Reserved";
	case ATH10K_HTC_SVC_ID_RSVD_CTRL:
		return "Control";
	case ATH10K_HTC_SVC_ID_WMI_CONTROL:
		return "WMI";
	case ATH10K_HTC_SVC_ID_WMI_DATA_BE:
		return "DATA BE";
	case ATH10K_HTC_SVC_ID_WMI_DATA_BK:
		return "DATA BK";
	case ATH10K_HTC_SVC_ID_WMI_DATA_VI:
		return "DATA VI";
	case ATH10K_HTC_SVC_ID_WMI_DATA_VO:
		return "DATA VO";
	case ATH10K_HTC_SVC_ID_NMI_CONTROL:
		return "NMI Control";
	case ATH10K_HTC_SVC_ID_NMI_DATA:
		return "NMI Data";
	case ATH10K_HTC_SVC_ID_HTT_DATA_MSG:
		return "HTT Data";
	case ATH10K_HTC_SVC_ID_TEST_RAW_STREAMS:
		return "RAW";
	}

	return "Unknown";
}

static void ath10k_htc_reset_endpoint_states(struct ath10k_htc *htc)
{
	struct ath10k_htc_ep *ep;
	int i;

	for (i = ATH10K_HTC_EP_0; i < ATH10K_HTC_EP_COUNT; i++) {
		ep = &htc->endpoint[i];
		ep->service_id = ATH10K_HTC_SVC_ID_UNUSED;
		ep->max_ep_message_len = 0;
		ep->max_tx_queue_depth = 0;
		ep->eid = i;
		skb_queue_head_init(&ep->tx_queue);
		ep->htc = htc;
		ep->tx_credit_flow_enabled = true;
		INIT_WORK(&ep->send_work, ath10k_htc_send_work);
	}
}

static void ath10k_htc_setup_target_buffer_assignments(struct ath10k_htc *htc)
{
	struct ath10k_htc_svc_tx_credits *entry;

	entry = &htc->service_tx_alloc[0];

	/*
	 * For PCIe, allocate all credits/HTC buffers to WMI. No buffers are
	 * used/required for data; data always remains on the host.
	 */
	entry++;
	entry->service_id = ATH10K_HTC_SVC_ID_WMI_CONTROL;
	entry->credit_allocation = htc->total_transmit_credits;
}

static u8 ath10k_htc_get_credit_allocation(struct ath10k_htc *htc,
					   u16 service_id)
{
	u8 allocation = 0;
	int i;

	for (i = 0; i < ATH10K_HTC_EP_COUNT; i++) {
		if (htc->service_tx_alloc[i].service_id == service_id)
			allocation =
			    htc->service_tx_alloc[i].credit_allocation;
	}

	return allocation;
}

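/*
 * Start HIF and wait for the target's HTC READY message on endpoint 0.
 * The READY message carries the total number of transmit credits and the
 * credit size; both must be non-zero. Afterwards the pseudo control
 * service (endpoint 0) is connected so control messages can be exchanged.
 */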
int ath10k_htc_wait_target(struct ath10k_htc *htc)
{
	int status = 0;
	struct ath10k_htc_svc_conn_req conn_req;
	struct ath10k_htc_svc_conn_resp conn_resp;
	struct ath10k_htc_msg *msg;
	u16 message_id;
	u16 credit_count;
	u16 credit_size;

	INIT_COMPLETION(htc->ctl_resp);

	status = ath10k_hif_start(htc->ar);
	if (status) {
		ath10k_err("could not start HIF (%d)\n", status);
		goto err_start;
	}

	status = wait_for_completion_timeout(&htc->ctl_resp,
					     ATH10K_HTC_WAIT_TIMEOUT_HZ);
	if (status <= 0) {
		if (status == 0)
			status = -ETIMEDOUT;
		ath10k_err("ctl_resp never came in (%d)\n", status);
		goto err_target;
	}

	if (htc->control_resp_len < sizeof(msg->hdr) + sizeof(msg->ready)) {
		ath10k_err("Invalid HTC ready msg len:%d\n",
			   htc->control_resp_len);
		status = -ECOMM;
		goto err_target;
	}

	msg = (struct ath10k_htc_msg *)htc->control_resp_buffer;
	message_id   = __le16_to_cpu(msg->hdr.message_id);
	credit_count = __le16_to_cpu(msg->ready.credit_count);
	credit_size  = __le16_to_cpu(msg->ready.credit_size);

	if (message_id != ATH10K_HTC_MSG_READY_ID) {
		ath10k_err("Invalid HTC ready msg: 0x%x\n", message_id);
		status = -ECOMM;
		goto err_target;
	}

	htc->total_transmit_credits = credit_count;
	htc->target_credit_size = credit_size;

	ath10k_dbg(ATH10K_DBG_HTC,
		   "Target ready! transmit resources: %d size:%d\n",
		   htc->total_transmit_credits,
		   htc->target_credit_size);

	if ((htc->total_transmit_credits == 0) ||
	    (htc->target_credit_size == 0)) {
		status = -ECOMM;
		ath10k_err("Invalid credit size received\n");
		goto err_target;
	}

	ath10k_htc_setup_target_buffer_assignments(htc);

	/* setup our pseudo HTC control endpoint connection */
	memset(&conn_req, 0, sizeof(conn_req));
	memset(&conn_resp, 0, sizeof(conn_resp));
	conn_req.ep_ops.ep_tx_complete = ath10k_htc_control_tx_complete;
	conn_req.ep_ops.ep_rx_complete = ath10k_htc_control_rx_complete;
	conn_req.max_send_queue_depth = ATH10K_NUM_CONTROL_TX_BUFFERS;
	conn_req.service_id = ATH10K_HTC_SVC_ID_RSVD_CTRL;

	/* connect fake service */
	status = ath10k_htc_connect_service(htc, &conn_req, &conn_resp);
	if (status) {
		ath10k_err("could not connect to htc service (%d)\n", status);
		goto err_target;
	}

	return 0;

err_target:
	ath10k_hif_stop(htc->ar);
err_start:
	return status;
}

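/*
 * Connect an HTC service to an endpoint. The pseudo control service is
 * always bound to endpoint 0 without talking to the target; every other
 * service requires a CONNECT_SERVICE message and a matching response that
 * assigns the endpoint id and maximum message size. Credit flow control
 * is left enabled only for the WMI control service.
 */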
int ath10k_htc_connect_service(struct ath10k_htc *htc,
			       struct ath10k_htc_svc_conn_req *conn_req,
			       struct ath10k_htc_svc_conn_resp *conn_resp)
{
	struct ath10k_htc_msg *msg;
	struct ath10k_htc_conn_svc *req_msg;
	struct ath10k_htc_conn_svc_response resp_msg_dummy;
	struct ath10k_htc_conn_svc_response *resp_msg = &resp_msg_dummy;
	enum ath10k_htc_ep_id assigned_eid = ATH10K_HTC_EP_COUNT;
	struct ath10k_htc_ep *ep;
	struct sk_buff *skb;
	unsigned int max_msg_size = 0;
	int length, status;
	bool disable_credit_flow_ctrl = false;
	u16 message_id, service_id, flags = 0;
	u8 tx_alloc = 0;

	/* special case for HTC pseudo control service */
	if (conn_req->service_id == ATH10K_HTC_SVC_ID_RSVD_CTRL) {
		disable_credit_flow_ctrl = true;
		assigned_eid = ATH10K_HTC_EP_0;
		max_msg_size = ATH10K_HTC_MAX_CTRL_MSG_LEN;
		memset(&resp_msg_dummy, 0, sizeof(resp_msg_dummy));
		goto setup;
	}

	tx_alloc = ath10k_htc_get_credit_allocation(htc,
						    conn_req->service_id);
	if (!tx_alloc)
		ath10k_dbg(ATH10K_DBG_HTC,
			   "HTC Service %s does not allocate target credits\n",
			   htc_service_name(conn_req->service_id));

	skb = ath10k_htc_build_tx_ctrl_skb(htc->ar);
	if (!skb) {
		ath10k_err("Failed to allocate HTC packet\n");
		return -ENOMEM;
	}

	length = sizeof(msg->hdr) + sizeof(msg->connect_service);
	skb_put(skb, length);
	memset(skb->data, 0, length);

	msg = (struct ath10k_htc_msg *)skb->data;
	msg->hdr.message_id =
		__cpu_to_le16(ATH10K_HTC_MSG_CONNECT_SERVICE_ID);

	flags |= SM(tx_alloc, ATH10K_HTC_CONN_FLAGS_RECV_ALLOC);

	/* Only enable credit flow control for WMI ctrl service */
	if (conn_req->service_id != ATH10K_HTC_SVC_ID_WMI_CONTROL) {
		flags |= ATH10K_HTC_CONN_FLAGS_DISABLE_CREDIT_FLOW_CTRL;
		disable_credit_flow_ctrl = true;
	}

	/* compose all flags before serializing them into the request */
	req_msg = &msg->connect_service;
	req_msg->flags = __cpu_to_le16(flags);
	req_msg->service_id = __cpu_to_le16(conn_req->service_id);

	INIT_COMPLETION(htc->ctl_resp);

	status = ath10k_htc_send(htc, ATH10K_HTC_EP_0, skb);
	if (status) {
		kfree_skb(skb);
		return status;
	}

	/* wait for response */
	status = wait_for_completion_timeout(&htc->ctl_resp,
					     ATH10K_HTC_CONN_SVC_TIMEOUT_HZ);
	if (status <= 0) {
		if (status == 0)
			status = -ETIMEDOUT;
		ath10k_err("Service connect timeout: %d\n", status);
		return status;
	}

	/* we controlled the buffer creation, it's aligned */
	msg = (struct ath10k_htc_msg *)htc->control_resp_buffer;
	resp_msg = &msg->connect_service_response;
	message_id = __le16_to_cpu(msg->hdr.message_id);
	service_id = __le16_to_cpu(resp_msg->service_id);

	if ((message_id != ATH10K_HTC_MSG_CONNECT_SERVICE_RESP_ID) ||
	    (htc->control_resp_len < sizeof(msg->hdr) +
	     sizeof(msg->connect_service_response))) {
		ath10k_err("Invalid resp message ID 0x%x", message_id);
		return -EPROTO;
	}

	ath10k_dbg(ATH10K_DBG_HTC,
		   "HTC Service %s connect response: status: 0x%x, assigned ep: 0x%x\n",
		   htc_service_name(service_id),
		   resp_msg->status, resp_msg->eid);

	conn_resp->connect_resp_code = resp_msg->status;

	/* check response status */
	if (resp_msg->status != ATH10K_HTC_CONN_SVC_STATUS_SUCCESS) {
		ath10k_err("HTC Service %s connect request failed: 0x%x\n",
			   htc_service_name(service_id),
			   resp_msg->status);
		return -EPROTO;
	}

	assigned_eid = (enum ath10k_htc_ep_id)resp_msg->eid;
	max_msg_size = __le16_to_cpu(resp_msg->max_msg_size);

setup:

	if (assigned_eid >= ATH10K_HTC_EP_COUNT)
		return -EPROTO;

	if (max_msg_size == 0)
		return -EPROTO;

	ep = &htc->endpoint[assigned_eid];
	ep->eid = assigned_eid;

	if (ep->service_id != ATH10K_HTC_SVC_ID_UNUSED)
		return -EPROTO;

	/* return assigned endpoint to caller */
	conn_resp->eid = assigned_eid;
	conn_resp->max_msg_len = __le16_to_cpu(resp_msg->max_msg_size);

	/* setup the endpoint */
	ep->service_id = conn_req->service_id;
	ep->max_tx_queue_depth = conn_req->max_send_queue_depth;
	ep->max_ep_message_len = __le16_to_cpu(resp_msg->max_msg_size);
	ep->tx_credits = tx_alloc;
	ep->tx_credit_size = htc->target_credit_size;
	ep->tx_credits_per_max_message = ep->max_ep_message_len /
					 htc->target_credit_size;

	if (ep->max_ep_message_len % htc->target_credit_size)
		ep->tx_credits_per_max_message++;

	/* copy all the callbacks */
	ep->ep_ops = conn_req->ep_ops;

	status = ath10k_hif_map_service_to_pipe(htc->ar,
						ep->service_id,
						&ep->ul_pipe_id,
						&ep->dl_pipe_id,
						&ep->ul_is_polled,
						&ep->dl_is_polled);
	if (status)
		return status;

	ath10k_dbg(ATH10K_DBG_HTC,
		   "HTC service: %s UL pipe: %d DL pipe: %d eid: %d ready\n",
		   htc_service_name(ep->service_id), ep->ul_pipe_id,
		   ep->dl_pipe_id, ep->eid);

	ath10k_dbg(ATH10K_DBG_HTC,
		   "EP %d UL polled: %d, DL polled: %d\n",
		   ep->eid, ep->ul_is_polled, ep->dl_is_polled);

	if (disable_credit_flow_ctrl && ep->tx_credit_flow_enabled) {
		ep->tx_credit_flow_enabled = false;
		ath10k_dbg(ATH10K_DBG_HTC,
			   "HTC service: %s eid: %d TX flow control disabled\n",
			   htc_service_name(ep->service_id), assigned_eid);
	}

	return status;
}

struct sk_buff *ath10k_htc_alloc_skb(int size)
{
	struct sk_buff *skb;

	skb = dev_alloc_skb(size + sizeof(struct ath10k_htc_hdr));
	if (!skb) {
		ath10k_warn("could not allocate HTC tx skb\n");
		return NULL;
	}

	skb_reserve(skb, sizeof(struct ath10k_htc_hdr));

	/* FW/HTC requires 4-byte aligned streams */
	if (!IS_ALIGNED((unsigned long)skb->data, 4))
		ath10k_warn("Unaligned HTC tx skb\n");

	return skb;
}

int ath10k_htc_start(struct ath10k_htc *htc)
{
	struct sk_buff *skb;
	int status = 0;
	struct ath10k_htc_msg *msg;

	skb = ath10k_htc_build_tx_ctrl_skb(htc->ar);
	if (!skb)
		return -ENOMEM;

	skb_put(skb, sizeof(msg->hdr) + sizeof(msg->setup_complete_ext));
	memset(skb->data, 0, skb->len);

	msg = (struct ath10k_htc_msg *)skb->data;
	msg->hdr.message_id =
		__cpu_to_le16(ATH10K_HTC_MSG_SETUP_COMPLETE_EX_ID);

	ath10k_dbg(ATH10K_DBG_HTC, "HTC is using TX credit flow control\n");

	status = ath10k_htc_send(htc, ATH10K_HTC_EP_0, skb);
	if (status) {
		kfree_skb(skb);
		return status;
	}

	return 0;
}

/*
 * stop HTC communications, i.e. stop interrupt reception, and flush all
 * queued buffers
 */
void ath10k_htc_stop(struct ath10k_htc *htc)
{
	int i;
	struct ath10k_htc_ep *ep;

	spin_lock_bh(&htc->tx_lock);
	htc->stopping = true;
	spin_unlock_bh(&htc->tx_lock);

	for (i = ATH10K_HTC_EP_0; i < ATH10K_HTC_EP_COUNT; i++) {
		ep = &htc->endpoint[i];
		ath10k_htc_flush_endpoint_tx(htc, ep);
	}

	ath10k_hif_stop(htc->ar);
	ath10k_htc_reset_endpoint_states(htc);
}

/* registered target arrival callback from the HIF layer */
int ath10k_htc_init(struct ath10k *ar)
{
	struct ath10k_hif_cb htc_callbacks;
	struct ath10k_htc_ep *ep = NULL;
	struct ath10k_htc *htc = &ar->htc;

	spin_lock_init(&htc->tx_lock);

	ath10k_htc_reset_endpoint_states(htc);

	/* setup HIF layer callbacks */
	htc_callbacks.rx_completion = ath10k_htc_rx_completion_handler;
	htc_callbacks.tx_completion = ath10k_htc_tx_completion_handler;
	htc->ar = ar;

	/* Get HIF default pipe for HTC message exchange */
	ep = &htc->endpoint[ATH10K_HTC_EP_0];

	ath10k_hif_set_callbacks(ar, &htc_callbacks);
	ath10k_hif_get_default_pipe(ar, &ep->ul_pipe_id, &ep->dl_pipe_id);

	init_completion(&htc->ctl_resp);

	return 0;
}