htc_pipe.c

  1. /*
  2. * Copyright (c) 2007-2011 Atheros Communications Inc.
  3. *
  4. * Permission to use, copy, modify, and/or distribute this software for any
  5. * purpose with or without fee is hereby granted, provided that the above
  6. * copyright notice and this permission notice appear in all copies.
  7. *
  8. * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
  9. * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
  10. * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
  11. * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
  12. * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
  13. * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
  14. * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
  15. */
  16. #include "core.h"
  17. #include "debug.h"
  18. #include "hif-ops.h"
  19. #define HTC_PACKET_CONTAINER_ALLOCATION 32
  20. #define HTC_CONTROL_BUFFER_SIZE (HTC_MAX_CTRL_MSG_LEN + HTC_HDR_LENGTH)
  21. static int ath6kl_htc_pipe_tx(struct htc_target *handle,
  22. struct htc_packet *packet);
  23. static void ath6kl_htc_pipe_cleanup(struct htc_target *handle);
  24. /* htc pipe tx path */
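/*
 * restore_tx_packet() undoes the HTC frame header that htc_issue_packets()
 * pushed onto the skb, so the buffer is handed back to the caller in its
 * original state.
 */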
  25. static inline void restore_tx_packet(struct htc_packet *packet)
  26. {
  27. if (packet->info.tx.flags & HTC_FLAGS_TX_FIXUP_NETBUF) {
  28. skb_pull(packet->skb, sizeof(struct htc_frame_hdr));
  29. packet->info.tx.flags &= ~HTC_FLAGS_TX_FIXUP_NETBUF;
  30. }
  31. }
  32. static void do_send_completion(struct htc_endpoint *ep,
  33. struct list_head *queue_to_indicate)
  34. {
  35. struct htc_packet *packet;
  36. if (list_empty(queue_to_indicate)) {
  37. /* nothing to indicate */
  38. return;
  39. }
  40. if (ep->ep_cb.tx_comp_multi != NULL) {
  41. ath6kl_dbg(ATH6KL_DBG_HTC,
  42. "%s: calling ep %d, send complete multiple callback (%d pkts)\n",
  43. __func__, ep->eid,
  44. get_queue_depth(queue_to_indicate));
  45. /*
  46. * a multiple send complete handler is being used,
  47. * pass the queue to the handler
  48. */
  49. ep->ep_cb.tx_comp_multi(ep->target, queue_to_indicate);
  50. /*
  51. * all packets are now owned by the callback,
  52. * reset queue to be safe
  53. */
  54. INIT_LIST_HEAD(queue_to_indicate);
  55. } else {
  56. /* using legacy EpTxComplete */
  57. do {
  58. packet = list_first_entry(queue_to_indicate,
  59. struct htc_packet, list);
  60. list_del(&packet->list);
  61. ath6kl_dbg(ATH6KL_DBG_HTC,
  62. "%s: calling ep %d send complete callback on packet 0x%p\n",
  63. __func__, ep->eid, packet);
  64. ep->ep_cb.tx_complete(ep->target, packet);
  65. } while (!list_empty(queue_to_indicate));
  66. }
  67. }
  68. static void send_packet_completion(struct htc_target *target,
  69. struct htc_packet *packet)
  70. {
  71. struct htc_endpoint *ep = &target->endpoint[packet->endpoint];
  72. struct list_head container;
  73. restore_tx_packet(packet);
  74. INIT_LIST_HEAD(&container);
  75. list_add_tail(&packet->list, &container);
  76. /* do completion */
  77. do_send_completion(ep, &container);
  78. }
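/*
 * Move packets from the endpoint TX queue to the caller's queue for as long
 * as enough TX credits remain. Each dequeued packet records the credits it
 * consumed, its send flags and a sequence number. Called with the TX lock
 * held.
 */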
  79. static void get_htc_packet_credit_based(struct htc_target *target,
  80. struct htc_endpoint *ep,
  81. struct list_head *queue)
  82. {
  83. int credits_required;
  84. int remainder;
  85. u8 send_flags;
  86. struct htc_packet *packet;
  87. unsigned int transfer_len;
  88. /* NOTE : the TX lock is held when this function is called */
  89. /* loop until we can grab as many packets out of the queue as we can */
  90. while (true) {
  91. send_flags = 0;
  92. if (list_empty(&ep->txq))
  93. break;
  94. /* get packet at head, but don't remove it */
  95. packet = list_first_entry(&ep->txq, struct htc_packet, list);
  96. ath6kl_dbg(ATH6KL_DBG_HTC,
  97. "%s: got head packet:0x%p , queue depth: %d\n",
  98. __func__, packet, get_queue_depth(&ep->txq));
  99. transfer_len = packet->act_len + HTC_HDR_LENGTH;
  100. if (transfer_len <= target->tgt_cred_sz) {
  101. credits_required = 1;
  102. } else {
  103. /* figure out how many credits this message requires */
  104. credits_required = transfer_len / target->tgt_cred_sz;
  105. remainder = transfer_len % target->tgt_cred_sz;
  106. if (remainder)
  107. credits_required++;
  108. }
  109. ath6kl_dbg(ATH6KL_DBG_HTC, "%s: creds required:%d got:%d\n",
  110. __func__, credits_required, ep->cred_dist.credits);
  111. if (ep->eid == ENDPOINT_0) {
  112. /*
  113. * endpoint 0 is special, it always has a credit and
  114. * does not require credit based flow control
  115. */
  116. credits_required = 0;
  117. } else {
  118. if (ep->cred_dist.credits < credits_required)
  119. break;
  120. ep->cred_dist.credits -= credits_required;
  121. ep->ep_st.cred_cosumd += credits_required;
  122. /* check if we need credits back from the target */
  123. if (ep->cred_dist.credits <
  124. ep->cred_dist.cred_per_msg) {
  125. /* tell the target we need credits ASAP! */
  126. send_flags |= HTC_FLAGS_NEED_CREDIT_UPDATE;
  127. ep->ep_st.cred_low_indicate += 1;
  128. ath6kl_dbg(ATH6KL_DBG_HTC,
  129. "%s: host needs credits\n",
  130. __func__);
  131. }
  132. }
  133. /* now we can fully dequeue */
  134. packet = list_first_entry(&ep->txq, struct htc_packet, list);
  135. list_del(&packet->list);
  136. /* save the number of credits this packet consumed */
  137. packet->info.tx.cred_used = credits_required;
  138. /* save send flags */
  139. packet->info.tx.flags = send_flags;
  140. packet->info.tx.seqno = ep->seqno;
  141. ep->seqno++;
  142. /* queue this packet into the caller's queue */
  143. list_add_tail(&packet->list, queue);
  144. }
  145. }
  146. static void get_htc_packet(struct htc_target *target,
  147. struct htc_endpoint *ep,
  148. struct list_head *queue, int resources)
  149. {
  150. struct htc_packet *packet;
  151. /* NOTE : the TX lock is held when this function is called */
  152. /* loop until we can grab as many packets out of the queue as we can */
  153. while (resources) {
  154. if (list_empty(&ep->txq))
  155. break;
  156. packet = list_first_entry(&ep->txq, struct htc_packet, list);
  157. list_del(&packet->list);
  158. ath6kl_dbg(ATH6KL_DBG_HTC,
  159. "%s: got packet:0x%p , new queue depth: %d\n",
  160. __func__, packet, get_queue_depth(&ep->txq));
  161. packet->info.tx.seqno = ep->seqno;
  162. packet->info.tx.flags = 0;
  163. packet->info.tx.cred_used = 0;
  164. ep->seqno++;
  165. /* queue this packet into the caller's queue */
  166. list_add_tail(&packet->list, queue);
  167. resources--;
  168. }
  169. }
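/*
 * Push an HTC frame header onto each packet's skb, record the packet in the
 * endpoint's TX lookup queue and hand the skb to the HIF pipe. On failure the
 * packet is returned to the caller's queue, its credits are reclaimed and any
 * remaining packets are completed with the error status.
 */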
  170. static int htc_issue_packets(struct htc_target *target,
  171. struct htc_endpoint *ep,
  172. struct list_head *pkt_queue)
  173. {
  174. int status = 0;
  175. u16 payload_len;
  176. struct sk_buff *skb;
  177. struct htc_frame_hdr *htc_hdr;
  178. struct htc_packet *packet;
  179. ath6kl_dbg(ATH6KL_DBG_HTC,
  180. "%s: queue: 0x%p, pkts %d\n", __func__,
  181. pkt_queue, get_queue_depth(pkt_queue));
  182. while (!list_empty(pkt_queue)) {
  183. packet = list_first_entry(pkt_queue, struct htc_packet, list);
  184. list_del(&packet->list);
  185. skb = packet->skb;
  186. if (!skb) {
  187. WARN_ON_ONCE(1);
  188. status = -EINVAL;
  189. break;
  190. }
  191. payload_len = packet->act_len;
  192. /* setup HTC frame header */
  193. htc_hdr = (struct htc_frame_hdr *) skb_push(skb,
  194. sizeof(*htc_hdr));
  195. if (!htc_hdr) {
  196. WARN_ON_ONCE(1);
  197. status = -EINVAL;
  198. break;
  199. }
  200. packet->info.tx.flags |= HTC_FLAGS_TX_FIXUP_NETBUF;
  201. /* Endianness? */
  202. put_unaligned((u16) payload_len, &htc_hdr->payld_len);
  203. htc_hdr->flags = packet->info.tx.flags;
  204. htc_hdr->eid = (u8) packet->endpoint;
  205. htc_hdr->ctrl[0] = 0;
  206. htc_hdr->ctrl[1] = (u8) packet->info.tx.seqno;
  207. spin_lock_bh(&target->tx_lock);
  208. /* store in look up queue to match completions */
  209. list_add_tail(&packet->list, &ep->pipe.tx_lookup_queue);
  210. ep->ep_st.tx_issued += 1;
  211. spin_unlock_bh(&target->tx_lock);
  212. status = ath6kl_hif_pipe_send(target->dev->ar,
  213. ep->pipe.pipeid_ul, NULL, skb);
  214. if (status != 0) {
  215. if (status != -ENOMEM) {
  216. /* TODO: if more than 1 endpoint maps to the
  217. * same PipeID, it is possible to run out of
  218. * resources in the HIF layer.
  219. * Don't emit the error
  220. */
  221. ath6kl_dbg(ATH6KL_DBG_HTC,
  222. "%s: failed status:%d\n",
  223. __func__, status);
  224. }
  225. spin_lock_bh(&target->tx_lock);
  226. list_del(&packet->list);
  227. /* reclaim credits */
  228. ep->cred_dist.credits += packet->info.tx.cred_used;
  229. spin_unlock_bh(&target->tx_lock);
  230. /* put it back into the callers queue */
  231. list_add(&packet->list, pkt_queue);
  232. break;
  233. }
  234. }
  235. if (status != 0) {
  236. while (!list_empty(pkt_queue)) {
  237. if (status != -ENOMEM) {
  238. ath6kl_dbg(ATH6KL_DBG_HTC,
  239. "%s: failed pkt:0x%p status:%d\n",
  240. __func__, packet, status);
  241. }
  242. packet = list_first_entry(pkt_queue,
  243. struct htc_packet, list);
  244. list_del(&packet->list);
  245. packet->status = status;
  246. send_packet_completion(target, packet);
  247. }
  248. }
  249. return status;
  250. }
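/*
 * Queue the caller's packets onto the endpoint TX queue, honouring the
 * maximum queue depth and the tx_full callback on overflow, then drain the
 * queue using either credit-based or HIF-resource-based accounting. Only one
 * thread drains at a time, guarded by tx_proc_cnt.
 */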
  251. static enum htc_send_queue_result htc_try_send(struct htc_target *target,
  252. struct htc_endpoint *ep,
  253. struct list_head *txq)
  254. {
  255. struct list_head send_queue; /* temp queue to hold packets */
  256. struct htc_packet *packet, *tmp_pkt;
  257. struct ath6kl *ar = target->dev->ar;
  258. enum htc_send_full_action action;
  259. int tx_resources, overflow, txqueue_depth, i, good_pkts;
  260. u8 pipeid;
  261. ath6kl_dbg(ATH6KL_DBG_HTC, "%s: (queue:0x%p depth:%d)\n",
  262. __func__, txq,
  263. (txq == NULL) ? 0 : get_queue_depth(txq));
  264. /* init the local send queue */
  265. INIT_LIST_HEAD(&send_queue);
  266. /*
  267. * txq == NULL means the caller didn't provide
  268. * a queue and just wants us to check the
  269. * endpoint queues and send
  270. */
  271. if (txq != NULL) {
  272. if (list_empty(txq)) {
  273. /* empty queue */
  274. return HTC_SEND_QUEUE_DROP;
  275. }
  276. spin_lock_bh(&target->tx_lock);
  277. txqueue_depth = get_queue_depth(&ep->txq);
  278. spin_unlock_bh(&target->tx_lock);
  279. if (txqueue_depth >= ep->max_txq_depth) {
  280. /* we've already overflowed */
  281. overflow = get_queue_depth(txq);
  282. } else {
  283. /* get how much we will overflow by */
  284. overflow = txqueue_depth;
  285. overflow += get_queue_depth(txq);
  286. /* get how much we will overflow the TX queue by */
  287. overflow -= ep->max_txq_depth;
  288. }
  289. /* if overflow is negative or zero, we are okay */
  290. if (overflow > 0) {
  291. ath6kl_dbg(ATH6KL_DBG_HTC,
  292. "%s: Endpoint %d, TX queue will overflow :%d, Tx Depth:%d, Max:%d\n",
  293. __func__, ep->eid, overflow, txqueue_depth,
  294. ep->max_txq_depth);
  295. }
  296. if ((overflow <= 0) ||
  297. (ep->ep_cb.tx_full == NULL)) {
  298. /*
  299. * all packets will fit or caller did not provide send
  300. * full indication handler -- just move all of them
  301. * to the local send_queue object
  302. */
  303. list_splice_tail_init(txq, &send_queue);
  304. } else {
  305. good_pkts = get_queue_depth(txq) - overflow;
  306. if (good_pkts < 0) {
  307. WARN_ON_ONCE(1);
  308. return HTC_SEND_QUEUE_DROP;
  309. }
  310. /* we have overflowed, and a callback is provided */
  311. /* dequeue all non-overflow packets to the sendqueue */
  312. for (i = 0; i < good_pkts; i++) {
  313. /* pop off caller's queue */
  314. packet = list_first_entry(txq,
  315. struct htc_packet,
  316. list);
  317. list_del(&packet->list);
  318. /* insert into local queue */
  319. list_add_tail(&packet->list, &send_queue);
  320. }
  321. /*
  322. * the caller's queue now holds only the packets that won't fit;
  323. * walk through it and indicate each one to
  324. * the send-full handler
  325. */
  326. list_for_each_entry_safe(packet, tmp_pkt,
  327. txq, list) {
  328. ath6kl_dbg(ATH6KL_DBG_HTC,
  329. "%s: Indicat overflowed TX pkts: %p\n",
  330. __func__, packet);
  331. action = ep->ep_cb.tx_full(ep->target, packet);
  332. if (action == HTC_SEND_FULL_DROP) {
  333. /* callback wants the packet dropped */
  334. ep->ep_st.tx_dropped += 1;
  335. /* leave this one in the caller's queue
  336. * for cleanup */
  337. } else {
  338. /* callback wants to keep this packet,
  339. * remove from caller's queue */
  340. list_del(&packet->list);
  341. /* put it in the send queue */
  342. list_add_tail(&packet->list,
  343. &send_queue);
  344. }
  345. }
  346. if (list_empty(&send_queue)) {
  347. /* no packets made it in, caller will cleanup */
  348. return HTC_SEND_QUEUE_DROP;
  349. }
  350. }
  351. }
  352. if (!ep->pipe.tx_credit_flow_enabled) {
  353. tx_resources =
  354. ath6kl_hif_pipe_get_free_queue_number(ar,
  355. ep->pipe.pipeid_ul);
  356. } else {
  357. tx_resources = 0;
  358. }
  359. spin_lock_bh(&target->tx_lock);
  360. if (!list_empty(&send_queue)) {
  361. /* transfer packets to tail */
  362. list_splice_tail_init(&send_queue, &ep->txq);
  363. if (!list_empty(&send_queue)) {
  364. WARN_ON_ONCE(1);
  365. spin_unlock_bh(&target->tx_lock);
  366. return HTC_SEND_QUEUE_DROP;
  367. }
  368. INIT_LIST_HEAD(&send_queue);
  369. }
  370. /* increment tx processing count on entry */
  371. ep->tx_proc_cnt++;
  372. if (ep->tx_proc_cnt > 1) {
  373. /*
  374. * Another thread or task is draining the TX queues on this
  375. * endpoint; that thread will reset the tx processing count
  376. * when the queue is drained.
  377. */
  378. ep->tx_proc_cnt--;
  379. spin_unlock_bh(&target->tx_lock);
  380. return HTC_SEND_QUEUE_OK;
  381. }
  382. /***** beyond this point only 1 thread may enter ******/
  383. /*
  384. * Now drain the endpoint TX queue for transmission as long as we have
  385. * enough transmit resources.
  386. */
  387. while (true) {
  388. if (get_queue_depth(&ep->txq) == 0)
  389. break;
  390. if (ep->pipe.tx_credit_flow_enabled) {
  391. /*
  392. * Credit based mechanism provides flow control
  393. * based on target transmit resource availability;
  394. * we assume that the HIF layer will always have
  395. * bus resources greater than target transmit
  396. * resources.
  397. */
  398. get_htc_packet_credit_based(target, ep, &send_queue);
  399. } else {
  400. /*
  401. * Get all packets for this endpoint that we can
  402. * for this pass.
  403. */
  404. get_htc_packet(target, ep, &send_queue, tx_resources);
  405. }
  406. if (get_queue_depth(&send_queue) == 0) {
  407. /*
  408. * Didn't get packets because we ran out of resources
  409. * or the TX queue was drained.
  410. */
  411. break;
  412. }
  413. spin_unlock_bh(&target->tx_lock);
  414. /* send what we can */
  415. htc_issue_packets(target, ep, &send_queue);
  416. if (!ep->pipe.tx_credit_flow_enabled) {
  417. pipeid = ep->pipe.pipeid_ul;
  418. tx_resources =
  419. ath6kl_hif_pipe_get_free_queue_number(ar, pipeid);
  420. }
  421. spin_lock_bh(&target->tx_lock);
  422. }
  423. /* done with this endpoint, we can clear the count */
  424. ep->tx_proc_cnt = 0;
  425. spin_unlock_bh(&target->tx_lock);
  426. return HTC_SEND_QUEUE_OK;
  427. }
  428. /* htc control packet manipulation */
  429. static void destroy_htc_txctrl_packet(struct htc_packet *packet)
  430. {
  431. struct sk_buff *skb;
  432. skb = packet->skb;
  433. if (skb != NULL)
  434. dev_kfree_skb(skb);
  435. kfree(packet);
  436. }
  437. static struct htc_packet *build_htc_txctrl_packet(void)
  438. {
  439. struct htc_packet *packet = NULL;
  440. struct sk_buff *skb;
  441. packet = kzalloc(sizeof(struct htc_packet), GFP_KERNEL);
  442. if (packet == NULL)
  443. return NULL;
  444. skb = __dev_alloc_skb(HTC_CONTROL_BUFFER_SIZE, GFP_KERNEL);
  445. if (skb == NULL) {
  446. kfree(packet);
  447. return NULL;
  448. }
  449. packet->skb = skb;
  450. return packet;
  451. }
  452. static void htc_free_txctrl_packet(struct htc_target *target,
  453. struct htc_packet *packet)
  454. {
  455. destroy_htc_txctrl_packet(packet);
  456. }
  457. static struct htc_packet *htc_alloc_txctrl_packet(struct htc_target *target)
  458. {
  459. return build_htc_txctrl_packet();
  460. }
  461. static void htc_txctrl_complete(struct htc_target *target,
  462. struct htc_packet *packet)
  463. {
  464. htc_free_txctrl_packet(target, packet);
  465. }
  466. #define MAX_MESSAGE_SIZE 1536
  467. static int htc_setup_target_buffer_assignments(struct htc_target *target)
  468. {
  469. int status, credits, credit_per_maxmsg, i;
  470. struct htc_pipe_txcredit_alloc *entry;
  471. unsigned int hif_usbaudioclass = 0;
  472. credit_per_maxmsg = MAX_MESSAGE_SIZE / target->tgt_cred_sz;
  473. if (MAX_MESSAGE_SIZE % target->tgt_cred_sz)
  474. credit_per_maxmsg++;
  475. /* TODO, this should be configured by the caller! */
  476. credits = target->tgt_creds;
  477. entry = &target->pipe.txcredit_alloc[0];
  478. status = -ENOMEM;
  479. /* FIXME: hif_usbaudioclass is always zero */
  480. if (hif_usbaudioclass) {
  481. ath6kl_dbg(ATH6KL_DBG_HTC,
  482. "%s: For USB Audio Class- Total:%d\n",
  483. __func__, credits);
  484. entry++;
  485. entry++;
  486. /* Setup VO Service To have Max Credits */
  487. entry->service_id = WMI_DATA_VO_SVC;
  488. entry->credit_alloc = (credits - 6);
  489. if (entry->credit_alloc == 0)
  490. entry->credit_alloc++;
  491. credits -= (int) entry->credit_alloc;
  492. if (credits <= 0)
  493. return status;
  494. entry++;
  495. entry->service_id = WMI_CONTROL_SVC;
  496. entry->credit_alloc = credit_per_maxmsg;
  497. credits -= (int) entry->credit_alloc;
  498. if (credits <= 0)
  499. return status;
  500. /* leftovers go to best effort */
  501. entry++;
  502. entry++;
  503. entry->service_id = WMI_DATA_BE_SVC;
  504. entry->credit_alloc = (u8) credits;
  505. status = 0;
  506. } else {
  507. entry++;
  508. entry->service_id = WMI_DATA_VI_SVC;
  509. entry->credit_alloc = credits / 4;
  510. if (entry->credit_alloc == 0)
  511. entry->credit_alloc++;
  512. credits -= (int) entry->credit_alloc;
  513. if (credits <= 0)
  514. return status;
  515. entry++;
  516. entry->service_id = WMI_DATA_VO_SVC;
  517. entry->credit_alloc = credits / 4;
  518. if (entry->credit_alloc == 0)
  519. entry->credit_alloc++;
  520. credits -= (int) entry->credit_alloc;
  521. if (credits <= 0)
  522. return status;
  523. entry++;
  524. entry->service_id = WMI_CONTROL_SVC;
  525. entry->credit_alloc = credit_per_maxmsg;
  526. credits -= (int) entry->credit_alloc;
  527. if (credits <= 0)
  528. return status;
  529. entry++;
  530. entry->service_id = WMI_DATA_BK_SVC;
  531. entry->credit_alloc = credit_per_maxmsg;
  532. credits -= (int) entry->credit_alloc;
  533. if (credits <= 0)
  534. return status;
  535. /* leftovers go to best effort */
  536. entry++;
  537. entry->service_id = WMI_DATA_BE_SVC;
  538. entry->credit_alloc = (u8) credits;
  539. status = 0;
  540. }
  541. if (status == 0) {
  542. for (i = 0; i < ENDPOINT_MAX; i++) {
  543. if (target->pipe.txcredit_alloc[i].service_id != 0) {
  544. ath6kl_dbg(ATH6KL_DBG_HTC,
  545. "HTC Service Index : %d TX : 0x%2.2X : alloc:%d\n",
  546. i,
  547. target->pipe.txcredit_alloc[i].
  548. service_id,
  549. target->pipe.txcredit_alloc[i].
  550. credit_alloc);
  551. }
  552. }
  553. }
  554. return status;
  555. }
  556. /* process credit reports and call distribution function */
  557. static void htc_process_credit_report(struct htc_target *target,
  558. struct htc_credit_report *rpt,
  559. int num_entries,
  560. enum htc_endpoint_id from_ep)
  561. {
  562. int total_credits = 0, i;
  563. struct htc_endpoint *ep;
  564. /* lock out TX while we update credits */
  565. spin_lock_bh(&target->tx_lock);
  566. for (i = 0; i < num_entries; i++, rpt++) {
  567. if (rpt->eid >= ENDPOINT_MAX) {
  568. WARN_ON_ONCE(1);
  569. spin_unlock_bh(&target->tx_lock);
  570. return;
  571. }
  572. ep = &target->endpoint[rpt->eid];
  573. ep->cred_dist.credits += rpt->credits;
  574. if (ep->cred_dist.credits && get_queue_depth(&ep->txq)) {
  575. spin_unlock_bh(&target->tx_lock);
  576. htc_try_send(target, ep, NULL);
  577. spin_lock_bh(&target->tx_lock);
  578. }
  579. total_credits += rpt->credits;
  580. }
  581. ath6kl_dbg(ATH6KL_DBG_HTC,
  582. "Report indicated %d credits to distribute\n",
  583. total_credits);
  584. spin_unlock_bh(&target->tx_lock);
  585. }
  586. /* flush endpoint TX queue */
  587. static void htc_flush_tx_endpoint(struct htc_target *target,
  588. struct htc_endpoint *ep, u16 tag)
  589. {
  590. struct htc_packet *packet;
  591. spin_lock_bh(&target->tx_lock);
  592. while (get_queue_depth(&ep->txq)) {
  593. packet = list_first_entry(&ep->txq, struct htc_packet, list);
  594. list_del(&packet->list);
  595. packet->status = 0;
  596. send_packet_completion(target, packet);
  597. }
  598. spin_unlock_bh(&target->tx_lock);
  599. }
  600. /*
  601. * In the adapted HIF layer, struct sk_buff pointers are passed between HIF and
  602. * HTC. Since the upper layers expect struct htc_packet containers, we take the
  603. * completed skb and look up its corresponding HTC packet buffer in a lookup list.
  604. * This extra overhead could be removed by re-aligning the HIF interfaces with
  605. * HTC.
  606. */
  607. static struct htc_packet *htc_lookup_tx_packet(struct htc_target *target,
  608. struct htc_endpoint *ep,
  609. struct sk_buff *skb)
  610. {
  611. struct htc_packet *packet, *tmp_pkt, *found_packet = NULL;
  612. spin_lock_bh(&target->tx_lock);
  613. /*
  614. * iterate from the front of the tx lookup queue;
  615. * this lookup should be fast since the lower layers complete in order,
  616. * so the completed packet should generally be at the head of the list
  617. */
  618. list_for_each_entry_safe(packet, tmp_pkt, &ep->pipe.tx_lookup_queue,
  619. list) {
  620. /* check for removal */
  621. if (skb == packet->skb) {
  622. /* found it */
  623. list_del(&packet->list);
  624. found_packet = packet;
  625. break;
  626. }
  627. }
  628. spin_unlock_bh(&target->tx_lock);
  629. return found_packet;
  630. }
  631. static int ath6kl_htc_pipe_tx_complete(struct ath6kl *ar, struct sk_buff *skb)
  632. {
  633. struct htc_target *target = ar->htc_target;
  634. struct htc_frame_hdr *htc_hdr;
  635. struct htc_endpoint *ep;
  636. struct htc_packet *packet;
  637. u8 ep_id, *netdata;
  638. u32 netlen;
  639. netdata = skb->data;
  640. netlen = skb->len;
  641. htc_hdr = (struct htc_frame_hdr *) netdata;
  642. ep_id = htc_hdr->eid;
  643. ep = &target->endpoint[ep_id];
  644. packet = htc_lookup_tx_packet(target, ep, skb);
  645. if (packet == NULL) {
  646. /* may have already been flushed and freed */
  647. ath6kl_err("HTC TX lookup failed!\n");
  648. } else {
  649. /* will be giving this buffer back to upper layers */
  650. packet->status = 0;
  651. send_packet_completion(target, packet);
  652. }
  653. skb = NULL;
  654. if (!ep->pipe.tx_credit_flow_enabled) {
  655. /*
  656. * note: when using TX credit flow, the re-checking of queues
  657. * happens when credits flow back from the target. In the
  658. * non-TX credit case, we recheck after the packet completes
  659. */
  660. htc_try_send(target, ep, NULL);
  661. }
  662. return 0;
  663. }
  664. static int htc_send_packets_multiple(struct htc_target *target,
  665. struct list_head *pkt_queue)
  666. {
  667. struct htc_endpoint *ep;
  668. struct htc_packet *packet, *tmp_pkt;
  669. if (list_empty(pkt_queue))
  670. return -EINVAL;
  671. /* get first packet to find out which ep the packets will go into */
  672. packet = list_first_entry(pkt_queue, struct htc_packet, list);
  673. if (packet->endpoint >= ENDPOINT_MAX) {
  674. WARN_ON_ONCE(1);
  675. return -EINVAL;
  676. }
  677. ep = &target->endpoint[packet->endpoint];
  678. htc_try_send(target, ep, pkt_queue);
  679. /* do completion on any packets that couldn't get in */
  680. if (!list_empty(pkt_queue)) {
  681. list_for_each_entry_safe(packet, tmp_pkt, pkt_queue, list) {
  682. packet->status = -ENOMEM;
  683. }
  684. do_send_completion(ep, pkt_queue);
  685. }
  686. return 0;
  687. }
  688. /* htc pipe rx path */
  689. static struct htc_packet *alloc_htc_packet_container(struct htc_target *target)
  690. {
  691. struct htc_packet *packet;
  692. spin_lock_bh(&target->rx_lock);
  693. if (target->pipe.htc_packet_pool == NULL) {
  694. spin_unlock_bh(&target->rx_lock);
  695. return NULL;
  696. }
  697. packet = target->pipe.htc_packet_pool;
  698. target->pipe.htc_packet_pool = (struct htc_packet *) packet->list.next;
  699. spin_unlock_bh(&target->rx_lock);
  700. packet->list.next = NULL;
  701. return packet;
  702. }
  703. static void free_htc_packet_container(struct htc_target *target,
  704. struct htc_packet *packet)
  705. {
  706. struct list_head *lh;
  707. spin_lock_bh(&target->rx_lock);
  708. if (target->pipe.htc_packet_pool == NULL) {
  709. target->pipe.htc_packet_pool = packet;
  710. packet->list.next = NULL;
  711. } else {
  712. lh = (struct list_head *) target->pipe.htc_packet_pool;
  713. packet->list.next = lh;
  714. target->pipe.htc_packet_pool = packet;
  715. }
  716. spin_unlock_bh(&target->rx_lock);
  717. }
  718. static int htc_process_trailer(struct htc_target *target, u8 *buffer,
  719. int len, enum htc_endpoint_id from_ep)
  720. {
  721. struct htc_credit_report *report;
  722. struct htc_record_hdr *record;
  723. u8 *record_buf, *orig_buf;
  724. int orig_len, status;
  725. orig_buf = buffer;
  726. orig_len = len;
  727. status = 0;
  728. while (len > 0) {
  729. if (len < sizeof(struct htc_record_hdr)) {
  730. status = -EINVAL;
  731. break;
  732. }
  733. /* these are byte aligned structs */
  734. record = (struct htc_record_hdr *) buffer;
  735. len -= sizeof(struct htc_record_hdr);
  736. buffer += sizeof(struct htc_record_hdr);
  737. if (record->len > len) {
  738. /* no room left in buffer for record */
  739. ath6kl_dbg(ATH6KL_DBG_HTC,
  740. "invalid length: %d (id:%d) buffer has: %d bytes left\n",
  741. record->len, record->rec_id, len);
  742. status = -EINVAL;
  743. break;
  744. }
  745. /* start of record follows the header */
  746. record_buf = buffer;
  747. switch (record->rec_id) {
  748. case HTC_RECORD_CREDITS:
  749. if (record->len < sizeof(struct htc_credit_report)) {
  750. WARN_ON_ONCE(1);
  751. return -EINVAL;
  752. }
  753. report = (struct htc_credit_report *) record_buf;
  754. htc_process_credit_report(target, report,
  755. record->len / sizeof(*report),
  756. from_ep);
  757. break;
  758. default:
  759. ath6kl_dbg(ATH6KL_DBG_HTC,
  760. "unhandled record: id:%d length:%d\n",
  761. record->rec_id, record->len);
  762. break;
  763. }
  764. if (status != 0)
  765. break;
  766. /* advance buffer past this record for next time around */
  767. buffer += record->len;
  768. len -= record->len;
  769. }
  770. return status;
  771. }
  772. static void do_recv_completion(struct htc_endpoint *ep,
  773. struct list_head *queue_to_indicate)
  774. {
  775. struct htc_packet *packet;
  776. if (list_empty(queue_to_indicate)) {
  777. /* nothing to indicate */
  778. return;
  779. }
  780. /* using legacy EpRecv */
  781. while (!list_empty(queue_to_indicate)) {
  782. packet = list_first_entry(queue_to_indicate,
  783. struct htc_packet, list);
  784. list_del(&packet->list);
  785. ep->ep_cb.rx(ep->target, packet);
  786. }
  787. return;
  788. }
  789. static void recv_packet_completion(struct htc_target *target,
  790. struct htc_endpoint *ep,
  791. struct htc_packet *packet)
  792. {
  793. struct list_head container;
  794. INIT_LIST_HEAD(&container);
  795. list_add_tail(&packet->list, &container);
  796. /* do completion */
  797. do_recv_completion(ep, &container);
  798. }
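/*
 * HIF RX completion handler: validate the HTC header and length, process any
 * credit-report trailer, capture endpoint 0 control responses into the
 * control response buffer, and deliver data packets to the endpoint RX
 * callback inside a temporary htc_packet container.
 */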
  799. static int ath6kl_htc_pipe_rx_complete(struct ath6kl *ar, struct sk_buff *skb,
  800. u8 pipeid)
  801. {
  802. struct htc_target *target = ar->htc_target;
  803. u8 *netdata, *trailer, hdr_info;
  804. struct htc_frame_hdr *htc_hdr;
  805. u32 netlen, trailerlen = 0;
  806. struct htc_packet *packet;
  807. struct htc_endpoint *ep;
  808. u16 payload_len;
  809. int status = 0;
  810. netdata = skb->data;
  811. netlen = skb->len;
  812. htc_hdr = (struct htc_frame_hdr *) netdata;
  813. if (htc_hdr->eid >= ENDPOINT_MAX) {
  814. ath6kl_dbg(ATH6KL_DBG_HTC,
  815. "HTC Rx: invalid EndpointID=%d\n",
  816. htc_hdr->eid);
  817. status = -EINVAL;
  818. goto free_skb;
  819. }
  820. ep = &target->endpoint[htc_hdr->eid];
  821. payload_len = le16_to_cpu(get_unaligned(&htc_hdr->payld_len));
  822. if (netlen < (payload_len + HTC_HDR_LENGTH)) {
  823. ath6kl_dbg(ATH6KL_DBG_HTC,
  824. "HTC Rx: insufficient length, got:%d expected =%u\n",
  825. netlen, payload_len + HTC_HDR_LENGTH);
  826. status = -EINVAL;
  827. goto free_skb;
  828. }
  829. /* get flags to check for trailer */
  830. hdr_info = htc_hdr->flags;
  831. if (hdr_info & HTC_FLG_RX_TRAILER) {
  832. /* extract the trailer length */
  833. hdr_info = htc_hdr->ctrl[0];
  834. if ((hdr_info < sizeof(struct htc_record_hdr)) ||
  835. (hdr_info > payload_len)) {
  836. ath6kl_dbg(ATH6KL_DBG_HTC,
  837. "invalid header: payloadlen should be %d, CB[0]: %d\n",
  838. payload_len, hdr_info);
  839. status = -EINVAL;
  840. goto free_skb;
  841. }
  842. trailerlen = hdr_info;
  843. /* process trailer after hdr/apps payload */
  844. trailer = (u8 *) htc_hdr + HTC_HDR_LENGTH +
  845. payload_len - hdr_info;
  846. status = htc_process_trailer(target, trailer, hdr_info,
  847. htc_hdr->eid);
  848. if (status != 0)
  849. goto free_skb;
  850. }
  851. if (((int) payload_len - (int) trailerlen) <= 0) {
  852. /* zero length packet with trailer, just drop these */
  853. goto free_skb;
  854. }
  855. if (htc_hdr->eid == ENDPOINT_0) {
  856. /* handle HTC control message */
  857. if (target->htc_flags & HTC_OP_STATE_SETUP_COMPLETE) {
  858. /*
  859. * fatal: target should not send unsolicited
  860. * messages on endpoint 0
  861. */
  862. ath6kl_dbg(ATH6KL_DBG_HTC,
  863. "HTC ignores Rx Ctrl after setup complete\n");
  864. status = -EINVAL;
  865. goto free_skb;
  866. }
  867. /* remove HTC header */
  868. skb_pull(skb, HTC_HDR_LENGTH);
  869. netdata = skb->data;
  870. netlen = skb->len;
  871. spin_lock_bh(&target->rx_lock);
  872. target->pipe.ctrl_response_valid = true;
  873. target->pipe.ctrl_response_len = min_t(int, netlen,
  874. HTC_MAX_CTRL_MSG_LEN);
  875. memcpy(target->pipe.ctrl_response_buf, netdata,
  876. target->pipe.ctrl_response_len);
  877. spin_unlock_bh(&target->rx_lock);
  878. dev_kfree_skb(skb);
  879. skb = NULL;
  880. goto free_skb;
  881. }
  882. /*
  883. * TODO: the message-based HIF architecture allocates net bufs
  884. * for recv packets; since it bridges the HIF to upper layers,
  885. * which expect HTC packets, we form the packets here
  886. */
  887. packet = alloc_htc_packet_container(target);
  888. if (packet == NULL) {
  889. status = -ENOMEM;
  890. goto free_skb;
  891. }
  892. packet->status = 0;
  893. packet->endpoint = htc_hdr->eid;
  894. packet->pkt_cntxt = skb;
  895. /* TODO: for backwards compatibility */
  896. packet->buf = skb_push(skb, 0) + HTC_HDR_LENGTH;
  897. packet->act_len = netlen - HTC_HDR_LENGTH - trailerlen;
  898. /*
  899. * TODO: this is a hack because the driver layer will set the
  900. * actual len of the skb again which will just double the len
  901. */
  902. skb_trim(skb, 0);
  903. recv_packet_completion(target, ep, packet);
  904. /* recover the packet container */
  905. free_htc_packet_container(target, packet);
  906. skb = NULL;
  907. free_skb:
  908. if (skb != NULL)
  909. dev_kfree_skb(skb);
  910. return status;
  911. }
  912. static void htc_flush_rx_queue(struct htc_target *target,
  913. struct htc_endpoint *ep)
  914. {
  915. struct list_head container;
  916. struct htc_packet *packet;
  917. spin_lock_bh(&target->rx_lock);
  918. while (1) {
  919. if (list_empty(&ep->rx_bufq))
  920. break;
  921. packet = list_first_entry(&ep->rx_bufq,
  922. struct htc_packet, list);
  923. list_del(&packet->list);
  924. spin_unlock_bh(&target->rx_lock);
  925. packet->status = -ECANCELED;
  926. packet->act_len = 0;
  927. ath6kl_dbg(ATH6KL_DBG_HTC,
  928. "Flushing RX packet:0x%p, length:%d, ep:%d\n",
  929. packet, packet->buf_len,
  930. packet->endpoint);
  931. INIT_LIST_HEAD(&container);
  932. list_add_tail(&packet->list, &container);
  933. /* give the packet back */
  934. do_recv_completion(ep, &container);
  935. spin_lock_bh(&target->rx_lock);
  936. }
  937. spin_unlock_bh(&target->rx_lock);
  938. }
  939. /* polling routine to wait for a control packet to be received */
  940. static int htc_wait_recv_ctrl_message(struct htc_target *target)
  941. {
  942. int count = HTC_TARGET_RESPONSE_POLL_COUNT;
  943. while (count > 0) {
  944. spin_lock_bh(&target->rx_lock);
  945. if (target->pipe.ctrl_response_valid) {
  946. target->pipe.ctrl_response_valid = false;
  947. spin_unlock_bh(&target->rx_lock);
  948. break;
  949. }
  950. spin_unlock_bh(&target->rx_lock);
  951. count--;
  952. msleep_interruptible(HTC_TARGET_RESPONSE_POLL_WAIT);
  953. }
  954. if (count <= 0) {
  955. ath6kl_dbg(ATH6KL_DBG_HTC, "%s: Timeout!\n", __func__);
  956. return -ECOMM;
  957. }
  958. return 0;
  959. }
  960. static void htc_rxctrl_complete(struct htc_target *context,
  961. struct htc_packet *packet)
  962. {
  963. /* TODO, can't really receive HTC control messages yet.... */
  964. ath6kl_dbg(ATH6KL_DBG_HTC, "%s: invalid call function\n", __func__);
  965. }
  966. /* htc pipe initialization */
  967. static void reset_endpoint_states(struct htc_target *target)
  968. {
  969. struct htc_endpoint *ep;
  970. int i;
  971. for (i = ENDPOINT_0; i < ENDPOINT_MAX; i++) {
  972. ep = &target->endpoint[i];
  973. ep->svc_id = 0;
  974. ep->len_max = 0;
  975. ep->max_txq_depth = 0;
  976. ep->eid = i;
  977. INIT_LIST_HEAD(&ep->txq);
  978. INIT_LIST_HEAD(&ep->pipe.tx_lookup_queue);
  979. INIT_LIST_HEAD(&ep->rx_bufq);
  980. ep->target = target;
  981. ep->pipe.tx_credit_flow_enabled = (bool) 1; /* FIXME */
  982. }
  983. }
  984. /* start HTC, this is called after all services are connected */
  985. static int htc_config_target_hif_pipe(struct htc_target *target)
  986. {
  987. return 0;
  988. }
  989. /* htc service functions */
  990. static u8 htc_get_credit_alloc(struct htc_target *target, u16 service_id)
  991. {
  992. u8 allocation = 0;
  993. int i;
  994. for (i = 0; i < ENDPOINT_MAX; i++) {
  995. if (target->pipe.txcredit_alloc[i].service_id == service_id)
  996. allocation =
  997. target->pipe.txcredit_alloc[i].credit_alloc;
  998. }
  999. if (allocation == 0) {
  1000. ath6kl_dbg(ATH6KL_DBG_HTC,
  1001. "HTC Service TX : 0x%2.2X : allocation is zero!\n",
  1002. service_id);
  1003. }
  1004. return allocation;
  1005. }
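/*
 * Connect a service: the pseudo control service maps directly to endpoint 0;
 * any other service sends an HTC connect-service message, waits for the
 * target's response, then sets up the assigned endpoint and maps its service
 * to HIF pipes.
 */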
  1006. static int ath6kl_htc_pipe_conn_service(struct htc_target *target,
  1007. struct htc_service_connect_req *conn_req,
  1008. struct htc_service_connect_resp *conn_resp)
  1009. {
  1010. struct ath6kl *ar = target->dev->ar;
  1011. struct htc_packet *packet = NULL;
  1012. struct htc_conn_service_resp *resp_msg;
  1013. struct htc_conn_service_msg *conn_msg;
  1014. enum htc_endpoint_id assigned_epid = ENDPOINT_MAX;
  1015. bool disable_credit_flowctrl = false;
  1016. unsigned int max_msg_size = 0;
  1017. struct htc_endpoint *ep;
  1018. int length, status = 0;
  1019. struct sk_buff *skb;
  1020. u8 tx_alloc;
  1021. u16 flags;
  1022. if (conn_req->svc_id == 0) {
  1023. WARN_ON_ONCE(1);
  1024. status = -EINVAL;
  1025. goto free_packet;
  1026. }
  1027. if (conn_req->svc_id == HTC_CTRL_RSVD_SVC) {
  1028. /* special case for pseudo control service */
  1029. assigned_epid = ENDPOINT_0;
  1030. max_msg_size = HTC_MAX_CTRL_MSG_LEN;
  1031. tx_alloc = 0;
  1032. } else {
  1033. tx_alloc = htc_get_credit_alloc(target, conn_req->svc_id);
  1034. if (tx_alloc == 0) {
  1035. status = -ENOMEM;
  1036. goto free_packet;
  1037. }
  1038. /* allocate a packet to send to the target */
  1039. packet = htc_alloc_txctrl_packet(target);
  1040. if (packet == NULL) {
  1041. WARN_ON_ONCE(1);
  1042. status = -ENOMEM;
  1043. goto free_packet;
  1044. }
  1045. skb = packet->skb;
  1046. length = sizeof(struct htc_conn_service_msg);
  1047. /* assemble connect service message */
  1048. conn_msg = (struct htc_conn_service_msg *) skb_put(skb,
  1049. length);
  1050. if (conn_msg == NULL) {
  1051. WARN_ON_ONCE(1);
  1052. status = -EINVAL;
  1053. goto free_packet;
  1054. }
  1055. memset(conn_msg, 0,
  1056. sizeof(struct htc_conn_service_msg));
  1057. conn_msg->msg_id = cpu_to_le16(HTC_MSG_CONN_SVC_ID);
  1058. conn_msg->svc_id = cpu_to_le16(conn_req->svc_id);
  1059. conn_msg->conn_flags = cpu_to_le16(conn_req->conn_flags &
  1060. ~HTC_CONN_FLGS_SET_RECV_ALLOC_MASK);
  1061. /* tell target desired recv alloc for this ep */
  1062. flags = tx_alloc << HTC_CONN_FLGS_SET_RECV_ALLOC_SHIFT;
  1063. conn_msg->conn_flags |= cpu_to_le16(flags);
  1064. if (conn_req->conn_flags &
  1065. HTC_CONN_FLGS_DISABLE_CRED_FLOW_CTRL) {
  1066. disable_credit_flowctrl = true;
  1067. }
  1068. set_htc_pkt_info(packet, NULL, (u8 *) conn_msg,
  1069. length,
  1070. ENDPOINT_0, HTC_SERVICE_TX_PACKET_TAG);
  1071. status = ath6kl_htc_pipe_tx(target, packet);
  1072. /* we don't own it anymore */
  1073. packet = NULL;
  1074. if (status != 0)
  1075. goto free_packet;
  1076. /* wait for response */
  1077. status = htc_wait_recv_ctrl_message(target);
  1078. if (status != 0)
  1079. goto free_packet;
  1080. /* we controlled the buffer creation so it has to be
  1081. * properly aligned
  1082. */
  1083. resp_msg = (struct htc_conn_service_resp *)
  1084. target->pipe.ctrl_response_buf;
  1085. if (resp_msg->msg_id != cpu_to_le16(HTC_MSG_CONN_SVC_RESP_ID) ||
  1086. (target->pipe.ctrl_response_len < sizeof(*resp_msg))) {
  1087. /* this message is not valid */
  1088. WARN_ON_ONCE(1);
  1089. status = -EINVAL;
  1090. goto free_packet;
  1091. }
  1092. ath6kl_dbg(ATH6KL_DBG_TRC,
  1093. "%s: service 0x%X conn resp: status: %d ep: %d\n",
  1094. __func__, resp_msg->svc_id, resp_msg->status,
  1095. resp_msg->eid);
  1096. conn_resp->resp_code = resp_msg->status;
  1097. /* check response status */
  1098. if (resp_msg->status != HTC_SERVICE_SUCCESS) {
  1099. ath6kl_dbg(ATH6KL_DBG_HTC,
  1100. "Target failed service 0x%X connect request (status:%d)\n",
  1101. resp_msg->svc_id, resp_msg->status);
  1102. status = -EINVAL;
  1103. goto free_packet;
  1104. }
  1105. assigned_epid = (enum htc_endpoint_id) resp_msg->eid;
  1106. max_msg_size = le16_to_cpu(resp_msg->max_msg_sz);
  1107. }
  1108. /* the rest are parameter checks so set the error status */
  1109. status = -EINVAL;
  1110. if (assigned_epid >= ENDPOINT_MAX) {
  1111. WARN_ON_ONCE(1);
  1112. goto free_packet;
  1113. }
  1114. if (max_msg_size == 0) {
  1115. WARN_ON_ONCE(1);
  1116. goto free_packet;
  1117. }
  1118. ep = &target->endpoint[assigned_epid];
  1119. ep->eid = assigned_epid;
  1120. if (ep->svc_id != 0) {
  1121. /* endpoint already in use! */
  1122. WARN_ON_ONCE(1);
  1123. goto free_packet;
  1124. }
  1125. /* return assigned endpoint to caller */
  1126. conn_resp->endpoint = assigned_epid;
  1127. conn_resp->len_max = max_msg_size;
  1128. /* setup the endpoint */
  1129. ep->svc_id = conn_req->svc_id; /* this marks ep in use */
  1130. ep->max_txq_depth = conn_req->max_txq_depth;
  1131. ep->len_max = max_msg_size;
  1132. ep->cred_dist.credits = tx_alloc;
  1133. ep->cred_dist.cred_sz = target->tgt_cred_sz;
  1134. ep->cred_dist.cred_per_msg = max_msg_size / target->tgt_cred_sz;
  1135. if (max_msg_size % target->tgt_cred_sz)
  1136. ep->cred_dist.cred_per_msg++;
  1137. /* copy all the callbacks */
  1138. ep->ep_cb = conn_req->ep_cb;
  1139. /* initialize tx_drop_packet_threshold */
  1140. ep->tx_drop_packet_threshold = MAX_HI_COOKIE_NUM;
  1141. status = ath6kl_hif_pipe_map_service(ar, ep->svc_id,
  1142. &ep->pipe.pipeid_ul,
  1143. &ep->pipe.pipeid_dl);
  1144. if (status != 0)
  1145. goto free_packet;
  1146. ath6kl_dbg(ATH6KL_DBG_HTC,
  1147. "SVC Ready: 0x%4.4X: ULpipe:%d DLpipe:%d id:%d\n",
  1148. ep->svc_id, ep->pipe.pipeid_ul,
  1149. ep->pipe.pipeid_dl, ep->eid);
  1150. if (disable_credit_flowctrl && ep->pipe.tx_credit_flow_enabled) {
  1151. ep->pipe.tx_credit_flow_enabled = false;
  1152. ath6kl_dbg(ATH6KL_DBG_HTC,
  1153. "SVC: 0x%4.4X ep:%d TX flow control off\n",
  1154. ep->svc_id, assigned_epid);
  1155. }
  1156. free_packet:
  1157. if (packet != NULL)
  1158. htc_free_txctrl_packet(target, packet);
  1159. return status;
  1160. }
  1161. /* htc export functions */
  1162. static void *ath6kl_htc_pipe_create(struct ath6kl *ar)
  1163. {
  1164. int status = 0;
  1165. struct htc_endpoint *ep = NULL;
  1166. struct htc_target *target = NULL;
  1167. struct htc_packet *packet;
  1168. int i;
  1169. target = kzalloc(sizeof(struct htc_target), GFP_KERNEL);
  1170. if (target == NULL) {
  1171. ath6kl_err("htc create unable to allocate memory\n");
  1172. status = -ENOMEM;
  1173. goto fail_htc_create;
  1174. }
  1175. spin_lock_init(&target->htc_lock);
  1176. spin_lock_init(&target->rx_lock);
  1177. spin_lock_init(&target->tx_lock);
  1178. reset_endpoint_states(target);
  1179. for (i = 0; i < HTC_PACKET_CONTAINER_ALLOCATION; i++) {
  1180. packet = kzalloc(sizeof(struct htc_packet), GFP_KERNEL);
  1181. if (packet != NULL)
  1182. free_htc_packet_container(target, packet);
  1183. }
  1184. target->dev = kzalloc(sizeof(*target->dev), GFP_KERNEL);
  1185. if (!target->dev) {
  1186. ath6kl_err("unable to allocate memory\n");
  1187. status = -ENOMEM;
  1188. goto fail_htc_create;
  1189. }
  1190. target->dev->ar = ar;
  1191. target->dev->htc_cnxt = target;
  1192. /* Get HIF default pipe for HTC message exchange */
  1193. ep = &target->endpoint[ENDPOINT_0];
  1194. ath6kl_hif_pipe_get_default(ar, &ep->pipe.pipeid_ul,
  1195. &ep->pipe.pipeid_dl);
  1196. return target;
  1197. fail_htc_create:
  1198. if (status != 0) {
  1199. if (target != NULL)
  1200. ath6kl_htc_pipe_cleanup(target);
  1201. target = NULL;
  1202. }
  1203. return target;
  1204. }
  1205. /* cleanup the HTC instance */
  1206. static void ath6kl_htc_pipe_cleanup(struct htc_target *target)
  1207. {
  1208. struct htc_packet *packet;
  1209. while (true) {
  1210. packet = alloc_htc_packet_container(target);
  1211. if (packet == NULL)
  1212. break;
  1213. kfree(packet);
  1214. }
  1215. kfree(target->dev);
  1216. /* kfree our instance */
  1217. kfree(target);
  1218. }
  1219. static int ath6kl_htc_pipe_start(struct htc_target *target)
  1220. {
  1221. struct sk_buff *skb;
  1222. struct htc_setup_comp_ext_msg *setup;
  1223. struct htc_packet *packet;
  1224. htc_config_target_hif_pipe(target);
  1225. /* allocate a buffer to send */
  1226. packet = htc_alloc_txctrl_packet(target);
  1227. if (packet == NULL) {
  1228. WARN_ON_ONCE(1);
  1229. return -ENOMEM;
  1230. }
  1231. skb = packet->skb;
  1232. /* assemble setup complete message */
  1233. setup = (struct htc_setup_comp_ext_msg *) skb_put(skb,
  1234. sizeof(*setup));
  1235. memset(setup, 0, sizeof(struct htc_setup_comp_ext_msg));
  1236. setup->msg_id = cpu_to_le16(HTC_MSG_SETUP_COMPLETE_EX_ID);
  1237. ath6kl_dbg(ATH6KL_DBG_HTC, "HTC using TX credit flow control\n");
  1238. set_htc_pkt_info(packet, NULL, (u8 *) setup,
  1239. sizeof(struct htc_setup_comp_ext_msg),
  1240. ENDPOINT_0, HTC_SERVICE_TX_PACKET_TAG);
  1241. target->htc_flags |= HTC_OP_STATE_SETUP_COMPLETE;
  1242. return ath6kl_htc_pipe_tx(target, packet);
  1243. }
  1244. static void ath6kl_htc_pipe_stop(struct htc_target *target)
  1245. {
  1246. int i;
  1247. struct htc_endpoint *ep;
  1248. /* cleanup endpoints */
  1249. for (i = 0; i < ENDPOINT_MAX; i++) {
  1250. ep = &target->endpoint[i];
  1251. htc_flush_rx_queue(target, ep);
  1252. htc_flush_tx_endpoint(target, ep, HTC_TX_PACKET_TAG_ALL);
  1253. }
  1254. reset_endpoint_states(target);
  1255. target->htc_flags &= ~HTC_OP_STATE_SETUP_COMPLETE;
  1256. }
  1257. static int ath6kl_htc_pipe_get_rxbuf_num(struct htc_target *target,
  1258. enum htc_endpoint_id endpoint)
  1259. {
  1260. int num;
  1261. spin_lock_bh(&target->rx_lock);
  1262. num = get_queue_depth(&(target->endpoint[endpoint].rx_bufq));
  1263. spin_unlock_bh(&target->rx_lock);
  1264. return num;
  1265. }
  1266. static int ath6kl_htc_pipe_tx(struct htc_target *target,
  1267. struct htc_packet *packet)
  1268. {
  1269. struct list_head queue;
  1270. ath6kl_dbg(ATH6KL_DBG_HTC,
  1271. "%s: endPointId: %d, buffer: 0x%p, length: %d\n",
  1272. __func__, packet->endpoint, packet->buf,
  1273. packet->act_len);
  1274. INIT_LIST_HEAD(&queue);
  1275. list_add_tail(&packet->list, &queue);
  1276. return htc_send_packets_multiple(target, &queue);
  1277. }
  1278. static int ath6kl_htc_pipe_wait_target(struct htc_target *target)
  1279. {
  1280. struct htc_ready_ext_msg *ready_msg;
  1281. struct htc_service_connect_req connect;
  1282. struct htc_service_connect_resp resp;
  1283. int status = 0;
  1284. status = htc_wait_recv_ctrl_message(target);
  1285. if (status != 0)
  1286. return status;
  1287. if (target->pipe.ctrl_response_len < sizeof(*ready_msg)) {
  1288. ath6kl_dbg(ATH6KL_DBG_HTC, "invalid htc ready msg len:%d!\n",
  1289. target->pipe.ctrl_response_len);
  1290. return -ECOMM;
  1291. }
  1292. ready_msg = (struct htc_ready_ext_msg *) target->pipe.ctrl_response_buf;
  1293. if (ready_msg->ver2_0_info.msg_id != cpu_to_le16(HTC_MSG_READY_ID)) {
  1294. ath6kl_dbg(ATH6KL_DBG_HTC, "invalid htc ready msg : 0x%X !\n",
  1295. ready_msg->ver2_0_info.msg_id);
  1296. return -ECOMM;
  1297. }
  1298. ath6kl_dbg(ATH6KL_DBG_HTC,
  1299. "Target Ready! : transmit resources : %d size:%d\n",
  1300. ready_msg->ver2_0_info.cred_cnt,
  1301. ready_msg->ver2_0_info.cred_sz);
  1302. target->tgt_creds = le16_to_cpu(ready_msg->ver2_0_info.cred_cnt);
  1303. target->tgt_cred_sz = le16_to_cpu(ready_msg->ver2_0_info.cred_sz);
  1304. if ((target->tgt_creds == 0) || (target->tgt_cred_sz == 0))
  1305. return -ECOMM;
  1306. htc_setup_target_buffer_assignments(target);
  1307. /* setup our pseudo HTC control endpoint connection */
  1308. memset(&connect, 0, sizeof(connect));
  1309. memset(&resp, 0, sizeof(resp));
  1310. connect.ep_cb.tx_complete = htc_txctrl_complete;
  1311. connect.ep_cb.rx = htc_rxctrl_complete;
  1312. connect.max_txq_depth = NUM_CONTROL_TX_BUFFERS;
  1313. connect.svc_id = HTC_CTRL_RSVD_SVC;
  1314. /* connect fake service */
  1315. status = ath6kl_htc_pipe_conn_service(target, &connect, &resp);
  1316. return status;
  1317. }
  1318. static void ath6kl_htc_pipe_flush_txep(struct htc_target *target,
  1319. enum htc_endpoint_id endpoint, u16 tag)
  1320. {
  1321. struct htc_endpoint *ep = &target->endpoint[endpoint];
  1322. if (ep->svc_id == 0) {
  1323. WARN_ON_ONCE(1);
  1324. /* not in use.. */
  1325. return;
  1326. }
  1327. htc_flush_tx_endpoint(target, ep, tag);
  1328. }
  1329. static int ath6kl_htc_pipe_add_rxbuf_multiple(struct htc_target *target,
  1330. struct list_head *pkt_queue)
  1331. {
  1332. struct htc_packet *packet, *tmp_pkt, *first;
  1333. struct htc_endpoint *ep;
  1334. int status = 0;
  1335. if (list_empty(pkt_queue))
  1336. return -EINVAL;
  1337. first = list_first_entry(pkt_queue, struct htc_packet, list);
  1338. if (first->endpoint >= ENDPOINT_MAX) {
  1339. WARN_ON_ONCE(1);
  1340. return -EINVAL;
  1341. }
  1342. ath6kl_dbg(ATH6KL_DBG_HTC, "%s: epid: %d, cnt:%d, len: %d\n",
  1343. __func__, first->endpoint, get_queue_depth(pkt_queue),
  1344. first->buf_len);
  1345. ep = &target->endpoint[first->endpoint];
  1346. spin_lock_bh(&target->rx_lock);
  1347. /* store receive packets */
  1348. list_splice_tail_init(pkt_queue, &ep->rx_bufq);
  1349. spin_unlock_bh(&target->rx_lock);
  1350. if (status != 0) {
  1351. /* walk through queue and mark each one canceled */
  1352. list_for_each_entry_safe(packet, tmp_pkt, pkt_queue, list) {
  1353. packet->status = -ECANCELED;
  1354. }
  1355. do_recv_completion(ep, pkt_queue);
  1356. }
  1357. return status;
  1358. }
  1359. static void ath6kl_htc_pipe_activity_changed(struct htc_target *target,
  1360. enum htc_endpoint_id ep,
  1361. bool active)
  1362. {
  1363. /* TODO */
  1364. }
  1365. static void ath6kl_htc_pipe_flush_rx_buf(struct htc_target *target)
  1366. {
  1367. /* TODO */
  1368. }
  1369. static int ath6kl_htc_pipe_credit_setup(struct htc_target *target,
  1370. struct ath6kl_htc_credit_info *info)
  1371. {
  1372. return 0;
  1373. }
  1374. static const struct ath6kl_htc_ops ath6kl_htc_pipe_ops = {
  1375. .create = ath6kl_htc_pipe_create,
  1376. .wait_target = ath6kl_htc_pipe_wait_target,
  1377. .start = ath6kl_htc_pipe_start,
  1378. .conn_service = ath6kl_htc_pipe_conn_service,
  1379. .tx = ath6kl_htc_pipe_tx,
  1380. .stop = ath6kl_htc_pipe_stop,
  1381. .cleanup = ath6kl_htc_pipe_cleanup,
  1382. .flush_txep = ath6kl_htc_pipe_flush_txep,
  1383. .flush_rx_buf = ath6kl_htc_pipe_flush_rx_buf,
  1384. .activity_changed = ath6kl_htc_pipe_activity_changed,
  1385. .get_rxbuf_num = ath6kl_htc_pipe_get_rxbuf_num,
  1386. .add_rxbuf_multiple = ath6kl_htc_pipe_add_rxbuf_multiple,
  1387. .credit_setup = ath6kl_htc_pipe_credit_setup,
  1388. .tx_complete = ath6kl_htc_pipe_tx_complete,
  1389. .rx_complete = ath6kl_htc_pipe_rx_complete,
  1390. };
  1391. void ath6kl_htc_pipe_attach(struct ath6kl *ar)
  1392. {
  1393. ar->htc_ops = &ath6kl_htc_pipe_ops;
  1394. }