htc_pipe.c 42 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
7127812791280128112821283128412851286128712881289129012911292129312941295129612971298129913001301130213031304130513061307130813091310131113121313131413151316131713181319132013211322132313241325132613271328132913301331133213331334133513361337133813391340134113421343134413451346134713481349135013511352135313541355135613571358135913601361136213631364136513661367136813691370137113721373137413751376137713781379138013811382138313841385138613871388138913901391139213931394139513961397139813991400140114021403140414051406140714081409141014111412141314141415141614171418141914201421142214231424142514261427142814291430143114321433143414351436143714381439144014411442144314441445144614471448144914501451145214531454145514561457145814591460146114621463146414651466146714681469147014711472147314741475147614771478147914801481148214831484148514861487148814891490149114921493149414951496149714981499150015011502150315041505150615071508150915101511151215131514151515161517151815191520152115221523152415251526152715281529153015311532153315341535153615371538153915401541154215431544154515461547154815491550155115521553155415551556155715581559156015611562156315641565156615671568156915701571157215731574157515761577157815791580158115821583158415851586158715881589159015911592159315941595159615971598159916001601160216031604160516061607160816091610161116121613161416151616161716181619162016211622162316241625162616271628162916301631163216331634163516361637163816391640164116421643164416451646164716481649165016511652165316541655165616571658165916601661166216631664166516661667166816691670167116721673167416751676167716781679168016811682168316841685168616871688168916901691169216931694169516961697169816991700170117021703170417051706
  1. /*
  2. * Copyright (c) 2007-2011 Atheros Communications Inc.
  3. *
  4. * Permission to use, copy, modify, and/or distribute this software for any
  5. * purpose with or without fee is hereby granted, provided that the above
  6. * copyright notice and this permission notice appear in all copies.
  7. *
  8. * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
  9. * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
  10. * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
  11. * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
  12. * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
  13. * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
  14. * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
  15. */
  16. #include "core.h"
  17. #include "debug.h"
  18. #include "hif-ops.h"
  19. #define HTC_PACKET_CONTAINER_ALLOCATION 32
  20. #define HTC_CONTROL_BUFFER_SIZE (HTC_MAX_CTRL_MSG_LEN + HTC_HDR_LENGTH)
  21. static int ath6kl_htc_pipe_tx(struct htc_target *handle,
  22. struct htc_packet *packet);
  23. static void ath6kl_htc_pipe_cleanup(struct htc_target *handle);
  24. /* htc pipe tx path */
  25. static inline void restore_tx_packet(struct htc_packet *packet)
  26. {
  27. if (packet->info.tx.flags & HTC_FLAGS_TX_FIXUP_NETBUF) {
  28. skb_pull(packet->skb, sizeof(struct htc_frame_hdr));
  29. packet->info.tx.flags &= ~HTC_FLAGS_TX_FIXUP_NETBUF;
  30. }
  31. }
  32. static void do_send_completion(struct htc_endpoint *ep,
  33. struct list_head *queue_to_indicate)
  34. {
  35. struct htc_packet *packet;
  36. if (list_empty(queue_to_indicate)) {
  37. /* nothing to indicate */
  38. return;
  39. }
  40. if (ep->ep_cb.tx_comp_multi != NULL) {
  41. ath6kl_dbg(ATH6KL_DBG_HTC,
  42. "%s: calling ep %d, send complete multiple callback (%d pkts)\n",
  43. __func__, ep->eid,
  44. get_queue_depth(queue_to_indicate));
  45. /*
  46. * a multiple send complete handler is being used,
  47. * pass the queue to the handler
  48. */
  49. ep->ep_cb.tx_comp_multi(ep->target, queue_to_indicate);
  50. /*
  51. * all packets are now owned by the callback,
  52. * reset queue to be safe
  53. */
  54. INIT_LIST_HEAD(queue_to_indicate);
  55. } else {
  56. /* using legacy EpTxComplete */
  57. do {
  58. packet = list_first_entry(queue_to_indicate,
  59. struct htc_packet, list);
  60. list_del(&packet->list);
  61. ath6kl_dbg(ATH6KL_DBG_HTC,
  62. "%s: calling ep %d send complete callback on packet 0x%p\n",
  63. __func__, ep->eid, packet);
  64. ep->ep_cb.tx_complete(ep->target, packet);
  65. } while (!list_empty(queue_to_indicate));
  66. }
  67. }
  68. static void send_packet_completion(struct htc_target *target,
  69. struct htc_packet *packet)
  70. {
  71. struct htc_endpoint *ep = &target->endpoint[packet->endpoint];
  72. struct list_head container;
  73. restore_tx_packet(packet);
  74. INIT_LIST_HEAD(&container);
  75. list_add_tail(&packet->list, &container);
  76. /* do completion */
  77. do_send_completion(ep, &container);
  78. }
/*
 * Drain packets from the endpoint TX queue into the caller's queue for as
 * long as the endpoint has enough target credits for the head packet.
 * Each dequeued packet is stamped with the credits it consumed, its send
 * flags, and a per-endpoint sequence number.
 *
 * NOTE: target->tx_lock must be held by the caller.
 */
static void get_htc_packet_credit_based(struct htc_target *target,
					struct htc_endpoint *ep,
					struct list_head *queue)
{
	int credits_required;
	int remainder;
	u8 send_flags;
	struct htc_packet *packet;
	unsigned int transfer_len;

	/* NOTE : the TX lock is held when this function is called */

	/* loop until we can grab as many packets out of the queue as we can */
	while (true) {
		send_flags = 0;
		if (list_empty(&ep->txq))
			break;

		/* get packet at head, but don't remove it */
		packet = list_first_entry(&ep->txq, struct htc_packet, list);
		ath6kl_dbg(ATH6KL_DBG_HTC,
			   "%s: got head packet:0x%p , queue depth: %d\n",
			   __func__, packet, get_queue_depth(&ep->txq));

		/* one credit covers tgt_cred_sz bytes, HTC header included */
		transfer_len = packet->act_len + HTC_HDR_LENGTH;

		if (transfer_len <= target->tgt_cred_sz) {
			credits_required = 1;
		} else {
			/* figure out how many credits this message requires */
			credits_required = transfer_len / target->tgt_cred_sz;
			remainder = transfer_len % target->tgt_cred_sz;

			/* round up: a partial credit's worth still needs one */
			if (remainder)
				credits_required++;
		}

		ath6kl_dbg(ATH6KL_DBG_HTC, "%s: creds required:%d got:%d\n",
			   __func__, credits_required, ep->cred_dist.credits);

		if (ep->eid == ENDPOINT_0) {
			/*
			 * endpoint 0 is special, it always has a credit and
			 * does not require credit based flow control
			 */
			credits_required = 0;
		} else {
			/* head packet doesn't fit in remaining credits: stop
			 * draining (packet stays queued for later) */
			if (ep->cred_dist.credits < credits_required)
				break;

			ep->cred_dist.credits -= credits_required;
			ep->ep_st.cred_cosumd += credits_required;

			/* check if we need credits back from the target */
			if (ep->cred_dist.credits <
			    ep->cred_dist.cred_per_msg) {
				/* tell the target we need credits ASAP! */
				send_flags |= HTC_FLAGS_NEED_CREDIT_UPDATE;
				ep->ep_st.cred_low_indicate += 1;
				ath6kl_dbg(ATH6KL_DBG_HTC,
					   "%s: host needs credits\n",
					   __func__);
			}
		}

		/* now we can fully dequeue */
		packet = list_first_entry(&ep->txq, struct htc_packet, list);
		list_del(&packet->list);
		/* save the number of credits this packet consumed */
		packet->info.tx.cred_used = credits_required;
		/* save send flags */
		packet->info.tx.flags = send_flags;
		packet->info.tx.seqno = ep->seqno;
		ep->seqno++;
		/* queue this packet into the caller's queue */
		list_add_tail(&packet->list, queue);
	}
}
  146. static void get_htc_packet(struct htc_target *target,
  147. struct htc_endpoint *ep,
  148. struct list_head *queue, int resources)
  149. {
  150. struct htc_packet *packet;
  151. /* NOTE : the TX lock is held when this function is called */
  152. /* loop until we can grab as many packets out of the queue as we can */
  153. while (resources) {
  154. if (list_empty(&ep->txq))
  155. break;
  156. packet = list_first_entry(&ep->txq, struct htc_packet, list);
  157. list_del(&packet->list);
  158. ath6kl_dbg(ATH6KL_DBG_HTC,
  159. "%s: got packet:0x%p , new queue depth: %d\n",
  160. __func__, packet, get_queue_depth(&ep->txq));
  161. packet->info.tx.seqno = ep->seqno;
  162. packet->info.tx.flags = 0;
  163. packet->info.tx.cred_used = 0;
  164. ep->seqno++;
  165. /* queue this packet into the caller's queue */
  166. list_add_tail(&packet->list, queue);
  167. resources--;
  168. }
  169. }
/*
 * Push the HTC frame header onto each packet in pkt_queue and hand it to
 * the HIF layer for transmission on the endpoint's upload pipe.
 *
 * On a send failure the failing packet is pulled back out of the TX lookup
 * queue, its credits are reclaimed, it is returned to the head of the
 * caller's queue, and every remaining queued packet is completed with the
 * error status.
 *
 * Returns 0 on success or a negative errno from the failing operation.
 */
static int htc_issue_packets(struct htc_target *target,
			     struct htc_endpoint *ep,
			     struct list_head *pkt_queue)
{
	int status = 0;
	u16 payload_len;
	struct sk_buff *skb;
	struct htc_frame_hdr *htc_hdr;
	struct htc_packet *packet;

	ath6kl_dbg(ATH6KL_DBG_HTC,
		   "%s: queue: 0x%p, pkts %d\n", __func__,
		   pkt_queue, get_queue_depth(pkt_queue));

	while (!list_empty(pkt_queue)) {
		packet = list_first_entry(pkt_queue, struct htc_packet, list);
		list_del(&packet->list);

		skb = packet->skb;
		if (!skb) {
			WARN_ON_ONCE(1);
			status = -EINVAL;
			break;
		}

		payload_len = packet->act_len;

		/* setup HTC frame header */
		htc_hdr = (struct htc_frame_hdr *) skb_push(skb,
							    sizeof(*htc_hdr));
		if (!htc_hdr) {
			WARN_ON_ONCE(1);
			status = -EINVAL;
			break;
		}

		/* remember the header was pushed so completion can undo it */
		packet->info.tx.flags |= HTC_FLAGS_TX_FIXUP_NETBUF;

		/* Endianess? */
		put_unaligned((u16) payload_len, &htc_hdr->payld_len);
		htc_hdr->flags = packet->info.tx.flags;
		htc_hdr->eid = (u8) packet->endpoint;
		htc_hdr->ctrl[0] = 0;
		htc_hdr->ctrl[1] = (u8) packet->info.tx.seqno;

		spin_lock_bh(&target->tx_lock);

		/* store in look up queue to match completions */
		list_add_tail(&packet->list, &ep->pipe.tx_lookup_queue);
		ep->ep_st.tx_issued += 1;
		spin_unlock_bh(&target->tx_lock);

		status = ath6kl_hif_pipe_send(target->dev->ar,
					      ep->pipe.pipeid_ul, NULL, skb);

		if (status != 0) {
			if (status != -ENOMEM) {
				/* TODO: if more than 1 endpoint maps to the
				 * same PipeID, it is possible to run out of
				 * resources in the HIF layer.
				 * Don't emit the error
				 */
				ath6kl_dbg(ATH6KL_DBG_HTC,
					   "%s: failed status:%d\n",
					   __func__, status);
			}

			/* unwind: remove from lookup queue and give the
			 * consumed credits back to the endpoint */
			spin_lock_bh(&target->tx_lock);
			list_del(&packet->list);

			/* reclaim credits */
			ep->cred_dist.credits += packet->info.tx.cred_used;
			spin_unlock_bh(&target->tx_lock);

			/* put it back into the callers queue */
			list_add(&packet->list, pkt_queue);
			break;
		}
	}

	if (status != 0) {
		/* flush everything still queued with the failure status */
		while (!list_empty(pkt_queue)) {
			if (status != -ENOMEM) {
				ath6kl_dbg(ATH6KL_DBG_HTC,
					   "%s: failed pkt:0x%p status:%d\n",
					   __func__, packet, status);
			}

			packet = list_first_entry(pkt_queue,
						  struct htc_packet, list);
			list_del(&packet->list);
			packet->status = status;
			send_packet_completion(target, packet);
		}
	}

	return status;
}
/*
 * Queue the caller's packets (if any) onto the endpoint TX queue — honouring
 * the endpoint's max queue depth and, when registered, its tx_full overflow
 * callback — then drain the endpoint queue to the HIF layer for as long as
 * transmit resources (HIF free queue slots or target credits) last.
 *
 * txq == NULL means the caller provides no new packets and just wants the
 * endpoint queue checked and drained.
 *
 * Returns HTC_SEND_QUEUE_OK, or HTC_SEND_QUEUE_DROP when none of the
 * caller's packets could be accepted (packets left on txq belong to the
 * caller for cleanup).
 */
static enum htc_send_queue_result htc_try_send(struct htc_target *target,
					       struct htc_endpoint *ep,
					       struct list_head *txq)
{
	struct list_head send_queue;	/* temp queue to hold packets */
	struct htc_packet *packet, *tmp_pkt;
	struct ath6kl *ar = target->dev->ar;
	enum htc_send_full_action action;
	int tx_resources, overflow, txqueue_depth, i, good_pkts;
	u8 pipeid;

	ath6kl_dbg(ATH6KL_DBG_HTC, "%s: (queue:0x%p depth:%d)\n",
		   __func__, txq,
		   (txq == NULL) ? 0 : get_queue_depth(txq));

	/* init the local send queue */
	INIT_LIST_HEAD(&send_queue);

	/*
	 * txq equals to NULL means
	 * caller didn't provide a queue, just wants us to
	 * check queues and send
	 */
	if (txq != NULL) {
		if (list_empty(txq)) {
			/* empty queue */
			return HTC_SEND_QUEUE_DROP;
		}

		spin_lock_bh(&target->tx_lock);
		txqueue_depth = get_queue_depth(&ep->txq);
		spin_unlock_bh(&target->tx_lock);

		if (txqueue_depth >= ep->max_txq_depth) {
			/* we've already overflowed */
			overflow = get_queue_depth(txq);
		} else {
			/* get how much we will overflow by */
			overflow = txqueue_depth;
			overflow += get_queue_depth(txq);
			/* get how much we will overflow the TX queue by */
			overflow -= ep->max_txq_depth;
		}

		/* if overflow is negative or zero, we are okay */
		if (overflow > 0) {
			ath6kl_dbg(ATH6KL_DBG_HTC,
				   "%s: Endpoint %d, TX queue will overflow :%d, Tx Depth:%d, Max:%d\n",
				   __func__, ep->eid, overflow, txqueue_depth,
				   ep->max_txq_depth);
		}
		if ((overflow <= 0) ||
		    (ep->ep_cb.tx_full == NULL)) {
			/*
			 * all packets will fit or caller did not provide send
			 * full indication handler -- just move all of them
			 * to the local send_queue object
			 */
			list_splice_tail_init(txq, &send_queue);
		} else {
			good_pkts = get_queue_depth(txq) - overflow;
			if (good_pkts < 0) {
				WARN_ON_ONCE(1);
				return HTC_SEND_QUEUE_DROP;
			}

			/* we have overflowed, and a callback is provided */
			/* dequeue all non-overflow packets to the sendqueue */
			for (i = 0; i < good_pkts; i++) {
				/* pop off caller's queue */
				packet = list_first_entry(txq,
							  struct htc_packet,
							  list);
				/* move to local queue */
				list_move_tail(&packet->list, &send_queue);
			}

			/*
			 * the caller's queue has all the packets that won't fit
			 * walk through the caller's queue and indicate each to
			 * the send full handler
			 */
			list_for_each_entry_safe(packet, tmp_pkt,
						 txq, list) {
				ath6kl_dbg(ATH6KL_DBG_HTC,
					   "%s: Indicat overflowed TX pkts: %p\n",
					   __func__, packet);
				action = ep->ep_cb.tx_full(ep->target, packet);
				if (action == HTC_SEND_FULL_DROP) {
					/* callback wants the packet dropped */
					ep->ep_st.tx_dropped += 1;

					/* leave this one in the caller's queue
					 * for cleanup */
				} else {
					/* callback wants to keep this packet,
					 * move from caller's queue to the send
					 * queue */
					list_move_tail(&packet->list,
						       &send_queue);
				}
			}

			if (list_empty(&send_queue)) {
				/* no packets made it in, caller will cleanup */
				return HTC_SEND_QUEUE_DROP;
			}
		}
	}

	/* without credit flow control, throttle on HIF free queue slots */
	if (!ep->pipe.tx_credit_flow_enabled) {
		tx_resources =
			ath6kl_hif_pipe_get_free_queue_number(ar,
							      ep->pipe.pipeid_ul);
	} else {
		tx_resources = 0;
	}

	spin_lock_bh(&target->tx_lock);
	if (!list_empty(&send_queue)) {
		/* transfer packets to tail */
		list_splice_tail_init(&send_queue, &ep->txq);
		if (!list_empty(&send_queue)) {
			WARN_ON_ONCE(1);
			spin_unlock_bh(&target->tx_lock);
			return HTC_SEND_QUEUE_DROP;
		}
		INIT_LIST_HEAD(&send_queue);
	}

	/* increment tx processing count on entry */
	ep->tx_proc_cnt++;

	if (ep->tx_proc_cnt > 1) {
		/*
		 * Another thread or task is draining the TX queues on this
		 * endpoint that thread will reset the tx processing count
		 * when the queue is drained.
		 */
		ep->tx_proc_cnt--;
		spin_unlock_bh(&target->tx_lock);
		return HTC_SEND_QUEUE_OK;
	}

	/***** beyond this point only 1 thread may enter ******/

	/*
	 * Now drain the endpoint TX queue for transmission as long as we have
	 * enough transmit resources.
	 */
	while (true) {
		if (get_queue_depth(&ep->txq) == 0)
			break;

		if (ep->pipe.tx_credit_flow_enabled) {
			/*
			 * Credit based mechanism provides flow control
			 * based on target transmit resource availability,
			 * we assume that the HIF layer will always have
			 * bus resources greater than target transmit
			 * resources.
			 */
			get_htc_packet_credit_based(target, ep, &send_queue);
		} else {
			/*
			 * Get all packets for this endpoint that we can
			 * for this pass.
			 */
			get_htc_packet(target, ep, &send_queue, tx_resources);
		}

		if (get_queue_depth(&send_queue) == 0) {
			/*
			 * Didn't get packets due to out of resources or TX
			 * queue was drained.
			 */
			break;
		}

		/* drop the lock while we hand packets to the HIF layer */
		spin_unlock_bh(&target->tx_lock);

		/* send what we can */
		htc_issue_packets(target, ep, &send_queue);

		if (!ep->pipe.tx_credit_flow_enabled) {
			pipeid = ep->pipe.pipeid_ul;
			tx_resources =
				ath6kl_hif_pipe_get_free_queue_number(ar, pipeid);
		}

		spin_lock_bh(&target->tx_lock);
	}

	/* done with this endpoint, we can clear the count */
	ep->tx_proc_cnt = 0;
	spin_unlock_bh(&target->tx_lock);

	return HTC_SEND_QUEUE_OK;
}
  426. /* htc control packet manipulation */
  427. static void destroy_htc_txctrl_packet(struct htc_packet *packet)
  428. {
  429. struct sk_buff *skb;
  430. skb = packet->skb;
  431. if (skb != NULL)
  432. dev_kfree_skb(skb);
  433. kfree(packet);
  434. }
  435. static struct htc_packet *build_htc_txctrl_packet(void)
  436. {
  437. struct htc_packet *packet = NULL;
  438. struct sk_buff *skb;
  439. packet = kzalloc(sizeof(struct htc_packet), GFP_KERNEL);
  440. if (packet == NULL)
  441. return NULL;
  442. skb = __dev_alloc_skb(HTC_CONTROL_BUFFER_SIZE, GFP_KERNEL);
  443. if (skb == NULL) {
  444. kfree(packet);
  445. return NULL;
  446. }
  447. packet->skb = skb;
  448. return packet;
  449. }
  450. static void htc_free_txctrl_packet(struct htc_target *target,
  451. struct htc_packet *packet)
  452. {
  453. destroy_htc_txctrl_packet(packet);
  454. }
  455. static struct htc_packet *htc_alloc_txctrl_packet(struct htc_target *target)
  456. {
  457. return build_htc_txctrl_packet();
  458. }
  459. static void htc_txctrl_complete(struct htc_target *target,
  460. struct htc_packet *packet)
  461. {
  462. htc_free_txctrl_packet(target, packet);
  463. }
  464. #define MAX_MESSAGE_SIZE 1536
/*
 * Distribute the target's TX credit pool among the WMI services, filling in
 * target->pipe.txcredit_alloc[]. Returns 0 on success, or -ENOMEM when the
 * pool runs out before every service receives an allocation.
 */
static int htc_setup_target_buffer_assignments(struct htc_target *target)
{
	int status, credits, credit_per_maxmsg, i;
	struct htc_pipe_txcredit_alloc *entry;
	unsigned int hif_usbaudioclass = 0;

	/* credits needed by one maximum-sized message, rounded up */
	credit_per_maxmsg = MAX_MESSAGE_SIZE / target->tgt_cred_sz;
	if (MAX_MESSAGE_SIZE % target->tgt_cred_sz)
		credit_per_maxmsg++;

	/* TODO, this should be configured by the caller! */

	credits = target->tgt_creds;
	entry = &target->pipe.txcredit_alloc[0];

	status = -ENOMEM;

	/* FIXME: hif_usbaudioclass is always zero */

	if (hif_usbaudioclass) {
		ath6kl_dbg(ATH6KL_DBG_HTC,
			   "%s: For USB Audio Class- Total:%d\n",
			   __func__, credits);
		/* NOTE(review): the consecutive entry++ increments appear to
		 * skip table slots for other services — confirm the intended
		 * slot layout against htc_pipe_txcredit_alloc[] users */
		entry++;
		entry++;
		/* Setup VO Service To have Max Credits */
		entry->service_id = WMI_DATA_VO_SVC;
		entry->credit_alloc = (credits - 6);
		if (entry->credit_alloc == 0)
			entry->credit_alloc++;

		credits -= (int) entry->credit_alloc;
		if (credits <= 0)
			return status;

		entry++;
		entry->service_id = WMI_CONTROL_SVC;
		entry->credit_alloc = credit_per_maxmsg;
		credits -= (int) entry->credit_alloc;
		if (credits <= 0)
			return status;

		/* leftovers go to best effort */
		entry++;
		entry++;
		entry->service_id = WMI_DATA_BE_SVC;
		entry->credit_alloc = (u8) credits;
		status = 0;
	} else {
		/* a quarter of the pool (at least 1 credit) to video */
		entry++;
		entry->service_id = WMI_DATA_VI_SVC;
		entry->credit_alloc = credits / 4;
		if (entry->credit_alloc == 0)
			entry->credit_alloc++;

		credits -= (int) entry->credit_alloc;
		if (credits <= 0)
			return status;

		/* a quarter of the pool (at least 1 credit) to voice */
		entry++;
		entry->service_id = WMI_DATA_VO_SVC;
		entry->credit_alloc = credits / 4;
		if (entry->credit_alloc == 0)
			entry->credit_alloc++;

		credits -= (int) entry->credit_alloc;
		if (credits <= 0)
			return status;

		/* control gets enough for one maximum-sized message */
		entry++;
		entry->service_id = WMI_CONTROL_SVC;
		entry->credit_alloc = credit_per_maxmsg;
		credits -= (int) entry->credit_alloc;
		if (credits <= 0)
			return status;

		/* background gets enough for one maximum-sized message */
		entry++;
		entry->service_id = WMI_DATA_BK_SVC;
		entry->credit_alloc = credit_per_maxmsg;
		credits -= (int) entry->credit_alloc;
		if (credits <= 0)
			return status;

		/* leftovers go to best effort */
		entry++;
		entry->service_id = WMI_DATA_BE_SVC;
		entry->credit_alloc = (u8) credits;
		status = 0;
	}

	if (status == 0) {
		/* log the final per-service credit allocation table */
		for (i = 0; i < ENDPOINT_MAX; i++) {
			if (target->pipe.txcredit_alloc[i].service_id != 0) {
				ath6kl_dbg(ATH6KL_DBG_HTC,
					   "HTC Service Index : %d TX : 0x%2.2X : alloc:%d\n",
					   i,
					   target->pipe.txcredit_alloc[i].
					   service_id,
					   target->pipe.txcredit_alloc[i].
					   credit_alloc);
			}
		}
	}

	return status;
}
  554. /* process credit reports and call distribution function */
/*
 * Apply a credit report from the target: add the returned credits to each
 * reported endpoint, and kick the TX path for any endpoint that now has
 * both credits and queued packets.
 *
 * The TX lock is held while credits are updated but deliberately dropped
 * around htc_try_send(), which takes the lock itself.
 */
static void htc_process_credit_report(struct htc_target *target,
				      struct htc_credit_report *rpt,
				      int num_entries,
				      enum htc_endpoint_id from_ep)
{
	int total_credits = 0, i;
	struct htc_endpoint *ep;

	/* lock out TX while we update credits */
	spin_lock_bh(&target->tx_lock);

	for (i = 0; i < num_entries; i++, rpt++) {
		/* reject reports naming an out-of-range endpoint */
		if (rpt->eid >= ENDPOINT_MAX) {
			WARN_ON_ONCE(1);
			spin_unlock_bh(&target->tx_lock);
			return;
		}

		ep = &target->endpoint[rpt->eid];
		ep->cred_dist.credits += rpt->credits;

		if (ep->cred_dist.credits && get_queue_depth(&ep->txq)) {
			/* drop the lock; htc_try_send re-acquires it */
			spin_unlock_bh(&target->tx_lock);
			htc_try_send(target, ep, NULL);
			spin_lock_bh(&target->tx_lock);
		}

		total_credits += rpt->credits;
	}
	ath6kl_dbg(ATH6KL_DBG_HTC,
		   "Report indicated %d credits to distribute\n",
		   total_credits);

	spin_unlock_bh(&target->tx_lock);
}
  584. /* flush endpoint TX queue */
/*
 * Flush every packet still queued on the endpoint's TX queue, completing
 * each with status 0. The 'tag' parameter is currently unused: all queued
 * packets are flushed unconditionally.
 *
 * NOTE(review): the completion callbacks are invoked while tx_lock is held
 * here — confirm no registered tx_complete handler re-takes tx_lock.
 */
static void htc_flush_tx_endpoint(struct htc_target *target,
				  struct htc_endpoint *ep, u16 tag)
{
	struct htc_packet *packet;

	spin_lock_bh(&target->tx_lock);
	while (get_queue_depth(&ep->txq)) {
		packet = list_first_entry(&ep->txq, struct htc_packet, list);
		list_del(&packet->list);
		packet->status = 0;
		send_packet_completion(target, packet);
	}
	spin_unlock_bh(&target->tx_lock);
}
/*
 * In the adapted HIF layer, struct sk_buff * are passed between HIF and HTC;
 * since the upper layers expect struct htc_packet containers, we take the
 * completed skb and look up its corresponding HTC packet buffer in a lookup
 * list. This is extra overhead that can be fixed by re-aligning HIF
 * interfaces with HTC.
 */
  605. static struct htc_packet *htc_lookup_tx_packet(struct htc_target *target,
  606. struct htc_endpoint *ep,
  607. struct sk_buff *skb)
  608. {
  609. struct htc_packet *packet, *tmp_pkt, *found_packet = NULL;
  610. spin_lock_bh(&target->tx_lock);
  611. /*
  612. * interate from the front of tx lookup queue
  613. * this lookup should be fast since lower layers completes in-order and
  614. * so the completed packet should be at the head of the list generally
  615. */
  616. list_for_each_entry_safe(packet, tmp_pkt, &ep->pipe.tx_lookup_queue,
  617. list) {
  618. /* check for removal */
  619. if (skb == packet->skb) {
  620. /* found it */
  621. list_del(&packet->list);
  622. found_packet = packet;
  623. break;
  624. }
  625. }
  626. spin_unlock_bh(&target->tx_lock);
  627. return found_packet;
  628. }
  629. static int ath6kl_htc_pipe_tx_complete(struct ath6kl *ar, struct sk_buff *skb)
  630. {
  631. struct htc_target *target = ar->htc_target;
  632. struct htc_frame_hdr *htc_hdr;
  633. struct htc_endpoint *ep;
  634. struct htc_packet *packet;
  635. u8 ep_id, *netdata;
  636. u32 netlen;
  637. netdata = skb->data;
  638. netlen = skb->len;
  639. htc_hdr = (struct htc_frame_hdr *) netdata;
  640. ep_id = htc_hdr->eid;
  641. ep = &target->endpoint[ep_id];
  642. packet = htc_lookup_tx_packet(target, ep, skb);
  643. if (packet == NULL) {
  644. /* may have already been flushed and freed */
  645. ath6kl_err("HTC TX lookup failed!\n");
  646. } else {
  647. /* will be giving this buffer back to upper layers */
  648. packet->status = 0;
  649. send_packet_completion(target, packet);
  650. }
  651. skb = NULL;
  652. if (!ep->pipe.tx_credit_flow_enabled) {
  653. /*
  654. * note: when using TX credit flow, the re-checking of queues
  655. * happens when credits flow back from the target. in the
  656. * non-TX credit case, we recheck after the packet completes
  657. */
  658. htc_try_send(target, ep, NULL);
  659. }
  660. return 0;
  661. }
  662. static int htc_send_packets_multiple(struct htc_target *target,
  663. struct list_head *pkt_queue)
  664. {
  665. struct htc_endpoint *ep;
  666. struct htc_packet *packet, *tmp_pkt;
  667. if (list_empty(pkt_queue))
  668. return -EINVAL;
  669. /* get first packet to find out which ep the packets will go into */
  670. packet = list_first_entry(pkt_queue, struct htc_packet, list);
  671. if (packet->endpoint >= ENDPOINT_MAX) {
  672. WARN_ON_ONCE(1);
  673. return -EINVAL;
  674. }
  675. ep = &target->endpoint[packet->endpoint];
  676. htc_try_send(target, ep, pkt_queue);
  677. /* do completion on any packets that couldn't get in */
  678. if (!list_empty(pkt_queue)) {
  679. list_for_each_entry_safe(packet, tmp_pkt, pkt_queue, list) {
  680. packet->status = -ENOMEM;
  681. }
  682. do_send_completion(ep, pkt_queue);
  683. }
  684. return 0;
  685. }
  686. /* htc pipe rx path */
  687. static struct htc_packet *alloc_htc_packet_container(struct htc_target *target)
  688. {
  689. struct htc_packet *packet;
  690. spin_lock_bh(&target->rx_lock);
  691. if (target->pipe.htc_packet_pool == NULL) {
  692. spin_unlock_bh(&target->rx_lock);
  693. return NULL;
  694. }
  695. packet = target->pipe.htc_packet_pool;
  696. target->pipe.htc_packet_pool = (struct htc_packet *) packet->list.next;
  697. spin_unlock_bh(&target->rx_lock);
  698. packet->list.next = NULL;
  699. return packet;
  700. }
  701. static void free_htc_packet_container(struct htc_target *target,
  702. struct htc_packet *packet)
  703. {
  704. struct list_head *lh;
  705. spin_lock_bh(&target->rx_lock);
  706. if (target->pipe.htc_packet_pool == NULL) {
  707. target->pipe.htc_packet_pool = packet;
  708. packet->list.next = NULL;
  709. } else {
  710. lh = (struct list_head *) target->pipe.htc_packet_pool;
  711. packet->list.next = lh;
  712. target->pipe.htc_packet_pool = packet;
  713. }
  714. spin_unlock_bh(&target->rx_lock);
  715. }
/*
 * Walk the trailer records appended to a received HTC message. Each record
 * is a byte-aligned htc_record_hdr followed by record->len bytes of payload.
 * Credit-report records are dispatched to htc_process_credit_report();
 * unknown record types are logged and skipped.
 *
 * Returns 0 on success or -EINVAL on a malformed trailer.
 */
static int htc_process_trailer(struct htc_target *target, u8 *buffer,
			       int len, enum htc_endpoint_id from_ep)
{
	struct htc_credit_report *report;
	struct htc_record_hdr *record;
	u8 *record_buf, *orig_buf;
	int orig_len, status;

	orig_buf = buffer;
	orig_len = len;
	status = 0;

	while (len > 0) {
		if (len < sizeof(struct htc_record_hdr)) {
			status = -EINVAL;
			break;
		}

		/* these are byte aligned structs */
		record = (struct htc_record_hdr *) buffer;
		len -= sizeof(struct htc_record_hdr);
		buffer += sizeof(struct htc_record_hdr);

		if (record->len > len) {
			/* no room left in buffer for record */
			ath6kl_dbg(ATH6KL_DBG_HTC,
				   "invalid length: %d (id:%d) buffer has: %d bytes left\n",
				   record->len, record->rec_id, len);
			status = -EINVAL;
			break;
		}

		/* start of record follows the header */
		record_buf = buffer;

		switch (record->rec_id) {
		case HTC_RECORD_CREDITS:
			/* a credits record must hold at least one report */
			if (record->len < sizeof(struct htc_credit_report)) {
				WARN_ON_ONCE(1);
				return -EINVAL;
			}

			report = (struct htc_credit_report *) record_buf;
			htc_process_credit_report(target, report,
						  record->len / sizeof(*report),
						  from_ep);
			break;
		default:
			ath6kl_dbg(ATH6KL_DBG_HTC,
				   "unhandled record: id:%d length:%d\n",
				   record->rec_id, record->len);
			break;
		}

		if (status != 0)
			break;

		/* advance buffer past this record for next time around */
		buffer += record->len;
		len -= record->len;
	}

	return status;
}
  770. static void do_recv_completion(struct htc_endpoint *ep,
  771. struct list_head *queue_to_indicate)
  772. {
  773. struct htc_packet *packet;
  774. if (list_empty(queue_to_indicate)) {
  775. /* nothing to indicate */
  776. return;
  777. }
  778. /* using legacy EpRecv */
  779. while (!list_empty(queue_to_indicate)) {
  780. packet = list_first_entry(queue_to_indicate,
  781. struct htc_packet, list);
  782. list_del(&packet->list);
  783. ep->ep_cb.rx(ep->target, packet);
  784. }
  785. return;
  786. }
  787. static void recv_packet_completion(struct htc_target *target,
  788. struct htc_endpoint *ep,
  789. struct htc_packet *packet)
  790. {
  791. struct list_head container;
  792. INIT_LIST_HEAD(&container);
  793. list_add_tail(&packet->list, &container);
  794. /* do completion */
  795. do_recv_completion(ep, &container);
  796. }
  797. static int ath6kl_htc_pipe_rx_complete(struct ath6kl *ar, struct sk_buff *skb,
  798. u8 pipeid)
  799. {
  800. struct htc_target *target = ar->htc_target;
  801. u8 *netdata, *trailer, hdr_info;
  802. struct htc_frame_hdr *htc_hdr;
  803. u32 netlen, trailerlen = 0;
  804. struct htc_packet *packet;
  805. struct htc_endpoint *ep;
  806. u16 payload_len;
  807. int status = 0;
  808. netdata = skb->data;
  809. netlen = skb->len;
  810. htc_hdr = (struct htc_frame_hdr *) netdata;
  811. ep = &target->endpoint[htc_hdr->eid];
  812. if (htc_hdr->eid >= ENDPOINT_MAX) {
  813. ath6kl_dbg(ATH6KL_DBG_HTC,
  814. "HTC Rx: invalid EndpointID=%d\n",
  815. htc_hdr->eid);
  816. status = -EINVAL;
  817. goto free_skb;
  818. }
  819. payload_len = le16_to_cpu(get_unaligned(&htc_hdr->payld_len));
  820. if (netlen < (payload_len + HTC_HDR_LENGTH)) {
  821. ath6kl_dbg(ATH6KL_DBG_HTC,
  822. "HTC Rx: insufficient length, got:%d expected =%u\n",
  823. netlen, payload_len + HTC_HDR_LENGTH);
  824. status = -EINVAL;
  825. goto free_skb;
  826. }
  827. /* get flags to check for trailer */
  828. hdr_info = htc_hdr->flags;
  829. if (hdr_info & HTC_FLG_RX_TRAILER) {
  830. /* extract the trailer length */
  831. hdr_info = htc_hdr->ctrl[0];
  832. if ((hdr_info < sizeof(struct htc_record_hdr)) ||
  833. (hdr_info > payload_len)) {
  834. ath6kl_dbg(ATH6KL_DBG_HTC,
  835. "invalid header: payloadlen should be %d, CB[0]: %d\n",
  836. payload_len, hdr_info);
  837. status = -EINVAL;
  838. goto free_skb;
  839. }
  840. trailerlen = hdr_info;
  841. /* process trailer after hdr/apps payload */
  842. trailer = (u8 *) htc_hdr + HTC_HDR_LENGTH +
  843. payload_len - hdr_info;
  844. status = htc_process_trailer(target, trailer, hdr_info,
  845. htc_hdr->eid);
  846. if (status != 0)
  847. goto free_skb;
  848. }
  849. if (((int) payload_len - (int) trailerlen) <= 0) {
  850. /* zero length packet with trailer, just drop these */
  851. goto free_skb;
  852. }
  853. if (htc_hdr->eid == ENDPOINT_0) {
  854. /* handle HTC control message */
  855. if (target->htc_flags & HTC_OP_STATE_SETUP_COMPLETE) {
  856. /*
  857. * fatal: target should not send unsolicited
  858. * messageson the endpoint 0
  859. */
  860. ath6kl_dbg(ATH6KL_DBG_HTC,
  861. "HTC ignores Rx Ctrl after setup complete\n");
  862. status = -EINVAL;
  863. goto free_skb;
  864. }
  865. /* remove HTC header */
  866. skb_pull(skb, HTC_HDR_LENGTH);
  867. netdata = skb->data;
  868. netlen = skb->len;
  869. spin_lock_bh(&target->rx_lock);
  870. target->pipe.ctrl_response_valid = true;
  871. target->pipe.ctrl_response_len = min_t(int, netlen,
  872. HTC_MAX_CTRL_MSG_LEN);
  873. memcpy(target->pipe.ctrl_response_buf, netdata,
  874. target->pipe.ctrl_response_len);
  875. spin_unlock_bh(&target->rx_lock);
  876. dev_kfree_skb(skb);
  877. skb = NULL;
  878. goto free_skb;
  879. }
  880. /*
  881. * TODO: the message based HIF architecture allocates net bufs
  882. * for recv packets since it bridges that HIF to upper layers,
  883. * which expects HTC packets, we form the packets here
  884. */
  885. packet = alloc_htc_packet_container(target);
  886. if (packet == NULL) {
  887. status = -ENOMEM;
  888. goto free_skb;
  889. }
  890. packet->status = 0;
  891. packet->endpoint = htc_hdr->eid;
  892. packet->pkt_cntxt = skb;
  893. /* TODO: for backwards compatibility */
  894. packet->buf = skb_push(skb, 0) + HTC_HDR_LENGTH;
  895. packet->act_len = netlen - HTC_HDR_LENGTH - trailerlen;
  896. /*
  897. * TODO: this is a hack because the driver layer will set the
  898. * actual len of the skb again which will just double the len
  899. */
  900. skb_trim(skb, 0);
  901. recv_packet_completion(target, ep, packet);
  902. /* recover the packet container */
  903. free_htc_packet_container(target, packet);
  904. skb = NULL;
  905. free_skb:
  906. if (skb != NULL)
  907. dev_kfree_skb(skb);
  908. return status;
  909. }
/*
 * Cancel and return every buffered rx packet queued on @ep->rx_bufq.
 * Each packet is completed with status -ECANCELED and zero length so the
 * owner can reclaim it.
 */
static void htc_flush_rx_queue(struct htc_target *target,
			       struct htc_endpoint *ep)
{
	struct list_head container;
	struct htc_packet *packet;

	spin_lock_bh(&target->rx_lock);

	while (1) {
		if (list_empty(&ep->rx_bufq))
			break;

		packet = list_first_entry(&ep->rx_bufq,
					  struct htc_packet, list);
		list_del(&packet->list);

		/* drop rx_lock around the completion callback —
		 * NOTE(review): presumably because ep_cb.rx may re-enter
		 * HTC and take rx_lock itself; confirm against callers */
		spin_unlock_bh(&target->rx_lock);
		packet->status = -ECANCELED;
		packet->act_len = 0;

		ath6kl_dbg(ATH6KL_DBG_HTC,
			   "Flushing RX packet:0x%p, length:%d, ep:%d\n",
			   packet, packet->buf_len,
			   packet->endpoint);

		INIT_LIST_HEAD(&container);
		list_add_tail(&packet->list, &container);

		/* give the packet back */
		do_recv_completion(ep, &container);
		spin_lock_bh(&target->rx_lock);
	}

	spin_unlock_bh(&target->rx_lock);
}
  937. /* polling routine to wait for a control packet to be received */
  938. static int htc_wait_recv_ctrl_message(struct htc_target *target)
  939. {
  940. int count = HTC_TARGET_RESPONSE_POLL_COUNT;
  941. while (count > 0) {
  942. spin_lock_bh(&target->rx_lock);
  943. if (target->pipe.ctrl_response_valid) {
  944. target->pipe.ctrl_response_valid = false;
  945. spin_unlock_bh(&target->rx_lock);
  946. break;
  947. }
  948. spin_unlock_bh(&target->rx_lock);
  949. count--;
  950. msleep_interruptible(HTC_TARGET_RESPONSE_POLL_WAIT);
  951. }
  952. if (count <= 0) {
  953. ath6kl_dbg(ATH6KL_DBG_HTC, "%s: Timeout!\n", __func__);
  954. return -ECOMM;
  955. }
  956. return 0;
  957. }
/*
 * Rx callback registered for the pseudo control endpoint (ENDPOINT_0).
 * Control responses are captured directly in ath6kl_htc_pipe_rx_complete(),
 * so this callback is never expected to run; it only logs the call.
 */
static void htc_rxctrl_complete(struct htc_target *context,
				struct htc_packet *packet)
{
	/* TODO, can't really receive HTC control messages yet.... */
	ath6kl_dbg(ATH6KL_DBG_HTC, "%s: invalid call function\n", __func__);
}
  964. /* htc pipe initialization */
  965. static void reset_endpoint_states(struct htc_target *target)
  966. {
  967. struct htc_endpoint *ep;
  968. int i;
  969. for (i = ENDPOINT_0; i < ENDPOINT_MAX; i++) {
  970. ep = &target->endpoint[i];
  971. ep->svc_id = 0;
  972. ep->len_max = 0;
  973. ep->max_txq_depth = 0;
  974. ep->eid = i;
  975. INIT_LIST_HEAD(&ep->txq);
  976. INIT_LIST_HEAD(&ep->pipe.tx_lookup_queue);
  977. INIT_LIST_HEAD(&ep->rx_bufq);
  978. ep->target = target;
  979. ep->pipe.tx_credit_flow_enabled = (bool) 1; /* FIXME */
  980. }
  981. }
  982. /* start HTC, this is called after all services are connected */
/* start HTC, this is called after all services are connected */
static int htc_config_target_hif_pipe(struct htc_target *target)
{
	/* intentionally a no-op: no pipe-specific HIF configuration yet */
	return 0;
}
  987. /* htc service functions */
  988. static u8 htc_get_credit_alloc(struct htc_target *target, u16 service_id)
  989. {
  990. u8 allocation = 0;
  991. int i;
  992. for (i = 0; i < ENDPOINT_MAX; i++) {
  993. if (target->pipe.txcredit_alloc[i].service_id == service_id)
  994. allocation =
  995. target->pipe.txcredit_alloc[i].credit_alloc;
  996. }
  997. if (allocation == 0) {
  998. ath6kl_dbg(ATH6KL_DBG_HTC,
  999. "HTC Service TX : 0x%2.2X : allocation is zero!\n",
  1000. service_id);
  1001. }
  1002. return allocation;
  1003. }
/*
 * Connect an HTC service on the target and configure the local endpoint.
 *
 * For the pseudo control service (HTC_CTRL_RSVD_SVC) the endpoint is
 * fixed to ENDPOINT_0 with no message exchange. For real services a
 * connect message is sent to the target and the response (polled via
 * htc_wait_recv_ctrl_message()) supplies the assigned endpoint id and
 * max message size. On success the endpoint is marked in use, its credit
 * accounting is initialized and its HIF pipes are mapped.
 *
 * Returns 0 on success or a negative errno.
 */
static int ath6kl_htc_pipe_conn_service(struct htc_target *target,
					struct htc_service_connect_req *conn_req,
					struct htc_service_connect_resp *conn_resp)
{
	struct ath6kl *ar = target->dev->ar;
	struct htc_packet *packet = NULL;
	struct htc_conn_service_resp *resp_msg;
	struct htc_conn_service_msg *conn_msg;
	enum htc_endpoint_id assigned_epid = ENDPOINT_MAX;
	bool disable_credit_flowctrl = false;
	unsigned int max_msg_size = 0;
	struct htc_endpoint *ep;
	int length, status = 0;
	struct sk_buff *skb;
	u8 tx_alloc;
	u16 flags;

	if (conn_req->svc_id == 0) {
		WARN_ON_ONCE(1);
		status = -EINVAL;
		goto free_packet;
	}

	if (conn_req->svc_id == HTC_CTRL_RSVD_SVC) {
		/* special case for pseudo control service */
		assigned_epid = ENDPOINT_0;
		max_msg_size = HTC_MAX_CTRL_MSG_LEN;
		tx_alloc = 0;
	} else {
		/* a zero allocation means the service cannot send */
		tx_alloc = htc_get_credit_alloc(target, conn_req->svc_id);
		if (tx_alloc == 0) {
			status = -ENOMEM;
			goto free_packet;
		}

		/* allocate a packet to send to the target */
		packet = htc_alloc_txctrl_packet(target);

		if (packet == NULL) {
			WARN_ON_ONCE(1);
			status = -ENOMEM;
			goto free_packet;
		}

		skb = packet->skb;
		length = sizeof(struct htc_conn_service_msg);

		/* assemble connect service message */
		conn_msg = (struct htc_conn_service_msg *) skb_put(skb,
								   length);
		if (conn_msg == NULL) {
			WARN_ON_ONCE(1);
			status = -EINVAL;
			goto free_packet;
		}

		memset(conn_msg, 0,
		       sizeof(struct htc_conn_service_msg));

		conn_msg->msg_id = cpu_to_le16(HTC_MSG_CONN_SVC_ID);
		conn_msg->svc_id = cpu_to_le16(conn_req->svc_id);
		conn_msg->conn_flags = cpu_to_le16(conn_req->conn_flags &
					~HTC_CONN_FLGS_SET_RECV_ALLOC_MASK);

		/* tell target desired recv alloc for this ep */
		flags = tx_alloc << HTC_CONN_FLGS_SET_RECV_ALLOC_SHIFT;
		conn_msg->conn_flags |= cpu_to_le16(flags);

		if (conn_req->conn_flags &
		    HTC_CONN_FLGS_DISABLE_CRED_FLOW_CTRL) {
			disable_credit_flowctrl = true;
		}

		set_htc_pkt_info(packet, NULL, (u8 *) conn_msg,
				 length,
				 ENDPOINT_0, HTC_SERVICE_TX_PACKET_TAG);

		status = ath6kl_htc_pipe_tx(target, packet);

		/* we don't own it anymore */
		packet = NULL;
		if (status != 0)
			goto free_packet;

		/* wait for response */
		status = htc_wait_recv_ctrl_message(target);
		if (status != 0)
			goto free_packet;

		/* we controlled the buffer creation so it has to be
		 * properly aligned
		 */
		resp_msg = (struct htc_conn_service_resp *)
		    target->pipe.ctrl_response_buf;

		/* validate message id and that the full response arrived */
		if (resp_msg->msg_id != cpu_to_le16(HTC_MSG_CONN_SVC_RESP_ID) ||
		    (target->pipe.ctrl_response_len < sizeof(*resp_msg))) {
			/* this message is not valid */
			WARN_ON_ONCE(1);
			status = -EINVAL;
			goto free_packet;
		}

		ath6kl_dbg(ATH6KL_DBG_TRC,
			   "%s: service 0x%X conn resp: status: %d ep: %d\n",
			   __func__, resp_msg->svc_id, resp_msg->status,
			   resp_msg->eid);

		conn_resp->resp_code = resp_msg->status;
		/* check response status */
		if (resp_msg->status != HTC_SERVICE_SUCCESS) {
			ath6kl_dbg(ATH6KL_DBG_HTC,
				   "Target failed service 0x%X connect request (status:%d)\n",
				   resp_msg->svc_id, resp_msg->status);
			status = -EINVAL;
			goto free_packet;
		}

		assigned_epid = (enum htc_endpoint_id) resp_msg->eid;
		max_msg_size = le16_to_cpu(resp_msg->max_msg_sz);
	}

	/* the rest are parameter checks so set the error status */
	status = -EINVAL;

	if (assigned_epid >= ENDPOINT_MAX) {
		WARN_ON_ONCE(1);
		goto free_packet;
	}

	if (max_msg_size == 0) {
		WARN_ON_ONCE(1);
		goto free_packet;
	}

	ep = &target->endpoint[assigned_epid];
	ep->eid = assigned_epid;
	if (ep->svc_id != 0) {
		/* endpoint already in use! */
		WARN_ON_ONCE(1);
		goto free_packet;
	}

	/* return assigned endpoint to caller */
	conn_resp->endpoint = assigned_epid;
	conn_resp->len_max = max_msg_size;

	/* setup the endpoint */
	ep->svc_id = conn_req->svc_id; /* this marks ep in use */
	ep->max_txq_depth = conn_req->max_txq_depth;
	ep->len_max = max_msg_size;
	ep->cred_dist.credits = tx_alloc;
	ep->cred_dist.cred_sz = target->tgt_cred_sz;
	/* round up the per-message credit cost */
	ep->cred_dist.cred_per_msg = max_msg_size / target->tgt_cred_sz;
	if (max_msg_size % target->tgt_cred_sz)
		ep->cred_dist.cred_per_msg++;

	/* copy all the callbacks */
	ep->ep_cb = conn_req->ep_cb;

	/* initialize tx_drop_packet_threshold */
	ep->tx_drop_packet_threshold = MAX_HI_COOKIE_NUM;

	status = ath6kl_hif_pipe_map_service(ar, ep->svc_id,
					     &ep->pipe.pipeid_ul,
					     &ep->pipe.pipeid_dl);
	if (status != 0)
		goto free_packet;

	ath6kl_dbg(ATH6KL_DBG_HTC,
		   "SVC Ready: 0x%4.4X: ULpipe:%d DLpipe:%d id:%d\n",
		   ep->svc_id, ep->pipe.pipeid_ul,
		   ep->pipe.pipeid_dl, ep->eid);

	if (disable_credit_flowctrl && ep->pipe.tx_credit_flow_enabled) {
		ep->pipe.tx_credit_flow_enabled = false;
		ath6kl_dbg(ATH6KL_DBG_HTC,
			   "SVC: 0x%4.4X ep:%d TX flow control off\n",
			   ep->svc_id, assigned_epid);
	}

free_packet:
	if (packet != NULL)
		htc_free_txctrl_packet(target, packet);
	return status;
}
  1159. /* htc export functions */
  1160. static void *ath6kl_htc_pipe_create(struct ath6kl *ar)
  1161. {
  1162. int status = 0;
  1163. struct htc_endpoint *ep = NULL;
  1164. struct htc_target *target = NULL;
  1165. struct htc_packet *packet;
  1166. int i;
  1167. target = kzalloc(sizeof(struct htc_target), GFP_KERNEL);
  1168. if (target == NULL) {
  1169. ath6kl_err("htc create unable to allocate memory\n");
  1170. status = -ENOMEM;
  1171. goto fail_htc_create;
  1172. }
  1173. spin_lock_init(&target->htc_lock);
  1174. spin_lock_init(&target->rx_lock);
  1175. spin_lock_init(&target->tx_lock);
  1176. reset_endpoint_states(target);
  1177. for (i = 0; i < HTC_PACKET_CONTAINER_ALLOCATION; i++) {
  1178. packet = kzalloc(sizeof(struct htc_packet), GFP_KERNEL);
  1179. if (packet != NULL)
  1180. free_htc_packet_container(target, packet);
  1181. }
  1182. target->dev = kzalloc(sizeof(*target->dev), GFP_KERNEL);
  1183. if (!target->dev) {
  1184. ath6kl_err("unable to allocate memory\n");
  1185. status = -ENOMEM;
  1186. goto fail_htc_create;
  1187. }
  1188. target->dev->ar = ar;
  1189. target->dev->htc_cnxt = target;
  1190. /* Get HIF default pipe for HTC message exchange */
  1191. ep = &target->endpoint[ENDPOINT_0];
  1192. ath6kl_hif_pipe_get_default(ar, &ep->pipe.pipeid_ul,
  1193. &ep->pipe.pipeid_dl);
  1194. return target;
  1195. fail_htc_create:
  1196. if (status != 0) {
  1197. if (target != NULL)
  1198. ath6kl_htc_pipe_cleanup(target);
  1199. target = NULL;
  1200. }
  1201. return target;
  1202. }
  1203. /* cleanup the HTC instance */
  1204. static void ath6kl_htc_pipe_cleanup(struct htc_target *target)
  1205. {
  1206. struct htc_packet *packet;
  1207. while (true) {
  1208. packet = alloc_htc_packet_container(target);
  1209. if (packet == NULL)
  1210. break;
  1211. kfree(packet);
  1212. }
  1213. kfree(target->dev);
  1214. /* kfree our instance */
  1215. kfree(target);
  1216. }
  1217. static int ath6kl_htc_pipe_start(struct htc_target *target)
  1218. {
  1219. struct sk_buff *skb;
  1220. struct htc_setup_comp_ext_msg *setup;
  1221. struct htc_packet *packet;
  1222. htc_config_target_hif_pipe(target);
  1223. /* allocate a buffer to send */
  1224. packet = htc_alloc_txctrl_packet(target);
  1225. if (packet == NULL) {
  1226. WARN_ON_ONCE(1);
  1227. return -ENOMEM;
  1228. }
  1229. skb = packet->skb;
  1230. /* assemble setup complete message */
  1231. setup = (struct htc_setup_comp_ext_msg *) skb_put(skb,
  1232. sizeof(*setup));
  1233. memset(setup, 0, sizeof(struct htc_setup_comp_ext_msg));
  1234. setup->msg_id = cpu_to_le16(HTC_MSG_SETUP_COMPLETE_EX_ID);
  1235. ath6kl_dbg(ATH6KL_DBG_HTC, "HTC using TX credit flow control\n");
  1236. set_htc_pkt_info(packet, NULL, (u8 *) setup,
  1237. sizeof(struct htc_setup_comp_ext_msg),
  1238. ENDPOINT_0, HTC_SERVICE_TX_PACKET_TAG);
  1239. target->htc_flags |= HTC_OP_STATE_SETUP_COMPLETE;
  1240. return ath6kl_htc_pipe_tx(target, packet);
  1241. }
  1242. static void ath6kl_htc_pipe_stop(struct htc_target *target)
  1243. {
  1244. int i;
  1245. struct htc_endpoint *ep;
  1246. /* cleanup endpoints */
  1247. for (i = 0; i < ENDPOINT_MAX; i++) {
  1248. ep = &target->endpoint[i];
  1249. htc_flush_rx_queue(target, ep);
  1250. htc_flush_tx_endpoint(target, ep, HTC_TX_PACKET_TAG_ALL);
  1251. }
  1252. reset_endpoint_states(target);
  1253. target->htc_flags &= ~HTC_OP_STATE_SETUP_COMPLETE;
  1254. }
  1255. static int ath6kl_htc_pipe_get_rxbuf_num(struct htc_target *target,
  1256. enum htc_endpoint_id endpoint)
  1257. {
  1258. int num;
  1259. spin_lock_bh(&target->rx_lock);
  1260. num = get_queue_depth(&(target->endpoint[endpoint].rx_bufq));
  1261. spin_unlock_bh(&target->rx_lock);
  1262. return num;
  1263. }
  1264. static int ath6kl_htc_pipe_tx(struct htc_target *target,
  1265. struct htc_packet *packet)
  1266. {
  1267. struct list_head queue;
  1268. ath6kl_dbg(ATH6KL_DBG_HTC,
  1269. "%s: endPointId: %d, buffer: 0x%p, length: %d\n",
  1270. __func__, packet->endpoint, packet->buf,
  1271. packet->act_len);
  1272. INIT_LIST_HEAD(&queue);
  1273. list_add_tail(&packet->list, &queue);
  1274. return htc_send_packets_multiple(target, &queue);
  1275. }
/*
 * Wait for the target's HTC ready message, validate it, record the
 * target's credit count/size, distribute buffer assignments and connect
 * the pseudo control service on ENDPOINT_0.
 *
 * Returns 0 on success, -ECOMM on timeout or a malformed/implausible
 * ready message, or the error from the control-service connect.
 */
static int ath6kl_htc_pipe_wait_target(struct htc_target *target)
{
	struct htc_ready_ext_msg *ready_msg;
	struct htc_service_connect_req connect;
	struct htc_service_connect_resp resp;
	int status = 0;

	/* the ready message arrives via the polled control-response path */
	status = htc_wait_recv_ctrl_message(target);
	if (status != 0)
		return status;

	if (target->pipe.ctrl_response_len < sizeof(*ready_msg)) {
		ath6kl_dbg(ATH6KL_DBG_HTC, "invalid htc ready msg len:%d!\n",
			   target->pipe.ctrl_response_len);
		return -ECOMM;
	}

	ready_msg = (struct htc_ready_ext_msg *) target->pipe.ctrl_response_buf;

	if (ready_msg->ver2_0_info.msg_id != cpu_to_le16(HTC_MSG_READY_ID)) {
		ath6kl_dbg(ATH6KL_DBG_HTC, "invalid htc ready msg : 0x%X !\n",
			   ready_msg->ver2_0_info.msg_id);
		return -ECOMM;
	}

	ath6kl_dbg(ATH6KL_DBG_HTC,
		   "Target Ready! : transmit resources : %d size:%d\n",
		   ready_msg->ver2_0_info.cred_cnt,
		   ready_msg->ver2_0_info.cred_sz);

	/* credit pool advertised by the target (little-endian on the wire) */
	target->tgt_creds = le16_to_cpu(ready_msg->ver2_0_info.cred_cnt);
	target->tgt_cred_sz = le16_to_cpu(ready_msg->ver2_0_info.cred_sz);

	if ((target->tgt_creds == 0) || (target->tgt_cred_sz == 0))
		return -ECOMM;

	htc_setup_target_buffer_assignments(target);

	/* setup our pseudo HTC control endpoint connection */
	memset(&connect, 0, sizeof(connect));
	memset(&resp, 0, sizeof(resp));
	connect.ep_cb.tx_complete = htc_txctrl_complete;
	connect.ep_cb.rx = htc_rxctrl_complete;
	connect.max_txq_depth = NUM_CONTROL_TX_BUFFERS;
	connect.svc_id = HTC_CTRL_RSVD_SVC;

	/* connect fake service */
	status = ath6kl_htc_pipe_conn_service(target, &connect, &resp);

	return status;
}
  1316. static void ath6kl_htc_pipe_flush_txep(struct htc_target *target,
  1317. enum htc_endpoint_id endpoint, u16 tag)
  1318. {
  1319. struct htc_endpoint *ep = &target->endpoint[endpoint];
  1320. if (ep->svc_id == 0) {
  1321. WARN_ON_ONCE(1);
  1322. /* not in use.. */
  1323. return;
  1324. }
  1325. htc_flush_tx_endpoint(target, ep, tag);
  1326. }
  1327. static int ath6kl_htc_pipe_add_rxbuf_multiple(struct htc_target *target,
  1328. struct list_head *pkt_queue)
  1329. {
  1330. struct htc_packet *packet, *tmp_pkt, *first;
  1331. struct htc_endpoint *ep;
  1332. int status = 0;
  1333. if (list_empty(pkt_queue))
  1334. return -EINVAL;
  1335. first = list_first_entry(pkt_queue, struct htc_packet, list);
  1336. if (first->endpoint >= ENDPOINT_MAX) {
  1337. WARN_ON_ONCE(1);
  1338. return -EINVAL;
  1339. }
  1340. ath6kl_dbg(ATH6KL_DBG_HTC, "%s: epid: %d, cnt:%d, len: %d\n",
  1341. __func__, first->endpoint, get_queue_depth(pkt_queue),
  1342. first->buf_len);
  1343. ep = &target->endpoint[first->endpoint];
  1344. spin_lock_bh(&target->rx_lock);
  1345. /* store receive packets */
  1346. list_splice_tail_init(pkt_queue, &ep->rx_bufq);
  1347. spin_unlock_bh(&target->rx_lock);
  1348. if (status != 0) {
  1349. /* walk through queue and mark each one canceled */
  1350. list_for_each_entry_safe(packet, tmp_pkt, pkt_queue, list) {
  1351. packet->status = -ECANCELED;
  1352. }
  1353. do_recv_completion(ep, pkt_queue);
  1354. }
  1355. return status;
  1356. }
/*
 * Endpoint activity notification hook — intentionally a no-op in the
 * pipe implementation (TODO in the original code).
 */
static void ath6kl_htc_pipe_activity_changed(struct htc_target *target,
					     enum htc_endpoint_id ep,
					     bool active)
{
	/* TODO */
}
/*
 * Flush-all-rx-buffers hook — intentionally a no-op in the pipe
 * implementation (TODO in the original code).
 */
static void ath6kl_htc_pipe_flush_rx_buf(struct htc_target *target)
{
	/* TODO */
}
/*
 * Credit-setup hook — a no-op here: the pipe implementation does not
 * use the shared credit_info (credits are assigned per service via
 * htc_setup_target_buffer_assignments()). Always returns 0.
 */
static int ath6kl_htc_pipe_credit_setup(struct htc_target *target,
					struct ath6kl_htc_credit_info *info)
{
	return 0;
}
/* HTC ops dispatch table for the pipe-based implementation */
static const struct ath6kl_htc_ops ath6kl_htc_pipe_ops = {
	.create = ath6kl_htc_pipe_create,
	.wait_target = ath6kl_htc_pipe_wait_target,
	.start = ath6kl_htc_pipe_start,
	.conn_service = ath6kl_htc_pipe_conn_service,
	.tx = ath6kl_htc_pipe_tx,
	.stop = ath6kl_htc_pipe_stop,
	.cleanup = ath6kl_htc_pipe_cleanup,
	.flush_txep = ath6kl_htc_pipe_flush_txep,
	.flush_rx_buf = ath6kl_htc_pipe_flush_rx_buf,
	.activity_changed = ath6kl_htc_pipe_activity_changed,
	.get_rxbuf_num = ath6kl_htc_pipe_get_rxbuf_num,
	.add_rxbuf_multiple = ath6kl_htc_pipe_add_rxbuf_multiple,
	.credit_setup = ath6kl_htc_pipe_credit_setup,
	.tx_complete = ath6kl_htc_pipe_tx_complete,
	.rx_complete = ath6kl_htc_pipe_rx_complete,
};
/* Select the pipe-based HTC implementation for this device instance. */
void ath6kl_htc_pipe_attach(struct ath6kl *ar)
{
	ar->htc_ops = &ath6kl_htc_pipe_ops;
}