/*
 * Wireless Host Controller (WHC) qset management.
 *
 * Copyright (C) 2007 Cambridge Silicon Radio Ltd.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version
 * 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 */
#include <linux/kernel.h>
#include <linux/dma-mapping.h>
#include <linux/uwb/umc.h>
#include <linux/usb.h>

#include "../../wusbcore/wusbhc.h"

#include "whcd.h"

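/**
 * qset_alloc - allocate and zero a qset from the qset DMA pool
 *
 * Returns the new qset, or NULL if the allocation fails.
 */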
struct whc_qset *qset_alloc(struct whc *whc, gfp_t mem_flags)
{
	struct whc_qset *qset;
	dma_addr_t dma;

	qset = dma_pool_alloc(whc->qset_pool, mem_flags, &dma);
	if (qset == NULL)
		return NULL;
	memset(qset, 0, sizeof(struct whc_qset));

	qset->qset_dma = dma;
	qset->whc = whc;

	INIT_LIST_HEAD(&qset->list_node);
	INIT_LIST_HEAD(&qset->stds);

	return qset;
}

/**
 * qset_fill_qh - fill the static endpoint state in a qset's QHead
 * @qset: the qset whose QH needs initializing with static endpoint
 *        state
 * @urb:  an urb for a transfer to this endpoint
 */
static void qset_fill_qh(struct whc_qset *qset, struct urb *urb)
{
	struct usb_device *usb_dev = urb->dev;
	struct usb_wireless_ep_comp_descriptor *epcd;
	bool is_out;

	is_out = usb_pipeout(urb->pipe);

	qset->max_packet = le16_to_cpu(urb->ep->desc.wMaxPacketSize);

	epcd = (struct usb_wireless_ep_comp_descriptor *)qset->ep->extra;
	if (epcd) {
		qset->max_seq = epcd->bMaxSequence;
		qset->max_burst = epcd->bMaxBurst;
	} else {
		qset->max_seq = 2;
		qset->max_burst = 1;
	}

	qset->qh.info1 = cpu_to_le32(
		QH_INFO1_EP(usb_pipeendpoint(urb->pipe))
		| (is_out ? QH_INFO1_DIR_OUT : QH_INFO1_DIR_IN)
		| usb_pipe_to_qh_type(urb->pipe)
		| QH_INFO1_DEV_INFO_IDX(wusb_port_no_to_idx(usb_dev->portnum))
		| QH_INFO1_MAX_PKT_LEN(qset->max_packet)
		);
	qset->qh.info2 = cpu_to_le32(
		QH_INFO2_BURST(qset->max_burst)
		| QH_INFO2_DBP(0)
		| QH_INFO2_MAX_COUNT(3)
		| QH_INFO2_MAX_RETRY(3)
		| QH_INFO2_MAX_SEQ(qset->max_seq - 1)
		);
	/* FIXME: where can we obtain these Tx parameters from?  Why
	 * doesn't the chip know what Tx power to use? It knows the Rx
	 * strength and can presumably guess the Tx power required
	 * from that? */
	qset->qh.info3 = cpu_to_le32(
		QH_INFO3_TX_RATE_53_3
		| QH_INFO3_TX_PWR(0) /* 0 == max power */
		);

	qset->qh.cur_window = cpu_to_le32((1 << qset->max_burst) - 1);
}

/**
 * qset_clear - clear fields in a qset so it may be reinserted into a
 * schedule.
 *
 * The sequence number and current window are not cleared (see
 * qset_reset()).
 */
void qset_clear(struct whc *whc, struct whc_qset *qset)
{
	qset->td_start = qset->td_end = qset->ntds = 0;

	qset->qh.link = cpu_to_le32(QH_LINK_NTDS(8) | QH_LINK_T);
	qset->qh.status = qset->qh.status & QH_STATUS_SEQ_MASK;
	qset->qh.err_count = 0;
	qset->qh.scratch[0] = 0;
	qset->qh.scratch[1] = 0;
	qset->qh.scratch[2] = 0;

	memset(&qset->qh.overlay, 0, sizeof(qset->qh.overlay));

	init_completion(&qset->remove_complete);
}

/**
 * qset_reset - reset endpoint state in a qset.
 *
 * Clears the sequence number and current window.  This qset must not
 * be in the ASL or PZL.
 */
void qset_reset(struct whc *whc, struct whc_qset *qset)
{
	qset->reset = 0;

	qset->qh.status &= ~QH_STATUS_SEQ_MASK;
	qset->qh.cur_window = cpu_to_le32((1 << qset->max_burst) - 1);
}

/**
 * get_qset - get the qset for an async endpoint
 *
 * A new qset is created if one does not already exist.
 */
struct whc_qset *get_qset(struct whc *whc, struct urb *urb,
			  gfp_t mem_flags)
{
	struct whc_qset *qset;

	qset = urb->ep->hcpriv;
	if (qset == NULL) {
		qset = qset_alloc(whc, mem_flags);
		if (qset == NULL)
			return NULL;

		qset->ep = urb->ep;
		urb->ep->hcpriv = qset;
		qset_fill_qh(qset, urb);
	}
	return qset;
}

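/**
 * qset_remove_complete - finish removing a qset from the schedule
 *
 * Clear the remove flag, take the qset off its list and signal
 * anyone waiting in qset_delete() that removal is complete.
 */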
void qset_remove_complete(struct whc *whc, struct whc_qset *qset)
{
	qset->remove = 0;
	list_del_init(&qset->list_node);
	complete(&qset->remove_complete);
}

/**
 * qset_add_qtds - add qTDs for an URB to a qset
 *
 * Returns true if the list (ASL/PZL) must be updated because (for a
 * WHCI 0.95 controller) an activated qTD was pointed to by iCur.
 */
enum whc_update qset_add_qtds(struct whc *whc, struct whc_qset *qset)
{
	struct whc_std *std;
	enum whc_update update = 0;

	list_for_each_entry(std, &qset->stds, list_node) {
		struct whc_qtd *qtd;
		uint32_t status;

		if (qset->ntds >= WHCI_QSET_TD_MAX
		    || (qset->pause_after_urb && std->urb != qset->pause_after_urb))
			break;

		if (std->qtd)
			continue; /* already has a qTD */

		qtd = std->qtd = &qset->qtd[qset->td_end];

		/* Fill in setup bytes for control transfers. */
		if (usb_pipecontrol(std->urb->pipe))
			memcpy(qtd->setup, std->urb->setup_packet, 8);

		status = QTD_STS_ACTIVE | QTD_STS_LEN(std->len);

		if (whc_std_last(std) && usb_pipeout(std->urb->pipe))
			status |= QTD_STS_LAST_PKT;

		/*
		 * For an IN transfer the iAlt field should be set so
		 * the h/w will automatically advance to the next
		 * transfer. However, if there are 8 or more TDs
		 * remaining in this transfer then iAlt cannot be set
		 * as it could point to somewhere in this transfer.
		 */
		if (std->ntds_remaining < WHCI_QSET_TD_MAX) {
			int ialt;
			ialt = (qset->td_end + std->ntds_remaining) % WHCI_QSET_TD_MAX;
			status |= QTD_STS_IALT(ialt);
		} else if (usb_pipein(std->urb->pipe))
			qset->pause_after_urb = std->urb;

		if (std->num_pointers)
			qtd->options = cpu_to_le32(QTD_OPT_IOC);
		else
			qtd->options = cpu_to_le32(QTD_OPT_IOC | QTD_OPT_SMALL);
		qtd->page_list_ptr = cpu_to_le64(std->dma_addr);

		qtd->status = cpu_to_le32(status);

		if (QH_STATUS_TO_ICUR(qset->qh.status) == qset->td_end)
			update = WHC_UPDATE_UPDATED;

		if (++qset->td_end >= WHCI_QSET_TD_MAX)
			qset->td_end = 0;
		qset->ntds++;
	}

	return update;
}

/**
 * qset_remove_qtd - remove the first qTD from a qset.
 *
 * The qTD might be still active (if it's part of an IN URB that
 * resulted in a short read) so ensure it's deactivated.
 */
static void qset_remove_qtd(struct whc *whc, struct whc_qset *qset)
{
	qset->qtd[qset->td_start].status = 0;

	if (++qset->td_start >= WHCI_QSET_TD_MAX)
		qset->td_start = 0;
	qset->ntds--;
}

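/**
 * qset_copy_bounce_to_sg - copy data received into a bounce buffer
 * back to the URB's sg list (used for IN transfers that had to be
 * linearized).
 */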
static void qset_copy_bounce_to_sg(struct whc *whc, struct whc_std *std)
{
	struct scatterlist *sg;
	void *bounce;
	size_t remaining, offset;

	bounce = std->bounce_buf;
	remaining = std->len;

	sg = std->bounce_sg;
	offset = std->bounce_offset;

	while (remaining) {
		size_t len;

		len = min(sg->length - offset, remaining);
		memcpy(sg_virt(sg) + offset, bounce, len);

		bounce += len;
		remaining -= len;

		offset += len;
		if (offset >= sg->length) {
			sg = sg_next(sg);
			offset = 0;
		}
	}
}

/**
 * qset_free_std - remove an sTD and free it.
 * @whc: the WHCI host controller
 * @std: the sTD to remove and free.
 */
void qset_free_std(struct whc *whc, struct whc_std *std)
{
	list_del(&std->list_node);
	if (std->bounce_buf) {
		bool is_out = usb_pipeout(std->urb->pipe);
		dma_addr_t dma_addr;

		if (std->num_pointers)
			dma_addr = le64_to_cpu(std->pl_virt[0].buf_ptr);
		else
			dma_addr = std->dma_addr;

		dma_unmap_single(whc->wusbhc.dev, dma_addr,
				 std->len, is_out ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
		if (!is_out)
			qset_copy_bounce_to_sg(whc, std);
		kfree(std->bounce_buf);
	}
	if (std->pl_virt) {
		if (std->dma_addr)
			dma_unmap_single(whc->wusbhc.dev, std->dma_addr,
					 std->num_pointers * sizeof(struct whc_page_list_entry),
					 DMA_TO_DEVICE);
		kfree(std->pl_virt);
		std->pl_virt = NULL;
	}
	kfree(std);
}

/**
 * qset_remove_qtds - remove an URB's qTDs (and sTDs).
 */
static void qset_remove_qtds(struct whc *whc, struct whc_qset *qset,
			     struct urb *urb)
{
	struct whc_std *std, *t;

	list_for_each_entry_safe(std, t, &qset->stds, list_node) {
		if (std->urb != urb)
			break;
		if (std->qtd != NULL)
			qset_remove_qtd(whc, qset);
		qset_free_std(whc, std);
	}
}

/**
 * qset_free_stds - free any remaining sTDs for an URB.
 */
static void qset_free_stds(struct whc_qset *qset, struct urb *urb)
{
	struct whc_std *std, *t;

	list_for_each_entry_safe(std, t, &qset->stds, list_node) {
		if (std->urb == urb)
			qset_free_std(qset->whc, std);
	}
}

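/**
 * qset_fill_page_list - set up the page list for an sTD
 *
 * Buffers no larger than one WHCI page are referenced directly by
 * the qTD and need no page list; larger buffers get a DMA-mapped
 * list of page pointers covering the whole transfer.
 */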
static int qset_fill_page_list(struct whc *whc, struct whc_std *std, gfp_t mem_flags)
{
	dma_addr_t dma_addr = std->dma_addr;
	dma_addr_t sp, ep;
	size_t pl_len;
	int p;

	/* Short buffers don't need a page list. */
	if (std->len <= WHCI_PAGE_SIZE) {
		std->num_pointers = 0;
		return 0;
	}

	sp = dma_addr & ~(WHCI_PAGE_SIZE-1);
	ep = dma_addr + std->len;
	std->num_pointers = DIV_ROUND_UP(ep - sp, WHCI_PAGE_SIZE);

	pl_len = std->num_pointers * sizeof(struct whc_page_list_entry);
	std->pl_virt = kmalloc(pl_len, mem_flags);
	if (std->pl_virt == NULL)
		return -ENOMEM;
	std->dma_addr = dma_map_single(whc->wusbhc.dev, std->pl_virt, pl_len, DMA_TO_DEVICE);

	for (p = 0; p < std->num_pointers; p++) {
		std->pl_virt[p].buf_ptr = cpu_to_le64(dma_addr);
		dma_addr = (dma_addr + WHCI_PAGE_SIZE) & ~(WHCI_PAGE_SIZE-1);
	}

	return 0;
}

/**
 * urb_dequeue_work - executes asl/pzl update and gives back the urb to the system.
 */
static void urb_dequeue_work(struct work_struct *work)
{
	struct whc_urb *wurb = container_of(work, struct whc_urb, dequeue_work);
	struct whc_qset *qset = wurb->qset;
	struct whc *whc = qset->whc;
	unsigned long flags;

	if (wurb->is_async == true)
		asl_update(whc, WUSBCMD_ASYNC_UPDATED
			   | WUSBCMD_ASYNC_SYNCED_DB
			   | WUSBCMD_ASYNC_QSET_RM);
	else
		pzl_update(whc, WUSBCMD_PERIODIC_UPDATED
			   | WUSBCMD_PERIODIC_SYNCED_DB
			   | WUSBCMD_PERIODIC_QSET_RM);

	spin_lock_irqsave(&whc->lock, flags);
	qset_remove_urb(whc, qset, wurb->urb, wurb->status);
	spin_unlock_irqrestore(&whc->lock, flags);
}

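/**
 * qset_new_std - allocate a new sTD and append it to the qset's sTD list
 *
 * Returns the new sTD, or NULL if the allocation fails.
 */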
static struct whc_std *qset_new_std(struct whc *whc, struct whc_qset *qset,
				    struct urb *urb, gfp_t mem_flags)
{
	struct whc_std *std;

	std = kzalloc(sizeof(struct whc_std), mem_flags);
	if (std == NULL)
		return NULL;

	std->urb = urb;
	std->qtd = NULL;

	INIT_LIST_HEAD(&std->list_node);
	list_add_tail(&std->list_node, &qset->stds);

	return std;
}

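/**
 * qset_add_urb_sg - add an urb with an sg list, mapping the sg
 * elements directly onto qTD page lists
 *
 * Returns -EINVAL if the sg element boundaries do not permit a direct
 * mapping (the caller then falls back to bounce buffers), or -ENOMEM
 * if an allocation fails.
 */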
static int qset_add_urb_sg(struct whc *whc, struct whc_qset *qset, struct urb *urb,
			   gfp_t mem_flags)
{
	size_t remaining;
	struct scatterlist *sg;
	int i;
	int ntds = 0;
	struct whc_std *std = NULL;
	dma_addr_t prev_end = 0;
	size_t pl_len;
	int p = 0;

	dev_dbg(&whc->umc->dev, "adding urb w/ sg of length %d\n", urb->transfer_buffer_length);

	remaining = urb->transfer_buffer_length;

	for_each_sg(urb->sg->sg, sg, urb->num_sgs, i) {
		dma_addr_t dma_addr;
		size_t dma_remaining;
		dma_addr_t sp, ep;
		int num_pointers;

		if (remaining == 0) {
			break;
		}

		dma_addr = sg_dma_address(sg);
		dma_remaining = min(sg_dma_len(sg), remaining);

		dev_dbg(&whc->umc->dev, "adding sg[%d] %08x %zu\n", i, (unsigned)dma_addr,
			dma_remaining);

		while (dma_remaining) {
			size_t dma_len;

			/*
			 * We can use the previous std (if it exists) provided that:
			 * - the previous one ended on a page boundary.
			 * - the current one begins on a page boundary.
			 * - the previous one isn't full.
			 *
			 * If a new std is needed but the previous one
			 * did not end on a wMaxPacketSize boundary
			 * then this sg list cannot be mapped onto
			 * multiple qTDs.  Return an error and let the
			 * caller sort it out.
			 */
			if (!std
			    || (prev_end & (WHCI_PAGE_SIZE-1))
			    || (dma_addr & (WHCI_PAGE_SIZE-1))
			    || std->len + WHCI_PAGE_SIZE > QTD_MAX_XFER_SIZE) {
				if (prev_end % qset->max_packet != 0)
					return -EINVAL;
				dev_dbg(&whc->umc->dev, "need new std\n");
				std = qset_new_std(whc, qset, urb, mem_flags);
				if (std == NULL) {
					return -ENOMEM;
				}
				ntds++;
				p = 0;
			}

			dma_len = dma_remaining;

			/*
			 * If the remainder in this element doesn't
			 * fit in a single qTD, end the qTD on a
			 * wMaxPacketSize boundary.
			 */
			if (std->len + dma_len > QTD_MAX_XFER_SIZE) {
				dma_len = QTD_MAX_XFER_SIZE - std->len;
				ep = ((dma_addr + dma_len) / qset->max_packet) * qset->max_packet;
				dma_len = ep - dma_addr;
			}

			dev_dbg(&whc->umc->dev, "adding %zu\n", dma_len);

			std->len += dma_len;
			std->ntds_remaining = -1; /* filled in later */

			sp = dma_addr & ~(WHCI_PAGE_SIZE-1);
			ep = dma_addr + dma_len;
			num_pointers = DIV_ROUND_UP(ep - sp, WHCI_PAGE_SIZE);
			std->num_pointers += num_pointers;

			dev_dbg(&whc->umc->dev, "need %d more (%d total) page pointers\n",
				num_pointers, std->num_pointers);

			pl_len = std->num_pointers * sizeof(struct whc_page_list_entry);
			std->pl_virt = krealloc(std->pl_virt, pl_len, mem_flags);
			if (std->pl_virt == NULL) {
				return -ENOMEM;
			}

			for (; p < std->num_pointers; p++) {
				dev_dbg(&whc->umc->dev, "e[%d] %08x\n", p, (unsigned)dma_addr);
				std->pl_virt[p].buf_ptr = cpu_to_le64(dma_addr);
				dma_addr = (dma_addr + WHCI_PAGE_SIZE) & ~(WHCI_PAGE_SIZE-1);
			}

			prev_end = dma_addr = ep;
			dma_remaining -= dma_len;
			remaining -= dma_len;
		}
	}

	dev_dbg(&whc->umc->dev, "used %d tds\n", ntds);

	/* Now the number of stds is known, go back and fill in
	   std->ntds_remaining. */
	list_for_each_entry(std, &qset->stds, list_node) {
		if (std->ntds_remaining == -1) {
			pl_len = std->num_pointers * sizeof(struct whc_page_list_entry);
			std->ntds_remaining = ntds--;
			std->dma_addr = dma_map_single(whc->wusbhc.dev, std->pl_virt,
						       pl_len, DMA_TO_DEVICE);
		}
	}

	return 0;
}

/**
 * qset_add_urb_sg_linearize - add an urb with sg list, copying the data
 *
 * If the URB contains an sg list whose elements cannot be directly
 * mapped to qTDs then the data must be transferred via bounce
 * buffers.
 */
static int qset_add_urb_sg_linearize(struct whc *whc, struct whc_qset *qset,
				     struct urb *urb, gfp_t mem_flags)
{
	bool is_out = usb_pipeout(urb->pipe);
	size_t max_std_len;
	size_t remaining;
	int ntds = 0;
	struct whc_std *std = NULL;
	void *bounce = NULL;
	struct scatterlist *sg;
	int i;

	/* limit maximum bounce buffer to 16 * 3.5 KiB ~= 28 k */
	max_std_len = qset->max_burst * qset->max_packet;

	remaining = urb->transfer_buffer_length;

	for_each_sg(urb->sg->sg, sg, urb->sg->nents, i) {
		size_t len;
		size_t sg_remaining;
		void *orig;

		if (remaining == 0) {
			break;
		}

		sg_remaining = min(remaining, sg->length);
		orig = sg_virt(sg);

		dev_dbg(&whc->umc->dev, "adding sg[%d] %zu\n", i, sg_remaining);

		while (sg_remaining) {
			if (!std || std->len == max_std_len) {
				dev_dbg(&whc->umc->dev, "need new std\n");

				std = qset_new_std(whc, qset, urb, mem_flags);
				if (std == NULL)
					return -ENOMEM;
				std->bounce_buf = kmalloc(max_std_len, mem_flags);
				if (std->bounce_buf == NULL)
					return -ENOMEM;
				std->bounce_sg = sg;
				std->bounce_offset = orig - sg_virt(sg);
				bounce = std->bounce_buf;
				ntds++;
			}

			len = min(sg_remaining, max_std_len - std->len);

			dev_dbg(&whc->umc->dev, "added %zu from sg[%d] @ offset %td\n",
				len, i, orig - sg_virt(sg));

			if (is_out)
				memcpy(bounce, orig, len);

			std->len += len;
			std->ntds_remaining = -1; /* filled in later */

			bounce += len;
			orig += len;
			sg_remaining -= len;
			remaining -= len;
		}
	}

	/*
	 * For each of the new sTDs, map the bounce buffers, create
	 * page lists (if necessary), and fill in std->ntds_remaining.
	 */
	list_for_each_entry(std, &qset->stds, list_node) {
		if (std->ntds_remaining != -1)
			continue;

		std->dma_addr = dma_map_single(&whc->umc->dev, std->bounce_buf, std->len,
					       is_out ? DMA_TO_DEVICE : DMA_FROM_DEVICE);

		if (qset_fill_page_list(whc, std, mem_flags) < 0)
			return -ENOMEM;

		std->ntds_remaining = ntds--;
	}

	return 0;
}

/**
 * qset_add_urb - add an urb to the qset's queue.
 *
 * The URB is chopped into sTDs, one for each qTD that will be
 * required.  At least one qTD (and sTD) is required even if the
 * transfer has no data (e.g., for some control transfers).
 */
int qset_add_urb(struct whc *whc, struct whc_qset *qset, struct urb *urb,
		 gfp_t mem_flags)
{
	struct whc_urb *wurb;
	int remaining = urb->transfer_buffer_length;
	u64 transfer_dma = urb->transfer_dma;
	int ntds_remaining;
	int ret;

	wurb = kzalloc(sizeof(struct whc_urb), mem_flags);
	if (wurb == NULL)
		goto err_no_mem;
	urb->hcpriv = wurb;
	wurb->qset = qset;
	wurb->urb = urb;
	INIT_WORK(&wurb->dequeue_work, urb_dequeue_work);

	if (urb->sg) {
		ret = qset_add_urb_sg(whc, qset, urb, mem_flags);
		if (ret == -EINVAL) {
			dev_dbg(&whc->umc->dev, "linearizing %d octet urb\n",
				urb->transfer_buffer_length);
			qset_free_stds(qset, urb);
			ret = qset_add_urb_sg_linearize(whc, qset, urb, mem_flags);
		}
		if (ret < 0)
			goto err_no_mem;
		return 0;
	}

	ntds_remaining = DIV_ROUND_UP(remaining, QTD_MAX_XFER_SIZE);
	if (ntds_remaining == 0)
		ntds_remaining = 1;

	while (ntds_remaining) {
		struct whc_std *std;
		size_t std_len;

		std_len = remaining;
		if (std_len > QTD_MAX_XFER_SIZE)
			std_len = QTD_MAX_XFER_SIZE;

		std = qset_new_std(whc, qset, urb, mem_flags);
		if (std == NULL)
			goto err_no_mem;

		std->dma_addr = transfer_dma;
		std->len = std_len;
		std->ntds_remaining = ntds_remaining;

		if (qset_fill_page_list(whc, std, mem_flags) < 0)
			goto err_no_mem;

		ntds_remaining--;
		remaining -= std_len;
		transfer_dma += std_len;
	}

	return 0;

err_no_mem:
	qset_free_stds(qset, urb);
	return -ENOMEM;
}

/**
 * qset_remove_urb - remove an URB from the urb queue.
 *
 * The URB is returned to the USB subsystem.
 */
void qset_remove_urb(struct whc *whc, struct whc_qset *qset,
		     struct urb *urb, int status)
{
	struct wusbhc *wusbhc = &whc->wusbhc;
	struct whc_urb *wurb = urb->hcpriv;

	usb_hcd_unlink_urb_from_ep(&wusbhc->usb_hcd, urb);
	/* Drop the lock as urb->complete() may enqueue another urb. */
	spin_unlock(&whc->lock);
	wusbhc_giveback_urb(wusbhc, urb, status);
	spin_lock(&whc->lock);

	kfree(wurb);
}

/**
 * get_urb_status_from_qtd - get the completed urb status from qTD status
 * @urb:    completed urb
 * @status: qTD status
 */
static int get_urb_status_from_qtd(struct urb *urb, u32 status)
{
	if (status & QTD_STS_HALTED) {
		if (status & QTD_STS_DBE)
			return usb_pipein(urb->pipe) ? -ENOSR : -ECOMM;
		else if (status & QTD_STS_BABBLE)
			return -EOVERFLOW;
		else if (status & QTD_STS_RCE)
			return -ETIME;
		return -EPIPE;
	}
	if (usb_pipein(urb->pipe)
	    && (urb->transfer_flags & URB_SHORT_NOT_OK)
	    && urb->actual_length < urb->transfer_buffer_length)
		return -EREMOTEIO;
	return 0;
}

/**
 * process_inactive_qtd - process an inactive (but not halted) qTD.
 *
 * Update the urb with the number of bytes transferred by the qTD.  If
 * the urb has been completely transferred, or (for an IN transfer
 * only) the last packet flag is set, then the transfer is complete
 * and the urb is returned to the system.
 */
void process_inactive_qtd(struct whc *whc, struct whc_qset *qset,
			  struct whc_qtd *qtd)
{
	struct whc_std *std = list_first_entry(&qset->stds, struct whc_std, list_node);
	struct urb *urb = std->urb;
	uint32_t status;
	bool complete;

	status = le32_to_cpu(qtd->status);

	urb->actual_length += std->len - QTD_STS_TO_LEN(status);

	if (usb_pipein(urb->pipe) && (status & QTD_STS_LAST_PKT))
		complete = true;
	else
		complete = whc_std_last(std);

	qset_remove_qtd(whc, qset);
	qset_free_std(whc, std);

	/*
	 * Transfers for this URB are complete?  Then return it to the
	 * USB subsystem.
	 */
	if (complete) {
		qset_remove_qtds(whc, qset, urb);
		qset_remove_urb(whc, qset, urb, get_urb_status_from_qtd(urb, status));

		/*
		 * If iAlt isn't valid then the hardware didn't
		 * advance iCur. Adjust the start and end pointers to
		 * match iCur.
		 */
		if (!(status & QTD_STS_IALT_VALID))
			qset->td_start = qset->td_end
				= QH_STATUS_TO_ICUR(le16_to_cpu(qset->qh.status));
		qset->pause_after_urb = NULL;
	}
}

/**
 * process_halted_qtd - process a qset with a halted qtd
 *
 * Remove all the qTDs for the failed URB and return the failed URB to
 * the USB subsystem.  Then remove all other qTDs so the qset can be
 * removed.
 *
 * FIXME: this is the point where rate adaptation can be done.  If a
 * transfer failed because it exceeded the maximum number of retries
 * then it could be reactivated with a slower rate without having to
 * remove the qset.
 */
void process_halted_qtd(struct whc *whc, struct whc_qset *qset,
			struct whc_qtd *qtd)
{
	struct whc_std *std = list_first_entry(&qset->stds, struct whc_std, list_node);
	struct urb *urb = std->urb;
	int urb_status;

	urb_status = get_urb_status_from_qtd(urb, le32_to_cpu(qtd->status));

	qset_remove_qtds(whc, qset, urb);
	qset_remove_urb(whc, qset, urb, urb_status);

	list_for_each_entry(std, &qset->stds, list_node) {
		if (qset->ntds == 0)
			break;
		qset_remove_qtd(whc, qset);
		std->qtd = NULL;
	}

	qset->remove = 1;
}

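/**
 * qset_free - return a qset to the qset DMA pool
 */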
void qset_free(struct whc *whc, struct whc_qset *qset)
{
	dma_pool_free(whc->qset_pool, qset, qset->qset_dma);
}

/**
 * qset_delete - wait for a qset to be unused, then free it.
 */
void qset_delete(struct whc *whc, struct whc_qset *qset)
{
	wait_for_completion(&qset->remove_complete);
	qset_free(whc, qset);
}