/*
 * Wireless Host Controller (WHC) qset management.
 *
 * Copyright (C) 2007 Cambridge Silicon Radio Ltd.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version
 * 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 */
#include <linux/kernel.h>
#include <linux/dma-mapping.h>
#include <linux/uwb/umc.h>
#include <linux/usb.h>

#include "../../wusbcore/wusbhc.h"

#include "whcd.h"
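
/*
 * dump_qset - print a qset's queue head, qTD ring and sTD list to the
 * debug log.  Debugging aid only.
 */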
void dump_qset(struct whc_qset *qset, struct device *dev)
{
        struct whc_std *std;
        struct urb *urb = NULL;
        int i;

        dev_dbg(dev, "qset %08x\n", (u32)qset->qset_dma);
        dev_dbg(dev, " -> %08x\n", (u32)qset->qh.link);
        dev_dbg(dev, " info: %08x %08x %08x\n",
                qset->qh.info1, qset->qh.info2, qset->qh.info3);
        dev_dbg(dev, " sts: %04x errs: %d\n", qset->qh.status, qset->qh.err_count);
        dev_dbg(dev, " TD: sts: %08x opts: %08x\n",
                qset->qh.overlay.qtd.status, qset->qh.overlay.qtd.options);

        for (i = 0; i < WHCI_QSET_TD_MAX; i++) {
                dev_dbg(dev, " %c%c TD[%d]: sts: %08x opts: %08x ptr: %08x\n",
                        i == qset->td_start ? 'S' : ' ',
                        i == qset->td_end ? 'E' : ' ',
                        i, qset->qtd[i].status, qset->qtd[i].options,
                        (u32)qset->qtd[i].page_list_ptr);
        }
        dev_dbg(dev, " ntds: %d\n", qset->ntds);

        list_for_each_entry(std, &qset->stds, list_node) {
                if (urb != std->urb) {
                        urb = std->urb;
                        dev_dbg(dev, " urb %p transferred: %d bytes\n", urb,
                                urb->actual_length);
                }
                if (std->qtd)
                        dev_dbg(dev, " sTD[%td]: %zu bytes @ %08x\n",
                                std->qtd - &qset->qtd[0],
                                std->len, std->num_pointers ?
                                (u32)(std->pl_virt[0].buf_ptr) : (u32)std->dma_addr);
                else
                        dev_dbg(dev, " sTD[-]: %zd bytes @ %08x\n",
                                std->len, std->num_pointers ?
                                (u32)(std->pl_virt[0].buf_ptr) : (u32)std->dma_addr);
        }
}
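
/*
 * qset_alloc - allocate a zeroed qset from the qset DMA pool and record
 * its DMA address and owning WHC.  The static endpoint state is filled
 * in later by qset_fill_qh().
 */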
struct whc_qset *qset_alloc(struct whc *whc, gfp_t mem_flags)
{
        struct whc_qset *qset;
        dma_addr_t dma;

        qset = dma_pool_alloc(whc->qset_pool, mem_flags, &dma);
        if (qset == NULL)
                return NULL;
        memset(qset, 0, sizeof(struct whc_qset));

        qset->qset_dma = dma;
        qset->whc = whc;

        INIT_LIST_HEAD(&qset->list_node);
        INIT_LIST_HEAD(&qset->stds);

        return qset;
}

/**
 * qset_fill_qh - fill the static endpoint state in a qset's QHead
 * @qset: the qset whose QH needs initializing with static endpoint
 *        state
 * @urb:  an urb for a transfer to this endpoint
 */
static void qset_fill_qh(struct whc_qset *qset, struct urb *urb)
{
        struct usb_device *usb_dev = urb->dev;
        struct usb_wireless_ep_comp_descriptor *epcd;
        bool is_out;

        is_out = usb_pipeout(urb->pipe);

        epcd = (struct usb_wireless_ep_comp_descriptor *)qset->ep->extra;

        if (epcd) {
                qset->max_seq = epcd->bMaxSequence;
                qset->max_burst = epcd->bMaxBurst;
        } else {
                qset->max_seq = 2;
                qset->max_burst = 1;
        }

        qset->qh.info1 = cpu_to_le32(
                QH_INFO1_EP(usb_pipeendpoint(urb->pipe))
                | (is_out ? QH_INFO1_DIR_OUT : QH_INFO1_DIR_IN)
                | usb_pipe_to_qh_type(urb->pipe)
                | QH_INFO1_DEV_INFO_IDX(wusb_port_no_to_idx(usb_dev->portnum))
                | QH_INFO1_MAX_PKT_LEN(usb_maxpacket(urb->dev, urb->pipe, is_out))
                );
        qset->qh.info2 = cpu_to_le32(
                QH_INFO2_BURST(qset->max_burst)
                | QH_INFO2_DBP(0)
                | QH_INFO2_MAX_COUNT(3)
                | QH_INFO2_MAX_RETRY(3)
                | QH_INFO2_MAX_SEQ(qset->max_seq - 1)
                );
        /* FIXME: where can we obtain these Tx parameters from?  Why
         * doesn't the chip know what Tx power to use? It knows the Rx
         * strength and can presumably guess the Tx power required
         * from that? */
        qset->qh.info3 = cpu_to_le32(
                QH_INFO3_TX_RATE_53_3
                | QH_INFO3_TX_PWR(0) /* 0 == max power */
                );
}

/**
 * qset_clear - clear fields in a qset so it may be reinserted into a
 * schedule
 */
void qset_clear(struct whc *whc, struct whc_qset *qset)
{
        qset->td_start = qset->td_end = qset->ntds = 0;
        qset->remove = 0;

        qset->qh.link = cpu_to_le32(QH_LINK_NTDS(8) | QH_LINK_T);
        qset->qh.status = cpu_to_le16(QH_STATUS_ICUR(qset->td_start));
        qset->qh.err_count = 0;
        qset->qh.cur_window = cpu_to_le32((1 << qset->max_burst) - 1);
        qset->qh.scratch[0] = 0;
        qset->qh.scratch[1] = 0;
        qset->qh.scratch[2] = 0;
        memset(&qset->qh.overlay, 0, sizeof(qset->qh.overlay));

        init_completion(&qset->remove_complete);
}

/**
 * get_qset - get the qset for an async endpoint
 *
 * A new qset is created if one does not already exist.
 */
struct whc_qset *get_qset(struct whc *whc, struct urb *urb,
                          gfp_t mem_flags)
{
        struct whc_qset *qset;

        qset = urb->ep->hcpriv;
        if (qset == NULL) {
                qset = qset_alloc(whc, mem_flags);
                if (qset == NULL)
                        return NULL;

                qset->ep = urb->ep;
                urb->ep->hcpriv = qset;
                qset_fill_qh(qset, urb);
        }
        return qset;
}
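
/*
 * qset_remove_complete - the qset has been taken out of the schedule;
 * drop it from its list and signal remove_complete so qset_delete()
 * can free it.
 */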
void qset_remove_complete(struct whc *whc, struct whc_qset *qset)
{
        list_del_init(&qset->list_node);
        complete(&qset->remove_complete);
}

/**
 * qset_add_qtds - add qTDs for an URB to a qset
 *
 * Returns true if the list (ASL/PZL) must be updated because (for a
 * WHCI 0.95 controller) an activated qTD was pointed to by iCur.
 */
enum whc_update qset_add_qtds(struct whc *whc, struct whc_qset *qset)
{
        struct whc_std *std;
        enum whc_update update = 0;

        list_for_each_entry(std, &qset->stds, list_node) {
                struct whc_qtd *qtd;
                uint32_t status;

                if (qset->ntds >= WHCI_QSET_TD_MAX
                    || (qset->pause_after_urb && std->urb != qset->pause_after_urb))
                        break;

                if (std->qtd)
                        continue; /* already has a qTD */

                qtd = std->qtd = &qset->qtd[qset->td_end];

                /* Fill in setup bytes for control transfers. */
                if (usb_pipecontrol(std->urb->pipe))
                        memcpy(qtd->setup, std->urb->setup_packet, 8);

                status = QTD_STS_ACTIVE | QTD_STS_LEN(std->len);

                if (whc_std_last(std) && usb_pipeout(std->urb->pipe))
                        status |= QTD_STS_LAST_PKT;

                /*
                 * For an IN transfer the iAlt field should be set so
                 * the h/w will automatically advance to the next
                 * transfer. However, if there are 8 or more TDs
                 * remaining in this transfer then iAlt cannot be set
                 * as it could point to somewhere in this transfer.
                 */
                if (std->ntds_remaining < WHCI_QSET_TD_MAX) {
                        int ialt;
                        ialt = (qset->td_end + std->ntds_remaining) % WHCI_QSET_TD_MAX;
                        status |= QTD_STS_IALT(ialt);
                } else if (usb_pipein(std->urb->pipe))
                        qset->pause_after_urb = std->urb;

                if (std->num_pointers)
                        qtd->options = cpu_to_le32(QTD_OPT_IOC);
                else
                        qtd->options = cpu_to_le32(QTD_OPT_IOC | QTD_OPT_SMALL);
                qtd->page_list_ptr = cpu_to_le64(std->dma_addr);

                qtd->status = cpu_to_le32(status);

                if (QH_STATUS_TO_ICUR(qset->qh.status) == qset->td_end)
                        update = WHC_UPDATE_UPDATED;

                if (++qset->td_end >= WHCI_QSET_TD_MAX)
                        qset->td_end = 0;

                qset->ntds++;
        }

        return update;
}

/**
 * qset_remove_qtd - remove the first qTD from a qset.
 *
 * The qTD might be still active (if it's part of an IN URB that
 * resulted in a short read) so ensure it's deactivated.
 */
static void qset_remove_qtd(struct whc *whc, struct whc_qset *qset)
{
        qset->qtd[qset->td_start].status = 0;

        if (++qset->td_start >= WHCI_QSET_TD_MAX)
                qset->td_start = 0;
        qset->ntds--;
}

/**
 * qset_free_std - remove an sTD and free it.
 * @whc: the WHCI host controller
 * @std: the sTD to remove and free.
 */
void qset_free_std(struct whc *whc, struct whc_std *std)
{
        list_del(&std->list_node);
        if (std->num_pointers) {
                dma_unmap_single(whc->wusbhc.dev, std->dma_addr,
                                 std->num_pointers * sizeof(struct whc_page_list_entry),
                                 DMA_TO_DEVICE);
                kfree(std->pl_virt);
        }

        kfree(std);
}

/**
 * qset_remove_qtds - remove an URB's qTDs (and sTDs).
 */
static void qset_remove_qtds(struct whc *whc, struct whc_qset *qset,
                             struct urb *urb)
{
        struct whc_std *std, *t;

        list_for_each_entry_safe(std, t, &qset->stds, list_node) {
                if (std->urb != urb)
                        break;
                if (std->qtd != NULL)
                        qset_remove_qtd(whc, qset);
                qset_free_std(whc, std);
        }
}

/**
 * qset_free_stds - free any remaining sTDs for an URB.
 */
static void qset_free_stds(struct whc_qset *qset, struct urb *urb)
{
        struct whc_std *std, *t;

        list_for_each_entry_safe(std, t, &qset->stds, list_node) {
                if (std->urb == urb)
                        qset_free_std(qset->whc, std);
        }
}
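
/*
 * qset_fill_page_list - allocate and fill the page list for an sTD
 * whose buffer spans more than one WHCI page.  Note that std->dma_addr
 * is replaced with the DMA address of the page list itself.
 */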
static int qset_fill_page_list(struct whc *whc, struct whc_std *std, gfp_t mem_flags)
{
        dma_addr_t dma_addr = std->dma_addr;
        dma_addr_t sp, ep;
        size_t std_len = std->len;
        size_t pl_len;
        int p;

        sp = ALIGN(dma_addr, WHCI_PAGE_SIZE);
        ep = dma_addr + std_len;
        std->num_pointers = DIV_ROUND_UP(ep - sp, WHCI_PAGE_SIZE);

        pl_len = std->num_pointers * sizeof(struct whc_page_list_entry);
        std->pl_virt = kmalloc(pl_len, mem_flags);
        if (std->pl_virt == NULL)
                return -ENOMEM;
        std->dma_addr = dma_map_single(whc->wusbhc.dev, std->pl_virt, pl_len, DMA_TO_DEVICE);

        for (p = 0; p < std->num_pointers; p++) {
                std->pl_virt[p].buf_ptr = cpu_to_le64(dma_addr);
                dma_addr = ALIGN(dma_addr + WHCI_PAGE_SIZE, WHCI_PAGE_SIZE);
        }

        return 0;
}

/**
 * urb_dequeue_work - executes the asl/pzl update and gives the urb back to the system.
 */
static void urb_dequeue_work(struct work_struct *work)
{
        struct whc_urb *wurb = container_of(work, struct whc_urb, dequeue_work);
        struct whc_qset *qset = wurb->qset;
        struct whc *whc = qset->whc;
        unsigned long flags;

        if (wurb->is_async == true)
                asl_update(whc, WUSBCMD_ASYNC_UPDATED
                           | WUSBCMD_ASYNC_SYNCED_DB
                           | WUSBCMD_ASYNC_QSET_RM);
        else
                pzl_update(whc, WUSBCMD_PERIODIC_UPDATED
                           | WUSBCMD_PERIODIC_SYNCED_DB
                           | WUSBCMD_PERIODIC_QSET_RM);

        spin_lock_irqsave(&whc->lock, flags);
        qset_remove_urb(whc, qset, wurb->urb, wurb->status);
        spin_unlock_irqrestore(&whc->lock, flags);
}

/**
 * qset_add_urb - add an urb to the qset's queue.
 *
 * The URB is chopped into sTDs, one for each qTD that will be required.
 * At least one qTD (and sTD) is required even if the transfer has no
 * data (e.g., for some control transfers).
 */
int qset_add_urb(struct whc *whc, struct whc_qset *qset, struct urb *urb,
                 gfp_t mem_flags)
{
        struct whc_urb *wurb;
        int remaining = urb->transfer_buffer_length;
        u64 transfer_dma = urb->transfer_dma;
        int ntds_remaining;

        ntds_remaining = DIV_ROUND_UP(remaining, QTD_MAX_XFER_SIZE);
        if (ntds_remaining == 0)
                ntds_remaining = 1;

        wurb = kzalloc(sizeof(struct whc_urb), mem_flags);
        if (wurb == NULL)
                goto err_no_mem;
        urb->hcpriv = wurb;
        wurb->qset = qset;
        wurb->urb = urb;
        INIT_WORK(&wurb->dequeue_work, urb_dequeue_work);

        while (ntds_remaining) {
                struct whc_std *std;
                size_t std_len;

                std = kmalloc(sizeof(struct whc_std), mem_flags);
                if (std == NULL)
                        goto err_no_mem;

                std_len = remaining;
                if (std_len > QTD_MAX_XFER_SIZE)
                        std_len = QTD_MAX_XFER_SIZE;

                std->urb = urb;
                std->dma_addr = transfer_dma;
                std->len = std_len;
                std->ntds_remaining = ntds_remaining;
                std->qtd = NULL;

                INIT_LIST_HEAD(&std->list_node);
                list_add_tail(&std->list_node, &qset->stds);

                if (std_len > WHCI_PAGE_SIZE) {
                        if (qset_fill_page_list(whc, std, mem_flags) < 0)
                                goto err_no_mem;
                } else
                        std->num_pointers = 0;

                ntds_remaining--;
                remaining -= std_len;
                transfer_dma += std_len;
        }

        return 0;

err_no_mem:
        qset_free_stds(qset, urb);
        return -ENOMEM;
}

/**
 * qset_remove_urb - remove an URB from the urb queue.
 *
 * The URB is returned to the USB subsystem.
 */
void qset_remove_urb(struct whc *whc, struct whc_qset *qset,
                     struct urb *urb, int status)
{
        struct wusbhc *wusbhc = &whc->wusbhc;
        struct whc_urb *wurb = urb->hcpriv;

        usb_hcd_unlink_urb_from_ep(&wusbhc->usb_hcd, urb);
        /* Drop the lock as urb->complete() may enqueue another urb. */
        spin_unlock(&whc->lock);
        wusbhc_giveback_urb(wusbhc, urb, status);
        spin_lock(&whc->lock);

        kfree(wurb);
}

/**
 * get_urb_status_from_qtd - get the completed urb status from qTD status
 * @urb:    completed urb
 * @status: qTD status
 */
static int get_urb_status_from_qtd(struct urb *urb, u32 status)
{
        if (status & QTD_STS_HALTED) {
                if (status & QTD_STS_DBE)
                        return usb_pipein(urb->pipe) ? -ENOSR : -ECOMM;
                else if (status & QTD_STS_BABBLE)
                        return -EOVERFLOW;
                else if (status & QTD_STS_RCE)
                        return -ETIME;
                return -EPIPE;
        }
        if (usb_pipein(urb->pipe)
            && (urb->transfer_flags & URB_SHORT_NOT_OK)
            && urb->actual_length < urb->transfer_buffer_length)
                return -EREMOTEIO;
        return 0;
}

/**
 * process_inactive_qtd - process an inactive (but not halted) qTD.
 *
 * Update the urb with the number of bytes transferred by the qTD.  If
 * the urb has been completely transferred, or (for an IN transfer only)
 * the LPF (last packet flag) is set, then the transfer is complete and
 * the urb should be returned to the system.
 */
void process_inactive_qtd(struct whc *whc, struct whc_qset *qset,
                          struct whc_qtd *qtd)
{
        struct whc_std *std = list_first_entry(&qset->stds, struct whc_std, list_node);
        struct urb *urb = std->urb;
        uint32_t status;
        bool complete;

        status = le32_to_cpu(qtd->status);

        urb->actual_length += std->len - QTD_STS_TO_LEN(status);

        if (usb_pipein(urb->pipe) && (status & QTD_STS_LAST_PKT))
                complete = true;
        else
                complete = whc_std_last(std);

        qset_remove_qtd(whc, qset);
        qset_free_std(whc, std);

        /*
         * Transfers for this URB are complete?  Then return it to the
         * USB subsystem.
         */
        if (complete) {
                qset_remove_qtds(whc, qset, urb);
                qset_remove_urb(whc, qset, urb, get_urb_status_from_qtd(urb, status));

                /*
                 * If iAlt isn't valid then the hardware didn't
                 * advance iCur. Adjust the start and end pointers to
                 * match iCur.
                 */
                if (!(status & QTD_STS_IALT_VALID))
                        qset->td_start = qset->td_end
                                = QH_STATUS_TO_ICUR(le16_to_cpu(qset->qh.status));
                qset->pause_after_urb = NULL;
        }
}

/**
 * process_halted_qtd - process a qset with a halted qtd
 *
 * Remove all the qTDs for the failed URB and return the failed URB to
 * the USB subsystem.  Then remove all other qTDs so the qset can be
 * removed.
 *
 * FIXME: this is the point where rate adaptation can be done.  If a
 * transfer failed because it exceeded the maximum number of retries
 * then it could be reactivated with a slower rate without having to
 * remove the qset.
 */
void process_halted_qtd(struct whc *whc, struct whc_qset *qset,
                        struct whc_qtd *qtd)
{
        struct whc_std *std = list_first_entry(&qset->stds, struct whc_std, list_node);
        struct urb *urb = std->urb;
        int urb_status;

        urb_status = get_urb_status_from_qtd(urb, le32_to_cpu(qtd->status));

        qset_remove_qtds(whc, qset, urb);
        qset_remove_urb(whc, qset, urb, urb_status);

        list_for_each_entry(std, &qset->stds, list_node) {
                if (qset->ntds == 0)
                        break;
                qset_remove_qtd(whc, qset);
                std->qtd = NULL;
        }

        qset->remove = 1;
}
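
/*
 * qset_free - return a qset to the qset DMA pool.  The qset must no
 * longer be in use by the hardware; use qset_delete() to wait for the
 * removal to complete first.
 */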
void qset_free(struct whc *whc, struct whc_qset *qset)
{
        dma_pool_free(whc->qset_pool, qset, qset->qset_dma);
}

/**
 * qset_delete - wait for a qset to be unused, then free it.
 */
void qset_delete(struct whc *whc, struct whc_qset *qset)
{
        wait_for_completion(&qset->remove_complete);
        qset_free(whc, qset);
}