uhci-q.c

/*
 * Universal Host Controller Interface driver for USB.
 *
 * Maintainer: Alan Stern <stern@rowland.harvard.edu>
 *
 * (C) Copyright 1999 Linus Torvalds
 * (C) Copyright 1999-2002 Johannes Erdfelt, johannes@erdfelt.com
 * (C) Copyright 1999 Randy Dunlap
 * (C) Copyright 1999 Georg Acher, acher@in.tum.de
 * (C) Copyright 1999 Deti Fliegl, deti@fliegl.de
 * (C) Copyright 1999 Thomas Sailer, sailer@ife.ee.ethz.ch
 * (C) Copyright 1999 Roman Weissgaerber, weissg@vienna.at
 * (C) Copyright 2000 Yggdrasil Computing, Inc. (port of new PCI interface
 *     support from usb-ohci.c by Adam Richter, adam@yggdrasil.com).
 * (C) Copyright 1999 Gregory P. Smith (from usb-ohci.c)
 * (C) Copyright 2004-2006 Alan Stern, stern@rowland.harvard.edu
 */

/*
 * Technically, updating td->status here is a race, but it's not really a
 * problem. The worst that can happen is that we set the IOC bit again
 * generating a spurious interrupt. We could fix this by creating another
 * QH and leaving the IOC bit always set, but then we would have to play
 * games with the FSBR code to make sure we get the correct order in all
 * the cases. I don't think it's worth the effort
 */
static void uhci_set_next_interrupt(struct uhci_hcd *uhci)
{
	if (uhci->is_stopped)
		mod_timer(&uhci_to_hcd(uhci)->rh_timer, jiffies);
	uhci->term_td->status |= cpu_to_le32(TD_CTRL_IOC);
}

static inline void uhci_clear_next_interrupt(struct uhci_hcd *uhci)
{
	uhci->term_td->status &= ~cpu_to_le32(TD_CTRL_IOC);
}

/*
 * Full-Speed Bandwidth Reclamation (FSBR).
 * We turn on FSBR whenever a queue that wants it is advancing,
 * and leave it on for a short time thereafter.
 */
static void uhci_fsbr_on(struct uhci_hcd *uhci)
{
	uhci->fsbr_is_on = 1;
	uhci->skel_term_qh->link = cpu_to_le32(
			uhci->skel_fs_control_qh->dma_handle) | UHCI_PTR_QH;
}

static void uhci_fsbr_off(struct uhci_hcd *uhci)
{
	uhci->fsbr_is_on = 0;
	uhci->skel_term_qh->link = UHCI_PTR_TERM;
}

static void uhci_add_fsbr(struct uhci_hcd *uhci, struct urb *urb)
{
	struct urb_priv *urbp = urb->hcpriv;

	if (!(urb->transfer_flags & URB_NO_FSBR))
		urbp->fsbr = 1;
}

static void uhci_qh_wants_fsbr(struct uhci_hcd *uhci, struct uhci_qh *qh)
{
	struct urb_priv *urbp =
			list_entry(qh->queue.next, struct urb_priv, node);

	if (urbp->fsbr) {
		uhci->fsbr_jiffies = jiffies;
		if (!uhci->fsbr_is_on)
			uhci_fsbr_on(uhci);
	}
}
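
/*
 * Allocate a Transfer Descriptor from the HCD's DMA pool.
 * td->frame == -1 means the TD is not linked into the frame list.
 */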
static struct uhci_td *uhci_alloc_td(struct uhci_hcd *uhci)
{
	dma_addr_t dma_handle;
	struct uhci_td *td;

	td = dma_pool_alloc(uhci->td_pool, GFP_ATOMIC, &dma_handle);
	if (!td)
		return NULL;

	td->dma_handle = dma_handle;
	td->frame = -1;

	INIT_LIST_HEAD(&td->list);
	INIT_LIST_HEAD(&td->fl_list);

	return td;
}
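
/*
 * Return a TD to the DMA pool, warning if it is still linked into
 * a TD list or the frame list.
 */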
static void uhci_free_td(struct uhci_hcd *uhci, struct uhci_td *td)
{
	if (!list_empty(&td->list))
		dev_warn(uhci_dev(uhci), "td %p still in list!\n", td);
	if (!list_empty(&td->fl_list))
		dev_warn(uhci_dev(uhci), "td %p still in fl_list!\n", td);

	dma_pool_free(uhci->td_pool, td, td->dma_handle);
}
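
/* Fill in the three hardware-visible words of a TD: status, token, buffer */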
static inline void uhci_fill_td(struct uhci_td *td, u32 status,
		u32 token, u32 buffer)
{
	td->status = cpu_to_le32(status);
	td->token = cpu_to_le32(token);
	td->buffer = cpu_to_le32(buffer);
}

static void uhci_add_td_to_urbp(struct uhci_td *td, struct urb_priv *urbp)
{
	list_add_tail(&td->list, &urbp->td_list);
}

static void uhci_remove_td_from_urbp(struct uhci_td *td)
{
	list_del_init(&td->list);
}

/*
 * We insert Isochronous URBs directly into the frame list at the beginning
 */
static inline void uhci_insert_td_in_frame_list(struct uhci_hcd *uhci,
		struct uhci_td *td, unsigned framenum)
{
	framenum &= (UHCI_NUMFRAMES - 1);

	td->frame = framenum;

	/* Is there a TD already mapped there? */
	if (uhci->frame_cpu[framenum]) {
		struct uhci_td *ftd, *ltd;

		ftd = uhci->frame_cpu[framenum];
		ltd = list_entry(ftd->fl_list.prev, struct uhci_td, fl_list);

		list_add_tail(&td->fl_list, &ftd->fl_list);

		td->link = ltd->link;
		wmb();
		ltd->link = cpu_to_le32(td->dma_handle);
	} else {
		td->link = uhci->frame[framenum];
		wmb();
		uhci->frame[framenum] = cpu_to_le32(td->dma_handle);
		uhci->frame_cpu[framenum] = td;
	}
}
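
/*
 * Take an Isochronous TD out of the frame list, adjusting either the
 * frame pointer or the previous TD's link as necessary
 */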
static inline void uhci_remove_td_from_frame_list(struct uhci_hcd *uhci,
		struct uhci_td *td)
{
	/* If it's not inserted, don't remove it */
	if (td->frame == -1) {
		WARN_ON(!list_empty(&td->fl_list));
		return;
	}

	if (uhci->frame_cpu[td->frame] == td) {
		if (list_empty(&td->fl_list)) {
			uhci->frame[td->frame] = td->link;
			uhci->frame_cpu[td->frame] = NULL;
		} else {
			struct uhci_td *ntd;

			ntd = list_entry(td->fl_list.next, struct uhci_td, fl_list);
			uhci->frame[td->frame] = cpu_to_le32(ntd->dma_handle);
			uhci->frame_cpu[td->frame] = ntd;
		}
	} else {
		struct uhci_td *ptd;

		ptd = list_entry(td->fl_list.prev, struct uhci_td, fl_list);
		ptd->link = td->link;
	}

	list_del_init(&td->fl_list);
	td->frame = -1;
}

/*
 * Remove all the TDs for an Isochronous URB from the frame list
 */
static void uhci_unlink_isochronous_tds(struct uhci_hcd *uhci, struct urb *urb)
{
	struct urb_priv *urbp = (struct urb_priv *) urb->hcpriv;
	struct uhci_td *td;

	list_for_each_entry(td, &urbp->td_list, list)
		uhci_remove_td_from_frame_list(uhci, td);
}
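
/*
 * Allocate a Queue Header.  If udev is non-NULL this is a normal
 * endpoint QH and gets its own dummy TD; otherwise it is a skeleton QH.
 */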
static struct uhci_qh *uhci_alloc_qh(struct uhci_hcd *uhci,
		struct usb_device *udev, struct usb_host_endpoint *hep)
{
	dma_addr_t dma_handle;
	struct uhci_qh *qh;

	qh = dma_pool_alloc(uhci->qh_pool, GFP_ATOMIC, &dma_handle);
	if (!qh)
		return NULL;

	memset(qh, 0, sizeof(*qh));
	qh->dma_handle = dma_handle;

	qh->element = UHCI_PTR_TERM;
	qh->link = UHCI_PTR_TERM;

	INIT_LIST_HEAD(&qh->queue);
	INIT_LIST_HEAD(&qh->node);

	if (udev) {		/* Normal QH */
		qh->dummy_td = uhci_alloc_td(uhci);
		if (!qh->dummy_td) {
			dma_pool_free(uhci->qh_pool, qh, dma_handle);
			return NULL;
		}
		qh->state = QH_STATE_IDLE;
		qh->hep = hep;
		qh->udev = udev;
		hep->hcpriv = qh;
		qh->type = hep->desc.bmAttributes & USB_ENDPOINT_XFERTYPE_MASK;

	} else {		/* Skeleton QH */
		qh->state = QH_STATE_ACTIVE;
		qh->type = -1;
	}
	return qh;
}
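
/* Free a QH (and, for a normal QH, its dummy TD) back to the DMA pool */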
static void uhci_free_qh(struct uhci_hcd *uhci, struct uhci_qh *qh)
{
	WARN_ON(qh->state != QH_STATE_IDLE && qh->udev);
	if (!list_empty(&qh->queue))
		dev_warn(uhci_dev(uhci), "qh %p list not empty!\n", qh);

	list_del(&qh->node);
	if (qh->udev) {
		qh->hep->hcpriv = NULL;
		uhci_free_td(uhci, qh->dummy_td);
	}
	dma_pool_free(uhci->qh_pool, qh, qh->dma_handle);
}

/*
 * When a queue is stopped and a dequeued URB is given back, adjust
 * the previous TD link (if the URB isn't first on the queue) or
 * save its toggle value (if it is first and is currently executing).
 *
 * Returns 0 if the URB should not yet be given back, 1 otherwise.
 */
static int uhci_cleanup_queue(struct uhci_hcd *uhci, struct uhci_qh *qh,
		struct urb *urb)
{
	struct urb_priv *urbp = urb->hcpriv;
	struct uhci_td *td;
	int ret = 1;

	/* Isochronous pipes don't use toggles and their TD link pointers
	 * get adjusted during uhci_urb_dequeue().  But since their queues
	 * cannot truly be stopped, we have to watch out for dequeues
	 * occurring after the nominal unlink frame. */
	if (qh->type == USB_ENDPOINT_XFER_ISOC) {
		ret = (uhci->frame_number + uhci->is_stopped !=
				qh->unlink_frame);
		return ret;
	}

	/* If the URB isn't first on its queue, adjust the link pointer
	 * of the last TD in the previous URB.  The toggle doesn't need
	 * to be saved since this URB can't be executing yet. */
	if (qh->queue.next != &urbp->node) {
		struct urb_priv *purbp;
		struct uhci_td *ptd;

		purbp = list_entry(urbp->node.prev, struct urb_priv, node);
		WARN_ON(list_empty(&purbp->td_list));
		ptd = list_entry(purbp->td_list.prev, struct uhci_td,
				list);
		td = list_entry(urbp->td_list.prev, struct uhci_td,
				list);
		ptd->link = td->link;
		return ret;
	}

	/* If the QH element pointer is UHCI_PTR_TERM then the currently
	 * executing URB has already been unlinked, so this one isn't it. */
	if (qh_element(qh) == UHCI_PTR_TERM)
		return ret;
	qh->element = UHCI_PTR_TERM;

	/* Control pipes have to worry about toggles */
	if (qh->type == USB_ENDPOINT_XFER_CONTROL)
		return ret;

	/* Save the next toggle value */
	WARN_ON(list_empty(&urbp->td_list));
	td = list_entry(urbp->td_list.next, struct uhci_td, list);
	qh->needs_fixup = 1;
	qh->initial_toggle = uhci_toggle(td_token(td));
	return ret;
}

/*
 * Fix up the data toggles for URBs in a queue, when one of them
 * terminates early (short transfer, error, or dequeued).
 */
static void uhci_fixup_toggles(struct uhci_qh *qh, int skip_first)
{
	struct urb_priv *urbp = NULL;
	struct uhci_td *td;
	unsigned int toggle = qh->initial_toggle;
	unsigned int pipe;

	/* Fixups for a short transfer start with the second URB in the
	 * queue (the short URB is the first). */
	if (skip_first)
		urbp = list_entry(qh->queue.next, struct urb_priv, node);

	/* When starting with the first URB, if the QH element pointer is
	 * still valid then we know the URB's toggles are okay. */
	else if (qh_element(qh) != UHCI_PTR_TERM)
		toggle = 2;

	/* Fix up the toggle for the URBs in the queue.  Normally this
	 * loop won't run more than once: When an error or short transfer
	 * occurs, the queue usually gets emptied. */
	urbp = list_prepare_entry(urbp, &qh->queue, node);
	list_for_each_entry_continue(urbp, &qh->queue, node) {

		/* If the first TD has the right toggle value, we don't
		 * need to change any toggles in this URB.  The next URB
		 * starts with the toggle of this URB's last TD, flipped. */
		td = list_entry(urbp->td_list.next, struct uhci_td, list);
		if (toggle > 1 || uhci_toggle(td_token(td)) == toggle) {
			td = list_entry(urbp->td_list.prev, struct uhci_td,
					list);
			toggle = uhci_toggle(td_token(td)) ^ 1;

		/* Otherwise all the toggles in the URB have to be switched */
		} else {
			list_for_each_entry(td, &urbp->td_list, list) {
				td->token ^= __constant_cpu_to_le32(
							TD_TOKEN_TOGGLE);
				toggle ^= 1;
			}
		}
	}

	wmb();
	pipe = list_entry(qh->queue.next, struct urb_priv, node)->urb->pipe;
	usb_settoggle(qh->udev, usb_pipeendpoint(pipe),
			usb_pipeout(pipe), toggle);
	qh->needs_fixup = 0;
}

/*
 * Put a QH on the schedule in both hardware and software
 */
static void uhci_activate_qh(struct uhci_hcd *uhci, struct uhci_qh *qh)
{
	struct uhci_qh *pqh;

	WARN_ON(list_empty(&qh->queue));

	/* Set the element pointer if it isn't set already.
	 * This isn't needed for Isochronous queues, but it doesn't hurt. */
	if (qh_element(qh) == UHCI_PTR_TERM) {
		struct urb_priv *urbp = list_entry(qh->queue.next,
				struct urb_priv, node);
		struct uhci_td *td = list_entry(urbp->td_list.next,
				struct uhci_td, list);

		qh->element = cpu_to_le32(td->dma_handle);
	}

	/* Treat the queue as if it has just advanced */
	qh->wait_expired = 0;
	qh->advance_jiffies = jiffies;

	if (qh->state == QH_STATE_ACTIVE)
		return;
	qh->state = QH_STATE_ACTIVE;

	/* Move the QH from its old list to the end of the appropriate
	 * skeleton's list */
	if (qh == uhci->next_qh)
		uhci->next_qh = list_entry(qh->node.next, struct uhci_qh,
				node);
	list_move_tail(&qh->node, &qh->skel->node);

	/* Link it into the schedule */
	pqh = list_entry(qh->node.prev, struct uhci_qh, node);
	qh->link = pqh->link;
	wmb();
	pqh->link = UHCI_PTR_QH | cpu_to_le32(qh->dma_handle);
}

/*
 * Take a QH off the hardware schedule
 */
static void uhci_unlink_qh(struct uhci_hcd *uhci, struct uhci_qh *qh)
{
	struct uhci_qh *pqh;

	if (qh->state == QH_STATE_UNLINKING)
		return;
	WARN_ON(qh->state != QH_STATE_ACTIVE || !qh->udev);
	qh->state = QH_STATE_UNLINKING;

	/* Unlink the QH from the schedule and record when we did it */
	pqh = list_entry(qh->node.prev, struct uhci_qh, node);
	pqh->link = qh->link;
	mb();

	uhci_get_current_frame_number(uhci);
	qh->unlink_frame = uhci->frame_number;

	/* Force an interrupt so we know when the QH is fully unlinked */
	if (list_empty(&uhci->skel_unlink_qh->node))
		uhci_set_next_interrupt(uhci);

	/* Move the QH from its old list to the end of the unlinking list */
	if (qh == uhci->next_qh)
		uhci->next_qh = list_entry(qh->node.next, struct uhci_qh,
				node);
	list_move_tail(&qh->node, &uhci->skel_unlink_qh->node);
}

/*
 * When we and the controller are through with a QH, it becomes IDLE.
 * This happens when a QH has been off the schedule (on the unlinking
 * list) for more than one frame, or when an error occurs while adding
 * the first URB onto a new QH.
 */
static void uhci_make_qh_idle(struct uhci_hcd *uhci, struct uhci_qh *qh)
{
	WARN_ON(qh->state == QH_STATE_ACTIVE);

	if (qh == uhci->next_qh)
		uhci->next_qh = list_entry(qh->node.next, struct uhci_qh,
				node);
	list_move(&qh->node, &uhci->idle_qh_list);
	qh->state = QH_STATE_IDLE;

	/* Now that the QH is idle, its post_td isn't being used */
	if (qh->post_td) {
		uhci_free_td(uhci, qh->post_td);
		qh->post_td = NULL;
	}

	/* If anyone is waiting for a QH to become idle, wake them up */
	if (uhci->num_waiting)
		wake_up_all(&uhci->waitqh);
}
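
/* Allocate and initialize the driver-private data for an URB */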
static inline struct urb_priv *uhci_alloc_urb_priv(struct uhci_hcd *uhci,
		struct urb *urb)
{
	struct urb_priv *urbp;

	urbp = kmem_cache_alloc(uhci_up_cachep, SLAB_ATOMIC);
	if (!urbp)
		return NULL;

	memset((void *)urbp, 0, sizeof(*urbp));

	urbp->urb = urb;
	urb->hcpriv = urbp;

	INIT_LIST_HEAD(&urbp->node);
	INIT_LIST_HEAD(&urbp->td_list);

	return urbp;
}
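
/* Free an URB's private data, including any TDs still attached to it */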
static void uhci_free_urb_priv(struct uhci_hcd *uhci,
		struct urb_priv *urbp)
{
	struct uhci_td *td, *tmp;

	if (!list_empty(&urbp->node))
		dev_warn(uhci_dev(uhci), "urb %p still on QH's list!\n",
				urbp->urb);

	list_for_each_entry_safe(td, tmp, &urbp->td_list, list) {
		uhci_remove_td_from_urbp(td);
		uhci_free_td(uhci, td);
	}

	urbp->urb->hcpriv = NULL;
	kmem_cache_free(uhci_up_cachep, urbp);
}

/*
 * Map status to standard result codes
 *
 * <status> is (td_status(td) & 0xF60000), a.k.a.
 * uhci_status_bits(td_status(td)).
 * Note: <status> does not include the TD_CTRL_NAK bit.
 * <dir_out> is True for output TDs and False for input TDs.
 */
static int uhci_map_status(int status, int dir_out)
{
	if (!status)
		return 0;
	if (status & TD_CTRL_BITSTUFF)			/* Bitstuff error */
		return -EPROTO;
	if (status & TD_CTRL_CRCTIMEO) {		/* CRC/Timeout */
		if (dir_out)
			return -EPROTO;
		else
			return -EILSEQ;
	}
	if (status & TD_CTRL_BABBLE)			/* Babble */
		return -EOVERFLOW;
	if (status & TD_CTRL_DBUFERR)			/* Buffer error */
		return -ENOSR;
	if (status & TD_CTRL_STALLED)			/* Stalled */
		return -EPIPE;
	WARN_ON(status & TD_CTRL_ACTIVE);		/* Active */
	return 0;
}

/*
 * Control transfers
 */
static int uhci_submit_control(struct uhci_hcd *uhci, struct urb *urb,
		struct uhci_qh *qh)
{
	struct uhci_td *td;
	unsigned long destination, status;
	int maxsze = le16_to_cpu(qh->hep->desc.wMaxPacketSize);
	int len = urb->transfer_buffer_length;
	dma_addr_t data = urb->transfer_dma;
	__le32 *plink;
	struct urb_priv *urbp = urb->hcpriv;

	/* The "pipe" thing contains the destination in bits 8--18 */
	destination = (urb->pipe & PIPE_DEVEP_MASK) | USB_PID_SETUP;

	/* 3 errors, dummy TD remains inactive */
	status = uhci_maxerr(3);
	if (urb->dev->speed == USB_SPEED_LOW)
		status |= TD_CTRL_LS;

	/*
	 * Build the TD for the control request setup packet
	 */
	td = qh->dummy_td;
	uhci_add_td_to_urbp(td, urbp);
	uhci_fill_td(td, status, destination | uhci_explen(8),
			urb->setup_dma);
	plink = &td->link;
	status |= TD_CTRL_ACTIVE;

	/*
	 * If direction is "send", change the packet ID from SETUP (0x2D)
	 * to OUT (0xE1).  Else change it from SETUP to IN (0x69) and
	 * set Short Packet Detect (SPD) for all data packets.
	 */
	if (usb_pipeout(urb->pipe))
		destination ^= (USB_PID_SETUP ^ USB_PID_OUT);
	else {
		destination ^= (USB_PID_SETUP ^ USB_PID_IN);
		status |= TD_CTRL_SPD;
	}

	/*
	 * Build the DATA TDs
	 */
	while (len > 0) {
		int pktsze = min(len, maxsze);

		td = uhci_alloc_td(uhci);
		if (!td)
			goto nomem;
		*plink = cpu_to_le32(td->dma_handle);

		/* Alternate Data0/1 (start with Data1) */
		destination ^= TD_TOKEN_TOGGLE;

		uhci_add_td_to_urbp(td, urbp);
		uhci_fill_td(td, status, destination | uhci_explen(pktsze),
				data);
		plink = &td->link;

		data += pktsze;
		len -= pktsze;
	}

	/*
	 * Build the final TD for control status
	 */
	td = uhci_alloc_td(uhci);
	if (!td)
		goto nomem;
	*plink = cpu_to_le32(td->dma_handle);

	/*
	 * It's IN if the pipe is an output pipe or we're not expecting
	 * data back.
	 */
	destination &= ~TD_TOKEN_PID_MASK;
	if (usb_pipeout(urb->pipe) || !urb->transfer_buffer_length)
		destination |= USB_PID_IN;
	else
		destination |= USB_PID_OUT;
	destination |= TD_TOKEN_TOGGLE;		/* End in Data1 */

	status &= ~TD_CTRL_SPD;

	uhci_add_td_to_urbp(td, urbp);
	uhci_fill_td(td, status | TD_CTRL_IOC,
			destination | uhci_explen(0), 0);
	plink = &td->link;

	/*
	 * Build the new dummy TD and activate the old one
	 */
	td = uhci_alloc_td(uhci);
	if (!td)
		goto nomem;
	*plink = cpu_to_le32(td->dma_handle);

	uhci_fill_td(td, 0, USB_PID_OUT | uhci_explen(0), 0);
	wmb();
	qh->dummy_td->status |= __constant_cpu_to_le32(TD_CTRL_ACTIVE);
	qh->dummy_td = td;

	/* Low-speed transfers get a different queue, and won't hog the bus.
	 * Also, some devices enumerate better without FSBR; the easiest way
	 * to do that is to put URBs on the low-speed queue while the device
	 * isn't in the CONFIGURED state. */
	if (urb->dev->speed == USB_SPEED_LOW ||
			urb->dev->state != USB_STATE_CONFIGURED)
		qh->skel = uhci->skel_ls_control_qh;
	else {
		qh->skel = uhci->skel_fs_control_qh;
		uhci_add_fsbr(uhci, urb);
	}

	urb->actual_length = -8;	/* Account for the SETUP packet */
	return 0;

nomem:
	/* Remove the dummy TD from the td_list so it doesn't get freed */
	uhci_remove_td_from_urbp(qh->dummy_td);
	return -ENOMEM;
}

/*
 * Common submit for bulk and interrupt
 */
static int uhci_submit_common(struct uhci_hcd *uhci, struct urb *urb,
		struct uhci_qh *qh)
{
	struct uhci_td *td;
	unsigned long destination, status;
	int maxsze = le16_to_cpu(qh->hep->desc.wMaxPacketSize);
	int len = urb->transfer_buffer_length;
	dma_addr_t data = urb->transfer_dma;
	__le32 *plink;
	struct urb_priv *urbp = urb->hcpriv;
	unsigned int toggle;

	if (len < 0)
		return -EINVAL;

	/* The "pipe" thing contains the destination in bits 8--18 */
	destination = (urb->pipe & PIPE_DEVEP_MASK) | usb_packetid(urb->pipe);
	toggle = usb_gettoggle(urb->dev, usb_pipeendpoint(urb->pipe),
			usb_pipeout(urb->pipe));

	/* 3 errors, dummy TD remains inactive */
	status = uhci_maxerr(3);
	if (urb->dev->speed == USB_SPEED_LOW)
		status |= TD_CTRL_LS;
	if (usb_pipein(urb->pipe))
		status |= TD_CTRL_SPD;

	/*
	 * Build the DATA TDs
	 */
	plink = NULL;
	td = qh->dummy_td;
	do {	/* Allow zero length packets */
		int pktsze = maxsze;

		if (len <= pktsze) {		/* The last packet */
			pktsze = len;
			if (!(urb->transfer_flags & URB_SHORT_NOT_OK))
				status &= ~TD_CTRL_SPD;
		}

		if (plink) {
			td = uhci_alloc_td(uhci);
			if (!td)
				goto nomem;
			*plink = cpu_to_le32(td->dma_handle);
		}
		uhci_add_td_to_urbp(td, urbp);
		uhci_fill_td(td, status,
				destination | uhci_explen(pktsze) |
					(toggle << TD_TOKEN_TOGGLE_SHIFT),
				data);
		plink = &td->link;
		status |= TD_CTRL_ACTIVE;

		data += pktsze;
		len -= maxsze;
		toggle ^= 1;
	} while (len > 0);

	/*
	 * URB_ZERO_PACKET means adding a 0-length packet, if direction
	 * is OUT and the transfer_length was an exact multiple of maxsze,
	 * hence (len = transfer_length - N * maxsze) == 0
	 * however, if transfer_length == 0, the zero packet was already
	 * prepared above.
	 */
	if ((urb->transfer_flags & URB_ZERO_PACKET) &&
			usb_pipeout(urb->pipe) && len == 0 &&
			urb->transfer_buffer_length > 0) {
		td = uhci_alloc_td(uhci);
		if (!td)
			goto nomem;
		*plink = cpu_to_le32(td->dma_handle);

		uhci_add_td_to_urbp(td, urbp);
		uhci_fill_td(td, status,
				destination | uhci_explen(0) |
					(toggle << TD_TOKEN_TOGGLE_SHIFT),
				data);
		plink = &td->link;

		toggle ^= 1;
	}

	/* Set the interrupt-on-completion flag on the last packet.
	 * A more-or-less typical 4 KB URB (= size of one memory page)
	 * will require about 3 ms to transfer; that's a little on the
	 * fast side but not enough to justify delaying an interrupt
	 * more than 2 or 3 URBs, so we will ignore the URB_NO_INTERRUPT
	 * flag setting. */
	td->status |= __constant_cpu_to_le32(TD_CTRL_IOC);

	/*
	 * Build the new dummy TD and activate the old one
	 */
	td = uhci_alloc_td(uhci);
	if (!td)
		goto nomem;
	*plink = cpu_to_le32(td->dma_handle);

	uhci_fill_td(td, 0, USB_PID_OUT | uhci_explen(0), 0);
	wmb();
	qh->dummy_td->status |= __constant_cpu_to_le32(TD_CTRL_ACTIVE);
	qh->dummy_td = td;

	usb_settoggle(urb->dev, usb_pipeendpoint(urb->pipe),
			usb_pipeout(urb->pipe), toggle);
	return 0;

nomem:
	/* Remove the dummy TD from the td_list so it doesn't get freed */
	uhci_remove_td_from_urbp(qh->dummy_td);
	return -ENOMEM;
}
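
/*
 * Bulk transfers: full speed only; they go on the bulk skeleton QH
 * and are eligible for FSBR
 */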
static inline int uhci_submit_bulk(struct uhci_hcd *uhci, struct urb *urb,
		struct uhci_qh *qh)
{
	int ret;

	/* Can't have low-speed bulk transfers */
	if (urb->dev->speed == USB_SPEED_LOW)
		return -EINVAL;

	qh->skel = uhci->skel_bulk_qh;
	ret = uhci_submit_common(uhci, urb, qh);
	if (ret == 0)
		uhci_add_fsbr(uhci, urb);
	return ret;
}

static inline int uhci_submit_interrupt(struct uhci_hcd *uhci, struct urb *urb,
		struct uhci_qh *qh)
{
	/* USB 1.1 interrupt transfers only involve one packet per interval.
	 * Drivers can submit URBs of any length, but longer ones will need
	 * multiple intervals to complete.
	 */
	qh->skel = uhci->skelqh[__interval_to_skel(urb->interval)];
	return uhci_submit_common(uhci, urb, qh);
}

/*
 * Fix up the data structures following a short transfer
 */
static int uhci_fixup_short_transfer(struct uhci_hcd *uhci,
		struct uhci_qh *qh, struct urb_priv *urbp)
{
	struct uhci_td *td;
	struct list_head *tmp;
	int ret;

	td = list_entry(urbp->td_list.prev, struct uhci_td, list);
	if (qh->type == USB_ENDPOINT_XFER_CONTROL) {

		/* When a control transfer is short, we have to restart
		 * the queue at the status stage transaction, which is
		 * the last TD. */
		WARN_ON(list_empty(&urbp->td_list));
		qh->element = cpu_to_le32(td->dma_handle);
		tmp = td->list.prev;
		ret = -EINPROGRESS;

	} else {

		/* When a bulk/interrupt transfer is short, we have to
		 * fix up the toggles of the following URBs on the queue
		 * before restarting the queue at the next URB. */
		qh->initial_toggle = uhci_toggle(td_token(qh->post_td)) ^ 1;
		uhci_fixup_toggles(qh, 1);

		if (list_empty(&urbp->td_list))
			td = qh->post_td;
		qh->element = td->link;
		tmp = urbp->td_list.prev;
		ret = 0;
	}

	/* Remove all the TDs we skipped over, from tmp back to the start */
	while (tmp != &urbp->td_list) {
		td = list_entry(tmp, struct uhci_td, list);
		tmp = tmp->prev;

		uhci_remove_td_from_urbp(td);
		uhci_free_td(uhci, td);
	}
	return ret;
}

/*
 * Common result for control, bulk, and interrupt
 */
static int uhci_result_common(struct uhci_hcd *uhci, struct urb *urb)
{
	struct urb_priv *urbp = urb->hcpriv;
	struct uhci_qh *qh = urbp->qh;
	struct uhci_td *td, *tmp;
	unsigned status;
	int ret = 0;

	list_for_each_entry_safe(td, tmp, &urbp->td_list, list) {
		unsigned int ctrlstat;
		int len;

		ctrlstat = td_status(td);
		status = uhci_status_bits(ctrlstat);
		if (status & TD_CTRL_ACTIVE)
			return -EINPROGRESS;

		len = uhci_actual_length(ctrlstat);
		urb->actual_length += len;

		if (status) {
			ret = uhci_map_status(status,
					uhci_packetout(td_token(td)));
			if ((debug == 1 && ret != -EPIPE) || debug > 1) {
				/* Some debugging code */
				dev_dbg(uhci_dev(uhci),
						"%s: failed with status %x\n",
						__FUNCTION__, status);

				if (debug > 1 && errbuf) {
					/* Print the chain for debugging */
					uhci_show_qh(urbp->qh, errbuf,
							ERRBUF_LEN, 0);
					lprintk(errbuf);
				}
			}

		} else if (len < uhci_expected_length(td_token(td))) {

			/* We received a short packet */
			if (urb->transfer_flags & URB_SHORT_NOT_OK)
				ret = -EREMOTEIO;
			else if (ctrlstat & TD_CTRL_SPD)
				ret = 1;
		}

		uhci_remove_td_from_urbp(td);
		if (qh->post_td)
			uhci_free_td(uhci, qh->post_td);
		qh->post_td = td;

		if (ret != 0)
			goto err;
	}
	return ret;

err:
	if (ret < 0) {
		/* In case a control transfer gets an error
		 * during the setup stage */
		urb->actual_length = max(urb->actual_length, 0);

		/* Note that the queue has stopped and save
		 * the next toggle value */
		qh->element = UHCI_PTR_TERM;
		qh->is_stopped = 1;
		qh->needs_fixup = (qh->type != USB_ENDPOINT_XFER_CONTROL);
		qh->initial_toggle = uhci_toggle(td_token(td)) ^
				(ret == -EREMOTEIO);

	} else		/* Short packet received */
		ret = uhci_fixup_short_transfer(uhci, qh, urbp);
	return ret;
}

/*
 * Isochronous transfers
 */
static int uhci_submit_isochronous(struct uhci_hcd *uhci, struct urb *urb,
		struct uhci_qh *qh)
{
	struct uhci_td *td = NULL;	/* Since urb->number_of_packets > 0 */
	int i, frame;
	unsigned long destination, status;
	struct urb_priv *urbp = (struct urb_priv *) urb->hcpriv;

	if (urb->number_of_packets > 900)	/* 900? Why? */
		return -EFBIG;

	status = TD_CTRL_ACTIVE | TD_CTRL_IOS;
	destination = (urb->pipe & PIPE_DEVEP_MASK) | usb_packetid(urb->pipe);

	/* Figure out the starting frame number */
	if (urb->transfer_flags & URB_ISO_ASAP) {
		if (list_empty(&qh->queue)) {
			uhci_get_current_frame_number(uhci);
			urb->start_frame = (uhci->frame_number + 10);

		} else {		/* Go right after the last one */
			struct urb *last_urb;

			last_urb = list_entry(qh->queue.prev,
					struct urb_priv, node)->urb;
			urb->start_frame = (last_urb->start_frame +
					last_urb->number_of_packets *
					last_urb->interval);
		}
	} else {
		/* FIXME: Sanity check */
	}

	for (i = 0; i < urb->number_of_packets; i++) {
		td = uhci_alloc_td(uhci);
		if (!td)
			return -ENOMEM;

		uhci_add_td_to_urbp(td, urbp);
		uhci_fill_td(td, status, destination |
				uhci_explen(urb->iso_frame_desc[i].length),
				urb->transfer_dma +
					urb->iso_frame_desc[i].offset);
	}

	/* Set the interrupt-on-completion flag on the last packet. */
	td->status |= __constant_cpu_to_le32(TD_CTRL_IOC);

	qh->skel = uhci->skel_iso_qh;

	/* Add the TDs to the frame list */
	frame = urb->start_frame;
	list_for_each_entry(td, &urbp->td_list, list) {
		uhci_insert_td_in_frame_list(uhci, td, frame);
		frame += urb->interval;
	}

	return 0;
}
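
/*
 * Collect the per-frame results of an Isochronous URB and accumulate
 * the total actual_length and error_count
 */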
static int uhci_result_isochronous(struct uhci_hcd *uhci, struct urb *urb)
{
	struct uhci_td *td;
	struct urb_priv *urbp = (struct urb_priv *)urb->hcpriv;
	int status;
	int i, ret = 0;

	urb->actual_length = urb->error_count = 0;

	i = 0;
	list_for_each_entry(td, &urbp->td_list, list) {
		int actlength;
		unsigned int ctrlstat = td_status(td);

		if (ctrlstat & TD_CTRL_ACTIVE)
			return -EINPROGRESS;

		actlength = uhci_actual_length(ctrlstat);
		urb->iso_frame_desc[i].actual_length = actlength;
		urb->actual_length += actlength;

		status = uhci_map_status(uhci_status_bits(ctrlstat),
				usb_pipeout(urb->pipe));
		urb->iso_frame_desc[i].status = status;
		if (status) {
			urb->error_count++;
			ret = status;
		}

		i++;
	}

	return ret;
}
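
/*
 * usb_hcd entry point: queue an URB, creating a QH for the endpoint
 * if it doesn't already have one
 */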
static int uhci_urb_enqueue(struct usb_hcd *hcd,
		struct usb_host_endpoint *hep,
		struct urb *urb, gfp_t mem_flags)
{
	int ret;
	struct uhci_hcd *uhci = hcd_to_uhci(hcd);
	unsigned long flags;
	struct urb_priv *urbp;
	struct uhci_qh *qh;
	int bustime;

	spin_lock_irqsave(&uhci->lock, flags);

	ret = urb->status;
	if (ret != -EINPROGRESS)		/* URB already unlinked! */
		goto done;

	ret = -ENOMEM;
	urbp = uhci_alloc_urb_priv(uhci, urb);
	if (!urbp)
		goto done;

	if (hep->hcpriv)
		qh = (struct uhci_qh *) hep->hcpriv;
	else {
		qh = uhci_alloc_qh(uhci, urb->dev, hep);
		if (!qh)
			goto err_no_qh;
	}
	urbp->qh = qh;

	switch (qh->type) {
	case USB_ENDPOINT_XFER_CONTROL:
		ret = uhci_submit_control(uhci, urb, qh);
		break;
	case USB_ENDPOINT_XFER_BULK:
		ret = uhci_submit_bulk(uhci, urb, qh);
		break;
	case USB_ENDPOINT_XFER_INT:
		if (list_empty(&qh->queue)) {
			bustime = usb_check_bandwidth(urb->dev, urb);
			if (bustime < 0)
				ret = bustime;
			else {
				ret = uhci_submit_interrupt(uhci, urb, qh);
				if (ret == 0)
					usb_claim_bandwidth(urb->dev, urb, bustime, 0);
			}
		} else {	/* inherit from parent */
			struct urb_priv *eurbp;

			eurbp = list_entry(qh->queue.prev, struct urb_priv,
					node);
			urb->bandwidth = eurbp->urb->bandwidth;
			ret = uhci_submit_interrupt(uhci, urb, qh);
		}
		break;
	case USB_ENDPOINT_XFER_ISOC:
		bustime = usb_check_bandwidth(urb->dev, urb);
		if (bustime < 0) {
			ret = bustime;
			break;
		}

		ret = uhci_submit_isochronous(uhci, urb, qh);
		if (ret == 0)
			usb_claim_bandwidth(urb->dev, urb, bustime, 1);
		break;
	}
	if (ret != 0)
		goto err_submit_failed;

	/* Add this URB to the QH */
	urbp->qh = qh;
	list_add_tail(&urbp->node, &qh->queue);

	/* If the new URB is the first and only one on this QH then either
	 * the QH is new and idle or else it's unlinked and waiting to
	 * become idle, so we can activate it right away.  But only if the
	 * queue isn't stopped. */
	if (qh->queue.next == &urbp->node && !qh->is_stopped) {
		uhci_activate_qh(uhci, qh);
		uhci_qh_wants_fsbr(uhci, qh);
	}
	goto done;

err_submit_failed:
	if (qh->state == QH_STATE_IDLE)
		uhci_make_qh_idle(uhci, qh);	/* Reclaim unused QH */

err_no_qh:
	uhci_free_urb_priv(uhci, urbp);

done:
	spin_unlock_irqrestore(&uhci->lock, flags);
	return ret;
}
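
/*
 * usb_hcd entry point: start unlinking an URB.  The URB is given back
 * later, when the schedule is scanned.
 */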
static int uhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb)
{
	struct uhci_hcd *uhci = hcd_to_uhci(hcd);
	unsigned long flags;
	struct urb_priv *urbp;
	struct uhci_qh *qh;

	spin_lock_irqsave(&uhci->lock, flags);
	urbp = urb->hcpriv;
	if (!urbp)			/* URB was never linked! */
		goto done;
	qh = urbp->qh;

	/* Remove Isochronous TDs from the frame list ASAP */
	if (qh->type == USB_ENDPOINT_XFER_ISOC) {
		uhci_unlink_isochronous_tds(uhci, urb);
		mb();

		/* If the URB has already started, update the QH unlink time */
		uhci_get_current_frame_number(uhci);
		if (uhci_frame_before_eq(urb->start_frame, uhci->frame_number))
			qh->unlink_frame = uhci->frame_number;
	}

	uhci_unlink_qh(uhci, qh);

done:
	spin_unlock_irqrestore(&uhci->lock, flags);
	return 0;
}

/*
 * Finish unlinking an URB and give it back
 */
static void uhci_giveback_urb(struct uhci_hcd *uhci, struct uhci_qh *qh,
		struct urb *urb, struct pt_regs *regs)
__releases(uhci->lock)
__acquires(uhci->lock)
{
	struct urb_priv *urbp = (struct urb_priv *) urb->hcpriv;

	/* Isochronous TDs get unlinked directly from the frame list */
	if (qh->type == USB_ENDPOINT_XFER_ISOC)
		uhci_unlink_isochronous_tds(uhci, urb);

	/* Take the URB off the QH's queue.  If the queue is now empty,
	 * this is a perfect time for a toggle fixup. */
	list_del_init(&urbp->node);
	if (list_empty(&qh->queue) && qh->needs_fixup) {
		usb_settoggle(urb->dev, usb_pipeendpoint(urb->pipe),
				usb_pipeout(urb->pipe), qh->initial_toggle);
		qh->needs_fixup = 0;
	}

	uhci_free_urb_priv(uhci, urbp);

	switch (qh->type) {
	case USB_ENDPOINT_XFER_ISOC:
		/* Release bandwidth for Interrupt or Isoc. transfers */
		if (urb->bandwidth)
			usb_release_bandwidth(urb->dev, urb, 1);
		break;
	case USB_ENDPOINT_XFER_INT:
		/* Release bandwidth for Interrupt or Isoc. transfers */
		/* Make sure we don't release if we have a queued URB */
		if (list_empty(&qh->queue) && urb->bandwidth)
			usb_release_bandwidth(urb->dev, urb, 0);
		else
			/* bandwidth was passed on to queued URB, */
			/* so don't let usb_unlink_urb() release it */
			urb->bandwidth = 0;
		break;
	}

	spin_unlock(&uhci->lock);
	usb_hcd_giveback_urb(uhci_to_hcd(uhci), urb, regs);
	spin_lock(&uhci->lock);

	/* If the queue is now empty, we can unlink the QH and give up its
	 * reserved bandwidth. */
	if (list_empty(&qh->queue)) {
		uhci_unlink_qh(uhci, qh);

		/* Bandwidth stuff not yet implemented */
	}
}

/*
 * Scan the URBs in a QH's queue
 */
#define QH_FINISHED_UNLINKING(qh)			\
		(qh->state == QH_STATE_UNLINKING &&	\
		 uhci->frame_number + uhci->is_stopped != qh->unlink_frame)

static void uhci_scan_qh(struct uhci_hcd *uhci, struct uhci_qh *qh,
		struct pt_regs *regs)
{
	struct urb_priv *urbp;
	struct urb *urb;
	int status;

	while (!list_empty(&qh->queue)) {
		urbp = list_entry(qh->queue.next, struct urb_priv, node);
		urb = urbp->urb;

		if (qh->type == USB_ENDPOINT_XFER_ISOC)
			status = uhci_result_isochronous(uhci, urb);
		else
			status = uhci_result_common(uhci, urb);
		if (status == -EINPROGRESS)
			break;

		spin_lock(&urb->lock);
		if (urb->status == -EINPROGRESS)	/* Not dequeued */
			urb->status = status;
		else
			status = ECONNRESET;		/* Not -ECONNRESET */
		spin_unlock(&urb->lock);

		/* Dequeued but completed URBs can't be given back unless
		 * the QH is stopped or has finished unlinking. */
		if (status == ECONNRESET) {
			if (QH_FINISHED_UNLINKING(qh))
				qh->is_stopped = 1;
			else if (!qh->is_stopped)
				return;
		}

		uhci_giveback_urb(uhci, qh, urb, regs);
		if (status < 0)
			break;
	}

	/* If the QH is neither stopped nor finished unlinking (normal case),
	 * our work here is done. */
	if (QH_FINISHED_UNLINKING(qh))
		qh->is_stopped = 1;
	else if (!qh->is_stopped)
		return;

	/* Otherwise give back each of the dequeued URBs */
restart:
	list_for_each_entry(urbp, &qh->queue, node) {
		urb = urbp->urb;
		if (urb->status != -EINPROGRESS) {

			/* Fix up the TD links and save the toggles for
			 * non-Isochronous queues.  For Isochronous queues,
			 * test for too-recent dequeues. */
			if (!uhci_cleanup_queue(uhci, qh, urb)) {
				qh->is_stopped = 0;
				return;
			}
			uhci_giveback_urb(uhci, qh, urb, regs);
			goto restart;
		}
	}
	qh->is_stopped = 0;

	/* There are no more dequeued URBs.  If there are still URBs on the
	 * queue, the QH can now be re-activated. */
	if (!list_empty(&qh->queue)) {
		if (qh->needs_fixup)
			uhci_fixup_toggles(qh, 0);

		/* If the first URB on the queue wants FSBR but its time
		 * limit has expired, set the next TD to interrupt on
		 * completion before reactivating the QH. */
		urbp = list_entry(qh->queue.next, struct urb_priv, node);
		if (urbp->fsbr && qh->wait_expired) {
			struct uhci_td *td = list_entry(urbp->td_list.next,
					struct uhci_td, list);

			td->status |= __cpu_to_le32(TD_CTRL_IOC);
		}

		uhci_activate_qh(uhci, qh);
	}

	/* The queue is empty.  The QH can become idle if it is fully
	 * unlinked. */
	else if (QH_FINISHED_UNLINKING(qh))
		uhci_make_qh_idle(uhci, qh);
}

/*
 * Check for queues that have made some forward progress.
 * Returns 0 if the queue is not Isochronous, is ACTIVE, and
 * has not advanced since last examined; 1 otherwise.
 *
 * Early Intel controllers have a bug which causes qh->element sometimes
 * not to advance when a TD completes successfully.  The queue remains
 * stuck on the inactive completed TD.  We detect such cases and advance
 * the element pointer by hand.
 */
static int uhci_advance_check(struct uhci_hcd *uhci, struct uhci_qh *qh)
{
	struct urb_priv *urbp = NULL;
	struct uhci_td *td;
	int ret = 1;
	unsigned status;

	if (qh->type == USB_ENDPOINT_XFER_ISOC)
		return ret;

	/* Treat an UNLINKING queue as though it hasn't advanced.
	 * This is okay because reactivation will treat it as though
	 * it has advanced, and if it is going to become IDLE then
	 * this doesn't matter anyway.  Furthermore it's possible
	 * for an UNLINKING queue not to have any URBs at all, or
	 * for its first URB not to have any TDs (if it was dequeued
	 * just as it completed).  So it's not easy in any case to
	 * test whether such queues have advanced. */
	if (qh->state != QH_STATE_ACTIVE) {
		urbp = NULL;
		status = 0;

	} else {
		urbp = list_entry(qh->queue.next, struct urb_priv, node);
		td = list_entry(urbp->td_list.next, struct uhci_td, list);
		status = td_status(td);
		if (!(status & TD_CTRL_ACTIVE)) {

			/* We're okay, the queue has advanced */
			qh->wait_expired = 0;
			qh->advance_jiffies = jiffies;
			return ret;
		}
		ret = 0;
	}

	/* The queue hasn't advanced; check for timeout */
	if (!qh->wait_expired && time_after(jiffies,
			qh->advance_jiffies + QH_WAIT_TIMEOUT)) {

		/* Detect the Intel bug and work around it */
		if (qh->post_td && qh_element(qh) ==
				cpu_to_le32(qh->post_td->dma_handle)) {
			qh->element = qh->post_td->link;
			qh->advance_jiffies = jiffies;
			return 1;
		}

		qh->wait_expired = 1;

		/* If the current URB wants FSBR, unlink it temporarily
		 * so that we can safely set the next TD to interrupt on
		 * completion.  That way we'll know as soon as the queue
		 * starts moving again. */
		if (urbp && urbp->fsbr && !(status & TD_CTRL_IOC))
			uhci_unlink_qh(uhci, qh);
	}
	return ret;
}

/*
 * Process events in the schedule, but only in one thread at a time
 */
static void uhci_scan_schedule(struct uhci_hcd *uhci, struct pt_regs *regs)
{
	int i;
	struct uhci_qh *qh;

	/* Don't allow re-entrant calls */
	if (uhci->scan_in_progress) {
		uhci->need_rescan = 1;
		return;
	}
	uhci->scan_in_progress = 1;
rescan:
	uhci->need_rescan = 0;

	uhci_clear_next_interrupt(uhci);
	uhci_get_current_frame_number(uhci);

	/* Go through all the QH queues and process the URBs in each one */
	for (i = 0; i < UHCI_NUM_SKELQH - 1; ++i) {
		uhci->next_qh = list_entry(uhci->skelqh[i]->node.next,
				struct uhci_qh, node);
		while ((qh = uhci->next_qh) != uhci->skelqh[i]) {
			uhci->next_qh = list_entry(qh->node.next,
					struct uhci_qh, node);

			if (uhci_advance_check(uhci, qh)) {
				uhci_scan_qh(uhci, qh, regs);
				if (qh->state == QH_STATE_ACTIVE)
					uhci_qh_wants_fsbr(uhci, qh);
			}
		}
	}

	if (uhci->need_rescan)
		goto rescan;
	uhci->scan_in_progress = 0;

	if (uhci->fsbr_is_on && time_after(jiffies,
			uhci->fsbr_jiffies + FSBR_OFF_DELAY))
		uhci_fsbr_off(uhci);

	if (list_empty(&uhci->skel_unlink_qh->node))
		uhci_clear_next_interrupt(uhci);
	else
		uhci_set_next_interrupt(uhci);
}