/*
 * Universal Host Controller Interface driver for USB.
 *
 * Maintainer: Alan Stern <stern@rowland.harvard.edu>
 *
 * (C) Copyright 1999 Linus Torvalds
 * (C) Copyright 1999-2002 Johannes Erdfelt, johannes@erdfelt.com
 * (C) Copyright 1999 Randy Dunlap
 * (C) Copyright 1999 Georg Acher, acher@in.tum.de
 * (C) Copyright 1999 Deti Fliegl, deti@fliegl.de
 * (C) Copyright 1999 Thomas Sailer, sailer@ife.ee.ethz.ch
 * (C) Copyright 1999 Roman Weissgaerber, weissg@vienna.at
 * (C) Copyright 2000 Yggdrasil Computing, Inc. (port of new PCI interface
 *               support from usb-ohci.c by Adam Richter, adam@yggdrasil.com).
 * (C) Copyright 1999 Gregory P. Smith (from usb-ohci.c)
 * (C) Copyright 2004 Alan Stern, stern@rowland.harvard.edu
 */

static int uhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb);
static void uhci_unlink_generic(struct uhci_hcd *uhci, struct urb *urb);
static void uhci_remove_pending_urbps(struct uhci_hcd *uhci);
static void uhci_free_pending_qhs(struct uhci_hcd *uhci);
static void uhci_free_pending_tds(struct uhci_hcd *uhci);

/*
 * Technically, updating td->status here is a race, but it's not really a
 * problem. The worst that can happen is that we set the IOC bit again
 * generating a spurious interrupt. We could fix this by creating another
 * QH and leaving the IOC bit always set, but then we would have to play
 * games with the FSBR code to make sure we get the correct order in all
 * the cases. I don't think it's worth the effort.
 */
static inline void uhci_set_next_interrupt(struct uhci_hcd *uhci)
{
	if (uhci->is_stopped)
		mod_timer(&uhci_to_hcd(uhci)->rh_timer, jiffies);
	uhci->term_td->status |= cpu_to_le32(TD_CTRL_IOC);
}

static inline void uhci_clear_next_interrupt(struct uhci_hcd *uhci)
{
	uhci->term_td->status &= ~cpu_to_le32(TD_CTRL_IOC);
}

static inline void uhci_moveto_complete(struct uhci_hcd *uhci,
		struct urb_priv *urbp)
{
	list_move_tail(&urbp->urb_list, &uhci->complete_list);
}

static struct uhci_td *uhci_alloc_td(struct uhci_hcd *uhci)
{
	dma_addr_t dma_handle;
	struct uhci_td *td;

	td = dma_pool_alloc(uhci->td_pool, GFP_ATOMIC, &dma_handle);
	if (!td)
		return NULL;

	td->dma_handle = dma_handle;
	td->link = UHCI_PTR_TERM;
	td->buffer = 0;
	td->frame = -1;

	INIT_LIST_HEAD(&td->list);
	INIT_LIST_HEAD(&td->remove_list);
	INIT_LIST_HEAD(&td->fl_list);

	return td;
}

static inline void uhci_fill_td(struct uhci_td *td, u32 status,
		u32 token, u32 buffer)
{
	td->status = cpu_to_le32(status);
	td->token = cpu_to_le32(token);
	td->buffer = cpu_to_le32(buffer);
}

/*
 * We insert Isochronous URBs directly into the frame list at the beginning
 */
static void uhci_insert_td_frame_list(struct uhci_hcd *uhci, struct uhci_td *td, unsigned framenum)
{
	framenum &= (UHCI_NUMFRAMES - 1);

	td->frame = framenum;

	/* Is there a TD already mapped there? */
	if (uhci->frame_cpu[framenum]) {
		struct uhci_td *ftd, *ltd;

		ftd = uhci->frame_cpu[framenum];
		ltd = list_entry(ftd->fl_list.prev, struct uhci_td, fl_list);

		list_add_tail(&td->fl_list, &ftd->fl_list);

		td->link = ltd->link;
		wmb();
		ltd->link = cpu_to_le32(td->dma_handle);
	} else {
		td->link = uhci->frame[framenum];
		wmb();
		uhci->frame[framenum] = cpu_to_le32(td->dma_handle);
		uhci->frame_cpu[framenum] = td;
	}
}
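
/*
 * The resulting hardware chain, roughly: each frame-list slot heads a
 * string of iso TDs ending at the skeleton QHs, with new TDs linked in
 * at the tail:
 *
 *	frame[n] --> first TD --> ... --> newest TD --> skeleton QHs
 */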

static void uhci_remove_td(struct uhci_hcd *uhci, struct uhci_td *td)
{
	/* If it's not inserted, don't remove it */
	if (td->frame == -1 && list_empty(&td->fl_list))
		return;

	if (td->frame != -1 && uhci->frame_cpu[td->frame] == td) {
		if (list_empty(&td->fl_list)) {
			uhci->frame[td->frame] = td->link;
			uhci->frame_cpu[td->frame] = NULL;
		} else {
			struct uhci_td *ntd;

			ntd = list_entry(td->fl_list.next, struct uhci_td, fl_list);
			uhci->frame[td->frame] = cpu_to_le32(ntd->dma_handle);
			uhci->frame_cpu[td->frame] = ntd;
		}
	} else {
		struct uhci_td *ptd;

		ptd = list_entry(td->fl_list.prev, struct uhci_td, fl_list);
		ptd->link = td->link;
	}

	wmb();
	td->link = UHCI_PTR_TERM;

	list_del_init(&td->fl_list);
	td->frame = -1;
}

/*
 * Inserts a td list into qh.
 */
static void uhci_insert_tds_in_qh(struct uhci_qh *qh, struct urb *urb, __le32 breadth)
{
	struct urb_priv *urbp = (struct urb_priv *)urb->hcpriv;
	struct uhci_td *td;
	__le32 *plink;

	/* Ordering isn't important here yet since the QH hasn't been */
	/* inserted into the schedule yet */
	plink = &qh->element;
	list_for_each_entry(td, &urbp->td_list, list) {
		*plink = cpu_to_le32(td->dma_handle) | breadth;
		plink = &td->link;
	}
	*plink = UHCI_PTR_TERM;
}
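
/*
 * The QH's element pointer now heads the TD chain:
 *
 *	qh->element --> TD0 --> TD1 --> ... --> UHCI_PTR_TERM
 *
 * With <breadth> == UHCI_PTR_BREADTH (the depth bit clear), the HC
 * executes at most one TD from this QH per schedule pass instead of
 * draining the whole chain before moving on.
 */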

static void uhci_free_td(struct uhci_hcd *uhci, struct uhci_td *td)
{
	if (!list_empty(&td->list))
		dev_warn(uhci_dev(uhci), "td %p still in list!\n", td);
	if (!list_empty(&td->remove_list))
		dev_warn(uhci_dev(uhci), "td %p still in remove_list!\n", td);
	if (!list_empty(&td->fl_list))
		dev_warn(uhci_dev(uhci), "td %p still in fl_list!\n", td);

	dma_pool_free(uhci->td_pool, td, td->dma_handle);
}

static struct uhci_qh *uhci_alloc_qh(struct uhci_hcd *uhci)
{
	dma_addr_t dma_handle;
	struct uhci_qh *qh;

	qh = dma_pool_alloc(uhci->qh_pool, GFP_ATOMIC, &dma_handle);
	if (!qh)
		return NULL;

	qh->dma_handle = dma_handle;

	qh->element = UHCI_PTR_TERM;
	qh->link = UHCI_PTR_TERM;

	qh->urbp = NULL;

	INIT_LIST_HEAD(&qh->list);
	INIT_LIST_HEAD(&qh->remove_list);

	return qh;
}

static void uhci_free_qh(struct uhci_hcd *uhci, struct uhci_qh *qh)
{
	if (!list_empty(&qh->list))
		dev_warn(uhci_dev(uhci), "qh %p list not empty!\n", qh);
	if (!list_empty(&qh->remove_list))
		dev_warn(uhci_dev(uhci), "qh %p still in remove_list!\n", qh);

	dma_pool_free(uhci->qh_pool, qh, qh->dma_handle);
}

/*
 * Append this urb's qh after the last qh in skelqh->list
 *
 * Note that urb_priv.queue_list doesn't have a separate queue head;
 * it's a ring with every element "live".
 */
static void uhci_insert_qh(struct uhci_hcd *uhci, struct uhci_qh *skelqh, struct urb *urb)
{
	struct urb_priv *urbp = (struct urb_priv *)urb->hcpriv;
	struct urb_priv *turbp;
	struct uhci_qh *lqh;

	/* Grab the last QH */
	lqh = list_entry(skelqh->list.prev, struct uhci_qh, list);

	/* Point to the next skelqh */
	urbp->qh->link = lqh->link;
	wmb();				/* Ordering is important */

	/*
	 * Patch QHs for previous endpoint's queued URBs?  HC goes
	 * here next, not to the next skelqh it now points to.
	 *
	 *    lqh --> td ... --> qh ... --> td --> qh ... --> td
	 *     |                 |                 |
	 *     v                 v                 v
	 *     +<----------------+-----------------+
	 *     v
	 *    newqh --> td ... --> td
	 *     |
	 *     v
	 *    ...
	 *
	 * The HC could see (and use!) any of these as we write them.
	 */
	lqh->link = cpu_to_le32(urbp->qh->dma_handle) | UHCI_PTR_QH;
	if (lqh->urbp) {
		list_for_each_entry(turbp, &lqh->urbp->queue_list, queue_list)
			turbp->qh->link = lqh->link;
	}

	list_add_tail(&urbp->qh->list, &skelqh->list);
}

/*
 * Start removal of QH from schedule; it finishes next frame.
 * TDs should be unlinked before this is called.
 */
static void uhci_remove_qh(struct uhci_hcd *uhci, struct uhci_qh *qh)
{
	struct uhci_qh *pqh;
	__le32 newlink;

	if (!qh)
		return;

	/*
	 * Only go through the hoops if it's actually linked in
	 */
	if (!list_empty(&qh->list)) {

		/* If our queue is nonempty, make the next URB the head */
		if (!list_empty(&qh->urbp->queue_list)) {
			struct urb_priv *nurbp;

			nurbp = list_entry(qh->urbp->queue_list.next,
					struct urb_priv, queue_list);
			nurbp->queued = 0;
			list_add(&nurbp->qh->list, &qh->list);
			newlink = cpu_to_le32(nurbp->qh->dma_handle) | UHCI_PTR_QH;
		} else
			newlink = qh->link;

		/* Fix up the previous QH's queue to link to either
		 * the new head of this queue or the start of the
		 * next endpoint's queue. */
		pqh = list_entry(qh->list.prev, struct uhci_qh, list);
		pqh->link = newlink;
		if (pqh->urbp) {
			struct urb_priv *turbp;

			list_for_each_entry(turbp, &pqh->urbp->queue_list,
					queue_list)
				turbp->qh->link = newlink;
		}
		wmb();

		/* Leave qh->link in case the HC is on the QH now, it will */
		/* continue the rest of the schedule */
		qh->element = UHCI_PTR_TERM;

		list_del_init(&qh->list);
	}

	list_del_init(&qh->urbp->queue_list);
	qh->urbp = NULL;

	uhci_get_current_frame_number(uhci);
	if (uhci->frame_number + uhci->is_stopped != uhci->qh_remove_age) {
		uhci_free_pending_qhs(uhci);
		uhci->qh_remove_age = uhci->frame_number;
	}

	/* Check to see if the remove list is empty. Set the IOC bit */
	/* to force an interrupt so we can remove the QH */
	if (list_empty(&uhci->qh_remove_list))
		uhci_set_next_interrupt(uhci);

	list_add(&qh->remove_list, &uhci->qh_remove_list);
}

static int uhci_fixup_toggle(struct urb *urb, unsigned int toggle)
{
	struct urb_priv *urbp = (struct urb_priv *)urb->hcpriv;
	struct uhci_td *td;

	list_for_each_entry(td, &urbp->td_list, list) {
		if (toggle)
			td->token |= cpu_to_le32(TD_TOKEN_TOGGLE);
		else
			td->token &= ~cpu_to_le32(TD_TOKEN_TOGGLE);

		toggle ^= 1;
	}

	return toggle;
}
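
/*
 * For example, called with toggle == 1 on a three-TD URB this rewrites
 * the TDs to carry DATA1, DATA0, DATA1 and returns 0: the toggle the
 * next URB queued behind this one must start with.
 */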

/* This function will append one URB's QH to another URB's QH. This is for */
/* queuing interrupt, control or bulk transfers */
static void uhci_append_queued_urb(struct uhci_hcd *uhci, struct urb *eurb, struct urb *urb)
{
	struct urb_priv *eurbp, *urbp, *furbp, *lurbp;
	struct uhci_td *lltd;

	eurbp = eurb->hcpriv;
	urbp = urb->hcpriv;

	/* Find the first URB in the queue */
	furbp = eurbp;
	if (eurbp->queued) {
		list_for_each_entry(furbp, &eurbp->queue_list, queue_list)
			if (!furbp->queued)
				break;
	}

	lurbp = list_entry(furbp->queue_list.prev, struct urb_priv, queue_list);

	lltd = list_entry(lurbp->td_list.prev, struct uhci_td, list);

	/* Control transfers always start with toggle 0 */
	if (!usb_pipecontrol(urb->pipe))
		usb_settoggle(urb->dev, usb_pipeendpoint(urb->pipe),
				usb_pipeout(urb->pipe),
				uhci_fixup_toggle(urb,
					uhci_toggle(td_token(lltd)) ^ 1));

	/* All qh's in the queue need to link to the next queue */
	urbp->qh->link = eurbp->qh->link;

	wmb();			/* Make sure we flush everything */

	lltd->link = cpu_to_le32(urbp->qh->dma_handle) | UHCI_PTR_QH;

	list_add_tail(&urbp->queue_list, &furbp->queue_list);

	urbp->queued = 1;
}

static void uhci_delete_queued_urb(struct uhci_hcd *uhci, struct urb *urb)
{
	struct urb_priv *urbp, *nurbp, *purbp, *turbp;
	struct uhci_td *pltd;
	unsigned int toggle;

	urbp = urb->hcpriv;

	if (list_empty(&urbp->queue_list))
		return;

	nurbp = list_entry(urbp->queue_list.next, struct urb_priv, queue_list);

	/*
	 * Fix up the toggle for the following URBs in the queue.
	 * Only needed for bulk and interrupt: control and isochronous
	 * endpoints don't propagate toggles between messages.
	 */
	if (usb_pipebulk(urb->pipe) || usb_pipeint(urb->pipe)) {
		if (!urbp->queued)
			/* We just set the toggle in uhci_unlink_generic */
			toggle = usb_gettoggle(urb->dev,
					usb_pipeendpoint(urb->pipe),
					usb_pipeout(urb->pipe));
		else {
			/* If we're in the middle of the queue, grab the */
			/* toggle from the TD previous to us */
			purbp = list_entry(urbp->queue_list.prev,
					struct urb_priv, queue_list);
			pltd = list_entry(purbp->td_list.prev,
					struct uhci_td, list);
			toggle = uhci_toggle(td_token(pltd)) ^ 1;
		}

		list_for_each_entry(turbp, &urbp->queue_list, queue_list) {
			if (!turbp->queued)
				break;
			toggle = uhci_fixup_toggle(turbp->urb, toggle);
		}

		usb_settoggle(urb->dev, usb_pipeendpoint(urb->pipe),
				usb_pipeout(urb->pipe), toggle);
	}

	if (urbp->queued) {
		/* We're somewhere in the middle (or end).  The case where
		 * we're at the head is handled in uhci_remove_qh(). */
		purbp = list_entry(urbp->queue_list.prev, struct urb_priv,
				queue_list);

		pltd = list_entry(purbp->td_list.prev, struct uhci_td, list);
		if (nurbp->queued)
			pltd->link = cpu_to_le32(nurbp->qh->dma_handle) | UHCI_PTR_QH;
		else
			/* The next URB happens to be the beginning, so */
			/* we're the last, end the chain */
			pltd->link = UHCI_PTR_TERM;
	}

	/* urbp->queue_list is handled in uhci_remove_qh() */
}

static struct urb_priv *uhci_alloc_urb_priv(struct uhci_hcd *uhci, struct urb *urb)
{
	struct urb_priv *urbp;

	urbp = kmem_cache_alloc(uhci_up_cachep, SLAB_ATOMIC);
	if (!urbp)
		return NULL;

	memset((void *)urbp, 0, sizeof(*urbp));

	urbp->fsbrtime = jiffies;
	urbp->urb = urb;

	INIT_LIST_HEAD(&urbp->td_list);
	INIT_LIST_HEAD(&urbp->queue_list);
	INIT_LIST_HEAD(&urbp->urb_list);

	list_add_tail(&urbp->urb_list, &uhci->urb_list);

	urb->hcpriv = urbp;

	return urbp;
}

static void uhci_add_td_to_urb(struct urb *urb, struct uhci_td *td)
{
	struct urb_priv *urbp = (struct urb_priv *)urb->hcpriv;

	list_add_tail(&td->list, &urbp->td_list);
}

static void uhci_remove_td_from_urb(struct uhci_td *td)
{
	if (list_empty(&td->list))
		return;

	list_del_init(&td->list);
}

static void uhci_destroy_urb_priv(struct uhci_hcd *uhci, struct urb *urb)
{
	struct uhci_td *td, *tmp;
	struct urb_priv *urbp;

	urbp = (struct urb_priv *)urb->hcpriv;
	if (!urbp)
		return;

	if (!list_empty(&urbp->urb_list))
		dev_warn(uhci_dev(uhci), "urb %p still on uhci->urb_list "
				"or uhci->remove_list!\n", urb);

	uhci_get_current_frame_number(uhci);
	if (uhci->frame_number + uhci->is_stopped != uhci->td_remove_age) {
		uhci_free_pending_tds(uhci);
		uhci->td_remove_age = uhci->frame_number;
	}

	/* Check to see if the remove list is empty. Set the IOC bit */
	/* to force an interrupt so we can remove the TDs */
	if (list_empty(&uhci->td_remove_list))
		uhci_set_next_interrupt(uhci);

	list_for_each_entry_safe(td, tmp, &urbp->td_list, list) {
		uhci_remove_td_from_urb(td);
		uhci_remove_td(uhci, td);
		list_add(&td->remove_list, &uhci->td_remove_list);
	}

	urb->hcpriv = NULL;
	kmem_cache_free(uhci_up_cachep, urbp);
}

static void uhci_inc_fsbr(struct uhci_hcd *uhci, struct urb *urb)
{
	struct urb_priv *urbp = (struct urb_priv *)urb->hcpriv;

	if ((!(urb->transfer_flags & URB_NO_FSBR)) && !urbp->fsbr) {
		urbp->fsbr = 1;
		if (!uhci->fsbr++ && !uhci->fsbrtimeout)
			uhci->skel_term_qh->link = cpu_to_le32(uhci->skel_fs_control_qh->dma_handle) | UHCI_PTR_QH;
	}
}

static void uhci_dec_fsbr(struct uhci_hcd *uhci, struct urb *urb)
{
	struct urb_priv *urbp = (struct urb_priv *)urb->hcpriv;

	if ((!(urb->transfer_flags & URB_NO_FSBR)) && urbp->fsbr) {
		urbp->fsbr = 0;
		if (!--uhci->fsbr)
			uhci->fsbrtimeout = jiffies + FSBR_DELAY;
	}
}

/*
 * Map status to standard result codes
 *
 * <status> is (td_status(td) & 0xF60000), a.k.a.
 * uhci_status_bits(td_status(td)).
 * Note: <status> does not include the TD_CTRL_NAK bit.
 * <dir_out> is True for output TDs and False for input TDs.
 */
static int uhci_map_status(int status, int dir_out)
{
	if (!status)
		return 0;
	if (status & TD_CTRL_BITSTUFF)			/* Bitstuff error */
		return -EPROTO;
	if (status & TD_CTRL_CRCTIMEO) {		/* CRC/Timeout */
		if (dir_out)
			return -EPROTO;
		else
			return -EILSEQ;
	}
	if (status & TD_CTRL_BABBLE)			/* Babble */
		return -EOVERFLOW;
	if (status & TD_CTRL_DBUFERR)			/* Buffer error */
		return -ENOSR;
	if (status & TD_CTRL_STALLED)			/* Stalled */
		return -EPIPE;
	WARN_ON(status & TD_CTRL_ACTIVE);		/* Active */
	return 0;
}
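
/*
 * Quick reference for the mapping above: bitstuff -> -EPROTO;
 * CRC/timeout -> -EPROTO for OUT, -EILSEQ for IN; babble -> -EOVERFLOW;
 * data buffer error -> -ENOSR; stall -> -EPIPE.
 */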

/*
 * Control transfers
 */
static int uhci_submit_control(struct uhci_hcd *uhci, struct urb *urb, struct urb *eurb)
{
	struct urb_priv *urbp = (struct urb_priv *)urb->hcpriv;
	struct uhci_td *td;
	struct uhci_qh *qh, *skelqh;
	unsigned long destination, status;
	int maxsze = usb_maxpacket(urb->dev, urb->pipe, usb_pipeout(urb->pipe));
	int len = urb->transfer_buffer_length;
	dma_addr_t data = urb->transfer_dma;

	/* The "pipe" thing contains the destination in bits 8--18 */
	destination = (urb->pipe & PIPE_DEVEP_MASK) | USB_PID_SETUP;

	/* 3 errors */
	status = TD_CTRL_ACTIVE | uhci_maxerr(3);
	if (urb->dev->speed == USB_SPEED_LOW)
		status |= TD_CTRL_LS;

	/*
	 * Build the TD for the control request setup packet
	 */
	td = uhci_alloc_td(uhci);
	if (!td)
		return -ENOMEM;

	uhci_add_td_to_urb(urb, td);
	uhci_fill_td(td, status, destination | uhci_explen(7),
		urb->setup_dma);

	/*
	 * If direction is "send", change the packet ID from SETUP (0x2D)
	 * to OUT (0xE1).  Else change it from SETUP to IN (0x69) and
	 * set Short Packet Detect (SPD) for all data packets.
	 */
	if (usb_pipeout(urb->pipe))
		destination ^= (USB_PID_SETUP ^ USB_PID_OUT);
	else {
		destination ^= (USB_PID_SETUP ^ USB_PID_IN);
		status |= TD_CTRL_SPD;
	}

	/*
	 * Build the DATA TDs
	 */
	while (len > 0) {
		int pktsze = len;

		if (pktsze > maxsze)
			pktsze = maxsze;

		td = uhci_alloc_td(uhci);
		if (!td)
			return -ENOMEM;

		/* Alternate Data0/1 (start with Data1) */
		destination ^= TD_TOKEN_TOGGLE;

		uhci_add_td_to_urb(urb, td);
		uhci_fill_td(td, status, destination | uhci_explen(pktsze - 1),
			data);

		data += pktsze;
		len -= pktsze;
	}

	/*
	 * Build the final TD for control status
	 */
	td = uhci_alloc_td(uhci);
	if (!td)
		return -ENOMEM;

	/*
	 * It's IN if the pipe is an output pipe or we're not expecting
	 * data back.
	 */
	destination &= ~TD_TOKEN_PID_MASK;
	if (usb_pipeout(urb->pipe) || !urb->transfer_buffer_length)
		destination |= USB_PID_IN;
	else
		destination |= USB_PID_OUT;

	destination |= TD_TOKEN_TOGGLE;		/* End in Data1 */

	status &= ~TD_CTRL_SPD;

	uhci_add_td_to_urb(urb, td);
	uhci_fill_td(td, status | TD_CTRL_IOC,
		destination | uhci_explen(UHCI_NULL_DATA_SIZE), 0);

	qh = uhci_alloc_qh(uhci);
	if (!qh)
		return -ENOMEM;

	urbp->qh = qh;
	qh->urbp = urbp;

	uhci_insert_tds_in_qh(qh, urb, UHCI_PTR_BREADTH);

	/* Low-speed transfers get a different queue, and won't hog the bus.
	 * Also, some devices enumerate better without FSBR; the easiest way
	 * to do that is to put URBs on the low-speed queue while the device
	 * is in the DEFAULT state. */
	if (urb->dev->speed == USB_SPEED_LOW ||
			urb->dev->state == USB_STATE_DEFAULT)
		skelqh = uhci->skel_ls_control_qh;
	else {
		skelqh = uhci->skel_fs_control_qh;
		uhci_inc_fsbr(uhci, urb);
	}

	if (eurb)
		uhci_append_queued_urb(uhci, eurb, urb);
	else
		uhci_insert_qh(uhci, skelqh, urb);

	return -EINPROGRESS;
}
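
/*
 * The queue built above, sketched for a control-IN request with two
 * data packets:
 *
 *	QH --> SETUP (DATA0) --> IN (DATA1) --> IN (DATA0) --> OUT status (DATA1)
 *
 * The status stage always uses DATA1 and the direction opposite to the
 * data stage.
 */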

/*
 * If control-IN transfer was short, the status packet wasn't sent.
 * This routine changes the element pointer in the QH to point at the
 * status TD.  It's safe to do this even while the QH is live, because
 * the hardware only updates the element pointer following a successful
 * transfer.  The inactive TD for the short packet won't cause an update,
 * so the pointer won't get overwritten.  The next time the controller
 * sees this QH, it will send the status packet.
 */
static int usb_control_retrigger_status(struct uhci_hcd *uhci, struct urb *urb)
{
	struct urb_priv *urbp = (struct urb_priv *)urb->hcpriv;
	struct uhci_td *td;

	urbp->short_control_packet = 1;

	td = list_entry(urbp->td_list.prev, struct uhci_td, list);
	urbp->qh->element = cpu_to_le32(td->dma_handle);

	return -EINPROGRESS;
}

static int uhci_result_control(struct uhci_hcd *uhci, struct urb *urb)
{
	struct list_head *tmp, *head;
	struct urb_priv *urbp = urb->hcpriv;
	struct uhci_td *td;
	unsigned int status;
	int ret = 0;

	if (list_empty(&urbp->td_list))
		return -EINVAL;

	head = &urbp->td_list;

	if (urbp->short_control_packet) {
		tmp = head->prev;
		goto status_stage;
	}

	tmp = head->next;
	td = list_entry(tmp, struct uhci_td, list);

	/* The first TD is the SETUP stage, check the status, but skip */
	/* the count */
	status = uhci_status_bits(td_status(td));
	if (status & TD_CTRL_ACTIVE)
		return -EINPROGRESS;

	if (status)
		goto td_error;

	urb->actual_length = 0;

	/* The rest of the TDs (but the last) are data */
	tmp = tmp->next;
	while (tmp != head && tmp->next != head) {
		unsigned int ctrlstat;

		td = list_entry(tmp, struct uhci_td, list);
		tmp = tmp->next;

		ctrlstat = td_status(td);
		status = uhci_status_bits(ctrlstat);
		if (status & TD_CTRL_ACTIVE)
			return -EINPROGRESS;

		urb->actual_length += uhci_actual_length(ctrlstat);

		if (status)
			goto td_error;

		/* Check to see if we received a short packet */
		if (uhci_actual_length(ctrlstat) <
				uhci_expected_length(td_token(td))) {
			if (urb->transfer_flags & URB_SHORT_NOT_OK) {
				ret = -EREMOTEIO;
				goto err;
			}

			if (uhci_packetid(td_token(td)) == USB_PID_IN)
				return usb_control_retrigger_status(uhci, urb);
			else
				return 0;
		}
	}

status_stage:
	td = list_entry(tmp, struct uhci_td, list);

	/* Control status stage */
	status = td_status(td);

#ifdef I_HAVE_BUGGY_APC_BACKUPS
	/* APC BackUPS Pro kludge */
	/* It tries to send all of the descriptor instead of the amount */
	/* we requested */
	if (status & TD_CTRL_IOC &&	/* IOC is masked out by uhci_status_bits */
	    status & TD_CTRL_ACTIVE &&
	    status & TD_CTRL_NAK)
		return 0;
#endif

	status = uhci_status_bits(status);
	if (status & TD_CTRL_ACTIVE)
		return -EINPROGRESS;

	if (status)
		goto td_error;

	return 0;

td_error:
	ret = uhci_map_status(status, uhci_packetout(td_token(td)));

err:
	if ((debug == 1 && ret != -EPIPE) || debug > 1) {
		/* Some debugging code */
		dev_dbg(uhci_dev(uhci), "%s: failed with status %x\n",
				__FUNCTION__, status);

		if (errbuf) {
			/* Print the chain for debugging purposes */
			uhci_show_qh(urbp->qh, errbuf, ERRBUF_LEN, 0);

			lprintk(errbuf);
		}
	}

	return ret;
}

/*
 * Common submit for bulk and interrupt
 */
static int uhci_submit_common(struct uhci_hcd *uhci, struct urb *urb, struct urb *eurb, struct uhci_qh *skelqh)
{
	struct uhci_td *td;
	struct uhci_qh *qh;
	unsigned long destination, status;
	int maxsze = usb_maxpacket(urb->dev, urb->pipe, usb_pipeout(urb->pipe));
	int len = urb->transfer_buffer_length;
	struct urb_priv *urbp = (struct urb_priv *)urb->hcpriv;
	dma_addr_t data = urb->transfer_dma;

	if (len < 0)
		return -EINVAL;

	/* The "pipe" thing contains the destination in bits 8--18 */
	destination = (urb->pipe & PIPE_DEVEP_MASK) | usb_packetid(urb->pipe);

	status = uhci_maxerr(3) | TD_CTRL_ACTIVE;
	if (urb->dev->speed == USB_SPEED_LOW)
		status |= TD_CTRL_LS;
	if (usb_pipein(urb->pipe))
		status |= TD_CTRL_SPD;

	/*
	 * Build the DATA TDs
	 */
	do {	/* Allow zero length packets */
		int pktsze = maxsze;

		if (pktsze >= len) {
			pktsze = len;
			if (!(urb->transfer_flags & URB_SHORT_NOT_OK))
				status &= ~TD_CTRL_SPD;
		}

		td = uhci_alloc_td(uhci);
		if (!td)
			return -ENOMEM;

		uhci_add_td_to_urb(urb, td);
		uhci_fill_td(td, status, destination | uhci_explen(pktsze - 1) |
			(usb_gettoggle(urb->dev, usb_pipeendpoint(urb->pipe),
			 usb_pipeout(urb->pipe)) << TD_TOKEN_TOGGLE_SHIFT),
			data);

		data += pktsze;
		len -= maxsze;

		usb_dotoggle(urb->dev, usb_pipeendpoint(urb->pipe),
			usb_pipeout(urb->pipe));
	} while (len > 0);

	/*
	 * URB_ZERO_PACKET means adding a 0-length packet, if direction
	 * is OUT and the transfer_length was an exact multiple of maxsze,
	 * hence (len = transfer_length - N * maxsze) == 0
	 * however, if transfer_length == 0, the zero packet was already
	 * prepared above.
	 */
	if (usb_pipeout(urb->pipe) && (urb->transfer_flags & URB_ZERO_PACKET) &&
	    !len && urb->transfer_buffer_length) {
		td = uhci_alloc_td(uhci);
		if (!td)
			return -ENOMEM;

		uhci_add_td_to_urb(urb, td);
		uhci_fill_td(td, status, destination | uhci_explen(UHCI_NULL_DATA_SIZE) |
			(usb_gettoggle(urb->dev, usb_pipeendpoint(urb->pipe),
			 usb_pipeout(urb->pipe)) << TD_TOKEN_TOGGLE_SHIFT),
			data);

		usb_dotoggle(urb->dev, usb_pipeendpoint(urb->pipe),
			usb_pipeout(urb->pipe));
	}

	/* Set the interrupt-on-completion flag on the last packet.
	 * A more-or-less typical 4 KB URB (= size of one memory page)
	 * will require about 3 ms to transfer; that's a little on the
	 * fast side but not enough to justify delaying an interrupt
	 * more than 2 or 3 URBs, so we will ignore the URB_NO_INTERRUPT
	 * flag setting. */
	td->status |= cpu_to_le32(TD_CTRL_IOC);

	qh = uhci_alloc_qh(uhci);
	if (!qh)
		return -ENOMEM;

	urbp->qh = qh;
	qh->urbp = urbp;

	/* Always breadth first */
	uhci_insert_tds_in_qh(qh, urb, UHCI_PTR_BREADTH);

	if (eurb)
		uhci_append_queued_urb(uhci, eurb, urb);
	else
		uhci_insert_qh(uhci, skelqh, urb);

	return -EINPROGRESS;
}
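
/*
 * Note the SPD handling above: SPD is set for IN transfers so a short
 * packet stops the queue, but it is cleared again on the final TD when
 * URB_SHORT_NOT_OK is unset, since a short last packet is then just a
 * normal completion.
 */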

/*
 * Common result for bulk and interrupt
 */
static int uhci_result_common(struct uhci_hcd *uhci, struct urb *urb)
{
	struct urb_priv *urbp = urb->hcpriv;
	struct uhci_td *td;
	unsigned int status = 0;
	int ret = 0;

	urb->actual_length = 0;

	list_for_each_entry(td, &urbp->td_list, list) {
		unsigned int ctrlstat = td_status(td);

		status = uhci_status_bits(ctrlstat);
		if (status & TD_CTRL_ACTIVE)
			return -EINPROGRESS;

		urb->actual_length += uhci_actual_length(ctrlstat);

		if (status)
			goto td_error;

		if (uhci_actual_length(ctrlstat) <
				uhci_expected_length(td_token(td))) {
			if (urb->transfer_flags & URB_SHORT_NOT_OK) {
				ret = -EREMOTEIO;
				goto err;
			} else
				return 0;
		}
	}

	return 0;

td_error:
	ret = uhci_map_status(status, uhci_packetout(td_token(td)));

err:
	/*
	 * Enable this chunk of code if you want to see some more debugging.
	 * But be careful, it has the tendency to starve out khubd and prevent
	 * disconnects from happening successfully if you have a slow debug
	 * log interface (like a serial console).
	 */
#if 0
	if ((debug == 1 && ret != -EPIPE) || debug > 1) {
		/* Some debugging code */
		dev_dbg(uhci_dev(uhci), "%s: failed with status %x\n",
				__FUNCTION__, status);

		if (errbuf) {
			/* Print the chain for debugging purposes */
			uhci_show_qh(urbp->qh, errbuf, ERRBUF_LEN, 0);

			lprintk(errbuf);
		}
	}
#endif
	return ret;
}

static inline int uhci_submit_bulk(struct uhci_hcd *uhci, struct urb *urb, struct urb *eurb)
{
	int ret;

	/* Can't have low-speed bulk transfers */
	if (urb->dev->speed == USB_SPEED_LOW)
		return -EINVAL;

	ret = uhci_submit_common(uhci, urb, eurb, uhci->skel_bulk_qh);
	if (ret == -EINPROGRESS)
		uhci_inc_fsbr(uhci, urb);

	return ret;
}

static inline int uhci_submit_interrupt(struct uhci_hcd *uhci, struct urb *urb, struct urb *eurb)
{
	/* USB 1.1 interrupt transfers only involve one packet per interval;
	 * that's the uhci_submit_common() "breadth first" policy.  Drivers
	 * can submit urbs of any length, but longer ones might need many
	 * intervals to complete.
	 */
	return uhci_submit_common(uhci, urb, eurb, uhci->skelqh[__interval_to_skel(urb->interval)]);
}
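
/*
 * __interval_to_skel() (in uhci-hcd.h) effectively rounds the requested
 * interval down to a power of two to pick one of the periodic skeleton
 * QHs; e.g. an interval of 10 ms lands on the 8 ms skeleton.
 */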

/*
 * Isochronous transfers
 */
static int isochronous_find_limits(struct uhci_hcd *uhci, struct urb *urb, unsigned int *start, unsigned int *end)
{
	struct urb *last_urb = NULL;
	struct urb_priv *up;
	int ret = 0;

	list_for_each_entry(up, &uhci->urb_list, urb_list) {
		struct urb *u = up->urb;

		/* look for pending URBs with identical pipe handle */
		if ((urb->pipe == u->pipe) && (urb->dev == u->dev) &&
		    (u->status == -EINPROGRESS) && (u != urb)) {
			if (!last_urb)
				*start = u->start_frame;
			last_urb = u;
		}
	}

	if (last_urb) {
		*end = (last_urb->start_frame + last_urb->number_of_packets *
				last_urb->interval) & (UHCI_NUMFRAMES-1);
		ret = 0;
	} else
		ret = -1;	/* no previous urb found */

	return ret;
}

static int isochronous_find_start(struct uhci_hcd *uhci, struct urb *urb)
{
	int limits;
	unsigned int start = 0, end = 0;

	if (urb->number_of_packets > 900)	/* 900? Why? */
		return -EFBIG;

	limits = isochronous_find_limits(uhci, urb, &start, &end);

	if (urb->transfer_flags & URB_ISO_ASAP) {
		if (limits) {
			uhci_get_current_frame_number(uhci);
			urb->start_frame = (uhci->frame_number + 10)
					& (UHCI_NUMFRAMES - 1);
		} else
			urb->start_frame = end;
	} else {
		urb->start_frame &= (UHCI_NUMFRAMES - 1);
		/* FIXME: Sanity check */
	}

	return 0;
}

/*
 * Isochronous transfers
 */
static int uhci_submit_isochronous(struct uhci_hcd *uhci, struct urb *urb)
{
	struct uhci_td *td;
	int i, ret, frame;
	int status, destination;

	status = TD_CTRL_ACTIVE | TD_CTRL_IOS;
	destination = (urb->pipe & PIPE_DEVEP_MASK) | usb_packetid(urb->pipe);

	ret = isochronous_find_start(uhci, urb);
	if (ret)
		return ret;

	frame = urb->start_frame;
	for (i = 0; i < urb->number_of_packets; i++, frame += urb->interval) {
		if (!urb->iso_frame_desc[i].length)
			continue;

		td = uhci_alloc_td(uhci);
		if (!td)
			return -ENOMEM;

		uhci_add_td_to_urb(urb, td);
		uhci_fill_td(td, status, destination | uhci_explen(urb->iso_frame_desc[i].length - 1),
			urb->transfer_dma + urb->iso_frame_desc[i].offset);

		if (i + 1 >= urb->number_of_packets)
			td->status |= cpu_to_le32(TD_CTRL_IOC);

		uhci_insert_td_frame_list(uhci, td, frame);
	}

	return -EINPROGRESS;
}

static int uhci_result_isochronous(struct uhci_hcd *uhci, struct urb *urb)
{
	struct uhci_td *td;
	struct urb_priv *urbp = (struct urb_priv *)urb->hcpriv;
	int status;
	int i, ret = 0;

	urb->actual_length = 0;

	i = 0;
	list_for_each_entry(td, &urbp->td_list, list) {
		int actlength;
		unsigned int ctrlstat = td_status(td);

		if (ctrlstat & TD_CTRL_ACTIVE)
			return -EINPROGRESS;

		actlength = uhci_actual_length(ctrlstat);
		urb->iso_frame_desc[i].actual_length = actlength;
		urb->actual_length += actlength;

		status = uhci_map_status(uhci_status_bits(ctrlstat),
				usb_pipeout(urb->pipe));
		urb->iso_frame_desc[i].status = status;
		if (status) {
			urb->error_count++;
			ret = status;
		}

		i++;
	}

	return ret;
}

static struct urb *uhci_find_urb_ep(struct uhci_hcd *uhci, struct urb *urb)
{
	struct urb_priv *up;

	/* We don't match Isoc transfers since they are special */
	if (usb_pipeisoc(urb->pipe))
		return NULL;

	list_for_each_entry(up, &uhci->urb_list, urb_list) {
		struct urb *u = up->urb;

		if (u->dev == urb->dev && u->status == -EINPROGRESS) {
			/* For control, ignore the direction */
			if (usb_pipecontrol(urb->pipe) &&
			    (u->pipe & ~USB_DIR_IN) == (urb->pipe & ~USB_DIR_IN))
				return u;
			else if (u->pipe == urb->pipe)
				return u;
		}
	}

	return NULL;
}

static int uhci_urb_enqueue(struct usb_hcd *hcd,
		struct usb_host_endpoint *ep,
		struct urb *urb, gfp_t mem_flags)
{
	int ret;
	struct uhci_hcd *uhci = hcd_to_uhci(hcd);
	unsigned long flags;
	struct urb *eurb;
	int bustime;

	spin_lock_irqsave(&uhci->lock, flags);

	ret = urb->status;
	if (ret != -EINPROGRESS)		/* URB already unlinked! */
		goto out;

	eurb = uhci_find_urb_ep(uhci, urb);

	if (!uhci_alloc_urb_priv(uhci, urb)) {
		ret = -ENOMEM;
		goto out;
	}

	switch (usb_pipetype(urb->pipe)) {
	case PIPE_CONTROL:
		ret = uhci_submit_control(uhci, urb, eurb);
		break;
	case PIPE_INTERRUPT:
		if (!eurb) {
			bustime = usb_check_bandwidth(urb->dev, urb);
			if (bustime < 0)
				ret = bustime;
			else {
				ret = uhci_submit_interrupt(uhci, urb, eurb);
				if (ret == -EINPROGRESS)
					usb_claim_bandwidth(urb->dev, urb, bustime, 0);
			}
		} else {	/* inherit from parent */
			urb->bandwidth = eurb->bandwidth;
			ret = uhci_submit_interrupt(uhci, urb, eurb);
		}
		break;
	case PIPE_BULK:
		ret = uhci_submit_bulk(uhci, urb, eurb);
		break;
	case PIPE_ISOCHRONOUS:
		bustime = usb_check_bandwidth(urb->dev, urb);
		if (bustime < 0) {
			ret = bustime;
			break;
		}

		ret = uhci_submit_isochronous(uhci, urb);
		if (ret == -EINPROGRESS)
			usb_claim_bandwidth(urb->dev, urb, bustime, 1);
		break;
	}

	if (ret != -EINPROGRESS) {
		/* Submit failed, so delete it from the urb_list */
		struct urb_priv *urbp = urb->hcpriv;

		list_del_init(&urbp->urb_list);
		uhci_destroy_urb_priv(uhci, urb);
	} else
		ret = 0;

out:
	spin_unlock_irqrestore(&uhci->lock, flags);
	return ret;
}

/*
 * Return the result of a transfer
 */
static void uhci_transfer_result(struct uhci_hcd *uhci, struct urb *urb)
{
	int ret = -EINPROGRESS;
	struct urb_priv *urbp;

	spin_lock(&urb->lock);

	urbp = (struct urb_priv *)urb->hcpriv;

	if (urb->status != -EINPROGRESS)	/* URB already dequeued */
		goto out;

	switch (usb_pipetype(urb->pipe)) {
	case PIPE_CONTROL:
		ret = uhci_result_control(uhci, urb);
		break;
	case PIPE_BULK:
	case PIPE_INTERRUPT:
		ret = uhci_result_common(uhci, urb);
		break;
	case PIPE_ISOCHRONOUS:
		ret = uhci_result_isochronous(uhci, urb);
		break;
	}

	if (ret == -EINPROGRESS)
		goto out;
	urb->status = ret;

	switch (usb_pipetype(urb->pipe)) {
	case PIPE_CONTROL:
	case PIPE_BULK:
	case PIPE_ISOCHRONOUS:
		/* Release bandwidth for Interrupt or Isoc. transfers */
		if (urb->bandwidth)
			usb_release_bandwidth(urb->dev, urb, 1);
		uhci_unlink_generic(uhci, urb);
		break;
	case PIPE_INTERRUPT:
		/* Release bandwidth for Interrupt or Isoc. transfers */
		/* Make sure we don't release if we have a queued URB */
		if (list_empty(&urbp->queue_list) && urb->bandwidth)
			usb_release_bandwidth(urb->dev, urb, 0);
		else
			/* bandwidth was passed on to queued URB, */
			/* so don't let usb_unlink_urb() release it */
			urb->bandwidth = 0;
		uhci_unlink_generic(uhci, urb);
		break;
	default:
		dev_info(uhci_dev(uhci), "%s: unknown pipe type %d "
				"for urb %p\n",
				__FUNCTION__, usb_pipetype(urb->pipe), urb);
	}

	/* Move it from uhci->urb_list to uhci->complete_list */
	uhci_moveto_complete(uhci, urbp);

out:
	spin_unlock(&urb->lock);
}

static void uhci_unlink_generic(struct uhci_hcd *uhci, struct urb *urb)
{
	struct list_head *head;
	struct uhci_td *td;
	struct urb_priv *urbp = (struct urb_priv *)urb->hcpriv;
	int prevactive = 0;

	uhci_dec_fsbr(uhci, urb);	/* Safe since it checks */

	/*
	 * Now we need to find out what the last successful toggle was
	 * so we can update the local data toggle for the next transfer
	 *
	 * There are 2 ways the last successful completed TD is found:
	 *
	 * 1) The TD is NOT active and the actual length < expected length
	 * 2) The TD is NOT active and it's the last TD in the chain
	 *
	 * and a third way the first uncompleted TD is found:
	 *
	 * 3) The TD is active and the previous TD is NOT active
	 *
	 * Control and Isochronous ignore the toggle, so this is safe
	 * for all types
	 *
	 * FIXME: The toggle fixups won't be 100% reliable until we
	 * change over to using a single queue for each endpoint and
	 * stop the queue before unlinking.
	 */
	head = &urbp->td_list;
	list_for_each_entry(td, head, list) {
		unsigned int ctrlstat = td_status(td);

		if (!(ctrlstat & TD_CTRL_ACTIVE) &&
				(uhci_actual_length(ctrlstat) <
				 uhci_expected_length(td_token(td)) ||
				td->list.next == head))
			usb_settoggle(urb->dev, uhci_endpoint(td_token(td)),
					uhci_packetout(td_token(td)),
					uhci_toggle(td_token(td)) ^ 1);
		else if ((ctrlstat & TD_CTRL_ACTIVE) && !prevactive)
			usb_settoggle(urb->dev, uhci_endpoint(td_token(td)),
					uhci_packetout(td_token(td)),
					uhci_toggle(td_token(td)));

		prevactive = ctrlstat & TD_CTRL_ACTIVE;
	}

	uhci_delete_queued_urb(uhci, urb);

	/* The interrupt loop will reclaim the QHs */
	uhci_remove_qh(uhci, urbp->qh);
	urbp->qh = NULL;
}

static int uhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb)
{
	struct uhci_hcd *uhci = hcd_to_uhci(hcd);
	unsigned long flags;
	struct urb_priv *urbp;

	spin_lock_irqsave(&uhci->lock, flags);
	urbp = urb->hcpriv;
	if (!urbp)			/* URB was never linked! */
		goto done;
	list_del_init(&urbp->urb_list);

	uhci_unlink_generic(uhci, urb);

	uhci_get_current_frame_number(uhci);
	if (uhci->frame_number + uhci->is_stopped != uhci->urb_remove_age) {
		uhci_remove_pending_urbps(uhci);
		uhci->urb_remove_age = uhci->frame_number;
	}

	/* If we're the first, set the next interrupt bit */
	if (list_empty(&uhci->urb_remove_list))
		uhci_set_next_interrupt(uhci);
	list_add_tail(&urbp->urb_list, &uhci->urb_remove_list);

done:
	spin_unlock_irqrestore(&uhci->lock, flags);
	return 0;
}

static int uhci_fsbr_timeout(struct uhci_hcd *uhci, struct urb *urb)
{
	struct urb_priv *urbp = (struct urb_priv *)urb->hcpriv;
	struct list_head *head;
	struct uhci_td *td;
	int count = 0;

	uhci_dec_fsbr(uhci, urb);

	urbp->fsbr_timeout = 1;

	/*
	 * Ideally we would want to fix qh->element as well, but it's
	 * read/write by the HC, so that can introduce a race.  It's not
	 * really worth the hassle
	 */
	head = &urbp->td_list;
	list_for_each_entry(td, head, list) {
		/*
		 * Make sure we don't do the last one (since it'll have the
		 * TERM bit set) as well as we skip every so many TDs to
		 * make sure it doesn't hog the bandwidth
		 */
		if (td->list.next != head && (count % DEPTH_INTERVAL) ==
				(DEPTH_INTERVAL - 1))
			td->link |= UHCI_PTR_DEPTH;

		count++;
	}

	return 0;
}
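
/*
 * The effect, roughly: every DEPTH_INTERVAL-th TD link gets the
 * depth-first flag, so the HC occasionally continues straight into the
 * next TD instead of moving on to the next QH, keeping some throughput
 * after FSBR is turned off without monopolizing the bus the way full
 * FSBR looping would.
 */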

static void uhci_free_pending_qhs(struct uhci_hcd *uhci)
{
	struct uhci_qh *qh, *tmp;

	list_for_each_entry_safe(qh, tmp, &uhci->qh_remove_list, remove_list) {
		list_del_init(&qh->remove_list);

		uhci_free_qh(uhci, qh);
	}
}

static void uhci_free_pending_tds(struct uhci_hcd *uhci)
{
	struct uhci_td *td, *tmp;

	list_for_each_entry_safe(td, tmp, &uhci->td_remove_list, remove_list) {
		list_del_init(&td->remove_list);

		uhci_free_td(uhci, td);
	}
}

static void
uhci_finish_urb(struct usb_hcd *hcd, struct urb *urb, struct pt_regs *regs)
__releases(uhci->lock)
__acquires(uhci->lock)
{
	struct uhci_hcd *uhci = hcd_to_uhci(hcd);

	uhci_destroy_urb_priv(uhci, urb);

	spin_unlock(&uhci->lock);
	usb_hcd_giveback_urb(hcd, urb, regs);
	spin_lock(&uhci->lock);
}

static void uhci_finish_completion(struct uhci_hcd *uhci, struct pt_regs *regs)
{
	struct urb_priv *urbp, *tmp;

	list_for_each_entry_safe(urbp, tmp, &uhci->complete_list, urb_list) {
		struct urb *urb = urbp->urb;

		list_del_init(&urbp->urb_list);
		uhci_finish_urb(uhci_to_hcd(uhci), urb, regs);
	}
}

static void uhci_remove_pending_urbps(struct uhci_hcd *uhci)
{
	/* Splice the urb_remove_list onto the end of the complete_list */
	list_splice_init(&uhci->urb_remove_list, uhci->complete_list.prev);
}

/* Process events in the schedule, but only in one thread at a time */
static void uhci_scan_schedule(struct uhci_hcd *uhci, struct pt_regs *regs)
{
	struct urb_priv *urbp, *tmp;

	/* Don't allow re-entrant calls */
	if (uhci->scan_in_progress) {
		uhci->need_rescan = 1;
		return;
	}
	uhci->scan_in_progress = 1;
rescan:
	uhci->need_rescan = 0;

	uhci_clear_next_interrupt(uhci);
	uhci_get_current_frame_number(uhci);

	if (uhci->frame_number + uhci->is_stopped != uhci->qh_remove_age)
		uhci_free_pending_qhs(uhci);
	if (uhci->frame_number + uhci->is_stopped != uhci->td_remove_age)
		uhci_free_pending_tds(uhci);
	if (uhci->frame_number + uhci->is_stopped != uhci->urb_remove_age)
		uhci_remove_pending_urbps(uhci);

	/* Walk the list of pending URBs to see which ones completed
	 * (must be _safe because uhci_transfer_result() dequeues URBs) */
	list_for_each_entry_safe(urbp, tmp, &uhci->urb_list, urb_list) {
		struct urb *urb = urbp->urb;

		/* Checks the status and does all of the magic necessary */
		uhci_transfer_result(uhci, urb);
	}
	uhci_finish_completion(uhci, regs);

	/* If the controller is stopped, we can finish these off right now */
	if (uhci->is_stopped) {
		uhci_free_pending_qhs(uhci);
		uhci_free_pending_tds(uhci);
		uhci_remove_pending_urbps(uhci);
	}

	if (uhci->need_rescan)
		goto rescan;
	uhci->scan_in_progress = 0;

	if (list_empty(&uhci->urb_remove_list) &&
	    list_empty(&uhci->td_remove_list) &&
	    list_empty(&uhci->qh_remove_list))
		uhci_clear_next_interrupt(uhci);
	else
		uhci_set_next_interrupt(uhci);

	/* Wake up anyone waiting for an URB to complete */
	wake_up_all(&uhci->waitqh);
}

static void check_fsbr(struct uhci_hcd *uhci)
{
	struct urb_priv *up;

	list_for_each_entry(up, &uhci->urb_list, urb_list) {
		struct urb *u = up->urb;

		spin_lock(&u->lock);

		/* Check if the FSBR timed out */
		if (up->fsbr && !up->fsbr_timeout && time_after_eq(jiffies, up->fsbrtime + IDLE_TIMEOUT))
			uhci_fsbr_timeout(uhci, u);

		spin_unlock(&u->lock);
	}

	/* Really disable FSBR */
	if (!uhci->fsbr && uhci->fsbrtimeout && time_after_eq(jiffies, uhci->fsbrtimeout)) {
		uhci->fsbrtimeout = 0;
		uhci->skel_term_qh->link = UHCI_PTR_TERM;
	}
}