uhci-q.c

/*
 * Universal Host Controller Interface driver for USB.
 *
 * Maintainer: Alan Stern <stern@rowland.harvard.edu>
 *
 * (C) Copyright 1999 Linus Torvalds
 * (C) Copyright 1999-2002 Johannes Erdfelt, johannes@erdfelt.com
 * (C) Copyright 1999 Randy Dunlap
 * (C) Copyright 1999 Georg Acher, acher@in.tum.de
 * (C) Copyright 1999 Deti Fliegl, deti@fliegl.de
 * (C) Copyright 1999 Thomas Sailer, sailer@ife.ee.ethz.ch
 * (C) Copyright 1999 Roman Weissgaerber, weissg@vienna.at
 * (C) Copyright 2000 Yggdrasil Computing, Inc. (port of new PCI interface
 *               support from usb-ohci.c by Adam Richter, adam@yggdrasil.com).
 * (C) Copyright 1999 Gregory P. Smith (from usb-ohci.c)
 * (C) Copyright 2004 Alan Stern, stern@rowland.harvard.edu
 */

static int uhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb);
static void uhci_unlink_generic(struct uhci_hcd *uhci, struct urb *urb);
static void uhci_remove_pending_urbps(struct uhci_hcd *uhci);
static void uhci_free_pending_qhs(struct uhci_hcd *uhci);
static void uhci_free_pending_tds(struct uhci_hcd *uhci);

/*
 * Technically, updating td->status here is a race, but it's not really a
 * problem.  The worst that can happen is that we set the IOC bit again
 * generating a spurious interrupt.  We could fix this by creating another
 * QH and leaving the IOC bit always set, but then we would have to play
 * games with the FSBR code to make sure we get the correct order in all
 * the cases.  I don't think it's worth the effort
 */
static inline void uhci_set_next_interrupt(struct uhci_hcd *uhci)
{
        uhci->term_td->status |= cpu_to_le32(TD_CTRL_IOC);
}

static inline void uhci_clear_next_interrupt(struct uhci_hcd *uhci)
{
        uhci->term_td->status &= ~cpu_to_le32(TD_CTRL_IOC);
}

static inline void uhci_moveto_complete(struct uhci_hcd *uhci,
                struct urb_priv *urbp)
{
        list_move_tail(&urbp->urb_list, &uhci->complete_list);
}
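
/*
 * Note: the TDs and QHs below are carved from per-controller dma_pools;
 * each allocation hands back both a CPU pointer and the bus address
 * (dma_handle) that the host controller itself will dereference.
 */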

static struct uhci_td *uhci_alloc_td(struct uhci_hcd *uhci, struct usb_device *dev)
{
        dma_addr_t dma_handle;
        struct uhci_td *td;

        td = dma_pool_alloc(uhci->td_pool, GFP_ATOMIC, &dma_handle);
        if (!td)
                return NULL;

        td->dma_handle = dma_handle;

        td->link = UHCI_PTR_TERM;
        td->buffer = 0;

        td->frame = -1;
        td->dev = dev;

        INIT_LIST_HEAD(&td->list);
        INIT_LIST_HEAD(&td->remove_list);
        INIT_LIST_HEAD(&td->fl_list);

        usb_get_dev(dev);

        return td;
}

static inline void uhci_fill_td(struct uhci_td *td, u32 status,
                u32 token, u32 buffer)
{
        td->status = cpu_to_le32(status);
        td->token = cpu_to_le32(token);
        td->buffer = cpu_to_le32(buffer);
}

/*
 * We insert Isochronous URBs directly into the frame list at the beginning
 */
static void uhci_insert_td_frame_list(struct uhci_hcd *uhci, struct uhci_td *td, unsigned framenum)
{
        framenum &= (UHCI_NUMFRAMES - 1);

        td->frame = framenum;

        /* Is there a TD already mapped there? */
        if (uhci->fl->frame_cpu[framenum]) {
                struct uhci_td *ftd, *ltd;

                ftd = uhci->fl->frame_cpu[framenum];
                ltd = list_entry(ftd->fl_list.prev, struct uhci_td, fl_list);

                list_add_tail(&td->fl_list, &ftd->fl_list);

                td->link = ltd->link;
                wmb();
                ltd->link = cpu_to_le32(td->dma_handle);
        } else {
                td->link = uhci->fl->frame[framenum];
                wmb();
                uhci->fl->frame[framenum] = cpu_to_le32(td->dma_handle);
                uhci->fl->frame_cpu[framenum] = td;
        }
}
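
/*
 * Removal is the mirror operation: re-point the hardware frame pointer
 * (or the previous TD's link) past this TD before clearing the TD's own
 * link, then drop it from the software fl_list.
 */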

static void uhci_remove_td(struct uhci_hcd *uhci, struct uhci_td *td)
{
        /* If it's not inserted, don't remove it */
        if (td->frame == -1 && list_empty(&td->fl_list))
                return;

        if (td->frame != -1 && uhci->fl->frame_cpu[td->frame] == td) {
                if (list_empty(&td->fl_list)) {
                        uhci->fl->frame[td->frame] = td->link;
                        uhci->fl->frame_cpu[td->frame] = NULL;
                } else {
                        struct uhci_td *ntd;

                        ntd = list_entry(td->fl_list.next, struct uhci_td, fl_list);
                        uhci->fl->frame[td->frame] = cpu_to_le32(ntd->dma_handle);
                        uhci->fl->frame_cpu[td->frame] = ntd;
                }
        } else {
                struct uhci_td *ptd;

                ptd = list_entry(td->fl_list.prev, struct uhci_td, fl_list);
                ptd->link = td->link;
        }

        wmb();
        td->link = UHCI_PTR_TERM;

        list_del_init(&td->fl_list);
        td->frame = -1;
}

/*
 * Inserts a td list into qh.
 */
static void uhci_insert_tds_in_qh(struct uhci_qh *qh, struct urb *urb, __le32 breadth)
{
        struct urb_priv *urbp = (struct urb_priv *)urb->hcpriv;
        struct uhci_td *td;
        __le32 *plink;

        /* Ordering isn't important here yet since the QH hasn't been
         * inserted into the schedule yet */
        plink = &qh->element;
        list_for_each_entry(td, &urbp->td_list, list) {
                *plink = cpu_to_le32(td->dma_handle) | breadth;
                plink = &td->link;
        }
        *plink = UHCI_PTR_TERM;
}

static void uhci_free_td(struct uhci_hcd *uhci, struct uhci_td *td)
{
        if (!list_empty(&td->list))
                dev_warn(uhci_dev(uhci), "td %p still in list!\n", td);
        if (!list_empty(&td->remove_list))
                dev_warn(uhci_dev(uhci), "td %p still in remove_list!\n", td);
        if (!list_empty(&td->fl_list))
                dev_warn(uhci_dev(uhci), "td %p still in fl_list!\n", td);

        if (td->dev)
                usb_put_dev(td->dev);

        dma_pool_free(uhci->td_pool, td, td->dma_handle);
}

static struct uhci_qh *uhci_alloc_qh(struct uhci_hcd *uhci, struct usb_device *dev)
{
        dma_addr_t dma_handle;
        struct uhci_qh *qh;

        qh = dma_pool_alloc(uhci->qh_pool, GFP_ATOMIC, &dma_handle);
        if (!qh)
                return NULL;

        qh->dma_handle = dma_handle;

        qh->element = UHCI_PTR_TERM;
        qh->link = UHCI_PTR_TERM;

        qh->dev = dev;
        qh->urbp = NULL;

        INIT_LIST_HEAD(&qh->list);
        INIT_LIST_HEAD(&qh->remove_list);

        usb_get_dev(dev);

        return qh;
}

static void uhci_free_qh(struct uhci_hcd *uhci, struct uhci_qh *qh)
{
        if (!list_empty(&qh->list))
                dev_warn(uhci_dev(uhci), "qh %p list not empty!\n", qh);
        if (!list_empty(&qh->remove_list))
                dev_warn(uhci_dev(uhci), "qh %p still in remove_list!\n", qh);

        if (qh->dev)
                usb_put_dev(qh->dev);

        dma_pool_free(uhci->qh_pool, qh, qh->dma_handle);
}

/*
 * Append this urb's qh after the last qh in skelqh->list
 *
 * Note that urb_priv.queue_list doesn't have a separate queue head;
 * it's a ring with every element "live".
 */
static void uhci_insert_qh(struct uhci_hcd *uhci, struct uhci_qh *skelqh, struct urb *urb)
{
        struct urb_priv *urbp = (struct urb_priv *)urb->hcpriv;
        struct urb_priv *turbp;
        struct uhci_qh *lqh;

        /* Grab the last QH */
        lqh = list_entry(skelqh->list.prev, struct uhci_qh, list);

        /* Point to the next skelqh */
        urbp->qh->link = lqh->link;
        wmb();                          /* Ordering is important */

        /*
         * Patch QHs for previous endpoint's queued URBs?  HC goes
         * here next, not to the next skelqh it now points to.
         *
         *    lqh --> td ... --> qh ... --> td --> qh ... --> td
         *     |                 |                  |
         *     v                 v                  v
         *     +<----------------+------------------+
         *     v
         *    newqh --> td ... --> td
         *     |
         *     v
         *    ...
         *
         * The HC could see (and use!) any of these as we write them.
         */
        lqh->link = cpu_to_le32(urbp->qh->dma_handle) | UHCI_PTR_QH;
        if (lqh->urbp) {
                list_for_each_entry(turbp, &lqh->urbp->queue_list, queue_list)
                        turbp->qh->link = lqh->link;
        }

        list_add_tail(&urbp->qh->list, &skelqh->list);
}

/*
 * Start removal of QH from schedule; it finishes next frame.
 * TDs should be unlinked before this is called.
 */
static void uhci_remove_qh(struct uhci_hcd *uhci, struct uhci_qh *qh)
{
        struct uhci_qh *pqh;
        __le32 newlink;

        if (!qh)
                return;

        /*
         * Only go through the hoops if it's actually linked in
         */
        if (!list_empty(&qh->list)) {

                /* If our queue is nonempty, make the next URB the head */
                if (!list_empty(&qh->urbp->queue_list)) {
                        struct urb_priv *nurbp;

                        nurbp = list_entry(qh->urbp->queue_list.next,
                                        struct urb_priv, queue_list);
                        nurbp->queued = 0;
                        list_add(&nurbp->qh->list, &qh->list);
                        newlink = cpu_to_le32(nurbp->qh->dma_handle) | UHCI_PTR_QH;
                } else
                        newlink = qh->link;

                /* Fix up the previous QH's queue to link to either
                 * the new head of this queue or the start of the
                 * next endpoint's queue. */
                pqh = list_entry(qh->list.prev, struct uhci_qh, list);
                pqh->link = newlink;
                if (pqh->urbp) {
                        struct urb_priv *turbp;

                        list_for_each_entry(turbp, &pqh->urbp->queue_list,
                                        queue_list)
                                turbp->qh->link = newlink;
                }
                wmb();

                /* Leave qh->link in case the HC is on the QH now, it will
                 * continue the rest of the schedule */
                qh->element = UHCI_PTR_TERM;

                list_del_init(&qh->list);
        }

        list_del_init(&qh->urbp->queue_list);
        qh->urbp = NULL;

        uhci_get_current_frame_number(uhci);
        if (uhci->frame_number + uhci->is_stopped != uhci->qh_remove_age) {
                uhci_free_pending_qhs(uhci);
                uhci->qh_remove_age = uhci->frame_number;
        }

        /* Check to see if the remove list is empty. Set the IOC bit
         * to force an interrupt so we can remove the QH */
        if (list_empty(&uhci->qh_remove_list))
                uhci_set_next_interrupt(uhci);

        list_add(&qh->remove_list, &uhci->qh_remove_list);
}
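
/*
 * The frame-number "age" check above implements deferred freeing: an
 * unlinked QH (or TD, in uhci_destroy_urb_priv) is only handed back to
 * its dma_pool once the frame counter has moved on, so the HC can no
 * longer be holding a pointer to it.
 */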

static int uhci_fixup_toggle(struct urb *urb, unsigned int toggle)
{
        struct urb_priv *urbp = (struct urb_priv *)urb->hcpriv;
        struct uhci_td *td;

        list_for_each_entry(td, &urbp->td_list, list) {
                if (toggle)
                        td->token |= cpu_to_le32(TD_TOKEN_TOGGLE);
                else
                        td->token &= ~cpu_to_le32(TD_TOKEN_TOGGLE);

                toggle ^= 1;
        }

        return toggle;
}
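
/*
 * For example, calling uhci_fixup_toggle() with toggle == 1 on a
 * three-TD URB rewrites the tokens to DATA1, DATA0, DATA1 and returns 0,
 * the toggle the next URB in the queue should start with.
 */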

/* This function will append one URB's QH to another URB's QH. This is for
 * queuing interrupt, control or bulk transfers */
static void uhci_append_queued_urb(struct uhci_hcd *uhci, struct urb *eurb, struct urb *urb)
{
        struct urb_priv *eurbp, *urbp, *furbp, *lurbp;
        struct uhci_td *lltd;

        eurbp = eurb->hcpriv;
        urbp = urb->hcpriv;

        /* Find the first URB in the queue */
        furbp = eurbp;
        if (eurbp->queued) {
                list_for_each_entry(furbp, &eurbp->queue_list, queue_list)
                        if (!furbp->queued)
                                break;
        }

        lurbp = list_entry(furbp->queue_list.prev, struct urb_priv, queue_list);

        lltd = list_entry(lurbp->td_list.prev, struct uhci_td, list);

        /* Control transfers always start with toggle 0 */
        if (!usb_pipecontrol(urb->pipe))
                usb_settoggle(urb->dev, usb_pipeendpoint(urb->pipe),
                                usb_pipeout(urb->pipe),
                                uhci_fixup_toggle(urb,
                                        uhci_toggle(td_token(lltd)) ^ 1));

        /* All QHs in the queue need to link to the next queue */
        urbp->qh->link = eurbp->qh->link;

        wmb();                  /* Make sure we flush everything */

        lltd->link = cpu_to_le32(urbp->qh->dma_handle) | UHCI_PTR_QH;

        list_add_tail(&urbp->queue_list, &furbp->queue_list);

        urbp->queued = 1;
}

static void uhci_delete_queued_urb(struct uhci_hcd *uhci, struct urb *urb)
{
        struct urb_priv *urbp, *nurbp, *purbp, *turbp;
        struct uhci_td *pltd;
        unsigned int toggle;

        urbp = urb->hcpriv;

        if (list_empty(&urbp->queue_list))
                return;

        nurbp = list_entry(urbp->queue_list.next, struct urb_priv, queue_list);

        /*
         * Fix up the toggle for the following URBs in the queue.
         * Only needed for bulk and interrupt: control and isochronous
         * endpoints don't propagate toggles between messages.
         */
        if (usb_pipebulk(urb->pipe) || usb_pipeint(urb->pipe)) {
                if (!urbp->queued)
                        /* We just set the toggle in uhci_unlink_generic */
                        toggle = usb_gettoggle(urb->dev,
                                        usb_pipeendpoint(urb->pipe),
                                        usb_pipeout(urb->pipe));
                else {
                        /* If we're in the middle of the queue, grab the
                         * toggle from the TD previous to us */
                        purbp = list_entry(urbp->queue_list.prev,
                                        struct urb_priv, queue_list);
                        pltd = list_entry(purbp->td_list.prev,
                                        struct uhci_td, list);
                        toggle = uhci_toggle(td_token(pltd)) ^ 1;
                }

                list_for_each_entry(turbp, &urbp->queue_list, queue_list) {
                        if (!turbp->queued)
                                break;
                        toggle = uhci_fixup_toggle(turbp->urb, toggle);
                }

                usb_settoggle(urb->dev, usb_pipeendpoint(urb->pipe),
                                usb_pipeout(urb->pipe), toggle);
        }

        if (urbp->queued) {
                /* We're somewhere in the middle (or end).  The case where
                 * we're at the head is handled in uhci_remove_qh(). */
                purbp = list_entry(urbp->queue_list.prev, struct urb_priv,
                                queue_list);
                pltd = list_entry(purbp->td_list.prev, struct uhci_td, list);

                if (nurbp->queued)
                        pltd->link = cpu_to_le32(nurbp->qh->dma_handle) | UHCI_PTR_QH;
                else
                        /* The next URB happens to be the beginning, so
                         * we're the last, end the chain */
                        pltd->link = UHCI_PTR_TERM;
        }

        /* urbp->queue_list is handled in uhci_remove_qh() */
}
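
/*
 * Each submitted URB gets a struct urb_priv tying it to its TDs and QH;
 * it sits on uhci->urb_list from submission until it is completed or
 * unlinked.
 */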

static struct urb_priv *uhci_alloc_urb_priv(struct uhci_hcd *uhci, struct urb *urb)
{
        struct urb_priv *urbp;

        urbp = kmem_cache_alloc(uhci_up_cachep, SLAB_ATOMIC);
        if (!urbp)
                return NULL;

        memset((void *)urbp, 0, sizeof(*urbp));

        urbp->inserttime = jiffies;
        urbp->fsbrtime = jiffies;
        urbp->urb = urb;

        INIT_LIST_HEAD(&urbp->td_list);
        INIT_LIST_HEAD(&urbp->queue_list);
        INIT_LIST_HEAD(&urbp->urb_list);

        list_add_tail(&urbp->urb_list, &uhci->urb_list);

        urb->hcpriv = urbp;

        return urbp;
}

static void uhci_add_td_to_urb(struct urb *urb, struct uhci_td *td)
{
        struct urb_priv *urbp = (struct urb_priv *)urb->hcpriv;

        td->urb = urb;

        list_add_tail(&td->list, &urbp->td_list);
}

static void uhci_remove_td_from_urb(struct uhci_td *td)
{
        if (list_empty(&td->list))
                return;

        list_del_init(&td->list);

        td->urb = NULL;
}

static void uhci_destroy_urb_priv(struct uhci_hcd *uhci, struct urb *urb)
{
        struct uhci_td *td, *tmp;
        struct urb_priv *urbp;

        urbp = (struct urb_priv *)urb->hcpriv;
        if (!urbp)
                return;

        if (!list_empty(&urbp->urb_list))
                dev_warn(uhci_dev(uhci), "urb %p still on uhci->urb_list "
                                "or uhci->remove_list!\n", urb);

        uhci_get_current_frame_number(uhci);
        if (uhci->frame_number + uhci->is_stopped != uhci->td_remove_age) {
                uhci_free_pending_tds(uhci);
                uhci->td_remove_age = uhci->frame_number;
        }

        /* Check to see if the remove list is empty. Set the IOC bit
         * to force an interrupt so we can remove the TDs */
        if (list_empty(&uhci->td_remove_list))
                uhci_set_next_interrupt(uhci);

        list_for_each_entry_safe(td, tmp, &urbp->td_list, list) {
                uhci_remove_td_from_urb(td);
                uhci_remove_td(uhci, td);
                list_add(&td->remove_list, &uhci->td_remove_list);
        }

        urb->hcpriv = NULL;
        kmem_cache_free(uhci_up_cachep, urbp);
}

static void uhci_inc_fsbr(struct uhci_hcd *uhci, struct urb *urb)
{
        struct urb_priv *urbp = (struct urb_priv *)urb->hcpriv;

        if ((!(urb->transfer_flags & URB_NO_FSBR)) && !urbp->fsbr) {
                urbp->fsbr = 1;
                if (!uhci->fsbr++ && !uhci->fsbrtimeout)
                        uhci->skel_term_qh->link = cpu_to_le32(uhci->skel_fs_control_qh->dma_handle) | UHCI_PTR_QH;
        }
}

static void uhci_dec_fsbr(struct uhci_hcd *uhci, struct urb *urb)
{
        struct urb_priv *urbp = (struct urb_priv *)urb->hcpriv;

        if ((!(urb->transfer_flags & URB_NO_FSBR)) && urbp->fsbr) {
                urbp->fsbr = 0;
                if (!--uhci->fsbr)
                        uhci->fsbrtimeout = jiffies + FSBR_DELAY;
        }
}
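
/*
 * Full Speed Bandwidth Reclamation (FSBR): while any FSBR-eligible URB
 * is pending, the terminating skeleton QH is pointed back at the
 * full-speed control queue (see uhci_inc_fsbr above), so the controller
 * keeps looping through the async schedule for the rest of each frame
 * instead of going idle.
 */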

/*
 * Map status to standard result codes
 *
 * <status> is (td_status(td) & 0xF60000), a.k.a.
 * uhci_status_bits(td_status(td)).
 * Note: <status> does not include the TD_CTRL_NAK bit.
 * <dir_out> is True for output TDs and False for input TDs.
 */
static int uhci_map_status(int status, int dir_out)
{
        if (!status)
                return 0;
        if (status & TD_CTRL_BITSTUFF)                  /* Bitstuff error */
                return -EPROTO;
        if (status & TD_CTRL_CRCTIMEO) {                /* CRC/Timeout */
                if (dir_out)
                        return -EPROTO;
                else
                        return -EILSEQ;
        }
        if (status & TD_CTRL_BABBLE)                    /* Babble */
                return -EOVERFLOW;
        if (status & TD_CTRL_DBUFERR)                   /* Buffer error */
                return -ENOSR;
        if (status & TD_CTRL_STALLED)                   /* Stalled */
                return -EPIPE;
        WARN_ON(status & TD_CTRL_ACTIVE);               /* Active */
        return 0;
}

/*
 * Control transfers
 */
static int uhci_submit_control(struct uhci_hcd *uhci, struct urb *urb, struct urb *eurb)
{
        struct urb_priv *urbp = (struct urb_priv *)urb->hcpriv;
        struct uhci_td *td;
        struct uhci_qh *qh, *skelqh;
        unsigned long destination, status;
        int maxsze = usb_maxpacket(urb->dev, urb->pipe, usb_pipeout(urb->pipe));
        int len = urb->transfer_buffer_length;
        dma_addr_t data = urb->transfer_dma;

        /* The "pipe" thing contains the destination in bits 8--18 */
        destination = (urb->pipe & PIPE_DEVEP_MASK) | USB_PID_SETUP;

        /* 3 errors */
        status = TD_CTRL_ACTIVE | uhci_maxerr(3);
        if (urb->dev->speed == USB_SPEED_LOW)
                status |= TD_CTRL_LS;

        /*
         * Build the TD for the control request setup packet
         */
        td = uhci_alloc_td(uhci, urb->dev);
        if (!td)
                return -ENOMEM;

        uhci_add_td_to_urb(urb, td);
        uhci_fill_td(td, status, destination | uhci_explen(7),
                        urb->setup_dma);

        /*
         * If direction is "send", change the packet ID from SETUP (0x2D)
         * to OUT (0xE1).  Else change it from SETUP to IN (0x69) and
         * set Short Packet Detect (SPD) for all data packets.
         */
        if (usb_pipeout(urb->pipe))
                destination ^= (USB_PID_SETUP ^ USB_PID_OUT);
        else {
                destination ^= (USB_PID_SETUP ^ USB_PID_IN);
                status |= TD_CTRL_SPD;
        }

        /*
         * Build the DATA TDs
         */
        while (len > 0) {
                int pktsze = len;

                if (pktsze > maxsze)
                        pktsze = maxsze;

                td = uhci_alloc_td(uhci, urb->dev);
                if (!td)
                        return -ENOMEM;

                /* Alternate Data0/1 (start with Data1) */
                destination ^= TD_TOKEN_TOGGLE;

                uhci_add_td_to_urb(urb, td);
                uhci_fill_td(td, status, destination | uhci_explen(pktsze - 1),
                                data);

                data += pktsze;
                len -= pktsze;
        }

        /*
         * Build the final TD for control status
         */
        td = uhci_alloc_td(uhci, urb->dev);
        if (!td)
                return -ENOMEM;

        /*
         * It's IN if the pipe is an output pipe or we're not expecting
         * data back.
         */
        destination &= ~TD_TOKEN_PID_MASK;
        if (usb_pipeout(urb->pipe) || !urb->transfer_buffer_length)
                destination |= USB_PID_IN;
        else
                destination |= USB_PID_OUT;

        destination |= TD_TOKEN_TOGGLE;         /* End in Data1 */

        status &= ~TD_CTRL_SPD;

        uhci_add_td_to_urb(urb, td);
        uhci_fill_td(td, status | TD_CTRL_IOC,
                        destination | uhci_explen(UHCI_NULL_DATA_SIZE), 0);

        qh = uhci_alloc_qh(uhci, urb->dev);
        if (!qh)
                return -ENOMEM;

        urbp->qh = qh;
        qh->urbp = urbp;

        uhci_insert_tds_in_qh(qh, urb, UHCI_PTR_BREADTH);

        /* Low-speed transfers get a different queue, and won't hog the bus.
         * Also, some devices enumerate better without FSBR; the easiest way
         * to do that is to put URBs on the low-speed queue while the device
         * is in the DEFAULT state. */
        if (urb->dev->speed == USB_SPEED_LOW ||
                        urb->dev->state == USB_STATE_DEFAULT)
                skelqh = uhci->skel_ls_control_qh;
        else {
                skelqh = uhci->skel_fs_control_qh;
                uhci_inc_fsbr(uhci, urb);
        }

        if (eurb)
                uhci_append_queued_urb(uhci, eurb, urb);
        else
                uhci_insert_qh(uhci, skelqh, urb);

        return -EINPROGRESS;
}

/*
 * If control-IN transfer was short, the status packet wasn't sent.
 * This routine changes the element pointer in the QH to point at the
 * status TD.  It's safe to do this even while the QH is live, because
 * the hardware only updates the element pointer following a successful
 * transfer.  The inactive TD for the short packet won't cause an update,
 * so the pointer won't get overwritten.  The next time the controller
 * sees this QH, it will send the status packet.
 */
static int usb_control_retrigger_status(struct uhci_hcd *uhci, struct urb *urb)
{
        struct urb_priv *urbp = (struct urb_priv *)urb->hcpriv;
        struct uhci_td *td;

        urbp->short_control_packet = 1;

        td = list_entry(urbp->td_list.prev, struct uhci_td, list);
        urbp->qh->element = cpu_to_le32(td->dma_handle);

        return -EINPROGRESS;
}

static int uhci_result_control(struct uhci_hcd *uhci, struct urb *urb)
{
        struct list_head *tmp, *head;
        struct urb_priv *urbp = urb->hcpriv;
        struct uhci_td *td;
        unsigned int status;
        int ret = 0;

        if (list_empty(&urbp->td_list))
                return -EINVAL;

        head = &urbp->td_list;

        if (urbp->short_control_packet) {
                tmp = head->prev;
                goto status_stage;
        }

        tmp = head->next;
        td = list_entry(tmp, struct uhci_td, list);

        /* The first TD is the SETUP stage, check the status, but skip
         * the count */
        status = uhci_status_bits(td_status(td));
        if (status & TD_CTRL_ACTIVE)
                return -EINPROGRESS;

        if (status)
                goto td_error;

        urb->actual_length = 0;

        /* The rest of the TDs (but the last) are data */
        tmp = tmp->next;
        while (tmp != head && tmp->next != head) {
                unsigned int ctrlstat;

                td = list_entry(tmp, struct uhci_td, list);
                tmp = tmp->next;

                ctrlstat = td_status(td);
                status = uhci_status_bits(ctrlstat);
                if (status & TD_CTRL_ACTIVE)
                        return -EINPROGRESS;

                urb->actual_length += uhci_actual_length(ctrlstat);

                if (status)
                        goto td_error;

                /* Check to see if we received a short packet */
                if (uhci_actual_length(ctrlstat) <
                                uhci_expected_length(td_token(td))) {
                        if (urb->transfer_flags & URB_SHORT_NOT_OK) {
                                ret = -EREMOTEIO;
                                goto err;
                        }

                        if (uhci_packetid(td_token(td)) == USB_PID_IN)
                                return usb_control_retrigger_status(uhci, urb);
                        else
                                return 0;
                }
        }

status_stage:
        td = list_entry(tmp, struct uhci_td, list);

        /* Control status stage */
        status = td_status(td);

#ifdef I_HAVE_BUGGY_APC_BACKUPS
        /* APC BackUPS Pro kludge */
        /* It tries to send all of the descriptor instead of the amount
         * we requested */
        if (status & TD_CTRL_IOC &&     /* IOC is masked out by uhci_status_bits */
                        status & TD_CTRL_ACTIVE &&
                        status & TD_CTRL_NAK)
                return 0;
#endif

        status = uhci_status_bits(status);
        if (status & TD_CTRL_ACTIVE)
                return -EINPROGRESS;

        if (status)
                goto td_error;

        return 0;

td_error:
        ret = uhci_map_status(status, uhci_packetout(td_token(td)));

err:
        if ((debug == 1 && ret != -EPIPE) || debug > 1) {
                /* Some debugging code */
                dev_dbg(uhci_dev(uhci), "%s: failed with status %x\n",
                                __FUNCTION__, status);

                if (errbuf) {
                        /* Print the chain for debugging purposes */
                        uhci_show_qh(urbp->qh, errbuf, ERRBUF_LEN, 0);
                        lprintk(errbuf);
                }
        }

        return ret;
}

/*
 * Common submit for bulk and interrupt
 */
static int uhci_submit_common(struct uhci_hcd *uhci, struct urb *urb, struct urb *eurb, struct uhci_qh *skelqh)
{
        struct uhci_td *td;
        struct uhci_qh *qh;
        unsigned long destination, status;
        int maxsze = usb_maxpacket(urb->dev, urb->pipe, usb_pipeout(urb->pipe));
        int len = urb->transfer_buffer_length;
        struct urb_priv *urbp = (struct urb_priv *)urb->hcpriv;
        dma_addr_t data = urb->transfer_dma;

        if (len < 0)
                return -EINVAL;

        /* The "pipe" thing contains the destination in bits 8--18 */
        destination = (urb->pipe & PIPE_DEVEP_MASK) | usb_packetid(urb->pipe);

        status = uhci_maxerr(3) | TD_CTRL_ACTIVE;
        if (urb->dev->speed == USB_SPEED_LOW)
                status |= TD_CTRL_LS;
        if (usb_pipein(urb->pipe))
                status |= TD_CTRL_SPD;

        /*
         * Build the DATA TDs
         */
        do {    /* Allow zero length packets */
                int pktsze = maxsze;

                if (pktsze >= len) {
                        pktsze = len;
                        if (!(urb->transfer_flags & URB_SHORT_NOT_OK))
                                status &= ~TD_CTRL_SPD;
                }

                td = uhci_alloc_td(uhci, urb->dev);
                if (!td)
                        return -ENOMEM;

                uhci_add_td_to_urb(urb, td);
                uhci_fill_td(td, status, destination | uhci_explen(pktsze - 1) |
                        (usb_gettoggle(urb->dev, usb_pipeendpoint(urb->pipe),
                         usb_pipeout(urb->pipe)) << TD_TOKEN_TOGGLE_SHIFT),
                        data);

                data += pktsze;
                len -= maxsze;

                usb_dotoggle(urb->dev, usb_pipeendpoint(urb->pipe),
                        usb_pipeout(urb->pipe));
        } while (len > 0);

        /*
         * URB_ZERO_PACKET means adding a 0-length packet, if direction
         * is OUT and the transfer_length was an exact multiple of maxsze,
         * hence (len = transfer_length - N * maxsze) == 0
         * however, if transfer_length == 0, the zero packet was already
         * prepared above.
         */
        if (usb_pipeout(urb->pipe) && (urb->transfer_flags & URB_ZERO_PACKET) &&
                        !len && urb->transfer_buffer_length) {
                td = uhci_alloc_td(uhci, urb->dev);
                if (!td)
                        return -ENOMEM;

                uhci_add_td_to_urb(urb, td);
                uhci_fill_td(td, status, destination | uhci_explen(UHCI_NULL_DATA_SIZE) |
                        (usb_gettoggle(urb->dev, usb_pipeendpoint(urb->pipe),
                         usb_pipeout(urb->pipe)) << TD_TOKEN_TOGGLE_SHIFT),
                        data);

                usb_dotoggle(urb->dev, usb_pipeendpoint(urb->pipe),
                        usb_pipeout(urb->pipe));
        }

        /* Set the interrupt-on-completion flag on the last packet.
         * A more-or-less typical 4 KB URB (= size of one memory page)
         * will require about 3 ms to transfer; that's a little on the
         * fast side but not enough to justify delaying an interrupt
         * more than 2 or 3 URBs, so we will ignore the URB_NO_INTERRUPT
         * flag setting. */
        td->status |= cpu_to_le32(TD_CTRL_IOC);

        qh = uhci_alloc_qh(uhci, urb->dev);
        if (!qh)
                return -ENOMEM;

        urbp->qh = qh;
        qh->urbp = urbp;

        /* Always breadth first */
        uhci_insert_tds_in_qh(qh, urb, UHCI_PTR_BREADTH);

        if (eurb)
                uhci_append_queued_urb(uhci, eurb, urb);
        else
                uhci_insert_qh(uhci, skelqh, urb);

        return -EINPROGRESS;
}

/*
 * Common result for bulk and interrupt
 */
static int uhci_result_common(struct uhci_hcd *uhci, struct urb *urb)
{
        struct urb_priv *urbp = urb->hcpriv;
        struct uhci_td *td;
        unsigned int status = 0;
        int ret = 0;

        urb->actual_length = 0;

        list_for_each_entry(td, &urbp->td_list, list) {
                unsigned int ctrlstat = td_status(td);

                status = uhci_status_bits(ctrlstat);
                if (status & TD_CTRL_ACTIVE)
                        return -EINPROGRESS;

                urb->actual_length += uhci_actual_length(ctrlstat);

                if (status)
                        goto td_error;

                if (uhci_actual_length(ctrlstat) <
                                uhci_expected_length(td_token(td))) {
                        if (urb->transfer_flags & URB_SHORT_NOT_OK) {
                                ret = -EREMOTEIO;
                                goto err;
                        } else
                                return 0;
                }
        }

        return 0;

td_error:
        ret = uhci_map_status(status, uhci_packetout(td_token(td)));

err:
        /*
         * Enable this chunk of code if you want to see some more debugging.
         * But be careful, it has the tendency to starve out khubd and prevent
         * disconnects from happening successfully if you have a slow debug
         * log interface (like a serial console).
         */
#if 0
        if ((debug == 1 && ret != -EPIPE) || debug > 1) {
                /* Some debugging code */
                dev_dbg(uhci_dev(uhci), "%s: failed with status %x\n",
                                __FUNCTION__, status);

                if (errbuf) {
                        /* Print the chain for debugging purposes */
                        uhci_show_qh(urbp->qh, errbuf, ERRBUF_LEN, 0);
                        lprintk(errbuf);
                }
        }
#endif
        return ret;
}

static inline int uhci_submit_bulk(struct uhci_hcd *uhci, struct urb *urb, struct urb *eurb)
{
        int ret;

        /* Can't have low-speed bulk transfers */
        if (urb->dev->speed == USB_SPEED_LOW)
                return -EINVAL;

        ret = uhci_submit_common(uhci, urb, eurb, uhci->skel_bulk_qh);
        if (ret == -EINPROGRESS)
                uhci_inc_fsbr(uhci, urb);

        return ret;
}

static inline int uhci_submit_interrupt(struct uhci_hcd *uhci, struct urb *urb, struct urb *eurb)
{
        /* USB 1.1 interrupt transfers only involve one packet per interval;
         * that's the uhci_submit_common() "breadth first" policy.  Drivers
         * can submit urbs of any length, but longer ones might need many
         * intervals to complete.
         */
        return uhci_submit_common(uhci, urb, eurb, uhci->skelqh[__interval_to_skel(urb->interval)]);
}
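
/*
 * Note: __interval_to_skel() (defined in the driver's header) is expected
 * to round the requested polling interval down to a power of two and pick
 * the matching periodic skeleton QH, so e.g. an interval of 10 ms should
 * land on the 8 ms queue.
 */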

/*
 * Isochronous transfers
 */
static int isochronous_find_limits(struct uhci_hcd *uhci, struct urb *urb, unsigned int *start, unsigned int *end)
{
        struct urb *last_urb = NULL;
        struct urb_priv *up;
        int ret = 0;

        list_for_each_entry(up, &uhci->urb_list, urb_list) {
                struct urb *u = up->urb;

                /* look for pending URBs with identical pipe handle */
                if ((urb->pipe == u->pipe) && (urb->dev == u->dev) &&
                    (u->status == -EINPROGRESS) && (u != urb)) {
                        if (!last_urb)
                                *start = u->start_frame;
                        last_urb = u;
                }
        }

        if (last_urb) {
                *end = (last_urb->start_frame + last_urb->number_of_packets *
                                last_urb->interval) & (UHCI_NUMFRAMES-1);
                ret = 0;
        } else
                ret = -1;       /* no previous urb found */

        return ret;
}

static int isochronous_find_start(struct uhci_hcd *uhci, struct urb *urb)
{
        int limits;
        unsigned int start = 0, end = 0;

        if (urb->number_of_packets > 900)       /* 900? Why? */
                return -EFBIG;

        limits = isochronous_find_limits(uhci, urb, &start, &end);

        if (urb->transfer_flags & URB_ISO_ASAP) {
                if (limits) {
                        uhci_get_current_frame_number(uhci);
                        urb->start_frame = (uhci->frame_number + 10)
                                        & (UHCI_NUMFRAMES - 1);
                } else
                        urb->start_frame = end;
        } else {
                urb->start_frame &= (UHCI_NUMFRAMES - 1);
                /* FIXME: Sanity check */
        }

        return 0;
}

/*
 * Isochronous transfers
 */
static int uhci_submit_isochronous(struct uhci_hcd *uhci, struct urb *urb)
{
        struct uhci_td *td;
        int i, ret, frame;
        int status, destination;

        status = TD_CTRL_ACTIVE | TD_CTRL_IOS;
        destination = (urb->pipe & PIPE_DEVEP_MASK) | usb_packetid(urb->pipe);

        ret = isochronous_find_start(uhci, urb);
        if (ret)
                return ret;

        frame = urb->start_frame;
        for (i = 0; i < urb->number_of_packets; i++, frame += urb->interval) {
                if (!urb->iso_frame_desc[i].length)
                        continue;

                td = uhci_alloc_td(uhci, urb->dev);
                if (!td)
                        return -ENOMEM;

                uhci_add_td_to_urb(urb, td);
                uhci_fill_td(td, status, destination | uhci_explen(urb->iso_frame_desc[i].length - 1),
                        urb->transfer_dma + urb->iso_frame_desc[i].offset);

                if (i + 1 >= urb->number_of_packets)
                        td->status |= cpu_to_le32(TD_CTRL_IOC);

                uhci_insert_td_frame_list(uhci, td, frame);
        }

        return -EINPROGRESS;
}

static int uhci_result_isochronous(struct uhci_hcd *uhci, struct urb *urb)
{
        struct uhci_td *td;
        struct urb_priv *urbp = (struct urb_priv *)urb->hcpriv;
        int status;
        int i, ret = 0;

        urb->actual_length = 0;

        i = 0;
        list_for_each_entry(td, &urbp->td_list, list) {
                int actlength;
                unsigned int ctrlstat = td_status(td);

                if (ctrlstat & TD_CTRL_ACTIVE)
                        return -EINPROGRESS;

                actlength = uhci_actual_length(ctrlstat);
                urb->iso_frame_desc[i].actual_length = actlength;
                urb->actual_length += actlength;

                status = uhci_map_status(uhci_status_bits(ctrlstat),
                                usb_pipeout(urb->pipe));
                urb->iso_frame_desc[i].status = status;
                if (status) {
                        urb->error_count++;
                        ret = status;
                }

                i++;
        }

        return ret;
}

static struct urb *uhci_find_urb_ep(struct uhci_hcd *uhci, struct urb *urb)
{
        struct urb_priv *up;

        /* We don't match Isoc transfers since they are special */
        if (usb_pipeisoc(urb->pipe))
                return NULL;

        list_for_each_entry(up, &uhci->urb_list, urb_list) {
                struct urb *u = up->urb;

                if (u->dev == urb->dev && u->status == -EINPROGRESS) {
                        /* For control, ignore the direction */
                        if (usb_pipecontrol(urb->pipe) &&
                            (u->pipe & ~USB_DIR_IN) == (urb->pipe & ~USB_DIR_IN))
                                return u;
                        else if (u->pipe == urb->pipe)
                                return u;
                }
        }

        return NULL;
}
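
/*
 * If uhci_find_urb_ep() turns up another URB already pending on the same
 * endpoint, the new submission is queued behind it via
 * uhci_append_queued_urb(); otherwise the URB gets its own QH linked
 * into the appropriate skeleton.
 */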

static int uhci_urb_enqueue(struct usb_hcd *hcd,
                struct usb_host_endpoint *ep,
                struct urb *urb, int mem_flags)
{
        int ret;
        struct uhci_hcd *uhci = hcd_to_uhci(hcd);
        unsigned long flags;
        struct urb *eurb;
        int bustime;

        spin_lock_irqsave(&uhci->lock, flags);

        ret = urb->status;
        if (ret != -EINPROGRESS)                /* URB already unlinked! */
                goto out;

        eurb = uhci_find_urb_ep(uhci, urb);

        if (!uhci_alloc_urb_priv(uhci, urb)) {
                ret = -ENOMEM;
                goto out;
        }

        switch (usb_pipetype(urb->pipe)) {
        case PIPE_CONTROL:
                ret = uhci_submit_control(uhci, urb, eurb);
                break;
        case PIPE_INTERRUPT:
                if (!eurb) {
                        bustime = usb_check_bandwidth(urb->dev, urb);
                        if (bustime < 0)
                                ret = bustime;
                        else {
                                ret = uhci_submit_interrupt(uhci, urb, eurb);
                                if (ret == -EINPROGRESS)
                                        usb_claim_bandwidth(urb->dev, urb, bustime, 0);
                        }
                } else {        /* inherit from parent */
                        urb->bandwidth = eurb->bandwidth;
                        ret = uhci_submit_interrupt(uhci, urb, eurb);
                }
                break;
        case PIPE_BULK:
                ret = uhci_submit_bulk(uhci, urb, eurb);
                break;
        case PIPE_ISOCHRONOUS:
                bustime = usb_check_bandwidth(urb->dev, urb);
                if (bustime < 0) {
                        ret = bustime;
                        break;
                }

                ret = uhci_submit_isochronous(uhci, urb);
                if (ret == -EINPROGRESS)
                        usb_claim_bandwidth(urb->dev, urb, bustime, 1);
                break;
        }

        if (ret != -EINPROGRESS) {
                /* Submit failed, so delete it from the urb_list */
                struct urb_priv *urbp = urb->hcpriv;

                list_del_init(&urbp->urb_list);
                uhci_destroy_urb_priv(uhci, urb);
        } else
                ret = 0;

out:
        spin_unlock_irqrestore(&uhci->lock, flags);
        return ret;
}

/*
 * Return the result of a transfer
 */
static void uhci_transfer_result(struct uhci_hcd *uhci, struct urb *urb)
{
        int ret = -EINPROGRESS;
        struct urb_priv *urbp;

        spin_lock(&urb->lock);

        urbp = (struct urb_priv *)urb->hcpriv;

        if (urb->status != -EINPROGRESS)        /* URB already dequeued */
                goto out;

        switch (usb_pipetype(urb->pipe)) {
        case PIPE_CONTROL:
                ret = uhci_result_control(uhci, urb);
                break;
        case PIPE_BULK:
        case PIPE_INTERRUPT:
                ret = uhci_result_common(uhci, urb);
                break;
        case PIPE_ISOCHRONOUS:
                ret = uhci_result_isochronous(uhci, urb);
                break;
        }

        if (ret == -EINPROGRESS)
                goto out;
        urb->status = ret;

        switch (usb_pipetype(urb->pipe)) {
        case PIPE_CONTROL:
        case PIPE_BULK:
        case PIPE_ISOCHRONOUS:
                /* Release bandwidth for Interrupt or Isoc. transfers */
                if (urb->bandwidth)
                        usb_release_bandwidth(urb->dev, urb, 1);
                uhci_unlink_generic(uhci, urb);
                break;
        case PIPE_INTERRUPT:
                /* Release bandwidth for Interrupt or Isoc. transfers */
                /* Make sure we don't release if we have a queued URB */
                if (list_empty(&urbp->queue_list) && urb->bandwidth)
                        usb_release_bandwidth(urb->dev, urb, 0);
                else
                        /* bandwidth was passed on to queued URB, */
                        /* so don't let usb_unlink_urb() release it */
                        urb->bandwidth = 0;
                uhci_unlink_generic(uhci, urb);
                break;
        default:
                dev_info(uhci_dev(uhci), "%s: unknown pipe type %d "
                                "for urb %p\n",
                                __FUNCTION__, usb_pipetype(urb->pipe), urb);
        }

        /* Move it from uhci->urb_list to uhci->complete_list */
        uhci_moveto_complete(uhci, urbp);

out:
        spin_unlock(&urb->lock);
}

static void uhci_unlink_generic(struct uhci_hcd *uhci, struct urb *urb)
{
        struct list_head *head;
        struct uhci_td *td;
        struct urb_priv *urbp = (struct urb_priv *)urb->hcpriv;
        int prevactive = 0;

        uhci_dec_fsbr(uhci, urb);       /* Safe since it checks */

        /*
         * Now we need to find out what the last successful toggle was
         * so we can update the local data toggle for the next transfer
         *
         * There are 2 ways the last successful completed TD is found:
         *
         * 1) The TD is NOT active and the actual length < expected length
         * 2) The TD is NOT active and it's the last TD in the chain
         *
         * and a third way the first uncompleted TD is found:
         *
         * 3) The TD is active and the previous TD is NOT active
         *
         * Control and Isochronous ignore the toggle, so this is safe
         * for all types
         *
         * FIXME: The toggle fixups won't be 100% reliable until we
         * change over to using a single queue for each endpoint and
         * stop the queue before unlinking.
         */
        head = &urbp->td_list;
        list_for_each_entry(td, head, list) {
                unsigned int ctrlstat = td_status(td);

                if (!(ctrlstat & TD_CTRL_ACTIVE) &&
                                (uhci_actual_length(ctrlstat) <
                                 uhci_expected_length(td_token(td)) ||
                                td->list.next == head))
                        usb_settoggle(urb->dev, uhci_endpoint(td_token(td)),
                                uhci_packetout(td_token(td)),
                                uhci_toggle(td_token(td)) ^ 1);
                else if ((ctrlstat & TD_CTRL_ACTIVE) && !prevactive)
                        usb_settoggle(urb->dev, uhci_endpoint(td_token(td)),
                                uhci_packetout(td_token(td)),
                                uhci_toggle(td_token(td)));

                prevactive = ctrlstat & TD_CTRL_ACTIVE;
        }

        uhci_delete_queued_urb(uhci, urb);

        /* The interrupt loop will reclaim the QHs */
        uhci_remove_qh(uhci, urbp->qh);
        urbp->qh = NULL;
}

static int uhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb)
{
        struct uhci_hcd *uhci = hcd_to_uhci(hcd);
        unsigned long flags;
        struct urb_priv *urbp;

        spin_lock_irqsave(&uhci->lock, flags);
        urbp = urb->hcpriv;
        if (!urbp)                      /* URB was never linked! */
                goto done;
        list_del_init(&urbp->urb_list);

        uhci_unlink_generic(uhci, urb);

        uhci_get_current_frame_number(uhci);
        if (uhci->frame_number + uhci->is_stopped != uhci->urb_remove_age) {
                uhci_remove_pending_urbps(uhci);
                uhci->urb_remove_age = uhci->frame_number;
        }

        /* If we're the first, set the next interrupt bit */
        if (list_empty(&uhci->urb_remove_list))
                uhci_set_next_interrupt(uhci);
        list_add_tail(&urbp->urb_list, &uhci->urb_remove_list);

done:
        spin_unlock_irqrestore(&uhci->lock, flags);
        return 0;
}
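
/*
 * On an FSBR timeout the URB gives up bandwidth reclamation; instead,
 * every DEPTH_INTERVAL-th TD link is switched to depth-first below, so
 * the controller makes extra progress on this queue each frame without
 * letting it hog the bus.
 */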

static int uhci_fsbr_timeout(struct uhci_hcd *uhci, struct urb *urb)
{
        struct urb_priv *urbp = (struct urb_priv *)urb->hcpriv;
        struct list_head *head;
        struct uhci_td *td;
        int count = 0;

        uhci_dec_fsbr(uhci, urb);

        urbp->fsbr_timeout = 1;

        /*
         * Ideally we would want to fix qh->element as well, but it's
         * read/write by the HC, so that can introduce a race.  It's not
         * really worth the hassle
         */
        head = &urbp->td_list;
        list_for_each_entry(td, head, list) {
                /*
                 * Make sure we don't do the last one (since it'll have the
                 * TERM bit set) as well as we skip every so many TDs to
                 * make sure it doesn't hog the bandwidth
                 */
                if (td->list.next != head && (count % DEPTH_INTERVAL) ==
                                (DEPTH_INTERVAL - 1))
                        td->link |= UHCI_PTR_DEPTH;

                count++;
        }

        return 0;
}

static void uhci_free_pending_qhs(struct uhci_hcd *uhci)
{
        struct uhci_qh *qh, *tmp;

        list_for_each_entry_safe(qh, tmp, &uhci->qh_remove_list, remove_list) {
                list_del_init(&qh->remove_list);
                uhci_free_qh(uhci, qh);
        }
}

static void uhci_free_pending_tds(struct uhci_hcd *uhci)
{
        struct uhci_td *td, *tmp;

        list_for_each_entry_safe(td, tmp, &uhci->td_remove_list, remove_list) {
                list_del_init(&td->remove_list);
                uhci_free_td(uhci, td);
        }
}
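
/*
 * Note the __releases/__acquires annotations on uhci_finish_urb() below:
 * uhci->lock is dropped around usb_hcd_giveback_urb() so a completion
 * handler may legally resubmit URBs, and re-taken before returning to
 * the scan loop.
 */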

static void
uhci_finish_urb(struct usb_hcd *hcd, struct urb *urb, struct pt_regs *regs)
__releases(uhci->lock)
__acquires(uhci->lock)
{
        struct uhci_hcd *uhci = hcd_to_uhci(hcd);

        uhci_destroy_urb_priv(uhci, urb);

        spin_unlock(&uhci->lock);
        usb_hcd_giveback_urb(hcd, urb, regs);
        spin_lock(&uhci->lock);
}

static void uhci_finish_completion(struct uhci_hcd *uhci, struct pt_regs *regs)
{
        struct urb_priv *urbp, *tmp;

        list_for_each_entry_safe(urbp, tmp, &uhci->complete_list, urb_list) {
                struct urb *urb = urbp->urb;

                list_del_init(&urbp->urb_list);
                uhci_finish_urb(uhci_to_hcd(uhci), urb, regs);
        }
}

static void uhci_remove_pending_urbps(struct uhci_hcd *uhci)
{
        /* Splice the urb_remove_list onto the end of the complete_list */
        list_splice_init(&uhci->urb_remove_list, uhci->complete_list.prev);
}

/* Process events in the schedule, but only in one thread at a time */
static void uhci_scan_schedule(struct uhci_hcd *uhci, struct pt_regs *regs)
{
        struct urb_priv *urbp, *tmp;

        /* Don't allow re-entrant calls */
        if (uhci->scan_in_progress) {
                uhci->need_rescan = 1;
                return;
        }
        uhci->scan_in_progress = 1;
rescan:
        uhci->need_rescan = 0;

        uhci_get_current_frame_number(uhci);

        if (uhci->frame_number + uhci->is_stopped != uhci->qh_remove_age)
                uhci_free_pending_qhs(uhci);
        if (uhci->frame_number + uhci->is_stopped != uhci->td_remove_age)
                uhci_free_pending_tds(uhci);
        if (uhci->frame_number + uhci->is_stopped != uhci->urb_remove_age)
                uhci_remove_pending_urbps(uhci);

        /* Walk the list of pending URBs to see which ones completed
         * (must be _safe because uhci_transfer_result() dequeues URBs) */
        list_for_each_entry_safe(urbp, tmp, &uhci->urb_list, urb_list) {
                struct urb *urb = urbp->urb;

                /* Checks the status and does all of the magic necessary */
                uhci_transfer_result(uhci, urb);
        }
        uhci_finish_completion(uhci, regs);

        /* If the controller is stopped, we can finish these off right now */
        if (uhci->is_stopped) {
                uhci_free_pending_qhs(uhci);
                uhci_free_pending_tds(uhci);
                uhci_remove_pending_urbps(uhci);
        }

        if (uhci->need_rescan)
                goto rescan;
        uhci->scan_in_progress = 0;

        if (list_empty(&uhci->urb_remove_list) &&
            list_empty(&uhci->td_remove_list) &&
            list_empty(&uhci->qh_remove_list))
                uhci_clear_next_interrupt(uhci);
        else
                uhci_set_next_interrupt(uhci);

        /* Wake up anyone waiting for an URB to complete */
        wake_up_all(&uhci->waitqh);
}