/*
 * xHCI host controller driver
 *
 * Copyright (C) 2008 Intel Corp.
 *
 * Author: Sarah Sharp
 * Some code borrowed from the Linux EHCI driver.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
 * or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software Foundation,
 * Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */

/*
 * Ring initialization rules:
 * 1. Each segment is initialized to zero, except for link TRBs.
 * 2. Ring cycle state = 0.  This represents Producer Cycle State (PCS) or
 *    Consumer Cycle State (CCS), depending on ring function.
 * 3. Enqueue pointer = dequeue pointer = address of first TRB in the segment.
 *
 * Ring behavior rules:
 * 1. A ring is empty if enqueue == dequeue.  This means there will always be
 *    at least one free TRB in the ring.  This is useful if you want to turn
 *    that into a link TRB and expand the ring.
 * 2. When incrementing an enqueue or dequeue pointer, if the next TRB is a
 *    link TRB, then load the pointer with the address in the link TRB.  If the
 *    link TRB had its toggle bit set, you may need to update the ring cycle
 *    state (see cycle bit rules).  You may have to do this multiple times
 *    until you reach a non-link TRB.
 * 3. A ring is full if enqueue++ (for the definition of increment above)
 *    equals the dequeue pointer.
 *
 * Cycle bit rules:
 * 1. When a consumer increments a dequeue pointer and encounters a toggle bit
 *    in a link TRB, it must toggle the ring cycle state.
 * 2. When a producer increments an enqueue pointer and encounters a toggle bit
 *    in a link TRB, it must toggle the ring cycle state.
 *
 * Producer rules:
 * 1. Check if the ring is full before you enqueue.
 * 2. Write the ring cycle state to the cycle bit in the TRB you're enqueuing.
 *    Update the enqueue pointer between each write (which may update the ring
 *    cycle state).
 * 3. Notify the consumer.  If SW is the producer, it rings the doorbell for
 *    command and endpoint rings.  If HC is the producer for the event ring,
 *    it generates an interrupt according to interrupt moderation rules.
 *
 * Consumer rules:
 * 1. Check if the TRB belongs to you.  If the cycle bit == your ring cycle
 *    state, the TRB is owned by the consumer.
 * 2. Update the dequeue pointer (which may update the ring cycle state) and
 *    continue processing TRBs until you reach a TRB which is not owned by you.
 * 3. Notify the producer.  SW is the consumer for the event ring, and it
 *    updates the event ring dequeue pointer.  HC is the consumer for the
 *    command and endpoint rings; it generates events on the event ring for
 *    these.
 */
#include <linux/scatterlist.h>
#include <linux/slab.h>
#include "xhci.h"
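
/*
 * Editor's illustrative sketch (not part of the driver): the consumer
 * ownership test from Consumer rule 1 above, written out.  Assumes the
 * generic TRB layout used throughout this file, where the cycle bit is
 * bit 0 of field[3] (TRB_CYCLE); the helper name is hypothetical.
 */
static inline bool trb_owned_by_consumer_sketch(struct xhci_ring *ring,
		union xhci_trb *trb)
{
	/* The TRB belongs to the consumer when its cycle bit matches the
	 * consumer's ring cycle state.
	 */
	return (trb->generic.field[3] & TRB_CYCLE) == ring->cycle_state;
}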
/*
 * Returns zero if the TRB isn't in this segment, otherwise it returns the DMA
 * address of the TRB.
 */
dma_addr_t xhci_trb_virt_to_dma(struct xhci_segment *seg,
		union xhci_trb *trb)
{
	unsigned long segment_offset;

	if (!seg || !trb || trb < seg->trbs)
		return 0;
	/* offset in TRBs; valid offsets are 0 .. TRBS_PER_SEGMENT - 1 */
	segment_offset = trb - seg->trbs;
	if (segment_offset >= TRBS_PER_SEGMENT)
		return 0;
	return seg->dma + (segment_offset * sizeof(*trb));
}
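
/*
 * Editor's worked example: with 16-byte TRBs and TRBS_PER_SEGMENT of 64,
 * the TRB at index 3 of a segment whose seg->dma is 0x1000 maps to
 * 0x1000 + 3 * 16 = 0x1030; index 64 is already past the segment, so the
 * function returns 0.
 */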
/* Does this link TRB point to the first segment in a ring,
 * or was the previous TRB the last TRB on the last segment in the ERST?
 */
static inline bool last_trb_on_last_seg(struct xhci_hcd *xhci, struct xhci_ring *ring,
		struct xhci_segment *seg, union xhci_trb *trb)
{
	if (ring == xhci->event_ring)
		return (trb == &seg->trbs[TRBS_PER_SEGMENT]) &&
			(seg->next == xhci->event_ring->first_seg);
	else
		return trb->link.control & LINK_TOGGLE;
}

/* Is this TRB a link TRB or was the last TRB the last TRB in this event ring
 * segment?  I.e. would the updated event TRB pointer step off the end of the
 * event seg?
 */
static inline int last_trb(struct xhci_hcd *xhci, struct xhci_ring *ring,
		struct xhci_segment *seg, union xhci_trb *trb)
{
	if (ring == xhci->event_ring)
		return trb == &seg->trbs[TRBS_PER_SEGMENT];
	else
		return (trb->link.control & TRB_TYPE_BITMASK) == TRB_TYPE(TRB_LINK);
}

static inline int enqueue_is_link_trb(struct xhci_ring *ring)
{
	struct xhci_link_trb *link = &ring->enqueue->link;
	return ((link->control & TRB_TYPE_BITMASK) == TRB_TYPE(TRB_LINK));
}

/* Updates trb to point to the next TRB in the ring, and updates seg if the
 * next TRB is in a new segment.  This does not skip over link TRBs, and it
 * does not affect the ring dequeue or enqueue pointers.
 */
static void next_trb(struct xhci_hcd *xhci,
		struct xhci_ring *ring,
		struct xhci_segment **seg,
		union xhci_trb **trb)
{
	if (last_trb(xhci, ring, *seg, *trb)) {
		*seg = (*seg)->next;
		*trb = ((*seg)->trbs);
	} else {
		(*trb)++;
	}
}
/*
 * See Cycle bit rules.  SW is the consumer for the event ring only.
 * Don't make a ring full of link TRBs.  That would be dumb and this would loop.
 */
static void inc_deq(struct xhci_hcd *xhci, struct xhci_ring *ring, bool consumer)
{
	union xhci_trb *next = ++(ring->dequeue);
	unsigned long long addr;

	ring->deq_updates++;
	/* Update the dequeue pointer further if that was a link TRB or we're at
	 * the end of an event ring segment (which doesn't have link TRBs)
	 */
	while (last_trb(xhci, ring, ring->deq_seg, next)) {
		if (consumer && last_trb_on_last_seg(xhci, ring, ring->deq_seg, next)) {
			ring->cycle_state = (ring->cycle_state ? 0 : 1);
			if (!in_interrupt())
				xhci_dbg(xhci, "Toggle cycle state for ring %p = %i\n",
						ring,
						(unsigned int) ring->cycle_state);
		}
		ring->deq_seg = ring->deq_seg->next;
		ring->dequeue = ring->deq_seg->trbs;
		next = ring->dequeue;
	}
	addr = (unsigned long long) xhci_trb_virt_to_dma(ring->deq_seg, ring->dequeue);
	if (ring == xhci->event_ring)
		xhci_dbg(xhci, "Event ring deq = 0x%llx (DMA)\n", addr);
	else if (ring == xhci->cmd_ring)
		xhci_dbg(xhci, "Command ring deq = 0x%llx (DMA)\n", addr);
	else
		xhci_dbg(xhci, "Ring deq = 0x%llx (DMA)\n", addr);
}
/*
 * See Cycle bit rules.  SW is the consumer for the event ring only.
 * Don't make a ring full of link TRBs.  That would be dumb and this would loop.
 *
 * If we've just enqueued a TRB that is in the middle of a TD (meaning the
 * chain bit is set), then set the chain bit in all the following link TRBs.
 * If we've enqueued the last TRB in a TD, make sure the following link TRBs
 * have their chain bit cleared (so that each Link TRB is a separate TD).
 *
 * Section 6.4.4.1 of the 0.95 spec says link TRBs cannot have the chain bit
 * set, but other sections talk about dealing with the chain bit set.  This was
 * fixed in the 0.96 specification errata, but we have to assume that all 0.95
 * xHCI hardware can't handle the chain bit being cleared on a link TRB.
 *
 * @more_trbs_coming:	Will you enqueue more TRBs before calling
 *			prepare_transfer()?
 */
static void inc_enq(struct xhci_hcd *xhci, struct xhci_ring *ring,
		bool consumer, bool more_trbs_coming)
{
	u32 chain;
	union xhci_trb *next;
	unsigned long long addr;

	chain = ring->enqueue->generic.field[3] & TRB_CHAIN;
	next = ++(ring->enqueue);

	ring->enq_updates++;
	/* Update the enqueue pointer further if that was a link TRB or we're at
	 * the end of an event ring segment (which doesn't have link TRBs)
	 */
	while (last_trb(xhci, ring, ring->enq_seg, next)) {
		if (!consumer) {
			if (ring != xhci->event_ring) {
				/*
				 * If the caller doesn't plan on enqueueing more
				 * TDs before ringing the doorbell, then we
				 * don't want to give the link TRB to the
				 * hardware just yet.  We'll give the link TRB
				 * back in prepare_ring() just before we enqueue
				 * the TD at the top of the ring.
				 */
				if (!chain && !more_trbs_coming)
					break;

				/* If we're not dealing with 0.95 hardware,
				 * carry over the chain bit of the previous TRB
				 * (which may mean the chain bit is cleared).
				 */
				if (!xhci_link_trb_quirk(xhci)) {
					next->link.control &= ~TRB_CHAIN;
					next->link.control |= chain;
				}
				/* Give this link TRB to the hardware */
				wmb();
				next->link.control ^= TRB_CYCLE;
			}
			/* Toggle the cycle bit after the last ring segment. */
			if (last_trb_on_last_seg(xhci, ring, ring->enq_seg, next)) {
				ring->cycle_state = (ring->cycle_state ? 0 : 1);
				if (!in_interrupt())
					xhci_dbg(xhci, "Toggle cycle state for ring %p = %i\n",
							ring,
							(unsigned int) ring->cycle_state);
			}
		}
		ring->enq_seg = ring->enq_seg->next;
		ring->enqueue = ring->enq_seg->trbs;
		next = ring->enqueue;
	}
	addr = (unsigned long long) xhci_trb_virt_to_dma(ring->enq_seg, ring->enqueue);
	if (ring == xhci->event_ring)
		xhci_dbg(xhci, "Event ring enq = 0x%llx (DMA)\n", addr);
	else if (ring == xhci->cmd_ring)
		xhci_dbg(xhci, "Command ring enq = 0x%llx (DMA)\n", addr);
	else
		xhci_dbg(xhci, "Ring enq = 0x%llx (DMA)\n", addr);
}
/*
 * Check to see if there's room to enqueue num_trbs on the ring.  See rules
 * above.
 * FIXME: this would be simpler and faster if we just kept track of the number
 * of free TRBs in a ring.
 */
static int room_on_ring(struct xhci_hcd *xhci, struct xhci_ring *ring,
		unsigned int num_trbs)
{
	int i;
	union xhci_trb *enq = ring->enqueue;
	struct xhci_segment *enq_seg = ring->enq_seg;
	struct xhci_segment *cur_seg;
	unsigned int left_on_ring;

	/* If we are currently pointing to a link TRB, advance the
	 * enqueue pointer before checking for space */
	while (last_trb(xhci, ring, enq_seg, enq)) {
		enq_seg = enq_seg->next;
		enq = enq_seg->trbs;
	}

	/* Check if ring is empty */
	if (enq == ring->dequeue) {
		/* Can't use link trbs */
		left_on_ring = TRBS_PER_SEGMENT - 1;
		for (cur_seg = enq_seg->next; cur_seg != enq_seg;
				cur_seg = cur_seg->next)
			left_on_ring += TRBS_PER_SEGMENT - 1;

		/* Always need one TRB free in the ring. */
		left_on_ring -= 1;
		if (num_trbs > left_on_ring) {
			xhci_warn(xhci, "Not enough room on ring; "
					"need %u TRBs, %u TRBs left\n",
					num_trbs, left_on_ring);
			return 0;
		}
		return 1;
	}
	/* Make sure there's an extra empty TRB available */
	for (i = 0; i <= num_trbs; ++i) {
		if (enq == ring->dequeue)
			return 0;
		enq++;
		while (last_trb(xhci, ring, enq_seg, enq)) {
			enq_seg = enq_seg->next;
			enq = enq_seg->trbs;
		}
	}
	return 1;
}
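
/*
 * Editor's sketch of the approach the FIXME above suggests: instead of
 * walking segments, keep a running count of free TRBs.  The counter shown
 * here is hypothetical (this version of struct xhci_ring has no such
 * field); the producer would decrement it as TRBs are queued and credit
 * TRBs back as the hardware retires them.
 */
struct counted_ring_sketch {
	struct xhci_ring	*ring;
	unsigned int		num_trbs_free;	/* hypothetical counter */
};

static inline int room_on_ring_counted_sketch(struct counted_ring_sketch *cr,
		unsigned int num_trbs)
{
	/* One TRB must always stay free so that a full ring is still
	 * distinguishable from an empty one (enqueue == dequeue); the
	 * counter is assumed to exclude that reserved TRB.
	 */
	return cr->num_trbs_free >= num_trbs;
}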
/* Ring the host controller doorbell after placing a command on the ring */
void xhci_ring_cmd_db(struct xhci_hcd *xhci)
{
	u32 temp;

	xhci_dbg(xhci, "// Ding dong!\n");
	temp = xhci_readl(xhci, &xhci->dba->doorbell[0]) & DB_MASK;
	xhci_writel(xhci, temp | DB_TARGET_HOST, &xhci->dba->doorbell[0]);
	/* Flush PCI posted writes */
	xhci_readl(xhci, &xhci->dba->doorbell[0]);
}

static void ring_ep_doorbell(struct xhci_hcd *xhci,
		unsigned int slot_id,
		unsigned int ep_index,
		unsigned int stream_id)
{
	struct xhci_virt_ep *ep;
	unsigned int ep_state;
	u32 field;
	__u32 __iomem *db_addr = &xhci->dba->doorbell[slot_id];

	ep = &xhci->devs[slot_id]->eps[ep_index];
	ep_state = ep->ep_state;
	/* Don't ring the doorbell for this endpoint if there are pending
	 * cancellations because we don't want to interrupt processing.
	 * We don't want to restart any stream rings if there's a set dequeue
	 * pointer command pending because the device can choose to start any
	 * stream once the endpoint is on the HW schedule.
	 * FIXME - check all the stream rings for pending cancellations.
	 */
	if (!(ep_state & EP_HALT_PENDING) && !(ep_state & SET_DEQ_PENDING)
			&& !(ep_state & EP_HALTED)) {
		field = xhci_readl(xhci, db_addr) & DB_MASK;
		field |= EPI_TO_DB(ep_index) | STREAM_ID_TO_DB(stream_id);
		xhci_writel(xhci, field, db_addr);
	}
}

/* Ring the doorbell for any rings with pending URBs */
static void ring_doorbell_for_active_rings(struct xhci_hcd *xhci,
		unsigned int slot_id,
		unsigned int ep_index)
{
	unsigned int stream_id;
	struct xhci_virt_ep *ep;

	ep = &xhci->devs[slot_id]->eps[ep_index];

	/* A ring has pending URBs if its TD list is not empty */
	if (!(ep->ep_state & EP_HAS_STREAMS)) {
		if (!(list_empty(&ep->ring->td_list)))
			ring_ep_doorbell(xhci, slot_id, ep_index, 0);
		return;
	}

	for (stream_id = 1; stream_id < ep->stream_info->num_streams;
			stream_id++) {
		struct xhci_stream_info *stream_info = ep->stream_info;
		if (!list_empty(&stream_info->stream_rings[stream_id]->td_list))
			ring_ep_doorbell(xhci, slot_id, ep_index, stream_id);
	}
}
/*
 * Find the segment that trb is in.  Start searching in start_seg.
 * If we must move past a segment that has a link TRB with a toggle cycle state
 * bit set, then we will toggle the value pointed at by cycle_state.
 */
static struct xhci_segment *find_trb_seg(
		struct xhci_segment *start_seg,
		union xhci_trb	*trb, int *cycle_state)
{
	struct xhci_segment *cur_seg = start_seg;
	struct xhci_generic_trb *generic_trb;

	while (cur_seg->trbs > trb ||
			&cur_seg->trbs[TRBS_PER_SEGMENT - 1] < trb) {
		generic_trb = &cur_seg->trbs[TRBS_PER_SEGMENT - 1].generic;
		if ((generic_trb->field[3] & TRB_TYPE_BITMASK) ==
				TRB_TYPE(TRB_LINK) &&
				(generic_trb->field[3] & LINK_TOGGLE))
			*cycle_state = ~(*cycle_state) & 0x1;
		cur_seg = cur_seg->next;
		if (cur_seg == start_seg)
			/* Looped over the entire list.  Oops! */
			return NULL;
	}
	return cur_seg;
}

static struct xhci_ring *xhci_triad_to_transfer_ring(struct xhci_hcd *xhci,
		unsigned int slot_id, unsigned int ep_index,
		unsigned int stream_id)
{
	struct xhci_virt_ep *ep;

	ep = &xhci->devs[slot_id]->eps[ep_index];
	/* Common case: no streams */
	if (!(ep->ep_state & EP_HAS_STREAMS))
		return ep->ring;

	if (stream_id == 0) {
		xhci_warn(xhci,
				"WARN: Slot ID %u, ep index %u has streams, "
				"but URB has no stream ID.\n",
				slot_id, ep_index);
		return NULL;
	}

	if (stream_id < ep->stream_info->num_streams)
		return ep->stream_info->stream_rings[stream_id];

	xhci_warn(xhci,
			"WARN: Slot ID %u, ep index %u has "
			"stream IDs 1 to %u allocated, "
			"but stream ID %u is requested.\n",
			slot_id, ep_index,
			ep->stream_info->num_streams - 1,
			stream_id);
	return NULL;
}

/* Get the right ring for the given URB.
 * If the endpoint supports streams, boundary check the URB's stream ID.
 * If the endpoint doesn't support streams, return the singular endpoint ring.
 */
static struct xhci_ring *xhci_urb_to_transfer_ring(struct xhci_hcd *xhci,
		struct urb *urb)
{
	return xhci_triad_to_transfer_ring(xhci, urb->dev->slot_id,
		xhci_get_endpoint_index(&urb->ep->desc), urb->stream_id);
}

/*
 * Move the xHC's endpoint ring dequeue pointer past cur_td.
 * Record the new state of the xHC's endpoint ring dequeue segment,
 * dequeue pointer, and new consumer cycle state in state.
 * Update our internal representation of the ring's dequeue pointer.
 *
 * We do this in three jumps:
 *  - First we update our new ring state to be the same as when the xHC stopped.
 *  - Then we traverse the ring to find the segment that contains
 *    the last TRB in the TD.  We toggle the xHC's new cycle state when we pass
 *    any link TRBs with the toggle cycle bit set.
 *  - Finally we move the dequeue state one TRB further, toggling the cycle bit
 *    if we've moved it past a link TRB with the toggle cycle bit set.
 */
void xhci_find_new_dequeue_state(struct xhci_hcd *xhci,
		unsigned int slot_id, unsigned int ep_index,
		unsigned int stream_id, struct xhci_td *cur_td,
		struct xhci_dequeue_state *state)
{
	struct xhci_virt_device *dev = xhci->devs[slot_id];
	struct xhci_ring *ep_ring;
	struct xhci_generic_trb *trb;
	struct xhci_ep_ctx *ep_ctx;
	dma_addr_t addr;

	ep_ring = xhci_triad_to_transfer_ring(xhci, slot_id,
			ep_index, stream_id);
	if (!ep_ring) {
		xhci_warn(xhci, "WARN can't find new dequeue state "
				"for invalid stream ID %u.\n",
				stream_id);
		return;
	}
	state->new_cycle_state = 0;
	xhci_dbg(xhci, "Finding segment containing stopped TRB.\n");
	state->new_deq_seg = find_trb_seg(cur_td->start_seg,
			dev->eps[ep_index].stopped_trb,
			&state->new_cycle_state);
	if (!state->new_deq_seg)
		BUG();
	/* Dig out the cycle state saved by the xHC during the stop ep cmd */
	xhci_dbg(xhci, "Finding endpoint context\n");
	ep_ctx = xhci_get_ep_ctx(xhci, dev->out_ctx, ep_index);
	state->new_cycle_state = 0x1 & ep_ctx->deq;

	state->new_deq_ptr = cur_td->last_trb;
	xhci_dbg(xhci, "Finding segment containing last TRB in TD.\n");
	state->new_deq_seg = find_trb_seg(state->new_deq_seg,
			state->new_deq_ptr,
			&state->new_cycle_state);
	if (!state->new_deq_seg)
		BUG();

	trb = &state->new_deq_ptr->generic;
	if ((trb->field[3] & TRB_TYPE_BITMASK) == TRB_TYPE(TRB_LINK) &&
			(trb->field[3] & LINK_TOGGLE))
		state->new_cycle_state = ~(state->new_cycle_state) & 0x1;
	next_trb(xhci, ep_ring, &state->new_deq_seg, &state->new_deq_ptr);

	/* Don't update the ring cycle state for the producer (us). */
	xhci_dbg(xhci, "New dequeue segment = %p (virtual)\n",
			state->new_deq_seg);
	addr = xhci_trb_virt_to_dma(state->new_deq_seg, state->new_deq_ptr);
	xhci_dbg(xhci, "New dequeue pointer = 0x%llx (DMA)\n",
			(unsigned long long) addr);
	xhci_dbg(xhci, "Setting dequeue pointer in internal ring state.\n");
	ep_ring->dequeue = state->new_deq_ptr;
	ep_ring->deq_seg = state->new_deq_seg;
}

static void td_to_noop(struct xhci_hcd *xhci, struct xhci_ring *ep_ring,
		struct xhci_td *cur_td)
{
	struct xhci_segment *cur_seg;
	union xhci_trb *cur_trb;

	for (cur_seg = cur_td->start_seg, cur_trb = cur_td->first_trb;
			true;
			next_trb(xhci, ep_ring, &cur_seg, &cur_trb)) {
		if ((cur_trb->generic.field[3] & TRB_TYPE_BITMASK) ==
				TRB_TYPE(TRB_LINK)) {
			/* Unchain any chained Link TRBs, but
			 * leave the pointers intact.
			 */
			cur_trb->generic.field[3] &= ~TRB_CHAIN;
			xhci_dbg(xhci, "Cancel (unchain) link TRB\n");
			xhci_dbg(xhci, "Address = %p (0x%llx dma); "
					"in seg %p (0x%llx dma)\n",
					cur_trb,
					(unsigned long long)xhci_trb_virt_to_dma(cur_seg, cur_trb),
					cur_seg,
					(unsigned long long)cur_seg->dma);
		} else {
			cur_trb->generic.field[0] = 0;
			cur_trb->generic.field[1] = 0;
			cur_trb->generic.field[2] = 0;
			/* Preserve only the cycle bit of this TRB */
			cur_trb->generic.field[3] &= TRB_CYCLE;
			cur_trb->generic.field[3] |= TRB_TYPE(TRB_TR_NOOP);
			xhci_dbg(xhci, "Cancel TRB %p (0x%llx dma) "
					"in seg %p (0x%llx dma)\n",
					cur_trb,
					(unsigned long long)xhci_trb_virt_to_dma(cur_seg, cur_trb),
					cur_seg,
					(unsigned long long)cur_seg->dma);
		}
		if (cur_trb == cur_td->last_trb)
			break;
	}
}
static int queue_set_tr_deq(struct xhci_hcd *xhci, int slot_id,
		unsigned int ep_index, unsigned int stream_id,
		struct xhci_segment *deq_seg,
		union xhci_trb *deq_ptr, u32 cycle_state);

void xhci_queue_new_dequeue_state(struct xhci_hcd *xhci,
		unsigned int slot_id, unsigned int ep_index,
		unsigned int stream_id,
		struct xhci_dequeue_state *deq_state)
{
	struct xhci_virt_ep *ep = &xhci->devs[slot_id]->eps[ep_index];

	xhci_dbg(xhci, "Set TR Deq Ptr cmd, new deq seg = %p (0x%llx dma), "
			"new deq ptr = %p (0x%llx dma), new cycle = %u\n",
			deq_state->new_deq_seg,
			(unsigned long long)deq_state->new_deq_seg->dma,
			deq_state->new_deq_ptr,
			(unsigned long long)xhci_trb_virt_to_dma(deq_state->new_deq_seg, deq_state->new_deq_ptr),
			deq_state->new_cycle_state);
	queue_set_tr_deq(xhci, slot_id, ep_index, stream_id,
			deq_state->new_deq_seg,
			deq_state->new_deq_ptr,
			(u32) deq_state->new_cycle_state);
	/* Stop the TD queueing code from ringing the doorbell until
	 * this command completes.  The HC won't set the dequeue pointer
	 * if the ring is running, and ringing the doorbell starts the
	 * ring running.
	 */
	ep->ep_state |= SET_DEQ_PENDING;
}

static inline void xhci_stop_watchdog_timer_in_irq(struct xhci_hcd *xhci,
		struct xhci_virt_ep *ep)
{
	ep->ep_state &= ~EP_HALT_PENDING;
	/* Can't del_timer_sync in interrupt, so we attempt to cancel.  If the
	 * timer is running on another CPU, we don't decrement stop_cmds_pending
	 * (since we didn't successfully stop the watchdog timer).
	 */
	if (del_timer(&ep->stop_cmd_timer))
		ep->stop_cmds_pending--;
}

/* Must be called with xhci->lock held in interrupt context */
static void xhci_giveback_urb_in_irq(struct xhci_hcd *xhci,
		struct xhci_td *cur_td, int status, char *adjective)
{
	struct usb_hcd *hcd = xhci_to_hcd(xhci);
	struct urb	*urb;
	struct urb_priv	*urb_priv;

	urb = cur_td->urb;
	urb_priv = urb->hcpriv;
	urb_priv->td_cnt++;

	/* Only give back the URB once all of its TDs have completed */
	if (urb_priv->td_cnt == urb_priv->length) {
		usb_hcd_unlink_urb_from_ep(hcd, urb);
		xhci_dbg(xhci, "Giveback %s URB %p\n", adjective, urb);

		spin_unlock(&xhci->lock);
		usb_hcd_giveback_urb(hcd, urb, status);
		xhci_urb_free_priv(xhci, urb_priv);
		spin_lock(&xhci->lock);
		xhci_dbg(xhci, "%s URB given back\n", adjective);
	}
}
/*
 * When we get a command completion for a Stop Endpoint Command, we need to
 * unlink any cancelled TDs from the ring.  There are two ways to do that:
 *
 *  1. If the HW was in the middle of processing the TD that needs to be
 *     cancelled, then we must move the ring's dequeue pointer past the last
 *     TRB in the TD with a Set Dequeue Pointer Command.
 *  2. Otherwise, we turn all the TRBs in the TD into No-op TRBs (with the
 *     chain bit cleared) so that the HW will skip over them.
 */
static void handle_stopped_endpoint(struct xhci_hcd *xhci,
		union xhci_trb *trb)
{
	unsigned int slot_id;
	unsigned int ep_index;
	struct xhci_ring *ep_ring;
	struct xhci_virt_ep *ep;
	struct list_head *entry;
	struct xhci_td *cur_td = NULL;
	struct xhci_td *last_unlinked_td;

	struct xhci_dequeue_state deq_state;

	memset(&deq_state, 0, sizeof(deq_state));
	slot_id = TRB_TO_SLOT_ID(trb->generic.field[3]);
	ep_index = TRB_TO_EP_INDEX(trb->generic.field[3]);
	ep = &xhci->devs[slot_id]->eps[ep_index];

	if (list_empty(&ep->cancelled_td_list)) {
		xhci_stop_watchdog_timer_in_irq(xhci, ep);
		ring_doorbell_for_active_rings(xhci, slot_id, ep_index);
		return;
	}

	/* Fix up the ep ring first, so HW stops executing cancelled TDs.
	 * We have the xHCI lock, so nothing can modify this list until we drop
	 * it.  We're also in the event handler, so we can't get re-interrupted
	 * if another Stop Endpoint command completes.
	 */
	list_for_each(entry, &ep->cancelled_td_list) {
		cur_td = list_entry(entry, struct xhci_td, cancelled_td_list);
		xhci_dbg(xhci, "Cancelling TD starting at %p, 0x%llx (dma).\n",
				cur_td->first_trb,
				(unsigned long long)xhci_trb_virt_to_dma(cur_td->start_seg, cur_td->first_trb));
		ep_ring = xhci_urb_to_transfer_ring(xhci, cur_td->urb);
		if (!ep_ring) {
			/* This shouldn't happen unless a driver is mucking
			 * with the stream ID after submission.  This will
			 * leave the TD on the hardware ring, and the hardware
			 * will try to execute it, and may access a buffer
			 * that has already been freed.  In the best case, the
			 * hardware will execute it, and the event handler will
			 * ignore the completion event for that TD, since it was
			 * removed from the td_list for that endpoint.  In
			 * short, don't muck with the stream ID after
			 * submission.
			 */
			xhci_warn(xhci, "WARN Cancelled URB %p "
					"has invalid stream ID %u.\n",
					cur_td->urb,
					cur_td->urb->stream_id);
			goto remove_finished_td;
		}
		/*
		 * If we stopped on the TD we need to cancel, then we have to
		 * move the xHC endpoint ring dequeue pointer past this TD.
		 */
		if (cur_td == ep->stopped_td)
			xhci_find_new_dequeue_state(xhci, slot_id, ep_index,
					cur_td->urb->stream_id,
					cur_td, &deq_state);
		else
			td_to_noop(xhci, ep_ring, cur_td);
remove_finished_td:
		/*
		 * The event handler won't see a completion for this TD anymore,
		 * so remove it from the endpoint ring's TD list.  Keep it in
		 * the cancelled TD list for URB completion later.
		 */
		list_del(&cur_td->td_list);
	}
	last_unlinked_td = cur_td;
	xhci_stop_watchdog_timer_in_irq(xhci, ep);

	/* If necessary, queue a Set Transfer Ring Dequeue Pointer command */
	if (deq_state.new_deq_ptr && deq_state.new_deq_seg) {
		xhci_queue_new_dequeue_state(xhci,
				slot_id, ep_index,
				ep->stopped_td->urb->stream_id,
				&deq_state);
		xhci_ring_cmd_db(xhci);
	} else {
		/* Otherwise ring the doorbell(s) to restart queued transfers */
		ring_doorbell_for_active_rings(xhci, slot_id, ep_index);
	}
	ep->stopped_td = NULL;
	ep->stopped_trb = NULL;

	/*
	 * Drop the lock and complete the URBs in the cancelled TD list.
	 * New TDs to be cancelled might be added to the end of the list before
	 * we can complete all the URBs for the TDs we already unlinked.
	 * So stop when we've completed the URB for the last TD we unlinked.
	 */
	do {
		cur_td = list_entry(ep->cancelled_td_list.next,
				struct xhci_td, cancelled_td_list);
		list_del(&cur_td->cancelled_td_list);

		/* Clean up the cancelled URB */
		/* Doesn't matter what we pass for status, since the core will
		 * just overwrite it (because the URB has been unlinked).
		 */
		xhci_giveback_urb_in_irq(xhci, cur_td, 0, "cancelled");

		/* Stop processing the cancelled list if the watchdog timer is
		 * running.
		 */
		if (xhci->xhc_state & XHCI_STATE_DYING)
			return;
	} while (cur_td != last_unlinked_td);

	/* Return to the event handler with xhci->lock re-acquired */
}
/* Watchdog timer function for when a stop endpoint command fails to complete.
 * In this case, we assume the host controller is broken or dying or dead.  The
 * host may still be completing some other events, so we have to be careful to
 * let the event ring handler and the URB dequeueing/enqueueing functions know
 * through xhci->xhc_state.
 *
 * The timer may also fire if the host takes a very long time to respond to the
 * command, and the stop endpoint command completion handler cannot delete the
 * timer before the timer function is called.  Another endpoint cancellation may
 * sneak in before the timer function can grab the lock, and that may queue
 * another stop endpoint command and add the timer back.  So we cannot use a
 * simple flag to say whether there is a pending stop endpoint command for a
 * particular endpoint.
 *
 * Instead we use a combination of that flag and a counter for the number of
 * pending stop endpoint commands.  If the timer is the tail end of the last
 * stop endpoint command, and the endpoint's command is still pending, we assume
 * the host is dying.
 */
void xhci_stop_endpoint_command_watchdog(unsigned long arg)
{
	struct xhci_hcd *xhci;
	struct xhci_virt_ep *ep;
	struct xhci_virt_ep *temp_ep;
	struct xhci_ring *ring;
	struct xhci_td *cur_td;
	int ret, i, j;

	ep = (struct xhci_virt_ep *) arg;
	xhci = ep->xhci;

	spin_lock(&xhci->lock);

	ep->stop_cmds_pending--;
	if (xhci->xhc_state & XHCI_STATE_DYING) {
		xhci_dbg(xhci, "Stop EP timer ran, but another timer marked "
				"xHCI as DYING, exiting.\n");
		spin_unlock(&xhci->lock);
		return;
	}
	if (!(ep->stop_cmds_pending == 0 && (ep->ep_state & EP_HALT_PENDING))) {
		xhci_dbg(xhci, "Stop EP timer ran, but no command pending, "
				"exiting.\n");
		spin_unlock(&xhci->lock);
		return;
	}

	xhci_warn(xhci, "xHCI host not responding to stop endpoint command.\n");
	xhci_warn(xhci, "Assuming host is dying, halting host.\n");
	/* Oops, HC is dead or dying or at least not responding to the stop
	 * endpoint command.
	 */
	xhci->xhc_state |= XHCI_STATE_DYING;
	/* Disable interrupts from the host controller and start halting it */
	xhci_quiesce(xhci);
	spin_unlock(&xhci->lock);

	ret = xhci_halt(xhci);

	spin_lock(&xhci->lock);
	if (ret < 0) {
		/* This is bad; the host is not responding to commands and it's
		 * not allowing itself to be halted.  At least interrupts are
		 * disabled, so we can set HC_STATE_HALT and notify the
		 * USB core.  But if we call usb_hc_died(), it will attempt to
		 * disconnect all device drivers under this host.  Those
		 * disconnect() methods will wait for all URBs to be unlinked,
		 * so we must complete them.
		 */
		xhci_warn(xhci, "Non-responsive xHCI host is not halting.\n");
		xhci_warn(xhci, "Completing active URBs anyway.\n");
		/* We could turn all TDs on the rings to no-ops.  This won't
		 * help if the host has cached part of the ring, and is slow if
		 * we want to preserve the cycle bit.  Skip it and hope the host
		 * doesn't touch the memory.
		 */
	}
	for (i = 0; i < MAX_HC_SLOTS; i++) {
		if (!xhci->devs[i])
			continue;
		for (j = 0; j < 31; j++) {
			temp_ep = &xhci->devs[i]->eps[j];
			ring = temp_ep->ring;
			if (!ring)
				continue;
			xhci_dbg(xhci, "Killing URBs for slot ID %u, "
					"ep index %u\n", i, j);
			while (!list_empty(&ring->td_list)) {
				cur_td = list_first_entry(&ring->td_list,
						struct xhci_td,
						td_list);
				list_del(&cur_td->td_list);
				if (!list_empty(&cur_td->cancelled_td_list))
					list_del(&cur_td->cancelled_td_list);
				xhci_giveback_urb_in_irq(xhci, cur_td,
						-ESHUTDOWN, "killed");
			}
			while (!list_empty(&temp_ep->cancelled_td_list)) {
				cur_td = list_first_entry(
						&temp_ep->cancelled_td_list,
						struct xhci_td,
						cancelled_td_list);
				list_del(&cur_td->cancelled_td_list);
				xhci_giveback_urb_in_irq(xhci, cur_td,
						-ESHUTDOWN, "killed");
			}
		}
	}
	spin_unlock(&xhci->lock);
	xhci_to_hcd(xhci)->state = HC_STATE_HALT;
	xhci_dbg(xhci, "Calling usb_hc_died()\n");
	usb_hc_died(xhci_to_hcd(xhci));
	xhci_dbg(xhci, "xHCI host controller is dead.\n");
}
/*
 * When we get a completion for a Set Transfer Ring Dequeue Pointer command,
 * we need to clear the set deq pending flag in the endpoint ring state, so that
 * the TD queueing code can ring the doorbell again.  We also need to ring the
 * endpoint doorbell to restart the ring, but only if there aren't more
 * cancellations pending.
 */
static void handle_set_deq_completion(struct xhci_hcd *xhci,
		struct xhci_event_cmd *event,
		union xhci_trb *trb)
{
	unsigned int slot_id;
	unsigned int ep_index;
	unsigned int stream_id;
	struct xhci_ring *ep_ring;
	struct xhci_virt_device *dev;
	struct xhci_ep_ctx *ep_ctx;
	struct xhci_slot_ctx *slot_ctx;

	slot_id = TRB_TO_SLOT_ID(trb->generic.field[3]);
	ep_index = TRB_TO_EP_INDEX(trb->generic.field[3]);
	stream_id = TRB_TO_STREAM_ID(trb->generic.field[2]);
	dev = xhci->devs[slot_id];

	ep_ring = xhci_stream_id_to_ring(dev, ep_index, stream_id);
	if (!ep_ring) {
		xhci_warn(xhci, "WARN Set TR deq ptr command for "
				"freed stream ID %u\n",
				stream_id);
		/* XXX: Harmless??? */
		dev->eps[ep_index].ep_state &= ~SET_DEQ_PENDING;
		return;
	}

	ep_ctx = xhci_get_ep_ctx(xhci, dev->out_ctx, ep_index);
	slot_ctx = xhci_get_slot_ctx(xhci, dev->out_ctx);

	if (GET_COMP_CODE(event->status) != COMP_SUCCESS) {
		unsigned int ep_state;
		unsigned int slot_state;

		switch (GET_COMP_CODE(event->status)) {
		case COMP_TRB_ERR:
			xhci_warn(xhci, "WARN Set TR Deq Ptr cmd invalid because "
					"of stream ID configuration\n");
			break;
		case COMP_CTX_STATE:
			xhci_warn(xhci, "WARN Set TR Deq Ptr cmd failed due "
					"to incorrect slot or ep state.\n");
			ep_state = ep_ctx->ep_info;
			ep_state &= EP_STATE_MASK;
			slot_state = slot_ctx->dev_state;
			slot_state = GET_SLOT_STATE(slot_state);
			xhci_dbg(xhci, "Slot state = %u, EP state = %u\n",
					slot_state, ep_state);
			break;
		case COMP_EBADSLT:
			xhci_warn(xhci, "WARN Set TR Deq Ptr cmd failed because "
					"slot %u was not enabled.\n", slot_id);
			break;
		default:
			xhci_warn(xhci, "WARN Set TR Deq Ptr cmd with unknown "
					"completion code of %u.\n",
					GET_COMP_CODE(event->status));
			break;
		}
		/* OK what do we do now?  The endpoint state is hosed, and we
		 * should never get to this point if the synchronization between
		 * queueing and endpoint state is correct.  This might happen
		 * if the device gets disconnected after we've finished
		 * cancelling URBs, which might not be an error...
		 */
	} else {
		xhci_dbg(xhci, "Successful Set TR Deq Ptr cmd, deq = @%08llx\n",
				ep_ctx->deq);
	}

	dev->eps[ep_index].ep_state &= ~SET_DEQ_PENDING;
	/* Restart any rings with pending URBs */
	ring_doorbell_for_active_rings(xhci, slot_id, ep_index);
}
static void handle_reset_ep_completion(struct xhci_hcd *xhci,
		struct xhci_event_cmd *event,
		union xhci_trb *trb)
{
	int slot_id;
	unsigned int ep_index;

	slot_id = TRB_TO_SLOT_ID(trb->generic.field[3]);
	ep_index = TRB_TO_EP_INDEX(trb->generic.field[3]);
	/* This command will only fail if the endpoint wasn't halted,
	 * but we don't care.
	 */
	xhci_dbg(xhci, "Ignoring reset ep completion code of %u\n",
			(unsigned int) GET_COMP_CODE(event->status));

	/* HW with the reset endpoint quirk needs to have a configure endpoint
	 * command complete before the endpoint can be used.  Queue that here
	 * because the HW can't handle two commands being queued in a row.
	 */
	if (xhci->quirks & XHCI_RESET_EP_QUIRK) {
		xhci_dbg(xhci, "Queueing configure endpoint command\n");
		xhci_queue_configure_endpoint(xhci,
				xhci->devs[slot_id]->in_ctx->dma, slot_id,
				false);
		xhci_ring_cmd_db(xhci);
	} else {
		/* Clear our internal halted state and restart the ring(s) */
		xhci->devs[slot_id]->eps[ep_index].ep_state &= ~EP_HALTED;
		ring_doorbell_for_active_rings(xhci, slot_id, ep_index);
	}
}

/* Check to see if a command in the device's command queue matches this one.
 * Signal the completion or free the command, and return 1.  Return 0 if the
 * completed command isn't at the head of the command list.
 */
static int handle_cmd_in_cmd_wait_list(struct xhci_hcd *xhci,
		struct xhci_virt_device *virt_dev,
		struct xhci_event_cmd *event)
{
	struct xhci_command *command;

	if (list_empty(&virt_dev->cmd_list))
		return 0;

	command = list_entry(virt_dev->cmd_list.next,
			struct xhci_command, cmd_list);
	if (xhci->cmd_ring->dequeue != command->command_trb)
		return 0;

	command->status =
		GET_COMP_CODE(event->status);
	list_del(&command->cmd_list);
	if (command->completion)
		complete(command->completion);
	else
		xhci_free_command(xhci, command);
	return 1;
}
static void handle_cmd_completion(struct xhci_hcd *xhci,
		struct xhci_event_cmd *event)
{
	int slot_id = TRB_TO_SLOT_ID(event->flags);
	u64 cmd_dma;
	dma_addr_t cmd_dequeue_dma;
	struct xhci_input_control_ctx *ctrl_ctx;
	struct xhci_virt_device *virt_dev;
	unsigned int ep_index;
	struct xhci_ring *ep_ring;
	unsigned int ep_state;

	cmd_dma = event->cmd_trb;
	cmd_dequeue_dma = xhci_trb_virt_to_dma(xhci->cmd_ring->deq_seg,
			xhci->cmd_ring->dequeue);
	/* Is the command ring deq ptr out of sync with the deq seg ptr? */
	if (cmd_dequeue_dma == 0) {
		xhci->error_bitmask |= 1 << 4;
		return;
	}
	/* Does the DMA address match our internal dequeue pointer address? */
	if (cmd_dma != (u64) cmd_dequeue_dma) {
		xhci->error_bitmask |= 1 << 5;
		return;
	}
	switch (xhci->cmd_ring->dequeue->generic.field[3] & TRB_TYPE_BITMASK) {
	case TRB_TYPE(TRB_ENABLE_SLOT):
		if (GET_COMP_CODE(event->status) == COMP_SUCCESS)
			xhci->slot_id = slot_id;
		else
			xhci->slot_id = 0;
		complete(&xhci->addr_dev);
		break;
	case TRB_TYPE(TRB_DISABLE_SLOT):
		if (xhci->devs[slot_id])
			xhci_free_virt_device(xhci, slot_id);
		break;
	case TRB_TYPE(TRB_CONFIG_EP):
		virt_dev = xhci->devs[slot_id];
		if (handle_cmd_in_cmd_wait_list(xhci, virt_dev, event))
			break;
		/*
		 * Configure endpoint commands can come from the USB core
		 * configuration or alt setting changes, or because the HW
		 * needed an extra configure endpoint command after a reset
		 * endpoint command or streams were being configured.
		 * If the command was for a halted endpoint, the xHCI driver
		 * is not waiting on the configure endpoint command.
		 */
		ctrl_ctx = xhci_get_input_control_ctx(xhci,
				virt_dev->in_ctx);
		/* Input ctx add_flags are the endpoint index plus one */
		ep_index = xhci_last_valid_endpoint(ctrl_ctx->add_flags) - 1;
		/* A usb_set_interface() call directly after clearing a halted
		 * condition may race on this quirky hardware.  Not worth
		 * worrying about, since this is prototype hardware.  Not sure
		 * if this will work for streams, but streams support was
		 * untested on this prototype.
		 */
		if (xhci->quirks & XHCI_RESET_EP_QUIRK &&
				ep_index != (unsigned int) -1 &&
				ctrl_ctx->add_flags - SLOT_FLAG ==
					ctrl_ctx->drop_flags) {
			ep_ring = xhci->devs[slot_id]->eps[ep_index].ring;
			ep_state = xhci->devs[slot_id]->eps[ep_index].ep_state;
			if (!(ep_state & EP_HALTED))
				goto bandwidth_change;
			xhci_dbg(xhci, "Completed config ep cmd - "
					"last ep index = %d, state = %d\n",
					ep_index, ep_state);
			/* Clear internal halted state and restart ring(s) */
			xhci->devs[slot_id]->eps[ep_index].ep_state &=
				~EP_HALTED;
			ring_doorbell_for_active_rings(xhci, slot_id, ep_index);
			break;
		}
bandwidth_change:
		xhci_dbg(xhci, "Completed config ep cmd\n");
		xhci->devs[slot_id]->cmd_status =
			GET_COMP_CODE(event->status);
		complete(&xhci->devs[slot_id]->cmd_completion);
		break;
	case TRB_TYPE(TRB_EVAL_CONTEXT):
		virt_dev = xhci->devs[slot_id];
		if (handle_cmd_in_cmd_wait_list(xhci, virt_dev, event))
			break;
		xhci->devs[slot_id]->cmd_status = GET_COMP_CODE(event->status);
		complete(&xhci->devs[slot_id]->cmd_completion);
		break;
	case TRB_TYPE(TRB_ADDR_DEV):
		xhci->devs[slot_id]->cmd_status = GET_COMP_CODE(event->status);
		complete(&xhci->addr_dev);
		break;
	case TRB_TYPE(TRB_STOP_RING):
		handle_stopped_endpoint(xhci, xhci->cmd_ring->dequeue);
		break;
	case TRB_TYPE(TRB_SET_DEQ):
		handle_set_deq_completion(xhci, event, xhci->cmd_ring->dequeue);
		break;
	case TRB_TYPE(TRB_CMD_NOOP):
		++xhci->noops_handled;
		break;
	case TRB_TYPE(TRB_RESET_EP):
		handle_reset_ep_completion(xhci, event, xhci->cmd_ring->dequeue);
		break;
	case TRB_TYPE(TRB_RESET_DEV):
		xhci_dbg(xhci, "Completed reset device command.\n");
		slot_id = TRB_TO_SLOT_ID(
				xhci->cmd_ring->dequeue->generic.field[3]);
		virt_dev = xhci->devs[slot_id];
		if (virt_dev)
			handle_cmd_in_cmd_wait_list(xhci, virt_dev, event);
		else
			xhci_warn(xhci, "Reset device command completion "
					"for disabled slot %u\n", slot_id);
		break;
	case TRB_TYPE(TRB_NEC_GET_FW):
		if (!(xhci->quirks & XHCI_NEC_HOST)) {
			xhci->error_bitmask |= 1 << 6;
			break;
		}
		xhci_dbg(xhci, "NEC firmware version %2x.%02x\n",
				NEC_FW_MAJOR(event->status),
				NEC_FW_MINOR(event->status));
		break;
	default:
		/* Skip over unknown commands on the event ring */
		xhci->error_bitmask |= 1 << 6;
		break;
	}
	inc_deq(xhci, xhci->cmd_ring, false);
}
static void handle_vendor_event(struct xhci_hcd *xhci,
		union xhci_trb *event)
{
	u32 trb_type;

	trb_type = TRB_FIELD_TO_TYPE(event->generic.field[3]);
	xhci_dbg(xhci, "Vendor specific event TRB type = %u\n", trb_type);
	if (trb_type == TRB_NEC_CMD_COMP && (xhci->quirks & XHCI_NEC_HOST))
		handle_cmd_completion(xhci, &event->event_cmd);
}

static void handle_port_status(struct xhci_hcd *xhci,
		union xhci_trb *event)
{
	u32 port_id;

	/* Port status change events always have a successful completion code */
	if (GET_COMP_CODE(event->generic.field[2]) != COMP_SUCCESS) {
		xhci_warn(xhci, "WARN: xHC returned failed port status event\n");
		xhci->error_bitmask |= 1 << 8;
	}
	/* FIXME: core doesn't care about all port link state changes yet */
	port_id = GET_PORT_ID(event->generic.field[0]);
	xhci_dbg(xhci, "Port Status Change Event for port %d\n", port_id);

	/* Update event ring dequeue pointer before dropping the lock */
	inc_deq(xhci, xhci->event_ring, true);

	spin_unlock(&xhci->lock);
	/* Pass this up to the core */
	usb_hcd_poll_rh_status(xhci_to_hcd(xhci));
	spin_lock(&xhci->lock);
}
/*
 * This TD is defined by the TRBs starting at start_trb in start_seg and ending
 * at end_trb, which may be in another segment. If the suspect DMA address is a
 * TRB in this TD, this function returns that TRB's segment. Otherwise it
 * returns NULL.
 */
struct xhci_segment *trb_in_td(struct xhci_segment *start_seg,
        union xhci_trb *start_trb,
        union xhci_trb *end_trb,
        dma_addr_t suspect_dma)
{
    dma_addr_t start_dma;
    dma_addr_t end_seg_dma;
    dma_addr_t end_trb_dma;
    struct xhci_segment *cur_seg;

    start_dma = xhci_trb_virt_to_dma(start_seg, start_trb);
    cur_seg = start_seg;

    do {
        if (start_dma == 0)
            return NULL;
        /* We may get an event for a Link TRB in the middle of a TD */
        end_seg_dma = xhci_trb_virt_to_dma(cur_seg,
                &cur_seg->trbs[TRBS_PER_SEGMENT - 1]);
        /* If the end TRB isn't in this segment, this is set to 0 */
        end_trb_dma = xhci_trb_virt_to_dma(cur_seg, end_trb);

        if (end_trb_dma > 0) {
            /* The end TRB is in this segment, so suspect should be here */
            if (start_dma <= end_trb_dma) {
                if (suspect_dma >= start_dma && suspect_dma <= end_trb_dma)
                    return cur_seg;
            } else {
                /* Case for one segment with
                 * a TD wrapped around to the top
                 */
                if ((suspect_dma >= start_dma &&
                            suspect_dma <= end_seg_dma) ||
                        (suspect_dma >= cur_seg->dma &&
                         suspect_dma <= end_trb_dma))
                    return cur_seg;
            }
            return NULL;
        } else {
            /* Might still be somewhere in this segment */
            if (suspect_dma >= start_dma && suspect_dma <= end_seg_dma)
                return cur_seg;
        }
        cur_seg = cur_seg->next;
        start_dma = xhci_trb_virt_to_dma(cur_seg, &cur_seg->trbs[0]);
    } while (cur_seg != start_seg);

    return NULL;
}

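/* Mark the endpoint as halted, queue a Reset Endpoint command, and have
 * xhci_cleanup_stalled_ring() move the dequeue pointer past the stalled TD.
 * The command doorbell is rung once both commands are queued.
 */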
static void xhci_cleanup_halted_endpoint(struct xhci_hcd *xhci,
        unsigned int slot_id, unsigned int ep_index,
        unsigned int stream_id,
        struct xhci_td *td, union xhci_trb *event_trb)
{
    struct xhci_virt_ep *ep = &xhci->devs[slot_id]->eps[ep_index];

    ep->ep_state |= EP_HALTED;
    ep->stopped_td = td;
    ep->stopped_trb = event_trb;
    ep->stopped_stream = stream_id;

    xhci_queue_reset_ep(xhci, slot_id, ep_index);
    xhci_cleanup_stalled_ring(xhci, td->urb->dev, ep_index);

    ep->stopped_td = NULL;
    ep->stopped_trb = NULL;
    ep->stopped_stream = 0;

    xhci_ring_cmd_db(xhci);
}

/* Check if an error has halted the endpoint ring. The class driver will
 * cleanup the halt for a non-default control endpoint if we indicate a stall.
 * However, a babble and other errors also halt the endpoint ring, and the class
 * driver won't clear the halt in that case, so we need to issue a Set Transfer
 * Ring Dequeue Pointer command manually.
 */
static int xhci_requires_manual_halt_cleanup(struct xhci_hcd *xhci,
        struct xhci_ep_ctx *ep_ctx,
        unsigned int trb_comp_code)
{
    /* TRB completion codes that may require a manual halt cleanup */
    if (trb_comp_code == COMP_TX_ERR ||
            trb_comp_code == COMP_BABBLE ||
            trb_comp_code == COMP_SPLIT_ERR)
        /* The 0.95 spec says a babbling control endpoint
         * is not halted. The 0.96 spec says it is. Some HW
         * claims to be 0.95 compliant, but it halts the control
         * endpoint anyway. Check if a babble halted the
         * endpoint.
         */
        if ((ep_ctx->ep_info & EP_STATE_MASK) == EP_STATE_HALTED)
            return 1;

    return 0;
}

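/* Completion codes 224 through 255 are reserved for vendor-defined
 * "informational" status; they are logged and then treated exactly like
 * COMP_SUCCESS by the callers.
 */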
int xhci_is_vendor_info_code(struct xhci_hcd *xhci, unsigned int trb_comp_code)
{
    if (trb_comp_code >= 224 && trb_comp_code <= 255) {
        /* Vendor defined "informational" completion code,
         * treat as not-an-error.
         */
        xhci_dbg(xhci, "Vendor defined info completion code %u\n",
                trb_comp_code);
        xhci_dbg(xhci, "Treating code as success.\n");
        return 1;
    }
    return 0;
}

/*
 * Finish processing the TD and remove it from the endpoint's TD list.
 * Returns 1 if the URB can be given back to the USB core.
 */
static int finish_td(struct xhci_hcd *xhci, struct xhci_td *td,
        union xhci_trb *event_trb, struct xhci_transfer_event *event,
        struct xhci_virt_ep *ep, int *status, bool skip)
{
    struct xhci_virt_device *xdev;
    struct xhci_ring *ep_ring;
    unsigned int slot_id;
    int ep_index;
    struct urb *urb = NULL;
    struct xhci_ep_ctx *ep_ctx;
    int ret = 0;
    struct urb_priv *urb_priv;
    u32 trb_comp_code;

    slot_id = TRB_TO_SLOT_ID(event->flags);
    xdev = xhci->devs[slot_id];
    ep_index = TRB_TO_EP_ID(event->flags) - 1;
    ep_ring = xhci_dma_to_transfer_ring(ep, event->buffer);
    ep_ctx = xhci_get_ep_ctx(xhci, xdev->out_ctx, ep_index);
    trb_comp_code = GET_COMP_CODE(event->transfer_len);

    if (skip)
        goto td_cleanup;

    if (trb_comp_code == COMP_STOP_INVAL ||
            trb_comp_code == COMP_STOP) {
        /* The Endpoint Stop Command completion will take care of any
         * stopped TDs. A stopped TD may be restarted, so don't update
         * the ring dequeue pointer or take this TD off any lists yet.
         */
        ep->stopped_td = td;
        ep->stopped_trb = event_trb;
        return 0;
    } else {
        if (trb_comp_code == COMP_STALL) {
            /* The transfer is completed from the driver's
             * perspective, but we need to issue a set dequeue
             * command for this stalled endpoint to move the dequeue
             * pointer past the TD. We can't do that here because
             * the halt condition must be cleared first. Let the
             * USB class driver clear the stall later.
             */
            ep->stopped_td = td;
            ep->stopped_trb = event_trb;
            ep->stopped_stream = ep_ring->stream_id;
        } else if (xhci_requires_manual_halt_cleanup(xhci,
                    ep_ctx, trb_comp_code)) {
            /* Other types of errors halt the endpoint, but the
             * class driver doesn't call usb_reset_endpoint() unless
             * the error is -EPIPE. Clear the halted status in the
             * xHCI hardware manually.
             */
            xhci_cleanup_halted_endpoint(xhci,
                    slot_id, ep_index, ep_ring->stream_id,
                    td, event_trb);
        } else {
            /* Update ring dequeue pointer */
            while (ep_ring->dequeue != td->last_trb)
                inc_deq(xhci, ep_ring, false);
            inc_deq(xhci, ep_ring, false);
        }

td_cleanup:
        /* Clean up the endpoint's TD list */
        urb = td->urb;
        urb_priv = urb->hcpriv;

        /* Do one last check of the actual transfer length.
         * If the host controller said we transferred more data than
         * the buffer length, urb->actual_length will be a very big
         * number (since it's unsigned). Play it safe and say we didn't
         * transfer anything.
         */
        if (urb->actual_length > urb->transfer_buffer_length) {
            xhci_warn(xhci, "URB transfer length is wrong, "
                    "xHC issue? req. len = %u, "
                    "act. len = %u\n",
                    urb->transfer_buffer_length,
                    urb->actual_length);
            urb->actual_length = 0;
            if (td->urb->transfer_flags & URB_SHORT_NOT_OK)
                *status = -EREMOTEIO;
            else
                *status = 0;
        }
        list_del(&td->td_list);
        /* Was this TD slated to be cancelled but completed anyway? */
        if (!list_empty(&td->cancelled_td_list))
            list_del(&td->cancelled_td_list);

        urb_priv->td_cnt++;
        /* Give back the URB when all of its TDs are completed */
        if (urb_priv->td_cnt == urb_priv->length)
            ret = 1;
    }

    return ret;
}

/*
 * Process control tds, update urb status and actual_length.
 */
static int process_ctrl_td(struct xhci_hcd *xhci, struct xhci_td *td,
        union xhci_trb *event_trb, struct xhci_transfer_event *event,
        struct xhci_virt_ep *ep, int *status)
{
    struct xhci_virt_device *xdev;
    struct xhci_ring *ep_ring;
    unsigned int slot_id;
    int ep_index;
    struct xhci_ep_ctx *ep_ctx;
    u32 trb_comp_code;

    slot_id = TRB_TO_SLOT_ID(event->flags);
    xdev = xhci->devs[slot_id];
    ep_index = TRB_TO_EP_ID(event->flags) - 1;
    ep_ring = xhci_dma_to_transfer_ring(ep, event->buffer);
    ep_ctx = xhci_get_ep_ctx(xhci, xdev->out_ctx, ep_index);
    trb_comp_code = GET_COMP_CODE(event->transfer_len);

    xhci_debug_trb(xhci, xhci->event_ring->dequeue);
    switch (trb_comp_code) {
    case COMP_SUCCESS:
        if (event_trb == ep_ring->dequeue) {
            xhci_warn(xhci, "WARN: Success on ctrl setup TRB "
                    "without IOC set??\n");
            *status = -ESHUTDOWN;
        } else if (event_trb != td->last_trb) {
            xhci_warn(xhci, "WARN: Success on ctrl data TRB "
                    "without IOC set??\n");
            *status = -ESHUTDOWN;
        } else {
            xhci_dbg(xhci, "Successful control transfer!\n");
            *status = 0;
        }
        break;
    case COMP_SHORT_TX:
        xhci_warn(xhci, "WARN: short transfer on control ep\n");
        if (td->urb->transfer_flags & URB_SHORT_NOT_OK)
            *status = -EREMOTEIO;
        else
            *status = 0;
        break;
    default:
        if (!xhci_requires_manual_halt_cleanup(xhci,
                    ep_ctx, trb_comp_code))
            break;
        xhci_dbg(xhci, "TRB error code %u, "
                "halted endpoint index = %u\n",
                trb_comp_code, ep_index);
        /* else fall through */
    case COMP_STALL:
        /* Did we transfer part of the data (middle) phase? */
        if (event_trb != ep_ring->dequeue &&
                event_trb != td->last_trb)
            td->urb->actual_length =
                td->urb->transfer_buffer_length
                - TRB_LEN(event->transfer_len);
        else
            td->urb->actual_length = 0;

        xhci_cleanup_halted_endpoint(xhci,
                slot_id, ep_index, 0, td, event_trb);
        return finish_td(xhci, td, event_trb, event, ep, status, true);
    }
    /*
     * Did we transfer any data, despite the errors that might have
     * happened? I.e. did we get past the setup stage?
     */
    if (event_trb != ep_ring->dequeue) {
        /* The event was for the status stage */
        if (event_trb == td->last_trb) {
            if (td->urb->actual_length != 0) {
                /* Don't overwrite a previously set error code */
                if ((*status == -EINPROGRESS || *status == 0) &&
                        (td->urb->transfer_flags
                         & URB_SHORT_NOT_OK))
                    /* Did we already see a short data stage? */
                    *status = -EREMOTEIO;
            } else {
                td->urb->actual_length =
                    td->urb->transfer_buffer_length;
            }
        } else {
            /* Maybe the event was for the data stage? */
            if (trb_comp_code != COMP_STOP_INVAL) {
                /* We didn't stop on a link TRB in the middle */
                td->urb->actual_length =
                    td->urb->transfer_buffer_length -
                    TRB_LEN(event->transfer_len);
                xhci_dbg(xhci, "Waiting for status "
                        "stage event\n");
                return 0;
            }
        }
    }

    return finish_td(xhci, td, event_trb, event, ep, status, false);
}

/*
 * Process isochronous tds, update urb packet status and actual_length.
 */
static int process_isoc_td(struct xhci_hcd *xhci, struct xhci_td *td,
        union xhci_trb *event_trb, struct xhci_transfer_event *event,
        struct xhci_virt_ep *ep, int *status)
{
    struct xhci_ring *ep_ring;
    struct urb_priv *urb_priv;
    int idx;
    int len = 0;
    int skip_td = 0;
    union xhci_trb *cur_trb;
    struct xhci_segment *cur_seg;
    u32 trb_comp_code;

    ep_ring = xhci_dma_to_transfer_ring(ep, event->buffer);
    trb_comp_code = GET_COMP_CODE(event->transfer_len);
    urb_priv = td->urb->hcpriv;
    idx = urb_priv->td_cnt;

    if (ep->skip) {
        /* The transfer is partly done */
        *status = -EXDEV;
        td->urb->iso_frame_desc[idx].status = -EXDEV;
    } else {
        /* handle completion code */
        switch (trb_comp_code) {
        case COMP_SUCCESS:
            td->urb->iso_frame_desc[idx].status = 0;
            xhci_dbg(xhci, "Successful isoc transfer!\n");
            break;
        case COMP_SHORT_TX:
            if (td->urb->transfer_flags & URB_SHORT_NOT_OK)
                td->urb->iso_frame_desc[idx].status =
                    -EREMOTEIO;
            else
                td->urb->iso_frame_desc[idx].status = 0;
            break;
        case COMP_BW_OVER:
            td->urb->iso_frame_desc[idx].status = -ECOMM;
            skip_td = 1;
            break;
        case COMP_BUFF_OVER:
        case COMP_BABBLE:
            td->urb->iso_frame_desc[idx].status = -EOVERFLOW;
            skip_td = 1;
            break;
        case COMP_STALL:
            td->urb->iso_frame_desc[idx].status = -EPROTO;
            skip_td = 1;
            break;
        case COMP_STOP:
        case COMP_STOP_INVAL:
            break;
        default:
            td->urb->iso_frame_desc[idx].status = -1;
            break;
        }
    }

    /* calc actual length */
    if (ep->skip) {
        td->urb->iso_frame_desc[idx].actual_length = 0;
        return finish_td(xhci, td, event_trb, event, ep, status, true);
    }

    if (trb_comp_code == COMP_SUCCESS || skip_td == 1) {
        td->urb->iso_frame_desc[idx].actual_length =
            td->urb->iso_frame_desc[idx].length;
        td->urb->actual_length +=
            td->urb->iso_frame_desc[idx].length;
    } else {
        for (cur_trb = ep_ring->dequeue,
                cur_seg = ep_ring->deq_seg; cur_trb != event_trb;
                next_trb(xhci, ep_ring, &cur_seg, &cur_trb)) {
            if ((cur_trb->generic.field[3] &
                        TRB_TYPE_BITMASK) != TRB_TYPE(TRB_TR_NOOP) &&
                    (cur_trb->generic.field[3] &
                     TRB_TYPE_BITMASK) != TRB_TYPE(TRB_LINK))
                len += TRB_LEN(cur_trb->generic.field[2]);
        }
        len += TRB_LEN(cur_trb->generic.field[2]) -
            TRB_LEN(event->transfer_len);

        if (trb_comp_code != COMP_STOP_INVAL) {
            td->urb->iso_frame_desc[idx].actual_length = len;
            td->urb->actual_length += len;
        }
    }

    if ((idx == urb_priv->length - 1) && *status == -EINPROGRESS)
        *status = 0;

    return finish_td(xhci, td, event_trb, event, ep, status, false);
}

/*
 * Process bulk and interrupt tds, update urb status and actual_length.
 */
static int process_bulk_intr_td(struct xhci_hcd *xhci, struct xhci_td *td,
        union xhci_trb *event_trb, struct xhci_transfer_event *event,
        struct xhci_virt_ep *ep, int *status)
{
    struct xhci_ring *ep_ring;
    union xhci_trb *cur_trb;
    struct xhci_segment *cur_seg;
    u32 trb_comp_code;

    ep_ring = xhci_dma_to_transfer_ring(ep, event->buffer);
    trb_comp_code = GET_COMP_CODE(event->transfer_len);

    switch (trb_comp_code) {
    case COMP_SUCCESS:
        /* Double check that the HW transferred everything. */
        if (event_trb != td->last_trb) {
            xhci_warn(xhci, "WARN Successful completion "
                    "on short TX\n");
            if (td->urb->transfer_flags & URB_SHORT_NOT_OK)
                *status = -EREMOTEIO;
            else
                *status = 0;
        } else {
            if (usb_endpoint_xfer_bulk(&td->urb->ep->desc))
                xhci_dbg(xhci, "Successful bulk transfer!\n");
            else
                xhci_dbg(xhci, "Successful interrupt transfer!\n");
            *status = 0;
        }
        break;
    case COMP_SHORT_TX:
        if (td->urb->transfer_flags & URB_SHORT_NOT_OK)
            *status = -EREMOTEIO;
        else
            *status = 0;
        break;
    default:
        /* Others already handled above */
        break;
    }
    dev_dbg(&td->urb->dev->dev,
            "ep %#x - asked for %d bytes, "
            "%d bytes untransferred\n",
            td->urb->ep->desc.bEndpointAddress,
            td->urb->transfer_buffer_length,
            TRB_LEN(event->transfer_len));
    /* Fast path - was this the last TRB in the TD for this URB? */
    if (event_trb == td->last_trb) {
        if (TRB_LEN(event->transfer_len) != 0) {
            td->urb->actual_length =
                td->urb->transfer_buffer_length -
                TRB_LEN(event->transfer_len);
            if (td->urb->transfer_buffer_length <
                    td->urb->actual_length) {
                xhci_warn(xhci, "HC gave bad length "
                        "of %d bytes left\n",
                        TRB_LEN(event->transfer_len));
                td->urb->actual_length = 0;
                if (td->urb->transfer_flags & URB_SHORT_NOT_OK)
                    *status = -EREMOTEIO;
                else
                    *status = 0;
            }
            /* Don't overwrite a previously set error code */
            if (*status == -EINPROGRESS) {
                if (td->urb->transfer_flags & URB_SHORT_NOT_OK)
                    *status = -EREMOTEIO;
                else
                    *status = 0;
            }
        } else {
            td->urb->actual_length =
                td->urb->transfer_buffer_length;
            /* Ignore a short packet completion if the
             * untransferred length was zero.
             */
            if (*status == -EREMOTEIO)
                *status = 0;
        }
    } else {
        /* Slow path - walk the list, starting from the dequeue
         * pointer, to get the actual length transferred.
         */
        td->urb->actual_length = 0;
        for (cur_trb = ep_ring->dequeue, cur_seg = ep_ring->deq_seg;
                cur_trb != event_trb;
                next_trb(xhci, ep_ring, &cur_seg, &cur_trb)) {
            if ((cur_trb->generic.field[3] &
                        TRB_TYPE_BITMASK) != TRB_TYPE(TRB_TR_NOOP) &&
                    (cur_trb->generic.field[3] &
                     TRB_TYPE_BITMASK) != TRB_TYPE(TRB_LINK))
                td->urb->actual_length +=
                    TRB_LEN(cur_trb->generic.field[2]);
        }
        /* If the ring didn't stop on a Link or No-op TRB, add
         * in the actual bytes transferred from the Normal TRB
         */
        if (trb_comp_code != COMP_STOP_INVAL)
            td->urb->actual_length +=
                TRB_LEN(cur_trb->generic.field[2]) -
                TRB_LEN(event->transfer_len);
    }

    return finish_td(xhci, td, event_trb, event, ep, status, false);
}

/*
 * If this function returns an error condition, it means it got a Transfer
 * event with a corrupted Slot ID, Endpoint ID, or TRB DMA address.
 * At this point, the host controller is probably hosed and should be reset.
 */
static int handle_tx_event(struct xhci_hcd *xhci,
        struct xhci_transfer_event *event)
{
    struct xhci_virt_device *xdev;
    struct xhci_virt_ep *ep;
    struct xhci_ring *ep_ring;
    unsigned int slot_id;
    int ep_index;
    struct xhci_td *td = NULL;
    dma_addr_t event_dma;
    struct xhci_segment *event_seg;
    union xhci_trb *event_trb;
    struct urb *urb = NULL;
    int status = -EINPROGRESS;
    struct urb_priv *urb_priv;
    struct xhci_ep_ctx *ep_ctx;
    u32 trb_comp_code;
    int ret = 0;

    slot_id = TRB_TO_SLOT_ID(event->flags);
    xdev = xhci->devs[slot_id];
    if (!xdev) {
        xhci_err(xhci, "ERROR Transfer event pointed to bad slot\n");
        return -ENODEV;
    }

    /* Endpoint ID is 1 based, our index is zero based */
    ep_index = TRB_TO_EP_ID(event->flags) - 1;
    xhci_dbg(xhci, "%s - ep index = %d\n", __func__, ep_index);
    ep = &xdev->eps[ep_index];
    ep_ring = xhci_dma_to_transfer_ring(ep, event->buffer);
    ep_ctx = xhci_get_ep_ctx(xhci, xdev->out_ctx, ep_index);
    if (!ep_ring ||
            (ep_ctx->ep_info & EP_STATE_MASK) == EP_STATE_DISABLED) {
        xhci_err(xhci, "ERROR Transfer event for disabled endpoint "
                "or incorrect stream ring\n");
        return -ENODEV;
    }

    event_dma = event->buffer;
    trb_comp_code = GET_COMP_CODE(event->transfer_len);
    /* Look for common error cases */
    switch (trb_comp_code) {
    /* Skip codes that require special handling depending on
     * transfer type
     */
    case COMP_SUCCESS:
    case COMP_SHORT_TX:
        break;
    case COMP_STOP:
        xhci_dbg(xhci, "Stopped on Transfer TRB\n");
        break;
    case COMP_STOP_INVAL:
        xhci_dbg(xhci, "Stopped on No-op or Link TRB\n");
        break;
    case COMP_STALL:
        xhci_warn(xhci, "WARN: Stalled endpoint\n");
        ep->ep_state |= EP_HALTED;
        status = -EPIPE;
        break;
    case COMP_TRB_ERR:
        xhci_warn(xhci, "WARN: TRB error on endpoint\n");
        status = -EILSEQ;
        break;
    case COMP_SPLIT_ERR:
    case COMP_TX_ERR:
        xhci_warn(xhci, "WARN: transfer error on endpoint\n");
        status = -EPROTO;
        break;
    case COMP_BABBLE:
        xhci_warn(xhci, "WARN: babble error on endpoint\n");
        status = -EOVERFLOW;
        break;
    case COMP_DB_ERR:
        xhci_warn(xhci, "WARN: HC couldn't access mem fast enough\n");
        status = -ENOSR;
        break;
    case COMP_BW_OVER:
        xhci_warn(xhci, "WARN: bandwidth overrun event on endpoint\n");
        break;
    case COMP_BUFF_OVER:
        xhci_warn(xhci, "WARN: buffer overrun event on endpoint\n");
        break;
    case COMP_UNDERRUN:
        /*
         * When the Isoch ring is empty, the xHC will generate
         * a Ring Overrun Event for IN Isoch endpoint or Ring
         * Underrun Event for OUT Isoch endpoint.
         */
        xhci_dbg(xhci, "underrun event on endpoint\n");
        if (!list_empty(&ep_ring->td_list))
            xhci_dbg(xhci, "Underrun Event for slot %d ep %d "
                    "still with TDs queued?\n",
                    TRB_TO_SLOT_ID(event->flags), ep_index);
        goto cleanup;
    case COMP_OVERRUN:
        xhci_dbg(xhci, "overrun event on endpoint\n");
        if (!list_empty(&ep_ring->td_list))
            xhci_dbg(xhci, "Overrun Event for slot %d ep %d "
                    "still with TDs queued?\n",
                    TRB_TO_SLOT_ID(event->flags), ep_index);
        goto cleanup;
    case COMP_MISSED_INT:
        /*
         * When a missed service error is encountered, the xHC may
         * have skipped one or more isoc TDs. Set the endpoint's
         * skip flag; the missed TDs will be completed as short
         * transfers the next time this ring is processed.
         */
        ep->skip = true;
        xhci_dbg(xhci, "Miss service interval error, set skip flag\n");
        goto cleanup;
    default:
        if (xhci_is_vendor_info_code(xhci, trb_comp_code)) {
            status = 0;
            break;
        }
        xhci_warn(xhci, "ERROR Unknown event condition, HC probably "
                "busted\n");
        goto cleanup;
    }

    do {
        /* This TRB should be in the TD at the head of this ring's
         * TD list.
         */
        if (list_empty(&ep_ring->td_list)) {
            xhci_warn(xhci, "WARN Event TRB for slot %d ep %d "
                    "with no TDs queued?\n",
                    TRB_TO_SLOT_ID(event->flags), ep_index);
            xhci_dbg(xhci, "Event TRB with TRB type ID %u\n",
                    (unsigned int) (event->flags & TRB_TYPE_BITMASK) >> 10);
            xhci_print_trb_offsets(xhci, (union xhci_trb *) event);
            if (ep->skip) {
                ep->skip = false;
                xhci_dbg(xhci, "td_list is empty while skip "
                        "flag set. Clear skip flag.\n");
            }
            ret = 0;
            goto cleanup;
        }

        td = list_entry(ep_ring->td_list.next, struct xhci_td, td_list);
        /* Is this a TRB in the currently executing TD? */
        event_seg = trb_in_td(ep_ring->deq_seg, ep_ring->dequeue,
                td->last_trb, event_dma);
        if (event_seg && ep->skip) {
            xhci_dbg(xhci, "Found td. Clear skip flag.\n");
            ep->skip = false;
        }
        if (!event_seg &&
                (!ep->skip || !usb_endpoint_xfer_isoc(&td->urb->ep->desc))) {
            /* HC is busted, give up! */
            xhci_err(xhci, "ERROR Transfer event TRB DMA ptr not "
                    "part of current TD\n");
            return -ESHUTDOWN;
        }

        if (event_seg) {
            event_trb = &event_seg->trbs[(event_dma -
                    event_seg->dma) / sizeof(*event_trb)];
            /*
             * No-op TRBs should not trigger interrupts.
             * If event_trb is a no-op TRB, it means the
             * corresponding TD has been cancelled. Just ignore
             * the TD.
             */
            if ((event_trb->generic.field[3] & TRB_TYPE_BITMASK)
                    == TRB_TYPE(TRB_TR_NOOP)) {
                xhci_dbg(xhci, "event_trb is a no-op TRB. "
                        "Skip it\n");
                goto cleanup;
            }
        }

        /* Now update the urb's actual_length and give back to
         * the core
         */
        if (usb_endpoint_xfer_control(&td->urb->ep->desc))
            ret = process_ctrl_td(xhci, td, event_trb, event, ep,
                    &status);
        else if (usb_endpoint_xfer_isoc(&td->urb->ep->desc))
            ret = process_isoc_td(xhci, td, event_trb, event, ep,
                    &status);
        else
            ret = process_bulk_intr_td(xhci, td, event_trb, event,
                    ep, &status);
cleanup:
        /*
         * Do not update the event ring dequeue pointer if ep->skip is
         * set; we will roll back to continue processing missed TDs.
         */
        if (trb_comp_code == COMP_MISSED_INT || !ep->skip) {
            inc_deq(xhci, xhci->event_ring, true);
        }

        if (ret) {
            urb = td->urb;
            urb_priv = urb->hcpriv;
            /* Leave the TD around for the reset endpoint function
             * to use (but only if it's not a control endpoint,
             * since we already queued the Set TR dequeue pointer
             * command for stalled control endpoints).
             */
            if (usb_endpoint_xfer_control(&urb->ep->desc) ||
                    (trb_comp_code != COMP_STALL &&
                     trb_comp_code != COMP_BABBLE))
                xhci_urb_free_priv(xhci, urb_priv);

            usb_hcd_unlink_urb_from_ep(xhci_to_hcd(xhci), urb);
            xhci_dbg(xhci, "Giveback URB %p, len = %d, "
                    "status = %d\n",
                    urb, urb->actual_length, status);
            spin_unlock(&xhci->lock);
            usb_hcd_giveback_urb(xhci_to_hcd(xhci), urb, status);
            spin_lock(&xhci->lock);
        }

    /*
     * If ep->skip is set, there are missed TDs on the endpoint ring
     * that still need to be taken care of. Process them as short
     * transfers until we reach the TD pointed to by the event.
     */
    } while (ep->skip && trb_comp_code != COMP_MISSED_INT);

    return 0;
}

/*
 * This function handles all OS-owned events on the event ring. It may drop
 * xhci->lock between event processing (e.g. to pass up port status changes).
 */
static void xhci_handle_event(struct xhci_hcd *xhci)
{
    union xhci_trb *event;
    int update_ptrs = 1;
    int ret;

    xhci_dbg(xhci, "In %s\n", __func__);
    if (!xhci->event_ring || !xhci->event_ring->dequeue) {
        xhci->error_bitmask |= 1 << 1;
        return;
    }

    event = xhci->event_ring->dequeue;
    /* Does the HC or OS own the TRB? */
    if ((event->event_cmd.flags & TRB_CYCLE) !=
            xhci->event_ring->cycle_state) {
        xhci->error_bitmask |= 1 << 2;
        return;
    }
    xhci_dbg(xhci, "%s - OS owns TRB\n", __func__);

    /* FIXME: Handle more event types. */
    switch ((event->event_cmd.flags & TRB_TYPE_BITMASK)) {
    case TRB_TYPE(TRB_COMPLETION):
        xhci_dbg(xhci, "%s - calling handle_cmd_completion\n", __func__);
        handle_cmd_completion(xhci, &event->event_cmd);
        xhci_dbg(xhci, "%s - returned from handle_cmd_completion\n", __func__);
        break;
    case TRB_TYPE(TRB_PORT_STATUS):
        xhci_dbg(xhci, "%s - calling handle_port_status\n", __func__);
        handle_port_status(xhci, event);
        xhci_dbg(xhci, "%s - returned from handle_port_status\n", __func__);
        update_ptrs = 0;
        break;
    case TRB_TYPE(TRB_TRANSFER):
        xhci_dbg(xhci, "%s - calling handle_tx_event\n", __func__);
        ret = handle_tx_event(xhci, &event->trans_event);
        xhci_dbg(xhci, "%s - returned from handle_tx_event\n", __func__);
        if (ret < 0)
            xhci->error_bitmask |= 1 << 9;
        else
            update_ptrs = 0;
        break;
    default:
        if ((event->event_cmd.flags & TRB_TYPE_BITMASK) >= TRB_TYPE(48))
            handle_vendor_event(xhci, event);
        else
            xhci->error_bitmask |= 1 << 3;
    }
    /* Any of the above functions may drop and re-acquire the lock, so check
     * to make sure a watchdog timer didn't mark the host as non-responsive.
     */
    if (xhci->xhc_state & XHCI_STATE_DYING) {
        xhci_dbg(xhci, "xHCI host dying, returning from "
                "event handler.\n");
        return;
    }

    if (update_ptrs)
        /* Update SW event ring dequeue pointer */
        inc_deq(xhci, xhci->event_ring, true);

    /* Are there more items on the event ring? */
    xhci_handle_event(xhci);
}

/*
 * xHCI spec says we can get an interrupt, and if the HC has an error condition,
 * we might get bad data out of the event ring. Section 4.10.2.7 has a list of
 * indicators of an event TRB error, but we check the status *first* to be safe.
 */
irqreturn_t xhci_irq(struct usb_hcd *hcd)
{
    struct xhci_hcd *xhci = hcd_to_xhci(hcd);
    u32 status;
    union xhci_trb *trb;
    u64 temp_64;
    union xhci_trb *event_ring_deq;
    dma_addr_t deq;

    spin_lock(&xhci->lock);
    trb = xhci->event_ring->dequeue;
    /* Check if the xHC generated the interrupt, or the irq is shared */
    status = xhci_readl(xhci, &xhci->op_regs->status);
    if (status == 0xffffffff)
        goto hw_died;

    if (!(status & STS_EINT)) {
        spin_unlock(&xhci->lock);
        xhci_warn(xhci, "Spurious interrupt.\n");
        return IRQ_NONE;
    }
    xhci_dbg(xhci, "op reg status = %08x\n", status);
    xhci_dbg(xhci, "Event ring dequeue ptr:\n");
    xhci_dbg(xhci, "@%llx %08x %08x %08x %08x\n",
            (unsigned long long)
            xhci_trb_virt_to_dma(xhci->event_ring->deq_seg, trb),
            lower_32_bits(trb->link.segment_ptr),
            upper_32_bits(trb->link.segment_ptr),
            (unsigned int) trb->link.intr_target,
            (unsigned int) trb->link.control);

    if (status & STS_FATAL) {
        xhci_warn(xhci, "WARNING: Host System Error\n");
        xhci_halt(xhci);
hw_died:
        xhci_to_hcd(xhci)->state = HC_STATE_HALT;
        spin_unlock(&xhci->lock);
        return -ESHUTDOWN;
    }

    /*
     * Clear the op reg interrupt status first,
     * so we can receive interrupts from other MSI-X interrupters.
     * Write 1 to clear the interrupt status.
     */
    status |= STS_EINT;
    xhci_writel(xhci, status, &xhci->op_regs->status);
    /* FIXME when MSI-X is supported and there are multiple vectors */
    /* Clear the MSI-X event interrupt status */
    if (hcd->irq != -1) {
        u32 irq_pending;
        /* Acknowledge the PCI interrupt */
        irq_pending = xhci_readl(xhci, &xhci->ir_set->irq_pending);
        irq_pending |= 0x3;
        xhci_writel(xhci, irq_pending, &xhci->ir_set->irq_pending);
    }

    if (xhci->xhc_state & XHCI_STATE_DYING) {
        xhci_dbg(xhci, "xHCI dying, ignoring interrupt. "
                "Shouldn't IRQs be disabled?\n");
        /* Clear the event handler busy flag (RW1C);
         * the event ring should be empty.
         */
        temp_64 = xhci_read_64(xhci, &xhci->ir_set->erst_dequeue);
        xhci_write_64(xhci, temp_64 | ERST_EHB,
                &xhci->ir_set->erst_dequeue);
        spin_unlock(&xhci->lock);
        return IRQ_HANDLED;
    }

    event_ring_deq = xhci->event_ring->dequeue;
    /* FIXME this should be a delayed service routine
     * that clears the EHB.
     */
    xhci_handle_event(xhci);

    temp_64 = xhci_read_64(xhci, &xhci->ir_set->erst_dequeue);
    /* If necessary, update the HW's version of the event ring deq ptr. */
    if (event_ring_deq != xhci->event_ring->dequeue) {
        deq = xhci_trb_virt_to_dma(xhci->event_ring->deq_seg,
                xhci->event_ring->dequeue);
        if (deq == 0)
            xhci_warn(xhci, "WARN something wrong with SW event "
                    "ring dequeue ptr.\n");
        /* Update HC event ring dequeue pointer */
        temp_64 &= ERST_PTR_MASK;
        temp_64 |= ((u64) deq & (u64) ~ERST_PTR_MASK);
    }

    /* Clear the event handler busy flag (RW1C); event ring is empty. */
    temp_64 |= ERST_EHB;
    xhci_write_64(xhci, temp_64, &xhci->ir_set->erst_dequeue);

    spin_unlock(&xhci->lock);
    return IRQ_HANDLED;
}

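/* MSI interrupts are not shared with other devices, so there is no
 * spurious-interrupt concern here; just note that an IRQ was seen and run
 * the main handler.
 */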
irqreturn_t xhci_msi_irq(int irq, struct usb_hcd *hcd)
{
    irqreturn_t ret;

    set_bit(HCD_FLAG_SAW_IRQ, &hcd->flags);
    ret = xhci_irq(hcd);

    return ret;
}

/****		Endpoint Ring Operations	****/

/*
 * Generic function for queueing a TRB on a ring.
 * The caller must have checked to make sure there's room on the ring.
 *
 * @more_trbs_coming: Will you enqueue more TRBs before calling
 *                    prepare_transfer()?
 */
static void queue_trb(struct xhci_hcd *xhci, struct xhci_ring *ring,
        bool consumer, bool more_trbs_coming,
        u32 field1, u32 field2, u32 field3, u32 field4)
{
    struct xhci_generic_trb *trb;

    trb = &ring->enqueue->generic;
    trb->field[0] = field1;
    trb->field[1] = field2;
    trb->field[2] = field3;
    trb->field[3] = field4;
    inc_enq(xhci, ring, consumer, more_trbs_coming);
}

/*
 * Does various checks on the endpoint ring, and makes it ready to queue
 * num_trbs. FIXME allocate segments if the ring is full.
 */
static int prepare_ring(struct xhci_hcd *xhci, struct xhci_ring *ep_ring,
        u32 ep_state, unsigned int num_trbs, gfp_t mem_flags)
{
    /* Make sure the endpoint has been added to xHC schedule */
    xhci_dbg(xhci, "Endpoint state = 0x%x\n", ep_state);
    switch (ep_state) {
    case EP_STATE_DISABLED:
        /*
         * USB core changed config/interfaces without notifying us,
         * or hardware is reporting the wrong state.
         */
        xhci_warn(xhci, "WARN urb submitted to disabled ep\n");
        return -ENOENT;
    case EP_STATE_ERROR:
        xhci_warn(xhci, "WARN waiting for error on ep to be cleared\n");
        /* FIXME event handling code for error needs to clear it */
        /* XXX not sure if this should be -ENOENT or not */
        return -EINVAL;
    case EP_STATE_HALTED:
        xhci_dbg(xhci, "WARN halted endpoint, queueing URB anyway.\n");
        /* fall through */
    case EP_STATE_STOPPED:
    case EP_STATE_RUNNING:
        break;
    default:
        xhci_err(xhci, "ERROR unknown endpoint state for ep\n");
        /*
         * FIXME issue Configure Endpoint command to try to get the HC
         * back into a known state.
         */
        return -EINVAL;
    }
    if (!room_on_ring(xhci, ep_ring, num_trbs)) {
        /* FIXME allocate more room */
        xhci_err(xhci, "ERROR no room on ep ring\n");
        return -ENOMEM;
    }

    if (enqueue_is_link_trb(ep_ring)) {
        struct xhci_ring *ring = ep_ring;
        union xhci_trb *next;

        xhci_dbg(xhci, "prepare_ring: pointing to link trb\n");
        next = ring->enqueue;

        while (last_trb(xhci, ring, ring->enq_seg, next)) {
            /* If we're not dealing with 0.95 hardware,
             * clear the chain bit.
             */
            if (!xhci_link_trb_quirk(xhci))
                next->link.control &= ~TRB_CHAIN;
            else
                next->link.control |= TRB_CHAIN;

            wmb();
            next->link.control ^= (u32) TRB_CYCLE;

            /* Toggle the cycle bit after the last ring segment. */
            if (last_trb_on_last_seg(xhci, ring, ring->enq_seg, next)) {
                ring->cycle_state = (ring->cycle_state ? 0 : 1);
                if (!in_interrupt()) {
                    xhci_dbg(xhci, "queue_trb: Toggle cycle "
                            "state for ring %p = %i\n",
                            ring, (unsigned int)ring->cycle_state);
                }
            }
            ring->enq_seg = ring->enq_seg->next;
            ring->enqueue = ring->enq_seg->trbs;
            next = ring->enqueue;
        }
    }

    return 0;
}

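/* Look up the stream ring for this transfer, verify it has room for
 * num_trbs, link the URB into the endpoint on its first TD, and record
 * where this TD starts on the ring.
 */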
static int prepare_transfer(struct xhci_hcd *xhci,
        struct xhci_virt_device *xdev,
        unsigned int ep_index,
        unsigned int stream_id,
        unsigned int num_trbs,
        struct urb *urb,
        unsigned int td_index,
        gfp_t mem_flags)
{
    int ret;
    struct urb_priv *urb_priv;
    struct xhci_td *td;
    struct xhci_ring *ep_ring;
    struct xhci_ep_ctx *ep_ctx = xhci_get_ep_ctx(xhci, xdev->out_ctx, ep_index);

    ep_ring = xhci_stream_id_to_ring(xdev, ep_index, stream_id);
    if (!ep_ring) {
        xhci_dbg(xhci, "Can't prepare ring for bad stream ID %u\n",
                stream_id);
        return -EINVAL;
    }

    ret = prepare_ring(xhci, ep_ring,
            ep_ctx->ep_info & EP_STATE_MASK,
            num_trbs, mem_flags);
    if (ret)
        return ret;

    urb_priv = urb->hcpriv;
    td = urb_priv->td[td_index];

    INIT_LIST_HEAD(&td->td_list);
    INIT_LIST_HEAD(&td->cancelled_td_list);

    if (td_index == 0) {
        ret = usb_hcd_link_urb_to_ep(xhci_to_hcd(xhci), urb);
        if (unlikely(ret)) {
            xhci_urb_free_priv(xhci, urb_priv);
            urb->hcpriv = NULL;
            return ret;
        }
    }

    td->urb = urb;
    /* Add this TD to the tail of the endpoint ring's TD list */
    list_add_tail(&td->td_list, &ep_ring->td_list);
    td->start_seg = ep_ring->enq_seg;
    td->first_trb = ep_ring->enqueue;

    urb_priv->td[td_index] = td;

    return 0;
}

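/* Count the TRBs a scatter-gather URB needs. A single TRB buffer may not
 * cross a 64KB boundary, so each sg entry costs one TRB per 64KB-aligned
 * chunk it spans; counting stops once the requested transfer length is
 * covered.
 */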
static unsigned int count_sg_trbs_needed(struct xhci_hcd *xhci, struct urb *urb)
{
    int num_sgs, num_trbs, running_total, temp, i;
    struct scatterlist *sg;

    sg = NULL;
    num_sgs = urb->num_sgs;
    temp = urb->transfer_buffer_length;

    xhci_dbg(xhci, "count sg list trbs: \n");
    num_trbs = 0;
    for_each_sg(urb->sg, sg, num_sgs, i) {
        unsigned int previous_total_trbs = num_trbs;
        unsigned int len = sg_dma_len(sg);

        /* Scatter gather list entries may cross 64KB boundaries */
        running_total = TRB_MAX_BUFF_SIZE -
            (sg_dma_address(sg) & ((1 << TRB_MAX_BUFF_SHIFT) - 1));
        if (running_total != 0)
            num_trbs++;

        /* How many more 64KB chunks to transfer, how many more TRBs? */
        while (running_total < sg_dma_len(sg)) {
            num_trbs++;
            running_total += TRB_MAX_BUFF_SIZE;
        }
        xhci_dbg(xhci, " sg #%d: dma = %#llx, len = %#x (%d), num_trbs = %d\n",
                i, (unsigned long long)sg_dma_address(sg),
                len, len, num_trbs - previous_total_trbs);

        len = min_t(int, len, temp);
        temp -= len;
        if (temp == 0)
            break;
    }
    xhci_dbg(xhci, "\n");
    if (!in_interrupt())
        dev_dbg(&urb->dev->dev, "ep %#x - urb len = %d, sglist used, num_trbs = %d\n",
                urb->ep->desc.bEndpointAddress,
                urb->transfer_buffer_length,
                num_trbs);
    return num_trbs;
}

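/* Sanity check after queueing: every counted TRB should have been consumed,
 * and the byte total queued should match the URB's transfer length.
 */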
static void check_trb_math(struct urb *urb, int num_trbs, int running_total)
{
    if (num_trbs != 0)
        dev_dbg(&urb->dev->dev, "%s - ep %#x - Miscalculated number of "
                "TRBs, %d left\n", __func__,
                urb->ep->desc.bEndpointAddress, num_trbs);
    if (running_total != urb->transfer_buffer_length)
        dev_dbg(&urb->dev->dev, "%s - ep %#x - Miscalculated tx length, "
                "queued %#x (%d), asked for %#x (%d)\n",
                __func__,
                urb->ep->desc.bEndpointAddress,
                running_total, running_total,
                urb->transfer_buffer_length,
                urb->transfer_buffer_length);
}

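/* Hand a fully built TD to the hardware: writing the saved cycle bit into
 * the first TRB is what actually transfers ownership of the whole chain,
 * after which the endpoint doorbell is rung.
 */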
static void giveback_first_trb(struct xhci_hcd *xhci, int slot_id,
        unsigned int ep_index, unsigned int stream_id, int start_cycle,
        struct xhci_generic_trb *start_trb, struct xhci_td *td)
{
    /*
     * Pass all the TRBs to the hardware at once and make sure this write
     * isn't reordered.
     */
    wmb();
    start_trb->field[3] |= start_cycle;
    ring_ep_doorbell(xhci, slot_id, ep_index, stream_id);
}

/*
 * xHCI uses normal TRBs for both bulk and interrupt. When the interrupt
 * endpoint is to be serviced, the xHC will consume (at most) one TD. A TD
 * (comprised of sg list entries) can take several service intervals to
 * transmit.
 */
int xhci_queue_intr_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
        struct urb *urb, int slot_id, unsigned int ep_index)
{
    struct xhci_ep_ctx *ep_ctx = xhci_get_ep_ctx(xhci,
            xhci->devs[slot_id]->out_ctx, ep_index);
    int xhci_interval;
    int ep_interval;

    xhci_interval = EP_INTERVAL_TO_UFRAMES(ep_ctx->ep_info);
    ep_interval = urb->interval;
    /* Convert to microframes */
    if (urb->dev->speed == USB_SPEED_LOW ||
            urb->dev->speed == USB_SPEED_FULL)
        ep_interval *= 8;
    /* FIXME change this to a warning and a suggestion to use the new API
     * to set the polling interval (once the API is added).
     */
    if (xhci_interval != ep_interval) {
        if (printk_ratelimit())
            dev_dbg(&urb->dev->dev, "Driver uses different interval"
                    " (%d microframe%s) than xHCI "
                    "(%d microframe%s)\n",
                    ep_interval,
                    ep_interval == 1 ? "" : "s",
                    xhci_interval,
                    xhci_interval == 1 ? "" : "s");
        urb->interval = xhci_interval;
        /* Convert back to frames for LS/FS devices */
        if (urb->dev->speed == USB_SPEED_LOW ||
                urb->dev->speed == USB_SPEED_FULL)
            urb->interval /= 8;
    }
    return xhci_queue_bulk_tx(xhci, GFP_ATOMIC, urb, slot_id, ep_index);
}

/*
 * The TD size is the number of bytes remaining in the TD (including this TRB),
 * right shifted by 10.
 * It must fit in bits 21:17, so it can't be bigger than 31.
 */
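/* For example (illustrative values, not from the spec): with 70000 bytes
 * left in the TD, 70000 >> 10 = 68 exceeds the 5-bit maximum of 31, so the
 * field saturates to 31 << 17; with 4096 bytes left, the field is
 * (4096 >> 10) << 17 = 4 << 17.
 */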
static u32 xhci_td_remainder(unsigned int remainder)
{
    u32 max = (1 << (21 - 17 + 1)) - 1;

    if ((remainder >> 10) >= max)
        return max << 17;
    else
        return (remainder >> 10) << 17;
}

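/* Queue a bulk transfer described by a scatter-gather list: each sg entry
 * is split on 64KB boundaries into chained Normal TRBs, and the first TRB's
 * cycle bit is held back until the whole TD is on the ring.
 */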
static int queue_bulk_sg_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
        struct urb *urb, int slot_id, unsigned int ep_index)
{
    struct xhci_ring *ep_ring;
    unsigned int num_trbs;
    struct urb_priv *urb_priv;
    struct xhci_td *td;
    struct scatterlist *sg;
    int num_sgs;
    int trb_buff_len, this_sg_len, running_total;
    bool first_trb;
    u64 addr;
    bool more_trbs_coming;
    struct xhci_generic_trb *start_trb;
    int start_cycle;

    ep_ring = xhci_urb_to_transfer_ring(xhci, urb);
    if (!ep_ring)
        return -EINVAL;

    num_trbs = count_sg_trbs_needed(xhci, urb);
    num_sgs = urb->num_sgs;

    trb_buff_len = prepare_transfer(xhci, xhci->devs[slot_id],
            ep_index, urb->stream_id,
            num_trbs, urb, 0, mem_flags);
    if (trb_buff_len < 0)
        return trb_buff_len;

    urb_priv = urb->hcpriv;
    td = urb_priv->td[0];

    /*
     * Don't give the first TRB to the hardware (by toggling the cycle bit)
     * until we've finished creating all the other TRBs. The ring's cycle
     * state may change as we enqueue the other TRBs, so save it too.
     */
    start_trb = &ep_ring->enqueue->generic;
    start_cycle = ep_ring->cycle_state;

    running_total = 0;
    /*
     * How much data is in the first TRB?
     *
     * There are three forces at work for TRB buffer pointers and lengths:
     * 1. We don't want to walk off the end of this sg-list entry buffer.
     * 2. The transfer length that the driver requested may be smaller than
     *    the amount of memory allocated for this scatter-gather list.
     * 3. TRBs buffers can't cross 64KB boundaries.
     */
    sg = urb->sg;
    addr = (u64) sg_dma_address(sg);
    this_sg_len = sg_dma_len(sg);
    trb_buff_len = TRB_MAX_BUFF_SIZE -
        (addr & ((1 << TRB_MAX_BUFF_SHIFT) - 1));
    trb_buff_len = min_t(int, trb_buff_len, this_sg_len);
    if (trb_buff_len > urb->transfer_buffer_length)
        trb_buff_len = urb->transfer_buffer_length;
    xhci_dbg(xhci, "First length to xfer from 1st sglist entry = %u\n",
            trb_buff_len);

    first_trb = true;
    /* Queue the first TRB, even if it's zero-length */
    do {
        u32 field = 0;
        u32 length_field = 0;
        u32 remainder = 0;

        /* Don't change the cycle bit of the first TRB until later */
        if (first_trb)
            first_trb = false;
        else
            field |= ep_ring->cycle_state;

        /* Chain all the TRBs together; clear the chain bit in the last
         * TRB to indicate it's the last TRB in the chain.
         */
        if (num_trbs > 1) {
            field |= TRB_CHAIN;
        } else {
            /* FIXME - add check for ZERO_PACKET flag before this */
            td->last_trb = ep_ring->enqueue;
            field |= TRB_IOC;
        }
        xhci_dbg(xhci, " sg entry: dma = %#x, len = %#x (%d), "
                "64KB boundary at %#x, end dma = %#x\n",
                (unsigned int) addr, trb_buff_len, trb_buff_len,
                (unsigned int) (addr + TRB_MAX_BUFF_SIZE) & ~(TRB_MAX_BUFF_SIZE - 1),
                (unsigned int) addr + trb_buff_len);
        if (TRB_MAX_BUFF_SIZE -
                (addr & ((1 << TRB_MAX_BUFF_SHIFT) - 1)) < trb_buff_len) {
            xhci_warn(xhci, "WARN: sg dma xfer crosses 64KB boundaries!\n");
            xhci_dbg(xhci, "Next boundary at %#x, end dma = %#x\n",
                    (unsigned int) (addr + TRB_MAX_BUFF_SIZE) & ~(TRB_MAX_BUFF_SIZE - 1),
                    (unsigned int) addr + trb_buff_len);
        }
        remainder = xhci_td_remainder(urb->transfer_buffer_length -
                running_total);
        length_field = TRB_LEN(trb_buff_len) |
            remainder |
            TRB_INTR_TARGET(0);
        if (num_trbs > 1)
            more_trbs_coming = true;
        else
            more_trbs_coming = false;
        queue_trb(xhci, ep_ring, false, more_trbs_coming,
                lower_32_bits(addr),
                upper_32_bits(addr),
                length_field,
                /* We always want to know if the TRB was short,
                 * or we won't get an event when it completes.
                 * (Unless we use event data TRBs, which are a
                 * waste of space and HC resources.)
                 */
                field | TRB_ISP | TRB_TYPE(TRB_NORMAL));
        --num_trbs;
        running_total += trb_buff_len;

        /* Calculate length for next transfer --
         * Are we done queueing all the TRBs for this sg entry?
         */
        this_sg_len -= trb_buff_len;
        if (this_sg_len == 0) {
            --num_sgs;
            if (num_sgs == 0)
                break;
            sg = sg_next(sg);
            addr = (u64) sg_dma_address(sg);
            this_sg_len = sg_dma_len(sg);
        } else {
            addr += trb_buff_len;
        }

        trb_buff_len = TRB_MAX_BUFF_SIZE -
            (addr & ((1 << TRB_MAX_BUFF_SHIFT) - 1));
        trb_buff_len = min_t(int, trb_buff_len, this_sg_len);
        if (running_total + trb_buff_len > urb->transfer_buffer_length)
            trb_buff_len =
                urb->transfer_buffer_length - running_total;
    } while (running_total < urb->transfer_buffer_length);

    check_trb_math(urb, num_trbs, running_total);
    giveback_first_trb(xhci, slot_id, ep_index, urb->stream_id,
            start_cycle, start_trb, td);
    return 0;
}

/* This is very similar to what ehci-q.c qtd_fill() does */
int xhci_queue_bulk_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
        struct urb *urb, int slot_id, unsigned int ep_index)
{
    struct xhci_ring *ep_ring;
    struct urb_priv *urb_priv;
    struct xhci_td *td;
    int num_trbs;
    struct xhci_generic_trb *start_trb;
    bool first_trb;
    bool more_trbs_coming;
    int start_cycle;
    u32 field, length_field;
    int running_total, trb_buff_len, ret;
    u64 addr;

    if (urb->num_sgs)
        return queue_bulk_sg_tx(xhci, mem_flags, urb, slot_id, ep_index);

    ep_ring = xhci_urb_to_transfer_ring(xhci, urb);
    if (!ep_ring)
        return -EINVAL;

    num_trbs = 0;
    /* How much data is (potentially) left before the 64KB boundary? */
    running_total = TRB_MAX_BUFF_SIZE -
        (urb->transfer_dma & ((1 << TRB_MAX_BUFF_SHIFT) - 1));

    /* If there's some data on this 64KB chunk, or we have to send a
     * zero-length transfer, we need at least one TRB
     */
    if (running_total != 0 || urb->transfer_buffer_length == 0)
        num_trbs++;
    /* How many more 64KB chunks to transfer, how many more TRBs? */
    while (running_total < urb->transfer_buffer_length) {
        num_trbs++;
        running_total += TRB_MAX_BUFF_SIZE;
    }
    /* FIXME: this doesn't deal with URB_ZERO_PACKET - need one more */

    if (!in_interrupt())
        dev_dbg(&urb->dev->dev, "ep %#x - urb len = %#x (%d), addr = %#llx, num_trbs = %d\n",
                urb->ep->desc.bEndpointAddress,
                urb->transfer_buffer_length,
                urb->transfer_buffer_length,
                (unsigned long long)urb->transfer_dma,
                num_trbs);

    ret = prepare_transfer(xhci, xhci->devs[slot_id],
            ep_index, urb->stream_id,
            num_trbs, urb, 0, mem_flags);
    if (ret < 0)
        return ret;

    urb_priv = urb->hcpriv;
    td = urb_priv->td[0];

    /*
     * Don't give the first TRB to the hardware (by toggling the cycle bit)
     * until we've finished creating all the other TRBs. The ring's cycle
     * state may change as we enqueue the other TRBs, so save it too.
     */
    start_trb = &ep_ring->enqueue->generic;
    start_cycle = ep_ring->cycle_state;

    running_total = 0;
    /* How much data is in the first TRB? */
    addr = (u64) urb->transfer_dma;
    trb_buff_len = TRB_MAX_BUFF_SIZE -
        (urb->transfer_dma & ((1 << TRB_MAX_BUFF_SHIFT) - 1));
    if (urb->transfer_buffer_length < trb_buff_len)
        trb_buff_len = urb->transfer_buffer_length;

    first_trb = true;

    /* Queue the first TRB, even if it's zero-length */
    do {
        u32 remainder = 0;
        field = 0;

        /* Don't change the cycle bit of the first TRB until later */
        if (first_trb)
            first_trb = false;
        else
            field |= ep_ring->cycle_state;

        /* Chain all the TRBs together; clear the chain bit in the last
         * TRB to indicate it's the last TRB in the chain.
         */
        if (num_trbs > 1) {
            field |= TRB_CHAIN;
        } else {
            /* FIXME - add check for ZERO_PACKET flag before this */
            td->last_trb = ep_ring->enqueue;
            field |= TRB_IOC;
        }
        remainder = xhci_td_remainder(urb->transfer_buffer_length -
                running_total);
        length_field = TRB_LEN(trb_buff_len) |
            remainder |
            TRB_INTR_TARGET(0);
        if (num_trbs > 1)
            more_trbs_coming = true;
        else
            more_trbs_coming = false;
        queue_trb(xhci, ep_ring, false, more_trbs_coming,
                lower_32_bits(addr),
                upper_32_bits(addr),
                length_field,
                /* We always want to know if the TRB was short,
                 * or we won't get an event when it completes.
                 * (Unless we use event data TRBs, which are a
                 * waste of space and HC resources.)
                 */
                field | TRB_ISP | TRB_TYPE(TRB_NORMAL));
        --num_trbs;
        running_total += trb_buff_len;

        /* Calculate length for next transfer */
        addr += trb_buff_len;
        trb_buff_len = urb->transfer_buffer_length - running_total;
        if (trb_buff_len > TRB_MAX_BUFF_SIZE)
            trb_buff_len = TRB_MAX_BUFF_SIZE;
    } while (running_total < urb->transfer_buffer_length);

    check_trb_math(urb, num_trbs, running_total);
    giveback_first_trb(xhci, slot_id, ep_index, urb->stream_id,
            start_cycle, start_trb, td);
    return 0;
}

/* Caller must have locked xhci->lock */
int xhci_queue_ctrl_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
        struct urb *urb, int slot_id, unsigned int ep_index)
{
    struct xhci_ring *ep_ring;
    int num_trbs;
    int ret;
    struct usb_ctrlrequest *setup;
    struct xhci_generic_trb *start_trb;
    int start_cycle;
    u32 field, length_field;
    struct urb_priv *urb_priv;
    struct xhci_td *td;

    ep_ring = xhci_urb_to_transfer_ring(xhci, urb);
    if (!ep_ring)
        return -EINVAL;

    /*
     * Need to copy setup packet into setup TRB, so we can't use the setup
     * DMA address.
     */
    if (!urb->setup_packet)
        return -EINVAL;

    if (!in_interrupt())
        xhci_dbg(xhci, "Queueing ctrl tx for slot id %d, ep %d\n",
                slot_id, ep_index);
    /* 1 TRB for setup, 1 for status */
    num_trbs = 2;
    /*
     * Don't need to check if we need additional event data and normal TRBs,
     * since data in control transfers will never get bigger than 16MB
     * XXX: can we get a buffer that crosses 64KB boundaries?
     */
    if (urb->transfer_buffer_length > 0)
        num_trbs++;
    ret = prepare_transfer(xhci, xhci->devs[slot_id],
            ep_index, urb->stream_id,
            num_trbs, urb, 0, mem_flags);
    if (ret < 0)
        return ret;

    urb_priv = urb->hcpriv;
    td = urb_priv->td[0];

    /*
     * Don't give the first TRB to the hardware (by toggling the cycle bit)
     * until we've finished creating all the other TRBs. The ring's cycle
     * state may change as we enqueue the other TRBs, so save it too.
     */
    start_trb = &ep_ring->enqueue->generic;
    start_cycle = ep_ring->cycle_state;

    /* Queue setup TRB - see section 6.4.1.2.1 */
    /* FIXME better way to translate setup_packet into two u32 fields? */
    setup = (struct usb_ctrlrequest *) urb->setup_packet;
    queue_trb(xhci, ep_ring, false, true,
            /* FIXME endianness is probably going to bite my ass here. */
            setup->bRequestType | setup->bRequest << 8 | setup->wValue << 16,
            setup->wIndex | setup->wLength << 16,
            TRB_LEN(8) | TRB_INTR_TARGET(0),
            /* Immediate data in pointer */
            TRB_IDT | TRB_TYPE(TRB_SETUP));

    /* If there's data, queue data TRBs */
    field = 0;
    length_field = TRB_LEN(urb->transfer_buffer_length) |
        xhci_td_remainder(urb->transfer_buffer_length) |
        TRB_INTR_TARGET(0);
    if (urb->transfer_buffer_length > 0) {
        if (setup->bRequestType & USB_DIR_IN)
            field |= TRB_DIR_IN;
        queue_trb(xhci, ep_ring, false, true,
                lower_32_bits(urb->transfer_dma),
                upper_32_bits(urb->transfer_dma),
                length_field,
                /* Event on short tx */
                field | TRB_ISP | TRB_TYPE(TRB_DATA) | ep_ring->cycle_state);
    }

    /* Save the DMA address of the last TRB in the TD */
    td->last_trb = ep_ring->enqueue;

    /* Queue status TRB - see Table 7 and sections 4.11.2.2 and 6.4.1.2.3 */
    /* If the device sent data, the status stage is an OUT transfer */
    if (urb->transfer_buffer_length > 0 && setup->bRequestType & USB_DIR_IN)
        field = 0;
    else
        field = TRB_DIR_IN;
    queue_trb(xhci, ep_ring, false, false,
            0,
            0,
            TRB_INTR_TARGET(0),
            /* Event on completion */
            field | TRB_IOC | TRB_TYPE(TRB_STATUS) | ep_ring->cycle_state);

    giveback_first_trb(xhci, slot_id, ep_index, 0,
            start_cycle, start_trb, td);
    return 0;
}

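/* Count the TRBs needed for one isoc frame descriptor. As in the sg case,
 * each 64KB boundary crossing inside the frame's buffer costs another TRB.
 */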
static int count_isoc_trbs_needed(struct xhci_hcd *xhci,
        struct urb *urb, int i)
{
    int num_trbs = 0;
    u64 addr, td_len, running_total;

    addr = (u64) (urb->transfer_dma + urb->iso_frame_desc[i].offset);
    td_len = urb->iso_frame_desc[i].length;

    running_total = TRB_MAX_BUFF_SIZE -
        (addr & ((1 << TRB_MAX_BUFF_SHIFT) - 1));
    if (running_total != 0)
        num_trbs++;

    while (running_total < td_len) {
        num_trbs++;
        running_total += TRB_MAX_BUFF_SIZE;
    }

    return num_trbs;
}

/* This is for isoc transfer */
static int xhci_queue_isoc_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
                struct urb *urb, int slot_id, unsigned int ep_index)
{
        struct xhci_ring *ep_ring;
        struct urb_priv *urb_priv;
        struct xhci_td *td;
        int num_tds, trbs_per_td;
        struct xhci_generic_trb *start_trb;
        bool first_trb;
        int start_cycle;
        u32 field, length_field;
        int running_total, trb_buff_len, td_len, td_remain_len, ret;
        u64 start_addr, addr;
        int i, j;

        ep_ring = xhci->devs[slot_id]->eps[ep_index].ring;

        num_tds = urb->number_of_packets;
        if (num_tds < 1) {
                xhci_dbg(xhci, "Isoc URB with zero packets?\n");
                return -EINVAL;
        }

        if (!in_interrupt())
                dev_dbg(&urb->dev->dev, "ep %#x - urb len = %#x (%d),"
                                " addr = %#llx, num_tds = %d\n",
                                urb->ep->desc.bEndpointAddress,
                                urb->transfer_buffer_length,
                                urb->transfer_buffer_length,
                                (unsigned long long)urb->transfer_dma,
                                num_tds);

        start_addr = (u64) urb->transfer_dma;
        start_trb = &ep_ring->enqueue->generic;
        start_cycle = ep_ring->cycle_state;

        /* Queue the first TRB, even if it's zero-length */
        for (i = 0; i < num_tds; i++) {
                first_trb = true;
                running_total = 0;

                addr = start_addr + urb->iso_frame_desc[i].offset;
                td_len = urb->iso_frame_desc[i].length;
                td_remain_len = td_len;

                trbs_per_td = count_isoc_trbs_needed(xhci, urb, i);

                ret = prepare_transfer(xhci, xhci->devs[slot_id], ep_index,
                                urb->stream_id, trbs_per_td, urb, i, mem_flags);
                if (ret < 0)
                        return ret;

                urb_priv = urb->hcpriv;
                td = urb_priv->td[i];

                for (j = 0; j < trbs_per_td; j++) {
                        u32 remainder = 0;
                        field = 0;

                        if (first_trb) {
                                /* Queue the isoc TRB */
                                field |= TRB_TYPE(TRB_ISOC);
                                /* Assume URB_ISO_ASAP is set */
                                field |= TRB_SIA;
                                if (i > 0)
                                        field |= ep_ring->cycle_state;
                                first_trb = false;
                        } else {
                                /* Queue other normal TRBs */
                                field |= TRB_TYPE(TRB_NORMAL);
                                field |= ep_ring->cycle_state;
                        }
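                        /*
                         * The very first TRB of the whole URB (i == 0, j == 0)
                         * is deliberately queued without its cycle bit, so the
                         * HC ignores it; it only becomes valid when
                         * "start_trb->field[3] |= start_cycle" runs below,
                         * after every TD has been queued.
                         */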
                        /* Chain all the TRBs together; clear the chain bit in
                         * the last TRB to indicate it's the last TRB in the
                         * chain.
                         */
                        if (j < trbs_per_td - 1) {
                                field |= TRB_CHAIN;
                        } else {
                                td->last_trb = ep_ring->enqueue;
                                field |= TRB_IOC;
                        }

                        /* Calculate TRB length */
                        trb_buff_len = TRB_MAX_BUFF_SIZE -
                                (addr & ((1 << TRB_MAX_BUFF_SHIFT) - 1));
                        if (trb_buff_len > td_remain_len)
                                trb_buff_len = td_remain_len;

                        remainder = xhci_td_remainder(td_len - running_total);
                        length_field = TRB_LEN(trb_buff_len) |
                                remainder |
                                TRB_INTR_TARGET(0);

                        queue_trb(xhci, ep_ring, false, false,
                                lower_32_bits(addr),
                                upper_32_bits(addr),
                                length_field,
                                /* We always want to know if the TRB was short,
                                 * or we won't get an event when it completes.
                                 * (Unless we use event data TRBs, which are a
                                 * waste of space and HC resources.)
                                 */
                                field | TRB_ISP);

                        running_total += trb_buff_len;
                        addr += trb_buff_len;
                        td_remain_len -= trb_buff_len;
                }

                /* Check TD length */
                if (running_total != td_len) {
                        xhci_err(xhci, "ISOC TD length mismatch\n");
                        return -EINVAL;
                }
        }
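        /*
         * Make sure all the TRB writes above have reached memory before the
         * first TRB's cycle bit is flipped; once it is, the HC may fetch the
         * whole chain at any time.
         */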
        wmb();
        start_trb->field[3] |= start_cycle;

        ring_ep_doorbell(xhci, slot_id, ep_index, urb->stream_id);
        return 0;
}
/*
 * Check the transfer ring to guarantee there is enough room for the urb.
 * Update the ISO URB's start_frame and interval.
 * Update the interval as xhci_queue_intr_tx does.  For now, just use the
 * xhci frame_index to set urb->start_frame.
 * Always assume URB_ISO_ASAP is set, and NEVER use urb->start_frame as input.
 */
int xhci_queue_isoc_tx_prepare(struct xhci_hcd *xhci, gfp_t mem_flags,
                struct urb *urb, int slot_id, unsigned int ep_index)
{
        struct xhci_virt_device *xdev;
        struct xhci_ring *ep_ring;
        struct xhci_ep_ctx *ep_ctx;
        int start_frame;
        int xhci_interval;
        int ep_interval;
        int num_tds, num_trbs, i;
        int ret;

        xdev = xhci->devs[slot_id];
        ep_ring = xdev->eps[ep_index].ring;
        ep_ctx = xhci_get_ep_ctx(xhci, xdev->out_ctx, ep_index);

        num_trbs = 0;
        num_tds = urb->number_of_packets;
        for (i = 0; i < num_tds; i++)
                num_trbs += count_isoc_trbs_needed(xhci, urb, i);

        /* Check the ring to guarantee there is enough room for the whole urb.
         * Do not insert any TDs onto the ring if the check fails.
         */
        ret = prepare_ring(xhci, ep_ring, ep_ctx->ep_info & EP_STATE_MASK,
                        num_trbs, mem_flags);
        if (ret)
                return ret;
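        /*
         * MFINDEX is the HC's free-running 14-bit microframe counter.  LS/FS
         * devices schedule in 1 ms frames (8 microframes each), so shift the
         * index down by 3 for them.
         */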
        start_frame = xhci_readl(xhci, &xhci->run_regs->microframe_index);
        start_frame &= 0x3fff;

        urb->start_frame = start_frame;
        if (urb->dev->speed == USB_SPEED_LOW ||
                        urb->dev->speed == USB_SPEED_FULL)
                urb->start_frame >>= 3;

        xhci_interval = EP_INTERVAL_TO_UFRAMES(ep_ctx->ep_info);
        ep_interval = urb->interval;
        /* Convert to microframes */
        if (urb->dev->speed == USB_SPEED_LOW ||
                        urb->dev->speed == USB_SPEED_FULL)
                ep_interval *= 8;
        /* FIXME change this to a warning and a suggestion to use the new API
         * to set the polling interval (once the API is added).
         */
        if (xhci_interval != ep_interval) {
                if (printk_ratelimit())
                        dev_dbg(&urb->dev->dev, "Driver uses different interval"
                                        " (%d microframe%s) than xHCI "
                                        "(%d microframe%s)\n",
                                        ep_interval,
                                        ep_interval == 1 ? "" : "s",
                                        xhci_interval,
                                        xhci_interval == 1 ? "" : "s");
                urb->interval = xhci_interval;
                /* Convert back to frames for LS/FS devices */
                if (urb->dev->speed == USB_SPEED_LOW ||
                                urb->dev->speed == USB_SPEED_FULL)
                        urb->interval /= 8;
        }
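        /*
         * Presumably we are called with the xHCI lock held here, so the
         * allocations inside xhci_queue_isoc_tx() must not sleep; hence
         * GFP_ATOMIC rather than the caller's mem_flags.
         */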
        return xhci_queue_isoc_tx(xhci, GFP_ATOMIC, urb, slot_id, ep_index);
}

/****           Command Ring Operations         ****/

/* Generic function for queueing a command TRB on the command ring.
 * Check to make sure there's room on the command ring for one command TRB.
 * Also check that there's room reserved for commands that must not fail.
 * If this is a command that must not fail (command_must_succeed = TRUE),
 * then only check for the number of reserved spots.
 * Don't decrement xhci->cmd_ring_reserved_trbs after we've queued the TRB
 * because the command event handler may want to resubmit a failed command.
 */
static int queue_command(struct xhci_hcd *xhci, u32 field1, u32 field2,
                u32 field3, u32 field4, bool command_must_succeed)
{
        int reserved_trbs = xhci->cmd_ring_reserved_trbs;
        int ret;

        if (!command_must_succeed)
                reserved_trbs++;

        ret = prepare_ring(xhci, xhci->cmd_ring, EP_STATE_RUNNING,
                        reserved_trbs, GFP_ATOMIC);
        if (ret < 0) {
                xhci_err(xhci, "ERR: No room for command on command ring\n");
                if (command_must_succeed)
                        xhci_err(xhci, "ERR: Reserved TRB counting for "
                                        "must-succeed commands failed.\n");
                return ret;
        }
        queue_trb(xhci, xhci->cmd_ring, false, false, field1, field2, field3,
                        field4 | xhci->cmd_ring->cycle_state);
        return 0;
}

/* Queue a no-op command on the command ring */
static int queue_cmd_noop(struct xhci_hcd *xhci)
{
        return queue_command(xhci, 0, 0, 0, TRB_TYPE(TRB_CMD_NOOP), false);
}

/*
 * Place a no-op command on the command ring to test the command and
 * event ring.
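 * On success, the caller is expected to ring the command ring doorbell by
 * invoking the returned function pointer (xhci_ring_cmd_db).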
 */
void *xhci_setup_one_noop(struct xhci_hcd *xhci)
{
        if (queue_cmd_noop(xhci) < 0)
                return NULL;
        xhci->noops_submitted++;
        return xhci_ring_cmd_db;
}
/* Queue a slot enable or disable request on the command ring */
int xhci_queue_slot_control(struct xhci_hcd *xhci, u32 trb_type, u32 slot_id)
{
        return queue_command(xhci, 0, 0, 0,
                        TRB_TYPE(trb_type) | SLOT_ID_FOR_TRB(slot_id), false);
}

/* Queue an address device command TRB */
int xhci_queue_address_device(struct xhci_hcd *xhci, dma_addr_t in_ctx_ptr,
                u32 slot_id)
{
        return queue_command(xhci, lower_32_bits(in_ctx_ptr),
                        upper_32_bits(in_ctx_ptr), 0,
                        TRB_TYPE(TRB_ADDR_DEV) | SLOT_ID_FOR_TRB(slot_id),
                        false);
}

int xhci_queue_vendor_command(struct xhci_hcd *xhci,
                u32 field1, u32 field2, u32 field3, u32 field4)
{
        return queue_command(xhci, field1, field2, field3, field4, false);
}

/* Queue a reset device command TRB */
int xhci_queue_reset_device(struct xhci_hcd *xhci, u32 slot_id)
{
        return queue_command(xhci, 0, 0, 0,
                        TRB_TYPE(TRB_RESET_DEV) | SLOT_ID_FOR_TRB(slot_id),
                        false);
}

/* Queue a configure endpoint command TRB */
int xhci_queue_configure_endpoint(struct xhci_hcd *xhci, dma_addr_t in_ctx_ptr,
                u32 slot_id, bool command_must_succeed)
{
        return queue_command(xhci, lower_32_bits(in_ctx_ptr),
                        upper_32_bits(in_ctx_ptr), 0,
                        TRB_TYPE(TRB_CONFIG_EP) | SLOT_ID_FOR_TRB(slot_id),
                        command_must_succeed);
}

/* Queue an evaluate context command TRB */
int xhci_queue_evaluate_context(struct xhci_hcd *xhci, dma_addr_t in_ctx_ptr,
                u32 slot_id)
{
        return queue_command(xhci, lower_32_bits(in_ctx_ptr),
                        upper_32_bits(in_ctx_ptr), 0,
                        TRB_TYPE(TRB_EVAL_CONTEXT) | SLOT_ID_FOR_TRB(slot_id),
                        false);
}

int xhci_queue_stop_endpoint(struct xhci_hcd *xhci, int slot_id,
                unsigned int ep_index)
{
        u32 trb_slot_id = SLOT_ID_FOR_TRB(slot_id);
        u32 trb_ep_index = EP_ID_FOR_TRB(ep_index);
        u32 type = TRB_TYPE(TRB_STOP_RING);

        return queue_command(xhci, 0, 0, 0,
                        trb_slot_id | trb_ep_index | type, false);
}

/* Set Transfer Ring Dequeue Pointer command.
 * This should not be used for endpoints that have streams enabled.
 */
static int queue_set_tr_deq(struct xhci_hcd *xhci, int slot_id,
                unsigned int ep_index, unsigned int stream_id,
                struct xhci_segment *deq_seg,
                union xhci_trb *deq_ptr, u32 cycle_state)
{
        dma_addr_t addr;
        u32 trb_slot_id = SLOT_ID_FOR_TRB(slot_id);
        u32 trb_ep_index = EP_ID_FOR_TRB(ep_index);
        u32 trb_stream_id = STREAM_ID_FOR_TRB(stream_id);
        u32 type = TRB_TYPE(TRB_SET_DEQ);

        addr = xhci_trb_virt_to_dma(deq_seg, deq_ptr);
        if (addr == 0) {
                xhci_warn(xhci, "WARN Cannot submit Set TR Deq Ptr\n");
                xhci_warn(xhci, "WARN deq seg = %p, deq pt = %p\n",
                                deq_seg, deq_ptr);
                return 0;
        }
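        /*
         * TRBs are 16-byte aligned, so the low four bits of the dequeue
         * pointer are always zero; bit 0 is reused below to carry the new
         * dequeue cycle state (DCS) into the command.
         */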
        return queue_command(xhci, lower_32_bits(addr) | cycle_state,
                        upper_32_bits(addr), trb_stream_id,
                        trb_slot_id | trb_ep_index | type, false);
}

int xhci_queue_reset_ep(struct xhci_hcd *xhci, int slot_id,
                unsigned int ep_index)
{
        u32 trb_slot_id = SLOT_ID_FOR_TRB(slot_id);
        u32 trb_ep_index = EP_ID_FOR_TRB(ep_index);
        u32 type = TRB_TYPE(TRB_RESET_EP);

        return queue_command(xhci, 0, 0, 0, trb_slot_id | trb_ep_index | type,
                        false);
}