xhci-ring.c

  1. /*
  2. * xHCI host controller driver
  3. *
  4. * Copyright (C) 2008 Intel Corp.
  5. *
  6. * Author: Sarah Sharp
  7. * Some code borrowed from the Linux EHCI driver.
  8. *
  9. * This program is free software; you can redistribute it and/or modify
  10. * it under the terms of the GNU General Public License version 2 as
  11. * published by the Free Software Foundation.
  12. *
  13. * This program is distributed in the hope that it will be useful, but
  14. * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
  15. * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
  16. * for more details.
  17. *
  18. * You should have received a copy of the GNU General Public License
  19. * along with this program; if not, write to the Free Software Foundation,
  20. * Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
  21. */
  22. /*
  23. * Ring initialization rules:
  24. * 1. Each segment is initialized to zero, except for link TRBs.
  25. * 2. Ring cycle state = 0. This represents Producer Cycle State (PCS) or
  26. * Consumer Cycle State (CCS), depending on ring function.
  27. * 3. Enqueue pointer = dequeue pointer = address of first TRB in the segment.
  28. *
  29. * Ring behavior rules:
  30. * 1. A ring is empty if enqueue == dequeue. This means there will always be at
  31. * least one free TRB in the ring. This is useful if you want to turn that
  32. * into a link TRB and expand the ring.
  33. * 2. When incrementing an enqueue or dequeue pointer, if the next TRB is a
  34. * link TRB, then load the pointer with the address in the link TRB. If the
  35. * link TRB had its toggle bit set, you may need to update the ring cycle
  36. * state (see cycle bit rules). You may have to do this multiple times
  37. * until you reach a non-link TRB.
  38. * 3. A ring is full if enqueue++ (for the definition of increment above)
  39. * equals the dequeue pointer.
  40. *
  41. * Cycle bit rules:
  42. * 1. When a consumer increments a dequeue pointer and encounters a toggle bit
  43. * in a link TRB, it must toggle the ring cycle state.
  44. * 2. When a producer increments an enqueue pointer and encounters a toggle bit
  45. * in a link TRB, it must toggle the ring cycle state.
  46. *
  47. * Producer rules:
  48. * 1. Check if ring is full before you enqueue.
  49. * 2. Write the ring cycle state to the cycle bit in the TRB you're enqueuing.
  50. * Update enqueue pointer between each write (which may update the ring
  51. * cycle state).
  52. * 3. Notify consumer. If SW is producer, it rings the doorbell for command
  53. * and endpoint rings. If HC is the producer for the event ring,
  54. * it generates an interrupt according to interrupt moderation rules.
  55. *
  56. * Consumer rules:
  57. * 1. Check if TRB belongs to you. If the cycle bit == your ring cycle state,
  58. * the TRB is owned by the consumer.
  59. * 2. Update dequeue pointer (which may update the ring cycle state) and
  60. * continue processing TRBs until you reach a TRB which is not owned by you.
  61. * 3. Notify the producer. SW is the consumer for the event ring, and it
  62. * updates event ring dequeue pointer. HC is the consumer for the command and
  63. * endpoint rings; it generates events on the event ring for these.
  64. */
  65. #include <linux/scatterlist.h>
  66. #include <linux/slab.h>
  67. #include "xhci.h"
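/*
 * Illustrative sketch (not part of the driver): the ownership test from the
 * "Consumer rules" in the comment above.  A TRB belongs to the consumer when
 * its cycle bit matches the consumer's ring cycle state.
 */
static inline bool trb_owned_by_consumer(struct xhci_ring *ring,
		union xhci_trb *trb)
{
	return (le32_to_cpu(trb->generic.field[3]) & TRB_CYCLE) ==
		ring->cycle_state;
}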
  68. static int handle_cmd_in_cmd_wait_list(struct xhci_hcd *xhci,
  69. struct xhci_virt_device *virt_dev,
  70. struct xhci_event_cmd *event);
  71. /*
  72. * Returns zero if the TRB isn't in this segment, otherwise it returns the DMA
  73. * address of the TRB.
  74. */
  75. dma_addr_t xhci_trb_virt_to_dma(struct xhci_segment *seg,
  76. union xhci_trb *trb)
  77. {
  78. unsigned long segment_offset;
  79. if (!seg || !trb || trb < seg->trbs)
  80. return 0;
  81. /* offset in TRBs */
  82. segment_offset = trb - seg->trbs;
  83. if (segment_offset >= TRBS_PER_SEGMENT)
  84. return 0;
  85. return seg->dma + (segment_offset * sizeof(*trb));
  86. }
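/*
 * Illustrative sketch (not part of the driver): the inverse of the mapping
 * above.  Since seg->trbs is a contiguous array and each TRB occupies
 * sizeof(union xhci_trb) bytes, a DMA address inside the segment maps back
 * to a TRB by simple index arithmetic.  Assumes 'dma' is TRB-aligned.
 */
static inline union xhci_trb *xhci_dma_to_trb(struct xhci_segment *seg,
		dma_addr_t dma)
{
	if (!seg || dma < seg->dma ||
	    dma >= seg->dma + TRBS_PER_SEGMENT * sizeof(union xhci_trb))
		return NULL;
	return &seg->trbs[(dma - seg->dma) / sizeof(union xhci_trb)];
}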
  87. /* Does this link TRB point to the first segment in a ring,
  88. * or was the previous TRB the last TRB on the last segment in the ERST?
  89. */
  90. static bool last_trb_on_last_seg(struct xhci_hcd *xhci, struct xhci_ring *ring,
  91. struct xhci_segment *seg, union xhci_trb *trb)
  92. {
  93. if (ring == xhci->event_ring)
  94. return (trb == &seg->trbs[TRBS_PER_SEGMENT]) &&
  95. (seg->next == xhci->event_ring->first_seg);
  96. else
  97. return le32_to_cpu(trb->link.control) & LINK_TOGGLE;
  98. }
  99. /* Is this TRB a link TRB or was the last TRB the last TRB in this event ring
  100. * segment? I.e. would the updated event TRB pointer step off the end of the
  101. * event seg?
  102. */
  103. static int last_trb(struct xhci_hcd *xhci, struct xhci_ring *ring,
  104. struct xhci_segment *seg, union xhci_trb *trb)
  105. {
  106. if (ring == xhci->event_ring)
  107. return trb == &seg->trbs[TRBS_PER_SEGMENT];
  108. else
  109. return TRB_TYPE_LINK_LE32(trb->link.control);
  110. }
  111. static int enqueue_is_link_trb(struct xhci_ring *ring)
  112. {
  113. struct xhci_link_trb *link = &ring->enqueue->link;
  114. return TRB_TYPE_LINK_LE32(link->control);
  115. }
  116. /* Updates trb to point to the next TRB in the ring, and updates seg if the next
  117. * TRB is in a new segment. This does not skip over link TRBs, and it does not
  118. * affect the ring dequeue or enqueue pointers.
  119. */
  120. static void next_trb(struct xhci_hcd *xhci,
  121. struct xhci_ring *ring,
  122. struct xhci_segment **seg,
  123. union xhci_trb **trb)
  124. {
  125. if (last_trb(xhci, ring, *seg, *trb)) {
  126. *seg = (*seg)->next;
  127. *trb = ((*seg)->trbs);
  128. } else {
  129. (*trb)++;
  130. }
  131. }
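/*
 * Illustrative sketch (not part of the driver): walking every TRB of a TD
 * with next_trb(), the same pattern td_to_noop() uses further down.  Link
 * TRBs are not skipped, per the comment above.  Assumes td->start_seg,
 * td->first_trb and td->last_trb are set as elsewhere in this file.
 */
static inline unsigned int count_trbs_in_td(struct xhci_hcd *xhci,
		struct xhci_ring *ring, struct xhci_td *td)
{
	struct xhci_segment *seg = td->start_seg;
	union xhci_trb *trb = td->first_trb;
	unsigned int count = 1;

	while (trb != td->last_trb) {
		next_trb(xhci, ring, &seg, &trb);
		count++;
	}
	return count;
}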
  132. /*
  133. * See Cycle bit rules. SW is the consumer for the event ring only.
  134. * Don't make a ring full of link TRBs. That would be dumb and this would loop.
  135. */
  136. static void inc_deq(struct xhci_hcd *xhci, struct xhci_ring *ring, bool consumer)
  137. {
  138. union xhci_trb *next = ++(ring->dequeue);
  139. unsigned long long addr;
  140. ring->deq_updates++;
  141. /* Update the dequeue pointer further if that was a link TRB or we're at
  142. * the end of an event ring segment (which doesn't have link TRBs)
  143. */
  144. while (last_trb(xhci, ring, ring->deq_seg, next)) {
  145. if (consumer && last_trb_on_last_seg(xhci, ring, ring->deq_seg, next)) {
  146. ring->cycle_state = (ring->cycle_state ? 0 : 1);
  147. if (!in_interrupt())
  148. xhci_dbg(xhci, "Toggle cycle state for ring %p = %i\n",
  149. ring,
  150. (unsigned int) ring->cycle_state);
  151. }
  152. ring->deq_seg = ring->deq_seg->next;
  153. ring->dequeue = ring->deq_seg->trbs;
  154. next = ring->dequeue;
  155. }
  156. addr = (unsigned long long) xhci_trb_virt_to_dma(ring->deq_seg, ring->dequeue);
  157. }
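/*
 * Illustrative sketch (not part of the driver): the "Consumer rules" from
 * the header comment applied to the event ring.  Software keeps processing
 * TRBs whose cycle bit matches its ring cycle state and advances the
 * dequeue pointer with inc_deq(); the cycle state toggles automatically
 * when the dequeue pointer wraps past the last segment.
 */
static inline void consume_owned_event_trbs(struct xhci_hcd *xhci)
{
	struct xhci_ring *ring = xhci->event_ring;

	while ((le32_to_cpu(ring->dequeue->generic.field[3]) & TRB_CYCLE) ==
			ring->cycle_state) {
		/* ... handle the event TRB here ... */
		inc_deq(xhci, ring, true);
	}
}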
  158. /*
  159. * See Cycle bit rules. SW is the consumer for the event ring only.
  160. * Don't make a ring full of link TRBs. That would be dumb and this would loop.
  161. *
  162. * If we've just enqueued a TRB that is in the middle of a TD (meaning the
  163. * chain bit is set), then set the chain bit in all the following link TRBs.
  164. * If we've enqueued the last TRB in a TD, make sure the following link TRBs
  165. * have their chain bit cleared (so that each Link TRB is a separate TD).
  166. *
  167. * Section 6.4.4.1 of the 0.95 spec says link TRBs cannot have the chain bit
  168. * set, but other sections talk about dealing with the chain bit set. This was
  169. * fixed in the 0.96 specification errata, but we have to assume that all 0.95
  170. * xHCI hardware can't handle the chain bit being cleared on a link TRB.
  171. *
  172. * @more_trbs_coming: Will you enqueue more TRBs before calling
  173. * prepare_transfer()?
  174. */
  175. static void inc_enq(struct xhci_hcd *xhci, struct xhci_ring *ring,
  176. bool consumer, bool more_trbs_coming)
  177. {
  178. u32 chain;
  179. union xhci_trb *next;
  180. unsigned long long addr;
  181. chain = le32_to_cpu(ring->enqueue->generic.field[3]) & TRB_CHAIN;
  182. next = ++(ring->enqueue);
  183. ring->enq_updates++;
  184. /* Update the enqueue pointer further if that was a link TRB or we're at
  185. * the end of an event ring segment (which doesn't have link TRBs)
  186. */
  187. while (last_trb(xhci, ring, ring->enq_seg, next)) {
  188. if (!consumer) {
  189. if (ring != xhci->event_ring) {
  190. /*
  191. * If the caller doesn't plan on enqueueing more
  192. * TDs before ringing the doorbell, then we
  193. * don't want to give the link TRB to the
  194. * hardware just yet. We'll give the link TRB
  195. * back in prepare_ring() just before we enqueue
  196. * the TD at the top of the ring.
  197. */
  198. if (!chain && !more_trbs_coming)
  199. break;
  200. /* If we're not dealing with 0.95 hardware,
  201. * carry over the chain bit of the previous TRB
  202. * (which may mean the chain bit is cleared).
  203. */
  204. if (!xhci_link_trb_quirk(xhci)) {
  205. next->link.control &=
  206. cpu_to_le32(~TRB_CHAIN);
  207. next->link.control |=
  208. cpu_to_le32(chain);
  209. }
  210. /* Give this link TRB to the hardware */
  211. wmb();
  212. next->link.control ^= cpu_to_le32(TRB_CYCLE);
  213. }
  214. /* Toggle the cycle bit after the last ring segment. */
  215. if (last_trb_on_last_seg(xhci, ring, ring->enq_seg, next)) {
  216. ring->cycle_state = (ring->cycle_state ? 0 : 1);
  217. if (!in_interrupt())
  218. xhci_dbg(xhci, "Toggle cycle state for ring %p = %i\n",
  219. ring,
  220. (unsigned int) ring->cycle_state);
  221. }
  222. }
  223. ring->enq_seg = ring->enq_seg->next;
  224. ring->enqueue = ring->enq_seg->trbs;
  225. next = ring->enqueue;
  226. }
  227. addr = (unsigned long long) xhci_trb_virt_to_dma(ring->enq_seg, ring->enqueue);
  228. }
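/*
 * Illustrative sketch (not part of the driver): the "Producer rules" from
 * the header comment.  Fill in the TRB fields and write field[3] last,
 * since it carries the cycle bit that hands the TRB to the hardware, then
 * advance the enqueue pointer with inc_enq().  The driver's own TRB
 * queueing helper later in this file follows essentially the same pattern.
 */
static inline void queue_generic_trb(struct xhci_hcd *xhci,
		struct xhci_ring *ring, bool more_trbs_coming,
		u32 field1, u32 field2, u32 field3, u32 field4)
{
	struct xhci_generic_trb *trb = &ring->enqueue->generic;

	trb->field[0] = cpu_to_le32(field1);
	trb->field[1] = cpu_to_le32(field2);
	trb->field[2] = cpu_to_le32(field3);
	/* The caller ORs TRB type and chain bit into field4; add the cycle bit */
	trb->field[3] = cpu_to_le32(field4 | ring->cycle_state);
	inc_enq(xhci, ring, false, more_trbs_coming);
}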
  229. /*
  230. * Check to see if there's room to enqueue num_trbs on the ring. See rules
  231. * above.
  232. * FIXME: this would be simpler and faster if we just kept track of the number
  233. * of free TRBs in a ring.
  234. */
  235. static int room_on_ring(struct xhci_hcd *xhci, struct xhci_ring *ring,
  236. unsigned int num_trbs)
  237. {
  238. int i;
  239. union xhci_trb *enq = ring->enqueue;
  240. struct xhci_segment *enq_seg = ring->enq_seg;
  241. struct xhci_segment *cur_seg;
  242. unsigned int left_on_ring;
  243. /* If we are currently pointing to a link TRB, advance the
  244. * enqueue pointer before checking for space */
  245. while (last_trb(xhci, ring, enq_seg, enq)) {
  246. enq_seg = enq_seg->next;
  247. enq = enq_seg->trbs;
  248. }
  249. /* Check if ring is empty */
  250. if (enq == ring->dequeue) {
  251. /* Can't use link trbs */
  252. left_on_ring = TRBS_PER_SEGMENT - 1;
  253. for (cur_seg = enq_seg->next; cur_seg != enq_seg;
  254. cur_seg = cur_seg->next)
  255. left_on_ring += TRBS_PER_SEGMENT - 1;
  256. /* Always need one TRB free in the ring. */
  257. left_on_ring -= 1;
  258. if (num_trbs > left_on_ring) {
  259. xhci_warn(xhci, "Not enough room on ring; "
  260. "need %u TRBs, %u TRBs left\n",
  261. num_trbs, left_on_ring);
  262. return 0;
  263. }
  264. return 1;
  265. }
  266. /* Make sure there's an extra empty TRB available */
  267. for (i = 0; i <= num_trbs; ++i) {
  268. if (enq == ring->dequeue)
  269. return 0;
  270. enq++;
  271. while (last_trb(xhci, ring, enq_seg, enq)) {
  272. enq_seg = enq_seg->next;
  273. enq = enq_seg->trbs;
  274. }
  275. }
  276. return 1;
  277. }
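/*
 * Sketch of the simpler bookkeeping the FIXME above suggests (not part of
 * this driver): keep a running count of free TRBs, e.g. a hypothetical
 * ring->num_trbs_free updated by inc_enq()/inc_deq(), so the space check
 * collapses to one comparison.  The count excludes link TRBs and the one
 * TRB that must stay free so that enqueue == dequeue still means "empty".
 */
static inline int room_on_ring_counted(unsigned int num_trbs_free,
		unsigned int num_trbs)
{
	return num_trbs <= num_trbs_free;
}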
  278. /* Ring the host controller doorbell after placing a command on the ring */
  279. void xhci_ring_cmd_db(struct xhci_hcd *xhci)
  280. {
  281. xhci_dbg(xhci, "// Ding dong!\n");
  282. xhci_writel(xhci, DB_VALUE_HOST, &xhci->dba->doorbell[0]);
  283. /* Flush PCI posted writes */
  284. xhci_readl(xhci, &xhci->dba->doorbell[0]);
  285. }
  286. void xhci_ring_ep_doorbell(struct xhci_hcd *xhci,
  287. unsigned int slot_id,
  288. unsigned int ep_index,
  289. unsigned int stream_id)
  290. {
  291. __le32 __iomem *db_addr = &xhci->dba->doorbell[slot_id];
  292. struct xhci_virt_ep *ep = &xhci->devs[slot_id]->eps[ep_index];
  293. unsigned int ep_state = ep->ep_state;
  294. /* Don't ring the doorbell for this endpoint if there are pending
  295. * cancellations because we don't want to interrupt processing.
  296. * We don't want to restart any stream rings if there's a set dequeue
  297. * pointer command pending because the device can choose to start any
  298. * stream once the endpoint is on the HW schedule.
  299. * FIXME - check all the stream rings for pending cancellations.
  300. */
  301. if ((ep_state & EP_HALT_PENDING) || (ep_state & SET_DEQ_PENDING) ||
  302. (ep_state & EP_HALTED))
  303. return;
  304. xhci_writel(xhci, DB_VALUE(ep_index, stream_id), db_addr);
  305. /* The CPU has better things to do at this point than wait for a
  306. * write-posting flush. It'll get there soon enough.
  307. */
  308. }
  309. /* Ring the doorbell for any rings with pending URBs */
  310. static void ring_doorbell_for_active_rings(struct xhci_hcd *xhci,
  311. unsigned int slot_id,
  312. unsigned int ep_index)
  313. {
  314. unsigned int stream_id;
  315. struct xhci_virt_ep *ep;
  316. ep = &xhci->devs[slot_id]->eps[ep_index];
  317. /* A ring has pending URBs if its TD list is not empty */
  318. if (!(ep->ep_state & EP_HAS_STREAMS)) {
  319. if (!(list_empty(&ep->ring->td_list)))
  320. xhci_ring_ep_doorbell(xhci, slot_id, ep_index, 0);
  321. return;
  322. }
  323. for (stream_id = 1; stream_id < ep->stream_info->num_streams;
  324. stream_id++) {
  325. struct xhci_stream_info *stream_info = ep->stream_info;
  326. if (!list_empty(&stream_info->stream_rings[stream_id]->td_list))
  327. xhci_ring_ep_doorbell(xhci, slot_id, ep_index,
  328. stream_id);
  329. }
  330. }
  331. /*
  332. * Find the segment that trb is in. Start searching in start_seg.
  333. * If we must move past a segment that has a link TRB with a toggle cycle state
  334. * bit set, then we will toggle the value pointed at by cycle_state.
  335. */
  336. static struct xhci_segment *find_trb_seg(
  337. struct xhci_segment *start_seg,
  338. union xhci_trb *trb, int *cycle_state)
  339. {
  340. struct xhci_segment *cur_seg = start_seg;
  341. struct xhci_generic_trb *generic_trb;
  342. while (cur_seg->trbs > trb ||
  343. &cur_seg->trbs[TRBS_PER_SEGMENT - 1] < trb) {
  344. generic_trb = &cur_seg->trbs[TRBS_PER_SEGMENT - 1].generic;
  345. if (generic_trb->field[3] & cpu_to_le32(LINK_TOGGLE))
  346. *cycle_state ^= 0x1;
  347. cur_seg = cur_seg->next;
  348. if (cur_seg == start_seg)
  349. /* Looped over the entire list. Oops! */
  350. return NULL;
  351. }
  352. return cur_seg;
  353. }
  354. static struct xhci_ring *xhci_triad_to_transfer_ring(struct xhci_hcd *xhci,
  355. unsigned int slot_id, unsigned int ep_index,
  356. unsigned int stream_id)
  357. {
  358. struct xhci_virt_ep *ep;
  359. ep = &xhci->devs[slot_id]->eps[ep_index];
  360. /* Common case: no streams */
  361. if (!(ep->ep_state & EP_HAS_STREAMS))
  362. return ep->ring;
  363. if (stream_id == 0) {
  364. xhci_warn(xhci,
  365. "WARN: Slot ID %u, ep index %u has streams, "
  366. "but URB has no stream ID.\n",
  367. slot_id, ep_index);
  368. return NULL;
  369. }
  370. if (stream_id < ep->stream_info->num_streams)
  371. return ep->stream_info->stream_rings[stream_id];
  372. xhci_warn(xhci,
  373. "WARN: Slot ID %u, ep index %u has "
  374. "stream IDs 1 to %u allocated, "
  375. "but stream ID %u is requested.\n",
  376. slot_id, ep_index,
  377. ep->stream_info->num_streams - 1,
  378. stream_id);
  379. return NULL;
  380. }
  381. /* Get the right ring for the given URB.
  382. * If the endpoint supports streams, boundary check the URB's stream ID.
  383. * If the endpoint doesn't support streams, return the singular endpoint ring.
  384. */
  385. static struct xhci_ring *xhci_urb_to_transfer_ring(struct xhci_hcd *xhci,
  386. struct urb *urb)
  387. {
  388. return xhci_triad_to_transfer_ring(xhci, urb->dev->slot_id,
  389. xhci_get_endpoint_index(&urb->ep->desc), urb->stream_id);
  390. }
  391. /*
  392. * Move the xHC's endpoint ring dequeue pointer past cur_td.
  393. * Record the new state of the xHC's endpoint ring dequeue segment,
  394. * dequeue pointer, and new consumer cycle state in state.
  395. * Update our internal representation of the ring's dequeue pointer.
  396. *
  397. * We do this in three jumps:
  398. * - First we update our new ring state to be the same as when the xHC stopped.
  399. * - Then we traverse the ring to find the segment that contains
  400. * the last TRB in the TD. We toggle the xHC's new cycle state when we pass
  401. * any link TRBs with the toggle cycle bit set.
  402. * - Finally we move the dequeue state one TRB further, toggling the cycle bit
  403. * if we've moved it past a link TRB with the toggle cycle bit set.
  404. *
  405. * Some of the uses of xhci_generic_trb are grotty, but if they're done
  406. * with correct __le32 accesses they should work fine. Only users of this are
  407. * in here.
  408. */
  409. void xhci_find_new_dequeue_state(struct xhci_hcd *xhci,
  410. unsigned int slot_id, unsigned int ep_index,
  411. unsigned int stream_id, struct xhci_td *cur_td,
  412. struct xhci_dequeue_state *state)
  413. {
  414. struct xhci_virt_device *dev = xhci->devs[slot_id];
  415. struct xhci_ring *ep_ring;
  416. struct xhci_generic_trb *trb;
  417. struct xhci_ep_ctx *ep_ctx;
  418. dma_addr_t addr;
  419. ep_ring = xhci_triad_to_transfer_ring(xhci, slot_id,
  420. ep_index, stream_id);
  421. if (!ep_ring) {
  422. xhci_warn(xhci, "WARN can't find new dequeue state "
  423. "for invalid stream ID %u.\n",
  424. stream_id);
  425. return;
  426. }
  427. state->new_cycle_state = 0;
  428. xhci_dbg(xhci, "Finding segment containing stopped TRB.\n");
  429. state->new_deq_seg = find_trb_seg(cur_td->start_seg,
  430. dev->eps[ep_index].stopped_trb,
  431. &state->new_cycle_state);
  432. if (!state->new_deq_seg) {
  433. WARN_ON(1);
  434. return;
  435. }
  436. /* Dig out the cycle state saved by the xHC during the stop ep cmd */
  437. xhci_dbg(xhci, "Finding endpoint context\n");
  438. ep_ctx = xhci_get_ep_ctx(xhci, dev->out_ctx, ep_index);
  439. state->new_cycle_state = 0x1 & le64_to_cpu(ep_ctx->deq);
  440. state->new_deq_ptr = cur_td->last_trb;
  441. xhci_dbg(xhci, "Finding segment containing last TRB in TD.\n");
  442. state->new_deq_seg = find_trb_seg(state->new_deq_seg,
  443. state->new_deq_ptr,
  444. &state->new_cycle_state);
  445. if (!state->new_deq_seg) {
  446. WARN_ON(1);
  447. return;
  448. }
  449. trb = &state->new_deq_ptr->generic;
  450. if (TRB_TYPE_LINK_LE32(trb->field[3]) &&
  451. (trb->field[3] & cpu_to_le32(LINK_TOGGLE)))
  452. state->new_cycle_state ^= 0x1;
  453. next_trb(xhci, ep_ring, &state->new_deq_seg, &state->new_deq_ptr);
  454. /*
  455. * If there is only one segment in a ring, find_trb_seg()'s while loop
  456. * will not run, and it will return before it has a chance to see if it
  457. * needs to toggle the cycle bit. It can't tell if the stalled transfer
  458. * ended just before the link TRB on a one-segment ring, or if the TD
  459. * wrapped around the top of the ring, because it doesn't have the TD in
  460. * question. Look for the one-segment case where stalled TRB's address
  461. * is greater than the new dequeue pointer address.
  462. */
  463. if (ep_ring->first_seg == ep_ring->first_seg->next &&
  464. state->new_deq_ptr < dev->eps[ep_index].stopped_trb)
  465. state->new_cycle_state ^= 0x1;
  466. xhci_dbg(xhci, "Cycle state = 0x%x\n", state->new_cycle_state);
  467. /* Don't update the ring cycle state for the producer (us). */
  468. xhci_dbg(xhci, "New dequeue segment = %p (virtual)\n",
  469. state->new_deq_seg);
  470. addr = xhci_trb_virt_to_dma(state->new_deq_seg, state->new_deq_ptr);
  471. xhci_dbg(xhci, "New dequeue pointer = 0x%llx (DMA)\n",
  472. (unsigned long long) addr);
  473. }
  474. static void td_to_noop(struct xhci_hcd *xhci, struct xhci_ring *ep_ring,
  475. struct xhci_td *cur_td)
  476. {
  477. struct xhci_segment *cur_seg;
  478. union xhci_trb *cur_trb;
  479. for (cur_seg = cur_td->start_seg, cur_trb = cur_td->first_trb;
  480. true;
  481. next_trb(xhci, ep_ring, &cur_seg, &cur_trb)) {
  482. if (TRB_TYPE_LINK_LE32(cur_trb->generic.field[3])) {
  483. /* Unchain any chained Link TRBs, but
  484. * leave the pointers intact.
  485. */
  486. cur_trb->generic.field[3] &= cpu_to_le32(~TRB_CHAIN);
  487. xhci_dbg(xhci, "Cancel (unchain) link TRB\n");
  488. xhci_dbg(xhci, "Address = %p (0x%llx dma); "
  489. "in seg %p (0x%llx dma)\n",
  490. cur_trb,
  491. (unsigned long long)xhci_trb_virt_to_dma(cur_seg, cur_trb),
  492. cur_seg,
  493. (unsigned long long)cur_seg->dma);
  494. } else {
  495. cur_trb->generic.field[0] = 0;
  496. cur_trb->generic.field[1] = 0;
  497. cur_trb->generic.field[2] = 0;
  498. /* Preserve only the cycle bit of this TRB */
  499. cur_trb->generic.field[3] &= cpu_to_le32(TRB_CYCLE);
  500. cur_trb->generic.field[3] |= cpu_to_le32(
  501. TRB_TYPE(TRB_TR_NOOP));
  502. xhci_dbg(xhci, "Cancel TRB %p (0x%llx dma) "
  503. "in seg %p (0x%llx dma)\n",
  504. cur_trb,
  505. (unsigned long long)xhci_trb_virt_to_dma(cur_seg, cur_trb),
  506. cur_seg,
  507. (unsigned long long)cur_seg->dma);
  508. }
  509. if (cur_trb == cur_td->last_trb)
  510. break;
  511. }
  512. }
  513. static int queue_set_tr_deq(struct xhci_hcd *xhci, int slot_id,
  514. unsigned int ep_index, unsigned int stream_id,
  515. struct xhci_segment *deq_seg,
  516. union xhci_trb *deq_ptr, u32 cycle_state);
  517. void xhci_queue_new_dequeue_state(struct xhci_hcd *xhci,
  518. unsigned int slot_id, unsigned int ep_index,
  519. unsigned int stream_id,
  520. struct xhci_dequeue_state *deq_state)
  521. {
  522. struct xhci_virt_ep *ep = &xhci->devs[slot_id]->eps[ep_index];
  523. xhci_dbg(xhci, "Set TR Deq Ptr cmd, new deq seg = %p (0x%llx dma), "
  524. "new deq ptr = %p (0x%llx dma), new cycle = %u\n",
  525. deq_state->new_deq_seg,
  526. (unsigned long long)deq_state->new_deq_seg->dma,
  527. deq_state->new_deq_ptr,
  528. (unsigned long long)xhci_trb_virt_to_dma(deq_state->new_deq_seg, deq_state->new_deq_ptr),
  529. deq_state->new_cycle_state);
  530. queue_set_tr_deq(xhci, slot_id, ep_index, stream_id,
  531. deq_state->new_deq_seg,
  532. deq_state->new_deq_ptr,
  533. (u32) deq_state->new_cycle_state);
  534. /* Stop the TD queueing code from ringing the doorbell until
  535. * this command completes. The HC won't set the dequeue pointer
  536. * if the ring is running, and ringing the doorbell starts the
  537. * ring running.
  538. */
  539. ep->ep_state |= SET_DEQ_PENDING;
  540. }
  541. static void xhci_stop_watchdog_timer_in_irq(struct xhci_hcd *xhci,
  542. struct xhci_virt_ep *ep)
  543. {
  544. ep->ep_state &= ~EP_HALT_PENDING;
  545. /* Can't del_timer_sync in interrupt, so we attempt to cancel. If the
  546. * timer is running on another CPU, we don't decrement stop_cmds_pending
  547. * (since we didn't successfully stop the watchdog timer).
  548. */
  549. if (del_timer(&ep->stop_cmd_timer))
  550. ep->stop_cmds_pending--;
  551. }
  552. /* Must be called with xhci->lock held in interrupt context */
  553. static void xhci_giveback_urb_in_irq(struct xhci_hcd *xhci,
  554. struct xhci_td *cur_td, int status, char *adjective)
  555. {
  556. struct usb_hcd *hcd;
  557. struct urb *urb;
  558. struct urb_priv *urb_priv;
  559. urb = cur_td->urb;
  560. urb_priv = urb->hcpriv;
  561. urb_priv->td_cnt++;
  562. hcd = bus_to_hcd(urb->dev->bus);
  563. /* Only giveback urb when this is the last td in urb */
  564. if (urb_priv->td_cnt == urb_priv->length) {
  565. if (usb_pipetype(urb->pipe) == PIPE_ISOCHRONOUS) {
  566. xhci_to_hcd(xhci)->self.bandwidth_isoc_reqs--;
  567. if (xhci_to_hcd(xhci)->self.bandwidth_isoc_reqs == 0) {
  568. if (xhci->quirks & XHCI_AMD_PLL_FIX)
  569. usb_amd_quirk_pll_enable();
  570. }
  571. }
  572. usb_hcd_unlink_urb_from_ep(hcd, urb);
  573. spin_unlock(&xhci->lock);
  574. usb_hcd_giveback_urb(hcd, urb, status);
  575. xhci_urb_free_priv(xhci, urb_priv);
  576. spin_lock(&xhci->lock);
  577. }
  578. }
  579. /*
  580. * When we get a command completion for a Stop Endpoint Command, we need to
  581. * unlink any cancelled TDs from the ring. There are two ways to do that:
  582. *
  583. * 1. If the HW was in the middle of processing the TD that needs to be
  584. * cancelled, then we must move the ring's dequeue pointer past the last TRB
  585. * in the TD with a Set Dequeue Pointer Command.
  586. * 2. Otherwise, we turn all the TRBs in the TD into No-op TRBs (with the chain
  587. * bit cleared) so that the HW will skip over them.
  588. */
  589. static void handle_stopped_endpoint(struct xhci_hcd *xhci,
  590. union xhci_trb *trb, struct xhci_event_cmd *event)
  591. {
  592. unsigned int slot_id;
  593. unsigned int ep_index;
  594. struct xhci_virt_device *virt_dev;
  595. struct xhci_ring *ep_ring;
  596. struct xhci_virt_ep *ep;
  597. struct list_head *entry;
  598. struct xhci_td *cur_td = NULL;
  599. struct xhci_td *last_unlinked_td;
  600. struct xhci_dequeue_state deq_state;
  601. if (unlikely(TRB_TO_SUSPEND_PORT(
  602. le32_to_cpu(xhci->cmd_ring->dequeue->generic.field[3])))) {
  603. slot_id = TRB_TO_SLOT_ID(
  604. le32_to_cpu(xhci->cmd_ring->dequeue->generic.field[3]));
  605. virt_dev = xhci->devs[slot_id];
  606. if (virt_dev)
  607. handle_cmd_in_cmd_wait_list(xhci, virt_dev,
  608. event);
  609. else
  610. xhci_warn(xhci, "Stop endpoint command "
  611. "completion for disabled slot %u\n",
  612. slot_id);
  613. return;
  614. }
  615. memset(&deq_state, 0, sizeof(deq_state));
  616. slot_id = TRB_TO_SLOT_ID(le32_to_cpu(trb->generic.field[3]));
  617. ep_index = TRB_TO_EP_INDEX(le32_to_cpu(trb->generic.field[3]));
  618. ep = &xhci->devs[slot_id]->eps[ep_index];
  619. if (list_empty(&ep->cancelled_td_list)) {
  620. xhci_stop_watchdog_timer_in_irq(xhci, ep);
  621. ep->stopped_td = NULL;
  622. ep->stopped_trb = NULL;
  623. ring_doorbell_for_active_rings(xhci, slot_id, ep_index);
  624. return;
  625. }
  626. /* Fix up the ep ring first, so HW stops executing cancelled TDs.
  627. * We have the xHCI lock, so nothing can modify this list until we drop
  628. * it. We're also in the event handler, so we can't get re-interrupted
  629. * if another Stop Endpoint command completes
  630. */
  631. list_for_each(entry, &ep->cancelled_td_list) {
  632. cur_td = list_entry(entry, struct xhci_td, cancelled_td_list);
  633. xhci_dbg(xhci, "Cancelling TD starting at %p, 0x%llx (dma).\n",
  634. cur_td->first_trb,
  635. (unsigned long long)xhci_trb_virt_to_dma(cur_td->start_seg, cur_td->first_trb));
  636. ep_ring = xhci_urb_to_transfer_ring(xhci, cur_td->urb);
  637. if (!ep_ring) {
  638. /* This shouldn't happen unless a driver is mucking
  639. * with the stream ID after submission. This will
  640. * leave the TD on the hardware ring, and the hardware
  641. * will try to execute it, and may access a buffer
  642. * that has already been freed. In the best case, the
  643. * hardware will execute it, and the event handler will
  644. * ignore the completion event for that TD, since it was
  645. * removed from the td_list for that endpoint. In
  646. * short, don't muck with the stream ID after
  647. * submission.
  648. */
  649. xhci_warn(xhci, "WARN Cancelled URB %p "
  650. "has invalid stream ID %u.\n",
  651. cur_td->urb,
  652. cur_td->urb->stream_id);
  653. goto remove_finished_td;
  654. }
  655. /*
  656. * If we stopped on the TD we need to cancel, then we have to
  657. * move the xHC endpoint ring dequeue pointer past this TD.
  658. */
  659. if (cur_td == ep->stopped_td)
  660. xhci_find_new_dequeue_state(xhci, slot_id, ep_index,
  661. cur_td->urb->stream_id,
  662. cur_td, &deq_state);
  663. else
  664. td_to_noop(xhci, ep_ring, cur_td);
  665. remove_finished_td:
  666. /*
  667. * The event handler won't see a completion for this TD anymore,
  668. * so remove it from the endpoint ring's TD list. Keep it in
  669. * the cancelled TD list for URB completion later.
  670. */
  671. list_del(&cur_td->td_list);
  672. }
  673. last_unlinked_td = cur_td;
  674. xhci_stop_watchdog_timer_in_irq(xhci, ep);
  675. /* If necessary, queue a Set Transfer Ring Dequeue Pointer command */
  676. if (deq_state.new_deq_ptr && deq_state.new_deq_seg) {
  677. xhci_queue_new_dequeue_state(xhci,
  678. slot_id, ep_index,
  679. ep->stopped_td->urb->stream_id,
  680. &deq_state);
  681. xhci_ring_cmd_db(xhci);
  682. } else {
  683. /* Otherwise ring the doorbell(s) to restart queued transfers */
  684. ring_doorbell_for_active_rings(xhci, slot_id, ep_index);
  685. }
  686. ep->stopped_td = NULL;
  687. ep->stopped_trb = NULL;
  688. /*
  689. * Drop the lock and complete the URBs in the cancelled TD list.
  690. * New TDs to be cancelled might be added to the end of the list before
  691. * we can complete all the URBs for the TDs we already unlinked.
  692. * So stop when we've completed the URB for the last TD we unlinked.
  693. */
  694. do {
  695. cur_td = list_entry(ep->cancelled_td_list.next,
  696. struct xhci_td, cancelled_td_list);
  697. list_del(&cur_td->cancelled_td_list);
  698. /* Clean up the cancelled URB */
  699. /* Doesn't matter what we pass for status, since the core will
  700. * just overwrite it (because the URB has been unlinked).
  701. */
  702. xhci_giveback_urb_in_irq(xhci, cur_td, 0, "cancelled");
  703. /* Stop processing the cancelled list if the watchdog timer is
  704. * running.
  705. */
  706. if (xhci->xhc_state & XHCI_STATE_DYING)
  707. return;
  708. } while (cur_td != last_unlinked_td);
  709. /* Return to the event handler with xhci->lock re-acquired */
  710. }
  711. /* Watchdog timer function for when a stop endpoint command fails to complete.
  712. * In this case, we assume the host controller is broken or dying or dead. The
  713. * host may still be completing some other events, so we have to be careful to
  714. * let the event ring handler and the URB dequeueing/enqueueing functions know
  715. * through xhci->xhc_state.
  716. *
  717. * The timer may also fire if the host takes a very long time to respond to the
  718. * command, and the stop endpoint command completion handler cannot delete the
  719. * timer before the timer function is called. Another endpoint cancellation may
  720. * sneak in before the timer function can grab the lock, and that may queue
  721. * another stop endpoint command and add the timer back. So we cannot use a
  722. * simple flag to say whether there is a pending stop endpoint command for a
  723. * particular endpoint.
  724. *
  725. * Instead we use a combination of that flag and a counter for the number of
  726. * pending stop endpoint commands. If the timer is the tail end of the last
  727. * stop endpoint command, and the endpoint's command is still pending, we assume
  728. * the host is dying.
  729. */
  730. void xhci_stop_endpoint_command_watchdog(unsigned long arg)
  731. {
  732. struct xhci_hcd *xhci;
  733. struct xhci_virt_ep *ep;
  734. struct xhci_virt_ep *temp_ep;
  735. struct xhci_ring *ring;
  736. struct xhci_td *cur_td;
  737. int ret, i, j;
  738. ep = (struct xhci_virt_ep *) arg;
  739. xhci = ep->xhci;
  740. spin_lock(&xhci->lock);
  741. ep->stop_cmds_pending--;
  742. if (xhci->xhc_state & XHCI_STATE_DYING) {
  743. xhci_dbg(xhci, "Stop EP timer ran, but another timer marked "
  744. "xHCI as DYING, exiting.\n");
  745. spin_unlock(&xhci->lock);
  746. return;
  747. }
  748. if (!(ep->stop_cmds_pending == 0 && (ep->ep_state & EP_HALT_PENDING))) {
  749. xhci_dbg(xhci, "Stop EP timer ran, but no command pending, "
  750. "exiting.\n");
  751. spin_unlock(&xhci->lock);
  752. return;
  753. }
  754. xhci_warn(xhci, "xHCI host not responding to stop endpoint command.\n");
  755. xhci_warn(xhci, "Assuming host is dying, halting host.\n");
  756. /* Oops, HC is dead or dying or at least not responding to the stop
  757. * endpoint command.
  758. */
  759. xhci->xhc_state |= XHCI_STATE_DYING;
  760. /* Disable interrupts from the host controller and start halting it */
  761. xhci_quiesce(xhci);
  762. spin_unlock(&xhci->lock);
  763. ret = xhci_halt(xhci);
  764. spin_lock(&xhci->lock);
  765. if (ret < 0) {
  766. /* This is bad; the host is not responding to commands and it's
  767. * not allowing itself to be halted. At least interrupts are
  768. * disabled. If we call usb_hc_died(), it will attempt to
  769. * disconnect all device drivers under this host. Those
  770. * disconnect() methods will wait for all URBs to be unlinked,
  771. * so we must complete them.
  772. */
  773. xhci_warn(xhci, "Non-responsive xHCI host is not halting.\n");
  774. xhci_warn(xhci, "Completing active URBs anyway.\n");
  775. /* We could turn all TDs on the rings to no-ops. This won't
  776. * help if the host has cached part of the ring, and is slow if
  777. * we want to preserve the cycle bit. Skip it and hope the host
  778. * doesn't touch the memory.
  779. */
  780. }
  781. for (i = 0; i < MAX_HC_SLOTS; i++) {
  782. if (!xhci->devs[i])
  783. continue;
  784. for (j = 0; j < 31; j++) {
  785. temp_ep = &xhci->devs[i]->eps[j];
  786. ring = temp_ep->ring;
  787. if (!ring)
  788. continue;
  789. xhci_dbg(xhci, "Killing URBs for slot ID %u, "
  790. "ep index %u\n", i, j);
  791. while (!list_empty(&ring->td_list)) {
  792. cur_td = list_first_entry(&ring->td_list,
  793. struct xhci_td,
  794. td_list);
  795. list_del(&cur_td->td_list);
  796. if (!list_empty(&cur_td->cancelled_td_list))
  797. list_del(&cur_td->cancelled_td_list);
  798. xhci_giveback_urb_in_irq(xhci, cur_td,
  799. -ESHUTDOWN, "killed");
  800. }
  801. while (!list_empty(&temp_ep->cancelled_td_list)) {
  802. cur_td = list_first_entry(
  803. &temp_ep->cancelled_td_list,
  804. struct xhci_td,
  805. cancelled_td_list);
  806. list_del(&cur_td->cancelled_td_list);
  807. xhci_giveback_urb_in_irq(xhci, cur_td,
  808. -ESHUTDOWN, "killed");
  809. }
  810. }
  811. }
  812. spin_unlock(&xhci->lock);
  813. xhci_dbg(xhci, "Calling usb_hc_died()\n");
  814. usb_hc_died(xhci_to_hcd(xhci)->primary_hcd);
  815. xhci_dbg(xhci, "xHCI host controller is dead.\n");
  816. }
  817. /*
  818. * When we get a completion for a Set Transfer Ring Dequeue Pointer command,
  819. * we need to clear the set deq pending flag in the endpoint ring state, so that
  820. * the TD queueing code can ring the doorbell again. We also need to ring the
  821. * endpoint doorbell to restart the ring, but only if there aren't more
  822. * cancellations pending.
  823. */
  824. static void handle_set_deq_completion(struct xhci_hcd *xhci,
  825. struct xhci_event_cmd *event,
  826. union xhci_trb *trb)
  827. {
  828. unsigned int slot_id;
  829. unsigned int ep_index;
  830. unsigned int stream_id;
  831. struct xhci_ring *ep_ring;
  832. struct xhci_virt_device *dev;
  833. struct xhci_ep_ctx *ep_ctx;
  834. struct xhci_slot_ctx *slot_ctx;
  835. slot_id = TRB_TO_SLOT_ID(le32_to_cpu(trb->generic.field[3]));
  836. ep_index = TRB_TO_EP_INDEX(le32_to_cpu(trb->generic.field[3]));
  837. stream_id = TRB_TO_STREAM_ID(le32_to_cpu(trb->generic.field[2]));
  838. dev = xhci->devs[slot_id];
  839. ep_ring = xhci_stream_id_to_ring(dev, ep_index, stream_id);
  840. if (!ep_ring) {
  841. xhci_warn(xhci, "WARN Set TR deq ptr command for "
  842. "freed stream ID %u\n",
  843. stream_id);
  844. /* XXX: Harmless??? */
  845. dev->eps[ep_index].ep_state &= ~SET_DEQ_PENDING;
  846. return;
  847. }
  848. ep_ctx = xhci_get_ep_ctx(xhci, dev->out_ctx, ep_index);
  849. slot_ctx = xhci_get_slot_ctx(xhci, dev->out_ctx);
  850. if (GET_COMP_CODE(le32_to_cpu(event->status)) != COMP_SUCCESS) {
  851. unsigned int ep_state;
  852. unsigned int slot_state;
  853. switch (GET_COMP_CODE(le32_to_cpu(event->status))) {
  854. case COMP_TRB_ERR:
  855. xhci_warn(xhci, "WARN Set TR Deq Ptr cmd invalid because "
  856. "of stream ID configuration\n");
  857. break;
  858. case COMP_CTX_STATE:
  859. xhci_warn(xhci, "WARN Set TR Deq Ptr cmd failed due "
  860. "to incorrect slot or ep state.\n");
  861. ep_state = le32_to_cpu(ep_ctx->ep_info);
  862. ep_state &= EP_STATE_MASK;
  863. slot_state = le32_to_cpu(slot_ctx->dev_state);
  864. slot_state = GET_SLOT_STATE(slot_state);
  865. xhci_dbg(xhci, "Slot state = %u, EP state = %u\n",
  866. slot_state, ep_state);
  867. break;
  868. case COMP_EBADSLT:
  869. xhci_warn(xhci, "WARN Set TR Deq Ptr cmd failed because "
  870. "slot %u was not enabled.\n", slot_id);
  871. break;
  872. default:
  873. xhci_warn(xhci, "WARN Set TR Deq Ptr cmd with unknown "
  874. "completion code of %u.\n",
  875. GET_COMP_CODE(le32_to_cpu(event->status)));
  876. break;
  877. }
  878. /* OK what do we do now? The endpoint state is hosed, and we
  879. * should never get to this point if the synchronization between
  880. * queueing and endpoint state is correct. This might happen
  881. * if the device gets disconnected after we've finished
  882. * cancelling URBs, which might not be an error...
  883. */
  884. } else {
  885. xhci_dbg(xhci, "Successful Set TR Deq Ptr cmd, deq = @%08llx\n",
  886. le64_to_cpu(ep_ctx->deq));
  887. if (xhci_trb_virt_to_dma(dev->eps[ep_index].queued_deq_seg,
  888. dev->eps[ep_index].queued_deq_ptr) ==
  889. (le64_to_cpu(ep_ctx->deq) & ~(EP_CTX_CYCLE_MASK))) {
  890. /* Update the ring's dequeue segment and dequeue pointer
  891. * to reflect the new position.
  892. */
  893. ep_ring->deq_seg = dev->eps[ep_index].queued_deq_seg;
  894. ep_ring->dequeue = dev->eps[ep_index].queued_deq_ptr;
  895. } else {
  896. xhci_warn(xhci, "Mismatch between completed Set TR Deq "
  897. "Ptr command & xHCI internal state.\n");
  898. xhci_warn(xhci, "ep deq seg = %p, deq ptr = %p\n",
  899. dev->eps[ep_index].queued_deq_seg,
  900. dev->eps[ep_index].queued_deq_ptr);
  901. }
  902. }
  903. dev->eps[ep_index].ep_state &= ~SET_DEQ_PENDING;
  904. dev->eps[ep_index].queued_deq_seg = NULL;
  905. dev->eps[ep_index].queued_deq_ptr = NULL;
  906. /* Restart any rings with pending URBs */
  907. ring_doorbell_for_active_rings(xhci, slot_id, ep_index);
  908. }
  909. static void handle_reset_ep_completion(struct xhci_hcd *xhci,
  910. struct xhci_event_cmd *event,
  911. union xhci_trb *trb)
  912. {
  913. int slot_id;
  914. unsigned int ep_index;
  915. slot_id = TRB_TO_SLOT_ID(le32_to_cpu(trb->generic.field[3]));
  916. ep_index = TRB_TO_EP_INDEX(le32_to_cpu(trb->generic.field[3]));
  917. /* This command will only fail if the endpoint wasn't halted,
  918. * but we don't care.
  919. */
  920. xhci_dbg(xhci, "Ignoring reset ep completion code of %u\n",
  921. GET_COMP_CODE(le32_to_cpu(event->status)));
  922. /* HW with the reset endpoint quirk needs to have a configure endpoint
  923. * command complete before the endpoint can be used. Queue that here
  924. * because the HW can't handle two commands being queued in a row.
  925. */
  926. if (xhci->quirks & XHCI_RESET_EP_QUIRK) {
  927. xhci_dbg(xhci, "Queueing configure endpoint command\n");
  928. xhci_queue_configure_endpoint(xhci,
  929. xhci->devs[slot_id]->in_ctx->dma, slot_id,
  930. false);
  931. xhci_ring_cmd_db(xhci);
  932. } else {
  933. /* Clear our internal halted state and restart the ring(s) */
  934. xhci->devs[slot_id]->eps[ep_index].ep_state &= ~EP_HALTED;
  935. ring_doorbell_for_active_rings(xhci, slot_id, ep_index);
  936. }
  937. }
  938. /* Check to see if a command in the device's command queue matches this one.
  939. * Signal the completion or free the command, and return 1. Return 0 if the
  940. * completed command isn't at the head of the command list.
  941. */
  942. static int handle_cmd_in_cmd_wait_list(struct xhci_hcd *xhci,
  943. struct xhci_virt_device *virt_dev,
  944. struct xhci_event_cmd *event)
  945. {
  946. struct xhci_command *command;
  947. if (list_empty(&virt_dev->cmd_list))
  948. return 0;
  949. command = list_entry(virt_dev->cmd_list.next,
  950. struct xhci_command, cmd_list);
  951. if (xhci->cmd_ring->dequeue != command->command_trb)
  952. return 0;
  953. command->status = GET_COMP_CODE(le32_to_cpu(event->status));
  954. list_del(&command->cmd_list);
  955. if (command->completion)
  956. complete(command->completion);
  957. else
  958. xhci_free_command(xhci, command);
  959. return 1;
  960. }
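/*
 * Illustrative sketch (not part of the driver) of the producer side that
 * handle_cmd_in_cmd_wait_list() pairs with: record the command TRB about to
 * be queued, add the command to the device's wait list, ring the command
 * ring doorbell, and sleep until the handler above signals the completion.
 * Locking and the actual command TRB queueing are omitted; the caller must
 * not hold xhci->lock while waiting.
 */
static inline int wait_for_device_command(struct xhci_hcd *xhci,
		struct xhci_virt_device *virt_dev,
		struct xhci_command *command)
{
	command->command_trb = xhci->cmd_ring->enqueue;
	list_add_tail(&command->cmd_list, &virt_dev->cmd_list);
	/* ... queue the command TRB on xhci->cmd_ring here ... */
	xhci_ring_cmd_db(xhci);
	wait_for_completion(command->completion);
	return command->status;
}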
  961. static void handle_cmd_completion(struct xhci_hcd *xhci,
  962. struct xhci_event_cmd *event)
  963. {
  964. int slot_id = TRB_TO_SLOT_ID(le32_to_cpu(event->flags));
  965. u64 cmd_dma;
  966. dma_addr_t cmd_dequeue_dma;
  967. struct xhci_input_control_ctx *ctrl_ctx;
  968. struct xhci_virt_device *virt_dev;
  969. unsigned int ep_index;
  970. struct xhci_ring *ep_ring;
  971. unsigned int ep_state;
  972. cmd_dma = le64_to_cpu(event->cmd_trb);
  973. cmd_dequeue_dma = xhci_trb_virt_to_dma(xhci->cmd_ring->deq_seg,
  974. xhci->cmd_ring->dequeue);
  975. /* Is the command ring deq ptr out of sync with the deq seg ptr? */
  976. if (cmd_dequeue_dma == 0) {
  977. xhci->error_bitmask |= 1 << 4;
  978. return;
  979. }
  980. /* Does the DMA address match our internal dequeue pointer address? */
  981. if (cmd_dma != (u64) cmd_dequeue_dma) {
  982. xhci->error_bitmask |= 1 << 5;
  983. return;
  984. }
  985. switch (le32_to_cpu(xhci->cmd_ring->dequeue->generic.field[3])
  986. & TRB_TYPE_BITMASK) {
  987. case TRB_TYPE(TRB_ENABLE_SLOT):
  988. if (GET_COMP_CODE(le32_to_cpu(event->status)) == COMP_SUCCESS)
  989. xhci->slot_id = slot_id;
  990. else
  991. xhci->slot_id = 0;
  992. complete(&xhci->addr_dev);
  993. break;
  994. case TRB_TYPE(TRB_DISABLE_SLOT):
  995. if (xhci->devs[slot_id]) {
  996. if (xhci->quirks & XHCI_EP_LIMIT_QUIRK)
  997. /* Delete default control endpoint resources */
  998. xhci_free_device_endpoint_resources(xhci,
  999. xhci->devs[slot_id], true);
  1000. xhci_free_virt_device(xhci, slot_id);
  1001. }
  1002. break;
  1003. case TRB_TYPE(TRB_CONFIG_EP):
  1004. virt_dev = xhci->devs[slot_id];
  1005. if (handle_cmd_in_cmd_wait_list(xhci, virt_dev, event))
  1006. break;
  1007. /*
  1008. * Configure endpoint commands can come from the USB core
  1009. * configuration or alt setting changes, or because the HW
  1010. * needed an extra configure endpoint command after a reset
  1011. * endpoint command or streams were being configured.
  1012. * If the command was for a halted endpoint, the xHCI driver
  1013. * is not waiting on the configure endpoint command.
  1014. */
  1015. ctrl_ctx = xhci_get_input_control_ctx(xhci,
  1016. virt_dev->in_ctx);
  1017. /* Input ctx add_flags are the endpoint index plus one */
  1018. ep_index = xhci_last_valid_endpoint(le32_to_cpu(ctrl_ctx->add_flags)) - 1;
  1019. /* A usb_set_interface() call directly after clearing a halted
  1020. * condition may race on this quirky hardware. Not worth
  1021. * worrying about, since this is prototype hardware. Not sure
  1022. * if this will work for streams, but streams support was
  1023. * untested on this prototype.
  1024. */
  1025. if (xhci->quirks & XHCI_RESET_EP_QUIRK &&
  1026. ep_index != (unsigned int) -1 &&
  1027. le32_to_cpu(ctrl_ctx->add_flags) - SLOT_FLAG ==
  1028. le32_to_cpu(ctrl_ctx->drop_flags)) {
  1029. ep_ring = xhci->devs[slot_id]->eps[ep_index].ring;
  1030. ep_state = xhci->devs[slot_id]->eps[ep_index].ep_state;
  1031. if (!(ep_state & EP_HALTED))
  1032. goto bandwidth_change;
  1033. xhci_dbg(xhci, "Completed config ep cmd - "
  1034. "last ep index = %d, state = %d\n",
  1035. ep_index, ep_state);
  1036. /* Clear internal halted state and restart ring(s) */
  1037. xhci->devs[slot_id]->eps[ep_index].ep_state &=
  1038. ~EP_HALTED;
  1039. ring_doorbell_for_active_rings(xhci, slot_id, ep_index);
  1040. break;
  1041. }
  1042. bandwidth_change:
  1043. xhci_dbg(xhci, "Completed config ep cmd\n");
  1044. xhci->devs[slot_id]->cmd_status =
  1045. GET_COMP_CODE(le32_to_cpu(event->status));
  1046. complete(&xhci->devs[slot_id]->cmd_completion);
  1047. break;
  1048. case TRB_TYPE(TRB_EVAL_CONTEXT):
  1049. virt_dev = xhci->devs[slot_id];
  1050. if (handle_cmd_in_cmd_wait_list(xhci, virt_dev, event))
  1051. break;
  1052. xhci->devs[slot_id]->cmd_status = GET_COMP_CODE(le32_to_cpu(event->status));
  1053. complete(&xhci->devs[slot_id]->cmd_completion);
  1054. break;
  1055. case TRB_TYPE(TRB_ADDR_DEV):
  1056. xhci->devs[slot_id]->cmd_status = GET_COMP_CODE(le32_to_cpu(event->status));
  1057. complete(&xhci->addr_dev);
  1058. break;
  1059. case TRB_TYPE(TRB_STOP_RING):
  1060. handle_stopped_endpoint(xhci, xhci->cmd_ring->dequeue, event);
  1061. break;
  1062. case TRB_TYPE(TRB_SET_DEQ):
  1063. handle_set_deq_completion(xhci, event, xhci->cmd_ring->dequeue);
  1064. break;
  1065. case TRB_TYPE(TRB_CMD_NOOP):
  1066. break;
  1067. case TRB_TYPE(TRB_RESET_EP):
  1068. handle_reset_ep_completion(xhci, event, xhci->cmd_ring->dequeue);
  1069. break;
  1070. case TRB_TYPE(TRB_RESET_DEV):
  1071. xhci_dbg(xhci, "Completed reset device command.\n");
  1072. slot_id = TRB_TO_SLOT_ID(
  1073. le32_to_cpu(xhci->cmd_ring->dequeue->generic.field[3]));
  1074. virt_dev = xhci->devs[slot_id];
  1075. if (virt_dev)
  1076. handle_cmd_in_cmd_wait_list(xhci, virt_dev, event);
  1077. else
  1078. xhci_warn(xhci, "Reset device command completion "
  1079. "for disabled slot %u\n", slot_id);
  1080. break;
  1081. case TRB_TYPE(TRB_NEC_GET_FW):
  1082. if (!(xhci->quirks & XHCI_NEC_HOST)) {
  1083. xhci->error_bitmask |= 1 << 6;
  1084. break;
  1085. }
  1086. xhci_dbg(xhci, "NEC firmware version %2x.%02x\n",
  1087. NEC_FW_MAJOR(le32_to_cpu(event->status)),
  1088. NEC_FW_MINOR(le32_to_cpu(event->status)));
  1089. break;
  1090. default:
  1091. /* Skip over unknown commands on the event ring */
  1092. xhci->error_bitmask |= 1 << 6;
  1093. break;
  1094. }
  1095. inc_deq(xhci, xhci->cmd_ring, false);
  1096. }
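/*
 * Vendor-specific event TRBs (type >= 48) land here.  The TRB type is
 * logged; currently only the NEC command completion event is handled, and
 * only when the NEC host quirk is set.
 */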
  1097. static void handle_vendor_event(struct xhci_hcd *xhci,
  1098. union xhci_trb *event)
  1099. {
  1100. u32 trb_type;
  1101. trb_type = TRB_FIELD_TO_TYPE(le32_to_cpu(event->generic.field[3]));
  1102. xhci_dbg(xhci, "Vendor specific event TRB type = %u\n", trb_type);
  1103. if (trb_type == TRB_NEC_CMD_COMP && (xhci->quirks & XHCI_NEC_HOST))
  1104. handle_cmd_completion(xhci, &event->event_cmd);
  1105. }
  1106. /* @port_id: the one-based port ID from the hardware (indexed from array of all
  1107. * port registers -- USB 3.0 and USB 2.0).
  1108. *
  1109. * Returns a zero-based port number, which is suitable for indexing into each of
  1110. * the split roothubs' port arrays and bus state arrays.
  1111. */
  1112. static unsigned int find_faked_portnum_from_hw_portnum(struct usb_hcd *hcd,
  1113. struct xhci_hcd *xhci, u32 port_id)
  1114. {
  1115. unsigned int i;
  1116. unsigned int num_similar_speed_ports = 0;
  1117. /* port_id from the hardware is 1-based, but port_array[], usb3_ports[],
  1118. * and usb2_ports are 0-based indexes. Count the number of similar
  1119. * speed ports, up to, but not including, this port.
  1120. */
  1121. for (i = 0; i < (port_id - 1); i++) {
  1122. u8 port_speed = xhci->port_array[i];
  1123. /*
  1124. * Skip ports that don't have known speeds, or have duplicate
  1125. * Extended Capabilities port speed entries.
  1126. */
  1127. if (port_speed == 0 || port_speed == DUPLICATE_ENTRY)
  1128. continue;
  1129. /*
  1130. * USB 3.0 ports are always under a USB 3.0 hub. USB 2.0 and
  1131. * 1.1 ports are under the USB 2.0 hub. If the port speed
  1132. * matches the device speed, it's a similar speed port.
  1133. */
  1134. if ((port_speed == 0x03) == (hcd->speed == HCD_USB3))
  1135. num_similar_speed_ports++;
  1136. }
  1137. return num_similar_speed_ports;
  1138. }
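/*
 * Handle a Port Status Change Event: validate the hardware port number, map
 * it to the right roothub (USB 2.0 or USB 3.0) and faked port index, start
 * resume handling if the port reported a resume, and finally let the USB
 * core poll the roothub.  The event ring dequeue pointer is advanced here,
 * so the caller must not advance it again for this event.
 */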
  1139. static void handle_port_status(struct xhci_hcd *xhci,
  1140. union xhci_trb *event)
  1141. {
  1142. struct usb_hcd *hcd;
  1143. u32 port_id;
  1144. u32 temp, temp1;
  1145. int max_ports;
  1146. int slot_id;
  1147. unsigned int faked_port_index;
  1148. u8 major_revision;
  1149. struct xhci_bus_state *bus_state;
  1150. __le32 __iomem **port_array;
  1151. bool bogus_port_status = false;
  1152. /* Port status change events always have a successful completion code */
  1153. if (GET_COMP_CODE(le32_to_cpu(event->generic.field[2])) != COMP_SUCCESS) {
  1154. xhci_warn(xhci, "WARN: xHC returned failed port status event\n");
  1155. xhci->error_bitmask |= 1 << 8;
  1156. }
  1157. port_id = GET_PORT_ID(le32_to_cpu(event->generic.field[0]));
  1158. xhci_dbg(xhci, "Port Status Change Event for port %d\n", port_id);
  1159. max_ports = HCS_MAX_PORTS(xhci->hcs_params1);
  1160. if ((port_id <= 0) || (port_id > max_ports)) {
  1161. xhci_warn(xhci, "Invalid port id %d\n", port_id);
  1162. bogus_port_status = true;
  1163. goto cleanup;
  1164. }
  1165. /* Figure out which usb_hcd this port is attached to:
  1166. * is it a USB 3.0 port or a USB 2.0/1.1 port?
  1167. */
  1168. major_revision = xhci->port_array[port_id - 1];
  1169. if (major_revision == 0) {
  1170. xhci_warn(xhci, "Event for port %u not in "
  1171. "Extended Capabilities, ignoring.\n",
  1172. port_id);
  1173. bogus_port_status = true;
  1174. goto cleanup;
  1175. }
  1176. if (major_revision == DUPLICATE_ENTRY) {
  1177. xhci_warn(xhci, "Event for port %u duplicated in"
  1178. "Extended Capabilities, ignoring.\n",
  1179. port_id);
  1180. bogus_port_status = true;
  1181. goto cleanup;
  1182. }
  1183. /*
  1184. * Hardware port IDs reported by a Port Status Change Event include USB
  1185. * 3.0 and USB 2.0 ports. We want to check if the port has reported a
  1186. * resume event, but we first need to translate the hardware port ID
  1187. * into the index into the ports on the correct split roothub, and the
  1188. * correct bus_state structure.
  1189. */
  1190. /* Find the right roothub. */
  1191. hcd = xhci_to_hcd(xhci);
  1192. if ((major_revision == 0x03) != (hcd->speed == HCD_USB3))
  1193. hcd = xhci->shared_hcd;
  1194. bus_state = &xhci->bus_state[hcd_index(hcd)];
  1195. if (hcd->speed == HCD_USB3)
  1196. port_array = xhci->usb3_ports;
  1197. else
  1198. port_array = xhci->usb2_ports;
  1199. /* Find the faked port index within this roothub's port array */
  1200. faked_port_index = find_faked_portnum_from_hw_portnum(hcd, xhci,
  1201. port_id);
  1202. temp = xhci_readl(xhci, port_array[faked_port_index]);
  1203. if (hcd->state == HC_STATE_SUSPENDED) {
  1204. xhci_dbg(xhci, "resume root hub\n");
  1205. usb_hcd_resume_root_hub(hcd);
  1206. }
  1207. if ((temp & PORT_PLC) && (temp & PORT_PLS_MASK) == XDEV_RESUME) {
  1208. xhci_dbg(xhci, "port resume event for port %d\n", port_id);
  1209. temp1 = xhci_readl(xhci, &xhci->op_regs->command);
  1210. if (!(temp1 & CMD_RUN)) {
  1211. xhci_warn(xhci, "xHC is not running.\n");
  1212. goto cleanup;
  1213. }
  1214. if (DEV_SUPERSPEED(temp)) {
  1215. xhci_dbg(xhci, "resume SS port %d\n", port_id);
  1216. temp = xhci_port_state_to_neutral(temp);
  1217. temp &= ~PORT_PLS_MASK;
  1218. temp |= PORT_LINK_STROBE | XDEV_U0;
  1219. xhci_writel(xhci, temp, port_array[faked_port_index]);
  1220. slot_id = xhci_find_slot_id_by_port(hcd, xhci,
  1221. faked_port_index);
  1222. if (!slot_id) {
  1223. xhci_dbg(xhci, "slot_id is zero\n");
  1224. goto cleanup;
  1225. }
  1226. xhci_ring_device(xhci, slot_id);
  1227. xhci_dbg(xhci, "resume SS port %d finished\n", port_id);
  1228. /* Clear PORT_PLC */
  1229. temp = xhci_readl(xhci, port_array[faked_port_index]);
  1230. temp = xhci_port_state_to_neutral(temp);
  1231. temp |= PORT_PLC;
  1232. xhci_writel(xhci, temp, port_array[faked_port_index]);
  1233. } else {
  1234. xhci_dbg(xhci, "resume HS port %d\n", port_id);
  1235. bus_state->resume_done[faked_port_index] = jiffies +
  1236. msecs_to_jiffies(20);
  1237. mod_timer(&hcd->rh_timer,
  1238. bus_state->resume_done[faked_port_index]);
  1239. /* Do the rest in GetPortStatus */
  1240. }
  1241. }
  1242. cleanup:
  1243. /* Update event ring dequeue pointer before dropping the lock */
  1244. inc_deq(xhci, xhci->event_ring, true);
  1245. /* Don't make the USB core poll the roothub if we got a bad port status
  1246. * change event. Besides, at that point we can't tell which roothub
  1247. * (USB 2.0 or USB 3.0) to kick.
  1248. */
  1249. if (bogus_port_status)
  1250. return;
  1251. spin_unlock(&xhci->lock);
  1252. /* Pass this up to the core */
  1253. usb_hcd_poll_rh_status(hcd);
  1254. spin_lock(&xhci->lock);
  1255. }
  1256. /*
  1257. * This TD is defined by the TRBs starting at start_trb in start_seg and ending
  1258. * at end_trb, which may be in another segment. If the suspect DMA address is a
  1259. * TRB in this TD, this function returns that TRB's segment. Otherwise it
  1260. * returns 0.
  1261. */
  1262. struct xhci_segment *trb_in_td(struct xhci_segment *start_seg,
  1263. union xhci_trb *start_trb,
  1264. union xhci_trb *end_trb,
  1265. dma_addr_t suspect_dma)
  1266. {
  1267. dma_addr_t start_dma;
  1268. dma_addr_t end_seg_dma;
  1269. dma_addr_t end_trb_dma;
  1270. struct xhci_segment *cur_seg;
  1271. start_dma = xhci_trb_virt_to_dma(start_seg, start_trb);
  1272. cur_seg = start_seg;
  1273. do {
  1274. if (start_dma == 0)
  1275. return NULL;
  1276. /* We may get an event for a Link TRB in the middle of a TD */
  1277. end_seg_dma = xhci_trb_virt_to_dma(cur_seg,
  1278. &cur_seg->trbs[TRBS_PER_SEGMENT - 1]);
  1279. /* If the end TRB isn't in this segment, this is set to 0 */
  1280. end_trb_dma = xhci_trb_virt_to_dma(cur_seg, end_trb);
  1281. if (end_trb_dma > 0) {
  1282. /* The end TRB is in this segment, so suspect should be here */
  1283. if (start_dma <= end_trb_dma) {
  1284. if (suspect_dma >= start_dma && suspect_dma <= end_trb_dma)
  1285. return cur_seg;
  1286. } else {
  1287. /* Case for one segment with
  1288. * a TD wrapped around to the top
  1289. */
  1290. if ((suspect_dma >= start_dma &&
  1291. suspect_dma <= end_seg_dma) ||
  1292. (suspect_dma >= cur_seg->dma &&
  1293. suspect_dma <= end_trb_dma))
  1294. return cur_seg;
  1295. }
  1296. return NULL;
  1297. } else {
  1298. /* Might still be somewhere in this segment */
  1299. if (suspect_dma >= start_dma && suspect_dma <= end_seg_dma)
  1300. return cur_seg;
  1301. }
  1302. cur_seg = cur_seg->next;
  1303. start_dma = xhci_trb_virt_to_dma(cur_seg, &cur_seg->trbs[0]);
  1304. } while (cur_seg != start_seg);
  1305. return NULL;
  1306. }
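/*
 * Queue the commands needed to recover an endpoint that the hardware has
 * halted: a Reset Endpoint command followed by a Set TR Dequeue Pointer
 * command (queued by xhci_cleanup_stalled_ring), then ring the command
 * doorbell.
 */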
  1307. static void xhci_cleanup_halted_endpoint(struct xhci_hcd *xhci,
  1308. unsigned int slot_id, unsigned int ep_index,
  1309. unsigned int stream_id,
  1310. struct xhci_td *td, union xhci_trb *event_trb)
  1311. {
  1312. struct xhci_virt_ep *ep = &xhci->devs[slot_id]->eps[ep_index];
  1313. ep->ep_state |= EP_HALTED;
  1314. ep->stopped_td = td;
  1315. ep->stopped_trb = event_trb;
  1316. ep->stopped_stream = stream_id;
  1317. xhci_queue_reset_ep(xhci, slot_id, ep_index);
  1318. xhci_cleanup_stalled_ring(xhci, td->urb->dev, ep_index);
  1319. ep->stopped_td = NULL;
  1320. ep->stopped_trb = NULL;
  1321. ep->stopped_stream = 0;
  1322. xhci_ring_cmd_db(xhci);
  1323. }
  1324. /* Check if an error has halted the endpoint ring. The class driver will
  1325. * cleanup the halt for a non-default control endpoint if we indicate a stall.
  1326. * However, a babble and other errors also halt the endpoint ring, and the class
  1327. * driver won't clear the halt in that case, so we need to issue a Set Transfer
  1328. * Ring Dequeue Pointer command manually.
  1329. */
  1330. static int xhci_requires_manual_halt_cleanup(struct xhci_hcd *xhci,
  1331. struct xhci_ep_ctx *ep_ctx,
  1332. unsigned int trb_comp_code)
  1333. {
  1334. /* TRB completion codes that may require a manual halt cleanup */
  1335. if (trb_comp_code == COMP_TX_ERR ||
  1336. trb_comp_code == COMP_BABBLE ||
  1337. trb_comp_code == COMP_SPLIT_ERR)
  1338. /* The 0.95 spec says a babbling control endpoint
  1339. * is not halted. The 0.96 spec says it is. Some HW
  1340. * claims to be 0.95 compliant, but it halts the control
  1341. * endpoint anyway. Check if a babble halted the
  1342. * endpoint.
  1343. */
  1344. if ((ep_ctx->ep_info & cpu_to_le32(EP_STATE_MASK)) ==
  1345. cpu_to_le32(EP_STATE_HALTED))
  1346. return 1;
  1347. return 0;
  1348. }
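/*
 * Completion codes 224-255 are reserved for vendor-defined "informational"
 * codes; treat them as success rather than as errors.
 */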
  1349. int xhci_is_vendor_info_code(struct xhci_hcd *xhci, unsigned int trb_comp_code)
  1350. {
  1351. if (trb_comp_code >= 224 && trb_comp_code <= 255) {
  1352. /* Vendor defined "informational" completion code,
  1353. * treat as not-an-error.
  1354. */
  1355. xhci_dbg(xhci, "Vendor defined info completion code %u\n",
  1356. trb_comp_code);
  1357. xhci_dbg(xhci, "Treating code as success.\n");
  1358. return 1;
  1359. }
  1360. return 0;
  1361. }
  1362. /*
  1363. * Finish the TD processing and remove the TD from its TD list;
  1364. * return 1 if the URB can be given back.
  1365. */
  1366. static int finish_td(struct xhci_hcd *xhci, struct xhci_td *td,
  1367. union xhci_trb *event_trb, struct xhci_transfer_event *event,
  1368. struct xhci_virt_ep *ep, int *status, bool skip)
  1369. {
  1370. struct xhci_virt_device *xdev;
  1371. struct xhci_ring *ep_ring;
  1372. unsigned int slot_id;
  1373. int ep_index;
  1374. struct urb *urb = NULL;
  1375. struct xhci_ep_ctx *ep_ctx;
  1376. int ret = 0;
  1377. struct urb_priv *urb_priv;
  1378. u32 trb_comp_code;
  1379. slot_id = TRB_TO_SLOT_ID(le32_to_cpu(event->flags));
  1380. xdev = xhci->devs[slot_id];
  1381. ep_index = TRB_TO_EP_ID(le32_to_cpu(event->flags)) - 1;
  1382. ep_ring = xhci_dma_to_transfer_ring(ep, le64_to_cpu(event->buffer));
  1383. ep_ctx = xhci_get_ep_ctx(xhci, xdev->out_ctx, ep_index);
  1384. trb_comp_code = GET_COMP_CODE(le32_to_cpu(event->transfer_len));
  1385. if (skip)
  1386. goto td_cleanup;
  1387. if (trb_comp_code == COMP_STOP_INVAL ||
  1388. trb_comp_code == COMP_STOP) {
  1389. /* The Endpoint Stop Command completion will take care of any
  1390. * stopped TDs. A stopped TD may be restarted, so don't update
  1391. * the ring dequeue pointer or take this TD off any lists yet.
  1392. */
  1393. ep->stopped_td = td;
  1394. ep->stopped_trb = event_trb;
  1395. return 0;
  1396. } else {
  1397. if (trb_comp_code == COMP_STALL) {
  1398. /* The transfer is completed from the driver's
  1399. * perspective, but we need to issue a set dequeue
  1400. * command for this stalled endpoint to move the dequeue
  1401. * pointer past the TD. We can't do that here because
  1402. * the halt condition must be cleared first. Let the
  1403. * USB class driver clear the stall later.
  1404. */
  1405. ep->stopped_td = td;
  1406. ep->stopped_trb = event_trb;
  1407. ep->stopped_stream = ep_ring->stream_id;
  1408. } else if (xhci_requires_manual_halt_cleanup(xhci,
  1409. ep_ctx, trb_comp_code)) {
  1410. /* Other types of errors halt the endpoint, but the
  1411. * class driver doesn't call usb_reset_endpoint() unless
  1412. * the error is -EPIPE. Clear the halted status in the
  1413. * xHCI hardware manually.
  1414. */
  1415. xhci_cleanup_halted_endpoint(xhci,
  1416. slot_id, ep_index, ep_ring->stream_id,
  1417. td, event_trb);
  1418. } else {
  1419. /* Update ring dequeue pointer */
  1420. while (ep_ring->dequeue != td->last_trb)
  1421. inc_deq(xhci, ep_ring, false);
  1422. inc_deq(xhci, ep_ring, false);
  1423. }
  1424. td_cleanup:
  1425. /* Clean up the endpoint's TD list */
  1426. urb = td->urb;
  1427. urb_priv = urb->hcpriv;
  1428. /* Do one last check of the actual transfer length.
  1429. * If the host controller said we transferred more data than
  1430. * the buffer length, urb->actual_length will be a very big
  1431. * number (since it's unsigned). Play it safe and say we didn't
  1432. * transfer anything.
  1433. */
  1434. if (urb->actual_length > urb->transfer_buffer_length) {
  1435. xhci_warn(xhci, "URB transfer length is wrong, "
  1436. "xHC issue? req. len = %u, "
  1437. "act. len = %u\n",
  1438. urb->transfer_buffer_length,
  1439. urb->actual_length);
  1440. urb->actual_length = 0;
  1441. if (td->urb->transfer_flags & URB_SHORT_NOT_OK)
  1442. *status = -EREMOTEIO;
  1443. else
  1444. *status = 0;
  1445. }
  1446. list_del(&td->td_list);
  1447. /* Was this TD slated to be cancelled but completed anyway? */
  1448. if (!list_empty(&td->cancelled_td_list))
  1449. list_del(&td->cancelled_td_list);
  1450. urb_priv->td_cnt++;
  1451. /* Giveback the urb when all the tds are completed */
  1452. if (urb_priv->td_cnt == urb_priv->length) {
  1453. ret = 1;
  1454. if (usb_pipetype(urb->pipe) == PIPE_ISOCHRONOUS) {
  1455. xhci_to_hcd(xhci)->self.bandwidth_isoc_reqs--;
  1456. if (xhci_to_hcd(xhci)->self.bandwidth_isoc_reqs
  1457. == 0) {
  1458. if (xhci->quirks & XHCI_AMD_PLL_FIX)
  1459. usb_amd_quirk_pll_enable();
  1460. }
  1461. }
  1462. }
  1463. }
  1464. return ret;
  1465. }
  1466. /*
  1467. * Process control tds, update urb status and actual_length.
  1468. */
  1469. static int process_ctrl_td(struct xhci_hcd *xhci, struct xhci_td *td,
  1470. union xhci_trb *event_trb, struct xhci_transfer_event *event,
  1471. struct xhci_virt_ep *ep, int *status)
  1472. {
  1473. struct xhci_virt_device *xdev;
  1474. struct xhci_ring *ep_ring;
  1475. unsigned int slot_id;
  1476. int ep_index;
  1477. struct xhci_ep_ctx *ep_ctx;
  1478. u32 trb_comp_code;
  1479. slot_id = TRB_TO_SLOT_ID(le32_to_cpu(event->flags));
  1480. xdev = xhci->devs[slot_id];
  1481. ep_index = TRB_TO_EP_ID(le32_to_cpu(event->flags)) - 1;
  1482. ep_ring = xhci_dma_to_transfer_ring(ep, le64_to_cpu(event->buffer));
  1483. ep_ctx = xhci_get_ep_ctx(xhci, xdev->out_ctx, ep_index);
  1484. trb_comp_code = GET_COMP_CODE(le32_to_cpu(event->transfer_len));
  1485. xhci_debug_trb(xhci, xhci->event_ring->dequeue);
  1486. switch (trb_comp_code) {
  1487. case COMP_SUCCESS:
  1488. if (event_trb == ep_ring->dequeue) {
  1489. xhci_warn(xhci, "WARN: Success on ctrl setup TRB "
  1490. "without IOC set??\n");
  1491. *status = -ESHUTDOWN;
  1492. } else if (event_trb != td->last_trb) {
  1493. xhci_warn(xhci, "WARN: Success on ctrl data TRB "
  1494. "without IOC set??\n");
  1495. *status = -ESHUTDOWN;
  1496. } else {
  1497. *status = 0;
  1498. }
  1499. break;
  1500. case COMP_SHORT_TX:
  1501. xhci_warn(xhci, "WARN: short transfer on control ep\n");
  1502. if (td->urb->transfer_flags & URB_SHORT_NOT_OK)
  1503. *status = -EREMOTEIO;
  1504. else
  1505. *status = 0;
  1506. break;
  1507. case COMP_STOP_INVAL:
  1508. case COMP_STOP:
  1509. return finish_td(xhci, td, event_trb, event, ep, status, false);
  1510. default:
  1511. if (!xhci_requires_manual_halt_cleanup(xhci,
  1512. ep_ctx, trb_comp_code))
  1513. break;
  1514. xhci_dbg(xhci, "TRB error code %u, "
  1515. "halted endpoint index = %u\n",
  1516. trb_comp_code, ep_index);
  1517. /* else fall through */
  1518. case COMP_STALL:
  1519. /* Did we transfer part of the data (middle) phase? */
  1520. if (event_trb != ep_ring->dequeue &&
  1521. event_trb != td->last_trb)
  1522. td->urb->actual_length =
  1523. td->urb->transfer_buffer_length
  1524. - TRB_LEN(le32_to_cpu(event->transfer_len));
  1525. else
  1526. td->urb->actual_length = 0;
  1527. xhci_cleanup_halted_endpoint(xhci,
  1528. slot_id, ep_index, 0, td, event_trb);
  1529. return finish_td(xhci, td, event_trb, event, ep, status, true);
  1530. }
  1531. /*
  1532. * Did we transfer any data, despite the errors that might have
  1533. * happened? I.e. did we get past the setup stage?
  1534. */
  1535. if (event_trb != ep_ring->dequeue) {
  1536. /* The event was for the status stage */
  1537. if (event_trb == td->last_trb) {
  1538. if (td->urb->actual_length != 0) {
  1539. /* Don't overwrite a previously set error code
  1540. */
  1541. if ((*status == -EINPROGRESS || *status == 0) &&
  1542. (td->urb->transfer_flags
  1543. & URB_SHORT_NOT_OK))
  1544. /* Did we already see a short data
  1545. * stage? */
  1546. *status = -EREMOTEIO;
  1547. } else {
  1548. td->urb->actual_length =
  1549. td->urb->transfer_buffer_length;
  1550. }
  1551. } else {
  1552. /* Maybe the event was for the data stage? */
  1553. td->urb->actual_length =
  1554. td->urb->transfer_buffer_length -
  1555. TRB_LEN(le32_to_cpu(event->transfer_len));
  1556. xhci_dbg(xhci, "Waiting for status "
  1557. "stage event\n");
  1558. return 0;
  1559. }
  1560. }
  1561. return finish_td(xhci, td, event_trb, event, ep, status, false);
  1562. }
  1563. /*
  1564. * Process isochronous tds, update urb packet status and actual_length.
  1565. */
  1566. static int process_isoc_td(struct xhci_hcd *xhci, struct xhci_td *td,
  1567. union xhci_trb *event_trb, struct xhci_transfer_event *event,
  1568. struct xhci_virt_ep *ep, int *status)
  1569. {
  1570. struct xhci_ring *ep_ring;
  1571. struct urb_priv *urb_priv;
  1572. int idx;
  1573. int len = 0;
  1574. union xhci_trb *cur_trb;
  1575. struct xhci_segment *cur_seg;
  1576. struct usb_iso_packet_descriptor *frame;
  1577. u32 trb_comp_code;
  1578. bool skip_td = false;
  1579. ep_ring = xhci_dma_to_transfer_ring(ep, le64_to_cpu(event->buffer));
  1580. trb_comp_code = GET_COMP_CODE(le32_to_cpu(event->transfer_len));
  1581. urb_priv = td->urb->hcpriv;
  1582. idx = urb_priv->td_cnt;
  1583. frame = &td->urb->iso_frame_desc[idx];
  1584. /* handle completion code */
  1585. switch (trb_comp_code) {
  1586. case COMP_SUCCESS:
  1587. frame->status = 0;
  1588. break;
  1589. case COMP_SHORT_TX:
  1590. frame->status = td->urb->transfer_flags & URB_SHORT_NOT_OK ?
  1591. -EREMOTEIO : 0;
  1592. break;
  1593. case COMP_BW_OVER:
  1594. frame->status = -ECOMM;
  1595. skip_td = true;
  1596. break;
  1597. case COMP_BUFF_OVER:
  1598. case COMP_BABBLE:
  1599. frame->status = -EOVERFLOW;
  1600. skip_td = true;
  1601. break;
  1602. case COMP_DEV_ERR:
  1603. case COMP_STALL:
  1604. frame->status = -EPROTO;
  1605. skip_td = true;
  1606. break;
  1607. case COMP_STOP:
  1608. case COMP_STOP_INVAL:
  1609. break;
  1610. default:
  1611. frame->status = -1;
  1612. break;
  1613. }
  1614. if (trb_comp_code == COMP_SUCCESS || skip_td) {
  1615. frame->actual_length = frame->length;
  1616. td->urb->actual_length += frame->length;
  1617. } else {
  1618. for (cur_trb = ep_ring->dequeue,
  1619. cur_seg = ep_ring->deq_seg; cur_trb != event_trb;
  1620. next_trb(xhci, ep_ring, &cur_seg, &cur_trb)) {
  1621. if (!TRB_TYPE_NOOP_LE32(cur_trb->generic.field[3]) &&
  1622. !TRB_TYPE_LINK_LE32(cur_trb->generic.field[3]))
  1623. len += TRB_LEN(le32_to_cpu(cur_trb->generic.field[2]));
  1624. }
  1625. len += TRB_LEN(le32_to_cpu(cur_trb->generic.field[2])) -
  1626. TRB_LEN(le32_to_cpu(event->transfer_len));
  1627. if (trb_comp_code != COMP_STOP_INVAL) {
  1628. frame->actual_length = len;
  1629. td->urb->actual_length += len;
  1630. }
  1631. }
  1632. return finish_td(xhci, td, event_trb, event, ep, status, false);
  1633. }
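/*
 * Give back an isochronous TD that the xHC skipped (Missed Service Error):
 * mark the frame with -EXDEV and zero actual_length, move the ring dequeue
 * pointer past the TD, and hand it to finish_td().
 */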
  1634. static int skip_isoc_td(struct xhci_hcd *xhci, struct xhci_td *td,
  1635. struct xhci_transfer_event *event,
  1636. struct xhci_virt_ep *ep, int *status)
  1637. {
  1638. struct xhci_ring *ep_ring;
  1639. struct urb_priv *urb_priv;
  1640. struct usb_iso_packet_descriptor *frame;
  1641. int idx;
  1642. ep_ring = xhci_dma_to_transfer_ring(ep, le64_to_cpu(event->buffer));
  1643. urb_priv = td->urb->hcpriv;
  1644. idx = urb_priv->td_cnt;
  1645. frame = &td->urb->iso_frame_desc[idx];
  1646. /* The transfer is partly done. */
  1647. frame->status = -EXDEV;
  1648. /* calc actual length */
  1649. frame->actual_length = 0;
  1650. /* Update ring dequeue pointer */
  1651. while (ep_ring->dequeue != td->last_trb)
  1652. inc_deq(xhci, ep_ring, false);
  1653. inc_deq(xhci, ep_ring, false);
  1654. return finish_td(xhci, td, NULL, event, ep, status, true);
  1655. }
  1656. /*
  1657. * Process bulk and interrupt tds, update urb status and actual_length.
  1658. */
  1659. static int process_bulk_intr_td(struct xhci_hcd *xhci, struct xhci_td *td,
  1660. union xhci_trb *event_trb, struct xhci_transfer_event *event,
  1661. struct xhci_virt_ep *ep, int *status)
  1662. {
  1663. struct xhci_ring *ep_ring;
  1664. union xhci_trb *cur_trb;
  1665. struct xhci_segment *cur_seg;
  1666. u32 trb_comp_code;
  1667. ep_ring = xhci_dma_to_transfer_ring(ep, le64_to_cpu(event->buffer));
  1668. trb_comp_code = GET_COMP_CODE(le32_to_cpu(event->transfer_len));
  1669. switch (trb_comp_code) {
  1670. case COMP_SUCCESS:
  1671. /* Double check that the HW transferred everything. */
  1672. if (event_trb != td->last_trb) {
  1673. xhci_warn(xhci, "WARN Successful completion "
  1674. "on short TX\n");
  1675. if (td->urb->transfer_flags & URB_SHORT_NOT_OK)
  1676. *status = -EREMOTEIO;
  1677. else
  1678. *status = 0;
  1679. } else {
  1680. *status = 0;
  1681. }
  1682. break;
  1683. case COMP_SHORT_TX:
  1684. if (td->urb->transfer_flags & URB_SHORT_NOT_OK)
  1685. *status = -EREMOTEIO;
  1686. else
  1687. *status = 0;
  1688. break;
  1689. default:
  1690. /* Others already handled above */
  1691. break;
  1692. }
  1693. if (trb_comp_code == COMP_SHORT_TX)
  1694. xhci_dbg(xhci, "ep %#x - asked for %d bytes, "
  1695. "%d bytes untransferred\n",
  1696. td->urb->ep->desc.bEndpointAddress,
  1697. td->urb->transfer_buffer_length,
  1698. TRB_LEN(le32_to_cpu(event->transfer_len)));
  1699. /* Fast path - was this the last TRB in the TD for this URB? */
  1700. if (event_trb == td->last_trb) {
  1701. if (TRB_LEN(le32_to_cpu(event->transfer_len)) != 0) {
  1702. td->urb->actual_length =
  1703. td->urb->transfer_buffer_length -
  1704. TRB_LEN(le32_to_cpu(event->transfer_len));
  1705. if (td->urb->transfer_buffer_length <
  1706. td->urb->actual_length) {
  1707. xhci_warn(xhci, "HC gave bad length "
  1708. "of %d bytes left\n",
  1709. TRB_LEN(le32_to_cpu(event->transfer_len)));
  1710. td->urb->actual_length = 0;
  1711. if (td->urb->transfer_flags & URB_SHORT_NOT_OK)
  1712. *status = -EREMOTEIO;
  1713. else
  1714. *status = 0;
  1715. }
  1716. /* Don't overwrite a previously set error code */
  1717. if (*status == -EINPROGRESS) {
  1718. if (td->urb->transfer_flags & URB_SHORT_NOT_OK)
  1719. *status = -EREMOTEIO;
  1720. else
  1721. *status = 0;
  1722. }
  1723. } else {
  1724. td->urb->actual_length =
  1725. td->urb->transfer_buffer_length;
  1726. /* Ignore a short packet completion if the
  1727. * untransferred length was zero.
  1728. */
  1729. if (*status == -EREMOTEIO)
  1730. *status = 0;
  1731. }
  1732. } else {
  1733. /* Slow path - walk the list, starting from the dequeue
  1734. * pointer, to get the actual length transferred.
  1735. */
  1736. td->urb->actual_length = 0;
  1737. for (cur_trb = ep_ring->dequeue, cur_seg = ep_ring->deq_seg;
  1738. cur_trb != event_trb;
  1739. next_trb(xhci, ep_ring, &cur_seg, &cur_trb)) {
  1740. if (!TRB_TYPE_NOOP_LE32(cur_trb->generic.field[3]) &&
  1741. !TRB_TYPE_LINK_LE32(cur_trb->generic.field[3]))
  1742. td->urb->actual_length +=
  1743. TRB_LEN(le32_to_cpu(cur_trb->generic.field[2]));
  1744. }
  1745. /* If the ring didn't stop on a Link or No-op TRB, add
  1746. * in the actual bytes transferred from the Normal TRB
  1747. */
  1748. if (trb_comp_code != COMP_STOP_INVAL)
  1749. td->urb->actual_length +=
  1750. TRB_LEN(le32_to_cpu(cur_trb->generic.field[2])) -
  1751. TRB_LEN(le32_to_cpu(event->transfer_len));
  1752. }
  1753. return finish_td(xhci, td, event_trb, event, ep, status, false);
  1754. }
  1755. /*
  1756. * If this function returns an error condition, it means it got a Transfer
  1757. * event with a corrupted Slot ID, Endpoint ID, or TRB DMA address.
  1758. * At this point, the host controller is probably hosed and should be reset.
  1759. */
  1760. static int handle_tx_event(struct xhci_hcd *xhci,
  1761. struct xhci_transfer_event *event)
  1762. {
  1763. struct xhci_virt_device *xdev;
  1764. struct xhci_virt_ep *ep;
  1765. struct xhci_ring *ep_ring;
  1766. unsigned int slot_id;
  1767. int ep_index;
  1768. struct xhci_td *td = NULL;
  1769. dma_addr_t event_dma;
  1770. struct xhci_segment *event_seg;
  1771. union xhci_trb *event_trb;
  1772. struct urb *urb = NULL;
  1773. int status = -EINPROGRESS;
  1774. struct urb_priv *urb_priv;
  1775. struct xhci_ep_ctx *ep_ctx;
  1776. u32 trb_comp_code;
  1777. int ret = 0;
  1778. slot_id = TRB_TO_SLOT_ID(le32_to_cpu(event->flags));
  1779. xdev = xhci->devs[slot_id];
  1780. if (!xdev) {
  1781. xhci_err(xhci, "ERROR Transfer event pointed to bad slot\n");
  1782. return -ENODEV;
  1783. }
  1784. /* Endpoint ID is 1 based, our index is zero based */
  1785. ep_index = TRB_TO_EP_ID(le32_to_cpu(event->flags)) - 1;
  1786. ep = &xdev->eps[ep_index];
  1787. ep_ring = xhci_dma_to_transfer_ring(ep, le64_to_cpu(event->buffer));
  1788. ep_ctx = xhci_get_ep_ctx(xhci, xdev->out_ctx, ep_index);
  1789. if (!ep_ring ||
  1790. (le32_to_cpu(ep_ctx->ep_info) & EP_STATE_MASK) ==
  1791. EP_STATE_DISABLED) {
  1792. xhci_err(xhci, "ERROR Transfer event for disabled endpoint "
  1793. "or incorrect stream ring\n");
  1794. return -ENODEV;
  1795. }
  1796. event_dma = le64_to_cpu(event->buffer);
  1797. trb_comp_code = GET_COMP_CODE(le32_to_cpu(event->transfer_len));
  1798. /* Look for common error cases */
  1799. switch (trb_comp_code) {
  1800. /* Skip codes that require special handling depending on
  1801. * transfer type
  1802. */
  1803. case COMP_SUCCESS:
  1804. case COMP_SHORT_TX:
  1805. break;
  1806. case COMP_STOP:
  1807. xhci_dbg(xhci, "Stopped on Transfer TRB\n");
  1808. break;
  1809. case COMP_STOP_INVAL:
  1810. xhci_dbg(xhci, "Stopped on No-op or Link TRB\n");
  1811. break;
  1812. case COMP_STALL:
  1813. xhci_warn(xhci, "WARN: Stalled endpoint\n");
  1814. ep->ep_state |= EP_HALTED;
  1815. status = -EPIPE;
  1816. break;
  1817. case COMP_TRB_ERR:
  1818. xhci_warn(xhci, "WARN: TRB error on endpoint\n");
  1819. status = -EILSEQ;
  1820. break;
  1821. case COMP_SPLIT_ERR:
  1822. case COMP_TX_ERR:
  1823. xhci_warn(xhci, "WARN: transfer error on endpoint\n");
  1824. status = -EPROTO;
  1825. break;
  1826. case COMP_BABBLE:
  1827. xhci_warn(xhci, "WARN: babble error on endpoint\n");
  1828. status = -EOVERFLOW;
  1829. break;
  1830. case COMP_DB_ERR:
  1831. xhci_warn(xhci, "WARN: HC couldn't access mem fast enough\n");
  1832. status = -ENOSR;
  1833. break;
  1834. case COMP_BW_OVER:
  1835. xhci_warn(xhci, "WARN: bandwidth overrun event on endpoint\n");
  1836. break;
  1837. case COMP_BUFF_OVER:
  1838. xhci_warn(xhci, "WARN: buffer overrun event on endpoint\n");
  1839. break;
  1840. case COMP_UNDERRUN:
  1841. /*
  1842. * When the Isoch ring is empty, the xHC will generate
  1843. * a Ring Overrun Event for IN Isoch endpoint or Ring
  1844. * Underrun Event for OUT Isoch endpoint.
  1845. */
  1846. xhci_dbg(xhci, "underrun event on endpoint\n");
  1847. if (!list_empty(&ep_ring->td_list))
  1848. xhci_dbg(xhci, "Underrun Event for slot %d ep %d "
  1849. "still with TDs queued?\n",
  1850. TRB_TO_SLOT_ID(le32_to_cpu(event->flags)),
  1851. ep_index);
  1852. goto cleanup;
  1853. case COMP_OVERRUN:
  1854. xhci_dbg(xhci, "overrun event on endpoint\n");
  1855. if (!list_empty(&ep_ring->td_list))
  1856. xhci_dbg(xhci, "Overrun Event for slot %d ep %d "
  1857. "still with TDs queued?\n",
  1858. TRB_TO_SLOT_ID(le32_to_cpu(event->flags)),
  1859. ep_index);
  1860. goto cleanup;
  1861. case COMP_DEV_ERR:
  1862. xhci_warn(xhci, "WARN: detect an incompatible device");
  1863. status = -EPROTO;
  1864. break;
  1865. case COMP_MISSED_INT:
  1866. /*
  1867. * When we encounter a missed service error, one or more isoc TDs
  1868. * may have been missed by the xHC.
  1869. * Set the skip flag of the endpoint; complete the missed TDs as
  1870. * short transfers the next time we process the endpoint ring.
  1871. */
  1872. ep->skip = true;
  1873. xhci_dbg(xhci, "Miss service interval error, set skip flag\n");
  1874. goto cleanup;
  1875. default:
  1876. if (xhci_is_vendor_info_code(xhci, trb_comp_code)) {
  1877. status = 0;
  1878. break;
  1879. }
  1880. xhci_warn(xhci, "ERROR Unknown event condition, HC probably "
  1881. "busted\n");
  1882. goto cleanup;
  1883. }
  1884. do {
  1885. /* This TRB should be in the TD at the head of this ring's
  1886. * TD list.
  1887. */
  1888. if (list_empty(&ep_ring->td_list)) {
  1889. xhci_warn(xhci, "WARN Event TRB for slot %d ep %d "
  1890. "with no TDs queued?\n",
  1891. TRB_TO_SLOT_ID(le32_to_cpu(event->flags)),
  1892. ep_index);
  1893. xhci_dbg(xhci, "Event TRB with TRB type ID %u\n",
  1894. (le32_to_cpu(event->flags) &
  1895. TRB_TYPE_BITMASK)>>10);
  1896. xhci_print_trb_offsets(xhci, (union xhci_trb *) event);
  1897. if (ep->skip) {
  1898. ep->skip = false;
  1899. xhci_dbg(xhci, "td_list is empty while skip "
  1900. "flag set. Clear skip flag.\n");
  1901. }
  1902. ret = 0;
  1903. goto cleanup;
  1904. }
  1905. td = list_entry(ep_ring->td_list.next, struct xhci_td, td_list);
  1906. /* Is this a TRB in the currently executing TD? */
  1907. event_seg = trb_in_td(ep_ring->deq_seg, ep_ring->dequeue,
  1908. td->last_trb, event_dma);
  1909. /*
  1910. * Skip the Force Stopped Event. The event_trb (event_dma) of the FSE
  1911. * is not in the current TD pointed to by ep_ring->dequeue because
  1912. * the hardware dequeue pointer is still at the previous TRB
  1913. * of the current TD. The previous TRB may be a Link TRB or the
  1914. * last TRB of the previous TD. The command completion handler
  1915. * will take care of the rest.
  1916. */
  1917. if (!event_seg && trb_comp_code == COMP_STOP_INVAL) {
  1918. ret = 0;
  1919. goto cleanup;
  1920. }
  1921. if (!event_seg) {
  1922. if (!ep->skip ||
  1923. !usb_endpoint_xfer_isoc(&td->urb->ep->desc)) {
  1924. /* Some host controllers give a spurious
  1925. * successful event after a short transfer.
  1926. * Ignore it.
  1927. */
  1928. if ((xhci->quirks & XHCI_SPURIOUS_SUCCESS) &&
  1929. ep_ring->last_td_was_short) {
  1930. ep_ring->last_td_was_short = false;
  1931. ret = 0;
  1932. goto cleanup;
  1933. }
  1934. /* HC is busted, give up! */
  1935. xhci_err(xhci,
  1936. "ERROR Transfer event TRB DMA ptr not "
  1937. "part of current TD\n");
  1938. return -ESHUTDOWN;
  1939. }
  1940. ret = skip_isoc_td(xhci, td, event, ep, &status);
  1941. goto cleanup;
  1942. }
  1943. if (trb_comp_code == COMP_SHORT_TX)
  1944. ep_ring->last_td_was_short = true;
  1945. else
  1946. ep_ring->last_td_was_short = false;
  1947. if (ep->skip) {
  1948. xhci_dbg(xhci, "Found td. Clear skip flag.\n");
  1949. ep->skip = false;
  1950. }
  1951. event_trb = &event_seg->trbs[(event_dma - event_seg->dma) /
  1952. sizeof(*event_trb)];
  1953. /*
  1954. * No-op TRB should not trigger interrupts.
  1955. * If event_trb is a no-op TRB, it means the
  1956. * corresponding TD has been cancelled. Just ignore
  1957. * the TD.
  1958. */
  1959. if (TRB_TYPE_NOOP_LE32(event_trb->generic.field[3])) {
  1960. xhci_dbg(xhci,
  1961. "event_trb is a no-op TRB. Skip it\n");
  1962. goto cleanup;
  1963. }
  1964. /* Now update the urb's actual_length and give back to
  1965. * the core
  1966. */
  1967. if (usb_endpoint_xfer_control(&td->urb->ep->desc))
  1968. ret = process_ctrl_td(xhci, td, event_trb, event, ep,
  1969. &status);
  1970. else if (usb_endpoint_xfer_isoc(&td->urb->ep->desc))
  1971. ret = process_isoc_td(xhci, td, event_trb, event, ep,
  1972. &status);
  1973. else
  1974. ret = process_bulk_intr_td(xhci, td, event_trb, event,
  1975. ep, &status);
  1976. cleanup:
  1977. /*
  1978. * Do not update event ring dequeue pointer if ep->skip is set.
  1979. * We will roll back to continue processing missed TDs.
  1980. */
  1981. if (trb_comp_code == COMP_MISSED_INT || !ep->skip) {
  1982. inc_deq(xhci, xhci->event_ring, true);
  1983. }
  1984. if (ret) {
  1985. urb = td->urb;
  1986. urb_priv = urb->hcpriv;
  1987. /* Leave the TD around for the reset endpoint function
  1988. * to use (but only if it's not a control endpoint,
  1989. * since we already queued the Set TR dequeue pointer
  1990. * command for stalled control endpoints).
  1991. */
  1992. if (usb_endpoint_xfer_control(&urb->ep->desc) ||
  1993. (trb_comp_code != COMP_STALL &&
  1994. trb_comp_code != COMP_BABBLE))
  1995. xhci_urb_free_priv(xhci, urb_priv);
  1996. usb_hcd_unlink_urb_from_ep(bus_to_hcd(urb->dev->bus), urb);
  1997. if ((urb->actual_length != urb->transfer_buffer_length &&
  1998. (urb->transfer_flags &
  1999. URB_SHORT_NOT_OK)) ||
  2000. status != 0)
  2001. xhci_dbg(xhci, "Giveback URB %p, len = %d, "
  2002. "expected = %x, status = %d\n",
  2003. urb, urb->actual_length,
  2004. urb->transfer_buffer_length,
  2005. status);
  2006. spin_unlock(&xhci->lock);
  2007. /* EHCI, UHCI, and OHCI always unconditionally set the
  2008. * urb->status of an isochronous endpoint to 0.
  2009. */
  2010. if (usb_pipetype(urb->pipe) == PIPE_ISOCHRONOUS)
  2011. status = 0;
  2012. usb_hcd_giveback_urb(bus_to_hcd(urb->dev->bus), urb, status);
  2013. spin_lock(&xhci->lock);
  2014. }
  2015. /*
  2016. * If ep->skip is set, there are missed TDs on the
  2017. * endpoint ring that we still need to take care of.
  2018. * Process them as short transfers until we reach the TD pointed to
  2019. * by the event.
  2020. */
  2021. } while (ep->skip && trb_comp_code != COMP_MISSED_INT);
  2022. return 0;
  2023. }
  2024. /*
  2025. * This function handles all OS-owned events on the event ring. It may drop
  2026. * xhci->lock between event processing (e.g. to pass up port status changes).
  2027. * Returns >0 for "possibly more events to process" (caller should call again),
  2028. * otherwise 0 if done. In future, <0 returns should indicate error code.
  2029. */
  2030. static int xhci_handle_event(struct xhci_hcd *xhci)
  2031. {
  2032. union xhci_trb *event;
  2033. int update_ptrs = 1;
  2034. int ret;
  2035. if (!xhci->event_ring || !xhci->event_ring->dequeue) {
  2036. xhci->error_bitmask |= 1 << 1;
  2037. return 0;
  2038. }
  2039. event = xhci->event_ring->dequeue;
  2040. /* Does the HC or OS own the TRB? */
  2041. if ((le32_to_cpu(event->event_cmd.flags) & TRB_CYCLE) !=
  2042. xhci->event_ring->cycle_state) {
  2043. xhci->error_bitmask |= 1 << 2;
  2044. return 0;
  2045. }
  2046. /*
  2047. * Barrier between reading the TRB_CYCLE (valid) flag above and any
  2048. * speculative reads of the event's flags/data below.
  2049. */
  2050. rmb();
  2051. /* FIXME: Handle more event types. */
  2052. switch ((le32_to_cpu(event->event_cmd.flags) & TRB_TYPE_BITMASK)) {
  2053. case TRB_TYPE(TRB_COMPLETION):
  2054. handle_cmd_completion(xhci, &event->event_cmd);
  2055. break;
  2056. case TRB_TYPE(TRB_PORT_STATUS):
  2057. handle_port_status(xhci, event);
  2058. update_ptrs = 0;
  2059. break;
  2060. case TRB_TYPE(TRB_TRANSFER):
  2061. ret = handle_tx_event(xhci, &event->trans_event);
  2062. if (ret < 0)
  2063. xhci->error_bitmask |= 1 << 9;
  2064. else
  2065. update_ptrs = 0;
  2066. break;
  2067. default:
  2068. if ((le32_to_cpu(event->event_cmd.flags) & TRB_TYPE_BITMASK) >=
  2069. TRB_TYPE(48))
  2070. handle_vendor_event(xhci, event);
  2071. else
  2072. xhci->error_bitmask |= 1 << 3;
  2073. }
  2074. /* Any of the above functions may drop and re-acquire the lock, so check
  2075. * to make sure a watchdog timer didn't mark the host as non-responsive.
  2076. */
  2077. if (xhci->xhc_state & XHCI_STATE_DYING) {
  2078. xhci_dbg(xhci, "xHCI host dying, returning from "
  2079. "event handler.\n");
  2080. return 0;
  2081. }
  2082. if (update_ptrs)
  2083. /* Update SW event ring dequeue pointer */
  2084. inc_deq(xhci, xhci->event_ring, true);
  2085. /* Are there more items on the event ring? Caller will call us again to
  2086. * check.
  2087. */
  2088. return 1;
  2089. }
  2090. /*
  2091. * xHCI spec says we can get an interrupt, and if the HC has an error condition,
  2092. * we might get bad data out of the event ring. Section 4.10.2.7 has a list of
  2093. * indicators of an event TRB error, but we check the status *first* to be safe.
  2094. */
  2095. irqreturn_t xhci_irq(struct usb_hcd *hcd)
  2096. {
  2097. struct xhci_hcd *xhci = hcd_to_xhci(hcd);
  2098. u32 status;
  2099. union xhci_trb *trb;
  2100. u64 temp_64;
  2101. union xhci_trb *event_ring_deq;
  2102. dma_addr_t deq;
  2103. spin_lock(&xhci->lock);
  2104. trb = xhci->event_ring->dequeue;
  2105. /* Check if the xHC generated the interrupt, or the irq is shared */
  2106. status = xhci_readl(xhci, &xhci->op_regs->status);
  2107. if (status == 0xffffffff)
  2108. goto hw_died;
  2109. if (!(status & STS_EINT)) {
  2110. spin_unlock(&xhci->lock);
  2111. return IRQ_NONE;
  2112. }
  2113. if (status & STS_FATAL) {
  2114. xhci_warn(xhci, "WARNING: Host System Error\n");
  2115. xhci_halt(xhci);
  2116. hw_died:
  2117. spin_unlock(&xhci->lock);
  2118. return -ESHUTDOWN;
  2119. }
  2120. /*
  2121. * Clear the op reg interrupt status first,
  2122. * so we can receive interrupts from other MSI-X interrupters.
  2123. * Write 1 to clear the interrupt status.
  2124. */
  2125. status |= STS_EINT;
  2126. xhci_writel(xhci, status, &xhci->op_regs->status);
  2127. /* FIXME when MSI-X is supported and there are multiple vectors */
  2128. /* Clear the MSI-X event interrupt status */
  2129. if (hcd->irq != -1) {
  2130. u32 irq_pending;
  2131. /* Acknowledge the PCI interrupt */
  2132. irq_pending = xhci_readl(xhci, &xhci->ir_set->irq_pending);
  2133. irq_pending |= 0x3;
  2134. xhci_writel(xhci, irq_pending, &xhci->ir_set->irq_pending);
  2135. }
  2136. if (xhci->xhc_state & XHCI_STATE_DYING) {
  2137. xhci_dbg(xhci, "xHCI dying, ignoring interrupt. "
  2138. "Shouldn't IRQs be disabled?\n");
  2139. /* Clear the event handler busy flag (RW1C);
  2140. * the event ring should be empty.
  2141. */
  2142. temp_64 = xhci_read_64(xhci, &xhci->ir_set->erst_dequeue);
  2143. xhci_write_64(xhci, temp_64 | ERST_EHB,
  2144. &xhci->ir_set->erst_dequeue);
  2145. spin_unlock(&xhci->lock);
  2146. return IRQ_HANDLED;
  2147. }
  2148. event_ring_deq = xhci->event_ring->dequeue;
  2149. /* FIXME this should be a delayed service routine
  2150. * that clears the EHB.
  2151. */
  2152. while (xhci_handle_event(xhci) > 0) {}
  2153. temp_64 = xhci_read_64(xhci, &xhci->ir_set->erst_dequeue);
  2154. /* If necessary, update the HW's version of the event ring deq ptr. */
  2155. if (event_ring_deq != xhci->event_ring->dequeue) {
  2156. deq = xhci_trb_virt_to_dma(xhci->event_ring->deq_seg,
  2157. xhci->event_ring->dequeue);
  2158. if (deq == 0)
  2159. xhci_warn(xhci, "WARN something wrong with SW event "
  2160. "ring dequeue ptr.\n");
  2161. /* Update HC event ring dequeue pointer */
  2162. temp_64 &= ERST_PTR_MASK;
  2163. temp_64 |= ((u64) deq & (u64) ~ERST_PTR_MASK);
  2164. }
  2165. /* Clear the event handler busy flag (RW1C); event ring is empty. */
  2166. temp_64 |= ERST_EHB;
  2167. xhci_write_64(xhci, temp_64, &xhci->ir_set->erst_dequeue);
  2168. spin_unlock(&xhci->lock);
  2169. return IRQ_HANDLED;
  2170. }
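/*
 * MSI/MSI-X interrupt handler: note that an interrupt was seen on both
 * roothub HCDs, then defer to the shared xhci_irq() handler.
 */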
  2171. irqreturn_t xhci_msi_irq(int irq, struct usb_hcd *hcd)
  2172. {
  2173. irqreturn_t ret;
  2174. struct xhci_hcd *xhci;
  2175. xhci = hcd_to_xhci(hcd);
  2176. set_bit(HCD_FLAG_SAW_IRQ, &hcd->flags);
  2177. if (xhci->shared_hcd)
  2178. set_bit(HCD_FLAG_SAW_IRQ, &xhci->shared_hcd->flags);
  2179. ret = xhci_irq(hcd);
  2180. return ret;
  2181. }
  2182. /**** Endpoint Ring Operations ****/
  2183. /*
  2184. * Generic function for queueing a TRB on a ring.
  2185. * The caller must have checked to make sure there's room on the ring.
  2186. *
  2187. * @more_trbs_coming: Will you enqueue more TRBs before calling
  2188. * prepare_transfer()?
  2189. */
  2190. static void queue_trb(struct xhci_hcd *xhci, struct xhci_ring *ring,
  2191. bool consumer, bool more_trbs_coming,
  2192. u32 field1, u32 field2, u32 field3, u32 field4)
  2193. {
  2194. struct xhci_generic_trb *trb;
  2195. trb = &ring->enqueue->generic;
  2196. trb->field[0] = cpu_to_le32(field1);
  2197. trb->field[1] = cpu_to_le32(field2);
  2198. trb->field[2] = cpu_to_le32(field3);
  2199. trb->field[3] = cpu_to_le32(field4);
  2200. inc_enq(xhci, ring, consumer, more_trbs_coming);
  2201. }
  2202. /*
  2203. * Does various checks on the endpoint ring, and makes it ready to queue num_trbs.
  2204. * FIXME allocate segments if the ring is full.
  2205. */
  2206. static int prepare_ring(struct xhci_hcd *xhci, struct xhci_ring *ep_ring,
  2207. u32 ep_state, unsigned int num_trbs, gfp_t mem_flags)
  2208. {
  2209. /* Make sure the endpoint has been added to xHC schedule */
  2210. switch (ep_state) {
  2211. case EP_STATE_DISABLED:
  2212. /*
  2213. * USB core changed config/interfaces without notifying us,
  2214. * or hardware is reporting the wrong state.
  2215. */
  2216. xhci_warn(xhci, "WARN urb submitted to disabled ep\n");
  2217. return -ENOENT;
  2218. case EP_STATE_ERROR:
  2219. xhci_warn(xhci, "WARN waiting for error on ep to be cleared\n");
  2220. /* FIXME event handling code for error needs to clear it */
  2221. /* XXX not sure if this should be -ENOENT or not */
  2222. return -EINVAL;
  2223. case EP_STATE_HALTED:
  2224. xhci_dbg(xhci, "WARN halted endpoint, queueing URB anyway.\n");
  2225. case EP_STATE_STOPPED:
  2226. case EP_STATE_RUNNING:
  2227. break;
  2228. default:
  2229. xhci_err(xhci, "ERROR unknown endpoint state for ep\n");
  2230. /*
  2231. * FIXME issue Configure Endpoint command to try to get the HC
  2232. * back into a known state.
  2233. */
  2234. return -EINVAL;
  2235. }
  2236. if (!room_on_ring(xhci, ep_ring, num_trbs)) {
  2237. /* FIXME allocate more room */
  2238. xhci_err(xhci, "ERROR no room on ep ring\n");
  2239. return -ENOMEM;
  2240. }
  2241. if (enqueue_is_link_trb(ep_ring)) {
  2242. struct xhci_ring *ring = ep_ring;
  2243. union xhci_trb *next;
  2244. next = ring->enqueue;
  2245. while (last_trb(xhci, ring, ring->enq_seg, next)) {
  2246. /* If we're not dealing with 0.95 hardware,
  2247. * clear the chain bit.
  2248. */
  2249. if (!xhci_link_trb_quirk(xhci))
  2250. next->link.control &= cpu_to_le32(~TRB_CHAIN);
  2251. else
  2252. next->link.control |= cpu_to_le32(TRB_CHAIN);
  2253. wmb();
  2254. next->link.control ^= cpu_to_le32(TRB_CYCLE);
  2255. /* Toggle the cycle bit after the last ring segment. */
  2256. if (last_trb_on_last_seg(xhci, ring, ring->enq_seg, next)) {
  2257. ring->cycle_state = (ring->cycle_state ? 0 : 1);
  2258. if (!in_interrupt()) {
  2259. xhci_dbg(xhci, "queue_trb: Toggle cycle "
  2260. "state for ring %p = %i\n",
  2261. ring, (unsigned int)ring->cycle_state);
  2262. }
  2263. }
  2264. ring->enq_seg = ring->enq_seg->next;
  2265. ring->enqueue = ring->enq_seg->trbs;
  2266. next = ring->enqueue;
  2267. }
  2268. }
  2269. return 0;
  2270. }
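/*
 * Prepare the endpoint (or stream) ring for num_trbs TRBs and set up the
 * xhci_td bookkeeping for one TD of this URB; the first TD of an URB also
 * links the URB to its endpoint in the USB core.
 */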
  2271. static int prepare_transfer(struct xhci_hcd *xhci,
  2272. struct xhci_virt_device *xdev,
  2273. unsigned int ep_index,
  2274. unsigned int stream_id,
  2275. unsigned int num_trbs,
  2276. struct urb *urb,
  2277. unsigned int td_index,
  2278. gfp_t mem_flags)
  2279. {
  2280. int ret;
  2281. struct urb_priv *urb_priv;
  2282. struct xhci_td *td;
  2283. struct xhci_ring *ep_ring;
  2284. struct xhci_ep_ctx *ep_ctx = xhci_get_ep_ctx(xhci, xdev->out_ctx, ep_index);
  2285. ep_ring = xhci_stream_id_to_ring(xdev, ep_index, stream_id);
  2286. if (!ep_ring) {
  2287. xhci_dbg(xhci, "Can't prepare ring for bad stream ID %u\n",
  2288. stream_id);
  2289. return -EINVAL;
  2290. }
  2291. ret = prepare_ring(xhci, ep_ring,
  2292. le32_to_cpu(ep_ctx->ep_info) & EP_STATE_MASK,
  2293. num_trbs, mem_flags);
  2294. if (ret)
  2295. return ret;
  2296. urb_priv = urb->hcpriv;
  2297. td = urb_priv->td[td_index];
  2298. INIT_LIST_HEAD(&td->td_list);
  2299. INIT_LIST_HEAD(&td->cancelled_td_list);
  2300. if (td_index == 0) {
  2301. ret = usb_hcd_link_urb_to_ep(bus_to_hcd(urb->dev->bus), urb);
  2302. if (unlikely(ret)) {
  2303. xhci_urb_free_priv(xhci, urb_priv);
  2304. urb->hcpriv = NULL;
  2305. return ret;
  2306. }
  2307. }
  2308. td->urb = urb;
  2309. /* Add this TD to the tail of the endpoint ring's TD list */
  2310. list_add_tail(&td->td_list, &ep_ring->td_list);
  2311. td->start_seg = ep_ring->enq_seg;
  2312. td->first_trb = ep_ring->enqueue;
  2313. urb_priv->td[td_index] = td;
  2314. return 0;
  2315. }
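/*
 * Count how many TRBs are needed for a scatter-gather URB, accounting for
 * buffers that cross 64KB boundaries (a single TRB buffer must not cross
 * one).
 */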
  2316. static unsigned int count_sg_trbs_needed(struct xhci_hcd *xhci, struct urb *urb)
  2317. {
  2318. int num_sgs, num_trbs, running_total, temp, i;
  2319. struct scatterlist *sg;
  2320. sg = NULL;
  2321. num_sgs = urb->num_sgs;
  2322. temp = urb->transfer_buffer_length;
  2323. xhci_dbg(xhci, "count sg list trbs: \n");
  2324. num_trbs = 0;
  2325. for_each_sg(urb->sg, sg, num_sgs, i) {
  2326. unsigned int previous_total_trbs = num_trbs;
  2327. unsigned int len = sg_dma_len(sg);
  2328. /* Scatter gather list entries may cross 64KB boundaries */
  2329. running_total = TRB_MAX_BUFF_SIZE -
  2330. (sg_dma_address(sg) & (TRB_MAX_BUFF_SIZE - 1));
  2331. running_total &= TRB_MAX_BUFF_SIZE - 1;
  2332. if (running_total != 0)
  2333. num_trbs++;
  2334. /* How many more 64KB chunks to transfer, how many more TRBs? */
  2335. while (running_total < sg_dma_len(sg) && running_total < temp) {
  2336. num_trbs++;
  2337. running_total += TRB_MAX_BUFF_SIZE;
  2338. }
  2339. xhci_dbg(xhci, " sg #%d: dma = %#llx, len = %#x (%d), num_trbs = %d\n",
  2340. i, (unsigned long long)sg_dma_address(sg),
  2341. len, len, num_trbs - previous_total_trbs);
  2342. len = min_t(int, len, temp);
  2343. temp -= len;
  2344. if (temp == 0)
  2345. break;
  2346. }
  2347. xhci_dbg(xhci, "\n");
  2348. if (!in_interrupt())
  2349. xhci_dbg(xhci, "ep %#x - urb len = %d, sglist used, "
  2350. "num_trbs = %d\n",
  2351. urb->ep->desc.bEndpointAddress,
  2352. urb->transfer_buffer_length,
  2353. num_trbs);
  2354. return num_trbs;
  2355. }
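/* Sanity-check that the TRB count and queued length matched the URB. */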
  2356. static void check_trb_math(struct urb *urb, int num_trbs, int running_total)
  2357. {
  2358. if (num_trbs != 0)
  2359. dev_err(&urb->dev->dev, "%s - ep %#x - Miscalculated number of "
  2360. "TRBs, %d left\n", __func__,
  2361. urb->ep->desc.bEndpointAddress, num_trbs);
  2362. if (running_total != urb->transfer_buffer_length)
  2363. dev_err(&urb->dev->dev, "%s - ep %#x - Miscalculated tx length, "
  2364. "queued %#x (%d), asked for %#x (%d)\n",
  2365. __func__,
  2366. urb->ep->desc.bEndpointAddress,
  2367. running_total, running_total,
  2368. urb->transfer_buffer_length,
  2369. urb->transfer_buffer_length);
  2370. }
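/*
 * Hand a fully built TD to the hardware: flip the cycle bit of the first
 * TRB last (after a write barrier) so the xHC never sees a half-built TD,
 * then ring the endpoint doorbell.
 */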
  2371. static void giveback_first_trb(struct xhci_hcd *xhci, int slot_id,
  2372. unsigned int ep_index, unsigned int stream_id, int start_cycle,
  2373. struct xhci_generic_trb *start_trb)
  2374. {
  2375. /*
  2376. * Pass all the TRBs to the hardware at once and make sure this write
  2377. * isn't reordered.
  2378. */
  2379. wmb();
  2380. if (start_cycle)
  2381. start_trb->field[3] |= cpu_to_le32(start_cycle);
  2382. else
  2383. start_trb->field[3] &= cpu_to_le32(~TRB_CYCLE);
  2384. xhci_ring_ep_doorbell(xhci, slot_id, ep_index, stream_id);
  2385. }
  2386. /*
  2387. * xHCI uses normal TRBs for both bulk and interrupt. When the interrupt
  2388. * endpoint is to be serviced, the xHC will consume (at most) one TD. A TD
  2389. * (comprised of sg list entries) can take several service intervals to
  2390. * transmit.
  2391. */
  2392. int xhci_queue_intr_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
  2393. struct urb *urb, int slot_id, unsigned int ep_index)
  2394. {
  2395. struct xhci_ep_ctx *ep_ctx = xhci_get_ep_ctx(xhci,
  2396. xhci->devs[slot_id]->out_ctx, ep_index);
  2397. int xhci_interval;
  2398. int ep_interval;
  2399. xhci_interval = EP_INTERVAL_TO_UFRAMES(le32_to_cpu(ep_ctx->ep_info));
  2400. ep_interval = urb->interval;
  2401. /* Convert to microframes */
  2402. if (urb->dev->speed == USB_SPEED_LOW ||
  2403. urb->dev->speed == USB_SPEED_FULL)
  2404. ep_interval *= 8;
  2405. /* FIXME change this to a warning and a suggestion to use the new API
  2406. * to set the polling interval (once the API is added).
  2407. */
  2408. if (xhci_interval != ep_interval) {
  2409. if (printk_ratelimit())
  2410. dev_dbg(&urb->dev->dev, "Driver uses different interval"
  2411. " (%d microframe%s) than xHCI "
  2412. "(%d microframe%s)\n",
  2413. ep_interval,
  2414. ep_interval == 1 ? "" : "s",
  2415. xhci_interval,
  2416. xhci_interval == 1 ? "" : "s");
  2417. urb->interval = xhci_interval;
  2418. /* Convert back to frames for LS/FS devices */
  2419. if (urb->dev->speed == USB_SPEED_LOW ||
  2420. urb->dev->speed == USB_SPEED_FULL)
  2421. urb->interval /= 8;
  2422. }
  2423. return xhci_queue_bulk_tx(xhci, GFP_ATOMIC, urb, slot_id, ep_index);
  2424. }
  2425. /*
  2426. * The TD size is the number of bytes remaining in the TD (including this TRB),
  2427. * right shifted by 10.
  2428. * It must fit in bits 21:17, so it can't be bigger than 31.
  2429. */
  2430. static u32 xhci_td_remainder(unsigned int remainder)
  2431. {
  2432. u32 max = (1 << (21 - 17 + 1)) - 1;
  2433. if ((remainder >> 10) >= max)
  2434. return max << 17;
  2435. else
  2436. return (remainder >> 10) << 17;
  2437. }
  2438. /*
  2439. * For xHCI 1.0 host controllers, TD size is the number of packets remaining in
  2440. * the TD (*not* including this TRB).
  2441. *
  2442. * Total TD packet count = total_packet_count =
  2443. * roundup(TD size in bytes / wMaxPacketSize)
  2444. *
  2445. * Packets transferred up to and including this TRB = packets_transferred =
  2446. * rounddown(total bytes transferred including this TRB / wMaxPacketSize)
  2447. *
  2448. * TD size = total_packet_count - packets_transferred
  2449. *
  2450. * It must fit in bits 21:17, so it can't be bigger than 31.
  2451. */
  2452. static u32 xhci_v1_0_td_remainder(int running_total, int trb_buff_len,
  2453. unsigned int total_packet_count, struct urb *urb)
  2454. {
  2455. int packets_transferred;
  2456. /* All the TRB queueing functions don't count the current TRB in
  2457. * running_total.
  2458. */
  2459. packets_transferred = (running_total + trb_buff_len) /
  2460. le16_to_cpu(urb->ep->desc.wMaxPacketSize);
  2461. return xhci_td_remainder(total_packet_count - packets_transferred);
  2462. }
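/*
 * Queue a bulk transfer described by a scatter-gather list: one Normal TRB
 * per 64KB-bounded chunk of each sg entry, chained together, with the first
 * TRB's cycle bit handed to the hardware last.
 */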
  2463. static int queue_bulk_sg_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
  2464. struct urb *urb, int slot_id, unsigned int ep_index)
  2465. {
  2466. struct xhci_ring *ep_ring;
  2467. unsigned int num_trbs;
  2468. struct urb_priv *urb_priv;
  2469. struct xhci_td *td;
  2470. struct scatterlist *sg;
  2471. int num_sgs;
  2472. int trb_buff_len, this_sg_len, running_total;
  2473. unsigned int total_packet_count;
  2474. bool first_trb;
  2475. u64 addr;
  2476. bool more_trbs_coming;
  2477. struct xhci_generic_trb *start_trb;
  2478. int start_cycle;
  2479. ep_ring = xhci_urb_to_transfer_ring(xhci, urb);
  2480. if (!ep_ring)
  2481. return -EINVAL;
  2482. num_trbs = count_sg_trbs_needed(xhci, urb);
  2483. num_sgs = urb->num_sgs;
  2484. total_packet_count = DIV_ROUND_UP(urb->transfer_buffer_length,
  2485. le16_to_cpu(urb->ep->desc.wMaxPacketSize));
  2486. trb_buff_len = prepare_transfer(xhci, xhci->devs[slot_id],
  2487. ep_index, urb->stream_id,
  2488. num_trbs, urb, 0, mem_flags);
  2489. if (trb_buff_len < 0)
  2490. return trb_buff_len;
  2491. urb_priv = urb->hcpriv;
  2492. td = urb_priv->td[0];
  2493. /*
  2494. * Don't give the first TRB to the hardware (by toggling the cycle bit)
  2495. * until we've finished creating all the other TRBs. The ring's cycle
  2496. * state may change as we enqueue the other TRBs, so save it too.
  2497. */
  2498. start_trb = &ep_ring->enqueue->generic;
  2499. start_cycle = ep_ring->cycle_state;
  2500. running_total = 0;
  2501. /*
  2502. * How much data is in the first TRB?
  2503. *
  2504. * There are three forces at work for TRB buffer pointers and lengths:
  2505. * 1. We don't want to walk off the end of this sg-list entry buffer.
  2506. * 2. The transfer length that the driver requested may be smaller than
  2507. * the amount of memory allocated for this scatter-gather list.
  2508. * 3. TRBs buffers can't cross 64KB boundaries.
  2509. */
  2510. sg = urb->sg;
  2511. addr = (u64) sg_dma_address(sg);
  2512. this_sg_len = sg_dma_len(sg);
  2513. trb_buff_len = TRB_MAX_BUFF_SIZE - (addr & (TRB_MAX_BUFF_SIZE - 1));
  2514. trb_buff_len = min_t(int, trb_buff_len, this_sg_len);
  2515. if (trb_buff_len > urb->transfer_buffer_length)
  2516. trb_buff_len = urb->transfer_buffer_length;
  2517. xhci_dbg(xhci, "First length to xfer from 1st sglist entry = %u\n",
  2518. trb_buff_len);
  2519. first_trb = true;
  2520. /* Queue the first TRB, even if it's zero-length */
  2521. do {
  2522. u32 field = 0;
  2523. u32 length_field = 0;
  2524. u32 remainder = 0;
  2525. /* Don't change the cycle bit of the first TRB until later */
  2526. if (first_trb) {
  2527. first_trb = false;
  2528. if (start_cycle == 0)
  2529. field |= 0x1;
  2530. } else
  2531. field |= ep_ring->cycle_state;
  2532. /* Chain all the TRBs together; clear the chain bit in the last
  2533. * TRB to indicate it's the last TRB in the chain.
  2534. */
  2535. if (num_trbs > 1) {
  2536. field |= TRB_CHAIN;
  2537. } else {
  2538. /* FIXME - add check for ZERO_PACKET flag before this */
  2539. td->last_trb = ep_ring->enqueue;
  2540. field |= TRB_IOC;
  2541. }
  2542. /* Only set interrupt on short packet for IN endpoints */
  2543. if (usb_urb_dir_in(urb))
  2544. field |= TRB_ISP;
  2545. xhci_dbg(xhci, " sg entry: dma = %#x, len = %#x (%d), "
  2546. "64KB boundary at %#x, end dma = %#x\n",
  2547. (unsigned int) addr, trb_buff_len, trb_buff_len,
  2548. (unsigned int) (addr + TRB_MAX_BUFF_SIZE) & ~(TRB_MAX_BUFF_SIZE - 1),
  2549. (unsigned int) addr + trb_buff_len);
  2550. if (TRB_MAX_BUFF_SIZE -
  2551. (addr & (TRB_MAX_BUFF_SIZE - 1)) < trb_buff_len) {
  2552. xhci_warn(xhci, "WARN: sg dma xfer crosses 64KB boundaries!\n");
  2553. xhci_dbg(xhci, "Next boundary at %#x, end dma = %#x\n",
  2554. (unsigned int) (addr + TRB_MAX_BUFF_SIZE) & ~(TRB_MAX_BUFF_SIZE - 1),
  2555. (unsigned int) addr + trb_buff_len);
  2556. }
  2557. /* Set the TRB length, TD size, and interrupter fields. */
  2558. if (xhci->hci_version < 0x100) {
  2559. remainder = xhci_td_remainder(
  2560. urb->transfer_buffer_length -
  2561. running_total);
  2562. } else {
  2563. remainder = xhci_v1_0_td_remainder(running_total,
  2564. trb_buff_len, total_packet_count, urb);
  2565. }
  2566. length_field = TRB_LEN(trb_buff_len) |
  2567. remainder |
  2568. TRB_INTR_TARGET(0);
  2569. if (num_trbs > 1)
  2570. more_trbs_coming = true;
  2571. else
  2572. more_trbs_coming = false;
  2573. queue_trb(xhci, ep_ring, false, more_trbs_coming,
  2574. lower_32_bits(addr),
  2575. upper_32_bits(addr),
  2576. length_field,
  2577. field | TRB_TYPE(TRB_NORMAL));
  2578. --num_trbs;
  2579. running_total += trb_buff_len;
  2580. /* Calculate length for next transfer --
  2581. * Are we done queueing all the TRBs for this sg entry?
  2582. */
  2583. this_sg_len -= trb_buff_len;
  2584. if (this_sg_len == 0) {
  2585. --num_sgs;
  2586. if (num_sgs == 0)
  2587. break;
  2588. sg = sg_next(sg);
  2589. addr = (u64) sg_dma_address(sg);
  2590. this_sg_len = sg_dma_len(sg);
  2591. } else {
  2592. addr += trb_buff_len;
  2593. }
  2594. trb_buff_len = TRB_MAX_BUFF_SIZE -
  2595. (addr & (TRB_MAX_BUFF_SIZE - 1));
  2596. trb_buff_len = min_t(int, trb_buff_len, this_sg_len);
  2597. if (running_total + trb_buff_len > urb->transfer_buffer_length)
  2598. trb_buff_len =
  2599. urb->transfer_buffer_length - running_total;
  2600. } while (running_total < urb->transfer_buffer_length);
  2601. check_trb_math(urb, num_trbs, running_total);
  2602. giveback_first_trb(xhci, slot_id, ep_index, urb->stream_id,
  2603. start_cycle, start_trb);
  2604. return 0;
  2605. }
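
/*
 * Example of the first-TRB length math above (illustrative values only): if
 * the first sg entry maps to DMA address 0x1f800 with a length of 4096 bytes,
 * the next 64 KB boundary is at 0x20000, so the first TRB may carry at most
 * 0x20000 - 0x1f800 = 2048 bytes; the remaining 2048 bytes of that sg entry
 * go into a second, chained TRB.
 */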

/* This is very similar to what ehci-q.c qtd_fill() does */
int xhci_queue_bulk_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
		struct urb *urb, int slot_id, unsigned int ep_index)
{
	struct xhci_ring *ep_ring;
	struct urb_priv *urb_priv;
	struct xhci_td *td;
	int num_trbs;
	struct xhci_generic_trb *start_trb;
	bool first_trb;
	bool more_trbs_coming;
	int start_cycle;
	u32 field, length_field;

	int running_total, trb_buff_len, ret;
	unsigned int total_packet_count;
	u64 addr;

	if (urb->num_sgs)
		return queue_bulk_sg_tx(xhci, mem_flags, urb, slot_id, ep_index);

	ep_ring = xhci_urb_to_transfer_ring(xhci, urb);
	if (!ep_ring)
		return -EINVAL;

	num_trbs = 0;
	/* How much data is (potentially) left before the 64KB boundary? */
	running_total = TRB_MAX_BUFF_SIZE -
		(urb->transfer_dma & (TRB_MAX_BUFF_SIZE - 1));
	running_total &= TRB_MAX_BUFF_SIZE - 1;

	/* If there's some data on this 64KB chunk, or we have to send a
	 * zero-length transfer, we need at least one TRB
	 */
	if (running_total != 0 || urb->transfer_buffer_length == 0)
		num_trbs++;
	/* How many more 64KB chunks to transfer, how many more TRBs? */
	while (running_total < urb->transfer_buffer_length) {
		num_trbs++;
		running_total += TRB_MAX_BUFF_SIZE;
	}
	/* FIXME: this doesn't deal with URB_ZERO_PACKET - need one more */

	if (!in_interrupt())
		xhci_dbg(xhci, "ep %#x - urb len = %#x (%d), "
				"addr = %#llx, num_trbs = %d\n",
				urb->ep->desc.bEndpointAddress,
				urb->transfer_buffer_length,
				urb->transfer_buffer_length,
				(unsigned long long)urb->transfer_dma,
				num_trbs);

	ret = prepare_transfer(xhci, xhci->devs[slot_id],
			ep_index, urb->stream_id,
			num_trbs, urb, 0, mem_flags);
	if (ret < 0)
		return ret;

	urb_priv = urb->hcpriv;
	td = urb_priv->td[0];

	/*
	 * Don't give the first TRB to the hardware (by toggling the cycle bit)
	 * until we've finished creating all the other TRBs.  The ring's cycle
	 * state may change as we enqueue the other TRBs, so save it too.
	 */
	start_trb = &ep_ring->enqueue->generic;
	start_cycle = ep_ring->cycle_state;

	running_total = 0;
	total_packet_count = DIV_ROUND_UP(urb->transfer_buffer_length,
			le16_to_cpu(urb->ep->desc.wMaxPacketSize));
	/* How much data is in the first TRB? */
	addr = (u64) urb->transfer_dma;
	trb_buff_len = TRB_MAX_BUFF_SIZE -
		(urb->transfer_dma & (TRB_MAX_BUFF_SIZE - 1));
	if (trb_buff_len > urb->transfer_buffer_length)
		trb_buff_len = urb->transfer_buffer_length;

	first_trb = true;

	/* Queue the first TRB, even if it's zero-length */
	do {
		u32 remainder = 0;
		field = 0;

		/* Don't change the cycle bit of the first TRB until later */
		if (first_trb) {
			first_trb = false;
			if (start_cycle == 0)
				field |= 0x1;
		} else
			field |= ep_ring->cycle_state;

		/* Chain all the TRBs together; clear the chain bit in the last
		 * TRB to indicate it's the last TRB in the chain.
		 */
		if (num_trbs > 1) {
			field |= TRB_CHAIN;
		} else {
			/* FIXME - add check for ZERO_PACKET flag before this */
			td->last_trb = ep_ring->enqueue;
			field |= TRB_IOC;
		}

		/* Only set interrupt on short packet for IN endpoints */
		if (usb_urb_dir_in(urb))
			field |= TRB_ISP;

		/* Set the TRB length, TD size, and interrupter fields. */
		if (xhci->hci_version < 0x100) {
			remainder = xhci_td_remainder(
					urb->transfer_buffer_length -
					running_total);
		} else {
			remainder = xhci_v1_0_td_remainder(running_total,
					trb_buff_len, total_packet_count, urb);
		}
		length_field = TRB_LEN(trb_buff_len) |
			remainder |
			TRB_INTR_TARGET(0);

		if (num_trbs > 1)
			more_trbs_coming = true;
		else
			more_trbs_coming = false;
		queue_trb(xhci, ep_ring, false, more_trbs_coming,
				lower_32_bits(addr),
				upper_32_bits(addr),
				length_field,
				field | TRB_TYPE(TRB_NORMAL));
		--num_trbs;
		running_total += trb_buff_len;

		/* Calculate length for next transfer */
		addr += trb_buff_len;
		trb_buff_len = urb->transfer_buffer_length - running_total;
		if (trb_buff_len > TRB_MAX_BUFF_SIZE)
			trb_buff_len = TRB_MAX_BUFF_SIZE;
	} while (running_total < urb->transfer_buffer_length);

	check_trb_math(urb, num_trbs, running_total);
	giveback_first_trb(xhci, slot_id, ep_index, urb->stream_id,
			start_cycle, start_trb);
	return 0;
}
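
/*
 * Example of the TRB counting above (illustrative values only): a 16 KB
 * contiguous buffer starting at DMA address 0xff00 has 0x10000 - 0xff00 =
 * 256 bytes before the first 64 KB boundary, so one TRB covers those 256
 * bytes and a second TRB carries the remaining 16128 bytes, which lie
 * entirely inside the next 64 KB region.
 */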

/* Caller must have locked xhci->lock */
int xhci_queue_ctrl_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
		struct urb *urb, int slot_id, unsigned int ep_index)
{
	struct xhci_ring *ep_ring;
	int num_trbs;
	int ret;
	struct usb_ctrlrequest *setup;
	struct xhci_generic_trb *start_trb;
	int start_cycle;
	u32 field, length_field;
	struct urb_priv *urb_priv;
	struct xhci_td *td;

	ep_ring = xhci_urb_to_transfer_ring(xhci, urb);
	if (!ep_ring)
		return -EINVAL;

	/*
	 * Need to copy setup packet into setup TRB, so we can't use the setup
	 * DMA address.
	 */
	if (!urb->setup_packet)
		return -EINVAL;

	if (!in_interrupt())
		xhci_dbg(xhci, "Queueing ctrl tx for slot id %d, ep %d\n",
				slot_id, ep_index);
	/* 1 TRB for setup, 1 for status */
	num_trbs = 2;
	/*
	 * Don't need to check if we need additional event data and normal TRBs,
	 * since data in control transfers will never get bigger than 16MB
	 * XXX: can we get a buffer that crosses 64KB boundaries?
	 */
	if (urb->transfer_buffer_length > 0)
		num_trbs++;
	ret = prepare_transfer(xhci, xhci->devs[slot_id],
			ep_index, urb->stream_id,
			num_trbs, urb, 0, mem_flags);
	if (ret < 0)
		return ret;

	urb_priv = urb->hcpriv;
	td = urb_priv->td[0];

	/*
	 * Don't give the first TRB to the hardware (by toggling the cycle bit)
	 * until we've finished creating all the other TRBs.  The ring's cycle
	 * state may change as we enqueue the other TRBs, so save it too.
	 */
	start_trb = &ep_ring->enqueue->generic;
	start_cycle = ep_ring->cycle_state;

	/* Queue setup TRB - see section 6.4.1.2.1 */
	/* FIXME better way to translate setup_packet into two u32 fields? */
	setup = (struct usb_ctrlrequest *) urb->setup_packet;
	field = 0;
	field |= TRB_IDT | TRB_TYPE(TRB_SETUP);
	if (start_cycle == 0)
		field |= 0x1;

	/* xHCI 1.0 6.4.1.2.1: Transfer Type field */
	if (xhci->hci_version == 0x100) {
		if (urb->transfer_buffer_length > 0) {
			if (setup->bRequestType & USB_DIR_IN)
				field |= TRB_TX_TYPE(TRB_DATA_IN);
			else
				field |= TRB_TX_TYPE(TRB_DATA_OUT);
		}
	}

	queue_trb(xhci, ep_ring, false, true,
			setup->bRequestType | setup->bRequest << 8 | le16_to_cpu(setup->wValue) << 16,
			le16_to_cpu(setup->wIndex) | le16_to_cpu(setup->wLength) << 16,
			TRB_LEN(8) | TRB_INTR_TARGET(0),
			/* Immediate data in pointer */
			field);

	/* If there's data, queue data TRBs */
	/* Only set interrupt on short packet for IN endpoints */
	if (usb_urb_dir_in(urb))
		field = TRB_ISP | TRB_TYPE(TRB_DATA);
	else
		field = TRB_TYPE(TRB_DATA);

	length_field = TRB_LEN(urb->transfer_buffer_length) |
		xhci_td_remainder(urb->transfer_buffer_length) |
		TRB_INTR_TARGET(0);
	if (urb->transfer_buffer_length > 0) {
		if (setup->bRequestType & USB_DIR_IN)
			field |= TRB_DIR_IN;
		queue_trb(xhci, ep_ring, false, true,
				lower_32_bits(urb->transfer_dma),
				upper_32_bits(urb->transfer_dma),
				length_field,
				field | ep_ring->cycle_state);
	}

	/* Save the DMA address of the last TRB in the TD */
	td->last_trb = ep_ring->enqueue;

	/* Queue status TRB - see Table 7 and sections 4.11.2.2 and 6.4.1.2.3 */
	/* If the device sent data, the status stage is an OUT transfer */
	if (urb->transfer_buffer_length > 0 && setup->bRequestType & USB_DIR_IN)
		field = 0;
	else
		field = TRB_DIR_IN;
	queue_trb(xhci, ep_ring, false, false,
			0,
			0,
			TRB_INTR_TARGET(0),
			/* Event on completion */
			field | TRB_IOC | TRB_TYPE(TRB_STATUS) | ep_ring->cycle_state);

	giveback_first_trb(xhci, slot_id, ep_index, 0,
			start_cycle, start_trb);
	return 0;
}
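
/*
 * Example of the resulting TD layout (illustrative): a GET_DESCRIPTOR request
 * with wLength = 18 is queued as three TRBs on the control endpoint: a setup
 * TRB carrying the 8-byte request as immediate data, an IN data TRB pointing
 * at the 18-byte buffer, and an OUT status TRB with IOC set so its completion
 * raises an event.
 */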

static int count_isoc_trbs_needed(struct xhci_hcd *xhci,
		struct urb *urb, int i)
{
	int num_trbs = 0;
	u64 addr, td_len, running_total;

	addr = (u64) (urb->transfer_dma + urb->iso_frame_desc[i].offset);
	td_len = urb->iso_frame_desc[i].length;

	running_total = TRB_MAX_BUFF_SIZE - (addr & (TRB_MAX_BUFF_SIZE - 1));
	running_total &= TRB_MAX_BUFF_SIZE - 1;
	if (running_total != 0)
		num_trbs++;

	while (running_total < td_len) {
		num_trbs++;
		running_total += TRB_MAX_BUFF_SIZE;
	}

	return num_trbs;
}
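
/*
 * Example (illustrative values only): an isochronous frame of 3072 bytes
 * whose buffer starts 1024 bytes below a 64 KB boundary needs two TRBs here:
 * one for the 1024 bytes up to the boundary and one for the remaining 2048
 * bytes.
 */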

/*
 * The transfer burst count field of the isochronous TRB defines the number of
 * bursts that are required to move all packets in this TD.  Only SuperSpeed
 * devices can burst up to bMaxBurst number of packets per service interval.
 * This field is zero based, meaning a value of zero in the field means one
 * burst.  Basically, for everything but SuperSpeed devices, this field will be
 * zero.  Only xHCI 1.0 host controllers support this field.
 */
static unsigned int xhci_get_burst_count(struct xhci_hcd *xhci,
		struct usb_device *udev,
		struct urb *urb, unsigned int total_packet_count)
{
	unsigned int max_burst;

	if (xhci->hci_version < 0x100 || udev->speed != USB_SPEED_SUPER)
		return 0;

	/* Zero-based burst count: ceil(packets / packets-per-burst) - 1 */
	max_burst = urb->ep->ss_ep_comp.bMaxBurst;
	return DIV_ROUND_UP(total_packet_count, max_burst + 1) - 1;
}
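
/*
 * Example (illustrative values only): a SuperSpeed isoc endpoint with
 * bMaxBurst = 2 moves up to 3 packets per burst.  A TD of 7 packets therefore
 * needs DIV_ROUND_UP(7, 3) = 3 bursts, and the zero-based TBC field is 2.
 * For anything below SuperSpeed, or on a pre-1.0 host, the field stays 0.
 */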

/*
 * Returns the number of packets in the last "burst" of packets.  This field is
 * valid for all speeds of devices.  USB 2.0 devices can only do one "burst", so
 * the last burst packet count is equal to the total number of packets in the
 * TD.  SuperSpeed endpoints can have up to 3 bursts.  All but the last burst
 * must contain (bMaxBurst + 1) number of packets, but the last burst can
 * contain 1 to (bMaxBurst + 1) packets.
 */
static unsigned int xhci_get_last_burst_packet_count(struct xhci_hcd *xhci,
		struct usb_device *udev,
		struct urb *urb, unsigned int total_packet_count)
{
	unsigned int max_burst;
	unsigned int residue;

	if (xhci->hci_version < 0x100)
		return 0;

	switch (udev->speed) {
	case USB_SPEED_SUPER:
		/* bMaxBurst is zero based: 0 means 1 packet per burst */
		max_burst = urb->ep->ss_ep_comp.bMaxBurst;
		residue = total_packet_count % (max_burst + 1);
		/* If residue is zero, the last burst contains (max_burst + 1)
		 * number of packets, but the TLBPC field is zero-based.
		 */
		if (residue == 0)
			return max_burst;
		return residue - 1;
	default:
		if (total_packet_count == 0)
			return 0;
		return total_packet_count - 1;
	}
}
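
/*
 * Example (illustrative values only): with bMaxBurst = 2 (3 packets per
 * burst), a 7-packet TD has residue = 7 % 3 = 1, so the last burst holds one
 * packet and the zero-based TLBPC field is 0.  A 6-packet TD has residue 0,
 * meaning the last burst is full and TLBPC is max_burst = 2.
 */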

/* This is for isoc transfer */
static int xhci_queue_isoc_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
		struct urb *urb, int slot_id, unsigned int ep_index)
{
	struct xhci_ring *ep_ring;
	struct urb_priv *urb_priv;
	struct xhci_td *td;
	int num_tds, trbs_per_td;
	struct xhci_generic_trb *start_trb;
	bool first_trb;
	int start_cycle;
	u32 field, length_field;
	int running_total, trb_buff_len, td_len, td_remain_len, ret;
	u64 start_addr, addr;
	int i, j;
	bool more_trbs_coming;

	ep_ring = xhci->devs[slot_id]->eps[ep_index].ring;

	num_tds = urb->number_of_packets;
	if (num_tds < 1) {
		xhci_dbg(xhci, "Isoc URB with zero packets?\n");
		return -EINVAL;
	}

	if (!in_interrupt())
		xhci_dbg(xhci, "ep %#x - urb len = %#x (%d),"
				" addr = %#llx, num_tds = %d\n",
				urb->ep->desc.bEndpointAddress,
				urb->transfer_buffer_length,
				urb->transfer_buffer_length,
				(unsigned long long)urb->transfer_dma,
				num_tds);

	start_addr = (u64) urb->transfer_dma;
	start_trb = &ep_ring->enqueue->generic;
	start_cycle = ep_ring->cycle_state;

	/* Queue the first TRB, even if it's zero-length */
	for (i = 0; i < num_tds; i++) {
		unsigned int total_packet_count;
		unsigned int burst_count;
		unsigned int residue;

		first_trb = true;
		running_total = 0;
		addr = start_addr + urb->iso_frame_desc[i].offset;
		td_len = urb->iso_frame_desc[i].length;
		td_remain_len = td_len;
		/* FIXME: Ignoring zero-length packets, can those happen? */
		total_packet_count = DIV_ROUND_UP(td_len,
				le16_to_cpu(urb->ep->desc.wMaxPacketSize));
		burst_count = xhci_get_burst_count(xhci, urb->dev, urb,
				total_packet_count);
		residue = xhci_get_last_burst_packet_count(xhci,
				urb->dev, urb, total_packet_count);

		trbs_per_td = count_isoc_trbs_needed(xhci, urb, i);

		ret = prepare_transfer(xhci, xhci->devs[slot_id], ep_index,
				urb->stream_id, trbs_per_td, urb, i, mem_flags);
		if (ret < 0)
			return ret;

		urb_priv = urb->hcpriv;
		td = urb_priv->td[i];

		for (j = 0; j < trbs_per_td; j++) {
			u32 remainder = 0;
			field = TRB_TBC(burst_count) | TRB_TLBPC(residue);

			if (first_trb) {
				/* Queue the isoc TRB */
				field |= TRB_TYPE(TRB_ISOC);
				/* Assume URB_ISO_ASAP is set */
				field |= TRB_SIA;
				if (i == 0) {
					if (start_cycle == 0)
						field |= 0x1;
				} else
					field |= ep_ring->cycle_state;
				first_trb = false;
			} else {
				/* Queue other normal TRBs */
				field |= TRB_TYPE(TRB_NORMAL);
				field |= ep_ring->cycle_state;
			}

			/* Only set interrupt on short packet for IN EPs */
			if (usb_urb_dir_in(urb))
				field |= TRB_ISP;

			/* Chain all the TRBs together; clear the chain bit in
			 * the last TRB to indicate it's the last TRB in the
			 * chain.
			 */
			if (j < trbs_per_td - 1) {
				field |= TRB_CHAIN;
				more_trbs_coming = true;
			} else {
				td->last_trb = ep_ring->enqueue;
				field |= TRB_IOC;
				if (xhci->hci_version == 0x100) {
					/* Set BEI bit except for the last td */
					if (i < num_tds - 1)
						field |= TRB_BEI;
				}
				more_trbs_coming = false;
			}

			/* Calculate TRB length */
			trb_buff_len = TRB_MAX_BUFF_SIZE -
				(addr & ((1 << TRB_MAX_BUFF_SHIFT) - 1));
			if (trb_buff_len > td_remain_len)
				trb_buff_len = td_remain_len;

			/* Set the TRB length, TD size, & interrupter fields. */
			if (xhci->hci_version < 0x100) {
				remainder = xhci_td_remainder(
						td_len - running_total);
			} else {
				remainder = xhci_v1_0_td_remainder(
						running_total, trb_buff_len,
						total_packet_count, urb);
			}
			length_field = TRB_LEN(trb_buff_len) |
				remainder |
				TRB_INTR_TARGET(0);

			queue_trb(xhci, ep_ring, false, more_trbs_coming,
					lower_32_bits(addr),
					upper_32_bits(addr),
					length_field,
					field);
			running_total += trb_buff_len;

			addr += trb_buff_len;
			td_remain_len -= trb_buff_len;
		}

		/* Check TD length */
		if (running_total != td_len) {
			xhci_err(xhci, "ISOC TD length mismatch\n");
			return -EINVAL;
		}
	}

	if (xhci_to_hcd(xhci)->self.bandwidth_isoc_reqs == 0) {
		if (xhci->quirks & XHCI_AMD_PLL_FIX)
			usb_amd_quirk_pll_disable();
	}
	xhci_to_hcd(xhci)->self.bandwidth_isoc_reqs++;

	giveback_first_trb(xhci, slot_id, ep_index, urb->stream_id,
			start_cycle, start_trb);
	return 0;
}

/*
 * Check transfer ring to guarantee there is enough room for the urb.
 * Update ISO URB start_frame and interval.
 * Update interval as xhci_queue_intr_tx does.  For now, just use the xhci
 * frame_index to update urb->start_frame.
 * Always assume URB_ISO_ASAP is set, and NEVER use urb->start_frame as input.
 */
int xhci_queue_isoc_tx_prepare(struct xhci_hcd *xhci, gfp_t mem_flags,
		struct urb *urb, int slot_id, unsigned int ep_index)
{
	struct xhci_virt_device *xdev;
	struct xhci_ring *ep_ring;
	struct xhci_ep_ctx *ep_ctx;
	int start_frame;
	int xhci_interval;
	int ep_interval;
	int num_tds, num_trbs, i;
	int ret;

	xdev = xhci->devs[slot_id];
	ep_ring = xdev->eps[ep_index].ring;
	ep_ctx = xhci_get_ep_ctx(xhci, xdev->out_ctx, ep_index);

	num_trbs = 0;
	num_tds = urb->number_of_packets;
	for (i = 0; i < num_tds; i++)
		num_trbs += count_isoc_trbs_needed(xhci, urb, i);

	/* Check the ring to guarantee there is enough room for the whole urb.
	 * Do not insert any td of the urb to the ring if the check failed.
	 */
	ret = prepare_ring(xhci, ep_ring, le32_to_cpu(ep_ctx->ep_info) & EP_STATE_MASK,
			num_trbs, mem_flags);
	if (ret)
		return ret;

	start_frame = xhci_readl(xhci, &xhci->run_regs->microframe_index);
	start_frame &= 0x3fff;

	urb->start_frame = start_frame;
	if (urb->dev->speed == USB_SPEED_LOW ||
			urb->dev->speed == USB_SPEED_FULL)
		urb->start_frame >>= 3;

	xhci_interval = EP_INTERVAL_TO_UFRAMES(le32_to_cpu(ep_ctx->ep_info));
	ep_interval = urb->interval;
	/* Convert to microframes */
	if (urb->dev->speed == USB_SPEED_LOW ||
			urb->dev->speed == USB_SPEED_FULL)
		ep_interval *= 8;
	/* FIXME change this to a warning and a suggestion to use the new API
	 * to set the polling interval (once the API is added).
	 */
	if (xhci_interval != ep_interval) {
		if (printk_ratelimit())
			dev_dbg(&urb->dev->dev, "Driver uses different interval"
					" (%d microframe%s) than xHCI "
					"(%d microframe%s)\n",
					ep_interval,
					ep_interval == 1 ? "" : "s",
					xhci_interval,
					xhci_interval == 1 ? "" : "s");
		urb->interval = xhci_interval;
		/* Convert back to frames for LS/FS devices */
		if (urb->dev->speed == USB_SPEED_LOW ||
				urb->dev->speed == USB_SPEED_FULL)
			urb->interval /= 8;
	}
	return xhci_queue_isoc_tx(xhci, GFP_ATOMIC, urb, slot_id, ep_index);
}
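
/*
 * Example of the interval check above (illustrative values only): a full-speed
 * isoc endpoint submitted with urb->interval = 4 frames is compared as 32
 * microframes.  If the endpoint context was programmed with an interval of 64
 * microframes, the driver's value is overridden and converted back, leaving
 * urb->interval = 8 frames.
 */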

/**** Command Ring Operations ****/

/* Generic function for queueing a command TRB on the command ring.
 * Check to make sure there's room on the command ring for one command TRB.
 * Also check that there's room reserved for commands that must not fail.
 * If this is a command that must not fail, meaning command_must_succeed = TRUE,
 * then only check for the number of reserved spots.
 * Don't decrement xhci->cmd_ring_reserved_trbs after we've queued the TRB
 * because the command event handler may want to resubmit a failed command.
 */
static int queue_command(struct xhci_hcd *xhci, u32 field1, u32 field2,
		u32 field3, u32 field4, bool command_must_succeed)
{
	int reserved_trbs = xhci->cmd_ring_reserved_trbs;
	int ret;

	if (!command_must_succeed)
		reserved_trbs++;

	ret = prepare_ring(xhci, xhci->cmd_ring, EP_STATE_RUNNING,
			reserved_trbs, GFP_ATOMIC);
	if (ret < 0) {
		xhci_err(xhci, "ERR: No room for command on command ring\n");
		if (command_must_succeed)
			xhci_err(xhci, "ERR: Reserved TRB counting for "
					"unfailable commands failed.\n");
		return ret;
	}
	queue_trb(xhci, xhci->cmd_ring, false, false, field1, field2, field3,
			field4 | xhci->cmd_ring->cycle_state);
	return 0;
}
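
/*
 * Example of the reservation check above (illustrative values only): with
 * cmd_ring_reserved_trbs = 2, an ordinary command asks prepare_ring() for
 * room for 3 TRBs, so the last two free slots stay available for commands
 * that must not fail; a must-succeed command only asks for those 2 and may
 * therefore use one of the reserved slots.
 */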

/* Queue a slot enable or disable request on the command ring */
int xhci_queue_slot_control(struct xhci_hcd *xhci, u32 trb_type, u32 slot_id)
{
	return queue_command(xhci, 0, 0, 0,
			TRB_TYPE(trb_type) | SLOT_ID_FOR_TRB(slot_id), false);
}

/* Queue an address device command TRB */
int xhci_queue_address_device(struct xhci_hcd *xhci, dma_addr_t in_ctx_ptr,
		u32 slot_id)
{
	return queue_command(xhci, lower_32_bits(in_ctx_ptr),
			upper_32_bits(in_ctx_ptr), 0,
			TRB_TYPE(TRB_ADDR_DEV) | SLOT_ID_FOR_TRB(slot_id),
			false);
}

int xhci_queue_vendor_command(struct xhci_hcd *xhci,
		u32 field1, u32 field2, u32 field3, u32 field4)
{
	return queue_command(xhci, field1, field2, field3, field4, false);
}

/* Queue a reset device command TRB */
int xhci_queue_reset_device(struct xhci_hcd *xhci, u32 slot_id)
{
	return queue_command(xhci, 0, 0, 0,
			TRB_TYPE(TRB_RESET_DEV) | SLOT_ID_FOR_TRB(slot_id),
			false);
}

/* Queue a configure endpoint command TRB */
int xhci_queue_configure_endpoint(struct xhci_hcd *xhci, dma_addr_t in_ctx_ptr,
		u32 slot_id, bool command_must_succeed)
{
	return queue_command(xhci, lower_32_bits(in_ctx_ptr),
			upper_32_bits(in_ctx_ptr), 0,
			TRB_TYPE(TRB_CONFIG_EP) | SLOT_ID_FOR_TRB(slot_id),
			command_must_succeed);
}

/* Queue an evaluate context command TRB */
int xhci_queue_evaluate_context(struct xhci_hcd *xhci, dma_addr_t in_ctx_ptr,
		u32 slot_id)
{
	return queue_command(xhci, lower_32_bits(in_ctx_ptr),
			upper_32_bits(in_ctx_ptr), 0,
			TRB_TYPE(TRB_EVAL_CONTEXT) | SLOT_ID_FOR_TRB(slot_id),
			false);
}

/*
 * Suspend is set to indicate "Stop Endpoint Command" is being issued to stop
 * activity on an endpoint that is about to be suspended.
 */
int xhci_queue_stop_endpoint(struct xhci_hcd *xhci, int slot_id,
		unsigned int ep_index, int suspend)
{
	u32 trb_slot_id = SLOT_ID_FOR_TRB(slot_id);
	u32 trb_ep_index = EP_ID_FOR_TRB(ep_index);
	u32 type = TRB_TYPE(TRB_STOP_RING);
	u32 trb_suspend = SUSPEND_PORT_FOR_TRB(suspend);

	return queue_command(xhci, 0, 0, 0,
			trb_slot_id | trb_ep_index | type | trb_suspend, false);
}

/* Set Transfer Ring Dequeue Pointer command.
 * This should not be used for endpoints that have streams enabled.
 */
static int queue_set_tr_deq(struct xhci_hcd *xhci, int slot_id,
		unsigned int ep_index, unsigned int stream_id,
		struct xhci_segment *deq_seg,
		union xhci_trb *deq_ptr, u32 cycle_state)
{
	dma_addr_t addr;
	u32 trb_slot_id = SLOT_ID_FOR_TRB(slot_id);
	u32 trb_ep_index = EP_ID_FOR_TRB(ep_index);
	u32 trb_stream_id = STREAM_ID_FOR_TRB(stream_id);
	u32 type = TRB_TYPE(TRB_SET_DEQ);
	struct xhci_virt_ep *ep;

	addr = xhci_trb_virt_to_dma(deq_seg, deq_ptr);
	if (addr == 0) {
		xhci_warn(xhci, "WARN Cannot submit Set TR Deq Ptr\n");
		xhci_warn(xhci, "WARN deq seg = %p, deq pt = %p\n",
				deq_seg, deq_ptr);
		return 0;
	}
	ep = &xhci->devs[slot_id]->eps[ep_index];
	if ((ep->ep_state & SET_DEQ_PENDING)) {
		xhci_warn(xhci, "WARN Cannot submit Set TR Deq Ptr\n");
		xhci_warn(xhci, "A Set TR Deq Ptr command is pending.\n");
		return 0;
	}
	ep->queued_deq_seg = deq_seg;
	ep->queued_deq_ptr = deq_ptr;
	return queue_command(xhci, lower_32_bits(addr) | cycle_state,
			upper_32_bits(addr), trb_stream_id,
			trb_slot_id | trb_ep_index | type, false);
}

int xhci_queue_reset_ep(struct xhci_hcd *xhci, int slot_id,
		unsigned int ep_index)
{
	u32 trb_slot_id = SLOT_ID_FOR_TRB(slot_id);
	u32 trb_ep_index = EP_ID_FOR_TRB(ep_index);
	u32 type = TRB_TYPE(TRB_RESET_EP);

	return queue_command(xhci, 0, 0, 0, trb_slot_id | trb_ep_index | type,
			false);
}