xhci-ring.c
/*
 * xHCI host controller driver
 *
 * Copyright (C) 2008 Intel Corp.
 *
 * Author: Sarah Sharp
 * Some code borrowed from the Linux EHCI driver.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
 * or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software Foundation,
 * Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */
/*
 * Ring initialization rules:
 * 1. Each segment is initialized to zero, except for link TRBs.
 * 2. Ring cycle state = 0.  This represents Producer Cycle State (PCS) or
 *    Consumer Cycle State (CCS), depending on ring function.
 * 3. Enqueue pointer = dequeue pointer = address of first TRB in the segment.
 *
 * Ring behavior rules:
 * 1. A ring is empty if enqueue == dequeue.  This means there will always be
 *    at least one free TRB in the ring.  This is useful if you want to turn
 *    that into a link TRB and expand the ring.
 * 2. When incrementing an enqueue or dequeue pointer, if the next TRB is a
 *    link TRB, then load the pointer with the address in the link TRB.  If the
 *    link TRB had its toggle bit set, you may need to update the ring cycle
 *    state (see cycle bit rules).  You may have to do this multiple times
 *    until you reach a non-link TRB.
 * 3. A ring is full if enqueue++ (for the definition of increment above)
 *    equals the dequeue pointer.
 *
 * Cycle bit rules:
 * 1. When a consumer increments a dequeue pointer and encounters a toggle bit
 *    in a link TRB, it must toggle the ring cycle state.
 * 2. When a producer increments an enqueue pointer and encounters a toggle bit
 *    in a link TRB, it must toggle the ring cycle state.
 *
 * Producer rules:
 * 1. Check if ring is full before you enqueue.
 * 2. Write the ring cycle state to the cycle bit in the TRB you're enqueuing.
 *    Update enqueue pointer between each write (which may update the ring
 *    cycle state).
 * 3. Notify consumer.  If SW is producer, it rings the doorbell for command
 *    and endpoint rings.  If HC is the producer for the event ring, it
 *    generates an interrupt according to interrupt moderation rules.
 *
 * Consumer rules:
 * 1. Check if TRB belongs to you.  If the cycle bit == your ring cycle state,
 *    the TRB is owned by the consumer.
 * 2. Update dequeue pointer (which may update the ring cycle state) and
 *    continue processing TRBs until you reach a TRB which is not owned by you.
 * 3. Notify the producer.  SW is the consumer for the event ring, and it
 *    updates the event ring dequeue pointer.  HC is the consumer for the
 *    command and endpoint rings; it generates events on the event ring
 *    for these.
 */
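/*
 * Illustrative sketch (not part of the driver): the consumer-side ownership
 * test described in the rules above, for a ring whose PCS/CCS is tracked in
 * cycle_state.  The helper name is hypothetical; the real logic is folded
 * into inc_deq()/inc_enq() and the event handlers below.
 *
 *	static bool trb_owned_by_consumer(struct xhci_ring *ring,
 *					  union xhci_trb *trb)
 *	{
 *		u32 cycle = le32_to_cpu(trb->generic.field[3]) & TRB_CYCLE;
 *		return cycle == ring->cycle_state;
 *	}
 */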
#include <linux/scatterlist.h>
#include <linux/slab.h>
#include "xhci.h"

static int handle_cmd_in_cmd_wait_list(struct xhci_hcd *xhci,
		struct xhci_virt_device *virt_dev,
		struct xhci_event_cmd *event);

/*
 * Returns zero if the TRB isn't in this segment, otherwise it returns the DMA
 * address of the TRB.
 */
dma_addr_t xhci_trb_virt_to_dma(struct xhci_segment *seg,
		union xhci_trb *trb)
{
	unsigned long segment_offset;

	if (!seg || !trb || trb < seg->trbs)
		return 0;
	/* offset in TRBs */
	segment_offset = trb - seg->trbs;
	if (segment_offset >= TRBS_PER_SEGMENT)
		return 0;
	return seg->dma + (segment_offset * sizeof(*trb));
}
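/*
 * Usage sketch (illustrative only): translating a ring's current dequeue
 * pointer into the bus address the controller sees.  Assumes a ring whose
 * deq_seg/dequeue pair is consistent, as maintained by inc_deq() below.
 *
 *	dma_addr_t deq_dma = xhci_trb_virt_to_dma(ring->deq_seg,
 *						  ring->dequeue);
 *	if (!deq_dma)
 *		xhci_warn(xhci, "dequeue pointer not in its segment\n");
 */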
/* Does this link TRB point to the first segment in a ring,
 * or was the previous TRB the last TRB on the last segment in the ERST?
 */
static bool last_trb_on_last_seg(struct xhci_hcd *xhci, struct xhci_ring *ring,
		struct xhci_segment *seg, union xhci_trb *trb)
{
	if (ring == xhci->event_ring)
		return (trb == &seg->trbs[TRBS_PER_SEGMENT]) &&
			(seg->next == xhci->event_ring->first_seg);
	else
		return le32_to_cpu(trb->link.control) & LINK_TOGGLE;
}

/* Is this TRB a link TRB or was the last TRB the last TRB in this event ring
 * segment?  I.e. would the updated event TRB pointer step off the end of the
 * event seg?
 */
static int last_trb(struct xhci_hcd *xhci, struct xhci_ring *ring,
		struct xhci_segment *seg, union xhci_trb *trb)
{
	if (ring == xhci->event_ring)
		return trb == &seg->trbs[TRBS_PER_SEGMENT];
	else
		return TRB_TYPE_LINK_LE32(trb->link.control);
}

static int enqueue_is_link_trb(struct xhci_ring *ring)
{
	struct xhci_link_trb *link = &ring->enqueue->link;
	return TRB_TYPE_LINK_LE32(link->control);
}
/* Updates trb to point to the next TRB in the ring, and updates seg if the
 * next TRB is in a new segment.  This does not skip over link TRBs, and it
 * does not affect the ring dequeue or enqueue pointers.
 */
static void next_trb(struct xhci_hcd *xhci,
		struct xhci_ring *ring,
		struct xhci_segment **seg,
		union xhci_trb **trb)
{
	if (last_trb(xhci, ring, *seg, *trb)) {
		*seg = (*seg)->next;
		*trb = ((*seg)->trbs);
	} else {
		(*trb)++;
	}
}
/*
 * See Cycle bit rules.  SW is the consumer for the event ring only.
 * Don't make a ring full of link TRBs.  That would be dumb and this would
 * loop.
 */
static void inc_deq(struct xhci_hcd *xhci, struct xhci_ring *ring, bool consumer)
{
	union xhci_trb *next = ++(ring->dequeue);
	unsigned long long addr;

	ring->deq_updates++;
	/* Update the dequeue pointer further if that was a link TRB or we're
	 * at the end of an event ring segment (which doesn't have link TRBs)
	 */
	while (last_trb(xhci, ring, ring->deq_seg, next)) {
		if (consumer && last_trb_on_last_seg(xhci, ring, ring->deq_seg, next)) {
			ring->cycle_state = (ring->cycle_state ? 0 : 1);
			if (!in_interrupt())
				xhci_dbg(xhci, "Toggle cycle state for ring %p = %i\n",
						ring,
						(unsigned int) ring->cycle_state);
		}
		ring->deq_seg = ring->deq_seg->next;
		ring->dequeue = ring->deq_seg->trbs;
		next = ring->dequeue;
	}
	addr = (unsigned long long) xhci_trb_virt_to_dma(ring->deq_seg, ring->dequeue);
}
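/*
 * Consumer-loop sketch (illustrative only): how the event ring is typically
 * drained using the ownership test and inc_deq().  process_one_event() is a
 * hypothetical name; the real handler in this driver also writes the
 * hardware's event ring dequeue register afterward.
 *
 *	while ((le32_to_cpu(ring->dequeue->event_cmd.flags) & TRB_CYCLE) ==
 *			ring->cycle_state) {
 *		process_one_event(xhci, ring->dequeue);
 *		inc_deq(xhci, ring, true);
 *	}
 */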
/*
 * See Cycle bit rules.  SW is the consumer for the event ring only.
 * Don't make a ring full of link TRBs.  That would be dumb and this would
 * loop.
 *
 * If we've just enqueued a TRB that is in the middle of a TD (meaning the
 * chain bit is set), then set the chain bit in all the following link TRBs.
 * If we've enqueued the last TRB in a TD, make sure the following link TRBs
 * have their chain bit cleared (so that each Link TRB is a separate TD).
 *
 * Section 6.4.4.1 of the 0.95 spec says link TRBs cannot have the chain bit
 * set, but other sections talk about dealing with the chain bit set.  This was
 * fixed in the 0.96 specification errata, but we have to assume that all 0.95
 * xHCI hardware can't handle the chain bit being cleared on a link TRB.
 *
 * @more_trbs_coming:	Will you enqueue more TRBs before calling
 *			prepare_transfer()?
 */
static void inc_enq(struct xhci_hcd *xhci, struct xhci_ring *ring,
		bool consumer, bool more_trbs_coming, bool isoc)
{
	u32 chain;
	union xhci_trb *next;
	unsigned long long addr;

	chain = le32_to_cpu(ring->enqueue->generic.field[3]) & TRB_CHAIN;
	next = ++(ring->enqueue);

	ring->enq_updates++;
	/* Update the enqueue pointer further if that was a link TRB or we're
	 * at the end of an event ring segment (which doesn't have link TRBs)
	 */
	while (last_trb(xhci, ring, ring->enq_seg, next)) {
		if (!consumer) {
			if (ring != xhci->event_ring) {
				/*
				 * If the caller doesn't plan on enqueueing more
				 * TDs before ringing the doorbell, then we
				 * don't want to give the link TRB to the
				 * hardware just yet.  We'll give the link TRB
				 * back in prepare_ring() just before we enqueue
				 * the TD at the top of the ring.
				 */
				if (!chain && !more_trbs_coming)
					break;

				/* If we're not dealing with 0.95 hardware or
				 * isoc rings on AMD 0.96 host,
				 * carry over the chain bit of the previous TRB
				 * (which may mean the chain bit is cleared).
				 */
				if (!(isoc && (xhci->quirks & XHCI_AMD_0x96_HOST))
						&& !xhci_link_trb_quirk(xhci)) {
					next->link.control &=
						cpu_to_le32(~TRB_CHAIN);
					next->link.control |=
						cpu_to_le32(chain);
				}
				/* Give this link TRB to the hardware */
				wmb();
				next->link.control ^= cpu_to_le32(TRB_CYCLE);
			}
			/* Toggle the cycle bit after the last ring segment. */
			if (last_trb_on_last_seg(xhci, ring, ring->enq_seg, next)) {
				ring->cycle_state = (ring->cycle_state ? 0 : 1);
				if (!in_interrupt())
					xhci_dbg(xhci, "Toggle cycle state for ring %p = %i\n",
							ring,
							(unsigned int) ring->cycle_state);
			}
		}
		ring->enq_seg = ring->enq_seg->next;
		ring->enqueue = ring->enq_seg->trbs;
		next = ring->enqueue;
	}
	addr = (unsigned long long) xhci_trb_virt_to_dma(ring->enq_seg, ring->enqueue);
}
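/*
 * Producer-side sketch (illustrative only): enqueuing one generic TRB per
 * the producer rules above -- fill in the payload, write the ring's cycle
 * state into the cycle bit last, then advance the enqueue pointer.  The
 * field values here are placeholders.
 *
 *	struct xhci_generic_trb *trb = &ring->enqueue->generic;
 *	trb->field[0] = cpu_to_le32(field1);
 *	trb->field[1] = cpu_to_le32(field2);
 *	trb->field[2] = cpu_to_le32(field3);
 *	trb->field[3] = cpu_to_le32(type_and_flags | ring->cycle_state);
 *	inc_enq(xhci, ring, false, more_trbs_coming, false);
 */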
/*
 * Check to see if there's room to enqueue num_trbs on the ring.  See rules
 * above.
 * FIXME: this would be simpler and faster if we just kept track of the number
 * of free TRBs in a ring.
 */
static int room_on_ring(struct xhci_hcd *xhci, struct xhci_ring *ring,
		unsigned int num_trbs)
{
	int i;
	union xhci_trb *enq = ring->enqueue;
	struct xhci_segment *enq_seg = ring->enq_seg;
	struct xhci_segment *cur_seg;
	unsigned int left_on_ring;

	/* If we are currently pointing to a link TRB, advance the
	 * enqueue pointer before checking for space */
	while (last_trb(xhci, ring, enq_seg, enq)) {
		enq_seg = enq_seg->next;
		enq = enq_seg->trbs;
	}

	/* Check if ring is empty */
	if (enq == ring->dequeue) {
		/* Can't use link trbs */
		left_on_ring = TRBS_PER_SEGMENT - 1;
		for (cur_seg = enq_seg->next; cur_seg != enq_seg;
				cur_seg = cur_seg->next)
			left_on_ring += TRBS_PER_SEGMENT - 1;

		/* Always need one TRB free in the ring. */
		left_on_ring -= 1;
		if (num_trbs > left_on_ring) {
			xhci_warn(xhci, "Not enough room on ring; "
					"need %u TRBs, %u TRBs left\n",
					num_trbs, left_on_ring);
			return 0;
		}
		return 1;
	}
	/* Make sure there's an extra empty TRB available */
	for (i = 0; i <= num_trbs; ++i) {
		if (enq == ring->dequeue)
			return 0;
		enq++;
		while (last_trb(xhci, ring, enq_seg, enq)) {
			enq_seg = enq_seg->next;
			enq = enq_seg->trbs;
		}
	}
	return 1;
}
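/*
 * Sketch of the FIXME above (an assumption, not this driver's code): keep a
 * per-ring free-TRB counter so the space check becomes O(1).  A hypothetical
 * num_trbs_free field would be decremented in inc_enq() and incremented in
 * inc_deq(), reducing room_on_ring() to:
 *
 *	static int room_on_ring(struct xhci_hcd *xhci, struct xhci_ring *ring,
 *			unsigned int num_trbs)
 *	{
 *		return ring->num_trbs_free >= num_trbs;
 *	}
 */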
/* Ring the host controller doorbell after placing a command on the ring */
void xhci_ring_cmd_db(struct xhci_hcd *xhci)
{
	xhci_dbg(xhci, "// Ding dong!\n");
	xhci_writel(xhci, DB_VALUE_HOST, &xhci->dba->doorbell[0]);
	/* Flush PCI posted writes */
	xhci_readl(xhci, &xhci->dba->doorbell[0]);
}

void xhci_ring_ep_doorbell(struct xhci_hcd *xhci,
		unsigned int slot_id,
		unsigned int ep_index,
		unsigned int stream_id)
{
	__le32 __iomem *db_addr = &xhci->dba->doorbell[slot_id];
	struct xhci_virt_ep *ep = &xhci->devs[slot_id]->eps[ep_index];
	unsigned int ep_state = ep->ep_state;

	/* Don't ring the doorbell for this endpoint if there are pending
	 * cancellations because we don't want to interrupt processing.
	 * We don't want to restart any stream rings if there's a set dequeue
	 * pointer command pending because the device can choose to start any
	 * stream once the endpoint is on the HW schedule.
	 * FIXME - check all the stream rings for pending cancellations.
	 */
	if ((ep_state & EP_HALT_PENDING) || (ep_state & SET_DEQ_PENDING) ||
	    (ep_state & EP_HALTED))
		return;
	xhci_writel(xhci, DB_VALUE(ep_index, stream_id), db_addr);
	/* The CPU has better things to do at this point than wait for a
	 * write-posting flush.  It'll get there soon enough.
	 */
}
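/*
 * Note on the doorbell encoding (illustrative; see DB_VALUE()/DB_VALUE_HOST
 * in xhci.h): the low byte selects the doorbell target (endpoint index + 1;
 * 0 targets the host controller's command ring) and the upper 16 bits carry
 * the stream ID, so DB_VALUE(ep_index, stream_id) is expected to expand to
 * roughly:
 *
 *	(((ep_index) + 1) & 0xff) | ((stream_id) << 16)
 */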
/* Ring the doorbell for any rings with pending URBs */
static void ring_doorbell_for_active_rings(struct xhci_hcd *xhci,
		unsigned int slot_id,
		unsigned int ep_index)
{
	unsigned int stream_id;
	struct xhci_virt_ep *ep;

	ep = &xhci->devs[slot_id]->eps[ep_index];

	/* A ring has pending URBs if its TD list is not empty */
	if (!(ep->ep_state & EP_HAS_STREAMS)) {
		if (!(list_empty(&ep->ring->td_list)))
			xhci_ring_ep_doorbell(xhci, slot_id, ep_index, 0);
		return;
	}

	for (stream_id = 1; stream_id < ep->stream_info->num_streams;
			stream_id++) {
		struct xhci_stream_info *stream_info = ep->stream_info;
		if (!list_empty(&stream_info->stream_rings[stream_id]->td_list))
			xhci_ring_ep_doorbell(xhci, slot_id, ep_index,
					stream_id);
	}
}
/*
 * Find the segment that trb is in.  Start searching in start_seg.
 * If we must move past a segment that has a link TRB with a toggle cycle state
 * bit set, then we will toggle the value pointed at by cycle_state.
 */
static struct xhci_segment *find_trb_seg(
		struct xhci_segment *start_seg,
		union xhci_trb *trb, int *cycle_state)
{
	struct xhci_segment *cur_seg = start_seg;
	struct xhci_generic_trb *generic_trb;

	while (cur_seg->trbs > trb ||
			&cur_seg->trbs[TRBS_PER_SEGMENT - 1] < trb) {
		generic_trb = &cur_seg->trbs[TRBS_PER_SEGMENT - 1].generic;
		if (generic_trb->field[3] & cpu_to_le32(LINK_TOGGLE))
			*cycle_state ^= 0x1;
		cur_seg = cur_seg->next;
		if (cur_seg == start_seg)
			/* Looped over the entire list.  Oops! */
			return NULL;
	}
	return cur_seg;
}
static struct xhci_ring *xhci_triad_to_transfer_ring(struct xhci_hcd *xhci,
		unsigned int slot_id, unsigned int ep_index,
		unsigned int stream_id)
{
	struct xhci_virt_ep *ep;

	ep = &xhci->devs[slot_id]->eps[ep_index];
	/* Common case: no streams */
	if (!(ep->ep_state & EP_HAS_STREAMS))
		return ep->ring;

	if (stream_id == 0) {
		xhci_warn(xhci,
				"WARN: Slot ID %u, ep index %u has streams, "
				"but URB has no stream ID.\n",
				slot_id, ep_index);
		return NULL;
	}

	if (stream_id < ep->stream_info->num_streams)
		return ep->stream_info->stream_rings[stream_id];

	xhci_warn(xhci,
			"WARN: Slot ID %u, ep index %u has "
			"stream IDs 1 to %u allocated, "
			"but stream ID %u is requested.\n",
			slot_id, ep_index,
			ep->stream_info->num_streams - 1,
			stream_id);
	return NULL;
}

/* Get the right ring for the given URB.
 * If the endpoint supports streams, boundary check the URB's stream ID.
 * If the endpoint doesn't support streams, return the singular endpoint ring.
 */
static struct xhci_ring *xhci_urb_to_transfer_ring(struct xhci_hcd *xhci,
		struct urb *urb)
{
	return xhci_triad_to_transfer_ring(xhci, urb->dev->slot_id,
		xhci_get_endpoint_index(&urb->ep->desc), urb->stream_id);
}
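/*
 * For reference (illustrative sketch mirroring xhci_get_endpoint_index() in
 * xhci-mem.c): the endpoint index is the xHCI Device Context Index minus one.
 * Control endpoints use one bidirectional slot; other transfer types get
 * separate OUT/IN slots.
 *
 *	unsigned int num = usb_endpoint_num(desc);
 *	if (usb_endpoint_xfer_control(desc))
 *		index = num * 2;
 *	else
 *		index = num * 2 + (usb_endpoint_dir_in(desc) ? 1 : 0) - 1;
 */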
/*
 * Move the xHC's endpoint ring dequeue pointer past cur_td.
 * Record the new state of the xHC's endpoint ring dequeue segment,
 * dequeue pointer, and new consumer cycle state in state.
 * Update our internal representation of the ring's dequeue pointer.
 *
 * We do this in three jumps:
 *  - First we update our new ring state to be the same as when the xHC stopped.
 *  - Then we traverse the ring to find the segment that contains
 *    the last TRB in the TD.  We toggle the xHC's new cycle state when we pass
 *    any link TRBs with the toggle cycle bit set.
 *  - Finally we move the dequeue state one TRB further, toggling the cycle bit
 *    if we've moved it past a link TRB with the toggle cycle bit set.
 *
 * Some of the uses of xhci_generic_trb are grotty, but if they're done
 * with correct __le32 accesses they should work fine.  Only users of this are
 * in here.
 */
void xhci_find_new_dequeue_state(struct xhci_hcd *xhci,
		unsigned int slot_id, unsigned int ep_index,
		unsigned int stream_id, struct xhci_td *cur_td,
		struct xhci_dequeue_state *state)
{
	struct xhci_virt_device *dev = xhci->devs[slot_id];
	struct xhci_ring *ep_ring;
	struct xhci_generic_trb *trb;
	struct xhci_ep_ctx *ep_ctx;
	dma_addr_t addr;

	ep_ring = xhci_triad_to_transfer_ring(xhci, slot_id,
			ep_index, stream_id);
	if (!ep_ring) {
		xhci_warn(xhci, "WARN can't find new dequeue state "
				"for invalid stream ID %u.\n",
				stream_id);
		return;
	}
	state->new_cycle_state = 0;
	xhci_dbg(xhci, "Finding segment containing stopped TRB.\n");
	state->new_deq_seg = find_trb_seg(cur_td->start_seg,
			dev->eps[ep_index].stopped_trb,
			&state->new_cycle_state);
	if (!state->new_deq_seg) {
		WARN_ON(1);
		return;
	}

	/* Dig out the cycle state saved by the xHC during the stop ep cmd */
	xhci_dbg(xhci, "Finding endpoint context\n");
	ep_ctx = xhci_get_ep_ctx(xhci, dev->out_ctx, ep_index);
	state->new_cycle_state = 0x1 & le64_to_cpu(ep_ctx->deq);

	state->new_deq_ptr = cur_td->last_trb;
	xhci_dbg(xhci, "Finding segment containing last TRB in TD.\n");
	state->new_deq_seg = find_trb_seg(state->new_deq_seg,
			state->new_deq_ptr,
			&state->new_cycle_state);
	if (!state->new_deq_seg) {
		WARN_ON(1);
		return;
	}

	trb = &state->new_deq_ptr->generic;
	if (TRB_TYPE_LINK_LE32(trb->field[3]) &&
	    (trb->field[3] & cpu_to_le32(LINK_TOGGLE)))
		state->new_cycle_state ^= 0x1;
	next_trb(xhci, ep_ring, &state->new_deq_seg, &state->new_deq_ptr);

	/*
	 * If there is only one segment in a ring, find_trb_seg()'s while loop
	 * will not run, and it will return before it has a chance to see if it
	 * needs to toggle the cycle bit.  It can't tell if the stalled transfer
	 * ended just before the link TRB on a one-segment ring, or if the TD
	 * wrapped around the top of the ring, because it doesn't have the TD in
	 * question.  Look for the one-segment case where stalled TRB's address
	 * is greater than the new dequeue pointer address.
	 */
	if (ep_ring->first_seg == ep_ring->first_seg->next &&
			state->new_deq_ptr < dev->eps[ep_index].stopped_trb)
		state->new_cycle_state ^= 0x1;
	xhci_dbg(xhci, "Cycle state = 0x%x\n", state->new_cycle_state);

	/* Don't update the ring cycle state for the producer (us). */
	xhci_dbg(xhci, "New dequeue segment = %p (virtual)\n",
			state->new_deq_seg);
	addr = xhci_trb_virt_to_dma(state->new_deq_seg, state->new_deq_ptr);
	xhci_dbg(xhci, "New dequeue pointer = 0x%llx (DMA)\n",
			(unsigned long long) addr);
}
/* flip_cycle means flip the cycle bit of all but the first and last TRB.
 * (The last TRB actually points to the ring enqueue pointer, which is not part
 * of this TD.)  This is used to remove partially enqueued isoc TDs from a ring.
 */
static void td_to_noop(struct xhci_hcd *xhci, struct xhci_ring *ep_ring,
		struct xhci_td *cur_td, bool flip_cycle)
{
	struct xhci_segment *cur_seg;
	union xhci_trb *cur_trb;

	for (cur_seg = cur_td->start_seg, cur_trb = cur_td->first_trb;
			true;
			next_trb(xhci, ep_ring, &cur_seg, &cur_trb)) {
		if (TRB_TYPE_LINK_LE32(cur_trb->generic.field[3])) {
			/* Unchain any chained Link TRBs, but
			 * leave the pointers intact.
			 */
			cur_trb->generic.field[3] &= cpu_to_le32(~TRB_CHAIN);
			/* Flip the cycle bit (link TRBs can't be the first
			 * or last TRB).
			 */
			if (flip_cycle)
				cur_trb->generic.field[3] ^=
					cpu_to_le32(TRB_CYCLE);
			xhci_dbg(xhci, "Cancel (unchain) link TRB\n");
			xhci_dbg(xhci, "Address = %p (0x%llx dma); "
					"in seg %p (0x%llx dma)\n",
					cur_trb,
					(unsigned long long)xhci_trb_virt_to_dma(cur_seg, cur_trb),
					cur_seg,
					(unsigned long long)cur_seg->dma);
		} else {
			cur_trb->generic.field[0] = 0;
			cur_trb->generic.field[1] = 0;
			cur_trb->generic.field[2] = 0;
			/* Preserve only the cycle bit of this TRB */
			cur_trb->generic.field[3] &= cpu_to_le32(TRB_CYCLE);
			/* Flip the cycle bit except on the first or last TRB */
			if (flip_cycle && cur_trb != cur_td->first_trb &&
					cur_trb != cur_td->last_trb)
				cur_trb->generic.field[3] ^=
					cpu_to_le32(TRB_CYCLE);
			cur_trb->generic.field[3] |= cpu_to_le32(
				TRB_TYPE(TRB_TR_NOOP));
			xhci_dbg(xhci, "Cancel TRB %p (0x%llx dma) "
					"in seg %p (0x%llx dma)\n",
					cur_trb,
					(unsigned long long)xhci_trb_virt_to_dma(cur_seg, cur_trb),
					cur_seg,
					(unsigned long long)cur_seg->dma);
		}
		if (cur_trb == cur_td->last_trb)
			break;
	}
}
static int queue_set_tr_deq(struct xhci_hcd *xhci, int slot_id,
		unsigned int ep_index, unsigned int stream_id,
		struct xhci_segment *deq_seg,
		union xhci_trb *deq_ptr, u32 cycle_state);

void xhci_queue_new_dequeue_state(struct xhci_hcd *xhci,
		unsigned int slot_id, unsigned int ep_index,
		unsigned int stream_id,
		struct xhci_dequeue_state *deq_state)
{
	struct xhci_virt_ep *ep = &xhci->devs[slot_id]->eps[ep_index];

	xhci_dbg(xhci, "Set TR Deq Ptr cmd, new deq seg = %p (0x%llx dma), "
			"new deq ptr = %p (0x%llx dma), new cycle = %u\n",
			deq_state->new_deq_seg,
			(unsigned long long)deq_state->new_deq_seg->dma,
			deq_state->new_deq_ptr,
			(unsigned long long)xhci_trb_virt_to_dma(deq_state->new_deq_seg, deq_state->new_deq_ptr),
			deq_state->new_cycle_state);
	queue_set_tr_deq(xhci, slot_id, ep_index, stream_id,
			deq_state->new_deq_seg,
			deq_state->new_deq_ptr,
			(u32) deq_state->new_cycle_state);
	/* Stop the TD queueing code from ringing the doorbell until
	 * this command completes.  The HC won't set the dequeue pointer
	 * if the ring is running, and ringing the doorbell starts the
	 * ring running.
	 */
	ep->ep_state |= SET_DEQ_PENDING;
}
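/*
 * Typical pairing (illustrative; this is what handle_stopped_endpoint()
 * below does): compute the new dequeue state past a cancelled TD, queue the
 * Set TR Deq Pointer command, then ring the command doorbell.
 *
 *	xhci_find_new_dequeue_state(xhci, slot_id, ep_index, stream_id,
 *			cur_td, &deq_state);
 *	xhci_queue_new_dequeue_state(xhci, slot_id, ep_index, stream_id,
 *			&deq_state);
 *	xhci_ring_cmd_db(xhci);
 */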
static void xhci_stop_watchdog_timer_in_irq(struct xhci_hcd *xhci,
		struct xhci_virt_ep *ep)
{
	ep->ep_state &= ~EP_HALT_PENDING;
	/* Can't del_timer_sync in interrupt, so we attempt to cancel.  If the
	 * timer is running on another CPU, we don't decrement stop_cmds_pending
	 * (since we didn't successfully stop the watchdog timer).
	 */
	if (del_timer(&ep->stop_cmd_timer))
		ep->stop_cmds_pending--;
}
/* Must be called with xhci->lock held in interrupt context */
static void xhci_giveback_urb_in_irq(struct xhci_hcd *xhci,
		struct xhci_td *cur_td, int status, char *adjective)
{
	struct usb_hcd *hcd;
	struct urb *urb;
	struct urb_priv *urb_priv;

	urb = cur_td->urb;
	urb_priv = urb->hcpriv;
	urb_priv->td_cnt++;
	hcd = bus_to_hcd(urb->dev->bus);

	/* Only giveback urb when this is the last td in urb */
	if (urb_priv->td_cnt == urb_priv->length) {
		if (usb_pipetype(urb->pipe) == PIPE_ISOCHRONOUS) {
			xhci_to_hcd(xhci)->self.bandwidth_isoc_reqs--;
			if (xhci_to_hcd(xhci)->self.bandwidth_isoc_reqs == 0) {
				if (xhci->quirks & XHCI_AMD_PLL_FIX)
					usb_amd_quirk_pll_enable();
			}
		}
		usb_hcd_unlink_urb_from_ep(hcd, urb);

		spin_unlock(&xhci->lock);
		usb_hcd_giveback_urb(hcd, urb, status);
		xhci_urb_free_priv(xhci, urb_priv);
		spin_lock(&xhci->lock);
	}
}
/*
 * When we get a command completion for a Stop Endpoint Command, we need to
 * unlink any cancelled TDs from the ring.  There are two ways to do that:
 *
 *  1. If the HW was in the middle of processing the TD that needs to be
 *     cancelled, then we must move the ring's dequeue pointer past the last
 *     TRB in the TD with a Set Dequeue Pointer Command.
 *  2. Otherwise, we turn all the TRBs in the TD into No-op TRBs (with the
 *     chain bit cleared) so that the HW will skip over them.
 */
static void handle_stopped_endpoint(struct xhci_hcd *xhci,
		union xhci_trb *trb, struct xhci_event_cmd *event)
{
	unsigned int slot_id;
	unsigned int ep_index;
	struct xhci_virt_device *virt_dev;
	struct xhci_ring *ep_ring;
	struct xhci_virt_ep *ep;
	struct list_head *entry;
	struct xhci_td *cur_td = NULL;
	struct xhci_td *last_unlinked_td;

	struct xhci_dequeue_state deq_state;

	if (unlikely(TRB_TO_SUSPEND_PORT(
			     le32_to_cpu(xhci->cmd_ring->dequeue->generic.field[3])))) {
		slot_id = TRB_TO_SLOT_ID(
			le32_to_cpu(xhci->cmd_ring->dequeue->generic.field[3]));
		virt_dev = xhci->devs[slot_id];
		if (virt_dev)
			handle_cmd_in_cmd_wait_list(xhci, virt_dev,
				event);
		else
			xhci_warn(xhci, "Stop endpoint command "
				"completion for disabled slot %u\n",
				slot_id);
		return;
	}

	memset(&deq_state, 0, sizeof(deq_state));
	slot_id = TRB_TO_SLOT_ID(le32_to_cpu(trb->generic.field[3]));
	ep_index = TRB_TO_EP_INDEX(le32_to_cpu(trb->generic.field[3]));
	ep = &xhci->devs[slot_id]->eps[ep_index];

	if (list_empty(&ep->cancelled_td_list)) {
		xhci_stop_watchdog_timer_in_irq(xhci, ep);
		ep->stopped_td = NULL;
		ep->stopped_trb = NULL;
		ring_doorbell_for_active_rings(xhci, slot_id, ep_index);
		return;
	}

	/* Fix up the ep ring first, so HW stops executing cancelled TDs.
	 * We have the xHCI lock, so nothing can modify this list until we drop
	 * it.  We're also in the event handler, so we can't get re-interrupted
	 * if another Stop Endpoint command completes.
	 */
	list_for_each(entry, &ep->cancelled_td_list) {
		cur_td = list_entry(entry, struct xhci_td, cancelled_td_list);
		xhci_dbg(xhci, "Cancelling TD starting at %p, 0x%llx (dma).\n",
				cur_td->first_trb,
				(unsigned long long)xhci_trb_virt_to_dma(cur_td->start_seg, cur_td->first_trb));
		ep_ring = xhci_urb_to_transfer_ring(xhci, cur_td->urb);
		if (!ep_ring) {
			/* This shouldn't happen unless a driver is mucking
			 * with the stream ID after submission.  This will
			 * leave the TD on the hardware ring, and the hardware
			 * will try to execute it, and may access a buffer
			 * that has already been freed.  In the best case, the
			 * hardware will execute it, and the event handler will
			 * ignore the completion event for that TD, since it was
			 * removed from the td_list for that endpoint.  In
			 * short, don't muck with the stream ID after
			 * submission.
			 */
			xhci_warn(xhci, "WARN Cancelled URB %p "
					"has invalid stream ID %u.\n",
					cur_td->urb,
					cur_td->urb->stream_id);
			goto remove_finished_td;
		}
		/*
		 * If we stopped on the TD we need to cancel, then we have to
		 * move the xHC endpoint ring dequeue pointer past this TD.
		 */
		if (cur_td == ep->stopped_td)
			xhci_find_new_dequeue_state(xhci, slot_id, ep_index,
					cur_td->urb->stream_id,
					cur_td, &deq_state);
		else
			td_to_noop(xhci, ep_ring, cur_td, false);
remove_finished_td:
		/*
		 * The event handler won't see a completion for this TD anymore,
		 * so remove it from the endpoint ring's TD list.  Keep it in
		 * the cancelled TD list for URB completion later.
		 */
		list_del_init(&cur_td->td_list);
	}
	last_unlinked_td = cur_td;
	xhci_stop_watchdog_timer_in_irq(xhci, ep);

	/* If necessary, queue a Set Transfer Ring Dequeue Pointer command */
	if (deq_state.new_deq_ptr && deq_state.new_deq_seg) {
		xhci_queue_new_dequeue_state(xhci,
				slot_id, ep_index,
				ep->stopped_td->urb->stream_id,
				&deq_state);
		xhci_ring_cmd_db(xhci);
	} else {
		/* Otherwise ring the doorbell(s) to restart queued transfers */
		ring_doorbell_for_active_rings(xhci, slot_id, ep_index);
	}
	ep->stopped_td = NULL;
	ep->stopped_trb = NULL;

	/*
	 * Drop the lock and complete the URBs in the cancelled TD list.
	 * New TDs to be cancelled might be added to the end of the list before
	 * we can complete all the URBs for the TDs we already unlinked.
	 * So stop when we've completed the URB for the last TD we unlinked.
	 */
	do {
		cur_td = list_entry(ep->cancelled_td_list.next,
				struct xhci_td, cancelled_td_list);
		list_del_init(&cur_td->cancelled_td_list);

		/* Clean up the cancelled URB */
		/* Doesn't matter what we pass for status, since the core will
		 * just overwrite it (because the URB has been unlinked).
		 */
		xhci_giveback_urb_in_irq(xhci, cur_td, 0, "cancelled");

		/* Stop processing the cancelled list if the watchdog timer is
		 * running.
		 */
		if (xhci->xhc_state & XHCI_STATE_DYING)
			return;
	} while (cur_td != last_unlinked_td);

	/* Return to the event handler with xhci->lock re-acquired */
}
/* Watchdog timer function for when a stop endpoint command fails to complete.
 * In this case, we assume the host controller is broken or dying or dead.  The
 * host may still be completing some other events, so we have to be careful to
 * let the event ring handler and the URB dequeueing/enqueueing functions know
 * through xhci->xhc_state.
 *
 * The timer may also fire if the host takes a very long time to respond to the
 * command, and the stop endpoint command completion handler cannot delete the
 * timer before the timer function is called.  Another endpoint cancellation may
 * sneak in before the timer function can grab the lock, and that may queue
 * another stop endpoint command and add the timer back.  So we cannot use a
 * simple flag to say whether there is a pending stop endpoint command for a
 * particular endpoint.
 *
 * Instead we use a combination of that flag and a counter for the number of
 * pending stop endpoint commands.  If the timer is the tail end of the last
 * stop endpoint command, and the endpoint's command is still pending, we assume
 * the host is dying.
 */
void xhci_stop_endpoint_command_watchdog(unsigned long arg)
{
	struct xhci_hcd *xhci;
	struct xhci_virt_ep *ep;
	struct xhci_virt_ep *temp_ep;
	struct xhci_ring *ring;
	struct xhci_td *cur_td;
	int ret, i, j;
	unsigned long flags;

	ep = (struct xhci_virt_ep *) arg;
	xhci = ep->xhci;

	spin_lock_irqsave(&xhci->lock, flags);

	ep->stop_cmds_pending--;
	if (xhci->xhc_state & XHCI_STATE_DYING) {
		xhci_dbg(xhci, "Stop EP timer ran, but another timer marked "
				"xHCI as DYING, exiting.\n");
		spin_unlock_irqrestore(&xhci->lock, flags);
		return;
	}
	if (!(ep->stop_cmds_pending == 0 && (ep->ep_state & EP_HALT_PENDING))) {
		xhci_dbg(xhci, "Stop EP timer ran, but no command pending, "
				"exiting.\n");
		spin_unlock_irqrestore(&xhci->lock, flags);
		return;
	}

	xhci_warn(xhci, "xHCI host not responding to stop endpoint command.\n");
	xhci_warn(xhci, "Assuming host is dying, halting host.\n");
	/* Oops, HC is dead or dying or at least not responding to the stop
	 * endpoint command.
	 */
	xhci->xhc_state |= XHCI_STATE_DYING;
	/* Disable interrupts from the host controller and start halting it */
	xhci_quiesce(xhci);
	spin_unlock_irqrestore(&xhci->lock, flags);

	ret = xhci_halt(xhci);

	spin_lock_irqsave(&xhci->lock, flags);
	if (ret < 0) {
		/* This is bad; the host is not responding to commands and it's
		 * not allowing itself to be halted.  At least interrupts are
		 * disabled.  If we call usb_hc_died(), it will attempt to
		 * disconnect all device drivers under this host.  Those
		 * disconnect() methods will wait for all URBs to be unlinked,
		 * so we must complete them.
		 */
		xhci_warn(xhci, "Non-responsive xHCI host is not halting.\n");
		xhci_warn(xhci, "Completing active URBs anyway.\n");
		/* We could turn all TDs on the rings to no-ops.  This won't
		 * help if the host has cached part of the ring, and is slow if
		 * we want to preserve the cycle bit.  Skip it and hope the host
		 * doesn't touch the memory.
		 */
	}
	for (i = 0; i < MAX_HC_SLOTS; i++) {
		if (!xhci->devs[i])
			continue;
		for (j = 0; j < 31; j++) {
			temp_ep = &xhci->devs[i]->eps[j];
			ring = temp_ep->ring;
			if (!ring)
				continue;
			xhci_dbg(xhci, "Killing URBs for slot ID %u, "
					"ep index %u\n", i, j);
			while (!list_empty(&ring->td_list)) {
				cur_td = list_first_entry(&ring->td_list,
						struct xhci_td,
						td_list);
				list_del_init(&cur_td->td_list);
				if (!list_empty(&cur_td->cancelled_td_list))
					list_del_init(&cur_td->cancelled_td_list);
				xhci_giveback_urb_in_irq(xhci, cur_td,
						-ESHUTDOWN, "killed");
			}
			while (!list_empty(&temp_ep->cancelled_td_list)) {
				cur_td = list_first_entry(
						&temp_ep->cancelled_td_list,
						struct xhci_td,
						cancelled_td_list);
				list_del_init(&cur_td->cancelled_td_list);
				xhci_giveback_urb_in_irq(xhci, cur_td,
						-ESHUTDOWN, "killed");
			}
		}
	}
	spin_unlock_irqrestore(&xhci->lock, flags);
	xhci_dbg(xhci, "Calling usb_hc_died()\n");
	usb_hc_died(xhci_to_hcd(xhci)->primary_hcd);
	xhci_dbg(xhci, "xHCI host controller is dead.\n");
}
/*
 * When we get a completion for a Set Transfer Ring Dequeue Pointer command,
 * we need to clear the set deq pending flag in the endpoint ring state, so that
 * the TD queueing code can ring the doorbell again.  We also need to ring the
 * endpoint doorbell to restart the ring, but only if there aren't more
 * cancellations pending.
 */
static void handle_set_deq_completion(struct xhci_hcd *xhci,
		struct xhci_event_cmd *event,
		union xhci_trb *trb)
{
	unsigned int slot_id;
	unsigned int ep_index;
	unsigned int stream_id;
	struct xhci_ring *ep_ring;
	struct xhci_virt_device *dev;
	struct xhci_ep_ctx *ep_ctx;
	struct xhci_slot_ctx *slot_ctx;

	slot_id = TRB_TO_SLOT_ID(le32_to_cpu(trb->generic.field[3]));
	ep_index = TRB_TO_EP_INDEX(le32_to_cpu(trb->generic.field[3]));
	stream_id = TRB_TO_STREAM_ID(le32_to_cpu(trb->generic.field[2]));
	dev = xhci->devs[slot_id];

	ep_ring = xhci_stream_id_to_ring(dev, ep_index, stream_id);
	if (!ep_ring) {
		xhci_warn(xhci, "WARN Set TR deq ptr command for "
				"freed stream ID %u\n",
				stream_id);
		/* XXX: Harmless??? */
		dev->eps[ep_index].ep_state &= ~SET_DEQ_PENDING;
		return;
	}

	ep_ctx = xhci_get_ep_ctx(xhci, dev->out_ctx, ep_index);
	slot_ctx = xhci_get_slot_ctx(xhci, dev->out_ctx);

	if (GET_COMP_CODE(le32_to_cpu(event->status)) != COMP_SUCCESS) {
		unsigned int ep_state;
		unsigned int slot_state;

		switch (GET_COMP_CODE(le32_to_cpu(event->status))) {
		case COMP_TRB_ERR:
			xhci_warn(xhci, "WARN Set TR Deq Ptr cmd invalid because "
					"of stream ID configuration\n");
			break;
		case COMP_CTX_STATE:
			xhci_warn(xhci, "WARN Set TR Deq Ptr cmd failed due "
					"to incorrect slot or ep state.\n");
			ep_state = le32_to_cpu(ep_ctx->ep_info);
			ep_state &= EP_STATE_MASK;
			slot_state = le32_to_cpu(slot_ctx->dev_state);
			slot_state = GET_SLOT_STATE(slot_state);
			xhci_dbg(xhci, "Slot state = %u, EP state = %u\n",
					slot_state, ep_state);
			break;
		case COMP_EBADSLT:
			xhci_warn(xhci, "WARN Set TR Deq Ptr cmd failed because "
					"slot %u was not enabled.\n", slot_id);
			break;
		default:
			xhci_warn(xhci, "WARN Set TR Deq Ptr cmd with unknown "
					"completion code of %u.\n",
					GET_COMP_CODE(le32_to_cpu(event->status)));
			break;
		}
		/* OK what do we do now?  The endpoint state is hosed, and we
		 * should never get to this point if the synchronization between
		 * queueing and endpoint state is correct.  This might happen
		 * if the device gets disconnected after we've finished
		 * cancelling URBs, which might not be an error...
		 */
	} else {
		xhci_dbg(xhci, "Successful Set TR Deq Ptr cmd, deq = @%08llx\n",
				le64_to_cpu(ep_ctx->deq));
		if (xhci_trb_virt_to_dma(dev->eps[ep_index].queued_deq_seg,
					 dev->eps[ep_index].queued_deq_ptr) ==
		    (le64_to_cpu(ep_ctx->deq) & ~(EP_CTX_CYCLE_MASK))) {
			/* Update the ring's dequeue segment and dequeue pointer
			 * to reflect the new position.
			 */
			ep_ring->deq_seg = dev->eps[ep_index].queued_deq_seg;
			ep_ring->dequeue = dev->eps[ep_index].queued_deq_ptr;
		} else {
			xhci_warn(xhci, "Mismatch between completed Set TR Deq "
					"Ptr command & xHCI internal state.\n");
			xhci_warn(xhci, "ep deq seg = %p, deq ptr = %p\n",
					dev->eps[ep_index].queued_deq_seg,
					dev->eps[ep_index].queued_deq_ptr);
		}
	}

	dev->eps[ep_index].ep_state &= ~SET_DEQ_PENDING;
	dev->eps[ep_index].queued_deq_seg = NULL;
	dev->eps[ep_index].queued_deq_ptr = NULL;
	/* Restart any rings with pending URBs */
	ring_doorbell_for_active_rings(xhci, slot_id, ep_index);
}
static void handle_reset_ep_completion(struct xhci_hcd *xhci,
		struct xhci_event_cmd *event,
		union xhci_trb *trb)
{
	int slot_id;
	unsigned int ep_index;

	slot_id = TRB_TO_SLOT_ID(le32_to_cpu(trb->generic.field[3]));
	ep_index = TRB_TO_EP_INDEX(le32_to_cpu(trb->generic.field[3]));
	/* This command will only fail if the endpoint wasn't halted,
	 * but we don't care.
	 */
	xhci_dbg(xhci, "Ignoring reset ep completion code of %u\n",
		 GET_COMP_CODE(le32_to_cpu(event->status)));

	/* HW with the reset endpoint quirk needs to have a configure endpoint
	 * command complete before the endpoint can be used.  Queue that here
	 * because the HW can't handle two commands being queued in a row.
	 */
	if (xhci->quirks & XHCI_RESET_EP_QUIRK) {
		xhci_dbg(xhci, "Queueing configure endpoint command\n");
		xhci_queue_configure_endpoint(xhci,
				xhci->devs[slot_id]->in_ctx->dma, slot_id,
				false);
		xhci_ring_cmd_db(xhci);
	} else {
		/* Clear our internal halted state and restart the ring(s) */
		xhci->devs[slot_id]->eps[ep_index].ep_state &= ~EP_HALTED;
		ring_doorbell_for_active_rings(xhci, slot_id, ep_index);
	}
}
/* Check to see if a command in the device's command queue matches this one.
 * Signal the completion or free the command, and return 1.  Return 0 if the
 * completed command isn't at the head of the command list.
 */
static int handle_cmd_in_cmd_wait_list(struct xhci_hcd *xhci,
		struct xhci_virt_device *virt_dev,
		struct xhci_event_cmd *event)
{
	struct xhci_command *command;

	if (list_empty(&virt_dev->cmd_list))
		return 0;

	command = list_entry(virt_dev->cmd_list.next,
			struct xhci_command, cmd_list);
	if (xhci->cmd_ring->dequeue != command->command_trb)
		return 0;

	command->status = GET_COMP_CODE(le32_to_cpu(event->status));
	list_del(&command->cmd_list);
	if (command->completion)
		complete(command->completion);
	else
		xhci_free_command(xhci, command);
	return 1;
}
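/*
 * Caller-side sketch (illustrative only): how a submitter typically pairs
 * with the wait-list handling above.  Error handling is elided and the
 * usual locking around the command ring is assumed.
 *
 *	struct xhci_command *cmd = xhci_alloc_command(xhci, false, true,
 *						      GFP_KERNEL);
 *	cmd->command_trb = xhci->cmd_ring->enqueue;
 *	list_add_tail(&cmd->cmd_list, &virt_dev->cmd_list);
 *	// ...queue the command TRB and ring the command doorbell...
 *	wait_for_completion(cmd->completion);
 *	// cmd->status now holds the completion code
 */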
static void handle_cmd_completion(struct xhci_hcd *xhci,
		struct xhci_event_cmd *event)
{
	int slot_id = TRB_TO_SLOT_ID(le32_to_cpu(event->flags));
	u64 cmd_dma;
	dma_addr_t cmd_dequeue_dma;
	struct xhci_input_control_ctx *ctrl_ctx;
	struct xhci_virt_device *virt_dev;
	unsigned int ep_index;
	struct xhci_ring *ep_ring;
	unsigned int ep_state;

	cmd_dma = le64_to_cpu(event->cmd_trb);
	cmd_dequeue_dma = xhci_trb_virt_to_dma(xhci->cmd_ring->deq_seg,
			xhci->cmd_ring->dequeue);
	/* Is the command ring deq ptr out of sync with the deq seg ptr? */
	if (cmd_dequeue_dma == 0) {
		xhci->error_bitmask |= 1 << 4;
		return;
	}
	/* Does the DMA address match our internal dequeue pointer address? */
	if (cmd_dma != (u64) cmd_dequeue_dma) {
		xhci->error_bitmask |= 1 << 5;
		return;
	}
	switch (le32_to_cpu(xhci->cmd_ring->dequeue->generic.field[3])
		& TRB_TYPE_BITMASK) {
	case TRB_TYPE(TRB_ENABLE_SLOT):
		if (GET_COMP_CODE(le32_to_cpu(event->status)) == COMP_SUCCESS)
			xhci->slot_id = slot_id;
		else
			xhci->slot_id = 0;
		complete(&xhci->addr_dev);
		break;
	case TRB_TYPE(TRB_DISABLE_SLOT):
		if (xhci->devs[slot_id]) {
			if (xhci->quirks & XHCI_EP_LIMIT_QUIRK)
				/* Delete default control endpoint resources */
				xhci_free_device_endpoint_resources(xhci,
						xhci->devs[slot_id], true);
			xhci_free_virt_device(xhci, slot_id);
		}
		break;
	case TRB_TYPE(TRB_CONFIG_EP):
		virt_dev = xhci->devs[slot_id];
		if (handle_cmd_in_cmd_wait_list(xhci, virt_dev, event))
			break;
		/*
		 * Configure endpoint commands can come from the USB core
		 * configuration or alt setting changes, or because the HW
		 * needed an extra configure endpoint command after a reset
		 * endpoint command or streams were being configured.
		 * If the command was for a halted endpoint, the xHCI driver
		 * is not waiting on the configure endpoint command.
		 */
		ctrl_ctx = xhci_get_input_control_ctx(xhci,
				virt_dev->in_ctx);
		/* Input ctx add_flags are the endpoint index plus one */
		ep_index = xhci_last_valid_endpoint(le32_to_cpu(ctrl_ctx->add_flags)) - 1;
		/* A usb_set_interface() call directly after clearing a halted
		 * condition may race on this quirky hardware. Not worth
		 * worrying about, since this is prototype hardware. Not sure
		 * if this will work for streams, but streams support was
		 * untested on this prototype.
		 */
		if (xhci->quirks & XHCI_RESET_EP_QUIRK &&
				ep_index != (unsigned int) -1 &&
				le32_to_cpu(ctrl_ctx->add_flags) - SLOT_FLAG ==
				le32_to_cpu(ctrl_ctx->drop_flags)) {
			ep_ring = xhci->devs[slot_id]->eps[ep_index].ring;
			ep_state = xhci->devs[slot_id]->eps[ep_index].ep_state;
			if (!(ep_state & EP_HALTED))
				goto bandwidth_change;
			xhci_dbg(xhci, "Completed config ep cmd - "
					"last ep index = %d, state = %d\n",
					ep_index, ep_state);
			/* Clear internal halted state and restart ring(s) */
			xhci->devs[slot_id]->eps[ep_index].ep_state &=
				~EP_HALTED;
			ring_doorbell_for_active_rings(xhci, slot_id, ep_index);
			break;
		}
bandwidth_change:
		xhci_dbg(xhci, "Completed config ep cmd\n");
		xhci->devs[slot_id]->cmd_status =
			GET_COMP_CODE(le32_to_cpu(event->status));
		complete(&xhci->devs[slot_id]->cmd_completion);
		break;
	case TRB_TYPE(TRB_EVAL_CONTEXT):
		virt_dev = xhci->devs[slot_id];
		if (handle_cmd_in_cmd_wait_list(xhci, virt_dev, event))
			break;
		xhci->devs[slot_id]->cmd_status = GET_COMP_CODE(le32_to_cpu(event->status));
		complete(&xhci->devs[slot_id]->cmd_completion);
		break;
	case TRB_TYPE(TRB_ADDR_DEV):
		xhci->devs[slot_id]->cmd_status = GET_COMP_CODE(le32_to_cpu(event->status));
		complete(&xhci->addr_dev);
		break;
	case TRB_TYPE(TRB_STOP_RING):
		handle_stopped_endpoint(xhci, xhci->cmd_ring->dequeue, event);
		break;
	case TRB_TYPE(TRB_SET_DEQ):
		handle_set_deq_completion(xhci, event, xhci->cmd_ring->dequeue);
		break;
	case TRB_TYPE(TRB_CMD_NOOP):
		break;
	case TRB_TYPE(TRB_RESET_EP):
		handle_reset_ep_completion(xhci, event, xhci->cmd_ring->dequeue);
		break;
	case TRB_TYPE(TRB_RESET_DEV):
		xhci_dbg(xhci, "Completed reset device command.\n");
		slot_id = TRB_TO_SLOT_ID(
			le32_to_cpu(xhci->cmd_ring->dequeue->generic.field[3]));
		virt_dev = xhci->devs[slot_id];
		if (virt_dev)
			handle_cmd_in_cmd_wait_list(xhci, virt_dev, event);
		else
			xhci_warn(xhci, "Reset device command completion "
					"for disabled slot %u\n", slot_id);
		break;
	case TRB_TYPE(TRB_NEC_GET_FW):
		if (!(xhci->quirks & XHCI_NEC_HOST)) {
			xhci->error_bitmask |= 1 << 6;
			break;
		}
		xhci_dbg(xhci, "NEC firmware version %2x.%02x\n",
			 NEC_FW_MAJOR(le32_to_cpu(event->status)),
			 NEC_FW_MINOR(le32_to_cpu(event->status)));
		break;
	default:
		/* Skip over unknown commands on the event ring */
		xhci->error_bitmask |= 1 << 6;
		break;
	}
	inc_deq(xhci, xhci->cmd_ring, false);
}
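/*
 * Vendor-specific event TRBs use TRB type IDs 48-63. The only one handled
 * here is the NEC command completion event, which is processed like a normal
 * command completion when the NEC host quirk is set; anything else is logged
 * and ignored.
 */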
static void handle_vendor_event(struct xhci_hcd *xhci,
		union xhci_trb *event)
{
	u32 trb_type;

	trb_type = TRB_FIELD_TO_TYPE(le32_to_cpu(event->generic.field[3]));
	xhci_dbg(xhci, "Vendor specific event TRB type = %u\n", trb_type);
	if (trb_type == TRB_NEC_CMD_COMP && (xhci->quirks & XHCI_NEC_HOST))
		handle_cmd_completion(xhci, &event->event_cmd);
}
/* @port_id: the one-based port ID from the hardware (indexed from array of all
 * port registers -- USB 3.0 and USB 2.0).
 *
 * Returns a zero-based port number, which is suitable for indexing into each of
 * the split roothubs' port arrays and bus state arrays.
 */
static unsigned int find_faked_portnum_from_hw_portnum(struct usb_hcd *hcd,
		struct xhci_hcd *xhci, u32 port_id)
{
	unsigned int i;
	unsigned int num_similar_speed_ports = 0;

	/* port_id from the hardware is 1-based, but port_array[], usb3_ports[],
	 * and usb2_ports are 0-based indexes. Count the number of similar
	 * speed ports, up to 1 port before this port.
	 */
	for (i = 0; i < (port_id - 1); i++) {
		u8 port_speed = xhci->port_array[i];

		/*
		 * Skip ports that don't have known speeds, or have duplicate
		 * Extended Capabilities port speed entries.
		 */
		if (port_speed == 0 || port_speed == DUPLICATE_ENTRY)
			continue;

		/*
		 * USB 3.0 ports are always under a USB 3.0 hub. USB 2.0 and
		 * 1.1 ports are under the USB 2.0 hub. If the port speed
		 * matches the device speed, it's a similar speed port.
		 */
		if ((port_speed == 0x03) == (hcd->speed == HCD_USB3))
			num_similar_speed_ports++;
	}
	return num_similar_speed_ports;
}
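/*
 * Handle a Port Status Change Event: validate the hardware port ID, map it to
 * the right roothub (USB 2.0 or USB 3.0) and its zero-based port index, start
 * resume handling if the port reported a resume, and finally let the USB core
 * poll the roothub (dropping xhci->lock around the callout).
 */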
static void handle_port_status(struct xhci_hcd *xhci,
		union xhci_trb *event)
{
	struct usb_hcd *hcd;
	u32 port_id;
	u32 temp, temp1;
	int max_ports;
	int slot_id;
	unsigned int faked_port_index;
	u8 major_revision;
	struct xhci_bus_state *bus_state;
	__le32 __iomem **port_array;
	bool bogus_port_status = false;

	/* Port status change events always have a successful completion code */
	if (GET_COMP_CODE(le32_to_cpu(event->generic.field[2])) != COMP_SUCCESS) {
		xhci_warn(xhci, "WARN: xHC returned failed port status event\n");
		xhci->error_bitmask |= 1 << 8;
	}
	port_id = GET_PORT_ID(le32_to_cpu(event->generic.field[0]));
	xhci_dbg(xhci, "Port Status Change Event for port %d\n", port_id);

	max_ports = HCS_MAX_PORTS(xhci->hcs_params1);
	if ((port_id <= 0) || (port_id > max_ports)) {
		xhci_warn(xhci, "Invalid port id %d\n", port_id);
		bogus_port_status = true;
		goto cleanup;
	}

	/* Figure out which usb_hcd this port is attached to:
	 * is it a USB 3.0 port or a USB 2.0/1.1 port?
	 */
	major_revision = xhci->port_array[port_id - 1];
	if (major_revision == 0) {
		xhci_warn(xhci, "Event for port %u not in "
				"Extended Capabilities, ignoring.\n",
				port_id);
		bogus_port_status = true;
		goto cleanup;
	}
	if (major_revision == DUPLICATE_ENTRY) {
		xhci_warn(xhci, "Event for port %u duplicated in "
				"Extended Capabilities, ignoring.\n",
				port_id);
		bogus_port_status = true;
		goto cleanup;
	}

	/*
	 * Hardware port IDs reported by a Port Status Change Event include USB
	 * 3.0 and USB 2.0 ports. We want to check if the port has reported a
	 * resume event, but we first need to translate the hardware port ID
	 * into the index into the ports on the correct split roothub, and the
	 * correct bus_state structure.
	 */
	/* Find the right roothub. */
	hcd = xhci_to_hcd(xhci);
	if ((major_revision == 0x03) != (hcd->speed == HCD_USB3))
		hcd = xhci->shared_hcd;
	bus_state = &xhci->bus_state[hcd_index(hcd)];
	if (hcd->speed == HCD_USB3)
		port_array = xhci->usb3_ports;
	else
		port_array = xhci->usb2_ports;
	/* Find the faked port hub number */
	faked_port_index = find_faked_portnum_from_hw_portnum(hcd, xhci,
			port_id);

	temp = xhci_readl(xhci, port_array[faked_port_index]);
	if (hcd->state == HC_STATE_SUSPENDED) {
		xhci_dbg(xhci, "resume root hub\n");
		usb_hcd_resume_root_hub(hcd);
	}

	if ((temp & PORT_PLC) && (temp & PORT_PLS_MASK) == XDEV_RESUME) {
		xhci_dbg(xhci, "port resume event for port %d\n", port_id);

		temp1 = xhci_readl(xhci, &xhci->op_regs->command);
		if (!(temp1 & CMD_RUN)) {
			xhci_warn(xhci, "xHC is not running.\n");
			goto cleanup;
		}

		if (DEV_SUPERSPEED(temp)) {
			xhci_dbg(xhci, "resume SS port %d\n", port_id);
			xhci_set_link_state(xhci, port_array, faked_port_index,
						XDEV_U0);
			slot_id = xhci_find_slot_id_by_port(hcd, xhci,
					faked_port_index);
			if (!slot_id) {
				xhci_dbg(xhci, "slot_id is zero\n");
				goto cleanup;
			}
			xhci_ring_device(xhci, slot_id);
			xhci_dbg(xhci, "resume SS port %d finished\n", port_id);
			/* Clear PORT_PLC */
			xhci_test_and_clear_bit(xhci, port_array,
						faked_port_index, PORT_PLC);
		} else {
			xhci_dbg(xhci, "resume HS port %d\n", port_id);
			bus_state->resume_done[faked_port_index] = jiffies +
				msecs_to_jiffies(20);
			mod_timer(&hcd->rh_timer,
				  bus_state->resume_done[faked_port_index]);
			/* Do the rest in GetPortStatus */
		}
	}

	if (hcd->speed != HCD_USB3)
		xhci_test_and_clear_bit(xhci, port_array, faked_port_index,
					PORT_PLC);

cleanup:
	/* Update event ring dequeue pointer before dropping the lock */
	inc_deq(xhci, xhci->event_ring, true);

	/* Don't make the USB core poll the roothub if we got a bad port status
	 * change event. Besides, at that point we can't tell which roothub
	 * (USB 2.0 or USB 3.0) to kick.
	 */
	if (bogus_port_status)
		return;

	spin_unlock(&xhci->lock);
	/* Pass this up to the core */
	usb_hcd_poll_rh_status(hcd);
	spin_lock(&xhci->lock);
}
/*
 * This TD is defined by the TRBs starting at start_trb in start_seg and ending
 * at end_trb, which may be in another segment. If the suspect DMA address is a
 * TRB in this TD, this function returns that TRB's segment. Otherwise it
 * returns NULL.
 */
struct xhci_segment *trb_in_td(struct xhci_segment *start_seg,
		union xhci_trb *start_trb,
		union xhci_trb *end_trb,
		dma_addr_t suspect_dma)
{
	dma_addr_t start_dma;
	dma_addr_t end_seg_dma;
	dma_addr_t end_trb_dma;
	struct xhci_segment *cur_seg;

	start_dma = xhci_trb_virt_to_dma(start_seg, start_trb);
	cur_seg = start_seg;

	do {
		if (start_dma == 0)
			return NULL;
		/* We may get an event for a Link TRB in the middle of a TD */
		end_seg_dma = xhci_trb_virt_to_dma(cur_seg,
				&cur_seg->trbs[TRBS_PER_SEGMENT - 1]);
		/* If the end TRB isn't in this segment, this is set to 0 */
		end_trb_dma = xhci_trb_virt_to_dma(cur_seg, end_trb);

		if (end_trb_dma > 0) {
			/* The end TRB is in this segment, so suspect should be here */
			if (start_dma <= end_trb_dma) {
				if (suspect_dma >= start_dma && suspect_dma <= end_trb_dma)
					return cur_seg;
			} else {
				/* Case for one segment with
				 * a TD wrapped around to the top
				 */
				if ((suspect_dma >= start_dma &&
					suspect_dma <= end_seg_dma) ||
					(suspect_dma >= cur_seg->dma &&
					 suspect_dma <= end_trb_dma))
					return cur_seg;
			}
			return NULL;
		} else {
			/* Might still be somewhere in this segment */
			if (suspect_dma >= start_dma && suspect_dma <= end_seg_dma)
				return cur_seg;
		}
		cur_seg = cur_seg->next;
		start_dma = xhci_trb_virt_to_dma(cur_seg, &cur_seg->trbs[0]);
	} while (cur_seg != start_seg);

	return NULL;
}
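/*
 * Recover an endpoint that the hardware halted on an error: record the halt
 * and the stopped TD/TRB, queue a Reset Endpoint command, let
 * xhci_cleanup_stalled_ring() queue a Set TR Dequeue Pointer command to move
 * the hardware past the offending TD, then ring the command doorbell.
 */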
static void xhci_cleanup_halted_endpoint(struct xhci_hcd *xhci,
		unsigned int slot_id, unsigned int ep_index,
		unsigned int stream_id,
		struct xhci_td *td, union xhci_trb *event_trb)
{
	struct xhci_virt_ep *ep = &xhci->devs[slot_id]->eps[ep_index];

	ep->ep_state |= EP_HALTED;
	ep->stopped_td = td;
	ep->stopped_trb = event_trb;
	ep->stopped_stream = stream_id;

	xhci_queue_reset_ep(xhci, slot_id, ep_index);
	xhci_cleanup_stalled_ring(xhci, td->urb->dev, ep_index);

	ep->stopped_td = NULL;
	ep->stopped_trb = NULL;
	ep->stopped_stream = 0;

	xhci_ring_cmd_db(xhci);
}
/* Check if an error has halted the endpoint ring. The class driver will
 * cleanup the halt for a non-default control endpoint if we indicate a stall.
 * However, a babble and other errors also halt the endpoint ring, and the class
 * driver won't clear the halt in that case, so we need to issue a Set Transfer
 * Ring Dequeue Pointer command manually.
 */
static int xhci_requires_manual_halt_cleanup(struct xhci_hcd *xhci,
		struct xhci_ep_ctx *ep_ctx,
		unsigned int trb_comp_code)
{
	/* TRB completion codes that may require a manual halt cleanup */
	if (trb_comp_code == COMP_TX_ERR ||
			trb_comp_code == COMP_BABBLE ||
			trb_comp_code == COMP_SPLIT_ERR)
		/* The 0.95 spec says a babbling control endpoint
		 * is not halted. The 0.96 spec says it is. Some HW
		 * claims to be 0.95 compliant, but it halts the control
		 * endpoint anyway. Check if a babble halted the
		 * endpoint.
		 */
		if ((ep_ctx->ep_info & cpu_to_le32(EP_STATE_MASK)) ==
				cpu_to_le32(EP_STATE_HALTED))
			return 1;

	return 0;
}
int xhci_is_vendor_info_code(struct xhci_hcd *xhci, unsigned int trb_comp_code)
{
	if (trb_comp_code >= 224 && trb_comp_code <= 255) {
		/* Vendor defined "informational" completion code,
		 * treat as not-an-error.
		 */
		xhci_dbg(xhci, "Vendor defined info completion code %u\n",
				trb_comp_code);
		xhci_dbg(xhci, "Treating code as success.\n");
		return 1;
	}
	return 0;
}
/*
 * Finish the TD processing and remove the TD from the endpoint ring's TD list.
 * Returns 1 if the URB can be given back, 0 otherwise.
 */
static int finish_td(struct xhci_hcd *xhci, struct xhci_td *td,
	union xhci_trb *event_trb, struct xhci_transfer_event *event,
	struct xhci_virt_ep *ep, int *status, bool skip)
{
	struct xhci_virt_device *xdev;
	struct xhci_ring *ep_ring;
	unsigned int slot_id;
	int ep_index;
	struct urb *urb = NULL;
	struct xhci_ep_ctx *ep_ctx;
	int ret = 0;
	struct urb_priv *urb_priv;
	u32 trb_comp_code;

	slot_id = TRB_TO_SLOT_ID(le32_to_cpu(event->flags));
	xdev = xhci->devs[slot_id];
	ep_index = TRB_TO_EP_ID(le32_to_cpu(event->flags)) - 1;
	ep_ring = xhci_dma_to_transfer_ring(ep, le64_to_cpu(event->buffer));
	ep_ctx = xhci_get_ep_ctx(xhci, xdev->out_ctx, ep_index);
	trb_comp_code = GET_COMP_CODE(le32_to_cpu(event->transfer_len));

	if (skip)
		goto td_cleanup;

	if (trb_comp_code == COMP_STOP_INVAL ||
			trb_comp_code == COMP_STOP) {
		/* The Endpoint Stop Command completion will take care of any
		 * stopped TDs. A stopped TD may be restarted, so don't update
		 * the ring dequeue pointer or take this TD off any lists yet.
		 */
		ep->stopped_td = td;
		ep->stopped_trb = event_trb;
		return 0;
	} else {
		if (trb_comp_code == COMP_STALL) {
			/* The transfer is completed from the driver's
			 * perspective, but we need to issue a set dequeue
			 * command for this stalled endpoint to move the dequeue
			 * pointer past the TD. We can't do that here because
			 * the halt condition must be cleared first. Let the
			 * USB class driver clear the stall later.
			 */
			ep->stopped_td = td;
			ep->stopped_trb = event_trb;
			ep->stopped_stream = ep_ring->stream_id;
		} else if (xhci_requires_manual_halt_cleanup(xhci,
					ep_ctx, trb_comp_code)) {
			/* Other types of errors halt the endpoint, but the
			 * class driver doesn't call usb_reset_endpoint() unless
			 * the error is -EPIPE. Clear the halted status in the
			 * xHCI hardware manually.
			 */
			xhci_cleanup_halted_endpoint(xhci,
					slot_id, ep_index, ep_ring->stream_id,
					td, event_trb);
		} else {
			/* Update ring dequeue pointer */
			while (ep_ring->dequeue != td->last_trb)
				inc_deq(xhci, ep_ring, false);
			inc_deq(xhci, ep_ring, false);
		}

td_cleanup:
		/* Clean up the endpoint's TD list */
		urb = td->urb;
		urb_priv = urb->hcpriv;

		/* Do one last check of the actual transfer length.
		 * If the host controller said we transferred more data than
		 * the buffer length, urb->actual_length will be a very big
		 * number (since it's unsigned). Play it safe and say we didn't
		 * transfer anything.
		 */
		if (urb->actual_length > urb->transfer_buffer_length) {
			xhci_warn(xhci, "URB transfer length is wrong, "
					"xHC issue? req. len = %u, "
					"act. len = %u\n",
					urb->transfer_buffer_length,
					urb->actual_length);
			urb->actual_length = 0;
			if (td->urb->transfer_flags & URB_SHORT_NOT_OK)
				*status = -EREMOTEIO;
			else
				*status = 0;
		}
		list_del_init(&td->td_list);
		/* Was this TD slated to be cancelled but completed anyway? */
		if (!list_empty(&td->cancelled_td_list))
			list_del_init(&td->cancelled_td_list);

		urb_priv->td_cnt++;
		/* Give back the URB when all of its TDs are completed */
		if (urb_priv->td_cnt == urb_priv->length) {
			ret = 1;
			if (usb_pipetype(urb->pipe) == PIPE_ISOCHRONOUS) {
				xhci_to_hcd(xhci)->self.bandwidth_isoc_reqs--;
				if (xhci_to_hcd(xhci)->self.bandwidth_isoc_reqs
					== 0) {
					if (xhci->quirks & XHCI_AMD_PLL_FIX)
						usb_amd_quirk_pll_enable();
				}
			}
		}
	}

	return ret;
}
/*
 * Process control tds, update urb status and actual_length.
 */
static int process_ctrl_td(struct xhci_hcd *xhci, struct xhci_td *td,
	union xhci_trb *event_trb, struct xhci_transfer_event *event,
	struct xhci_virt_ep *ep, int *status)
{
	struct xhci_virt_device *xdev;
	struct xhci_ring *ep_ring;
	unsigned int slot_id;
	int ep_index;
	struct xhci_ep_ctx *ep_ctx;
	u32 trb_comp_code;

	slot_id = TRB_TO_SLOT_ID(le32_to_cpu(event->flags));
	xdev = xhci->devs[slot_id];
	ep_index = TRB_TO_EP_ID(le32_to_cpu(event->flags)) - 1;
	ep_ring = xhci_dma_to_transfer_ring(ep, le64_to_cpu(event->buffer));
	ep_ctx = xhci_get_ep_ctx(xhci, xdev->out_ctx, ep_index);
	trb_comp_code = GET_COMP_CODE(le32_to_cpu(event->transfer_len));

	xhci_debug_trb(xhci, xhci->event_ring->dequeue);
	switch (trb_comp_code) {
	case COMP_SUCCESS:
		if (event_trb == ep_ring->dequeue) {
			xhci_warn(xhci, "WARN: Success on ctrl setup TRB "
					"without IOC set??\n");
			*status = -ESHUTDOWN;
		} else if (event_trb != td->last_trb) {
			xhci_warn(xhci, "WARN: Success on ctrl data TRB "
					"without IOC set??\n");
			*status = -ESHUTDOWN;
		} else {
			*status = 0;
		}
		break;
	case COMP_SHORT_TX:
		xhci_warn(xhci, "WARN: short transfer on control ep\n");
		if (td->urb->transfer_flags & URB_SHORT_NOT_OK)
			*status = -EREMOTEIO;
		else
			*status = 0;
		break;
	case COMP_STOP_INVAL:
	case COMP_STOP:
		return finish_td(xhci, td, event_trb, event, ep, status, false);
	default:
		if (!xhci_requires_manual_halt_cleanup(xhci,
					ep_ctx, trb_comp_code))
			break;
		xhci_dbg(xhci, "TRB error code %u, "
				"halted endpoint index = %u\n",
				trb_comp_code, ep_index);
		/* else fall through */
	case COMP_STALL:
		/* Did we transfer part of the data (middle) phase? */
		if (event_trb != ep_ring->dequeue &&
				event_trb != td->last_trb)
			td->urb->actual_length =
				td->urb->transfer_buffer_length
				- TRB_LEN(le32_to_cpu(event->transfer_len));
		else
			td->urb->actual_length = 0;

		xhci_cleanup_halted_endpoint(xhci,
			slot_id, ep_index, 0, td, event_trb);
		return finish_td(xhci, td, event_trb, event, ep, status, true);
	}
	/*
	 * Did we transfer any data, despite the errors that might have
	 * happened? I.e. did we get past the setup stage?
	 */
	if (event_trb != ep_ring->dequeue) {
		/* The event was for the status stage */
		if (event_trb == td->last_trb) {
			if (td->urb->actual_length != 0) {
				/* Don't overwrite a previously set error code
				 */
				if ((*status == -EINPROGRESS || *status == 0) &&
						(td->urb->transfer_flags
						 & URB_SHORT_NOT_OK))
					/* Did we already see a short data
					 * stage? */
					*status = -EREMOTEIO;
			} else {
				td->urb->actual_length =
					td->urb->transfer_buffer_length;
			}
		} else {
			/* Maybe the event was for the data stage? */
			td->urb->actual_length =
				td->urb->transfer_buffer_length -
				TRB_LEN(le32_to_cpu(event->transfer_len));
			xhci_dbg(xhci, "Waiting for status "
					"stage event\n");
			return 0;
		}
	}

	return finish_td(xhci, td, event_trb, event, ep, status, false);
}
/*
 * Process isochronous tds, update urb packet status and actual_length.
 */
static int process_isoc_td(struct xhci_hcd *xhci, struct xhci_td *td,
	union xhci_trb *event_trb, struct xhci_transfer_event *event,
	struct xhci_virt_ep *ep, int *status)
{
	struct xhci_ring *ep_ring;
	struct urb_priv *urb_priv;
	int idx;
	int len = 0;
	union xhci_trb *cur_trb;
	struct xhci_segment *cur_seg;
	struct usb_iso_packet_descriptor *frame;
	u32 trb_comp_code;
	bool skip_td = false;

	ep_ring = xhci_dma_to_transfer_ring(ep, le64_to_cpu(event->buffer));
	trb_comp_code = GET_COMP_CODE(le32_to_cpu(event->transfer_len));
	urb_priv = td->urb->hcpriv;
	idx = urb_priv->td_cnt;
	frame = &td->urb->iso_frame_desc[idx];

	/* handle completion code */
	switch (trb_comp_code) {
	case COMP_SUCCESS:
		frame->status = 0;
		break;
	case COMP_SHORT_TX:
		frame->status = td->urb->transfer_flags & URB_SHORT_NOT_OK ?
				-EREMOTEIO : 0;
		break;
	case COMP_BW_OVER:
		frame->status = -ECOMM;
		skip_td = true;
		break;
	case COMP_BUFF_OVER:
	case COMP_BABBLE:
		frame->status = -EOVERFLOW;
		skip_td = true;
		break;
	case COMP_DEV_ERR:
	case COMP_STALL:
		frame->status = -EPROTO;
		skip_td = true;
		break;
	case COMP_STOP:
	case COMP_STOP_INVAL:
		break;
	default:
		frame->status = -1;
		break;
	}

	if (trb_comp_code == COMP_SUCCESS || skip_td) {
		frame->actual_length = frame->length;
		td->urb->actual_length += frame->length;
	} else {
		for (cur_trb = ep_ring->dequeue,
		     cur_seg = ep_ring->deq_seg; cur_trb != event_trb;
		     next_trb(xhci, ep_ring, &cur_seg, &cur_trb)) {
			if (!TRB_TYPE_NOOP_LE32(cur_trb->generic.field[3]) &&
			    !TRB_TYPE_LINK_LE32(cur_trb->generic.field[3]))
				len += TRB_LEN(le32_to_cpu(cur_trb->generic.field[2]));
		}
		len += TRB_LEN(le32_to_cpu(cur_trb->generic.field[2])) -
			TRB_LEN(le32_to_cpu(event->transfer_len));

		if (trb_comp_code != COMP_STOP_INVAL) {
			frame->actual_length = len;
			td->urb->actual_length += len;
		}
	}

	return finish_td(xhci, td, event_trb, event, ep, status, false);
}
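/*
 * Complete a missed isochronous TD: report it to the USB core as -EXDEV with
 * zero actual length and move the ring's dequeue pointer past it. Called
 * while ep->skip is set, i.e. after a Missed Service Interval error.
 */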
static int skip_isoc_td(struct xhci_hcd *xhci, struct xhci_td *td,
			struct xhci_transfer_event *event,
			struct xhci_virt_ep *ep, int *status)
{
	struct xhci_ring *ep_ring;
	struct urb_priv *urb_priv;
	struct usb_iso_packet_descriptor *frame;
	int idx;

	ep_ring = xhci_dma_to_transfer_ring(ep, le64_to_cpu(event->buffer));
	urb_priv = td->urb->hcpriv;
	idx = urb_priv->td_cnt;
	frame = &td->urb->iso_frame_desc[idx];

	/* The transfer is partly done. */
	frame->status = -EXDEV;

	/* calc actual length */
	frame->actual_length = 0;

	/* Update ring dequeue pointer */
	while (ep_ring->dequeue != td->last_trb)
		inc_deq(xhci, ep_ring, false);
	inc_deq(xhci, ep_ring, false);

	return finish_td(xhci, td, NULL, event, ep, status, true);
}
/*
 * Process bulk and interrupt tds, update urb status and actual_length.
 */
static int process_bulk_intr_td(struct xhci_hcd *xhci, struct xhci_td *td,
	union xhci_trb *event_trb, struct xhci_transfer_event *event,
	struct xhci_virt_ep *ep, int *status)
{
	struct xhci_ring *ep_ring;
	union xhci_trb *cur_trb;
	struct xhci_segment *cur_seg;
	u32 trb_comp_code;

	ep_ring = xhci_dma_to_transfer_ring(ep, le64_to_cpu(event->buffer));
	trb_comp_code = GET_COMP_CODE(le32_to_cpu(event->transfer_len));

	switch (trb_comp_code) {
	case COMP_SUCCESS:
		/* Double check that the HW transferred everything. */
		if (event_trb != td->last_trb) {
			xhci_warn(xhci, "WARN Successful completion "
					"on short TX\n");
			if (td->urb->transfer_flags & URB_SHORT_NOT_OK)
				*status = -EREMOTEIO;
			else
				*status = 0;
		} else {
			*status = 0;
		}
		break;
	case COMP_SHORT_TX:
		if (td->urb->transfer_flags & URB_SHORT_NOT_OK)
			*status = -EREMOTEIO;
		else
			*status = 0;
		break;
	default:
		/* Others already handled above */
		break;
	}
	if (trb_comp_code == COMP_SHORT_TX)
		xhci_dbg(xhci, "ep %#x - asked for %d bytes, "
				"%d bytes untransferred\n",
				td->urb->ep->desc.bEndpointAddress,
				td->urb->transfer_buffer_length,
				TRB_LEN(le32_to_cpu(event->transfer_len)));
	/* Fast path - was this the last TRB in the TD for this URB? */
	if (event_trb == td->last_trb) {
		if (TRB_LEN(le32_to_cpu(event->transfer_len)) != 0) {
			td->urb->actual_length =
				td->urb->transfer_buffer_length -
				TRB_LEN(le32_to_cpu(event->transfer_len));
			if (td->urb->transfer_buffer_length <
					td->urb->actual_length) {
				xhci_warn(xhci, "HC gave bad length "
						"of %d bytes left\n",
						TRB_LEN(le32_to_cpu(event->transfer_len)));
				td->urb->actual_length = 0;
				if (td->urb->transfer_flags & URB_SHORT_NOT_OK)
					*status = -EREMOTEIO;
				else
					*status = 0;
			}
			/* Don't overwrite a previously set error code */
			if (*status == -EINPROGRESS) {
				if (td->urb->transfer_flags & URB_SHORT_NOT_OK)
					*status = -EREMOTEIO;
				else
					*status = 0;
			}
		} else {
			td->urb->actual_length =
				td->urb->transfer_buffer_length;
			/* Ignore a short packet completion if the
			 * untransferred length was zero.
			 */
			if (*status == -EREMOTEIO)
				*status = 0;
		}
	} else {
		/* Slow path - walk the list, starting from the dequeue
		 * pointer, to get the actual length transferred.
		 */
		td->urb->actual_length = 0;
		for (cur_trb = ep_ring->dequeue, cur_seg = ep_ring->deq_seg;
				cur_trb != event_trb;
				next_trb(xhci, ep_ring, &cur_seg, &cur_trb)) {
			if (!TRB_TYPE_NOOP_LE32(cur_trb->generic.field[3]) &&
			    !TRB_TYPE_LINK_LE32(cur_trb->generic.field[3]))
				td->urb->actual_length +=
					TRB_LEN(le32_to_cpu(cur_trb->generic.field[2]));
		}
		/* If the ring didn't stop on a Link or No-op TRB, add
		 * in the actual bytes transferred from the Normal TRB
		 */
		if (trb_comp_code != COMP_STOP_INVAL)
			td->urb->actual_length +=
				TRB_LEN(le32_to_cpu(cur_trb->generic.field[2])) -
				TRB_LEN(le32_to_cpu(event->transfer_len));
	}

	return finish_td(xhci, td, event_trb, event, ep, status, false);
}
/*
 * If this function returns an error condition, it means it got a Transfer
 * event with a corrupted Slot ID, Endpoint ID, or TRB DMA address.
 * At this point, the host controller is probably hosed and should be reset.
 */
static int handle_tx_event(struct xhci_hcd *xhci,
		struct xhci_transfer_event *event)
{
	struct xhci_virt_device *xdev;
	struct xhci_virt_ep *ep;
	struct xhci_ring *ep_ring;
	unsigned int slot_id;
	int ep_index;
	struct xhci_td *td = NULL;
	dma_addr_t event_dma;
	struct xhci_segment *event_seg;
	union xhci_trb *event_trb;
	struct urb *urb = NULL;
	int status = -EINPROGRESS;
	struct urb_priv *urb_priv;
	struct xhci_ep_ctx *ep_ctx;
	struct list_head *tmp;
	u32 trb_comp_code;
	int ret = 0;
	int td_num = 0;

	slot_id = TRB_TO_SLOT_ID(le32_to_cpu(event->flags));
	xdev = xhci->devs[slot_id];
	if (!xdev) {
		xhci_err(xhci, "ERROR Transfer event pointed to bad slot\n");
		return -ENODEV;
	}

	/* Endpoint ID is 1 based, our index is zero based */
	ep_index = TRB_TO_EP_ID(le32_to_cpu(event->flags)) - 1;
	ep = &xdev->eps[ep_index];
	ep_ring = xhci_dma_to_transfer_ring(ep, le64_to_cpu(event->buffer));
	ep_ctx = xhci_get_ep_ctx(xhci, xdev->out_ctx, ep_index);
	if (!ep_ring ||
	    (le32_to_cpu(ep_ctx->ep_info) & EP_STATE_MASK) ==
	    EP_STATE_DISABLED) {
		xhci_err(xhci, "ERROR Transfer event for disabled endpoint "
				"or incorrect stream ring\n");
		return -ENODEV;
	}

	/* Count current td numbers if ep->skip is set */
	if (ep->skip) {
		list_for_each(tmp, &ep_ring->td_list)
			td_num++;
	}

	event_dma = le64_to_cpu(event->buffer);
	trb_comp_code = GET_COMP_CODE(le32_to_cpu(event->transfer_len));
	/* Look for common error cases */
	switch (trb_comp_code) {
	/* Skip codes that require special handling depending on
	 * transfer type
	 */
	case COMP_SUCCESS:
	case COMP_SHORT_TX:
		break;
	case COMP_STOP:
		xhci_dbg(xhci, "Stopped on Transfer TRB\n");
		break;
	case COMP_STOP_INVAL:
		xhci_dbg(xhci, "Stopped on No-op or Link TRB\n");
		break;
	case COMP_STALL:
		xhci_warn(xhci, "WARN: Stalled endpoint\n");
		ep->ep_state |= EP_HALTED;
		status = -EPIPE;
		break;
	case COMP_TRB_ERR:
		xhci_warn(xhci, "WARN: TRB error on endpoint\n");
		status = -EILSEQ;
		break;
	case COMP_SPLIT_ERR:
	case COMP_TX_ERR:
		xhci_warn(xhci, "WARN: transfer error on endpoint\n");
		status = -EPROTO;
		break;
	case COMP_BABBLE:
		xhci_warn(xhci, "WARN: babble error on endpoint\n");
		status = -EOVERFLOW;
		break;
	case COMP_DB_ERR:
		xhci_warn(xhci, "WARN: HC couldn't access mem fast enough\n");
		status = -ENOSR;
		break;
	case COMP_BW_OVER:
		xhci_warn(xhci, "WARN: bandwidth overrun event on endpoint\n");
		break;
	case COMP_BUFF_OVER:
		xhci_warn(xhci, "WARN: buffer overrun event on endpoint\n");
		break;
	case COMP_UNDERRUN:
		/*
		 * When the Isoch ring is empty, the xHC will generate
		 * a Ring Overrun Event for IN Isoch endpoint or Ring
		 * Underrun Event for OUT Isoch endpoint.
		 */
		xhci_dbg(xhci, "underrun event on endpoint\n");
		if (!list_empty(&ep_ring->td_list))
			xhci_dbg(xhci, "Underrun Event for slot %d ep %d "
					"still with TDs queued?\n",
				 TRB_TO_SLOT_ID(le32_to_cpu(event->flags)),
				 ep_index);
		goto cleanup;
	case COMP_OVERRUN:
		xhci_dbg(xhci, "overrun event on endpoint\n");
		if (!list_empty(&ep_ring->td_list))
			xhci_dbg(xhci, "Overrun Event for slot %d ep %d "
					"still with TDs queued?\n",
				 TRB_TO_SLOT_ID(le32_to_cpu(event->flags)),
				 ep_index);
		goto cleanup;
	case COMP_DEV_ERR:
		xhci_warn(xhci, "WARN: detected an incompatible device\n");
		status = -EPROTO;
		break;
	case COMP_MISSED_INT:
		/*
		 * When a Missed Service Error is encountered, the xHC may
		 * have missed one or more isoc TDs. Set the skip flag on
		 * the endpoint; the missed TDs will be completed as short
		 * transfers the next time this ring is processed.
		 */
		ep->skip = true;
		xhci_dbg(xhci, "Miss service interval error, set skip flag\n");
		goto cleanup;
	default:
		if (xhci_is_vendor_info_code(xhci, trb_comp_code)) {
			status = 0;
			break;
		}
		xhci_warn(xhci, "ERROR Unknown event condition, HC probably "
				"busted\n");
		goto cleanup;
	}

	do {
		/* This TRB should be in the TD at the head of this ring's
		 * TD list.
		 */
		if (list_empty(&ep_ring->td_list)) {
			xhci_warn(xhci, "WARN Event TRB for slot %d ep %d "
					"with no TDs queued?\n",
				  TRB_TO_SLOT_ID(le32_to_cpu(event->flags)),
				  ep_index);
			xhci_dbg(xhci, "Event TRB with TRB type ID %u\n",
				 (le32_to_cpu(event->flags) &
				  TRB_TYPE_BITMASK)>>10);
			xhci_print_trb_offsets(xhci, (union xhci_trb *) event);
			if (ep->skip) {
				ep->skip = false;
				xhci_dbg(xhci, "td_list is empty while skip "
						"flag set. Clear skip flag.\n");
			}
			ret = 0;
			goto cleanup;
		}

		/* We've skipped all the TDs on the ep ring when ep->skip set */
		if (ep->skip && td_num == 0) {
			ep->skip = false;
			xhci_dbg(xhci, "All tds on the ep_ring skipped. "
						"Clear skip flag.\n");
			ret = 0;
			goto cleanup;
		}

		td = list_entry(ep_ring->td_list.next, struct xhci_td, td_list);
		if (ep->skip)
			td_num--;

		/* Is this a TRB in the currently executing TD? */
		event_seg = trb_in_td(ep_ring->deq_seg, ep_ring->dequeue,
				td->last_trb, event_dma);

		/*
		 * Skip the Force Stopped Event. The event_trb (event_dma) of
		 * an FSE is not in the current TD pointed to by
		 * ep_ring->dequeue, because the hardware dequeue pointer is
		 * still at the TRB before the current TD. That previous TRB
		 * may be a Link TRB or the last TRB of the previous TD. The
		 * command completion handler will take care of the rest.
		 */
		if (!event_seg && trb_comp_code == COMP_STOP_INVAL) {
			ret = 0;
			goto cleanup;
		}

		if (!event_seg) {
			if (!ep->skip ||
			    !usb_endpoint_xfer_isoc(&td->urb->ep->desc)) {
				/* Some host controllers give a spurious
				 * successful event after a short transfer.
				 * Ignore it.
				 */
				if ((xhci->quirks & XHCI_SPURIOUS_SUCCESS) &&
						ep_ring->last_td_was_short) {
					ep_ring->last_td_was_short = false;
					ret = 0;
					goto cleanup;
				}
				/* HC is busted, give up! */
				xhci_err(xhci,
					"ERROR Transfer event TRB DMA ptr not "
					"part of current TD\n");
				return -ESHUTDOWN;
			}

			ret = skip_isoc_td(xhci, td, event, ep, &status);
			goto cleanup;
		}
		if (trb_comp_code == COMP_SHORT_TX)
			ep_ring->last_td_was_short = true;
		else
			ep_ring->last_td_was_short = false;

		if (ep->skip) {
			xhci_dbg(xhci, "Found td. Clear skip flag.\n");
			ep->skip = false;
		}

		event_trb = &event_seg->trbs[(event_dma - event_seg->dma) /
						sizeof(*event_trb)];
		/*
		 * No-op TRBs should not trigger interrupts.
		 * If event_trb is a no-op TRB, it means the
		 * corresponding TD has been cancelled. Just ignore
		 * the TD.
		 */
		if (TRB_TYPE_NOOP_LE32(event_trb->generic.field[3])) {
			xhci_dbg(xhci,
				 "event_trb is a no-op TRB. Skip it\n");
			goto cleanup;
		}

		/* Now update the urb's actual_length and give back to
		 * the core
		 */
		if (usb_endpoint_xfer_control(&td->urb->ep->desc))
			ret = process_ctrl_td(xhci, td, event_trb, event, ep,
						 &status);
		else if (usb_endpoint_xfer_isoc(&td->urb->ep->desc))
			ret = process_isoc_td(xhci, td, event_trb, event, ep,
						 &status);
		else
			ret = process_bulk_intr_td(xhci, td, event_trb, event,
						 ep, &status);

cleanup:
		/*
		 * Do not update the event ring dequeue pointer if ep->skip is
		 * set; we will roll back to process the missed TDs.
		 */
		if (trb_comp_code == COMP_MISSED_INT || !ep->skip) {
			inc_deq(xhci, xhci->event_ring, true);
		}

		if (ret) {
			urb = td->urb;
			urb_priv = urb->hcpriv;
			/* Leave the TD around for the reset endpoint function
			 * to use (but only if it's not a control endpoint,
			 * since we already queued the Set TR dequeue pointer
			 * command for stalled control endpoints).
			 */
			if (usb_endpoint_xfer_control(&urb->ep->desc) ||
				(trb_comp_code != COMP_STALL &&
					trb_comp_code != COMP_BABBLE))
				xhci_urb_free_priv(xhci, urb_priv);

			usb_hcd_unlink_urb_from_ep(bus_to_hcd(urb->dev->bus), urb);
			if ((urb->actual_length != urb->transfer_buffer_length &&
						(urb->transfer_flags &
						 URB_SHORT_NOT_OK)) ||
					(status != 0 &&
					 !usb_endpoint_xfer_isoc(&urb->ep->desc)))
				xhci_dbg(xhci, "Giveback URB %p, len = %d, "
						"expected = %x, status = %d\n",
						urb, urb->actual_length,
						urb->transfer_buffer_length,
						status);
			spin_unlock(&xhci->lock);
			/* EHCI, UHCI, and OHCI always unconditionally set the
			 * urb->status of an isochronous endpoint to 0.
			 */
			if (usb_pipetype(urb->pipe) == PIPE_ISOCHRONOUS)
				status = 0;
			usb_hcd_giveback_urb(bus_to_hcd(urb->dev->bus), urb, status);
			spin_lock(&xhci->lock);
		}

	/*
	 * If ep->skip is set, there are missed TDs on the endpoint ring that
	 * still need to be taken care of. Process them as short transfers
	 * until we reach the TD pointed to by the event.
	 */
	} while (ep->skip && trb_comp_code != COMP_MISSED_INT);

	return 0;
}
/*
 * This function handles all OS-owned events on the event ring. It may drop
 * xhci->lock between event processing (e.g. to pass up port status changes).
 * Returns >0 for "possibly more events to process" (caller should call again),
 * otherwise 0 if done. In future, <0 returns should indicate error code.
 */
static int xhci_handle_event(struct xhci_hcd *xhci)
{
	union xhci_trb *event;
	int update_ptrs = 1;
	int ret;

	if (!xhci->event_ring || !xhci->event_ring->dequeue) {
		xhci->error_bitmask |= 1 << 1;
		return 0;
	}

	event = xhci->event_ring->dequeue;
	/* Does the HC or OS own the TRB? */
	if ((le32_to_cpu(event->event_cmd.flags) & TRB_CYCLE) !=
	    xhci->event_ring->cycle_state) {
		xhci->error_bitmask |= 1 << 2;
		return 0;
	}

	/*
	 * Barrier between reading the TRB_CYCLE (valid) flag above and any
	 * speculative reads of the event's flags/data below.
	 */
	rmb();
	/* FIXME: Handle more event types. */
	switch ((le32_to_cpu(event->event_cmd.flags) & TRB_TYPE_BITMASK)) {
	case TRB_TYPE(TRB_COMPLETION):
		handle_cmd_completion(xhci, &event->event_cmd);
		break;
	case TRB_TYPE(TRB_PORT_STATUS):
		handle_port_status(xhci, event);
		update_ptrs = 0;
		break;
	case TRB_TYPE(TRB_TRANSFER):
		ret = handle_tx_event(xhci, &event->trans_event);
		if (ret < 0)
			xhci->error_bitmask |= 1 << 9;
		else
			update_ptrs = 0;
		break;
	default:
		if ((le32_to_cpu(event->event_cmd.flags) & TRB_TYPE_BITMASK) >=
		    TRB_TYPE(48))
			handle_vendor_event(xhci, event);
		else
			xhci->error_bitmask |= 1 << 3;
	}
	/* Any of the above functions may drop and re-acquire the lock, so check
	 * to make sure a watchdog timer didn't mark the host as non-responsive.
	 */
	if (xhci->xhc_state & XHCI_STATE_DYING) {
		xhci_dbg(xhci, "xHCI host dying, returning from "
				"event handler.\n");
		return 0;
	}

	if (update_ptrs)
		/* Update SW event ring dequeue pointer */
		inc_deq(xhci, xhci->event_ring, true);

	/* Are there more items on the event ring? Caller will call us again to
	 * check.
	 */
	return 1;
}
/*
 * xHCI spec says we can get an interrupt, and if the HC has an error condition,
 * we might get bad data out of the event ring. Section 4.10.2.7 has a list of
 * indicators of an event TRB error, but we check the status *first* to be safe.
 */
irqreturn_t xhci_irq(struct usb_hcd *hcd)
{
	struct xhci_hcd *xhci = hcd_to_xhci(hcd);
	u32 status;
	union xhci_trb *trb;
	u64 temp_64;
	union xhci_trb *event_ring_deq;
	dma_addr_t deq;

	spin_lock(&xhci->lock);
	trb = xhci->event_ring->dequeue;
	/* Check if the xHC generated the interrupt, or the irq is shared */
	status = xhci_readl(xhci, &xhci->op_regs->status);
	if (status == 0xffffffff)
		goto hw_died;

	if (!(status & STS_EINT)) {
		spin_unlock(&xhci->lock);
		return IRQ_NONE;
	}
	if (status & STS_FATAL) {
		xhci_warn(xhci, "WARNING: Host System Error\n");
		xhci_halt(xhci);
hw_died:
		spin_unlock(&xhci->lock);
		return -ESHUTDOWN;
	}

	/*
	 * Clear the op reg interrupt status first,
	 * so we can receive interrupts from other MSI-X interrupters.
	 * Write 1 to clear the interrupt status.
	 */
	status |= STS_EINT;
	xhci_writel(xhci, status, &xhci->op_regs->status);
	/* FIXME when MSI-X is supported and there are multiple vectors */
	/* Clear the MSI-X event interrupt status */
	if (hcd->irq != -1) {
		u32 irq_pending;
		/* Acknowledge the PCI interrupt */
		irq_pending = xhci_readl(xhci, &xhci->ir_set->irq_pending);
		irq_pending |= 0x3;
		xhci_writel(xhci, irq_pending, &xhci->ir_set->irq_pending);
	}

	if (xhci->xhc_state & XHCI_STATE_DYING) {
		xhci_dbg(xhci, "xHCI dying, ignoring interrupt. "
				"Shouldn't IRQs be disabled?\n");
		/* Clear the event handler busy flag (RW1C);
		 * the event ring should be empty.
		 */
		temp_64 = xhci_read_64(xhci, &xhci->ir_set->erst_dequeue);
		xhci_write_64(xhci, temp_64 | ERST_EHB,
				&xhci->ir_set->erst_dequeue);
		spin_unlock(&xhci->lock);

		return IRQ_HANDLED;
	}

	event_ring_deq = xhci->event_ring->dequeue;
	/* FIXME this should be a delayed service routine
	 * that clears the EHB.
	 */
	while (xhci_handle_event(xhci) > 0) {}

	temp_64 = xhci_read_64(xhci, &xhci->ir_set->erst_dequeue);
	/* If necessary, update the HW's version of the event ring deq ptr. */
	if (event_ring_deq != xhci->event_ring->dequeue) {
		deq = xhci_trb_virt_to_dma(xhci->event_ring->deq_seg,
				xhci->event_ring->dequeue);
		if (deq == 0)
			xhci_warn(xhci, "WARN something wrong with SW event "
					"ring dequeue ptr.\n");
		/* Update HC event ring dequeue pointer */
		temp_64 &= ERST_PTR_MASK;
		temp_64 |= ((u64) deq & (u64) ~ERST_PTR_MASK);
	}

	/* Clear the event handler busy flag (RW1C); event ring is empty. */
	temp_64 |= ERST_EHB;
	xhci_write_64(xhci, temp_64, &xhci->ir_set->erst_dequeue);

	spin_unlock(&xhci->lock);

	return IRQ_HANDLED;
}
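/*
 * MSI and MSI-X interrupts are not shared with other devices, so there is no
 * "did we cause this interrupt?" claim check to do here; just run the common
 * interrupt handler.
 */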
irqreturn_t xhci_msi_irq(int irq, struct usb_hcd *hcd)
{
	return xhci_irq(hcd);
}
/****		Endpoint Ring Operations	****/

/*
 * Generic function for queueing a TRB on a ring.
 * The caller must have checked to make sure there's room on the ring.
 *
 * @more_trbs_coming:	Will you enqueue more TRBs before calling
 *			prepare_transfer()?
 */
static void queue_trb(struct xhci_hcd *xhci, struct xhci_ring *ring,
		bool consumer, bool more_trbs_coming, bool isoc,
		u32 field1, u32 field2, u32 field3, u32 field4)
{
	struct xhci_generic_trb *trb;

	trb = &ring->enqueue->generic;
	trb->field[0] = cpu_to_le32(field1);
	trb->field[1] = cpu_to_le32(field2);
	trb->field[2] = cpu_to_le32(field3);
	trb->field[3] = cpu_to_le32(field4);
	inc_enq(xhci, ring, consumer, more_trbs_coming, isoc);
}
/*
 * Does various checks on the endpoint ring, and makes it ready to queue num_trbs.
 * FIXME allocate segments if the ring is full.
 */
static int prepare_ring(struct xhci_hcd *xhci, struct xhci_ring *ep_ring,
		u32 ep_state, unsigned int num_trbs, bool isoc, gfp_t mem_flags)
{
	/* Make sure the endpoint has been added to xHC schedule */
	switch (ep_state) {
	case EP_STATE_DISABLED:
		/*
		 * USB core changed config/interfaces without notifying us,
		 * or hardware is reporting the wrong state.
		 */
		xhci_warn(xhci, "WARN urb submitted to disabled ep\n");
		return -ENOENT;
	case EP_STATE_ERROR:
		xhci_warn(xhci, "WARN waiting for error on ep to be cleared\n");
		/* FIXME event handling code for error needs to clear it */
		/* XXX not sure if this should be -ENOENT or not */
		return -EINVAL;
	case EP_STATE_HALTED:
		xhci_dbg(xhci, "WARN halted endpoint, queueing URB anyway.\n");
	case EP_STATE_STOPPED:
	case EP_STATE_RUNNING:
		break;
	default:
		xhci_err(xhci, "ERROR unknown endpoint state for ep\n");
		/*
		 * FIXME issue Configure Endpoint command to try to get the HC
		 * back into a known state.
		 */
		return -EINVAL;
	}
	if (!room_on_ring(xhci, ep_ring, num_trbs)) {
		/* FIXME allocate more room */
		xhci_err(xhci, "ERROR no room on ep ring\n");
		return -ENOMEM;
	}

	if (enqueue_is_link_trb(ep_ring)) {
		struct xhci_ring *ring = ep_ring;
		union xhci_trb *next;

		next = ring->enqueue;

		while (last_trb(xhci, ring, ring->enq_seg, next)) {
			/* If we're not dealing with 0.95 hardware or isoc rings
			 * on AMD 0.96 host, clear the chain bit.
			 */
			if (!xhci_link_trb_quirk(xhci) && !(isoc &&
					(xhci->quirks & XHCI_AMD_0x96_HOST)))
				next->link.control &= cpu_to_le32(~TRB_CHAIN);
			else
				next->link.control |= cpu_to_le32(TRB_CHAIN);

			wmb();
			next->link.control ^= cpu_to_le32(TRB_CYCLE);

			/* Toggle the cycle bit after the last ring segment. */
			if (last_trb_on_last_seg(xhci, ring, ring->enq_seg, next)) {
				ring->cycle_state = (ring->cycle_state ? 0 : 1);
				if (!in_interrupt()) {
					xhci_dbg(xhci, "queue_trb: Toggle cycle "
							"state for ring %p = %i\n",
							ring, (unsigned int)ring->cycle_state);
				}
			}
			ring->enq_seg = ring->enq_seg->next;
			ring->enqueue = ring->enq_seg->trbs;
			next = ring->enqueue;
		}
	}

	return 0;
}
static int prepare_transfer(struct xhci_hcd *xhci,
		struct xhci_virt_device *xdev,
		unsigned int ep_index,
		unsigned int stream_id,
		unsigned int num_trbs,
		struct urb *urb,
		unsigned int td_index,
		bool isoc,
		gfp_t mem_flags)
{
	int ret;
	struct urb_priv *urb_priv;
	struct xhci_td *td;
	struct xhci_ring *ep_ring;
	struct xhci_ep_ctx *ep_ctx = xhci_get_ep_ctx(xhci, xdev->out_ctx, ep_index);

	ep_ring = xhci_stream_id_to_ring(xdev, ep_index, stream_id);
	if (!ep_ring) {
		xhci_dbg(xhci, "Can't prepare ring for bad stream ID %u\n",
				stream_id);
		return -EINVAL;
	}

	ret = prepare_ring(xhci, ep_ring,
			   le32_to_cpu(ep_ctx->ep_info) & EP_STATE_MASK,
			   num_trbs, isoc, mem_flags);
	if (ret)
		return ret;

	urb_priv = urb->hcpriv;
	td = urb_priv->td[td_index];

	INIT_LIST_HEAD(&td->td_list);
	INIT_LIST_HEAD(&td->cancelled_td_list);

	if (td_index == 0) {
		ret = usb_hcd_link_urb_to_ep(bus_to_hcd(urb->dev->bus), urb);
		if (unlikely(ret))
			return ret;
	}

	td->urb = urb;
	/* Add this TD to the tail of the endpoint ring's TD list */
	list_add_tail(&td->td_list, &ep_ring->td_list);
	td->start_seg = ep_ring->enq_seg;
	td->first_trb = ep_ring->enqueue;

	urb_priv->td[td_index] = td;

	return 0;
}
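/*
 * Count how many TRBs are needed for the scatter-gather list of an URB.
 * A TRB buffer may not cross a 64KB boundary, so a single sg entry may need
 * several TRBs; sg data beyond the requested transfer length is not counted.
 */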
static unsigned int count_sg_trbs_needed(struct xhci_hcd *xhci, struct urb *urb)
{
	int num_sgs, num_trbs, running_total, temp, i;
	struct scatterlist *sg;

	sg = NULL;
	num_sgs = urb->num_mapped_sgs;
	temp = urb->transfer_buffer_length;

	xhci_dbg(xhci, "count sg list trbs:\n");
	num_trbs = 0;
	for_each_sg(urb->sg, sg, num_sgs, i) {
		unsigned int previous_total_trbs = num_trbs;
		unsigned int len = sg_dma_len(sg);

		/* Scatter gather list entries may cross 64KB boundaries */
		running_total = TRB_MAX_BUFF_SIZE -
			(sg_dma_address(sg) & (TRB_MAX_BUFF_SIZE - 1));
		running_total &= TRB_MAX_BUFF_SIZE - 1;
		if (running_total != 0)
			num_trbs++;

		/* How many more 64KB chunks to transfer, how many more TRBs? */
		while (running_total < sg_dma_len(sg) && running_total < temp) {
			num_trbs++;
			running_total += TRB_MAX_BUFF_SIZE;
		}
		xhci_dbg(xhci, " sg #%d: dma = %#llx, len = %#x (%d), num_trbs = %d\n",
				i, (unsigned long long)sg_dma_address(sg),
				len, len, num_trbs - previous_total_trbs);

		len = min_t(int, len, temp);
		temp -= len;
		if (temp == 0)
			break;
	}
	xhci_dbg(xhci, "\n");
	if (!in_interrupt())
		xhci_dbg(xhci, "ep %#x - urb len = %d, sglist used, "
				"num_trbs = %d\n",
				urb->ep->desc.bEndpointAddress,
				urb->transfer_buffer_length,
				num_trbs);
	return num_trbs;
}
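/*
 * Sanity check the TRB math after queueing a transfer: all counted TRBs
 * should have been used up, and the number of bytes queued should match the
 * URB's transfer_buffer_length. A mismatch here is a driver bug, not a
 * hardware problem.
 */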
static void check_trb_math(struct urb *urb, int num_trbs, int running_total)
{
	if (num_trbs != 0)
		dev_err(&urb->dev->dev, "%s - ep %#x - Miscalculated number of "
				"TRBs, %d left\n", __func__,
				urb->ep->desc.bEndpointAddress, num_trbs);
	if (running_total != urb->transfer_buffer_length)
		dev_err(&urb->dev->dev, "%s - ep %#x - Miscalculated tx length, "
				"queued %#x (%d), asked for %#x (%d)\n",
				__func__,
				urb->ep->desc.bEndpointAddress,
				running_total, running_total,
				urb->transfer_buffer_length,
				urb->transfer_buffer_length);
}
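/*
 * Hand a fully built TD to the hardware. The first TRB was queued with its
 * cycle bit left in the "not yet valid" state; flipping it here, after the
 * write barrier, publishes the whole TD to the xHC at once, and ringing the
 * doorbell tells the xHC to look at the ring.
 */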
static void giveback_first_trb(struct xhci_hcd *xhci, int slot_id,
		unsigned int ep_index, unsigned int stream_id, int start_cycle,
		struct xhci_generic_trb *start_trb)
{
	/*
	 * Pass all the TRBs to the hardware at once and make sure this write
	 * isn't reordered.
	 */
	wmb();
	if (start_cycle)
		start_trb->field[3] |= cpu_to_le32(start_cycle);
	else
		start_trb->field[3] &= cpu_to_le32(~TRB_CYCLE);
	xhci_ring_ep_doorbell(xhci, slot_id, ep_index, stream_id);
}
/*
 * xHCI uses normal TRBs for both bulk and interrupt. When the interrupt
 * endpoint is to be serviced, the xHC will consume (at most) one TD. A TD
 * (comprised of sg list entries) can take several service intervals to
 * transmit.
 */
int xhci_queue_intr_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
		struct urb *urb, int slot_id, unsigned int ep_index)
{
	struct xhci_ep_ctx *ep_ctx = xhci_get_ep_ctx(xhci,
			xhci->devs[slot_id]->out_ctx, ep_index);
	int xhci_interval;
	int ep_interval;

	xhci_interval = EP_INTERVAL_TO_UFRAMES(le32_to_cpu(ep_ctx->ep_info));
	ep_interval = urb->interval;
	/* Convert to microframes */
	if (urb->dev->speed == USB_SPEED_LOW ||
			urb->dev->speed == USB_SPEED_FULL)
		ep_interval *= 8;
	/* FIXME change this to a warning and a suggestion to use the new API
	 * to set the polling interval (once the API is added).
	 */
	if (xhci_interval != ep_interval) {
		if (printk_ratelimit())
			dev_dbg(&urb->dev->dev, "Driver uses different interval"
					" (%d microframe%s) than xHCI "
					"(%d microframe%s)\n",
					ep_interval,
					ep_interval == 1 ? "" : "s",
					xhci_interval,
					xhci_interval == 1 ? "" : "s");
		urb->interval = xhci_interval;
		/* Convert back to frames for LS/FS devices */
		if (urb->dev->speed == USB_SPEED_LOW ||
				urb->dev->speed == USB_SPEED_FULL)
			urb->interval /= 8;
	}
	return xhci_queue_bulk_tx(xhci, GFP_ATOMIC, urb, slot_id, ep_index);
}
/*
 * The TD size is the number of bytes remaining in the TD (including this TRB),
 * right shifted by 10.
 * It must fit in bits 21:17, so it can't be bigger than 31.
 */
static u32 xhci_td_remainder(unsigned int remainder)
{
	u32 max = (1 << (21 - 17 + 1)) - 1;

	if ((remainder >> 10) >= max)
		return max << 17;
	else
		return (remainder >> 10) << 17;
}
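/*
 * Worked example for the field above: with 64KB (65536 bytes) left in the TD,
 * 65536 >> 10 = 64 exceeds the 5-bit maximum of 31, so the field saturates at
 * 31 << 17. With 20KB (20480 bytes) left, 20480 >> 10 = 20 fits, so the
 * encoded TD size field is 20 << 17.
 */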
/*
 * For xHCI 1.0 host controllers, TD size is the number of packets remaining in
 * the TD (*not* including this TRB).
 *
 * Total TD packet count = total_packet_count =
 *     roundup(TD size in bytes / wMaxPacketSize)
 *
 * Packets transferred up to and including this TRB = packets_transferred =
 *     rounddown(total bytes transferred including this TRB / wMaxPacketSize)
 *
 * TD size = total_packet_count - packets_transferred
 *
 * It must fit in bits 21:17, so it can't be bigger than 31.
 */
static u32 xhci_v1_0_td_remainder(int running_total, int trb_buff_len,
		unsigned int total_packet_count, struct urb *urb)
{
	int packets_transferred;

	/* One TRB with a zero-length data packet. */
	if (running_total == 0 && trb_buff_len == 0)
		return 0;

	/* All the TRB queueing functions don't count the current TRB in
	 * running_total.
	 */
	packets_transferred = (running_total + trb_buff_len) /
		usb_endpoint_maxp(&urb->ep->desc);

	return xhci_td_remainder(total_packet_count - packets_transferred);
}
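/*
 * Queue a bulk transfer described by a scatter-gather list. The first TRB is
 * built with its cycle bit deferred (see giveback_first_trb()), each sg entry
 * is split into TRBs that respect the 64KB boundary rule, and the chain bit
 * links all TRBs of the TD except the last, which gets TRB_IOC instead.
 */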
static int queue_bulk_sg_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
		struct urb *urb, int slot_id, unsigned int ep_index)
{
	struct xhci_ring *ep_ring;
	unsigned int num_trbs;
	struct urb_priv *urb_priv;
	struct xhci_td *td;
	struct scatterlist *sg;
	int num_sgs;
	int trb_buff_len, this_sg_len, running_total;
	unsigned int total_packet_count;
	bool first_trb;
	u64 addr;
	bool more_trbs_coming;
	struct xhci_generic_trb *start_trb;
	int start_cycle;

	ep_ring = xhci_urb_to_transfer_ring(xhci, urb);
	if (!ep_ring)
		return -EINVAL;

	num_trbs = count_sg_trbs_needed(xhci, urb);
	num_sgs = urb->num_mapped_sgs;
	/* This is a packet count, not a byte count, so divide and round up. */
	total_packet_count = DIV_ROUND_UP(urb->transfer_buffer_length,
			usb_endpoint_maxp(&urb->ep->desc));

	trb_buff_len = prepare_transfer(xhci, xhci->devs[slot_id],
			ep_index, urb->stream_id,
			num_trbs, urb, 0, false, mem_flags);
	if (trb_buff_len < 0)
		return trb_buff_len;

	urb_priv = urb->hcpriv;
	td = urb_priv->td[0];

	/*
	 * Don't give the first TRB to the hardware (by toggling the cycle bit)
	 * until we've finished creating all the other TRBs.  The ring's cycle
	 * state may change as we enqueue the other TRBs, so save it too.
	 */
	start_trb = &ep_ring->enqueue->generic;
	start_cycle = ep_ring->cycle_state;

	running_total = 0;
	/*
	 * How much data is in the first TRB?
	 *
	 * There are three forces at work for TRB buffer pointers and lengths:
	 * 1. We don't want to walk off the end of this sg-list entry buffer.
	 * 2. The transfer length that the driver requested may be smaller than
	 *    the amount of memory allocated for this scatter-gather list.
	 * 3. TRBs buffers can't cross 64KB boundaries.
	 */
	sg = urb->sg;
	addr = (u64) sg_dma_address(sg);
	this_sg_len = sg_dma_len(sg);
	trb_buff_len = TRB_MAX_BUFF_SIZE - (addr & (TRB_MAX_BUFF_SIZE - 1));
	trb_buff_len = min_t(int, trb_buff_len, this_sg_len);
	if (trb_buff_len > urb->transfer_buffer_length)
		trb_buff_len = urb->transfer_buffer_length;
	xhci_dbg(xhci, "First length to xfer from 1st sglist entry = %u\n",
			trb_buff_len);

	first_trb = true;
	/* Queue the first TRB, even if it's zero-length */
	do {
		u32 field = 0;
		u32 length_field = 0;
		u32 remainder = 0;

		/* Don't change the cycle bit of the first TRB until later */
		if (first_trb) {
			first_trb = false;
			if (start_cycle == 0)
				field |= 0x1;
		} else
			field |= ep_ring->cycle_state;

		/* Chain all the TRBs together; clear the chain bit in the last
		 * TRB to indicate it's the last TRB in the chain.
		 */
		if (num_trbs > 1) {
			field |= TRB_CHAIN;
		} else {
			/* FIXME - add check for ZERO_PACKET flag before this */
			td->last_trb = ep_ring->enqueue;
			field |= TRB_IOC;
		}

		/* Only set interrupt on short packet for IN endpoints */
		if (usb_urb_dir_in(urb))
			field |= TRB_ISP;

		xhci_dbg(xhci, " sg entry: dma = %#x, len = %#x (%d), "
				"64KB boundary at %#x, end dma = %#x\n",
				(unsigned int) addr, trb_buff_len, trb_buff_len,
				(unsigned int) (addr + TRB_MAX_BUFF_SIZE) & ~(TRB_MAX_BUFF_SIZE - 1),
				(unsigned int) addr + trb_buff_len);
		if (TRB_MAX_BUFF_SIZE -
				(addr & (TRB_MAX_BUFF_SIZE - 1)) < trb_buff_len) {
			xhci_warn(xhci, "WARN: sg dma xfer crosses 64KB boundaries!\n");
			xhci_dbg(xhci, "Next boundary at %#x, end dma = %#x\n",
					(unsigned int) (addr + TRB_MAX_BUFF_SIZE) & ~(TRB_MAX_BUFF_SIZE - 1),
					(unsigned int) addr + trb_buff_len);
		}

		/* Set the TRB length, TD size, and interrupter fields. */
		if (xhci->hci_version < 0x100) {
			remainder = xhci_td_remainder(
					urb->transfer_buffer_length -
					running_total);
		} else {
			remainder = xhci_v1_0_td_remainder(running_total,
					trb_buff_len, total_packet_count, urb);
		}
		length_field = TRB_LEN(trb_buff_len) |
			remainder |
			TRB_INTR_TARGET(0);

		if (num_trbs > 1)
			more_trbs_coming = true;
		else
			more_trbs_coming = false;
		queue_trb(xhci, ep_ring, false, more_trbs_coming, false,
				lower_32_bits(addr),
				upper_32_bits(addr),
				length_field,
				field | TRB_TYPE(TRB_NORMAL));
		--num_trbs;
		running_total += trb_buff_len;

		/* Calculate length for next transfer --
		 * Are we done queueing all the TRBs for this sg entry?
		 */
		this_sg_len -= trb_buff_len;
		if (this_sg_len == 0) {
			--num_sgs;
			if (num_sgs == 0)
				break;
			sg = sg_next(sg);
			addr = (u64) sg_dma_address(sg);
			this_sg_len = sg_dma_len(sg);
		} else {
			addr += trb_buff_len;
		}

		trb_buff_len = TRB_MAX_BUFF_SIZE -
			(addr & (TRB_MAX_BUFF_SIZE - 1));
		trb_buff_len = min_t(int, trb_buff_len, this_sg_len);
		if (running_total + trb_buff_len > urb->transfer_buffer_length)
			trb_buff_len =
				urb->transfer_buffer_length - running_total;
	} while (running_total < urb->transfer_buffer_length);

	check_trb_math(urb, num_trbs, running_total);
	giveback_first_trb(xhci, slot_id, ep_index, urb->stream_id,
			start_cycle, start_trb);
	return 0;
}

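/*
 * Illustrative example of the 64KB rule above: an sg entry mapped at DMA
 * address 0x1FC00 with 0x1000 bytes of data runs into the 64KB boundary at
 * 0x20000, so it is split into one TRB of 0x400 bytes (up to the boundary)
 * and a second TRB of 0xC00 bytes starting at 0x20000.
 */
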
/* This is very similar to what ehci-q.c qtd_fill() does */
int xhci_queue_bulk_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
		struct urb *urb, int slot_id, unsigned int ep_index)
{
	struct xhci_ring *ep_ring;
	struct urb_priv *urb_priv;
	struct xhci_td *td;
	int num_trbs;
	struct xhci_generic_trb *start_trb;
	bool first_trb;
	bool more_trbs_coming;
	int start_cycle;
	u32 field, length_field;
	int running_total, trb_buff_len, ret;
	unsigned int total_packet_count;
	u64 addr;

	if (urb->num_sgs)
		return queue_bulk_sg_tx(xhci, mem_flags, urb, slot_id, ep_index);

	ep_ring = xhci_urb_to_transfer_ring(xhci, urb);
	if (!ep_ring)
		return -EINVAL;

	num_trbs = 0;
	/* How much data is (potentially) left before the 64KB boundary? */
	running_total = TRB_MAX_BUFF_SIZE -
		(urb->transfer_dma & (TRB_MAX_BUFF_SIZE - 1));
	running_total &= TRB_MAX_BUFF_SIZE - 1;

	/* If there's some data on this 64KB chunk, or we have to send a
	 * zero-length transfer, we need at least one TRB
	 */
	if (running_total != 0 || urb->transfer_buffer_length == 0)
		num_trbs++;
	/* How many more 64KB chunks to transfer, how many more TRBs? */
	while (running_total < urb->transfer_buffer_length) {
		num_trbs++;
		running_total += TRB_MAX_BUFF_SIZE;
	}
	/* FIXME: this doesn't deal with URB_ZERO_PACKET - need one more */

	if (!in_interrupt())
		xhci_dbg(xhci, "ep %#x - urb len = %#x (%d), "
				"addr = %#llx, num_trbs = %d\n",
				urb->ep->desc.bEndpointAddress,
				urb->transfer_buffer_length,
				urb->transfer_buffer_length,
				(unsigned long long)urb->transfer_dma,
				num_trbs);

	ret = prepare_transfer(xhci, xhci->devs[slot_id],
			ep_index, urb->stream_id,
			num_trbs, urb, 0, false, mem_flags);
	if (ret < 0)
		return ret;

	urb_priv = urb->hcpriv;
	td = urb_priv->td[0];

	/*
	 * Don't give the first TRB to the hardware (by toggling the cycle bit)
	 * until we've finished creating all the other TRBs.  The ring's cycle
	 * state may change as we enqueue the other TRBs, so save it too.
	 */
	start_trb = &ep_ring->enqueue->generic;
	start_cycle = ep_ring->cycle_state;

	running_total = 0;
	/* This is a packet count, not a byte count, so divide and round up. */
	total_packet_count = DIV_ROUND_UP(urb->transfer_buffer_length,
			usb_endpoint_maxp(&urb->ep->desc));
	/* How much data is in the first TRB? */
	addr = (u64) urb->transfer_dma;
	trb_buff_len = TRB_MAX_BUFF_SIZE -
		(urb->transfer_dma & (TRB_MAX_BUFF_SIZE - 1));
	if (trb_buff_len > urb->transfer_buffer_length)
		trb_buff_len = urb->transfer_buffer_length;

	first_trb = true;

	/* Queue the first TRB, even if it's zero-length */
	do {
		u32 remainder = 0;
		field = 0;

		/* Don't change the cycle bit of the first TRB until later */
		if (first_trb) {
			first_trb = false;
			if (start_cycle == 0)
				field |= 0x1;
		} else
			field |= ep_ring->cycle_state;

		/* Chain all the TRBs together; clear the chain bit in the last
		 * TRB to indicate it's the last TRB in the chain.
		 */
		if (num_trbs > 1) {
			field |= TRB_CHAIN;
		} else {
			/* FIXME - add check for ZERO_PACKET flag before this */
			td->last_trb = ep_ring->enqueue;
			field |= TRB_IOC;
		}

		/* Only set interrupt on short packet for IN endpoints */
		if (usb_urb_dir_in(urb))
			field |= TRB_ISP;

		/* Set the TRB length, TD size, and interrupter fields. */
		if (xhci->hci_version < 0x100) {
			remainder = xhci_td_remainder(
					urb->transfer_buffer_length -
					running_total);
		} else {
			remainder = xhci_v1_0_td_remainder(running_total,
					trb_buff_len, total_packet_count, urb);
		}
		length_field = TRB_LEN(trb_buff_len) |
			remainder |
			TRB_INTR_TARGET(0);

		if (num_trbs > 1)
			more_trbs_coming = true;
		else
			more_trbs_coming = false;
		queue_trb(xhci, ep_ring, false, more_trbs_coming, false,
				lower_32_bits(addr),
				upper_32_bits(addr),
				length_field,
				field | TRB_TYPE(TRB_NORMAL));
		--num_trbs;
		running_total += trb_buff_len;

		/* Calculate length for next transfer */
		addr += trb_buff_len;
		trb_buff_len = urb->transfer_buffer_length - running_total;
		if (trb_buff_len > TRB_MAX_BUFF_SIZE)
			trb_buff_len = TRB_MAX_BUFF_SIZE;
	} while (running_total < urb->transfer_buffer_length);

	check_trb_math(urb, num_trbs, running_total);
	giveback_first_trb(xhci, slot_id, ep_index, urb->stream_id,
			start_cycle, start_trb);
	return 0;
}

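/*
 * Illustrative TRB count for the loop above: a 0x18000-byte buffer mapped at
 * DMA address 0x10200 has 0xFE00 bytes before the first 64KB boundary (one
 * TRB), and one more 64KB chunk covers the remaining 0x8200 bytes, so
 * num_trbs = 2.
 */
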
/* Caller must have locked xhci->lock */
int xhci_queue_ctrl_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
		struct urb *urb, int slot_id, unsigned int ep_index)
{
	struct xhci_ring *ep_ring;
	int num_trbs;
	int ret;
	struct usb_ctrlrequest *setup;
	struct xhci_generic_trb *start_trb;
	int start_cycle;
	u32 field, length_field;
	struct urb_priv *urb_priv;
	struct xhci_td *td;

	ep_ring = xhci_urb_to_transfer_ring(xhci, urb);
	if (!ep_ring)
		return -EINVAL;

	/*
	 * Need to copy setup packet into setup TRB, so we can't use the setup
	 * DMA address.
	 */
	if (!urb->setup_packet)
		return -EINVAL;

	if (!in_interrupt())
		xhci_dbg(xhci, "Queueing ctrl tx for slot id %d, ep %d\n",
				slot_id, ep_index);
	/* 1 TRB for setup, 1 for status */
	num_trbs = 2;
	/*
	 * Don't need to check if we need additional event data and normal TRBs,
	 * since data in control transfers will never get bigger than 16MB
	 * XXX: can we get a buffer that crosses 64KB boundaries?
	 */
	if (urb->transfer_buffer_length > 0)
		num_trbs++;
	ret = prepare_transfer(xhci, xhci->devs[slot_id],
			ep_index, urb->stream_id,
			num_trbs, urb, 0, false, mem_flags);
	if (ret < 0)
		return ret;

	urb_priv = urb->hcpriv;
	td = urb_priv->td[0];

	/*
	 * Don't give the first TRB to the hardware (by toggling the cycle bit)
	 * until we've finished creating all the other TRBs.  The ring's cycle
	 * state may change as we enqueue the other TRBs, so save it too.
	 */
	start_trb = &ep_ring->enqueue->generic;
	start_cycle = ep_ring->cycle_state;

	/* Queue setup TRB - see section 6.4.1.2.1 */
	/* FIXME better way to translate setup_packet into two u32 fields? */
	setup = (struct usb_ctrlrequest *) urb->setup_packet;
	field = 0;
	field |= TRB_IDT | TRB_TYPE(TRB_SETUP);
	if (start_cycle == 0)
		field |= 0x1;

	/* xHCI 1.0 6.4.1.2.1: Transfer Type field */
	if (xhci->hci_version == 0x100) {
		if (urb->transfer_buffer_length > 0) {
			if (setup->bRequestType & USB_DIR_IN)
				field |= TRB_TX_TYPE(TRB_DATA_IN);
			else
				field |= TRB_TX_TYPE(TRB_DATA_OUT);
		}
	}

	queue_trb(xhci, ep_ring, false, true, false,
			setup->bRequestType | setup->bRequest << 8 | le16_to_cpu(setup->wValue) << 16,
			le16_to_cpu(setup->wIndex) | le16_to_cpu(setup->wLength) << 16,
			TRB_LEN(8) | TRB_INTR_TARGET(0),
			/* Immediate data in pointer */
			field);

	/* If there's data, queue data TRBs */
	/* Only set interrupt on short packet for IN endpoints */
	if (usb_urb_dir_in(urb))
		field = TRB_ISP | TRB_TYPE(TRB_DATA);
	else
		field = TRB_TYPE(TRB_DATA);

	length_field = TRB_LEN(urb->transfer_buffer_length) |
		xhci_td_remainder(urb->transfer_buffer_length) |
		TRB_INTR_TARGET(0);
	if (urb->transfer_buffer_length > 0) {
		if (setup->bRequestType & USB_DIR_IN)
			field |= TRB_DIR_IN;
		queue_trb(xhci, ep_ring, false, true, false,
				lower_32_bits(urb->transfer_dma),
				upper_32_bits(urb->transfer_dma),
				length_field,
				field | ep_ring->cycle_state);
	}

	/* Save the DMA address of the last TRB in the TD */
	td->last_trb = ep_ring->enqueue;

	/* Queue status TRB - see Table 7 and sections 4.11.2.2 and 6.4.1.2.3 */
	/* If the device sent data, the status stage is an OUT transfer */
	if (urb->transfer_buffer_length > 0 && setup->bRequestType & USB_DIR_IN)
		field = 0;
	else
		field = TRB_DIR_IN;
	queue_trb(xhci, ep_ring, false, false, false,
			0,
			0,
			TRB_INTR_TARGET(0),
			/* Event on completion */
			field | TRB_IOC | TRB_TYPE(TRB_STATUS) | ep_ring->cycle_state);

	giveback_first_trb(xhci, slot_id, ep_index, 0,
			start_cycle, start_trb);
	return 0;
}

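/*
 * Resulting TD layout for a typical IN control transfer such as
 * GET_DESCRIPTOR (illustrative): a setup TRB carrying the 8 setup bytes as
 * immediate data (TRB_IDT), one data stage TRB with TRB_DIR_IN, and a status
 * TRB in the opposite (OUT) direction with TRB_IOC set so completion raises
 * an event.  A no-data request omits the data TRB and its status stage is IN.
 */
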
static int count_isoc_trbs_needed(struct xhci_hcd *xhci,
		struct urb *urb, int i)
{
	int num_trbs = 0;
	u64 addr, td_len;

	addr = (u64) (urb->transfer_dma + urb->iso_frame_desc[i].offset);
	td_len = urb->iso_frame_desc[i].length;

	num_trbs = DIV_ROUND_UP(td_len + (addr & (TRB_MAX_BUFF_SIZE - 1)),
			TRB_MAX_BUFF_SIZE);
	if (num_trbs == 0)
		num_trbs++;

	return num_trbs;
}

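/*
 * Example of the count above (illustrative values): a 0x3000-byte frame whose
 * buffer starts 0x800 bytes before a 64KB boundary (addr & 0xFFFF == 0xF800)
 * needs DIV_ROUND_UP(0x3000 + 0xF800, 0x10000) = 2 TRBs; a zero-length frame
 * still gets one TRB.
 */
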
/*
 * The transfer burst count field of the isochronous TRB defines the number of
 * bursts that are required to move all packets in this TD.  Only SuperSpeed
 * devices can burst up to bMaxBurst number of packets per service interval.
 * This field is zero based, meaning a value of zero in the field means one
 * burst.  Basically, for everything but SuperSpeed devices, this field will be
 * zero.  Only xHCI 1.0 host controllers support this field.
 */
static unsigned int xhci_get_burst_count(struct xhci_hcd *xhci,
		struct usb_device *udev,
		struct urb *urb, unsigned int total_packet_count)
{
	unsigned int max_burst;

	if (xhci->hci_version < 0x100 || udev->speed != USB_SPEED_SUPER)
		return 0;

	max_burst = urb->ep->ss_ep_comp.bMaxBurst;
	/* TBC is a zero-based burst count, so divide and round up, then
	 * subtract one; a byte-style roundup() would overflow the field.
	 */
	return DIV_ROUND_UP(total_packet_count, max_burst + 1) - 1;
}

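/*
 * Example (illustrative): a SuperSpeed isoc endpoint with bMaxBurst = 3 moves
 * up to 4 packets per burst, so a 10-packet TD needs DIV_ROUND_UP(10, 4) = 3
 * bursts and the zero-based TBC field is 3 - 1 = 2.
 */
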
/*
 * Returns the number of packets in the last "burst" of packets.  This field is
 * valid for all speeds of devices.  USB 2.0 devices can only do one "burst", so
 * the last burst packet count is equal to the total number of packets in the
 * TD.  SuperSpeed endpoints can have up to 3 bursts.  All but the last burst
 * must contain (bMaxBurst + 1) number of packets, but the last burst can
 * contain 1 to (bMaxBurst + 1) packets.
 */
static unsigned int xhci_get_last_burst_packet_count(struct xhci_hcd *xhci,
		struct usb_device *udev,
		struct urb *urb, unsigned int total_packet_count)
{
	unsigned int max_burst;
	unsigned int residue;

	if (xhci->hci_version < 0x100)
		return 0;

	switch (udev->speed) {
	case USB_SPEED_SUPER:
		/* bMaxBurst is zero based: 0 means 1 packet per burst */
		max_burst = urb->ep->ss_ep_comp.bMaxBurst;
		residue = total_packet_count % (max_burst + 1);
		/* If residue is zero, the last burst contains (max_burst + 1)
		 * number of packets, but the TLBPC field is zero-based.
		 */
		if (residue == 0)
			return max_burst;
		return residue - 1;
	default:
		if (total_packet_count == 0)
			return 0;
		return total_packet_count - 1;
	}
}

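/*
 * Continuing the example above: with 10 packets and bMaxBurst = 3, the last
 * burst holds 10 % 4 = 2 packets, so TLBPC = 2 - 1 = 1.  Were the TD exactly
 * 8 packets, the residue would be 0 and TLBPC = bMaxBurst = 3, i.e. a full,
 * zero-based final burst of 4 packets.
 */
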
/* This is for isoc transfer */
static int xhci_queue_isoc_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
		struct urb *urb, int slot_id, unsigned int ep_index)
{
	struct xhci_ring *ep_ring;
	struct urb_priv *urb_priv;
	struct xhci_td *td;
	int num_tds, trbs_per_td;
	struct xhci_generic_trb *start_trb;
	bool first_trb;
	int start_cycle;
	u32 field, length_field;
	int running_total, trb_buff_len, td_len, td_remain_len, ret;
	u64 start_addr, addr;
	int i, j;
	bool more_trbs_coming;

	ep_ring = xhci->devs[slot_id]->eps[ep_index].ring;

	num_tds = urb->number_of_packets;
	if (num_tds < 1) {
		xhci_dbg(xhci, "Isoc URB with zero packets?\n");
		return -EINVAL;
	}

	if (!in_interrupt())
		xhci_dbg(xhci, "ep %#x - urb len = %#x (%d),"
				" addr = %#llx, num_tds = %d\n",
				urb->ep->desc.bEndpointAddress,
				urb->transfer_buffer_length,
				urb->transfer_buffer_length,
				(unsigned long long)urb->transfer_dma,
				num_tds);

	start_addr = (u64) urb->transfer_dma;
	start_trb = &ep_ring->enqueue->generic;
	start_cycle = ep_ring->cycle_state;

	urb_priv = urb->hcpriv;
	/* Queue the first TRB, even if it's zero-length */
	for (i = 0; i < num_tds; i++) {
		unsigned int total_packet_count;
		unsigned int burst_count;
		unsigned int residue;

		first_trb = true;
		running_total = 0;
		addr = start_addr + urb->iso_frame_desc[i].offset;
		td_len = urb->iso_frame_desc[i].length;
		td_remain_len = td_len;
		/* This is a packet count, not a byte count. */
		total_packet_count = DIV_ROUND_UP(td_len,
				usb_endpoint_maxp(&urb->ep->desc));
		/* A zero-length transfer still involves at least one packet. */
		if (total_packet_count == 0)
			total_packet_count++;
		burst_count = xhci_get_burst_count(xhci, urb->dev, urb,
				total_packet_count);
		residue = xhci_get_last_burst_packet_count(xhci,
				urb->dev, urb, total_packet_count);

		trbs_per_td = count_isoc_trbs_needed(xhci, urb, i);

		ret = prepare_transfer(xhci, xhci->devs[slot_id], ep_index,
				urb->stream_id, trbs_per_td, urb, i, true,
				mem_flags);
		if (ret < 0) {
			if (i == 0)
				return ret;
			goto cleanup;
		}

		td = urb_priv->td[i];
		for (j = 0; j < trbs_per_td; j++) {
			u32 remainder = 0;
			field = TRB_TBC(burst_count) | TRB_TLBPC(residue);

			if (first_trb) {
				/* Queue the isoc TRB */
				field |= TRB_TYPE(TRB_ISOC);
				/* Assume URB_ISO_ASAP is set */
				field |= TRB_SIA;
				if (i == 0) {
					if (start_cycle == 0)
						field |= 0x1;
				} else
					field |= ep_ring->cycle_state;
				first_trb = false;
			} else {
				/* Queue other normal TRBs */
				field |= TRB_TYPE(TRB_NORMAL);
				field |= ep_ring->cycle_state;
			}

			/* Only set interrupt on short packet for IN EPs */
			if (usb_urb_dir_in(urb))
				field |= TRB_ISP;

			/* Chain all the TRBs together; clear the chain bit in
			 * the last TRB to indicate it's the last TRB in the
			 * chain.
			 */
			if (j < trbs_per_td - 1) {
				field |= TRB_CHAIN;
				more_trbs_coming = true;
			} else {
				td->last_trb = ep_ring->enqueue;
				field |= TRB_IOC;
				if (xhci->hci_version == 0x100) {
					/* Set BEI bit except for the last td */
					if (i < num_tds - 1)
						field |= TRB_BEI;
				}
				more_trbs_coming = false;
			}

			/* Calculate TRB length */
			trb_buff_len = TRB_MAX_BUFF_SIZE -
				(addr & (TRB_MAX_BUFF_SIZE - 1));
			if (trb_buff_len > td_remain_len)
				trb_buff_len = td_remain_len;

			/* Set the TRB length, TD size, & interrupter fields. */
			if (xhci->hci_version < 0x100) {
				remainder = xhci_td_remainder(
						td_len - running_total);
			} else {
				remainder = xhci_v1_0_td_remainder(
						running_total, trb_buff_len,
						total_packet_count, urb);
			}
			length_field = TRB_LEN(trb_buff_len) |
				remainder |
				TRB_INTR_TARGET(0);

			queue_trb(xhci, ep_ring, false, more_trbs_coming, true,
					lower_32_bits(addr),
					upper_32_bits(addr),
					length_field,
					field);
			running_total += trb_buff_len;

			addr += trb_buff_len;
			td_remain_len -= trb_buff_len;
		}

		/* Check TD length */
		if (running_total != td_len) {
			xhci_err(xhci, "ISOC TD length mismatch\n");
			return -EINVAL;
		}
	}

	if (xhci_to_hcd(xhci)->self.bandwidth_isoc_reqs == 0) {
		if (xhci->quirks & XHCI_AMD_PLL_FIX)
			usb_amd_quirk_pll_disable();
	}
	xhci_to_hcd(xhci)->self.bandwidth_isoc_reqs++;

	giveback_first_trb(xhci, slot_id, ep_index, urb->stream_id,
			start_cycle, start_trb);
	return 0;
cleanup:
	/* Clean up a partially enqueued isoc transfer. */
	for (i--; i >= 0; i--)
		list_del_init(&urb_priv->td[i]->td_list);

	/* Use the first TD as a temporary variable to turn the TDs we've queued
	 * into No-ops with a software-owned cycle bit.  That way the hardware
	 * won't accidentally start executing bogus TDs when we partially
	 * overwrite them.  td->first_trb and td->start_seg are already set.
	 */
	urb_priv->td[0]->last_trb = ep_ring->enqueue;
	/* Every TRB except the first & last will have its cycle bit flipped. */
	td_to_noop(xhci, ep_ring, urb_priv->td[0], true);

	/* Reset the ring enqueue back to the first TRB and its cycle bit. */
	ep_ring->enqueue = urb_priv->td[0]->first_trb;
	ep_ring->enq_seg = urb_priv->td[0]->start_seg;
	ep_ring->cycle_state = start_cycle;
	usb_hcd_unlink_urb_from_ep(bus_to_hcd(urb->dev->bus), urb);
	return ret;
}

/*
 * Check transfer ring to guarantee there is enough room for the urb.
 * Update ISO URB start_frame and interval.
 * Update interval as xhci_queue_intr_tx does.  Just use xhci frame_index to
 * update the urb->start_frame by now.
 * Always assume URB_ISO_ASAP set, and NEVER use urb->start_frame as input.
 */
int xhci_queue_isoc_tx_prepare(struct xhci_hcd *xhci, gfp_t mem_flags,
		struct urb *urb, int slot_id, unsigned int ep_index)
{
	struct xhci_virt_device *xdev;
	struct xhci_ring *ep_ring;
	struct xhci_ep_ctx *ep_ctx;
	int start_frame;
	int xhci_interval;
	int ep_interval;
	int num_tds, num_trbs, i;
	int ret;

	xdev = xhci->devs[slot_id];
	ep_ring = xdev->eps[ep_index].ring;
	ep_ctx = xhci_get_ep_ctx(xhci, xdev->out_ctx, ep_index);

	num_trbs = 0;
	num_tds = urb->number_of_packets;
	for (i = 0; i < num_tds; i++)
		num_trbs += count_isoc_trbs_needed(xhci, urb, i);

	/* Check the ring to guarantee there is enough room for the whole urb.
	 * Do not insert any td of the urb to the ring if the check failed.
	 */
	ret = prepare_ring(xhci, ep_ring, le32_to_cpu(ep_ctx->ep_info) & EP_STATE_MASK,
			num_trbs, true, mem_flags);
	if (ret)
		return ret;

	start_frame = xhci_readl(xhci, &xhci->run_regs->microframe_index);
	start_frame &= 0x3fff;

	urb->start_frame = start_frame;
	if (urb->dev->speed == USB_SPEED_LOW ||
			urb->dev->speed == USB_SPEED_FULL)
		urb->start_frame >>= 3;

	xhci_interval = EP_INTERVAL_TO_UFRAMES(le32_to_cpu(ep_ctx->ep_info));
	ep_interval = urb->interval;
	/* Convert to microframes */
	if (urb->dev->speed == USB_SPEED_LOW ||
			urb->dev->speed == USB_SPEED_FULL)
		ep_interval *= 8;
	/* FIXME change this to a warning and a suggestion to use the new API
	 * to set the polling interval (once the API is added).
	 */
	if (xhci_interval != ep_interval) {
		if (printk_ratelimit())
			dev_dbg(&urb->dev->dev, "Driver uses different interval"
					" (%d microframe%s) than xHCI "
					"(%d microframe%s)\n",
					ep_interval,
					ep_interval == 1 ? "" : "s",
					xhci_interval,
					xhci_interval == 1 ? "" : "s");
		urb->interval = xhci_interval;
		/* Convert back to frames for LS/FS devices */
		if (urb->dev->speed == USB_SPEED_LOW ||
				urb->dev->speed == USB_SPEED_FULL)
			urb->interval /= 8;
	}
	return xhci_queue_isoc_tx(xhci, GFP_ATOMIC, urb, slot_id, ep_index);
}

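/*
 * Note on the start_frame math above: MFINDEX counts microframes, so the raw
 * 14-bit reading is used directly for high- and SuperSpeed devices, but is
 * shifted down by 3 (e.g. 8000 >> 3 = 1000) to yield a 1ms frame number for
 * low- and full-speed devices.
 */
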
/****		Command Ring Operations		****/

/* Generic function for queueing a command TRB on the command ring.
 * Check to make sure there's room on the command ring for one command TRB.
 * Also check that there's room reserved for commands that must not fail.
 * If this is a command that must not fail, meaning command_must_succeed = TRUE,
 * then only check for the number of reserved spots.
 * Don't decrement xhci->cmd_ring_reserved_trbs after we've queued the TRB
 * because the command event handler may want to resubmit a failed command.
 */
static int queue_command(struct xhci_hcd *xhci, u32 field1, u32 field2,
		u32 field3, u32 field4, bool command_must_succeed)
{
	int reserved_trbs = xhci->cmd_ring_reserved_trbs;
	int ret;

	if (!command_must_succeed)
		reserved_trbs++;

	ret = prepare_ring(xhci, xhci->cmd_ring, EP_STATE_RUNNING,
			reserved_trbs, false, GFP_ATOMIC);
	if (ret < 0) {
		xhci_err(xhci, "ERR: No room for command on command ring\n");
		if (command_must_succeed)
			xhci_err(xhci, "ERR: Reserved TRB counting for "
					"unfailable commands failed.\n");
		return ret;
	}
	queue_trb(xhci, xhci->cmd_ring, false, false, false, field1, field2,
			field3, field4 | xhci->cmd_ring->cycle_state);
	return 0;
}

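/*
 * Example of the reservation rule above: with cmd_ring_reserved_trbs = 2, an
 * ordinary command needs 3 free TRBs (its own slot plus the 2 reserved ones)
 * before it is queued, while a must-succeed command, such as certain
 * configure endpoint commands (see xhci_queue_configure_endpoint below), may
 * dip into the 2 reserved slots themselves.
 */
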
/* Queue a slot enable or disable request on the command ring */
int xhci_queue_slot_control(struct xhci_hcd *xhci, u32 trb_type, u32 slot_id)
{
	return queue_command(xhci, 0, 0, 0,
			TRB_TYPE(trb_type) | SLOT_ID_FOR_TRB(slot_id), false);
}

/* Queue an address device command TRB */
int xhci_queue_address_device(struct xhci_hcd *xhci, dma_addr_t in_ctx_ptr,
		u32 slot_id)
{
	return queue_command(xhci, lower_32_bits(in_ctx_ptr),
			upper_32_bits(in_ctx_ptr), 0,
			TRB_TYPE(TRB_ADDR_DEV) | SLOT_ID_FOR_TRB(slot_id),
			false);
}

int xhci_queue_vendor_command(struct xhci_hcd *xhci,
		u32 field1, u32 field2, u32 field3, u32 field4)
{
	return queue_command(xhci, field1, field2, field3, field4, false);
}

/* Queue a reset device command TRB */
int xhci_queue_reset_device(struct xhci_hcd *xhci, u32 slot_id)
{
	return queue_command(xhci, 0, 0, 0,
			TRB_TYPE(TRB_RESET_DEV) | SLOT_ID_FOR_TRB(slot_id),
			false);
}

/* Queue a configure endpoint command TRB */
int xhci_queue_configure_endpoint(struct xhci_hcd *xhci, dma_addr_t in_ctx_ptr,
		u32 slot_id, bool command_must_succeed)
{
	return queue_command(xhci, lower_32_bits(in_ctx_ptr),
			upper_32_bits(in_ctx_ptr), 0,
			TRB_TYPE(TRB_CONFIG_EP) | SLOT_ID_FOR_TRB(slot_id),
			command_must_succeed);
}

/* Queue an evaluate context command TRB */
int xhci_queue_evaluate_context(struct xhci_hcd *xhci, dma_addr_t in_ctx_ptr,
		u32 slot_id)
{
	return queue_command(xhci, lower_32_bits(in_ctx_ptr),
			upper_32_bits(in_ctx_ptr), 0,
			TRB_TYPE(TRB_EVAL_CONTEXT) | SLOT_ID_FOR_TRB(slot_id),
			false);
}

/*
 * Suspend is set to indicate "Stop Endpoint Command" is being issued to stop
 * activity on an endpoint that is about to be suspended.
 */
int xhci_queue_stop_endpoint(struct xhci_hcd *xhci, int slot_id,
		unsigned int ep_index, int suspend)
{
	u32 trb_slot_id = SLOT_ID_FOR_TRB(slot_id);
	u32 trb_ep_index = EP_ID_FOR_TRB(ep_index);
	u32 type = TRB_TYPE(TRB_STOP_RING);
	u32 trb_suspend = SUSPEND_PORT_FOR_TRB(suspend);

	return queue_command(xhci, 0, 0, 0,
			trb_slot_id | trb_ep_index | type | trb_suspend, false);
}

/* Set Transfer Ring Dequeue Pointer command.
 * This should not be used for endpoints that have streams enabled.
 */
static int queue_set_tr_deq(struct xhci_hcd *xhci, int slot_id,
		unsigned int ep_index, unsigned int stream_id,
		struct xhci_segment *deq_seg,
		union xhci_trb *deq_ptr, u32 cycle_state)
{
	dma_addr_t addr;
	u32 trb_slot_id = SLOT_ID_FOR_TRB(slot_id);
	u32 trb_ep_index = EP_ID_FOR_TRB(ep_index);
	u32 trb_stream_id = STREAM_ID_FOR_TRB(stream_id);
	u32 type = TRB_TYPE(TRB_SET_DEQ);
	struct xhci_virt_ep *ep;

	addr = xhci_trb_virt_to_dma(deq_seg, deq_ptr);
	if (addr == 0) {
		xhci_warn(xhci, "WARN Cannot submit Set TR Deq Ptr\n");
		xhci_warn(xhci, "WARN deq seg = %p, deq pt = %p\n",
				deq_seg, deq_ptr);
		return 0;
	}
	ep = &xhci->devs[slot_id]->eps[ep_index];
	if ((ep->ep_state & SET_DEQ_PENDING)) {
		xhci_warn(xhci, "WARN Cannot submit Set TR Deq Ptr\n");
		xhci_warn(xhci, "A Set TR Deq Ptr command is pending.\n");
		return 0;
	}
	ep->queued_deq_seg = deq_seg;
	ep->queued_deq_ptr = deq_ptr;
	return queue_command(xhci, lower_32_bits(addr) | cycle_state,
			upper_32_bits(addr), trb_stream_id,
			trb_slot_id | trb_ep_index | type, false);
}

int xhci_queue_reset_ep(struct xhci_hcd *xhci, int slot_id,
		unsigned int ep_index)
{
	u32 trb_slot_id = SLOT_ID_FOR_TRB(slot_id);
	u32 trb_ep_index = EP_ID_FOR_TRB(ep_index);
	u32 type = TRB_TYPE(TRB_RESET_EP);

	return queue_command(xhci, 0, 0, 0, trb_slot_id | trb_ep_index | type,
			false);
}