xhci-ring.c

/*
 * xHCI host controller driver
 *
 * Copyright (C) 2008 Intel Corp.
 *
 * Author: Sarah Sharp
 * Some code borrowed from the Linux EHCI driver.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
 * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software Foundation,
 * Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */

/*
 * Ring initialization rules:
 * 1. Each segment is initialized to zero, except for link TRBs.
 * 2. Ring cycle state = 0. This represents Producer Cycle State (PCS) or
 *    Consumer Cycle State (CCS), depending on ring function.
 * 3. Enqueue pointer = dequeue pointer = address of first TRB in the segment.
 *
 * Ring behavior rules:
 * 1. A ring is empty if enqueue == dequeue. This means there will always be at
 *    least one free TRB in the ring. This is useful if you want to turn that
 *    into a link TRB and expand the ring.
 * 2. When incrementing an enqueue or dequeue pointer, if the next TRB is a
 *    link TRB, then load the pointer with the address in the link TRB. If the
 *    link TRB had its toggle bit set, you may need to update the ring cycle
 *    state (see cycle bit rules). You may have to do this multiple times
 *    until you reach a non-link TRB.
 * 3. A ring is full if enqueue++ (for the definition of increment above)
 *    equals the dequeue pointer.
 *
 * Cycle bit rules:
 * 1. When a consumer increments a dequeue pointer and encounters a toggle bit
 *    in a link TRB, it must toggle the ring cycle state.
 * 2. When a producer increments an enqueue pointer and encounters a toggle bit
 *    in a link TRB, it must toggle the ring cycle state.
 *
 * Producer rules:
 * 1. Check if ring is full before you enqueue.
 * 2. Write the ring cycle state to the cycle bit in the TRB you're enqueuing.
 *    Update enqueue pointer between each write (which may update the ring
 *    cycle state).
 * 3. Notify consumer. If SW is producer, it rings the doorbell for command
 *    and endpoint rings. If HC is the producer for the event ring, it
 *    generates an interrupt according to interrupt moderation rules.
 *
 * Consumer rules:
 * 1. Check if TRB belongs to you. If the cycle bit == your ring cycle state,
 *    the TRB is owned by the consumer.
 * 2. Update dequeue pointer (which may update the ring cycle state) and
 *    continue processing TRBs until you reach a TRB which is not owned by you.
 * 3. Notify the producer. SW is the consumer for the event ring, and it
 *    updates the event ring dequeue pointer. HC is the consumer for the command
 *    and endpoint rings; it generates events on the event ring for these.
 */
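
/*
 * Worked example (illustration only, derived from the rules above): assume a
 * transfer ring with two segments, A and B, whose last entries are link TRBs,
 * with B's link TRB carrying the toggle bit and pointing back to A. Starting
 * with ring cycle state 1, the producer writes TRBs in A with cycle = 1,
 * follows A's link TRB into B, and keeps writing with cycle = 1. When it
 * passes B's toggle link TRB back into A, it flips the ring cycle state to 0
 * and continues with cycle = 0. The consumer owns a TRB only while the TRB's
 * cycle bit matches its own cycle state, so it stops at the first TRB the
 * producer has not yet written.
 */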
#include <linux/scatterlist.h>
#include <linux/slab.h>
#include "xhci.h"

static int handle_cmd_in_cmd_wait_list(struct xhci_hcd *xhci,
		struct xhci_virt_device *virt_dev,
		struct xhci_event_cmd *event);

/*
 * Returns zero if the TRB isn't in this segment, otherwise it returns the DMA
 * address of the TRB.
 */
dma_addr_t xhci_trb_virt_to_dma(struct xhci_segment *seg,
		union xhci_trb *trb)
{
	unsigned long segment_offset;

	if (!seg || !trb || trb < seg->trbs)
		return 0;
	/* offset in TRBs */
	segment_offset = trb - seg->trbs;
	if (segment_offset > TRBS_PER_SEGMENT)
		return 0;
	return seg->dma + (segment_offset * sizeof(*trb));
}
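
/*
 * Note: the returned address is simply the segment's base DMA address plus the
 * TRB's index within the segment scaled by sizeof(union xhci_trb); e.g. the
 * third TRB in a segment maps to seg->dma + 2 * sizeof(*trb).
 */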
/* Does this link TRB point to the first segment in a ring,
 * or was the previous TRB the last TRB on the last segment in the ERST?
 */
static inline bool last_trb_on_last_seg(struct xhci_hcd *xhci, struct xhci_ring *ring,
		struct xhci_segment *seg, union xhci_trb *trb)
{
	if (ring == xhci->event_ring)
		return (trb == &seg->trbs[TRBS_PER_SEGMENT]) &&
			(seg->next == xhci->event_ring->first_seg);
	else
		return trb->link.control & LINK_TOGGLE;
}
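
/*
 * The event ring is described by an Event Ring Segment Table (ERST) rather
 * than by link TRBs, which is why the event ring case above checks whether the
 * pointer has stepped past the end of a segment instead of looking at a link
 * TRB's toggle bit.
 */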
/* Is this TRB a link TRB or was the last TRB the last TRB in this event ring
 * segment? I.e. would the updated event TRB pointer step off the end of the
 * event seg?
 */
static inline int last_trb(struct xhci_hcd *xhci, struct xhci_ring *ring,
		struct xhci_segment *seg, union xhci_trb *trb)
{
	if (ring == xhci->event_ring)
		return trb == &seg->trbs[TRBS_PER_SEGMENT];
	else
		return (trb->link.control & TRB_TYPE_BITMASK) == TRB_TYPE(TRB_LINK);
}

static inline int enqueue_is_link_trb(struct xhci_ring *ring)
{
	struct xhci_link_trb *link = &ring->enqueue->link;
	return ((link->control & TRB_TYPE_BITMASK) == TRB_TYPE(TRB_LINK));
}

/* Updates trb to point to the next TRB in the ring, and updates seg if the next
 * TRB is in a new segment. This does not skip over link TRBs, and it does not
 * affect the ring dequeue or enqueue pointers.
 */
static void next_trb(struct xhci_hcd *xhci,
		struct xhci_ring *ring,
		struct xhci_segment **seg,
		union xhci_trb **trb)
{
	if (last_trb(xhci, ring, *seg, *trb)) {
		*seg = (*seg)->next;
		*trb = ((*seg)->trbs);
	} else {
		(*trb)++;
	}
}
/*
 * See Cycle bit rules. SW is the consumer for the event ring only.
 * Don't make a ring full of link TRBs. That would be dumb and this would loop.
 */
static void inc_deq(struct xhci_hcd *xhci, struct xhci_ring *ring, bool consumer)
{
	union xhci_trb *next = ++(ring->dequeue);
	unsigned long long addr;

	ring->deq_updates++;
	/* Update the dequeue pointer further if that was a link TRB or we're at
	 * the end of an event ring segment (which doesn't have link TRBs)
	 */
	while (last_trb(xhci, ring, ring->deq_seg, next)) {
		if (consumer && last_trb_on_last_seg(xhci, ring, ring->deq_seg, next)) {
			ring->cycle_state = (ring->cycle_state ? 0 : 1);
			if (!in_interrupt())
				xhci_dbg(xhci, "Toggle cycle state for ring %p = %i\n",
						ring,
						(unsigned int) ring->cycle_state);
		}
		ring->deq_seg = ring->deq_seg->next;
		ring->dequeue = ring->deq_seg->trbs;
		next = ring->dequeue;
	}
	addr = (unsigned long long) xhci_trb_virt_to_dma(ring->deq_seg, ring->dequeue);
	if (ring == xhci->event_ring)
		xhci_dbg(xhci, "Event ring deq = 0x%llx (DMA)\n", addr);
	else if (ring == xhci->cmd_ring)
		xhci_dbg(xhci, "Command ring deq = 0x%llx (DMA)\n", addr);
	else
		xhci_dbg(xhci, "Ring deq = 0x%llx (DMA)\n", addr);
}
/*
 * See Cycle bit rules. SW is the consumer for the event ring only.
 * Don't make a ring full of link TRBs. That would be dumb and this would loop.
 *
 * If we've just enqueued a TRB that is in the middle of a TD (meaning the
 * chain bit is set), then set the chain bit in all the following link TRBs.
 * If we've enqueued the last TRB in a TD, make sure the following link TRBs
 * have their chain bit cleared (so that each Link TRB is a separate TD).
 *
 * Section 6.4.4.1 of the 0.95 spec says link TRBs cannot have the chain bit
 * set, but other sections talk about dealing with the chain bit set. This was
 * fixed in the 0.96 specification errata, but we have to assume that all 0.95
 * xHCI hardware can't handle the chain bit being cleared on a link TRB.
 *
 * @more_trbs_coming: Will you enqueue more TRBs before calling
 *			prepare_transfer()?
 */
static void inc_enq(struct xhci_hcd *xhci, struct xhci_ring *ring,
		bool consumer, bool more_trbs_coming)
{
	u32 chain;
	union xhci_trb *next;
	unsigned long long addr;

	chain = ring->enqueue->generic.field[3] & TRB_CHAIN;
	next = ++(ring->enqueue);
	ring->enq_updates++;
	/* Update the enqueue pointer further if that was a link TRB or we're at
	 * the end of an event ring segment (which doesn't have link TRBs)
	 */
	while (last_trb(xhci, ring, ring->enq_seg, next)) {
		if (!consumer) {
			if (ring != xhci->event_ring) {
				/*
				 * If the caller doesn't plan on enqueueing more
				 * TDs before ringing the doorbell, then we
				 * don't want to give the link TRB to the
				 * hardware just yet. We'll give the link TRB
				 * back in prepare_ring() just before we enqueue
				 * the TD at the top of the ring.
				 */
				if (!chain && !more_trbs_coming)
					break;
				/* If we're not dealing with 0.95 hardware,
				 * carry over the chain bit of the previous TRB
				 * (which may mean the chain bit is cleared).
				 */
				if (!xhci_link_trb_quirk(xhci)) {
					next->link.control &= ~TRB_CHAIN;
					next->link.control |= chain;
				}
				/* Give this link TRB to the hardware */
				wmb();
				next->link.control ^= TRB_CYCLE;
			}
			/* Toggle the cycle bit after the last ring segment. */
			if (last_trb_on_last_seg(xhci, ring, ring->enq_seg, next)) {
				ring->cycle_state = (ring->cycle_state ? 0 : 1);
				if (!in_interrupt())
					xhci_dbg(xhci, "Toggle cycle state for ring %p = %i\n",
							ring,
							(unsigned int) ring->cycle_state);
			}
		}
		ring->enq_seg = ring->enq_seg->next;
		ring->enqueue = ring->enq_seg->trbs;
		next = ring->enqueue;
	}
	addr = (unsigned long long) xhci_trb_virt_to_dma(ring->enq_seg, ring->enqueue);
	if (ring == xhci->event_ring)
		xhci_dbg(xhci, "Event ring enq = 0x%llx (DMA)\n", addr);
	else if (ring == xhci->cmd_ring)
		xhci_dbg(xhci, "Command ring enq = 0x%llx (DMA)\n", addr);
	else
		xhci_dbg(xhci, "Ring enq = 0x%llx (DMA)\n", addr);
}
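
/*
 * Note on the wmb() in inc_enq() above: the link TRB's chain bit is updated
 * first, the write barrier orders that update, and only then is the link TRB's
 * cycle bit flipped to match the ring cycle state. Flipping the cycle bit is
 * what hands ownership of the link TRB to the hardware, so the chain bit must
 * be visible before that happens.
 */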
/*
 * Check to see if there's room to enqueue num_trbs on the ring. See rules
 * above.
 * FIXME: this would be simpler and faster if we just kept track of the number
 * of free TRBs in a ring.
 */
static int room_on_ring(struct xhci_hcd *xhci, struct xhci_ring *ring,
		unsigned int num_trbs)
{
	int i;
	union xhci_trb *enq = ring->enqueue;
	struct xhci_segment *enq_seg = ring->enq_seg;
	struct xhci_segment *cur_seg;
	unsigned int left_on_ring;

	/* If we are currently pointing to a link TRB, advance the
	 * enqueue pointer before checking for space */
	while (last_trb(xhci, ring, enq_seg, enq)) {
		enq_seg = enq_seg->next;
		enq = enq_seg->trbs;
	}
	/* Check if ring is empty */
	if (enq == ring->dequeue) {
		/* Can't use link trbs */
		left_on_ring = TRBS_PER_SEGMENT - 1;
		for (cur_seg = enq_seg->next; cur_seg != enq_seg;
				cur_seg = cur_seg->next)
			left_on_ring += TRBS_PER_SEGMENT - 1;
		/* Always need one TRB free in the ring. */
		left_on_ring -= 1;
		if (num_trbs > left_on_ring) {
			xhci_warn(xhci, "Not enough room on ring; "
					"need %u TRBs, %u TRBs left\n",
					num_trbs, left_on_ring);
			return 0;
		}
		return 1;
	}
	/* Make sure there's an extra empty TRB available */
	for (i = 0; i <= num_trbs; ++i) {
		if (enq == ring->dequeue)
			return 0;
		enq++;
		while (last_trb(xhci, ring, enq_seg, enq)) {
			enq_seg = enq_seg->next;
			enq = enq_seg->trbs;
		}
	}
	return 1;
}
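
/*
 * Capacity example (illustration only): on an empty ring with two segments,
 * the loop above counts TRBS_PER_SEGMENT - 1 usable TRBs per segment (the last
 * entry of each segment is a link TRB) and then subtracts one more, because
 * enqueue may never catch up to dequeue; so at most
 * 2 * (TRBS_PER_SEGMENT - 1) - 1 TRBs can be queued at once.
 */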
/* Ring the host controller doorbell after placing a command on the ring */
void xhci_ring_cmd_db(struct xhci_hcd *xhci)
{
	xhci_dbg(xhci, "// Ding dong!\n");
	xhci_writel(xhci, DB_VALUE_HOST, &xhci->dba->doorbell[0]);
	/* Flush PCI posted writes */
	xhci_readl(xhci, &xhci->dba->doorbell[0]);
}

void xhci_ring_ep_doorbell(struct xhci_hcd *xhci,
		unsigned int slot_id,
		unsigned int ep_index,
		unsigned int stream_id)
{
	__u32 __iomem *db_addr = &xhci->dba->doorbell[slot_id];
	struct xhci_virt_ep *ep = &xhci->devs[slot_id]->eps[ep_index];
	unsigned int ep_state = ep->ep_state;

	/* Don't ring the doorbell for this endpoint if there are pending
	 * cancellations because we don't want to interrupt processing.
	 * We don't want to restart any stream rings if there's a set dequeue
	 * pointer command pending because the device can choose to start any
	 * stream once the endpoint is on the HW schedule.
	 * FIXME - check all the stream rings for pending cancellations.
	 */
	if ((ep_state & EP_HALT_PENDING) || (ep_state & SET_DEQ_PENDING) ||
			(ep_state & EP_HALTED))
		return;
	xhci_writel(xhci, DB_VALUE(ep_index, stream_id), db_addr);
	/* The CPU has better things to do at this point than wait for a
	 * write-posting flush. It'll get there soon enough.
	 */
}
/* Ring the doorbell for any rings with pending URBs */
static void ring_doorbell_for_active_rings(struct xhci_hcd *xhci,
		unsigned int slot_id,
		unsigned int ep_index)
{
	unsigned int stream_id;
	struct xhci_virt_ep *ep;

	ep = &xhci->devs[slot_id]->eps[ep_index];

	/* A ring has pending URBs if its TD list is not empty */
	if (!(ep->ep_state & EP_HAS_STREAMS)) {
		if (!(list_empty(&ep->ring->td_list)))
			xhci_ring_ep_doorbell(xhci, slot_id, ep_index, 0);
		return;
	}

	for (stream_id = 1; stream_id < ep->stream_info->num_streams;
			stream_id++) {
		struct xhci_stream_info *stream_info = ep->stream_info;
		if (!list_empty(&stream_info->stream_rings[stream_id]->td_list))
			xhci_ring_ep_doorbell(xhci, slot_id, ep_index,
					stream_id);
	}
}
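
/*
 * The stream loop above starts at 1 because stream ID 0 is reserved; an
 * endpoint that uses streams never has a ring at index 0 (see the stream ID
 * checks in xhci_triad_to_transfer_ring() below).
 */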
/*
 * Find the segment that trb is in. Start searching in start_seg.
 * If we must move past a segment that has a link TRB with a toggle cycle state
 * bit set, then we will toggle the value pointed at by cycle_state.
 */
static struct xhci_segment *find_trb_seg(
		struct xhci_segment *start_seg,
		union xhci_trb *trb, int *cycle_state)
{
	struct xhci_segment *cur_seg = start_seg;
	struct xhci_generic_trb *generic_trb;

	while (cur_seg->trbs > trb ||
			&cur_seg->trbs[TRBS_PER_SEGMENT - 1] < trb) {
		generic_trb = &cur_seg->trbs[TRBS_PER_SEGMENT - 1].generic;
		if ((generic_trb->field[3] & TRB_TYPE_BITMASK) ==
				TRB_TYPE(TRB_LINK) &&
				(generic_trb->field[3] & LINK_TOGGLE))
			*cycle_state = ~(*cycle_state) & 0x1;
		cur_seg = cur_seg->next;
		if (cur_seg == start_seg)
			/* Looped over the entire list. Oops! */
			return NULL;
	}
	return cur_seg;
}
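
/*
 * find_trb_seg() walks segment by segment until it finds the segment whose TRB
 * array contains trb. Each time the walk crosses a link TRB with the toggle
 * bit set it flips *cycle_state, so the caller ends up with the cycle state
 * the hardware would have after advancing to that segment.
 */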
static struct xhci_ring *xhci_triad_to_transfer_ring(struct xhci_hcd *xhci,
		unsigned int slot_id, unsigned int ep_index,
		unsigned int stream_id)
{
	struct xhci_virt_ep *ep;

	ep = &xhci->devs[slot_id]->eps[ep_index];
	/* Common case: no streams */
	if (!(ep->ep_state & EP_HAS_STREAMS))
		return ep->ring;

	if (stream_id == 0) {
		xhci_warn(xhci,
				"WARN: Slot ID %u, ep index %u has streams, "
				"but URB has no stream ID.\n",
				slot_id, ep_index);
		return NULL;
	}

	if (stream_id < ep->stream_info->num_streams)
		return ep->stream_info->stream_rings[stream_id];

	xhci_warn(xhci,
			"WARN: Slot ID %u, ep index %u has "
			"stream IDs 1 to %u allocated, "
			"but stream ID %u is requested.\n",
			slot_id, ep_index,
			ep->stream_info->num_streams - 1,
			stream_id);
	return NULL;
}

/* Get the right ring for the given URB.
 * If the endpoint supports streams, boundary check the URB's stream ID.
 * If the endpoint doesn't support streams, return the singular endpoint ring.
 */
static struct xhci_ring *xhci_urb_to_transfer_ring(struct xhci_hcd *xhci,
		struct urb *urb)
{
	return xhci_triad_to_transfer_ring(xhci, urb->dev->slot_id,
			xhci_get_endpoint_index(&urb->ep->desc), urb->stream_id);
}
/*
 * Move the xHC's endpoint ring dequeue pointer past cur_td.
 * Record the new state of the xHC's endpoint ring dequeue segment,
 * dequeue pointer, and new consumer cycle state in state.
 * Update our internal representation of the ring's dequeue pointer.
 *
 * We do this in three jumps:
 *  - First we update our new ring state to be the same as when the xHC stopped.
 *  - Then we traverse the ring to find the segment that contains
 *    the last TRB in the TD. We toggle the xHC's new cycle state when we pass
 *    any link TRBs with the toggle cycle bit set.
 *  - Finally we move the dequeue state one TRB further, toggling the cycle bit
 *    if we've moved it past a link TRB with the toggle cycle bit set.
 */
void xhci_find_new_dequeue_state(struct xhci_hcd *xhci,
		unsigned int slot_id, unsigned int ep_index,
		unsigned int stream_id, struct xhci_td *cur_td,
		struct xhci_dequeue_state *state)
{
	struct xhci_virt_device *dev = xhci->devs[slot_id];
	struct xhci_ring *ep_ring;
	struct xhci_generic_trb *trb;
	struct xhci_ep_ctx *ep_ctx;
	dma_addr_t addr;

	ep_ring = xhci_triad_to_transfer_ring(xhci, slot_id,
			ep_index, stream_id);
	if (!ep_ring) {
		xhci_warn(xhci, "WARN can't find new dequeue state "
				"for invalid stream ID %u.\n",
				stream_id);
		return;
	}
	state->new_cycle_state = 0;
	xhci_dbg(xhci, "Finding segment containing stopped TRB.\n");
	state->new_deq_seg = find_trb_seg(cur_td->start_seg,
			dev->eps[ep_index].stopped_trb,
			&state->new_cycle_state);
	if (!state->new_deq_seg) {
		WARN_ON(1);
		return;
	}
	/* Dig out the cycle state saved by the xHC during the stop ep cmd */
	xhci_dbg(xhci, "Finding endpoint context\n");
	ep_ctx = xhci_get_ep_ctx(xhci, dev->out_ctx, ep_index);
	state->new_cycle_state = 0x1 & ep_ctx->deq;

	state->new_deq_ptr = cur_td->last_trb;
	xhci_dbg(xhci, "Finding segment containing last TRB in TD.\n");
	state->new_deq_seg = find_trb_seg(state->new_deq_seg,
			state->new_deq_ptr,
			&state->new_cycle_state);
	if (!state->new_deq_seg) {
		WARN_ON(1);
		return;
	}

	trb = &state->new_deq_ptr->generic;
	if ((trb->field[3] & TRB_TYPE_BITMASK) == TRB_TYPE(TRB_LINK) &&
			(trb->field[3] & LINK_TOGGLE))
		state->new_cycle_state = ~(state->new_cycle_state) & 0x1;
	next_trb(xhci, ep_ring, &state->new_deq_seg, &state->new_deq_ptr);

	/* Don't update the ring cycle state for the producer (us). */
	xhci_dbg(xhci, "New dequeue segment = %p (virtual)\n",
			state->new_deq_seg);
	addr = xhci_trb_virt_to_dma(state->new_deq_seg, state->new_deq_ptr);
	xhci_dbg(xhci, "New dequeue pointer = 0x%llx (DMA)\n",
			(unsigned long long) addr);
	xhci_dbg(xhci, "Setting dequeue pointer in internal ring state.\n");
	ep_ring->dequeue = state->new_deq_ptr;
	ep_ring->deq_seg = state->new_deq_seg;
}
static void td_to_noop(struct xhci_hcd *xhci, struct xhci_ring *ep_ring,
		struct xhci_td *cur_td)
{
	struct xhci_segment *cur_seg;
	union xhci_trb *cur_trb;

	for (cur_seg = cur_td->start_seg, cur_trb = cur_td->first_trb;
			true;
			next_trb(xhci, ep_ring, &cur_seg, &cur_trb)) {
		if ((cur_trb->generic.field[3] & TRB_TYPE_BITMASK) ==
				TRB_TYPE(TRB_LINK)) {
			/* Unchain any chained Link TRBs, but
			 * leave the pointers intact.
			 */
			cur_trb->generic.field[3] &= ~TRB_CHAIN;
			xhci_dbg(xhci, "Cancel (unchain) link TRB\n");
			xhci_dbg(xhci, "Address = %p (0x%llx dma); "
					"in seg %p (0x%llx dma)\n",
					cur_trb,
					(unsigned long long)xhci_trb_virt_to_dma(cur_seg, cur_trb),
					cur_seg,
					(unsigned long long)cur_seg->dma);
		} else {
			cur_trb->generic.field[0] = 0;
			cur_trb->generic.field[1] = 0;
			cur_trb->generic.field[2] = 0;
			/* Preserve only the cycle bit of this TRB */
			cur_trb->generic.field[3] &= TRB_CYCLE;
			cur_trb->generic.field[3] |= TRB_TYPE(TRB_TR_NOOP);
			xhci_dbg(xhci, "Cancel TRB %p (0x%llx dma) "
					"in seg %p (0x%llx dma)\n",
					cur_trb,
					(unsigned long long)xhci_trb_virt_to_dma(cur_seg, cur_trb),
					cur_seg,
					(unsigned long long)cur_seg->dma);
		}
		if (cur_trb == cur_td->last_trb)
			break;
	}
}
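
/*
 * Only the cycle bit of each cancelled TRB is preserved above: the hardware
 * decides ownership from that bit, so rewriting it could hand a TRB to the
 * controller (or take one away from it) at the wrong time. Turning the rest of
 * the TRB into a No-op keeps the ring walkable while making the TD a harmless
 * placeholder.
 */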
static int queue_set_tr_deq(struct xhci_hcd *xhci, int slot_id,
		unsigned int ep_index, unsigned int stream_id,
		struct xhci_segment *deq_seg,
		union xhci_trb *deq_ptr, u32 cycle_state);

void xhci_queue_new_dequeue_state(struct xhci_hcd *xhci,
		unsigned int slot_id, unsigned int ep_index,
		unsigned int stream_id,
		struct xhci_dequeue_state *deq_state)
{
	struct xhci_virt_ep *ep = &xhci->devs[slot_id]->eps[ep_index];

	xhci_dbg(xhci, "Set TR Deq Ptr cmd, new deq seg = %p (0x%llx dma), "
			"new deq ptr = %p (0x%llx dma), new cycle = %u\n",
			deq_state->new_deq_seg,
			(unsigned long long)deq_state->new_deq_seg->dma,
			deq_state->new_deq_ptr,
			(unsigned long long)xhci_trb_virt_to_dma(deq_state->new_deq_seg, deq_state->new_deq_ptr),
			deq_state->new_cycle_state);
	queue_set_tr_deq(xhci, slot_id, ep_index, stream_id,
			deq_state->new_deq_seg,
			deq_state->new_deq_ptr,
			(u32) deq_state->new_cycle_state);
	/* Stop the TD queueing code from ringing the doorbell until
	 * this command completes. The HC won't set the dequeue pointer
	 * if the ring is running, and ringing the doorbell starts the
	 * ring running.
	 */
	ep->ep_state |= SET_DEQ_PENDING;
}
static inline void xhci_stop_watchdog_timer_in_irq(struct xhci_hcd *xhci,
		struct xhci_virt_ep *ep)
{
	ep->ep_state &= ~EP_HALT_PENDING;
	/* Can't del_timer_sync in interrupt, so we attempt to cancel. If the
	 * timer is running on another CPU, we don't decrement stop_cmds_pending
	 * (since we didn't successfully stop the watchdog timer).
	 */
	if (del_timer(&ep->stop_cmd_timer))
		ep->stop_cmds_pending--;
}

/* Must be called with xhci->lock held in interrupt context */
static void xhci_giveback_urb_in_irq(struct xhci_hcd *xhci,
		struct xhci_td *cur_td, int status, char *adjective)
{
	struct usb_hcd *hcd = xhci_to_hcd(xhci);
	struct urb *urb;
	struct urb_priv *urb_priv;

	urb = cur_td->urb;
	urb_priv = urb->hcpriv;
	urb_priv->td_cnt++;

	/* Only giveback urb when this is the last td in urb */
	if (urb_priv->td_cnt == urb_priv->length) {
		usb_hcd_unlink_urb_from_ep(hcd, urb);
		xhci_dbg(xhci, "Giveback %s URB %p\n", adjective, urb);

		spin_unlock(&xhci->lock);
		usb_hcd_giveback_urb(hcd, urb, status);
		xhci_urb_free_priv(xhci, urb_priv);
		spin_lock(&xhci->lock);
		xhci_dbg(xhci, "%s URB given back\n", adjective);
	}
}
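
/*
 * xhci->lock is dropped around usb_hcd_giveback_urb() above because the URB's
 * completion handler runs inside that call and may re-enter the driver (for
 * example, to submit another URB), which would otherwise deadlock on the lock.
 */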
/*
 * When we get a command completion for a Stop Endpoint Command, we need to
 * unlink any cancelled TDs from the ring. There are two ways to do that:
 *
 *  1. If the HW was in the middle of processing the TD that needs to be
 *     cancelled, then we must move the ring's dequeue pointer past the last TRB
 *     in the TD with a Set Dequeue Pointer Command.
 *  2. Otherwise, we turn all the TRBs in the TD into No-op TRBs (with the chain
 *     bit cleared) so that the HW will skip over them.
 */
static void handle_stopped_endpoint(struct xhci_hcd *xhci,
		union xhci_trb *trb, struct xhci_event_cmd *event)
{
	unsigned int slot_id;
	unsigned int ep_index;
	struct xhci_virt_device *virt_dev;
	struct xhci_ring *ep_ring;
	struct xhci_virt_ep *ep;
	struct list_head *entry;
	struct xhci_td *cur_td = NULL;
	struct xhci_td *last_unlinked_td;
	struct xhci_dequeue_state deq_state;

	if (unlikely(TRB_TO_SUSPEND_PORT(
			xhci->cmd_ring->dequeue->generic.field[3]))) {
		slot_id = TRB_TO_SLOT_ID(
				xhci->cmd_ring->dequeue->generic.field[3]);
		virt_dev = xhci->devs[slot_id];
		if (virt_dev)
			handle_cmd_in_cmd_wait_list(xhci, virt_dev,
					event);
		else
			xhci_warn(xhci, "Stop endpoint command "
					"completion for disabled slot %u\n",
					slot_id);
		return;
	}

	memset(&deq_state, 0, sizeof(deq_state));
	slot_id = TRB_TO_SLOT_ID(trb->generic.field[3]);
	ep_index = TRB_TO_EP_INDEX(trb->generic.field[3]);
	ep = &xhci->devs[slot_id]->eps[ep_index];

	if (list_empty(&ep->cancelled_td_list)) {
		xhci_stop_watchdog_timer_in_irq(xhci, ep);
		ring_doorbell_for_active_rings(xhci, slot_id, ep_index);
		return;
	}

	/* Fix up the ep ring first, so HW stops executing cancelled TDs.
	 * We have the xHCI lock, so nothing can modify this list until we drop
	 * it. We're also in the event handler, so we can't get re-interrupted
	 * if another Stop Endpoint command completes
	 */
	list_for_each(entry, &ep->cancelled_td_list) {
		cur_td = list_entry(entry, struct xhci_td, cancelled_td_list);
		xhci_dbg(xhci, "Cancelling TD starting at %p, 0x%llx (dma).\n",
				cur_td->first_trb,
				(unsigned long long)xhci_trb_virt_to_dma(cur_td->start_seg, cur_td->first_trb));
		ep_ring = xhci_urb_to_transfer_ring(xhci, cur_td->urb);
		if (!ep_ring) {
			/* This shouldn't happen unless a driver is mucking
			 * with the stream ID after submission. This will
			 * leave the TD on the hardware ring, and the hardware
			 * will try to execute it, and may access a buffer
			 * that has already been freed. In the best case, the
			 * hardware will execute it, and the event handler will
			 * ignore the completion event for that TD, since it was
			 * removed from the td_list for that endpoint. In
			 * short, don't muck with the stream ID after
			 * submission.
			 */
			xhci_warn(xhci, "WARN Cancelled URB %p "
					"has invalid stream ID %u.\n",
					cur_td->urb,
					cur_td->urb->stream_id);
			goto remove_finished_td;
		}
		/*
		 * If we stopped on the TD we need to cancel, then we have to
		 * move the xHC endpoint ring dequeue pointer past this TD.
		 */
		if (cur_td == ep->stopped_td)
			xhci_find_new_dequeue_state(xhci, slot_id, ep_index,
					cur_td->urb->stream_id,
					cur_td, &deq_state);
		else
			td_to_noop(xhci, ep_ring, cur_td);
remove_finished_td:
		/*
		 * The event handler won't see a completion for this TD anymore,
		 * so remove it from the endpoint ring's TD list. Keep it in
		 * the cancelled TD list for URB completion later.
		 */
		list_del(&cur_td->td_list);
	}
	last_unlinked_td = cur_td;
	xhci_stop_watchdog_timer_in_irq(xhci, ep);

	/* If necessary, queue a Set Transfer Ring Dequeue Pointer command */
	if (deq_state.new_deq_ptr && deq_state.new_deq_seg) {
		xhci_queue_new_dequeue_state(xhci,
				slot_id, ep_index,
				ep->stopped_td->urb->stream_id,
				&deq_state);
		xhci_ring_cmd_db(xhci);
	} else {
		/* Otherwise ring the doorbell(s) to restart queued transfers */
		ring_doorbell_for_active_rings(xhci, slot_id, ep_index);
	}
	ep->stopped_td = NULL;
	ep->stopped_trb = NULL;

	/*
	 * Drop the lock and complete the URBs in the cancelled TD list.
	 * New TDs to be cancelled might be added to the end of the list before
	 * we can complete all the URBs for the TDs we already unlinked.
	 * So stop when we've completed the URB for the last TD we unlinked.
	 */
	do {
		cur_td = list_entry(ep->cancelled_td_list.next,
				struct xhci_td, cancelled_td_list);
		list_del(&cur_td->cancelled_td_list);

		/* Clean up the cancelled URB */
		/* Doesn't matter what we pass for status, since the core will
		 * just overwrite it (because the URB has been unlinked).
		 */
		xhci_giveback_urb_in_irq(xhci, cur_td, 0, "cancelled");

		/* Stop processing the cancelled list if the watchdog timer is
		 * running.
		 */
		if (xhci->xhc_state & XHCI_STATE_DYING)
			return;
	} while (cur_td != last_unlinked_td);

	/* Return to the event handler with xhci->lock re-acquired */
}
/* Watchdog timer function for when a stop endpoint command fails to complete.
 * In this case, we assume the host controller is broken or dying or dead. The
 * host may still be completing some other events, so we have to be careful to
 * let the event ring handler and the URB dequeueing/enqueueing functions know
 * through xhci->xhc_state.
 *
 * The timer may also fire if the host takes a very long time to respond to the
 * command, and the stop endpoint command completion handler cannot delete the
 * timer before the timer function is called. Another endpoint cancellation may
 * sneak in before the timer function can grab the lock, and that may queue
 * another stop endpoint command and add the timer back. So we cannot use a
 * simple flag to say whether there is a pending stop endpoint command for a
 * particular endpoint.
 *
 * Instead we use a combination of that flag and a counter for the number of
 * pending stop endpoint commands. If the timer is the tail end of the last
 * stop endpoint command, and the endpoint's command is still pending, we assume
 * the host is dying.
 */
void xhci_stop_endpoint_command_watchdog(unsigned long arg)
{
	struct xhci_hcd *xhci;
	struct xhci_virt_ep *ep;
	struct xhci_virt_ep *temp_ep;
	struct xhci_ring *ring;
	struct xhci_td *cur_td;
	int ret, i, j;

	ep = (struct xhci_virt_ep *) arg;
	xhci = ep->xhci;

	spin_lock(&xhci->lock);

	ep->stop_cmds_pending--;
	if (xhci->xhc_state & XHCI_STATE_DYING) {
		xhci_dbg(xhci, "Stop EP timer ran, but another timer marked "
				"xHCI as DYING, exiting.\n");
		spin_unlock(&xhci->lock);
		return;
	}
	if (!(ep->stop_cmds_pending == 0 && (ep->ep_state & EP_HALT_PENDING))) {
		xhci_dbg(xhci, "Stop EP timer ran, but no command pending, "
				"exiting.\n");
		spin_unlock(&xhci->lock);
		return;
	}

	xhci_warn(xhci, "xHCI host not responding to stop endpoint command.\n");
	xhci_warn(xhci, "Assuming host is dying, halting host.\n");
	/* Oops, HC is dead or dying or at least not responding to the stop
	 * endpoint command.
	 */
	xhci->xhc_state |= XHCI_STATE_DYING;
	/* Disable interrupts from the host controller and start halting it */
	xhci_quiesce(xhci);
	spin_unlock(&xhci->lock);

	ret = xhci_halt(xhci);

	spin_lock(&xhci->lock);
	if (ret < 0) {
		/* This is bad; the host is not responding to commands and it's
		 * not allowing itself to be halted. At least interrupts are
		 * disabled, so we can set HC_STATE_HALT and notify the
		 * USB core. But if we call usb_hc_died(), it will attempt to
		 * disconnect all device drivers under this host. Those
		 * disconnect() methods will wait for all URBs to be unlinked,
		 * so we must complete them.
		 */
		xhci_warn(xhci, "Non-responsive xHCI host is not halting.\n");
		xhci_warn(xhci, "Completing active URBs anyway.\n");
		/* We could turn all TDs on the rings to no-ops. This won't
		 * help if the host has cached part of the ring, and is slow if
		 * we want to preserve the cycle bit. Skip it and hope the host
		 * doesn't touch the memory.
		 */
	}
	for (i = 0; i < MAX_HC_SLOTS; i++) {
		if (!xhci->devs[i])
			continue;
		for (j = 0; j < 31; j++) {
			temp_ep = &xhci->devs[i]->eps[j];
			ring = temp_ep->ring;
			if (!ring)
				continue;
			xhci_dbg(xhci, "Killing URBs for slot ID %u, "
					"ep index %u\n", i, j);
			while (!list_empty(&ring->td_list)) {
				cur_td = list_first_entry(&ring->td_list,
						struct xhci_td,
						td_list);
				list_del(&cur_td->td_list);
				if (!list_empty(&cur_td->cancelled_td_list))
					list_del(&cur_td->cancelled_td_list);
				xhci_giveback_urb_in_irq(xhci, cur_td,
						-ESHUTDOWN, "killed");
			}
			while (!list_empty(&temp_ep->cancelled_td_list)) {
				cur_td = list_first_entry(
						&temp_ep->cancelled_td_list,
						struct xhci_td,
						cancelled_td_list);
				list_del(&cur_td->cancelled_td_list);
				xhci_giveback_urb_in_irq(xhci, cur_td,
						-ESHUTDOWN, "killed");
			}
		}
	}
	spin_unlock(&xhci->lock);
	xhci_to_hcd(xhci)->state = HC_STATE_HALT;
	xhci_dbg(xhci, "Calling usb_hc_died()\n");
	usb_hc_died(xhci_to_hcd(xhci));
	xhci_dbg(xhci, "xHCI host controller is dead.\n");
}
/*
 * When we get a completion for a Set Transfer Ring Dequeue Pointer command,
 * we need to clear the set deq pending flag in the endpoint ring state, so that
 * the TD queueing code can ring the doorbell again. We also need to ring the
 * endpoint doorbell to restart the ring, but only if there aren't more
 * cancellations pending.
 */
static void handle_set_deq_completion(struct xhci_hcd *xhci,
		struct xhci_event_cmd *event,
		union xhci_trb *trb)
{
	unsigned int slot_id;
	unsigned int ep_index;
	unsigned int stream_id;
	struct xhci_ring *ep_ring;
	struct xhci_virt_device *dev;
	struct xhci_ep_ctx *ep_ctx;
	struct xhci_slot_ctx *slot_ctx;

	slot_id = TRB_TO_SLOT_ID(trb->generic.field[3]);
	ep_index = TRB_TO_EP_INDEX(trb->generic.field[3]);
	stream_id = TRB_TO_STREAM_ID(trb->generic.field[2]);
	dev = xhci->devs[slot_id];

	ep_ring = xhci_stream_id_to_ring(dev, ep_index, stream_id);
	if (!ep_ring) {
		xhci_warn(xhci, "WARN Set TR deq ptr command for "
				"freed stream ID %u\n",
				stream_id);
		/* XXX: Harmless??? */
		dev->eps[ep_index].ep_state &= ~SET_DEQ_PENDING;
		return;
	}

	ep_ctx = xhci_get_ep_ctx(xhci, dev->out_ctx, ep_index);
	slot_ctx = xhci_get_slot_ctx(xhci, dev->out_ctx);

	if (GET_COMP_CODE(event->status) != COMP_SUCCESS) {
		unsigned int ep_state;
		unsigned int slot_state;

		switch (GET_COMP_CODE(event->status)) {
		case COMP_TRB_ERR:
			xhci_warn(xhci, "WARN Set TR Deq Ptr cmd invalid because "
					"of stream ID configuration\n");
			break;
		case COMP_CTX_STATE:
			xhci_warn(xhci, "WARN Set TR Deq Ptr cmd failed due "
					"to incorrect slot or ep state.\n");
			ep_state = ep_ctx->ep_info;
			ep_state &= EP_STATE_MASK;
			slot_state = slot_ctx->dev_state;
			slot_state = GET_SLOT_STATE(slot_state);
			xhci_dbg(xhci, "Slot state = %u, EP state = %u\n",
					slot_state, ep_state);
			break;
		case COMP_EBADSLT:
			xhci_warn(xhci, "WARN Set TR Deq Ptr cmd failed because "
					"slot %u was not enabled.\n", slot_id);
			break;
		default:
			xhci_warn(xhci, "WARN Set TR Deq Ptr cmd with unknown "
					"completion code of %u.\n",
					GET_COMP_CODE(event->status));
			break;
		}
		/* OK what do we do now? The endpoint state is hosed, and we
		 * should never get to this point if the synchronization between
		 * queueing and endpoint state is correct. This might happen
		 * if the device gets disconnected after we've finished
		 * cancelling URBs, which might not be an error...
		 */
	} else {
		xhci_dbg(xhci, "Successful Set TR Deq Ptr cmd, deq = @%08llx\n",
				ep_ctx->deq);
	}

	dev->eps[ep_index].ep_state &= ~SET_DEQ_PENDING;
	/* Restart any rings with pending URBs */
	ring_doorbell_for_active_rings(xhci, slot_id, ep_index);
}
static void handle_reset_ep_completion(struct xhci_hcd *xhci,
		struct xhci_event_cmd *event,
		union xhci_trb *trb)
{
	int slot_id;
	unsigned int ep_index;

	slot_id = TRB_TO_SLOT_ID(trb->generic.field[3]);
	ep_index = TRB_TO_EP_INDEX(trb->generic.field[3]);
	/* This command will only fail if the endpoint wasn't halted,
	 * but we don't care.
	 */
	xhci_dbg(xhci, "Ignoring reset ep completion code of %u\n",
			(unsigned int) GET_COMP_CODE(event->status));

	/* HW with the reset endpoint quirk needs to have a configure endpoint
	 * command complete before the endpoint can be used. Queue that here
	 * because the HW can't handle two commands being queued in a row.
	 */
	if (xhci->quirks & XHCI_RESET_EP_QUIRK) {
		xhci_dbg(xhci, "Queueing configure endpoint command\n");
		xhci_queue_configure_endpoint(xhci,
				xhci->devs[slot_id]->in_ctx->dma, slot_id,
				false);
		xhci_ring_cmd_db(xhci);
	} else {
		/* Clear our internal halted state and restart the ring(s) */
		xhci->devs[slot_id]->eps[ep_index].ep_state &= ~EP_HALTED;
		ring_doorbell_for_active_rings(xhci, slot_id, ep_index);
	}
}

/* Check to see if a command in the device's command queue matches this one.
 * Signal the completion or free the command, and return 1. Return 0 if the
 * completed command isn't at the head of the command list.
 */
static int handle_cmd_in_cmd_wait_list(struct xhci_hcd *xhci,
		struct xhci_virt_device *virt_dev,
		struct xhci_event_cmd *event)
{
	struct xhci_command *command;

	if (list_empty(&virt_dev->cmd_list))
		return 0;

	command = list_entry(virt_dev->cmd_list.next,
			struct xhci_command, cmd_list);
	if (xhci->cmd_ring->dequeue != command->command_trb)
		return 0;

	command->status =
		GET_COMP_CODE(event->status);
	list_del(&command->cmd_list);
	if (command->completion)
		complete(command->completion);
	else
		xhci_free_command(xhci, command);
	return 1;
}
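
/*
 * Commands complete in the order they were queued, so only the command at the
 * head of the device's cmd_list can match the TRB that the command ring
 * dequeue pointer is sitting on; anything else means this completion belongs
 * to a command that was not issued through the wait list.
 */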
static void handle_cmd_completion(struct xhci_hcd *xhci,
		struct xhci_event_cmd *event)
{
	int slot_id = TRB_TO_SLOT_ID(event->flags);
	u64 cmd_dma;
	dma_addr_t cmd_dequeue_dma;
	struct xhci_input_control_ctx *ctrl_ctx;
	struct xhci_virt_device *virt_dev;
	unsigned int ep_index;
	struct xhci_ring *ep_ring;
	unsigned int ep_state;

	cmd_dma = event->cmd_trb;
	cmd_dequeue_dma = xhci_trb_virt_to_dma(xhci->cmd_ring->deq_seg,
			xhci->cmd_ring->dequeue);
	/* Is the command ring deq ptr out of sync with the deq seg ptr? */
	if (cmd_dequeue_dma == 0) {
		xhci->error_bitmask |= 1 << 4;
		return;
	}
	/* Does the DMA address match our internal dequeue pointer address? */
	if (cmd_dma != (u64) cmd_dequeue_dma) {
		xhci->error_bitmask |= 1 << 5;
		return;
	}
	switch (xhci->cmd_ring->dequeue->generic.field[3] & TRB_TYPE_BITMASK) {
	case TRB_TYPE(TRB_ENABLE_SLOT):
		if (GET_COMP_CODE(event->status) == COMP_SUCCESS)
			xhci->slot_id = slot_id;
		else
			xhci->slot_id = 0;
		complete(&xhci->addr_dev);
		break;
	case TRB_TYPE(TRB_DISABLE_SLOT):
		if (xhci->devs[slot_id])
			xhci_free_virt_device(xhci, slot_id);
		break;
	case TRB_TYPE(TRB_CONFIG_EP):
		virt_dev = xhci->devs[slot_id];
		if (handle_cmd_in_cmd_wait_list(xhci, virt_dev, event))
			break;
		/*
		 * Configure endpoint commands can come from the USB core
		 * configuration or alt setting changes, or because the HW
		 * needed an extra configure endpoint command after a reset
		 * endpoint command or streams were being configured.
		 * If the command was for a halted endpoint, the xHCI driver
		 * is not waiting on the configure endpoint command.
		 */
		ctrl_ctx = xhci_get_input_control_ctx(xhci,
				virt_dev->in_ctx);
		/* Input ctx add_flags are the endpoint index plus one */
		ep_index = xhci_last_valid_endpoint(ctrl_ctx->add_flags) - 1;
		/* A usb_set_interface() call directly after clearing a halted
		 * condition may race on this quirky hardware. Not worth
		 * worrying about, since this is prototype hardware. Not sure
		 * if this will work for streams, but streams support was
		 * untested on this prototype.
		 */
		if (xhci->quirks & XHCI_RESET_EP_QUIRK &&
				ep_index != (unsigned int) -1 &&
				ctrl_ctx->add_flags - SLOT_FLAG ==
					ctrl_ctx->drop_flags) {
			ep_ring = xhci->devs[slot_id]->eps[ep_index].ring;
			ep_state = xhci->devs[slot_id]->eps[ep_index].ep_state;
			if (!(ep_state & EP_HALTED))
				goto bandwidth_change;
			xhci_dbg(xhci, "Completed config ep cmd - "
					"last ep index = %d, state = %d\n",
					ep_index, ep_state);
			/* Clear internal halted state and restart ring(s) */
			xhci->devs[slot_id]->eps[ep_index].ep_state &=
				~EP_HALTED;
			ring_doorbell_for_active_rings(xhci, slot_id, ep_index);
			break;
		}
bandwidth_change:
		xhci_dbg(xhci, "Completed config ep cmd\n");
		xhci->devs[slot_id]->cmd_status =
			GET_COMP_CODE(event->status);
		complete(&xhci->devs[slot_id]->cmd_completion);
		break;
	case TRB_TYPE(TRB_EVAL_CONTEXT):
		virt_dev = xhci->devs[slot_id];
		if (handle_cmd_in_cmd_wait_list(xhci, virt_dev, event))
			break;
		xhci->devs[slot_id]->cmd_status = GET_COMP_CODE(event->status);
		complete(&xhci->devs[slot_id]->cmd_completion);
		break;
	case TRB_TYPE(TRB_ADDR_DEV):
		xhci->devs[slot_id]->cmd_status = GET_COMP_CODE(event->status);
		complete(&xhci->addr_dev);
		break;
	case TRB_TYPE(TRB_STOP_RING):
		handle_stopped_endpoint(xhci, xhci->cmd_ring->dequeue, event);
		break;
	case TRB_TYPE(TRB_SET_DEQ):
		handle_set_deq_completion(xhci, event, xhci->cmd_ring->dequeue);
		break;
	case TRB_TYPE(TRB_CMD_NOOP):
		++xhci->noops_handled;
		break;
	case TRB_TYPE(TRB_RESET_EP):
		handle_reset_ep_completion(xhci, event, xhci->cmd_ring->dequeue);
		break;
	case TRB_TYPE(TRB_RESET_DEV):
		xhci_dbg(xhci, "Completed reset device command.\n");
		slot_id = TRB_TO_SLOT_ID(
				xhci->cmd_ring->dequeue->generic.field[3]);
		virt_dev = xhci->devs[slot_id];
		if (virt_dev)
			handle_cmd_in_cmd_wait_list(xhci, virt_dev, event);
		else
			xhci_warn(xhci, "Reset device command completion "
					"for disabled slot %u\n", slot_id);
		break;
	case TRB_TYPE(TRB_NEC_GET_FW):
		if (!(xhci->quirks & XHCI_NEC_HOST)) {
			xhci->error_bitmask |= 1 << 6;
			break;
		}
		xhci_dbg(xhci, "NEC firmware version %2x.%02x\n",
				NEC_FW_MAJOR(event->status),
				NEC_FW_MINOR(event->status));
		break;
	default:
		/* Skip over unknown commands on the event ring */
		xhci->error_bitmask |= 1 << 6;
		break;
	}
	inc_deq(xhci, xhci->cmd_ring, false);
}
static void handle_vendor_event(struct xhci_hcd *xhci,
		union xhci_trb *event)
{
	u32 trb_type;

	trb_type = TRB_FIELD_TO_TYPE(event->generic.field[3]);
	xhci_dbg(xhci, "Vendor specific event TRB type = %u\n", trb_type);
	if (trb_type == TRB_NEC_CMD_COMP && (xhci->quirks & XHCI_NEC_HOST))
		handle_cmd_completion(xhci, &event->event_cmd);
}

static void handle_port_status(struct xhci_hcd *xhci,
		union xhci_trb *event)
{
	struct usb_hcd *hcd = xhci_to_hcd(xhci);
	u32 port_id;
	u32 temp, temp1;
	u32 __iomem *addr;
	int ports;
	int slot_id;

	/* Port status change events always have a successful completion code */
	if (GET_COMP_CODE(event->generic.field[2]) != COMP_SUCCESS) {
		xhci_warn(xhci, "WARN: xHC returned failed port status event\n");
		xhci->error_bitmask |= 1 << 8;
	}
	port_id = GET_PORT_ID(event->generic.field[0]);
	xhci_dbg(xhci, "Port Status Change Event for port %d\n", port_id);

	ports = HCS_MAX_PORTS(xhci->hcs_params1);
	if ((port_id <= 0) || (port_id > ports)) {
		xhci_warn(xhci, "Invalid port id %d\n", port_id);
		goto cleanup;
	}

	addr = &xhci->op_regs->port_status_base + NUM_PORT_REGS * (port_id - 1);
	temp = xhci_readl(xhci, addr);
	if (hcd->state == HC_STATE_SUSPENDED) {
		xhci_dbg(xhci, "resume root hub\n");
		usb_hcd_resume_root_hub(hcd);
	}

	if ((temp & PORT_PLC) && (temp & PORT_PLS_MASK) == XDEV_RESUME) {
		xhci_dbg(xhci, "port resume event for port %d\n", port_id);

		temp1 = xhci_readl(xhci, &xhci->op_regs->command);
		if (!(temp1 & CMD_RUN)) {
			xhci_warn(xhci, "xHC is not running.\n");
			goto cleanup;
		}

		if (DEV_SUPERSPEED(temp)) {
			xhci_dbg(xhci, "resume SS port %d\n", port_id);
			temp = xhci_port_state_to_neutral(temp);
			temp &= ~PORT_PLS_MASK;
			temp |= PORT_LINK_STROBE | XDEV_U0;
			xhci_writel(xhci, temp, addr);
			slot_id = xhci_find_slot_id_by_port(xhci, port_id);
			if (!slot_id) {
				xhci_dbg(xhci, "slot_id is zero\n");
				goto cleanup;
			}
			xhci_ring_device(xhci, slot_id);
			xhci_dbg(xhci, "resume SS port %d finished\n", port_id);
			/* Clear PORT_PLC */
			temp = xhci_readl(xhci, addr);
			temp = xhci_port_state_to_neutral(temp);
			temp |= PORT_PLC;
			xhci_writel(xhci, temp, addr);
		} else {
			xhci_dbg(xhci, "resume HS port %d\n", port_id);
			xhci->resume_done[port_id - 1] = jiffies +
				msecs_to_jiffies(20);
			mod_timer(&hcd->rh_timer,
					xhci->resume_done[port_id - 1]);
			/* Do the rest in GetPortStatus */
		}
	}

cleanup:
	/* Update event ring dequeue pointer before dropping the lock */
	inc_deq(xhci, xhci->event_ring, true);
	spin_unlock(&xhci->lock);
	/* Pass this up to the core */
	usb_hcd_poll_rh_status(xhci_to_hcd(xhci));
	spin_lock(&xhci->lock);
}
  1146. /*
  1147. * This TD is defined by the TRBs starting at start_trb in start_seg and ending
  1148. * at end_trb, which may be in another segment. If the suspect DMA address is a
  1149. * TRB in this TD, this function returns that TRB's segment. Otherwise it
1150. * returns NULL.
  1151. */
  1152. struct xhci_segment *trb_in_td(struct xhci_segment *start_seg,
  1153. union xhci_trb *start_trb,
  1154. union xhci_trb *end_trb,
  1155. dma_addr_t suspect_dma)
  1156. {
  1157. dma_addr_t start_dma;
  1158. dma_addr_t end_seg_dma;
  1159. dma_addr_t end_trb_dma;
  1160. struct xhci_segment *cur_seg;
  1161. start_dma = xhci_trb_virt_to_dma(start_seg, start_trb);
  1162. cur_seg = start_seg;
  1163. do {
  1164. if (start_dma == 0)
  1165. return NULL;
  1166. /* We may get an event for a Link TRB in the middle of a TD */
  1167. end_seg_dma = xhci_trb_virt_to_dma(cur_seg,
  1168. &cur_seg->trbs[TRBS_PER_SEGMENT - 1]);
  1169. /* If the end TRB isn't in this segment, this is set to 0 */
  1170. end_trb_dma = xhci_trb_virt_to_dma(cur_seg, end_trb);
  1171. if (end_trb_dma > 0) {
  1172. /* The end TRB is in this segment, so suspect should be here */
  1173. if (start_dma <= end_trb_dma) {
  1174. if (suspect_dma >= start_dma && suspect_dma <= end_trb_dma)
  1175. return cur_seg;
  1176. } else {
  1177. /* Case for one segment with
  1178. * a TD wrapped around to the top
  1179. */
  1180. if ((suspect_dma >= start_dma &&
  1181. suspect_dma <= end_seg_dma) ||
  1182. (suspect_dma >= cur_seg->dma &&
  1183. suspect_dma <= end_trb_dma))
  1184. return cur_seg;
  1185. }
  1186. return NULL;
  1187. } else {
  1188. /* Might still be somewhere in this segment */
  1189. if (suspect_dma >= start_dma && suspect_dma <= end_seg_dma)
  1190. return cur_seg;
  1191. }
  1192. cur_seg = cur_seg->next;
  1193. start_dma = xhci_trb_virt_to_dma(cur_seg, &cur_seg->trbs[0]);
  1194. } while (cur_seg != start_seg);
  1195. return NULL;
  1196. }
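/*
 * Illustrative sketch, not part of the driver: how a caller typically uses
 * trb_in_td().  Once the segment containing the suspect DMA address has been
 * found, the TRB itself is recovered with simple pointer arithmetic, exactly
 * as handle_tx_event() does further below.  The helper name
 * event_trb_from_dma() is made up for this example.
 */
static inline union xhci_trb *event_trb_from_dma(struct xhci_segment *start_seg,
		union xhci_trb *start_trb, union xhci_trb *end_trb,
		dma_addr_t event_dma)
{
	struct xhci_segment *seg;

	seg = trb_in_td(start_seg, start_trb, end_trb, event_dma);
	if (!seg)
		return NULL;	/* DMA address is not part of this TD */
	/* Offset of the TRB within the segment, in TRB-sized units */
	return &seg->trbs[(event_dma - seg->dma) / sizeof(union xhci_trb)];
}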
  1197. static void xhci_cleanup_halted_endpoint(struct xhci_hcd *xhci,
  1198. unsigned int slot_id, unsigned int ep_index,
  1199. unsigned int stream_id,
  1200. struct xhci_td *td, union xhci_trb *event_trb)
  1201. {
  1202. struct xhci_virt_ep *ep = &xhci->devs[slot_id]->eps[ep_index];
  1203. ep->ep_state |= EP_HALTED;
  1204. ep->stopped_td = td;
  1205. ep->stopped_trb = event_trb;
  1206. ep->stopped_stream = stream_id;
  1207. xhci_queue_reset_ep(xhci, slot_id, ep_index);
  1208. xhci_cleanup_stalled_ring(xhci, td->urb->dev, ep_index);
  1209. ep->stopped_td = NULL;
  1210. ep->stopped_trb = NULL;
  1211. ep->stopped_stream = 0;
  1212. xhci_ring_cmd_db(xhci);
  1213. }
  1214. /* Check if an error has halted the endpoint ring. The class driver will
  1215. * cleanup the halt for a non-default control endpoint if we indicate a stall.
  1216. * However, a babble and other errors also halt the endpoint ring, and the class
  1217. * driver won't clear the halt in that case, so we need to issue a Set Transfer
  1218. * Ring Dequeue Pointer command manually.
  1219. */
  1220. static int xhci_requires_manual_halt_cleanup(struct xhci_hcd *xhci,
  1221. struct xhci_ep_ctx *ep_ctx,
  1222. unsigned int trb_comp_code)
  1223. {
  1224. /* TRB completion codes that may require a manual halt cleanup */
  1225. if (trb_comp_code == COMP_TX_ERR ||
  1226. trb_comp_code == COMP_BABBLE ||
  1227. trb_comp_code == COMP_SPLIT_ERR)
1228. /* The 0.95 spec says a babbling control endpoint
  1229. * is not halted. The 0.96 spec says it is. Some HW
  1230. * claims to be 0.95 compliant, but it halts the control
  1231. * endpoint anyway. Check if a babble halted the
  1232. * endpoint.
  1233. */
  1234. if ((ep_ctx->ep_info & EP_STATE_MASK) == EP_STATE_HALTED)
  1235. return 1;
  1236. return 0;
  1237. }
  1238. int xhci_is_vendor_info_code(struct xhci_hcd *xhci, unsigned int trb_comp_code)
  1239. {
  1240. if (trb_comp_code >= 224 && trb_comp_code <= 255) {
  1241. /* Vendor defined "informational" completion code,
  1242. * treat as not-an-error.
  1243. */
  1244. xhci_dbg(xhci, "Vendor defined info completion code %u\n",
  1245. trb_comp_code);
  1246. xhci_dbg(xhci, "Treating code as success.\n");
  1247. return 1;
  1248. }
  1249. return 0;
  1250. }
  1251. /*
1252. * Finish the TD processing and remove the TD from its TD list;
1253. * return 1 if the URB can be given back.
  1254. */
  1255. static int finish_td(struct xhci_hcd *xhci, struct xhci_td *td,
  1256. union xhci_trb *event_trb, struct xhci_transfer_event *event,
  1257. struct xhci_virt_ep *ep, int *status, bool skip)
  1258. {
  1259. struct xhci_virt_device *xdev;
  1260. struct xhci_ring *ep_ring;
  1261. unsigned int slot_id;
  1262. int ep_index;
  1263. struct urb *urb = NULL;
  1264. struct xhci_ep_ctx *ep_ctx;
  1265. int ret = 0;
  1266. struct urb_priv *urb_priv;
  1267. u32 trb_comp_code;
  1268. slot_id = TRB_TO_SLOT_ID(event->flags);
  1269. xdev = xhci->devs[slot_id];
  1270. ep_index = TRB_TO_EP_ID(event->flags) - 1;
  1271. ep_ring = xhci_dma_to_transfer_ring(ep, event->buffer);
  1272. ep_ctx = xhci_get_ep_ctx(xhci, xdev->out_ctx, ep_index);
  1273. trb_comp_code = GET_COMP_CODE(event->transfer_len);
  1274. if (skip)
  1275. goto td_cleanup;
  1276. if (trb_comp_code == COMP_STOP_INVAL ||
  1277. trb_comp_code == COMP_STOP) {
  1278. /* The Endpoint Stop Command completion will take care of any
  1279. * stopped TDs. A stopped TD may be restarted, so don't update
  1280. * the ring dequeue pointer or take this TD off any lists yet.
  1281. */
  1282. ep->stopped_td = td;
  1283. ep->stopped_trb = event_trb;
  1284. return 0;
  1285. } else {
  1286. if (trb_comp_code == COMP_STALL) {
  1287. /* The transfer is completed from the driver's
  1288. * perspective, but we need to issue a set dequeue
  1289. * command for this stalled endpoint to move the dequeue
  1290. * pointer past the TD. We can't do that here because
  1291. * the halt condition must be cleared first. Let the
  1292. * USB class driver clear the stall later.
  1293. */
  1294. ep->stopped_td = td;
  1295. ep->stopped_trb = event_trb;
  1296. ep->stopped_stream = ep_ring->stream_id;
  1297. } else if (xhci_requires_manual_halt_cleanup(xhci,
  1298. ep_ctx, trb_comp_code)) {
  1299. /* Other types of errors halt the endpoint, but the
  1300. * class driver doesn't call usb_reset_endpoint() unless
  1301. * the error is -EPIPE. Clear the halted status in the
  1302. * xHCI hardware manually.
  1303. */
  1304. xhci_cleanup_halted_endpoint(xhci,
  1305. slot_id, ep_index, ep_ring->stream_id,
  1306. td, event_trb);
  1307. } else {
  1308. /* Update ring dequeue pointer */
  1309. while (ep_ring->dequeue != td->last_trb)
  1310. inc_deq(xhci, ep_ring, false);
  1311. inc_deq(xhci, ep_ring, false);
  1312. }
  1313. td_cleanup:
  1314. /* Clean up the endpoint's TD list */
  1315. urb = td->urb;
  1316. urb_priv = urb->hcpriv;
  1317. /* Do one last check of the actual transfer length.
  1318. * If the host controller said we transferred more data than
  1319. * the buffer length, urb->actual_length will be a very big
  1320. * number (since it's unsigned). Play it safe and say we didn't
  1321. * transfer anything.
  1322. */
  1323. if (urb->actual_length > urb->transfer_buffer_length) {
  1324. xhci_warn(xhci, "URB transfer length is wrong, "
  1325. "xHC issue? req. len = %u, "
  1326. "act. len = %u\n",
  1327. urb->transfer_buffer_length,
  1328. urb->actual_length);
  1329. urb->actual_length = 0;
  1330. if (td->urb->transfer_flags & URB_SHORT_NOT_OK)
  1331. *status = -EREMOTEIO;
  1332. else
  1333. *status = 0;
  1334. }
  1335. list_del(&td->td_list);
  1336. /* Was this TD slated to be cancelled but completed anyway? */
  1337. if (!list_empty(&td->cancelled_td_list))
  1338. list_del(&td->cancelled_td_list);
  1339. urb_priv->td_cnt++;
  1340. /* Giveback the urb when all the tds are completed */
  1341. if (urb_priv->td_cnt == urb_priv->length)
  1342. ret = 1;
  1343. }
  1344. return ret;
  1345. }
  1346. /*
  1347. * Process control tds, update urb status and actual_length.
  1348. */
  1349. static int process_ctrl_td(struct xhci_hcd *xhci, struct xhci_td *td,
  1350. union xhci_trb *event_trb, struct xhci_transfer_event *event,
  1351. struct xhci_virt_ep *ep, int *status)
  1352. {
  1353. struct xhci_virt_device *xdev;
  1354. struct xhci_ring *ep_ring;
  1355. unsigned int slot_id;
  1356. int ep_index;
  1357. struct xhci_ep_ctx *ep_ctx;
  1358. u32 trb_comp_code;
  1359. slot_id = TRB_TO_SLOT_ID(event->flags);
  1360. xdev = xhci->devs[slot_id];
  1361. ep_index = TRB_TO_EP_ID(event->flags) - 1;
  1362. ep_ring = xhci_dma_to_transfer_ring(ep, event->buffer);
  1363. ep_ctx = xhci_get_ep_ctx(xhci, xdev->out_ctx, ep_index);
  1364. trb_comp_code = GET_COMP_CODE(event->transfer_len);
  1365. xhci_debug_trb(xhci, xhci->event_ring->dequeue);
  1366. switch (trb_comp_code) {
  1367. case COMP_SUCCESS:
  1368. if (event_trb == ep_ring->dequeue) {
  1369. xhci_warn(xhci, "WARN: Success on ctrl setup TRB "
  1370. "without IOC set??\n");
  1371. *status = -ESHUTDOWN;
  1372. } else if (event_trb != td->last_trb) {
  1373. xhci_warn(xhci, "WARN: Success on ctrl data TRB "
  1374. "without IOC set??\n");
  1375. *status = -ESHUTDOWN;
  1376. } else {
  1377. xhci_dbg(xhci, "Successful control transfer!\n");
  1378. *status = 0;
  1379. }
  1380. break;
  1381. case COMP_SHORT_TX:
  1382. xhci_warn(xhci, "WARN: short transfer on control ep\n");
  1383. if (td->urb->transfer_flags & URB_SHORT_NOT_OK)
  1384. *status = -EREMOTEIO;
  1385. else
  1386. *status = 0;
  1387. break;
  1388. default:
  1389. if (!xhci_requires_manual_halt_cleanup(xhci,
  1390. ep_ctx, trb_comp_code))
  1391. break;
  1392. xhci_dbg(xhci, "TRB error code %u, "
  1393. "halted endpoint index = %u\n",
  1394. trb_comp_code, ep_index);
  1395. /* else fall through */
  1396. case COMP_STALL:
  1397. /* Did we transfer part of the data (middle) phase? */
  1398. if (event_trb != ep_ring->dequeue &&
  1399. event_trb != td->last_trb)
  1400. td->urb->actual_length =
  1401. td->urb->transfer_buffer_length
  1402. - TRB_LEN(event->transfer_len);
  1403. else
  1404. td->urb->actual_length = 0;
  1405. xhci_cleanup_halted_endpoint(xhci,
  1406. slot_id, ep_index, 0, td, event_trb);
  1407. return finish_td(xhci, td, event_trb, event, ep, status, true);
  1408. }
  1409. /*
  1410. * Did we transfer any data, despite the errors that might have
  1411. * happened? I.e. did we get past the setup stage?
  1412. */
  1413. if (event_trb != ep_ring->dequeue) {
  1414. /* The event was for the status stage */
  1415. if (event_trb == td->last_trb) {
  1416. if (td->urb->actual_length != 0) {
  1417. /* Don't overwrite a previously set error code
  1418. */
  1419. if ((*status == -EINPROGRESS || *status == 0) &&
  1420. (td->urb->transfer_flags
  1421. & URB_SHORT_NOT_OK))
  1422. /* Did we already see a short data
  1423. * stage? */
  1424. *status = -EREMOTEIO;
  1425. } else {
  1426. td->urb->actual_length =
  1427. td->urb->transfer_buffer_length;
  1428. }
  1429. } else {
  1430. /* Maybe the event was for the data stage? */
  1431. if (trb_comp_code != COMP_STOP_INVAL) {
  1432. /* We didn't stop on a link TRB in the middle */
  1433. td->urb->actual_length =
  1434. td->urb->transfer_buffer_length -
  1435. TRB_LEN(event->transfer_len);
  1436. xhci_dbg(xhci, "Waiting for status "
  1437. "stage event\n");
  1438. return 0;
  1439. }
  1440. }
  1441. }
  1442. return finish_td(xhci, td, event_trb, event, ep, status, false);
  1443. }
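/*
 * Illustrative sketch, not driver code: the position of event_trb inside a
 * control TD is what process_ctrl_td() above uses to tell the stages apart.
 * The enum and helper names here are hypothetical.
 */
enum ctrl_stage { CTRL_SETUP_STAGE, CTRL_DATA_STAGE, CTRL_STATUS_STAGE };

static inline enum ctrl_stage ctrl_stage_of(struct xhci_ring *ep_ring,
		struct xhci_td *td, union xhci_trb *event_trb)
{
	if (event_trb == ep_ring->dequeue)
		return CTRL_SETUP_STAGE;	/* event for the setup TRB */
	if (event_trb == td->last_trb)
		return CTRL_STATUS_STAGE;	/* event for the status TRB */
	return CTRL_DATA_STAGE;			/* somewhere in the data stage */
}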
  1444. /*
  1445. * Process isochronous tds, update urb packet status and actual_length.
  1446. */
  1447. static int process_isoc_td(struct xhci_hcd *xhci, struct xhci_td *td,
  1448. union xhci_trb *event_trb, struct xhci_transfer_event *event,
  1449. struct xhci_virt_ep *ep, int *status)
  1450. {
  1451. struct xhci_ring *ep_ring;
  1452. struct urb_priv *urb_priv;
  1453. int idx;
  1454. int len = 0;
  1455. int skip_td = 0;
  1456. union xhci_trb *cur_trb;
  1457. struct xhci_segment *cur_seg;
  1458. u32 trb_comp_code;
  1459. ep_ring = xhci_dma_to_transfer_ring(ep, event->buffer);
  1460. trb_comp_code = GET_COMP_CODE(event->transfer_len);
  1461. urb_priv = td->urb->hcpriv;
  1462. idx = urb_priv->td_cnt;
  1463. if (ep->skip) {
  1464. /* The transfer is partly done */
  1465. *status = -EXDEV;
  1466. td->urb->iso_frame_desc[idx].status = -EXDEV;
  1467. } else {
  1468. /* handle completion code */
  1469. switch (trb_comp_code) {
  1470. case COMP_SUCCESS:
  1471. td->urb->iso_frame_desc[idx].status = 0;
  1472. xhci_dbg(xhci, "Successful isoc transfer!\n");
  1473. break;
  1474. case COMP_SHORT_TX:
  1475. if (td->urb->transfer_flags & URB_SHORT_NOT_OK)
  1476. td->urb->iso_frame_desc[idx].status =
  1477. -EREMOTEIO;
  1478. else
  1479. td->urb->iso_frame_desc[idx].status = 0;
  1480. break;
  1481. case COMP_BW_OVER:
  1482. td->urb->iso_frame_desc[idx].status = -ECOMM;
  1483. skip_td = 1;
  1484. break;
  1485. case COMP_BUFF_OVER:
  1486. case COMP_BABBLE:
  1487. td->urb->iso_frame_desc[idx].status = -EOVERFLOW;
  1488. skip_td = 1;
  1489. break;
  1490. case COMP_STALL:
  1491. td->urb->iso_frame_desc[idx].status = -EPROTO;
  1492. skip_td = 1;
  1493. break;
  1494. case COMP_STOP:
  1495. case COMP_STOP_INVAL:
  1496. break;
  1497. default:
  1498. td->urb->iso_frame_desc[idx].status = -1;
  1499. break;
  1500. }
  1501. }
  1502. /* calc actual length */
  1503. if (ep->skip) {
  1504. td->urb->iso_frame_desc[idx].actual_length = 0;
  1505. /* Update ring dequeue pointer */
  1506. while (ep_ring->dequeue != td->last_trb)
  1507. inc_deq(xhci, ep_ring, false);
  1508. inc_deq(xhci, ep_ring, false);
  1509. return finish_td(xhci, td, event_trb, event, ep, status, true);
  1510. }
  1511. if (trb_comp_code == COMP_SUCCESS || skip_td == 1) {
  1512. td->urb->iso_frame_desc[idx].actual_length =
  1513. td->urb->iso_frame_desc[idx].length;
  1514. td->urb->actual_length +=
  1515. td->urb->iso_frame_desc[idx].length;
  1516. } else {
  1517. for (cur_trb = ep_ring->dequeue,
  1518. cur_seg = ep_ring->deq_seg; cur_trb != event_trb;
  1519. next_trb(xhci, ep_ring, &cur_seg, &cur_trb)) {
  1520. if ((cur_trb->generic.field[3] &
  1521. TRB_TYPE_BITMASK) != TRB_TYPE(TRB_TR_NOOP) &&
  1522. (cur_trb->generic.field[3] &
  1523. TRB_TYPE_BITMASK) != TRB_TYPE(TRB_LINK))
  1524. len +=
  1525. TRB_LEN(cur_trb->generic.field[2]);
  1526. }
  1527. len += TRB_LEN(cur_trb->generic.field[2]) -
  1528. TRB_LEN(event->transfer_len);
  1529. if (trb_comp_code != COMP_STOP_INVAL) {
  1530. td->urb->iso_frame_desc[idx].actual_length = len;
  1531. td->urb->actual_length += len;
  1532. }
  1533. }
  1534. if ((idx == urb_priv->length - 1) && *status == -EINPROGRESS)
  1535. *status = 0;
  1536. return finish_td(xhci, td, event_trb, event, ep, status, false);
  1537. }
  1538. /*
  1539. * Process bulk and interrupt tds, update urb status and actual_length.
  1540. */
  1541. static int process_bulk_intr_td(struct xhci_hcd *xhci, struct xhci_td *td,
  1542. union xhci_trb *event_trb, struct xhci_transfer_event *event,
  1543. struct xhci_virt_ep *ep, int *status)
  1544. {
  1545. struct xhci_ring *ep_ring;
  1546. union xhci_trb *cur_trb;
  1547. struct xhci_segment *cur_seg;
  1548. u32 trb_comp_code;
  1549. ep_ring = xhci_dma_to_transfer_ring(ep, event->buffer);
  1550. trb_comp_code = GET_COMP_CODE(event->transfer_len);
  1551. switch (trb_comp_code) {
  1552. case COMP_SUCCESS:
  1553. /* Double check that the HW transferred everything. */
  1554. if (event_trb != td->last_trb) {
  1555. xhci_warn(xhci, "WARN Successful completion "
  1556. "on short TX\n");
  1557. if (td->urb->transfer_flags & URB_SHORT_NOT_OK)
  1558. *status = -EREMOTEIO;
  1559. else
  1560. *status = 0;
  1561. } else {
  1562. if (usb_endpoint_xfer_bulk(&td->urb->ep->desc))
  1563. xhci_dbg(xhci, "Successful bulk "
  1564. "transfer!\n");
  1565. else
  1566. xhci_dbg(xhci, "Successful interrupt "
  1567. "transfer!\n");
  1568. *status = 0;
  1569. }
  1570. break;
  1571. case COMP_SHORT_TX:
  1572. if (td->urb->transfer_flags & URB_SHORT_NOT_OK)
  1573. *status = -EREMOTEIO;
  1574. else
  1575. *status = 0;
  1576. break;
  1577. default:
  1578. /* Others already handled above */
  1579. break;
  1580. }
  1581. xhci_dbg(xhci, "ep %#x - asked for %d bytes, "
  1582. "%d bytes untransferred\n",
  1583. td->urb->ep->desc.bEndpointAddress,
  1584. td->urb->transfer_buffer_length,
  1585. TRB_LEN(event->transfer_len));
  1586. /* Fast path - was this the last TRB in the TD for this URB? */
  1587. if (event_trb == td->last_trb) {
  1588. if (TRB_LEN(event->transfer_len) != 0) {
  1589. td->urb->actual_length =
  1590. td->urb->transfer_buffer_length -
  1591. TRB_LEN(event->transfer_len);
  1592. if (td->urb->transfer_buffer_length <
  1593. td->urb->actual_length) {
  1594. xhci_warn(xhci, "HC gave bad length "
  1595. "of %d bytes left\n",
  1596. TRB_LEN(event->transfer_len));
  1597. td->urb->actual_length = 0;
  1598. if (td->urb->transfer_flags & URB_SHORT_NOT_OK)
  1599. *status = -EREMOTEIO;
  1600. else
  1601. *status = 0;
  1602. }
  1603. /* Don't overwrite a previously set error code */
  1604. if (*status == -EINPROGRESS) {
  1605. if (td->urb->transfer_flags & URB_SHORT_NOT_OK)
  1606. *status = -EREMOTEIO;
  1607. else
  1608. *status = 0;
  1609. }
  1610. } else {
  1611. td->urb->actual_length =
  1612. td->urb->transfer_buffer_length;
  1613. /* Ignore a short packet completion if the
  1614. * untransferred length was zero.
  1615. */
  1616. if (*status == -EREMOTEIO)
  1617. *status = 0;
  1618. }
  1619. } else {
  1620. /* Slow path - walk the list, starting from the dequeue
  1621. * pointer, to get the actual length transferred.
  1622. */
  1623. td->urb->actual_length = 0;
  1624. for (cur_trb = ep_ring->dequeue, cur_seg = ep_ring->deq_seg;
  1625. cur_trb != event_trb;
  1626. next_trb(xhci, ep_ring, &cur_seg, &cur_trb)) {
  1627. if ((cur_trb->generic.field[3] &
  1628. TRB_TYPE_BITMASK) != TRB_TYPE(TRB_TR_NOOP) &&
  1629. (cur_trb->generic.field[3] &
  1630. TRB_TYPE_BITMASK) != TRB_TYPE(TRB_LINK))
  1631. td->urb->actual_length +=
  1632. TRB_LEN(cur_trb->generic.field[2]);
  1633. }
  1634. /* If the ring didn't stop on a Link or No-op TRB, add
  1635. * in the actual bytes transferred from the Normal TRB
  1636. */
  1637. if (trb_comp_code != COMP_STOP_INVAL)
  1638. td->urb->actual_length +=
  1639. TRB_LEN(cur_trb->generic.field[2]) -
  1640. TRB_LEN(event->transfer_len);
  1641. }
  1642. return finish_td(xhci, td, event_trb, event, ep, status, false);
  1643. }
  1644. /*
  1645. * If this function returns an error condition, it means it got a Transfer
  1646. * event with a corrupted Slot ID, Endpoint ID, or TRB DMA address.
  1647. * At this point, the host controller is probably hosed and should be reset.
  1648. */
  1649. static int handle_tx_event(struct xhci_hcd *xhci,
  1650. struct xhci_transfer_event *event)
  1651. {
  1652. struct xhci_virt_device *xdev;
  1653. struct xhci_virt_ep *ep;
  1654. struct xhci_ring *ep_ring;
  1655. unsigned int slot_id;
  1656. int ep_index;
  1657. struct xhci_td *td = NULL;
  1658. dma_addr_t event_dma;
  1659. struct xhci_segment *event_seg;
  1660. union xhci_trb *event_trb;
  1661. struct urb *urb = NULL;
  1662. int status = -EINPROGRESS;
  1663. struct urb_priv *urb_priv;
  1664. struct xhci_ep_ctx *ep_ctx;
  1665. u32 trb_comp_code;
  1666. int ret = 0;
  1667. slot_id = TRB_TO_SLOT_ID(event->flags);
  1668. xdev = xhci->devs[slot_id];
  1669. if (!xdev) {
  1670. xhci_err(xhci, "ERROR Transfer event pointed to bad slot\n");
  1671. return -ENODEV;
  1672. }
  1673. /* Endpoint ID is 1 based, our index is zero based */
  1674. ep_index = TRB_TO_EP_ID(event->flags) - 1;
  1675. xhci_dbg(xhci, "%s - ep index = %d\n", __func__, ep_index);
  1676. ep = &xdev->eps[ep_index];
  1677. ep_ring = xhci_dma_to_transfer_ring(ep, event->buffer);
  1678. ep_ctx = xhci_get_ep_ctx(xhci, xdev->out_ctx, ep_index);
  1679. if (!ep_ring ||
  1680. (ep_ctx->ep_info & EP_STATE_MASK) == EP_STATE_DISABLED) {
  1681. xhci_err(xhci, "ERROR Transfer event for disabled endpoint "
  1682. "or incorrect stream ring\n");
  1683. return -ENODEV;
  1684. }
  1685. event_dma = event->buffer;
  1686. trb_comp_code = GET_COMP_CODE(event->transfer_len);
  1687. /* Look for common error cases */
  1688. switch (trb_comp_code) {
  1689. /* Skip codes that require special handling depending on
  1690. * transfer type
  1691. */
  1692. case COMP_SUCCESS:
  1693. case COMP_SHORT_TX:
  1694. break;
  1695. case COMP_STOP:
  1696. xhci_dbg(xhci, "Stopped on Transfer TRB\n");
  1697. break;
  1698. case COMP_STOP_INVAL:
  1699. xhci_dbg(xhci, "Stopped on No-op or Link TRB\n");
  1700. break;
  1701. case COMP_STALL:
  1702. xhci_warn(xhci, "WARN: Stalled endpoint\n");
  1703. ep->ep_state |= EP_HALTED;
  1704. status = -EPIPE;
  1705. break;
  1706. case COMP_TRB_ERR:
  1707. xhci_warn(xhci, "WARN: TRB error on endpoint\n");
  1708. status = -EILSEQ;
  1709. break;
  1710. case COMP_SPLIT_ERR:
  1711. case COMP_TX_ERR:
  1712. xhci_warn(xhci, "WARN: transfer error on endpoint\n");
  1713. status = -EPROTO;
  1714. break;
  1715. case COMP_BABBLE:
  1716. xhci_warn(xhci, "WARN: babble error on endpoint\n");
  1717. status = -EOVERFLOW;
  1718. break;
  1719. case COMP_DB_ERR:
  1720. xhci_warn(xhci, "WARN: HC couldn't access mem fast enough\n");
  1721. status = -ENOSR;
  1722. break;
  1723. case COMP_BW_OVER:
  1724. xhci_warn(xhci, "WARN: bandwidth overrun event on endpoint\n");
  1725. break;
  1726. case COMP_BUFF_OVER:
  1727. xhci_warn(xhci, "WARN: buffer overrun event on endpoint\n");
  1728. break;
  1729. case COMP_UNDERRUN:
  1730. /*
  1731. * When the Isoch ring is empty, the xHC will generate
  1732. * a Ring Overrun Event for IN Isoch endpoint or Ring
  1733. * Underrun Event for OUT Isoch endpoint.
  1734. */
  1735. xhci_dbg(xhci, "underrun event on endpoint\n");
  1736. if (!list_empty(&ep_ring->td_list))
  1737. xhci_dbg(xhci, "Underrun Event for slot %d ep %d "
  1738. "still with TDs queued?\n",
  1739. TRB_TO_SLOT_ID(event->flags), ep_index);
  1740. goto cleanup;
  1741. case COMP_OVERRUN:
  1742. xhci_dbg(xhci, "overrun event on endpoint\n");
  1743. if (!list_empty(&ep_ring->td_list))
  1744. xhci_dbg(xhci, "Overrun Event for slot %d ep %d "
  1745. "still with TDs queued?\n",
  1746. TRB_TO_SLOT_ID(event->flags), ep_index);
  1747. goto cleanup;
  1748. case COMP_MISSED_INT:
  1749. /*
1750. * When a Missed Service Error is encountered, one or more isoc TDs
1751. * may have been missed by the xHC.
1752. * Set the skip flag on the endpoint; the missed TDs will be completed
1753. * as short transfers the next time this endpoint's ring is processed.
  1754. */
  1755. ep->skip = true;
  1756. xhci_dbg(xhci, "Miss service interval error, set skip flag\n");
  1757. goto cleanup;
  1758. default:
  1759. if (xhci_is_vendor_info_code(xhci, trb_comp_code)) {
  1760. status = 0;
  1761. break;
  1762. }
  1763. xhci_warn(xhci, "ERROR Unknown event condition, HC probably "
  1764. "busted\n");
  1765. goto cleanup;
  1766. }
  1767. do {
  1768. /* This TRB should be in the TD at the head of this ring's
  1769. * TD list.
  1770. */
  1771. if (list_empty(&ep_ring->td_list)) {
  1772. xhci_warn(xhci, "WARN Event TRB for slot %d ep %d "
  1773. "with no TDs queued?\n",
  1774. TRB_TO_SLOT_ID(event->flags), ep_index);
  1775. xhci_dbg(xhci, "Event TRB with TRB type ID %u\n",
  1776. (unsigned int) (event->flags & TRB_TYPE_BITMASK)>>10);
  1777. xhci_print_trb_offsets(xhci, (union xhci_trb *) event);
  1778. if (ep->skip) {
  1779. ep->skip = false;
  1780. xhci_dbg(xhci, "td_list is empty while skip "
  1781. "flag set. Clear skip flag.\n");
  1782. }
  1783. ret = 0;
  1784. goto cleanup;
  1785. }
  1786. td = list_entry(ep_ring->td_list.next, struct xhci_td, td_list);
  1787. /* Is this a TRB in the currently executing TD? */
  1788. event_seg = trb_in_td(ep_ring->deq_seg, ep_ring->dequeue,
  1789. td->last_trb, event_dma);
  1790. if (event_seg && ep->skip) {
  1791. xhci_dbg(xhci, "Found td. Clear skip flag.\n");
  1792. ep->skip = false;
  1793. }
  1794. if (!event_seg &&
  1795. (!ep->skip || !usb_endpoint_xfer_isoc(&td->urb->ep->desc))) {
  1796. /* HC is busted, give up! */
  1797. xhci_err(xhci, "ERROR Transfer event TRB DMA ptr not "
  1798. "part of current TD\n");
  1799. return -ESHUTDOWN;
  1800. }
  1801. if (event_seg) {
  1802. event_trb = &event_seg->trbs[(event_dma -
  1803. event_seg->dma) / sizeof(*event_trb)];
  1804. /*
  1805. * No-op TRB should not trigger interrupts.
  1806. * If event_trb is a no-op TRB, it means the
  1807. * corresponding TD has been cancelled. Just ignore
  1808. * the TD.
  1809. */
  1810. if ((event_trb->generic.field[3] & TRB_TYPE_BITMASK)
  1811. == TRB_TYPE(TRB_TR_NOOP)) {
  1812. xhci_dbg(xhci, "event_trb is a no-op TRB. "
  1813. "Skip it\n");
  1814. goto cleanup;
  1815. }
  1816. }
  1817. /* Now update the urb's actual_length and give back to
  1818. * the core
  1819. */
  1820. if (usb_endpoint_xfer_control(&td->urb->ep->desc))
  1821. ret = process_ctrl_td(xhci, td, event_trb, event, ep,
  1822. &status);
  1823. else if (usb_endpoint_xfer_isoc(&td->urb->ep->desc))
  1824. ret = process_isoc_td(xhci, td, event_trb, event, ep,
  1825. &status);
  1826. else
  1827. ret = process_bulk_intr_td(xhci, td, event_trb, event,
  1828. ep, &status);
  1829. cleanup:
  1830. /*
  1831. * Do not update event ring dequeue pointer if ep->skip is set.
1832. * We will roll back to it so the remaining missed TDs are processed.
  1833. */
  1834. if (trb_comp_code == COMP_MISSED_INT || !ep->skip) {
  1835. inc_deq(xhci, xhci->event_ring, true);
  1836. }
  1837. if (ret) {
  1838. urb = td->urb;
  1839. urb_priv = urb->hcpriv;
  1840. /* Leave the TD around for the reset endpoint function
1841. * to use (but only if it's not a control endpoint,
  1842. * since we already queued the Set TR dequeue pointer
  1843. * command for stalled control endpoints).
  1844. */
  1845. if (usb_endpoint_xfer_control(&urb->ep->desc) ||
  1846. (trb_comp_code != COMP_STALL &&
  1847. trb_comp_code != COMP_BABBLE))
  1848. xhci_urb_free_priv(xhci, urb_priv);
  1849. usb_hcd_unlink_urb_from_ep(xhci_to_hcd(xhci), urb);
  1850. xhci_dbg(xhci, "Giveback URB %p, len = %d, "
  1851. "status = %d\n",
  1852. urb, urb->actual_length, status);
  1853. spin_unlock(&xhci->lock);
  1854. usb_hcd_giveback_urb(xhci_to_hcd(xhci), urb, status);
  1855. spin_lock(&xhci->lock);
  1856. }
  1857. /*
1858. * If ep->skip is set, there are missed TDs on the
1859. * endpoint ring that still need to be taken care of.
1860. * Process them as short transfers until we reach the TD
1861. * pointed to by the event.
  1862. */
  1863. } while (ep->skip && trb_comp_code != COMP_MISSED_INT);
  1864. return 0;
  1865. }
  1866. /*
  1867. * This function handles all OS-owned events on the event ring. It may drop
  1868. * xhci->lock between event processing (e.g. to pass up port status changes).
  1869. */
  1870. static void xhci_handle_event(struct xhci_hcd *xhci)
  1871. {
  1872. union xhci_trb *event;
  1873. int update_ptrs = 1;
  1874. int ret;
  1875. xhci_dbg(xhci, "In %s\n", __func__);
  1876. if (!xhci->event_ring || !xhci->event_ring->dequeue) {
  1877. xhci->error_bitmask |= 1 << 1;
  1878. return;
  1879. }
  1880. event = xhci->event_ring->dequeue;
  1881. /* Does the HC or OS own the TRB? */
  1882. if ((event->event_cmd.flags & TRB_CYCLE) !=
  1883. xhci->event_ring->cycle_state) {
  1884. xhci->error_bitmask |= 1 << 2;
  1885. return;
  1886. }
  1887. xhci_dbg(xhci, "%s - OS owns TRB\n", __func__);
  1888. /* FIXME: Handle more event types. */
  1889. switch ((event->event_cmd.flags & TRB_TYPE_BITMASK)) {
  1890. case TRB_TYPE(TRB_COMPLETION):
  1891. xhci_dbg(xhci, "%s - calling handle_cmd_completion\n", __func__);
  1892. handle_cmd_completion(xhci, &event->event_cmd);
  1893. xhci_dbg(xhci, "%s - returned from handle_cmd_completion\n", __func__);
  1894. break;
  1895. case TRB_TYPE(TRB_PORT_STATUS):
  1896. xhci_dbg(xhci, "%s - calling handle_port_status\n", __func__);
  1897. handle_port_status(xhci, event);
  1898. xhci_dbg(xhci, "%s - returned from handle_port_status\n", __func__);
  1899. update_ptrs = 0;
  1900. break;
  1901. case TRB_TYPE(TRB_TRANSFER):
  1902. xhci_dbg(xhci, "%s - calling handle_tx_event\n", __func__);
  1903. ret = handle_tx_event(xhci, &event->trans_event);
  1904. xhci_dbg(xhci, "%s - returned from handle_tx_event\n", __func__);
  1905. if (ret < 0)
  1906. xhci->error_bitmask |= 1 << 9;
  1907. else
  1908. update_ptrs = 0;
  1909. break;
  1910. default:
  1911. if ((event->event_cmd.flags & TRB_TYPE_BITMASK) >= TRB_TYPE(48))
  1912. handle_vendor_event(xhci, event);
  1913. else
  1914. xhci->error_bitmask |= 1 << 3;
  1915. }
  1916. /* Any of the above functions may drop and re-acquire the lock, so check
  1917. * to make sure a watchdog timer didn't mark the host as non-responsive.
  1918. */
  1919. if (xhci->xhc_state & XHCI_STATE_DYING) {
  1920. xhci_dbg(xhci, "xHCI host dying, returning from "
  1921. "event handler.\n");
  1922. return;
  1923. }
  1924. if (update_ptrs)
  1925. /* Update SW event ring dequeue pointer */
  1926. inc_deq(xhci, xhci->event_ring, true);
  1927. /* Are there more items on the event ring? */
  1928. xhci_handle_event(xhci);
  1929. }
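/*
 * Illustrative sketch, not driver code: the consumer-ownership test used at
 * the top of xhci_handle_event().  Software owns an event TRB only while the
 * TRB's cycle bit matches the cycle state software tracks for the event ring;
 * once they differ, the hardware has not yet written that slot on this pass.
 * The helper name is made up for this example.
 */
static inline bool xhci_os_owns_event_trb(struct xhci_ring *event_ring,
		union xhci_trb *trb)
{
	return (trb->event_cmd.flags & TRB_CYCLE) == event_ring->cycle_state;
}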
  1930. /*
  1931. * xHCI spec says we can get an interrupt, and if the HC has an error condition,
  1932. * we might get bad data out of the event ring. Section 4.10.2.7 has a list of
  1933. * indicators of an event TRB error, but we check the status *first* to be safe.
  1934. */
  1935. irqreturn_t xhci_irq(struct usb_hcd *hcd)
  1936. {
  1937. struct xhci_hcd *xhci = hcd_to_xhci(hcd);
  1938. u32 status;
  1939. union xhci_trb *trb;
  1940. u64 temp_64;
  1941. union xhci_trb *event_ring_deq;
  1942. dma_addr_t deq;
  1943. spin_lock(&xhci->lock);
  1944. trb = xhci->event_ring->dequeue;
  1945. /* Check if the xHC generated the interrupt, or the irq is shared */
  1946. status = xhci_readl(xhci, &xhci->op_regs->status);
  1947. if (status == 0xffffffff)
  1948. goto hw_died;
  1949. if (!(status & STS_EINT)) {
  1950. spin_unlock(&xhci->lock);
  1951. return IRQ_NONE;
  1952. }
  1953. xhci_dbg(xhci, "op reg status = %08x\n", status);
  1954. xhci_dbg(xhci, "Event ring dequeue ptr:\n");
  1955. xhci_dbg(xhci, "@%llx %08x %08x %08x %08x\n",
  1956. (unsigned long long)
  1957. xhci_trb_virt_to_dma(xhci->event_ring->deq_seg, trb),
  1958. lower_32_bits(trb->link.segment_ptr),
  1959. upper_32_bits(trb->link.segment_ptr),
  1960. (unsigned int) trb->link.intr_target,
  1961. (unsigned int) trb->link.control);
  1962. if (status & STS_FATAL) {
  1963. xhci_warn(xhci, "WARNING: Host System Error\n");
  1964. xhci_halt(xhci);
  1965. hw_died:
  1966. xhci_to_hcd(xhci)->state = HC_STATE_HALT;
  1967. spin_unlock(&xhci->lock);
1968. return IRQ_HANDLED;
  1969. }
  1970. /*
  1971. * Clear the op reg interrupt status first,
  1972. * so we can receive interrupts from other MSI-X interrupters.
  1973. * Write 1 to clear the interrupt status.
  1974. */
  1975. status |= STS_EINT;
  1976. xhci_writel(xhci, status, &xhci->op_regs->status);
  1977. /* FIXME when MSI-X is supported and there are multiple vectors */
  1978. /* Clear the MSI-X event interrupt status */
  1979. if (hcd->irq != -1) {
  1980. u32 irq_pending;
  1981. /* Acknowledge the PCI interrupt */
  1982. irq_pending = xhci_readl(xhci, &xhci->ir_set->irq_pending);
  1983. irq_pending |= 0x3;
  1984. xhci_writel(xhci, irq_pending, &xhci->ir_set->irq_pending);
  1985. }
  1986. if (xhci->xhc_state & XHCI_STATE_DYING) {
  1987. xhci_dbg(xhci, "xHCI dying, ignoring interrupt. "
  1988. "Shouldn't IRQs be disabled?\n");
  1989. /* Clear the event handler busy flag (RW1C);
  1990. * the event ring should be empty.
  1991. */
  1992. temp_64 = xhci_read_64(xhci, &xhci->ir_set->erst_dequeue);
  1993. xhci_write_64(xhci, temp_64 | ERST_EHB,
  1994. &xhci->ir_set->erst_dequeue);
  1995. spin_unlock(&xhci->lock);
  1996. return IRQ_HANDLED;
  1997. }
  1998. event_ring_deq = xhci->event_ring->dequeue;
  1999. /* FIXME this should be a delayed service routine
  2000. * that clears the EHB.
  2001. */
  2002. xhci_handle_event(xhci);
  2003. temp_64 = xhci_read_64(xhci, &xhci->ir_set->erst_dequeue);
  2004. /* If necessary, update the HW's version of the event ring deq ptr. */
  2005. if (event_ring_deq != xhci->event_ring->dequeue) {
  2006. deq = xhci_trb_virt_to_dma(xhci->event_ring->deq_seg,
  2007. xhci->event_ring->dequeue);
  2008. if (deq == 0)
  2009. xhci_warn(xhci, "WARN something wrong with SW event "
  2010. "ring dequeue ptr.\n");
  2011. /* Update HC event ring dequeue pointer */
  2012. temp_64 &= ERST_PTR_MASK;
  2013. temp_64 |= ((u64) deq & (u64) ~ERST_PTR_MASK);
  2014. }
  2015. /* Clear the event handler busy flag (RW1C); event ring is empty. */
  2016. temp_64 |= ERST_EHB;
  2017. xhci_write_64(xhci, temp_64, &xhci->ir_set->erst_dequeue);
  2018. spin_unlock(&xhci->lock);
  2019. return IRQ_HANDLED;
  2020. }
  2021. irqreturn_t xhci_msi_irq(int irq, struct usb_hcd *hcd)
  2022. {
  2023. irqreturn_t ret;
  2024. set_bit(HCD_FLAG_SAW_IRQ, &hcd->flags);
  2025. ret = xhci_irq(hcd);
  2026. return ret;
  2027. }
  2028. /**** Endpoint Ring Operations ****/
  2029. /*
  2030. * Generic function for queueing a TRB on a ring.
  2031. * The caller must have checked to make sure there's room on the ring.
  2032. *
  2033. * @more_trbs_coming: Will you enqueue more TRBs before calling
  2034. * prepare_transfer()?
  2035. */
  2036. static void queue_trb(struct xhci_hcd *xhci, struct xhci_ring *ring,
  2037. bool consumer, bool more_trbs_coming,
  2038. u32 field1, u32 field2, u32 field3, u32 field4)
  2039. {
  2040. struct xhci_generic_trb *trb;
  2041. trb = &ring->enqueue->generic;
  2042. trb->field[0] = field1;
  2043. trb->field[1] = field2;
  2044. trb->field[2] = field3;
  2045. trb->field[3] = field4;
  2046. inc_enq(xhci, ring, consumer, more_trbs_coming);
  2047. }
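/*
 * Illustrative sketch, not driver code: what a single call to queue_trb()
 * looks like for the simplest case, a transfer no-op TRB.  Only field[3]
 * carries anything interesting here: the TRB type and the producer cycle
 * bit taken from the ring's current cycle state.  A real caller would first
 * call prepare_ring() to make sure there is room.  The helper name is made
 * up for this example.
 */
static inline void queue_noop_trb_example(struct xhci_hcd *xhci,
		struct xhci_ring *ring)
{
	queue_trb(xhci, ring, false /* not a consumer ring */,
			false /* no more TRBs coming for this TD */,
			0, 0,
			TRB_INTR_TARGET(0),
			TRB_TYPE(TRB_TR_NOOP) | ring->cycle_state);
}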
  2048. /*
  2049. * Does various checks on the endpoint ring, and makes it ready to queue num_trbs.
  2050. * FIXME allocate segments if the ring is full.
  2051. */
  2052. static int prepare_ring(struct xhci_hcd *xhci, struct xhci_ring *ep_ring,
  2053. u32 ep_state, unsigned int num_trbs, gfp_t mem_flags)
  2054. {
  2055. /* Make sure the endpoint has been added to xHC schedule */
  2056. xhci_dbg(xhci, "Endpoint state = 0x%x\n", ep_state);
  2057. switch (ep_state) {
  2058. case EP_STATE_DISABLED:
  2059. /*
  2060. * USB core changed config/interfaces without notifying us,
  2061. * or hardware is reporting the wrong state.
  2062. */
  2063. xhci_warn(xhci, "WARN urb submitted to disabled ep\n");
  2064. return -ENOENT;
  2065. case EP_STATE_ERROR:
  2066. xhci_warn(xhci, "WARN waiting for error on ep to be cleared\n");
  2067. /* FIXME event handling code for error needs to clear it */
  2068. /* XXX not sure if this should be -ENOENT or not */
  2069. return -EINVAL;
  2070. case EP_STATE_HALTED:
  2071. xhci_dbg(xhci, "WARN halted endpoint, queueing URB anyway.\n");
  2072. case EP_STATE_STOPPED:
  2073. case EP_STATE_RUNNING:
  2074. break;
  2075. default:
  2076. xhci_err(xhci, "ERROR unknown endpoint state for ep\n");
  2077. /*
  2078. * FIXME issue Configure Endpoint command to try to get the HC
  2079. * back into a known state.
  2080. */
  2081. return -EINVAL;
  2082. }
  2083. if (!room_on_ring(xhci, ep_ring, num_trbs)) {
  2084. /* FIXME allocate more room */
  2085. xhci_err(xhci, "ERROR no room on ep ring\n");
  2086. return -ENOMEM;
  2087. }
  2088. if (enqueue_is_link_trb(ep_ring)) {
  2089. struct xhci_ring *ring = ep_ring;
  2090. union xhci_trb *next;
  2091. xhci_dbg(xhci, "prepare_ring: pointing to link trb\n");
  2092. next = ring->enqueue;
  2093. while (last_trb(xhci, ring, ring->enq_seg, next)) {
  2094. /* If we're not dealing with 0.95 hardware,
  2095. * clear the chain bit.
  2096. */
  2097. if (!xhci_link_trb_quirk(xhci))
  2098. next->link.control &= ~TRB_CHAIN;
  2099. else
  2100. next->link.control |= TRB_CHAIN;
  2101. wmb();
  2102. next->link.control ^= (u32) TRB_CYCLE;
  2103. /* Toggle the cycle bit after the last ring segment. */
  2104. if (last_trb_on_last_seg(xhci, ring, ring->enq_seg, next)) {
  2105. ring->cycle_state = (ring->cycle_state ? 0 : 1);
  2106. if (!in_interrupt()) {
  2107. xhci_dbg(xhci, "queue_trb: Toggle cycle "
  2108. "state for ring %p = %i\n",
  2109. ring, (unsigned int)ring->cycle_state);
  2110. }
  2111. }
  2112. ring->enq_seg = ring->enq_seg->next;
  2113. ring->enqueue = ring->enq_seg->trbs;
  2114. next = ring->enqueue;
  2115. }
  2116. }
  2117. return 0;
  2118. }
  2119. static int prepare_transfer(struct xhci_hcd *xhci,
  2120. struct xhci_virt_device *xdev,
  2121. unsigned int ep_index,
  2122. unsigned int stream_id,
  2123. unsigned int num_trbs,
  2124. struct urb *urb,
  2125. unsigned int td_index,
  2126. gfp_t mem_flags)
  2127. {
  2128. int ret;
  2129. struct urb_priv *urb_priv;
  2130. struct xhci_td *td;
  2131. struct xhci_ring *ep_ring;
  2132. struct xhci_ep_ctx *ep_ctx = xhci_get_ep_ctx(xhci, xdev->out_ctx, ep_index);
  2133. ep_ring = xhci_stream_id_to_ring(xdev, ep_index, stream_id);
  2134. if (!ep_ring) {
  2135. xhci_dbg(xhci, "Can't prepare ring for bad stream ID %u\n",
  2136. stream_id);
  2137. return -EINVAL;
  2138. }
  2139. ret = prepare_ring(xhci, ep_ring,
  2140. ep_ctx->ep_info & EP_STATE_MASK,
  2141. num_trbs, mem_flags);
  2142. if (ret)
  2143. return ret;
  2144. urb_priv = urb->hcpriv;
  2145. td = urb_priv->td[td_index];
  2146. INIT_LIST_HEAD(&td->td_list);
  2147. INIT_LIST_HEAD(&td->cancelled_td_list);
  2148. if (td_index == 0) {
  2149. ret = usb_hcd_link_urb_to_ep(xhci_to_hcd(xhci), urb);
  2150. if (unlikely(ret)) {
  2151. xhci_urb_free_priv(xhci, urb_priv);
  2152. urb->hcpriv = NULL;
  2153. return ret;
  2154. }
  2155. }
  2156. td->urb = urb;
  2157. /* Add this TD to the tail of the endpoint ring's TD list */
  2158. list_add_tail(&td->td_list, &ep_ring->td_list);
  2159. td->start_seg = ep_ring->enq_seg;
  2160. td->first_trb = ep_ring->enqueue;
  2161. urb_priv->td[td_index] = td;
  2162. return 0;
  2163. }
  2164. static unsigned int count_sg_trbs_needed(struct xhci_hcd *xhci, struct urb *urb)
  2165. {
  2166. int num_sgs, num_trbs, running_total, temp, i;
  2167. struct scatterlist *sg;
  2168. sg = NULL;
  2169. num_sgs = urb->num_sgs;
  2170. temp = urb->transfer_buffer_length;
  2171. xhci_dbg(xhci, "count sg list trbs: \n");
  2172. num_trbs = 0;
  2173. for_each_sg(urb->sg, sg, num_sgs, i) {
  2174. unsigned int previous_total_trbs = num_trbs;
  2175. unsigned int len = sg_dma_len(sg);
  2176. /* Scatter gather list entries may cross 64KB boundaries */
  2177. running_total = TRB_MAX_BUFF_SIZE -
  2178. (sg_dma_address(sg) & (TRB_MAX_BUFF_SIZE - 1));
  2179. running_total &= TRB_MAX_BUFF_SIZE - 1;
  2180. if (running_total != 0)
  2181. num_trbs++;
  2182. /* How many more 64KB chunks to transfer, how many more TRBs? */
  2183. while (running_total < sg_dma_len(sg) && running_total < temp) {
  2184. num_trbs++;
  2185. running_total += TRB_MAX_BUFF_SIZE;
  2186. }
  2187. xhci_dbg(xhci, " sg #%d: dma = %#llx, len = %#x (%d), num_trbs = %d\n",
  2188. i, (unsigned long long)sg_dma_address(sg),
  2189. len, len, num_trbs - previous_total_trbs);
  2190. len = min_t(int, len, temp);
  2191. temp -= len;
  2192. if (temp == 0)
  2193. break;
  2194. }
  2195. xhci_dbg(xhci, "\n");
  2196. if (!in_interrupt())
  2197. xhci_dbg(xhci, "ep %#x - urb len = %d, sglist used, "
  2198. "num_trbs = %d\n",
  2199. urb->ep->desc.bEndpointAddress,
  2200. urb->transfer_buffer_length,
  2201. num_trbs);
  2202. return num_trbs;
  2203. }
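/*
 * Worked example for the counting above (illustrative numbers only): an sg
 * entry of 0x11000 bytes starting at a DMA address ending in ...FF00 has
 * 0x100 bytes before the first 64KB boundary, so that partial chunk costs
 * one TRB (running_total = 0x100); two more 64KB steps are then needed to
 * cover the remaining 0x10F00 bytes, for a total of three TRBs for this
 * entry.
 */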
  2204. static void check_trb_math(struct urb *urb, int num_trbs, int running_total)
  2205. {
  2206. if (num_trbs != 0)
  2207. dev_err(&urb->dev->dev, "%s - ep %#x - Miscalculated number of "
  2208. "TRBs, %d left\n", __func__,
  2209. urb->ep->desc.bEndpointAddress, num_trbs);
  2210. if (running_total != urb->transfer_buffer_length)
  2211. dev_err(&urb->dev->dev, "%s - ep %#x - Miscalculated tx length, "
  2212. "queued %#x (%d), asked for %#x (%d)\n",
  2213. __func__,
  2214. urb->ep->desc.bEndpointAddress,
  2215. running_total, running_total,
  2216. urb->transfer_buffer_length,
  2217. urb->transfer_buffer_length);
  2218. }
  2219. static void giveback_first_trb(struct xhci_hcd *xhci, int slot_id,
  2220. unsigned int ep_index, unsigned int stream_id, int start_cycle,
  2221. struct xhci_generic_trb *start_trb)
  2222. {
  2223. /*
  2224. * Pass all the TRBs to the hardware at once and make sure this write
  2225. * isn't reordered.
  2226. */
  2227. wmb();
  2228. if (start_cycle)
  2229. start_trb->field[3] |= start_cycle;
  2230. else
  2231. start_trb->field[3] &= ~0x1;
  2232. xhci_ring_ep_doorbell(xhci, slot_id, ep_index, stream_id);
  2233. }
  2234. /*
  2235. * xHCI uses normal TRBs for both bulk and interrupt. When the interrupt
  2236. * endpoint is to be serviced, the xHC will consume (at most) one TD. A TD
  2237. * (comprised of sg list entries) can take several service intervals to
  2238. * transmit.
  2239. */
  2240. int xhci_queue_intr_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
  2241. struct urb *urb, int slot_id, unsigned int ep_index)
  2242. {
  2243. struct xhci_ep_ctx *ep_ctx = xhci_get_ep_ctx(xhci,
  2244. xhci->devs[slot_id]->out_ctx, ep_index);
  2245. int xhci_interval;
  2246. int ep_interval;
  2247. xhci_interval = EP_INTERVAL_TO_UFRAMES(ep_ctx->ep_info);
  2248. ep_interval = urb->interval;
  2249. /* Convert to microframes */
  2250. if (urb->dev->speed == USB_SPEED_LOW ||
  2251. urb->dev->speed == USB_SPEED_FULL)
  2252. ep_interval *= 8;
  2253. /* FIXME change this to a warning and a suggestion to use the new API
  2254. * to set the polling interval (once the API is added).
  2255. */
  2256. if (xhci_interval != ep_interval) {
  2257. if (printk_ratelimit())
  2258. dev_dbg(&urb->dev->dev, "Driver uses different interval"
  2259. " (%d microframe%s) than xHCI "
  2260. "(%d microframe%s)\n",
  2261. ep_interval,
  2262. ep_interval == 1 ? "" : "s",
  2263. xhci_interval,
  2264. xhci_interval == 1 ? "" : "s");
  2265. urb->interval = xhci_interval;
  2266. /* Convert back to frames for LS/FS devices */
  2267. if (urb->dev->speed == USB_SPEED_LOW ||
  2268. urb->dev->speed == USB_SPEED_FULL)
  2269. urb->interval /= 8;
  2270. }
  2271. return xhci_queue_bulk_tx(xhci, GFP_ATOMIC, urb, slot_id, ep_index);
  2272. }
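/*
 * Worked example for the interval check above (illustrative numbers only):
 * a full-speed interrupt URB submitted with urb->interval = 4 frames is
 * first converted to 4 * 8 = 32 microframes.  If the endpoint context
 * reports an interval of 64 microframes, the two disagree, so urb->interval
 * is overwritten with 64 microframes and then converted back to 64 / 8 = 8
 * frames before the transfer is queued.
 */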
  2273. /*
  2274. * The TD size is the number of bytes remaining in the TD (including this TRB),
  2275. * right shifted by 10.
  2276. * It must fit in bits 21:17, so it can't be bigger than 31.
  2277. */
  2278. static u32 xhci_td_remainder(unsigned int remainder)
  2279. {
  2280. u32 max = (1 << (21 - 17 + 1)) - 1;
  2281. if ((remainder >> 10) >= max)
  2282. return max << 17;
  2283. else
  2284. return (remainder >> 10) << 17;
  2285. }
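/*
 * Worked example for xhci_td_remainder() (illustrative numbers only): with
 * 20480 bytes left in the TD, 20480 >> 10 = 20 fits in the 5-bit field, so
 * the function returns 20 << 17.  With 70000 bytes left, 70000 >> 10 = 68
 * exceeds the maximum of 31, so the value is clamped and the function
 * returns 31 << 17.
 */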
  2286. static int queue_bulk_sg_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
  2287. struct urb *urb, int slot_id, unsigned int ep_index)
  2288. {
  2289. struct xhci_ring *ep_ring;
  2290. unsigned int num_trbs;
  2291. struct urb_priv *urb_priv;
  2292. struct xhci_td *td;
  2293. struct scatterlist *sg;
  2294. int num_sgs;
  2295. int trb_buff_len, this_sg_len, running_total;
  2296. bool first_trb;
  2297. u64 addr;
  2298. bool more_trbs_coming;
  2299. struct xhci_generic_trb *start_trb;
  2300. int start_cycle;
  2301. ep_ring = xhci_urb_to_transfer_ring(xhci, urb);
  2302. if (!ep_ring)
  2303. return -EINVAL;
  2304. num_trbs = count_sg_trbs_needed(xhci, urb);
  2305. num_sgs = urb->num_sgs;
  2306. trb_buff_len = prepare_transfer(xhci, xhci->devs[slot_id],
  2307. ep_index, urb->stream_id,
  2308. num_trbs, urb, 0, mem_flags);
  2309. if (trb_buff_len < 0)
  2310. return trb_buff_len;
  2311. urb_priv = urb->hcpriv;
  2312. td = urb_priv->td[0];
  2313. /*
  2314. * Don't give the first TRB to the hardware (by toggling the cycle bit)
  2315. * until we've finished creating all the other TRBs. The ring's cycle
  2316. * state may change as we enqueue the other TRBs, so save it too.
  2317. */
  2318. start_trb = &ep_ring->enqueue->generic;
  2319. start_cycle = ep_ring->cycle_state;
  2320. running_total = 0;
  2321. /*
  2322. * How much data is in the first TRB?
  2323. *
  2324. * There are three forces at work for TRB buffer pointers and lengths:
  2325. * 1. We don't want to walk off the end of this sg-list entry buffer.
  2326. * 2. The transfer length that the driver requested may be smaller than
  2327. * the amount of memory allocated for this scatter-gather list.
2328. * 3. TRB buffers can't cross 64KB boundaries.
  2329. */
  2330. sg = urb->sg;
  2331. addr = (u64) sg_dma_address(sg);
  2332. this_sg_len = sg_dma_len(sg);
  2333. trb_buff_len = TRB_MAX_BUFF_SIZE - (addr & (TRB_MAX_BUFF_SIZE - 1));
  2334. trb_buff_len = min_t(int, trb_buff_len, this_sg_len);
  2335. if (trb_buff_len > urb->transfer_buffer_length)
  2336. trb_buff_len = urb->transfer_buffer_length;
  2337. xhci_dbg(xhci, "First length to xfer from 1st sglist entry = %u\n",
  2338. trb_buff_len);
  2339. first_trb = true;
  2340. /* Queue the first TRB, even if it's zero-length */
  2341. do {
  2342. u32 field = 0;
  2343. u32 length_field = 0;
  2344. u32 remainder = 0;
  2345. /* Don't change the cycle bit of the first TRB until later */
  2346. if (first_trb) {
  2347. first_trb = false;
  2348. if (start_cycle == 0)
  2349. field |= 0x1;
  2350. } else
  2351. field |= ep_ring->cycle_state;
  2352. /* Chain all the TRBs together; clear the chain bit in the last
  2353. * TRB to indicate it's the last TRB in the chain.
  2354. */
  2355. if (num_trbs > 1) {
  2356. field |= TRB_CHAIN;
  2357. } else {
  2358. /* FIXME - add check for ZERO_PACKET flag before this */
  2359. td->last_trb = ep_ring->enqueue;
  2360. field |= TRB_IOC;
  2361. }
  2362. xhci_dbg(xhci, " sg entry: dma = %#x, len = %#x (%d), "
  2363. "64KB boundary at %#x, end dma = %#x\n",
  2364. (unsigned int) addr, trb_buff_len, trb_buff_len,
  2365. (unsigned int) (addr + TRB_MAX_BUFF_SIZE) & ~(TRB_MAX_BUFF_SIZE - 1),
  2366. (unsigned int) addr + trb_buff_len);
  2367. if (TRB_MAX_BUFF_SIZE -
  2368. (addr & (TRB_MAX_BUFF_SIZE - 1)) < trb_buff_len) {
  2369. xhci_warn(xhci, "WARN: sg dma xfer crosses 64KB boundaries!\n");
  2370. xhci_dbg(xhci, "Next boundary at %#x, end dma = %#x\n",
  2371. (unsigned int) (addr + TRB_MAX_BUFF_SIZE) & ~(TRB_MAX_BUFF_SIZE - 1),
  2372. (unsigned int) addr + trb_buff_len);
  2373. }
  2374. remainder = xhci_td_remainder(urb->transfer_buffer_length -
2375. running_total);
  2376. length_field = TRB_LEN(trb_buff_len) |
  2377. remainder |
  2378. TRB_INTR_TARGET(0);
  2379. if (num_trbs > 1)
  2380. more_trbs_coming = true;
  2381. else
  2382. more_trbs_coming = false;
  2383. queue_trb(xhci, ep_ring, false, more_trbs_coming,
  2384. lower_32_bits(addr),
  2385. upper_32_bits(addr),
  2386. length_field,
  2387. /* We always want to know if the TRB was short,
  2388. * or we won't get an event when it completes.
  2389. * (Unless we use event data TRBs, which are a
  2390. * waste of space and HC resources.)
  2391. */
  2392. field | TRB_ISP | TRB_TYPE(TRB_NORMAL));
  2393. --num_trbs;
  2394. running_total += trb_buff_len;
  2395. /* Calculate length for next transfer --
  2396. * Are we done queueing all the TRBs for this sg entry?
  2397. */
  2398. this_sg_len -= trb_buff_len;
  2399. if (this_sg_len == 0) {
  2400. --num_sgs;
  2401. if (num_sgs == 0)
  2402. break;
  2403. sg = sg_next(sg);
  2404. addr = (u64) sg_dma_address(sg);
  2405. this_sg_len = sg_dma_len(sg);
  2406. } else {
  2407. addr += trb_buff_len;
  2408. }
  2409. trb_buff_len = TRB_MAX_BUFF_SIZE -
  2410. (addr & (TRB_MAX_BUFF_SIZE - 1));
  2411. trb_buff_len = min_t(int, trb_buff_len, this_sg_len);
  2412. if (running_total + trb_buff_len > urb->transfer_buffer_length)
  2413. trb_buff_len =
  2414. urb->transfer_buffer_length - running_total;
  2415. } while (running_total < urb->transfer_buffer_length);
  2416. check_trb_math(urb, num_trbs, running_total);
  2417. giveback_first_trb(xhci, slot_id, ep_index, urb->stream_id,
  2418. start_cycle, start_trb);
  2419. return 0;
  2420. }
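/*
 * Worked example for the first-TRB length logic above (illustrative numbers
 * only): suppose the first sg entry starts at a DMA address ending in ...FC00
 * with sg_dma_len() = 0x2000, and the URB asks for only 0x800 bytes.  The
 * 64KB boundary allows at most 0x10000 - 0xFC00 = 0x400 bytes, which is
 * already smaller than both the sg entry length and the requested transfer
 * length, so the first TRB carries 0x400 bytes and the next TRB starts
 * exactly on the boundary.
 */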
  2421. /* This is very similar to what ehci-q.c qtd_fill() does */
  2422. int xhci_queue_bulk_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
  2423. struct urb *urb, int slot_id, unsigned int ep_index)
  2424. {
  2425. struct xhci_ring *ep_ring;
  2426. struct urb_priv *urb_priv;
  2427. struct xhci_td *td;
  2428. int num_trbs;
  2429. struct xhci_generic_trb *start_trb;
  2430. bool first_trb;
  2431. bool more_trbs_coming;
  2432. int start_cycle;
  2433. u32 field, length_field;
  2434. int running_total, trb_buff_len, ret;
  2435. u64 addr;
  2436. if (urb->num_sgs)
  2437. return queue_bulk_sg_tx(xhci, mem_flags, urb, slot_id, ep_index);
  2438. ep_ring = xhci_urb_to_transfer_ring(xhci, urb);
  2439. if (!ep_ring)
  2440. return -EINVAL;
  2441. num_trbs = 0;
  2442. /* How much data is (potentially) left before the 64KB boundary? */
  2443. running_total = TRB_MAX_BUFF_SIZE -
  2444. (urb->transfer_dma & (TRB_MAX_BUFF_SIZE - 1));
  2445. running_total &= TRB_MAX_BUFF_SIZE - 1;
  2446. /* If there's some data on this 64KB chunk, or we have to send a
  2447. * zero-length transfer, we need at least one TRB
  2448. */
  2449. if (running_total != 0 || urb->transfer_buffer_length == 0)
  2450. num_trbs++;
  2451. /* How many more 64KB chunks to transfer, how many more TRBs? */
  2452. while (running_total < urb->transfer_buffer_length) {
  2453. num_trbs++;
  2454. running_total += TRB_MAX_BUFF_SIZE;
  2455. }
  2456. /* FIXME: this doesn't deal with URB_ZERO_PACKET - need one more */
  2457. if (!in_interrupt())
  2458. xhci_dbg(xhci, "ep %#x - urb len = %#x (%d), "
  2459. "addr = %#llx, num_trbs = %d\n",
  2460. urb->ep->desc.bEndpointAddress,
  2461. urb->transfer_buffer_length,
  2462. urb->transfer_buffer_length,
  2463. (unsigned long long)urb->transfer_dma,
  2464. num_trbs);
  2465. ret = prepare_transfer(xhci, xhci->devs[slot_id],
  2466. ep_index, urb->stream_id,
  2467. num_trbs, urb, 0, mem_flags);
  2468. if (ret < 0)
  2469. return ret;
  2470. urb_priv = urb->hcpriv;
  2471. td = urb_priv->td[0];
  2472. /*
  2473. * Don't give the first TRB to the hardware (by toggling the cycle bit)
  2474. * until we've finished creating all the other TRBs. The ring's cycle
  2475. * state may change as we enqueue the other TRBs, so save it too.
  2476. */
  2477. start_trb = &ep_ring->enqueue->generic;
  2478. start_cycle = ep_ring->cycle_state;
  2479. running_total = 0;
  2480. /* How much data is in the first TRB? */
  2481. addr = (u64) urb->transfer_dma;
  2482. trb_buff_len = TRB_MAX_BUFF_SIZE -
  2483. (urb->transfer_dma & (TRB_MAX_BUFF_SIZE - 1));
  2484. if (trb_buff_len > urb->transfer_buffer_length)
  2485. trb_buff_len = urb->transfer_buffer_length;
  2486. first_trb = true;
  2487. /* Queue the first TRB, even if it's zero-length */
  2488. do {
  2489. u32 remainder = 0;
  2490. field = 0;
  2491. /* Don't change the cycle bit of the first TRB until later */
  2492. if (first_trb) {
  2493. first_trb = false;
  2494. if (start_cycle == 0)
  2495. field |= 0x1;
  2496. } else
  2497. field |= ep_ring->cycle_state;
  2498. /* Chain all the TRBs together; clear the chain bit in the last
  2499. * TRB to indicate it's the last TRB in the chain.
  2500. */
  2501. if (num_trbs > 1) {
  2502. field |= TRB_CHAIN;
  2503. } else {
  2504. /* FIXME - add check for ZERO_PACKET flag before this */
  2505. td->last_trb = ep_ring->enqueue;
  2506. field |= TRB_IOC;
  2507. }
  2508. remainder = xhci_td_remainder(urb->transfer_buffer_length -
  2509. running_total);
  2510. length_field = TRB_LEN(trb_buff_len) |
  2511. remainder |
  2512. TRB_INTR_TARGET(0);
  2513. if (num_trbs > 1)
  2514. more_trbs_coming = true;
  2515. else
  2516. more_trbs_coming = false;
  2517. queue_trb(xhci, ep_ring, false, more_trbs_coming,
  2518. lower_32_bits(addr),
  2519. upper_32_bits(addr),
  2520. length_field,
  2521. /* We always want to know if the TRB was short,
  2522. * or we won't get an event when it completes.
  2523. * (Unless we use event data TRBs, which are a
  2524. * waste of space and HC resources.)
  2525. */
  2526. field | TRB_ISP | TRB_TYPE(TRB_NORMAL));
  2527. --num_trbs;
  2528. running_total += trb_buff_len;
  2529. /* Calculate length for next transfer */
  2530. addr += trb_buff_len;
  2531. trb_buff_len = urb->transfer_buffer_length - running_total;
  2532. if (trb_buff_len > TRB_MAX_BUFF_SIZE)
  2533. trb_buff_len = TRB_MAX_BUFF_SIZE;
  2534. } while (running_total < urb->transfer_buffer_length);
  2535. check_trb_math(urb, num_trbs, running_total);
  2536. giveback_first_trb(xhci, slot_id, ep_index, urb->stream_id,
  2537. start_cycle, start_trb);
  2538. return 0;
  2539. }
/* Caller must have locked xhci->lock */
int xhci_queue_ctrl_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
		struct urb *urb, int slot_id, unsigned int ep_index)
{
	struct xhci_ring *ep_ring;
	int num_trbs;
	int ret;
	struct usb_ctrlrequest *setup;
	struct xhci_generic_trb *start_trb;
	int start_cycle;
	u32 field, length_field;
	struct urb_priv *urb_priv;
	struct xhci_td *td;

	ep_ring = xhci_urb_to_transfer_ring(xhci, urb);
	if (!ep_ring)
		return -EINVAL;

	/*
	 * Need to copy setup packet into setup TRB, so we can't use the setup
	 * DMA address.
	 */
	if (!urb->setup_packet)
		return -EINVAL;

	if (!in_interrupt())
		xhci_dbg(xhci, "Queueing ctrl tx for slot id %d, ep %d\n",
				slot_id, ep_index);
	/* 1 TRB for setup, 1 for status */
	num_trbs = 2;
	/*
	 * Don't need to check if we need additional event data and normal TRBs,
	 * since data in control transfers will never get bigger than 16MB
	 * XXX: can we get a buffer that crosses 64KB boundaries?
	 */
	if (urb->transfer_buffer_length > 0)
		num_trbs++;
	ret = prepare_transfer(xhci, xhci->devs[slot_id],
			ep_index, urb->stream_id,
			num_trbs, urb, 0, mem_flags);
	if (ret < 0)
		return ret;

	urb_priv = urb->hcpriv;
	td = urb_priv->td[0];

	/*
	 * Don't give the first TRB to the hardware (by toggling the cycle bit)
	 * until we've finished creating all the other TRBs.  The ring's cycle
	 * state may change as we enqueue the other TRBs, so save it too.
	 */
	start_trb = &ep_ring->enqueue->generic;
	start_cycle = ep_ring->cycle_state;

	/* Queue setup TRB - see section 6.4.1.2.1 */
	/* FIXME better way to translate setup_packet into two u32 fields? */
	setup = (struct usb_ctrlrequest *) urb->setup_packet;
	field = 0;
	field |= TRB_IDT | TRB_TYPE(TRB_SETUP);
	if (start_cycle == 0)
		field |= 0x1;
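	/*
	 * TRB_IDT ("immediate data") above means the 8-byte setup packet is
	 * carried in the TRB's parameter words themselves rather than fetched
	 * from a DMA address, so its bytes are packed into the first two u32s
	 * passed to queue_trb() below.
	 */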
	queue_trb(xhci, ep_ring, false, true,
			/* FIXME endianness is probably going to bite my ass here. */
			setup->bRequestType | setup->bRequest << 8 | setup->wValue << 16,
			setup->wIndex | setup->wLength << 16,
			TRB_LEN(8) | TRB_INTR_TARGET(0),
			/* Immediate data in pointer */
			field);

	/* If there's data, queue data TRBs */
	field = 0;
	length_field = TRB_LEN(urb->transfer_buffer_length) |
		xhci_td_remainder(urb->transfer_buffer_length) |
		TRB_INTR_TARGET(0);
	if (urb->transfer_buffer_length > 0) {
		if (setup->bRequestType & USB_DIR_IN)
			field |= TRB_DIR_IN;
		queue_trb(xhci, ep_ring, false, true,
				lower_32_bits(urb->transfer_dma),
				upper_32_bits(urb->transfer_dma),
				length_field,
				/* Event on short tx */
				field | TRB_ISP | TRB_TYPE(TRB_DATA) | ep_ring->cycle_state);
	}

	/* Save the position of the last TRB in the TD (the status stage TRB
	 * queued below).
	 */
	td->last_trb = ep_ring->enqueue;

	/* Queue status TRB - see Table 7 and sections 4.11.2.2 and 6.4.1.2.3 */
	/* If the device sent data, the status stage is an OUT transfer */
	if (urb->transfer_buffer_length > 0 && setup->bRequestType & USB_DIR_IN)
		field = 0;
	else
		field = TRB_DIR_IN;
	queue_trb(xhci, ep_ring, false, false,
			0,
			0,
			TRB_INTR_TARGET(0),
			/* Event on completion */
			field | TRB_IOC | TRB_TYPE(TRB_STATUS) | ep_ring->cycle_state);

	giveback_first_trb(xhci, slot_id, ep_index, 0,
			start_cycle, start_trb);
	return 0;
}
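
/*
 * Count the TRBs needed for the i-th isochronous frame descriptor: one TRB
 * for the (possibly partial) chunk from the buffer start up to the next
 * TRB_MAX_BUFF_SIZE boundary (skipped if the start is already aligned), plus
 * one TRB for each remaining TRB_MAX_BUFF_SIZE chunk of the frame's buffer.
 */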
static int count_isoc_trbs_needed(struct xhci_hcd *xhci,
		struct urb *urb, int i)
{
	int num_trbs = 0;
	u64 addr, td_len, running_total;

	addr = (u64) (urb->transfer_dma + urb->iso_frame_desc[i].offset);
	td_len = urb->iso_frame_desc[i].length;

	running_total = TRB_MAX_BUFF_SIZE - (addr & (TRB_MAX_BUFF_SIZE - 1));
	running_total &= TRB_MAX_BUFF_SIZE - 1;
	if (running_total != 0)
		num_trbs++;

	while (running_total < td_len) {
		num_trbs++;
		running_total += TRB_MAX_BUFF_SIZE;
	}

	return num_trbs;
}

/* This is for isoc transfer */
static int xhci_queue_isoc_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
		struct urb *urb, int slot_id, unsigned int ep_index)
{
	struct xhci_ring *ep_ring;
	struct urb_priv *urb_priv;
	struct xhci_td *td;
	int num_tds, trbs_per_td;
	struct xhci_generic_trb *start_trb;
	bool first_trb;
	int start_cycle;
	u32 field, length_field;
	int running_total, trb_buff_len, td_len, td_remain_len, ret;
	u64 start_addr, addr;
	int i, j;
	bool more_trbs_coming;

	ep_ring = xhci->devs[slot_id]->eps[ep_index].ring;

	num_tds = urb->number_of_packets;
	if (num_tds < 1) {
		xhci_dbg(xhci, "Isoc URB with zero packets?\n");
		return -EINVAL;
	}

	if (!in_interrupt())
		xhci_dbg(xhci, "ep %#x - urb len = %#x (%d),"
				" addr = %#llx, num_tds = %d\n",
				urb->ep->desc.bEndpointAddress,
				urb->transfer_buffer_length,
				urb->transfer_buffer_length,
				(unsigned long long)urb->transfer_dma,
				num_tds);

	start_addr = (u64) urb->transfer_dma;
	start_trb = &ep_ring->enqueue->generic;
	start_cycle = ep_ring->cycle_state;

	/* Queue the first TRB, even if it's zero-length */
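	/*
	 * Each entry in urb->iso_frame_desc[] becomes one TD.  Within a TD the
	 * first TRB is an Isoch TRB and any following TRBs are Normal TRBs;
	 * all but the last are chained, and only the last one sets TRB_IOC.
	 */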
	for (i = 0; i < num_tds; i++) {
		first_trb = true;

		running_total = 0;
		addr = start_addr + urb->iso_frame_desc[i].offset;
		td_len = urb->iso_frame_desc[i].length;
		td_remain_len = td_len;

		trbs_per_td = count_isoc_trbs_needed(xhci, urb, i);

		ret = prepare_transfer(xhci, xhci->devs[slot_id], ep_index,
				urb->stream_id, trbs_per_td, urb, i, mem_flags);
		if (ret < 0)
			return ret;

		urb_priv = urb->hcpriv;
		td = urb_priv->td[i];

		for (j = 0; j < trbs_per_td; j++) {
			u32 remainder = 0;
			field = 0;

			if (first_trb) {
				/* Queue the isoc TRB */
				field |= TRB_TYPE(TRB_ISOC);
				/* Assume URB_ISO_ASAP is set */
				field |= TRB_SIA;
				if (i == 0) {
					if (start_cycle == 0)
						field |= 0x1;
				} else
					field |= ep_ring->cycle_state;
				first_trb = false;
			} else {
				/* Queue other normal TRBs */
				field |= TRB_TYPE(TRB_NORMAL);
				field |= ep_ring->cycle_state;
			}

			/* Chain all the TRBs together; clear the chain bit in
			 * the last TRB to indicate it's the last TRB in the
			 * chain.
			 */
			if (j < trbs_per_td - 1) {
				field |= TRB_CHAIN;
				more_trbs_coming = true;
			} else {
				td->last_trb = ep_ring->enqueue;
				field |= TRB_IOC;
				more_trbs_coming = false;
			}

			/* Calculate TRB length */
			trb_buff_len = TRB_MAX_BUFF_SIZE -
				(addr & ((1 << TRB_MAX_BUFF_SHIFT) - 1));
			if (trb_buff_len > td_remain_len)
				trb_buff_len = td_remain_len;

			remainder = xhci_td_remainder(td_len - running_total);
			length_field = TRB_LEN(trb_buff_len) |
				remainder |
				TRB_INTR_TARGET(0);
			queue_trb(xhci, ep_ring, false, more_trbs_coming,
					lower_32_bits(addr),
					upper_32_bits(addr),
					length_field,
					/* We always want to know if the TRB was short,
					 * or we won't get an event when it completes.
					 * (Unless we use event data TRBs, which are a
					 * waste of space and HC resources.)
					 */
					field | TRB_ISP);
			running_total += trb_buff_len;

			addr += trb_buff_len;
			td_remain_len -= trb_buff_len;
		}
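
		/*
		 * running_total should now equal the frame descriptor length;
		 * anything else means this loop and count_isoc_trbs_needed()
		 * disagreed about how the buffer was split into TRBs.
		 */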
		/* Check TD length */
		if (running_total != td_len) {
			xhci_err(xhci, "ISOC TD length mismatch\n");
			return -EINVAL;
		}
	}

	giveback_first_trb(xhci, slot_id, ep_index, urb->stream_id,
			start_cycle, start_trb);
	return 0;
}

/*
 * Check the transfer ring to guarantee there is enough room for the whole URB,
 * then update the ISO URB's start_frame and interval.
 * The interval is adjusted the same way xhci_queue_intr_tx does it; for now the
 * current xHCI frame index is simply used to set urb->start_frame.
 * URB_ISO_ASAP is always assumed, and urb->start_frame is NEVER used as input.
 */
int xhci_queue_isoc_tx_prepare(struct xhci_hcd *xhci, gfp_t mem_flags,
		struct urb *urb, int slot_id, unsigned int ep_index)
{
	struct xhci_virt_device *xdev;
	struct xhci_ring *ep_ring;
	struct xhci_ep_ctx *ep_ctx;
	int start_frame;
	int xhci_interval;
	int ep_interval;
	int num_tds, num_trbs, i;
	int ret;

	xdev = xhci->devs[slot_id];
	ep_ring = xdev->eps[ep_index].ring;
	ep_ctx = xhci_get_ep_ctx(xhci, xdev->out_ctx, ep_index);

	num_trbs = 0;
	num_tds = urb->number_of_packets;
	for (i = 0; i < num_tds; i++)
		num_trbs += count_isoc_trbs_needed(xhci, urb, i);

	/* Check the ring to guarantee there is enough room for the whole URB.
	 * Do not insert any of the URB's TDs onto the ring if the check fails.
	 */
	ret = prepare_ring(xhci, ep_ring, ep_ctx->ep_info & EP_STATE_MASK,
			num_trbs, mem_flags);
	if (ret)
		return ret;

	start_frame = xhci_readl(xhci, &xhci->run_regs->microframe_index);
	start_frame &= 0x3fff;

	urb->start_frame = start_frame;
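	/*
	 * MFINDEX counts 125us microframes.  Low- and full-speed devices are
	 * scheduled in 1ms frames, so shift the index down by 3 for them.
	 */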
	if (urb->dev->speed == USB_SPEED_LOW ||
			urb->dev->speed == USB_SPEED_FULL)
		urb->start_frame >>= 3;

	xhci_interval = EP_INTERVAL_TO_UFRAMES(ep_ctx->ep_info);
	ep_interval = urb->interval;

	/* Convert to microframes */
	if (urb->dev->speed == USB_SPEED_LOW ||
			urb->dev->speed == USB_SPEED_FULL)
		ep_interval *= 8;

	/* FIXME change this to a warning and a suggestion to use the new API
	 * to set the polling interval (once the API is added).
	 */
	if (xhci_interval != ep_interval) {
		if (printk_ratelimit())
			dev_dbg(&urb->dev->dev, "Driver uses different interval"
					" (%d microframe%s) than xHCI "
					"(%d microframe%s)\n",
					ep_interval,
					ep_interval == 1 ? "" : "s",
					xhci_interval,
					xhci_interval == 1 ? "" : "s");
		urb->interval = xhci_interval;
		/* Convert back to frames for LS/FS devices */
		if (urb->dev->speed == USB_SPEED_LOW ||
				urb->dev->speed == USB_SPEED_FULL)
			urb->interval /= 8;
	}
	return xhci_queue_isoc_tx(xhci, GFP_ATOMIC, urb, slot_id, ep_index);
}

/****		Command Ring Operations		****/

/* Generic function for queueing a command TRB on the command ring.
 * Check to make sure there's room on the command ring for one command TRB.
 * Also check that there's room reserved for commands that must not fail.
 * If this is a command that must not fail, meaning command_must_succeed = TRUE,
 * then only check for the number of reserved spots.
 * Don't decrement xhci->cmd_ring_reserved_trbs after we've queued the TRB
 * because the command event handler may want to resubmit a failed command.
 */
static int queue_command(struct xhci_hcd *xhci, u32 field1, u32 field2,
		u32 field3, u32 field4, bool command_must_succeed)
{
	int reserved_trbs = xhci->cmd_ring_reserved_trbs;
	int ret;
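
	/*
	 * Commands that are allowed to fail must not eat into the TRBs
	 * reserved for must-succeed commands, so ask prepare_ring() to keep
	 * one extra slot free for them.
	 */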
	if (!command_must_succeed)
		reserved_trbs++;

	ret = prepare_ring(xhci, xhci->cmd_ring, EP_STATE_RUNNING,
			reserved_trbs, GFP_ATOMIC);
	if (ret < 0) {
		xhci_err(xhci, "ERR: No room for command on command ring\n");
		if (command_must_succeed)
			xhci_err(xhci, "ERR: Reserved TRB counting for "
					"unfailable commands failed.\n");
		return ret;
	}
	queue_trb(xhci, xhci->cmd_ring, false, false, field1, field2, field3,
			field4 | xhci->cmd_ring->cycle_state);
	return 0;
}

/* Queue a no-op command on the command ring */
static int queue_cmd_noop(struct xhci_hcd *xhci)
{
	return queue_command(xhci, 0, 0, 0, TRB_TYPE(TRB_CMD_NOOP), false);
}

/*
 * Place a no-op command on the command ring to test the command and
 * event ring.
 */
void *xhci_setup_one_noop(struct xhci_hcd *xhci)
{
	if (queue_cmd_noop(xhci) < 0)
		return NULL;
	xhci->noops_submitted++;
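	/*
	 * Hand back xhci_ring_cmd_db so the caller can ring the command ring
	 * doorbell once it is ready for the no-op to be executed.
	 */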
	return xhci_ring_cmd_db;
}

/* Queue a slot enable or disable request on the command ring */
int xhci_queue_slot_control(struct xhci_hcd *xhci, u32 trb_type, u32 slot_id)
{
	return queue_command(xhci, 0, 0, 0,
			TRB_TYPE(trb_type) | SLOT_ID_FOR_TRB(slot_id), false);
}

/* Queue an address device command TRB */
int xhci_queue_address_device(struct xhci_hcd *xhci, dma_addr_t in_ctx_ptr,
		u32 slot_id)
{
	return queue_command(xhci, lower_32_bits(in_ctx_ptr),
			upper_32_bits(in_ctx_ptr), 0,
			TRB_TYPE(TRB_ADDR_DEV) | SLOT_ID_FOR_TRB(slot_id),
			false);
}

int xhci_queue_vendor_command(struct xhci_hcd *xhci,
		u32 field1, u32 field2, u32 field3, u32 field4)
{
	return queue_command(xhci, field1, field2, field3, field4, false);
}

/* Queue a reset device command TRB */
int xhci_queue_reset_device(struct xhci_hcd *xhci, u32 slot_id)
{
	return queue_command(xhci, 0, 0, 0,
			TRB_TYPE(TRB_RESET_DEV) | SLOT_ID_FOR_TRB(slot_id),
			false);
}

/* Queue a configure endpoint command TRB */
int xhci_queue_configure_endpoint(struct xhci_hcd *xhci, dma_addr_t in_ctx_ptr,
		u32 slot_id, bool command_must_succeed)
{
	return queue_command(xhci, lower_32_bits(in_ctx_ptr),
			upper_32_bits(in_ctx_ptr), 0,
			TRB_TYPE(TRB_CONFIG_EP) | SLOT_ID_FOR_TRB(slot_id),
			command_must_succeed);
}

/* Queue an evaluate context command TRB */
int xhci_queue_evaluate_context(struct xhci_hcd *xhci, dma_addr_t in_ctx_ptr,
		u32 slot_id)
{
	return queue_command(xhci, lower_32_bits(in_ctx_ptr),
			upper_32_bits(in_ctx_ptr), 0,
			TRB_TYPE(TRB_EVAL_CONTEXT) | SLOT_ID_FOR_TRB(slot_id),
			false);
}

/*
 * Suspend is set to indicate "Stop Endpoint Command" is being issued to stop
 * activity on an endpoint that is about to be suspended.
 */
int xhci_queue_stop_endpoint(struct xhci_hcd *xhci, int slot_id,
		unsigned int ep_index, int suspend)
{
	u32 trb_slot_id = SLOT_ID_FOR_TRB(slot_id);
	u32 trb_ep_index = EP_ID_FOR_TRB(ep_index);
	u32 type = TRB_TYPE(TRB_STOP_RING);
	u32 trb_suspend = SUSPEND_PORT_FOR_TRB(suspend);

	return queue_command(xhci, 0, 0, 0,
			trb_slot_id | trb_ep_index | type | trb_suspend, false);
}

/* Set Transfer Ring Dequeue Pointer command.
 * This should not be used for endpoints that have streams enabled.
 */
static int queue_set_tr_deq(struct xhci_hcd *xhci, int slot_id,
		unsigned int ep_index, unsigned int stream_id,
		struct xhci_segment *deq_seg,
		union xhci_trb *deq_ptr, u32 cycle_state)
{
	dma_addr_t addr;
	u32 trb_slot_id = SLOT_ID_FOR_TRB(slot_id);
	u32 trb_ep_index = EP_ID_FOR_TRB(ep_index);
	u32 trb_stream_id = STREAM_ID_FOR_TRB(stream_id);
	u32 type = TRB_TYPE(TRB_SET_DEQ);

	addr = xhci_trb_virt_to_dma(deq_seg, deq_ptr);
	if (addr == 0) {
		xhci_warn(xhci, "WARN Cannot submit Set TR Deq Ptr\n");
		xhci_warn(xhci, "WARN deq seg = %p, deq pt = %p\n",
				deq_seg, deq_ptr);
		return 0;
	}
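	/*
	 * TRBs are 16-byte aligned, so the low bits of the dequeue pointer are
	 * free; bit 0 of the pointer field carries the new Dequeue Cycle State
	 * (DCS) the HC should adopt, which is why cycle_state is OR'd in below.
	 */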
	return queue_command(xhci, lower_32_bits(addr) | cycle_state,
			upper_32_bits(addr), trb_stream_id,
			trb_slot_id | trb_ep_index | type, false);
}

int xhci_queue_reset_ep(struct xhci_hcd *xhci, int slot_id,
		unsigned int ep_index)
{
	u32 trb_slot_id = SLOT_ID_FOR_TRB(slot_id);
	u32 trb_ep_index = EP_ID_FOR_TRB(ep_index);
	u32 type = TRB_TYPE(TRB_RESET_EP);

	return queue_command(xhci, 0, 0, 0, trb_slot_id | trb_ep_index | type,
			false);
}