xhci-ring.c

/*
 * xHCI host controller driver
 *
 * Copyright (C) 2008 Intel Corp.
 *
 * Author: Sarah Sharp
 * Some code borrowed from the Linux EHCI driver.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
 * or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software Foundation,
 * Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */
/*
 * Ring initialization rules:
 * 1. Each segment is initialized to zero, except for link TRBs.
 * 2. Ring cycle state = 0.  This represents Producer Cycle State (PCS) or
 *    Consumer Cycle State (CCS), depending on ring function.
 * 3. Enqueue pointer = dequeue pointer = address of first TRB in the segment.
 *
 * Ring behavior rules:
 * 1. A ring is empty if enqueue == dequeue.  This means there will always be
 *    at least one free TRB in the ring.  This is useful if you want to turn
 *    that TRB into a link TRB and expand the ring.
 * 2. When incrementing an enqueue or dequeue pointer, if the next TRB is a
 *    link TRB, then load the pointer with the address in the link TRB.  If the
 *    link TRB had its toggle bit set, you may need to update the ring cycle
 *    state (see cycle bit rules).  You may have to do this multiple times
 *    until you reach a non-link TRB.
 * 3. A ring is full if enqueue++ (for the definition of increment above)
 *    equals the dequeue pointer.
 *
 * Cycle bit rules:
 * 1. When a consumer increments a dequeue pointer and encounters a toggle bit
 *    in a link TRB, it must toggle the ring cycle state.
 * 2. When a producer increments an enqueue pointer and encounters a toggle bit
 *    in a link TRB, it must toggle the ring cycle state.
 *
 * Producer rules:
 * 1. Check if the ring is full before you enqueue.
 * 2. Write the ring cycle state to the cycle bit in the TRB you're enqueuing.
 *    Update the enqueue pointer between each write (which may update the ring
 *    cycle state).
 * 3. Notify the consumer.  If SW is the producer, it rings the doorbell for
 *    command and endpoint rings.  If the HC is the producer for the event
 *    ring, it generates an interrupt according to interrupt moderation rules.
 *
 * Consumer rules:
 * 1. Check if the TRB belongs to you.  If the cycle bit == your ring cycle
 *    state, the TRB is owned by the consumer.
 * 2. Update the dequeue pointer (which may update the ring cycle state) and
 *    continue processing TRBs until you reach a TRB which is not owned by you.
 * 3. Notify the producer.  SW is the consumer for the event ring, and it
 *    updates the event ring dequeue pointer.  HC is the consumer for the
 *    command and endpoint rings; it generates events on the event ring for
 *    these.
 */
#include <linux/scatterlist.h>
#include <linux/slab.h>
#include "xhci.h"
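
/*
 * Illustrative sketch only: a minimal restatement of Consumer rule 1 above
 * (a TRB belongs to the consumer when its cycle bit matches the ring cycle
 * state), assuming the TRB_CYCLE mask and struct layouts from xhci.h.  The
 * helper name is hypothetical and nothing in this driver calls it.
 */
static inline bool example_trb_owned_by_consumer(struct xhci_ring *ring,
		union xhci_trb *trb)
{
	/* Compare the TRB's cycle bit with the ring's current cycle state */
	return (trb->generic.field[3] & TRB_CYCLE) == ring->cycle_state;
}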
/*
 * Returns zero if the TRB isn't in this segment, otherwise it returns the DMA
 * address of the TRB.
 */
dma_addr_t xhci_trb_virt_to_dma(struct xhci_segment *seg,
		union xhci_trb *trb)
{
	unsigned long segment_offset;

	if (!seg || !trb || trb < seg->trbs)
		return 0;
	/* offset in TRBs */
	segment_offset = trb - seg->trbs;
	if (segment_offset > TRBS_PER_SEGMENT)
		return 0;
	return seg->dma + (segment_offset * sizeof(*trb));
}
/* Does this link TRB point to the first segment in a ring,
 * or was the previous TRB the last TRB on the last segment in the ERST?
 */
static inline bool last_trb_on_last_seg(struct xhci_hcd *xhci, struct xhci_ring *ring,
		struct xhci_segment *seg, union xhci_trb *trb)
{
	if (ring == xhci->event_ring)
		return (trb == &seg->trbs[TRBS_PER_SEGMENT]) &&
			(seg->next == xhci->event_ring->first_seg);
	else
		return trb->link.control & LINK_TOGGLE;
}
/* Is this TRB a link TRB or was the last TRB the last TRB in this event ring
 * segment?  I.e. would the updated event TRB pointer step off the end of the
 * event seg?
 */
static inline int last_trb(struct xhci_hcd *xhci, struct xhci_ring *ring,
		struct xhci_segment *seg, union xhci_trb *trb)
{
	if (ring == xhci->event_ring)
		return trb == &seg->trbs[TRBS_PER_SEGMENT];
	else
		return (trb->link.control & TRB_TYPE_BITMASK) == TRB_TYPE(TRB_LINK);
}

static inline int enqueue_is_link_trb(struct xhci_ring *ring)
{
	struct xhci_link_trb *link = &ring->enqueue->link;
	return ((link->control & TRB_TYPE_BITMASK) == TRB_TYPE(TRB_LINK));
}
/* Updates trb to point to the next TRB in the ring, and updates seg if the next
 * TRB is in a new segment.  This does not skip over link TRBs, and it does not
 * affect the ring dequeue or enqueue pointers.
 */
static void next_trb(struct xhci_hcd *xhci,
		struct xhci_ring *ring,
		struct xhci_segment **seg,
		union xhci_trb **trb)
{
	if (last_trb(xhci, ring, *seg, *trb)) {
		*seg = (*seg)->next;
		*trb = ((*seg)->trbs);
	} else {
		(*trb)++;
	}
}
/*
 * See Cycle bit rules.  SW is the consumer for the event ring only.
 * Don't make a ring full of link TRBs.  That would be dumb and this would loop.
 */
static void inc_deq(struct xhci_hcd *xhci, struct xhci_ring *ring, bool consumer)
{
	union xhci_trb *next = ++(ring->dequeue);
	unsigned long long addr;

	ring->deq_updates++;
	/* Update the dequeue pointer further if that was a link TRB or we're at
	 * the end of an event ring segment (which doesn't have link TRBs)
	 */
	while (last_trb(xhci, ring, ring->deq_seg, next)) {
		if (consumer && last_trb_on_last_seg(xhci, ring, ring->deq_seg, next)) {
			ring->cycle_state = (ring->cycle_state ? 0 : 1);
			if (!in_interrupt())
				xhci_dbg(xhci, "Toggle cycle state for ring %p = %i\n",
						ring,
						(unsigned int) ring->cycle_state);
		}
		ring->deq_seg = ring->deq_seg->next;
		ring->dequeue = ring->deq_seg->trbs;
		next = ring->dequeue;
	}
	addr = (unsigned long long) xhci_trb_virt_to_dma(ring->deq_seg, ring->dequeue);
	if (ring == xhci->event_ring)
		xhci_dbg(xhci, "Event ring deq = 0x%llx (DMA)\n", addr);
	else if (ring == xhci->cmd_ring)
		xhci_dbg(xhci, "Command ring deq = 0x%llx (DMA)\n", addr);
	else
		xhci_dbg(xhci, "Ring deq = 0x%llx (DMA)\n", addr);
}
/*
 * See Cycle bit rules.  SW is the consumer for the event ring only.
 * Don't make a ring full of link TRBs.  That would be dumb and this would loop.
 *
 * If we've just enqueued a TRB that is in the middle of a TD (meaning the
 * chain bit is set), then set the chain bit in all the following link TRBs.
 * If we've enqueued the last TRB in a TD, make sure the following link TRBs
 * have their chain bit cleared (so that each Link TRB is a separate TD).
 *
 * Section 6.4.4.1 of the 0.95 spec says link TRBs cannot have the chain bit
 * set, but other sections talk about dealing with the chain bit set.  This was
 * fixed in the 0.96 specification errata, but we have to assume that all 0.95
 * xHCI hardware can't handle the chain bit being cleared on a link TRB.
 */
static void inc_enq(struct xhci_hcd *xhci, struct xhci_ring *ring, bool consumer)
{
	u32 chain;
	union xhci_trb *next;
	unsigned long long addr;

	chain = ring->enqueue->generic.field[3] & TRB_CHAIN;
	next = ++(ring->enqueue);
	ring->enq_updates++;
	/* Update the enqueue pointer further if that was a link TRB or we're at
	 * the end of an event ring segment (which doesn't have link TRBs)
	 */
	while (last_trb(xhci, ring, ring->enq_seg, next)) {
		if (!consumer) {
			if (ring != xhci->event_ring) {
				if (chain) {
					next->link.control |= TRB_CHAIN;
					/* Give this link TRB to the hardware */
					wmb();
					next->link.control ^= TRB_CYCLE;
				} else {
					break;
				}
			}
			/* Toggle the cycle bit after the last ring segment. */
			if (last_trb_on_last_seg(xhci, ring, ring->enq_seg, next)) {
				ring->cycle_state = (ring->cycle_state ? 0 : 1);
				if (!in_interrupt())
					xhci_dbg(xhci, "Toggle cycle state for ring %p = %i\n",
							ring,
							(unsigned int) ring->cycle_state);
			}
		}
		ring->enq_seg = ring->enq_seg->next;
		ring->enqueue = ring->enq_seg->trbs;
		next = ring->enqueue;
	}
	addr = (unsigned long long) xhci_trb_virt_to_dma(ring->enq_seg, ring->enqueue);
	if (ring == xhci->event_ring)
		xhci_dbg(xhci, "Event ring enq = 0x%llx (DMA)\n", addr);
	else if (ring == xhci->cmd_ring)
		xhci_dbg(xhci, "Command ring enq = 0x%llx (DMA)\n", addr);
	else
		xhci_dbg(xhci, "Ring enq = 0x%llx (DMA)\n", addr);
}
/*
 * Check to see if there's room to enqueue num_trbs on the ring.  See rules
 * above.
 * FIXME: this would be simpler and faster if we just kept track of the number
 * of free TRBs in a ring.
 */
static int room_on_ring(struct xhci_hcd *xhci, struct xhci_ring *ring,
		unsigned int num_trbs)
{
	int i;
	union xhci_trb *enq = ring->enqueue;
	struct xhci_segment *enq_seg = ring->enq_seg;
	struct xhci_segment *cur_seg;
	unsigned int left_on_ring;

	/* If we are currently pointing to a link TRB, advance the
	 * enqueue pointer before checking for space */
	while (last_trb(xhci, ring, enq_seg, enq)) {
		enq_seg = enq_seg->next;
		enq = enq_seg->trbs;
	}

	/* Check if ring is empty */
	if (enq == ring->dequeue) {
		/* Can't use link trbs */
		left_on_ring = TRBS_PER_SEGMENT - 1;
		for (cur_seg = enq_seg->next; cur_seg != enq_seg;
				cur_seg = cur_seg->next)
			left_on_ring += TRBS_PER_SEGMENT - 1;

		/* Always need one TRB free in the ring. */
		left_on_ring -= 1;
		if (num_trbs > left_on_ring) {
			xhci_warn(xhci, "Not enough room on ring; "
					"need %u TRBs, %u TRBs left\n",
					num_trbs, left_on_ring);
			return 0;
		}
		return 1;
	}
	/* Make sure there's an extra empty TRB available */
	for (i = 0; i <= num_trbs; ++i) {
		if (enq == ring->dequeue)
			return 0;
		enq++;
		while (last_trb(xhci, ring, enq_seg, enq)) {
			enq_seg = enq_seg->next;
			enq = enq_seg->trbs;
		}
	}
	return 1;
}
void xhci_set_hc_event_deq(struct xhci_hcd *xhci)
{
	u64 temp;
	dma_addr_t deq;

	deq = xhci_trb_virt_to_dma(xhci->event_ring->deq_seg,
			xhci->event_ring->dequeue);
	if (deq == 0 && !in_interrupt())
		xhci_warn(xhci, "WARN something wrong with SW event ring "
				"dequeue ptr.\n");
	/* Update HC event ring dequeue pointer */
	temp = xhci_read_64(xhci, &xhci->ir_set->erst_dequeue);
	temp &= ERST_PTR_MASK;
	/* Don't clear the EHB bit (which is RW1C) because
	 * there might be more events to service.
	 */
	temp &= ~ERST_EHB;
	xhci_dbg(xhci, "// Write event ring dequeue pointer, preserving EHB bit\n");
	xhci_write_64(xhci, ((u64) deq & (u64) ~ERST_PTR_MASK) | temp,
			&xhci->ir_set->erst_dequeue);
}
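
/*
 * Illustrative sketch only (not the driver's real event handler): how
 * Consumer rules 2 and 3 from the top of this file combine with inc_deq()
 * and xhci_set_hc_event_deq().  process_one_event() is a hypothetical
 * placeholder for per-TRB handling, so the sketch is compiled out.
 */
#if 0
static void example_drain_event_ring(struct xhci_hcd *xhci)
{
	union xhci_trb *event;

	for (;;) {
		event = xhci->event_ring->dequeue;
		/* Consumer rule 1: stop at the first TRB we don't own. */
		if ((event->event_cmd.flags & TRB_CYCLE) !=
				xhci->event_ring->cycle_state)
			break;
		process_one_event(xhci, event);
		/* Consumer rule 2: advance the dequeue pointer (this may
		 * toggle the ring cycle state at a segment boundary).
		 */
		inc_deq(xhci, xhci->event_ring, true);
	}
	/* Consumer rule 3: tell the HC how far we got. */
	xhci_set_hc_event_deq(xhci);
}
#endif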
/* Ring the host controller doorbell after placing a command on the ring */
void xhci_ring_cmd_db(struct xhci_hcd *xhci)
{
	u32 temp;

	xhci_dbg(xhci, "// Ding dong!\n");
	temp = xhci_readl(xhci, &xhci->dba->doorbell[0]) & DB_MASK;
	xhci_writel(xhci, temp | DB_TARGET_HOST, &xhci->dba->doorbell[0]);
	/* Flush PCI posted writes */
	xhci_readl(xhci, &xhci->dba->doorbell[0]);
}
static void ring_ep_doorbell(struct xhci_hcd *xhci,
		unsigned int slot_id,
		unsigned int ep_index,
		unsigned int stream_id)
{
	struct xhci_virt_ep *ep;
	unsigned int ep_state;
	u32 field;
	__u32 __iomem *db_addr = &xhci->dba->doorbell[slot_id];

	ep = &xhci->devs[slot_id]->eps[ep_index];
	ep_state = ep->ep_state;
	/* Don't ring the doorbell for this endpoint if there are pending
	 * cancellations because we don't want to interrupt processing.
	 * We don't want to restart any stream rings if there's a set dequeue
	 * pointer command pending because the device can choose to start any
	 * stream once the endpoint is on the HW schedule.
	 * FIXME - check all the stream rings for pending cancellations.
	 */
	if (!(ep_state & EP_HALT_PENDING) && !(ep_state & SET_DEQ_PENDING)
			&& !(ep_state & EP_HALTED)) {
		field = xhci_readl(xhci, db_addr) & DB_MASK;
		field |= EPI_TO_DB(ep_index) | STREAM_ID_TO_DB(stream_id);
		xhci_writel(xhci, field, db_addr);
		/* Flush PCI posted writes - FIXME Matthew Wilcox says this
		 * isn't time-critical and we shouldn't make the CPU wait for
		 * the flush.
		 */
		xhci_readl(xhci, db_addr);
	}
}
/* Ring the doorbell for any rings with pending URBs */
static void ring_doorbell_for_active_rings(struct xhci_hcd *xhci,
		unsigned int slot_id,
		unsigned int ep_index)
{
	unsigned int stream_id;
	struct xhci_virt_ep *ep;

	ep = &xhci->devs[slot_id]->eps[ep_index];

	/* A ring has pending URBs if its TD list is not empty */
	if (!(ep->ep_state & EP_HAS_STREAMS)) {
		if (!(list_empty(&ep->ring->td_list)))
			ring_ep_doorbell(xhci, slot_id, ep_index, 0);
		return;
	}

	for (stream_id = 1; stream_id < ep->stream_info->num_streams;
			stream_id++) {
		struct xhci_stream_info *stream_info = ep->stream_info;
		if (!list_empty(&stream_info->stream_rings[stream_id]->td_list))
			ring_ep_doorbell(xhci, slot_id, ep_index, stream_id);
	}
}
/*
 * Find the segment that trb is in.  Start searching in start_seg.
 * If we must move past a segment that has a link TRB with a toggle cycle state
 * bit set, then we will toggle the value pointed at by cycle_state.
 */
static struct xhci_segment *find_trb_seg(
		struct xhci_segment *start_seg,
		union xhci_trb *trb, int *cycle_state)
{
	struct xhci_segment *cur_seg = start_seg;
	struct xhci_generic_trb *generic_trb;

	while (cur_seg->trbs > trb ||
			&cur_seg->trbs[TRBS_PER_SEGMENT - 1] < trb) {
		generic_trb = &cur_seg->trbs[TRBS_PER_SEGMENT - 1].generic;
		if ((generic_trb->field[3] & TRB_TYPE_BITMASK) ==
				TRB_TYPE(TRB_LINK) &&
				(generic_trb->field[3] & LINK_TOGGLE))
			*cycle_state = ~(*cycle_state) & 0x1;
		cur_seg = cur_seg->next;
		if (cur_seg == start_seg)
			/* Looped over the entire list.  Oops! */
			return NULL;
	}
	return cur_seg;
}
/*
 * Move the xHC's endpoint ring dequeue pointer past cur_td.
 * Record the new state of the xHC's endpoint ring dequeue segment,
 * dequeue pointer, and new consumer cycle state in state.
 * Update our internal representation of the ring's dequeue pointer.
 *
 * We do this in three jumps:
 *  - First we update our new ring state to be the same as when the xHC stopped.
 *  - Then we traverse the ring to find the segment that contains
 *    the last TRB in the TD.  We toggle the xHC's new cycle state when we pass
 *    any link TRBs with the toggle cycle bit set.
 *  - Finally we move the dequeue state one TRB further, toggling the cycle bit
 *    if we've moved it past a link TRB with the toggle cycle bit set.
 */
void xhci_find_new_dequeue_state(struct xhci_hcd *xhci,
		unsigned int slot_id, unsigned int ep_index,
		unsigned int stream_id, struct xhci_td *cur_td,
		struct xhci_dequeue_state *state)
{
	struct xhci_virt_device *dev = xhci->devs[slot_id];
	struct xhci_ring *ep_ring;
	struct xhci_generic_trb *trb;
	struct xhci_ep_ctx *ep_ctx;
	dma_addr_t addr;

	ep_ring = xhci_triad_to_transfer_ring(xhci, slot_id,
			ep_index, stream_id);
	if (!ep_ring) {
		xhci_warn(xhci, "WARN can't find new dequeue state "
				"for invalid stream ID %u.\n",
				stream_id);
		return;
	}
	state->new_cycle_state = 0;
	xhci_dbg(xhci, "Finding segment containing stopped TRB.\n");
	state->new_deq_seg = find_trb_seg(cur_td->start_seg,
			dev->eps[ep_index].stopped_trb,
			&state->new_cycle_state);
	if (!state->new_deq_seg)
		BUG();
	/* Dig out the cycle state saved by the xHC during the stop ep cmd */
	xhci_dbg(xhci, "Finding endpoint context\n");
	ep_ctx = xhci_get_ep_ctx(xhci, dev->out_ctx, ep_index);
	state->new_cycle_state = 0x1 & ep_ctx->deq;

	state->new_deq_ptr = cur_td->last_trb;
	xhci_dbg(xhci, "Finding segment containing last TRB in TD.\n");
	state->new_deq_seg = find_trb_seg(state->new_deq_seg,
			state->new_deq_ptr,
			&state->new_cycle_state);
	if (!state->new_deq_seg)
		BUG();

	trb = &state->new_deq_ptr->generic;
	if ((trb->field[3] & TRB_TYPE_BITMASK) == TRB_TYPE(TRB_LINK) &&
			(trb->field[3] & LINK_TOGGLE))
		state->new_cycle_state = ~(state->new_cycle_state) & 0x1;
	next_trb(xhci, ep_ring, &state->new_deq_seg, &state->new_deq_ptr);

	/* Don't update the ring cycle state for the producer (us). */
	xhci_dbg(xhci, "New dequeue segment = %p (virtual)\n",
			state->new_deq_seg);
	addr = xhci_trb_virt_to_dma(state->new_deq_seg, state->new_deq_ptr);
	xhci_dbg(xhci, "New dequeue pointer = 0x%llx (DMA)\n",
			(unsigned long long) addr);
	xhci_dbg(xhci, "Setting dequeue pointer in internal ring state.\n");
	ep_ring->dequeue = state->new_deq_ptr;
	ep_ring->deq_seg = state->new_deq_seg;
}
static void td_to_noop(struct xhci_hcd *xhci, struct xhci_ring *ep_ring,
		struct xhci_td *cur_td)
{
	struct xhci_segment *cur_seg;
	union xhci_trb *cur_trb;

	for (cur_seg = cur_td->start_seg, cur_trb = cur_td->first_trb;
			true;
			next_trb(xhci, ep_ring, &cur_seg, &cur_trb)) {
		if ((cur_trb->generic.field[3] & TRB_TYPE_BITMASK) ==
				TRB_TYPE(TRB_LINK)) {
			/* Unchain any chained Link TRBs, but
			 * leave the pointers intact.
			 */
			cur_trb->generic.field[3] &= ~TRB_CHAIN;
			xhci_dbg(xhci, "Cancel (unchain) link TRB\n");
			xhci_dbg(xhci, "Address = %p (0x%llx dma); "
					"in seg %p (0x%llx dma)\n",
					cur_trb,
					(unsigned long long)xhci_trb_virt_to_dma(cur_seg, cur_trb),
					cur_seg,
					(unsigned long long)cur_seg->dma);
		} else {
			cur_trb->generic.field[0] = 0;
			cur_trb->generic.field[1] = 0;
			cur_trb->generic.field[2] = 0;
			/* Preserve only the cycle bit of this TRB */
			cur_trb->generic.field[3] &= TRB_CYCLE;
			cur_trb->generic.field[3] |= TRB_TYPE(TRB_TR_NOOP);
			xhci_dbg(xhci, "Cancel TRB %p (0x%llx dma) "
					"in seg %p (0x%llx dma)\n",
					cur_trb,
					(unsigned long long)xhci_trb_virt_to_dma(cur_seg, cur_trb),
					cur_seg,
					(unsigned long long)cur_seg->dma);
		}
		if (cur_trb == cur_td->last_trb)
			break;
	}
}
static int queue_set_tr_deq(struct xhci_hcd *xhci, int slot_id,
		unsigned int ep_index, unsigned int stream_id,
		struct xhci_segment *deq_seg,
		union xhci_trb *deq_ptr, u32 cycle_state);

void xhci_queue_new_dequeue_state(struct xhci_hcd *xhci,
		unsigned int slot_id, unsigned int ep_index,
		unsigned int stream_id,
		struct xhci_dequeue_state *deq_state)
{
	struct xhci_virt_ep *ep = &xhci->devs[slot_id]->eps[ep_index];

	xhci_dbg(xhci, "Set TR Deq Ptr cmd, new deq seg = %p (0x%llx dma), "
			"new deq ptr = %p (0x%llx dma), new cycle = %u\n",
			deq_state->new_deq_seg,
			(unsigned long long)deq_state->new_deq_seg->dma,
			deq_state->new_deq_ptr,
			(unsigned long long)xhci_trb_virt_to_dma(deq_state->new_deq_seg, deq_state->new_deq_ptr),
			deq_state->new_cycle_state);
	queue_set_tr_deq(xhci, slot_id, ep_index, stream_id,
			deq_state->new_deq_seg,
			deq_state->new_deq_ptr,
			(u32) deq_state->new_cycle_state);
	/* Stop the TD queueing code from ringing the doorbell until
	 * this command completes.  The HC won't set the dequeue pointer
	 * if the ring is running, and ringing the doorbell starts the
	 * ring running.
	 */
	ep->ep_state |= SET_DEQ_PENDING;
}
static inline void xhci_stop_watchdog_timer_in_irq(struct xhci_hcd *xhci,
		struct xhci_virt_ep *ep)
{
	ep->ep_state &= ~EP_HALT_PENDING;
	/* Can't del_timer_sync in interrupt, so we attempt to cancel.  If the
	 * timer is running on another CPU, we don't decrement stop_cmds_pending
	 * (since we didn't successfully stop the watchdog timer).
	 */
	if (del_timer(&ep->stop_cmd_timer))
		ep->stop_cmds_pending--;
}
/* Must be called with xhci->lock held in interrupt context */
static void xhci_giveback_urb_in_irq(struct xhci_hcd *xhci,
		struct xhci_td *cur_td, int status, char *adjective)
{
	struct usb_hcd *hcd = xhci_to_hcd(xhci);

	cur_td->urb->hcpriv = NULL;
	usb_hcd_unlink_urb_from_ep(hcd, cur_td->urb);
	xhci_dbg(xhci, "Giveback %s URB %p\n", adjective, cur_td->urb);

	spin_unlock(&xhci->lock);
	usb_hcd_giveback_urb(hcd, cur_td->urb, status);
	kfree(cur_td);
	spin_lock(&xhci->lock);
	xhci_dbg(xhci, "%s URB given back\n", adjective);
}
/*
 * When we get a command completion for a Stop Endpoint Command, we need to
 * unlink any cancelled TDs from the ring.  There are two ways to do that:
 *
 *  1. If the HW was in the middle of processing the TD that needs to be
 *     cancelled, then we must move the ring's dequeue pointer past the last
 *     TRB in the TD with a Set Dequeue Pointer Command.
 *  2. Otherwise, we turn all the TRBs in the TD into No-op TRBs (with the
 *     chain bit cleared) so that the HW will skip over them.
 */
static void handle_stopped_endpoint(struct xhci_hcd *xhci,
		union xhci_trb *trb)
{
	unsigned int slot_id;
	unsigned int ep_index;
	struct xhci_ring *ep_ring;
	struct xhci_virt_ep *ep;
	struct list_head *entry;
	struct xhci_td *cur_td = NULL;
	struct xhci_td *last_unlinked_td;

	struct xhci_dequeue_state deq_state;

	memset(&deq_state, 0, sizeof(deq_state));
	slot_id = TRB_TO_SLOT_ID(trb->generic.field[3]);
	ep_index = TRB_TO_EP_INDEX(trb->generic.field[3]);
	ep = &xhci->devs[slot_id]->eps[ep_index];

	if (list_empty(&ep->cancelled_td_list)) {
		xhci_stop_watchdog_timer_in_irq(xhci, ep);
		ring_doorbell_for_active_rings(xhci, slot_id, ep_index);
		return;
	}

	/* Fix up the ep ring first, so HW stops executing cancelled TDs.
	 * We have the xHCI lock, so nothing can modify this list until we drop
	 * it.  We're also in the event handler, so we can't get re-interrupted
	 * if another Stop Endpoint command completes.
	 */
	list_for_each(entry, &ep->cancelled_td_list) {
		cur_td = list_entry(entry, struct xhci_td, cancelled_td_list);
		xhci_dbg(xhci, "Cancelling TD starting at %p, 0x%llx (dma).\n",
				cur_td->first_trb,
				(unsigned long long)xhci_trb_virt_to_dma(cur_td->start_seg, cur_td->first_trb));
		ep_ring = xhci_urb_to_transfer_ring(xhci, cur_td->urb);
		if (!ep_ring) {
			/* This shouldn't happen unless a driver is mucking
			 * with the stream ID after submission.  This will
			 * leave the TD on the hardware ring, and the hardware
			 * will try to execute it, and may access a buffer
			 * that has already been freed.  In the best case, the
			 * hardware will execute it, and the event handler will
			 * ignore the completion event for that TD, since it was
			 * removed from the td_list for that endpoint.  In
			 * short, don't muck with the stream ID after
			 * submission.
			 */
			xhci_warn(xhci, "WARN Cancelled URB %p "
					"has invalid stream ID %u.\n",
					cur_td->urb,
					cur_td->urb->stream_id);
			goto remove_finished_td;
		}
		/*
		 * If we stopped on the TD we need to cancel, then we have to
		 * move the xHC endpoint ring dequeue pointer past this TD.
		 */
		if (cur_td == ep->stopped_td)
			xhci_find_new_dequeue_state(xhci, slot_id, ep_index,
					cur_td->urb->stream_id,
					cur_td, &deq_state);
		else
			td_to_noop(xhci, ep_ring, cur_td);
remove_finished_td:
		/*
		 * The event handler won't see a completion for this TD anymore,
		 * so remove it from the endpoint ring's TD list.  Keep it in
		 * the cancelled TD list for URB completion later.
		 */
		list_del(&cur_td->td_list);
	}
	last_unlinked_td = cur_td;
	xhci_stop_watchdog_timer_in_irq(xhci, ep);

	/* If necessary, queue a Set Transfer Ring Dequeue Pointer command */
	if (deq_state.new_deq_ptr && deq_state.new_deq_seg) {
		xhci_queue_new_dequeue_state(xhci,
				slot_id, ep_index,
				ep->stopped_td->urb->stream_id,
				&deq_state);
		xhci_ring_cmd_db(xhci);
	} else {
		/* Otherwise ring the doorbell(s) to restart queued transfers */
		ring_doorbell_for_active_rings(xhci, slot_id, ep_index);
	}
	ep->stopped_td = NULL;
	ep->stopped_trb = NULL;

	/*
	 * Drop the lock and complete the URBs in the cancelled TD list.
	 * New TDs to be cancelled might be added to the end of the list before
	 * we can complete all the URBs for the TDs we already unlinked.
	 * So stop when we've completed the URB for the last TD we unlinked.
	 */
	do {
		cur_td = list_entry(ep->cancelled_td_list.next,
				struct xhci_td, cancelled_td_list);
		list_del(&cur_td->cancelled_td_list);

		/* Clean up the cancelled URB */
		/* Doesn't matter what we pass for status, since the core will
		 * just overwrite it (because the URB has been unlinked).
		 */
		xhci_giveback_urb_in_irq(xhci, cur_td, 0, "cancelled");

		/* Stop processing the cancelled list if the watchdog timer is
		 * running.
		 */
		if (xhci->xhc_state & XHCI_STATE_DYING)
			return;
	} while (cur_td != last_unlinked_td);

	/* Return to the event handler with xhci->lock re-acquired */
}
/* Watchdog timer function for when a stop endpoint command fails to complete.
 * In this case, we assume the host controller is broken or dying or dead.  The
 * host may still be completing some other events, so we have to be careful to
 * let the event ring handler and the URB dequeueing/enqueueing functions know
 * through xhci->xhc_state.
 *
 * The timer may also fire if the host takes a very long time to respond to the
 * command, and the stop endpoint command completion handler cannot delete the
 * timer before the timer function is called.  Another endpoint cancellation may
 * sneak in before the timer function can grab the lock, and that may queue
 * another stop endpoint command and add the timer back.  So we cannot use a
 * simple flag to say whether there is a pending stop endpoint command for a
 * particular endpoint.
 *
 * Instead we use a combination of that flag and a counter for the number of
 * pending stop endpoint commands.  If the timer is the tail end of the last
 * stop endpoint command, and the endpoint's command is still pending, we assume
 * the host is dying.
 */
void xhci_stop_endpoint_command_watchdog(unsigned long arg)
{
	struct xhci_hcd *xhci;
	struct xhci_virt_ep *ep;
	struct xhci_virt_ep *temp_ep;
	struct xhci_ring *ring;
	struct xhci_td *cur_td;
	int ret, i, j;

	ep = (struct xhci_virt_ep *) arg;
	xhci = ep->xhci;

	spin_lock(&xhci->lock);

	ep->stop_cmds_pending--;
	if (xhci->xhc_state & XHCI_STATE_DYING) {
		xhci_dbg(xhci, "Stop EP timer ran, but another timer marked "
				"xHCI as DYING, exiting.\n");
		spin_unlock(&xhci->lock);
		return;
	}
	if (!(ep->stop_cmds_pending == 0 && (ep->ep_state & EP_HALT_PENDING))) {
		xhci_dbg(xhci, "Stop EP timer ran, but no command pending, "
				"exiting.\n");
		spin_unlock(&xhci->lock);
		return;
	}

	xhci_warn(xhci, "xHCI host not responding to stop endpoint command.\n");
	xhci_warn(xhci, "Assuming host is dying, halting host.\n");
	/* Oops, HC is dead or dying or at least not responding to the stop
	 * endpoint command.
	 */
	xhci->xhc_state |= XHCI_STATE_DYING;
	/* Disable interrupts from the host controller and start halting it */
	xhci_quiesce(xhci);
	spin_unlock(&xhci->lock);

	ret = xhci_halt(xhci);

	spin_lock(&xhci->lock);
	if (ret < 0) {
		/* This is bad; the host is not responding to commands and it's
		 * not allowing itself to be halted.  At least interrupts are
		 * disabled, so we can set HC_STATE_HALT and notify the
		 * USB core.  But if we call usb_hc_died(), it will attempt to
		 * disconnect all device drivers under this host.  Those
		 * disconnect() methods will wait for all URBs to be unlinked,
		 * so we must complete them.
		 */
		xhci_warn(xhci, "Non-responsive xHCI host is not halting.\n");
		xhci_warn(xhci, "Completing active URBs anyway.\n");
		/* We could turn all TDs on the rings to no-ops.  This won't
		 * help if the host has cached part of the ring, and is slow if
		 * we want to preserve the cycle bit.  Skip it and hope the host
		 * doesn't touch the memory.
		 */
	}
	for (i = 0; i < MAX_HC_SLOTS; i++) {
		if (!xhci->devs[i])
			continue;
		for (j = 0; j < 31; j++) {
			temp_ep = &xhci->devs[i]->eps[j];
			ring = temp_ep->ring;
			if (!ring)
				continue;
			xhci_dbg(xhci, "Killing URBs for slot ID %u, "
					"ep index %u\n", i, j);
			while (!list_empty(&ring->td_list)) {
				cur_td = list_first_entry(&ring->td_list,
						struct xhci_td,
						td_list);
				list_del(&cur_td->td_list);
				if (!list_empty(&cur_td->cancelled_td_list))
					list_del(&cur_td->cancelled_td_list);
				xhci_giveback_urb_in_irq(xhci, cur_td,
						-ESHUTDOWN, "killed");
			}
			while (!list_empty(&temp_ep->cancelled_td_list)) {
				cur_td = list_first_entry(
						&temp_ep->cancelled_td_list,
						struct xhci_td,
						cancelled_td_list);
				list_del(&cur_td->cancelled_td_list);
				xhci_giveback_urb_in_irq(xhci, cur_td,
						-ESHUTDOWN, "killed");
			}
		}
	}
	spin_unlock(&xhci->lock);
	xhci_to_hcd(xhci)->state = HC_STATE_HALT;
	xhci_dbg(xhci, "Calling usb_hc_died()\n");
	usb_hc_died(xhci_to_hcd(xhci));
	xhci_dbg(xhci, "xHCI host controller is dead.\n");
}
/*
 * When we get a completion for a Set Transfer Ring Dequeue Pointer command,
 * we need to clear the set deq pending flag in the endpoint ring state, so that
 * the TD queueing code can ring the doorbell again.  We also need to ring the
 * endpoint doorbell to restart the ring, but only if there aren't more
 * cancellations pending.
 */
static void handle_set_deq_completion(struct xhci_hcd *xhci,
		struct xhci_event_cmd *event,
		union xhci_trb *trb)
{
	unsigned int slot_id;
	unsigned int ep_index;
	unsigned int stream_id;
	struct xhci_ring *ep_ring;
	struct xhci_virt_device *dev;
	struct xhci_ep_ctx *ep_ctx;
	struct xhci_slot_ctx *slot_ctx;

	slot_id = TRB_TO_SLOT_ID(trb->generic.field[3]);
	ep_index = TRB_TO_EP_INDEX(trb->generic.field[3]);
	stream_id = TRB_TO_STREAM_ID(trb->generic.field[2]);
	dev = xhci->devs[slot_id];

	ep_ring = xhci_stream_id_to_ring(dev, ep_index, stream_id);
	if (!ep_ring) {
		xhci_warn(xhci, "WARN Set TR deq ptr command for "
				"freed stream ID %u\n",
				stream_id);
		/* XXX: Harmless??? */
		dev->eps[ep_index].ep_state &= ~SET_DEQ_PENDING;
		return;
	}

	ep_ctx = xhci_get_ep_ctx(xhci, dev->out_ctx, ep_index);
	slot_ctx = xhci_get_slot_ctx(xhci, dev->out_ctx);

	if (GET_COMP_CODE(event->status) != COMP_SUCCESS) {
		unsigned int ep_state;
		unsigned int slot_state;

		switch (GET_COMP_CODE(event->status)) {
		case COMP_TRB_ERR:
			xhci_warn(xhci, "WARN Set TR Deq Ptr cmd invalid because "
					"of stream ID configuration\n");
			break;
		case COMP_CTX_STATE:
			xhci_warn(xhci, "WARN Set TR Deq Ptr cmd failed due "
					"to incorrect slot or ep state.\n");
			ep_state = ep_ctx->ep_info;
			ep_state &= EP_STATE_MASK;
			slot_state = slot_ctx->dev_state;
			slot_state = GET_SLOT_STATE(slot_state);
			xhci_dbg(xhci, "Slot state = %u, EP state = %u\n",
					slot_state, ep_state);
			break;
		case COMP_EBADSLT:
			xhci_warn(xhci, "WARN Set TR Deq Ptr cmd failed because "
					"slot %u was not enabled.\n", slot_id);
			break;
		default:
			xhci_warn(xhci, "WARN Set TR Deq Ptr cmd with unknown "
					"completion code of %u.\n",
					GET_COMP_CODE(event->status));
			break;
		}
		/* OK what do we do now?  The endpoint state is hosed, and we
		 * should never get to this point if the synchronization between
		 * queueing and endpoint state changes is correct.  This might
		 * happen if the device gets disconnected after we've finished
		 * cancelling URBs, which might not be an error...
		 */
	} else {
		xhci_dbg(xhci, "Successful Set TR Deq Ptr cmd, deq = @%08llx\n",
				ep_ctx->deq);
	}

	dev->eps[ep_index].ep_state &= ~SET_DEQ_PENDING;
	/* Restart any rings with pending URBs */
	ring_doorbell_for_active_rings(xhci, slot_id, ep_index);
}
static void handle_reset_ep_completion(struct xhci_hcd *xhci,
		struct xhci_event_cmd *event,
		union xhci_trb *trb)
{
	int slot_id;
	unsigned int ep_index;

	slot_id = TRB_TO_SLOT_ID(trb->generic.field[3]);
	ep_index = TRB_TO_EP_INDEX(trb->generic.field[3]);
	/* This command will only fail if the endpoint wasn't halted,
	 * but we don't care.
	 */
	xhci_dbg(xhci, "Ignoring reset ep completion code of %u\n",
			(unsigned int) GET_COMP_CODE(event->status));

	/* HW with the reset endpoint quirk needs to have a configure endpoint
	 * command complete before the endpoint can be used.  Queue that here
	 * because the HW can't handle two commands being queued in a row.
	 */
	if (xhci->quirks & XHCI_RESET_EP_QUIRK) {
		xhci_dbg(xhci, "Queueing configure endpoint command\n");
		xhci_queue_configure_endpoint(xhci,
				xhci->devs[slot_id]->in_ctx->dma, slot_id,
				false);
		xhci_ring_cmd_db(xhci);
	} else {
		/* Clear our internal halted state and restart the ring(s) */
		xhci->devs[slot_id]->eps[ep_index].ep_state &= ~EP_HALTED;
		ring_doorbell_for_active_rings(xhci, slot_id, ep_index);
	}
}
/* Check to see if a command in the device's command queue matches this one.
 * Signal the completion or free the command, and return 1.  Return 0 if the
 * completed command isn't at the head of the command list.
 */
static int handle_cmd_in_cmd_wait_list(struct xhci_hcd *xhci,
		struct xhci_virt_device *virt_dev,
		struct xhci_event_cmd *event)
{
	struct xhci_command *command;

	if (list_empty(&virt_dev->cmd_list))
		return 0;

	command = list_entry(virt_dev->cmd_list.next,
			struct xhci_command, cmd_list);
	if (xhci->cmd_ring->dequeue != command->command_trb)
		return 0;

	command->status =
		GET_COMP_CODE(event->status);
	list_del(&command->cmd_list);
	if (command->completion)
		complete(command->completion);
	else
		xhci_free_command(xhci, command);
	return 1;
}
static void handle_cmd_completion(struct xhci_hcd *xhci,
		struct xhci_event_cmd *event)
{
	int slot_id = TRB_TO_SLOT_ID(event->flags);
	u64 cmd_dma;
	dma_addr_t cmd_dequeue_dma;
	struct xhci_input_control_ctx *ctrl_ctx;
	struct xhci_virt_device *virt_dev;
	unsigned int ep_index;
	struct xhci_ring *ep_ring;
	unsigned int ep_state;

	cmd_dma = event->cmd_trb;
	cmd_dequeue_dma = xhci_trb_virt_to_dma(xhci->cmd_ring->deq_seg,
			xhci->cmd_ring->dequeue);
	/* Is the command ring deq ptr out of sync with the deq seg ptr? */
	if (cmd_dequeue_dma == 0) {
		xhci->error_bitmask |= 1 << 4;
		return;
	}
	/* Does the DMA address match our internal dequeue pointer address? */
	if (cmd_dma != (u64) cmd_dequeue_dma) {
		xhci->error_bitmask |= 1 << 5;
		return;
	}
	switch (xhci->cmd_ring->dequeue->generic.field[3] & TRB_TYPE_BITMASK) {
	case TRB_TYPE(TRB_ENABLE_SLOT):
		if (GET_COMP_CODE(event->status) == COMP_SUCCESS)
			xhci->slot_id = slot_id;
		else
			xhci->slot_id = 0;
		complete(&xhci->addr_dev);
		break;
	case TRB_TYPE(TRB_DISABLE_SLOT):
		if (xhci->devs[slot_id])
			xhci_free_virt_device(xhci, slot_id);
		break;
	case TRB_TYPE(TRB_CONFIG_EP):
		virt_dev = xhci->devs[slot_id];
		if (handle_cmd_in_cmd_wait_list(xhci, virt_dev, event))
			break;
		/*
		 * Configure endpoint commands can come from the USB core
		 * configuration or alt setting changes, or because the HW
		 * needed an extra configure endpoint command after a reset
		 * endpoint command or streams were being configured.
		 * If the command was for a halted endpoint, the xHCI driver
		 * is not waiting on the configure endpoint command.
		 */
		ctrl_ctx = xhci_get_input_control_ctx(xhci,
				virt_dev->in_ctx);
		/* Input ctx add_flags are the endpoint index plus one */
		ep_index = xhci_last_valid_endpoint(ctrl_ctx->add_flags) - 1;
		/* A usb_set_interface() call directly after clearing a halted
		 * condition may race on this quirky hardware.  Not worth
		 * worrying about, since this is prototype hardware.  Not sure
		 * if this will work for streams, but streams support was
		 * untested on this prototype.
		 */
		if (xhci->quirks & XHCI_RESET_EP_QUIRK &&
				ep_index != (unsigned int) -1 &&
				ctrl_ctx->add_flags - SLOT_FLAG ==
					ctrl_ctx->drop_flags) {
			ep_ring = xhci->devs[slot_id]->eps[ep_index].ring;
			ep_state = xhci->devs[slot_id]->eps[ep_index].ep_state;
			if (!(ep_state & EP_HALTED))
				goto bandwidth_change;
			xhci_dbg(xhci, "Completed config ep cmd - "
					"last ep index = %d, state = %d\n",
					ep_index, ep_state);
			/* Clear internal halted state and restart ring(s) */
			xhci->devs[slot_id]->eps[ep_index].ep_state &=
				~EP_HALTED;
			ring_doorbell_for_active_rings(xhci, slot_id, ep_index);
			break;
		}
bandwidth_change:
		xhci_dbg(xhci, "Completed config ep cmd\n");
		xhci->devs[slot_id]->cmd_status =
			GET_COMP_CODE(event->status);
		complete(&xhci->devs[slot_id]->cmd_completion);
		break;
	case TRB_TYPE(TRB_EVAL_CONTEXT):
		virt_dev = xhci->devs[slot_id];
		if (handle_cmd_in_cmd_wait_list(xhci, virt_dev, event))
			break;
		xhci->devs[slot_id]->cmd_status = GET_COMP_CODE(event->status);
		complete(&xhci->devs[slot_id]->cmd_completion);
		break;
	case TRB_TYPE(TRB_ADDR_DEV):
		xhci->devs[slot_id]->cmd_status = GET_COMP_CODE(event->status);
		complete(&xhci->addr_dev);
		break;
	case TRB_TYPE(TRB_STOP_RING):
		handle_stopped_endpoint(xhci, xhci->cmd_ring->dequeue);
		break;
	case TRB_TYPE(TRB_SET_DEQ):
		handle_set_deq_completion(xhci, event, xhci->cmd_ring->dequeue);
		break;
	case TRB_TYPE(TRB_CMD_NOOP):
		++xhci->noops_handled;
		break;
	case TRB_TYPE(TRB_RESET_EP):
		handle_reset_ep_completion(xhci, event, xhci->cmd_ring->dequeue);
		break;
	case TRB_TYPE(TRB_RESET_DEV):
		xhci_dbg(xhci, "Completed reset device command.\n");
		slot_id = TRB_TO_SLOT_ID(
				xhci->cmd_ring->dequeue->generic.field[3]);
		virt_dev = xhci->devs[slot_id];
		if (virt_dev)
			handle_cmd_in_cmd_wait_list(xhci, virt_dev, event);
		else
			xhci_warn(xhci, "Reset device command completion "
					"for disabled slot %u\n", slot_id);
		break;
	case TRB_TYPE(TRB_NEC_GET_FW):
		if (!(xhci->quirks & XHCI_NEC_HOST)) {
			xhci->error_bitmask |= 1 << 6;
			break;
		}
		xhci_dbg(xhci, "NEC firmware version %2x.%02x\n",
				NEC_FW_MAJOR(event->status),
				NEC_FW_MINOR(event->status));
		break;
	default:
		/* Skip over unknown commands on the event ring */
		xhci->error_bitmask |= 1 << 6;
		break;
	}
	inc_deq(xhci, xhci->cmd_ring, false);
}
static void handle_vendor_event(struct xhci_hcd *xhci,
		union xhci_trb *event)
{
	u32 trb_type;

	trb_type = TRB_FIELD_TO_TYPE(event->generic.field[3]);
	xhci_dbg(xhci, "Vendor specific event TRB type = %u\n", trb_type);
	if (trb_type == TRB_NEC_CMD_COMP && (xhci->quirks & XHCI_NEC_HOST))
		handle_cmd_completion(xhci, &event->event_cmd);
}
static void handle_port_status(struct xhci_hcd *xhci,
		union xhci_trb *event)
{
	u32 port_id;

	/* Port status change events always have a successful completion code */
	if (GET_COMP_CODE(event->generic.field[2]) != COMP_SUCCESS) {
		xhci_warn(xhci, "WARN: xHC returned failed port status event\n");
		xhci->error_bitmask |= 1 << 8;
	}
	/* FIXME: core doesn't care about all port link state changes yet */
	port_id = GET_PORT_ID(event->generic.field[0]);
	xhci_dbg(xhci, "Port Status Change Event for port %d\n", port_id);

	/* Update event ring dequeue pointer before dropping the lock */
	inc_deq(xhci, xhci->event_ring, true);
	xhci_set_hc_event_deq(xhci);

	spin_unlock(&xhci->lock);
	/* Pass this up to the core */
	usb_hcd_poll_rh_status(xhci_to_hcd(xhci));
	spin_lock(&xhci->lock);
}
/*
 * This TD is defined by the TRBs starting at start_trb in start_seg and ending
 * at end_trb, which may be in another segment.  If the suspect DMA address is a
 * TRB in this TD, this function returns that TRB's segment.  Otherwise it
 * returns NULL.
 */
struct xhci_segment *trb_in_td(struct xhci_segment *start_seg,
		union xhci_trb *start_trb,
		union xhci_trb *end_trb,
		dma_addr_t suspect_dma)
{
	dma_addr_t start_dma;
	dma_addr_t end_seg_dma;
	dma_addr_t end_trb_dma;
	struct xhci_segment *cur_seg;

	start_dma = xhci_trb_virt_to_dma(start_seg, start_trb);
	cur_seg = start_seg;

	do {
		if (start_dma == 0)
			return NULL;
		/* We may get an event for a Link TRB in the middle of a TD */
		end_seg_dma = xhci_trb_virt_to_dma(cur_seg,
				&cur_seg->trbs[TRBS_PER_SEGMENT - 1]);
		/* If the end TRB isn't in this segment, this is set to 0 */
		end_trb_dma = xhci_trb_virt_to_dma(cur_seg, end_trb);

		if (end_trb_dma > 0) {
			/* The end TRB is in this segment, so suspect should be here */
			if (start_dma <= end_trb_dma) {
				if (suspect_dma >= start_dma && suspect_dma <= end_trb_dma)
					return cur_seg;
			} else {
				/* Case for one segment with
				 * a TD wrapped around to the top
				 */
				if ((suspect_dma >= start_dma &&
						suspect_dma <= end_seg_dma) ||
						(suspect_dma >= cur_seg->dma &&
						 suspect_dma <= end_trb_dma))
					return cur_seg;
			}
			return NULL;
		} else {
			/* Might still be somewhere in this segment */
			if (suspect_dma >= start_dma && suspect_dma <= end_seg_dma)
				return cur_seg;
		}
		cur_seg = cur_seg->next;
		start_dma = xhci_trb_virt_to_dma(cur_seg, &cur_seg->trbs[0]);
	} while (cur_seg != start_seg);

	return NULL;
}
static void xhci_cleanup_halted_endpoint(struct xhci_hcd *xhci,
		unsigned int slot_id, unsigned int ep_index,
		unsigned int stream_id,
		struct xhci_td *td, union xhci_trb *event_trb)
{
	struct xhci_virt_ep *ep = &xhci->devs[slot_id]->eps[ep_index];

	ep->ep_state |= EP_HALTED;
	ep->stopped_td = td;
	ep->stopped_trb = event_trb;
	ep->stopped_stream = stream_id;

	xhci_queue_reset_ep(xhci, slot_id, ep_index);
	xhci_cleanup_stalled_ring(xhci, td->urb->dev, ep_index);

	ep->stopped_td = NULL;
	ep->stopped_trb = NULL;
	ep->stopped_stream = 0;

	xhci_ring_cmd_db(xhci);
}
/* Check if an error has halted the endpoint ring.  The class driver will
 * cleanup the halt for a non-default control endpoint if we indicate a stall.
 * However, a babble and other errors also halt the endpoint ring, and the class
 * driver won't clear the halt in that case, so we need to issue a Set Transfer
 * Ring Dequeue Pointer command manually.
 */
static int xhci_requires_manual_halt_cleanup(struct xhci_hcd *xhci,
		struct xhci_ep_ctx *ep_ctx,
		unsigned int trb_comp_code)
{
	/* TRB completion codes that may require a manual halt cleanup */
	if (trb_comp_code == COMP_TX_ERR ||
			trb_comp_code == COMP_BABBLE ||
			trb_comp_code == COMP_SPLIT_ERR)
		/* The 0.95 spec says a babbling control endpoint
		 * is not halted.  The 0.96 spec says it is.  Some HW
		 * claims to be 0.95 compliant, but it halts the control
		 * endpoint anyway.  Check if a babble halted the
		 * endpoint.
		 */
		if ((ep_ctx->ep_info & EP_STATE_MASK) == EP_STATE_HALTED)
			return 1;

	return 0;
}
int xhci_is_vendor_info_code(struct xhci_hcd *xhci, unsigned int trb_comp_code)
{
	if (trb_comp_code >= 224 && trb_comp_code <= 255) {
		/* Vendor defined "informational" completion code,
		 * treat as not-an-error.
		 */
		xhci_dbg(xhci, "Vendor defined info completion code %u\n",
				trb_comp_code);
		xhci_dbg(xhci, "Treating code as success.\n");
		return 1;
	}
	return 0;
}
/*
 * If this function returns an error condition, it means it got a Transfer
 * event with a corrupted Slot ID, Endpoint ID, or TRB DMA address.
 * At this point, the host controller is probably hosed and should be reset.
 */
static int handle_tx_event(struct xhci_hcd *xhci,
		struct xhci_transfer_event *event)
{
	struct xhci_virt_device *xdev;
	struct xhci_virt_ep *ep;
	struct xhci_ring *ep_ring;
	unsigned int slot_id;
	int ep_index;
	struct xhci_td *td = NULL;
	dma_addr_t event_dma;
	struct xhci_segment *event_seg;
	union xhci_trb *event_trb;
	struct urb *urb = NULL;
	int status = -EINPROGRESS;
	struct xhci_ep_ctx *ep_ctx;
	u32 trb_comp_code;

	xhci_dbg(xhci, "In %s\n", __func__);
	slot_id = TRB_TO_SLOT_ID(event->flags);
	xdev = xhci->devs[slot_id];
	if (!xdev) {
		xhci_err(xhci, "ERROR Transfer event pointed to bad slot\n");
		return -ENODEV;
	}

	/* Endpoint ID is 1 based, our index is zero based */
	ep_index = TRB_TO_EP_ID(event->flags) - 1;
	xhci_dbg(xhci, "%s - ep index = %d\n", __func__, ep_index);
	ep = &xdev->eps[ep_index];
	ep_ring = xhci_dma_to_transfer_ring(ep, event->buffer);
	ep_ctx = xhci_get_ep_ctx(xhci, xdev->out_ctx, ep_index);
	if (!ep_ring || (ep_ctx->ep_info & EP_STATE_MASK) == EP_STATE_DISABLED) {
		xhci_err(xhci, "ERROR Transfer event for disabled endpoint "
				"or incorrect stream ring\n");
		return -ENODEV;
	}

	event_dma = event->buffer;
	/* This TRB should be in the TD at the head of this ring's TD list */
	xhci_dbg(xhci, "%s - checking for list empty\n", __func__);
	if (list_empty(&ep_ring->td_list)) {
		xhci_warn(xhci, "WARN Event TRB for slot %d ep %d with no TDs queued?\n",
				TRB_TO_SLOT_ID(event->flags), ep_index);
		xhci_dbg(xhci, "Event TRB with TRB type ID %u\n",
				(unsigned int) (event->flags & TRB_TYPE_BITMASK)>>10);
		xhci_print_trb_offsets(xhci, (union xhci_trb *) event);
		urb = NULL;
		goto cleanup;
	}

	xhci_dbg(xhci, "%s - getting list entry\n", __func__);
	td = list_entry(ep_ring->td_list.next, struct xhci_td, td_list);

	/* Is this a TRB in the currently executing TD? */
	xhci_dbg(xhci, "%s - looking for TD\n", __func__);
	event_seg = trb_in_td(ep_ring->deq_seg, ep_ring->dequeue,
			td->last_trb, event_dma);
	xhci_dbg(xhci, "%s - found event_seg = %p\n", __func__, event_seg);
	if (!event_seg) {
		/* HC is busted, give up! */
		xhci_err(xhci, "ERROR Transfer event TRB DMA ptr not part of current TD\n");
		return -ESHUTDOWN;
	}
	event_trb = &event_seg->trbs[(event_dma - event_seg->dma) / sizeof(*event_trb)];
	xhci_dbg(xhci, "Event TRB with TRB type ID %u\n",
			(unsigned int) (event->flags & TRB_TYPE_BITMASK)>>10);
	xhci_dbg(xhci, "Offset 0x00 (buffer lo) = 0x%x\n",
			lower_32_bits(event->buffer));
	xhci_dbg(xhci, "Offset 0x04 (buffer hi) = 0x%x\n",
			upper_32_bits(event->buffer));
	xhci_dbg(xhci, "Offset 0x08 (transfer length) = 0x%x\n",
			(unsigned int) event->transfer_len);
	xhci_dbg(xhci, "Offset 0x0C (flags) = 0x%x\n",
			(unsigned int) event->flags);

	/* Look for common error cases */
	trb_comp_code = GET_COMP_CODE(event->transfer_len);
	switch (trb_comp_code) {
	/* Skip codes that require special handling depending on
	 * transfer type
	 */
	case COMP_SUCCESS:
	case COMP_SHORT_TX:
		break;
	case COMP_STOP:
		xhci_dbg(xhci, "Stopped on Transfer TRB\n");
		break;
	case COMP_STOP_INVAL:
		xhci_dbg(xhci, "Stopped on No-op or Link TRB\n");
		break;
	case COMP_STALL:
		xhci_warn(xhci, "WARN: Stalled endpoint\n");
		ep->ep_state |= EP_HALTED;
		status = -EPIPE;
		break;
	case COMP_TRB_ERR:
		xhci_warn(xhci, "WARN: TRB error on endpoint\n");
		status = -EILSEQ;
		break;
	case COMP_SPLIT_ERR:
	case COMP_TX_ERR:
		xhci_warn(xhci, "WARN: transfer error on endpoint\n");
		status = -EPROTO;
		break;
	case COMP_BABBLE:
		xhci_warn(xhci, "WARN: babble error on endpoint\n");
		status = -EOVERFLOW;
		break;
	case COMP_DB_ERR:
		xhci_warn(xhci, "WARN: HC couldn't access mem fast enough\n");
		status = -ENOSR;
		break;
	default:
		if (xhci_is_vendor_info_code(xhci, trb_comp_code)) {
			status = 0;
			break;
		}
		xhci_warn(xhci, "ERROR Unknown event condition, HC probably busted\n");
		urb = NULL;
		goto cleanup;
	}
	/* Now update the urb's actual_length and give back to the core */
	/* Was this a control transfer? */
	if (usb_endpoint_xfer_control(&td->urb->ep->desc)) {
		xhci_debug_trb(xhci, xhci->event_ring->dequeue);
		switch (trb_comp_code) {
		case COMP_SUCCESS:
			if (event_trb == ep_ring->dequeue) {
				xhci_warn(xhci, "WARN: Success on ctrl setup TRB without IOC set??\n");
				status = -ESHUTDOWN;
			} else if (event_trb != td->last_trb) {
				xhci_warn(xhci, "WARN: Success on ctrl data TRB without IOC set??\n");
				status = -ESHUTDOWN;
			} else {
				xhci_dbg(xhci, "Successful control transfer!\n");
				status = 0;
			}
			break;
		case COMP_SHORT_TX:
			xhci_warn(xhci, "WARN: short transfer on control ep\n");
			if (td->urb->transfer_flags & URB_SHORT_NOT_OK)
				status = -EREMOTEIO;
			else
				status = 0;
			break;
		default:
			if (!xhci_requires_manual_halt_cleanup(xhci,
						ep_ctx, trb_comp_code))
				break;
			xhci_dbg(xhci, "TRB error code %u, "
					"halted endpoint index = %u\n",
					trb_comp_code, ep_index);
			/* else fall through */
		case COMP_STALL:
			/* Did we transfer part of the data (middle) phase? */
			if (event_trb != ep_ring->dequeue &&
					event_trb != td->last_trb)
				td->urb->actual_length =
					td->urb->transfer_buffer_length
					- TRB_LEN(event->transfer_len);
			else
				td->urb->actual_length = 0;

			xhci_cleanup_halted_endpoint(xhci,
					slot_id, ep_index, 0, td, event_trb);
			goto td_cleanup;
		}
		/*
		 * Did we transfer any data, despite the errors that might have
		 * happened?  I.e. did we get past the setup stage?
		 */
		if (event_trb != ep_ring->dequeue) {
			/* The event was for the status stage */
			if (event_trb == td->last_trb) {
				if (td->urb->actual_length != 0) {
					/* Don't overwrite a previously set error code */
					if ((status == -EINPROGRESS ||
								status == 0) &&
							(td->urb->transfer_flags
							 & URB_SHORT_NOT_OK))
						/* Did we already see a short data stage? */
						status = -EREMOTEIO;
				} else {
					td->urb->actual_length =
						td->urb->transfer_buffer_length;
				}
			} else {
				/* Maybe the event was for the data stage? */
				if (trb_comp_code != COMP_STOP_INVAL) {
					/* We didn't stop on a link TRB in the middle */
					td->urb->actual_length =
						td->urb->transfer_buffer_length -
						TRB_LEN(event->transfer_len);
					xhci_dbg(xhci, "Waiting for status stage event\n");
					urb = NULL;
					goto cleanup;
				}
			}
		}
	} else {
		switch (trb_comp_code) {
		case COMP_SUCCESS:
			/* Double check that the HW transferred everything. */
			if (event_trb != td->last_trb) {
				xhci_warn(xhci, "WARN Successful completion "
						"on short TX\n");
				if (td->urb->transfer_flags & URB_SHORT_NOT_OK)
					status = -EREMOTEIO;
				else
					status = 0;
			} else {
				if (usb_endpoint_xfer_bulk(&td->urb->ep->desc))
					xhci_dbg(xhci, "Successful bulk "
							"transfer!\n");
				else
					xhci_dbg(xhci, "Successful interrupt "
							"transfer!\n");
				status = 0;
			}
			break;
		case COMP_SHORT_TX:
			if (td->urb->transfer_flags & URB_SHORT_NOT_OK)
				status = -EREMOTEIO;
			else
				status = 0;
			break;
		default:
			/* Others already handled above */
			break;
		}
		dev_dbg(&td->urb->dev->dev,
				"ep %#x - asked for %d bytes, "
				"%d bytes untransferred\n",
				td->urb->ep->desc.bEndpointAddress,
				td->urb->transfer_buffer_length,
				TRB_LEN(event->transfer_len));
		/* Fast path - was this the last TRB in the TD for this URB? */
		if (event_trb == td->last_trb) {
			if (TRB_LEN(event->transfer_len) != 0) {
				td->urb->actual_length =
					td->urb->transfer_buffer_length -
					TRB_LEN(event->transfer_len);
				if (td->urb->transfer_buffer_length <
						td->urb->actual_length) {
					xhci_warn(xhci, "HC gave bad length "
							"of %d bytes left\n",
							TRB_LEN(event->transfer_len));
					td->urb->actual_length = 0;
					if (td->urb->transfer_flags &
							URB_SHORT_NOT_OK)
						status = -EREMOTEIO;
					else
						status = 0;
				}
				/* Don't overwrite a previously set error code */
				if (status == -EINPROGRESS) {
					if (td->urb->transfer_flags & URB_SHORT_NOT_OK)
						status = -EREMOTEIO;
					else
						status = 0;
				}
			} else {
				td->urb->actual_length = td->urb->transfer_buffer_length;
				/* Ignore a short packet completion if the
				 * untransferred length was zero.
				 */
				if (status == -EREMOTEIO)
					status = 0;
			}
		} else {
			/* Slow path - walk the list, starting from the dequeue
			 * pointer, to get the actual length transferred.
			 */
			union xhci_trb *cur_trb;
			struct xhci_segment *cur_seg;

			td->urb->actual_length = 0;
			for (cur_trb = ep_ring->dequeue, cur_seg = ep_ring->deq_seg;
					cur_trb != event_trb;
					next_trb(xhci, ep_ring, &cur_seg, &cur_trb)) {
				if ((cur_trb->generic.field[3] &
						TRB_TYPE_BITMASK) != TRB_TYPE(TRB_TR_NOOP) &&
						(cur_trb->generic.field[3] &
						TRB_TYPE_BITMASK) != TRB_TYPE(TRB_LINK))
					td->urb->actual_length +=
						TRB_LEN(cur_trb->generic.field[2]);
			}
			/* If the ring didn't stop on a Link or No-op TRB, add
			 * in the actual bytes transferred from the Normal TRB
			 */
			if (trb_comp_code != COMP_STOP_INVAL)
				td->urb->actual_length +=
					TRB_LEN(cur_trb->generic.field[2]) -
					TRB_LEN(event->transfer_len);
		}
	}
	if (trb_comp_code == COMP_STOP_INVAL ||
			trb_comp_code == COMP_STOP) {
		/* The Endpoint Stop Command completion will take care of any
		 * stopped TDs.  A stopped TD may be restarted, so don't update
		 * the ring dequeue pointer or take this TD off any lists yet.
		 */
		ep->stopped_td = td;
		ep->stopped_trb = event_trb;
	} else {
		if (trb_comp_code == COMP_STALL) {
			/* The transfer is completed from the driver's
			 * perspective, but we need to issue a set dequeue
			 * command for this stalled endpoint to move the dequeue
			 * pointer past the TD.  We can't do that here because
			 * the halt condition must be cleared first.  Let the
			 * USB class driver clear the stall later.
			 */
			ep->stopped_td = td;
			ep->stopped_trb = event_trb;
			ep->stopped_stream = ep_ring->stream_id;
		} else if (xhci_requires_manual_halt_cleanup(xhci,
					ep_ctx, trb_comp_code)) {
			/* Other types of errors halt the endpoint, but the
			 * class driver doesn't call usb_reset_endpoint() unless
			 * the error is -EPIPE.  Clear the halted status in the
			 * xHCI hardware manually.
			 */
			xhci_cleanup_halted_endpoint(xhci,
					slot_id, ep_index, ep_ring->stream_id, td, event_trb);
		} else {
			/* Update ring dequeue pointer */
			while (ep_ring->dequeue != td->last_trb)
				inc_deq(xhci, ep_ring, false);
			inc_deq(xhci, ep_ring, false);
		}

td_cleanup:
		/* Clean up the endpoint's TD list */
		urb = td->urb;

		/* Do one last check of the actual transfer length.
		 * If the host controller said we transferred more data than
		 * the buffer length, urb->actual_length will be a very big
		 * number (since it's unsigned).  Play it safe and say we didn't
		 * transfer anything.
		 */
		if (urb->actual_length > urb->transfer_buffer_length) {
			xhci_warn(xhci, "URB transfer length is wrong, "
					"xHC issue? req. len = %u, "
					"act. len = %u\n",
					urb->transfer_buffer_length,
					urb->actual_length);
			urb->actual_length = 0;
			if (td->urb->transfer_flags & URB_SHORT_NOT_OK)
				status = -EREMOTEIO;
			else
				status = 0;
		}
		list_del(&td->td_list);
		/* Was this TD slated to be cancelled but completed anyway? */
		if (!list_empty(&td->cancelled_td_list))
			list_del(&td->cancelled_td_list);

		/* Leave the TD around for the reset endpoint function to use
		 * (but only if it's not a control endpoint, since we already
		 * queued the Set TR dequeue pointer command for stalled
		 * control endpoints).
		 */
		if (usb_endpoint_xfer_control(&urb->ep->desc) ||
				(trb_comp_code != COMP_STALL &&
				 trb_comp_code != COMP_BABBLE)) {
			kfree(td);
		}
		urb->hcpriv = NULL;
	}
cleanup:
	inc_deq(xhci, xhci->event_ring, true);
	xhci_set_hc_event_deq(xhci);

	/* FIXME for multi-TD URBs (which have buffers bigger than 64MB) */
	if (urb) {
		usb_hcd_unlink_urb_from_ep(xhci_to_hcd(xhci), urb);
		xhci_dbg(xhci, "Giveback URB %p, len = %d, status = %d\n",
				urb, urb->actual_length, status);
		spin_unlock(&xhci->lock);
		usb_hcd_giveback_urb(xhci_to_hcd(xhci), urb, status);
		spin_lock(&xhci->lock);
	}
	return 0;
}

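/*
 * Worked example of the fast-path length math in handle_tx_event() (made-up
 * numbers): for a 4096-byte bulk IN URB where the device only delivered
 * 1024 bytes, the transfer event reports TRB_LEN(event->transfer_len) == 3072
 * bytes left untransferred on the last TRB, so
 *
 *	actual_length = 4096 - 3072 = 1024
 *
 * and status ends up 0, or -EREMOTEIO if the URB set URB_SHORT_NOT_OK.
 */
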
/*
 * This function handles all OS-owned events on the event ring.  It may drop
 * xhci->lock between event processing (e.g. to pass up port status changes).
 */
void xhci_handle_event(struct xhci_hcd *xhci)
{
	union xhci_trb *event;
	int update_ptrs = 1;
	int ret;

	xhci_dbg(xhci, "In %s\n", __func__);
	if (!xhci->event_ring || !xhci->event_ring->dequeue) {
		xhci->error_bitmask |= 1 << 1;
		return;
	}

	event = xhci->event_ring->dequeue;
	/* Does the HC or OS own the TRB? */
	if ((event->event_cmd.flags & TRB_CYCLE) !=
			xhci->event_ring->cycle_state) {
		xhci->error_bitmask |= 1 << 2;
		return;
	}
	xhci_dbg(xhci, "%s - OS owns TRB\n", __func__);

	/* FIXME: Handle more event types. */
	switch ((event->event_cmd.flags & TRB_TYPE_BITMASK)) {
	case TRB_TYPE(TRB_COMPLETION):
		xhci_dbg(xhci, "%s - calling handle_cmd_completion\n", __func__);
		handle_cmd_completion(xhci, &event->event_cmd);
		xhci_dbg(xhci, "%s - returned from handle_cmd_completion\n", __func__);
		break;
	case TRB_TYPE(TRB_PORT_STATUS):
		xhci_dbg(xhci, "%s - calling handle_port_status\n", __func__);
		handle_port_status(xhci, event);
		xhci_dbg(xhci, "%s - returned from handle_port_status\n", __func__);
		update_ptrs = 0;
		break;
	case TRB_TYPE(TRB_TRANSFER):
		xhci_dbg(xhci, "%s - calling handle_tx_event\n", __func__);
		ret = handle_tx_event(xhci, &event->trans_event);
		xhci_dbg(xhci, "%s - returned from handle_tx_event\n", __func__);
		if (ret < 0)
			xhci->error_bitmask |= 1 << 9;
		else
			update_ptrs = 0;
		break;
	default:
		if ((event->event_cmd.flags & TRB_TYPE_BITMASK) >= TRB_TYPE(48))
			handle_vendor_event(xhci, event);
		else
			xhci->error_bitmask |= 1 << 3;
	}
	/* Any of the above functions may drop and re-acquire the lock, so check
	 * to make sure a watchdog timer didn't mark the host as non-responsive.
	 */
	if (xhci->xhc_state & XHCI_STATE_DYING) {
		xhci_dbg(xhci, "xHCI host dying, returning from "
				"event handler.\n");
		return;
	}

	if (update_ptrs) {
		/* Update SW and HC event ring dequeue pointer */
		inc_deq(xhci, xhci->event_ring, true);
		xhci_set_hc_event_deq(xhci);
	}
	/* Are there more items on the event ring? */
	xhci_handle_event(xhci);
}

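/*
 * The ownership test at the top of xhci_handle_event() works because the xHC
 * writes each event TRB with its current producer cycle bit, and software
 * flips its own cycle_state each time the dequeue pointer wraps the ring.  A
 * minimal sketch of the same test, assuming a freshly initialized ring where
 * cycle_state == 1:
 *
 *	owned_by_os = (event->event_cmd.flags & TRB_CYCLE) == ring->cycle_state;
 *
 * A TRB still carrying the old cycle value therefore reads as "owned by the
 * HC", and processing stops until the controller writes it.
 */
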
/**** Endpoint Ring Operations ****/

/*
 * Generic function for queueing a TRB on a ring.
 * The caller must have checked to make sure there's room on the ring.
 */
static void queue_trb(struct xhci_hcd *xhci, struct xhci_ring *ring,
		bool consumer,
		u32 field1, u32 field2, u32 field3, u32 field4)
{
	struct xhci_generic_trb *trb;

	trb = &ring->enqueue->generic;
	trb->field[0] = field1;
	trb->field[1] = field2;
	trb->field[2] = field3;
	trb->field[3] = field4;
	inc_enq(xhci, ring, consumer);
}

/*
 * Does various checks on the endpoint ring, and makes it ready to queue num_trbs.
 * FIXME allocate segments if the ring is full.
 */
static int prepare_ring(struct xhci_hcd *xhci, struct xhci_ring *ep_ring,
		u32 ep_state, unsigned int num_trbs, gfp_t mem_flags)
{
	/* Make sure the endpoint has been added to xHC schedule */
	xhci_dbg(xhci, "Endpoint state = 0x%x\n", ep_state);
	switch (ep_state) {
	case EP_STATE_DISABLED:
		/*
		 * USB core changed config/interfaces without notifying us,
		 * or hardware is reporting the wrong state.
		 */
		xhci_warn(xhci, "WARN urb submitted to disabled ep\n");
		return -ENOENT;
	case EP_STATE_ERROR:
		xhci_warn(xhci, "WARN waiting for error on ep to be cleared\n");
		/* FIXME event handling code for error needs to clear it */
		/* XXX not sure if this should be -ENOENT or not */
		return -EINVAL;
	case EP_STATE_HALTED:
		xhci_dbg(xhci, "WARN halted endpoint, queueing URB anyway.\n");
	case EP_STATE_STOPPED:
	case EP_STATE_RUNNING:
		break;
	default:
		xhci_err(xhci, "ERROR unknown endpoint state for ep\n");
		/*
		 * FIXME issue Configure Endpoint command to try to get the HC
		 * back into a known state.
		 */
		return -EINVAL;
	}
	if (!room_on_ring(xhci, ep_ring, num_trbs)) {
		/* FIXME allocate more room */
		xhci_err(xhci, "ERROR no room on ep ring\n");
		return -ENOMEM;
	}

	if (enqueue_is_link_trb(ep_ring)) {
		struct xhci_ring *ring = ep_ring;
		union xhci_trb *next;

		xhci_dbg(xhci, "prepare_ring: pointing to link trb\n");
		next = ring->enqueue;

		while (last_trb(xhci, ring, ring->enq_seg, next)) {
			/* If we're not dealing with 0.95 hardware,
			 * clear the chain bit.
			 */
			if (!xhci_link_trb_quirk(xhci))
				next->link.control &= ~TRB_CHAIN;
			else
				next->link.control |= TRB_CHAIN;

			wmb();
			next->link.control ^= (u32) TRB_CYCLE;

			/* Toggle the cycle bit after the last ring segment. */
			if (last_trb_on_last_seg(xhci, ring, ring->enq_seg, next)) {
				ring->cycle_state = (ring->cycle_state ? 0 : 1);
				if (!in_interrupt()) {
					xhci_dbg(xhci, "queue_trb: Toggle cycle "
							"state for ring %p = %i\n",
							ring, (unsigned int)ring->cycle_state);
				}
			}
			ring->enq_seg = ring->enq_seg->next;
			ring->enqueue = ring->enq_seg->trbs;
			next = ring->enqueue;
		}
	}

	return 0;
}

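/*
 * The link-TRB loop above embodies the 0.95 quirk its comment mentions:
 * 0.95-era hosts expect the chain bit to stay set on link TRBs, while newer
 * hosts want it cleared so the TD does not appear to continue across the
 * link.  A minimal sketch of just that decision, shown in isolation (link is
 * a hypothetical pointer to the link TRB):
 *
 *	if (xhci_link_trb_quirk(xhci))
 *		link->control |= TRB_CHAIN;
 *	else
 *		link->control &= ~TRB_CHAIN;
 */
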
static int prepare_transfer(struct xhci_hcd *xhci,
		struct xhci_virt_device *xdev,
		unsigned int ep_index,
		unsigned int stream_id,
		unsigned int num_trbs,
		struct urb *urb,
		struct xhci_td **td,
		gfp_t mem_flags)
{
	int ret;
	struct xhci_ring *ep_ring;
	struct xhci_ep_ctx *ep_ctx = xhci_get_ep_ctx(xhci, xdev->out_ctx, ep_index);

	ep_ring = xhci_stream_id_to_ring(xdev, ep_index, stream_id);
	if (!ep_ring) {
		xhci_dbg(xhci, "Can't prepare ring for bad stream ID %u\n",
				stream_id);
		return -EINVAL;
	}

	ret = prepare_ring(xhci, ep_ring,
			ep_ctx->ep_info & EP_STATE_MASK,
			num_trbs, mem_flags);
	if (ret)
		return ret;

	*td = kzalloc(sizeof(struct xhci_td), mem_flags);
	if (!*td)
		return -ENOMEM;
	INIT_LIST_HEAD(&(*td)->td_list);
	INIT_LIST_HEAD(&(*td)->cancelled_td_list);

	ret = usb_hcd_link_urb_to_ep(xhci_to_hcd(xhci), urb);
	if (unlikely(ret)) {
		kfree(*td);
		return ret;
	}

	(*td)->urb = urb;
	urb->hcpriv = (void *) (*td);
	/* Add this TD to the tail of the endpoint ring's TD list */
	list_add_tail(&(*td)->td_list, &ep_ring->td_list);
	(*td)->start_seg = ep_ring->enq_seg;
	(*td)->first_trb = ep_ring->enqueue;

	return 0;
}

static unsigned int count_sg_trbs_needed(struct xhci_hcd *xhci, struct urb *urb)
{
	int num_sgs, num_trbs, running_total, temp, i;
	struct scatterlist *sg;

	sg = NULL;
	num_sgs = urb->num_sgs;
	temp = urb->transfer_buffer_length;

	xhci_dbg(xhci, "count sg list trbs: \n");
	num_trbs = 0;
	for_each_sg(urb->sg, sg, num_sgs, i) {
		unsigned int previous_total_trbs = num_trbs;
		unsigned int len = sg_dma_len(sg);

		/* Scatter gather list entries may cross 64KB boundaries */
		running_total = TRB_MAX_BUFF_SIZE -
			(sg_dma_address(sg) & ((1 << TRB_MAX_BUFF_SHIFT) - 1));
		if (running_total != 0)
			num_trbs++;

		/* How many more 64KB chunks to transfer, how many more TRBs? */
		while (running_total < sg_dma_len(sg)) {
			num_trbs++;
			running_total += TRB_MAX_BUFF_SIZE;
		}
		xhci_dbg(xhci, " sg #%d: dma = %#llx, len = %#x (%d), num_trbs = %d\n",
				i, (unsigned long long)sg_dma_address(sg),
				len, len, num_trbs - previous_total_trbs);

		len = min_t(int, len, temp);
		temp -= len;
		if (temp == 0)
			break;
	}
	xhci_dbg(xhci, "\n");
	if (!in_interrupt())
		dev_dbg(&urb->dev->dev, "ep %#x - urb len = %d, sglist used, num_trbs = %d\n",
				urb->ep->desc.bEndpointAddress,
				urb->transfer_buffer_length,
				num_trbs);
	return num_trbs;
}

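/*
 * Worked example of the counting above (made-up addresses): an sg entry of
 * 100 KB that starts 4 KB before a 64 KB boundary needs three TRBs, because
 * no single TRB buffer may cross a 64 KB boundary:
 *
 *	TRB 1:  4 KB   (up to the first boundary)
 *	TRB 2: 64 KB   (one full chunk)
 *	TRB 3: 32 KB   (the remainder)
 *
 * In the loop's terms: running_total starts at 4 KB (one TRB), then two more
 * 64 KB steps are needed before running_total reaches sg_dma_len(sg).
 */
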
static void check_trb_math(struct urb *urb, int num_trbs, int running_total)
{
	if (num_trbs != 0)
		dev_dbg(&urb->dev->dev, "%s - ep %#x - Miscalculated number of "
				"TRBs, %d left\n", __func__,
				urb->ep->desc.bEndpointAddress, num_trbs);
	if (running_total != urb->transfer_buffer_length)
		dev_dbg(&urb->dev->dev, "%s - ep %#x - Miscalculated tx length, "
				"queued %#x (%d), asked for %#x (%d)\n",
				__func__,
				urb->ep->desc.bEndpointAddress,
				running_total, running_total,
				urb->transfer_buffer_length,
				urb->transfer_buffer_length);
}

static void giveback_first_trb(struct xhci_hcd *xhci, int slot_id,
		unsigned int ep_index, unsigned int stream_id, int start_cycle,
		struct xhci_generic_trb *start_trb, struct xhci_td *td)
{
	/*
	 * Pass all the TRBs to the hardware at once and make sure this write
	 * isn't reordered.
	 */
	wmb();
	start_trb->field[3] |= start_cycle;
	ring_ep_doorbell(xhci, slot_id, ep_index, stream_id);
}

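/*
 * The ordering here is the whole point: every other TRB of the TD is written
 * first, wmb() makes those writes visible, and only then is the saved cycle
 * value ORed into the first TRB so the hardware sees a complete TD.  A
 * minimal sketch of the enqueue-side pattern the bulk/control paths below
 * follow:
 *
 *	start_trb = &ring->enqueue->generic;
 *	start_cycle = ring->cycle_state;
 *	(queue every TRB, leaving the first TRB's cycle bit untouched)
 *	giveback_first_trb(xhci, slot_id, ep_index, stream_id,
 *			start_cycle, start_trb, td);
 */
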
/*
 * xHCI uses normal TRBs for both bulk and interrupt.  When the interrupt
 * endpoint is to be serviced, the xHC will consume (at most) one TD.  A TD
 * (comprised of sg list entries) can take several service intervals to
 * transmit.
 */
int xhci_queue_intr_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
		struct urb *urb, int slot_id, unsigned int ep_index)
{
	struct xhci_ep_ctx *ep_ctx = xhci_get_ep_ctx(xhci,
			xhci->devs[slot_id]->out_ctx, ep_index);
	int xhci_interval;
	int ep_interval;

	xhci_interval = EP_INTERVAL_TO_UFRAMES(ep_ctx->ep_info);
	ep_interval = urb->interval;
	/* Convert to microframes */
	if (urb->dev->speed == USB_SPEED_LOW ||
			urb->dev->speed == USB_SPEED_FULL)
		ep_interval *= 8;
	/* FIXME change this to a warning and a suggestion to use the new API
	 * to set the polling interval (once the API is added).
	 */
	if (xhci_interval != ep_interval) {
		if (printk_ratelimit())
			dev_dbg(&urb->dev->dev, "Driver uses different interval"
					" (%d microframe%s) than xHCI "
					"(%d microframe%s)\n",
					ep_interval,
					ep_interval == 1 ? "" : "s",
					xhci_interval,
					xhci_interval == 1 ? "" : "s");
		urb->interval = xhci_interval;
		/* Convert back to frames for LS/FS devices */
		if (urb->dev->speed == USB_SPEED_LOW ||
				urb->dev->speed == USB_SPEED_FULL)
			urb->interval /= 8;
	}
	return xhci_queue_bulk_tx(xhci, GFP_ATOMIC, urb, slot_id, ep_index);
}

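/*
 * Worked example of the interval fix-up above: a full-speed interrupt
 * endpoint whose driver set urb->interval = 4 frames is first converted to
 * 32 microframes for the comparison.  If the endpoint context says the xHC
 * is actually polling every 16 microframes, urb->interval is overwritten
 * with 16 and then converted back to 2 frames, so the USB core's bookkeeping
 * matches what the hardware will really do.
 */
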
/*
 * The TD size is the number of bytes remaining in the TD (including this TRB),
 * right shifted by 10.
 * It must fit in bits 21:17, so it can't be bigger than 31.
 */
static u32 xhci_td_remainder(unsigned int remainder)
{
	u32 max = (1 << (21 - 17 + 1)) - 1;

	if ((remainder >> 10) >= max)
		return max << 17;
	else
		return (remainder >> 10) << 17;
}

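/*
 * Worked examples of the encoding above: with 4096 bytes left in the TD,
 * 4096 >> 10 = 4, so the TRB carries 4 << 17 in its length field.  With
 * 96 KB left, 98304 >> 10 = 96, which exceeds the 5-bit field maximum of 31,
 * so the value is clamped and the TRB carries 31 << 17.
 */
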
static int queue_bulk_sg_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
		struct urb *urb, int slot_id, unsigned int ep_index)
{
	struct xhci_ring *ep_ring;
	unsigned int num_trbs;
	struct xhci_td *td;
	struct scatterlist *sg;
	int num_sgs;
	int trb_buff_len, this_sg_len, running_total;
	bool first_trb;
	u64 addr;
	struct xhci_generic_trb *start_trb;
	int start_cycle;

	ep_ring = xhci_urb_to_transfer_ring(xhci, urb);
	if (!ep_ring)
		return -EINVAL;

	num_trbs = count_sg_trbs_needed(xhci, urb);
	num_sgs = urb->num_sgs;

	trb_buff_len = prepare_transfer(xhci, xhci->devs[slot_id],
			ep_index, urb->stream_id,
			num_trbs, urb, &td, mem_flags);
	if (trb_buff_len < 0)
		return trb_buff_len;
	/*
	 * Don't give the first TRB to the hardware (by toggling the cycle bit)
	 * until we've finished creating all the other TRBs.  The ring's cycle
	 * state may change as we enqueue the other TRBs, so save it too.
	 */
	start_trb = &ep_ring->enqueue->generic;
	start_cycle = ep_ring->cycle_state;

	running_total = 0;
	/*
	 * How much data is in the first TRB?
	 *
	 * There are three forces at work for TRB buffer pointers and lengths:
	 * 1. We don't want to walk off the end of this sg-list entry buffer.
	 * 2. The transfer length that the driver requested may be smaller than
	 *    the amount of memory allocated for this scatter-gather list.
	 * 3. TRB buffers can't cross 64KB boundaries.
	 */
	sg = urb->sg;
	addr = (u64) sg_dma_address(sg);
	this_sg_len = sg_dma_len(sg);
	trb_buff_len = TRB_MAX_BUFF_SIZE -
		(addr & ((1 << TRB_MAX_BUFF_SHIFT) - 1));
	trb_buff_len = min_t(int, trb_buff_len, this_sg_len);
	if (trb_buff_len > urb->transfer_buffer_length)
		trb_buff_len = urb->transfer_buffer_length;
	xhci_dbg(xhci, "First length to xfer from 1st sglist entry = %u\n",
			trb_buff_len);

	first_trb = true;
	/* Queue the first TRB, even if it's zero-length */
	do {
		u32 field = 0;
		u32 length_field = 0;
		u32 remainder = 0;

		/* Don't change the cycle bit of the first TRB until later */
		if (first_trb)
			first_trb = false;
		else
			field |= ep_ring->cycle_state;

		/* Chain all the TRBs together; clear the chain bit in the last
		 * TRB to indicate it's the last TRB in the chain.
		 */
		if (num_trbs > 1) {
			field |= TRB_CHAIN;
		} else {
			/* FIXME - add check for ZERO_PACKET flag before this */
			td->last_trb = ep_ring->enqueue;
			field |= TRB_IOC;
		}
		xhci_dbg(xhci, " sg entry: dma = %#x, len = %#x (%d), "
				"64KB boundary at %#x, end dma = %#x\n",
				(unsigned int) addr, trb_buff_len, trb_buff_len,
				(unsigned int) (addr + TRB_MAX_BUFF_SIZE) & ~(TRB_MAX_BUFF_SIZE - 1),
				(unsigned int) addr + trb_buff_len);
		if (TRB_MAX_BUFF_SIZE -
				(addr & ((1 << TRB_MAX_BUFF_SHIFT) - 1)) < trb_buff_len) {
			xhci_warn(xhci, "WARN: sg dma xfer crosses 64KB boundaries!\n");
			xhci_dbg(xhci, "Next boundary at %#x, end dma = %#x\n",
					(unsigned int) (addr + TRB_MAX_BUFF_SIZE) & ~(TRB_MAX_BUFF_SIZE - 1),
					(unsigned int) addr + trb_buff_len);
		}
		remainder = xhci_td_remainder(urb->transfer_buffer_length -
				running_total);
		length_field = TRB_LEN(trb_buff_len) |
			remainder |
			TRB_INTR_TARGET(0);
		queue_trb(xhci, ep_ring, false,
				lower_32_bits(addr),
				upper_32_bits(addr),
				length_field,
				/* We always want to know if the TRB was short,
				 * or we won't get an event when it completes.
				 * (Unless we use event data TRBs, which are a
				 * waste of space and HC resources.)
				 */
				field | TRB_ISP | TRB_TYPE(TRB_NORMAL));
		--num_trbs;
		running_total += trb_buff_len;

		/* Calculate length for next transfer --
		 * Are we done queueing all the TRBs for this sg entry?
		 */
		this_sg_len -= trb_buff_len;
		if (this_sg_len == 0) {
			--num_sgs;
			if (num_sgs == 0)
				break;
			sg = sg_next(sg);
			addr = (u64) sg_dma_address(sg);
			this_sg_len = sg_dma_len(sg);
		} else {
			addr += trb_buff_len;
		}

		trb_buff_len = TRB_MAX_BUFF_SIZE -
			(addr & ((1 << TRB_MAX_BUFF_SHIFT) - 1));
		trb_buff_len = min_t(int, trb_buff_len, this_sg_len);
		if (running_total + trb_buff_len > urb->transfer_buffer_length)
			trb_buff_len =
				urb->transfer_buffer_length - running_total;
	} while (running_total < urb->transfer_buffer_length);

	check_trb_math(urb, num_trbs, running_total);
	giveback_first_trb(xhci, slot_id, ep_index, urb->stream_id,
			start_cycle, start_trb, td);
	return 0;
}

/* This is very similar to what ehci-q.c qtd_fill() does */
int xhci_queue_bulk_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
		struct urb *urb, int slot_id, unsigned int ep_index)
{
	struct xhci_ring *ep_ring;
	struct xhci_td *td;
	int num_trbs;
	struct xhci_generic_trb *start_trb;
	bool first_trb;
	int start_cycle;
	u32 field, length_field;
	int running_total, trb_buff_len, ret;
	u64 addr;

	if (urb->num_sgs)
		return queue_bulk_sg_tx(xhci, mem_flags, urb, slot_id, ep_index);

	ep_ring = xhci_urb_to_transfer_ring(xhci, urb);
	if (!ep_ring)
		return -EINVAL;

	num_trbs = 0;
	/* How much data is (potentially) left before the 64KB boundary? */
	running_total = TRB_MAX_BUFF_SIZE -
		(urb->transfer_dma & ((1 << TRB_MAX_BUFF_SHIFT) - 1));

	/* If there's some data on this 64KB chunk, or we have to send a
	 * zero-length transfer, we need at least one TRB
	 */
	if (running_total != 0 || urb->transfer_buffer_length == 0)
		num_trbs++;
	/* How many more 64KB chunks to transfer, how many more TRBs? */
	while (running_total < urb->transfer_buffer_length) {
		num_trbs++;
		running_total += TRB_MAX_BUFF_SIZE;
	}
	/* FIXME: this doesn't deal with URB_ZERO_PACKET - need one more */

	if (!in_interrupt())
		dev_dbg(&urb->dev->dev, "ep %#x - urb len = %#x (%d), addr = %#llx, num_trbs = %d\n",
				urb->ep->desc.bEndpointAddress,
				urb->transfer_buffer_length,
				urb->transfer_buffer_length,
				(unsigned long long)urb->transfer_dma,
				num_trbs);

	ret = prepare_transfer(xhci, xhci->devs[slot_id],
			ep_index, urb->stream_id,
			num_trbs, urb, &td, mem_flags);
	if (ret < 0)
		return ret;

	/*
	 * Don't give the first TRB to the hardware (by toggling the cycle bit)
	 * until we've finished creating all the other TRBs.  The ring's cycle
	 * state may change as we enqueue the other TRBs, so save it too.
	 */
	start_trb = &ep_ring->enqueue->generic;
	start_cycle = ep_ring->cycle_state;

	running_total = 0;
	/* How much data is in the first TRB? */
	addr = (u64) urb->transfer_dma;
	trb_buff_len = TRB_MAX_BUFF_SIZE -
		(urb->transfer_dma & ((1 << TRB_MAX_BUFF_SHIFT) - 1));
	if (urb->transfer_buffer_length < trb_buff_len)
		trb_buff_len = urb->transfer_buffer_length;

	first_trb = true;

	/* Queue the first TRB, even if it's zero-length */
	do {
		u32 remainder = 0;
		field = 0;

		/* Don't change the cycle bit of the first TRB until later */
		if (first_trb)
			first_trb = false;
		else
			field |= ep_ring->cycle_state;

		/* Chain all the TRBs together; clear the chain bit in the last
		 * TRB to indicate it's the last TRB in the chain.
		 */
		if (num_trbs > 1) {
			field |= TRB_CHAIN;
		} else {
			/* FIXME - add check for ZERO_PACKET flag before this */
			td->last_trb = ep_ring->enqueue;
			field |= TRB_IOC;
		}
		remainder = xhci_td_remainder(urb->transfer_buffer_length -
				running_total);
		length_field = TRB_LEN(trb_buff_len) |
			remainder |
			TRB_INTR_TARGET(0);
		queue_trb(xhci, ep_ring, false,
				lower_32_bits(addr),
				upper_32_bits(addr),
				length_field,
				/* We always want to know if the TRB was short,
				 * or we won't get an event when it completes.
				 * (Unless we use event data TRBs, which are a
				 * waste of space and HC resources.)
				 */
				field | TRB_ISP | TRB_TYPE(TRB_NORMAL));
		--num_trbs;
		running_total += trb_buff_len;

		/* Calculate length for next transfer */
		addr += trb_buff_len;
		trb_buff_len = urb->transfer_buffer_length - running_total;
		if (trb_buff_len > TRB_MAX_BUFF_SIZE)
			trb_buff_len = TRB_MAX_BUFF_SIZE;
	} while (running_total < urb->transfer_buffer_length);

	check_trb_math(urb, num_trbs, running_total);
	giveback_first_trb(xhci, slot_id, ep_index, urb->stream_id,
			start_cycle, start_trb, td);
	return 0;
}

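/*
 * Worked example of the TRB counting at the top of xhci_queue_bulk_tx()
 * (made-up addresses): a 128 KB contiguous buffer whose DMA address sits
 * 16 KB below a 64 KB boundary needs three normal TRBs:
 *
 *	TRB 1: 16 KB   (up to the boundary)
 *	TRB 2: 64 KB
 *	TRB 3: 48 KB
 *
 * The first loop counts exactly those chunks; the do/while loop then queues
 * them, recomputing trb_buff_len so the last TRB is trimmed to whatever is
 * left of transfer_buffer_length.
 */
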
/* Caller must have locked xhci->lock */
int xhci_queue_ctrl_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
		struct urb *urb, int slot_id, unsigned int ep_index)
{
	struct xhci_ring *ep_ring;
	int num_trbs;
	int ret;
	struct usb_ctrlrequest *setup;
	struct xhci_generic_trb *start_trb;
	int start_cycle;
	u32 field, length_field;
	struct xhci_td *td;

	ep_ring = xhci_urb_to_transfer_ring(xhci, urb);
	if (!ep_ring)
		return -EINVAL;

	/*
	 * Need to copy setup packet into setup TRB, so we can't use the setup
	 * DMA address.
	 */
	if (!urb->setup_packet)
		return -EINVAL;

	if (!in_interrupt())
		xhci_dbg(xhci, "Queueing ctrl tx for slot id %d, ep %d\n",
				slot_id, ep_index);
	/* 1 TRB for setup, 1 for status */
	num_trbs = 2;
	/*
	 * Don't need to check if we need additional event data and normal TRBs,
	 * since data in control transfers will never get bigger than 16MB
	 * XXX: can we get a buffer that crosses 64KB boundaries?
	 */
	if (urb->transfer_buffer_length > 0)
		num_trbs++;
	ret = prepare_transfer(xhci, xhci->devs[slot_id],
			ep_index, urb->stream_id,
			num_trbs, urb, &td, mem_flags);
	if (ret < 0)
		return ret;

	/*
	 * Don't give the first TRB to the hardware (by toggling the cycle bit)
	 * until we've finished creating all the other TRBs.  The ring's cycle
	 * state may change as we enqueue the other TRBs, so save it too.
	 */
	start_trb = &ep_ring->enqueue->generic;
	start_cycle = ep_ring->cycle_state;

	/* Queue setup TRB - see section 6.4.1.2.1 */
	/* FIXME better way to translate setup_packet into two u32 fields? */
	setup = (struct usb_ctrlrequest *) urb->setup_packet;
	queue_trb(xhci, ep_ring, false,
			/* FIXME endianness is probably going to bite my ass here. */
			setup->bRequestType | setup->bRequest << 8 | setup->wValue << 16,
			setup->wIndex | setup->wLength << 16,
			TRB_LEN(8) | TRB_INTR_TARGET(0),
			/* Immediate data in pointer */
			TRB_IDT | TRB_TYPE(TRB_SETUP));

	/* If there's data, queue data TRBs */
	field = 0;
	length_field = TRB_LEN(urb->transfer_buffer_length) |
		xhci_td_remainder(urb->transfer_buffer_length) |
		TRB_INTR_TARGET(0);
	if (urb->transfer_buffer_length > 0) {
		if (setup->bRequestType & USB_DIR_IN)
			field |= TRB_DIR_IN;
		queue_trb(xhci, ep_ring, false,
				lower_32_bits(urb->transfer_dma),
				upper_32_bits(urb->transfer_dma),
				length_field,
				/* Event on short tx */
				field | TRB_ISP | TRB_TYPE(TRB_DATA) | ep_ring->cycle_state);
	}

	/* Save the DMA address of the last TRB in the TD */
	td->last_trb = ep_ring->enqueue;

	/* Queue status TRB - see Table 7 and sections 4.11.2.2 and 6.4.1.2.3 */
	/* If the device sent data, the status stage is an OUT transfer */
	if (urb->transfer_buffer_length > 0 && setup->bRequestType & USB_DIR_IN)
		field = 0;
	else
		field = TRB_DIR_IN;
	queue_trb(xhci, ep_ring, false,
			0,
			0,
			TRB_INTR_TARGET(0),
			/* Event on completion */
			field | TRB_IOC | TRB_TYPE(TRB_STATUS) | ep_ring->cycle_state);

	giveback_first_trb(xhci, slot_id, ep_index, 0,
			start_cycle, start_trb, td);
	return 0;
}

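/*
 * Worked example of the setup-TRB packing above, for a standard
 * GET_DESCRIPTOR(DEVICE) request (bRequestType 0x80, bRequest 0x06,
 * wValue 0x0100, wIndex 0, wLength 18):
 *
 *	field1 = 0x80 | (0x06 << 8) | (0x0100 << 16) = 0x01000680
 *	field2 = 0x0000 | (18 << 16)                 = 0x00120000
 *
 * i.e. the 8-byte setup packet is carried as immediate data (TRB_IDT), so
 * no separate DMA buffer is needed for the setup stage.  As the FIXME above
 * notes, this packing assumes a little-endian view of the wValue, wIndex,
 * and wLength fields.
 */
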
/**** Command Ring Operations ****/

/* Generic function for queueing a command TRB on the command ring.
 * Check to make sure there's room on the command ring for one command TRB.
 * Also check that there's room reserved for commands that must not fail.
 * If this is a command that must not fail, meaning command_must_succeed = TRUE,
 * then only check for the number of reserved spots.
 * Don't decrement xhci->cmd_ring_reserved_trbs after we've queued the TRB
 * because the command event handler may want to resubmit a failed command.
 */
static int queue_command(struct xhci_hcd *xhci, u32 field1, u32 field2,
		u32 field3, u32 field4, bool command_must_succeed)
{
	int reserved_trbs = xhci->cmd_ring_reserved_trbs;

	if (!command_must_succeed)
		reserved_trbs++;

	if (!room_on_ring(xhci, xhci->cmd_ring, reserved_trbs)) {
		if (!in_interrupt())
			xhci_err(xhci, "ERR: No room for command on command ring\n");
		if (command_must_succeed)
			xhci_err(xhci, "ERR: Reserved TRB counting for "
					"unfailable commands failed.\n");
		return -ENOMEM;
	}
	queue_trb(xhci, xhci->cmd_ring, false, field1, field2, field3,
			field4 | xhci->cmd_ring->cycle_state);
	return 0;
}

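/*
 * Worked example of the reservation check above: with
 * cmd_ring_reserved_trbs == 2, an ordinary command only queues if the ring
 * has room for 3 TRBs (the command itself plus the 2 reserved slots), while
 * a must-succeed command is allowed to dip into the reserved slots and only
 * needs room for 2.
 */
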
/* Queue a no-op command on the command ring */
static int queue_cmd_noop(struct xhci_hcd *xhci)
{
	return queue_command(xhci, 0, 0, 0, TRB_TYPE(TRB_CMD_NOOP), false);
}

/*
 * Place a no-op command on the command ring to test the command and
 * event ring.
 */
void *xhci_setup_one_noop(struct xhci_hcd *xhci)
{
	if (queue_cmd_noop(xhci) < 0)
		return NULL;
	xhci->noops_submitted++;
	return xhci_ring_cmd_db;
}

/* Queue a slot enable or disable request on the command ring */
int xhci_queue_slot_control(struct xhci_hcd *xhci, u32 trb_type, u32 slot_id)
{
	return queue_command(xhci, 0, 0, 0,
			TRB_TYPE(trb_type) | SLOT_ID_FOR_TRB(slot_id), false);
}

/* Queue an address device command TRB */
int xhci_queue_address_device(struct xhci_hcd *xhci, dma_addr_t in_ctx_ptr,
		u32 slot_id)
{
	return queue_command(xhci, lower_32_bits(in_ctx_ptr),
			upper_32_bits(in_ctx_ptr), 0,
			TRB_TYPE(TRB_ADDR_DEV) | SLOT_ID_FOR_TRB(slot_id),
			false);
}

int xhci_queue_vendor_command(struct xhci_hcd *xhci,
		u32 field1, u32 field2, u32 field3, u32 field4)
{
	return queue_command(xhci, field1, field2, field3, field4, false);
}

/* Queue a reset device command TRB */
int xhci_queue_reset_device(struct xhci_hcd *xhci, u32 slot_id)
{
	return queue_command(xhci, 0, 0, 0,
			TRB_TYPE(TRB_RESET_DEV) | SLOT_ID_FOR_TRB(slot_id),
			false);
}

/* Queue a configure endpoint command TRB */
int xhci_queue_configure_endpoint(struct xhci_hcd *xhci, dma_addr_t in_ctx_ptr,
		u32 slot_id, bool command_must_succeed)
{
	return queue_command(xhci, lower_32_bits(in_ctx_ptr),
			upper_32_bits(in_ctx_ptr), 0,
			TRB_TYPE(TRB_CONFIG_EP) | SLOT_ID_FOR_TRB(slot_id),
			command_must_succeed);
}

/* Queue an evaluate context command TRB */
int xhci_queue_evaluate_context(struct xhci_hcd *xhci, dma_addr_t in_ctx_ptr,
		u32 slot_id)
{
	return queue_command(xhci, lower_32_bits(in_ctx_ptr),
			upper_32_bits(in_ctx_ptr), 0,
			TRB_TYPE(TRB_EVAL_CONTEXT) | SLOT_ID_FOR_TRB(slot_id),
			false);
}

int xhci_queue_stop_endpoint(struct xhci_hcd *xhci, int slot_id,
		unsigned int ep_index)
{
	u32 trb_slot_id = SLOT_ID_FOR_TRB(slot_id);
	u32 trb_ep_index = EP_ID_FOR_TRB(ep_index);
	u32 type = TRB_TYPE(TRB_STOP_RING);

	return queue_command(xhci, 0, 0, 0,
			trb_slot_id | trb_ep_index | type, false);
}

/* Set Transfer Ring Dequeue Pointer command.
 * This should not be used for endpoints that have streams enabled.
 */
static int queue_set_tr_deq(struct xhci_hcd *xhci, int slot_id,
		unsigned int ep_index, unsigned int stream_id,
		struct xhci_segment *deq_seg,
		union xhci_trb *deq_ptr, u32 cycle_state)
{
	dma_addr_t addr;
	u32 trb_slot_id = SLOT_ID_FOR_TRB(slot_id);
	u32 trb_ep_index = EP_ID_FOR_TRB(ep_index);
	u32 trb_stream_id = STREAM_ID_FOR_TRB(stream_id);
	u32 type = TRB_TYPE(TRB_SET_DEQ);

	addr = xhci_trb_virt_to_dma(deq_seg, deq_ptr);
	if (addr == 0) {
		xhci_warn(xhci, "WARN Cannot submit Set TR Deq Ptr\n");
		xhci_warn(xhci, "WARN deq seg = %p, deq pt = %p\n",
				deq_seg, deq_ptr);
		return 0;
	}
	return queue_command(xhci, lower_32_bits(addr) | cycle_state,
			upper_32_bits(addr), trb_stream_id,
			trb_slot_id | trb_ep_index | type, false);
}

int xhci_queue_reset_ep(struct xhci_hcd *xhci, int slot_id,
		unsigned int ep_index)
{
	u32 trb_slot_id = SLOT_ID_FOR_TRB(slot_id);
	u32 trb_ep_index = EP_ID_FOR_TRB(ep_index);
	u32 type = TRB_TYPE(TRB_RESET_EP);

	return queue_command(xhci, 0, 0, 0, trb_slot_id | trb_ep_index | type,
			false);
}