xhci-ring.c

  1. /*
  2. * xHCI host controller driver
  3. *
  4. * Copyright (C) 2008 Intel Corp.
  5. *
  6. * Author: Sarah Sharp
  7. * Some code borrowed from the Linux EHCI driver.
  8. *
  9. * This program is free software; you can redistribute it and/or modify
  10. * it under the terms of the GNU General Public License version 2 as
  11. * published by the Free Software Foundation.
  12. *
  13. * This program is distributed in the hope that it will be useful, but
  14. * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
  15. * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
  16. * for more details.
  17. *
  18. * You should have received a copy of the GNU General Public License
  19. * along with this program; if not, write to the Free Software Foundation,
  20. * Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
  21. */
  22. /*
  23. * Ring initialization rules:
  24. * 1. Each segment is initialized to zero, except for link TRBs.
  25. * 2. Ring cycle state = 0. This represents Producer Cycle State (PCS) or
  26. * Consumer Cycle State (CCS), depending on ring function.
  27. * 3. Enqueue pointer = dequeue pointer = address of first TRB in the segment.
  28. *
  29. * Ring behavior rules:
  30. * 1. A ring is empty if enqueue == dequeue. This means there will always be at
  31. * least one free TRB in the ring. This is useful if you want to turn that
  32. * into a link TRB and expand the ring.
  33. * 2. When incrementing an enqueue or dequeue pointer, if the next TRB is a
  34. * link TRB, then load the pointer with the address in the link TRB. If the
  35. * link TRB had its toggle bit set, you may need to update the ring cycle
  36. * state (see cycle bit rules). You may have to do this multiple times
  37. * until you reach a non-link TRB.
  38. * 3. A ring is full if enqueue++ (for the definition of increment above)
  39. * equals the dequeue pointer.
  40. *
  41. * Cycle bit rules:
  42. * 1. When a consumer increments a dequeue pointer and encounters a toggle bit
  43. * in a link TRB, it must toggle the ring cycle state.
  44. * 2. When a producer increments an enqueue pointer and encounters a toggle bit
  45. * in a link TRB, it must toggle the ring cycle state.
  46. *
  47. * Producer rules:
  48. * 1. Check if ring is full before you enqueue.
  49. * 2. Write the ring cycle state to the cycle bit in the TRB you're enqueuing.
  50. * Update enqueue pointer between each write (which may update the ring
  51. * cycle state).
  52. * 3. Notify consumer. If SW is producer, it rings the doorbell for command
  53. * and endpoint rings. If HC is the producer for the event ring,
  54. * it generates an interrupt according to interrupt modulation rules.
  55. *
  56. * Consumer rules:
  57. * 1. Check if TRB belongs to you. If the cycle bit == your ring cycle state,
  58. * the TRB is owned by the consumer.
  59. * 2. Update dequeue pointer (which may update the ring cycle state) and
  60. * continue processing TRBs until you reach a TRB which is not owned by you.
  61. * 3. Notify the producer. SW is the consumer for the event ring, and it
  62. * updates event ring dequeue pointer. HC is the consumer for the command and
  63. * endpoint rings; it generates events on the event ring for these.
  64. */
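The rules above reduce to a small amount of per-ring state. The sketch below is an illustrative, self-contained model of the consumer side only; the toy_* names and the fixed four-TRB ring with a plain wrap are invented for the example and are not the driver's xhci_ring/xhci_trb structures (real rings follow link TRBs with a toggle bit rather than wrapping an index).

/*
 * Illustrative sketch only: a toy model of the cycle-bit ownership rule.
 * The wrap of the dequeue index stands in for following a link TRB whose
 * toggle bit is set.
 */
#include <stdbool.h>
#include <stdio.h>

#define TOY_TRBS 4

struct toy_trb {
	unsigned int cycle;
};

struct toy_ring {
	struct toy_trb trbs[TOY_TRBS];
	unsigned int deq;		/* consumer (dequeue) index */
	unsigned int cycle_state;	/* Consumer Cycle State (CCS) */
};

/* Consumer rule 1: a TRB belongs to us when its cycle bit matches CCS. */
static bool toy_trb_owned(const struct toy_ring *ring)
{
	return ring->trbs[ring->deq].cycle == ring->cycle_state;
}

/* Consumer rule 2: advance the dequeue index, toggling CCS on wrap. */
static void toy_inc_deq(struct toy_ring *ring)
{
	if (++ring->deq == TOY_TRBS) {
		ring->deq = 0;
		ring->cycle_state ^= 1;
	}
}

int main(void)
{
	struct toy_ring ring = { .cycle_state = 1 };

	/* The producer hands over two TRBs by writing the current cycle state. */
	ring.trbs[0].cycle = 1;
	ring.trbs[1].cycle = 1;

	while (toy_trb_owned(&ring)) {
		printf("consuming TRB %u\n", ring.deq);
		toy_inc_deq(&ring);
	}
	return 0;
}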
  65. #include <linux/scatterlist.h>
  66. #include "xhci.h"
  67. /*
  68. * Returns zero if the TRB isn't in this segment, otherwise it returns the DMA
  69. * address of the TRB.
  70. */
  71. dma_addr_t xhci_trb_virt_to_dma(struct xhci_segment *seg,
  72. union xhci_trb *trb)
  73. {
  74. unsigned long segment_offset;
  75. if (!seg || !trb || trb < seg->trbs)
  76. return 0;
  77. /* offset in TRBs */
  78. segment_offset = trb - seg->trbs;
  79. if (segment_offset >= TRBS_PER_SEGMENT)
  80. return 0;
  81. return seg->dma + (segment_offset * sizeof(*trb));
  82. }
  83. /* Does this link TRB point to the first segment in a ring,
  84. * or was the previous TRB the last TRB on the last segment in the ERST?
  85. */
  86. static inline bool last_trb_on_last_seg(struct xhci_hcd *xhci, struct xhci_ring *ring,
  87. struct xhci_segment *seg, union xhci_trb *trb)
  88. {
  89. if (ring == xhci->event_ring)
  90. return (trb == &seg->trbs[TRBS_PER_SEGMENT]) &&
  91. (seg->next == xhci->event_ring->first_seg);
  92. else
  93. return trb->link.control & LINK_TOGGLE;
  94. }
  95. /* Is this TRB a link TRB or was the last TRB the last TRB in this event ring
  96. * segment? I.e. would the updated event TRB pointer step off the end of the
  97. * event seg?
  98. */
  99. static inline int last_trb(struct xhci_hcd *xhci, struct xhci_ring *ring,
  100. struct xhci_segment *seg, union xhci_trb *trb)
  101. {
  102. if (ring == xhci->event_ring)
  103. return trb == &seg->trbs[TRBS_PER_SEGMENT];
  104. else
  105. return (trb->link.control & TRB_TYPE_BITMASK) == TRB_TYPE(TRB_LINK);
  106. }
  107. /* Updates trb to point to the next TRB in the ring, and updates seg if the next
  108. * TRB is in a new segment. This does not skip over link TRBs, and it does not
  109. * affect the ring dequeue or enqueue pointers.
  110. */
  111. static void next_trb(struct xhci_hcd *xhci,
  112. struct xhci_ring *ring,
  113. struct xhci_segment **seg,
  114. union xhci_trb **trb)
  115. {
  116. if (last_trb(xhci, ring, *seg, *trb)) {
  117. *seg = (*seg)->next;
  118. *trb = ((*seg)->trbs);
  119. } else {
  120. (*trb)++;
  121. }
  122. }
  123. /*
  124. * See Cycle bit rules. SW is the consumer for the event ring only.
  125. * Don't make a ring full of link TRBs. That would be dumb and this would loop.
  126. */
  127. static void inc_deq(struct xhci_hcd *xhci, struct xhci_ring *ring, bool consumer)
  128. {
  129. union xhci_trb *next = ++(ring->dequeue);
  130. ring->deq_updates++;
  131. /* Update the dequeue pointer further if that was a link TRB or we're at
  132. * the end of an event ring segment (which doesn't have link TRBs)
  133. */
  134. while (last_trb(xhci, ring, ring->deq_seg, next)) {
  135. if (consumer && last_trb_on_last_seg(xhci, ring, ring->deq_seg, next)) {
  136. ring->cycle_state = (ring->cycle_state ? 0 : 1);
  137. if (!in_interrupt())
  138. xhci_dbg(xhci, "Toggle cycle state for ring %p = %i\n",
  139. ring,
  140. (unsigned int) ring->cycle_state);
  141. }
  142. ring->deq_seg = ring->deq_seg->next;
  143. ring->dequeue = ring->deq_seg->trbs;
  144. next = ring->dequeue;
  145. }
  146. }
  147. /*
  148. * See Cycle bit rules. SW is the consumer for the event ring only.
  149. * Don't make a ring full of link TRBs. That would be dumb and this would loop.
  150. *
  151. * If we've just enqueued a TRB that is in the middle of a TD (meaning the
  152. * chain bit is set), then set the chain bit in all the following link TRBs.
  153. * If we've enqueued the last TRB in a TD, make sure the following link TRBs
  154. * have their chain bit cleared (so that each Link TRB is a separate TD).
  155. *
  156. * Section 6.4.4.1 of the 0.95 spec says link TRBs cannot have the chain bit
  157. * set, but other sections talk about dealing with the chain bit set.
  158. * Assume section 6.4.4.1 is wrong, and the chain bit can be set in a Link TRB.
  159. */
  160. static void inc_enq(struct xhci_hcd *xhci, struct xhci_ring *ring, bool consumer)
  161. {
  162. u32 chain;
  163. union xhci_trb *next;
  164. chain = ring->enqueue->generic.field[3] & TRB_CHAIN;
  165. next = ++(ring->enqueue);
  166. ring->enq_updates++;
  167. /* Update the enqueue pointer further if that was a link TRB or we're at
  168. * the end of an event ring segment (which doesn't have link TRBs)
  169. */
  170. while (last_trb(xhci, ring, ring->enq_seg, next)) {
  171. if (!consumer) {
  172. if (ring != xhci->event_ring) {
  173. next->link.control &= ~TRB_CHAIN;
  174. next->link.control |= chain;
  175. /* Give this link TRB to the hardware */
  176. wmb();
  177. if (next->link.control & TRB_CYCLE)
  178. next->link.control &= (u32) ~TRB_CYCLE;
  179. else
  180. next->link.control |= (u32) TRB_CYCLE;
  181. }
  182. /* Toggle the cycle bit after the last ring segment. */
  183. if (last_trb_on_last_seg(xhci, ring, ring->enq_seg, next)) {
  184. ring->cycle_state = (ring->cycle_state ? 0 : 1);
  185. if (!in_interrupt())
  186. xhci_dbg(xhci, "Toggle cycle state for ring %p = %i\n",
  187. ring,
  188. (unsigned int) ring->cycle_state);
  189. }
  190. }
  191. ring->enq_seg = ring->enq_seg->next;
  192. ring->enqueue = ring->enq_seg->trbs;
  193. next = ring->enqueue;
  194. }
  195. }
  196. /*
  197. * Check to see if there's room to enqueue num_trbs on the ring. See rules
  198. * above.
  199. * FIXME: this would be simpler and faster if we just kept track of the number
  200. * of free TRBs in a ring.
  201. */
  202. static int room_on_ring(struct xhci_hcd *xhci, struct xhci_ring *ring,
  203. unsigned int num_trbs)
  204. {
  205. int i;
  206. union xhci_trb *enq = ring->enqueue;
  207. struct xhci_segment *enq_seg = ring->enq_seg;
  208. /* Check if ring is empty */
  209. if (enq == ring->dequeue)
  210. return 1;
  211. /* Make sure there's an extra empty TRB available */
  212. for (i = 0; i <= num_trbs; ++i) {
  213. if (enq == ring->dequeue)
  214. return 0;
  215. enq++;
  216. while (last_trb(xhci, ring, enq_seg, enq)) {
  217. enq_seg = enq_seg->next;
  218. enq = enq_seg->trbs;
  219. }
  220. }
  221. return 1;
  222. }
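As the FIXME above notes, the walk could be avoided by keeping a count of free TRBs. A minimal sketch of that alternative follows; the num_trbs_free field is hypothetical and does not exist in this version of the driver, and such a counter would have to be maintained by inc_enq()/inc_deq().

/* Hypothetical alternative to room_on_ring(): with a free-TRB counter the
 * check becomes a single comparison.
 */
#include <stdio.h>

struct counted_ring {
	unsigned int num_trbs_free;	/* hypothetical field */
};

static int room_on_counted_ring(const struct counted_ring *ring,
		unsigned int num_trbs)
{
	return ring->num_trbs_free >= num_trbs;
}

int main(void)
{
	struct counted_ring ring = { .num_trbs_free = 3 };

	printf("room for 2 TRBs: %d\n", room_on_counted_ring(&ring, 2));
	printf("room for 5 TRBs: %d\n", room_on_counted_ring(&ring, 5));
	return 0;
}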
  223. void xhci_set_hc_event_deq(struct xhci_hcd *xhci)
  224. {
  225. u32 temp;
  226. dma_addr_t deq;
  227. deq = xhci_trb_virt_to_dma(xhci->event_ring->deq_seg,
  228. xhci->event_ring->dequeue);
  229. if (deq == 0 && !in_interrupt())
  230. xhci_warn(xhci, "WARN something wrong with SW event ring "
  231. "dequeue ptr.\n");
  232. /* Update HC event ring dequeue pointer */
  233. temp = xhci_readl(xhci, &xhci->ir_set->erst_dequeue[0]);
  234. temp &= ERST_PTR_MASK;
  235. if (!in_interrupt())
  236. xhci_dbg(xhci, "// Write event ring dequeue pointer\n");
  237. xhci_writel(xhci, 0, &xhci->ir_set->erst_dequeue[1]);
  238. xhci_writel(xhci, (deq & ~ERST_PTR_MASK) | temp,
  239. &xhci->ir_set->erst_dequeue[0]);
  240. }
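The register update above preserves the low flag bits of the ERST dequeue register while swapping in the new pointer. A standalone illustration of that mask-and-merge step is below; the 0xf mask and the names are local to the example and only mirror what ERST_PTR_MASK does in xhci.h.

/* Standalone illustration of the mask-and-merge above: keep the flag bits
 * currently in the register, replace only the pointer bits.
 */
#include <stdint.h>
#include <stdio.h>

#define TOY_ERST_PTR_MASK 0xfu

static uint32_t merge_event_deq(uint32_t reg, uint32_t deq)
{
	return (deq & ~TOY_ERST_PTR_MASK) | (reg & TOY_ERST_PTR_MASK);
}

int main(void)
{
	/* Register currently holds flags 0x5; new dequeue address is 0x12345670. */
	printf("0x%08x\n", (unsigned int) merge_event_deq(0x00000005, 0x12345670));
	return 0;
}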
  241. /* Ring the host controller doorbell after placing a command on the ring */
  242. void xhci_ring_cmd_db(struct xhci_hcd *xhci)
  243. {
  244. u32 temp;
  245. xhci_dbg(xhci, "// Ding dong!\n");
  246. temp = xhci_readl(xhci, &xhci->dba->doorbell[0]) & DB_MASK;
  247. xhci_writel(xhci, temp | DB_TARGET_HOST, &xhci->dba->doorbell[0]);
  248. /* Flush PCI posted writes */
  249. xhci_readl(xhci, &xhci->dba->doorbell[0]);
  250. }
  251. static void ring_ep_doorbell(struct xhci_hcd *xhci,
  252. unsigned int slot_id,
  253. unsigned int ep_index)
  254. {
  255. struct xhci_ring *ep_ring;
  256. u32 field;
  257. __u32 __iomem *db_addr = &xhci->dba->doorbell[slot_id];
  258. ep_ring = xhci->devs[slot_id]->ep_rings[ep_index];
  259. /* Don't ring the doorbell for this endpoint if there are pending
  260. * cancellations because we don't want to interrupt processing.
  261. */
  262. if (!ep_ring->cancels_pending && !(ep_ring->state & SET_DEQ_PENDING)) {
  263. field = xhci_readl(xhci, db_addr) & DB_MASK;
  264. xhci_writel(xhci, field | EPI_TO_DB(ep_index), db_addr);
  265. /* Flush PCI posted writes - FIXME Matthew Wilcox says this
  266. * isn't time-critical and we shouldn't make the CPU wait for
  267. * the flush.
  268. */
  269. xhci_readl(xhci, db_addr);
  270. }
  271. }
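The doorbell write above encodes which endpoint to start via EPI_TO_DB(). The toy mapping below assumes that macro simply adds one to the endpoint index, which matches the xHCI doorbell target layout (target 1 = EP0, 2 = EP1 OUT, 3 = EP1 IN, ...); the macro is redefined locally for the example.

/* Toy version of the endpoint-index-to-doorbell-target mapping (assumed to
 * mirror EPI_TO_DB() in xhci.h).
 */
#include <stdio.h>

#define TOY_EPI_TO_DB(p)	(((p) + 1) & 0xff)

int main(void)
{
	unsigned int ep_index;

	/* ep_index 0 is EP0; 1 is EP1 OUT; 2 is EP1 IN; ... */
	for (ep_index = 0; ep_index < 4; ep_index++)
		printf("ep_index %u -> doorbell target %u\n",
				ep_index, TOY_EPI_TO_DB(ep_index));
	return 0;
}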
  272. /*
  273. * Find the segment that trb is in. Start searching in start_seg.
  274. * If we must move past a segment that has a link TRB with a toggle cycle state
  275. * bit set, then we will toggle the value pointed at by cycle_state.
  276. */
  277. static struct xhci_segment *find_trb_seg(
  278. struct xhci_segment *start_seg,
  279. union xhci_trb *trb, int *cycle_state)
  280. {
  281. struct xhci_segment *cur_seg = start_seg;
  282. struct xhci_generic_trb *generic_trb;
  283. while (cur_seg->trbs > trb ||
  284. &cur_seg->trbs[TRBS_PER_SEGMENT - 1] < trb) {
  285. generic_trb = &cur_seg->trbs[TRBS_PER_SEGMENT - 1].generic;
  286. if ((generic_trb->field[3] & TRB_TYPE_BITMASK) == TRB_TYPE(TRB_LINK) &&
  287. (generic_trb->field[3] & LINK_TOGGLE))
  288. *cycle_state = ~(*cycle_state) & 0x1;
  289. cur_seg = cur_seg->next;
  290. if (cur_seg == start_seg)
  291. /* Looped over the entire list. Oops! */
  292. return 0;
  293. }
  294. return cur_seg;
  295. }
  296. struct dequeue_state {
  297. struct xhci_segment *new_deq_seg;
  298. union xhci_trb *new_deq_ptr;
  299. int new_cycle_state;
  300. };
  301. /*
  302. * Move the xHC's endpoint ring dequeue pointer past cur_td.
  303. * Record the new state of the xHC's endpoint ring dequeue segment,
  304. * dequeue pointer, and new consumer cycle state in state.
  305. * Update our internal representation of the ring's dequeue pointer.
  306. *
  307. * We do this in three jumps:
  308. * - First we update our new ring state to be the same as when the xHC stopped.
  309. * - Then we traverse the ring to find the segment that contains
  310. * the last TRB in the TD. We toggle the xHC's new cycle state when we pass
  311. * any link TRBs with the toggle cycle bit set.
  312. * - Finally we move the dequeue state one TRB further, toggling the cycle bit
  313. * if we've moved it past a link TRB with the toggle cycle bit set.
  314. */
  315. static void find_new_dequeue_state(struct xhci_hcd *xhci,
  316. unsigned int slot_id, unsigned int ep_index,
  317. struct xhci_td *cur_td, struct dequeue_state *state)
  318. {
  319. struct xhci_virt_device *dev = xhci->devs[slot_id];
  320. struct xhci_ring *ep_ring = dev->ep_rings[ep_index];
  321. struct xhci_generic_trb *trb;
  322. state->new_cycle_state = 0;
  323. state->new_deq_seg = find_trb_seg(cur_td->start_seg,
  324. ep_ring->stopped_trb,
  325. &state->new_cycle_state);
  326. if (!state->new_deq_seg)
  327. BUG();
  328. /* Dig out the cycle state saved by the xHC during the stop ep cmd */
  329. state->new_cycle_state = 0x1 & dev->out_ctx->ep[ep_index].deq[0];
  330. state->new_deq_ptr = cur_td->last_trb;
  331. state->new_deq_seg = find_trb_seg(state->new_deq_seg,
  332. state->new_deq_ptr,
  333. &state->new_cycle_state);
  334. if (!state->new_deq_seg)
  335. BUG();
  336. trb = &state->new_deq_ptr->generic;
  337. if ((trb->field[3] & TRB_TYPE_BITMASK) == TRB_TYPE(TRB_LINK) &&
  338. (trb->field[3] & LINK_TOGGLE))
  339. state->new_cycle_state = ~(state->new_cycle_state) & 0x1;
  340. next_trb(xhci, ep_ring, &state->new_deq_seg, &state->new_deq_ptr);
  341. /* Don't update the ring cycle state for the producer (us). */
  342. ep_ring->dequeue = state->new_deq_ptr;
  343. ep_ring->deq_seg = state->new_deq_seg;
  344. }
  345. static void td_to_noop(struct xhci_hcd *xhci, struct xhci_ring *ep_ring,
  346. struct xhci_td *cur_td)
  347. {
  348. struct xhci_segment *cur_seg;
  349. union xhci_trb *cur_trb;
  350. for (cur_seg = cur_td->start_seg, cur_trb = cur_td->first_trb;
  351. true;
  352. next_trb(xhci, ep_ring, &cur_seg, &cur_trb)) {
  353. if ((cur_trb->generic.field[3] & TRB_TYPE_BITMASK) ==
  354. TRB_TYPE(TRB_LINK)) {
  355. /* Unchain any chained Link TRBs, but
  356. * leave the pointers intact.
  357. */
  358. cur_trb->generic.field[3] &= ~TRB_CHAIN;
  359. xhci_dbg(xhci, "Cancel (unchain) link TRB\n");
  360. xhci_dbg(xhci, "Address = %p (0x%llx dma); "
  361. "in seg %p (0x%llx dma)\n",
  362. cur_trb,
  363. (unsigned long long)xhci_trb_virt_to_dma(cur_seg, cur_trb),
  364. cur_seg,
  365. (unsigned long long)cur_seg->dma);
  366. } else {
  367. cur_trb->generic.field[0] = 0;
  368. cur_trb->generic.field[1] = 0;
  369. cur_trb->generic.field[2] = 0;
  370. /* Preserve only the cycle bit of this TRB */
  371. cur_trb->generic.field[3] &= TRB_CYCLE;
  372. cur_trb->generic.field[3] |= TRB_TYPE(TRB_TR_NOOP);
  373. xhci_dbg(xhci, "Cancel TRB %p (0x%llx dma) "
  374. "in seg %p (0x%llx dma)\n",
  375. cur_trb,
  376. (unsigned long long)xhci_trb_virt_to_dma(cur_seg, cur_trb),
  377. cur_seg,
  378. (unsigned long long)cur_seg->dma);
  379. }
  380. if (cur_trb == cur_td->last_trb)
  381. break;
  382. }
  383. }
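td_to_noop() cancels a TD in place by rewriting each TRB's control word. The standalone snippet below illustrates just that bit manipulation: clear everything except the cycle bit, then set the transfer No-op TRB type. Bit positions follow the xHCI layout (cycle in bit 0, TRB type in bits 10-15), but the TOY_* macros and the sample value are invented for the example.

/* Standalone illustration of the control-word rewrite in td_to_noop():
 * preserve only the cycle bit, then mark the TRB as a transfer No-op.
 */
#include <stdint.h>
#include <stdio.h>

#define TOY_TRB_CYCLE		(1u << 0)
#define TOY_TRB_TYPE(t)		((uint32_t)(t) << 10)
#define TOY_TRB_TR_NOOP		8	/* No Op transfer TRB type */

int main(void)
{
	uint32_t field3 = 0x00000c13;	/* sample populated control word */

	field3 &= TOY_TRB_CYCLE;		 /* keep the cycle bit only */
	field3 |= TOY_TRB_TYPE(TOY_TRB_TR_NOOP); /* set TRB type = No-op */
	printf("control word is now 0x%08x\n", (unsigned int) field3);
	return 0;
}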
  384. static int queue_set_tr_deq(struct xhci_hcd *xhci, int slot_id,
  385. unsigned int ep_index, struct xhci_segment *deq_seg,
  386. union xhci_trb *deq_ptr, u32 cycle_state);
  387. /*
  388. * When we get a command completion for a Stop Endpoint Command, we need to
  389. * unlink any cancelled TDs from the ring. There are two ways to do that:
  390. *
  391. * 1. If the HW was in the middle of processing the TD that needs to be
  392. * cancelled, then we must move the ring's dequeue pointer past the last TRB
  393. * in the TD with a Set Dequeue Pointer Command.
  394. * 2. Otherwise, we turn all the TRBs in the TD into No-op TRBs (with the chain
  395. * bit cleared) so that the HW will skip over them.
  396. */
  397. static void handle_stopped_endpoint(struct xhci_hcd *xhci,
  398. union xhci_trb *trb)
  399. {
  400. unsigned int slot_id;
  401. unsigned int ep_index;
  402. struct xhci_ring *ep_ring;
  403. struct list_head *entry;
  404. struct xhci_td *cur_td = 0;
  405. struct xhci_td *last_unlinked_td;
  406. struct dequeue_state deq_state;
  407. #ifdef CONFIG_USB_HCD_STAT
  408. ktime_t stop_time = ktime_get();
  409. #endif
  410. memset(&deq_state, 0, sizeof(deq_state));
  411. slot_id = TRB_TO_SLOT_ID(trb->generic.field[3]);
  412. ep_index = TRB_TO_EP_INDEX(trb->generic.field[3]);
  413. ep_ring = xhci->devs[slot_id]->ep_rings[ep_index];
  414. if (list_empty(&ep_ring->cancelled_td_list))
  415. return;
  416. /* Fix up the ep ring first, so HW stops executing cancelled TDs.
  417. * We have the xHCI lock, so nothing can modify this list until we drop
  418. * it. We're also in the event handler, so we can't get re-interrupted
  419. * if another Stop Endpoint command completes
  420. */
  421. list_for_each(entry, &ep_ring->cancelled_td_list) {
  422. cur_td = list_entry(entry, struct xhci_td, cancelled_td_list);
  423. xhci_dbg(xhci, "Cancelling TD starting at %p, 0x%llx (dma).\n",
  424. cur_td->first_trb,
  425. (unsigned long long)xhci_trb_virt_to_dma(cur_td->start_seg, cur_td->first_trb));
  426. /*
  427. * If we stopped on the TD we need to cancel, then we have to
  428. * move the xHC endpoint ring dequeue pointer past this TD.
  429. */
  430. if (cur_td == ep_ring->stopped_td)
  431. find_new_dequeue_state(xhci, slot_id, ep_index, cur_td,
  432. &deq_state);
  433. else
  434. td_to_noop(xhci, ep_ring, cur_td);
  435. /*
  436. * The event handler won't see a completion for this TD anymore,
  437. * so remove it from the endpoint ring's TD list. Keep it in
  438. * the cancelled TD list for URB completion later.
  439. */
  440. list_del(&cur_td->td_list);
  441. ep_ring->cancels_pending--;
  442. }
  443. last_unlinked_td = cur_td;
  444. /* If necessary, queue a Set Transfer Ring Dequeue Pointer command */
  445. if (deq_state.new_deq_ptr && deq_state.new_deq_seg) {
  446. xhci_dbg(xhci, "Set TR Deq Ptr cmd, new deq seg = %p (0x%llx dma), "
  447. "new deq ptr = %p (0x%llx dma), new cycle = %u\n",
  448. deq_state.new_deq_seg,
  449. (unsigned long long)deq_state.new_deq_seg->dma,
  450. deq_state.new_deq_ptr,
  451. (unsigned long long)xhci_trb_virt_to_dma(deq_state.new_deq_seg, deq_state.new_deq_ptr),
  452. deq_state.new_cycle_state);
  453. queue_set_tr_deq(xhci, slot_id, ep_index,
  454. deq_state.new_deq_seg,
  455. deq_state.new_deq_ptr,
  456. (u32) deq_state.new_cycle_state);
  457. /* Stop the TD queueing code from ringing the doorbell until
  458. * this command completes. The HC won't set the dequeue pointer
  459. * if the ring is running, and ringing the doorbell starts the
  460. * ring running.
  461. */
  462. ep_ring->state |= SET_DEQ_PENDING;
  463. xhci_ring_cmd_db(xhci);
  464. } else {
  465. /* Otherwise just ring the doorbell to restart the ring */
  466. ring_ep_doorbell(xhci, slot_id, ep_index);
  467. }
  468. /*
  469. * Drop the lock and complete the URBs in the cancelled TD list.
  470. * New TDs to be cancelled might be added to the end of the list before
  471. * we can complete all the URBs for the TDs we already unlinked.
  472. * So stop when we've completed the URB for the last TD we unlinked.
  473. */
  474. do {
  475. cur_td = list_entry(ep_ring->cancelled_td_list.next,
  476. struct xhci_td, cancelled_td_list);
  477. list_del(&cur_td->cancelled_td_list);
  478. /* Clean up the cancelled URB */
  479. #ifdef CONFIG_USB_HCD_STAT
  480. hcd_stat_update(xhci->tp_stat, cur_td->urb->actual_length,
  481. ktime_sub(stop_time, cur_td->start_time));
  482. #endif
  483. cur_td->urb->hcpriv = NULL;
  484. usb_hcd_unlink_urb_from_ep(xhci_to_hcd(xhci), cur_td->urb);
  485. xhci_dbg(xhci, "Giveback cancelled URB %p\n", cur_td->urb);
  486. spin_unlock(&xhci->lock);
  487. /* Doesn't matter what we pass for status, since the core will
  488. * just overwrite it (because the URB has been unlinked).
  489. */
  490. usb_hcd_giveback_urb(xhci_to_hcd(xhci), cur_td->urb, 0);
  491. kfree(cur_td);
  492. spin_lock(&xhci->lock);
  493. } while (cur_td != last_unlinked_td);
  494. /* Return to the event handler with xhci->lock re-acquired */
  495. }
  496. /*
  497. * When we get a completion for a Set Transfer Ring Dequeue Pointer command,
  498. * we need to clear the set deq pending flag in the endpoint ring state, so that
  499. * the TD queueing code can ring the doorbell again. We also need to ring the
  500. * endpoint doorbell to restart the ring, but only if there aren't more
  501. * cancellations pending.
  502. */
  503. static void handle_set_deq_completion(struct xhci_hcd *xhci,
  504. struct xhci_event_cmd *event,
  505. union xhci_trb *trb)
  506. {
  507. unsigned int slot_id;
  508. unsigned int ep_index;
  509. struct xhci_ring *ep_ring;
  510. struct xhci_virt_device *dev;
  511. slot_id = TRB_TO_SLOT_ID(trb->generic.field[3]);
  512. ep_index = TRB_TO_EP_INDEX(trb->generic.field[3]);
  513. dev = xhci->devs[slot_id];
  514. ep_ring = dev->ep_rings[ep_index];
  515. if (GET_COMP_CODE(event->status) != COMP_SUCCESS) {
  516. unsigned int ep_state;
  517. unsigned int slot_state;
  518. switch (GET_COMP_CODE(event->status)) {
  519. case COMP_TRB_ERR:
  520. xhci_warn(xhci, "WARN Set TR Deq Ptr cmd invalid because "
  521. "of stream ID configuration\n");
  522. break;
  523. case COMP_CTX_STATE:
  524. xhci_warn(xhci, "WARN Set TR Deq Ptr cmd failed due "
  525. "to incorrect slot or ep state.\n");
  526. ep_state = dev->out_ctx->ep[ep_index].ep_info;
  527. ep_state &= EP_STATE_MASK;
  528. slot_state = dev->out_ctx->slot.dev_state;
  529. slot_state = GET_SLOT_STATE(slot_state);
  530. xhci_dbg(xhci, "Slot state = %u, EP state = %u\n",
  531. slot_state, ep_state);
  532. break;
  533. case COMP_EBADSLT:
  534. xhci_warn(xhci, "WARN Set TR Deq Ptr cmd failed because "
  535. "slot %u was not enabled.\n", slot_id);
  536. break;
  537. default:
  538. xhci_warn(xhci, "WARN Set TR Deq Ptr cmd with unknown "
  539. "completion code of %u.\n",
  540. GET_COMP_CODE(event->status));
  541. break;
  542. }
  543. /* OK what do we do now? The endpoint state is hosed, and we
  544. * should never get to this point if the synchronization between
  545. * queueing and endpoint state is correct. This might happen
  546. * if the device gets disconnected after we've finished
  547. * cancelling URBs, which might not be an error...
  548. */
  549. } else {
  550. xhci_dbg(xhci, "Successful Set TR Deq Ptr cmd, deq[0] = 0x%x, "
  551. "deq[1] = 0x%x.\n",
  552. dev->out_ctx->ep[ep_index].deq[0],
  553. dev->out_ctx->ep[ep_index].deq[1]);
  554. }
  555. ep_ring->state &= ~SET_DEQ_PENDING;
  556. ring_ep_doorbell(xhci, slot_id, ep_index);
  557. }
  558. static void handle_cmd_completion(struct xhci_hcd *xhci,
  559. struct xhci_event_cmd *event)
  560. {
  561. int slot_id = TRB_TO_SLOT_ID(event->flags);
  562. u64 cmd_dma;
  563. dma_addr_t cmd_dequeue_dma;
  564. cmd_dma = (((u64) event->cmd_trb[1]) << 32) + event->cmd_trb[0];
  565. cmd_dequeue_dma = xhci_trb_virt_to_dma(xhci->cmd_ring->deq_seg,
  566. xhci->cmd_ring->dequeue);
  567. /* Is the command ring deq ptr out of sync with the deq seg ptr? */
  568. if (cmd_dequeue_dma == 0) {
  569. xhci->error_bitmask |= 1 << 4;
  570. return;
  571. }
  572. /* Does the DMA address match our internal dequeue pointer address? */
  573. if (cmd_dma != (u64) cmd_dequeue_dma) {
  574. xhci->error_bitmask |= 1 << 5;
  575. return;
  576. }
  577. switch (xhci->cmd_ring->dequeue->generic.field[3] & TRB_TYPE_BITMASK) {
  578. case TRB_TYPE(TRB_ENABLE_SLOT):
  579. if (GET_COMP_CODE(event->status) == COMP_SUCCESS)
  580. xhci->slot_id = slot_id;
  581. else
  582. xhci->slot_id = 0;
  583. complete(&xhci->addr_dev);
  584. break;
  585. case TRB_TYPE(TRB_DISABLE_SLOT):
  586. if (xhci->devs[slot_id])
  587. xhci_free_virt_device(xhci, slot_id);
  588. break;
  589. case TRB_TYPE(TRB_CONFIG_EP):
  590. xhci->devs[slot_id]->cmd_status = GET_COMP_CODE(event->status);
  591. complete(&xhci->devs[slot_id]->cmd_completion);
  592. break;
  593. case TRB_TYPE(TRB_ADDR_DEV):
  594. xhci->devs[slot_id]->cmd_status = GET_COMP_CODE(event->status);
  595. complete(&xhci->addr_dev);
  596. break;
  597. case TRB_TYPE(TRB_STOP_RING):
  598. handle_stopped_endpoint(xhci, xhci->cmd_ring->dequeue);
  599. break;
  600. case TRB_TYPE(TRB_SET_DEQ):
  601. handle_set_deq_completion(xhci, event, xhci->cmd_ring->dequeue);
  602. break;
  603. case TRB_TYPE(TRB_CMD_NOOP):
  604. ++xhci->noops_handled;
  605. break;
  606. default:
  607. /* Skip over unknown commands on the event ring */
  608. xhci->error_bitmask |= 1 << 6;
  609. break;
  610. }
  611. inc_deq(xhci, xhci->cmd_ring, false);
  612. }
  613. static void handle_port_status(struct xhci_hcd *xhci,
  614. union xhci_trb *event)
  615. {
  616. u32 port_id;
  617. /* Port status change events always have a successful completion code */
  618. if (GET_COMP_CODE(event->generic.field[2]) != COMP_SUCCESS) {
  619. xhci_warn(xhci, "WARN: xHC returned failed port status event\n");
  620. xhci->error_bitmask |= 1 << 8;
  621. }
  622. /* FIXME: core doesn't care about all port link state changes yet */
  623. port_id = GET_PORT_ID(event->generic.field[0]);
  624. xhci_dbg(xhci, "Port Status Change Event for port %d\n", port_id);
  625. /* Update event ring dequeue pointer before dropping the lock */
  626. inc_deq(xhci, xhci->event_ring, true);
  627. xhci_set_hc_event_deq(xhci);
  628. spin_unlock(&xhci->lock);
  629. /* Pass this up to the core */
  630. usb_hcd_poll_rh_status(xhci_to_hcd(xhci));
  631. spin_lock(&xhci->lock);
  632. }
  633. /*
  634. * This TD is defined by the TRBs starting at start_trb in start_seg and ending
  635. * at end_trb, which may be in another segment. If the suspect DMA address is a
  636. * TRB in this TD, this function returns that TRB's segment. Otherwise it
  637. * returns 0.
  638. */
  639. static struct xhci_segment *trb_in_td(
  640. struct xhci_segment *start_seg,
  641. union xhci_trb *start_trb,
  642. union xhci_trb *end_trb,
  643. dma_addr_t suspect_dma)
  644. {
  645. dma_addr_t start_dma;
  646. dma_addr_t end_seg_dma;
  647. dma_addr_t end_trb_dma;
  648. struct xhci_segment *cur_seg;
  649. start_dma = xhci_trb_virt_to_dma(start_seg, start_trb);
  650. cur_seg = start_seg;
  651. do {
  652. /* We may get an event for a Link TRB in the middle of a TD */
  653. end_seg_dma = xhci_trb_virt_to_dma(cur_seg,
  654. &cur_seg->trbs[TRBS_PER_SEGMENT - 1]);
  655. /* If the end TRB isn't in this segment, this is set to 0 */
  656. end_trb_dma = xhci_trb_virt_to_dma(cur_seg, end_trb);
  657. if (end_trb_dma > 0) {
  658. /* The end TRB is in this segment, so suspect should be here */
  659. if (start_dma <= end_trb_dma) {
  660. if (suspect_dma >= start_dma && suspect_dma <= end_trb_dma)
  661. return cur_seg;
  662. } else {
  663. /* Case for one segment with
  664. * a TD wrapped around to the top
  665. */
  666. if ((suspect_dma >= start_dma &&
  667. suspect_dma <= end_seg_dma) ||
  668. (suspect_dma >= cur_seg->dma &&
  669. suspect_dma <= end_trb_dma))
  670. return cur_seg;
  671. }
  672. return 0;
  673. } else {
  674. /* Might still be somewhere in this segment */
  675. if (suspect_dma >= start_dma && suspect_dma <= end_seg_dma)
  676. return cur_seg;
  677. }
  678. cur_seg = cur_seg->next;
  679. start_dma = xhci_trb_virt_to_dma(cur_seg, &cur_seg->trbs[0]);
  680. } while (1);
  681. }
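trb_in_td() has to handle a TD that wraps within a single segment, which is the trickiest branch above. The toy function below reproduces just that range test with plain integers standing in for DMA addresses; names and values are invented for the example.

/* Toy version of the per-segment range test in trb_in_td(): when the TD
 * wraps inside the segment, a hit is either between the start TRB and the
 * segment's last TRB, or between the segment base and the end TRB.
 */
#include <stdbool.h>
#include <stdio.h>

static bool in_td_range(unsigned long seg_base, unsigned long seg_last,
		unsigned long start, unsigned long end, unsigned long suspect)
{
	if (start <= end)	/* TD does not wrap inside this segment */
		return suspect >= start && suspect <= end;
	/* wrapped: [start, seg_last] or [seg_base, end] */
	return (suspect >= start && suspect <= seg_last) ||
	       (suspect >= seg_base && suspect <= end);
}

int main(void)
{
	/* Segment spans 0x1000-0x13f0; the TD wraps from 0x1380 back to 0x1040. */
	printf("%d\n", in_td_range(0x1000, 0x13f0, 0x1380, 0x1040, 0x13c0)); /* 1 */
	printf("%d\n", in_td_range(0x1000, 0x13f0, 0x1380, 0x1040, 0x1200)); /* 0 */
	return 0;
}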
  682. /*
  683. * If this function returns an error condition, it means it got a Transfer
  684. * event with a corrupted Slot ID, Endpoint ID, or TRB DMA address.
  685. * At this point, the host controller is probably hosed and should be reset.
  686. */
  687. static int handle_tx_event(struct xhci_hcd *xhci,
  688. struct xhci_transfer_event *event)
  689. {
  690. struct xhci_virt_device *xdev;
  691. struct xhci_ring *ep_ring;
  692. int ep_index;
  693. struct xhci_td *td = 0;
  694. dma_addr_t event_dma;
  695. struct xhci_segment *event_seg;
  696. union xhci_trb *event_trb;
  697. struct urb *urb = 0;
  698. int status = -EINPROGRESS;
  699. xdev = xhci->devs[TRB_TO_SLOT_ID(event->flags)];
  700. if (!xdev) {
  701. xhci_err(xhci, "ERROR Transfer event pointed to bad slot\n");
  702. return -ENODEV;
  703. }
  704. /* Endpoint ID is 1 based, our index is zero based */
  705. ep_index = TRB_TO_EP_ID(event->flags) - 1;
  706. ep_ring = xdev->ep_rings[ep_index];
  707. if (!ep_ring || (xdev->out_ctx->ep[ep_index].ep_info & EP_STATE_MASK) == EP_STATE_DISABLED) {
  708. xhci_err(xhci, "ERROR Transfer event pointed to disabled endpoint\n");
  709. return -ENODEV;
  710. }
  711. event_dma = event->buffer[0];
  712. if (event->buffer[1] != 0)
  713. xhci_warn(xhci, "WARN ignoring upper 32-bits of 64-bit TRB dma address\n");
  714. /* This TRB should be in the TD at the head of this ring's TD list */
  715. if (list_empty(&ep_ring->td_list)) {
  716. xhci_warn(xhci, "WARN Event TRB for slot %d ep %d with no TDs queued?\n",
  717. TRB_TO_SLOT_ID(event->flags), ep_index);
  718. xhci_dbg(xhci, "Event TRB with TRB type ID %u\n",
  719. (unsigned int) (event->flags & TRB_TYPE_BITMASK)>>10);
  720. xhci_print_trb_offsets(xhci, (union xhci_trb *) event);
  721. urb = NULL;
  722. goto cleanup;
  723. }
  724. td = list_entry(ep_ring->td_list.next, struct xhci_td, td_list);
  725. /* Is this a TRB in the currently executing TD? */
  726. event_seg = trb_in_td(ep_ring->deq_seg, ep_ring->dequeue,
  727. td->last_trb, event_dma);
  728. if (!event_seg) {
  729. /* HC is busted, give up! */
  730. xhci_err(xhci, "ERROR Transfer event TRB DMA ptr not part of current TD\n");
  731. return -ESHUTDOWN;
  732. }
  733. event_trb = &event_seg->trbs[(event_dma - event_seg->dma) / sizeof(*event_trb)];
  734. xhci_dbg(xhci, "Event TRB with TRB type ID %u\n",
  735. (unsigned int) (event->flags & TRB_TYPE_BITMASK)>>10);
  736. xhci_dbg(xhci, "Offset 0x00 (buffer[0]) = 0x%x\n",
  737. (unsigned int) event->buffer[0]);
  738. xhci_dbg(xhci, "Offset 0x04 (buffer[0]) = 0x%x\n",
  739. (unsigned int) event->buffer[1]);
  740. xhci_dbg(xhci, "Offset 0x08 (transfer length) = 0x%x\n",
  741. (unsigned int) event->transfer_len);
  742. xhci_dbg(xhci, "Offset 0x0C (flags) = 0x%x\n",
  743. (unsigned int) event->flags);
  744. /* Look for common error cases */
  745. switch (GET_COMP_CODE(event->transfer_len)) {
  746. /* Skip codes that require special handling depending on
  747. * transfer type
  748. */
  749. case COMP_SUCCESS:
  750. case COMP_SHORT_TX:
  751. break;
  752. case COMP_STOP:
  753. xhci_dbg(xhci, "Stopped on Transfer TRB\n");
  754. break;
  755. case COMP_STOP_INVAL:
  756. xhci_dbg(xhci, "Stopped on No-op or Link TRB\n");
  757. break;
  758. case COMP_STALL:
  759. xhci_warn(xhci, "WARN: Stalled endpoint\n");
  760. status = -EPIPE;
  761. break;
  762. case COMP_TRB_ERR:
  763. xhci_warn(xhci, "WARN: TRB error on endpoint\n");
  764. status = -EILSEQ;
  765. break;
  766. case COMP_TX_ERR:
  767. xhci_warn(xhci, "WARN: transfer error on endpoint\n");
  768. status = -EPROTO;
  769. break;
  770. case COMP_DB_ERR:
  771. xhci_warn(xhci, "WARN: HC couldn't access mem fast enough\n");
  772. status = -ENOSR;
  773. break;
  774. default:
  775. xhci_warn(xhci, "ERROR Unknown event condition, HC probably busted\n");
  776. urb = NULL;
  777. goto cleanup;
  778. }
  779. /* Now update the urb's actual_length and give back to the core */
  780. /* Was this a control transfer? */
  781. if (usb_endpoint_xfer_control(&td->urb->ep->desc)) {
  782. xhci_debug_trb(xhci, xhci->event_ring->dequeue);
  783. switch (GET_COMP_CODE(event->transfer_len)) {
  784. case COMP_SUCCESS:
  785. if (event_trb == ep_ring->dequeue) {
  786. xhci_warn(xhci, "WARN: Success on ctrl setup TRB without IOC set??\n");
  787. status = -ESHUTDOWN;
  788. } else if (event_trb != td->last_trb) {
  789. xhci_warn(xhci, "WARN: Success on ctrl data TRB without IOC set??\n");
  790. status = -ESHUTDOWN;
  791. } else {
  792. xhci_dbg(xhci, "Successful control transfer!\n");
  793. status = 0;
  794. }
  795. break;
  796. case COMP_SHORT_TX:
  797. xhci_warn(xhci, "WARN: short transfer on control ep\n");
  798. status = -EREMOTEIO;
  799. break;
  800. default:
  801. /* Others already handled above */
  802. break;
  803. }
  804. /*
  805. * Did we transfer any data, despite the errors that might have
  806. * happened? I.e. did we get past the setup stage?
  807. */
  808. if (event_trb != ep_ring->dequeue) {
  809. /* The event was for the status stage */
  810. if (event_trb == td->last_trb) {
  811. td->urb->actual_length =
  812. td->urb->transfer_buffer_length;
  813. } else {
  814. /* Maybe the event was for the data stage? */
  815. if (GET_COMP_CODE(event->transfer_len) != COMP_STOP_INVAL)
  816. /* We didn't stop on a link TRB in the middle */
  817. td->urb->actual_length =
  818. td->urb->transfer_buffer_length -
  819. TRB_LEN(event->transfer_len);
  820. }
  821. }
  822. } else {
  823. switch (GET_COMP_CODE(event->transfer_len)) {
  824. case COMP_SUCCESS:
  825. /* Double check that the HW transferred everything. */
  826. if (event_trb != td->last_trb) {
  827. xhci_warn(xhci, "WARN Successful completion "
  828. "on short TX\n");
  829. if (td->urb->transfer_flags & URB_SHORT_NOT_OK)
  830. status = -EREMOTEIO;
  831. else
  832. status = 0;
  833. } else {
  834. xhci_dbg(xhci, "Successful bulk transfer!\n");
  835. status = 0;
  836. }
  837. break;
  838. case COMP_SHORT_TX:
  839. if (td->urb->transfer_flags & URB_SHORT_NOT_OK)
  840. status = -EREMOTEIO;
  841. else
  842. status = 0;
  843. break;
  844. default:
  845. /* Others already handled above */
  846. break;
  847. }
  848. dev_dbg(&td->urb->dev->dev,
  849. "ep %#x - asked for %d bytes, "
  850. "%d bytes untransferred\n",
  851. td->urb->ep->desc.bEndpointAddress,
  852. td->urb->transfer_buffer_length,
  853. TRB_LEN(event->transfer_len));
  854. /* Fast path - was this the last TRB in the TD for this URB? */
  855. if (event_trb == td->last_trb) {
  856. if (TRB_LEN(event->transfer_len) != 0) {
  857. td->urb->actual_length =
  858. td->urb->transfer_buffer_length -
  859. TRB_LEN(event->transfer_len);
  860. if (TRB_LEN(event->transfer_len) > td->urb->transfer_buffer_length) {
  861. xhci_warn(xhci, "HC gave bad length "
  862. "of %d bytes left\n",
  863. TRB_LEN(event->transfer_len));
  864. td->urb->actual_length = 0;
  865. }
  866. if (td->urb->transfer_flags & URB_SHORT_NOT_OK)
  867. status = -EREMOTEIO;
  868. else
  869. status = 0;
  870. } else {
  871. td->urb->actual_length = td->urb->transfer_buffer_length;
  872. /* Ignore a short packet completion if the
  873. * untransferred length was zero.
  874. */
  875. status = 0;
  876. }
  877. } else {
  878. /* Slow path - walk the list, starting from the dequeue
  879. * pointer, to get the actual length transferred.
  880. */
  881. union xhci_trb *cur_trb;
  882. struct xhci_segment *cur_seg;
  883. td->urb->actual_length = 0;
  884. for (cur_trb = ep_ring->dequeue, cur_seg = ep_ring->deq_seg;
  885. cur_trb != event_trb;
  886. next_trb(xhci, ep_ring, &cur_seg, &cur_trb)) {
  887. if ((cur_trb->generic.field[3] & TRB_TYPE_BITMASK) != TRB_TYPE(TRB_TR_NOOP) &&
  888. (cur_trb->generic.field[3] & TRB_TYPE_BITMASK) != TRB_TYPE(TRB_LINK))
  889. td->urb->actual_length +=
  890. TRB_LEN(cur_trb->generic.field[2]);
  891. }
  892. /* If the ring didn't stop on a Link or No-op TRB, add
  893. * in the actual bytes transferred from the Normal TRB
  894. */
  895. if (GET_COMP_CODE(event->transfer_len) != COMP_STOP_INVAL)
  896. td->urb->actual_length +=
  897. TRB_LEN(cur_trb->generic.field[2]) -
  898. TRB_LEN(event->transfer_len);
  899. }
  900. }
  901. /* The Endpoint Stop Command completion will take care of
  902. * any stopped TDs. A stopped TD may be restarted, so don't update the
  903. * ring dequeue pointer or take this TD off any lists yet.
  904. */
  905. if (GET_COMP_CODE(event->transfer_len) == COMP_STOP_INVAL ||
  906. GET_COMP_CODE(event->transfer_len) == COMP_STOP) {
  907. ep_ring->stopped_td = td;
  908. ep_ring->stopped_trb = event_trb;
  909. } else {
  910. /* Update ring dequeue pointer */
  911. while (ep_ring->dequeue != td->last_trb)
  912. inc_deq(xhci, ep_ring, false);
  913. inc_deq(xhci, ep_ring, false);
  914. /* Clean up the endpoint's TD list */
  915. urb = td->urb;
  916. list_del(&td->td_list);
  917. /* Was this TD slated to be cancelled but completed anyway? */
  918. if (!list_empty(&td->cancelled_td_list)) {
  919. list_del(&td->cancelled_td_list);
  920. ep_ring->cancels_pending--;
  921. }
  922. kfree(td);
  923. urb->hcpriv = NULL;
  924. }
  925. cleanup:
  926. inc_deq(xhci, xhci->event_ring, true);
  927. xhci_set_hc_event_deq(xhci);
  928. /* FIXME for multi-TD URBs (which have buffers bigger than 64MB) */
  929. if (urb) {
  930. usb_hcd_unlink_urb_from_ep(xhci_to_hcd(xhci), urb);
  931. spin_unlock(&xhci->lock);
  932. usb_hcd_giveback_urb(xhci_to_hcd(xhci), urb, status);
  933. spin_lock(&xhci->lock);
  934. }
  935. return 0;
  936. }
  937. /*
  938. * This function handles all OS-owned events on the event ring. It may drop
  939. * xhci->lock between event processing (e.g. to pass up port status changes).
  940. */
  941. void xhci_handle_event(struct xhci_hcd *xhci)
  942. {
  943. union xhci_trb *event;
  944. int update_ptrs = 1;
  945. int ret;
  946. if (!xhci->event_ring || !xhci->event_ring->dequeue) {
  947. xhci->error_bitmask |= 1 << 1;
  948. return;
  949. }
  950. event = xhci->event_ring->dequeue;
  951. /* Does the HC or OS own the TRB? */
  952. if ((event->event_cmd.flags & TRB_CYCLE) !=
  953. xhci->event_ring->cycle_state) {
  954. xhci->error_bitmask |= 1 << 2;
  955. return;
  956. }
  957. /* FIXME: Handle more event types. */
  958. switch ((event->event_cmd.flags & TRB_TYPE_BITMASK)) {
  959. case TRB_TYPE(TRB_COMPLETION):
  960. handle_cmd_completion(xhci, &event->event_cmd);
  961. break;
  962. case TRB_TYPE(TRB_PORT_STATUS):
  963. handle_port_status(xhci, event);
  964. update_ptrs = 0;
  965. break;
  966. case TRB_TYPE(TRB_TRANSFER):
  967. ret = handle_tx_event(xhci, &event->trans_event);
  968. if (ret < 0)
  969. xhci->error_bitmask |= 1 << 9;
  970. else
  971. update_ptrs = 0;
  972. break;
  973. default:
  974. xhci->error_bitmask |= 1 << 3;
  975. }
  976. if (update_ptrs) {
  977. /* Update SW and HC event ring dequeue pointer */
  978. inc_deq(xhci, xhci->event_ring, true);
  979. xhci_set_hc_event_deq(xhci);
  980. }
  981. /* Are there more items on the event ring? */
  982. xhci_handle_event(xhci);
  983. }
  984. /**** Endpoint Ring Operations ****/
  985. /*
  986. * Generic function for queueing a TRB on a ring.
  987. * The caller must have checked to make sure there's room on the ring.
  988. */
  989. static void queue_trb(struct xhci_hcd *xhci, struct xhci_ring *ring,
  990. bool consumer,
  991. u32 field1, u32 field2, u32 field3, u32 field4)
  992. {
  993. struct xhci_generic_trb *trb;
  994. trb = &ring->enqueue->generic;
  995. trb->field[0] = field1;
  996. trb->field[1] = field2;
  997. trb->field[2] = field3;
  998. trb->field[3] = field4;
  999. inc_enq(xhci, ring, consumer);
  1000. }
  1001. /*
  1002. * Does various checks on the endpoint ring, and makes it ready to queue num_trbs.
  1003. * FIXME allocate segments if the ring is full.
  1004. */
  1005. static int prepare_ring(struct xhci_hcd *xhci, struct xhci_ring *ep_ring,
  1006. u32 ep_state, unsigned int num_trbs, gfp_t mem_flags)
  1007. {
  1008. /* Make sure the endpoint has been added to xHC schedule */
  1009. xhci_dbg(xhci, "Endpoint state = 0x%x\n", ep_state);
  1010. switch (ep_state) {
  1011. case EP_STATE_DISABLED:
  1012. /*
  1013. * USB core changed config/interfaces without notifying us,
  1014. * or hardware is reporting the wrong state.
  1015. */
  1016. xhci_warn(xhci, "WARN urb submitted to disabled ep\n");
  1017. return -ENOENT;
  1018. case EP_STATE_HALTED:
  1019. case EP_STATE_ERROR:
  1020. xhci_warn(xhci, "WARN waiting for halt or error on ep "
  1021. "to be cleared\n");
  1022. /* FIXME event handling code for error needs to clear it */
  1023. /* XXX not sure if this should be -ENOENT or not */
  1024. return -EINVAL;
  1025. case EP_STATE_STOPPED:
  1026. case EP_STATE_RUNNING:
  1027. break;
  1028. default:
  1029. xhci_err(xhci, "ERROR unknown endpoint state for ep\n");
  1030. /*
  1031. * FIXME issue Configure Endpoint command to try to get the HC
  1032. * back into a known state.
  1033. */
  1034. return -EINVAL;
  1035. }
  1036. if (!room_on_ring(xhci, ep_ring, num_trbs)) {
  1037. /* FIXME allocate more room */
  1038. xhci_err(xhci, "ERROR no room on ep ring\n");
  1039. return -ENOMEM;
  1040. }
  1041. return 0;
  1042. }
  1043. static int prepare_transfer(struct xhci_hcd *xhci,
  1044. struct xhci_virt_device *xdev,
  1045. unsigned int ep_index,
  1046. unsigned int num_trbs,
  1047. struct urb *urb,
  1048. struct xhci_td **td,
  1049. gfp_t mem_flags)
  1050. {
  1051. int ret;
  1052. ret = prepare_ring(xhci, xdev->ep_rings[ep_index],
  1053. xdev->out_ctx->ep[ep_index].ep_info & EP_STATE_MASK,
  1054. num_trbs, mem_flags);
  1055. if (ret)
  1056. return ret;
  1057. *td = kzalloc(sizeof(struct xhci_td), mem_flags);
  1058. if (!*td)
  1059. return -ENOMEM;
  1060. INIT_LIST_HEAD(&(*td)->td_list);
  1061. INIT_LIST_HEAD(&(*td)->cancelled_td_list);
  1062. ret = usb_hcd_link_urb_to_ep(xhci_to_hcd(xhci), urb);
  1063. if (unlikely(ret)) {
  1064. kfree(*td);
  1065. return ret;
  1066. }
  1067. (*td)->urb = urb;
  1068. urb->hcpriv = (void *) (*td);
  1069. /* Add this TD to the tail of the endpoint ring's TD list */
  1070. list_add_tail(&(*td)->td_list, &xdev->ep_rings[ep_index]->td_list);
  1071. (*td)->start_seg = xdev->ep_rings[ep_index]->enq_seg;
  1072. (*td)->first_trb = xdev->ep_rings[ep_index]->enqueue;
  1073. return 0;
  1074. }
  1075. static unsigned int count_sg_trbs_needed(struct xhci_hcd *xhci, struct urb *urb)
  1076. {
  1077. int num_sgs, num_trbs, running_total, temp, i;
  1078. struct scatterlist *sg;
  1079. sg = NULL;
  1080. num_sgs = urb->num_sgs;
  1081. temp = urb->transfer_buffer_length;
  1082. xhci_dbg(xhci, "count sg list trbs: \n");
  1083. num_trbs = 0;
  1084. for_each_sg(urb->sg->sg, sg, num_sgs, i) {
  1085. unsigned int previous_total_trbs = num_trbs;
  1086. unsigned int len = sg_dma_len(sg);
  1087. /* Scatter gather list entries may cross 64KB boundaries */
  1088. running_total = TRB_MAX_BUFF_SIZE -
  1089. (sg_dma_address(sg) & ((1 << TRB_MAX_BUFF_SHIFT) - 1));
  1090. if (running_total != 0)
  1091. num_trbs++;
  1092. /* How many more 64KB chunks to transfer, how many more TRBs? */
  1093. while (running_total < sg_dma_len(sg)) {
  1094. num_trbs++;
  1095. running_total += TRB_MAX_BUFF_SIZE;
  1096. }
  1097. xhci_dbg(xhci, " sg #%d: dma = %#llx, len = %#x (%d), num_trbs = %d\n",
  1098. i, (unsigned long long)sg_dma_address(sg),
  1099. len, len, num_trbs - previous_total_trbs);
  1100. len = min_t(int, len, temp);
  1101. temp -= len;
  1102. if (temp == 0)
  1103. break;
  1104. }
  1105. xhci_dbg(xhci, "\n");
  1106. if (!in_interrupt())
  1107. dev_dbg(&urb->dev->dev, "ep %#x - urb len = %d, sglist used, num_trbs = %d\n",
  1108. urb->ep->desc.bEndpointAddress,
  1109. urb->transfer_buffer_length,
  1110. num_trbs);
  1111. return num_trbs;
  1112. }
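The loop above counts TRBs per scatter-gather entry around the rule that a single TRB's buffer may not cross a 64KB boundary. The self-contained helper below repeats that arithmetic for one buffer so it can be checked in isolation; the toy_* names mirror the driver's TRB_MAX_BUFF_* macros but are redefined for the example.

/* Self-contained version of the per-entry TRB counting above: how many TRBs
 * a buffer of `len` bytes at `addr` needs when no TRB may cross a 64KB
 * boundary.
 */
#include <stdio.h>

#define TOY_TRB_MAX_BUFF_SHIFT	16
#define TOY_TRB_MAX_BUFF_SIZE	(1 << TOY_TRB_MAX_BUFF_SHIFT)

static unsigned int toy_trbs_for_buffer(unsigned long long addr,
		unsigned int len)
{
	unsigned int num_trbs = 0;
	unsigned int running_total;

	/* The first TRB covers the bytes up to the next 64KB boundary. */
	running_total = TOY_TRB_MAX_BUFF_SIZE -
		(addr & (TOY_TRB_MAX_BUFF_SIZE - 1));
	if (running_total != 0)
		num_trbs++;
	/* Each further 64KB chunk needs one more TRB. */
	while (running_total < len) {
		num_trbs++;
		running_total += TOY_TRB_MAX_BUFF_SIZE;
	}
	return num_trbs;
}

int main(void)
{
	/* An 8KB buffer starting 1KB below a 64KB boundary needs two TRBs. */
	printf("%u\n", toy_trbs_for_buffer(0x1fc00, 8 * 1024));
	/* A 4KB buffer starting on a 64KB boundary fits in a single TRB. */
	printf("%u\n", toy_trbs_for_buffer(0x20000, 4 * 1024));
	return 0;
}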
  1113. static void check_trb_math(struct urb *urb, int num_trbs, int running_total)
  1114. {
  1115. if (num_trbs != 0)
  1116. dev_dbg(&urb->dev->dev, "%s - ep %#x - Miscalculated number of "
  1117. "TRBs, %d left\n", __func__,
  1118. urb->ep->desc.bEndpointAddress, num_trbs);
  1119. if (running_total != urb->transfer_buffer_length)
  1120. dev_dbg(&urb->dev->dev, "%s - ep %#x - Miscalculated tx length, "
  1121. "queued %#x (%d), asked for %#x (%d)\n",
  1122. __func__,
  1123. urb->ep->desc.bEndpointAddress,
  1124. running_total, running_total,
  1125. urb->transfer_buffer_length,
  1126. urb->transfer_buffer_length);
  1127. }
  1128. static void giveback_first_trb(struct xhci_hcd *xhci, int slot_id,
  1129. unsigned int ep_index, int start_cycle,
  1130. struct xhci_generic_trb *start_trb, struct xhci_td *td)
  1131. {
  1132. /*
  1133. * Pass all the TRBs to the hardware at once and make sure this write
  1134. * isn't reordered.
  1135. */
  1136. wmb();
  1137. start_trb->field[3] |= start_cycle;
  1138. ring_ep_doorbell(xhci, slot_id, ep_index);
  1139. }
  1140. static int queue_bulk_sg_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
  1141. struct urb *urb, int slot_id, unsigned int ep_index)
  1142. {
  1143. struct xhci_ring *ep_ring;
  1144. unsigned int num_trbs;
  1145. struct xhci_td *td;
  1146. struct scatterlist *sg;
  1147. int num_sgs;
  1148. int trb_buff_len, this_sg_len, running_total;
  1149. bool first_trb;
  1150. u64 addr;
  1151. struct xhci_generic_trb *start_trb;
  1152. int start_cycle;
  1153. ep_ring = xhci->devs[slot_id]->ep_rings[ep_index];
  1154. num_trbs = count_sg_trbs_needed(xhci, urb);
  1155. num_sgs = urb->num_sgs;
  1156. trb_buff_len = prepare_transfer(xhci, xhci->devs[slot_id],
  1157. ep_index, num_trbs, urb, &td, mem_flags);
  1158. if (trb_buff_len < 0)
  1159. return trb_buff_len;
  1160. /*
  1161. * Don't give the first TRB to the hardware (by toggling the cycle bit)
  1162. * until we've finished creating all the other TRBs. The ring's cycle
  1163. * state may change as we enqueue the other TRBs, so save it too.
  1164. */
  1165. start_trb = &ep_ring->enqueue->generic;
  1166. start_cycle = ep_ring->cycle_state;
  1167. running_total = 0;
  1168. /*
  1169. * How much data is in the first TRB?
  1170. *
  1171. * There are three forces at work for TRB buffer pointers and lengths:
  1172. * 1. We don't want to walk off the end of this sg-list entry buffer.
  1173. * 2. The transfer length that the driver requested may be smaller than
  1174. * the amount of memory allocated for this scatter-gather list.
  1175. * 3. TRB buffers can't cross 64KB boundaries.
  1176. */
  1177. sg = urb->sg->sg;
  1178. addr = (u64) sg_dma_address(sg);
  1179. this_sg_len = sg_dma_len(sg);
  1180. trb_buff_len = TRB_MAX_BUFF_SIZE -
  1181. (addr & ((1 << TRB_MAX_BUFF_SHIFT) - 1));
  1182. trb_buff_len = min_t(int, trb_buff_len, this_sg_len);
  1183. if (trb_buff_len > urb->transfer_buffer_length)
  1184. trb_buff_len = urb->transfer_buffer_length;
  1185. xhci_dbg(xhci, "First length to xfer from 1st sglist entry = %u\n",
  1186. trb_buff_len);
  1187. first_trb = true;
  1188. /* Queue the first TRB, even if it's zero-length */
  1189. do {
  1190. u32 field = 0;
  1191. /* Don't change the cycle bit of the first TRB until later */
  1192. if (first_trb)
  1193. first_trb = false;
  1194. else
  1195. field |= ep_ring->cycle_state;
  1196. /* Chain all the TRBs together; clear the chain bit in the last
  1197. * TRB to indicate it's the last TRB in the chain.
  1198. */
  1199. if (num_trbs > 1) {
  1200. field |= TRB_CHAIN;
  1201. } else {
  1202. /* FIXME - add check for ZERO_PACKET flag before this */
  1203. td->last_trb = ep_ring->enqueue;
  1204. field |= TRB_IOC;
  1205. }
  1206. xhci_dbg(xhci, " sg entry: dma = %#x, len = %#x (%d), "
  1207. "64KB boundary at %#x, end dma = %#x\n",
  1208. (unsigned int) addr, trb_buff_len, trb_buff_len,
  1209. (unsigned int) (addr + TRB_MAX_BUFF_SIZE) & ~(TRB_MAX_BUFF_SIZE - 1),
  1210. (unsigned int) addr + trb_buff_len);
  1211. if (TRB_MAX_BUFF_SIZE -
  1212. (addr & ((1 << TRB_MAX_BUFF_SHIFT) - 1)) < trb_buff_len) {
  1213. xhci_warn(xhci, "WARN: sg dma xfer crosses 64KB boundaries!\n");
  1214. xhci_dbg(xhci, "Next boundary at %#x, end dma = %#x\n",
  1215. (unsigned int) (addr + TRB_MAX_BUFF_SIZE) & ~(TRB_MAX_BUFF_SIZE - 1),
  1216. (unsigned int) addr + trb_buff_len);
  1217. }
  1218. queue_trb(xhci, ep_ring, false,
  1219. (u32) addr,
  1220. (u32) ((u64) addr >> 32),
  1221. TRB_LEN(trb_buff_len) | TRB_INTR_TARGET(0),
  1222. /* We always want to know if the TRB was short,
  1223. * or we won't get an event when it completes.
  1224. * (Unless we use event data TRBs, which are a
  1225. * waste of space and HC resources.)
  1226. */
  1227. field | TRB_ISP | TRB_TYPE(TRB_NORMAL));
  1228. --num_trbs;
  1229. running_total += trb_buff_len;
  1230. /* Calculate length for next transfer --
  1231. * Are we done queueing all the TRBs for this sg entry?
  1232. */
  1233. this_sg_len -= trb_buff_len;
  1234. if (this_sg_len == 0) {
  1235. --num_sgs;
  1236. if (num_sgs == 0)
  1237. break;
  1238. sg = sg_next(sg);
  1239. addr = (u64) sg_dma_address(sg);
  1240. this_sg_len = sg_dma_len(sg);
  1241. } else {
  1242. addr += trb_buff_len;
  1243. }
  1244. trb_buff_len = TRB_MAX_BUFF_SIZE -
  1245. (addr & ((1 << TRB_MAX_BUFF_SHIFT) - 1));
  1246. trb_buff_len = min_t(int, trb_buff_len, this_sg_len);
  1247. if (running_total + trb_buff_len > urb->transfer_buffer_length)
  1248. trb_buff_len =
  1249. urb->transfer_buffer_length - running_total;
  1250. } while (running_total < urb->transfer_buffer_length);
  1251. check_trb_math(urb, num_trbs, running_total);
  1252. giveback_first_trb(xhci, slot_id, ep_index, start_cycle, start_trb, td);
  1253. return 0;
  1254. }

/* This is very similar to what ehci-q.c qtd_fill() does */
int xhci_queue_bulk_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
		struct urb *urb, int slot_id, unsigned int ep_index)
{
	struct xhci_ring *ep_ring;
	struct xhci_td *td;
	int num_trbs;
	struct xhci_generic_trb *start_trb;
	bool first_trb;
	int start_cycle;
	u32 field;
	int running_total, trb_buff_len, ret;
	u64 addr;

	if (urb->sg)
		return queue_bulk_sg_tx(xhci, mem_flags, urb, slot_id, ep_index);

	ep_ring = xhci->devs[slot_id]->ep_rings[ep_index];

	num_trbs = 0;
	/* How much data is (potentially) left before the 64KB boundary? */
	running_total = TRB_MAX_BUFF_SIZE -
		(urb->transfer_dma & ((1 << TRB_MAX_BUFF_SHIFT) - 1));

	/* If there's some data on this 64KB chunk, or we have to send a
	 * zero-length transfer, we need at least one TRB
	 */
	if (running_total != 0 || urb->transfer_buffer_length == 0)
		num_trbs++;
	/* How many more 64KB chunks to transfer, how many more TRBs? */
	while (running_total < urb->transfer_buffer_length) {
		num_trbs++;
		running_total += TRB_MAX_BUFF_SIZE;
	}
	/* FIXME: this doesn't deal with URB_ZERO_PACKET - need one more */
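	/*
	 * Worked example (hypothetical values): with TRB_MAX_BUFF_SIZE = 64KB,
	 * a 10KB transfer whose DMA address starts 2KB below a 64KB boundary
	 * needs two TRBs -- the check above counts one TRB for the 2KB that
	 * fit before the boundary, and the loop adds one more for the
	 * remaining 8KB.
	 */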

	if (!in_interrupt())
		dev_dbg(&urb->dev->dev, "ep %#x - urb len = %#x (%d), addr = %#llx, num_trbs = %d\n",
				urb->ep->desc.bEndpointAddress,
				urb->transfer_buffer_length,
				urb->transfer_buffer_length,
				(unsigned long long)urb->transfer_dma,
				num_trbs);

	ret = prepare_transfer(xhci, xhci->devs[slot_id], ep_index,
			num_trbs, urb, &td, mem_flags);
	if (ret < 0)
		return ret;

	/*
	 * Don't give the first TRB to the hardware (by toggling the cycle bit)
	 * until we've finished creating all the other TRBs.  The ring's cycle
	 * state may change as we enqueue the other TRBs, so save it too.
	 */
	start_trb = &ep_ring->enqueue->generic;
	start_cycle = ep_ring->cycle_state;

	running_total = 0;
	/* How much data is in the first TRB? */
	addr = (u64) urb->transfer_dma;
	trb_buff_len = TRB_MAX_BUFF_SIZE -
		(urb->transfer_dma & ((1 << TRB_MAX_BUFF_SHIFT) - 1));
	if (urb->transfer_buffer_length < trb_buff_len)
		trb_buff_len = urb->transfer_buffer_length;

	first_trb = true;

	/* Queue the first TRB, even if it's zero-length */
	do {
		field = 0;

		/* Don't change the cycle bit of the first TRB until later */
		if (first_trb)
			first_trb = false;
		else
			field |= ep_ring->cycle_state;

		/* Chain all the TRBs together; clear the chain bit in the last
		 * TRB to indicate it's the last TRB in the chain.
		 */
		if (num_trbs > 1) {
			field |= TRB_CHAIN;
		} else {
			/* FIXME - add check for ZERO_PACKET flag before this */
			td->last_trb = ep_ring->enqueue;
			field |= TRB_IOC;
		}
		queue_trb(xhci, ep_ring, false,
				(u32) addr,
				(u32) ((u64) addr >> 32),
				TRB_LEN(trb_buff_len) | TRB_INTR_TARGET(0),
				/* We always want to know if the TRB was short,
				 * or we won't get an event when it completes.
				 * (Unless we use event data TRBs, which are a
				 * waste of space and HC resources.)
				 */
				field | TRB_ISP | TRB_TYPE(TRB_NORMAL));
		--num_trbs;
		running_total += trb_buff_len;

		/* Calculate length for next transfer */
		addr += trb_buff_len;
		trb_buff_len = urb->transfer_buffer_length - running_total;
		if (trb_buff_len > TRB_MAX_BUFF_SIZE)
			trb_buff_len = TRB_MAX_BUFF_SIZE;
	} while (running_total < urb->transfer_buffer_length);

	check_trb_math(urb, num_trbs, running_total);
	giveback_first_trb(xhci, slot_id, ep_index, start_cycle, start_trb, td);
	return 0;
}

/* Caller must have locked xhci->lock */
int xhci_queue_ctrl_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
		struct urb *urb, int slot_id, unsigned int ep_index)
{
	struct xhci_ring *ep_ring;
	int num_trbs;
	int ret;
	struct usb_ctrlrequest *setup;
	struct xhci_generic_trb *start_trb;
	int start_cycle;
	u32 field;
	struct xhci_td *td;

	ep_ring = xhci->devs[slot_id]->ep_rings[ep_index];

	/*
	 * Need to copy setup packet into setup TRB, so we can't use the setup
	 * DMA address.
	 */
	if (!urb->setup_packet)
		return -EINVAL;

	if (!in_interrupt())
		xhci_dbg(xhci, "Queueing ctrl tx for slot id %d, ep %d\n",
				slot_id, ep_index);
	/* 1 TRB for setup, 1 for status */
	num_trbs = 2;
	/*
	 * Don't need to check if we need additional event data and normal TRBs,
	 * since data in control transfers will never get bigger than 16MB
	 * XXX: can we get a buffer that crosses 64KB boundaries?
	 */
	if (urb->transfer_buffer_length > 0)
		num_trbs++;
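	/*
	 * A control transfer is therefore 2 or 3 TRBs: a setup stage TRB, an
	 * optional data stage TRB, and a status stage TRB.  For example, a
	 * GET_DESCRIPTOR request queues a setup TRB, one IN data TRB, and an
	 * OUT status TRB.
	 */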
	ret = prepare_transfer(xhci, xhci->devs[slot_id], ep_index, num_trbs,
			urb, &td, mem_flags);
	if (ret < 0)
		return ret;

	/*
	 * Don't give the first TRB to the hardware (by toggling the cycle bit)
	 * until we've finished creating all the other TRBs.  The ring's cycle
	 * state may change as we enqueue the other TRBs, so save it too.
	 */
	start_trb = &ep_ring->enqueue->generic;
	start_cycle = ep_ring->cycle_state;

	/* Queue setup TRB - see section 6.4.1.2.1 */
	/* FIXME better way to translate setup_packet into two u32 fields? */
	setup = (struct usb_ctrlrequest *) urb->setup_packet;
	queue_trb(xhci, ep_ring, false,
			/* FIXME endianness is probably going to bite my ass here. */
			setup->bRequestType | setup->bRequest << 8 | setup->wValue << 16,
			setup->wIndex | setup->wLength << 16,
			TRB_LEN(8) | TRB_INTR_TARGET(0),
			/* Immediate data in pointer */
			TRB_IDT | TRB_TYPE(TRB_SETUP));
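	/*
	 * Note on the FIXME above: this packing assumes a little-endian CPU.
	 * On a big-endian host the 16-bit setup fields (wValue, wIndex,
	 * wLength) would likely need le16_to_cpu() before being shifted into
	 * the TRB words.
	 */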

	/* If there's data, queue data TRBs */
	field = 0;
	if (urb->transfer_buffer_length > 0) {
		if (setup->bRequestType & USB_DIR_IN)
			field |= TRB_DIR_IN;
		queue_trb(xhci, ep_ring, false,
				lower_32_bits(urb->transfer_dma),
				upper_32_bits(urb->transfer_dma),
				TRB_LEN(urb->transfer_buffer_length) | TRB_INTR_TARGET(0),
				/* Event on short tx */
				field | TRB_ISP | TRB_TYPE(TRB_DATA) | ep_ring->cycle_state);
	}

	/* Save the DMA address of the last TRB in the TD */
	td->last_trb = ep_ring->enqueue;

	/* Queue status TRB - see Table 7 and sections 4.11.2.2 and 6.4.1.2.3 */
	/* If the device sent data, the status stage is an OUT transfer */
	if (urb->transfer_buffer_length > 0 && setup->bRequestType & USB_DIR_IN)
		field = 0;
	else
		field = TRB_DIR_IN;
	queue_trb(xhci, ep_ring, false,
			0,
			0,
			TRB_INTR_TARGET(0),
			/* Event on completion */
			field | TRB_IOC | TRB_TYPE(TRB_STATUS) | ep_ring->cycle_state);

	giveback_first_trb(xhci, slot_id, ep_index, start_cycle, start_trb, td);
	return 0;
}

/****		Command Ring Operations		****/

/* Generic function for queueing a command TRB on the command ring */
static int queue_command(struct xhci_hcd *xhci, u32 field1, u32 field2, u32 field3, u32 field4)
{
	if (!room_on_ring(xhci, xhci->cmd_ring, 1)) {
		if (!in_interrupt())
			xhci_err(xhci, "ERR: No room for command on command ring\n");
		return -ENOMEM;
	}
	queue_trb(xhci, xhci->cmd_ring, false, field1, field2, field3,
			field4 | xhci->cmd_ring->cycle_state);
	return 0;
}
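
/* Note that queue_command() ORs the command ring's cycle state into field4
 * itself, so the callers below only pass the TRB type and any
 * command-specific fields.
 */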

/* Queue a no-op command on the command ring */
static int queue_cmd_noop(struct xhci_hcd *xhci)
{
	return queue_command(xhci, 0, 0, 0, TRB_TYPE(TRB_CMD_NOOP));
}

/*
 * Place a no-op command on the command ring to test the command and
 * event ring.
 */
void *xhci_setup_one_noop(struct xhci_hcd *xhci)
{
	if (queue_cmd_noop(xhci) < 0)
		return NULL;
	xhci->noops_submitted++;
	return xhci_ring_cmd_db;
}
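
/* A NULL return means the no-op could not be queued.  On success the caller
 * is expected to invoke the returned function pointer (xhci_ring_cmd_db)
 * once it is ready for the host controller to process the command.
 */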

/* Queue a slot enable or disable request on the command ring */
int xhci_queue_slot_control(struct xhci_hcd *xhci, u32 trb_type, u32 slot_id)
{
	return queue_command(xhci, 0, 0, 0,
			TRB_TYPE(trb_type) | SLOT_ID_FOR_TRB(slot_id));
}

/* Queue an address device command TRB */
int xhci_queue_address_device(struct xhci_hcd *xhci, dma_addr_t in_ctx_ptr,
		u32 slot_id)
{
	return queue_command(xhci, in_ctx_ptr, 0, 0,
			TRB_TYPE(TRB_ADDR_DEV) | SLOT_ID_FOR_TRB(slot_id));
}

/* Queue a configure endpoint command TRB */
int xhci_queue_configure_endpoint(struct xhci_hcd *xhci, dma_addr_t in_ctx_ptr,
		u32 slot_id)
{
	return queue_command(xhci, in_ctx_ptr, 0, 0,
			TRB_TYPE(TRB_CONFIG_EP) | SLOT_ID_FOR_TRB(slot_id));
}

int xhci_queue_stop_endpoint(struct xhci_hcd *xhci, int slot_id,
		unsigned int ep_index)
{
	u32 trb_slot_id = SLOT_ID_FOR_TRB(slot_id);
	u32 trb_ep_index = EP_ID_FOR_TRB(ep_index);
	u32 type = TRB_TYPE(TRB_STOP_RING);

	return queue_command(xhci, 0, 0, 0,
			trb_slot_id | trb_ep_index | type);
}
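
/* Stopping an endpoint ring is typically followed by a Set TR Dequeue Pointer
 * command (below), so that the hardware skips over any cancelled TDs when the
 * ring is restarted.
 */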

/* Set Transfer Ring Dequeue Pointer command.
 * This should not be used for endpoints that have streams enabled.
 */
static int queue_set_tr_deq(struct xhci_hcd *xhci, int slot_id,
		unsigned int ep_index, struct xhci_segment *deq_seg,
		union xhci_trb *deq_ptr, u32 cycle_state)
{
	dma_addr_t addr;
	u32 trb_slot_id = SLOT_ID_FOR_TRB(slot_id);
	u32 trb_ep_index = EP_ID_FOR_TRB(ep_index);
	u32 type = TRB_TYPE(TRB_SET_DEQ);

	addr = xhci_trb_virt_to_dma(deq_seg, deq_ptr);
	if (addr == 0) {
		xhci_warn(xhci, "WARN Cannot submit Set TR Deq Ptr\n");
		xhci_warn(xhci, "WARN deq seg = %p, deq pt = %p\n",
				deq_seg, deq_ptr);
	}
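	/* Bit 0 of the new dequeue pointer field carries the dequeue cycle
	 * state (DCS), which is why cycle_state is OR'ed into the low-order
	 * bits of the address below.
	 */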
	return queue_command(xhci, lower_32_bits(addr) | cycle_state,
			upper_32_bits(addr), 0,
			trb_slot_id | trb_ep_index | type);
}