@@ -145,10 +145,17 @@ static void next_trb(struct xhci_hcd *xhci,
  */
 static void inc_deq(struct xhci_hcd *xhci, struct xhci_ring *ring)
 {
-	union xhci_trb *next = ++(ring->dequeue);
+	union xhci_trb *next;
 	unsigned long long addr;
 
 	ring->deq_updates++;
+
+	/* If this is not event ring, there is one more usable TRB */
+	if (ring->type != TYPE_EVENT &&
+			!last_trb(xhci, ring, ring->deq_seg, ring->dequeue))
+		ring->num_trbs_free++;
+	next = ++(ring->dequeue);
+
 	/* Update the dequeue pointer further if that was a link TRB or we're at
 	 * the end of an event ring segment (which doesn't have link TRBS)
 	 */
@@ -189,6 +196,10 @@ static void inc_enq(struct xhci_hcd *xhci, struct xhci_ring *ring,
 	unsigned long long addr;
 
 	chain = le32_to_cpu(ring->enqueue->generic.field[3]) & TRB_CHAIN;
+	/* If this is not event ring, there is one less usable TRB */
+	if (ring->type != TYPE_EVENT &&
+			!last_trb(xhci, ring, ring->enq_seg, ring->enqueue))
+		ring->num_trbs_free--;
 	next = ++(ring->enqueue);
 
 	ring->enq_updates++;
@@ -240,54 +251,14 @@ static void inc_enq(struct xhci_hcd *xhci, struct xhci_ring *ring,
 /*
  * Check to see if there's room to enqueue num_trbs on the ring. See rules
  * above.
- * FIXME: this would be simpler and faster if we just kept track of the number
- * of free TRBs in a ring.
  */
-static int room_on_ring(struct xhci_hcd *xhci, struct xhci_ring *ring,
+static inline int room_on_ring(struct xhci_hcd *xhci, struct xhci_ring *ring,
 		unsigned int num_trbs)
 {
-	int i;
-	union xhci_trb *enq = ring->enqueue;
-	struct xhci_segment *enq_seg = ring->enq_seg;
-	struct xhci_segment *cur_seg;
-	unsigned int left_on_ring;
-
-	/* If we are currently pointing to a link TRB, advance the
-	 * enqueue pointer before checking for space */
-	while (last_trb(xhci, ring, enq_seg, enq)) {
-		enq_seg = enq_seg->next;
-		enq = enq_seg->trbs;
-	}
-
-	/* Check if ring is empty */
-	if (enq == ring->dequeue) {
-		/* Can't use link trbs */
-		left_on_ring = TRBS_PER_SEGMENT - 1;
-		for (cur_seg = enq_seg->next; cur_seg != enq_seg;
-				cur_seg = cur_seg->next)
-			left_on_ring += TRBS_PER_SEGMENT - 1;
-
-		/* Always need one TRB free in the ring. */
-		left_on_ring -= 1;
-		if (num_trbs > left_on_ring) {
-			xhci_warn(xhci, "Not enough room on ring; "
-					"need %u TRBs, %u TRBs left\n",
-					num_trbs, left_on_ring);
-			return 0;
-		}
+	if (ring->num_trbs_free >= num_trbs)
 		return 1;
-	}
-	/* Make sure there's an extra empty TRB available */
-	for (i = 0; i <= num_trbs; ++i) {
-		if (enq == ring->dequeue)
-			return 0;
-		enq++;
-		while (last_trb(xhci, ring, enq_seg, enq)) {
-			enq_seg = enq_seg->next;
-			enq = enq_seg->trbs;
-		}
-	}
-	return 1;
+
+	return 0;
 }
 
 /* Ring the host controller doorbell after placing a command on the ring */
@@ -893,6 +864,43 @@ void xhci_stop_endpoint_command_watchdog(unsigned long arg)
 	xhci_dbg(xhci, "xHCI host controller is dead.\n");
 }
 
+
+static void update_ring_for_set_deq_completion(struct xhci_hcd *xhci,
+		struct xhci_virt_device *dev,
+		struct xhci_ring *ep_ring,
+		unsigned int ep_index)
+{
+	union xhci_trb *dequeue_temp;
+	int num_trbs_free_temp;
+	bool revert = false;
+
+	num_trbs_free_temp = ep_ring->num_trbs_free;
+	dequeue_temp = ep_ring->dequeue;
+
+	while (ep_ring->dequeue != dev->eps[ep_index].queued_deq_ptr) {
+		/* We have more usable TRBs */
+		ep_ring->num_trbs_free++;
+		ep_ring->dequeue++;
+		if (last_trb(xhci, ep_ring, ep_ring->deq_seg,
+				ep_ring->dequeue)) {
+			if (ep_ring->dequeue ==
+					dev->eps[ep_index].queued_deq_ptr)
+				break;
+			ep_ring->deq_seg = ep_ring->deq_seg->next;
+			ep_ring->dequeue = ep_ring->deq_seg->trbs;
+		}
+		if (ep_ring->dequeue == dequeue_temp) {
+			revert = true;
+			break;
+		}
+	}
+
+	if (revert) {
+		xhci_dbg(xhci, "Unable to find new dequeue pointer\n");
+		ep_ring->num_trbs_free = num_trbs_free_temp;
+	}
+}
+
 /*
  * When we get a completion for a Set Transfer Ring Dequeue Pointer command,
  * we need to clear the set deq pending flag in the endpoint ring state, so that
@@ -974,8 +982,8 @@ static void handle_set_deq_completion(struct xhci_hcd *xhci,
 		/* Update the ring's dequeue segment and dequeue pointer
 		 * to reflect the new position.
 		 */
-		ep_ring->deq_seg = dev->eps[ep_index].queued_deq_seg;
-		ep_ring->dequeue = dev->eps[ep_index].queued_deq_ptr;
+		update_ring_for_set_deq_completion(xhci, dev,
+			ep_ring, ep_index);
 	} else {
 		xhci_warn(xhci, "Mismatch between completed Set TR Deq "
 				"Ptr command & xHCI internal state.\n");
@@ -3407,6 +3415,7 @@ cleanup:
 	ep_ring->enqueue = urb_priv->td[0]->first_trb;
 	ep_ring->enq_seg = urb_priv->td[0]->start_seg;
 	ep_ring->cycle_state = start_cycle;
+	ep_ring->num_trbs_free = ep_ring->num_trbs_free_temp;
 	usb_hcd_unlink_urb_from_ep(bus_to_hcd(urb->dev->bus), urb);
 	return ret;
 }
@@ -3479,6 +3488,8 @@ int xhci_queue_isoc_tx_prepare(struct xhci_hcd *xhci, gfp_t mem_flags,
 				urb->dev->speed == USB_SPEED_FULL)
 			urb->interval /= 8;
 	}
+	ep_ring->num_trbs_free_temp = ep_ring->num_trbs_free;
+
 	return xhci_queue_isoc_tx(xhci, GFP_ATOMIC, urb, slot_id, ep_index);
 }
 
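For illustration only, below is a small userspace sketch (not kernel code) of the accounting scheme this patch introduces: enqueue takes one TRB off a per-ring free counter, dequeue gives one back, and room_on_ring() becomes a single comparison instead of walking every segment. The names here (toy_ring, advance(), SEGS, TRBS_PER_SEG) and the flat index-based ring are invented for the sketch; the real driver keeps ring->num_trbs_free alongside segment pointers and uses last_trb() to skip link TRBs.

/*
 * Toy model only: a two-segment ring where the last slot of each segment
 * stands in for a link TRB.  Enqueue decrements the free counter, dequeue
 * increments it, and the room check is O(1).
 */
#include <stdbool.h>
#include <stdio.h>

#define SEGS		2
#define TRBS_PER_SEG	8	/* last slot of each segment acts as a link TRB */

struct toy_ring {
	unsigned int enq;		/* enqueue index */
	unsigned int deq;		/* dequeue index */
	unsigned int num_trbs_free;	/* usable (non-link) TRBs still free */
};

static bool is_link(unsigned int idx)
{
	return (idx % TRBS_PER_SEG) == TRBS_PER_SEG - 1;
}

/* Step an index forward, skipping link TRBs (they never carry transfers). */
static unsigned int advance(unsigned int idx)
{
	do {
		idx = (idx + 1) % (SEGS * TRBS_PER_SEG);
	} while (is_link(idx));
	return idx;
}

static void toy_ring_init(struct toy_ring *r)
{
	r->enq = 0;
	r->deq = 0;
	/* One usable slot per non-link TRB, minus the one TRB that must
	 * always stay free (the rule stated in the old room_on_ring()). */
	r->num_trbs_free = SEGS * (TRBS_PER_SEG - 1) - 1;
}

static bool room_on_ring(const struct toy_ring *r, unsigned int num_trbs)
{
	return r->num_trbs_free >= num_trbs;	/* no segment walk needed */
}

static void inc_enq(struct toy_ring *r)
{
	r->num_trbs_free--;		/* one less usable TRB */
	r->enq = advance(r->enq);
}

static void inc_deq(struct toy_ring *r)
{
	r->num_trbs_free++;		/* one more usable TRB */
	r->deq = advance(r->deq);
}

int main(void)
{
	struct toy_ring r;
	unsigned int queued = 0;

	toy_ring_init(&r);
	while (room_on_ring(&r, 1)) {	/* producer fills the ring */
		inc_enq(&r);
		queued++;
	}
	printf("queued %u TRBs, %u free\n", queued, r.num_trbs_free);

	inc_deq(&r);			/* consumer retires one TRB */
	printf("after one completion: %u free\n", r.num_trbs_free);
	return 0;
}

The initial counter value mirrors the "Always need one TRB free in the ring" rule from the removed room_on_ring(), so a completely full ring is never mistaken for an empty one.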