@@ -143,7 +143,7 @@ static void next_trb(struct xhci_hcd *xhci,
  * See Cycle bit rules. SW is the consumer for the event ring only.
  * Don't make a ring full of link TRBs. That would be dumb and this would loop.
  */
-static void inc_deq(struct xhci_hcd *xhci, struct xhci_ring *ring, bool consumer)
+static void inc_deq(struct xhci_hcd *xhci, struct xhci_ring *ring)
 {
 	union xhci_trb *next = ++(ring->dequeue);
 	unsigned long long addr;
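As the comment above says, software is the consumer for the event ring only, so a caller-supplied `consumer` flag carries no information the ring itself couldn't carry: the next hunk derives the same answer from the ring's type.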
@@ -153,7 +153,8 @@ static void inc_deq(struct xhci_hcd *xhci, struct xhci_ring *ring, bool consumer
 	 * the end of an event ring segment (which doesn't have link TRBS)
 	 */
 	while (last_trb(xhci, ring, ring->deq_seg, next)) {
-		if (consumer && last_trb_on_last_seg(xhci, ring, ring->deq_seg, next)) {
+		if (ring->type == TYPE_EVENT && last_trb_on_last_seg(xhci,
+				ring, ring->deq_seg, next)) {
 			ring->cycle_state = (ring->cycle_state ? 0 : 1);
 		}
 		ring->deq_seg = ring->deq_seg->next;
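The TYPE_EVENT check above (and the TYPE_ISOC checks further down) rely on a ring-type field that this file never declares, so it presumably arrives with a companion xhci.h change that is not part of this diff. A minimal sketch of what that addition would look like — the enum name and the exact enumerator list are assumptions; only TYPE_EVENT, TYPE_ISOC, and the `type` field name are visible in the hunks here:

/* Sketch (assumed companion change in xhci.h, not shown in this diff):
 * tag each ring with the kind of TRBs it carries, so the ring helpers
 * can derive what the old 'consumer' and 'isoc' booleans used to say.
 */
enum xhci_ring_type {
	TYPE_CTRL = 0,
	TYPE_ISOC,
	TYPE_BULK,
	TYPE_INTR,
	TYPE_STREAM,
	TYPE_COMMAND,
	TYPE_EVENT,
};

struct xhci_ring {
	/* existing fields: segments, enqueue/dequeue, cycle_state, ... */
	enum xhci_ring_type	type;
};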
@@ -181,7 +182,7 @@ static void inc_deq(struct xhci_hcd *xhci, struct xhci_ring *ring, bool consumer
  * prepare_transfer()?
  */
 static void inc_enq(struct xhci_hcd *xhci, struct xhci_ring *ring,
-		bool consumer, bool more_trbs_coming, bool isoc)
+		bool more_trbs_coming)
 {
 	u32 chain;
 	union xhci_trb *next;
@@ -195,35 +196,35 @@ static void inc_enq(struct xhci_hcd *xhci, struct xhci_ring *ring,
 	 * the end of an event ring segment (which doesn't have link TRBS)
 	 */
 	while (last_trb(xhci, ring, ring->enq_seg, next)) {
-		if (!consumer) {
-			if (ring != xhci->event_ring) {
-				/*
-				 * If the caller doesn't plan on enqueueing more
-				 * TDs before ringing the doorbell, then we
-				 * don't want to give the link TRB to the
-				 * hardware just yet. We'll give the link TRB
-				 * back in prepare_ring() just before we enqueue
-				 * the TD at the top of the ring.
-				 */
-				if (!chain && !more_trbs_coming)
-					break;
+		if (ring->type != TYPE_EVENT) {
+			/*
+			 * If the caller doesn't plan on enqueueing more
+			 * TDs before ringing the doorbell, then we
+			 * don't want to give the link TRB to the
+			 * hardware just yet. We'll give the link TRB
+			 * back in prepare_ring() just before we enqueue
+			 * the TD at the top of the ring.
+			 */
+			if (!chain && !more_trbs_coming)
+				break;
 
-				/* If we're not dealing with 0.95 hardware or
-				 * isoc rings on AMD 0.96 host,
-				 * carry over the chain bit of the previous TRB
-				 * (which may mean the chain bit is cleared).
-				 */
-				if (!(isoc && (xhci->quirks & XHCI_AMD_0x96_HOST))
+			/* If we're not dealing with 0.95 hardware or
+			 * isoc rings on AMD 0.96 host,
+			 * carry over the chain bit of the previous TRB
+			 * (which may mean the chain bit is cleared).
+			 */
+			if (!(ring->type == TYPE_ISOC &&
+					(xhci->quirks & XHCI_AMD_0x96_HOST))
 				&& !xhci_link_trb_quirk(xhci)) {
-					next->link.control &=
-						cpu_to_le32(~TRB_CHAIN);
-					next->link.control |=
-						cpu_to_le32(chain);
-				}
-				/* Give this link TRB to the hardware */
-				wmb();
-				next->link.control ^= cpu_to_le32(TRB_CYCLE);
+				next->link.control &=
+					cpu_to_le32(~TRB_CHAIN);
+				next->link.control |=
+					cpu_to_le32(chain);
 			}
+			/* Give this link TRB to the hardware */
+			wmb();
+			next->link.control ^= cpu_to_le32(TRB_CYCLE);
+
 			/* Toggle the cycle bit after the last ring segment. */
 			if (last_trb_on_last_seg(xhci, ring, ring->enq_seg, next)) {
 				ring->cycle_state = (ring->cycle_state ? 0 : 1);
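The large hunk above is mostly an indentation shift: with `consumer` gone, the old nested `if (!consumer)` / `if (ring != xhci->event_ring)` pair collapses into the single `if (ring->type != TYPE_EVENT)`, and the comment and chain-bit blocks move one level left otherwise unchanged. Condensed, the post-patch loop reads roughly as follows — carry_chain_bit() is a made-up name standing in for the two next->link.control updates, and the segment advance is paraphrased from context the hunk doesn't show:

	while (last_trb(xhci, ring, ring->enq_seg, next)) {
		if (ring->type != TYPE_EVENT) {
			if (!chain && !more_trbs_coming)
				break;	/* keep the link TRB until prepare_ring() */
			if (!(ring->type == TYPE_ISOC &&
					(xhci->quirks & XHCI_AMD_0x96_HOST)) &&
					!xhci_link_trb_quirk(xhci))
				carry_chain_bit(next, chain);	/* hypothetical helper */
			wmb();	/* TRB contents must be visible before the cycle flip */
			next->link.control ^= cpu_to_le32(TRB_CYCLE);
			if (last_trb_on_last_seg(xhci, ring, ring->enq_seg, next))
				ring->cycle_state = (ring->cycle_state ? 0 : 1);
		}
		/* advance enq_seg/enqueue to the next segment (beyond the hunk) */
	}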
@@ -1185,7 +1186,7 @@ bandwidth_change:
 		xhci->error_bitmask |= 1 << 6;
 		break;
 	}
-	inc_deq(xhci, xhci->cmd_ring, false);
+	inc_deq(xhci, xhci->cmd_ring);
 }
 
 static void handle_vendor_event(struct xhci_hcd *xhci,
@@ -1398,7 +1399,7 @@ static void handle_port_status(struct xhci_hcd *xhci,
 
 cleanup:
 	/* Update event ring dequeue pointer before dropping the lock */
-	inc_deq(xhci, xhci->event_ring, true);
+	inc_deq(xhci, xhci->event_ring);
 
 	/* Don't make the USB core poll the roothub if we got a bad port status
 	 * change event. Besides, at that point we can't tell which roothub
@@ -1593,8 +1594,8 @@ static int finish_td(struct xhci_hcd *xhci, struct xhci_td *td,
 	} else {
 		/* Update ring dequeue pointer */
 		while (ep_ring->dequeue != td->last_trb)
-			inc_deq(xhci, ep_ring, false);
-		inc_deq(xhci, ep_ring, false);
+			inc_deq(xhci, ep_ring);
+		inc_deq(xhci, ep_ring);
 	}
 
 td_cleanup:
@@ -1842,8 +1843,8 @@ static int skip_isoc_td(struct xhci_hcd *xhci, struct xhci_td *td,
 
 	/* Update ring dequeue pointer */
 	while (ep_ring->dequeue != td->last_trb)
-		inc_deq(xhci, ep_ring, false);
-	inc_deq(xhci, ep_ring, false);
+		inc_deq(xhci, ep_ring);
+	inc_deq(xhci, ep_ring);
 
 	return finish_td(xhci, td, NULL, event, ep, status, true);
 }
@@ -2230,7 +2231,7 @@ cleanup:
 		 * Will roll back to continue process missed tds.
 		 */
 		if (trb_comp_code == COMP_MISSED_INT || !ep->skip) {
-			inc_deq(xhci, xhci->event_ring, true);
+			inc_deq(xhci, xhci->event_ring);
 		}
 
 		if (ret) {
@@ -2345,7 +2346,7 @@ static int xhci_handle_event(struct xhci_hcd *xhci)
 
 	if (update_ptrs)
 		/* Update SW event ring dequeue pointer */
-		inc_deq(xhci, xhci->event_ring, true);
+		inc_deq(xhci, xhci->event_ring);
 
 	/* Are there more items on the event ring? Caller will call us again to
 	 * check.
@@ -2461,7 +2462,7 @@ irqreturn_t xhci_msi_irq(int irq, struct usb_hcd *hcd)
  * prepare_transfer()?
  */
 static void queue_trb(struct xhci_hcd *xhci, struct xhci_ring *ring,
-		bool consumer, bool more_trbs_coming, bool isoc,
+		bool more_trbs_coming,
 		u32 field1, u32 field2, u32 field3, u32 field4)
 {
 	struct xhci_generic_trb *trb;
@@ -2471,7 +2472,7 @@ static void queue_trb(struct xhci_hcd *xhci, struct xhci_ring *ring,
 	trb->field[1] = cpu_to_le32(field2);
 	trb->field[2] = cpu_to_le32(field3);
 	trb->field[3] = cpu_to_le32(field4);
-	inc_enq(xhci, ring, consumer, more_trbs_coming, isoc);
+	inc_enq(xhci, ring, more_trbs_coming);
 }
 
 /*
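From here on the changes are mechanical call-site updates: queue_trb() keeps only `more_trbs_coming` ahead of the four TRB words, so every caller below simply drops two boolean literals. An illustrative migrated call (shape only, not copied from the patch):

	queue_trb(xhci, ep_ring, more_trbs_coming,
			lower_32_bits(addr),
			upper_32_bits(addr),
			length_field,
			field | ep_ring->cycle_state);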
@@ -2479,7 +2480,7 @@ static void queue_trb(struct xhci_hcd *xhci, struct xhci_ring *ring,
  * FIXME allocate segments if the ring is full.
  */
 static int prepare_ring(struct xhci_hcd *xhci, struct xhci_ring *ep_ring,
-		u32 ep_state, unsigned int num_trbs, bool isoc, gfp_t mem_flags)
+		u32 ep_state, unsigned int num_trbs, gfp_t mem_flags)
 {
 	/* Make sure the endpoint has been added to xHC schedule */
 	switch (ep_state) {
@@ -2524,8 +2525,9 @@ static int prepare_ring(struct xhci_hcd *xhci, struct xhci_ring *ep_ring,
 		/* If we're not dealing with 0.95 hardware or isoc rings
 		 * on AMD 0.96 host, clear the chain bit.
 		 */
-		if (!xhci_link_trb_quirk(xhci) && !(isoc &&
-				(xhci->quirks & XHCI_AMD_0x96_HOST)))
+		if (!xhci_link_trb_quirk(xhci) &&
+				!(ring->type == TYPE_ISOC &&
+				 (xhci->quirks & XHCI_AMD_0x96_HOST)))
 			next->link.control &= cpu_to_le32(~TRB_CHAIN);
 		else
 			next->link.control |= cpu_to_le32(TRB_CHAIN);
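One reviewer-level note on the hunk above: prepare_ring() takes `ep_ring`, yet the new condition reads `ring->type`. That only compiles if the enclosing block — outside this hunk's context — provides a `ring` pointer, presumably along these lines:

	/* Assumed surrounding context (not visible in the hunk): */
	if (enqueue_is_link_trb(ep_ring)) {
		struct xhci_ring *ring = ep_ring;
		union xhci_trb *next = ring->enqueue;

		/* the chain-bit fixup shown above operates on 'next' here */
	}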
@@ -2553,7 +2555,6 @@ static int prepare_transfer(struct xhci_hcd *xhci,
 		unsigned int num_trbs,
 		struct urb *urb,
 		unsigned int td_index,
-		bool isoc,
 		gfp_t mem_flags)
 {
 	int ret;
@@ -2571,7 +2572,7 @@ static int prepare_transfer(struct xhci_hcd *xhci,
 
 	ret = prepare_ring(xhci, ep_ring,
 			   le32_to_cpu(ep_ctx->ep_info) & EP_STATE_MASK,
-			   num_trbs, isoc, mem_flags);
+			   num_trbs, mem_flags);
 	if (ret)
 		return ret;
 
@@ -2781,7 +2782,7 @@ static int queue_bulk_sg_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
 
 	trb_buff_len = prepare_transfer(xhci, xhci->devs[slot_id],
 			ep_index, urb->stream_id,
-			num_trbs, urb, 0, false, mem_flags);
+			num_trbs, urb, 0, mem_flags);
 	if (trb_buff_len < 0)
 		return trb_buff_len;
 
@@ -2869,7 +2870,7 @@ static int queue_bulk_sg_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
 			more_trbs_coming = true;
 		else
 			more_trbs_coming = false;
-		queue_trb(xhci, ep_ring, false, more_trbs_coming, false,
+		queue_trb(xhci, ep_ring, more_trbs_coming,
 				lower_32_bits(addr),
 				upper_32_bits(addr),
 				length_field,
@@ -2951,7 +2952,7 @@ int xhci_queue_bulk_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
 
 	ret = prepare_transfer(xhci, xhci->devs[slot_id],
 			ep_index, urb->stream_id,
-			num_trbs, urb, 0, false, mem_flags);
+			num_trbs, urb, 0, mem_flags);
 	if (ret < 0)
 		return ret;
 
@@ -3023,7 +3024,7 @@ int xhci_queue_bulk_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
 			more_trbs_coming = true;
 		else
 			more_trbs_coming = false;
-		queue_trb(xhci, ep_ring, false, more_trbs_coming, false,
+		queue_trb(xhci, ep_ring, more_trbs_coming,
 				lower_32_bits(addr),
 				upper_32_bits(addr),
 				length_field,
@@ -3080,7 +3081,7 @@ int xhci_queue_ctrl_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
 	num_trbs++;
 	ret = prepare_transfer(xhci, xhci->devs[slot_id],
 			ep_index, urb->stream_id,
-			num_trbs, urb, 0, false, mem_flags);
+			num_trbs, urb, 0, mem_flags);
 	if (ret < 0)
 		return ret;
 
|
@@ -3113,7 +3114,7 @@ int xhci_queue_ctrl_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
|
|
|
}
|
|
|
}
|
|
|
|
|
|
- queue_trb(xhci, ep_ring, false, true, false,
|
|
|
+ queue_trb(xhci, ep_ring, true,
|
|
|
setup->bRequestType | setup->bRequest << 8 | le16_to_cpu(setup->wValue) << 16,
|
|
|
le16_to_cpu(setup->wIndex) | le16_to_cpu(setup->wLength) << 16,
|
|
|
TRB_LEN(8) | TRB_INTR_TARGET(0),
|
|
@@ -3133,7 +3134,7 @@ int xhci_queue_ctrl_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
 	if (urb->transfer_buffer_length > 0) {
 		if (setup->bRequestType & USB_DIR_IN)
 			field |= TRB_DIR_IN;
-		queue_trb(xhci, ep_ring, false, true, false,
+		queue_trb(xhci, ep_ring, true,
 				lower_32_bits(urb->transfer_dma),
 				upper_32_bits(urb->transfer_dma),
 				length_field,
@@ -3149,7 +3150,7 @@ int xhci_queue_ctrl_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
 		field = 0;
 	else
 		field = TRB_DIR_IN;
-	queue_trb(xhci, ep_ring, false, false, false,
+	queue_trb(xhci, ep_ring, false,
 			0,
 			0,
 			TRB_INTR_TARGET(0),
@@ -3289,8 +3290,7 @@ static int xhci_queue_isoc_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
 		trbs_per_td = count_isoc_trbs_needed(xhci, urb, i);
 
 		ret = prepare_transfer(xhci, xhci->devs[slot_id], ep_index,
-				urb->stream_id, trbs_per_td, urb, i, true,
-				mem_flags);
+				urb->stream_id, trbs_per_td, urb, i, mem_flags);
 		if (ret < 0) {
 			if (i == 0)
 				return ret;
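Note that the isochronous paths no longer pass `true` to prepare_transfer() and prepare_ring(): the AMD 0.96 chain-bit quirk now keys off `ring->type == TYPE_ISOC`, which the ring itself records, so the callers can't get it wrong.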
@@ -3360,7 +3360,7 @@ static int xhci_queue_isoc_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
 				remainder |
 				TRB_INTR_TARGET(0);
 
-			queue_trb(xhci, ep_ring, false, more_trbs_coming, true,
+			queue_trb(xhci, ep_ring, more_trbs_coming,
 				lower_32_bits(addr),
 				upper_32_bits(addr),
 				length_field,
@@ -3443,7 +3443,7 @@ int xhci_queue_isoc_tx_prepare(struct xhci_hcd *xhci, gfp_t mem_flags,
 	 * Do not insert any td of the urb to the ring if the check failed.
 	 */
 	ret = prepare_ring(xhci, ep_ring, le32_to_cpu(ep_ctx->ep_info) & EP_STATE_MASK,
-			   num_trbs, true, mem_flags);
+			   num_trbs, mem_flags);
 	if (ret)
 		return ret;
 
@@ -3502,7 +3502,7 @@ static int queue_command(struct xhci_hcd *xhci, u32 field1, u32 field2,
 		reserved_trbs++;
 
 	ret = prepare_ring(xhci, xhci->cmd_ring, EP_STATE_RUNNING,
-			reserved_trbs, false, GFP_ATOMIC);
+			reserved_trbs, GFP_ATOMIC);
 	if (ret < 0) {
 		xhci_err(xhci, "ERR: No room for command on command ring\n");
 		if (command_must_succeed)
@@ -3510,8 +3510,8 @@ static int queue_command(struct xhci_hcd *xhci, u32 field1, u32 field2,
 				"unfailable commands failed.\n");
 		return ret;
 	}
-	queue_trb(xhci, xhci->cmd_ring, false, false, false, field1, field2,
-			field3, field4 | xhci->cmd_ring->cycle_state);
+	queue_trb(xhci, xhci->cmd_ring, false, field1, field2, field3,
+			field4 | xhci->cmd_ring->cycle_state);
 	return 0;
 }
 
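With every boolean removed, nothing in this file states which rings are event, command, or isoc; that fact has to be attached when the ring is created. A sketch of the allocation-side counterpart this patch presumably pairs with (the real code would live in xhci-mem.c; apart from TYPE_EVENT and the `type` field, the names below are assumptions):

	struct xhci_ring *xhci_ring_alloc(struct xhci_hcd *xhci,
			unsigned int num_segs, bool link_trbs,
			enum xhci_ring_type type, gfp_t flags);

	/* and at each call site, e.g.: */
	xhci->cmd_ring = xhci_ring_alloc(xhci, 1, true, TYPE_COMMAND, flags);
	xhci->event_ring = xhci_ring_alloc(xhci, ERST_NUM_SEGS, false,
			TYPE_EVENT, flags);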