@@ -178,7 +178,7 @@ static void inc_deq(struct xhci_hcd *xhci, struct xhci_ring *ring)
 		if (ring->type == TYPE_EVENT &&
 				last_trb_on_last_seg(xhci, ring,
 					ring->deq_seg, ring->dequeue)) {
-			ring->cycle_state = (ring->cycle_state ? 0 : 1);
+			ring->cycle_state ^= 1;
 		}
 		ring->deq_seg = ring->deq_seg->next;
 		ring->dequeue = ring->deq_seg->trbs;
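The XOR form above works because the consumer cycle state only ever holds 0 or 1, so XOR-ing with 1 flips it on every wrap of the event ring. A minimal standalone sketch (not driver code) of the equivalence:

#include <assert.h>

int main(void)
{
	unsigned int cycle_state = 1;

	cycle_state ^= 1;	/* 1 -> 0, same as (cycle_state ? 0 : 1) */
	assert(cycle_state == 0);
	cycle_state ^= 1;	/* 0 -> 1 on the next wrap */
	assert(cycle_state == 1);
	return 0;
}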
@@ -726,7 +726,7 @@ static void xhci_stop_watchdog_timer_in_irq(struct xhci_hcd *xhci,
 
 /* Must be called with xhci->lock held in interrupt context */
 static void xhci_giveback_urb_in_irq(struct xhci_hcd *xhci,
-		struct xhci_td *cur_td, int status, char *adjective)
+		struct xhci_td *cur_td, int status)
 {
 	struct usb_hcd *hcd;
 	struct urb *urb;
@@ -765,10 +765,9 @@ static void xhci_giveback_urb_in_irq(struct xhci_hcd *xhci,
  * 2. Otherwise, we turn all the TRBs in the TD into No-op TRBs (with the chain
  *    bit cleared) so that the HW will skip over them.
  */
-static void handle_stopped_endpoint(struct xhci_hcd *xhci,
+static void xhci_handle_cmd_stop_ep(struct xhci_hcd *xhci, int slot_id,
 		union xhci_trb *trb, struct xhci_event_cmd *event)
 {
-	unsigned int slot_id;
 	unsigned int ep_index;
 	struct xhci_virt_device *virt_dev;
 	struct xhci_ring *ep_ring;
@@ -779,10 +778,7 @@ static void handle_stopped_endpoint(struct xhci_hcd *xhci,
 
 	struct xhci_dequeue_state deq_state;
 
-	if (unlikely(TRB_TO_SUSPEND_PORT(
-			le32_to_cpu(xhci->cmd_ring->dequeue->generic.field[3])))) {
-		slot_id = TRB_TO_SLOT_ID(
-			le32_to_cpu(xhci->cmd_ring->dequeue->generic.field[3]));
+	if (unlikely(TRB_TO_SUSPEND_PORT(le32_to_cpu(trb->generic.field[3])))) {
 		virt_dev = xhci->devs[slot_id];
 		if (virt_dev)
 			handle_cmd_in_cmd_wait_list(xhci, virt_dev,
@@ -795,7 +791,6 @@ static void handle_stopped_endpoint(struct xhci_hcd *xhci,
 	}
 
 	memset(&deq_state, 0, sizeof(deq_state));
-	slot_id = TRB_TO_SLOT_ID(le32_to_cpu(trb->generic.field[3]));
 	ep_index = TRB_TO_EP_INDEX(le32_to_cpu(trb->generic.field[3]));
 	ep = &xhci->devs[slot_id]->eps[ep_index];
 
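With slot_id now handed in by the caller, the handler only decodes ep_index from the command TRB itself. A standalone sketch (not driver code) of that field[3] decoding, with mask values assumed to mirror the xhci.h macros of this era (slot ID in bits 31:24, 1-based endpoint ID in bits 20:16):

#include <stdio.h>

/* Assumed masks, mirroring xhci.h of this era. */
#define TRB_TO_SLOT_ID(p)	(((p) & (0xffU << 24)) >> 24)
#define TRB_TO_EP_INDEX(p)	((((p) & (0x1fU << 16)) >> 16) - 1)

int main(void)
{
	unsigned int field3 = (5U << 24) | (3U << 16);	/* slot 5, endpoint ID 3 */

	/* Prints slot_id=5 ep_index=2: the "- 1" makes ep_index 0-based. */
	printf("slot_id=%u ep_index=%u\n",
	       TRB_TO_SLOT_ID(field3), TRB_TO_EP_INDEX(field3));
	return 0;
}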
@@ -891,7 +886,7 @@ remove_finished_td:
 		/* Doesn't matter what we pass for status, since the core will
 		 * just overwrite it (because the URB has been unlinked).
 		 */
-		xhci_giveback_urb_in_irq(xhci, cur_td, 0, "cancelled");
+		xhci_giveback_urb_in_irq(xhci, cur_td, 0);
 
 		/* Stop processing the cancelled list if the watchdog timer is
 		 * running.
@@ -1001,7 +996,7 @@ void xhci_stop_endpoint_command_watchdog(unsigned long arg)
 				if (!list_empty(&cur_td->cancelled_td_list))
 					list_del_init(&cur_td->cancelled_td_list);
 				xhci_giveback_urb_in_irq(xhci, cur_td,
-						-ESHUTDOWN, "killed");
+						-ESHUTDOWN);
 			}
 			while (!list_empty(&temp_ep->cancelled_td_list)) {
 				cur_td = list_first_entry(
@@ -1010,7 +1005,7 @@ void xhci_stop_endpoint_command_watchdog(unsigned long arg)
 						cancelled_td_list);
 				list_del_init(&cur_td->cancelled_td_list);
 				xhci_giveback_urb_in_irq(xhci, cur_td,
-						-ESHUTDOWN, "killed");
+						-ESHUTDOWN);
 			}
 		}
 	}
@@ -1077,11 +1072,9 @@ static void update_ring_for_set_deq_completion(struct xhci_hcd *xhci,
  * endpoint doorbell to restart the ring, but only if there aren't more
  * cancellations pending.
  */
-static void handle_set_deq_completion(struct xhci_hcd *xhci,
-		struct xhci_event_cmd *event,
-		union xhci_trb *trb)
+static void xhci_handle_cmd_set_deq(struct xhci_hcd *xhci, int slot_id,
+		union xhci_trb *trb, u32 cmd_comp_code)
 {
-	unsigned int slot_id;
 	unsigned int ep_index;
 	unsigned int stream_id;
 	struct xhci_ring *ep_ring;
@@ -1089,7 +1082,6 @@ static void handle_set_deq_completion(struct xhci_hcd *xhci,
 	struct xhci_ep_ctx *ep_ctx;
 	struct xhci_slot_ctx *slot_ctx;
 
-	slot_id = TRB_TO_SLOT_ID(le32_to_cpu(trb->generic.field[3]));
 	ep_index = TRB_TO_EP_INDEX(le32_to_cpu(trb->generic.field[3]));
 	stream_id = TRB_TO_STREAM_ID(le32_to_cpu(trb->generic.field[2]));
 	dev = xhci->devs[slot_id];
@@ -1107,11 +1099,11 @@ static void handle_set_deq_completion(struct xhci_hcd *xhci,
 	ep_ctx = xhci_get_ep_ctx(xhci, dev->out_ctx, ep_index);
 	slot_ctx = xhci_get_slot_ctx(xhci, dev->out_ctx);
 
-	if (GET_COMP_CODE(le32_to_cpu(event->status)) != COMP_SUCCESS) {
+	if (cmd_comp_code != COMP_SUCCESS) {
 		unsigned int ep_state;
 		unsigned int slot_state;
 
-		switch (GET_COMP_CODE(le32_to_cpu(event->status))) {
+		switch (cmd_comp_code) {
 		case COMP_TRB_ERR:
 			xhci_warn(xhci, "WARN Set TR Deq Ptr cmd invalid because "
 					"of stream ID configuration\n");
@@ -1134,7 +1126,7 @@ static void handle_set_deq_completion(struct xhci_hcd *xhci,
 		default:
 			xhci_warn(xhci, "WARN Set TR Deq Ptr cmd with unknown "
 					"completion code of %u.\n",
-					GET_COMP_CODE(le32_to_cpu(event->status)));
+					cmd_comp_code);
 			break;
 		}
 		/* OK what do we do now? The endpoint state is hosed, and we
@@ -1171,21 +1163,17 @@ static void handle_set_deq_completion(struct xhci_hcd *xhci,
 		ring_doorbell_for_active_rings(xhci, slot_id, ep_index);
 }
 
-static void handle_reset_ep_completion(struct xhci_hcd *xhci,
-		struct xhci_event_cmd *event,
-		union xhci_trb *trb)
+static void xhci_handle_cmd_reset_ep(struct xhci_hcd *xhci, int slot_id,
+		union xhci_trb *trb, u32 cmd_comp_code)
 {
-	int slot_id;
 	unsigned int ep_index;
 
-	slot_id = TRB_TO_SLOT_ID(le32_to_cpu(trb->generic.field[3]));
 	ep_index = TRB_TO_EP_INDEX(le32_to_cpu(trb->generic.field[3]));
 	/* This command will only fail if the endpoint wasn't halted,
 	 * but we don't care.
 	 */
 	xhci_dbg_trace(xhci, trace_xhci_dbg_reset_ep,
-		"Ignoring reset ep completion code of %u",
-		GET_COMP_CODE(le32_to_cpu(event->status)));
+		"Ignoring reset ep completion code of %u", cmd_comp_code);
 
 	/* HW with the reset endpoint quirk needs to have a configure endpoint
 	 * command complete before the endpoint can be used. Queue that here
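The hunk below extracts each arm of handle_cmd_completion()'s switch into a per-command helper that takes the pre-decoded slot_id and completion code. A minimal sketch (hypothetical names, not driver code) of that dispatch pattern:

#include <stdio.h>

/* Hypothetical stand-ins for the driver's TRB types and handlers. */
enum cmd_type { CMD_ENABLE_SLOT, CMD_DISABLE_SLOT };

static void handle_enable_slot(int slot_id, unsigned int comp_code)
{
	printf("enable slot %d completed, code %u\n", slot_id, comp_code);
}

static void handle_disable_slot(int slot_id)
{
	printf("disable slot %d completed\n", slot_id);
}

/* Decode once, then dispatch to one small handler per command type. */
static void handle_cmd(enum cmd_type type, int slot_id, unsigned int comp_code)
{
	switch (type) {
	case CMD_ENABLE_SLOT:
		handle_enable_slot(slot_id, comp_code);
		break;
	case CMD_DISABLE_SLOT:
		handle_disable_slot(slot_id);
		break;
	default:
		break;	/* skip unknown commands */
	}
}

int main(void)
{
	handle_cmd(CMD_ENABLE_SLOT, 1, 1);	/* slot 1, a COMP_SUCCESS-like code */
	return 0;
}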
@@ -1386,21 +1374,149 @@ static int handle_stopped_cmd_ring(struct xhci_hcd *xhci,
 	return cur_trb_is_good;
 }
 
+static void xhci_handle_cmd_enable_slot(struct xhci_hcd *xhci, int slot_id,
+		u32 cmd_comp_code)
+{
+	if (cmd_comp_code == COMP_SUCCESS)
+		xhci->slot_id = slot_id;
+	else
+		xhci->slot_id = 0;
+	complete(&xhci->addr_dev);
+}
+
+static void xhci_handle_cmd_disable_slot(struct xhci_hcd *xhci, int slot_id)
+{
+	struct xhci_virt_device *virt_dev;
+
+	virt_dev = xhci->devs[slot_id];
+	if (!virt_dev)
+		return;
+	if (xhci->quirks & XHCI_EP_LIMIT_QUIRK)
+		/* Delete default control endpoint resources */
+		xhci_free_device_endpoint_resources(xhci, virt_dev, true);
+	xhci_free_virt_device(xhci, slot_id);
+}
+
+static void xhci_handle_cmd_config_ep(struct xhci_hcd *xhci, int slot_id,
+		struct xhci_event_cmd *event, u32 cmd_comp_code)
+{
+	struct xhci_virt_device *virt_dev;
+	struct xhci_input_control_ctx *ctrl_ctx;
+	unsigned int ep_index;
+	unsigned int ep_state;
+	u32 add_flags, drop_flags;
+
+	virt_dev = xhci->devs[slot_id];
+	if (handle_cmd_in_cmd_wait_list(xhci, virt_dev, event))
+		return;
+	/*
+	 * Configure endpoint commands can come from the USB core
+	 * configuration or alt setting changes, or because the HW
+	 * needed an extra configure endpoint command after a reset
+	 * endpoint command or streams were being configured.
+	 * If the command was for a halted endpoint, the xHCI driver
+	 * is not waiting on the configure endpoint command.
+	 */
+	ctrl_ctx = xhci_get_input_control_ctx(xhci, virt_dev->in_ctx);
+	if (!ctrl_ctx) {
+		xhci_warn(xhci, "Could not get input context, bad type.\n");
+		return;
+	}
+
+	add_flags = le32_to_cpu(ctrl_ctx->add_flags);
+	drop_flags = le32_to_cpu(ctrl_ctx->drop_flags);
+	/* Input ctx add_flags are the endpoint index plus one */
+	ep_index = xhci_last_valid_endpoint(add_flags) - 1;
+
+	/* A usb_set_interface() call directly after clearing a halted
+	 * condition may race on this quirky hardware. Not worth
+	 * worrying about, since this is prototype hardware. Not sure
+	 * if this will work for streams, but streams support was
+	 * untested on this prototype.
+	 */
+	if (xhci->quirks & XHCI_RESET_EP_QUIRK &&
+			ep_index != (unsigned int) -1 &&
+			add_flags - SLOT_FLAG == drop_flags) {
+		ep_state = virt_dev->eps[ep_index].ep_state;
+		if (!(ep_state & EP_HALTED))
+			goto bandwidth_change;
+		xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
+				"Completed config ep cmd - "
+				"last ep index = %d, state = %d",
+				ep_index, ep_state);
+		/* Clear internal halted state and restart ring(s) */
+		virt_dev->eps[ep_index].ep_state &= ~EP_HALTED;
+		ring_doorbell_for_active_rings(xhci, slot_id, ep_index);
+		return;
+	}
+bandwidth_change:
+	xhci_dbg_trace(xhci, trace_xhci_dbg_context_change,
+			"Completed config ep cmd");
+	virt_dev->cmd_status = cmd_comp_code;
+	complete(&virt_dev->cmd_completion);
+	return;
+}
+
+static void xhci_handle_cmd_eval_ctx(struct xhci_hcd *xhci, int slot_id,
+		struct xhci_event_cmd *event, u32 cmd_comp_code)
+{
+	struct xhci_virt_device *virt_dev;
+
+	virt_dev = xhci->devs[slot_id];
+	if (handle_cmd_in_cmd_wait_list(xhci, virt_dev, event))
+		return;
+	virt_dev->cmd_status = cmd_comp_code;
+	complete(&virt_dev->cmd_completion);
+}
+
+static void xhci_handle_cmd_addr_dev(struct xhci_hcd *xhci, int slot_id,
+		u32 cmd_comp_code)
+{
+	xhci->devs[slot_id]->cmd_status = cmd_comp_code;
+	complete(&xhci->addr_dev);
+}
+
+static void xhci_handle_cmd_reset_dev(struct xhci_hcd *xhci, int slot_id,
+		struct xhci_event_cmd *event)
+{
+	struct xhci_virt_device *virt_dev;
+
+	xhci_dbg(xhci, "Completed reset device command.\n");
+	virt_dev = xhci->devs[slot_id];
+	if (virt_dev)
+		handle_cmd_in_cmd_wait_list(xhci, virt_dev, event);
+	else
+		xhci_warn(xhci, "Reset device command completion "
+				"for disabled slot %u\n", slot_id);
+}
+
+static void xhci_handle_cmd_nec_get_fw(struct xhci_hcd *xhci,
+		struct xhci_event_cmd *event)
+{
+	if (!(xhci->quirks & XHCI_NEC_HOST)) {
+		xhci->error_bitmask |= 1 << 6;
+		return;
+	}
+	xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
+			"NEC firmware version %2x.%02x",
+			NEC_FW_MAJOR(le32_to_cpu(event->status)),
+			NEC_FW_MINOR(le32_to_cpu(event->status)));
+}
+
 static void handle_cmd_completion(struct xhci_hcd *xhci,
 		struct xhci_event_cmd *event)
 {
 	int slot_id = TRB_TO_SLOT_ID(le32_to_cpu(event->flags));
 	u64 cmd_dma;
 	dma_addr_t cmd_dequeue_dma;
-	struct xhci_input_control_ctx *ctrl_ctx;
-	struct xhci_virt_device *virt_dev;
-	unsigned int ep_index;
-	struct xhci_ring *ep_ring;
-	unsigned int ep_state;
+	u32 cmd_comp_code;
+	union xhci_trb *cmd_trb;
+	u32 cmd_type;
 
 	cmd_dma = le64_to_cpu(event->cmd_trb);
+	cmd_trb = xhci->cmd_ring->dequeue;
 	cmd_dequeue_dma = xhci_trb_virt_to_dma(xhci->cmd_ring->deq_seg,
-			xhci->cmd_ring->dequeue);
+			cmd_trb);
 	/* Is the command ring deq ptr out of sync with the deq seg ptr? */
 	if (cmd_dequeue_dma == 0) {
 		xhci->error_bitmask |= 1 << 4;
@@ -1412,19 +1528,17 @@ static void handle_cmd_completion(struct xhci_hcd *xhci,
 		return;
 	}
 
-	trace_xhci_cmd_completion(&xhci->cmd_ring->dequeue->generic,
-			(struct xhci_generic_trb *) event);
+	trace_xhci_cmd_completion(cmd_trb, (struct xhci_generic_trb *) event);
 
-	if ((GET_COMP_CODE(le32_to_cpu(event->status)) == COMP_CMD_ABORT) ||
-		(GET_COMP_CODE(le32_to_cpu(event->status)) == COMP_CMD_STOP)) {
+	cmd_comp_code = GET_COMP_CODE(le32_to_cpu(event->status));
+	if (cmd_comp_code == COMP_CMD_ABORT || cmd_comp_code == COMP_CMD_STOP) {
 		/* If the return value is 0, we think the trb pointed by
 		 * command ring dequeue pointer is a good trb. The good
 		 * trb means we don't want to cancel the trb, but it have
 		 * been stopped by host. So we should handle it normally.
 		 * Otherwise, driver should invoke inc_deq() and return.
 		 */
-		if (handle_stopped_cmd_ring(xhci,
-				GET_COMP_CODE(le32_to_cpu(event->status)))) {
+		if (handle_stopped_cmd_ring(xhci, cmd_comp_code)) {
 			inc_deq(xhci, xhci->cmd_ring);
 			return;
 		}
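The final hunk below also stops matching `field[3] & TRB_TYPE_BITMASK` against `TRB_TYPE(x)` case labels, and instead extracts the type once with TRB_FIELD_TO_TYPE() and switches on the plain value. A standalone sketch of the equivalence, assuming the xhci.h layout where the TRB type occupies bits 15:10 of field[3]:

#include <assert.h>

/* Assumed layout, mirroring xhci.h: TRB type lives in bits 15:10. */
#define TRB_TYPE_BITMASK	(0xfc00U)
#define TRB_TYPE(p)		((p) << 10)
#define TRB_FIELD_TO_TYPE(f)	(((f) & TRB_TYPE_BITMASK) >> 10)

int main(void)
{
	unsigned int field3 = TRB_TYPE(9U) | 0x1U;	/* some type plus cycle bit */

	/* Old style: mask the whole field, compare against TRB_TYPE(x). */
	assert((field3 & TRB_TYPE_BITMASK) == TRB_TYPE(9U));
	/* New style: extract the type once, then switch on the plain value. */
	assert(TRB_FIELD_TO_TYPE(field3) == 9U);
	return 0;
}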
@@ -1436,117 +1550,47 @@ static void handle_cmd_completion(struct xhci_hcd *xhci,
 		return;
 	}
 
-	switch (le32_to_cpu(xhci->cmd_ring->dequeue->generic.field[3])
-		& TRB_TYPE_BITMASK) {
-	case TRB_TYPE(TRB_ENABLE_SLOT):
-		if (GET_COMP_CODE(le32_to_cpu(event->status)) == COMP_SUCCESS)
-			xhci->slot_id = slot_id;
-		else
-			xhci->slot_id = 0;
-		complete(&xhci->addr_dev);
+	cmd_type = TRB_FIELD_TO_TYPE(le32_to_cpu(cmd_trb->generic.field[3]));
+	switch (cmd_type) {
+	case TRB_ENABLE_SLOT:
+		xhci_handle_cmd_enable_slot(xhci, slot_id, cmd_comp_code);
 		break;
-	case TRB_TYPE(TRB_DISABLE_SLOT):
-		if (xhci->devs[slot_id]) {
-			if (xhci->quirks & XHCI_EP_LIMIT_QUIRK)
-				/* Delete default control endpoint resources */
-				xhci_free_device_endpoint_resources(xhci,
-						xhci->devs[slot_id], true);
-			xhci_free_virt_device(xhci, slot_id);
-		}
+	case TRB_DISABLE_SLOT:
+		xhci_handle_cmd_disable_slot(xhci, slot_id);
 		break;
-	case TRB_TYPE(TRB_CONFIG_EP):
-		virt_dev = xhci->devs[slot_id];
-		if (handle_cmd_in_cmd_wait_list(xhci, virt_dev, event))
-			break;
-		/*
-		 * Configure endpoint commands can come from the USB core
-		 * configuration or alt setting changes, or because the HW
-		 * needed an extra configure endpoint command after a reset
-		 * endpoint command or streams were being configured.
-		 * If the command was for a halted endpoint, the xHCI driver
-		 * is not waiting on the configure endpoint command.
-		 */
-		ctrl_ctx = xhci_get_input_control_ctx(xhci,
-				virt_dev->in_ctx);
-		if (!ctrl_ctx) {
-			xhci_warn(xhci, "Could not get input context, bad type.\n");
-			break;
-		}
-		/* Input ctx add_flags are the endpoint index plus one */
-		ep_index = xhci_last_valid_endpoint(le32_to_cpu(ctrl_ctx->add_flags)) - 1;
-		/* A usb_set_interface() call directly after clearing a halted
-		 * condition may race on this quirky hardware. Not worth
-		 * worrying about, since this is prototype hardware. Not sure
-		 * if this will work for streams, but streams support was
-		 * untested on this prototype.
-		 */
-		if (xhci->quirks & XHCI_RESET_EP_QUIRK &&
-				ep_index != (unsigned int) -1 &&
-				le32_to_cpu(ctrl_ctx->add_flags) - SLOT_FLAG ==
-					le32_to_cpu(ctrl_ctx->drop_flags)) {
-			ep_ring = xhci->devs[slot_id]->eps[ep_index].ring;
-			ep_state = xhci->devs[slot_id]->eps[ep_index].ep_state;
-			if (!(ep_state & EP_HALTED))
-				goto bandwidth_change;
-			xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
-					"Completed config ep cmd - "
-					"last ep index = %d, state = %d",
-					ep_index, ep_state);
-			/* Clear internal halted state and restart ring(s) */
-			xhci->devs[slot_id]->eps[ep_index].ep_state &=
-				~EP_HALTED;
-			ring_doorbell_for_active_rings(xhci, slot_id, ep_index);
-			break;
-		}
-bandwidth_change:
-		xhci_dbg_trace(xhci, trace_xhci_dbg_context_change,
-				"Completed config ep cmd");
-		xhci->devs[slot_id]->cmd_status =
-			GET_COMP_CODE(le32_to_cpu(event->status));
-		complete(&xhci->devs[slot_id]->cmd_completion);
+	case TRB_CONFIG_EP:
+		xhci_handle_cmd_config_ep(xhci, slot_id, event, cmd_comp_code);
 		break;
-	case TRB_TYPE(TRB_EVAL_CONTEXT):
-		virt_dev = xhci->devs[slot_id];
-		if (handle_cmd_in_cmd_wait_list(xhci, virt_dev, event))
-			break;
-		xhci->devs[slot_id]->cmd_status = GET_COMP_CODE(le32_to_cpu(event->status));
-		complete(&xhci->devs[slot_id]->cmd_completion);
+	case TRB_EVAL_CONTEXT:
+		xhci_handle_cmd_eval_ctx(xhci, slot_id, event, cmd_comp_code);
 		break;
-	case TRB_TYPE(TRB_ADDR_DEV):
-		xhci->devs[slot_id]->cmd_status = GET_COMP_CODE(le32_to_cpu(event->status));
-		complete(&xhci->addr_dev);
+	case TRB_ADDR_DEV:
+		xhci_handle_cmd_addr_dev(xhci, slot_id, cmd_comp_code);
 		break;
-	case TRB_TYPE(TRB_STOP_RING):
-		handle_stopped_endpoint(xhci, xhci->cmd_ring->dequeue, event);
+	case TRB_STOP_RING:
+		WARN_ON(slot_id != TRB_TO_SLOT_ID(
+				le32_to_cpu(cmd_trb->generic.field[3])));
+		xhci_handle_cmd_stop_ep(xhci, slot_id, cmd_trb, event);
 		break;
-	case TRB_TYPE(TRB_SET_DEQ):
-		handle_set_deq_completion(xhci, event, xhci->cmd_ring->dequeue);
+	case TRB_SET_DEQ:
+		WARN_ON(slot_id != TRB_TO_SLOT_ID(
+				le32_to_cpu(cmd_trb->generic.field[3])));
+		xhci_handle_cmd_set_deq(xhci, slot_id, cmd_trb, cmd_comp_code);
 		break;
-	case TRB_TYPE(TRB_CMD_NOOP):
+	case TRB_CMD_NOOP:
 		break;
-	case TRB_TYPE(TRB_RESET_EP):
-		handle_reset_ep_completion(xhci, event, xhci->cmd_ring->dequeue);
+	case TRB_RESET_EP:
+		WARN_ON(slot_id != TRB_TO_SLOT_ID(
+				le32_to_cpu(cmd_trb->generic.field[3])));
+		xhci_handle_cmd_reset_ep(xhci, slot_id, cmd_trb, cmd_comp_code);
 		break;
-	case TRB_TYPE(TRB_RESET_DEV):
-		xhci_dbg(xhci, "Completed reset device command.\n");
-		slot_id = TRB_TO_SLOT_ID(
-				le32_to_cpu(xhci->cmd_ring->dequeue->generic.field[3]));
-		virt_dev = xhci->devs[slot_id];
-		if (virt_dev)
-			handle_cmd_in_cmd_wait_list(xhci, virt_dev, event);
-		else
-			xhci_warn(xhci, "Reset device command completion "
-					"for disabled slot %u\n", slot_id);
+	case TRB_RESET_DEV:
+		WARN_ON(slot_id != TRB_TO_SLOT_ID(
+				le32_to_cpu(cmd_trb->generic.field[3])));
+		xhci_handle_cmd_reset_dev(xhci, slot_id, event);
 		break;
-	case TRB_TYPE(TRB_NEC_GET_FW):
-		if (!(xhci->quirks & XHCI_NEC_HOST)) {
-			xhci->error_bitmask |= 1 << 6;
-			break;
-		}
-		xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
-				"NEC firmware version %2x.%02x",
-				NEC_FW_MAJOR(le32_to_cpu(event->status)),
-				NEC_FW_MINOR(le32_to_cpu(event->status)));
+	case TRB_NEC_GET_FW:
+		xhci_handle_cmd_nec_get_fw(xhci, event);
 		break;
 	default:
 		/* Skip over unknown commands on the event ring */