@@ -280,12 +280,123 @@ static inline int room_on_ring(struct xhci_hcd *xhci, struct xhci_ring *ring,
/* Ring the host controller doorbell after placing a command on the ring */
void xhci_ring_cmd_db(struct xhci_hcd *xhci)
{
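+	/* Don't ring the doorbell if the command ring is stopped or aborted. */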
+	if (!(xhci->cmd_ring_state & CMD_RING_STATE_RUNNING))
+		return;
+
	xhci_dbg(xhci, "// Ding dong!\n");
	xhci_writel(xhci, DB_VALUE_HOST, &xhci->dba->doorbell[0]);
	/* Flush PCI posted writes */
	xhci_readl(xhci, &xhci->dba->doorbell[0]);
}

+static int xhci_abort_cmd_ring(struct xhci_hcd *xhci)
+{
+	u64 temp_64;
+	int ret;
+
+	xhci_dbg(xhci, "Abort command ring\n");
+
+	if (!(xhci->cmd_ring_state & CMD_RING_STATE_RUNNING)) {
+		xhci_dbg(xhci, "The command ring isn't running, "
+				"has the command ring been stopped?\n");
+		return 0;
+	}
+
+	temp_64 = xhci_read_64(xhci, &xhci->op_regs->cmd_ring);
+	if (!(temp_64 & CMD_RING_RUNNING)) {
+		xhci_dbg(xhci, "Command ring has already been stopped\n");
+		return 0;
+	}
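+	/* Set the Command Abort (CA) bit; the xHC acknowledges the abort by
+	 * clearing the Command Ring Running (CRR) bit in CRCR.
+	 */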
+	xhci->cmd_ring_state = CMD_RING_STATE_ABORTED;
+	xhci_write_64(xhci, temp_64 | CMD_RING_ABORT,
+			&xhci->op_regs->cmd_ring);
+
+	/* Section 4.6.1.2 of xHCI 1.0 spec says software should
+	 * time the completion of all xHCI commands, including
+	 * the Command Abort operation. If software doesn't see
+	 * CRR negated in a timely manner (e.g. longer than 5
+	 * seconds), then it should assume that there are
+	 * larger problems with the xHC and assert HCRST.
+	 */
+	ret = handshake(xhci, &xhci->op_regs->cmd_ring,
+			CMD_RING_RUNNING, 0, 5 * 1000 * 1000);
+	if (ret < 0) {
+		xhci_err(xhci, "Stopping the command ring failed, "
+				"maybe the host is dead\n");
+		xhci->xhc_state |= XHCI_STATE_DYING;
+		xhci_quiesce(xhci);
+		xhci_halt(xhci);
+		return -ESHUTDOWN;
+	}
+
+	return 0;
+}
+
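+/* Queue a command descriptor for a command that should be cancelled.
+ * Caller must hold xhci->lock, hence the GFP_ATOMIC allocation.
+ */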
+static int xhci_queue_cd(struct xhci_hcd *xhci,
+		struct xhci_command *command,
+		union xhci_trb *cmd_trb)
+{
+	struct xhci_cd *cd;
+
+	cd = kzalloc(sizeof(struct xhci_cd), GFP_ATOMIC);
+	if (!cd)
+		return -ENOMEM;
+	INIT_LIST_HEAD(&cd->cancel_cmd_list);
+
+	cd->command = command;
+	cd->cmd_trb = cmd_trb;
+	list_add_tail(&cd->cancel_cmd_list, &xhci->cancel_cmd_list);
+
+	return 0;
+}
+
+/*
+ * Cancel a command that has been issued but not yet completed.
+ *
+ * Some commands may hang while waiting for an acknowledgement from the
+ * USB device. That is outside of the xHC's ability to control, and it
+ * leaves the command ring blocked. When this occurs, software should
+ * intervene to recover the command ring.
+ * See Section 4.6.1.1 and 4.6.1.2
+ */
+int xhci_cancel_cmd(struct xhci_hcd *xhci, struct xhci_command *command,
+		union xhci_trb *cmd_trb)
+{
+	int retval = 0;
+	unsigned long flags;
+
+	spin_lock_irqsave(&xhci->lock, flags);
+
+	if (xhci->xhc_state & XHCI_STATE_DYING) {
+		xhci_warn(xhci, "Aborting the command ring,"
+				" but the xHCI is dead.\n");
+		retval = -ESHUTDOWN;
+		goto fail;
+	}
+
+	/* queue the command descriptor on cancel_cmd_list */
+	retval = xhci_queue_cd(xhci, command, cmd_trb);
+	if (retval) {
+		xhci_warn(xhci, "Queuing command descriptor failed.\n");
+		goto fail;
+	}
+
+	/* abort the command ring */
+	retval = xhci_abort_cmd_ring(xhci);
+	if (retval) {
+		xhci_err(xhci, "Abort command ring failed\n");
+		if (unlikely(retval == -ESHUTDOWN)) {
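+			/* Drop the lock before telling the USB core the host died. */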
+			spin_unlock_irqrestore(&xhci->lock, flags);
+			usb_hc_died(xhci_to_hcd(xhci)->primary_hcd);
+			xhci_dbg(xhci, "xHCI host controller is dead.\n");
+			return retval;
+		}
+	}
+
+fail:
+	spin_unlock_irqrestore(&xhci->lock, flags);
+	return retval;
+}
+
void xhci_ring_ep_doorbell(struct xhci_hcd *xhci,
		unsigned int slot_id,
		unsigned int ep_index,
@@ -1059,6 +1170,20 @@ static void handle_reset_ep_completion(struct xhci_hcd *xhci,
	}
}

+/* Complete the command and delete it from the device's command queue.
+ */
+static void xhci_complete_cmd_in_cmd_wait_list(struct xhci_hcd *xhci,
+		struct xhci_command *command, u32 status)
+{
+	command->status = status;
+	list_del(&command->cmd_list);
+	if (command->completion)
+		complete(command->completion);
+	else
+		xhci_free_command(xhci, command);
+}
+
+
/* Check to see if a command in the device's command queue matches this one.
 * Signal the completion or free the command, and return 1. Return 0 if the
 * completed command isn't at the head of the command list.
@@ -1077,15 +1202,144 @@ static int handle_cmd_in_cmd_wait_list(struct xhci_hcd *xhci,
	if (xhci->cmd_ring->dequeue != command->command_trb)
		return 0;

-	command->status = GET_COMP_CODE(le32_to_cpu(event->status));
-	list_del(&command->cmd_list);
-	if (command->completion)
-		complete(command->completion);
-	else
-		xhci_free_command(xhci, command);
+	xhci_complete_cmd_in_cmd_wait_list(xhci, command,
+			GET_COMP_CODE(le32_to_cpu(event->status)));
	return 1;
}

+/*
+ * Find the command trb that needs to be cancelled and modify it into a
+ * No Op command. If the command is in a device's command wait list,
+ * finish it and free it.
+ *
+ * If we can't find the command trb, we assume it has already been
+ * executed.
+ */
+static void xhci_cmd_to_noop(struct xhci_hcd *xhci, struct xhci_cd *cur_cd)
+{
+	struct xhci_segment *cur_seg;
+	union xhci_trb *cmd_trb;
+	u32 cycle_state;
+
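+	/* An empty command ring means the trb has already been executed. */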
+	if (xhci->cmd_ring->dequeue == xhci->cmd_ring->enqueue)
+		return;
+
+	/* find the current segment of the command ring */
+	cur_seg = find_trb_seg(xhci->cmd_ring->first_seg,
+			xhci->cmd_ring->dequeue, &cycle_state);
+
+	/* find the command trb matching cd on the command ring */
+	for (cmd_trb = xhci->cmd_ring->dequeue;
+			cmd_trb != xhci->cmd_ring->enqueue;
+			next_trb(xhci, xhci->cmd_ring, &cur_seg, &cmd_trb)) {
+		/* If the trb is a link trb, continue */
+		if (TRB_TYPE_LINK_LE32(cmd_trb->generic.field[3]))
+			continue;
+
+		if (cur_cd->cmd_trb == cmd_trb) {
+
+			/* If the command is in the device's command list, we
+			 * should finish it and free the command structure.
+			 */
+			if (cur_cd->command)
+				xhci_complete_cmd_in_cmd_wait_list(xhci,
+					cur_cd->command, COMP_CMD_STOP);
+
+			/* get the cycle state from the original command trb */
+			cycle_state = le32_to_cpu(cmd_trb->generic.field[3])
+				& TRB_CYCLE;
+
+			/* modify the command trb to a No Op command,
+			 * preserving the cycle bit so trb ownership is
+			 * unchanged
+			 */
+			cmd_trb->generic.field[0] = 0;
+			cmd_trb->generic.field[1] = 0;
+			cmd_trb->generic.field[2] = 0;
+			cmd_trb->generic.field[3] = cpu_to_le32(
+					TRB_TYPE(TRB_CMD_NOOP) | cycle_state);
+			break;
+		}
+	}
+}
+
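+/* Turn every command on the cancel_cmd_list into a No Op and free the
+ * command descriptors.
+ */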
+static void xhci_cancel_cmd_in_cd_list(struct xhci_hcd *xhci)
+{
+	struct xhci_cd *cur_cd, *next_cd;
+
+	if (list_empty(&xhci->cancel_cmd_list))
+		return;
+
+	list_for_each_entry_safe(cur_cd, next_cd,
+			&xhci->cancel_cmd_list, cancel_cmd_list) {
+		xhci_cmd_to_noop(xhci, cur_cd);
+		list_del(&cur_cd->cancel_cmd_list);
+		kfree(cur_cd);
+	}
+}
+
+/*
+ * Traverse the cancel_cmd_list. If the command descriptor matching
+ * cmd_trb is found, free it and return 1; otherwise return 0.
+ */
+static int xhci_search_cmd_trb_in_cd_list(struct xhci_hcd *xhci,
+		union xhci_trb *cmd_trb)
+{
+	struct xhci_cd *cur_cd, *next_cd;
+
+	if (list_empty(&xhci->cancel_cmd_list))
+		return 0;
+
+	list_for_each_entry_safe(cur_cd, next_cd,
+			&xhci->cancel_cmd_list, cancel_cmd_list) {
+		if (cur_cd->cmd_trb == cmd_trb) {
+			if (cur_cd->command)
+				xhci_complete_cmd_in_cmd_wait_list(xhci,
+					cur_cd->command, COMP_CMD_STOP);
+			list_del(&cur_cd->cancel_cmd_list);
+			kfree(cur_cd);
+			return 1;
+		}
+	}
+
+	return 0;
+}
+
+/*
+ * If cmd_trb_comp_code is COMP_CMD_ABORT, we just check whether the trb
+ * pointed to by the command ring dequeue pointer is the trb we want to
+ * cancel. If cmd_trb_comp_code is COMP_CMD_STOP, we also traverse the
+ * cancel_cmd_list and turn each command it describes into a No Op trb.
+ * Returns 1 if the dequeued trb was one we wanted to cancel, 0 otherwise.
+ */
+static int handle_stopped_cmd_ring(struct xhci_hcd *xhci,
+		int cmd_trb_comp_code)
+{
+	int cur_trb_is_good = 0;
+
+	/* Search for the cmd trb pointed to by the command ring dequeue
+	 * pointer in the command descriptor list. If it is found, free it.
+	 */
+	cur_trb_is_good = xhci_search_cmd_trb_in_cd_list(xhci,
+			xhci->cmd_ring->dequeue);
+
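+	/* An abort completion is normally followed by a command ring stopped
+	 * event; the ring is restarted when that event arrives.
+	 */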
+	if (cmd_trb_comp_code == COMP_CMD_ABORT)
+		xhci->cmd_ring_state = CMD_RING_STATE_STOPPED;
+	else if (cmd_trb_comp_code == COMP_CMD_STOP) {
+		/* traverse the cancel_cmd_list and cancel each
+		 * command according to its command descriptor
+		 */
+		xhci_cancel_cmd_in_cd_list(xhci);
+
+		xhci->cmd_ring_state = CMD_RING_STATE_RUNNING;
+		/*
+		 * ring the command ring doorbell again to restart
+		 * the command ring
+		 */
+		if (xhci->cmd_ring->dequeue != xhci->cmd_ring->enqueue)
+			xhci_ring_cmd_db(xhci);
+	}
+	return cur_trb_is_good;
+}
+
static void handle_cmd_completion(struct xhci_hcd *xhci,
		struct xhci_event_cmd *event)
{
@@ -1111,6 +1365,22 @@ static void handle_cmd_completion(struct xhci_hcd *xhci,
		xhci->error_bitmask |= 1 << 5;
		return;
	}
+
+	if ((GET_COMP_CODE(le32_to_cpu(event->status)) == COMP_CMD_ABORT) ||
+		(GET_COMP_CODE(le32_to_cpu(event->status)) == COMP_CMD_STOP)) {
+		/* If the return value is 0, we think the trb pointed to by
+		 * the command ring dequeue pointer is a good trb. A good
+		 * trb means we don't want to cancel it, but it has been
+		 * stopped by the host. So we should handle it normally.
+		 * Otherwise, the driver should invoke inc_deq() and return.
+		 */
+		if (handle_stopped_cmd_ring(xhci,
+				GET_COMP_CODE(le32_to_cpu(event->status)))) {
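+			/* The dequeued trb was one we cancelled; skip over it. */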
+			inc_deq(xhci, xhci->cmd_ring);
+			return;
+		}
+	}
+
	switch (le32_to_cpu(xhci->cmd_ring->dequeue->generic.field[3])
		& TRB_TYPE_BITMASK) {
	case TRB_TYPE(TRB_ENABLE_SLOT):
@@ -2003,6 +2273,8 @@ static int process_bulk_intr_td(struct xhci_hcd *xhci, struct xhci_td *td,
 */
static int handle_tx_event(struct xhci_hcd *xhci,
		struct xhci_transfer_event *event)
+	__releases(&xhci->lock)
+	__acquires(&xhci->lock)
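+	/* sparse annotations: handle_tx_event() may temporarily drop and
+	 * reacquire xhci->lock while giving an URB back to its driver.
+	 */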
{
	struct xhci_virt_device *xdev;
	struct xhci_virt_ep *ep;
@@ -2580,7 +2852,7 @@ static int prepare_ring(struct xhci_hcd *xhci, struct xhci_ring *ep_ring,
			xhci_err(xhci, "Ring expansion failed\n");
			return -ENOMEM;
		}
-	};
+	}

	if (enqueue_is_link_trb(ep_ring)) {
		struct xhci_ring *ring = ep_ring;