xhci: Use completion and status in global command queue
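Replace the per-device command wait list with a single global command queue. Every command is now described by a struct xhci_command threaded through the queue_*() helpers: queue_command() records the TRB the command occupies and appends the command to xhci->cmd_list, and the completion handler matches each event against the head of that list, then either signals the submitter's completion with the event's status or frees the command. This makes the per-type wait-list handlers (handle_cmd_in_cmd_wait_list() and the Evaluate Context / Address Device completions) and the stopped_trb bookkeeping unnecessary.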
[cascardo/linux.git] drivers/usb/host/xhci-ring.c
index 5f926be..3d60865 100644
 #include "xhci.h"
 #include "xhci-trace.h"
 
-static int handle_cmd_in_cmd_wait_list(struct xhci_hcd *xhci,
-               struct xhci_virt_device *virt_dev,
-               struct xhci_event_cmd *event);
-
 /*
  * Returns zero if the TRB isn't in this segment, otherwise it returns the DMA
  * address of the TRB.
@@ -123,16 +119,6 @@ static int enqueue_is_link_trb(struct xhci_ring *ring)
        return TRB_TYPE_LINK_LE32(link->control);
 }
 
-union xhci_trb *xhci_find_next_enqueue(struct xhci_ring *ring)
-{
-       /* Enqueue pointer can be left pointing to the link TRB,
-        * we must handle that
-        */
-       if (TRB_TYPE_LINK_LE32(ring->enqueue->link.control))
-               return ring->enq_seg->next->trbs;
-       return ring->enqueue;
-}
-
 /* Updates trb to point to the next TRB in the ring, and updates seg if the next
  * TRB is in a new segment.  This does not skip over link TRBs, and it does not
  * affect the ring dequeue or enqueue pointers.
@@ -550,6 +536,7 @@ void xhci_find_new_dequeue_state(struct xhci_hcd *xhci,
        struct xhci_ring *ep_ring;
        struct xhci_generic_trb *trb;
        dma_addr_t addr;
+       u64 hw_dequeue;
 
        ep_ring = xhci_triad_to_transfer_ring(xhci, slot_id,
                        ep_index, stream_id);
@@ -559,16 +546,6 @@ void xhci_find_new_dequeue_state(struct xhci_hcd *xhci,
                                stream_id);
                return;
        }
-       state->new_cycle_state = 0;
-       xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
-                       "Finding segment containing stopped TRB.");
-       state->new_deq_seg = find_trb_seg(cur_td->start_seg,
-                       dev->eps[ep_index].stopped_trb,
-                       &state->new_cycle_state);
-       if (!state->new_deq_seg) {
-               WARN_ON(1);
-               return;
-       }
 
        /* Dig out the cycle state saved by the xHC during the stop ep cmd */
        xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
@@ -577,46 +554,57 @@ void xhci_find_new_dequeue_state(struct xhci_hcd *xhci,
        if (ep->ep_state & EP_HAS_STREAMS) {
                struct xhci_stream_ctx *ctx =
                        &ep->stream_info->stream_ctx_array[stream_id];
-               state->new_cycle_state = 0x1 & le64_to_cpu(ctx->stream_ring);
+               hw_dequeue = le64_to_cpu(ctx->stream_ring);
        } else {
                struct xhci_ep_ctx *ep_ctx
                        = xhci_get_ep_ctx(xhci, dev->out_ctx, ep_index);
-               state->new_cycle_state = 0x1 & le64_to_cpu(ep_ctx->deq);
+               hw_dequeue = le64_to_cpu(ep_ctx->deq);
+       }
+
+       /* Find virtual address and segment of hardware dequeue pointer */
+       state->new_deq_seg = ep_ring->deq_seg;
+       state->new_deq_ptr = ep_ring->dequeue;
+       while (xhci_trb_virt_to_dma(state->new_deq_seg, state->new_deq_ptr)
+                       != (dma_addr_t)(hw_dequeue & ~0xf)) {
+               next_trb(xhci, ep_ring, &state->new_deq_seg,
+                                       &state->new_deq_ptr);
+               if (state->new_deq_ptr == ep_ring->dequeue) {
+                       WARN_ON(1);
+                       return;
+               }
        }
+       /*
+        * Find the cycle state for last_trb, starting from the old cycle
+        * state of hw_dequeue. On a ring with only one segment,
+        * find_trb_seg() returns immediately and so cannot toggle the
+        * cycle state when the search wraps around; add the extra toggle
+        * manually in that case.
+        */
+       state->new_cycle_state = hw_dequeue & 0x1;
+       if (ep_ring->first_seg == ep_ring->first_seg->next &&
+                       cur_td->last_trb < state->new_deq_ptr)
+               state->new_cycle_state ^= 0x1;
 
        state->new_deq_ptr = cur_td->last_trb;
        xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
                        "Finding segment containing last TRB in TD.");
        state->new_deq_seg = find_trb_seg(state->new_deq_seg,
-                       state->new_deq_ptr,
-                       &state->new_cycle_state);
+                       state->new_deq_ptr, &state->new_cycle_state);
        if (!state->new_deq_seg) {
                WARN_ON(1);
                return;
        }
 
+       /* Increment to find next TRB after last_trb. Cycle if appropriate. */
        trb = &state->new_deq_ptr->generic;
        if (TRB_TYPE_LINK_LE32(trb->field[3]) &&
            (trb->field[3] & cpu_to_le32(LINK_TOGGLE)))
                state->new_cycle_state ^= 0x1;
        next_trb(xhci, ep_ring, &state->new_deq_seg, &state->new_deq_ptr);
 
-       /*
-        * If there is only one segment in a ring, find_trb_seg()'s while loop
-        * will not run, and it will return before it has a chance to see if it
-        * needs to toggle the cycle bit.  It can't tell if the stalled transfer
-        * ended just before the link TRB on a one-segment ring, or if the TD
-        * wrapped around the top of the ring, because it doesn't have the TD in
-        * question.  Look for the one-segment case where stalled TRB's address
-        * is greater than the new dequeue pointer address.
-        */
-       if (ep_ring->first_seg == ep_ring->first_seg->next &&
-                       state->new_deq_ptr < dev->eps[ep_index].stopped_trb)
-               state->new_cycle_state ^= 0x1;
+       /* Don't update the ring cycle state for the producer (us). */
        xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
                        "Cycle state = 0x%x", state->new_cycle_state);
 
-       /* Don't update the ring cycle state for the producer (us). */
        xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
                        "New dequeue segment = %p (virtual)",
                        state->new_deq_seg);
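
The walk above recovers the virtual address behind the hardware dequeue pointer by stepping through the ring until it reaches the TRB whose DMA address matches hw_dequeue with its low four flag bits masked off, warning and giving up if it arrives back where it started. A minimal standalone sketch of the same technique, using simplified stand-in types rather than xhci's real structures (the segment size matches xhci's TRBS_PER_SEGMENT of 64; everything else here is hypothetical). The cycle-state handling that follows in the patch (bit 0 of hw_dequeue, plus the manual toggle for the one-segment wraparound) is deliberately left out of the sketch:

#include <linux/types.h>

#define TRBS_PER_SEGMENT	64

struct trb { u32 field[4]; };

struct seg {
	struct trb	*trbs;		/* TRBS_PER_SEGMENT-entry array */
	dma_addr_t	dma;		/* DMA address of trbs[0] */
	struct seg	*next;		/* segments form a circular list */
};

/*
 * Map a hardware dequeue pointer (a DMA address carrying flag bits,
 * cycle state in bit 0) back to its virtual TRB, starting the search
 * at the current software dequeue position.  Returns NULL after one
 * full, unsuccessful lap around the ring.
 */
static struct trb *find_trb_by_dma(struct seg *deq_seg, struct trb *deq_trb,
				   u64 hw_dequeue)
{
	struct seg *s = deq_seg;
	struct trb *t = deq_trb;
	dma_addr_t target = hw_dequeue & ~0xf;

	do {
		if (s->dma + (t - s->trbs) * sizeof(*t) == target)
			return t;
		if (++t == &s->trbs[TRBS_PER_SEGMENT]) {	/* hop to next segment */
			s = s->next;
			t = s->trbs;
		}
	} while (t != deq_trb);

	return NULL;
}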
@@ -682,12 +670,14 @@ static void td_to_noop(struct xhci_hcd *xhci, struct xhci_ring *ep_ring,
        }
 }
 
-static int queue_set_tr_deq(struct xhci_hcd *xhci, int slot_id,
+static int queue_set_tr_deq(struct xhci_hcd *xhci,
+               struct xhci_command *cmd, int slot_id,
                unsigned int ep_index, unsigned int stream_id,
                struct xhci_segment *deq_seg,
                union xhci_trb *deq_ptr, u32 cycle_state);
 
 void xhci_queue_new_dequeue_state(struct xhci_hcd *xhci,
+               struct xhci_command *cmd,
                unsigned int slot_id, unsigned int ep_index,
                unsigned int stream_id,
                struct xhci_dequeue_state *deq_state)
@@ -702,7 +692,7 @@ void xhci_queue_new_dequeue_state(struct xhci_hcd *xhci,
                        deq_state->new_deq_ptr,
                        (unsigned long long)xhci_trb_virt_to_dma(deq_state->new_deq_seg, deq_state->new_deq_ptr),
                        deq_state->new_cycle_state);
-       queue_set_tr_deq(xhci, slot_id, ep_index, stream_id,
+       queue_set_tr_deq(xhci, cmd, slot_id, ep_index, stream_id,
                        deq_state->new_deq_seg,
                        deq_state->new_deq_ptr,
                        (u32) deq_state->new_cycle_state);
@@ -771,7 +761,6 @@ static void xhci_handle_cmd_stop_ep(struct xhci_hcd *xhci, int slot_id,
                union xhci_trb *trb, struct xhci_event_cmd *event)
 {
        unsigned int ep_index;
-       struct xhci_virt_device *virt_dev;
        struct xhci_ring *ep_ring;
        struct xhci_virt_ep *ep;
        struct list_head *entry;
@@ -781,11 +770,7 @@ static void xhci_handle_cmd_stop_ep(struct xhci_hcd *xhci, int slot_id,
        struct xhci_dequeue_state deq_state;
 
        if (unlikely(TRB_TO_SUSPEND_PORT(le32_to_cpu(trb->generic.field[3])))) {
-               virt_dev = xhci->devs[slot_id];
-               if (virt_dev)
-                       handle_cmd_in_cmd_wait_list(xhci, virt_dev,
-                               event);
-               else
+               if (!xhci->devs[slot_id])
                        xhci_warn(xhci, "Stop endpoint command "
                                "completion for disabled slot %u\n",
                                slot_id);
@@ -799,7 +784,6 @@ static void xhci_handle_cmd_stop_ep(struct xhci_hcd *xhci, int slot_id,
        if (list_empty(&ep->cancelled_td_list)) {
                xhci_stop_watchdog_timer_in_irq(xhci, ep);
                ep->stopped_td = NULL;
-               ep->stopped_trb = NULL;
                ring_doorbell_for_active_rings(xhci, slot_id, ep_index);
                return;
        }
@@ -857,7 +841,9 @@ remove_finished_td:
 
        /* If necessary, queue a Set Transfer Ring Dequeue Pointer command */
        if (deq_state.new_deq_ptr && deq_state.new_deq_seg) {
-               xhci_queue_new_dequeue_state(xhci,
+               struct xhci_command *command;
+               command = xhci_alloc_command(xhci, false, false, GFP_ATOMIC);
+               xhci_queue_new_dequeue_state(xhci, command,
                                slot_id, ep_index,
                                ep->stopped_td->urb->stream_id,
                                &deq_state);
@@ -867,11 +853,9 @@ remove_finished_td:
                ring_doorbell_for_active_rings(xhci, slot_id, ep_index);
        }
 
-       /* Clear stopped_td and stopped_trb if endpoint is not halted */
-       if (!(ep->ep_state & EP_HALTED)) {
+       /* Clear stopped_td if endpoint is not halted */
+       if (!(ep->ep_state & EP_HALTED))
                ep->stopped_td = NULL;
-               ep->stopped_trb = NULL;
-       }
 
        /*
         * Drop the lock and complete the URBs in the cancelled TD list.
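
This hunk and the XHCI_RESET_EP_QUIRK hunk below allocate their commands with GFP_ATOMIC because command completions run in interrupt context, where sleeping is not allowed. As shown, the allocation result is passed straight through; a defensive caller would bail out on failure the way xhci_cleanup_halted_endpoint() further down does. The pattern, roughly (a sketch, not text from the patch; the two 'false' arguments decline the optional input context and completion):

	struct xhci_command *command;

	/* interrupt context: must not sleep, so GFP_KERNEL is off-limits */
	command = xhci_alloc_command(xhci, false, false, GFP_ATOMIC);
	if (!command)
		return;			/* nothing queued yet, nothing to undo */

	xhci_queue_new_dequeue_state(xhci, command, slot_id, ep_index,
			ep->stopped_td->urb->stream_id, &deq_state);
	xhci_ring_cmd_db(xhci);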
@@ -1207,9 +1191,11 @@ static void xhci_handle_cmd_reset_ep(struct xhci_hcd *xhci, int slot_id,
         * because the HW can't handle two commands being queued in a row.
         */
        if (xhci->quirks & XHCI_RESET_EP_QUIRK) {
+               struct xhci_command *command;
+               command = xhci_alloc_command(xhci, false, false, GFP_ATOMIC);
                xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
                                "Queueing configure endpoint command");
-               xhci_queue_configure_endpoint(xhci,
+               xhci_queue_configure_endpoint(xhci, command,
                                xhci->devs[slot_id]->in_ctx->dma, slot_id,
                                false);
                xhci_ring_cmd_db(xhci);
@@ -1234,29 +1220,6 @@ static void xhci_complete_cmd_in_cmd_wait_list(struct xhci_hcd *xhci,
 }
 
 
-/* Check to see if a command in the device's command queue matches this one.
- * Signal the completion or free the command, and return 1.  Return 0 if the
- * completed command isn't at the head of the command list.
- */
-static int handle_cmd_in_cmd_wait_list(struct xhci_hcd *xhci,
-               struct xhci_virt_device *virt_dev,
-               struct xhci_event_cmd *event)
-{
-       struct xhci_command *command;
-
-       if (list_empty(&virt_dev->cmd_list))
-               return 0;
-
-       command = list_entry(virt_dev->cmd_list.next,
-                       struct xhci_command, cmd_list);
-       if (xhci->cmd_ring->dequeue != command->command_trb)
-               return 0;
-
-       xhci_complete_cmd_in_cmd_wait_list(xhci, command,
-                       GET_COMP_CODE(le32_to_cpu(event->status)));
-       return 1;
-}
-
 /*
  * Find the command TRB that needs to be cancelled and modify it to a
  * No Op command. If the command is in the device's command wait
@@ -1408,7 +1371,6 @@ static void xhci_handle_cmd_enable_slot(struct xhci_hcd *xhci, int slot_id,
                xhci->slot_id = slot_id;
        else
                xhci->slot_id = 0;
-       complete(&xhci->addr_dev);
 }
 
 static void xhci_handle_cmd_disable_slot(struct xhci_hcd *xhci, int slot_id)
@@ -1433,9 +1395,6 @@ static void xhci_handle_cmd_config_ep(struct xhci_hcd *xhci, int slot_id,
        unsigned int ep_state;
        u32 add_flags, drop_flags;
 
-       virt_dev = xhci->devs[slot_id];
-       if (handle_cmd_in_cmd_wait_list(xhci, virt_dev, event))
-               return;
        /*
         * Configure endpoint commands can come from the USB core
         * configuration or alt setting changes, or because the HW
@@ -1444,6 +1403,7 @@ static void xhci_handle_cmd_config_ep(struct xhci_hcd *xhci, int slot_id,
         * If the command was for a halted endpoint, the xHCI driver
         * is not waiting on the configure endpoint command.
         */
+       virt_dev = xhci->devs[slot_id];
        ctrl_ctx = xhci_get_input_control_ctx(xhci, virt_dev->in_ctx);
        if (!ctrl_ctx) {
                xhci_warn(xhci, "Could not get input context, bad type.\n");
@@ -1466,7 +1426,7 @@ static void xhci_handle_cmd_config_ep(struct xhci_hcd *xhci, int slot_id,
                        add_flags - SLOT_FLAG == drop_flags) {
                ep_state = virt_dev->eps[ep_index].ep_state;
                if (!(ep_state & EP_HALTED))
-                       goto bandwidth_change;
+                       return;
                xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
                                "Completed config ep cmd - "
                                "last ep index = %d, state = %d",
@@ -1476,43 +1436,14 @@ static void xhci_handle_cmd_config_ep(struct xhci_hcd *xhci, int slot_id,
                ring_doorbell_for_active_rings(xhci, slot_id, ep_index);
                return;
        }
-bandwidth_change:
-       xhci_dbg_trace(xhci,  trace_xhci_dbg_context_change,
-                       "Completed config ep cmd");
-       virt_dev->cmd_status = cmd_comp_code;
-       complete(&virt_dev->cmd_completion);
        return;
 }
 
-static void xhci_handle_cmd_eval_ctx(struct xhci_hcd *xhci, int slot_id,
-               struct xhci_event_cmd *event, u32 cmd_comp_code)
-{
-       struct xhci_virt_device *virt_dev;
-
-       virt_dev = xhci->devs[slot_id];
-       if (handle_cmd_in_cmd_wait_list(xhci, virt_dev, event))
-               return;
-       virt_dev->cmd_status = cmd_comp_code;
-       complete(&virt_dev->cmd_completion);
-}
-
-static void xhci_handle_cmd_addr_dev(struct xhci_hcd *xhci, int slot_id,
-               u32 cmd_comp_code)
-{
-       xhci->devs[slot_id]->cmd_status = cmd_comp_code;
-       complete(&xhci->addr_dev);
-}
-
 static void xhci_handle_cmd_reset_dev(struct xhci_hcd *xhci, int slot_id,
                struct xhci_event_cmd *event)
 {
-       struct xhci_virt_device *virt_dev;
-
        xhci_dbg(xhci, "Completed reset device command.\n");
-       virt_dev = xhci->devs[slot_id];
-       if (virt_dev)
-               handle_cmd_in_cmd_wait_list(xhci, virt_dev, event);
-       else
+       if (!xhci->devs[slot_id])
                xhci_warn(xhci, "Reset device command completion "
                                "for disabled slot %u\n", slot_id);
 }
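
With the wait list gone, the Evaluate Context and Address Device completions no longer need handlers of their own: their only job was to record cmd_status and wake the waiter, and the common completion path added below (xhci_complete_del_and_free_cmd()) now does that for every command type. This is why the TRB_EVAL_CONTEXT and TRB_ADDR_DEV cases in handle_cmd_completion() become empty and why xhci_handle_cmd_config_ep() keeps only its quirk handling.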
@@ -1530,6 +1461,25 @@ static void xhci_handle_cmd_nec_get_fw(struct xhci_hcd *xhci,
                        NEC_FW_MINOR(le32_to_cpu(event->status)));
 }
 
+static void xhci_complete_del_and_free_cmd(struct xhci_command *cmd, u32 status)
+{
+       list_del(&cmd->cmd_list);
+
+       if (cmd->completion) {
+               cmd->status = status;
+               complete(cmd->completion);
+       } else {
+               kfree(cmd);
+       }
+}
+
+void xhci_cleanup_command_queue(struct xhci_hcd *xhci)
+{
+       struct xhci_command *cur_cmd, *tmp_cmd;
+       list_for_each_entry_safe(cur_cmd, tmp_cmd, &xhci->cmd_list, cmd_list)
+               xhci_complete_del_and_free_cmd(cur_cmd, COMP_CMD_ABORT);
+}
+
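
xhci_complete_del_and_free_cmd() encodes the new ownership rule: a command carrying a completion belongs to a waiting submitter, which is woken and reads the outcome from cmd->status, while a command without one was fire-and-forget and is freed on the spot. xhci_cleanup_command_queue() applies the same rule to everything still pending when the host is torn down, failing each command with COMP_CMD_ABORT. A hypothetical synchronous submitter under this scheme would look roughly like the following sketch (not code from the patch; the second 'true' asks xhci_alloc_command() to allocate the completion):

static int xhci_enable_slot_sync(struct xhci_hcd *xhci)
{
	struct xhci_command *cmd;
	unsigned long flags;
	int ret;

	cmd = xhci_alloc_command(xhci, false, true, GFP_KERNEL);
	if (!cmd)
		return -ENOMEM;

	spin_lock_irqsave(&xhci->lock, flags);
	ret = xhci_queue_slot_control(xhci, cmd, TRB_ENABLE_SLOT, 0);
	if (ret) {
		/* queue_command() failed, so cmd never made it onto cmd_list */
		spin_unlock_irqrestore(&xhci->lock, flags);
		xhci_free_command(xhci, cmd);
		return ret;
	}
	xhci_ring_cmd_db(xhci);
	spin_unlock_irqrestore(&xhci->lock, flags);

	/* handle_cmd_completion() fills in cmd->status and wakes us */
	wait_for_completion(cmd->completion);
	ret = (cmd->status == COMP_SUCCESS) ? 0 : -EIO;

	xhci_free_command(xhci, cmd);
	return ret;
}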
 static void handle_cmd_completion(struct xhci_hcd *xhci,
                struct xhci_event_cmd *event)
 {
@@ -1538,6 +1488,7 @@ static void handle_cmd_completion(struct xhci_hcd *xhci,
        dma_addr_t cmd_dequeue_dma;
        u32 cmd_comp_code;
        union xhci_trb *cmd_trb;
+       struct xhci_command *cmd;
        u32 cmd_type;
 
        cmd_dma = le64_to_cpu(event->cmd_trb);
@@ -1555,6 +1506,13 @@ static void handle_cmd_completion(struct xhci_hcd *xhci,
                return;
        }
 
+       cmd = list_entry(xhci->cmd_list.next, struct xhci_command, cmd_list);
+
+       if (cmd->command_trb != xhci->cmd_ring->dequeue) {
+               xhci_err(xhci,
+                        "Command completion event does not match command\n");
+               return;
+       }
        trace_xhci_cmd_completion(cmd_trb, (struct xhci_generic_trb *) event);
 
        cmd_comp_code = GET_COMP_CODE(le32_to_cpu(event->status));
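
Because queue_command() (below) links each command onto xhci->cmd_list in the same order its TRB is written to the ring, the completion event for the TRB at cmd_ring->dequeue must belong to the list head; when it does not, driver and hardware disagree about the ring state and the event is dropped with an error rather than completing the wrong command.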
@@ -1586,13 +1544,13 @@ static void handle_cmd_completion(struct xhci_hcd *xhci,
                xhci_handle_cmd_disable_slot(xhci, slot_id);
                break;
        case TRB_CONFIG_EP:
-               xhci_handle_cmd_config_ep(xhci, slot_id, event, cmd_comp_code);
+               if (!cmd->completion)
+                       xhci_handle_cmd_config_ep(xhci, slot_id, event,
+                                                 cmd_comp_code);
                break;
        case TRB_EVAL_CONTEXT:
-               xhci_handle_cmd_eval_ctx(xhci, slot_id, event, cmd_comp_code);
                break;
        case TRB_ADDR_DEV:
-               xhci_handle_cmd_addr_dev(xhci, slot_id, cmd_comp_code);
                break;
        case TRB_STOP_RING:
                WARN_ON(slot_id != TRB_TO_SLOT_ID(
@@ -1624,6 +1582,9 @@ static void handle_cmd_completion(struct xhci_hcd *xhci,
                xhci->error_bitmask |= 1 << 6;
                break;
        }
+
+       xhci_complete_del_and_free_cmd(cmd, cmd_comp_code);
+
        inc_deq(xhci, xhci->cmd_ring);
 }
 
@@ -1939,16 +1900,19 @@ static void xhci_cleanup_halted_endpoint(struct xhci_hcd *xhci,
                struct xhci_td *td, union xhci_trb *event_trb)
 {
        struct xhci_virt_ep *ep = &xhci->devs[slot_id]->eps[ep_index];
+       struct xhci_command *command;
+       command = xhci_alloc_command(xhci, false, false, GFP_ATOMIC);
+       if (!command)
+               return;
+
        ep->ep_state |= EP_HALTED;
        ep->stopped_td = td;
-       ep->stopped_trb = event_trb;
        ep->stopped_stream = stream_id;
 
-       xhci_queue_reset_ep(xhci, slot_id, ep_index);
+       xhci_queue_reset_ep(xhci, command, slot_id, ep_index);
        xhci_cleanup_stalled_ring(xhci, td->urb->dev, ep_index);
 
        ep->stopped_td = NULL;
-       ep->stopped_trb = NULL;
        ep->stopped_stream = 0;
 
        xhci_ring_cmd_db(xhci);
@@ -2030,7 +1994,6 @@ static int finish_td(struct xhci_hcd *xhci, struct xhci_td *td,
                 * the ring dequeue pointer or take this TD off any lists yet.
                 */
                ep->stopped_td = td;
-               ep->stopped_trb = event_trb;
                return 0;
        } else {
                if (trb_comp_code == COMP_STALL) {
@@ -2042,7 +2005,6 @@ static int finish_td(struct xhci_hcd *xhci, struct xhci_td *td,
                         * USB class driver clear the stall later.
                         */
                        ep->stopped_td = td;
-                       ep->stopped_trb = event_trb;
                        ep->stopped_stream = ep_ring->stream_id;
                } else if (xhci_requires_manual_halt_cleanup(xhci,
                                        ep_ctx, trb_comp_code)) {
@@ -2659,7 +2621,7 @@ static int handle_tx_event(struct xhci_hcd *xhci,
                                 * successful event after a short transfer.
                                 * Ignore it.
                                 */
-                               if ((xhci->quirks & XHCI_SPURIOUS_SUCCESS) && 
+                               if ((xhci->quirks & XHCI_SPURIOUS_SUCCESS) &&
                                                ep_ring->last_td_was_short) {
                                        ep_ring->last_td_was_short = false;
                                        ret = 0;
@@ -4001,11 +3963,14 @@ int xhci_queue_isoc_tx_prepare(struct xhci_hcd *xhci, gfp_t mem_flags,
  * Don't decrement xhci->cmd_ring_reserved_trbs after we've queued the TRB
  * because the command event handler may want to resubmit a failed command.
  */
-static int queue_command(struct xhci_hcd *xhci, u32 field1, u32 field2,
-               u32 field3, u32 field4, bool command_must_succeed)
+static int queue_command(struct xhci_hcd *xhci, struct xhci_command *cmd,
+                        u32 field1, u32 field2,
+                        u32 field3, u32 field4, bool command_must_succeed)
 {
        int reserved_trbs = xhci->cmd_ring_reserved_trbs;
        int ret;
+       if (xhci->xhc_state & XHCI_STATE_DYING)
+               return -ESHUTDOWN;
 
        if (!command_must_succeed)
                reserved_trbs++;
@@ -4019,57 +3984,64 @@ static int queue_command(struct xhci_hcd *xhci, u32 field1, u32 field2,
                                        "unfailable commands failed.\n");
                return ret;
        }
+
+       cmd->command_trb = xhci->cmd_ring->enqueue;
+       list_add_tail(&cmd->cmd_list, &xhci->cmd_list);
+
        queue_trb(xhci, xhci->cmd_ring, false, field1, field2, field3,
                        field4 | xhci->cmd_ring->cycle_state);
        return 0;
 }
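
queue_command() is now the single enqueue-side bookkeeping point: once prepare_ring() has guaranteed space, it records which TRB the command will occupy and links the command onto xhci->cmd_list before the TRB is written, and it refuses new work with -ESHUTDOWN once the host is marked dying, which pairs with xhci_cleanup_command_queue() above. Every command thus follows one lifecycle:

	xhci_alloc_command()
	  -> queue_command(): set cmd->command_trb, list_add_tail() onto xhci->cmd_list
	  -> xhci_ring_cmd_db(): hand the work to the xHC
	  -> handle_cmd_completion(): match the list head against cmd_ring->dequeue
	  -> xhci_complete_del_and_free_cmd(): complete the waiter, or kfree()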
 
 /* Queue a slot enable or disable request on the command ring */
-int xhci_queue_slot_control(struct xhci_hcd *xhci, u32 trb_type, u32 slot_id)
+int xhci_queue_slot_control(struct xhci_hcd *xhci, struct xhci_command *cmd,
+               u32 trb_type, u32 slot_id)
 {
-       return queue_command(xhci, 0, 0, 0,
+       return queue_command(xhci, cmd, 0, 0, 0,
                        TRB_TYPE(trb_type) | SLOT_ID_FOR_TRB(slot_id), false);
 }
 
 /* Queue an address device command TRB */
-int xhci_queue_address_device(struct xhci_hcd *xhci, dma_addr_t in_ctx_ptr,
-                             u32 slot_id, enum xhci_setup_dev setup)
+int xhci_queue_address_device(struct xhci_hcd *xhci, struct xhci_command *cmd,
+               dma_addr_t in_ctx_ptr, u32 slot_id, enum xhci_setup_dev setup)
 {
-       return queue_command(xhci, lower_32_bits(in_ctx_ptr),
+       return queue_command(xhci, cmd, lower_32_bits(in_ctx_ptr),
                        upper_32_bits(in_ctx_ptr), 0,
                        TRB_TYPE(TRB_ADDR_DEV) | SLOT_ID_FOR_TRB(slot_id)
                        | (setup == SETUP_CONTEXT_ONLY ? TRB_BSR : 0), false);
 }
 
-int xhci_queue_vendor_command(struct xhci_hcd *xhci,
+int xhci_queue_vendor_command(struct xhci_hcd *xhci, struct xhci_command *cmd,
                u32 field1, u32 field2, u32 field3, u32 field4)
 {
-       return queue_command(xhci, field1, field2, field3, field4, false);
+       return queue_command(xhci, cmd, field1, field2, field3, field4, false);
 }
 
 /* Queue a reset device command TRB */
-int xhci_queue_reset_device(struct xhci_hcd *xhci, u32 slot_id)
+int xhci_queue_reset_device(struct xhci_hcd *xhci, struct xhci_command *cmd,
+               u32 slot_id)
 {
-       return queue_command(xhci, 0, 0, 0,
+       return queue_command(xhci, cmd, 0, 0, 0,
                        TRB_TYPE(TRB_RESET_DEV) | SLOT_ID_FOR_TRB(slot_id),
                        false);
 }
 
 /* Queue a configure endpoint command TRB */
-int xhci_queue_configure_endpoint(struct xhci_hcd *xhci, dma_addr_t in_ctx_ptr,
+int xhci_queue_configure_endpoint(struct xhci_hcd *xhci,
+               struct xhci_command *cmd, dma_addr_t in_ctx_ptr,
                u32 slot_id, bool command_must_succeed)
 {
-       return queue_command(xhci, lower_32_bits(in_ctx_ptr),
+       return queue_command(xhci, cmd, lower_32_bits(in_ctx_ptr),
                        upper_32_bits(in_ctx_ptr), 0,
                        TRB_TYPE(TRB_CONFIG_EP) | SLOT_ID_FOR_TRB(slot_id),
                        command_must_succeed);
 }
 
 /* Queue an evaluate context command TRB */
-int xhci_queue_evaluate_context(struct xhci_hcd *xhci, dma_addr_t in_ctx_ptr,
-               u32 slot_id, bool command_must_succeed)
+int xhci_queue_evaluate_context(struct xhci_hcd *xhci, struct xhci_command *cmd,
+               dma_addr_t in_ctx_ptr, u32 slot_id, bool command_must_succeed)
 {
-       return queue_command(xhci, lower_32_bits(in_ctx_ptr),
+       return queue_command(xhci, cmd, lower_32_bits(in_ctx_ptr),
                        upper_32_bits(in_ctx_ptr), 0,
                        TRB_TYPE(TRB_EVAL_CONTEXT) | SLOT_ID_FOR_TRB(slot_id),
                        command_must_succeed);
@@ -4079,25 +4051,26 @@ int xhci_queue_evaluate_context(struct xhci_hcd *xhci, dma_addr_t in_ctx_ptr,
  * Suspend is set to indicate "Stop Endpoint Command" is being issued to stop
  * activity on an endpoint that is about to be suspended.
  */
-int xhci_queue_stop_endpoint(struct xhci_hcd *xhci, int slot_id,
-               unsigned int ep_index, int suspend)
+int xhci_queue_stop_endpoint(struct xhci_hcd *xhci, struct xhci_command *cmd,
+                            int slot_id, unsigned int ep_index, int suspend)
 {
        u32 trb_slot_id = SLOT_ID_FOR_TRB(slot_id);
        u32 trb_ep_index = EP_ID_FOR_TRB(ep_index);
        u32 type = TRB_TYPE(TRB_STOP_RING);
        u32 trb_suspend = SUSPEND_PORT_FOR_TRB(suspend);
 
-       return queue_command(xhci, 0, 0, 0,
+       return queue_command(xhci, cmd, 0, 0, 0,
                        trb_slot_id | trb_ep_index | type | trb_suspend, false);
 }
 
 /* Set Transfer Ring Dequeue Pointer command.
  * This should not be used for endpoints that have streams enabled.
  */
-static int queue_set_tr_deq(struct xhci_hcd *xhci, int slot_id,
-               unsigned int ep_index, unsigned int stream_id,
-               struct xhci_segment *deq_seg,
-               union xhci_trb *deq_ptr, u32 cycle_state)
+static int queue_set_tr_deq(struct xhci_hcd *xhci, struct xhci_command *cmd,
+                       int slot_id,
+                       unsigned int ep_index, unsigned int stream_id,
+                       struct xhci_segment *deq_seg,
+                       union xhci_trb *deq_ptr, u32 cycle_state)
 {
        dma_addr_t addr;
        u32 trb_slot_id = SLOT_ID_FOR_TRB(slot_id);
@@ -4124,18 +4097,19 @@ static int queue_set_tr_deq(struct xhci_hcd *xhci, int slot_id,
        ep->queued_deq_ptr = deq_ptr;
        if (stream_id)
                trb_sct = SCT_FOR_TRB(SCT_PRI_TR);
-       return queue_command(xhci, lower_32_bits(addr) | trb_sct | cycle_state,
+       return queue_command(xhci, cmd,
+                       lower_32_bits(addr) | trb_sct | cycle_state,
                        upper_32_bits(addr), trb_stream_id,
                        trb_slot_id | trb_ep_index | type, false);
 }
 
-int xhci_queue_reset_ep(struct xhci_hcd *xhci, int slot_id,
-               unsigned int ep_index)
+int xhci_queue_reset_ep(struct xhci_hcd *xhci, struct xhci_command *cmd,
+                       int slot_id, unsigned int ep_index)
 {
        u32 trb_slot_id = SLOT_ID_FOR_TRB(slot_id);
        u32 trb_ep_index = EP_ID_FOR_TRB(ep_index);
        u32 type = TRB_TYPE(TRB_RESET_EP);
 
-       return queue_command(xhci, 0, 0, 0, trb_slot_id | trb_ep_index | type,
-                       false);
+       return queue_command(xhci, cmd, 0, 0, 0,
+                       trb_slot_id | trb_ep_index | type, false);
 }