@@ -1747,6 +1747,241 @@ static void xhci_finish_resource_reservation(struct xhci_hcd *xhci,
 				xhci->num_active_eps);
 }
 
+/* Run the algorithm on the bandwidth table. If this table is part of a
+ * TT, see if we need to update the number of active TTs.
+ */
+static int xhci_check_bw_table(struct xhci_hcd *xhci,
+		struct xhci_virt_device *virt_dev,
+		int old_active_eps)
+{
+	return 0;
+}
+
+static bool xhci_is_async_ep(unsigned int ep_type)
+{
+	return (ep_type != ISOC_OUT_EP && ep_type != INT_OUT_EP &&
+			ep_type != ISOC_IN_EP &&
+			ep_type != INT_IN_EP);
+}
+
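Note on xhci_is_async_ep(): everything that is not an interrupt or isochronous endpoint (in either direction), i.e. control and bulk, counts as asynchronous and is simply skipped by the interval-table bookkeeping below. For illustration only, here is the same test written positively; this helper is not part of the patch and only assumes the INT_*_EP/ISOC_*_EP endpoint-type values from xhci.h:

static bool xhci_is_sync_ep(unsigned int ep_type)
{
	/* Periodic (interrupt/isochronous) endpoints are the only ones
	 * tracked in the per-interval bandwidth tables.
	 */
	return (ep_type == ISOC_OUT_EP || ep_type == INT_OUT_EP ||
			ep_type == ISOC_IN_EP || ep_type == INT_IN_EP);
}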
+void xhci_drop_ep_from_interval_table(struct xhci_hcd *xhci,
+		struct xhci_bw_info *ep_bw,
+		struct xhci_interval_bw_table *bw_table,
+		struct usb_device *udev,
+		struct xhci_virt_ep *virt_ep,
+		struct xhci_tt_bw_info *tt_info)
+{
+	struct xhci_interval_bw *interval_bw;
+	int normalized_interval;
+
+	if (xhci_is_async_ep(ep_bw->type) ||
+			list_empty(&virt_ep->bw_endpoint_list))
+		return;
+
+	/* For LS/FS devices, we need to translate the interval expressed in
+	 * microframes to frames.
+	 */
+	if (udev->speed == USB_SPEED_HIGH)
+		normalized_interval = ep_bw->ep_interval;
+	else
+		normalized_interval = ep_bw->ep_interval - 3;
+
+	if (normalized_interval == 0)
+		bw_table->interval0_esit_payload -= ep_bw->max_esit_payload;
+	interval_bw = &bw_table->interval_bw[normalized_interval];
+	interval_bw->num_packets -= ep_bw->num_packets;
+	switch (udev->speed) {
+	case USB_SPEED_LOW:
+		interval_bw->overhead[LS_OVERHEAD_TYPE] -= 1;
+		break;
+	case USB_SPEED_FULL:
+		interval_bw->overhead[FS_OVERHEAD_TYPE] -= 1;
+		break;
+	case USB_SPEED_HIGH:
+		interval_bw->overhead[HS_OVERHEAD_TYPE] -= 1;
+		break;
+	case USB_SPEED_SUPER:
+	case USB_SPEED_UNKNOWN:
+	case USB_SPEED_WIRELESS:
+		/* Should never happen because only LS/FS/HS endpoints will get
+		 * added to the endpoint list.
+		 */
+		return;
+	}
+	if (tt_info)
+		tt_info->active_eps -= 1;
+	list_del_init(&virt_ep->bw_endpoint_list);
+}
+
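In both the drop and the add path, ep_bw->ep_interval carries the xHCI encoding: log2 of the service interval in 125 us microframes. High-speed values index interval_bw[] directly; low-/full-speed values are converted to 1 ms frames by subtracting 3, since 2^3 = 8 microframes make one frame. A self-contained sketch of that mapping with a concrete case (illustrative only, not part of the patch):

#include <assert.h>

/* log2(interval in microframes) -> slot in interval_bw[] */
static unsigned int normalize_interval(unsigned int ep_interval, int high_speed)
{
	return high_speed ? ep_interval : ep_interval - 3;
}

int main(void)
{
	/* A full-speed interrupt endpoint polled every 1 ms is encoded as
	 * ep_interval = 3 (2^3 = 8 microframes) and lands in slot 0, the
	 * bucket whose ESIT payload feeds interval0_esit_payload.
	 */
	assert(normalize_interval(3, 0) == 0);
	/* A high-speed endpoint with the same 1 ms period keeps its
	 * microframe-based exponent and stays in slot 3.
	 */
	assert(normalize_interval(3, 1) == 3);
	return 0;
}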
+static void xhci_add_ep_to_interval_table(struct xhci_hcd *xhci,
+		struct xhci_bw_info *ep_bw,
+		struct xhci_interval_bw_table *bw_table,
+		struct usb_device *udev,
+		struct xhci_virt_ep *virt_ep,
+		struct xhci_tt_bw_info *tt_info)
+{
+	struct xhci_interval_bw *interval_bw;
+	struct xhci_virt_ep *smaller_ep;
+	int normalized_interval;
+
+	if (xhci_is_async_ep(ep_bw->type))
+		return;
+
+	/* For LS/FS devices, we need to translate the interval expressed in
+	 * microframes to frames.
+	 */
+	if (udev->speed == USB_SPEED_HIGH)
+		normalized_interval = ep_bw->ep_interval;
+	else
+		normalized_interval = ep_bw->ep_interval - 3;
+
+	if (normalized_interval == 0)
+		bw_table->interval0_esit_payload += ep_bw->max_esit_payload;
+	interval_bw = &bw_table->interval_bw[normalized_interval];
+	interval_bw->num_packets += ep_bw->num_packets;
+	switch (udev->speed) {
+	case USB_SPEED_LOW:
+		interval_bw->overhead[LS_OVERHEAD_TYPE] += 1;
+		break;
+	case USB_SPEED_FULL:
+		interval_bw->overhead[FS_OVERHEAD_TYPE] += 1;
+		break;
+	case USB_SPEED_HIGH:
+		interval_bw->overhead[HS_OVERHEAD_TYPE] += 1;
+		break;
+	case USB_SPEED_SUPER:
+	case USB_SPEED_UNKNOWN:
+	case USB_SPEED_WIRELESS:
+		/* Should never happen because only LS/FS/HS endpoints will get
+		 * added to the endpoint list.
+		 */
+		return;
+	}
+
+	if (tt_info)
+		tt_info->active_eps += 1;
+	/* Insert the endpoint into the list, largest max packet size first. */
+	list_for_each_entry(smaller_ep, &interval_bw->endpoints,
+			bw_endpoint_list) {
+		if (ep_bw->max_packet_size >=
+				smaller_ep->bw_info.max_packet_size) {
+			/* Add the new ep before the smaller endpoint */
+			list_add_tail(&virt_ep->bw_endpoint_list,
+					&smaller_ep->bw_endpoint_list);
+			return;
+		}
+	}
+	/* Add the new endpoint at the end of the list. */
+	list_add_tail(&virt_ep->bw_endpoint_list,
+			&interval_bw->endpoints);
+}
+
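The insertion loop above relies on list_add_tail(new, pos) linking the new node immediately before pos, so stopping at the first entry whose max packet size is not larger keeps bw_endpoint_list ordered from largest to smallest packet size. A minimal kernel-style sketch of the same idiom with a hypothetical demo type (not part of the patch):

#include <linux/list.h>

struct demo_item {
	struct list_head node;
	unsigned int max_packet_size;
};

static void demo_insert_descending(struct list_head *head,
		struct demo_item *new)
{
	struct demo_item *cur;

	list_for_each_entry(cur, head, node) {
		if (new->max_packet_size >= cur->max_packet_size) {
			/* Link the new entry just before the first
			 * smaller-or-equal one.
			 */
			list_add_tail(&new->node, &cur->node);
			return;
		}
	}
	/* Smaller than everything already queued: append at the end. */
	list_add_tail(&new->node, head);
}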
+void xhci_update_tt_active_eps(struct xhci_hcd *xhci,
+		struct xhci_virt_device *virt_dev,
+		int old_active_eps)
+{
+	struct xhci_root_port_bw_info *rh_bw_info;
+	if (!virt_dev->tt_info)
+		return;
+
+	rh_bw_info = &xhci->rh_bw[virt_dev->real_port - 1];
+	if (old_active_eps == 0 &&
+			virt_dev->tt_info->active_eps != 0) {
+		rh_bw_info->num_active_tts += 1;
+	} else if (old_active_eps != 0 &&
+			virt_dev->tt_info->active_eps == 0) {
+		rh_bw_info->num_active_tts -= 1;
+	}
+}
+
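xhci_update_tt_active_eps() only cares about 0 to non-zero transitions of the TT's periodic endpoint count: the root port's num_active_tts goes up when the first periodic endpoint appears behind the TT and down when the last one disappears. The rule, restated as a pure function (sketch only, not part of the patch):

static int tt_count_delta(int old_active_eps, int new_active_eps)
{
	if (old_active_eps == 0 && new_active_eps != 0)
		return 1;	/* TT just started carrying periodic traffic */
	if (old_active_eps != 0 && new_active_eps == 0)
		return -1;	/* last periodic endpoint behind the TT is gone */
	return 0;		/* no 0 <-> non-zero transition */
}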
+static int xhci_reserve_bandwidth(struct xhci_hcd *xhci,
+		struct xhci_virt_device *virt_dev,
+		struct xhci_container_ctx *in_ctx)
+{
+	struct xhci_bw_info ep_bw_info[31];
+	int i;
+	struct xhci_input_control_ctx *ctrl_ctx;
+	int old_active_eps = 0;
+
+	if (virt_dev->udev->speed == USB_SPEED_SUPER)
+		return 0;
+
+	if (virt_dev->tt_info)
+		old_active_eps = virt_dev->tt_info->active_eps;
+
+	ctrl_ctx = xhci_get_input_control_ctx(xhci, in_ctx);
+
+	for (i = 0; i < 31; i++) {
+		if (!EP_IS_ADDED(ctrl_ctx, i) && !EP_IS_DROPPED(ctrl_ctx, i))
+			continue;
+
+		/* Make a copy of the BW info in case we need to revert this */
+		memcpy(&ep_bw_info[i], &virt_dev->eps[i].bw_info,
+				sizeof(ep_bw_info[i]));
+		/* Drop the endpoint from the interval table if the endpoint is
+		 * being dropped or changed.
+		 */
+		if (EP_IS_DROPPED(ctrl_ctx, i))
+			xhci_drop_ep_from_interval_table(xhci,
+					&virt_dev->eps[i].bw_info,
+					virt_dev->bw_table,
+					virt_dev->udev,
+					&virt_dev->eps[i],
+					virt_dev->tt_info);
+	}
+	/* Overwrite the information stored in the endpoints' bw_info */
+	xhci_update_bw_info(xhci, virt_dev->in_ctx, ctrl_ctx, virt_dev);
+	for (i = 0; i < 31; i++) {
+		/* Add any changed or added endpoints to the interval table */
+		if (EP_IS_ADDED(ctrl_ctx, i))
+			xhci_add_ep_to_interval_table(xhci,
+					&virt_dev->eps[i].bw_info,
+					virt_dev->bw_table,
+					virt_dev->udev,
+					&virt_dev->eps[i],
+					virt_dev->tt_info);
+	}
+
+	if (!xhci_check_bw_table(xhci, virt_dev, old_active_eps)) {
+		/* Ok, this fits in the bandwidth we have.
+		 * Update the number of active TTs.
+		 */
+		xhci_update_tt_active_eps(xhci, virt_dev, old_active_eps);
+		return 0;
+	}
+
+	/* We don't have enough bandwidth for this, revert the stored info. */
+	for (i = 0; i < 31; i++) {
+		if (!EP_IS_ADDED(ctrl_ctx, i) && !EP_IS_DROPPED(ctrl_ctx, i))
+			continue;
+
+		/* Drop the new copies of any added or changed endpoints from
+		 * the interval table.
+		 */
+		if (EP_IS_ADDED(ctrl_ctx, i)) {
+			xhci_drop_ep_from_interval_table(xhci,
+					&virt_dev->eps[i].bw_info,
+					virt_dev->bw_table,
+					virt_dev->udev,
+					&virt_dev->eps[i],
+					virt_dev->tt_info);
+		}
+		/* Revert the endpoint back to its old information */
+		memcpy(&virt_dev->eps[i].bw_info, &ep_bw_info[i],
+				sizeof(ep_bw_info[i]));
+		/* Add any changed or dropped endpoints back into the table */
+		if (EP_IS_DROPPED(ctrl_ctx, i))
+			xhci_add_ep_to_interval_table(xhci,
+					&virt_dev->eps[i].bw_info,
+					virt_dev->bw_table,
+					virt_dev->udev,
+					&virt_dev->eps[i],
+					virt_dev->tt_info);
+	}
+	return -ENOMEM;
+}
+
+
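xhci_reserve_bandwidth() is a speculative snapshot/apply/check/revert sequence: it saves the bw_info of every added or dropped endpoint, tentatively applies the drops and then the adds to the shared interval tables, and asks xhci_check_bw_table() (still a stub that always succeeds at this point in the series) whether the result fits; if not, everything is unwound in place and -ENOMEM is returned, matching the host-resource error path in xhci_configure_endpoint(). A compact, self-contained illustration of the same shape, using a hypothetical budget type rather than the xHCI tables:

#include <linux/errno.h>

struct demo_budget {
	unsigned int used;
	unsigned int limit;
};

static int demo_reserve(struct demo_budget *b, unsigned int old_cost,
		unsigned int new_cost)
{
	unsigned int saved = b->used;	/* snapshot, like ep_bw_info[] */

	b->used -= old_cost;		/* drop the old reservation */
	b->used += new_cost;		/* add the new one */
	if (b->used <= b->limit)
		return 0;		/* fits: commit the new state */

	b->used = saved;		/* does not fit: revert */
	return -ENOMEM;
}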
 /* Issue a configure endpoint command or evaluate context command
  * and wait for it to finish.
  */
@@ -1779,6 +2014,14 @@ static int xhci_configure_endpoint(struct xhci_hcd *xhci,
 				xhci->num_active_eps);
 		return -ENOMEM;
 	}
+	if ((xhci->quirks & XHCI_SW_BW_CHECKING) &&
+			xhci_reserve_bandwidth(xhci, virt_dev, in_ctx)) {
+		if ((xhci->quirks & XHCI_EP_LIMIT_QUIRK))
+			xhci_free_host_resources(xhci, in_ctx);
+		spin_unlock_irqrestore(&xhci->lock, flags);
+		xhci_warn(xhci, "Not enough bandwidth\n");
+		return -ENOMEM;
+	}
 
 	if (command) {
 		cmd_completion = command->completion;
@@ -1912,7 +2155,6 @@ int xhci_check_bandwidth(struct usb_hcd *hcd, struct usb_device *udev)
 		    !(le32_to_cpu(ctrl_ctx->add_flags) & (1 << (i + 1))))
 			xhci_free_or_cache_endpoint_ring(xhci, virt_dev, i);
 	}
-	xhci_update_bw_info(xhci, virt_dev->in_ctx, ctrl_ctx, virt_dev);
 	xhci_zero_in_ctx(xhci, virt_dev);
 	/*
 	 * Install any rings for completely new endpoints or changed endpoints,
@@ -2528,6 +2770,7 @@ int xhci_discover_or_reset_device(struct usb_hcd *hcd, struct usb_device *udev)
 	int timeleft;
 	int last_freed_endpoint;
 	struct xhci_slot_ctx *slot_ctx;
+	int old_active_eps = 0;
 
 	ret = xhci_check_args(hcd, udev, NULL, 0, false, __func__);
 	if (ret <= 0)
@@ -2669,8 +2912,18 @@ int xhci_discover_or_reset_device(struct usb_hcd *hcd, struct usb_device *udev)
 			xhci_free_or_cache_endpoint_ring(xhci, virt_dev, i);
 			last_freed_endpoint = i;
 		}
+		if (!list_empty(&virt_dev->eps[i].bw_endpoint_list))
+			xhci_drop_ep_from_interval_table(xhci,
+					&virt_dev->eps[i].bw_info,
+					virt_dev->bw_table,
+					udev,
+					&virt_dev->eps[i],
+					virt_dev->tt_info);
 		xhci_clear_endpoint_bw_info(&virt_dev->eps[i].bw_info);
 	}
+	/* If necessary, update the number of active TTs on this root port */
+	xhci_update_tt_active_eps(xhci, virt_dev, old_active_eps);
+
 	xhci_dbg(xhci, "Output context after successful reset device cmd:\n");
 	xhci_dbg_ctx(xhci, virt_dev->out_ctx, last_freed_endpoint);
 	ret = 0;