@@ -335,16 +335,11 @@ musb_save_toggle(struct musb_hw_ep *ep, int is_in, struct urb *urb)
 static struct musb_qh *
 musb_giveback(struct musb_qh *qh, struct urb *urb, int status)
 {
-	int			is_in;
 	struct musb_hw_ep	*ep = qh->hw_ep;
 	struct musb		*musb = ep->musb;
+	int			is_in = usb_pipein(urb->pipe);
 	int			ready = qh->is_ready;
 
-	if (ep->is_shared_fifo)
-		is_in = 1;
-	else
-		is_in = usb_pipein(urb->pipe);
-
 	/* save toggle eagerly, for paranoia */
 	switch (qh->type) {
 	case USB_ENDPOINT_XFER_BULK:
@@ -432,7 +427,7 @@ musb_advance_schedule(struct musb *musb, struct urb *urb,
 	else
 		qh = musb_giveback(qh, urb, urb->status);
 
-	if (qh && qh->is_ready && !list_empty(&qh->hep->urb_list)) {
+	if (qh != NULL && qh->is_ready) {
 		DBG(4, "... next ep%d %cX urb %p\n",
 				hw_ep->epnum, is_in ? 'R' : 'T',
 				next_urb(qh));
@@ -942,8 +937,8 @@ static bool musb_h_ep0_continue(struct musb *musb, u16 len, struct urb *urb)
 	switch (musb->ep0_stage) {
 	case MUSB_EP0_IN:
 		fifo_dest = urb->transfer_buffer + urb->actual_length;
-		fifo_count = min(len, ((u16) (urb->transfer_buffer_length
-					- urb->actual_length)));
+		fifo_count = min_t(size_t, len, urb->transfer_buffer_length -
+				   urb->actual_length);
 		if (fifo_count < len)
 			urb->status = -EOVERFLOW;
 
@@ -976,10 +971,9 @@ static bool musb_h_ep0_continue(struct musb *musb, u16 len, struct urb *urb)
 		}
 		/* FALLTHROUGH */
 	case MUSB_EP0_OUT:
-		fifo_count = min(qh->maxpacket, ((u16)
-				(urb->transfer_buffer_length
-				- urb->actual_length)));
-
+		fifo_count = min_t(size_t, qh->maxpacket,
+				   urb->transfer_buffer_length -
+				   urb->actual_length);
 		if (fifo_count) {
 			fifo_dest = (u8 *) (urb->transfer_buffer
 					+ urb->actual_length);
@@ -1161,7 +1155,8 @@ void musb_host_tx(struct musb *musb, u8 epnum)
 	struct urb		*urb;
 	struct musb_hw_ep	*hw_ep = musb->endpoints + epnum;
 	void __iomem		*epio = hw_ep->regs;
-	struct musb_qh		*qh = hw_ep->out_qh;
+	struct musb_qh		*qh = hw_ep->is_shared_fifo ? hw_ep->in_qh
+							    : hw_ep->out_qh;
 	u32			status = 0;
 	void __iomem		*mbase = musb->mregs;
 	struct dma_channel	*dma;
@@ -1308,7 +1303,8 @@ void musb_host_tx(struct musb *musb, u8 epnum)
 		 * packets before updating TXCSR ... other docs disagree ...
 		 */
 		/* PIO: start next packet in this URB */
-		wLength = min(qh->maxpacket, (u16) wLength);
+		if (wLength > qh->maxpacket)
+			wLength = qh->maxpacket;
 		musb_write_fifo(hw_ep, wLength, buf);
 		qh->segsize = wLength;
 
@@ -1867,19 +1863,21 @@ static int musb_urb_enqueue(
 	}
 	qh->type_reg = type_reg;
 
-	/* precompute rxinterval/txinterval register */
-	interval = min((u8)16, epd->bInterval);	/* log encoding */
+	/* Precompute RXINTERVAL/TXINTERVAL register */
 	switch (qh->type) {
 	case USB_ENDPOINT_XFER_INT:
-		/* fullspeed uses linear encoding */
-		if (USB_SPEED_FULL == urb->dev->speed) {
-			interval = epd->bInterval;
-			if (!interval)
-				interval = 1;
+		/*
+		 * Full/low speeds use the linear encoding,
+		 * high speed uses the logarithmic encoding.
+		 */
+		if (urb->dev->speed <= USB_SPEED_FULL) {
+			interval = max_t(u8, epd->bInterval, 1);
+			break;
 		}
 		/* FALLTHROUGH */
 	case USB_ENDPOINT_XFER_ISOC:
-		/* iso always uses log encoding */
+		/* ISO always uses logarithmic encoding */
+		interval = min_t(u8, epd->bInterval, 16);
 		break;
 	default:
 		/* REVISIT we actually want to use NAK limits, hinting to the
@@ -2037,9 +2035,9 @@ static int musb_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
 		goto done;
 
 	/* Any URB not actively programmed into endpoint hardware can be
-	 * immediately given back. Such an URB must be at the head of its
+	 * immediately given back; that's any URB not at the head of an
 	 * endpoint queue, unless someday we get real DMA queues. And even
-	 * then, it might not be known to the hardware...
+	 * if it's at the head, it might not be known to the hardware...
 	 *
 	 * Otherwise abort current transfer, pending dma, etc.; urb->status
 	 * has already been updated. This is a synchronous abort; it'd be
@@ -2078,6 +2076,15 @@ static int musb_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
 		qh->is_ready = 0;
 		__musb_giveback(musb, urb, 0);
 		qh->is_ready = ready;
+
+		/* If nothing else (usually musb_giveback) is using it
+		 * and its URB list has emptied, recycle this qh.
+		 */
+		if (ready && list_empty(&qh->hep->urb_list)) {
+			qh->hep->hcpriv = NULL;
+			list_del(&qh->ring);
+			kfree(qh);
+		}
 	} else
 		ret = musb_cleanup_urb(urb, qh, urb->pipe & USB_DIR_IN);
 done:
@@ -2093,15 +2100,16 @@ musb_h_disable(struct usb_hcd *hcd, struct usb_host_endpoint *hep)
 	unsigned long		flags;
 	struct musb		*musb = hcd_to_musb(hcd);
 	u8			is_in = epnum & USB_DIR_IN;
-	struct musb_qh		*qh = hep->hcpriv;
-	struct urb		*urb, *tmp;
+	struct musb_qh		*qh;
+	struct urb		*urb;
 	struct list_head	*sched;
 
-	if (!qh)
-		return;
-
 	spin_lock_irqsave(&musb->lock, flags);
 
+	qh = hep->hcpriv;
+	if (qh == NULL)
+		goto exit;
+
 	switch (qh->type) {
 	case USB_ENDPOINT_XFER_CONTROL:
 		sched = &musb->control;
@@ -2135,13 +2143,28 @@ musb_h_disable(struct usb_hcd *hcd, struct usb_host_endpoint *hep)
 
 		/* cleanup */
 		musb_cleanup_urb(urb, qh, urb->pipe & USB_DIR_IN);
-	} else
-		urb = NULL;
-
-	/* then just nuke all the others */
-	list_for_each_entry_safe_from(urb, tmp, &hep->urb_list, urb_list)
-		musb_giveback(qh, urb, -ESHUTDOWN);
-
+
+		/* Then nuke all the others ... and advance the
+		 * queue on hw_ep (e.g. bulk ring) when we're done.
+		 */
+		while (!list_empty(&hep->urb_list)) {
+			urb = next_urb(qh);
+			urb->status = -ESHUTDOWN;
+			musb_advance_schedule(musb, urb, qh->hw_ep, is_in);
+		}
+	} else {
+		/* Just empty the queue; the hardware is busy with
+		 * other transfers, and since !qh->is_ready nothing
+		 * will activate any of these as it advances.
+		 */
+		while (!list_empty(&hep->urb_list))
+			__musb_giveback(musb, next_urb(qh), -ESHUTDOWN);
+
+		hep->hcpriv = NULL;
+		list_del(&qh->ring);
+		kfree(qh);
+	}
+exit:
 	spin_unlock_irqrestore(&musb->lock, flags);
 }
 
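Note (illustrative, not part of the patch): the interval hunk above separates
the two bInterval encodings. Full- and low-speed interrupt endpoints give the
polling interval directly in frames (linear encoding, minimum 1), while
high-speed interrupt and all isochronous endpoints use the logarithmic
encoding expected by the RXINTERVAL/TXINTERVAL registers, capped at 16. A
standalone sketch of that selection logic follows; the helper name
pick_interval_reg() is made up for illustration and does not exist in the
driver.

	#include <stdio.h>

	/* Mirror of the patched switch: compute the value that would be
	 * programmed into RXINTERVAL/TXINTERVAL for an interrupt or
	 * isochronous endpoint, from the descriptor's bInterval.
	 */
	static unsigned int pick_interval_reg(unsigned int bInterval,
					      int is_highspeed, int is_iso)
	{
		if (!is_iso && !is_highspeed)
			return bInterval ? bInterval : 1;	/* linear, at least 1 */
		return bInterval < 16 ? bInterval : 16;		/* log encoding, at most 16 */
	}

	int main(void)
	{
		/* full-speed interrupt endpoint with bInterval 0 -> 1 frame */
		printf("%u\n", pick_interval_reg(0, 0, 0));
		/* high-speed interrupt endpoint keeps its log-encoded value */
		printf("%u\n", pick_interval_reg(4, 1, 0));
		return 0;
	}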