@@ -92,6 +92,59 @@
 
 /* ----------------------------------------------------------------------- */
 
+/* Maps the buffer to dma */
+
+static inline void map_dma_buffer(struct musb_request *request,
+			struct musb *musb)
+{
+	if (request->request.dma == DMA_ADDR_INVALID) {
+		request->request.dma = dma_map_single(
+				musb->controller,
+				request->request.buf,
+				request->request.length,
+				request->tx
+					? DMA_TO_DEVICE
+					: DMA_FROM_DEVICE);
+		request->mapped = 1;
+	} else {
+		dma_sync_single_for_device(musb->controller,
+			request->request.dma,
+			request->request.length,
+			request->tx
+				? DMA_TO_DEVICE
+				: DMA_FROM_DEVICE);
+		request->mapped = 0;
+	}
+}
+
+/* Unmap the buffer from dma and maps it back to cpu */
+static inline void unmap_dma_buffer(struct musb_request *request,
+				struct musb *musb)
+{
+	if (request->request.dma == DMA_ADDR_INVALID) {
+		DBG(20, "not unmapping a never mapped buffer\n");
+		return;
+	}
+	if (request->mapped) {
+		dma_unmap_single(musb->controller,
+			request->request.dma,
+			request->request.length,
+			request->tx
+				? DMA_TO_DEVICE
+				: DMA_FROM_DEVICE);
+		request->request.dma = DMA_ADDR_INVALID;
+		request->mapped = 0;
+	} else {
+		dma_sync_single_for_cpu(musb->controller,
+			request->request.dma,
+			request->request.length,
+			request->tx
+				? DMA_TO_DEVICE
+				: DMA_FROM_DEVICE);
+
+	}
+}
+
 /*
  * Immediately complete a request.
  *
@@ -119,24 +172,8 @@ __acquires(ep->musb->lock)
 
 	ep->busy = 1;
 	spin_unlock(&musb->lock);
-	if (is_dma_capable()) {
-		if (req->mapped) {
-			dma_unmap_single(musb->controller,
-				req->request.dma,
-				req->request.length,
-				req->tx
-					? DMA_TO_DEVICE
-					: DMA_FROM_DEVICE);
-			req->request.dma = DMA_ADDR_INVALID;
-			req->mapped = 0;
-		} else if (req->request.dma != DMA_ADDR_INVALID)
-			dma_sync_single_for_cpu(musb->controller,
-				req->request.dma,
-				req->request.length,
-				req->tx
-					? DMA_TO_DEVICE
-					: DMA_FROM_DEVICE);
-	}
+	if (is_dma_capable() && ep->dma)
+		unmap_dma_buffer(req, musb);
 	if (request->status == 0)
 		DBG(5, "%s done request %p, %d/%d\n",
 			ep->end_point.name, request,
@@ -395,6 +432,13 @@ static void txstate(struct musb *musb, struct musb_request *req)
 #endif
 
 	if (!use_dma) {
+		/*
+		 * Unmap the dma buffer back to cpu if dma channel
+		 * programming fails
+		 */
+		if (is_dma_capable() && musb_ep->dma)
+			unmap_dma_buffer(req, musb);
+
 		musb_write_fifo(musb_ep->hw_ep, fifo_count,
 				(u8 *) (request->buf + request->actual));
 		request->actual += fifo_count;
@@ -713,6 +757,20 @@ static void rxstate(struct musb *musb, struct musb_request *req)
 			return;
 		}
 #endif
+		/*
+		 * Unmap the dma buffer back to cpu if dma channel
+		 * programming fails. This buffer is mapped if the
+		 * channel allocation is successful
+		 */
+		if (is_dma_capable() && musb_ep->dma) {
+			unmap_dma_buffer(req, musb);
+
+			/* Clear DMAENAB for the
+			 * PIO mode transfer
+			 */
+			csr &= ~MUSB_RXCSR_DMAENAB;
+			musb_writew(epio, MUSB_RXCSR, csr);
+		}
 
 		musb_read_fifo(musb_ep->hw_ep, fifo_count, (u8 *)
 				(request->buf + request->actual));
@@ -1150,26 +1208,9 @@ static int musb_gadget_queue(struct usb_ep *ep, struct usb_request *req,
 	request->epnum = musb_ep->current_epnum;
 	request->tx = musb_ep->is_in;
 
-	if (is_dma_capable() && musb_ep->dma) {
-		if (request->request.dma == DMA_ADDR_INVALID) {
-			request->request.dma = dma_map_single(
-					musb->controller,
-					request->request.buf,
-					request->request.length,
-					request->tx
-						? DMA_TO_DEVICE
-						: DMA_FROM_DEVICE);
-			request->mapped = 1;
-		} else {
-			dma_sync_single_for_device(musb->controller,
-				request->request.dma,
-				request->request.length,
-				request->tx
-					? DMA_TO_DEVICE
-					: DMA_FROM_DEVICE);
-			request->mapped = 0;
-		}
-	} else
+	if (is_dma_capable() && musb_ep->dma)
+		map_dma_buffer(request, musb);
+	else
 		request->mapped = 0;
 
 	spin_lock_irqsave(&musb->lock, lockflags);