@@ -54,70 +54,6 @@
 #include "gadget.h"
 #include "io.h"
 
-#define DMA_ADDR_INVALID	(~(dma_addr_t)0)
-
-void dwc3_map_buffer_to_dma(struct dwc3_request *req)
-{
-	struct dwc3		*dwc = req->dep->dwc;
-
-	if (req->request.length == 0) {
-		/* req->request.dma = dwc->setup_buf_addr; */
-		return;
-	}
-
-	if (req->request.num_sgs) {
-		int	mapped;
-
-		mapped = dma_map_sg(dwc->dev, req->request.sg,
-				req->request.num_sgs,
-				req->direction ? DMA_TO_DEVICE
-				: DMA_FROM_DEVICE);
-		if (mapped < 0) {
-			dev_err(dwc->dev, "failed to map SGs\n");
-			return;
-		}
-
-		req->request.num_mapped_sgs = mapped;
-		return;
-	}
-
-	if (req->request.dma == DMA_ADDR_INVALID) {
-		req->request.dma = dma_map_single(dwc->dev, req->request.buf,
-				req->request.length, req->direction
-				? DMA_TO_DEVICE : DMA_FROM_DEVICE);
-		req->mapped = true;
-	}
-}
-
-void dwc3_unmap_buffer_from_dma(struct dwc3_request *req)
-{
-	struct dwc3		*dwc = req->dep->dwc;
-
-	if (req->request.length == 0) {
-		req->request.dma = DMA_ADDR_INVALID;
-		return;
-	}
-
-	if (req->request.num_mapped_sgs) {
-		req->request.dma = DMA_ADDR_INVALID;
-		dma_unmap_sg(dwc->dev, req->request.sg,
-				req->request.num_sgs,
-				req->direction ? DMA_TO_DEVICE
-				: DMA_FROM_DEVICE);
-
-		req->request.num_mapped_sgs = 0;
-		return;
-	}
-
-	if (req->mapped) {
-		dma_unmap_single(dwc->dev, req->request.dma,
-				req->request.length, req->direction
-				? DMA_TO_DEVICE : DMA_FROM_DEVICE);
-		req->mapped = 0;
-		req->request.dma = DMA_ADDR_INVALID;
-	}
-}
-
 void dwc3_gadget_giveback(struct dwc3_ep *dep, struct dwc3_request *req,
 		int status)
 {
@@ -144,14 +80,15 @@ void dwc3_gadget_giveback(struct dwc3_ep *dep, struct dwc3_request *req,
 	if (req->request.status == -EINPROGRESS)
 		req->request.status = status;
 
-	dwc3_unmap_buffer_from_dma(req);
+	usb_gadget_unmap_request(&dwc->gadget, &req->request,
+			req->direction);
 
 	dev_dbg(dwc->dev, "request %p from %s completed %d/%d ===> %d\n",
 			req, dep->name, req->request.actual,
 			req->request.length, status);
 
 	spin_unlock(&dwc->lock);
-	req->request.complete(&req->dep->endpoint, &req->request);
+	req->request.complete(&dep->endpoint, &req->request);
 	spin_lock(&dwc->lock);
 }
 
@@ -563,7 +500,6 @@ static struct usb_request *dwc3_gadget_ep_alloc_request(struct usb_ep *ep,
 
 	req->epnum	= dep->number;
 	req->dep	= dep;
-	req->request.dma	= DMA_ADDR_INVALID;
 
 	return &req->request;
 }
@@ -822,7 +758,8 @@ static int __dwc3_gadget_kick_transfer(struct dwc3_ep *dep, u16 cmd_param,
		 * here and stop, unmap, free and del each of the linked
		 * requests instead of what we do now.
		 */
-		dwc3_unmap_buffer_from_dma(req);
+		usb_gadget_unmap_request(&dwc->gadget, &req->request,
+				req->direction);
 		list_del(&req->list);
 		return ret;
 	}
@@ -838,6 +775,9 @@ static int __dwc3_gadget_kick_transfer(struct dwc3_ep *dep, u16 cmd_param,
 
 static int __dwc3_gadget_ep_queue(struct dwc3_ep *dep, struct dwc3_request *req)
 {
+	struct dwc3		*dwc = dep->dwc;
+	int			ret;
+
 	req->request.actual	= 0;
 	req->request.status	= -EINPROGRESS;
 	req->direction		= dep->direction;
@@ -855,7 +795,11 @@ static int __dwc3_gadget_ep_queue(struct dwc3_ep *dep, struct dwc3_request *req)
	 * This will also avoid Host cancelling URBs due to too
	 * many NACKs.
	 */
-	dwc3_map_buffer_to_dma(req);
+	ret = usb_gadget_map_request(&dwc->gadget, &req->request,
+			dep->direction);
+	if (ret)
+		return ret;
+
 	list_add_tail(&req->list, &dep->request_list);
 
 	/*
@@ -2150,9 +2094,8 @@ int __devinit dwc3_gadget_init(struct dwc3 *dwc)
 		goto err1;
 	}
 
-	dwc->setup_buf = dma_alloc_coherent(dwc->dev,
-			sizeof(*dwc->setup_buf) * 2,
-			&dwc->setup_buf_addr, GFP_KERNEL);
+	dwc->setup_buf = kzalloc(sizeof(*dwc->setup_buf) * 2,
+			GFP_KERNEL);
 	if (!dwc->setup_buf) {
 		dev_err(dwc->dev, "failed to allocate setup buffer\n");
 		ret = -ENOMEM;
@@ -2243,8 +2186,7 @@ err4:
 			dwc->ep0_bounce_addr);
 
 err3:
-	dma_free_coherent(dwc->dev, sizeof(*dwc->setup_buf) * 2,
-			dwc->setup_buf, dwc->setup_buf_addr);
+	kfree(dwc->setup_buf);
 
 err2:
 	dma_free_coherent(dwc->dev, sizeof(*dwc->ep0_trb),
@@ -2273,8 +2215,7 @@ void dwc3_gadget_exit(struct dwc3 *dwc)
 	dma_free_coherent(dwc->dev, 512, dwc->ep0_bounce,
 			dwc->ep0_bounce_addr);
 
-	dma_free_coherent(dwc->dev, sizeof(*dwc->setup_buf) * 2,
-			dwc->setup_buf, dwc->setup_buf_addr);
+	kfree(dwc->setup_buf);
 
 	dma_free_coherent(dwc->dev, sizeof(*dwc->ep0_trb),
 			dwc->ep0_trb, dwc->ep0_trb_addr);
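
For reference, usb_gadget_map_request() and usb_gadget_unmap_request() are the generic helpers declared in <linux/usb/gadget.h>; the mapping helper chooses dma_map_sg() or dma_map_single() based on req->num_sgs and fills in num_mapped_sgs/dma, which is exactly the logic the removed dwc3_map_buffer_to_dma()/dwc3_unmap_buffer_from_dma() pair open-coded. A minimal sketch of the call pattern is below; it is not part of this patch, and the example_* function names are illustrative only.

	/* Sketch only: map when queueing a request, unmap when completing it. */
	static int example_queue(struct dwc3 *dwc, struct dwc3_request *req)
	{
		int ret;

		/* Delegates SG vs. single-buffer mapping to the gadget core. */
		ret = usb_gadget_map_request(&dwc->gadget, &req->request,
				req->direction);
		if (ret)
			return ret;

		/* ... hand the request to the controller ... */
		return 0;
	}

	static void example_complete(struct dwc3 *dwc, struct dwc3_request *req)
	{
		/* The unmap must use the same direction flag as the map. */
		usb_gadget_unmap_request(&dwc->gadget, &req->request,
				req->direction);
	}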