@@ -881,66 +881,16 @@ static void fsldma_cleanup_descriptor(struct fsldma_chan *chan,
 	dma_pool_free(chan->desc_pool, desc, txd->phys);
 }
 
-/**
- * fsl_chan_ld_cleanup - Clean up link descriptors
- * @chan : Freescale DMA channel
- *
- * This function is run after the queue of running descriptors has been
- * executed by the DMA engine. It will run any callbacks, and then free
- * the descriptors.
- *
- * HARDWARE STATE: idle
- */
-static void fsl_chan_ld_cleanup(struct fsldma_chan *chan)
-{
-	struct fsl_desc_sw *desc, *_desc;
-	LIST_HEAD(ld_cleanup);
-	unsigned long flags;
-
-	spin_lock_irqsave(&chan->desc_lock, flags);
-
-	/* update the cookie if we have some descriptors to cleanup */
-	if (!list_empty(&chan->ld_running)) {
-		dma_cookie_t cookie;
-
-		desc = to_fsl_desc(chan->ld_running.prev);
-		cookie = desc->async_tx.cookie;
-
-		chan->completed_cookie = cookie;
-		chan_dbg(chan, "completed cookie=%d\n", cookie);
-	}
-
-	/*
-	 * move the descriptors to a temporary list so we can drop the lock
-	 * during the entire cleanup operation
-	 */
-	list_splice_tail_init(&chan->ld_running, &ld_cleanup);
-
-	spin_unlock_irqrestore(&chan->desc_lock, flags);
-
-	/* Run the callback for each descriptor, in order */
-	list_for_each_entry_safe(desc, _desc, &ld_cleanup, node) {
-
-		/* Remove from the list of transactions */
-		list_del(&desc->node);
-
-		/* Run all cleanup for this descriptor */
-		fsldma_cleanup_descriptor(chan, desc);
-	}
-}
-
 /**
  * fsl_chan_xfer_ld_queue - transfer any pending transactions
  * @chan : Freescale DMA channel
  *
  * HARDWARE STATE: idle
+ * LOCKING: must hold chan->desc_lock
  */
 static void fsl_chan_xfer_ld_queue(struct fsldma_chan *chan)
 {
 	struct fsl_desc_sw *desc;
-	unsigned long flags;
-
-	spin_lock_irqsave(&chan->desc_lock, flags);
 
 	/*
 	 * If the list of pending descriptors is empty, then we
@@ -948,7 +898,7 @@ static void fsl_chan_xfer_ld_queue(struct fsldma_chan *chan)
 	 */
 	if (list_empty(&chan->ld_pending)) {
 		chan_dbg(chan, "no pending LDs\n");
-		goto out_unlock;
+		return;
 	}
 
 	/*
@@ -958,7 +908,7 @@ static void fsl_chan_xfer_ld_queue(struct fsldma_chan *chan)
 	 */
 	if (!chan->idle) {
 		chan_dbg(chan, "DMA controller still busy\n");
-		goto out_unlock;
+		return;
 	}
 
 	/*
@@ -996,9 +946,6 @@ static void fsl_chan_xfer_ld_queue(struct fsldma_chan *chan)
 
 	dma_start(chan);
 	chan->idle = false;
-
-out_unlock:
-	spin_unlock_irqrestore(&chan->desc_lock, flags);
 }
 
 /**
@@ -1008,7 +955,11 @@ out_unlock:
 static void fsl_dma_memcpy_issue_pending(struct dma_chan *dchan)
 {
 	struct fsldma_chan *chan = to_fsl_chan(dchan);
+	unsigned long flags;
+
+	spin_lock_irqsave(&chan->desc_lock, flags);
 	fsl_chan_xfer_ld_queue(chan);
+	spin_unlock_irqrestore(&chan->desc_lock, flags);
 }
 
 /**
@@ -1109,20 +1060,53 @@ static irqreturn_t fsldma_chan_irq(int irq, void *data)
 static void dma_do_tasklet(unsigned long data)
 {
 	struct fsldma_chan *chan = (struct fsldma_chan *)data;
+	struct fsl_desc_sw *desc, *_desc;
+	LIST_HEAD(ld_cleanup);
 	unsigned long flags;
 
 	chan_dbg(chan, "tasklet entry\n");
 
-	/* run all callbacks, free all used descriptors */
-	fsl_chan_ld_cleanup(chan);
-
-	/* the channel is now idle */
 	spin_lock_irqsave(&chan->desc_lock, flags);
+
+	/* update the cookie if we have some descriptors to cleanup */
+	if (!list_empty(&chan->ld_running)) {
+		dma_cookie_t cookie;
+
+		desc = to_fsl_desc(chan->ld_running.prev);
+		cookie = desc->async_tx.cookie;
+
+		chan->completed_cookie = cookie;
+		chan_dbg(chan, "completed_cookie=%d\n", cookie);
+	}
+
+	/*
+	 * move the descriptors to a temporary list so we can drop the lock
+	 * during the entire cleanup operation
+	 */
+	list_splice_tail_init(&chan->ld_running, &ld_cleanup);
+
+	/* the hardware is now idle and ready for more */
 	chan->idle = true;
-	spin_unlock_irqrestore(&chan->desc_lock, flags);
 
-	/* start any pending transactions automatically */
+	/*
+	 * Start any pending transactions automatically
+	 *
+	 * In the ideal case, we keep the DMA controller busy while we go
+	 * ahead and free the descriptors below.
+	 */
 	fsl_chan_xfer_ld_queue(chan);
+	spin_unlock_irqrestore(&chan->desc_lock, flags);
+
+	/* Run the callback for each descriptor, in order */
+	list_for_each_entry_safe(desc, _desc, &ld_cleanup, node) {
+
+		/* Remove from the list of transactions */
+		list_del(&desc->node);
+
+		/* Run all cleanup for this descriptor */
+		fsldma_cleanup_descriptor(chan, desc);
+	}
+
 	chan_dbg(chan, "tasklet exit\n");
 }
 