@@ -3,6 +3,7 @@
  * AVR32 systems.)
  *
  * Copyright (C) 2007-2008 Atmel Corporation
+ * Copyright (C) 2010-2011 ST Microelectronics
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 as
@@ -93,8 +94,9 @@ static struct dw_desc *dwc_desc_get(struct dw_dma_chan *dwc)
 	struct dw_desc *desc, *_desc;
 	struct dw_desc *ret = NULL;
 	unsigned int i = 0;
+	unsigned long flags;
 
-	spin_lock_bh(&dwc->lock);
+	spin_lock_irqsave(&dwc->lock, flags);
 	list_for_each_entry_safe(desc, _desc, &dwc->free_list, desc_node) {
 		if (async_tx_test_ack(&desc->txd)) {
 			list_del(&desc->desc_node);
@@ -104,7 +106,7 @@ static struct dw_desc *dwc_desc_get(struct dw_dma_chan *dwc)
 		dev_dbg(chan2dev(&dwc->chan), "desc %p not ACKed\n", desc);
 		i++;
 	}
-	spin_unlock_bh(&dwc->lock);
+	spin_unlock_irqrestore(&dwc->lock, flags);
 
 	dev_vdbg(chan2dev(&dwc->chan), "scanned %u descriptors on freelist\n", i);
 
@@ -130,12 +132,14 @@ static void dwc_sync_desc_for_cpu(struct dw_dma_chan *dwc, struct dw_desc *desc)
  */
 static void dwc_desc_put(struct dw_dma_chan *dwc, struct dw_desc *desc)
 {
+	unsigned long flags;
+
 	if (desc) {
 		struct dw_desc *child;
 
 		dwc_sync_desc_for_cpu(dwc, desc);
 
-		spin_lock_bh(&dwc->lock);
+		spin_lock_irqsave(&dwc->lock, flags);
 		list_for_each_entry(child, &desc->tx_list, desc_node)
 			dev_vdbg(chan2dev(&dwc->chan),
 					"moving child desc %p to freelist\n",
@@ -143,7 +147,7 @@ static void dwc_desc_put(struct dw_dma_chan *dwc, struct dw_desc *desc)
 		list_splice_init(&desc->tx_list, &dwc->free_list);
 		dev_vdbg(chan2dev(&dwc->chan), "moving desc %p to freelist\n", desc);
 		list_add(&desc->desc_node, &dwc->free_list);
-		spin_unlock_bh(&dwc->lock);
+		spin_unlock_irqrestore(&dwc->lock, flags);
 	}
 }
 
@@ -195,18 +199,23 @@ static void dwc_dostart(struct dw_dma_chan *dwc, struct dw_desc *first)
 /*----------------------------------------------------------------------*/
 
 static void
-dwc_descriptor_complete(struct dw_dma_chan *dwc, struct dw_desc *desc)
+dwc_descriptor_complete(struct dw_dma_chan *dwc, struct dw_desc *desc,
+		bool callback_required)
 {
-	dma_async_tx_callback callback;
-	void *param;
+	dma_async_tx_callback callback = NULL;
+	void *param = NULL;
 	struct dma_async_tx_descriptor *txd = &desc->txd;
 	struct dw_desc *child;
+	unsigned long flags;
 
 	dev_vdbg(chan2dev(&dwc->chan), "descriptor %u complete\n", txd->cookie);
 
+	spin_lock_irqsave(&dwc->lock, flags);
 	dwc->completed = txd->cookie;
-	callback = txd->callback;
-	param = txd->callback_param;
+	if (callback_required) {
+		callback = txd->callback;
+		param = txd->callback_param;
+	}
 
 	dwc_sync_desc_for_cpu(dwc, desc);
 
@@ -238,11 +247,9 @@ dwc_descriptor_complete(struct dw_dma_chan *dwc, struct dw_desc *desc)
 		}
 	}
 
-	/*
-	 * The API requires that no submissions are done from a
-	 * callback, so we don't need to drop the lock here
-	 */
-	if (callback)
+	spin_unlock_irqrestore(&dwc->lock, flags);
+
+	if (callback_required && callback)
 		callback(param);
 }
 
@@ -250,7 +257,9 @@ static void dwc_complete_all(struct dw_dma *dw, struct dw_dma_chan *dwc)
 {
 	struct dw_desc *desc, *_desc;
 	LIST_HEAD(list);
+	unsigned long flags;
 
+	spin_lock_irqsave(&dwc->lock, flags);
 	if (dma_readl(dw, CH_EN) & dwc->mask) {
 		dev_err(chan2dev(&dwc->chan),
 			"BUG: XFER bit set, but channel not idle!\n");
@@ -271,8 +280,10 @@ static void dwc_complete_all(struct dw_dma *dw, struct dw_dma_chan *dwc)
 		dwc_dostart(dwc, dwc_first_active(dwc));
 	}
 
+	spin_unlock_irqrestore(&dwc->lock, flags);
+
 	list_for_each_entry_safe(desc, _desc, &list, desc_node)
-		dwc_descriptor_complete(dwc, desc);
+		dwc_descriptor_complete(dwc, desc, true);
 }
 
 static void dwc_scan_descriptors(struct dw_dma *dw, struct dw_dma_chan *dwc)
@@ -281,7 +292,9 @@ static void dwc_scan_descriptors(struct dw_dma *dw, struct dw_dma_chan *dwc)
 	struct dw_desc *desc, *_desc;
 	struct dw_desc *child;
 	u32 status_xfer;
+	unsigned long flags;
 
+	spin_lock_irqsave(&dwc->lock, flags);
 	/*
 	 * Clear block interrupt flag before scanning so that we don't
 	 * miss any, and read LLP before RAW_XFER to ensure it is
@@ -294,30 +307,47 @@ static void dwc_scan_descriptors(struct dw_dma *dw, struct dw_dma_chan *dwc)
 	if (status_xfer & dwc->mask) {
 		/* Everything we've submitted is done */
 		dma_writel(dw, CLEAR.XFER, dwc->mask);
+		spin_unlock_irqrestore(&dwc->lock, flags);
+
 		dwc_complete_all(dw, dwc);
 		return;
 	}
 
-	if (list_empty(&dwc->active_list))
+	if (list_empty(&dwc->active_list)) {
+		spin_unlock_irqrestore(&dwc->lock, flags);
 		return;
+	}
 
 	dev_vdbg(chan2dev(&dwc->chan), "scan_descriptors: llp=0x%x\n", llp);
 
 	list_for_each_entry_safe(desc, _desc, &dwc->active_list, desc_node) {
-		if (desc->lli.llp == llp)
+		/* check first descriptors addr */
+		if (desc->txd.phys == llp) {
+			spin_unlock_irqrestore(&dwc->lock, flags);
+			return;
+		}
+
+		/* check first descriptors llp */
+		if (desc->lli.llp == llp) {
 			/* This one is currently in progress */
+			spin_unlock_irqrestore(&dwc->lock, flags);
 			return;
+		}
 
 		list_for_each_entry(child, &desc->tx_list, desc_node)
-			if (child->lli.llp == llp)
+			if (child->lli.llp == llp) {
 				/* Currently in progress */
+				spin_unlock_irqrestore(&dwc->lock, flags);
 				return;
+			}
 
 		/*
 		 * No descriptors so far seem to be in progress, i.e.
 		 * this one must be done.
 		 */
-		dwc_descriptor_complete(dwc, desc);
+		spin_unlock_irqrestore(&dwc->lock, flags);
+		dwc_descriptor_complete(dwc, desc, true);
+		spin_lock_irqsave(&dwc->lock, flags);
 	}
 
 	dev_err(chan2dev(&dwc->chan),
@@ -332,6 +362,7 @@ static void dwc_scan_descriptors(struct dw_dma *dw, struct dw_dma_chan *dwc)
 		list_move(dwc->queue.next, &dwc->active_list);
 		dwc_dostart(dwc, dwc_first_active(dwc));
 	}
+	spin_unlock_irqrestore(&dwc->lock, flags);
 }
 
 static void dwc_dump_lli(struct dw_dma_chan *dwc, struct dw_lli *lli)
@@ -346,9 +377,12 @@ static void dwc_handle_error(struct dw_dma *dw, struct dw_dma_chan *dwc)
 {
 	struct dw_desc *bad_desc;
 	struct dw_desc *child;
+	unsigned long flags;
 
 	dwc_scan_descriptors(dw, dwc);
 
+	spin_lock_irqsave(&dwc->lock, flags);
+
 	/*
 	 * The descriptor currently at the head of the active list is
 	 * borked. Since we don't have any way to report errors, we'll
@@ -378,8 +412,10 @@ static void dwc_handle_error(struct dw_dma *dw, struct dw_dma_chan *dwc)
 	list_for_each_entry(child, &bad_desc->tx_list, desc_node)
 		dwc_dump_lli(dwc, &child->lli);
 
+	spin_unlock_irqrestore(&dwc->lock, flags);
+
 	/* Pretend the descriptor completed successfully */
-	dwc_descriptor_complete(dwc, bad_desc);
+	dwc_descriptor_complete(dwc, bad_desc, true);
 }
 
 /* --------------------- Cyclic DMA API extensions -------------------- */
@@ -402,6 +438,8 @@ EXPORT_SYMBOL(dw_dma_get_dst_addr);
 static void dwc_handle_cyclic(struct dw_dma *dw, struct dw_dma_chan *dwc,
 		u32 status_block, u32 status_err, u32 status_xfer)
 {
+	unsigned long flags;
+
 	if (status_block & dwc->mask) {
 		void (*callback)(void *param);
 		void *callback_param;
@@ -412,11 +450,9 @@ static void dwc_handle_cyclic(struct dw_dma *dw, struct dw_dma_chan *dwc,
 
 		callback = dwc->cdesc->period_callback;
 		callback_param = dwc->cdesc->period_callback_param;
-		if (callback) {
-			spin_unlock(&dwc->lock);
+
+		if (callback)
 			callback(callback_param);
-			spin_lock(&dwc->lock);
-		}
 	}
 
 	/*
@@ -430,6 +466,9 @@ static void dwc_handle_cyclic(struct dw_dma *dw, struct dw_dma_chan *dwc,
 		dev_err(chan2dev(&dwc->chan), "cyclic DMA unexpected %s "
 				"interrupt, stopping DMA transfer\n",
 				status_xfer ? "xfer" : "error");
+
+		spin_lock_irqsave(&dwc->lock, flags);
+
 		dev_err(chan2dev(&dwc->chan),
 			" SAR: 0x%x DAR: 0x%x LLP: 0x%x CTL: 0x%x:%08x\n",
 			channel_readl(dwc, SAR),
@@ -453,6 +492,8 @@ static void dwc_handle_cyclic(struct dw_dma *dw, struct dw_dma_chan *dwc,
 
 		for (i = 0; i < dwc->cdesc->periods; i++)
 			dwc_dump_lli(dwc, &dwc->cdesc->desc[i]->lli);
+
+		spin_unlock_irqrestore(&dwc->lock, flags);
 	}
 }
 
@@ -476,7 +517,6 @@ static void dw_dma_tasklet(unsigned long data)
 
 	for (i = 0; i < dw->dma.chancnt; i++) {
 		dwc = &dw->chan[i];
-		spin_lock(&dwc->lock);
 		if (test_bit(DW_DMA_IS_CYCLIC, &dwc->flags))
 			dwc_handle_cyclic(dw, dwc, status_block, status_err,
 					status_xfer);
@@ -484,7 +524,6 @@ static void dw_dma_tasklet(unsigned long data)
 			dwc_handle_error(dw, dwc);
 		else if ((status_block | status_xfer) & (1 << i))
 			dwc_scan_descriptors(dw, dwc);
-		spin_unlock(&dwc->lock);
 	}
 
 	/*
@@ -539,8 +578,9 @@ static dma_cookie_t dwc_tx_submit(struct dma_async_tx_descriptor *tx)
 	struct dw_desc *desc = txd_to_dw_desc(tx);
 	struct dw_dma_chan *dwc = to_dw_dma_chan(tx->chan);
 	dma_cookie_t cookie;
+	unsigned long flags;
 
-	spin_lock_bh(&dwc->lock);
+	spin_lock_irqsave(&dwc->lock, flags);
 	cookie = dwc_assign_cookie(dwc, desc);
 
 	/*
@@ -560,7 +600,7 @@ static dma_cookie_t dwc_tx_submit(struct dma_async_tx_descriptor *tx)
 		list_add_tail(&desc->desc_node, &dwc->queue);
 	}
 
-	spin_unlock_bh(&dwc->lock);
+	spin_unlock_irqrestore(&dwc->lock, flags);
 
 	return cookie;
 }
@@ -689,9 +729,15 @@ dwc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
 		reg = dws->tx_reg;
 		for_each_sg(sgl, sg, sg_len, i) {
 			struct dw_desc *desc;
-			u32 len;
-			u32 mem;
+			u32 len, dlen, mem;
+
+			mem = sg_phys(sg);
+			len = sg_dma_len(sg);
+			mem_width = 2;
+			if (unlikely(mem & 3 || len & 3))
+				mem_width = 0;
 
+slave_sg_todev_fill_desc:
 			desc = dwc_desc_get(dwc);
 			if (!desc) {
 				dev_err(chan2dev(chan),
@@ -699,16 +745,19 @@ dwc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
 				goto err_desc_get;
 			}
 
-			mem = sg_phys(sg);
-			len = sg_dma_len(sg);
-			mem_width = 2;
-			if (unlikely(mem & 3 || len & 3))
-				mem_width = 0;
-
 			desc->lli.sar = mem;
 			desc->lli.dar = reg;
 			desc->lli.ctllo = ctllo | DWC_CTLL_SRC_WIDTH(mem_width);
-			desc->lli.ctlhi = len >> mem_width;
+			if ((len >> mem_width) > DWC_MAX_COUNT) {
+				dlen = DWC_MAX_COUNT << mem_width;
+				mem += dlen;
+				len -= dlen;
+			} else {
+				dlen = len;
+				len = 0;
+			}
+
+			desc->lli.ctlhi = dlen >> mem_width;
 
 			if (!first) {
 				first = desc;
@@ -722,7 +771,10 @@ dwc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
 						&first->tx_list);
 			}
 			prev = desc;
-			total_len += len;
+			total_len += dlen;
+
+			if (len)
+				goto slave_sg_todev_fill_desc;
 		}
 		break;
 	case DMA_FROM_DEVICE:
@@ -735,15 +787,7 @@ dwc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
 		reg = dws->rx_reg;
 		for_each_sg(sgl, sg, sg_len, i) {
 			struct dw_desc *desc;
-			u32 len;
-			u32 mem;
-
-			desc = dwc_desc_get(dwc);
-			if (!desc) {
-				dev_err(chan2dev(chan),
-					"not enough descriptors available\n");
-				goto err_desc_get;
-			}
+			u32 len, dlen, mem;
 
 			mem = sg_phys(sg);
 			len = sg_dma_len(sg);
@@ -751,10 +795,26 @@ dwc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
 			if (unlikely(mem & 3 || len & 3))
 				mem_width = 0;
 
+slave_sg_fromdev_fill_desc:
+			desc = dwc_desc_get(dwc);
+			if (!desc) {
+				dev_err(chan2dev(chan),
+					"not enough descriptors available\n");
+				goto err_desc_get;
+			}
+
 			desc->lli.sar = reg;
 			desc->lli.dar = mem;
 			desc->lli.ctllo = ctllo | DWC_CTLL_DST_WIDTH(mem_width);
-			desc->lli.ctlhi = len >> reg_width;
+			if ((len >> reg_width) > DWC_MAX_COUNT) {
+				dlen = DWC_MAX_COUNT << reg_width;
+				mem += dlen;
+				len -= dlen;
+			} else {
+				dlen = len;
+				len = 0;
+			}
+			desc->lli.ctlhi = dlen >> reg_width;
 
 			if (!first) {
 				first = desc;
@@ -768,7 +828,10 @@ dwc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
 						&first->tx_list);
 			}
 			prev = desc;
-			total_len += len;
+			total_len += dlen;
+
+			if (len)
+				goto slave_sg_fromdev_fill_desc;
 		}
 		break;
 	default:
@@ -799,34 +862,51 @@ static int dwc_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
 	struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
 	struct dw_dma *dw = to_dw_dma(chan->device);
 	struct dw_desc *desc, *_desc;
+	unsigned long flags;
+	u32 cfglo;
 	LIST_HEAD(list);
 
-	/* Only supports DMA_TERMINATE_ALL */
-	if (cmd != DMA_TERMINATE_ALL)
-		return -ENXIO;
+	if (cmd == DMA_PAUSE) {
+		spin_lock_irqsave(&dwc->lock, flags);
 
-	/*
-	 * This is only called when something went wrong elsewhere, so
-	 * we don't really care about the data. Just disable the
-	 * channel. We still have to poll the channel enable bit due
-	 * to AHB/HSB limitations.
-	 */
-	spin_lock_bh(&dwc->lock);
+		cfglo = channel_readl(dwc, CFG_LO);
+		channel_writel(dwc, CFG_LO, cfglo | DWC_CFGL_CH_SUSP);
+		while (!(channel_readl(dwc, CFG_LO) & DWC_CFGL_FIFO_EMPTY))
+			cpu_relax();
 
-	channel_clear_bit(dw, CH_EN, dwc->mask);
+		dwc->paused = true;
+		spin_unlock_irqrestore(&dwc->lock, flags);
+	} else if (cmd == DMA_RESUME) {
+		if (!dwc->paused)
+			return 0;
 
-	while (dma_readl(dw, CH_EN) & dwc->mask)
-		cpu_relax();
+		spin_lock_irqsave(&dwc->lock, flags);
 
-	/* active_list entries will end up before queued entries */
-	list_splice_init(&dwc->queue, &list);
-	list_splice_init(&dwc->active_list, &list);
+		cfglo = channel_readl(dwc, CFG_LO);
+		channel_writel(dwc, CFG_LO, cfglo & ~DWC_CFGL_CH_SUSP);
+		dwc->paused = false;
 
-	spin_unlock_bh(&dwc->lock);
+		spin_unlock_irqrestore(&dwc->lock, flags);
+	} else if (cmd == DMA_TERMINATE_ALL) {
+		spin_lock_irqsave(&dwc->lock, flags);
 
-	/* Flush all pending and queued descriptors */
-	list_for_each_entry_safe(desc, _desc, &list, desc_node)
-		dwc_descriptor_complete(dwc, desc);
+		channel_clear_bit(dw, CH_EN, dwc->mask);
+		while (dma_readl(dw, CH_EN) & dwc->mask)
+			cpu_relax();
+
+		dwc->paused = false;
+
+		/* active_list entries will end up before queued entries */
+		list_splice_init(&dwc->queue, &list);
+		list_splice_init(&dwc->active_list, &list);
+
+		spin_unlock_irqrestore(&dwc->lock, flags);
+
+		/* Flush all pending and queued descriptors */
+		list_for_each_entry_safe(desc, _desc, &list, desc_node)
+			dwc_descriptor_complete(dwc, desc, false);
+	} else
+		return -ENXIO;
 
 	return 0;
 }
@@ -846,9 +926,7 @@ dwc_tx_status(struct dma_chan *chan,
 
 	ret = dma_async_is_complete(cookie, last_complete, last_used);
 	if (ret != DMA_SUCCESS) {
-		spin_lock_bh(&dwc->lock);
 		dwc_scan_descriptors(to_dw_dma(chan->device), dwc);
-		spin_unlock_bh(&dwc->lock);
 
 		last_complete = dwc->completed;
 		last_used = chan->cookie;
@@ -856,7 +934,14 @@ dwc_tx_status(struct dma_chan *chan,
 		ret = dma_async_is_complete(cookie, last_complete, last_used);
 	}
 
-	dma_set_tx_state(txstate, last_complete, last_used, 0);
+	if (ret != DMA_SUCCESS)
+		dma_set_tx_state(txstate, last_complete, last_used,
+				dwc_first_active(dwc)->len);
+	else
+		dma_set_tx_state(txstate, last_complete, last_used, 0);
+
+	if (dwc->paused)
+		return DMA_PAUSED;
 
 	return ret;
 }
@@ -865,10 +950,8 @@ static void dwc_issue_pending(struct dma_chan *chan)
 {
 	struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
 
-	spin_lock_bh(&dwc->lock);
 	if (!list_empty(&dwc->queue))
 		dwc_scan_descriptors(to_dw_dma(chan->device), dwc);
-	spin_unlock_bh(&dwc->lock);
 }
 
 static int dwc_alloc_chan_resources(struct dma_chan *chan)
@@ -880,6 +963,7 @@ static int dwc_alloc_chan_resources(struct dma_chan *chan)
 	int i;
 	u32 cfghi;
 	u32 cfglo;
+	unsigned long flags;
 
 	dev_vdbg(chan2dev(chan), "alloc_chan_resources\n");
 
@@ -917,16 +1001,16 @@ static int dwc_alloc_chan_resources(struct dma_chan *chan)
 	 * doesn't mean what you think it means), and status writeback.
 	 */
 
-	spin_lock_bh(&dwc->lock);
+	spin_lock_irqsave(&dwc->lock, flags);
 	i = dwc->descs_allocated;
 	while (dwc->descs_allocated < NR_DESCS_PER_CHANNEL) {
-		spin_unlock_bh(&dwc->lock);
+		spin_unlock_irqrestore(&dwc->lock, flags);
 
 		desc = kzalloc(sizeof(struct dw_desc), GFP_KERNEL);
 		if (!desc) {
 			dev_info(chan2dev(chan),
 				"only allocated %d descriptors\n", i);
-			spin_lock_bh(&dwc->lock);
+			spin_lock_irqsave(&dwc->lock, flags);
 			break;
 		}
 
@@ -938,7 +1022,7 @@ static int dwc_alloc_chan_resources(struct dma_chan *chan)
 				sizeof(desc->lli), DMA_TO_DEVICE);
 		dwc_desc_put(dwc, desc);
 
-		spin_lock_bh(&dwc->lock);
+		spin_lock_irqsave(&dwc->lock, flags);
 		i = ++dwc->descs_allocated;
 	}
 
@@ -947,7 +1031,7 @@ static int dwc_alloc_chan_resources(struct dma_chan *chan)
 	channel_set_bit(dw, MASK.BLOCK, dwc->mask);
 	channel_set_bit(dw, MASK.ERROR, dwc->mask);
 
-	spin_unlock_bh(&dwc->lock);
+	spin_unlock_irqrestore(&dwc->lock, flags);
 
 	dev_dbg(chan2dev(chan),
 		"alloc_chan_resources allocated %d descriptors\n", i);
@@ -960,6 +1044,7 @@ static void dwc_free_chan_resources(struct dma_chan *chan)
 	struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
 	struct dw_dma *dw = to_dw_dma(chan->device);
 	struct dw_desc *desc, *_desc;
+	unsigned long flags;
 	LIST_HEAD(list);
 
 	dev_dbg(chan2dev(chan), "free_chan_resources (descs allocated=%u)\n",
@@ -970,7 +1055,7 @@ static void dwc_free_chan_resources(struct dma_chan *chan)
 	BUG_ON(!list_empty(&dwc->queue));
 	BUG_ON(dma_readl(to_dw_dma(chan->device), CH_EN) & dwc->mask);
 
-	spin_lock_bh(&dwc->lock);
+	spin_lock_irqsave(&dwc->lock, flags);
 	list_splice_init(&dwc->free_list, &list);
 	dwc->descs_allocated = 0;
 
@@ -979,7 +1064,7 @@ static void dwc_free_chan_resources(struct dma_chan *chan)
 	channel_clear_bit(dw, MASK.BLOCK, dwc->mask);
 	channel_clear_bit(dw, MASK.ERROR, dwc->mask);
 
-	spin_unlock_bh(&dwc->lock);
+	spin_unlock_irqrestore(&dwc->lock, flags);
 
 	list_for_each_entry_safe(desc, _desc, &list, desc_node) {
 		dev_vdbg(chan2dev(chan), " freeing descriptor %p\n", desc);
@@ -1004,13 +1089,14 @@ int dw_dma_cyclic_start(struct dma_chan *chan)
 {
 	struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
 	struct dw_dma *dw = to_dw_dma(dwc->chan.device);
+	unsigned long flags;
 
 	if (!test_bit(DW_DMA_IS_CYCLIC, &dwc->flags)) {
 		dev_err(chan2dev(&dwc->chan), "missing prep for cyclic DMA\n");
 		return -ENODEV;
 	}
 
-	spin_lock(&dwc->lock);
+	spin_lock_irqsave(&dwc->lock, flags);
 
 	/* assert channel is idle */
 	if (dma_readl(dw, CH_EN) & dwc->mask) {
@@ -1023,7 +1109,7 @@ int dw_dma_cyclic_start(struct dma_chan *chan)
 			channel_readl(dwc, LLP),
 			channel_readl(dwc, CTL_HI),
 			channel_readl(dwc, CTL_LO));
-		spin_unlock(&dwc->lock);
+		spin_unlock_irqrestore(&dwc->lock, flags);
 		return -EBUSY;
 	}
 
@@ -1038,7 +1124,7 @@ int dw_dma_cyclic_start(struct dma_chan *chan)
 
 	channel_set_bit(dw, CH_EN, dwc->mask);
 
-	spin_unlock(&dwc->lock);
+	spin_unlock_irqrestore(&dwc->lock, flags);
 
 	return 0;
 }
@@ -1054,14 +1140,15 @@ void dw_dma_cyclic_stop(struct dma_chan *chan)
 {
 	struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
 	struct dw_dma *dw = to_dw_dma(dwc->chan.device);
+	unsigned long flags;
 
-	spin_lock(&dwc->lock);
+	spin_lock_irqsave(&dwc->lock, flags);
 
 	channel_clear_bit(dw, CH_EN, dwc->mask);
 	while (dma_readl(dw, CH_EN) & dwc->mask)
 		cpu_relax();
 
-	spin_unlock(&dwc->lock);
+	spin_unlock_irqrestore(&dwc->lock, flags);
 }
 EXPORT_SYMBOL(dw_dma_cyclic_stop);
 
@@ -1090,17 +1177,18 @@ struct dw_cyclic_desc *dw_dma_cyclic_prep(struct dma_chan *chan,
 	unsigned int reg_width;
 	unsigned int periods;
 	unsigned int i;
+	unsigned long flags;
 
-	spin_lock_bh(&dwc->lock);
+	spin_lock_irqsave(&dwc->lock, flags);
 	if (!list_empty(&dwc->queue) || !list_empty(&dwc->active_list)) {
-		spin_unlock_bh(&dwc->lock);
+		spin_unlock_irqrestore(&dwc->lock, flags);
 		dev_dbg(chan2dev(&dwc->chan),
 				"queue and/or active list are not empty\n");
 		return ERR_PTR(-EBUSY);
 	}
 
 	was_cyclic = test_and_set_bit(DW_DMA_IS_CYCLIC, &dwc->flags);
-	spin_unlock_bh(&dwc->lock);
+	spin_unlock_irqrestore(&dwc->lock, flags);
 	if (was_cyclic) {
 		dev_dbg(chan2dev(&dwc->chan),
 				"channel already prepared for cyclic DMA\n");
@@ -1214,13 +1302,14 @@ void dw_dma_cyclic_free(struct dma_chan *chan)
 	struct dw_dma *dw = to_dw_dma(dwc->chan.device);
 	struct dw_cyclic_desc *cdesc = dwc->cdesc;
 	int i;
+	unsigned long flags;
 
 	dev_dbg(chan2dev(&dwc->chan), "cyclic free\n");
 
 	if (!cdesc)
 		return;
 
-	spin_lock_bh(&dwc->lock);
+	spin_lock_irqsave(&dwc->lock, flags);
 
 	channel_clear_bit(dw, CH_EN, dwc->mask);
 	while (dma_readl(dw, CH_EN) & dwc->mask)
@@ -1230,7 +1319,7 @@ void dw_dma_cyclic_free(struct dma_chan *chan)
 	dma_writel(dw, CLEAR.ERROR, dwc->mask);
 	dma_writel(dw, CLEAR.XFER, dwc->mask);
 
-	spin_unlock_bh(&dwc->lock);
+	spin_unlock_irqrestore(&dwc->lock, flags);
 
 	for (i = 0; i < cdesc->periods; i++)
 		dwc_desc_put(dwc, cdesc->desc[i]);
@@ -1487,3 +1576,4 @@ module_exit(dw_exit);
 MODULE_LICENSE("GPL v2");
 MODULE_DESCRIPTION("Synopsys DesignWare DMA Controller driver");
 MODULE_AUTHOR("Haavard Skinnemoen <haavard.skinnemoen@atmel.com>");
+MODULE_AUTHOR("Viresh Kumar <viresh.kumar@st.com>");