@@ -437,17 +437,18 @@ static void dma_irq_handle_channel(struct imxdma_channel *imxdmac)
 	struct imxdma_engine *imxdma = imxdmac->imxdma;
 	int chno = imxdmac->channel;
 	struct imxdma_desc *desc;
+	unsigned long flags;
 
-	spin_lock(&imxdma->lock);
+	spin_lock_irqsave(&imxdma->lock, flags);
 	if (list_empty(&imxdmac->ld_active)) {
-		spin_unlock(&imxdma->lock);
+		spin_unlock_irqrestore(&imxdma->lock, flags);
 		goto out;
 	}
 
 	desc = list_first_entry(&imxdmac->ld_active,
 				struct imxdma_desc,
 				node);
-	spin_unlock(&imxdma->lock);
+	spin_unlock_irqrestore(&imxdma->lock, flags);
 
 	if (desc->sg) {
 		u32 tmp;
@@ -519,7 +520,6 @@ static int imxdma_xfer_desc(struct imxdma_desc *d)
 {
 	struct imxdma_channel *imxdmac = to_imxdma_chan(d->desc.chan);
 	struct imxdma_engine *imxdma = imxdmac->imxdma;
-	unsigned long flags;
 	int slot = -1;
 	int i;
 
@@ -527,7 +527,6 @@ static int imxdma_xfer_desc(struct imxdma_desc *d)
 	switch (d->type) {
 	case IMXDMA_DESC_INTERLEAVED:
 		/* Try to get a free 2D slot */
-		spin_lock_irqsave(&imxdma->lock, flags);
 		for (i = 0; i < IMX_DMA_2D_SLOTS; i++) {
 			if ((imxdma->slots_2d[i].count > 0) &&
 			    ((imxdma->slots_2d[i].xsr != d->x) ||
@@ -537,10 +536,8 @@ static int imxdma_xfer_desc(struct imxdma_desc *d)
 			slot = i;
 			break;
 		}
-		if (slot < 0) {
-			spin_unlock_irqrestore(&imxdma->lock, flags);
+		if (slot < 0)
 			return -EBUSY;
-		}
 
 		imxdma->slots_2d[slot].xsr = d->x;
 		imxdma->slots_2d[slot].ysr = d->y;
@@ -549,7 +546,6 @@ static int imxdma_xfer_desc(struct imxdma_desc *d)
 
 		imxdmac->slot_2d = slot;
 		imxdmac->enabled_2d = true;
-		spin_unlock_irqrestore(&imxdma->lock, flags);
 
 		if (slot == IMX_DMA_2D_SLOT_A) {
 			d->config_mem &= ~CCR_MSEL_B;
@@ -625,18 +621,17 @@ static void imxdma_tasklet(unsigned long data)
 	struct imxdma_channel *imxdmac = (void *)data;
 	struct imxdma_engine *imxdma = imxdmac->imxdma;
 	struct imxdma_desc *desc;
+	unsigned long flags;
 
-	spin_lock(&imxdma->lock);
+	spin_lock_irqsave(&imxdma->lock, flags);
 
 	if (list_empty(&imxdmac->ld_active)) {
 		/* Someone might have called terminate all */
-		goto out;
+		spin_unlock_irqrestore(&imxdma->lock, flags);
+		return;
 	}
 	desc = list_first_entry(&imxdmac->ld_active, struct imxdma_desc, node);
 
-	if (desc->desc.callback)
-		desc->desc.callback(desc->desc.callback_param);
-
 	/* If we are dealing with a cyclic descriptor, keep it on ld_active
 	 * and dont mark the descriptor as complete.
 	 * Only in non-cyclic cases it would be marked as complete
@@ -663,7 +658,11 @@ static void imxdma_tasklet(unsigned long data)
 			 __func__, imxdmac->channel);
 	}
 out:
-	spin_unlock(&imxdma->lock);
+	spin_unlock_irqrestore(&imxdma->lock, flags);
+
+	if (desc->desc.callback)
+		desc->desc.callback(desc->desc.callback_param);
+
 }
 
 static int imxdma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
@@ -883,7 +882,7 @@ static struct dma_async_tx_descriptor *imxdma_prep_dma_cyclic(
 		kfree(imxdmac->sg_list);
 
 	imxdmac->sg_list = kcalloc(periods + 1,
-			sizeof(struct scatterlist), GFP_KERNEL);
+			sizeof(struct scatterlist), GFP_ATOMIC);
 	if (!imxdmac->sg_list)
 		return NULL;
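
For reference, below is a minimal standalone sketch of the two patterns this patch applies; it is not the driver itself, and all names (demo_chan, demo_complete, demo_alloc_sg) are illustrative stand-ins:

#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/scatterlist.h>
#include <linux/errno.h>

/* Illustrative stand-in for the driver's per-channel state. */
struct demo_chan {
	spinlock_t lock;
	void (*callback)(void *param);	/* client completion callback */
	void *callback_param;
	struct scatterlist *sg_list;
};

/*
 * Pattern 1: code that can run in hard-IRQ or tasklet context takes
 * the lock with the irqsave/irqrestore variants, and the client's
 * completion callback is invoked only after the lock is dropped,
 * because the callback may re-enter the driver and take the same lock.
 */
static void demo_complete(struct demo_chan *c)
{
	void (*cb)(void *param);
	void *param;
	unsigned long flags;

	spin_lock_irqsave(&c->lock, flags);
	cb = c->callback;		/* snapshot state under the lock... */
	param = c->callback_param;
	spin_unlock_irqrestore(&c->lock, flags);

	if (cb)
		cb(param);		/* ...but call it with the lock dropped */
}

/*
 * Pattern 2: allocations reachable from atomic context must not sleep,
 * hence GFP_ATOMIC rather than GFP_KERNEL (dmaengine prep calls such as
 * device_prep_dma_cyclic may be issued from a completion callback).
 */
static int demo_alloc_sg(struct demo_chan *c, unsigned int periods)
{
	c->sg_list = kcalloc(periods + 1, sizeof(*c->sg_list), GFP_ATOMIC);
	return c->sg_list ? 0 : -ENOMEM;
}

The snapshot-then-call shape in demo_complete() mirrors imxdma_tasklet() after the patch: the descriptor is picked off ld_active under imxdma->lock, but desc->desc.callback runs only once the lock has been released, so a callback that resubmits work and re-takes the lock cannot deadlock against the tasklet.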