@@ -109,7 +109,7 @@ static void sirfsoc_dma_execute(struct sirfsoc_dma_chan *schan)
 	sdesc = list_first_entry(&schan->queued, struct sirfsoc_dma_desc,
 		node);
 	/* Move the first queued descriptor to active list */
-	list_move_tail(&schan->queued, &schan->active);
+	list_move_tail(&sdesc->node, &schan->active);
 
 	/* Start the DMA transfer */
 	writel_relaxed(sdesc->width, sdma->base + SIRFSOC_DMA_WIDTH_0 +
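
This first hunk fixes a list-corruption bug: the old code passed &schan->queued, the list HEAD, to list_move_tail() rather than the descriptor's own &sdesc->node, so the head itself was spliced onto the active list and both lists were left corrupted. Note that list_first_entry() only reads the first entry; it does not unlink it, which is why the explicit move is needed. Below is a minimal userspace sketch of the semantics involved; the helpers mirror the kernel's <linux/list.h> API in name only and are re-implemented here purely for illustration.

#include <stdio.h>

struct list_head { struct list_head *next, *prev; };

static void INIT_LIST_HEAD(struct list_head *h) { h->next = h->prev = h; }

static void list_add_tail(struct list_head *e, struct list_head *head)
{
	e->prev = head->prev;
	e->next = head;
	head->prev->next = e;
	head->prev = e;
}

/* Kernel semantics: unlink @entry from its list, append it before @head. */
static void list_move_tail(struct list_head *entry, struct list_head *head)
{
	entry->prev->next = entry->next;	/* __list_del(entry) */
	entry->next->prev = entry->prev;
	list_add_tail(entry, head);
}

int main(void)
{
	struct list_head queued, active, node;

	INIT_LIST_HEAD(&queued);
	INIT_LIST_HEAD(&active);
	list_add_tail(&node, &queued);		/* one queued descriptor */

	/* Correct form, as in the fix: move the descriptor's node. */
	list_move_tail(&node, &active);

	/* Passing &queued here instead (what the old code did) would
	 * unlink and re-link the HEAD itself, leaving node behind on a
	 * list whose links now dangle through the moved head. */
	printf("queued empty: %d, node now on active: %d\n",
	       queued.next == &queued, active.next == &node);
	return 0;
}
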
@@ -428,7 +428,7 @@ static struct dma_async_tx_descriptor *sirfsoc_dma_prep_interleaved(
 	unsigned long iflags;
 	int ret;
 
-	if ((xt->dir != DMA_MEM_TO_DEV) || (xt->dir != DMA_DEV_TO_MEM)) {
+	if ((xt->dir != DMA_MEM_TO_DEV) && (xt->dir != DMA_DEV_TO_MEM)) {
		ret = -EINVAL;
		goto err_dir;
	}
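
This second hunk fixes an always-true condition: xt->dir can never equal both DMA_MEM_TO_DEV and DMA_DEV_TO_MEM at once, so with ||, at least one inequality always holds and every interleaved request was rejected with -EINVAL. By De Morgan's laws, the intended "neither supported direction" test is the conjunction of the two inequalities. A standalone sketch follows; the enum values are placeholders for illustration, not the real definitions from <linux/dmaengine.h>.

#include <stdio.h>

/* Placeholder values for illustration only; see enum
 * dma_transfer_direction in <linux/dmaengine.h> for the real ones. */
enum { DMA_MEM_TO_DEV = 1, DMA_DEV_TO_MEM = 2 };

int main(void)
{
	for (int dir = 0; dir <= 3; dir++) {
		int old = (dir != DMA_MEM_TO_DEV) || (dir != DMA_DEV_TO_MEM);
		int fix = (dir != DMA_MEM_TO_DEV) && (dir != DMA_DEV_TO_MEM);

		printf("dir=%d  old rejects: %d  fixed rejects: %d\n",
		       dir, old, fix);
	}
	return 0;
}

Running this shows the old test rejecting every value of dir, while the fixed test rejects only directions other than the two supported ones.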