@@ -1863,108 +1863,19 @@ static struct dma_async_tx_descriptor *d40_prep_memcpy(struct dma_chan *chan,
 						       size_t size,
 						       unsigned long dma_flags)
 {
-	struct d40_desc *d40d;
-	struct d40_chan *d40c = container_of(chan, struct d40_chan,
-					     chan);
-	unsigned long flags;
-
-	if (d40c->phy_chan == NULL) {
-		chan_err(d40c, "Channel is not allocated.\n");
-		return ERR_PTR(-EINVAL);
-	}
-
-	spin_lock_irqsave(&d40c->lock, flags);
-	d40d = d40_desc_get(d40c);
-
-	if (d40d == NULL) {
-		chan_err(d40c, "Descriptor is NULL\n");
-		goto err;
-	}
-
-	d40d->txd.flags = dma_flags;
-	d40d->lli_len = d40_size_2_dmalen(size,
-					  d40c->dma_cfg.src_info.data_width,
-					  d40c->dma_cfg.dst_info.data_width);
-	if (d40d->lli_len < 0) {
-		chan_err(d40c, "Unaligned size\n");
-		goto err;
-	}
-
-
-	dma_async_tx_descriptor_init(&d40d->txd, chan);
-
-	d40d->txd.tx_submit = d40_tx_submit;
-
-	if (chan_is_logical(d40c)) {
-
-		if (d40_pool_lli_alloc(d40c,d40d, d40d->lli_len, true) < 0) {
-			chan_err(d40c, "Out of memory\n");
-			goto err;
-		}
-		d40d->lli_current = 0;
-
-		if (d40_log_buf_to_lli(d40d->lli_log.src,
-				       src,
-				       size,
-				       d40c->log_def.lcsp1,
-				       d40c->dma_cfg.src_info.data_width,
-				       d40c->dma_cfg.dst_info.data_width,
-				       true) == NULL)
-			goto err;
-
-		if (d40_log_buf_to_lli(d40d->lli_log.dst,
-				       dst,
-				       size,
-				       d40c->log_def.lcsp3,
-				       d40c->dma_cfg.dst_info.data_width,
-				       d40c->dma_cfg.src_info.data_width,
-				       true) == NULL)
-			goto err;
-
-	} else {
-
-		if (d40_pool_lli_alloc(d40c, d40d, d40d->lli_len, false) < 0) {
-			chan_err(d40c, "Out of memory\n");
-			goto err;
-		}
-
-		if (d40_phy_buf_to_lli(d40d->lli_phy.src,
-				       src,
-				       size,
-				       d40c->dma_cfg.src_info.psize,
-				       0,
-				       d40c->src_def_cfg,
-				       true,
-				       d40c->dma_cfg.src_info.data_width,
-				       d40c->dma_cfg.dst_info.data_width,
-				       false) == NULL)
-			goto err;
+	struct scatterlist dst_sg;
+	struct scatterlist src_sg;
 
-		if (d40_phy_buf_to_lli(d40d->lli_phy.dst,
-				       dst,
-				       size,
-				       d40c->dma_cfg.dst_info.psize,
-				       0,
-				       d40c->dst_def_cfg,
-				       true,
-				       d40c->dma_cfg.dst_info.data_width,
-				       d40c->dma_cfg.src_info.data_width,
-				       false) == NULL)
-			goto err;
+	sg_init_table(&dst_sg, 1);
+	sg_init_table(&src_sg, 1);
 
-		dma_sync_single_for_device(d40c->base->dev,
-					   d40d->lli_pool.dma_addr,
-					   d40d->lli_pool.size, DMA_TO_DEVICE);
-	}
+	sg_dma_address(&dst_sg) = dst;
+	sg_dma_address(&src_sg) = src;
 
-	spin_unlock_irqrestore(&d40c->lock, flags);
-	return &d40d->txd;
+	sg_dma_len(&dst_sg) = size;
+	sg_dma_len(&src_sg) = size;
 
-err:
-	if (d40d)
-		d40_desc_free(d40c, d40d);
-	spin_unlock_irqrestore(&d40c->lock, flags);
-	return NULL;
+	return stedma40_memcpy_sg(chan, &dst_sg, &src_sg, 1, dma_flags);
 }
 
 static struct dma_async_tx_descriptor *