@@ -14,6 +14,8 @@
 #include <linux/platform_device.h>
 #include <linux/clk.h>
 #include <linux/delay.h>
+#include <linux/pm.h>
+#include <linux/pm_runtime.h>
 #include <linux/err.h>
 #include <linux/amba/bus.h>
 
@@ -32,6 +34,9 @@
 /* Maximum iterations taken before giving up suspending a channel */
 #define D40_SUSPEND_MAX_IT 500
 
+/* Autosuspend delay, in milliseconds */
+#define DMA40_AUTOSUSPEND_DELAY	100
+
 /* Hardware requirement on LCLA alignment */
 #define LCLA_ALIGNMENT 0x40000
 
@@ -62,6 +67,55 @@ enum d40_command {
 	D40_DMA_SUSPENDED = 3
 };
 
+/*
+ * These are the registers that have to be saved and later restored
+ * when the DMA hw is powered off.
+ * TODO: Add save/restore of D40_DREG_GCC on dma40 v3 or later, if that works.
+ */
+static u32 d40_backup_regs[] = {
+	D40_DREG_LCPA,
+	D40_DREG_LCLA,
+	D40_DREG_PRMSE,
+	D40_DREG_PRMSO,
+	D40_DREG_PRMOE,
+	D40_DREG_PRMOO,
+};
+
+#define BACKUP_REGS_SZ ARRAY_SIZE(d40_backup_regs)
+
+/* TODO: Check if all these registers have to be saved/restored on dma40 v3 */
+static u32 d40_backup_regs_v3[] = {
+	D40_DREG_PSEG1,
+	D40_DREG_PSEG2,
+	D40_DREG_PSEG3,
+	D40_DREG_PSEG4,
+	D40_DREG_PCEG1,
+	D40_DREG_PCEG2,
+	D40_DREG_PCEG3,
+	D40_DREG_PCEG4,
+	D40_DREG_RSEG1,
+	D40_DREG_RSEG2,
+	D40_DREG_RSEG3,
+	D40_DREG_RSEG4,
+	D40_DREG_RCEG1,
+	D40_DREG_RCEG2,
+	D40_DREG_RCEG3,
+	D40_DREG_RCEG4,
+};
+
+#define BACKUP_REGS_SZ_V3 ARRAY_SIZE(d40_backup_regs_v3)
+
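+/* The standard channel parameter registers, saved/restored per physical channel */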
+static u32 d40_backup_regs_chan[] = {
+	D40_CHAN_REG_SSCFG,
+	D40_CHAN_REG_SSELT,
+	D40_CHAN_REG_SSPTR,
+	D40_CHAN_REG_SSLNK,
+	D40_CHAN_REG_SDCFG,
+	D40_CHAN_REG_SDELT,
+	D40_CHAN_REG_SDPTR,
+	D40_CHAN_REG_SDLNK,
+};
+
 /**
  * struct d40_lli_pool - Structure for keeping LLIs in memory
  *
@@ -96,7 +150,7 @@ struct d40_lli_pool {
  * during a transfer.
  * @node: List entry.
  * @is_in_client_list: true if the client owns this descriptor.
- * the previous one.
+ * @cyclic: true if this is a cyclic job
  *
  * This descriptor is used for both logical and physical transfers.
  */
@@ -143,6 +197,7 @@ struct d40_lcla_pool {
  * channels.
  *
  * @lock: A lock protection this entity.
+ * @reserved: True if the channel is reserved, e.g. used by the secure world.
 * @num: The physical channel number of this entity.
 * @allocated_src: Bit mapped to show which src event line's are mapped to
 * this physical channel. Can also be free or physically allocated.
@@ -152,6 +207,7 @@ struct d40_lcla_pool {
  */
 struct d40_phy_res {
 	spinlock_t lock;
+	bool	   reserved;
 	int	   num;
 	u32	   allocated_src;
 	u32	   allocated_dst;
@@ -185,7 +241,6 @@ struct d40_base;
 * @src_def_cfg: Default cfg register setting for src.
 * @dst_def_cfg: Default cfg register setting for dst.
 * @log_def: Default logical channel settings.
- * @lcla: Space for one dst src pair for logical channel transfers.
 * @lcpa: Pointer to dst and src lcpa settings.
 * @runtime_addr: runtime configured address.
 * @runtime_direction: runtime configured direction.
@@ -217,7 +272,7 @@ struct d40_chan {
 	struct d40_log_lli_full *lcpa;
 	/* Runtime reconfiguration */
 	dma_addr_t runtime_addr;
-	enum dma_data_direction runtime_direction;
+	enum dma_transfer_direction runtime_direction;
 };
 
 /**
@@ -241,6 +296,7 @@ struct d40_chan {
 * @dma_both: dma_device channels that can do both memcpy and slave transfers.
 * @dma_slave: dma_device channels that can do only do slave transfers.
 * @dma_memcpy: dma_device channels that can do only do memcpy transfers.
+ * @phy_chans: Room for all possible physical channels in system.
 * @log_chans: Room for all possible logical channels in system.
 * @lookup_log_chans: Used to map interrupt number to logical channel. Points
 * to log_chans entries.
@@ -248,12 +304,20 @@ struct d40_chan {
 * to phy_chans entries.
 * @plat_data: Pointer to provided platform_data which is the driver
 * configuration.
+ * @lcpa_regulator: Pointer to the regulator for the ESRAM bank used for lcla.
 * @phy_res: Vector containing all physical channels.
 * @lcla_pool: lcla pool settings and data.
 * @lcpa_base: The virtual mapped address of LCPA.
 * @phy_lcpa: The physical address of the LCPA.
 * @lcpa_size: The size of the LCPA area.
 * @desc_slab: cache for descriptors.
+ * @reg_val_backup: Here the values of some hardware registers are stored
+ * before the DMA is powered off. They are restored when the power is back on.
+ * @reg_val_backup_v3: Backup of registers that only exist on dma40 v3 and
+ * later.
+ * @reg_val_backup_chan: Backup data for standard channel parameter registers.
+ * @gcc_pwr_off_mask: Mask written to the GCC register at power off, keeping
+ * the clocks for reserved channels enabled.
+ * @initialized: true if the DMA has been initialized.
 */
 struct d40_base {
 	spinlock_t			 interrupt_lock;
@@ -275,6 +339,7 @@ struct d40_base {
 	struct d40_chan		       **lookup_log_chans;
 	struct d40_chan		       **lookup_phy_chans;
 	struct stedma40_platform_data	*plat_data;
+	struct regulator		*lcpa_regulator;
 	/* Physical half channels */
 	struct d40_phy_res		*phy_res;
 	struct d40_lcla_pool		 lcla_pool;
@@ -282,6 +347,11 @@ struct d40_base {
 	dma_addr_t			 phy_lcpa;
 	resource_size_t			 lcpa_size;
 	struct kmem_cache		*desc_slab;
+	u32				 reg_val_backup[BACKUP_REGS_SZ];
+	u32				 reg_val_backup_v3[BACKUP_REGS_SZ_V3];
+	u32				*reg_val_backup_chan;
+	u16				 gcc_pwr_off_mask;
+	bool				 initialized;
 };
 
 /**
@@ -479,13 +549,14 @@ static struct d40_desc *d40_desc_get(struct d40_chan *d40c)
 		struct d40_desc *d;
 		struct d40_desc *_d;
 
-		list_for_each_entry_safe(d, _d, &d40c->client, node)
+		list_for_each_entry_safe(d, _d, &d40c->client, node) {
 			if (async_tx_test_ack(&d->txd)) {
 				d40_desc_remove(d);
 				desc = d;
 				memset(desc, 0, sizeof(*desc));
 				break;
 			}
+		}
 	}
 
 	if (!desc)
@@ -536,6 +607,7 @@ static void d40_log_lli_to_lcxa(struct d40_chan *chan, struct d40_desc *desc)
 	bool cyclic = desc->cyclic;
 	int curr_lcla = -EINVAL;
 	int first_lcla = 0;
+	bool use_esram_lcla = chan->base->plat_data->use_esram_lcla;
 	bool linkback;
 
 	/*
@@ -608,11 +680,16 @@ static void d40_log_lli_to_lcxa(struct d40_chan *chan, struct d40_desc *desc)
 				       &lli->src[lli_current],
 				       next_lcla, flags);
 
-		dma_sync_single_range_for_device(chan->base->dev,
-					pool->dma_addr, lcla_offset,
-					2 * sizeof(struct d40_log_lli),
-					DMA_TO_DEVICE);
-
+		/*
+		 * Cache maintenance is not needed if lcla is
+		 * mapped in esram
+		 */
+		if (!use_esram_lcla) {
+			dma_sync_single_range_for_device(chan->base->dev,
+						pool->dma_addr, lcla_offset,
+						2 * sizeof(struct d40_log_lli),
+						DMA_TO_DEVICE);
+		}
 		curr_lcla = next_lcla;
 
 		if (curr_lcla == -EINVAL || curr_lcla == first_lcla) {
@@ -740,7 +817,61 @@ static int d40_sg_2_dmalen(struct scatterlist *sgl, int sg_len,
 	return len;
 }
 
-/* Support functions for logical channels */
+
+#ifdef CONFIG_PM
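+/*
+ * Read out (save == true) or write back (save == false) the registers
+ * listed in regaddr[0..num-1], relative to baseaddr.
+ */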
+static void dma40_backup(void __iomem *baseaddr, u32 *backup,
+			 u32 *regaddr, int num, bool save)
+{
+	int i;
+
+	for (i = 0; i < num; i++) {
+		void __iomem *addr = baseaddr + regaddr[i];
+
+		if (save)
+			backup[i] = readl_relaxed(addr);
+		else
+			writel_relaxed(backup[i], addr);
+	}
+}
+
+static void d40_save_restore_registers(struct d40_base *base, bool save)
+{
+	int i;
+
+	/* Save/Restore channel-specific registers */
+	for (i = 0; i < base->num_phy_chans; i++) {
+		void __iomem *addr;
+		int idx;
+
+		if (base->phy_res[i].reserved)
+			continue;
+
+		addr = base->virtbase + D40_DREG_PCBASE + i * D40_DREG_PCDELTA;
+		idx = i * ARRAY_SIZE(d40_backup_regs_chan);
+
+		dma40_backup(addr, &base->reg_val_backup_chan[idx],
+			     d40_backup_regs_chan,
+			     ARRAY_SIZE(d40_backup_regs_chan),
+			     save);
+	}
+
+	/* Save/Restore global registers */
+	dma40_backup(base->virtbase, base->reg_val_backup,
+		     d40_backup_regs, ARRAY_SIZE(d40_backup_regs),
+		     save);
+
+	/* Save/Restore registers that only exist on dma40 v3 and later */
+	if (base->rev >= 3)
+		dma40_backup(base->virtbase, base->reg_val_backup_v3,
+			     d40_backup_regs_v3,
+			     ARRAY_SIZE(d40_backup_regs_v3),
+			     save);
+}
+#else
+static void d40_save_restore_registers(struct d40_base *base, bool save)
+{
+}
+#endif
 
 static int d40_channel_execute_command(struct d40_chan *d40c,
 				       enum d40_command command)
@@ -973,6 +1104,10 @@ static void d40_config_write(struct d40_chan *d40c)
 		/* Set LIDX for lcla */
 		writel(lidx, chanbase + D40_CHAN_REG_SSELT);
 		writel(lidx, chanbase + D40_CHAN_REG_SDELT);
+
+		/* Clear LNK which will be used by d40_chan_has_events() */
+		writel(0, chanbase + D40_CHAN_REG_SSLNK);
+		writel(0, chanbase + D40_CHAN_REG_SDLNK);
 	}
 }
 
@@ -1013,6 +1148,7 @@ static int d40_pause(struct d40_chan *d40c)
 	if (!d40c->busy)
 		return 0;
 
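+	/* Keep the DMA powered while the suspend command executes */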
+	pm_runtime_get_sync(d40c->base->dev);
 	spin_lock_irqsave(&d40c->lock, flags);
 
 	res = d40_channel_execute_command(d40c, D40_DMA_SUSPEND_REQ);
@@ -1025,7 +1161,8 @@ static int d40_pause(struct d40_chan *d40c)
 							  D40_DMA_RUN);
 		}
 	}
-
+	pm_runtime_mark_last_busy(d40c->base->dev);
+	pm_runtime_put_autosuspend(d40c->base->dev);
 	spin_unlock_irqrestore(&d40c->lock, flags);
 	return res;
 }
@@ -1039,7 +1176,7 @@ static int d40_resume(struct d40_chan *d40c)
 		return 0;
 
 	spin_lock_irqsave(&d40c->lock, flags);
-
+	pm_runtime_get_sync(d40c->base->dev);
 	if (d40c->base->rev == 0)
 		if (chan_is_logical(d40c)) {
 			res = d40_channel_execute_command(d40c,
@@ -1057,6 +1194,8 @@ static int d40_resume(struct d40_chan *d40c)
 	}
 
 no_suspend:
+	pm_runtime_mark_last_busy(d40c->base->dev);
+	pm_runtime_put_autosuspend(d40c->base->dev);
 	spin_unlock_irqrestore(&d40c->lock, flags);
 	return res;
 }
@@ -1129,7 +1268,10 @@ static struct d40_desc *d40_queue_start(struct d40_chan *d40c)
 	d40d = d40_first_queued(d40c);
 
 	if (d40d != NULL) {
-		d40c->busy = true;
+		if (!d40c->busy)
+			d40c->busy = true;
+
+		pm_runtime_get_sync(d40c->base->dev);
 
 		/* Remove from queue */
 		d40_desc_remove(d40d);
@@ -1190,6 +1332,8 @@ static void dma_tc_handle(struct d40_chan *d40c)
 
 		if (d40_queue_start(d40c) == NULL)
 			d40c->busy = false;
+		pm_runtime_mark_last_busy(d40c->base->dev);
+		pm_runtime_put_autosuspend(d40c->base->dev);
 	}
 
 	d40c->pending_tx++;
@@ -1405,11 +1549,16 @@ static int d40_validate_conf(struct d40_chan *d40c,
 	return res;
 }
 
-static bool d40_alloc_mask_set(struct d40_phy_res *phy, bool is_src,
-			       int log_event_line, bool is_log)
+static bool d40_alloc_mask_set(struct d40_phy_res *phy,
+			       bool is_src, int log_event_line, bool is_log,
+			       bool *first_user)
 {
 	unsigned long flags;
 	spin_lock_irqsave(&phy->lock, flags);
+
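+	/* Tell the caller whether this physical channel was entirely free */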
+	*first_user = ((phy->allocated_src | phy->allocated_dst)
+			== D40_ALLOC_FREE);
+
 	if (!is_log) {
 		/* Physical interrupts are masked per physical full channel */
 		if (phy->allocated_src == D40_ALLOC_FREE &&
@@ -1490,7 +1639,7 @@ out:
 	return is_free;
 }
 
-static int d40_allocate_channel(struct d40_chan *d40c)
+static int d40_allocate_channel(struct d40_chan *d40c, bool *first_phy_user)
 {
 	int dev_type;
 	int event_group;
@@ -1526,7 +1675,8 @@ static int d40_allocate_channel(struct d40_chan *d40c)
 		for (i = 0; i < d40c->base->num_phy_chans; i++) {
 
 			if (d40_alloc_mask_set(&phys[i], is_src,
-					       0, is_log))
+					       0, is_log,
+					       first_phy_user))
 				goto found_phy;
 		}
 	} else
@@ -1536,7 +1686,8 @@ static int d40_allocate_channel(struct d40_chan *d40c)
 				if (d40_alloc_mask_set(&phys[i],
 						       is_src,
 						       0,
-						       is_log))
+						       is_log,
+						       first_phy_user))
 					goto found_phy;
 			}
 	}
@@ -1552,6 +1703,25 @@ found_phy:
 	/* Find logical channel */
 	for (j = 0; j < d40c->base->num_phy_chans; j += 8) {
 		int phy_num = j + event_group * 2;
+
+		if (d40c->dma_cfg.use_fixed_channel) {
+			i = d40c->dma_cfg.phy_channel;
+
+			if ((i != phy_num) && (i != phy_num + 1)) {
+				dev_err(chan2dev(d40c),
+					"invalid fixed phy channel %d\n", i);
+				return -EINVAL;
+			}
+
+			if (d40_alloc_mask_set(&phys[i], is_src, event_line,
+					       is_log, first_phy_user))
+				goto found_log;
+
+			dev_err(chan2dev(d40c),
+				"could not allocate fixed phy channel %d\n", i);
+			return -EINVAL;
+		}
+
 		/*
 		 * Spread logical channels across all available physical rather
 		 * than pack every logical channel at the first available phy
@@ -1560,13 +1730,15 @@ found_phy:
 		if (is_src) {
 			for (i = phy_num; i < phy_num + 2; i++) {
 				if (d40_alloc_mask_set(&phys[i], is_src,
-						       event_line, is_log))
+						       event_line, is_log,
+						       first_phy_user))
 					goto found_log;
 			}
 		} else {
 			for (i = phy_num + 1; i >= phy_num; i--) {
 				if (d40_alloc_mask_set(&phys[i], is_src,
-						       event_line, is_log))
+						       event_line, is_log,
+						       first_phy_user))
 					goto found_log;
 			}
 		}
@@ -1643,10 +1815,11 @@ static int d40_free_dma(struct d40_chan *d40c)
 		return -EINVAL;
 	}
 
+	pm_runtime_get_sync(d40c->base->dev);
 	res = d40_channel_execute_command(d40c, D40_DMA_SUSPEND_REQ);
 	if (res) {
 		chan_err(d40c, "suspend failed\n");
-		return res;
+		goto out;
 	}
 
 	if (chan_is_logical(d40c)) {
@@ -1664,13 +1837,11 @@ static int d40_free_dma(struct d40_chan *d40c)
 			if (d40_chan_has_events(d40c)) {
 				res = d40_channel_execute_command(d40c,
 								  D40_DMA_RUN);
-				if (res) {
+				if (res)
 					chan_err(d40c,
 						"Executing RUN command\n");
-					return res;
-				}
 			}
-			return 0;
+			goto out;
 		}
 	} else {
 		(void) d40_alloc_mask_free(phy, is_src, 0);
@@ -1680,13 +1851,23 @@ static int d40_free_dma(struct d40_chan *d40c)
 	res = d40_channel_execute_command(d40c, D40_DMA_STOP);
 	if (res) {
 		chan_err(d40c, "Failed to stop channel\n");
-		return res;
+		goto out;
 	}
+
+	if (d40c->busy) {
+		pm_runtime_mark_last_busy(d40c->base->dev);
+		pm_runtime_put_autosuspend(d40c->base->dev);
+	}
+
+	d40c->busy = false;
 	d40c->phy_chan = NULL;
 	d40c->configured = false;
 	d40c->base->lookup_phy_chans[phy->num] = NULL;
+out:
 
-	return 0;
+	pm_runtime_mark_last_busy(d40c->base->dev);
+	pm_runtime_put_autosuspend(d40c->base->dev);
+	return res;
 }
 
 static bool d40_is_paused(struct d40_chan *d40c)
@@ -1855,7 +2036,7 @@ err:
 }
 
 static dma_addr_t
-d40_get_dev_addr(struct d40_chan *chan, enum dma_data_direction direction)
+d40_get_dev_addr(struct d40_chan *chan, enum dma_transfer_direction direction)
 {
 	struct stedma40_platform_data *plat = chan->base->plat_data;
 	struct stedma40_chan_cfg *cfg = &chan->dma_cfg;
@@ -1864,9 +2045,9 @@ d40_get_dev_addr(struct d40_chan *chan, enum dma_data_direction direction)
 	if (chan->runtime_addr)
 		return chan->runtime_addr;
 
-	if (direction == DMA_FROM_DEVICE)
+	if (direction == DMA_DEV_TO_MEM)
 		addr = plat->dev_rx[cfg->src_dev_type];
-	else if (direction == DMA_TO_DEVICE)
+	else if (direction == DMA_MEM_TO_DEV)
 		addr = plat->dev_tx[cfg->dst_dev_type];
 
 	return addr;
@@ -1875,7 +2056,7 @@ d40_get_dev_addr(struct d40_chan *chan, enum dma_data_direction direction)
 static struct dma_async_tx_descriptor *
 d40_prep_sg(struct dma_chan *dchan, struct scatterlist *sg_src,
 	    struct scatterlist *sg_dst, unsigned int sg_len,
-	    enum dma_data_direction direction, unsigned long dma_flags)
+	    enum dma_transfer_direction direction, unsigned long dma_flags)
 {
 	struct d40_chan *chan = container_of(dchan, struct d40_chan, chan);
 	dma_addr_t src_dev_addr = 0;
@@ -1902,9 +2083,9 @@ d40_prep_sg(struct dma_chan *dchan, struct scatterlist *sg_src,
 	if (direction != DMA_NONE) {
 		dma_addr_t dev_addr = d40_get_dev_addr(chan, direction);
 
-		if (direction == DMA_FROM_DEVICE)
+		if (direction == DMA_DEV_TO_MEM)
 			src_dev_addr = dev_addr;
-		else if (direction == DMA_TO_DEVICE)
+		else if (direction == DMA_MEM_TO_DEV)
 			dst_dev_addr = dev_addr;
 	}
 
@@ -2011,14 +2192,15 @@ static int d40_alloc_chan_resources(struct dma_chan *chan)
 			goto fail;
 		}
 	}
-	is_free_phy = (d40c->phy_chan == NULL);
 
-	err = d40_allocate_channel(d40c);
+	err = d40_allocate_channel(d40c, &is_free_phy);
 	if (err) {
 		chan_err(d40c, "Failed to allocate channel\n");
+		d40c->configured = false;
 		goto fail;
 	}
 
+	pm_runtime_get_sync(d40c->base->dev);
 	/* Fill in basic CFG register values */
 	d40_phy_cfg(&d40c->dma_cfg, &d40c->src_def_cfg,
 		    &d40c->dst_def_cfg, chan_is_logical(d40c));
@@ -2038,6 +2220,12 @@ static int d40_alloc_chan_resources(struct dma_chan *chan)
 			D40_LCPA_CHAN_SIZE + D40_LCPA_CHAN_DST_DELTA;
 	}
 
+	dev_dbg(chan2dev(d40c), "allocated %s channel (phy %d%s)\n",
+		 chan_is_logical(d40c) ? "logical" : "physical",
+		 d40c->phy_chan->num,
+		 d40c->dma_cfg.use_fixed_channel ? ", fixed" : "");
+
+
 	/*
	 * Only write channel configuration to the DMA if the physical
 	 * resource is free. In case of multiple logical channels
@@ -2046,6 +2234,8 @@ static int d40_alloc_chan_resources(struct dma_chan *chan)
 	if (is_free_phy)
 		d40_config_write(d40c);
 fail:
+	pm_runtime_mark_last_busy(d40c->base->dev);
+	pm_runtime_put_autosuspend(d40c->base->dev);
 	spin_unlock_irqrestore(&d40c->lock, flags);
 	return err;
 }
@@ -2108,10 +2298,10 @@ d40_prep_memcpy_sg(struct dma_chan *chan,
 static struct dma_async_tx_descriptor *d40_prep_slave_sg(struct dma_chan *chan,
 							  struct scatterlist *sgl,
 							  unsigned int sg_len,
-							  enum dma_data_direction direction,
+							  enum dma_transfer_direction direction,
 							  unsigned long dma_flags)
 {
-	if (direction != DMA_FROM_DEVICE && direction != DMA_TO_DEVICE)
+	if (direction != DMA_DEV_TO_MEM && direction != DMA_MEM_TO_DEV)
 		return NULL;
 
 	return d40_prep_sg(chan, sgl, sgl, sg_len, direction, dma_flags);
@@ -2120,7 +2310,7 @@ static struct dma_async_tx_descriptor *d40_prep_slave_sg(struct dma_chan *chan,
 static struct dma_async_tx_descriptor *
 dma40_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t dma_addr,
 		      size_t buf_len, size_t period_len,
-		      enum dma_data_direction direction)
+		      enum dma_transfer_direction direction)
 {
 	unsigned int periods = buf_len / period_len;
 	struct dma_async_tx_descriptor *txd;
@@ -2269,7 +2459,7 @@ static int d40_set_runtime_config(struct dma_chan *chan,
 	dst_addr_width = config->dst_addr_width;
 	dst_maxburst = config->dst_maxburst;
 
-	if (config->direction == DMA_FROM_DEVICE) {
+	if (config->direction == DMA_DEV_TO_MEM) {
 		dma_addr_t dev_addr_rx =
 			d40c->base->plat_data->dev_rx[cfg->src_dev_type];
 
@@ -2292,7 +2482,7 @@ static int d40_set_runtime_config(struct dma_chan *chan,
 		if (dst_maxburst == 0)
 			dst_maxburst = src_maxburst;
 
-	} else if (config->direction == DMA_TO_DEVICE) {
+	} else if (config->direction == DMA_MEM_TO_DEV) {
 		dma_addr_t dev_addr_tx =
 			d40c->base->plat_data->dev_tx[cfg->dst_dev_type];
 
@@ -2357,7 +2547,7 @@ static int d40_set_runtime_config(struct dma_chan *chan,
 		 "configured channel %s for %s, data width %d/%d, "
 		 "maxburst %d/%d elements, LE, no flow control\n",
 		 dma_chan_name(chan),
-		 (config->direction == DMA_FROM_DEVICE) ? "RX" : "TX",
+		 (config->direction == DMA_DEV_TO_MEM) ? "RX" : "TX",
 		 src_addr_width, dst_addr_width,
 		 src_maxburst, dst_maxburst);
 
@@ -2519,6 +2709,72 @@ failure1:
 	return err;
 }
 
+/* Suspend resume functionality */
+#ifdef CONFIG_PM
+static int dma40_pm_suspend(struct device *dev)
+{
+	struct platform_device *pdev = to_platform_device(dev);
+	struct d40_base *base = platform_get_drvdata(pdev);
+	int ret = 0;
+	if (!pm_runtime_suspended(dev))
+		return -EBUSY;
+
+	if (base->lcpa_regulator)
+		ret = regulator_disable(base->lcpa_regulator);
+	return ret;
+}
+
+static int dma40_runtime_suspend(struct device *dev)
+{
+	struct platform_device *pdev = to_platform_device(dev);
+	struct d40_base *base = platform_get_drvdata(pdev);
+
+	d40_save_restore_registers(base, true);
+
+	/* Don't disable/enable clocks for v1 due to HW bugs */
+	if (base->rev != 1)
+		writel_relaxed(base->gcc_pwr_off_mask,
+			       base->virtbase + D40_DREG_GCC);
+
+	return 0;
+}
+
+static int dma40_runtime_resume(struct device *dev)
+{
+	struct platform_device *pdev = to_platform_device(dev);
+	struct d40_base *base = platform_get_drvdata(pdev);
+
+	if (base->initialized)
+		d40_save_restore_registers(base, false);
+
+	writel_relaxed(D40_DREG_GCC_ENABLE_ALL,
+		       base->virtbase + D40_DREG_GCC);
+	return 0;
+}
+
+static int dma40_resume(struct device *dev)
+{
+	struct platform_device *pdev = to_platform_device(dev);
+	struct d40_base *base = platform_get_drvdata(pdev);
+	int ret = 0;
+
+	if (base->lcpa_regulator)
+		ret = regulator_enable(base->lcpa_regulator);
+
+	return ret;
+}
+
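+/*
+ * .suspend/.resume manage the lcpa_regulator across system sleep, while
+ * .runtime_suspend/.runtime_resume save/restore the register file and
+ * gate the clocks through D40_DREG_GCC.
+ */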
+static const struct dev_pm_ops dma40_pm_ops = {
+	.suspend = dma40_pm_suspend,
+	.runtime_suspend = dma40_runtime_suspend,
+	.runtime_resume = dma40_runtime_resume,
+	.resume = dma40_resume,
+};
+#define DMA40_PM_OPS (&dma40_pm_ops)
+#else
+#define DMA40_PM_OPS NULL
+#endif
+
 /* Initialization functions. */
 
 static int __init d40_phy_res_init(struct d40_base *base)
@@ -2527,6 +2783,7 @@ static int __init d40_phy_res_init(struct d40_base *base)
 	int num_phy_chans_avail = 0;
 	u32 val[2];
 	int odd_even_bit = -2;
+	int gcc = D40_DREG_GCC_ENA;
 
 	val[0] = readl(base->virtbase + D40_DREG_PRSME);
 	val[1] = readl(base->virtbase + D40_DREG_PRSMO);
@@ -2538,9 +2795,17 @@ static int __init d40_phy_res_init(struct d40_base *base)
 			/* Mark security only channels as occupied */
 			base->phy_res[i].allocated_src = D40_ALLOC_PHY;
 			base->phy_res[i].allocated_dst = D40_ALLOC_PHY;
+			base->phy_res[i].reserved = true;
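+			/*
+			 * Keep the event-group clocks of reserved channels
+			 * enabled even when the others are gated at power off.
+			 */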
+			gcc |= D40_DREG_GCC_EVTGRP_ENA(D40_PHYS_TO_GROUP(i),
+						       D40_DREG_GCC_SRC);
+			gcc |= D40_DREG_GCC_EVTGRP_ENA(D40_PHYS_TO_GROUP(i),
+						       D40_DREG_GCC_DST);
+
+
 		} else {
 			base->phy_res[i].allocated_src = D40_ALLOC_FREE;
 			base->phy_res[i].allocated_dst = D40_ALLOC_FREE;
+			base->phy_res[i].reserved = false;
 			num_phy_chans_avail++;
 		}
 		spin_lock_init(&base->phy_res[i].lock);
@@ -2552,6 +2817,11 @@ static int __init d40_phy_res_init(struct d40_base *base)
 
 		base->phy_res[chan].allocated_src = D40_ALLOC_PHY;
 		base->phy_res[chan].allocated_dst = D40_ALLOC_PHY;
+		base->phy_res[chan].reserved = true;
+		gcc |= D40_DREG_GCC_EVTGRP_ENA(D40_PHYS_TO_GROUP(chan),
+					       D40_DREG_GCC_SRC);
+		gcc |= D40_DREG_GCC_EVTGRP_ENA(D40_PHYS_TO_GROUP(chan),
+					       D40_DREG_GCC_DST);
 		num_phy_chans_avail--;
 	}
 
@@ -2572,6 +2842,15 @@ static int __init d40_phy_res_init(struct d40_base *base)
 		val[0] = val[0] >> 2;
 	}
 
+	/*
+	 * To keep things simple, enable all clocks initially. The clocks will
+	 * be managed later, after channel allocation. The clocks for the event
+	 * lines on which reserved channels exist are not managed here.
+	 */
+	writel(D40_DREG_GCC_ENABLE_ALL, base->virtbase + D40_DREG_GCC);
+	base->gcc_pwr_off_mask = gcc;
+
 	return num_phy_chans_avail;
 }
 
@@ -2699,10 +2978,15 @@ static struct d40_base * __init d40_hw_detect_init(struct platform_device *pdev)
 		goto failure;
 	}
 
-	base->lcla_pool.alloc_map = kzalloc(num_phy_chans *
-					    sizeof(struct d40_desc *) *
-					    D40_LCLA_LINK_PER_EVENT_GRP,
+	base->reg_val_backup_chan = kmalloc(base->num_phy_chans *
+					    sizeof(d40_backup_regs_chan),
 					    GFP_KERNEL);
+	if (!base->reg_val_backup_chan)
+		goto failure;
+
+	base->lcla_pool.alloc_map =
+		kzalloc(num_phy_chans * sizeof(struct d40_desc *)
+			* D40_LCLA_LINK_PER_EVENT_GRP, GFP_KERNEL);
 	if (!base->lcla_pool.alloc_map)
 		goto failure;
 
@@ -2741,9 +3025,9 @@ failure:
 static void __init d40_hw_init(struct d40_base *base)
 {
 
-	static const struct d40_reg_val dma_init_reg[] = {
+	static struct d40_reg_val dma_init_reg[] = {
 		/* Clock every part of the DMA block from start */
-		{ .reg = D40_DREG_GCC,    .val = 0x0000ff01},
+		{ .reg = D40_DREG_GCC,    .val = D40_DREG_GCC_ENABLE_ALL},
 
 		/* Interrupts on all logical channels */
 		{ .reg = D40_DREG_LCMIS0, .val = 0xFFFFFFFF},
@@ -2943,11 +3227,31 @@ static int __init d40_probe(struct platform_device *pdev)
 		d40_err(&pdev->dev, "Failed to ioremap LCPA region\n");
 		goto failure;
 	}
+	/* If lcla has to be located in ESRAM we don't need to allocate */
+	if (base->plat_data->use_esram_lcla) {
+		res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+						   "lcla_esram");
+		if (!res) {
+			ret = -ENOENT;
+			d40_err(&pdev->dev,
+				"No \"lcla_esram\" memory resource\n");
+			goto failure;
+		}
+		base->lcla_pool.base = ioremap(res->start,
+					       resource_size(res));
+		if (!base->lcla_pool.base) {
+			ret = -ENOMEM;
+			d40_err(&pdev->dev, "Failed to ioremap LCLA region\n");
+			goto failure;
+		}
+		writel(res->start, base->virtbase + D40_DREG_LCLA);
 
-	ret = d40_lcla_allocate(base);
-	if (ret) {
-		d40_err(&pdev->dev, "Failed to allocate LCLA area\n");
-		goto failure;
+	} else {
+		ret = d40_lcla_allocate(base);
+		if (ret) {
+			d40_err(&pdev->dev, "Failed to allocate LCLA area\n");
+			goto failure;
+		}
 	}
 
 	spin_lock_init(&base->lcla_pool.lock);
@@ -2960,6 +3264,32 @@ static int __init d40_probe(struct platform_device *pdev)
 		goto failure;
 	}
 
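+	/* Runtime PM: autosuspend after DMA40_AUTOSUSPEND_DELAY ms of inactivity */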
+	pm_runtime_irq_safe(base->dev);
+	pm_runtime_set_autosuspend_delay(base->dev, DMA40_AUTOSUSPEND_DELAY);
+	pm_runtime_use_autosuspend(base->dev);
+	pm_runtime_enable(base->dev);
+	pm_runtime_resume(base->dev);
+
+	if (base->plat_data->use_esram_lcla) {
+
+		base->lcpa_regulator = regulator_get(base->dev, "lcla_esram");
+		if (IS_ERR(base->lcpa_regulator)) {
+			d40_err(&pdev->dev, "Failed to get lcpa_regulator\n");
+			base->lcpa_regulator = NULL;
+			goto failure;
+		}
+
+		ret = regulator_enable(base->lcpa_regulator);
+		if (ret) {
+			d40_err(&pdev->dev,
+				"Failed to enable lcpa_regulator\n");
+			regulator_put(base->lcpa_regulator);
+			base->lcpa_regulator = NULL;
+			goto failure;
+		}
+	}
+
+	base->initialized = true;
 	err = d40_dmaengine_init(base, num_reserved_chans);
 	if (err)
 		goto failure;
@@ -2976,6 +3306,11 @@ failure:
 		if (base->virtbase)
 			iounmap(base->virtbase);
 
+		if (base->lcla_pool.base && base->plat_data->use_esram_lcla) {
+			iounmap(base->lcla_pool.base);
+			base->lcla_pool.base = NULL;
+		}
+
 		if (base->lcla_pool.dma_addr)
 			dma_unmap_single(base->dev, base->lcla_pool.dma_addr,
 					 SZ_1K * base->num_phy_chans,
@@ -2998,6 +3333,11 @@ failure:
 			clk_put(base->clk);
 		}
 
+		if (base->lcpa_regulator) {
+			regulator_disable(base->lcpa_regulator);
+			regulator_put(base->lcpa_regulator);
+		}
+
 		kfree(base->lcla_pool.alloc_map);
 		kfree(base->lookup_log_chans);
 		kfree(base->lookup_phy_chans);
@@ -3013,6 +3353,7 @@ static struct platform_driver d40_driver = {
 	.driver = {
 		.owner = THIS_MODULE,
 		.name  = D40_NAME,
+		.pm = DMA40_PM_OPS,
 	},
 };