|
@@ -370,6 +370,33 @@ struct dma_slave_config {
|
|
|
unsigned int slave_id;
|
|
|
};
|
|
|
|
|
|
+/**
+ * struct dma_slave_caps - expose capabilities of a slave channel only
+ *
+ * @src_addr_widths: bit mask of src addr widths the channel supports
+ * @dstn_addr_widths: bit mask of dstn addr widths the channel supports
+ * @directions: bit mask of slave directions the channel supports;
+ *	since enum dma_transfer_direction is not defined as bits for each
+ *	type of direction, the dma controller should fill (1 << <TYPE>) and
+ *	the same should be checked by controller as well
+ * @cmd_pause: true, if pause and thereby resume is supported
+ * @cmd_terminate: true, if terminate cmd is supported
+ * @max_sg_nr: maximum number of SG segments supported, 0 for no maximum
+ * @max_sg_len: maximum length of a SG segment supported, 0 for no maximum
+ */
+struct dma_slave_caps {
+	u32 src_addr_widths;
+	u32 dstn_addr_widths;
+	u32 directions;
+	bool cmd_pause;
+	bool cmd_terminate;
+
+	u32 max_sg_nr;
+	u32 max_sg_len;
+};
|
|
|
+
|
|
|
static inline const char *dma_chan_name(struct dma_chan *chan)
|
|
|
{
|
|
|
return dev_name(&chan->dev->device);
|
|
@@ -532,6 +559,7 @@ struct dma_tx_state {
|
|
|
* struct with auxiliary transfer status information, otherwise the call
|
|
|
* will just return a simple status code
|
|
|
* @device_issue_pending: push pending transactions to hardware
|
|
|
+ * @device_slave_caps: return the slave channel capabilities
|
|
|
*/
|
|
|
struct dma_device {
|
|
|
|
|
@@ -597,6 +625,7 @@ struct dma_device {
|
|
|
dma_cookie_t cookie,
|
|
|
struct dma_tx_state *txstate);
|
|
|
void (*device_issue_pending)(struct dma_chan *chan);
|
|
|
+ int (*device_slave_caps)(struct dma_chan *chan, struct dma_slave_caps *caps);
|
|
|
};
|
|
|
|
|
|
static inline int dmaengine_device_control(struct dma_chan *chan,
|
|
@@ -670,6 +699,21 @@ static inline struct dma_async_tx_descriptor *dmaengine_prep_interleaved_dma(
|
|
|
return chan->device->device_prep_interleaved_dma(chan, xt, flags);
|
|
|
}
|
|
|
|
|
|
+/**
+ * dma_get_slave_caps - query the capabilities of a slave channel
+ * @chan: channel to be queried
+ * @caps: filled in by the dma controller driver's callback on success
+ *
+ * Returns 0 on success (with @caps populated by the driver), -EINVAL on
+ * NULL arguments, or -ENXIO when the channel is not slave-capable or the
+ * driver does not implement the capability query.
+ */
+static inline int dma_get_slave_caps(struct dma_chan *chan, struct dma_slave_caps *caps)
+{
+	if (!chan || !caps)
+		return -EINVAL;
+
+	/* check if the channel supports slave transactions */
+	if (!test_bit(DMA_SLAVE, chan->device->cap_mask.bits))
+		return -ENXIO;
+
+	/* capability reporting is optional; delegate to the driver if present */
+	if (chan->device->device_slave_caps)
+		return chan->device->device_slave_caps(chan, caps);
+
+	return -ENXIO;
+}
|
|
|
+
|
|
|
static inline int dmaengine_terminate_all(struct dma_chan *chan)
|
|
|
{
|
|
|
return dmaengine_device_control(chan, DMA_TERMINATE_ALL, 0);
|