@@ -413,6 +413,17 @@ void dma_chan_cleanup(struct kref *kref);
 typedef bool (*dma_filter_fn)(struct dma_chan *chan, void *filter_param);
 
 typedef void (*dma_async_tx_callback)(void *dma_async_param);
+
+struct dmaengine_unmap_data {
+	u8 to_cnt;
+	u8 from_cnt;
+	u8 bidi_cnt;
+	struct device *dev;
+	struct kref kref;
+	size_t len;
+	dma_addr_t addr[0];
+};
+
 /**
  * struct dma_async_tx_descriptor - async transaction descriptor
  * ---dma generic offload fields---
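The struct added above ends in a zero-length addr[] array, so the bookkeeping fields and the to_cnt + from_cnt + bidi_cnt DMA addresses share a single allocation whose size depends on the number of addresses. As a rough illustration of that layout, the sketch below sizes and initializes such an object by hand; the alloc_unmap() helper and its plain kzalloc() are hypothetical and not part of the patch:

	#include <linux/dmaengine.h>
	#include <linux/slab.h>

	/* Hypothetical sketch: make room for 'nr' trailing DMA addresses and
	 * take the initial kref reference on behalf of the caller. */
	static struct dmaengine_unmap_data *alloc_unmap(struct device *dev, int nr,
							gfp_t flags)
	{
		struct dmaengine_unmap_data *unmap;

		unmap = kzalloc(sizeof(*unmap) + nr * sizeof(dma_addr_t), flags);
		if (!unmap)
			return NULL;

		kref_init(&unmap->kref);	/* dropped by the final put */
		unmap->dev = dev;
		return unmap;
	}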
@@ -438,6 +449,7 @@ struct dma_async_tx_descriptor {
 	dma_cookie_t (*tx_submit)(struct dma_async_tx_descriptor *tx);
 	dma_async_tx_callback callback;
 	void *callback_param;
+	struct dmaengine_unmap_data *unmap;
 #ifdef CONFIG_ASYNC_TX_ENABLE_CHANNEL_SWITCH
 	struct dma_async_tx_descriptor *next;
 	struct dma_async_tx_descriptor *parent;
@@ -445,6 +457,20 @@ struct dma_async_tx_descriptor {
 #endif
 };
 
+static inline void dma_set_unmap(struct dma_async_tx_descriptor *tx,
+				 struct dmaengine_unmap_data *unmap)
+{
+	kref_get(&unmap->kref);
+	tx->unmap = unmap;
+}
+
+static inline void dma_descriptor_unmap(struct dma_async_tx_descriptor *tx)
+{
+	if (tx->unmap) {
+		tx->unmap = NULL;
+	}
+}
+
 #ifndef CONFIG_ASYNC_TX_ENABLE_CHANNEL_SWITCH
 static inline void txd_lock(struct dma_async_tx_descriptor *txd)
 {
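To show how the new fields are meant to be used: dma_set_unmap() above takes a kref reference on behalf of the descriptor, and the side that retires the descriptor is expected to drop that reference again, which the mainline header handles by pairing it with dmaengine_unmap_put(). The client-side sketch below follows the pattern used by in-tree dmaengine clients; dmaengine_get_unmap_data() and dmaengine_unmap_put() are assumed to be available as in mainline, the issue_copy() helper is made up for illustration, and error handling is trimmed to the essentials:

	#include <linux/dma-mapping.h>
	#include <linux/dmaengine.h>

	/* Sketch only: map one source and one destination page, attach the
	 * unmap data to a memcpy descriptor, then drop the client's reference
	 * so the descriptor's reference is the last one standing. */
	static int issue_copy(struct dma_chan *chan, struct page *dst,
			      struct page *src, size_t len)
	{
		struct device *dev = chan->device->dev;
		struct dma_async_tx_descriptor *tx;
		struct dmaengine_unmap_data *unmap;

		unmap = dmaengine_get_unmap_data(dev, 2, GFP_NOWAIT);
		if (!unmap)
			return -ENOMEM;

		unmap->len = len;
		unmap->addr[0] = dma_map_page(dev, src, 0, len, DMA_TO_DEVICE);
		unmap->to_cnt = 1;
		unmap->addr[1] = dma_map_page(dev, dst, 0, len, DMA_FROM_DEVICE);
		unmap->from_cnt = 1;

		tx = chan->device->device_prep_dma_memcpy(chan, unmap->addr[1],
							   unmap->addr[0], len, 0);
		if (tx)
			dma_set_unmap(tx, unmap);	/* descriptor takes its own reference */

		dmaengine_unmap_put(unmap);		/* drop the reference from the get */
		if (!tx)
			return -ENOMEM;

		return dma_submit_error(dmaengine_submit(tx)) ? -EIO : 0;
	}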