async_tx: make async_tx channel switching opt-in

The majority of drivers in drivers/dma/ will never establish cross
channel operation chains and do not need the extra overhead in struct
dma_async_tx_descriptor.  Make channel switching opt-in by default.

Cc: Anatolij Gustschin <agust@denx.de>
Cc: Ira Snyder <iws@ovro.caltech.edu>
Cc: Linus Walleij <linus.walleij@stericsson.com>
Cc: Saeed Bishara <saeed@marvell.com>
Signed-off-by: Dan Williams <dan.j.williams@intel.com>
Dan Williams, 14 years ago · parent commit 5fc6d897fd
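
Before the diff, a note on the mechanism: the patch inverts the sense of the old ASYNC_TX_DISABLE_CHANNEL_SWITCH symbol, so the chaining fields of struct dma_async_tx_descriptor (next, parent, lock) now only exist for drivers that explicitly select ASYNC_TX_ENABLE_CHANNEL_SWITCH. The snippet below is a minimal standalone sketch of that opt-in pattern, not kernel code: the struct, the txd_next_model() helper, and main() are hypothetical stand-ins used only to show how the #ifdef flip behaves.

/* Illustrative model of the opt-in pattern used by this patch; the types and
 * helpers are simplified stand-ins, not the real dmaengine definitions. */
#include <stdio.h>

struct example_tx_descriptor {
	int cookie;
#ifdef CONFIG_ASYNC_TX_ENABLE_CHANNEL_SWITCH
	/* Only drivers that chain operations across channels pay for these. */
	struct example_tx_descriptor *next;
	struct example_tx_descriptor *parent;
#endif
};

/* Accessors become trivial when channel switching is not selected, so
 * callers need no #ifdefs of their own. */
static inline struct example_tx_descriptor *
txd_next_model(struct example_tx_descriptor *txd)
{
#ifdef CONFIG_ASYNC_TX_ENABLE_CHANNEL_SWITCH
	return txd->next;
#else
	(void)txd;
	return NULL;	/* no cross-channel chain can exist */
#endif
}

int main(void)
{
	struct example_tx_descriptor txd = { .cookie = 1 };

	/* With the symbol unset the descriptor stays small and a chain walk
	 * terminates immediately. */
	printf("sizeof(txd) = %zu, next = %p\n",
	       sizeof(txd), (void *)txd_next_model(&txd));
	return 0;
}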

+ 5 - 2
drivers/dma/Kconfig

@@ -46,7 +46,7 @@ config INTEL_MID_DMAC
 
 	  If unsure, say N.
 
-config ASYNC_TX_DISABLE_CHANNEL_SWITCH
+config ASYNC_TX_ENABLE_CHANNEL_SWITCH
 	bool
 
 config AMBA_PL08X
@@ -62,7 +62,6 @@ config INTEL_IOATDMA
 	depends on PCI && X86
 	select DMA_ENGINE
 	select DCA
-	select ASYNC_TX_DISABLE_CHANNEL_SWITCH
 	select ASYNC_TX_DISABLE_PQ_VAL_DMA
 	select ASYNC_TX_DISABLE_XOR_VAL_DMA
 	help
@@ -77,6 +76,7 @@ config INTEL_IOP_ADMA
 	tristate "Intel IOP ADMA support"
 	depends on ARCH_IOP32X || ARCH_IOP33X || ARCH_IOP13XX
 	select DMA_ENGINE
+	select ASYNC_TX_ENABLE_CHANNEL_SWITCH
 	help
 	  Enable support for the Intel(R) IOP Series RAID engines.
 
@@ -101,6 +101,7 @@ config FSL_DMA
 	tristate "Freescale Elo and Elo Plus DMA support"
 	depends on FSL_SOC
 	select DMA_ENGINE
+	select ASYNC_TX_ENABLE_CHANNEL_SWITCH
 	---help---
 	  Enable support for the Freescale Elo and Elo Plus DMA controllers.
 	  The Elo is the DMA controller on some 82xx and 83xx parts, and the
@@ -117,6 +118,7 @@ config MV_XOR
 	bool "Marvell XOR engine support"
 	depends on PLAT_ORION
 	select DMA_ENGINE
+	select ASYNC_TX_ENABLE_CHANNEL_SWITCH
 	---help---
 	  Enable support for the Marvell XOR engine.
 
@@ -174,6 +176,7 @@ config AMCC_PPC440SPE_ADMA
 	depends on 440SPe || 440SP
 	select DMA_ENGINE
 	select ARCH_HAS_ASYNC_TX_FIND_CHANNEL
+	select ASYNC_TX_ENABLE_CHANNEL_SWITCH
 	help
 	  Enable support for the AMCC PPC440SPe RAID engines.
 

+ 2 - 2
drivers/dma/dmaengine.c

@@ -706,7 +706,7 @@ int dma_async_device_register(struct dma_device *device)
 	BUG_ON(!device->dev);
 
 	/* note: this only matters in the
-	 * CONFIG_ASYNC_TX_DISABLE_CHANNEL_SWITCH=y case
+	 * CONFIG_ASYNC_TX_ENABLE_CHANNEL_SWITCH=n case
 	 */
 	if (device_has_all_tx_types(device))
 		dma_cap_set(DMA_ASYNC_TX, device->cap_mask);
@@ -980,7 +980,7 @@ void dma_async_tx_descriptor_init(struct dma_async_tx_descriptor *tx,
 	struct dma_chan *chan)
 {
 	tx->chan = chan;
-	#ifndef CONFIG_ASYNC_TX_DISABLE_CHANNEL_SWITCH
+	#ifdef CONFIG_ASYNC_TX_ENABLE_CHANNEL_SWITCH
 	spin_lock_init(&tx->lock);
 	#endif
 }

+ 4 - 4
include/linux/dmaengine.h

@@ -321,14 +321,14 @@ struct dma_async_tx_descriptor {
 	dma_cookie_t (*tx_submit)(struct dma_async_tx_descriptor *tx);
 	dma_async_tx_callback callback;
 	void *callback_param;
-#ifndef CONFIG_ASYNC_TX_DISABLE_CHANNEL_SWITCH
+#ifdef CONFIG_ASYNC_TX_ENABLE_CHANNEL_SWITCH
 	struct dma_async_tx_descriptor *next;
 	struct dma_async_tx_descriptor *parent;
 	spinlock_t lock;
 #endif
 };
 
-#ifdef CONFIG_ASYNC_TX_DISABLE_CHANNEL_SWITCH
+#ifndef CONFIG_ASYNC_TX_ENABLE_CHANNEL_SWITCH
 static inline void txd_lock(struct dma_async_tx_descriptor *txd)
 {
 }
@@ -656,11 +656,11 @@ static inline void net_dmaengine_put(void)
 #ifdef CONFIG_ASYNC_TX_DMA
 #define async_dmaengine_get()	dmaengine_get()
 #define async_dmaengine_put()	dmaengine_put()
-#ifdef CONFIG_ASYNC_TX_DISABLE_CHANNEL_SWITCH
+#ifndef CONFIG_ASYNC_TX_ENABLE_CHANNEL_SWITCH
 #define async_dma_find_channel(type) dma_find_channel(DMA_ASYNC_TX)
 #else
 #define async_dma_find_channel(type) dma_find_channel(type)
-#endif /* CONFIG_ASYNC_TX_DISABLE_CHANNEL_SWITCH */
+#endif /* CONFIG_ASYNC_TX_ENABLE_CHANNEL_SWITCH */
 #else
 static inline void async_dmaengine_get(void)
 {
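
A usage note on the hunk above, as a hedged sketch rather than code from this patch: with CONFIG_ASYNC_TX_ENABLE_CHANNEL_SWITCH unset, async_dma_find_channel() ignores the requested operation type and always resolves to the one channel advertising DMA_ASYNC_TX, which is why no descriptor ever needs the next/parent/lock chaining state. The call site below is hypothetical (the function name and the fallback comment are invented); only async_dma_find_channel(), dma_find_channel() and the DMA_XOR/DMA_ASYNC_TX capabilities come from the header shown above.

/* Hypothetical call site: pick a channel for an XOR operation.  When channel
 * switching is disabled the macro resolves to dma_find_channel(DMA_ASYNC_TX),
 * so a chain of operations can never span two channels. */
static struct dma_chan *example_pick_xor_channel(void)
{
	struct dma_chan *chan = async_dma_find_channel(DMA_XOR);

	if (!chan)
		return NULL;	/* caller falls back to synchronous software XOR */

	return chan;
}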