
netdma: adding alignment check for NETDMA ops

This is fallout from adding a memcpy alignment workaround for certain
IOATDMA hardware. NetDMA will now only use a DMA engine that can handle
byte-aligned ops.

Acked-by: David S. Miller <davem@davemloft.net>
Signed-off-by: Dave Jiang <dave.jiang@intel.com>
Signed-off-by: Dan Williams <dan.j.williams@intel.com>
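
For context, the byte-alignment probe relies on the DMA device advertising its minimum memcpy alignment as a power-of-two shift (copy_align), and is_dma_copy_aligned() checking the candidate offsets and length against the resulting mask. Passing (1, 1, 1), as net_dma_find_channel() does below, therefore asks "can this engine copy with byte-granular offsets and length?". The following user-space sketch only illustrates those mask-based semantics; fake_dma_device and fake_is_dma_copy_aligned are illustrative stand-ins, not the kernel's actual inline helpers.

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

/* Illustrative stand-in for struct dma_device: copy_align is a
 * power-of-two shift (0 = no alignment requirement). */
struct fake_dma_device {
	unsigned char copy_align;
};

/* Sketch of the mask check: off1, off2 and len must all be multiples
 * of (1 << copy_align) for the engine to accept the copy. */
static bool fake_is_dma_copy_aligned(const struct fake_dma_device *dev,
				     size_t off1, size_t off2, size_t len)
{
	size_t mask;

	if (!dev->copy_align)
		return true;		/* engine handles any alignment */
	mask = ((size_t)1 << dev->copy_align) - 1;
	return !(mask & (off1 | off2 | len));
}

int main(void)
{
	struct fake_dma_device byte_ok = { .copy_align = 0 };
	struct fake_dma_device needs_8 = { .copy_align = 3 };	/* 8-byte aligned */

	/* net_dma_find_channel() effectively asks this question with (1, 1, 1): */
	printf("byte-capable engine accepts (1,1,1): %d\n",
	       fake_is_dma_copy_aligned(&byte_ok, 1, 1, 1));
	printf("8-byte-only engine accepts (1,1,1): %d\n",
	       fake_is_dma_copy_aligned(&needs_8, 1, 1, 1));
	return 0;
}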
Dave Jiang · 13 years ago
commit a2bd1140a2

+ 14 - 0
drivers/dma/dmaengine.c

@@ -332,6 +332,20 @@ struct dma_chan *dma_find_channel(enum dma_transaction_type tx_type)
 }
 EXPORT_SYMBOL(dma_find_channel);
 
+/*
+ * net_dma_find_channel - find a channel for net_dma
+ * net_dma has alignment requirements
+ */
+struct dma_chan *net_dma_find_channel(void)
+{
+	struct dma_chan *chan = dma_find_channel(DMA_MEMCPY);
+	if (chan && !is_dma_copy_aligned(chan->device, 1, 1, 1))
+		return NULL;
+
+	return chan;
+}
+EXPORT_SYMBOL(net_dma_find_channel);
+
 /**
  * dma_issue_pending_all - flush all pending operations across all channels
  */

+ 1 - 0
include/linux/dmaengine.h

@@ -948,6 +948,7 @@ int dma_async_device_register(struct dma_device *device);
 void dma_async_device_unregister(struct dma_device *device);
 void dma_run_dependencies(struct dma_async_tx_descriptor *tx);
 struct dma_chan *dma_find_channel(enum dma_transaction_type tx_type);
+struct dma_chan *net_dma_find_channel(void);
 #define dma_request_channel(mask, x, y) __dma_request_channel(&(mask), x, y)
 
 /* --- Helper iov-locking functions --- */

+ 2 - 2
net/ipv4/tcp.c

@@ -1450,7 +1450,7 @@ int tcp_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
 		if ((available < target) &&
 		    (len > sysctl_tcp_dma_copybreak) && !(flags & MSG_PEEK) &&
 		    !sysctl_tcp_low_latency &&
-		    dma_find_channel(DMA_MEMCPY)) {
+		    net_dma_find_channel()) {
 			preempt_enable_no_resched();
 			tp->ucopy.pinned_list =
 					dma_pin_iovec_pages(msg->msg_iov, len);
@@ -1665,7 +1665,7 @@ do_prequeue:
 		if (!(flags & MSG_TRUNC)) {
 #ifdef CONFIG_NET_DMA
 			if (!tp->ucopy.dma_chan && tp->ucopy.pinned_list)
-				tp->ucopy.dma_chan = dma_find_channel(DMA_MEMCPY);
+				tp->ucopy.dma_chan = net_dma_find_channel();
 
 			if (tp->ucopy.dma_chan) {
 				tp->ucopy.dma_cookie = dma_skb_copy_datagram_iovec(

+ 1 - 1
net/ipv4/tcp_input.c

@@ -5190,7 +5190,7 @@ static int tcp_dma_try_early_copy(struct sock *sk, struct sk_buff *skb,
 		return 0;
 
 	if (!tp->ucopy.dma_chan && tp->ucopy.pinned_list)
-		tp->ucopy.dma_chan = dma_find_channel(DMA_MEMCPY);
+		tp->ucopy.dma_chan = net_dma_find_channel();
 
 	if (tp->ucopy.dma_chan && skb_csum_unnecessary(skb)) {

+ 1 - 1
net/ipv4/tcp_ipv4.c

@@ -1727,7 +1727,7 @@ process:
 #ifdef CONFIG_NET_DMA
 		struct tcp_sock *tp = tcp_sk(sk);
 		if (!tp->ucopy.dma_chan && tp->ucopy.pinned_list)
-			tp->ucopy.dma_chan = dma_find_channel(DMA_MEMCPY);
+			tp->ucopy.dma_chan = net_dma_find_channel();
 		if (tp->ucopy.dma_chan)
 			ret = tcp_v4_do_rcv(sk, skb);
 		else

+ 1 - 1
net/ipv6/tcp_ipv6.c

@@ -1755,7 +1755,7 @@ process:
 #ifdef CONFIG_NET_DMA
 		struct tcp_sock *tp = tcp_sk(sk);
 		if (!tp->ucopy.dma_chan && tp->ucopy.pinned_list)
-			tp->ucopy.dma_chan = dma_find_channel(DMA_MEMCPY);
+			tp->ucopy.dma_chan = net_dma_find_channel();
 		if (tp->ucopy.dma_chan)
 			ret = tcp_v6_do_rcv(sk, skb);
 		else
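
All of the TCP receive-path hunks above share one pattern: lazily pick up a byte-capable channel via net_dma_find_channel(), and if it returns NULL simply fall back to the ordinary CPU copy. The sketch below only illustrates that caller-side fallback; fake_dma_chan, fake_net_dma_find_channel and receive_copy are hypothetical user-space stand-ins, not the kernel code paths themselves.

#include <stdio.h>
#include <string.h>

/* Hypothetical stand-in for a DMA channel so the pattern compiles
 * outside the kernel. */
struct fake_dma_chan { int byte_capable; };

static struct fake_dma_chan byte_engine = { .byte_capable = 1 };

/* Stand-in for net_dma_find_channel(): hand back a channel only if it
 * can do byte-aligned copies, otherwise NULL so the caller falls back. */
static struct fake_dma_chan *fake_net_dma_find_channel(void)
{
	return byte_engine.byte_capable ? &byte_engine : NULL;
}

/* The shape of the tcp_recvmsg()/tcp_v4_rcv()/tcp_v6_rcv() hunks:
 * grab a channel once, offload if we got one, copy on the CPU if not. */
static void receive_copy(char *dst, const char *src, size_t len)
{
	static struct fake_dma_chan *chan;

	if (!chan)
		chan = fake_net_dma_find_channel();

	if (chan)
		printf("offloading %zu bytes to the DMA engine\n", len);
	else
		memcpy(dst, src, len);	/* plain CPU copy fallback */
}

int main(void)
{
	char dst[16] = { 0 };

	receive_copy(dst, "payload", 8);
	return 0;
}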