@@ -1116,7 +1116,6 @@ int tcp_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
 	long timeo;
 	struct task_struct *user_recv = NULL;
 	int copied_early = 0;
-	int available = 0;
 	struct sk_buff *skb;
 
 	lock_sock(sk);
@@ -1145,15 +1144,22 @@ int tcp_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
 	tp->ucopy.dma_chan = NULL;
 	preempt_disable();
 	skb = skb_peek_tail(&sk->sk_receive_queue);
-	if (skb)
-		available = TCP_SKB_CB(skb)->seq + skb->len - (*seq);
-	if ((available < target) &&
-	    (len > sysctl_tcp_dma_copybreak) && !(flags & MSG_PEEK) &&
-	    !sysctl_tcp_low_latency && __get_cpu_var(softnet_data).net_dma) {
-		preempt_enable_no_resched();
-		tp->ucopy.pinned_list = dma_pin_iovec_pages(msg->msg_iov, len);
-	} else
-		preempt_enable_no_resched();
+	{
+		int available = 0;
+
+		if (skb)
+			available = TCP_SKB_CB(skb)->seq + skb->len - (*seq);
+		if ((available < target) &&
+		    (len > sysctl_tcp_dma_copybreak) && !(flags & MSG_PEEK) &&
+		    !sysctl_tcp_low_latency &&
+		    __get_cpu_var(softnet_data).net_dma) {
+			preempt_enable_no_resched();
+			tp->ucopy.pinned_list =
+				dma_pin_iovec_pages(msg->msg_iov, len);
+		} else {
+			preempt_enable_no_resched();
+		}
+	}
 #endif
 
 	do {