@@ -461,16 +461,16 @@ static inline void sk_stream_free_skb(struct sock *sk, struct sk_buff *skb)
 }
 
 /* The per-socket spinlock must be held here. */
-#define sk_add_backlog(__sk, __skb)				\
-do {	if (!(__sk)->sk_backlog.tail) {				\
-		(__sk)->sk_backlog.head =			\
-		(__sk)->sk_backlog.tail = (__skb);		\
-	} else {						\
-		((__sk)->sk_backlog.tail)->next = (__skb);	\
-		(__sk)->sk_backlog.tail = (__skb);		\
-	}							\
-	(__skb)->next = NULL;					\
-} while(0)
+static inline void sk_add_backlog(struct sock *sk, struct sk_buff *skb)
+{
+	if (!sk->sk_backlog.tail) {
+		sk->sk_backlog.head = sk->sk_backlog.tail = skb;
+	} else {
+		sk->sk_backlog.tail->next = skb;
+		sk->sk_backlog.tail = skb;
+	}
+	skb->next = NULL;
+}
 
 #define sk_wait_event(__sk, __timeo, __condition)		\
 ({	int rc;							\