@@ -938,28 +938,7 @@ static inline void sock_put(struct sock *sk)
 		sk_free(sk);
 }
 
-static inline int sk_receive_skb(struct sock *sk, struct sk_buff *skb)
-{
-	int rc = NET_RX_SUCCESS;
-
-	if (sk_filter(sk, skb, 0))
-		goto discard_and_relse;
-
-	skb->dev = NULL;
-
-	bh_lock_sock(sk);
-	if (!sock_owned_by_user(sk))
-		rc = sk->sk_backlog_rcv(sk, skb);
-	else
-		sk_add_backlog(sk, skb);
-	bh_unlock_sock(sk);
-out:
-	sock_put(sk);
-	return rc;
-discard_and_relse:
-	kfree_skb(skb);
-	goto out;
-}
+extern int sk_receive_skb(struct sock *sk, struct sk_buff *skb);
 
 /* Detach socket from process context.
  * Announce socket dead, detach it from wait queue and inode.
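
This hunk demotes sk_receive_skb() from a header inline to an extern declaration, so the body removed above should reappear as an out-of-line definition on the .c side, which this excerpt does not show. A minimal sketch of that definition, assuming the body moves verbatim into net/core/sock.c and is exported with EXPORT_SYMBOL (both assumptions, not confirmed by the patch):

/* net/core/sock.c (sketch): run the socket filter, then either let the
 * backlog receive handler process the skb immediately or park it on the
 * backlog queue when a user context currently owns the socket lock.
 * The caller's reference is always dropped via sock_put().
 */
int sk_receive_skb(struct sock *sk, struct sk_buff *skb)
{
	int rc = NET_RX_SUCCESS;

	if (sk_filter(sk, skb, 0))
		goto discard_and_relse;

	skb->dev = NULL;

	bh_lock_sock(sk);
	if (!sock_owned_by_user(sk))
		rc = sk->sk_backlog_rcv(sk, skb);
	else
		sk_add_backlog(sk, skb);
	bh_unlock_sock(sk);
out:
	sock_put(sk);
	return rc;
discard_and_relse:
	kfree_skb(skb);
	goto out;
}
EXPORT_SYMBOL(sk_receive_skb);

Uninlining a function of this size replaces a copy of the whole body at every call site with a single call instruction, which is usually a net win in code size for the cost of one extra call per packet.
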
@@ -1044,33 +1023,9 @@ sk_dst_reset(struct sock *sk)
 	write_unlock(&sk->sk_dst_lock);
 }
 
-static inline struct dst_entry *
-__sk_dst_check(struct sock *sk, u32 cookie)
-{
-	struct dst_entry *dst = sk->sk_dst_cache;
-
-	if (dst && dst->obsolete && dst->ops->check(dst, cookie) == NULL) {
-		sk->sk_dst_cache = NULL;
-		dst_release(dst);
-		return NULL;
-	}
-
-	return dst;
-}
-
-static inline struct dst_entry *
-sk_dst_check(struct sock *sk, u32 cookie)
-{
-	struct dst_entry *dst = sk_dst_get(sk);
+extern struct dst_entry *__sk_dst_check(struct sock *sk, u32 cookie);
 
-	if (dst && dst->obsolete && dst->ops->check(dst, cookie) == NULL) {
-		sk_dst_reset(sk);
-		dst_release(dst);
-		return NULL;
-	}
-
-	return dst;
-}
+extern struct dst_entry *sk_dst_check(struct sock *sk, u32 cookie);
 
 static inline void sk_setup_caps(struct sock *sk, struct dst_entry *dst)
 {
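
The two route-cache validators get the same treatment. A sketch of the expected out-of-line definitions, again assuming the bodies are carried over unchanged into net/core/sock.c and exported (the EXPORT_SYMBOL lines are an assumption):

/* net/core/sock.c (sketch): lockless check of the cached route. If the
 * dst is marked obsolete and its ->check() rejects the caller's cookie,
 * drop the cache entry and report that no valid route is cached.
 */
struct dst_entry *__sk_dst_check(struct sock *sk, u32 cookie)
{
	struct dst_entry *dst = sk->sk_dst_cache;

	if (dst && dst->obsolete && dst->ops->check(dst, cookie) == NULL) {
		sk->sk_dst_cache = NULL;
		dst_release(dst);
		return NULL;
	}

	return dst;
}
EXPORT_SYMBOL(__sk_dst_check);

/* Locked variant: sk_dst_get() takes its own reference, and a stale
 * entry is cleared through sk_dst_reset(), i.e. under sk_dst_lock.
 */
struct dst_entry *sk_dst_check(struct sock *sk, u32 cookie)
{
	struct dst_entry *dst = sk_dst_get(sk);

	if (dst && dst->obsolete && dst->ops->check(dst, cookie) == NULL) {
		sk_dst_reset(sk);
		dst_release(dst);
		return NULL;
	}

	return dst;
}
EXPORT_SYMBOL(sk_dst_check);
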
@@ -1140,45 +1095,7 @@ extern void sk_reset_timer(struct sock *sk, struct timer_list* timer,
 
 extern void sk_stop_timer(struct sock *sk, struct timer_list* timer);
 
-static inline int sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
-{
-	int err = 0;
-	int skb_len;
-
-	/* Cast skb->rcvbuf to unsigned... It's pointless, but reduces
-	   number of warnings when compiling with -W --ANK
-	 */
-	if (atomic_read(&sk->sk_rmem_alloc) + skb->truesize >=
-	    (unsigned)sk->sk_rcvbuf) {
-		err = -ENOMEM;
-		goto out;
-	}
-
-	/* It would be deadlock, if sock_queue_rcv_skb is used
-	   with socket lock! We assume that users of this
-	   function are lock free.
-	 */
-	err = sk_filter(sk, skb, 1);
-	if (err)
-		goto out;
-
-	skb->dev = NULL;
-	skb_set_owner_r(skb, sk);
-
-	/* Cache the SKB length before we tack it onto the receive
-	 * queue. Once it is added it no longer belongs to us and
-	 * may be freed by other threads of control pulling packets
-	 * from the queue.
-	 */
-	skb_len = skb->len;
-
-	skb_queue_tail(&sk->sk_receive_queue, skb);
-
-	if (!sock_flag(sk, SOCK_DEAD))
-		sk->sk_data_ready(sk, skb_len);
-out:
-	return err;
-}
+extern int sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb);
 
 static inline int sock_queue_err_skb(struct sock *sk, struct sk_buff *skb)
 {
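
Finally, sock_queue_rcv_skb() becomes extern as well. A sketch of the out-of-line definition, under the same assumption of a verbatim move into net/core/sock.c plus an EXPORT_SYMBOL:

/* net/core/sock.c (sketch): charge the skb against sk_rcvbuf, run the
 * socket filter, take receive-side ownership, append to the receive
 * queue and wake the socket. Must be called without the socket lock.
 */
int sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
{
	int err = 0;
	int skb_len;

	if (atomic_read(&sk->sk_rmem_alloc) + skb->truesize >=
	    (unsigned)sk->sk_rcvbuf) {
		err = -ENOMEM;
		goto out;
	}

	err = sk_filter(sk, skb, 1);
	if (err)
		goto out;

	skb->dev = NULL;
	skb_set_owner_r(skb, sk);

	/* Cache the length before queueing: once the skb is on the
	 * receive queue, a reader on another CPU may already have
	 * freed it.
	 */
	skb_len = skb->len;

	skb_queue_tail(&sk->sk_receive_queue, skb);

	if (!sock_flag(sk, SOCK_DEAD))
		sk->sk_data_ready(sk, skb_len);
out:
	return err;
}
EXPORT_SYMBOL(sock_queue_rcv_skb);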