
[NET]: Possible cleanups.

This patch contains the following possible cleanups:
- make the following needlessly global functions static:
  - ipv4/tcp.c: __tcp_alloc_md5sig_pool()
  - ipv4/tcp_ipv4.c: tcp_v4_reqsk_md5_lookup()
  - ipv4/udplite.c: udplite_rcv()
  - ipv4/udplite.c: udplite_err()
- make the following needlessly global structs static:
  - ipv4/tcp_ipv4.c: tcp_request_sock_ipv4_ops
  - ipv4/tcp_ipv4.c: tcp_sock_ipv4_specific
  - ipv6/tcp_ipv6.c: tcp_request_sock_ipv6_ops
- net/ipv{4,6}/udplite.c: remove __inline__ from static functions
                          (gcc should know best when to inline them;
                          see the sketch after this list)
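
A minimal sketch of the pattern (made-up names, not taken from the patch): a
helper that is only called from within its own file gets internal linkage via
static, and it carries no __inline__ hint because gcc already decides on its
own whether to inline a static function.

/*
 * Hypothetical example only; demo_helper does not exist in the kernel.
 * Marking the function static keeps the symbol out of the global
 * namespace and lets gcc inline it (or not) as it sees fit.
 */
#include <stdio.h>

/* Before (conceptually):  __inline__ int demo_helper(int x) { ... }
 * which exports the symbol even though nothing outside this file uses it. */

static int demo_helper(int x)
{
	return x * 2;
}

int main(void)
{
	printf("%d\n", demo_helper(21));	/* prints 42 */
	return 0;
}

Making such helpers static can also let the compiler drop the out-of-line copy
entirely once every call is inlined, which an extern definition generally
cannot, since other files might still reference the symbol.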

Signed-off-by: Adrian Bunk <bunk@stusta.de>
Signed-off-by: David S. Miller <davem@davemloft.net>
Adrian Bunk, 18 years ago
commit f5b99bcddd
5 files changed, 16 additions and 16 deletions
  1. net/ipv4/tcp.c        +1 -1
  2. net/ipv4/tcp_ipv4.c   +4 -4
  3. net/ipv4/udplite.c    +5 -5
  4. net/ipv6/tcp_ipv6.c   +1 -1
  5. net/ipv6/udplite.c    +5 -5

+ 1 - 1
net/ipv4/tcp.c

@@ -2278,7 +2278,7 @@ void tcp_free_md5sig_pool(void)
 
 EXPORT_SYMBOL(tcp_free_md5sig_pool);
 
-struct tcp_md5sig_pool **__tcp_alloc_md5sig_pool(void)
+static struct tcp_md5sig_pool **__tcp_alloc_md5sig_pool(void)
 {
 	int cpu;
 	struct tcp_md5sig_pool **pool;

+ 4 - 4
net/ipv4/tcp_ipv4.c

@@ -840,8 +840,8 @@ struct tcp_md5sig_key *tcp_v4_md5_lookup(struct sock *sk,
 
 EXPORT_SYMBOL(tcp_v4_md5_lookup);
 
-struct tcp_md5sig_key *tcp_v4_reqsk_md5_lookup(struct sock *sk,
-					       struct request_sock *req)
+static struct tcp_md5sig_key *tcp_v4_reqsk_md5_lookup(struct sock *sk,
+						      struct request_sock *req)
 {
 	return tcp_v4_md5_do_lookup(sk, inet_rsk(req)->rmt_addr);
 }
@@ -1233,7 +1233,7 @@ struct request_sock_ops tcp_request_sock_ops __read_mostly = {
 	.send_reset	=	tcp_v4_send_reset,
 };
 
-struct tcp_request_sock_ops tcp_request_sock_ipv4_ops = {
+static struct tcp_request_sock_ops tcp_request_sock_ipv4_ops = {
 #ifdef CONFIG_TCP_MD5SIG
 	.md5_lookup	=	tcp_v4_reqsk_md5_lookup,
 #endif
@@ -1820,7 +1820,7 @@ struct inet_connection_sock_af_ops ipv4_specific = {
 #endif
 };
 
-struct tcp_sock_af_ops tcp_sock_ipv4_specific = {
+static struct tcp_sock_af_ops tcp_sock_ipv4_specific = {
 #ifdef CONFIG_TCP_MD5SIG
 	.md5_lookup		= tcp_v4_md5_lookup,
 	.calc_md5_hash		= tcp_v4_calc_md5_hash,

+ 5 - 5
net/ipv4/udplite.c

@@ -18,23 +18,23 @@ DEFINE_SNMP_STAT(struct udp_mib, udplite_statistics)	__read_mostly;
 struct hlist_head 	udplite_hash[UDP_HTABLE_SIZE];
 static int		udplite_port_rover;
 
-__inline__ int udplite_get_port(struct sock *sk, unsigned short p,
-			int (*c)(const struct sock *, const struct sock *))
+int udplite_get_port(struct sock *sk, unsigned short p,
+		     int (*c)(const struct sock *, const struct sock *))
 {
 	return  __udp_lib_get_port(sk, p, udplite_hash, &udplite_port_rover, c);
 }
 
-static __inline__ int udplite_v4_get_port(struct sock *sk, unsigned short snum)
+static int udplite_v4_get_port(struct sock *sk, unsigned short snum)
 {
 	return udplite_get_port(sk, snum, ipv4_rcv_saddr_equal);
 }
 
-__inline__ int udplite_rcv(struct sk_buff *skb)
+static int udplite_rcv(struct sk_buff *skb)
 {
 	return __udp4_lib_rcv(skb, udplite_hash, 1);
 }
 
-__inline__ void udplite_err(struct sk_buff *skb, u32 info)
+static void udplite_err(struct sk_buff *skb, u32 info)
 {
 	return __udp4_lib_err(skb, info, udplite_hash);
 }

+ 1 - 1
net/ipv6/tcp_ipv6.c

@@ -929,7 +929,7 @@ static struct request_sock_ops tcp6_request_sock_ops __read_mostly = {
 	.send_reset	=	tcp_v6_send_reset
 };
 
-struct tcp_request_sock_ops tcp_request_sock_ipv6_ops = {
+static struct tcp_request_sock_ops tcp_request_sock_ipv6_ops = {
 #ifdef CONFIG_TCP_MD5SIG
 	.md5_lookup	=	tcp_v6_reqsk_md5_lookup,
 #endif

+ 5 - 5
net/ipv6/udplite.c

@@ -17,14 +17,14 @@
 
 DEFINE_SNMP_STAT(struct udp_mib, udplite_stats_in6) __read_mostly;
 
-static __inline__ int udplitev6_rcv(struct sk_buff **pskb)
+static int udplitev6_rcv(struct sk_buff **pskb)
 {
 	return __udp6_lib_rcv(pskb, udplite_hash, 1);
 }
 
-static __inline__ void udplitev6_err(struct sk_buff *skb,
-				     struct inet6_skb_parm *opt,
-				     int type, int code, int offset, __be32 info)
+static void udplitev6_err(struct sk_buff *skb,
+			  struct inet6_skb_parm *opt,
+			  int type, int code, int offset, __be32 info)
 {
 	return __udp6_lib_err(skb, opt, type, code, offset, info, udplite_hash);
 }
@@ -35,7 +35,7 @@ static struct inet6_protocol udplitev6_protocol = {
 	.flags		=	INET6_PROTO_NOPOLICY|INET6_PROTO_FINAL,
 };
 
-static __inline__ int udplite_v6_get_port(struct sock *sk, unsigned short snum)
+static int udplite_v6_get_port(struct sock *sk, unsigned short snum)
 {
 	return udplite_get_port(sk, snum, ipv6_rcv_saddr_equal);
 }