
Merge branch 'master' of git://git.kernel.org/pub/scm/linux/kernel/git/kaber/nf-next-2.6

David S. Miller 14 years ago
parent commit
3872b28408

+ 17 - 1
include/net/ip_vs.h

@@ -494,7 +494,7 @@ struct ip_vs_conn_param {
  *	IP_VS structure allocated for each dynamically scheduled connection
  */
 struct ip_vs_conn {
-	struct list_head        c_list;         /* hashed list heads */
+	struct hlist_node	c_list;         /* hashed list heads */
 #ifdef CONFIG_NET_NS
 	struct net              *net;           /* Name space */
 #endif
@@ -1019,6 +1019,8 @@ ip_vs_schedule(struct ip_vs_service *svc, struct sk_buff *skb,
 extern int ip_vs_leave(struct ip_vs_service *svc, struct sk_buff *skb,
 			struct ip_vs_proto_data *pd);
 
+extern void ip_vs_scheduler_err(struct ip_vs_service *svc, const char *msg);
+
 
 /*
  *      IPVS control data and functions (from ip_vs_ctl.c)
@@ -1241,6 +1243,20 @@ static inline void ip_vs_conn_drop_conntrack(struct ip_vs_conn *cp)
 /* CONFIG_IP_VS_NFCT */
 #endif
 
+static inline unsigned int
+ip_vs_dest_conn_overhead(struct ip_vs_dest *dest)
+{
+	/*
+	 * We think the overhead of processing active connections is 256
+	 * times higher than that of inactive connections in average. (This
+	 * 256 times might not be accurate, we will change it later) We
+	 * use the following formula to estimate the overhead now:
+	 *		  dest->activeconns*256 + dest->inactconns
+	 */
+	return (atomic_read(&dest->activeconns) << 8) +
+		atomic_read(&dest->inactconns);
+}
+
 #endif /* __KERNEL__ */
 
 #endif	/* _NET_IP_VS_H */
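
The new ip_vs_dest_conn_overhead() helper centralizes the cost estimate that ip_vs_lc.c and ip_vs_wlc.c each carried as a private copy, and that the LBLC/LBLCR schedulers approximated with a factor of 50: with the shift by 8, an active connection now counts 256 times as much as an inactive one for every scheduler. A minimal userspace sketch of the same weighting, using a hypothetical struct in place of struct ip_vs_dest:

#include <stdio.h>

/* Hypothetical stand-in for struct ip_vs_dest; the kernel version
 * holds atomic_t counters read with atomic_read(). */
struct dest {
	unsigned int activeconns;
	unsigned int inactconns;
};

/* Same weighting as ip_vs_dest_conn_overhead(): an active
 * connection is assumed to cost 256x an inactive one. */
static unsigned int conn_overhead(const struct dest *d)
{
	return (d->activeconns << 8) + d->inactconns;
}

int main(void)
{
	struct dest a = { .activeconns = 3, .inactconns = 100 };
	struct dest b = { .activeconns = 2, .inactconns = 500 };

	/* a: 3*256 + 100 = 868; b: 2*256 + 500 = 1012 */
	printf("a=%u b=%u\n", conn_overhead(&a), conn_overhead(&b));
	return 0;
}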

+ 2 - 0
net/bridge/netfilter/ebtables.c

@@ -1107,6 +1107,8 @@ static int do_replace(struct net *net, const void __user *user,
 	if (tmp.num_counters >= INT_MAX / sizeof(struct ebt_counter))
 		return -ENOMEM;
 
+	tmp.name[sizeof(tmp.name) - 1] = 0;
+
 	countersize = COUNTER_OFFSET(tmp.nentries) * nr_cpu_ids;
 	newinfo = vmalloc(sizeof(*newinfo) + countersize);
 	if (!newinfo)
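
The added assignment guards against an unterminated name coming from userspace: the replace header is copied in verbatim, so nothing guarantees that name[] contains a NUL before it is later used as a C string. A small sketch of the failure mode and the fix (plain C, with a hypothetical struct mirroring the copied-in header):

#include <stdio.h>
#include <string.h>

/* Hypothetical stand-in for the header copied from userspace;
 * userspace controls every byte of name[]. */
struct replace_hdr {
	char name[32];
};

int main(void)
{
	struct replace_hdr tmp;

	/* Simulate a buggy or malicious caller filling the field. */
	memset(tmp.name, 'A', sizeof(tmp.name));

	/* Without this, any strcmp()/printf("%s") on tmp.name would
	 * read past the end of the buffer. */
	tmp.name[sizeof(tmp.name) - 1] = 0;

	printf("%zu\n", strlen(tmp.name)); /* 31, safely bounded */
	return 0;
}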

+ 1 - 0
net/netfilter/ipset/Kconfig

@@ -1,6 +1,7 @@
 menuconfig IP_SET
 	tristate "IP set support"
 	depends on INET && NETFILTER
+	depends on NETFILTER_NETLINK
 	help
 	  This option adds IP set support to the kernel.
 	  In order to define and use the sets, you need the userspace utility

+ 29 - 23
net/netfilter/ipvs/ip_vs_conn.c

@@ -59,7 +59,7 @@ static int ip_vs_conn_tab_mask __read_mostly;
 /*
  *  Connection hash table: for input and output packets lookups of IPVS
  */
-static struct list_head *ip_vs_conn_tab __read_mostly;
+static struct hlist_head *ip_vs_conn_tab __read_mostly;
 
 /*  SLAB cache for IPVS connections */
 static struct kmem_cache *ip_vs_conn_cachep __read_mostly;
@@ -201,7 +201,7 @@ static inline int ip_vs_conn_hash(struct ip_vs_conn *cp)
 	spin_lock(&cp->lock);
 
 	if (!(cp->flags & IP_VS_CONN_F_HASHED)) {
-		list_add(&cp->c_list, &ip_vs_conn_tab[hash]);
+		hlist_add_head(&cp->c_list, &ip_vs_conn_tab[hash]);
 		cp->flags |= IP_VS_CONN_F_HASHED;
 		atomic_inc(&cp->refcnt);
 		ret = 1;
@@ -234,7 +234,7 @@ static inline int ip_vs_conn_unhash(struct ip_vs_conn *cp)
 	spin_lock(&cp->lock);
 
 	if (cp->flags & IP_VS_CONN_F_HASHED) {
-		list_del(&cp->c_list);
+		hlist_del(&cp->c_list);
 		cp->flags &= ~IP_VS_CONN_F_HASHED;
 		atomic_dec(&cp->refcnt);
 		ret = 1;
@@ -259,12 +259,13 @@ __ip_vs_conn_in_get(const struct ip_vs_conn_param *p)
 {
 	unsigned hash;
 	struct ip_vs_conn *cp;
+	struct hlist_node *n;
 
 	hash = ip_vs_conn_hashkey_param(p, false);
 
 	ct_read_lock(hash);
 
-	list_for_each_entry(cp, &ip_vs_conn_tab[hash], c_list) {
+	hlist_for_each_entry(cp, n, &ip_vs_conn_tab[hash], c_list) {
 		if (cp->af == p->af &&
 		    p->cport == cp->cport && p->vport == cp->vport &&
 		    ip_vs_addr_equal(p->af, p->caddr, &cp->caddr) &&
@@ -345,12 +346,13 @@ struct ip_vs_conn *ip_vs_ct_in_get(const struct ip_vs_conn_param *p)
 {
 	unsigned hash;
 	struct ip_vs_conn *cp;
+	struct hlist_node *n;
 
 	hash = ip_vs_conn_hashkey_param(p, false);
 
 	ct_read_lock(hash);
 
-	list_for_each_entry(cp, &ip_vs_conn_tab[hash], c_list) {
+	hlist_for_each_entry(cp, n, &ip_vs_conn_tab[hash], c_list) {
 		if (!ip_vs_conn_net_eq(cp, p->net))
 			continue;
 		if (p->pe_data && p->pe->ct_match) {
@@ -394,6 +396,7 @@ struct ip_vs_conn *ip_vs_conn_out_get(const struct ip_vs_conn_param *p)
 {
 	unsigned hash;
 	struct ip_vs_conn *cp, *ret=NULL;
+	struct hlist_node *n;
 
 	/*
 	 *	Check for "full" addressed entries
@@ -402,7 +405,7 @@ struct ip_vs_conn *ip_vs_conn_out_get(const struct ip_vs_conn_param *p)
 
 	ct_read_lock(hash);
 
-	list_for_each_entry(cp, &ip_vs_conn_tab[hash], c_list) {
+	hlist_for_each_entry(cp, n, &ip_vs_conn_tab[hash], c_list) {
 		if (cp->af == p->af &&
 		    p->vport == cp->cport && p->cport == cp->dport &&
 		    ip_vs_addr_equal(p->af, p->vaddr, &cp->caddr) &&
@@ -818,7 +821,7 @@ ip_vs_conn_new(const struct ip_vs_conn_param *p,
 		return NULL;
 	}
 
-	INIT_LIST_HEAD(&cp->c_list);
+	INIT_HLIST_NODE(&cp->c_list);
 	setup_timer(&cp->timer, ip_vs_conn_expire, (unsigned long)cp);
 	ip_vs_conn_net_set(cp, p->net);
 	cp->af		   = p->af;
@@ -894,8 +897,8 @@ ip_vs_conn_new(const struct ip_vs_conn_param *p,
  */
 #ifdef CONFIG_PROC_FS
 struct ip_vs_iter_state {
-	struct seq_net_private p;
-	struct list_head *l;
+	struct seq_net_private	p;
+	struct hlist_head	*l;
 };
 
 static void *ip_vs_conn_array(struct seq_file *seq, loff_t pos)
@@ -903,13 +906,14 @@ static void *ip_vs_conn_array(struct seq_file *seq, loff_t pos)
 	int idx;
 	struct ip_vs_conn *cp;
 	struct ip_vs_iter_state *iter = seq->private;
+	struct hlist_node *n;
 
 	for (idx = 0; idx < ip_vs_conn_tab_size; idx++) {
 		ct_read_lock_bh(idx);
-		list_for_each_entry(cp, &ip_vs_conn_tab[idx], c_list) {
+		hlist_for_each_entry(cp, n, &ip_vs_conn_tab[idx], c_list) {
 			if (pos-- == 0) {
 				iter->l = &ip_vs_conn_tab[idx];
-			return cp;
+				return cp;
 			}
 		}
 		ct_read_unlock_bh(idx);
@@ -930,7 +934,8 @@ static void *ip_vs_conn_seq_next(struct seq_file *seq, void *v, loff_t *pos)
 {
 	struct ip_vs_conn *cp = v;
 	struct ip_vs_iter_state *iter = seq->private;
-	struct list_head *e, *l = iter->l;
+	struct hlist_node *e;
+	struct hlist_head *l = iter->l;
 	int idx;
 
 	++*pos;
@@ -938,15 +943,15 @@ static void *ip_vs_conn_seq_next(struct seq_file *seq, void *v, loff_t *pos)
 		return ip_vs_conn_array(seq, 0);
 
 	/* more on same hash chain? */
-	if ((e = cp->c_list.next) != l)
-		return list_entry(e, struct ip_vs_conn, c_list);
+	if ((e = cp->c_list.next))
+		return hlist_entry(e, struct ip_vs_conn, c_list);
 
 	idx = l - ip_vs_conn_tab;
 	ct_read_unlock_bh(idx);
 
 	while (++idx < ip_vs_conn_tab_size) {
 		ct_read_lock_bh(idx);
-		list_for_each_entry(cp, &ip_vs_conn_tab[idx], c_list) {
+		hlist_for_each_entry(cp, e, &ip_vs_conn_tab[idx], c_list) {
 			iter->l = &ip_vs_conn_tab[idx];
 			return cp;
 		}
@@ -959,7 +964,7 @@ static void *ip_vs_conn_seq_next(struct seq_file *seq, void *v, loff_t *pos)
 static void ip_vs_conn_seq_stop(struct seq_file *seq, void *v)
 {
 	struct ip_vs_iter_state *iter = seq->private;
-	struct list_head *l = iter->l;
+	struct hlist_head *l = iter->l;
 
 	if (l)
 		ct_read_unlock_bh(l - ip_vs_conn_tab);
@@ -1148,13 +1153,14 @@ void ip_vs_random_dropentry(struct net *net)
 	 */
 	for (idx = 0; idx < (ip_vs_conn_tab_size>>5); idx++) {
 		unsigned hash = net_random() & ip_vs_conn_tab_mask;
+		struct hlist_node *n;
 
 		/*
 		 *  Lock is actually needed in this loop.
 		 */
 		ct_write_lock_bh(hash);
 
-		list_for_each_entry(cp, &ip_vs_conn_tab[hash], c_list) {
+		hlist_for_each_entry(cp, n, &ip_vs_conn_tab[hash], c_list) {
 			if (cp->flags & IP_VS_CONN_F_TEMPLATE)
 				/* connection template */
 				continue;
@@ -1202,12 +1208,14 @@ static void ip_vs_conn_flush(struct net *net)
 
 flush_again:
 	for (idx = 0; idx < ip_vs_conn_tab_size; idx++) {
+		struct hlist_node *n;
+
 		/*
 		 *  Lock is actually needed in this loop.
 		 */
 		ct_write_lock_bh(idx);
 
-		list_for_each_entry(cp, &ip_vs_conn_tab[idx], c_list) {
+		hlist_for_each_entry(cp, n, &ip_vs_conn_tab[idx], c_list) {
 			if (!ip_vs_conn_net_eq(cp, net))
 				continue;
 			IP_VS_DBG(4, "del connection\n");
@@ -1265,8 +1273,7 @@ int __init ip_vs_conn_init(void)
 	/*
 	 * Allocate the connection hash table and initialize its list heads
 	 */
-	ip_vs_conn_tab = vmalloc(ip_vs_conn_tab_size *
-				 sizeof(struct list_head));
+	ip_vs_conn_tab = vmalloc(ip_vs_conn_tab_size * sizeof(*ip_vs_conn_tab));
 	if (!ip_vs_conn_tab)
 		return -ENOMEM;
 
@@ -1286,9 +1293,8 @@ int __init ip_vs_conn_init(void)
 	IP_VS_DBG(0, "Each connection entry needs %Zd bytes at least\n",
 		  sizeof(struct ip_vs_conn));
 
-	for (idx = 0; idx < ip_vs_conn_tab_size; idx++) {
-		INIT_LIST_HEAD(&ip_vs_conn_tab[idx]);
-	}
+	for (idx = 0; idx < ip_vs_conn_tab_size; idx++)
+		INIT_HLIST_HEAD(&ip_vs_conn_tab[idx]);
 
 	for (idx = 0; idx < CT_LOCKARRAY_SIZE; idx++)  {
 		rwlock_init(&__ip_vs_conntbl_lock_array[idx].l);
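
The list_head to hlist_head conversion halves the memory spent on the per-bucket heads (one pointer instead of two) and changes the end-of-chain test: an hlist chain ends in NULL rather than looping back to the head, which is why the /proc iterator above can replace "(e = cp->c_list.next) != l" with a plain NULL check. A minimal sketch of the two head layouts (plain C, outside the kernel):

#include <assert.h>

/* Circular doubly linked list head: two pointers per bucket. */
struct list_head {
	struct list_head *next, *prev;
};

/* hlist head: a single pointer per bucket; each node instead keeps
 * a back-pointer to the previous node's next field. */
struct hlist_node {
	struct hlist_node *next, **pprev;
};

struct hlist_head {
	struct hlist_node *first;
};

int main(void)
{
	/* With N buckets, the table array shrinks from N * 2
	 * pointers to N * 1 pointer. */
	assert(sizeof(struct hlist_head) < sizeof(struct list_head));
	return 0;
}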

+ 3 - 25
net/netfilter/ipvs/ip_vs_core.c

@@ -729,7 +729,7 @@ void ip_vs_nat_icmp_v6(struct sk_buff *skb, struct ip_vs_protocol *pp,
 #endif
 
 /* Handle relevant response ICMP messages - forward to the right
- * destination host. Used for NAT and local client.
+ * destination host.
  */
 static int handle_response_icmp(int af, struct sk_buff *skb,
 				union nf_inet_addr *snet,
@@ -979,7 +979,6 @@ static inline int is_tcp_reset(const struct sk_buff *skb, int nh_len)
 }
 
 /* Handle response packets: rewrite addresses and send away...
- * Used for NAT and local client.
  */
 static unsigned int
 handle_response(int af, struct sk_buff *skb, struct ip_vs_proto_data *pd,
@@ -1280,7 +1279,6 @@ ip_vs_in_icmp(struct sk_buff *skb, int *related, unsigned int hooknum)
 	struct ip_vs_protocol *pp;
 	struct ip_vs_proto_data *pd;
 	unsigned int offset, ihl, verdict;
-	union nf_inet_addr snet;
 
 	*related = 1;
 
@@ -1339,17 +1337,8 @@ ip_vs_in_icmp(struct sk_buff *skb, int *related, unsigned int hooknum)
 	ip_vs_fill_iphdr(AF_INET, cih, &ciph);
 	/* The embedded headers contain source and dest in reverse order */
 	cp = pp->conn_in_get(AF_INET, skb, &ciph, offset, 1);
-	if (!cp) {
-		/* The packet could also belong to a local client */
-		cp = pp->conn_out_get(AF_INET, skb, &ciph, offset, 1);
-		if (cp) {
-			snet.ip = iph->saddr;
-			return handle_response_icmp(AF_INET, skb, &snet,
-						    cih->protocol, cp, pp,
-						    offset, ihl);
-		}
+	if (!cp)
 		return NF_ACCEPT;
-	}
 
 	verdict = NF_DROP;
 
@@ -1395,7 +1384,6 @@ ip_vs_in_icmp_v6(struct sk_buff *skb, int *related, unsigned int hooknum)
 	struct ip_vs_protocol *pp;
 	struct ip_vs_proto_data *pd;
 	unsigned int offset, verdict;
-	union nf_inet_addr snet;
 	struct rt6_info *rt;
 
 	*related = 1;
@@ -1455,18 +1443,8 @@ ip_vs_in_icmp_v6(struct sk_buff *skb, int *related, unsigned int hooknum)
 	ip_vs_fill_iphdr(AF_INET6, cih, &ciph);
 	/* The embedded headers contain source and dest in reverse order */
 	cp = pp->conn_in_get(AF_INET6, skb, &ciph, offset, 1);
-	if (!cp) {
-		/* The packet could also belong to a local client */
-		cp = pp->conn_out_get(AF_INET6, skb, &ciph, offset, 1);
-		if (cp) {
-			ipv6_addr_copy(&snet.in6, &iph->saddr);
-			return handle_response_icmp(AF_INET6, skb, &snet,
-						    cih->nexthdr,
-						    cp, pp, offset,
-						    sizeof(struct ipv6hdr));
-		}
+	if (!cp)
 		return NF_ACCEPT;
-	}
 
 	verdict = NF_DROP;
 

+ 4 - 11
net/netfilter/ipvs/ip_vs_lblc.c

@@ -389,12 +389,7 @@ __ip_vs_lblc_schedule(struct ip_vs_service *svc)
 	int loh, doh;
 
 	/*
-	 * We think the overhead of processing active connections is fifty
-	 * times higher than that of inactive connections in average. (This
-	 * fifty times might not be accurate, we will change it later.) We
-	 * use the following formula to estimate the overhead:
-	 *                dest->activeconns*50 + dest->inactconns
-	 * and the load:
+	 * We use the following formula to estimate the load:
 	 *                (dest overhead) / dest->weight
 	 *
 	 * Remember -- no floats in kernel mode!!!
@@ -410,8 +405,7 @@ __ip_vs_lblc_schedule(struct ip_vs_service *svc)
 			continue;
 		if (atomic_read(&dest->weight) > 0) {
 			least = dest;
-			loh = atomic_read(&least->activeconns) * 50
-				+ atomic_read(&least->inactconns);
+			loh = ip_vs_dest_conn_overhead(least);
 			goto nextstage;
 		}
 	}
@@ -425,8 +419,7 @@ __ip_vs_lblc_schedule(struct ip_vs_service *svc)
 		if (dest->flags & IP_VS_DEST_F_OVERLOAD)
 			continue;
 
-		doh = atomic_read(&dest->activeconns) * 50
-			+ atomic_read(&dest->inactconns);
+		doh = ip_vs_dest_conn_overhead(dest);
 		if (loh * atomic_read(&dest->weight) >
 		    doh * atomic_read(&least->weight)) {
 			least = dest;
@@ -510,7 +503,7 @@ ip_vs_lblc_schedule(struct ip_vs_service *svc, const struct sk_buff *skb)
 	/* No cache entry or it is invalid, time to schedule */
 	dest = __ip_vs_lblc_schedule(svc);
 	if (!dest) {
-		IP_VS_ERR_RL("LBLC: no destination available\n");
+		ip_vs_scheduler_err(svc, "no destination available");
 		return NULL;
 	}
 
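
As the "no floats in kernel mode" comment notes, the scheduler compares the loads loh/least->weight and doh/dest->weight by cross-multiplying instead of dividing. A quick standalone illustration (plain C, hypothetical numbers):

#include <stdio.h>

/* Compare loh/lw against doh/dw without floating point:
 * loh/lw > doh/dw  <=>  loh*dw > doh*lw  (valid for lw, dw > 0). */
static int least_is_heavier(unsigned int loh, unsigned int lw,
			    unsigned int doh, unsigned int dw)
{
	return loh * dw > doh * lw;
}

int main(void)
{
	/* least: overhead 868, weight 1; candidate: overhead 1012,
	 * weight 2. 868/1 > 1012/2, so the candidate wins. */
	printf("%d\n", least_is_heavier(868, 1, 1012, 2)); /* 1 */
	return 0;
}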

+ 8 - 19
net/netfilter/ipvs/ip_vs_lblcr.c

@@ -178,8 +178,7 @@ static inline struct ip_vs_dest *ip_vs_dest_set_min(struct ip_vs_dest_set *set)
 
 		if ((atomic_read(&least->weight) > 0)
 		    && (least->flags & IP_VS_DEST_F_AVAILABLE)) {
-			loh = atomic_read(&least->activeconns) * 50
-				+ atomic_read(&least->inactconns);
+			loh = ip_vs_dest_conn_overhead(least);
 			goto nextstage;
 		}
 	}
@@ -192,8 +191,7 @@ static inline struct ip_vs_dest *ip_vs_dest_set_min(struct ip_vs_dest_set *set)
 		if (dest->flags & IP_VS_DEST_F_OVERLOAD)
 			continue;
 
-		doh = atomic_read(&dest->activeconns) * 50
-			+ atomic_read(&dest->inactconns);
+		doh = ip_vs_dest_conn_overhead(dest);
 		if ((loh * atomic_read(&dest->weight) >
 		     doh * atomic_read(&least->weight))
 		    && (dest->flags & IP_VS_DEST_F_AVAILABLE)) {
@@ -228,8 +226,7 @@ static inline struct ip_vs_dest *ip_vs_dest_set_max(struct ip_vs_dest_set *set)
 	list_for_each_entry(e, &set->list, list) {
 		most = e->dest;
 		if (atomic_read(&most->weight) > 0) {
-			moh = atomic_read(&most->activeconns) * 50
-				+ atomic_read(&most->inactconns);
+			moh = ip_vs_dest_conn_overhead(most);
 			goto nextstage;
 		}
 	}
@@ -239,8 +236,7 @@ static inline struct ip_vs_dest *ip_vs_dest_set_max(struct ip_vs_dest_set *set)
   nextstage:
 	list_for_each_entry(e, &set->list, list) {
 		dest = e->dest;
-		doh = atomic_read(&dest->activeconns) * 50
-			+ atomic_read(&dest->inactconns);
+		doh = ip_vs_dest_conn_overhead(dest);
 		/* moh/mw < doh/dw ==> moh*dw < doh*mw, where mw,dw>0 */
 		if ((moh * atomic_read(&dest->weight) <
 		     doh * atomic_read(&most->weight))
@@ -563,12 +559,7 @@ __ip_vs_lblcr_schedule(struct ip_vs_service *svc)
 	int loh, doh;
 
 	/*
-	 * We think the overhead of processing active connections is fifty
-	 * times higher than that of inactive connections in average. (This
-	 * fifty times might not be accurate, we will change it later.) We
-	 * use the following formula to estimate the overhead:
-	 *                dest->activeconns*50 + dest->inactconns
-	 * and the load:
+	 * We use the following formula to estimate the load:
 	 *                (dest overhead) / dest->weight
 	 *
 	 * Remember -- no floats in kernel mode!!!
@@ -585,8 +576,7 @@ __ip_vs_lblcr_schedule(struct ip_vs_service *svc)
 
 		if (atomic_read(&dest->weight) > 0) {
 			least = dest;
-			loh = atomic_read(&least->activeconns) * 50
-				+ atomic_read(&least->inactconns);
+			loh = ip_vs_dest_conn_overhead(least);
 			goto nextstage;
 		}
 	}
@@ -600,8 +590,7 @@ __ip_vs_lblcr_schedule(struct ip_vs_service *svc)
 		if (dest->flags & IP_VS_DEST_F_OVERLOAD)
 			continue;
 
-		doh = atomic_read(&dest->activeconns) * 50
-			+ atomic_read(&dest->inactconns);
+		doh = ip_vs_dest_conn_overhead(dest);
 		if (loh * atomic_read(&dest->weight) >
 		    doh * atomic_read(&least->weight)) {
 			least = dest;
@@ -692,7 +681,7 @@ ip_vs_lblcr_schedule(struct ip_vs_service *svc, const struct sk_buff *skb)
 		/* The cache entry is invalid, time to schedule */
 		dest = __ip_vs_lblcr_schedule(svc);
 		if (!dest) {
-			IP_VS_ERR_RL("LBLCR: no destination available\n");
+			ip_vs_scheduler_err(svc, "no destination available");
 			read_unlock(&svc->sched_lock);
 			return NULL;
 		}

+ 2 - 18
net/netfilter/ipvs/ip_vs_lc.c

@@ -22,22 +22,6 @@
 
 #include <net/ip_vs.h>
 
-
-static inline unsigned int
-ip_vs_lc_dest_overhead(struct ip_vs_dest *dest)
-{
-	/*
-	 * We think the overhead of processing active connections is 256
-	 * times higher than that of inactive connections in average. (This
-	 * 256 times might not be accurate, we will change it later) We
-	 * use the following formula to estimate the overhead now:
-	 *		  dest->activeconns*256 + dest->inactconns
-	 */
-	return (atomic_read(&dest->activeconns) << 8) +
-		atomic_read(&dest->inactconns);
-}
-
-
 /*
  *	Least Connection scheduling
  */
@@ -62,7 +46,7 @@ ip_vs_lc_schedule(struct ip_vs_service *svc, const struct sk_buff *skb)
 		if ((dest->flags & IP_VS_DEST_F_OVERLOAD) ||
 		    atomic_read(&dest->weight) == 0)
 			continue;
-		doh = ip_vs_lc_dest_overhead(dest);
+		doh = ip_vs_dest_conn_overhead(dest);
 		if (!least || doh < loh) {
 			least = dest;
 			loh = doh;
@@ -70,7 +54,7 @@ ip_vs_lc_schedule(struct ip_vs_service *svc, const struct sk_buff *skb)
 	}
 
 	if (!least)
-		IP_VS_ERR_RL("LC: no destination available\n");
+		ip_vs_scheduler_err(svc, "no destination available");
 	else
 		IP_VS_DBG_BUF(6, "LC: server %s:%u activeconns %d "
 			      "inactconns %d\n",

+ 1 - 1
net/netfilter/ipvs/ip_vs_nq.c

@@ -99,7 +99,7 @@ ip_vs_nq_schedule(struct ip_vs_service *svc, const struct sk_buff *skb)
 	}
 
 	if (!least) {
-		IP_VS_ERR_RL("NQ: no destination available\n");
+		ip_vs_scheduler_err(svc, "no destination available");
 		return NULL;
 	}
 

+ 1 - 1
net/netfilter/ipvs/ip_vs_rr.c

@@ -72,7 +72,7 @@ ip_vs_rr_schedule(struct ip_vs_service *svc, const struct sk_buff *skb)
 		q = q->next;
 	} while (q != p);
 	write_unlock(&svc->sched_lock);
-	IP_VS_ERR_RL("RR: no destination available\n");
+	ip_vs_scheduler_err(svc, "no destination available");
 	return NULL;
 
   out:

+ 25 - 0
net/netfilter/ipvs/ip_vs_sched.c

@@ -29,6 +29,7 @@
 
 #include <net/ip_vs.h>
 
+EXPORT_SYMBOL(ip_vs_scheduler_err);
 /*
  *  IPVS scheduler list
  */
@@ -146,6 +147,30 @@ void ip_vs_scheduler_put(struct ip_vs_scheduler *scheduler)
 		module_put(scheduler->module);
 }
 
+/*
+ * Common error output helper for schedulers
+ */
+
+void ip_vs_scheduler_err(struct ip_vs_service *svc, const char *msg)
+{
+	if (svc->fwmark) {
+		IP_VS_ERR_RL("%s: FWM %u 0x%08X - %s\n",
+			     svc->scheduler->name, svc->fwmark,
+			     svc->fwmark, msg);
+#ifdef CONFIG_IP_VS_IPV6
+	} else if (svc->af == AF_INET6) {
+		IP_VS_ERR_RL("%s: %s [%pI6]:%d - %s\n",
+			     svc->scheduler->name,
+			     ip_vs_proto_name(svc->protocol),
+			     &svc->addr.in6, ntohs(svc->port), msg);
+#endif
+	} else {
+		IP_VS_ERR_RL("%s: %s %pI4:%d - %s\n",
+			     svc->scheduler->name,
+			     ip_vs_proto_name(svc->protocol),
+			     &svc->addr.ip, ntohs(svc->port), msg);
+	}
+}
 
 /*
  *  Register a scheduler in the scheduler list
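
Given the three format strings above, the helper identifies a service by fwmark, IPv6 address, or IPv4 address. Hypothetical sample output (assuming the wlc scheduler, TCP services, and the "no destination available" message passed by the callers in this series):

wlc: FWM 10 0x0000000A - no destination available
wlc: TCP [2001:db8::1]:80 - no destination available
wlc: TCP 192.0.2.10:80 - no destination available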

+ 1 - 1
net/netfilter/ipvs/ip_vs_sed.c

@@ -87,7 +87,7 @@ ip_vs_sed_schedule(struct ip_vs_service *svc, const struct sk_buff *skb)
 			goto nextstage;
 		}
 	}
-	IP_VS_ERR_RL("SED: no destination available\n");
+	ip_vs_scheduler_err(svc, "no destination available");
 	return NULL;
 
 	/*

+ 1 - 1
net/netfilter/ipvs/ip_vs_sh.c

@@ -223,7 +223,7 @@ ip_vs_sh_schedule(struct ip_vs_service *svc, const struct sk_buff *skb)
 	    || !(dest->flags & IP_VS_DEST_F_AVAILABLE)
 	    || atomic_read(&dest->weight) <= 0
 	    || is_overloaded(dest)) {
-		IP_VS_ERR_RL("SH: no destination available\n");
+		ip_vs_scheduler_err(svc, "no destination available");
 		return NULL;
 	}
 

+ 2 - 2
net/netfilter/ipvs/ip_vs_sync.c

@@ -374,8 +374,8 @@ get_curr_sync_buff(struct netns_ipvs *ipvs, unsigned long time)
 	struct ip_vs_sync_buff *sb;
 
 	spin_lock_bh(&ipvs->sync_buff_lock);
-	if (ipvs->sync_buff && (time == 0 ||
-	    time_before(jiffies - ipvs->sync_buff->firstuse, time))) {
+	if (ipvs->sync_buff &&
+	    time_after_eq(jiffies - ipvs->sync_buff->firstuse, time)) {
 		sb = ipvs->sync_buff;
 		ipvs->sync_buff = NULL;
 	} else
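
The rewritten condition takes the buffer once it has aged at least "time" jiffies (previously it matched while the buffer was still younger than "time"; time == 0 matches always in both versions), and time_after_eq() on a jiffies delta stays correct across counter wrap-around. A minimal sketch of the wrap-safe comparison, mirroring the kernel macro:

#include <stdio.h>

typedef unsigned long jiffies_t;

/* Same idea as the kernel's time_after_eq(a, b): the unsigned
 * subtraction wraps, and the signed cast keeps the comparison
 * correct while the stamps are less than LONG_MAX apart. */
#define time_after_eq(a, b)  ((long)((a) - (b)) >= 0)

int main(void)
{
	jiffies_t firstuse = (jiffies_t)-5; /* just before wrap */
	jiffies_t now = 10;                 /* just after wrap */

	/* Elapsed is 15 jiffies despite the wrap-around. */
	printf("%d\n", time_after_eq(now - firstuse, (jiffies_t)10));
	return 0;
}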

+ 3 - 19
net/netfilter/ipvs/ip_vs_wlc.c

@@ -27,22 +27,6 @@
 
 #include <net/ip_vs.h>
 
-
-static inline unsigned int
-ip_vs_wlc_dest_overhead(struct ip_vs_dest *dest)
-{
-	/*
-	 * We think the overhead of processing active connections is 256
-	 * times higher than that of inactive connections in average. (This
-	 * 256 times might not be accurate, we will change it later) We
-	 * use the following formula to estimate the overhead now:
-	 *		  dest->activeconns*256 + dest->inactconns
-	 */
-	return (atomic_read(&dest->activeconns) << 8) +
-		atomic_read(&dest->inactconns);
-}
-
-
 /*
  *	Weighted Least Connection scheduling
  */
@@ -71,11 +55,11 @@ ip_vs_wlc_schedule(struct ip_vs_service *svc, const struct sk_buff *skb)
 		if (!(dest->flags & IP_VS_DEST_F_OVERLOAD) &&
 		    atomic_read(&dest->weight) > 0) {
 			least = dest;
-			loh = ip_vs_wlc_dest_overhead(least);
+			loh = ip_vs_dest_conn_overhead(least);
 			goto nextstage;
 		}
 	}
-	IP_VS_ERR_RL("WLC: no destination available\n");
+	ip_vs_scheduler_err(svc, "no destination available");
 	return NULL;
 
 	/*
@@ -85,7 +69,7 @@ ip_vs_wlc_schedule(struct ip_vs_service *svc, const struct sk_buff *skb)
 	list_for_each_entry_continue(dest, &svc->destinations, n_list) {
 		if (dest->flags & IP_VS_DEST_F_OVERLOAD)
 			continue;
-		doh = ip_vs_wlc_dest_overhead(dest);
+		doh = ip_vs_dest_conn_overhead(dest);
 		if (loh * atomic_read(&dest->weight) >
 		    doh * atomic_read(&least->weight)) {
 			least = dest;

+ 8 - 6
net/netfilter/ipvs/ip_vs_wrr.c

@@ -147,8 +147,9 @@ ip_vs_wrr_schedule(struct ip_vs_service *svc, const struct sk_buff *skb)
 
 			if (mark->cl == mark->cl->next) {
 				/* no dest entry */
-				IP_VS_ERR_RL("WRR: no destination available: "
-					     "no destinations present\n");
+				ip_vs_scheduler_err(svc,
+					"no destination available: "
+					"no destinations present");
 				dest = NULL;
 				goto out;
 			}
@@ -162,8 +163,8 @@ ip_vs_wrr_schedule(struct ip_vs_service *svc, const struct sk_buff *skb)
 				 */
 				if (mark->cw == 0) {
 					mark->cl = &svc->destinations;
-					IP_VS_ERR_RL("WRR: no destination "
-						     "available\n");
+					ip_vs_scheduler_err(svc,
+						"no destination available");
 					dest = NULL;
 					goto out;
 				}
@@ -185,8 +186,9 @@ ip_vs_wrr_schedule(struct ip_vs_service *svc, const struct sk_buff *skb)
 			/* back to the start, and no dest is found.
 			   It is only possible when all dests are OVERLOADED */
 			dest = NULL;
-			IP_VS_ERR_RL("WRR: no destination available: "
-				     "all destinations are overloaded\n");
+			ip_vs_scheduler_err(svc,
+				"no destination available: "
+				"all destinations are overloaded");
 			goto out;
 		}
 	}

+ 27 - 14
net/netfilter/ipvs/ip_vs_xmit.c

@@ -43,6 +43,13 @@
 
 #include <net/ip_vs.h>
 
+enum {
+	IP_VS_RT_MODE_LOCAL	= 1, /* Allow local dest */
+	IP_VS_RT_MODE_NON_LOCAL	= 2, /* Allow non-local dest */
+	IP_VS_RT_MODE_RDR	= 4, /* Allow redirect from remote daddr to
+				      * local
+				      */
+};
 
 /*
  *      Destination cache to speed up outgoing route lookup
@@ -77,11 +84,7 @@ __ip_vs_dst_check(struct ip_vs_dest *dest, u32 rtos)
 	return dst;
 }
 
-/*
- * Get route to destination or remote server
- * rt_mode: flags, &1=Allow local dest, &2=Allow non-local dest,
- *	    &4=Allow redirect from remote daddr to local
- */
+/* Get route to destination or remote server */
 static struct rtable *
 __ip_vs_get_out_rt(struct sk_buff *skb, struct ip_vs_dest *dest,
 		   __be32 daddr, u32 rtos, int rt_mode)
@@ -126,15 +129,16 @@ __ip_vs_get_out_rt(struct sk_buff *skb, struct ip_vs_dest *dest,
 	}
 
 	local = rt->rt_flags & RTCF_LOCAL;
-	if (!((local ? 1 : 2) & rt_mode)) {
+	if (!((local ? IP_VS_RT_MODE_LOCAL : IP_VS_RT_MODE_NON_LOCAL) &
+	      rt_mode)) {
 		IP_VS_DBG_RL("Stopping traffic to %s address, dest: %pI4\n",
 			     (rt->rt_flags & RTCF_LOCAL) ?
 			     "local":"non-local", &rt->rt_dst);
 		ip_rt_put(rt);
 		return NULL;
 	}
-	if (local && !(rt_mode & 4) && !((ort = skb_rtable(skb)) &&
-					 ort->rt_flags & RTCF_LOCAL)) {
+	if (local && !(rt_mode & IP_VS_RT_MODE_RDR) &&
+	    !((ort = skb_rtable(skb)) && ort->rt_flags & RTCF_LOCAL)) {
 		IP_VS_DBG_RL("Redirect from non-local address %pI4 to local "
 			     "requires NAT method, dest: %pI4\n",
 			     &ip_hdr(skb)->daddr, &rt->rt_dst);
@@ -383,8 +387,8 @@ ip_vs_bypass_xmit(struct sk_buff *skb, struct ip_vs_conn *cp,
 
 	EnterFunction(10);
 
-	if (!(rt = __ip_vs_get_out_rt(skb, NULL, iph->daddr,
-				      RT_TOS(iph->tos), 2)))
+	if (!(rt = __ip_vs_get_out_rt(skb, NULL, iph->daddr, RT_TOS(iph->tos),
+				      IP_VS_RT_MODE_NON_LOCAL)))
 		goto tx_error_icmp;
 
 	/* MTU checking */
@@ -512,7 +516,10 @@ ip_vs_nat_xmit(struct sk_buff *skb, struct ip_vs_conn *cp,
 	}
 
 	if (!(rt = __ip_vs_get_out_rt(skb, cp->dest, cp->daddr.ip,
-				      RT_TOS(iph->tos), 1|2|4)))
+				      RT_TOS(iph->tos),
+				      IP_VS_RT_MODE_LOCAL |
+					IP_VS_RT_MODE_NON_LOCAL |
+					IP_VS_RT_MODE_RDR)))
 		goto tx_error_icmp;
 	local = rt->rt_flags & RTCF_LOCAL;
 	/*
@@ -755,7 +762,8 @@ ip_vs_tunnel_xmit(struct sk_buff *skb, struct ip_vs_conn *cp,
 	EnterFunction(10);
 
 	if (!(rt = __ip_vs_get_out_rt(skb, cp->dest, cp->daddr.ip,
-				      RT_TOS(tos), 1|2)))
+				      RT_TOS(tos), IP_VS_RT_MODE_LOCAL |
+						   IP_VS_RT_MODE_NON_LOCAL)))
 		goto tx_error_icmp;
 	if (rt->rt_flags & RTCF_LOCAL) {
 		ip_rt_put(rt);
@@ -984,7 +992,9 @@ ip_vs_dr_xmit(struct sk_buff *skb, struct ip_vs_conn *cp,
 	EnterFunction(10);
 
 	if (!(rt = __ip_vs_get_out_rt(skb, cp->dest, cp->daddr.ip,
-				      RT_TOS(iph->tos), 1|2)))
+				      RT_TOS(iph->tos),
+				      IP_VS_RT_MODE_LOCAL |
+					IP_VS_RT_MODE_NON_LOCAL)))
 		goto tx_error_icmp;
 	if (rt->rt_flags & RTCF_LOCAL) {
 		ip_rt_put(rt);
@@ -1128,7 +1138,10 @@ ip_vs_icmp_xmit(struct sk_buff *skb, struct ip_vs_conn *cp,
 	 */
 
 	if (!(rt = __ip_vs_get_out_rt(skb, cp->dest, cp->daddr.ip,
-				      RT_TOS(ip_hdr(skb)->tos), 1|2|4)))
+				      RT_TOS(ip_hdr(skb)->tos),
+				      IP_VS_RT_MODE_LOCAL |
+					IP_VS_RT_MODE_NON_LOCAL |
+					IP_VS_RT_MODE_RDR)))
 		goto tx_error_icmp;
 	local = rt->rt_flags & RTCF_LOCAL;
 
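
The named IP_VS_RT_MODE_* flags replace the bare 1|2|4 masks used in the callers above. A standalone illustration of the locality test from __ip_vs_get_out_rt() (plain C):

#include <stdio.h>

enum {
	IP_VS_RT_MODE_LOCAL	= 1, /* Allow local dest */
	IP_VS_RT_MODE_NON_LOCAL	= 2, /* Allow non-local dest */
	IP_VS_RT_MODE_RDR	= 4, /* Allow redirect to local */
};

/* Mirrors the check in __ip_vs_get_out_rt(): reject the route if
 * its locality is not among the allowed modes. */
static int route_allowed(int local, int rt_mode)
{
	return !!((local ? IP_VS_RT_MODE_LOCAL : IP_VS_RT_MODE_NON_LOCAL) &
		  rt_mode);
}

int main(void)
{
	/* ip_vs_bypass_xmit() allows only non-local destinations. */
	printf("%d\n", route_allowed(1, IP_VS_RT_MODE_NON_LOCAL)); /* 0 */
	printf("%d\n", route_allowed(0, IP_VS_RT_MODE_NON_LOCAL)); /* 1 */
	return 0;
}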

+ 2 - 2
net/netfilter/nf_conntrack_proto_tcp.c

@@ -227,11 +227,11 @@ static const u8 tcp_conntracks[2][6][TCP_CONNTRACK_MAX] = {
  *	sCL -> sIV
  */
 /* 	     sNO, sSS, sSR, sES, sFW, sCW, sLA, sTW, sCL, sS2	*/
-/*synack*/ { sIV, sSR, sSR, sIG, sIG, sIG, sIG, sIG, sIG, sSR },
+/*synack*/ { sIV, sSR, sIG, sIG, sIG, sIG, sIG, sIG, sIG, sSR },
 /*
  *	sSS -> sSR	Standard open.
  *	sS2 -> sSR	Simultaneous open
- *	sSR -> sSR	Retransmitted SYN/ACK.
+ *	sSR -> sIG	Retransmitted SYN/ACK, ignore it.
  *	sES -> sIG	Late retransmitted SYN/ACK?
  *	sFW -> sIG	Might be SYN/ACK answering ignored SYN
  *	sCW -> sIG

+ 1 - 2
net/netfilter/nfnetlink_log.c

@@ -376,7 +376,6 @@ __build_packet_message(struct nfulnl_instance *inst,
 			unsigned int hooknum,
 			const struct net_device *indev,
 			const struct net_device *outdev,
-			const struct nf_loginfo *li,
 			const char *prefix, unsigned int plen)
 {
 	struct nfulnl_msg_packet_hdr pmsg;
@@ -652,7 +651,7 @@ nfulnl_log_packet(u_int8_t pf,
 	inst->qlen++;
 
 	__build_packet_message(inst, skb, data_len, pf,
-				hooknum, in, out, li, prefix, plen);
+				hooknum, in, out, prefix, plen);
 
 	if (inst->qlen >= qthreshold)
 		__nfulnl_flush(inst);

+ 5 - 0
net/netfilter/xt_conntrack.c

@@ -272,6 +272,11 @@ static int conntrack_mt_check(const struct xt_mtchk_param *par)
 {
 	int ret;
 
+	if (strcmp(par->table, "raw") == 0) {
+		pr_info("state is undetermined at the time of raw table\n");
+		return -EINVAL;
+	}
+
 	ret = nf_ct_l3proto_try_module_get(par->family);
 	if (ret < 0)
 		pr_info("cannot load conntrack support for proto=%u\n",