@@ -39,6 +39,7 @@
 #include <net/icmp.h>
 #include <net/checksum.h>
 #include <net/inetpeer.h>
+#include <net/inet_frag.h>
 #include <linux/tcp.h>
 #include <linux/udp.h>
 #include <linux/inet.h>
@@ -49,21 +50,8 @@
  * as well. Or notify me, at least. --ANK
  */
 
-/* Fragment cache limits. We will commit 256K at one time. Should we
- * cross that limit we will prune down to 192K. This should cope with
- * even the most extreme cases without allowing an attacker to measurably
- * harm machine performance.
- */
-int sysctl_ipfrag_high_thresh __read_mostly = 256*1024;
-int sysctl_ipfrag_low_thresh __read_mostly = 192*1024;
-
 int sysctl_ipfrag_max_dist __read_mostly = 64;
 
-/* Important NOTE! Fragment queue must be destroyed before MSL expires.
- * RFC791 is wrong proposing to prolongate timer each fragment arrival by TTL.
- */
-int sysctl_ipfrag_time __read_mostly = IP_FRAG_TIME;
-
 struct ipfrag_skb_cb
 {
 	struct inet_skb_parm h;
@@ -74,153 +62,102 @@ struct ipfrag_skb_cb
 
 /* Describe an entry in the "incomplete datagrams" queue. */
 struct ipq {
-	struct hlist_node list;
-	struct list_head lru_list; /* lru list member */
+	struct inet_frag_queue q;
+
 	u32 user;
 	__be32 saddr;
 	__be32 daddr;
 	__be16 id;
 	u8 protocol;
-	u8 last_in;
-#define COMPLETE 4
-#define FIRST_IN 2
-#define LAST_IN 1
-
-	struct sk_buff *fragments; /* linked list of received fragments */
-	int len; /* total length of original datagram */
-	int meat;
-	spinlock_t lock;
-	atomic_t refcnt;
-	struct timer_list timer; /* when will this queue expire? */
-	ktime_t stamp;
 	int iif;
 	unsigned int rid;
 	struct inet_peer *peer;
 };
 
-/* Hash table. */
+struct inet_frags_ctl ip4_frags_ctl __read_mostly = {
+	/*
+	 * Fragment cache limits. We will commit 256K at one time. Should we
+	 * cross that limit we will prune down to 192K. This should cope with
+	 * even the most extreme cases without allowing an attacker to
+	 * measurably harm machine performance.
+	 */
+	.high_thresh = 256 * 1024,
+	.low_thresh = 192 * 1024,
 
-#define IPQ_HASHSZ 64
+	/*
+	 * Important NOTE! Fragment queue must be destroyed before MSL expires.
+	 * RFC791 is wrong proposing to prolongate timer each fragment arrival
+	 * by TTL.
+	 */
+	.timeout = IP_FRAG_TIME,
+	.secret_interval = 10 * 60 * HZ,
+};
 
-/* Per-bucket lock is easy to add now. */
-static struct hlist_head ipq_hash[IPQ_HASHSZ];
-static DEFINE_RWLOCK(ipfrag_lock);
-static u32 ipfrag_hash_rnd;
-static LIST_HEAD(ipq_lru_list);
-int ip_frag_nqueues = 0;
+static struct inet_frags ip4_frags;
 
-static __inline__ void __ipq_unlink(struct ipq *qp)
+int ip_frag_nqueues(void)
 {
-	hlist_del(&qp->list);
-	list_del(&qp->lru_list);
-	ip_frag_nqueues--;
+	return ip4_frags.nqueues;
 }
 
-static __inline__ void ipq_unlink(struct ipq *ipq)
+int ip_frag_mem(void)
 {
-	write_lock(&ipfrag_lock);
-	__ipq_unlink(ipq);
-	write_unlock(&ipfrag_lock);
+	return atomic_read(&ip4_frags.mem);
 }
 
+static int ip_frag_reasm(struct ipq *qp, struct sk_buff *prev,
+			 struct net_device *dev);
+
 static unsigned int ipqhashfn(__be16 id, __be32 saddr, __be32 daddr, u8 prot)
 {
 	return jhash_3words((__force u32)id << 16 | prot,
 			    (__force u32)saddr, (__force u32)daddr,
-			    ipfrag_hash_rnd) & (IPQ_HASHSZ - 1);
+			    ip4_frags.rnd) & (INETFRAGS_HASHSZ - 1);
 }
 
-static struct timer_list ipfrag_secret_timer;
-int sysctl_ipfrag_secret_interval __read_mostly = 10 * 60 * HZ;
-
-static void ipfrag_secret_rebuild(unsigned long dummy)
+static unsigned int ip4_hashfn(struct inet_frag_queue *q)
 {
-	unsigned long now = jiffies;
-	int i;
-
-	write_lock(&ipfrag_lock);
-	get_random_bytes(&ipfrag_hash_rnd, sizeof(u32));
-	for (i = 0; i < IPQ_HASHSZ; i++) {
-		struct ipq *q;
-		struct hlist_node *p, *n;
-
-		hlist_for_each_entry_safe(q, p, n, &ipq_hash[i], list) {
-			unsigned int hval = ipqhashfn(q->id, q->saddr,
-						      q->daddr, q->protocol);
-
-			if (hval != i) {
-				hlist_del(&q->list);
+	struct ipq *ipq;
 
-				/* Relink to new hash chain. */
-				hlist_add_head(&q->list, &ipq_hash[hval]);
-			}
-		}
-	}
-	write_unlock(&ipfrag_lock);
-
-	mod_timer(&ipfrag_secret_timer, now + sysctl_ipfrag_secret_interval);
+	ipq = container_of(q, struct ipq, q);
+	return ipqhashfn(ipq->id, ipq->saddr, ipq->daddr, ipq->protocol);
 }
 
-atomic_t ip_frag_mem = ATOMIC_INIT(0); /* Memory used for fragments */
-
 /* Memory Tracking Functions. */
 static __inline__ void frag_kfree_skb(struct sk_buff *skb, int *work)
 {
 	if (work)
 		*work -= skb->truesize;
-	atomic_sub(skb->truesize, &ip_frag_mem);
+	atomic_sub(skb->truesize, &ip4_frags.mem);
 	kfree_skb(skb);
 }
 
-static __inline__ void frag_free_queue(struct ipq *qp, int *work)
+static __inline__ void ip4_frag_free(struct inet_frag_queue *q)
 {
-	if (work)
-		*work -= sizeof(struct ipq);
-	atomic_sub(sizeof(struct ipq), &ip_frag_mem);
+	struct ipq *qp;
+
+	qp = container_of(q, struct ipq, q);
+	if (qp->peer)
+		inet_putpeer(qp->peer);
 	kfree(qp);
 }
 
 static __inline__ struct ipq *frag_alloc_queue(void)
 {
-	struct ipq *qp = kmalloc(sizeof(struct ipq), GFP_ATOMIC);
+	struct ipq *qp = kzalloc(sizeof(struct ipq), GFP_ATOMIC);
 
 	if (!qp)
 		return NULL;
-	atomic_add(sizeof(struct ipq), &ip_frag_mem);
+	atomic_add(sizeof(struct ipq), &ip4_frags.mem);
 	return qp;
 }
 
 
 /* Destruction primitives. */
 
-/* Complete destruction of ipq. */
-static void ip_frag_destroy(struct ipq *qp, int *work)
-{
-	struct sk_buff *fp;
-
-	BUG_TRAP(qp->last_in&COMPLETE);
-	BUG_TRAP(del_timer(&qp->timer) == 0);
-
-	if (qp->peer)
-		inet_putpeer(qp->peer);
-
-	/* Release all fragment data. */
-	fp = qp->fragments;
-	while (fp) {
-		struct sk_buff *xp = fp->next;
-
-		frag_kfree_skb(fp, work);
-		fp = xp;
-	}
-
-	/* Finally, release the queue descriptor itself. */
-	frag_free_queue(qp, work);
-}
-
-static __inline__ void ipq_put(struct ipq *ipq, int *work)
+static __inline__ void ipq_put(struct ipq *ipq)
 {
-	if (atomic_dec_and_test(&ipq->refcnt))
-		ip_frag_destroy(ipq, work);
+	inet_frag_put(&ipq->q, &ip4_frags);
 }
 
 /* Kill ipq entry. It is not destroyed immediately,
@@ -228,14 +165,7 @@ static __inline__ void ipq_put(struct ipq *ipq, int *work)
  */
 static void ipq_kill(struct ipq *ipq)
 {
-	if (del_timer(&ipq->timer))
-		atomic_dec(&ipq->refcnt);
-
-	if (!(ipq->last_in & COMPLETE)) {
-		ipq_unlink(ipq);
-		atomic_dec(&ipq->refcnt);
-		ipq->last_in |= COMPLETE;
-	}
+	inet_frag_kill(&ipq->q, &ip4_frags);
 }
 
 /* Memory limiting on fragments. Evictor trashes the oldest
@@ -243,33 +173,11 @@ static void ipq_kill(struct ipq *ipq)
  */
 static void ip_evictor(void)
 {
-	struct ipq *qp;
-	struct list_head *tmp;
-	int work;
-
-	work = atomic_read(&ip_frag_mem) - sysctl_ipfrag_low_thresh;
-	if (work <= 0)
-		return;
-
-	while (work > 0) {
-		read_lock(&ipfrag_lock);
-		if (list_empty(&ipq_lru_list)) {
-			read_unlock(&ipfrag_lock);
-			return;
-		}
-		tmp = ipq_lru_list.next;
-		qp = list_entry(tmp, struct ipq, lru_list);
-		atomic_inc(&qp->refcnt);
-		read_unlock(&ipfrag_lock);
+	int evicted;
 
-		spin_lock(&qp->lock);
-		if (!(qp->last_in&COMPLETE))
-			ipq_kill(qp);
-		spin_unlock(&qp->lock);
-
-		ipq_put(qp, &work);
-		IP_INC_STATS_BH(IPSTATS_MIB_REASMFAILS);
-	}
+	evicted = inet_frag_evictor(&ip4_frags);
+	if (evicted)
+		IP_ADD_STATS_BH(IPSTATS_MIB_REASMFAILS, evicted);
 }
 
 /*
@@ -279,9 +187,9 @@ static void ip_expire(unsigned long arg)
 {
 	struct ipq *qp = (struct ipq *) arg;
 
-	spin_lock(&qp->lock);
+	spin_lock(&qp->q.lock);
 
-	if (qp->last_in & COMPLETE)
+	if (qp->q.last_in & COMPLETE)
 		goto out;
 
 	ipq_kill(qp);
@@ -289,8 +197,8 @@ static void ip_expire(unsigned long arg)
 	IP_INC_STATS_BH(IPSTATS_MIB_REASMTIMEOUT);
 	IP_INC_STATS_BH(IPSTATS_MIB_REASMFAILS);
 
-	if ((qp->last_in&FIRST_IN) && qp->fragments != NULL) {
-		struct sk_buff *head = qp->fragments;
+	if ((qp->q.last_in&FIRST_IN) && qp->q.fragments != NULL) {
+		struct sk_buff *head = qp->q.fragments;
 		/* Send an ICMP "Fragment Reassembly Timeout" message. */
 		if ((head->dev = dev_get_by_index(&init_net, qp->iif)) != NULL) {
 			icmp_send(head, ICMP_TIME_EXCEEDED, ICMP_EXC_FRAGTIME, 0);
@@ -298,8 +206,8 @@ static void ip_expire(unsigned long arg)
 		}
 	}
 out:
-	spin_unlock(&qp->lock);
-	ipq_put(qp, NULL);
+	spin_unlock(&qp->q.lock);
+	ipq_put(qp);
 }
 
 /* Creation primitives. */
@@ -312,7 +220,7 @@ static struct ipq *ip_frag_intern(struct ipq *qp_in)
 #endif
 	unsigned int hash;
 
-	write_lock(&ipfrag_lock);
+	write_lock(&ip4_frags.lock);
 	hash = ipqhashfn(qp_in->id, qp_in->saddr, qp_in->daddr,
 			 qp_in->protocol);
 #ifdef CONFIG_SMP
@@ -320,31 +228,31 @@ static struct ipq *ip_frag_intern(struct ipq *qp_in)
 	 * such entry could be created on other cpu, while we
 	 * promoted read lock to write lock.
 	 */
-	hlist_for_each_entry(qp, n, &ipq_hash[hash], list) {
+	hlist_for_each_entry(qp, n, &ip4_frags.hash[hash], q.list) {
 		if (qp->id == qp_in->id &&
 		    qp->saddr == qp_in->saddr &&
 		    qp->daddr == qp_in->daddr &&
 		    qp->protocol == qp_in->protocol &&
 		    qp->user == qp_in->user) {
-			atomic_inc(&qp->refcnt);
-			write_unlock(&ipfrag_lock);
-			qp_in->last_in |= COMPLETE;
-			ipq_put(qp_in, NULL);
+			atomic_inc(&qp->q.refcnt);
+			write_unlock(&ip4_frags.lock);
+			qp_in->q.last_in |= COMPLETE;
+			ipq_put(qp_in);
 			return qp;
 		}
 	}
 #endif
 	qp = qp_in;
 
-	if (!mod_timer(&qp->timer, jiffies + sysctl_ipfrag_time))
-		atomic_inc(&qp->refcnt);
+	if (!mod_timer(&qp->q.timer, jiffies + ip4_frags_ctl.timeout))
+		atomic_inc(&qp->q.refcnt);
 
-	atomic_inc(&qp->refcnt);
-	hlist_add_head(&qp->list, &ipq_hash[hash]);
-	INIT_LIST_HEAD(&qp->lru_list);
-	list_add_tail(&qp->lru_list, &ipq_lru_list);
-	ip_frag_nqueues++;
-	write_unlock(&ipfrag_lock);
+	atomic_inc(&qp->q.refcnt);
+	hlist_add_head(&qp->q.list, &ip4_frags.hash[hash]);
+	INIT_LIST_HEAD(&qp->q.lru_list);
+	list_add_tail(&qp->q.lru_list, &ip4_frags.lru_list);
+	ip4_frags.nqueues++;
+	write_unlock(&ip4_frags.lock);
 	return qp;
 }
 
@@ -357,23 +265,18 @@ static struct ipq *ip_frag_create(struct iphdr *iph, u32 user)
 		goto out_nomem;
 
 	qp->protocol = iph->protocol;
-	qp->last_in = 0;
 	qp->id = iph->id;
 	qp->saddr = iph->saddr;
 	qp->daddr = iph->daddr;
 	qp->user = user;
-	qp->len = 0;
-	qp->meat = 0;
-	qp->fragments = NULL;
-	qp->iif = 0;
 	qp->peer = sysctl_ipfrag_max_dist ? inet_getpeer(iph->saddr, 1) : NULL;
 
 	/* Initialize a timer for this entry. */
-	init_timer(&qp->timer);
-	qp->timer.data = (unsigned long) qp; /* pointer to queue */
-	qp->timer.function = ip_expire; /* expire function */
-	spin_lock_init(&qp->lock);
-	atomic_set(&qp->refcnt, 1);
+	init_timer(&qp->q.timer);
+	qp->q.timer.data = (unsigned long) qp; /* pointer to queue */
+	qp->q.timer.function = ip_expire; /* expire function */
+	spin_lock_init(&qp->q.lock);
+	atomic_set(&qp->q.refcnt, 1);
 
 	return ip_frag_intern(qp);
 
@@ -395,20 +298,20 @@ static inline struct ipq *ip_find(struct iphdr *iph, u32 user)
 	struct ipq *qp;
 	struct hlist_node *n;
 
-	read_lock(&ipfrag_lock);
+	read_lock(&ip4_frags.lock);
 	hash = ipqhashfn(id, saddr, daddr, protocol);
-	hlist_for_each_entry(qp, n, &ipq_hash[hash], list) {
+	hlist_for_each_entry(qp, n, &ip4_frags.hash[hash], q.list) {
 		if (qp->id == id &&
 		    qp->saddr == saddr &&
 		    qp->daddr == daddr &&
 		    qp->protocol == protocol &&
 		    qp->user == user) {
-			atomic_inc(&qp->refcnt);
-			read_unlock(&ipfrag_lock);
+			atomic_inc(&qp->q.refcnt);
+			read_unlock(&ip4_frags.lock);
 			return qp;
 		}
 	}
-	read_unlock(&ipfrag_lock);
+	read_unlock(&ip4_frags.lock);
 
 	return ip_frag_create(iph, user);
 }
@@ -429,7 +332,7 @@ static inline int ip_frag_too_far(struct ipq *qp)
 	end = atomic_inc_return(&peer->rid);
 	qp->rid = end;
 
-	rc = qp->fragments && (end - start) > max;
+	rc = qp->q.fragments && (end - start) > max;
 
 	if (rc) {
 		IP_INC_STATS_BH(IPSTATS_MIB_REASMFAILS);
@@ -442,39 +345,42 @@ static int ip_frag_reinit(struct ipq *qp)
 {
 	struct sk_buff *fp;
 
-	if (!mod_timer(&qp->timer, jiffies + sysctl_ipfrag_time)) {
-		atomic_inc(&qp->refcnt);
+	if (!mod_timer(&qp->q.timer, jiffies + ip4_frags_ctl.timeout)) {
+		atomic_inc(&qp->q.refcnt);
 		return -ETIMEDOUT;
 	}
 
-	fp = qp->fragments;
+	fp = qp->q.fragments;
 	do {
 		struct sk_buff *xp = fp->next;
 		frag_kfree_skb(fp, NULL);
 		fp = xp;
 	} while (fp);
 
-	qp->last_in = 0;
-	qp->len = 0;
-	qp->meat = 0;
-	qp->fragments = NULL;
+	qp->q.last_in = 0;
+	qp->q.len = 0;
+	qp->q.meat = 0;
+	qp->q.fragments = NULL;
 	qp->iif = 0;
 
 	return 0;
 }
 
 /* Add new segment to existing queue. */
-static void ip_frag_queue(struct ipq *qp, struct sk_buff *skb)
+static int ip_frag_queue(struct ipq *qp, struct sk_buff *skb)
 {
 	struct sk_buff *prev, *next;
+	struct net_device *dev;
 	int flags, offset;
 	int ihl, end;
+	int err = -ENOENT;
 
-	if (qp->last_in & COMPLETE)
+	if (qp->q.last_in & COMPLETE)
 		goto err;
 
 	if (!(IPCB(skb)->flags & IPSKB_FRAG_COMPLETE) &&
-	    unlikely(ip_frag_too_far(qp)) && unlikely(ip_frag_reinit(qp))) {
+	    unlikely(ip_frag_too_far(qp)) &&
+	    unlikely(err = ip_frag_reinit(qp))) {
 		ipq_kill(qp);
 		goto err;
 	}
@@ -487,36 +393,40 @@ static void ip_frag_queue(struct ipq *qp, struct sk_buff *skb)
 
 	/* Determine the position of this fragment. */
 	end = offset + skb->len - ihl;
+	err = -EINVAL;
 
 	/* Is this the final fragment? */
 	if ((flags & IP_MF) == 0) {
 		/* If we already have some bits beyond end
 		 * or have different end, the segment is corrrupted.
 		 */
-		if (end < qp->len ||
-		    ((qp->last_in & LAST_IN) && end != qp->len))
+		if (end < qp->q.len ||
+		    ((qp->q.last_in & LAST_IN) && end != qp->q.len))
 			goto err;
-		qp->last_in |= LAST_IN;
-		qp->len = end;
+		qp->q.last_in |= LAST_IN;
+		qp->q.len = end;
 	} else {
 		if (end&7) {
 			end &= ~7;
 			if (skb->ip_summed != CHECKSUM_UNNECESSARY)
 				skb->ip_summed = CHECKSUM_NONE;
 		}
-		if (end > qp->len) {
+		if (end > qp->q.len) {
 			/* Some bits beyond end -> corruption. */
-			if (qp->last_in & LAST_IN)
+			if (qp->q.last_in & LAST_IN)
 				goto err;
-			qp->len = end;
+			qp->q.len = end;
 		}
 	}
 	if (end == offset)
 		goto err;
 
+	err = -ENOMEM;
 	if (pskb_pull(skb, ihl) == NULL)
 		goto err;
-	if (pskb_trim_rcsum(skb, end-offset))
+
+	err = pskb_trim_rcsum(skb, end - offset);
+	if (err)
 		goto err;
 
 	/* Find out which fragments are in front and at the back of us
@@ -524,7 +434,7 @@ static void ip_frag_queue(struct ipq *qp, struct sk_buff *skb)
 	 * this fragment, right?
 	 */
 	prev = NULL;
-	for (next = qp->fragments; next != NULL; next = next->next) {
+	for (next = qp->q.fragments; next != NULL; next = next->next) {
 		if (FRAG_CB(next)->offset >= offset)
 			break; /* bingo! */
 		prev = next;
@@ -539,8 +449,10 @@ static void ip_frag_queue(struct ipq *qp, struct sk_buff *skb)
 
 		if (i > 0) {
 			offset += i;
+			err = -EINVAL;
 			if (end <= offset)
 				goto err;
+			err = -ENOMEM;
 			if (!pskb_pull(skb, i))
 				goto err;
 			if (skb->ip_summed != CHECKSUM_UNNECESSARY)
@@ -548,6 +460,8 @@ static void ip_frag_queue(struct ipq *qp, struct sk_buff *skb)
 		}
 	}
 
+	err = -ENOMEM;
+
 	while (next && FRAG_CB(next)->offset < end) {
 		int i = end - FRAG_CB(next)->offset; /* overlap is 'i' bytes */
 
@@ -558,7 +472,7 @@ static void ip_frag_queue(struct ipq *qp, struct sk_buff *skb)
 			if (!pskb_pull(next, i))
 				goto err;
 			FRAG_CB(next)->offset += i;
-			qp->meat -= i;
+			qp->q.meat -= i;
 			if (next->ip_summed != CHECKSUM_UNNECESSARY)
 				next->ip_summed = CHECKSUM_NONE;
 			break;
@@ -573,9 +487,9 @@ static void ip_frag_queue(struct ipq *qp, struct sk_buff *skb)
 			if (prev)
 				prev->next = next;
 			else
-				qp->fragments = next;
+				qp->q.fragments = next;
 
-			qp->meat -= free_it->len;
+			qp->q.meat -= free_it->len;
 			frag_kfree_skb(free_it, NULL);
 		}
 	}
@@ -587,50 +501,77 @@ static void ip_frag_queue(struct ipq *qp, struct sk_buff *skb)
 	if (prev)
 		prev->next = skb;
 	else
-		qp->fragments = skb;
-
-	if (skb->dev)
-		qp->iif = skb->dev->ifindex;
-	skb->dev = NULL;
-	qp->stamp = skb->tstamp;
-	qp->meat += skb->len;
-	atomic_add(skb->truesize, &ip_frag_mem);
+		qp->q.fragments = skb;
+
+	dev = skb->dev;
+	if (dev) {
+		qp->iif = dev->ifindex;
+		skb->dev = NULL;
+	}
+	qp->q.stamp = skb->tstamp;
+	qp->q.meat += skb->len;
+	atomic_add(skb->truesize, &ip4_frags.mem);
 	if (offset == 0)
-		qp->last_in |= FIRST_IN;
+		qp->q.last_in |= FIRST_IN;
 
-	write_lock(&ipfrag_lock);
-	list_move_tail(&qp->lru_list, &ipq_lru_list);
-	write_unlock(&ipfrag_lock);
+	if (qp->q.last_in == (FIRST_IN | LAST_IN) && qp->q.meat == qp->q.len)
+		return ip_frag_reasm(qp, prev, dev);
 
-	return;
+	write_lock(&ip4_frags.lock);
+	list_move_tail(&qp->q.lru_list, &ip4_frags.lru_list);
+	write_unlock(&ip4_frags.lock);
+	return -EINPROGRESS;
 
 err:
 	kfree_skb(skb);
+	return err;
 }
 
 
 /* Build a new IP datagram from all its fragments. */
 
-static struct sk_buff *ip_frag_reasm(struct ipq *qp, struct net_device *dev)
+static int ip_frag_reasm(struct ipq *qp, struct sk_buff *prev,
+			 struct net_device *dev)
 {
 	struct iphdr *iph;
-	struct sk_buff *fp, *head = qp->fragments;
+	struct sk_buff *fp, *head = qp->q.fragments;
 	int len;
 	int ihlen;
+	int err;
 
 	ipq_kill(qp);
 
+	/* Make the one we just received the head. */
+	if (prev) {
+		head = prev->next;
+		fp = skb_clone(head, GFP_ATOMIC);
+
+		if (!fp)
+			goto out_nomem;
+
+		fp->next = head->next;
+		prev->next = fp;
+
+		skb_morph(head, qp->q.fragments);
+		head->next = qp->q.fragments->next;
+
+		kfree_skb(qp->q.fragments);
+		qp->q.fragments = head;
+	}
+
 	BUG_TRAP(head != NULL);
 	BUG_TRAP(FRAG_CB(head)->offset == 0);
 
 	/* Allocate a new buffer for the datagram. */
 	ihlen = ip_hdrlen(head);
-	len = ihlen + qp->len;
+	len = ihlen + qp->q.len;
 
+	err = -E2BIG;
 	if (len > 65535)
 		goto out_oversize;
 
 	/* Head of list must not be cloned. */
+	err = -ENOMEM;
 	if (skb_cloned(head) && pskb_expand_head(head, 0, 0, GFP_ATOMIC))
 		goto out_nomem;
 
@@ -654,12 +595,12 @@ static struct sk_buff *ip_frag_reasm(struct ipq *qp, struct net_device *dev)
 		head->len -= clone->len;
 		clone->csum = 0;
 		clone->ip_summed = head->ip_summed;
-		atomic_add(clone->truesize, &ip_frag_mem);
+		atomic_add(clone->truesize, &ip4_frags.mem);
 	}
 
 	skb_shinfo(head)->frag_list = head->next;
 	skb_push(head, head->data - skb_network_header(head));
-	atomic_sub(head->truesize, &ip_frag_mem);
+	atomic_sub(head->truesize, &ip4_frags.mem);
 
 	for (fp=head->next; fp; fp = fp->next) {
 		head->data_len += fp->len;
@@ -669,19 +610,19 @@ static struct sk_buff *ip_frag_reasm(struct ipq *qp, struct net_device *dev)
 		else if (head->ip_summed == CHECKSUM_COMPLETE)
 			head->csum = csum_add(head->csum, fp->csum);
 		head->truesize += fp->truesize;
-		atomic_sub(fp->truesize, &ip_frag_mem);
+		atomic_sub(fp->truesize, &ip4_frags.mem);
 	}
 
 	head->next = NULL;
 	head->dev = dev;
-	head->tstamp = qp->stamp;
+	head->tstamp = qp->q.stamp;
 
 	iph = ip_hdr(head);
 	iph->frag_off = 0;
 	iph->tot_len = htons(len);
 	IP_INC_STATS_BH(IPSTATS_MIB_REASMOKS);
-	qp->fragments = NULL;
-	return head;
+	qp->q.fragments = NULL;
+	return 0;
 
 out_nomem:
 	LIMIT_NETDEBUG(KERN_ERR "IP: queue_glue: no memory for gluing "
@@ -694,54 +635,46 @@ out_oversize:
 		       NIPQUAD(qp->saddr));
 out_fail:
 	IP_INC_STATS_BH(IPSTATS_MIB_REASMFAILS);
-	return NULL;
+	return err;
 }
 
 /* Process an incoming IP datagram fragment. */
-struct sk_buff *ip_defrag(struct sk_buff *skb, u32 user)
+int ip_defrag(struct sk_buff *skb, u32 user)
 {
 	struct ipq *qp;
-	struct net_device *dev;
 
 	IP_INC_STATS_BH(IPSTATS_MIB_REASMREQDS);
 
 	/* Start by cleaning up the memory. */
-	if (atomic_read(&ip_frag_mem) > sysctl_ipfrag_high_thresh)
+	if (atomic_read(&ip4_frags.mem) > ip4_frags_ctl.high_thresh)
 		ip_evictor();
 
-	dev = skb->dev;
-
 	/* Lookup (or create) queue header */
 	if ((qp = ip_find(ip_hdr(skb), user)) != NULL) {
-		struct sk_buff *ret = NULL;
-
-		spin_lock(&qp->lock);
+		int ret;
 
-		ip_frag_queue(qp, skb);
+		spin_lock(&qp->q.lock);
 
-		if (qp->last_in == (FIRST_IN|LAST_IN) &&
-		    qp->meat == qp->len)
-			ret = ip_frag_reasm(qp, dev);
+		ret = ip_frag_queue(qp, skb);
 
-		spin_unlock(&qp->lock);
-		ipq_put(qp, NULL);
+		spin_unlock(&qp->q.lock);
+		ipq_put(qp);
 		return ret;
 	}
 
 	IP_INC_STATS_BH(IPSTATS_MIB_REASMFAILS);
 	kfree_skb(skb);
-	return NULL;
+	return -ENOMEM;
}
 
 void __init ipfrag_init(void)
 {
-	ipfrag_hash_rnd = (u32) ((num_physpages ^ (num_physpages>>7)) ^
-				 (jiffies ^ (jiffies >> 6)));
-
-	init_timer(&ipfrag_secret_timer);
-	ipfrag_secret_timer.function = ipfrag_secret_rebuild;
-	ipfrag_secret_timer.expires = jiffies + sysctl_ipfrag_secret_interval;
-	add_timer(&ipfrag_secret_timer);
+	ip4_frags.ctl = &ip4_frags_ctl;
+	ip4_frags.hashfn = ip4_hashfn;
+	ip4_frags.destructor = ip4_frag_free;
+	ip4_frags.skb_free = NULL;
+	ip4_frags.qsize = sizeof(struct ipq);
+	inet_frags_init(&ip4_frags);
 }
 
 EXPORT_SYMBOL(ip_defrag);