|
@@ -167,6 +167,7 @@ struct channel {
|
|
u8 avail; /* flag used in multilink stuff */
|
|
u8 avail; /* flag used in multilink stuff */
|
|
u8 had_frag; /* >= 1 fragments have been sent */
|
|
u8 had_frag; /* >= 1 fragments have been sent */
|
|
u32 lastseq; /* MP: last sequence # received */
|
|
u32 lastseq; /* MP: last sequence # received */
|
|
|
|
+ int speed; /* speed of the corresponding ppp channel */
|
|
#endif /* CONFIG_PPP_MULTILINK */
|
|
#endif /* CONFIG_PPP_MULTILINK */
|
|
};
|
|
};
|
|
|
|
|
|
@@ -1307,138 +1308,181 @@ ppp_push(struct ppp *ppp)
|
|
*/
|
|
*/
|
|
static int ppp_mp_explode(struct ppp *ppp, struct sk_buff *skb)
|
|
static int ppp_mp_explode(struct ppp *ppp, struct sk_buff *skb)
|
|
{
|
|
{
|
|
- int len, fragsize;
|
|
|
|
- int i, bits, hdrlen, mtu;
|
|
|
|
- int flen;
|
|
|
|
- int navail, nfree;
|
|
|
|
- int nbigger;
|
|
|
|
|
|
+ int len, totlen;
|
|
|
|
+ int i, bits, hdrlen, mtu;
|
|
|
|
+ int flen;
|
|
|
|
+ int navail, nfree, nzero;
|
|
|
|
+ int nbigger;
|
|
|
|
+ int totspeed;
|
|
|
|
+ int totfree;
|
|
unsigned char *p, *q;
|
|
unsigned char *p, *q;
|
|
struct list_head *list;
|
|
struct list_head *list;
|
|
struct channel *pch;
|
|
struct channel *pch;
|
|
struct sk_buff *frag;
|
|
struct sk_buff *frag;
|
|
struct ppp_channel *chan;
|
|
struct ppp_channel *chan;
|
|
|
|
|
|
- nfree = 0; /* # channels which have no packet already queued */
|
|
|
|
|
|
+ totspeed = 0; /*total bitrate of the bundle*/
|
|
|
|
+ nfree = 0; /* # channels which have no packet already queued */
|
|
navail = 0; /* total # of usable channels (not deregistered) */
|
|
navail = 0; /* total # of usable channels (not deregistered) */
|
|
|
|
+ nzero = 0; /* number of channels with zero speed associated */
|
|
|
|
+ totfree = 0; /*total # of channels available and
|
|
|
|
+ *having no queued packets before
|
|
|
|
+ *starting the fragmentation*/
|
|
|
|
+
|
|
hdrlen = (ppp->flags & SC_MP_XSHORTSEQ)? MPHDRLEN_SSN: MPHDRLEN;
|
|
hdrlen = (ppp->flags & SC_MP_XSHORTSEQ)? MPHDRLEN_SSN: MPHDRLEN;
|
|
- i = 0;
|
|
|
|
- list_for_each_entry(pch, &ppp->channels, clist) {
|
|
|
|
|
|
+ i = 0;
|
|
|
|
+ list_for_each_entry(pch, &ppp->channels, clist) {
|
|
navail += pch->avail = (pch->chan != NULL);
|
|
navail += pch->avail = (pch->chan != NULL);
|
|
- if (pch->avail) {
|
|
|
|
|
|
+ pch->speed = pch->chan->speed;
|
|
|
|
+ if (pch->avail) {
|
|
if (skb_queue_empty(&pch->file.xq) ||
|
|
if (skb_queue_empty(&pch->file.xq) ||
|
|
- !pch->had_frag) {
|
|
|
|
- pch->avail = 2;
|
|
|
|
- ++nfree;
|
|
|
|
- }
|
|
|
|
- if (!pch->had_frag && i < ppp->nxchan)
|
|
|
|
- ppp->nxchan = i;
|
|
|
|
|
|
+ !pch->had_frag) {
|
|
|
|
+ if (pch->speed == 0)
|
|
|
|
+ nzero++;
|
|
|
|
+ else
|
|
|
|
+ totspeed += pch->speed;
|
|
|
|
+
|
|
|
|
+ pch->avail = 2;
|
|
|
|
+ ++nfree;
|
|
|
|
+ ++totfree;
|
|
|
|
+ }
|
|
|
|
+ if (!pch->had_frag && i < ppp->nxchan)
|
|
|
|
+ ppp->nxchan = i;
|
|
}
|
|
}
|
|
++i;
|
|
++i;
|
|
}
|
|
}
|
|
-
|
|
|
|
/*
|
|
/*
|
|
- * Don't start sending this packet unless at least half of
|
|
|
|
- * the channels are free. This gives much better TCP
|
|
|
|
- * performance if we have a lot of channels.
|
|
|
|
|
|
+ * Don't start sending this packet unless at least half of
|
|
|
|
+ * the channels are free. This gives much better TCP
|
|
|
|
+ * performance if we have a lot of channels.
|
|
*/
|
|
*/
|
|
- if (nfree == 0 || nfree < navail / 2)
|
|
|
|
- return 0; /* can't take now, leave it in xmit_pending */
|
|
|
|
|
|
+ if (nfree == 0 || nfree < navail / 2)
|
|
|
|
+ return 0; /* can't take now, leave it in xmit_pending */
|
|
|
|
|
|
/* Do protocol field compression (XXX this should be optional) */
|
|
/* Do protocol field compression (XXX this should be optional) */
|
|
- p = skb->data;
|
|
|
|
- len = skb->len;
|
|
|
|
|
|
+ p = skb->data;
|
|
|
|
+ len = skb->len;
|
|
if (*p == 0) {
|
|
if (*p == 0) {
|
|
++p;
|
|
++p;
|
|
--len;
|
|
--len;
|
|
}
|
|
}
|
|
|
|
|
|
- /*
|
|
|
|
- * Decide on fragment size.
|
|
|
|
- * We create a fragment for each free channel regardless of
|
|
|
|
- * how small they are (i.e. even 0 length) in order to minimize
|
|
|
|
- * the time that it will take to detect when a channel drops
|
|
|
|
- * a fragment.
|
|
|
|
- */
|
|
|
|
- fragsize = len;
|
|
|
|
- if (nfree > 1)
|
|
|
|
- fragsize = DIV_ROUND_UP(fragsize, nfree);
|
|
|
|
- /* nbigger channels get fragsize bytes, the rest get fragsize-1,
|
|
|
|
- except if nbigger==0, then they all get fragsize. */
|
|
|
|
- nbigger = len % nfree;
|
|
|
|
-
|
|
|
|
- /* skip to the channel after the one we last used
|
|
|
|
- and start at that one */
|
|
|
|
|
|
+ totlen = len;
|
|
|
|
+ nbigger = len % nfree;
|
|
|
|
+
|
|
|
|
+ /* skip to the channel after the one we last used
|
|
|
|
+ and start at that one */
|
|
list = &ppp->channels;
|
|
list = &ppp->channels;
|
|
- for (i = 0; i < ppp->nxchan; ++i) {
|
|
|
|
|
|
+ for (i = 0; i < ppp->nxchan; ++i) {
|
|
list = list->next;
|
|
list = list->next;
|
|
- if (list == &ppp->channels) {
|
|
|
|
- i = 0;
|
|
|
|
|
|
+ if (list == &ppp->channels) {
|
|
|
|
+ i = 0;
|
|
break;
|
|
break;
|
|
}
|
|
}
|
|
}
|
|
}
|
|
|
|
|
|
- /* create a fragment for each channel */
|
|
|
|
|
|
+ /* create a fragment for each channel */
|
|
bits = B;
|
|
bits = B;
|
|
- while (nfree > 0 || len > 0) {
|
|
|
|
|
|
+ while (nfree > 0 && len > 0) {
|
|
list = list->next;
|
|
list = list->next;
|
|
- if (list == &ppp->channels) {
|
|
|
|
- i = 0;
|
|
|
|
|
|
+ if (list == &ppp->channels) {
|
|
|
|
+ i = 0;
|
|
continue;
|
|
continue;
|
|
}
|
|
}
|
|
- pch = list_entry(list, struct channel, clist);
|
|
|
|
|
|
+ pch = list_entry(list, struct channel, clist);
|
|
++i;
|
|
++i;
|
|
if (!pch->avail)
|
|
if (!pch->avail)
|
|
continue;
|
|
continue;
|
|
|
|
|
|
/*
|
|
/*
|
|
- * Skip this channel if it has a fragment pending already and
|
|
|
|
- * we haven't given a fragment to all of the free channels.
|
|
|
|
|
|
+ * Skip this channel if it has a fragment pending already and
|
|
|
|
+ * we haven't given a fragment to all of the free channels.
|
|
*/
|
|
*/
|
|
if (pch->avail == 1) {
|
|
if (pch->avail == 1) {
|
|
- if (nfree > 0)
|
|
|
|
|
|
+ if (nfree > 0)
|
|
continue;
|
|
continue;
|
|
} else {
|
|
} else {
|
|
- --nfree;
|
|
|
|
pch->avail = 1;
|
|
pch->avail = 1;
|
|
}
|
|
}
|
|
|
|
|
|
/* check the channel's mtu and whether it is still attached. */
|
|
/* check the channel's mtu and whether it is still attached. */
|
|
spin_lock_bh(&pch->downl);
|
|
spin_lock_bh(&pch->downl);
|
|
if (pch->chan == NULL) {
|
|
if (pch->chan == NULL) {
|
|
- /* can't use this channel, it's being deregistered */
|
|
|
|
|
|
+ /* can't use this channel, it's being deregistered */
|
|
|
|
+ if (pch->speed == 0)
|
|
|
|
+ nzero--;
|
|
|
|
+ else
|
|
|
|
+ totspeed -= pch->speed;
|
|
|
|
+
|
|
spin_unlock_bh(&pch->downl);
|
|
spin_unlock_bh(&pch->downl);
|
|
pch->avail = 0;
|
|
pch->avail = 0;
|
|
- if (--navail == 0)
|
|
|
|
|
|
+ totlen = len;
|
|
|
|
+ totfree--;
|
|
|
|
+ nfree--;
|
|
|
|
+ if (--navail == 0)
|
|
break;
|
|
break;
|
|
continue;
|
|
continue;
|
|
}
|
|
}
|
|
|
|
|
|
/*
|
|
/*
|
|
- * Create a fragment for this channel of
|
|
|
|
- * min(max(mtu+2-hdrlen, 4), fragsize, len) bytes.
|
|
|
|
- * If mtu+2-hdrlen < 4, that is a ridiculously small
|
|
|
|
- * MTU, so we use mtu = 2 + hdrlen.
|
|
|
|
|
|
+ *if the channel speed is not set divide
|
|
|
|
+ *the packet evenly among the free channels;
|
|
|
|
+ *otherwise divide it according to the speed
|
|
|
|
+ *of the channel we are going to transmit on
|
|
|
|
+ */
|
|
|
|
+ if (pch->speed == 0) {
|
|
|
|
+ flen = totlen/nfree ;
|
|
|
|
+ if (nbigger > 0) {
|
|
|
|
+ flen++;
|
|
|
|
+ nbigger--;
|
|
|
|
+ }
|
|
|
|
+ } else {
|
|
|
|
+ flen = (((totfree - nzero)*(totlen + hdrlen*totfree)) /
|
|
|
|
+ ((totspeed*totfree)/pch->speed)) - hdrlen;
|
|
|
|
+ if (nbigger > 0) {
|
|
|
|
+ flen += ((totfree - nzero)*pch->speed)/totspeed;
|
|
|
|
+ nbigger -= ((totfree - nzero)*pch->speed)/
|
|
|
|
+ totspeed;
|
|
|
|
+ }
|
|
|
|
+ }
|
|
|
|
+ nfree--;
|
|
|
|
+
|
|
|
|
+ /*
|
|
|
|
+ *check if we are on the last channel or
|
|
|
|
+ *we exceeded the length of the data to
|
|
|
|
+ *fragment
|
|
|
|
+ */
|
|
|
|
+ if ((nfree == 0) || (flen > len))
|
|
|
|
+ flen = len;
|
|
|
|
+ /*
|
|
|
|
+ *it is not worth transmitting on slow channels:
|
|
|
|
+ *in that case the resulting flen according to the
|
|
|
|
+ *above formula will be equal or less than zero.
|
|
|
|
+ *Skip the channel in this case
|
|
*/
|
|
*/
|
|
- if (fragsize > len)
|
|
|
|
- fragsize = len;
|
|
|
|
- flen = fragsize;
|
|
|
|
- mtu = pch->chan->mtu + 2 - hdrlen;
|
|
|
|
- if (mtu < 4)
|
|
|
|
- mtu = 4;
|
|
|
|
|
|
+ if (flen <= 0) {
|
|
|
|
+ pch->avail = 2;
|
|
|
|
+ spin_unlock_bh(&pch->downl);
|
|
|
|
+ continue;
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ mtu = pch->chan->mtu + 2 - hdrlen;
|
|
|
|
+ if (mtu < 4)
|
|
|
|
+ mtu = 4;
|
|
if (flen > mtu)
|
|
if (flen > mtu)
|
|
flen = mtu;
|
|
flen = mtu;
|
|
- if (flen == len && nfree == 0)
|
|
|
|
- bits |= E;
|
|
|
|
- frag = alloc_skb(flen + hdrlen + (flen == 0), GFP_ATOMIC);
|
|
|
|
|
|
+ if (flen == len)
|
|
|
|
+ bits |= E;
|
|
|
|
+ frag = alloc_skb(flen + hdrlen + (flen == 0), GFP_ATOMIC);
|
|
if (!frag)
|
|
if (!frag)
|
|
goto noskb;
|
|
goto noskb;
|
|
- q = skb_put(frag, flen + hdrlen);
|
|
|
|
|
|
+ q = skb_put(frag, flen + hdrlen);
|
|
|
|
|
|
- /* make the MP header */
|
|
|
|
|
|
+ /* make the MP header */
|
|
q[0] = PPP_MP >> 8;
|
|
q[0] = PPP_MP >> 8;
|
|
q[1] = PPP_MP;
|
|
q[1] = PPP_MP;
|
|
if (ppp->flags & SC_MP_XSHORTSEQ) {
|
|
if (ppp->flags & SC_MP_XSHORTSEQ) {
|
|
- q[2] = bits + ((ppp->nxseq >> 8) & 0xf);
|
|
|
|
|
|
+ q[2] = bits + ((ppp->nxseq >> 8) & 0xf);
|
|
q[3] = ppp->nxseq;
|
|
q[3] = ppp->nxseq;
|
|
} else {
|
|
} else {
|
|
q[2] = bits;
|
|
q[2] = bits;
|
|
@@ -1447,43 +1491,28 @@ static int ppp_mp_explode(struct ppp *ppp, struct sk_buff *skb)
|
|
q[5] = ppp->nxseq;
|
|
q[5] = ppp->nxseq;
|
|
}
|
|
}
|
|
|
|
|
|
- /*
|
|
|
|
- * Copy the data in.
|
|
|
|
- * Unfortunately there is a bug in older versions of
|
|
|
|
- * the Linux PPP multilink reconstruction code where it
|
|
|
|
- * drops 0-length fragments. Therefore we make sure the
|
|
|
|
- * fragment has at least one byte of data. Any bytes
|
|
|
|
- * we add in this situation will end up as padding on the
|
|
|
|
- * end of the reconstructed packet.
|
|
|
|
- */
|
|
|
|
- if (flen == 0)
|
|
|
|
- *skb_put(frag, 1) = 0;
|
|
|
|
- else
|
|
|
|
- memcpy(q + hdrlen, p, flen);
|
|
|
|
|
|
+ memcpy(q + hdrlen, p, flen);
|
|
|
|
|
|
/* try to send it down the channel */
|
|
/* try to send it down the channel */
|
|
chan = pch->chan;
|
|
chan = pch->chan;
|
|
- if (!skb_queue_empty(&pch->file.xq) ||
|
|
|
|
- !chan->ops->start_xmit(chan, frag))
|
|
|
|
|
|
+ if (!skb_queue_empty(&pch->file.xq) ||
|
|
|
|
+ !chan->ops->start_xmit(chan, frag))
|
|
skb_queue_tail(&pch->file.xq, frag);
|
|
skb_queue_tail(&pch->file.xq, frag);
|
|
- pch->had_frag = 1;
|
|
|
|
|
|
+ pch->had_frag = 1;
|
|
p += flen;
|
|
p += flen;
|
|
- len -= flen;
|
|
|
|
|
|
+ len -= flen;
|
|
++ppp->nxseq;
|
|
++ppp->nxseq;
|
|
bits = 0;
|
|
bits = 0;
|
|
spin_unlock_bh(&pch->downl);
|
|
spin_unlock_bh(&pch->downl);
|
|
-
|
|
|
|
- if (--nbigger == 0 && fragsize > 0)
|
|
|
|
- --fragsize;
|
|
|
|
}
|
|
}
|
|
- ppp->nxchan = i;
|
|
|
|
|
|
+ ppp->nxchan = i;
|
|
|
|
|
|
return 1;
|
|
return 1;
|
|
|
|
|
|
noskb:
|
|
noskb:
|
|
spin_unlock_bh(&pch->downl);
|
|
spin_unlock_bh(&pch->downl);
|
|
if (ppp->debug & 1)
|
|
if (ppp->debug & 1)
|
|
- printk(KERN_ERR "PPP: no memory (fragment)\n");
|
|
|
|
|
|
+ printk(KERN_ERR "PPP: no memory (fragment)\n");
|
|
++ppp->dev->stats.tx_errors;
|
|
++ppp->dev->stats.tx_errors;
|
|
++ppp->nxseq;
|
|
++ppp->nxseq;
|
|
return 1; /* abandon the frame */
|
|
return 1; /* abandon the frame */
|