shaper.c

/*
 *			Simple traffic shaper for Linux NET3.
 *
 *	(c) Copyright 1996 Alan Cox <alan@redhat.com>, All Rights Reserved.
 *				http://www.redhat.com
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 *
 *	Neither Alan Cox nor CymruNet Ltd. admit liability nor provide
 *	warranty for any of this software. This material is provided
 *	"AS-IS" and at no charge.
 *
 *
 *	Algorithm:
 *
 *	Queue Frame:
 *		Compute time length of frame at regulated speed
 *		Add frame to queue at appropriate point
 *		Adjust time length computation for follow-up frames
 *		Any frame that falls outside of its boundaries is freed
 *
 *	We work to the following constants
 *
 *		SHAPER_QLEN	Maximum queued frames
 *		SHAPER_LATENCY	Bounding latency on a frame. Leaving this
 *				latency window drops the frame. This stops us
 *				queueing frames for a long time and confusing
 *				a remote host.
 *		SHAPER_MAXSLIP	Maximum time a priority frame may jump forward.
 *				That bounds the penalty we will inflict on low
 *				priority traffic.
 *		SHAPER_BURST	Time range we call "now" in order to reduce
 *				system load. The larger we make this the
 *				burstier the behaviour: better local
 *				performance through packet clustering on
 *				routers, but the worse the remote end can
 *				judge RTTs.
 *
 *	This is designed to handle lower speed links (< 200K/second or so). We
 *	run off a 100-150Hz base clock typically. This gives us a resolution at
 *	200Kbit/second of about 2Kbit or 256 bytes. Above that our timer
 *	resolution may start to cause much more burstiness in the traffic. We
 *	could avoid a lot of that by calling kick_shaper() at the end of the
 *	tied device transmissions. If you run above about 100K/second you
 *	may need to tune the supposed speed rate for the right values.
 *
 *	BUGS:
 *		Downing the interface under the shaper before the shaper
 *		will render your machine defunct. Therefore, don't shape over
 *		PPP or SLIP for now!
 *		This will be fixed in BETA4
 *
 *	Update History:
 *
 *		Fixed bh_atomic() SMP races and rewrote the locking code to
 *		be SMP safe and irq-mask friendly.
 *		NOTE: we can't use start_bh_atomic() in kick_shaper()
 *		because it is going to be recalled from an irq handler,
 *		and synchronize_bh() is a no-no if called from irq context.
 *						1999 Andrea Arcangeli
 *
 *		Device statistics (tx_packets, tx_bytes,
 *		tx_dropped: queue_over_time and collisions: max_queue_exceeded)
 *				1999/06/18 Jordi Murgo <savage@apostols.org>
 *
 *		Use skb->cb for private data.
 *				2000/03 Andi Kleen
 */
#include <linux/config.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/fcntl.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/if_arp.h>
#include <linux/init.h>
#include <linux/if_shaper.h>

#include <net/dst.h>
#include <net/arp.h>
struct shaper_cb {
	unsigned long	shapeclock;	/* Time it should go out */
	unsigned long	shapestamp;	/* Stamp for shaper */
	__u32		shapelatency;	/* Latency on frame */
	__u32		shapelen;	/* Frame length in clocks */
	__u16		shapepend;	/* Pending */
};
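
/*
 *	Per-frame shaper state is carried in skb->cb (see the 2000/03 note
 *	above); the SHAPERCB() macro below gives typed access to it.
 */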
#define SHAPERCB(skb)	((struct shaper_cb *) ((skb)->cb))

static int sh_debug;		/* Debug flag */

#define SHAPER_BANNER	"CymruNet Traffic Shaper BETA 0.04 for Linux 2.1\n"
static void shaper_kick(struct shaper *sh);

/*
 *	Compute clocks on a buffer: the frame's cost in timer ticks at the
 *	shaped rate (integer division, so very short frames may cost zero
 *	ticks).
 */

static int shaper_clocks(struct shaper *shaper, struct sk_buff *skb)
{
	int t = skb->len / shaper->bytespertick;
	return t;
}
/*
 *	Set the speed of a shaper. We compute this in bytes per tick since
 *	that's how the machine wants to run. Quoted input is in bits per
 *	second as is traditional (note not BAUD). We assume 8 bit bytes.
 */

static void shaper_setspeed(struct shaper *shaper, int bitspersec)
{
	shaper->bitspersec = bitspersec;
	shaper->bytespertick = (bitspersec / HZ) / 8;
	if (!shaper->bytespertick)
		shaper->bytespertick++;
}
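
/*
 *	Worked example (illustrative numbers): at 64000 bits/sec with HZ=100
 *	this gives (64000/100)/8 = 80 bytes per tick. Rates below 8*HZ
 *	bits/sec would compute to zero, so we clamp to 1 byte per tick and
 *	avoid a divide by zero in shaper_clocks().
 */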
/*
 *	Throw a frame at a shaper.
 */

static int shaper_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct shaper *shaper = dev->priv;
	struct sk_buff *ptr;
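
	/*
	 *	If another context already holds the shaper, report the
	 *	device busy and let the stack retry the frame later.
	 */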
	if (down_trylock(&shaper->sem))
		return -1;

	ptr = shaper->sendq.prev;

	/*
	 *	Set up our packet details
	 */

	SHAPERCB(skb)->shapelatency = 0;
	SHAPERCB(skb)->shapeclock = shaper->recovery;
	if (time_before(SHAPERCB(skb)->shapeclock, jiffies))
		SHAPERCB(skb)->shapeclock = jiffies;
	skb->priority = 0;	/* short term bug fix */
	SHAPERCB(skb)->shapestamp = jiffies;

	/*
	 *	Time slots for this packet.
	 */

	SHAPERCB(skb)->shapelen = shaper_clocks(shaper, skb);

#ifdef SHAPER_COMPLEX	/* and broken.. */

	while (ptr && ptr != (struct sk_buff *)&shaper->sendq)
	{
		if (ptr->pri < skb->pri
		    && jiffies - SHAPERCB(ptr)->shapeclock < SHAPER_MAXSLIP)
		{
			struct sk_buff *tmp = ptr->prev;

			/*
			 *	It goes before us therefore we slip the length
			 *	of the new frame.
			 */

			SHAPERCB(ptr)->shapeclock += SHAPERCB(skb)->shapelen;
			SHAPERCB(ptr)->shapelatency += SHAPERCB(skb)->shapelen;

			/*
			 *	The packet may have slipped so far back it
			 *	fell off.
			 */
			if (SHAPERCB(ptr)->shapelatency > SHAPER_LATENCY)
			{
				skb_unlink(ptr);
				dev_kfree_skb(ptr);
			}
			ptr = tmp;
		}
		else
			break;
	}
	if (ptr == NULL || ptr == (struct sk_buff *)&shaper->sendq)
		skb_queue_head(&shaper->sendq, skb);
	else
	{
		struct sk_buff *tmp;
		/*
		 *	Set the packet clock out time according to the
		 *	frames ahead. I'm sure a bit of thought could drop
		 *	this loop.
		 */
		for (tmp = skb_peek(&shaper->sendq); tmp != NULL && tmp != ptr; tmp = tmp->next)
			SHAPERCB(skb)->shapeclock += SHAPERCB(tmp)->shapelen;
		skb_append(ptr, skb);
	}
#else
	{
		struct sk_buff *tmp;
		/*
		 *	Up our shape clock by the time pending on the queue
		 *	(Should keep this in the shaper as a variable..)
		 */
		for (tmp = skb_peek(&shaper->sendq); tmp != NULL &&
		     tmp != (struct sk_buff *)&shaper->sendq; tmp = tmp->next)
			SHAPERCB(skb)->shapeclock += SHAPERCB(tmp)->shapelen;
		/*
		 *	Queue over time. Spill packet.
		 */
		if (SHAPERCB(skb)->shapeclock - jiffies > SHAPER_LATENCY) {
			dev_kfree_skb(skb);
			shaper->stats.tx_dropped++;
		} else
			skb_queue_tail(&shaper->sendq, skb);
	}
#endif
	if (sh_debug)
		printk("Frame queued.\n");
	if (skb_queue_len(&shaper->sendq) > SHAPER_QLEN)
	{
		ptr = skb_dequeue(&shaper->sendq);
		dev_kfree_skb(ptr);
		shaper->stats.collisions++;
	}
	shaper_kick(shaper);
	up(&shaper->sem);
	return 0;
}
/*
 *	Transmit from a shaper
 */

static void shaper_queue_xmit(struct shaper *shaper, struct sk_buff *skb)
{
	struct sk_buff *newskb = skb_clone(skb, GFP_ATOMIC);
	if (sh_debug)
		printk("Kick frame on %p\n", newskb);
	if (newskb)
	{
		newskb->dev = shaper->dev;
		newskb->priority = 2;
		if (sh_debug)
			printk("Kick new frame to %s, %d\n",
				shaper->dev->name, newskb->priority);
		dev_queue_xmit(newskb);

		shaper->stats.tx_bytes += skb->len;
		shaper->stats.tx_packets++;

		if (sh_debug)
			printk("Kicked new frame out.\n");
		dev_kfree_skb(skb);
	}
}
/*
 *	Timer handler for the shaping clock: kick the queue if we can take
 *	the shaper, otherwise re-arm and retry on the next tick.
 */

static void shaper_timer(unsigned long data)
{
	struct shaper *shaper = (struct shaper *)data;

	if (!down_trylock(&shaper->sem)) {
		shaper_kick(shaper);
		up(&shaper->sem);
	} else
		mod_timer(&shaper->timer, jiffies);
}
/*
 *	Kick a shaper queue and try and do something sensible with the
 *	queue.
 */

static void shaper_kick(struct shaper *shaper)
{
	struct sk_buff *skb;

	/*
	 *	Walk the list (may be empty)
	 */

	while ((skb = skb_peek(&shaper->sendq)) != NULL)
	{
		/*
		 *	Each packet due to go out by now (within an error
		 *	of SHAPER_BURST) gets kicked onto the link
		 */

		if (sh_debug)
			printk("Clock = %ld, jiffies = %ld\n", SHAPERCB(skb)->shapeclock, jiffies);
		if (time_before_eq(SHAPERCB(skb)->shapeclock, jiffies + SHAPER_BURST))
		{
			/*
			 *	Pull the frame and get interrupts back on.
			 */

			skb_unlink(skb);
			if (shaper->recovery <
			    SHAPERCB(skb)->shapeclock + SHAPERCB(skb)->shapelen)
				shaper->recovery = SHAPERCB(skb)->shapeclock + SHAPERCB(skb)->shapelen;
			/*
			 *	Pass on to the physical target device via
			 *	our low level packet thrower.
			 */

			SHAPERCB(skb)->shapepend = 0;
			shaper_queue_xmit(shaper, skb);	/* Fire */
		}
		else
			break;
	}

	/*
	 *	Next kick.
	 */

	if (skb != NULL)
		mod_timer(&shaper->timer, SHAPERCB(skb)->shapeclock);
}
/*
 *	Flush the shaper queues on a closedown
 */

static void shaper_flush(struct shaper *shaper)
{
	struct sk_buff *skb;

	down(&shaper->sem);
	while ((skb = skb_dequeue(&shaper->sendq)) != NULL)
		dev_kfree_skb(skb);
	shaper_kick(shaper);
	up(&shaper->sem);
}
/*
 *	Bring the interface up. We just disallow this until a
 *	bind.
 */

static int shaper_open(struct net_device *dev)
{
	struct shaper *shaper = dev->priv;

	/*
	 *	Can't open until attached.
	 *	Also can't open until speed is set, or we'll get
	 *	a division by zero.
	 */

	if (shaper->dev == NULL)
		return -ENODEV;
	if (shaper->bitspersec == 0)
		return -EINVAL;
	return 0;
}
/*
 *	Closing a shaper flushes the queues.
 */

static int shaper_close(struct net_device *dev)
{
	struct shaper *shaper = dev->priv;
	shaper_flush(shaper);
	del_timer_sync(&shaper->timer);
	return 0;
}
/*
 *	Revectored calls. We alter the parameters and call the functions
 *	for our attached device. This enables us to bandwidth allocate after
 *	ARP and other resolutions and not before.
 */

static struct net_device_stats *shaper_get_stats(struct net_device *dev)
{
	struct shaper *sh = dev->priv;
	return &sh->stats;
}

static int shaper_header(struct sk_buff *skb, struct net_device *dev,
	unsigned short type, void *daddr, void *saddr, unsigned len)
{
	struct shaper *sh = dev->priv;
	int v;
	if (sh_debug)
		printk("Shaper header\n");
	skb->dev = sh->dev;
	v = sh->hard_header(skb, sh->dev, type, daddr, saddr, len);
	skb->dev = dev;
	return v;
}
static int shaper_rebuild_header(struct sk_buff *skb)
{
	struct shaper *sh = skb->dev->priv;
	struct net_device *dev = skb->dev;
	int v;
	if (sh_debug)
		printk("Shaper rebuild header\n");
	skb->dev = sh->dev;
	v = sh->rebuild_header(skb);
	skb->dev = dev;
	return v;
}
#if 0
static int shaper_cache(struct neighbour *neigh, struct hh_cache *hh)
{
	struct shaper *sh = neigh->dev->priv;
	struct net_device *tmp;
	int ret;
	if (sh_debug)
		printk("Shaper header cache bind\n");
	tmp = neigh->dev;
	neigh->dev = sh->dev;
	ret = sh->hard_header_cache(neigh, hh);
	neigh->dev = tmp;
	return ret;
}

static void shaper_cache_update(struct hh_cache *hh, struct net_device *dev,
	unsigned char *haddr)
{
	struct shaper *sh = dev->priv;
	if (sh_debug)
		printk("Shaper cache update\n");
	sh->header_cache_update(hh, sh->dev, haddr);
}
#endif
#ifdef CONFIG_INET

static int shaper_neigh_setup(struct neighbour *n)
{
#ifdef CONFIG_INET
	if (n->nud_state == NUD_NONE) {
		n->ops = &arp_broken_ops;
		n->output = n->ops->output;
	}
#endif
	return 0;
}

static int shaper_neigh_setup_dev(struct net_device *dev, struct neigh_parms *p)
{
#ifdef CONFIG_INET
	if (p->tbl->family == AF_INET) {
		p->neigh_setup = shaper_neigh_setup;
		p->ucast_probes = 0;
		p->mcast_probes = 0;
	}
#endif
	return 0;
}

#else /* !(CONFIG_INET) */

static int shaper_neigh_setup_dev(struct net_device *dev, struct neigh_parms *p)
{
	return 0;
}

#endif
static int shaper_attach(struct net_device *shdev, struct shaper *sh, struct net_device *dev)
{
	sh->dev = dev;
	sh->hard_start_xmit = dev->hard_start_xmit;
	sh->get_stats = dev->get_stats;
	if (dev->hard_header)
	{
		sh->hard_header = dev->hard_header;
		shdev->hard_header = shaper_header;
	}
	else
		shdev->hard_header = NULL;

	if (dev->rebuild_header)
	{
		sh->rebuild_header	= dev->rebuild_header;
		shdev->rebuild_header	= shaper_rebuild_header;
	}
	else
		shdev->rebuild_header	= NULL;

#if 0
	if (dev->hard_header_cache)
	{
		sh->hard_header_cache	= dev->hard_header_cache;
		shdev->hard_header_cache = shaper_cache;
	}
	else
	{
		shdev->hard_header_cache = NULL;
	}

	if (dev->header_cache_update)
	{
		sh->header_cache_update	= dev->header_cache_update;
		shdev->header_cache_update = shaper_cache_update;
	}
	else
		shdev->header_cache_update = NULL;
#else
	shdev->header_cache_update = NULL;
	shdev->hard_header_cache = NULL;
#endif
	shdev->neigh_setup = shaper_neigh_setup_dev;

	shdev->hard_header_len = dev->hard_header_len;
	shdev->type = dev->type;
	shdev->addr_len = dev->addr_len;
	shdev->mtu = dev->mtu;
	sh->bitspersec = 0;
	return 0;
}
static int shaper_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	struct shaperconf *ss = (struct shaperconf *)&ifr->ifr_ifru;
	struct shaper *sh = dev->priv;

	if (ss->ss_cmd == SHAPER_SET_DEV || ss->ss_cmd == SHAPER_SET_SPEED)
	{
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
	}

	switch (ss->ss_cmd)
	{
		case SHAPER_SET_DEV:
		{
			struct net_device *them = __dev_get_by_name(ss->ss_name);
			if (them == NULL)
				return -ENODEV;
			if (sh->dev)
				return -EBUSY;
			return shaper_attach(dev, dev->priv, them);
		}
		case SHAPER_GET_DEV:
			if (sh->dev == NULL)
				return -ENODEV;
			strcpy(ss->ss_name, sh->dev->name);
			return 0;
		case SHAPER_SET_SPEED:
			shaper_setspeed(sh, ss->ss_speed);
			return 0;
		case SHAPER_GET_SPEED:
			ss->ss_speed = sh->bitspersec;
			return 0;
		default:
			return -EINVAL;
	}
}
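
/*
 *	Illustrative userspace sketch (not part of this driver): the commands
 *	above are driven by filling a struct shaperconf inside the ifreq union
 *	and issuing the device-private ioctl on the shaper interface. Device
 *	names and the use of SIOCDEVPRIVATE here are example assumptions:
 *
 *		struct ifreq ifr;
 *		struct shaperconf *sc = (struct shaperconf *)&ifr.ifr_ifru;
 *
 *		strcpy(ifr.ifr_name, "shaper0");
 *		sc->ss_cmd = SHAPER_SET_DEV;
 *		strcpy(sc->ss_name, "eth0");
 *		ioctl(fd, SIOCDEVPRIVATE, &ifr);	(fd: any socket descriptor)
 *
 *		sc->ss_cmd = SHAPER_SET_SPEED;
 *		sc->ss_speed = 64000;			(bits per second)
 *		ioctl(fd, SIOCDEVPRIVATE, &ifr);
 *
 *	Note the speed must be set after attaching, since shaper_attach()
 *	resets bitspersec to zero.
 */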
static void shaper_init_priv(struct net_device *dev)
{
	struct shaper *sh = dev->priv;

	skb_queue_head_init(&sh->sendq);
	init_timer(&sh->timer);
	sh->timer.function = shaper_timer;
	sh->timer.data = (unsigned long)sh;
}
/*
 *	Add a shaper device to the system
 */

static void __init shaper_setup(struct net_device *dev)
{
	/*
	 *	Set up the shaper.
	 */

	SET_MODULE_OWNER(dev);

	/*
	 *	Initialise the packet queues
	 */

	shaper_init_priv(dev);

	dev->open		= shaper_open;
	dev->stop		= shaper_close;
	dev->hard_start_xmit	= shaper_start_xmit;
	dev->get_stats		= shaper_get_stats;
	dev->set_multicast_list	= NULL;

	/*
	 *	Handlers for when we attach to a device.
	 */

	dev->hard_header	= shaper_header;
	dev->rebuild_header	= shaper_rebuild_header;
#if 0
	dev->hard_header_cache	= shaper_cache;
	dev->header_cache_update = shaper_cache_update;
#endif
	dev->neigh_setup	= shaper_neigh_setup_dev;
	dev->do_ioctl		= shaper_ioctl;
	dev->hard_header_len	= 0;
	dev->type		= ARPHRD_ETHER;	/* initially */
	dev->set_mac_address	= NULL;
	dev->mtu		= 1500;
	dev->addr_len		= 0;
	dev->tx_queue_len	= 10;
	dev->flags		= 0;
}
static int shapers = 1;

#ifdef MODULE

module_param(shapers, int, 0);
MODULE_PARM_DESC(shapers, "Traffic shaper: maximum number of shapers");

#else /* MODULE */

static int __init set_num_shapers(char *str)
{
	shapers = simple_strtol(str, NULL, 0);
	return 1;
}

__setup("shapers=", set_num_shapers);

#endif /* MODULE */
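
/*
 *	The number of shaper devices is fixed at load time: for example
 *	(illustrative) "modprobe shaper shapers=4" when built as a module,
 *	or booting with "shapers=4" when built in. shaper_init() below then
 *	registers shaper0..shaper3.
 */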
static struct net_device **devs;

static unsigned int shapers_registered = 0;

static int __init shaper_init(void)
{
	int i;
	size_t alloc_size;
	struct net_device *dev;
	char name[IFNAMSIZ];

	if (shapers < 1)
		return -ENODEV;

	alloc_size = sizeof(*devs) * shapers;
	devs = kmalloc(alloc_size, GFP_KERNEL);
	if (!devs)
		return -ENOMEM;
	memset(devs, 0, alloc_size);

	for (i = 0; i < shapers; i++) {
		snprintf(name, IFNAMSIZ, "shaper%d", i);
		dev = alloc_netdev(sizeof(struct shaper), name,
				   shaper_setup);
		if (!dev)
			break;

		if (register_netdev(dev)) {
			free_netdev(dev);
			break;
		}

		devs[i] = dev;
		shapers_registered++;
	}

	if (!shapers_registered) {
		kfree(devs);
		devs = NULL;
	}

	return (shapers_registered ? 0 : -ENODEV);
}
static void __exit shaper_exit(void)
{
	int i;

	for (i = 0; i < shapers_registered; i++) {
		if (devs[i]) {
			unregister_netdev(devs[i]);
			free_netdev(devs[i]);
		}
	}

	kfree(devs);
	devs = NULL;
}

module_init(shaper_init);
module_exit(shaper_exit);
MODULE_LICENSE("GPL");