/*
 * net/sched/sch_generic.c	Generic packet scheduler routines.
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 *
 * Authors:	Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
 *		Jamal Hadi Salim, <hadi@cyberus.ca> 990601
 *		- Ingress support
 */

#include <asm/uaccess.h>
#include <asm/system.h>
#include <linux/bitops.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/socket.h>
#include <linux/sockios.h>
#include <linux/in.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/rtnetlink.h>
#include <linux/init.h>
#include <linux/rcupdate.h>
#include <linux/list.h>
#include <net/sock.h>
#include <net/pkt_sched.h>

/* Main transmission queue. */

/* Modifications to data participating in scheduling must be protected with
 * dev->queue_lock spinlock.
 *
 * The idea is the following:
 * - enqueue, dequeue are serialized via top level device
 *   spinlock dev->queue_lock.
 * - ingress filtering is serialized via top level device
 *   spinlock dev->ingress_lock.
 * - updates to tree and tree walking are only done under the rtnl mutex.
 */
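
/* qdisc_lock_tree/qdisc_unlock_tree take (and release) both dev->queue_lock
 * (with BHs disabled) and dev->ingress_lock, giving the caller exclusive
 * access to the device's whole qdisc tree while it is being rebuilt.
 */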
void qdisc_lock_tree(struct net_device *dev)
{
	spin_lock_bh(&dev->queue_lock);
	spin_lock(&dev->ingress_lock);
}

void qdisc_unlock_tree(struct net_device *dev)
{
	spin_unlock(&dev->ingress_lock);
	spin_unlock_bh(&dev->queue_lock);
}

/*
   dev->queue_lock serializes queue accesses for this device
   AND dev->qdisc pointer itself.

   netif_tx_lock serializes accesses to device driver.

   dev->queue_lock and netif_tx_lock are mutually exclusive:
   if one is grabbed, the other must be free.
 */

/* Kick device.
   Returns:  0 - queue is empty or throttled.
	    >0 - queue is not empty.

   NOTE: Called under dev->queue_lock with locally disabled BH.
*/
static inline int qdisc_restart(struct net_device *dev)
{
	struct Qdisc *q = dev->qdisc;
	struct sk_buff *skb;

	/* Dequeue packet */
	if (((skb = dev->gso_skb)) || ((skb = q->dequeue(q)))) {
		unsigned nolock = (dev->features & NETIF_F_LLTX);

		dev->gso_skb = NULL;

		/*
		 * When the driver has LLTX set it does its own locking
		 * in start_xmit. No need to add additional overhead by
		 * locking again. These checks are worth it because
		 * even uncongested locks can be quite expensive.
		 * The driver can do trylock like here too, in case
		 * of lock congestion it should return -1 (NETDEV_TX_LOCKED)
		 * and the packet will be requeued.
		 */
		if (!nolock) {
			if (!netif_tx_trylock(dev)) {
			collision:
				/* So, someone grabbed the driver. */

				/* It may be a transient configuration error,
				   when hard_start_xmit() recurses. We detect
				   it by checking xmit owner and drop the
				   packet when deadloop is detected.
				*/
				if (dev->xmit_lock_owner == smp_processor_id()) {
					kfree_skb(skb);
					if (net_ratelimit())
						printk(KERN_DEBUG "Dead loop on netdevice %s, fix it urgently!\n", dev->name);
					goto out;
				}
				__get_cpu_var(netdev_rx_stat).cpu_collision++;
				goto requeue;
			}
		}

		{
			/* And release queue */
			spin_unlock(&dev->queue_lock);

			if (!netif_queue_stopped(dev)) {
				int ret;

				ret = dev_hard_start_xmit(skb, dev);
				if (ret == NETDEV_TX_OK) {
					if (!nolock) {
						netif_tx_unlock(dev);
					}
					spin_lock(&dev->queue_lock);
					q = dev->qdisc;
					goto out;
				}
				if (ret == NETDEV_TX_LOCKED && nolock) {
					spin_lock(&dev->queue_lock);
					q = dev->qdisc;
					goto collision;
				}
			}

			/* NETDEV_TX_BUSY - we need to requeue */
			/* Release the driver */
			if (!nolock) {
				netif_tx_unlock(dev);
			}
			spin_lock(&dev->queue_lock);
			q = dev->qdisc;
		}

		/* Device kicked us out :(
		   This is possible in four cases:

		   0. driver is locked
		   1. fastroute is enabled
		   2. device cannot determine busy state
		      before start of transmission (f.e. dialout)
		   3. device is buggy (ppp)
		 */

requeue:
		if (unlikely(q == &noop_qdisc))
			kfree_skb(skb);
		else if (skb->next)
			dev->gso_skb = skb;
		else
			q->ops->requeue(skb, q);
		netif_schedule(dev);
		return 0;
	}

out:
	BUG_ON((int) q->q.qlen < 0);
	return q->q.qlen;
}
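
/* __qdisc_run - keep calling qdisc_restart() until it reports an empty or
 * throttled queue (returns 0) or the driver stops the TX queue, then clear
 * __LINK_STATE_QDISC_RUNNING so the next qdisc_run() caller may enter.
 * Like qdisc_restart(), this runs under dev->queue_lock with BHs disabled.
 */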
void __qdisc_run(struct net_device *dev)
{
	do {
		if (!qdisc_restart(dev))
			break;
	} while (!netif_queue_stopped(dev));

	clear_bit(__LINK_STATE_QDISC_RUNNING, &dev->state);
}
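
/* dev_watchdog - per-device transmit watchdog. If the device is present,
 * running and has carrier, and its TX queue has been stopped for longer
 * than dev->watchdog_timeo, the driver's tx_timeout() handler is called;
 * the timer is then re-armed. A pending timer holds a device reference.
 */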
static void dev_watchdog(unsigned long arg)
{
	struct net_device *dev = (struct net_device *)arg;

	netif_tx_lock(dev);
	if (dev->qdisc != &noop_qdisc) {
		if (netif_device_present(dev) &&
		    netif_running(dev) &&
		    netif_carrier_ok(dev)) {
			if (netif_queue_stopped(dev) &&
			    time_after(jiffies, dev->trans_start + dev->watchdog_timeo)) {
				printk(KERN_INFO "NETDEV WATCHDOG: %s: transmit timed out\n",
				       dev->name);
				dev->tx_timeout(dev);
			}
			if (!mod_timer(&dev->watchdog_timer, round_jiffies(jiffies + dev->watchdog_timeo)))
				dev_hold(dev);
		}
	}
	netif_tx_unlock(dev);

	dev_put(dev);
}

static void dev_watchdog_init(struct net_device *dev)
{
	init_timer(&dev->watchdog_timer);
	dev->watchdog_timer.data = (unsigned long)dev;
	dev->watchdog_timer.function = dev_watchdog;
}

void __netdev_watchdog_up(struct net_device *dev)
{
	if (dev->tx_timeout) {
		if (dev->watchdog_timeo <= 0)
			dev->watchdog_timeo = 5*HZ;
		if (!mod_timer(&dev->watchdog_timer, jiffies + dev->watchdog_timeo))
			dev_hold(dev);
	}
}

static void dev_watchdog_up(struct net_device *dev)
{
	__netdev_watchdog_up(dev);
}

static void dev_watchdog_down(struct net_device *dev)
{
	netif_tx_lock_bh(dev);
	if (del_timer(&dev->watchdog_timer))
		dev_put(dev);
	netif_tx_unlock_bh(dev);
}
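
/* netif_carrier_on/netif_carrier_off - update the __LINK_STATE_NOCARRIER bit
 * and, on a real state change, fire a linkwatch event. netif_carrier_on also
 * re-arms the TX watchdog when the device is running.
 */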
void netif_carrier_on(struct net_device *dev)
{
	if (test_and_clear_bit(__LINK_STATE_NOCARRIER, &dev->state))
		linkwatch_fire_event(dev);
	if (netif_running(dev))
		__netdev_watchdog_up(dev);
}

void netif_carrier_off(struct net_device *dev)
{
	if (!test_and_set_bit(__LINK_STATE_NOCARRIER, &dev->state))
		linkwatch_fire_event(dev);
}

/* "NOOP" scheduler: the best scheduler, recommended for all interfaces
   under all circumstances. It is difficult to invent anything faster or
   cheaper.
 */

static int noop_enqueue(struct sk_buff *skb, struct Qdisc *qdisc)
{
	kfree_skb(skb);
	return NET_XMIT_CN;
}

static struct sk_buff *noop_dequeue(struct Qdisc *qdisc)
{
	return NULL;
}

static int noop_requeue(struct sk_buff *skb, struct Qdisc *qdisc)
{
	if (net_ratelimit())
		printk(KERN_DEBUG "%s deferred output. It is buggy.\n",
		       skb->dev->name);
	kfree_skb(skb);
	return NET_XMIT_CN;
}

struct Qdisc_ops noop_qdisc_ops = {
	.id		=	"noop",
	.priv_size	=	0,
	.enqueue	=	noop_enqueue,
	.dequeue	=	noop_dequeue,
	.requeue	=	noop_requeue,
	.owner		=	THIS_MODULE,
};

struct Qdisc noop_qdisc = {
	.enqueue	=	noop_enqueue,
	.dequeue	=	noop_dequeue,
	.flags		=	TCQ_F_BUILTIN,
	.ops		=	&noop_qdisc_ops,
	.list		=	LIST_HEAD_INIT(noop_qdisc.list),
};

static struct Qdisc_ops noqueue_qdisc_ops = {
	.id		=	"noqueue",
	.priv_size	=	0,
	.enqueue	=	noop_enqueue,
	.dequeue	=	noop_dequeue,
	.requeue	=	noop_requeue,
	.owner		=	THIS_MODULE,
};

static struct Qdisc noqueue_qdisc = {
	.enqueue	=	NULL,
	.dequeue	=	noop_dequeue,
	.flags		=	TCQ_F_BUILTIN,
	.ops		=	&noqueue_qdisc_ops,
	.list		=	LIST_HEAD_INIT(noqueue_qdisc.list),
};
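
/* noqueue_qdisc has a NULL ->enqueue: dev_queue_xmit() treats that as "no
 * queue" and hands packets straight to the driver, which is what virtual
 * devices with tx_queue_len == 0 get from dev_activate() below.
 *
 * prio2band maps the TC_PRIO_* value in skb->priority to one of the three
 * pfifo_fast bands; band 0 (e.g. TC_PRIO_INTERACTIVE) is dequeued before
 * band 1, which is dequeued before band 2 (e.g. TC_PRIO_BULK).
 */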
static const u8 prio2band[TC_PRIO_MAX+1] =
	{ 1, 2, 2, 2, 1, 2, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1 };

/* 3-band FIFO queue: old style, but should be a bit faster than
   generic prio+fifo combination.
 */

#define PFIFO_FAST_BANDS 3

static inline struct sk_buff_head *prio2list(struct sk_buff *skb,
					     struct Qdisc *qdisc)
{
	struct sk_buff_head *list = qdisc_priv(qdisc);
	return list + prio2band[skb->priority & TC_PRIO_MAX];
}

static int pfifo_fast_enqueue(struct sk_buff *skb, struct Qdisc *qdisc)
{
	struct sk_buff_head *list = prio2list(skb, qdisc);

	if (skb_queue_len(list) < qdisc->dev->tx_queue_len) {
		qdisc->q.qlen++;
		return __qdisc_enqueue_tail(skb, qdisc, list);
	}

	return qdisc_drop(skb, qdisc);
}

static struct sk_buff *pfifo_fast_dequeue(struct Qdisc *qdisc)
{
	int prio;
	struct sk_buff_head *list = qdisc_priv(qdisc);

	for (prio = 0; prio < PFIFO_FAST_BANDS; prio++) {
		if (!skb_queue_empty(list + prio)) {
			qdisc->q.qlen--;
			return __qdisc_dequeue_head(qdisc, list + prio);
		}
	}

	return NULL;
}

static int pfifo_fast_requeue(struct sk_buff *skb, struct Qdisc *qdisc)
{
	qdisc->q.qlen++;
	return __qdisc_requeue(skb, qdisc, prio2list(skb, qdisc));
}

static void pfifo_fast_reset(struct Qdisc *qdisc)
{
	int prio;
	struct sk_buff_head *list = qdisc_priv(qdisc);

	for (prio = 0; prio < PFIFO_FAST_BANDS; prio++)
		__qdisc_reset_queue(qdisc, list + prio);

	qdisc->qstats.backlog = 0;
	qdisc->q.qlen = 0;
}

static int pfifo_fast_dump(struct Qdisc *qdisc, struct sk_buff *skb)
{
	struct tc_prio_qopt opt = { .bands = PFIFO_FAST_BANDS };

	memcpy(&opt.priomap, prio2band, TC_PRIO_MAX+1);
	RTA_PUT(skb, TCA_OPTIONS, sizeof(opt), &opt);
	return skb->len;

rtattr_failure:
	return -1;
}

static int pfifo_fast_init(struct Qdisc *qdisc, struct rtattr *opt)
{
	int prio;
	struct sk_buff_head *list = qdisc_priv(qdisc);

	for (prio = 0; prio < PFIFO_FAST_BANDS; prio++)
		skb_queue_head_init(list + prio);

	return 0;
}

static struct Qdisc_ops pfifo_fast_ops = {
	.id		=	"pfifo_fast",
	.priv_size	=	PFIFO_FAST_BANDS * sizeof(struct sk_buff_head),
	.enqueue	=	pfifo_fast_enqueue,
	.dequeue	=	pfifo_fast_dequeue,
	.requeue	=	pfifo_fast_requeue,
	.init		=	pfifo_fast_init,
	.reset		=	pfifo_fast_reset,
	.dump		=	pfifo_fast_dump,
	.owner		=	THIS_MODULE,
};
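
/* qdisc_alloc - allocate a qdisc of the given ops. The struct Qdisc and the
 * ops' private data are carved out of a single kzalloc'ed buffer, aligned to
 * QDISC_ALIGNTO; sch->padded records the alignment offset so __qdisc_destroy()
 * can free the original allocation. Returns the qdisc or ERR_PTR(-ENOBUFS).
 */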
struct Qdisc *qdisc_alloc(struct net_device *dev, struct Qdisc_ops *ops)
{
	void *p;
	struct Qdisc *sch;
	unsigned int size;
	int err = -ENOBUFS;

	/* ensure that the Qdisc and the private data are 32-byte aligned */
	size = QDISC_ALIGN(sizeof(*sch));
	size += ops->priv_size + (QDISC_ALIGNTO - 1);

	p = kzalloc(size, GFP_KERNEL);
	if (!p)
		goto errout;
	sch = (struct Qdisc *) QDISC_ALIGN((unsigned long) p);
	sch->padded = (char *) sch - (char *) p;

	INIT_LIST_HEAD(&sch->list);
	skb_queue_head_init(&sch->q);
	sch->ops = ops;
	sch->enqueue = ops->enqueue;
	sch->dequeue = ops->dequeue;
	sch->dev = dev;
	dev_hold(dev);
	atomic_set(&sch->refcnt, 1);

	return sch;
errout:
	return ERR_PTR(err);	/* err is already negative (-ENOBUFS) */
}
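
/* qdisc_create_dflt - allocate a qdisc and run its init() with default
 * (NULL) parameters, as used for the pfifo_fast root qdisc attached by
 * dev_activate(). Returns NULL and destroys the half-built qdisc if either
 * the allocation or init() fails.
 */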
struct Qdisc *qdisc_create_dflt(struct net_device *dev, struct Qdisc_ops *ops,
				unsigned int parentid)
{
	struct Qdisc *sch;

	sch = qdisc_alloc(dev, ops);
	if (IS_ERR(sch))
		goto errout;
	sch->stats_lock = &dev->queue_lock;
	sch->parent = parentid;

	if (!ops->init || ops->init(sch, NULL) == 0)
		return sch;

	qdisc_destroy(sch);
errout:
	return NULL;
}

/* Under dev->queue_lock and BH! */

void qdisc_reset(struct Qdisc *qdisc)
{
	struct Qdisc_ops *ops = qdisc->ops;

	if (ops->reset)
		ops->reset(qdisc);
}

/* this is the rcu callback function to clean up a qdisc when there
 * are no further references to it */

static void __qdisc_destroy(struct rcu_head *head)
{
	struct Qdisc *qdisc = container_of(head, struct Qdisc, q_rcu);
	kfree((char *) qdisc - qdisc->padded);
}
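
/* qdisc_destroy - drop a reference to a qdisc. Built-in qdiscs (noop,
 * noqueue) are never freed; otherwise, when the last reference goes away,
 * the qdisc is unlinked, reset, its ops->destroy() is called, and the
 * memory is released through call_rcu() so concurrent readers stay safe.
 */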
/* Under dev->queue_lock and BH! */
void qdisc_destroy(struct Qdisc *qdisc)
{
	struct Qdisc_ops *ops = qdisc->ops;

	if (qdisc->flags & TCQ_F_BUILTIN ||
	    !atomic_dec_and_test(&qdisc->refcnt))
		return;

	list_del(&qdisc->list);
#ifdef CONFIG_NET_ESTIMATOR
	gen_kill_estimator(&qdisc->bstats, &qdisc->rate_est);
#endif
	if (ops->reset)
		ops->reset(qdisc);
	if (ops->destroy)
		ops->destroy(qdisc);

	module_put(ops->owner);
	dev_put(qdisc->dev);
	call_rcu(&qdisc->q_rcu, __qdisc_destroy);
}
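
/* dev_activate - install dev->qdisc_sleeping as the active dev->qdisc once
 * the device is up and has carrier, creating a default root qdisc first if
 * none has been configured, and start the transmit watchdog.
 */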
void dev_activate(struct net_device *dev)
{
	/* No queueing discipline is attached to device;
	   create a default one, i.e. pfifo_fast for devices
	   which need queueing and noqueue_qdisc for virtual
	   interfaces.
	 */
	if (dev->qdisc_sleeping == &noop_qdisc) {
		struct Qdisc *qdisc;
		if (dev->tx_queue_len) {
			qdisc = qdisc_create_dflt(dev, &pfifo_fast_ops,
						  TC_H_ROOT);
			if (qdisc == NULL) {
				printk(KERN_INFO "%s: activation failed\n", dev->name);
				return;
			}
			list_add_tail(&qdisc->list, &dev->qdisc_list);
		} else {
			qdisc = &noqueue_qdisc;
		}
		dev->qdisc_sleeping = qdisc;
	}

	if (!netif_carrier_ok(dev))
		/* Delay activation until next carrier-on event */
		return;

	spin_lock_bh(&dev->queue_lock);
	rcu_assign_pointer(dev->qdisc, dev->qdisc_sleeping);
	if (dev->qdisc != &noqueue_qdisc) {
		dev->trans_start = jiffies;
		dev_watchdog_up(dev);
	}
	spin_unlock_bh(&dev->queue_lock);
}
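
/* dev_deactivate - swap the active qdisc for noop_qdisc, drop any half-sent
 * GSO skb, stop the watchdog, and wait for in-flight dev_queue_xmit() calls
 * (synchronize_rcu) and any running qdisc_run() (the QDISC_RUNNING bit) to
 * finish before returning.
 */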
void dev_deactivate(struct net_device *dev)
{
	struct Qdisc *qdisc;
	struct sk_buff *skb;

	spin_lock_bh(&dev->queue_lock);
	qdisc = dev->qdisc;
	dev->qdisc = &noop_qdisc;

	qdisc_reset(qdisc);

	skb = dev->gso_skb;
	dev->gso_skb = NULL;
	spin_unlock_bh(&dev->queue_lock);

	kfree_skb(skb);

	dev_watchdog_down(dev);

	/* Wait for outstanding dev_queue_xmit calls. */
	synchronize_rcu();

	/* Wait for outstanding qdisc_run calls. */
	while (test_bit(__LINK_STATE_QDISC_RUNNING, &dev->state))
		yield();
}

void dev_init_scheduler(struct net_device *dev)
{
	qdisc_lock_tree(dev);
	dev->qdisc = &noop_qdisc;
	dev->qdisc_sleeping = &noop_qdisc;
	INIT_LIST_HEAD(&dev->qdisc_list);
	qdisc_unlock_tree(dev);

	dev_watchdog_init(dev);
}

void dev_shutdown(struct net_device *dev)
{
	struct Qdisc *qdisc;

	qdisc_lock_tree(dev);
	qdisc = dev->qdisc_sleeping;
	dev->qdisc = &noop_qdisc;
	dev->qdisc_sleeping = &noop_qdisc;
	qdisc_destroy(qdisc);
#if defined(CONFIG_NET_SCH_INGRESS) || defined(CONFIG_NET_SCH_INGRESS_MODULE)
	if ((qdisc = dev->qdisc_ingress) != NULL) {
		dev->qdisc_ingress = NULL;
		qdisc_destroy(qdisc);
	}
#endif
	BUG_TRAP(!timer_pending(&dev->watchdog_timer));
	qdisc_unlock_tree(dev);
}

EXPORT_SYMBOL(netif_carrier_on);
EXPORT_SYMBOL(netif_carrier_off);
EXPORT_SYMBOL(noop_qdisc);
EXPORT_SYMBOL(qdisc_create_dflt);
EXPORT_SYMBOL(qdisc_destroy);
EXPORT_SYMBOL(qdisc_reset);
EXPORT_SYMBOL(qdisc_lock_tree);
EXPORT_SYMBOL(qdisc_unlock_tree);