
/*
 * net/sched/sch_generic.c	Generic packet scheduler routines.
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 *
 * Authors:	Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
 *		Jamal Hadi Salim, <hadi@cyberus.ca> 990601
 *		- Ingress support
 */

#include <asm/uaccess.h>
#include <asm/system.h>
#include <linux/bitops.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/socket.h>
#include <linux/sockios.h>
#include <linux/in.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/rtnetlink.h>
#include <linux/init.h>
#include <linux/rcupdate.h>
#include <linux/list.h>
#include <net/sock.h>
#include <net/pkt_sched.h>
/* Main transmission queue. */

/* Main qdisc structure lock.

   However, modifications to data participating in scheduling must be
   additionally protected with the dev->queue_lock spinlock.

   The idea is the following:
   - enqueue and dequeue are serialized via the top-level device
     spinlock dev->queue_lock.
   - tree walking is protected by read_lock(&qdisc_tree_lock)
     and this lock is used only in process context.
   - updates to the tree are made only under the rtnl semaphore,
     hence this lock may be taken without disabling local BH.

   qdisc_tree_lock must be grabbed BEFORE dev->queue_lock!
 */
DEFINE_RWLOCK(qdisc_tree_lock);

void qdisc_lock_tree(struct net_device *dev)
{
	write_lock(&qdisc_tree_lock);
	spin_lock_bh(&dev->queue_lock);
}

void qdisc_unlock_tree(struct net_device *dev)
{
	spin_unlock_bh(&dev->queue_lock);
	write_unlock(&qdisc_tree_lock);
}
/*
   dev->queue_lock serializes queue accesses for this device
   AND the dev->qdisc pointer itself.

   netif_tx_lock serializes accesses to the device driver.

   dev->queue_lock and netif_tx_lock are mutually exclusive:
   if one is grabbed, the other must be free.
 */
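
/* qdisc_restart() below honours this exclusion: while holding
 * dev->queue_lock it only trylocks the driver, and it drops
 * dev->queue_lock before calling into hard_start_xmit().
 */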
/* Kick the device.

   Note that this procedure can be called by a watchdog timer, so
   we do not check the dev->tbusy flag here.

   Returns:  0  - queue is empty.
	    >0  - queue is not empty, but throttled.
	    <0  - queue is not empty. Device is throttled, if dev->tbusy != 0.

   NOTE: Called under dev->queue_lock with locally disabled BH.
*/
static inline int qdisc_restart(struct net_device *dev)
{
	struct Qdisc *q = dev->qdisc;
	struct sk_buff *skb;

	/* Dequeue packet */
	if ((skb = dev->gso_skb) || (skb = q->dequeue(q))) {
		unsigned nolock = (dev->features & NETIF_F_LLTX);

		dev->gso_skb = NULL;

		/*
		 * When the driver has LLTX set, it does its own locking
		 * in start_xmit. No need to add additional overhead by
		 * locking again. These checks are worth it because
		 * even uncongested locks can be quite expensive.
		 * The driver can do a trylock, as here, too; in case
		 * of lock contention it should return NETDEV_TX_LOCKED
		 * and the packet will be requeued.
		 */
		if (!nolock) {
			if (!netif_tx_trylock(dev)) {
			collision:
				/* So, someone grabbed the driver. */

				/* It may be a transient configuration error,
				   when hard_start_xmit() recurses. We detect
				   it by checking the xmit owner and drop the
				   packet when a dead loop is detected.
				 */
				if (dev->xmit_lock_owner == smp_processor_id()) {
					kfree_skb(skb);
					if (net_ratelimit())
						printk(KERN_DEBUG "Dead loop on netdevice %s, fix it urgently!\n", dev->name);
					return -1;
				}
				__get_cpu_var(netdev_rx_stat).cpu_collision++;
				goto requeue;
			}
		}

		{
			/* And release the queue */
			spin_unlock(&dev->queue_lock);

			if (!netif_queue_stopped(dev)) {
				int ret;

				ret = dev_hard_start_xmit(skb, dev);
				if (ret == NETDEV_TX_OK) {
					if (!nolock)
						netif_tx_unlock(dev);
					spin_lock(&dev->queue_lock);
					return -1;
				}
				if (ret == NETDEV_TX_LOCKED && nolock) {
					spin_lock(&dev->queue_lock);
					goto collision;
				}
			}

			/* NETDEV_TX_BUSY - we need to requeue */
			/* Release the driver */
			if (!nolock)
				netif_tx_unlock(dev);
			spin_lock(&dev->queue_lock);
			q = dev->qdisc;
		}

		/* Device kicked us out :(
		   This is possible in four cases:

		   0. driver is locked
		   1. fastroute is enabled
		   2. device cannot determine busy state
		      before start of transmission (f.e. dialout)
		   3. device is buggy (ppp)
		 */

requeue:
		if (skb->next)
			dev->gso_skb = skb;
		else
			q->ops->requeue(skb, q);
		netif_schedule(dev);
		return 1;
	}
	BUG_ON((int) q->q.qlen < 0);
	return q->q.qlen;
}
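
/* qdisc_run() (in net/pkt_sched.h) sets __LINK_STATE_QDISC_RUNNING
 * before calling in here, so only one CPU drains the queue at a time;
 * we keep transmitting while qdisc_restart() reports progress (<0)
 * and the driver has not stopped the queue, then clear the bit.
 */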
void __qdisc_run(struct net_device *dev)
{
	if (unlikely(dev->qdisc == &noop_qdisc))
		goto out;

	while (qdisc_restart(dev) < 0 && !netif_queue_stopped(dev))
		/* NOTHING */;

out:
	clear_bit(__LINK_STATE_QDISC_RUNNING, &dev->state);
}
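
/* Transmit watchdog: the timer holds a reference on the device, so
 * dev_hold() is taken whenever mod_timer() arms a previously inactive
 * timer (mod_timer() returns 0 in that case), and dev_put() drops the
 * reference when a timer run finishes or del_timer() kills it.
 */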
static void dev_watchdog(unsigned long arg)
{
	struct net_device *dev = (struct net_device *)arg;

	netif_tx_lock(dev);
	if (dev->qdisc != &noop_qdisc) {
		if (netif_device_present(dev) &&
		    netif_running(dev) &&
		    netif_carrier_ok(dev)) {
			if (netif_queue_stopped(dev) &&
			    time_after(jiffies, dev->trans_start + dev->watchdog_timeo)) {
				printk(KERN_INFO "NETDEV WATCHDOG: %s: transmit timed out\n",
				       dev->name);
				dev->tx_timeout(dev);
			}
			if (!mod_timer(&dev->watchdog_timer, jiffies + dev->watchdog_timeo))
				dev_hold(dev);
		}
	}
	netif_tx_unlock(dev);

	dev_put(dev);
}
static void dev_watchdog_init(struct net_device *dev)
{
	init_timer(&dev->watchdog_timer);
	dev->watchdog_timer.data = (unsigned long)dev;
	dev->watchdog_timer.function = dev_watchdog;
}

void __netdev_watchdog_up(struct net_device *dev)
{
	if (dev->tx_timeout) {
		if (dev->watchdog_timeo <= 0)
			dev->watchdog_timeo = 5*HZ;
		if (!mod_timer(&dev->watchdog_timer, jiffies + dev->watchdog_timeo))
			dev_hold(dev);
	}
}

static void dev_watchdog_up(struct net_device *dev)
{
	__netdev_watchdog_up(dev);
}

static void dev_watchdog_down(struct net_device *dev)
{
	netif_tx_lock_bh(dev);
	if (del_timer(&dev->watchdog_timer))
		dev_put(dev);
	netif_tx_unlock_bh(dev);
}
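
/* Carrier state: netif_carrier_on/off act only on an actual state
 * transition (test_and_{clear,set}_bit), so repeated calls are cheap
 * and linkwatch events fire once per change.
 */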
void netif_carrier_on(struct net_device *dev)
{
	if (test_and_clear_bit(__LINK_STATE_NOCARRIER, &dev->state))
		linkwatch_fire_event(dev);
	if (netif_running(dev))
		__netdev_watchdog_up(dev);
}

void netif_carrier_off(struct net_device *dev)
{
	if (!test_and_set_bit(__LINK_STATE_NOCARRIER, &dev->state))
		linkwatch_fire_event(dev);
}
/* "NOOP" scheduler: the best scheduler, recommended for all interfaces
   under all circumstances. It is difficult to invent anything faster or
   cheaper.
 */

static int noop_enqueue(struct sk_buff *skb, struct Qdisc *qdisc)
{
	kfree_skb(skb);
	return NET_XMIT_CN;
}

static struct sk_buff *noop_dequeue(struct Qdisc *qdisc)
{
	return NULL;
}

static int noop_requeue(struct sk_buff *skb, struct Qdisc *qdisc)
{
	if (net_ratelimit())
		printk(KERN_DEBUG "%s deferred output. It is buggy.\n",
		       skb->dev->name);
	kfree_skb(skb);
	return NET_XMIT_CN;
}

struct Qdisc_ops noop_qdisc_ops = {
	.id = "noop",
	.priv_size = 0,
	.enqueue = noop_enqueue,
	.dequeue = noop_dequeue,
	.requeue = noop_requeue,
	.owner = THIS_MODULE,
};

struct Qdisc noop_qdisc = {
	.enqueue = noop_enqueue,
	.dequeue = noop_dequeue,
	.flags = TCQ_F_BUILTIN,
	.ops = &noop_qdisc_ops,
	.list = LIST_HEAD_INIT(noop_qdisc.list),
};

static struct Qdisc_ops noqueue_qdisc_ops = {
	.id = "noqueue",
	.priv_size = 0,
	.enqueue = noop_enqueue,
	.dequeue = noop_dequeue,
	.requeue = noop_requeue,
	.owner = THIS_MODULE,
};

static struct Qdisc noqueue_qdisc = {
	.enqueue = NULL,
	.dequeue = noop_dequeue,
	.flags = TCQ_F_BUILTIN,
	.ops = &noqueue_qdisc_ops,
	.list = LIST_HEAD_INIT(noqueue_qdisc.list),
};
static const u8 prio2band[TC_PRIO_MAX+1] =
	{ 1, 2, 2, 2, 1, 2, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1 };
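
/* Lower band number means higher priority: band 0 is dequeued first.
 * The table above sends TC_PRIO_INTERACTIVE and TC_PRIO_CONTROL (6, 7)
 * to band 0, bulk/filler priorities (1, 2, 3, 5) to band 2, and
 * everything else, including TC_PRIO_BESTEFFORT (0), to band 1.
 */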
/* 3-band FIFO queue: old style, but should be a bit faster than
   the generic prio+fifo combination.
 */

#define PFIFO_FAST_BANDS 3

static inline struct sk_buff_head *prio2list(struct sk_buff *skb,
					     struct Qdisc *qdisc)
{
	struct sk_buff_head *list = qdisc_priv(qdisc);
	return list + prio2band[skb->priority & TC_PRIO_MAX];
}
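
/* Note that the tx_queue_len limit below is enforced per band, so the
 * qdisc as a whole may hold up to PFIFO_FAST_BANDS * tx_queue_len
 * packets.
 */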
static int pfifo_fast_enqueue(struct sk_buff *skb, struct Qdisc *qdisc)
{
	struct sk_buff_head *list = prio2list(skb, qdisc);

	if (skb_queue_len(list) < qdisc->dev->tx_queue_len) {
		qdisc->q.qlen++;
		return __qdisc_enqueue_tail(skb, qdisc, list);
	}

	return qdisc_drop(skb, qdisc);
}

static struct sk_buff *pfifo_fast_dequeue(struct Qdisc *qdisc)
{
	int prio;
	struct sk_buff_head *list = qdisc_priv(qdisc);

	for (prio = 0; prio < PFIFO_FAST_BANDS; prio++) {
		if (!skb_queue_empty(list + prio)) {
			qdisc->q.qlen--;
			return __qdisc_dequeue_head(qdisc, list + prio);
		}
	}

	return NULL;
}

static int pfifo_fast_requeue(struct sk_buff *skb, struct Qdisc *qdisc)
{
	qdisc->q.qlen++;
	return __qdisc_requeue(skb, qdisc, prio2list(skb, qdisc));
}

static void pfifo_fast_reset(struct Qdisc *qdisc)
{
	int prio;
	struct sk_buff_head *list = qdisc_priv(qdisc);

	for (prio = 0; prio < PFIFO_FAST_BANDS; prio++)
		__qdisc_reset_queue(qdisc, list + prio);

	qdisc->qstats.backlog = 0;
	qdisc->q.qlen = 0;
}

static int pfifo_fast_dump(struct Qdisc *qdisc, struct sk_buff *skb)
{
	struct tc_prio_qopt opt = { .bands = PFIFO_FAST_BANDS };

	memcpy(&opt.priomap, prio2band, TC_PRIO_MAX+1);
	RTA_PUT(skb, TCA_OPTIONS, sizeof(opt), &opt);
	return skb->len;

rtattr_failure:
	return -1;
}

static int pfifo_fast_init(struct Qdisc *qdisc, struct rtattr *opt)
{
	int prio;
	struct sk_buff_head *list = qdisc_priv(qdisc);

	for (prio = 0; prio < PFIFO_FAST_BANDS; prio++)
		skb_queue_head_init(list + prio);

	return 0;
}

static struct Qdisc_ops pfifo_fast_ops = {
	.id = "pfifo_fast",
	.priv_size = PFIFO_FAST_BANDS * sizeof(struct sk_buff_head),
	.enqueue = pfifo_fast_enqueue,
	.dequeue = pfifo_fast_dequeue,
	.requeue = pfifo_fast_requeue,
	.init = pfifo_fast_init,
	.reset = pfifo_fast_reset,
	.dump = pfifo_fast_dump,
	.owner = THIS_MODULE,
};
struct Qdisc *qdisc_alloc(struct net_device *dev, struct Qdisc_ops *ops)
{
	void *p;
	struct Qdisc *sch;
	unsigned int size;
	int err = -ENOBUFS;

	/* ensure that the Qdisc and the private data are 32-byte aligned */
	size = QDISC_ALIGN(sizeof(*sch));
	size += ops->priv_size + (QDISC_ALIGNTO - 1);
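	/* Worked example, assuming QDISC_ALIGNTO is 32: if sizeof(*sch)
	 * were 140, QDISC_ALIGN() rounds it up to 160; the extra
	 * QDISC_ALIGNTO - 1 slack bytes guarantee that an aligned Qdisc
	 * plus its private area fit wherever kzalloc() places the buffer.
	 */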
	p = kzalloc(size, GFP_KERNEL);
	if (!p)
		goto errout;
	sch = (struct Qdisc *) QDISC_ALIGN((unsigned long) p);
	sch->padded = (char *) sch - (char *) p;

	INIT_LIST_HEAD(&sch->list);
	skb_queue_head_init(&sch->q);
	sch->ops = ops;
	sch->enqueue = ops->enqueue;
	sch->dequeue = ops->dequeue;
	sch->dev = dev;
	dev_hold(dev);
	sch->stats_lock = &dev->queue_lock;
	atomic_set(&sch->refcnt, 1);

	return sch;
errout:
	return ERR_PTR(err);	/* err is already negative (-ENOBUFS) */
}
struct Qdisc *qdisc_create_dflt(struct net_device *dev, struct Qdisc_ops *ops)
{
	struct Qdisc *sch;

	sch = qdisc_alloc(dev, ops);
	if (IS_ERR(sch))
		goto errout;

	if (!ops->init || ops->init(sch, NULL) == 0)
		return sch;

	qdisc_destroy(sch);
errout:
	return NULL;
}
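
/* Typical use: dev_activate() below attaches the default queue with
 * qdisc_create_dflt(dev, &pfifo_fast_ops).
 */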
/* Under dev->queue_lock and BH! */

void qdisc_reset(struct Qdisc *qdisc)
{
	struct Qdisc_ops *ops = qdisc->ops;

	if (ops->reset)
		ops->reset(qdisc);
}
/* this is the rcu callback function to clean up a qdisc when there
 * are no further references to it */
static void __qdisc_destroy(struct rcu_head *head)
{
	struct Qdisc *qdisc = container_of(head, struct Qdisc, q_rcu);
	kfree((char *) qdisc - qdisc->padded);
}
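
/* Built-in qdiscs (noop, noqueue) are statically allocated and marked
 * TCQ_F_BUILTIN, so they must never be torn down; everything else is
 * freed via RCU once the last reference is dropped, so readers that
 * found the qdisc under rcu_read_lock() can finish safely.
 */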
/* Under dev->queue_lock and BH! */

void qdisc_destroy(struct Qdisc *qdisc)
{
	struct Qdisc_ops *ops = qdisc->ops;

	if (qdisc->flags & TCQ_F_BUILTIN ||
	    !atomic_dec_and_test(&qdisc->refcnt))
		return;

	list_del(&qdisc->list);
#ifdef CONFIG_NET_ESTIMATOR
	gen_kill_estimator(&qdisc->bstats, &qdisc->rate_est);
#endif
	if (ops->reset)
		ops->reset(qdisc);
	if (ops->destroy)
		ops->destroy(qdisc);

	module_put(ops->owner);
	dev_put(qdisc->dev);
	call_rcu(&qdisc->q_rcu, __qdisc_destroy);
}
void dev_activate(struct net_device *dev)
{
	/* No queueing discipline is attached to the device;
	   create a default one, i.e. pfifo_fast for devices
	   which need queueing and noqueue_qdisc for
	   virtual interfaces.
	 */

	if (dev->qdisc_sleeping == &noop_qdisc) {
		struct Qdisc *qdisc;
		if (dev->tx_queue_len) {
			qdisc = qdisc_create_dflt(dev, &pfifo_fast_ops);
			if (qdisc == NULL) {
				printk(KERN_INFO "%s: activation failed\n", dev->name);
				return;
			}
			write_lock(&qdisc_tree_lock);
			list_add_tail(&qdisc->list, &dev->qdisc_list);
			write_unlock(&qdisc_tree_lock);
		} else {
			qdisc = &noqueue_qdisc;
		}
		write_lock(&qdisc_tree_lock);
		dev->qdisc_sleeping = qdisc;
		write_unlock(&qdisc_tree_lock);
	}

	if (!netif_carrier_ok(dev))
		/* Delay activation until next carrier-on event */
		return;

	spin_lock_bh(&dev->queue_lock);
	rcu_assign_pointer(dev->qdisc, dev->qdisc_sleeping);
	if (dev->qdisc != &noqueue_qdisc) {
		dev->trans_start = jiffies;
		dev_watchdog_up(dev);
	}
	spin_unlock_bh(&dev->queue_lock);
}
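
/* Teardown order matters here: swap in noop_qdisc under queue_lock so
 * new packets are dropped, stop the watchdog, wait an RCU grace period
 * for in-flight dev_queue_xmit() readers, then spin until any running
 * qdisc_run() instance has cleared __LINK_STATE_QDISC_RUNNING.
 */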
void dev_deactivate(struct net_device *dev)
{
	struct Qdisc *qdisc;

	spin_lock_bh(&dev->queue_lock);
	qdisc = dev->qdisc;
	dev->qdisc = &noop_qdisc;

	qdisc_reset(qdisc);

	spin_unlock_bh(&dev->queue_lock);

	dev_watchdog_down(dev);

	/* Wait for outstanding dev_queue_xmit calls. */
	synchronize_rcu();

	/* Wait for outstanding qdisc_run calls. */
	while (test_bit(__LINK_STATE_QDISC_RUNNING, &dev->state))
		yield();

	if (dev->gso_skb) {
		kfree_skb(dev->gso_skb);
		dev->gso_skb = NULL;
	}
}
void dev_init_scheduler(struct net_device *dev)
{
	qdisc_lock_tree(dev);
	dev->qdisc = &noop_qdisc;
	dev->qdisc_sleeping = &noop_qdisc;
	INIT_LIST_HEAD(&dev->qdisc_list);
	qdisc_unlock_tree(dev);

	dev_watchdog_init(dev);
}

void dev_shutdown(struct net_device *dev)
{
	struct Qdisc *qdisc;

	qdisc_lock_tree(dev);
	qdisc = dev->qdisc_sleeping;
	dev->qdisc = &noop_qdisc;
	dev->qdisc_sleeping = &noop_qdisc;
	qdisc_destroy(qdisc);
#if defined(CONFIG_NET_SCH_INGRESS) || defined(CONFIG_NET_SCH_INGRESS_MODULE)
	if ((qdisc = dev->qdisc_ingress) != NULL) {
		dev->qdisc_ingress = NULL;
		qdisc_destroy(qdisc);
	}
#endif
	BUG_TRAP(!timer_pending(&dev->watchdog_timer));
	qdisc_unlock_tree(dev);
}
EXPORT_SYMBOL(__netdev_watchdog_up);
EXPORT_SYMBOL(netif_carrier_on);
EXPORT_SYMBOL(netif_carrier_off);
EXPORT_SYMBOL(noop_qdisc);
EXPORT_SYMBOL(noop_qdisc_ops);
EXPORT_SYMBOL(qdisc_create_dflt);
EXPORT_SYMBOL(qdisc_alloc);
EXPORT_SYMBOL(qdisc_destroy);
EXPORT_SYMBOL(qdisc_reset);
EXPORT_SYMBOL(qdisc_lock_tree);
EXPORT_SYMBOL(qdisc_unlock_tree);