/*
 * net/sched/sch_generic.c	Generic packet scheduler routines.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 * Authors:	Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
 *		Jamal Hadi Salim, <hadi@cyberus.ca> 990601
 *		- Ingress support
 */

#include <linux/bitops.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/rtnetlink.h>
#include <linux/init.h>
#include <linux/rcupdate.h>
#include <linux/list.h>
#include <net/pkt_sched.h>

/* Main transmission queue. */

/* Modifications to data participating in scheduling must be protected with
 * queue->lock spinlock.
 *
 * The idea is the following:
 * - enqueue, dequeue are serialized via top level device
 *   spinlock queue->lock.
 * - ingress filtering is serialized via top level device
 *   spinlock dev->rx_queue.lock.
 * - updates to tree and tree walking are only done under the rtnl mutex.
 */

void qdisc_lock_tree(struct net_device *dev)
	__acquires(dev->rx_queue.lock)
{
	unsigned int i;

	local_bh_disable();
	for (i = 0; i < dev->num_tx_queues; i++) {
		struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
		spin_lock(&txq->lock);
	}
	spin_lock(&dev->rx_queue.lock);
}
EXPORT_SYMBOL(qdisc_lock_tree);

void qdisc_unlock_tree(struct net_device *dev)
	__releases(dev->rx_queue.lock)
{
	unsigned int i;

	spin_unlock(&dev->rx_queue.lock);
	for (i = 0; i < dev->num_tx_queues; i++) {
		struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
		spin_unlock(&txq->lock);
	}
	local_bh_enable();
}
EXPORT_SYMBOL(qdisc_unlock_tree);

static inline int qdisc_qlen(struct Qdisc *q)
{
	return q->q.qlen;
}
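
/* Put an skb back onto the qdisc after a failed transmit.  A partially
 * sent GSO segment chain (skb->next still set) is parked in q->gso_skb
 * rather than going through ->requeue(); either way the qdisc is
 * rescheduled so the packet is retried on the next run.
 */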
static inline int dev_requeue_skb(struct sk_buff *skb, struct Qdisc *q)
{
	if (unlikely(skb->next))
		q->gso_skb = skb;
	else
		q->ops->requeue(skb, q);

	__netif_schedule(q);
	return 0;
}
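
/* Fetch the next packet to send: a previously requeued GSO skb, if any,
 * takes precedence over a fresh ->dequeue() from the qdisc.
 */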
static inline struct sk_buff *dequeue_skb(struct Qdisc *q)
{
	struct sk_buff *skb;

	if ((skb = q->gso_skb))
		q->gso_skb = NULL;
	else
		skb = q->dequeue(q);

	return skb;
}

static inline int handle_dev_cpu_collision(struct sk_buff *skb,
					   struct netdev_queue *dev_queue,
					   struct Qdisc *q)
{
	int ret;

	if (unlikely(dev_queue->xmit_lock_owner == smp_processor_id())) {
		/*
		 * Same CPU holding the lock. It may be a transient
		 * configuration error, when hard_start_xmit() recurses. We
		 * detect it by checking xmit owner and drop the packet when
		 * deadloop is detected. Return OK to try the next skb.
		 */
		kfree_skb(skb);
		if (net_ratelimit())
			printk(KERN_WARNING "Dead loop on netdevice %s, "
			       "fix it urgently!\n", dev_queue->dev->name);
		ret = qdisc_qlen(q);
	} else {
		/*
		 * Another cpu is holding lock, requeue & delay xmits for
		 * some time.
		 */
		__get_cpu_var(netdev_rx_stat).cpu_collision++;
		ret = dev_requeue_skb(skb, q);
	}

	return ret;
}

/*
 * NOTE: Called under queue->lock with locally disabled BH.
 *
 * __QDISC_STATE_RUNNING guarantees only one CPU can process
 * this qdisc at a time. queue->lock serializes queue accesses for
 * this queue AND txq->qdisc pointer itself.
 *
 * netif_tx_lock serializes accesses to device driver.
 *
 * queue->lock and netif_tx_lock are mutually exclusive,
 * if one is grabbed, the other must be free.
 *
 * Note that this procedure can be called by a watchdog timer.
 *
 * Returns to the caller:
 *	0  - queue is empty or throttled.
 *	>0 - queue is not empty.
 */
static inline int qdisc_restart(struct Qdisc *q)
{
	struct netdev_queue *txq;
	int ret = NETDEV_TX_BUSY;
	struct net_device *dev;
	spinlock_t *root_lock;
	struct sk_buff *skb;

	/* Dequeue packet */
	if (unlikely((skb = dequeue_skb(q)) == NULL))
		return 0;

	root_lock = qdisc_root_lock(q);

	/* And release qdisc */
	spin_unlock(root_lock);

	dev = qdisc_dev(q);
	txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));

	HARD_TX_LOCK(dev, txq, smp_processor_id());
	if (!netif_subqueue_stopped(dev, skb))
		ret = dev_hard_start_xmit(skb, dev, txq);
	HARD_TX_UNLOCK(dev, txq);

	spin_lock(root_lock);

	switch (ret) {
	case NETDEV_TX_OK:
		/* Driver sent out skb successfully */
		ret = qdisc_qlen(q);
		break;

	case NETDEV_TX_LOCKED:
		/* Driver try lock failed */
		ret = handle_dev_cpu_collision(skb, txq, q);
		break;

	default:
		/* Driver returned NETDEV_TX_BUSY - requeue skb */
		if (unlikely(ret != NETDEV_TX_BUSY && net_ratelimit()))
			printk(KERN_WARNING "BUG %s code %d qlen %d\n",
			       dev->name, ret, q->q.qlen);

		ret = dev_requeue_skb(skb, q);
		break;
	}

	if (ret && netif_tx_queue_stopped(txq))
		ret = 0;

	return ret;
}
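
/* Drain the qdisc: keep calling qdisc_restart() while it reports a
 * non-empty queue, but bail out (rescheduling via __netif_schedule())
 * as soon as another task needs the CPU or a jiffy has passed.  The
 * __QDISC_STATE_RUNNING bit is cleared when this invocation finishes.
 */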
void __qdisc_run(struct Qdisc *q)
{
	unsigned long start_time = jiffies;

	while (qdisc_restart(q)) {
		/*
		 * Postpone processing if
		 * 1. another process needs the CPU;
		 * 2. we've been doing it for too long.
		 */
		if (need_resched() || jiffies != start_time) {
			__netif_schedule(q);
			break;
		}
	}

	clear_bit(__QDISC_STATE_RUNNING, &q->state);
}
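
/* Per-device transmit watchdog timer.  If the device is present, running
 * and has carrier, and some tx queue has been stopped for longer than
 * dev->watchdog_timeo since the last transmission, invoke the driver's
 * ->tx_timeout() handler; the timer is then re-armed for the next period.
 */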
static void dev_watchdog(unsigned long arg)
{
	struct net_device *dev = (struct net_device *)arg;

	netif_tx_lock(dev);
	if (!qdisc_tx_is_noop(dev)) {
		if (netif_device_present(dev) &&
		    netif_running(dev) &&
		    netif_carrier_ok(dev)) {
			int some_queue_stopped = 0;
			unsigned int i;

			for (i = 0; i < dev->num_tx_queues; i++) {
				struct netdev_queue *txq;

				txq = netdev_get_tx_queue(dev, i);
				if (netif_tx_queue_stopped(txq)) {
					some_queue_stopped = 1;
					break;
				}
			}

			if (some_queue_stopped &&
			    time_after(jiffies, (dev->trans_start +
						 dev->watchdog_timeo))) {
				printk(KERN_INFO "NETDEV WATCHDOG: %s: "
				       "transmit timed out\n",
				       dev->name);
				dev->tx_timeout(dev);
				WARN_ON_ONCE(1);
			}
			if (!mod_timer(&dev->watchdog_timer,
				       round_jiffies(jiffies +
						     dev->watchdog_timeo)))
				dev_hold(dev);
		}
	}
	netif_tx_unlock(dev);

	dev_put(dev);
}
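
/* Arm the transmit watchdog for a device that provides ->tx_timeout().
 * An unset watchdog_timeo defaults to 5 seconds, and the pending timer
 * holds a reference on the device.
 */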
void __netdev_watchdog_up(struct net_device *dev)
{
	if (dev->tx_timeout) {
		if (dev->watchdog_timeo <= 0)
			dev->watchdog_timeo = 5*HZ;
		if (!mod_timer(&dev->watchdog_timer,
			       round_jiffies(jiffies + dev->watchdog_timeo)))
			dev_hold(dev);
	}
}

static void dev_watchdog_up(struct net_device *dev)
{
	__netdev_watchdog_up(dev);
}

static void dev_watchdog_down(struct net_device *dev)
{
	netif_tx_lock_bh(dev);
	if (del_timer(&dev->watchdog_timer))
		dev_put(dev);
	netif_tx_unlock_bh(dev);
}

/**
 *	netif_carrier_on - set carrier
 *	@dev: network device
 *
 *	Device has detected acquisition of carrier.
 */
void netif_carrier_on(struct net_device *dev)
{
	if (test_and_clear_bit(__LINK_STATE_NOCARRIER, &dev->state)) {
		linkwatch_fire_event(dev);
		if (netif_running(dev))
			__netdev_watchdog_up(dev);
	}
}
EXPORT_SYMBOL(netif_carrier_on);

/**
 *	netif_carrier_off - clear carrier
 *	@dev: network device
 *
 *	Device has detected loss of carrier.
 */
void netif_carrier_off(struct net_device *dev)
{
	if (!test_and_set_bit(__LINK_STATE_NOCARRIER, &dev->state))
		linkwatch_fire_event(dev);
}
EXPORT_SYMBOL(netif_carrier_off);

/* "NOOP" scheduler: the best scheduler, recommended for all interfaces
   under all circumstances. It is difficult to invent anything faster or
   cheaper.
 */

static int noop_enqueue(struct sk_buff *skb, struct Qdisc *qdisc)
{
	kfree_skb(skb);
	return NET_XMIT_CN;
}

static struct sk_buff *noop_dequeue(struct Qdisc *qdisc)
{
	return NULL;
}

static int noop_requeue(struct sk_buff *skb, struct Qdisc *qdisc)
{
	if (net_ratelimit())
		printk(KERN_DEBUG "%s deferred output. It is buggy.\n",
		       skb->dev->name);
	kfree_skb(skb);
	return NET_XMIT_CN;
}

struct Qdisc_ops noop_qdisc_ops __read_mostly = {
	.id		=	"noop",
	.priv_size	=	0,
	.enqueue	=	noop_enqueue,
	.dequeue	=	noop_dequeue,
	.requeue	=	noop_requeue,
	.owner		=	THIS_MODULE,
};

static struct netdev_queue noop_netdev_queue = {
	.lock		=	__SPIN_LOCK_UNLOCKED(noop_netdev_queue.lock),
	.qdisc		=	&noop_qdisc,
};

struct Qdisc noop_qdisc = {
	.enqueue	=	noop_enqueue,
	.dequeue	=	noop_dequeue,
	.flags		=	TCQ_F_BUILTIN,
	.ops		=	&noop_qdisc_ops,
	.list		=	LIST_HEAD_INIT(noop_qdisc.list),
	.dev_queue	=	&noop_netdev_queue,
};
EXPORT_SYMBOL(noop_qdisc);

static struct Qdisc_ops noqueue_qdisc_ops __read_mostly = {
	.id		=	"noqueue",
	.priv_size	=	0,
	.enqueue	=	noop_enqueue,
	.dequeue	=	noop_dequeue,
	.requeue	=	noop_requeue,
	.owner		=	THIS_MODULE,
};
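
/* The "noqueue" qdisc deliberately leaves ->enqueue NULL: dev_queue_xmit()
 * takes a NULL enqueue hook to mean "no queue" and hands packets straight
 * to the driver, which is the behaviour virtual interfaces want.
 */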
static struct Qdisc noqueue_qdisc = {
	.enqueue	=	NULL,
	.dequeue	=	noop_dequeue,
	.flags		=	TCQ_F_BUILTIN,
	.ops		=	&noqueue_qdisc_ops,
	.list		=	LIST_HEAD_INIT(noqueue_qdisc.list),
};

static const u8 prio2band[TC_PRIO_MAX+1] =
	{ 1, 2, 2, 2, 1, 2, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1 };
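
/* Example of the mapping above (band 0 is served first, band 2 last):
 * TC_PRIO_INTERACTIVE (6) and TC_PRIO_CONTROL (7) fall into band 0,
 * TC_PRIO_BESTEFFORT (0) into band 1, TC_PRIO_BULK (2) into band 2.
 * The index is skb->priority masked with TC_PRIO_MAX.
 */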

/* 3-band FIFO queue: old style, but should be a bit faster than
   generic prio+fifo combination.
 */

#define PFIFO_FAST_BANDS 3

static inline struct sk_buff_head *prio2list(struct sk_buff *skb,
					     struct Qdisc *qdisc)
{
	struct sk_buff_head *list = qdisc_priv(qdisc);
	return list + prio2band[skb->priority & TC_PRIO_MAX];
}

static int pfifo_fast_enqueue(struct sk_buff *skb, struct Qdisc *qdisc)
{
	struct sk_buff_head *list = prio2list(skb, qdisc);

	if (skb_queue_len(list) < qdisc_dev(qdisc)->tx_queue_len) {
		qdisc->q.qlen++;
		return __qdisc_enqueue_tail(skb, qdisc, list);
	}

	return qdisc_drop(skb, qdisc);
}
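
/* Strict-priority dequeue: scan the bands in order and take the head of
 * the first non-empty one, so band 0 traffic can starve bands 1 and 2.
 */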
static struct sk_buff *pfifo_fast_dequeue(struct Qdisc *qdisc)
{
	int prio;
	struct sk_buff_head *list = qdisc_priv(qdisc);

	for (prio = 0; prio < PFIFO_FAST_BANDS; prio++) {
		if (!skb_queue_empty(list + prio)) {
			qdisc->q.qlen--;
			return __qdisc_dequeue_head(qdisc, list + prio);
		}
	}

	return NULL;
}

static int pfifo_fast_requeue(struct sk_buff *skb, struct Qdisc *qdisc)
{
	qdisc->q.qlen++;
	return __qdisc_requeue(skb, qdisc, prio2list(skb, qdisc));
}

static void pfifo_fast_reset(struct Qdisc *qdisc)
{
	int prio;
	struct sk_buff_head *list = qdisc_priv(qdisc);

	for (prio = 0; prio < PFIFO_FAST_BANDS; prio++)
		__qdisc_reset_queue(qdisc, list + prio);

	qdisc->qstats.backlog = 0;
	qdisc->q.qlen = 0;
}

static int pfifo_fast_dump(struct Qdisc *qdisc, struct sk_buff *skb)
{
	struct tc_prio_qopt opt = { .bands = PFIFO_FAST_BANDS };

	memcpy(&opt.priomap, prio2band, TC_PRIO_MAX+1);
	NLA_PUT(skb, TCA_OPTIONS, sizeof(opt), &opt);
	return skb->len;

nla_put_failure:
	return -1;
}

static int pfifo_fast_init(struct Qdisc *qdisc, struct nlattr *opt)
{
	int prio;
	struct sk_buff_head *list = qdisc_priv(qdisc);

	for (prio = 0; prio < PFIFO_FAST_BANDS; prio++)
		skb_queue_head_init(list + prio);

	return 0;
}

static struct Qdisc_ops pfifo_fast_ops __read_mostly = {
	.id		=	"pfifo_fast",
	.priv_size	=	PFIFO_FAST_BANDS * sizeof(struct sk_buff_head),
	.enqueue	=	pfifo_fast_enqueue,
	.dequeue	=	pfifo_fast_dequeue,
	.requeue	=	pfifo_fast_requeue,
	.init		=	pfifo_fast_init,
	.reset		=	pfifo_fast_reset,
	.dump		=	pfifo_fast_dump,
	.owner		=	THIS_MODULE,
};
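
/* Allocate a qdisc together with its ops-private area in one block.  The
 * returned struct Qdisc is rounded up to a QDISC_ALIGNTO boundary, and
 * the gap between the raw kzalloc() pointer and the aligned qdisc is
 * recorded in sch->padded so __qdisc_destroy() can free the original
 * allocation.
 */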
struct Qdisc *qdisc_alloc(struct netdev_queue *dev_queue,
			  struct Qdisc_ops *ops)
{
	void *p;
	struct Qdisc *sch;
	unsigned int size;
	int err = -ENOBUFS;

	/* ensure that the Qdisc and the private data are 32-byte aligned */
	size = QDISC_ALIGN(sizeof(*sch));
	size += ops->priv_size + (QDISC_ALIGNTO - 1);

	p = kzalloc(size, GFP_KERNEL);
	if (!p)
		goto errout;
	sch = (struct Qdisc *) QDISC_ALIGN((unsigned long) p);
	sch->padded = (char *) sch - (char *) p;

	INIT_LIST_HEAD(&sch->list);
	skb_queue_head_init(&sch->q);
	sch->ops = ops;
	sch->enqueue = ops->enqueue;
	sch->dequeue = ops->dequeue;
	sch->dev_queue = dev_queue;
	dev_hold(qdisc_dev(sch));
	atomic_set(&sch->refcnt, 1);

	return sch;
errout:
	return ERR_PTR(err);
}
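
/* Allocate and initialise a default qdisc of the given ops for one tx
 * queue.  Unlike qdisc_alloc(), this returns NULL (not an ERR_PTR) on
 * failure, destroying the half-built qdisc if ->init() fails.
 */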
struct Qdisc *qdisc_create_dflt(struct net_device *dev,
				struct netdev_queue *dev_queue,
				struct Qdisc_ops *ops,
				unsigned int parentid)
{
	struct Qdisc *sch;

	sch = qdisc_alloc(dev_queue, ops);
	if (IS_ERR(sch))
		goto errout;
	sch->parent = parentid;

	if (!ops->init || ops->init(sch, NULL) == 0)
		return sch;

	qdisc_destroy(sch);
errout:
	return NULL;
}
EXPORT_SYMBOL(qdisc_create_dflt);

/* Under queue->lock and BH! */

void qdisc_reset(struct Qdisc *qdisc)
{
	const struct Qdisc_ops *ops = qdisc->ops;

	if (ops->reset)
		ops->reset(qdisc);
}
EXPORT_SYMBOL(qdisc_reset);

/* this is the rcu callback function to clean up a qdisc when there
 * are no further references to it */
static void __qdisc_destroy(struct rcu_head *head)
{
	struct Qdisc *qdisc = container_of(head, struct Qdisc, q_rcu);
	kfree((char *) qdisc - qdisc->padded);
}

/* Under queue->lock and BH! */

void qdisc_destroy(struct Qdisc *qdisc)
{
	const struct Qdisc_ops *ops = qdisc->ops;

	if (qdisc->flags & TCQ_F_BUILTIN ||
	    !atomic_dec_and_test(&qdisc->refcnt))
		return;

	list_del(&qdisc->list);
	gen_kill_estimator(&qdisc->bstats, &qdisc->rate_est);
	if (ops->reset)
		ops->reset(qdisc);
	if (ops->destroy)
		ops->destroy(qdisc);

	module_put(ops->owner);
	dev_put(qdisc_dev(qdisc));
	call_rcu(&qdisc->q_rcu, __qdisc_destroy);
}
EXPORT_SYMBOL(qdisc_destroy);
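
/* True if no real qdisc has been attached yet, i.e. every tx queue's
 * sleeping qdisc is still the built-in noop_qdisc.
 */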
static bool dev_all_qdisc_sleeping_noop(struct net_device *dev)
{
	unsigned int i;

	for (i = 0; i < dev->num_tx_queues; i++) {
		struct netdev_queue *txq = netdev_get_tx_queue(dev, i);

		if (txq->qdisc_sleeping != &noop_qdisc)
			return false;
	}
	return true;
}
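
/* Give one tx queue its default qdisc: pfifo_fast when the device has a
 * tx_queue_len, otherwise the static noqueue_qdisc.  The qdisc is only
 * installed as qdisc_sleeping here; dev_activate() makes it live.
 */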
static void attach_one_default_qdisc(struct net_device *dev,
				     struct netdev_queue *dev_queue,
				     void *_unused)
{
	struct Qdisc *qdisc;

	if (dev->tx_queue_len) {
		qdisc = qdisc_create_dflt(dev, dev_queue,
					  &pfifo_fast_ops, TC_H_ROOT);
		if (!qdisc) {
			printk(KERN_INFO "%s: activation failed\n", dev->name);
			return;
		}
		list_add_tail(&qdisc->list, &dev_queue->qdisc_list);
	} else {
		qdisc = &noqueue_qdisc;
	}
	dev_queue->qdisc_sleeping = qdisc;
}
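
/* Publish the sleeping qdisc as the queue's active qdisc (under the queue
 * lock) and record whether the transmit watchdog needs to run for it.
 */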
static void transition_one_qdisc(struct net_device *dev,
				 struct netdev_queue *dev_queue,
				 void *_need_watchdog)
{
	int *need_watchdog_p = _need_watchdog;

	spin_lock_bh(&dev_queue->lock);
	rcu_assign_pointer(dev_queue->qdisc, dev_queue->qdisc_sleeping);
	if (dev_queue->qdisc != &noqueue_qdisc)
		*need_watchdog_p = 1;
	spin_unlock_bh(&dev_queue->lock);
}

void dev_activate(struct net_device *dev)
{
	int need_watchdog;

	/* If no queueing discipline is attached to the device, create a
	   default one: pfifo_fast for devices which need queueing, and
	   noqueue_qdisc for virtual interfaces.
	 */

	if (dev_all_qdisc_sleeping_noop(dev))
		netdev_for_each_tx_queue(dev, attach_one_default_qdisc, NULL);

	if (!netif_carrier_ok(dev))
		/* Delay activation until next carrier-on event */
		return;

	need_watchdog = 0;
	netdev_for_each_tx_queue(dev, transition_one_qdisc, &need_watchdog);

	if (need_watchdog) {
		dev->trans_start = jiffies;
		dev_watchdog_up(dev);
	}
}

static void dev_deactivate_queue(struct net_device *dev,
				 struct netdev_queue *dev_queue,
				 void *_qdisc_default)
{
	struct Qdisc *qdisc_default = _qdisc_default;
	struct sk_buff *skb = NULL;
	struct Qdisc *qdisc;

	spin_lock_bh(&dev_queue->lock);

	qdisc = dev_queue->qdisc;
	if (qdisc) {
		dev_queue->qdisc = qdisc_default;
		qdisc_reset(qdisc);

		skb = qdisc->gso_skb;
		qdisc->gso_skb = NULL;
	}

	spin_unlock_bh(&dev_queue->lock);

	kfree_skb(skb);
}
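
/* Check every tx queue for a qdisc with __QDISC_STATE_RUNNING set.  When
 * @lock is non-zero the test is made under the qdisc root lock, so the
 * caller also observes the effects of any queue run that just finished.
 */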
static bool some_qdisc_is_running(struct net_device *dev, int lock)
{
	unsigned int i;

	for (i = 0; i < dev->num_tx_queues; i++) {
		struct netdev_queue *dev_queue;
		spinlock_t *root_lock;
		struct Qdisc *q;
		int val;

		dev_queue = netdev_get_tx_queue(dev, i);
		q = dev_queue->qdisc;
		root_lock = qdisc_root_lock(q);

		if (lock)
			spin_lock_bh(root_lock);

		val = test_bit(__QDISC_STATE_RUNNING, &q->state);

		if (lock)
			spin_unlock_bh(root_lock);

		if (val)
			return true;
	}
	return false;
}

void dev_deactivate(struct net_device *dev)
{
	bool running;

	netdev_for_each_tx_queue(dev, dev_deactivate_queue, &noop_qdisc);

	dev_watchdog_down(dev);

	/* Wait for outstanding qdisc-less dev_queue_xmit calls. */
	synchronize_rcu();

	/* Wait for outstanding qdisc_run calls. */
	do {
		while (some_qdisc_is_running(dev, 0))
			yield();

		/*
		 * Double-check inside queue lock to ensure that all effects
		 * of the queue run are visible when we return.
		 */
		running = some_qdisc_is_running(dev, 1);

		/*
		 * The running flag should never be set at this point because
		 * we've already set dev->qdisc to noop_qdisc *inside* the same
		 * pair of spin locks. That is, if any qdisc_run starts after
		 * our initial test it should see the noop_qdisc and then
		 * clear the RUNNING bit before dropping the queue lock. So
		 * if it is set here then we've found a bug.
		 */
	} while (WARN_ON_ONCE(running));
}

static void dev_init_scheduler_queue(struct net_device *dev,
				     struct netdev_queue *dev_queue,
				     void *_qdisc)
{
	struct Qdisc *qdisc = _qdisc;

	dev_queue->qdisc = qdisc;
	dev_queue->qdisc_sleeping = qdisc;
	INIT_LIST_HEAD(&dev_queue->qdisc_list);
}

void dev_init_scheduler(struct net_device *dev)
{
	netdev_for_each_tx_queue(dev, dev_init_scheduler_queue, &noop_qdisc);
	dev_init_scheduler_queue(dev, &dev->rx_queue, NULL);

	setup_timer(&dev->watchdog_timer, dev_watchdog, (unsigned long)dev);
}

static void shutdown_scheduler_queue(struct net_device *dev,
				     struct netdev_queue *dev_queue,
				     void *_qdisc_default)
{
	struct Qdisc *qdisc = dev_queue->qdisc_sleeping;
	struct Qdisc *qdisc_default = _qdisc_default;

	if (qdisc) {
		dev_queue->qdisc = qdisc_default;
		dev_queue->qdisc_sleeping = qdisc_default;

		qdisc_destroy(qdisc);
	}
}

void dev_shutdown(struct net_device *dev)
{
	qdisc_lock_tree(dev);
	netdev_for_each_tx_queue(dev, shutdown_scheduler_queue, &noop_qdisc);
	shutdown_scheduler_queue(dev, &dev->rx_queue, NULL);
	BUG_TRAP(!timer_pending(&dev->watchdog_timer));
	qdisc_unlock_tree(dev);
}