/* sch_generic.h */
#ifndef __NET_SCHED_GENERIC_H
#define __NET_SCHED_GENERIC_H

#include <linux/netdevice.h>
#include <linux/types.h>
#include <linux/rcupdate.h>
#include <linux/pkt_sched.h>
#include <linux/pkt_cls.h>
#include <net/gen_stats.h>
#include <net/rtnetlink.h>

struct Qdisc_ops;
struct qdisc_walker;
struct tcf_walker;
struct module;
struct qdisc_rate_table {
	struct tc_ratespec rate;
	u32 data[256];
	struct qdisc_rate_table *next;
	int refcnt;
};

enum qdisc_state_t {
	__QDISC_STATE_SCHED,
	__QDISC_STATE_DEACTIVATED,
	__QDISC_STATE_THROTTLED,
};

/*
 * The following bits are only changed while the qdisc lock is held.
 */
enum qdisc___state_t {
	__QDISC___STATE_RUNNING = 1,
};

struct qdisc_size_table {
	struct rcu_head rcu;
	struct list_head list;
	struct tc_sizespec szopts;
	int refcnt;
	u16 data[];
};
struct Qdisc {
	int (*enqueue)(struct sk_buff *skb, struct Qdisc *dev);
	struct sk_buff * (*dequeue)(struct Qdisc *dev);
	unsigned int flags;
#define TCQ_F_BUILTIN		1
#define TCQ_F_INGRESS		2
#define TCQ_F_CAN_BYPASS	4
#define TCQ_F_MQROOT		8
#define TCQ_F_ONETXQUEUE	0x10 /* dequeue_skb() can assume all skbs are for
				      * q->dev_queue: it can test
				      * netif_xmit_frozen_or_stopped() before
				      * dequeuing the next packet.
				      * It's true for MQ/MQPRIO slaves, or a
				      * non-multiqueue device.
				      */
#define TCQ_F_WARN_NONWC	(1 << 16)
	int padded;
	const struct Qdisc_ops *ops;
	struct qdisc_size_table __rcu *stab;
	struct list_head list;
	u32 handle;
	u32 parent;
	atomic_t refcnt;
	struct gnet_stats_rate_est rate_est;
	int (*reshape_fail)(struct sk_buff *skb, struct Qdisc *q);
	void *u32_node;

	/* This field is deprecated, but it is still used by CBQ
	 * and it will live until a better solution is invented.
	 */
	struct Qdisc *__parent;
	struct netdev_queue *dev_queue;
	struct Qdisc *next_sched;

	struct sk_buff *gso_skb;
	/*
	 * For performance's sake on SMP, we put highly modified fields at the end.
	 */
	unsigned long state;
	struct sk_buff_head q;
	struct gnet_stats_basic_packed bstats;
	unsigned int __state;
	struct gnet_stats_queue qstats;
	struct rcu_head rcu_head;
	spinlock_t busylock;
	u32 limit;
};
static inline bool qdisc_is_running(const struct Qdisc *qdisc)
{
	return (qdisc->__state & __QDISC___STATE_RUNNING) ? true : false;
}

static inline bool qdisc_run_begin(struct Qdisc *qdisc)
{
	if (qdisc_is_running(qdisc))
		return false;
	qdisc->__state |= __QDISC___STATE_RUNNING;
	return true;
}

static inline void qdisc_run_end(struct Qdisc *qdisc)
{
	qdisc->__state &= ~__QDISC___STATE_RUNNING;
}
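
/*
 * A minimal usage sketch (example_qdisc_run() is a hypothetical name, not
 * part of this header): callers bracket the transmit loop with
 * qdisc_run_begin()/qdisc_run_end() so that only one CPU runs a given
 * qdisc at a time. The caller is assumed to hold qdisc_lock(q), which is
 * what makes the plain (non-atomic) __state updates above safe.
 *
 *	static void example_qdisc_run(struct Qdisc *q)
 *	{
 *		if (qdisc_run_begin(q)) {
 *			// ... dequeue and transmit packets ...
 *			qdisc_run_end(q);
 *		}
 *	}
 */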
static inline bool qdisc_is_throttled(const struct Qdisc *qdisc)
{
	return test_bit(__QDISC_STATE_THROTTLED, &qdisc->state) ? true : false;
}

static inline void qdisc_throttled(struct Qdisc *qdisc)
{
	set_bit(__QDISC_STATE_THROTTLED, &qdisc->state);
}

static inline void qdisc_unthrottled(struct Qdisc *qdisc)
{
	clear_bit(__QDISC_STATE_THROTTLED, &qdisc->state);
}
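
/*
 * The THROTTLED bit is typically set while a rate-limited qdisc waits on
 * its watchdog timer for the next permitted transmit time, and cleared
 * once dequeueing may be retried; qdisc_is_throttled() lets the transmit
 * path skip qdiscs that cannot emit a packet yet.
 */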
struct Qdisc_class_ops {
	/* Child qdisc manipulation */
	struct netdev_queue * (*select_queue)(struct Qdisc *, struct tcmsg *);
	int (*graft)(struct Qdisc *, unsigned long cl,
		     struct Qdisc *, struct Qdisc **);
	struct Qdisc * (*leaf)(struct Qdisc *, unsigned long cl);
	void (*qlen_notify)(struct Qdisc *, unsigned long);

	/* Class manipulation routines */
	unsigned long (*get)(struct Qdisc *, u32 classid);
	void (*put)(struct Qdisc *, unsigned long);
	int (*change)(struct Qdisc *, u32, u32,
		      struct nlattr **, unsigned long *);
	int (*delete)(struct Qdisc *, unsigned long);
	void (*walk)(struct Qdisc *, struct qdisc_walker *arg);

	/* Filter manipulation */
	struct tcf_proto ** (*tcf_chain)(struct Qdisc *, unsigned long);
	unsigned long (*bind_tcf)(struct Qdisc *, unsigned long,
				  u32 classid);
	void (*unbind_tcf)(struct Qdisc *, unsigned long);

	/* rtnetlink specific */
	int (*dump)(struct Qdisc *, unsigned long,
		    struct sk_buff *skb, struct tcmsg *);
	int (*dump_stats)(struct Qdisc *, unsigned long,
			  struct gnet_dump *);
};
struct Qdisc_ops {
	struct Qdisc_ops *next;
	const struct Qdisc_class_ops *cl_ops;
	char id[IFNAMSIZ];
	int priv_size;

	int (*enqueue)(struct sk_buff *, struct Qdisc *);
	struct sk_buff * (*dequeue)(struct Qdisc *);
	struct sk_buff * (*peek)(struct Qdisc *);
	unsigned int (*drop)(struct Qdisc *);

	int (*init)(struct Qdisc *, struct nlattr *arg);
	void (*reset)(struct Qdisc *);
	void (*destroy)(struct Qdisc *);
	int (*change)(struct Qdisc *, struct nlattr *arg);
	void (*attach)(struct Qdisc *);

	int (*dump)(struct Qdisc *, struct sk_buff *);
	int (*dump_stats)(struct Qdisc *, struct gnet_dump *);

	struct module *owner;
};
struct tcf_result {
	unsigned long class;
	u32 classid;
};

struct tcf_proto_ops {
	struct tcf_proto_ops *next;
	char kind[IFNAMSIZ];

	int (*classify)(struct sk_buff *,
			const struct tcf_proto *,
			struct tcf_result *);
	int (*init)(struct tcf_proto *);
	void (*destroy)(struct tcf_proto *);

	unsigned long (*get)(struct tcf_proto *, u32 handle);
	void (*put)(struct tcf_proto *, unsigned long);
	int (*change)(struct net *net, struct sk_buff *,
		      struct tcf_proto *, unsigned long,
		      u32 handle, struct nlattr **,
		      unsigned long *);
	int (*delete)(struct tcf_proto *, unsigned long);
	void (*walk)(struct tcf_proto *, struct tcf_walker *arg);

	/* rtnetlink specific */
	int (*dump)(struct tcf_proto *, unsigned long,
		    struct sk_buff *skb, struct tcmsg *);

	struct module *owner;
};

struct tcf_proto {
	/* Fast access part */
	struct tcf_proto *next;
	void *root;
	int (*classify)(struct sk_buff *,
			const struct tcf_proto *,
			struct tcf_result *);
	__be16 protocol;

	/* All the rest */
	u32 prio;
	u32 classid;
	struct Qdisc *q;
	void *data;
	const struct tcf_proto_ops *ops;
};
struct qdisc_skb_cb {
	unsigned int pkt_len;
	u16 slave_dev_queue_mapping;
	u16 _pad;
	unsigned char data[20];
};

static inline void qdisc_cb_private_validate(const struct sk_buff *skb, int sz)
{
	struct qdisc_skb_cb *qcb;

	BUILD_BUG_ON(sizeof(skb->cb) < offsetof(struct qdisc_skb_cb, data) + sz);
	BUILD_BUG_ON(sizeof(qcb->data) < sz);
}

static inline int qdisc_qlen(const struct Qdisc *q)
{
	return q->q.qlen;
}

static inline struct qdisc_skb_cb *qdisc_skb_cb(const struct sk_buff *skb)
{
	return (struct qdisc_skb_cb *)skb->cb;
}
static inline spinlock_t *qdisc_lock(struct Qdisc *qdisc)
{
	return &qdisc->q.lock;
}

static inline struct Qdisc *qdisc_root(const struct Qdisc *qdisc)
{
	return qdisc->dev_queue->qdisc;
}

static inline struct Qdisc *qdisc_root_sleeping(const struct Qdisc *qdisc)
{
	return qdisc->dev_queue->qdisc_sleeping;
}
/* The qdisc root lock is a mechanism by which the top level
 * of a qdisc tree can be locked from any qdisc node in the
 * forest. This allows changing the configuration of some
 * aspect of the qdisc tree while blocking out asynchronous
 * qdisc access in the packet processing paths.
 *
 * It is only legal to do this when the root will not change
 * on us. Otherwise we'll potentially lock the wrong qdisc
 * root. This is enforced by holding the RTNL semaphore, which
 * all users of this lock accessor must do.
 */
static inline spinlock_t *qdisc_root_lock(const struct Qdisc *qdisc)
{
	struct Qdisc *root = qdisc_root(qdisc);

	ASSERT_RTNL();
	return qdisc_lock(root);
}

static inline spinlock_t *qdisc_root_sleeping_lock(const struct Qdisc *qdisc)
{
	struct Qdisc *root = qdisc_root_sleeping(qdisc);

	ASSERT_RTNL();
	return qdisc_lock(root);
}
static inline struct net_device *qdisc_dev(const struct Qdisc *qdisc)
{
	return qdisc->dev_queue->dev;
}

static inline void sch_tree_lock(const struct Qdisc *q)
{
	spin_lock_bh(qdisc_root_sleeping_lock(q));
}

static inline void sch_tree_unlock(const struct Qdisc *q)
{
	spin_unlock_bh(qdisc_root_sleeping_lock(q));
}
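
/*
 * A minimal, hypothetical sketch of the intended pattern (example_change()
 * and its single-field update are illustrative, not from this header):
 * a qdisc's ->change() handler already runs under RTNL, then takes the
 * tree lock while swapping parameters that the fast path also reads.
 *
 *	static int example_change(struct Qdisc *sch, u32 new_limit)
 *	{
 *		sch_tree_lock(sch);
 *		sch->limit = new_limit;
 *		sch_tree_unlock(sch);
 *		return 0;
 *	}
 */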
#define tcf_tree_lock(tp)	sch_tree_lock((tp)->q)
#define tcf_tree_unlock(tp)	sch_tree_unlock((tp)->q)

extern struct Qdisc noop_qdisc;
extern struct Qdisc_ops noop_qdisc_ops;
extern struct Qdisc_ops pfifo_fast_ops;
extern struct Qdisc_ops mq_qdisc_ops;

struct Qdisc_class_common {
	u32 classid;
	struct hlist_node hnode;
};

struct Qdisc_class_hash {
	struct hlist_head *hash;
	unsigned int hashsize;
	unsigned int hashmask;
	unsigned int hashelems;
};
static inline unsigned int qdisc_class_hash(u32 id, u32 mask)
{
	id ^= id >> 8;
	id ^= id >> 4;
	return id & mask;
}
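
/*
 * Worked example (illustrative numbers, assuming a 16-slot table so
 * mask == 0xf): for id == 0x10020, the folds give
 * 0x10020 ^ 0x100 == 0x10120, then 0x10120 ^ 0x1012 == 0x11132, and
 * 0x11132 & 0xf == 0x2, so the class lands in bucket 2. The two shifts
 * just mix higher classid bits into the low bits before masking.
 */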
static inline struct Qdisc_class_common *
qdisc_class_find(const struct Qdisc_class_hash *hash, u32 id)
{
	struct Qdisc_class_common *cl;
	unsigned int h;

	h = qdisc_class_hash(id, hash->hashmask);
	hlist_for_each_entry(cl, &hash->hash[h], hnode) {
		if (cl->classid == id)
			return cl;
	}
	return NULL;
}
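
/*
 * Sketch of how a classful qdisc's ->get() might resolve a classid via
 * this lookup; struct example_class and example_get() are hypothetical,
 * but the container_of() step mirrors how in-tree qdiscs embed
 * Qdisc_class_common at the start of their per-class state.
 *
 *	struct example_class {
 *		struct Qdisc_class_common common;
 *		u32 some_parameter;
 *	};
 *
 *	static unsigned long example_get(struct Qdisc_class_hash *clhash,
 *					 u32 classid)
 *	{
 *		struct Qdisc_class_common *c;
 *
 *		c = qdisc_class_find(clhash, classid);
 *		if (c == NULL)
 *			return 0;
 *		return (unsigned long)container_of(c, struct example_class, common);
 *	}
 */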
extern int qdisc_class_hash_init(struct Qdisc_class_hash *);
extern void qdisc_class_hash_insert(struct Qdisc_class_hash *, struct Qdisc_class_common *);
extern void qdisc_class_hash_remove(struct Qdisc_class_hash *, struct Qdisc_class_common *);
extern void qdisc_class_hash_grow(struct Qdisc *, struct Qdisc_class_hash *);
extern void qdisc_class_hash_destroy(struct Qdisc_class_hash *);

extern void dev_init_scheduler(struct net_device *dev);
extern void dev_shutdown(struct net_device *dev);
extern void dev_activate(struct net_device *dev);
extern void dev_deactivate(struct net_device *dev);
extern void dev_deactivate_many(struct list_head *head);
extern struct Qdisc *dev_graft_qdisc(struct netdev_queue *dev_queue,
				     struct Qdisc *qdisc);
extern void qdisc_reset(struct Qdisc *qdisc);
extern void qdisc_destroy(struct Qdisc *qdisc);
extern void qdisc_tree_decrease_qlen(struct Qdisc *qdisc, unsigned int n);
extern struct Qdisc *qdisc_alloc(struct netdev_queue *dev_queue,
				 struct Qdisc_ops *ops);
extern struct Qdisc *qdisc_create_dflt(struct netdev_queue *dev_queue,
				       struct Qdisc_ops *ops, u32 parentid);
extern void __qdisc_calculate_pkt_len(struct sk_buff *skb,
				      const struct qdisc_size_table *stab);
extern void tcf_destroy(struct tcf_proto *tp);
extern void tcf_destroy_chain(struct tcf_proto **fl);
/* Reset all TX qdiscs of a device at or above a given queue index. */
static inline void qdisc_reset_all_tx_gt(struct net_device *dev, unsigned int i)
{
	struct Qdisc *qdisc;

	for (; i < dev->num_tx_queues; i++) {
		qdisc = netdev_get_tx_queue(dev, i)->qdisc;
		if (qdisc) {
			spin_lock_bh(qdisc_lock(qdisc));
			qdisc_reset(qdisc);
			spin_unlock_bh(qdisc_lock(qdisc));
		}
	}
}

static inline void qdisc_reset_all_tx(struct net_device *dev)
{
	qdisc_reset_all_tx_gt(dev, 0);
}
/* Are all TX queues of the device empty? */
static inline bool qdisc_all_tx_empty(const struct net_device *dev)
{
	unsigned int i;

	for (i = 0; i < dev->num_tx_queues; i++) {
		struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
		const struct Qdisc *q = txq->qdisc;

		if (q->q.qlen)
			return false;
	}
	return true;
}

/* Are any of the TX qdiscs changing? */
static inline bool qdisc_tx_changing(const struct net_device *dev)
{
	unsigned int i;

	for (i = 0; i < dev->num_tx_queues; i++) {
		struct netdev_queue *txq = netdev_get_tx_queue(dev, i);

		if (txq->qdisc != txq->qdisc_sleeping)
			return true;
	}
	return false;
}

/* Is the device using the noop qdisc on all queues? */
static inline bool qdisc_tx_is_noop(const struct net_device *dev)
{
	unsigned int i;

	for (i = 0; i < dev->num_tx_queues; i++) {
		struct netdev_queue *txq = netdev_get_tx_queue(dev, i);

		if (txq->qdisc != &noop_qdisc)
			return false;
	}
	return true;
}

static inline unsigned int qdisc_pkt_len(const struct sk_buff *skb)
{
	return qdisc_skb_cb(skb)->pkt_len;
}
/* additional qdisc xmit flags (NET_XMIT_MASK in linux/netdevice.h) */
enum net_xmit_qdisc_t {
	__NET_XMIT_STOLEN = 0x00010000,
	__NET_XMIT_BYPASS = 0x00020000,
};

#ifdef CONFIG_NET_CLS_ACT
#define net_xmit_drop_count(e)	((e) & __NET_XMIT_STOLEN ? 0 : 1)
#else
#define net_xmit_drop_count(e)	(1)
#endif
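
/*
 * With CONFIG_NET_CLS_ACT, an enqueue return code may carry
 * __NET_XMIT_STOLEN to indicate that a classifier action consumed the
 * skb rather than dropping it; a parent qdisc then charges
 * net_xmit_drop_count(ret) == 0 to its drop counter instead of 1.
 */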
static inline void qdisc_calculate_pkt_len(struct sk_buff *skb,
					   const struct Qdisc *sch)
{
#ifdef CONFIG_NET_SCHED
	struct qdisc_size_table *stab = rcu_dereference_bh(sch->stab);

	if (stab)
		__qdisc_calculate_pkt_len(skb, stab);
#endif
}

static inline int qdisc_enqueue(struct sk_buff *skb, struct Qdisc *sch)
{
	qdisc_calculate_pkt_len(skb, sch);
	return sch->enqueue(skb, sch);
}

static inline int qdisc_enqueue_root(struct sk_buff *skb, struct Qdisc *sch)
{
	qdisc_skb_cb(skb)->pkt_len = skb->len;
	return qdisc_enqueue(skb, sch) & NET_XMIT_MASK;
}
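
/*
 * Taken together: the root entry point stamps the skb's cb with its raw
 * length, qdisc_calculate_pkt_len() lets an attached size table override
 * that estimate (for example, to model link-layer framing overhead), and
 * the per-qdisc ->enqueue() then sees a consistent qdisc_pkt_len(skb).
 */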
static inline void bstats_update(struct gnet_stats_basic_packed *bstats,
				 const struct sk_buff *skb)
{
	bstats->bytes += qdisc_pkt_len(skb);
	bstats->packets += skb_is_gso(skb) ? skb_shinfo(skb)->gso_segs : 1;
}

static inline void qdisc_bstats_update(struct Qdisc *sch,
				       const struct sk_buff *skb)
{
	bstats_update(&sch->bstats, skb);
}

static inline int __qdisc_enqueue_tail(struct sk_buff *skb, struct Qdisc *sch,
				       struct sk_buff_head *list)
{
	__skb_queue_tail(list, skb);
	sch->qstats.backlog += qdisc_pkt_len(skb);

	return NET_XMIT_SUCCESS;
}

static inline int qdisc_enqueue_tail(struct sk_buff *skb, struct Qdisc *sch)
{
	return __qdisc_enqueue_tail(skb, sch, &sch->q);
}
static inline struct sk_buff *__qdisc_dequeue_head(struct Qdisc *sch,
						   struct sk_buff_head *list)
{
	struct sk_buff *skb = __skb_dequeue(list);

	if (likely(skb != NULL)) {
		sch->qstats.backlog -= qdisc_pkt_len(skb);
		qdisc_bstats_update(sch, skb);
	}

	return skb;
}

static inline struct sk_buff *qdisc_dequeue_head(struct Qdisc *sch)
{
	return __qdisc_dequeue_head(sch, &sch->q);
}

static inline unsigned int __qdisc_queue_drop_head(struct Qdisc *sch,
						   struct sk_buff_head *list)
{
	struct sk_buff *skb = __skb_dequeue(list);

	if (likely(skb != NULL)) {
		unsigned int len = qdisc_pkt_len(skb);

		sch->qstats.backlog -= len;
		kfree_skb(skb);
		return len;
	}

	return 0;
}

static inline unsigned int qdisc_queue_drop_head(struct Qdisc *sch)
{
	return __qdisc_queue_drop_head(sch, &sch->q);
}
static inline struct sk_buff *__qdisc_dequeue_tail(struct Qdisc *sch,
						   struct sk_buff_head *list)
{
	struct sk_buff *skb = __skb_dequeue_tail(list);

	if (likely(skb != NULL))
		sch->qstats.backlog -= qdisc_pkt_len(skb);

	return skb;
}

static inline struct sk_buff *qdisc_dequeue_tail(struct Qdisc *sch)
{
	return __qdisc_dequeue_tail(sch, &sch->q);
}

static inline struct sk_buff *qdisc_peek_head(struct Qdisc *sch)
{
	return skb_peek(&sch->q);
}
/* generic pseudo peek method for non-work-conserving qdisc */
static inline struct sk_buff *qdisc_peek_dequeued(struct Qdisc *sch)
{
	/* we can reuse ->gso_skb because peek isn't called for root qdiscs */
	if (!sch->gso_skb) {
		sch->gso_skb = sch->dequeue(sch);
		if (sch->gso_skb)
			/* it's still part of the queue */
			sch->q.qlen++;
	}

	return sch->gso_skb;
}

/* use instead of qdisc->dequeue() for all qdiscs queried with ->peek() */
static inline struct sk_buff *qdisc_dequeue_peeked(struct Qdisc *sch)
{
	struct sk_buff *skb = sch->gso_skb;

	if (skb) {
		sch->gso_skb = NULL;
		sch->q.qlen--;
	} else {
		skb = sch->dequeue(sch);
	}

	return skb;
}
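
/*
 * Sketch of the intended pairing, loosely modeled on a token-bucket style
 * ->dequeue() (example_dequeue() and example_enough_tokens() are
 * hypothetical): the qdisc peeks at the next packet to decide whether it
 * may be sent now, and only commits the dequeue once it knows the packet
 * will really leave the queue.
 *
 *	static struct sk_buff *example_dequeue(struct Qdisc *sch)
 *	{
 *		struct sk_buff *skb = qdisc_peek_dequeued(sch);
 *
 *		if (skb == NULL || !example_enough_tokens(sch, skb))
 *			return NULL;	// packet stays queued
 *		return qdisc_dequeue_peeked(sch);
 *	}
 */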
static inline void __qdisc_reset_queue(struct Qdisc *sch,
				       struct sk_buff_head *list)
{
	/*
	 * We do not know the backlog in bytes of this list; it
	 * is up to the caller to correct it.
	 */
	__skb_queue_purge(list);
}

static inline void qdisc_reset_queue(struct Qdisc *sch)
{
	__qdisc_reset_queue(sch, &sch->q);
	sch->qstats.backlog = 0;
}

static inline unsigned int __qdisc_queue_drop(struct Qdisc *sch,
					      struct sk_buff_head *list)
{
	struct sk_buff *skb = __qdisc_dequeue_tail(sch, list);

	if (likely(skb != NULL)) {
		unsigned int len = qdisc_pkt_len(skb);

		kfree_skb(skb);
		return len;
	}

	return 0;
}

static inline unsigned int qdisc_queue_drop(struct Qdisc *sch)
{
	return __qdisc_queue_drop(sch, &sch->q);
}
static inline int qdisc_drop(struct sk_buff *skb, struct Qdisc *sch)
{
	kfree_skb(skb);
	sch->qstats.drops++;

	return NET_XMIT_DROP;
}

static inline int qdisc_reshape_fail(struct sk_buff *skb, struct Qdisc *sch)
{
	sch->qstats.drops++;

#ifdef CONFIG_NET_CLS_ACT
	if (sch->reshape_fail == NULL || sch->reshape_fail(skb, sch))
		goto drop;

	return NET_XMIT_SUCCESS;

drop:
#endif
	kfree_skb(skb);
	return NET_XMIT_DROP;
}
/* Length to Time (L2T) lookup in a qdisc_rate_table, to determine how
 * long it will take to send a packet given its size.
 */
static inline u32 qdisc_l2t(struct qdisc_rate_table *rtab, unsigned int pktlen)
{
	int slot = pktlen + rtab->rate.cell_align + rtab->rate.overhead;

	if (slot < 0)
		slot = 0;
	slot >>= rtab->rate.cell_log;
	if (slot > 255)
		return rtab->data[255] * (slot >> 8) + rtab->data[slot & 0xFF];

	return rtab->data[slot];
}
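
/*
 * Worked example (illustrative numbers): with cell_log == 3 and zero
 * cell_align/overhead, pktlen == 1000 gives slot == 1000 >> 3 == 125, so
 * the transmit time is simply rtab->data[125]. Each table entry holds a
 * precomputed time for one cell-sized multiple of bytes at the configured
 * rate; for slots beyond 255, the upper bits are scaled by data[255] as
 * an approximation.
 */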
#ifdef CONFIG_NET_CLS_ACT
static inline struct sk_buff *skb_act_clone(struct sk_buff *skb, gfp_t gfp_mask,
					    int action)
{
	struct sk_buff *n;

	n = skb_clone(skb, gfp_mask);
	if (n) {
		n->tc_verd = SET_TC_VERD(n->tc_verd, 0);
		n->tc_verd = CLR_TC_OK2MUNGE(n->tc_verd);
		n->tc_verd = CLR_TC_MUNGED(n->tc_verd);
	}
	return n;
}
#endif
struct psched_ratecfg {
	u64 rate_bps;
	u32 mult;
	u32 shift;
};

static inline u64 psched_l2t_ns(const struct psched_ratecfg *r,
				unsigned int len)
{
	return ((u64)len * r->mult) >> r->shift;
}
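
/*
 * The mult/shift pair is a fixed-point encoding of "nanoseconds per byte",
 * so the length-to-time conversion avoids a 64-bit divide on the fast
 * path. Illustrative numbers: at 1 Gbit/s one byte takes 8 ns, so
 * psched_ratecfg_precompute() would choose mult and shift such that
 * mult / 2^shift == 8, and psched_l2t_ns() then returns roughly len * 8
 * for a len-byte packet.
 */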
extern void psched_ratecfg_precompute(struct psched_ratecfg *r, u32 rate);

static inline u32 psched_ratecfg_getrate(const struct psched_ratecfg *r)
{
	return r->rate_bps >> 3;
}

#endif /* __NET_SCHED_GENERIC_H */