/* sch_generic.h */

#ifndef __NET_SCHED_GENERIC_H
#define __NET_SCHED_GENERIC_H

#include <linux/netdevice.h>
#include <linux/types.h>
#include <linux/rcupdate.h>
#include <linux/module.h>
#include <linux/pkt_sched.h>
#include <linux/pkt_cls.h>
#include <net/gen_stats.h>
#include <net/rtnetlink.h>

struct Qdisc_ops;
struct qdisc_walker;
struct tcf_walker;
struct module;

struct qdisc_rate_table {
        struct tc_ratespec rate;
        u32 data[256];
        struct qdisc_rate_table *next;
        int refcnt;
};

enum qdisc_state_t {
        __QDISC_STATE_SCHED,
        __QDISC_STATE_DEACTIVATED,
};
  25. /*
  26. * following bits are only changed while qdisc lock is held
  27. */
enum qdisc___state_t {
        __QDISC___STATE_RUNNING = 1,
        __QDISC___STATE_THROTTLED = 2,
};

struct qdisc_size_table {
        struct rcu_head rcu;
        struct list_head list;
        struct tc_sizespec szopts;
        int refcnt;
        u16 data[];
};

struct Qdisc {
        int (*enqueue)(struct sk_buff *skb, struct Qdisc *dev);
        struct sk_buff *(*dequeue)(struct Qdisc *dev);
        unsigned flags;
#define TCQ_F_BUILTIN           1
#define TCQ_F_INGRESS           2
#define TCQ_F_CAN_BYPASS        4
#define TCQ_F_MQROOT            8
#define TCQ_F_WARN_NONWC        (1 << 16)
        int padded;
        struct Qdisc_ops *ops;
        struct qdisc_size_table __rcu *stab;
        struct list_head list;
        u32 handle;
        u32 parent;
        atomic_t refcnt;
        struct gnet_stats_rate_est rate_est;
        int (*reshape_fail)(struct sk_buff *skb, struct Qdisc *q);
        void *u32_node;
        /* This field is deprecated, but it is still used by CBQ
         * and it will live until a better solution is invented.
         */
        struct Qdisc *__parent;
        struct netdev_queue *dev_queue;
        struct Qdisc *next_sched;
        struct sk_buff *gso_skb;
        /*
         * For performance's sake on SMP, we put the most frequently
         * modified fields at the end.
         */
        unsigned long state;
        struct sk_buff_head q;
        struct gnet_stats_basic_packed bstats;
        unsigned int __state;
        struct gnet_stats_queue qstats;
        struct rcu_head rcu_head;
        spinlock_t busylock;
};

static inline bool qdisc_is_running(const struct Qdisc *qdisc)
{
        return (qdisc->__state & __QDISC___STATE_RUNNING) ? true : false;
}

static inline bool qdisc_run_begin(struct Qdisc *qdisc)
{
        if (qdisc_is_running(qdisc))
                return false;
        qdisc->__state |= __QDISC___STATE_RUNNING;
        return true;
}

static inline void qdisc_run_end(struct Qdisc *qdisc)
{
        qdisc->__state &= ~__QDISC___STATE_RUNNING;
}
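
/*
 * Usage sketch (illustrative, not a quote of the core code): the
 * transmit path brackets qdisc execution with qdisc_run_begin() and
 * qdisc_run_end(), so that only one CPU runs a given qdisc at a time:
 *
 *      if (qdisc_run_begin(q)) {
 *              (dequeue and transmit packets here)
 *              qdisc_run_end(q);
 *      }
 */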
static inline bool qdisc_is_throttled(const struct Qdisc *qdisc)
{
        return (qdisc->__state & __QDISC___STATE_THROTTLED) ? true : false;
}

static inline void qdisc_throttled(struct Qdisc *qdisc)
{
        qdisc->__state |= __QDISC___STATE_THROTTLED;
}

static inline void qdisc_unthrottled(struct Qdisc *qdisc)
{
        qdisc->__state &= ~__QDISC___STATE_THROTTLED;
}
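
/*
 * Usage sketch (illustrative): a non-work-conserving qdisc, e.g. a
 * token bucket, marks itself throttled while it waits for its timer
 * and clears the bit once it may transmit again.  Roughly, inside a
 * hypothetical ->dequeue():
 *
 *      if (example_tokens_available(sch)) {    (hypothetical helper)
 *              qdisc_unthrottled(sch);
 *              return qdisc_dequeue_head(sch);
 *      }
 *      qdisc_throttled(sch);   (stay idle until a watchdog fires)
 *      return NULL;
 */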
struct Qdisc_class_ops {
        /* Child qdisc manipulation */
        struct netdev_queue *(*select_queue)(struct Qdisc *, struct tcmsg *);
        int (*graft)(struct Qdisc *, unsigned long cl,
                     struct Qdisc *, struct Qdisc **);
        struct Qdisc *(*leaf)(struct Qdisc *, unsigned long cl);
        void (*qlen_notify)(struct Qdisc *, unsigned long);

        /* Class manipulation routines */
        unsigned long (*get)(struct Qdisc *, u32 classid);
        void (*put)(struct Qdisc *, unsigned long);
        int (*change)(struct Qdisc *, u32, u32,
                      struct nlattr **, unsigned long *);
        int (*delete)(struct Qdisc *, unsigned long);
        void (*walk)(struct Qdisc *, struct qdisc_walker *arg);

        /* Filter manipulation */
        struct tcf_proto **(*tcf_chain)(struct Qdisc *, unsigned long);
        unsigned long (*bind_tcf)(struct Qdisc *, unsigned long, u32 classid);
        void (*unbind_tcf)(struct Qdisc *, unsigned long);

        /* rtnetlink specific */
        int (*dump)(struct Qdisc *, unsigned long,
                    struct sk_buff *skb, struct tcmsg *);
        int (*dump_stats)(struct Qdisc *, unsigned long,
                          struct gnet_dump *);
};

struct Qdisc_ops {
        struct Qdisc_ops *next;
        const struct Qdisc_class_ops *cl_ops;
        char id[IFNAMSIZ];
        int priv_size;

        int (*enqueue)(struct sk_buff *, struct Qdisc *);
        struct sk_buff *(*dequeue)(struct Qdisc *);
        struct sk_buff *(*peek)(struct Qdisc *);
        unsigned int (*drop)(struct Qdisc *);

        int (*init)(struct Qdisc *, struct nlattr *arg);
        void (*reset)(struct Qdisc *);
        void (*destroy)(struct Qdisc *);
        int (*change)(struct Qdisc *, struct nlattr *arg);
        void (*attach)(struct Qdisc *);

        int (*dump)(struct Qdisc *, struct sk_buff *);
        int (*dump_stats)(struct Qdisc *, struct gnet_dump *);

        struct module *owner;
};

struct tcf_result {
        unsigned long class;
        u32 classid;
};

struct tcf_proto_ops {
        struct tcf_proto_ops *next;
        char kind[IFNAMSIZ];

        int (*classify)(struct sk_buff *, struct tcf_proto *,
                        struct tcf_result *);
        int (*init)(struct tcf_proto *);
        void (*destroy)(struct tcf_proto *);

        unsigned long (*get)(struct tcf_proto *, u32 handle);
        void (*put)(struct tcf_proto *, unsigned long);
        int (*change)(struct tcf_proto *, unsigned long,
                      u32 handle, struct nlattr **, unsigned long *);
        int (*delete)(struct tcf_proto *, unsigned long);
        void (*walk)(struct tcf_proto *, struct tcf_walker *arg);

        /* rtnetlink specific */
        int (*dump)(struct tcf_proto *, unsigned long,
                    struct sk_buff *skb, struct tcmsg *);

        struct module *owner;
};

struct tcf_proto {
        /* Fast access part */
        struct tcf_proto *next;
        void *root;
        int (*classify)(struct sk_buff *, struct tcf_proto *,
                        struct tcf_result *);
        __be16 protocol;

        /* All the rest */
        u32 prio;
        u32 classid;
        struct Qdisc *q;
        void *data;
        struct tcf_proto_ops *ops;
};

struct qdisc_skb_cb {
        unsigned int pkt_len;
        char data[];
};

static inline int qdisc_qlen(struct Qdisc *q)
{
        return q->q.qlen;
}

static inline struct qdisc_skb_cb *qdisc_skb_cb(const struct sk_buff *skb)
{
        return (struct qdisc_skb_cb *)skb->cb;
}

static inline spinlock_t *qdisc_lock(struct Qdisc *qdisc)
{
        return &qdisc->q.lock;
}

static inline struct Qdisc *qdisc_root(struct Qdisc *qdisc)
{
        return qdisc->dev_queue->qdisc;
}

static inline struct Qdisc *qdisc_root_sleeping(struct Qdisc *qdisc)
{
        return qdisc->dev_queue->qdisc_sleeping;
}
/* The qdisc root lock is a mechanism by which the top level
 * of a qdisc tree can be locked from any qdisc node in the
 * forest.  This allows changing the configuration of some
 * aspect of the qdisc tree while blocking out asynchronous
 * qdisc access in the packet processing paths.
 *
 * It is only legal to do this when the root will not change
 * on us.  Otherwise we'll potentially lock the wrong qdisc
 * root.  This is enforced by holding the RTNL semaphore, which
 * all users of this lock accessor must do.
 */
static inline spinlock_t *qdisc_root_lock(struct Qdisc *qdisc)
{
        struct Qdisc *root = qdisc_root(qdisc);

        ASSERT_RTNL();
        return qdisc_lock(root);
}

static inline spinlock_t *qdisc_root_sleeping_lock(struct Qdisc *qdisc)
{
        struct Qdisc *root = qdisc_root_sleeping(qdisc);

        ASSERT_RTNL();
        return qdisc_lock(root);
}
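
/*
 * Usage sketch (illustrative): both accessors may only be called under
 * RTNL, e.g. from a qdisc's ->change() callback, with the root lock
 * held around updates that the fast path also reads:
 *
 *      static int example_change(struct Qdisc *sch, struct nlattr *opt)
 *      {
 *              spinlock_t *root_lock = qdisc_root_sleeping_lock(sch);
 *
 *              spin_lock_bh(root_lock);
 *              (update the qdisc's private configuration here)
 *              spin_unlock_bh(root_lock);
 *              return 0;
 *      }
 */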
static inline struct net_device *qdisc_dev(struct Qdisc *qdisc)
{
        return qdisc->dev_queue->dev;
}

static inline void sch_tree_lock(struct Qdisc *q)
{
        spin_lock_bh(qdisc_root_sleeping_lock(q));
}

static inline void sch_tree_unlock(struct Qdisc *q)
{
        spin_unlock_bh(qdisc_root_sleeping_lock(q));
}

#define tcf_tree_lock(tp)       sch_tree_lock((tp)->q)
#define tcf_tree_unlock(tp)     sch_tree_unlock((tp)->q)

extern struct Qdisc noop_qdisc;
extern struct Qdisc_ops noop_qdisc_ops;
extern struct Qdisc_ops pfifo_fast_ops;
extern struct Qdisc_ops mq_qdisc_ops;

struct Qdisc_class_common {
        u32 classid;
        struct hlist_node hnode;
};

struct Qdisc_class_hash {
        struct hlist_head *hash;
        unsigned int hashsize;
        unsigned int hashmask;
        unsigned int hashelems;
};

static inline unsigned int qdisc_class_hash(u32 id, u32 mask)
{
        id ^= id >> 8;
        id ^= id >> 4;
        return id & mask;
}

static inline struct Qdisc_class_common *
qdisc_class_find(struct Qdisc_class_hash *hash, u32 id)
{
        struct Qdisc_class_common *cl;
        struct hlist_node *n;
        unsigned int h;

        h = qdisc_class_hash(id, hash->hashmask);
        hlist_for_each_entry(cl, n, &hash->hash[h], hnode) {
                if (cl->classid == id)
                        return cl;
        }
        return NULL;
}
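
/*
 * Usage sketch (illustrative): a classful qdisc embeds struct
 * Qdisc_class_common at the start of its per-class state and resolves
 * classids through the shared hash (the example_class type is
 * hypothetical):
 *
 *      struct example_class {
 *              struct Qdisc_class_common common;
 *              (per-class state follows)
 *      };
 *
 *      static struct example_class *
 *      example_find(struct Qdisc_class_hash *h, u32 classid)
 *      {
 *              struct Qdisc_class_common *c = qdisc_class_find(h, classid);
 *
 *              return c ? container_of(c, struct example_class, common)
 *                       : NULL;
 *      }
 */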
extern int qdisc_class_hash_init(struct Qdisc_class_hash *);
extern void qdisc_class_hash_insert(struct Qdisc_class_hash *, struct Qdisc_class_common *);
extern void qdisc_class_hash_remove(struct Qdisc_class_hash *, struct Qdisc_class_common *);
extern void qdisc_class_hash_grow(struct Qdisc *, struct Qdisc_class_hash *);
extern void qdisc_class_hash_destroy(struct Qdisc_class_hash *);

extern void dev_init_scheduler(struct net_device *dev);
extern void dev_shutdown(struct net_device *dev);
extern void dev_activate(struct net_device *dev);
extern void dev_deactivate(struct net_device *dev);
extern void dev_deactivate_many(struct list_head *head);
extern struct Qdisc *dev_graft_qdisc(struct netdev_queue *dev_queue,
                                     struct Qdisc *qdisc);
extern void qdisc_reset(struct Qdisc *qdisc);
extern void qdisc_destroy(struct Qdisc *qdisc);
extern void qdisc_tree_decrease_qlen(struct Qdisc *qdisc, unsigned int n);
extern struct Qdisc *qdisc_alloc(struct netdev_queue *dev_queue,
                                 struct Qdisc_ops *ops);
extern struct Qdisc *qdisc_create_dflt(struct netdev_queue *dev_queue,
                                       struct Qdisc_ops *ops, u32 parentid);
extern void __qdisc_calculate_pkt_len(struct sk_buff *skb,
                                      const struct qdisc_size_table *stab);
extern void tcf_destroy(struct tcf_proto *tp);
extern void tcf_destroy_chain(struct tcf_proto **fl);
/* Reset all TX qdiscs of a device, from the given queue index onwards. */
static inline void qdisc_reset_all_tx_gt(struct net_device *dev, unsigned int i)
{
        struct Qdisc *qdisc;

        for (; i < dev->num_tx_queues; i++) {
                qdisc = netdev_get_tx_queue(dev, i)->qdisc;
                if (qdisc) {
                        spin_lock_bh(qdisc_lock(qdisc));
                        qdisc_reset(qdisc);
                        spin_unlock_bh(qdisc_lock(qdisc));
                }
        }
}

static inline void qdisc_reset_all_tx(struct net_device *dev)
{
        qdisc_reset_all_tx_gt(dev, 0);
}

/* Are all TX queues of the device empty? */
static inline bool qdisc_all_tx_empty(const struct net_device *dev)
{
        unsigned int i;

        for (i = 0; i < dev->num_tx_queues; i++) {
                struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
                const struct Qdisc *q = txq->qdisc;

                if (q->q.qlen)
                        return false;
        }
        return true;
}

/* Are any of the TX qdiscs changing? */
static inline bool qdisc_tx_changing(struct net_device *dev)
{
        unsigned int i;

        for (i = 0; i < dev->num_tx_queues; i++) {
                struct netdev_queue *txq = netdev_get_tx_queue(dev, i);

                if (txq->qdisc != txq->qdisc_sleeping)
                        return true;
        }
        return false;
}

/* Is the device using the noop qdisc on all queues? */
static inline bool qdisc_tx_is_noop(const struct net_device *dev)
{
        unsigned int i;

        for (i = 0; i < dev->num_tx_queues; i++) {
                struct netdev_queue *txq = netdev_get_tx_queue(dev, i);

                if (txq->qdisc != &noop_qdisc)
                        return false;
        }
        return true;
}

static inline unsigned int qdisc_pkt_len(const struct sk_buff *skb)
{
        return qdisc_skb_cb(skb)->pkt_len;
}

/* additional qdisc xmit flags (NET_XMIT_MASK in linux/netdevice.h) */
enum net_xmit_qdisc_t {
        __NET_XMIT_STOLEN = 0x00010000,
        __NET_XMIT_BYPASS = 0x00020000,
};

#ifdef CONFIG_NET_CLS_ACT
#define net_xmit_drop_count(e)  ((e) & __NET_XMIT_STOLEN ? 0 : 1)
#else
#define net_xmit_drop_count(e)  (1)
#endif

static inline void qdisc_calculate_pkt_len(struct sk_buff *skb,
                                           const struct Qdisc *sch)
{
#ifdef CONFIG_NET_SCHED
        struct qdisc_size_table *stab = rcu_dereference_bh(sch->stab);

        if (stab)
                __qdisc_calculate_pkt_len(skb, stab);
#endif
}

static inline int qdisc_enqueue(struct sk_buff *skb, struct Qdisc *sch)
{
        qdisc_calculate_pkt_len(skb, sch);
        return sch->enqueue(skb, sch);
}

static inline int qdisc_enqueue_root(struct sk_buff *skb, struct Qdisc *sch)
{
        qdisc_skb_cb(skb)->pkt_len = skb->len;
        return qdisc_enqueue(skb, sch) & NET_XMIT_MASK;
}
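
/*
 * Usage sketch (illustrative): a parent qdisc enqueueing into a child
 * should count a drop only when the packet was really lost, which is
 * what net_xmit_drop_count() distinguishes for CLS_ACT-stolen packets:
 *
 *      int ret = qdisc_enqueue(skb, child);
 *
 *      if (ret != NET_XMIT_SUCCESS) {
 *              if (net_xmit_drop_count(ret))
 *                      sch->qstats.drops++;
 *              return ret;
 *      }
 */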
static inline void bstats_update(struct gnet_stats_basic_packed *bstats,
                                 const struct sk_buff *skb)
{
        bstats->bytes += qdisc_pkt_len(skb);
        bstats->packets += skb_is_gso(skb) ? skb_shinfo(skb)->gso_segs : 1;
}

static inline void qdisc_bstats_update(struct Qdisc *sch,
                                       const struct sk_buff *skb)
{
        bstats_update(&sch->bstats, skb);
}

static inline int __qdisc_enqueue_tail(struct sk_buff *skb, struct Qdisc *sch,
                                       struct sk_buff_head *list)
{
        __skb_queue_tail(list, skb);
        sch->qstats.backlog += qdisc_pkt_len(skb);

        return NET_XMIT_SUCCESS;
}

static inline int qdisc_enqueue_tail(struct sk_buff *skb, struct Qdisc *sch)
{
        return __qdisc_enqueue_tail(skb, sch, &sch->q);
}

static inline struct sk_buff *__qdisc_dequeue_head(struct Qdisc *sch,
                                                   struct sk_buff_head *list)
{
        struct sk_buff *skb = __skb_dequeue(list);

        if (likely(skb != NULL)) {
                sch->qstats.backlog -= qdisc_pkt_len(skb);
                qdisc_bstats_update(sch, skb);
        }

        return skb;
}

static inline struct sk_buff *qdisc_dequeue_head(struct Qdisc *sch)
{
        return __qdisc_dequeue_head(sch, &sch->q);
}
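
/*
 * Usage sketch (illustrative): qdisc_enqueue_tail() and
 * qdisc_dequeue_head() are enough for a minimal FIFO discipline; the
 * limit below is hypothetical:
 *
 *      static int example_fifo_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 *      {
 *              if (skb_queue_len(&sch->q) < EXAMPLE_FIFO_LIMIT)
 *                      return qdisc_enqueue_tail(skb, sch);
 *              return qdisc_drop(skb, sch);
 *      }
 *
 * with qdisc_dequeue_head() used directly as the ->dequeue() hook.
 */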
static inline unsigned int __qdisc_queue_drop_head(struct Qdisc *sch,
                                                   struct sk_buff_head *list)
{
        struct sk_buff *skb = __skb_dequeue(list);

        if (likely(skb != NULL)) {
                unsigned int len = qdisc_pkt_len(skb);

                sch->qstats.backlog -= len;
                kfree_skb(skb);
                return len;
        }

        return 0;
}

static inline unsigned int qdisc_queue_drop_head(struct Qdisc *sch)
{
        return __qdisc_queue_drop_head(sch, &sch->q);
}

static inline struct sk_buff *__qdisc_dequeue_tail(struct Qdisc *sch,
                                                   struct sk_buff_head *list)
{
        struct sk_buff *skb = __skb_dequeue_tail(list);

        if (likely(skb != NULL))
                sch->qstats.backlog -= qdisc_pkt_len(skb);

        return skb;
}

static inline struct sk_buff *qdisc_dequeue_tail(struct Qdisc *sch)
{
        return __qdisc_dequeue_tail(sch, &sch->q);
}

static inline struct sk_buff *qdisc_peek_head(struct Qdisc *sch)
{
        return skb_peek(&sch->q);
}

/* generic pseudo peek method for non-work-conserving qdisc */
static inline struct sk_buff *qdisc_peek_dequeued(struct Qdisc *sch)
{
        /* we can reuse ->gso_skb because peek isn't called for root qdiscs */
        if (!sch->gso_skb) {
                sch->gso_skb = sch->dequeue(sch);
                if (sch->gso_skb)
                        /* it's still part of the queue */
                        sch->q.qlen++;
        }

        return sch->gso_skb;
}

/* use instead of qdisc->dequeue() for all qdiscs queried with ->peek() */
static inline struct sk_buff *qdisc_dequeue_peeked(struct Qdisc *sch)
{
        struct sk_buff *skb = sch->gso_skb;

        if (skb) {
                sch->gso_skb = NULL;
                sch->q.qlen--;
        } else {
                skb = sch->dequeue(sch);
        }

        return skb;
}
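
/*
 * Usage sketch (illustrative): a shaping qdisc typically peeks at its
 * child to decide whether the head packet may go out yet, and then
 * consumes it with qdisc_dequeue_peeked() so the skb cached in
 * ->gso_skb stays consistent (q->qdisc is the child; example_can_send
 * is hypothetical):
 *
 *      skb = q->qdisc->ops->peek(q->qdisc);
 *      if (skb && example_can_send(sch, skb))
 *              skb = qdisc_dequeue_peeked(q->qdisc);
 */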
static inline void __qdisc_reset_queue(struct Qdisc *sch,
                                       struct sk_buff_head *list)
{
        /*
         * We do not know the backlog in bytes of this list;
         * it is up to the caller to correct it.
         */
        __skb_queue_purge(list);
}
static inline void qdisc_reset_queue(struct Qdisc *sch)
{
        __qdisc_reset_queue(sch, &sch->q);
        sch->qstats.backlog = 0;
}

static inline unsigned int __qdisc_queue_drop(struct Qdisc *sch,
                                              struct sk_buff_head *list)
{
        struct sk_buff *skb = __qdisc_dequeue_tail(sch, list);

        if (likely(skb != NULL)) {
                unsigned int len = qdisc_pkt_len(skb);

                kfree_skb(skb);
                return len;
        }

        return 0;
}

static inline unsigned int qdisc_queue_drop(struct Qdisc *sch)
{
        return __qdisc_queue_drop(sch, &sch->q);
}

static inline int qdisc_drop(struct sk_buff *skb, struct Qdisc *sch)
{
        kfree_skb(skb);
        sch->qstats.drops++;

        return NET_XMIT_DROP;
}

static inline int qdisc_reshape_fail(struct sk_buff *skb, struct Qdisc *sch)
{
        sch->qstats.drops++;

#ifdef CONFIG_NET_CLS_ACT
        if (sch->reshape_fail == NULL || sch->reshape_fail(skb, sch))
                goto drop;

        return NET_XMIT_SUCCESS;

drop:
#endif
        kfree_skb(skb);
        return NET_XMIT_DROP;
}
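
/*
 * Usage sketch (illustrative): of the two failure helpers above,
 * qdisc_drop() frees the packet unconditionally, while
 * qdisc_reshape_fail() first gives an optional ->reshape_fail()
 * handler a chance to rescue it when CONFIG_NET_CLS_ACT is enabled.
 * A shaper's over-limit ->enqueue() path would therefore typically
 * end with:
 *
 *      return qdisc_reshape_fail(skb, sch);
 */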
/* Length to Time (L2T) lookup in a qdisc_rate_table, to determine how
 * long it will take to send a packet given its size.
 */
static inline u32 qdisc_l2t(struct qdisc_rate_table *rtab, unsigned int pktlen)
{
        int slot = pktlen + rtab->rate.cell_align + rtab->rate.overhead;

        if (slot < 0)
                slot = 0;
        slot >>= rtab->rate.cell_log;
        if (slot > 255)
                return rtab->data[255] * (slot >> 8) + rtab->data[slot & 0xFF];

        return rtab->data[slot];
}
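
/*
 * Worked example (illustrative numbers): with cell_log = 3 and zero
 * cell_align and overhead, a 1000-byte packet maps to slot
 * 1000 >> 3 = 125, so its transmission time is rtab->data[125].
 * Slots above 255 are approximated from data[255] scaled by the high
 * bits plus a low-order correction, as the code above shows.
 */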
#ifdef CONFIG_NET_CLS_ACT
static inline struct sk_buff *skb_act_clone(struct sk_buff *skb, gfp_t gfp_mask,
                                            int action)
{
        struct sk_buff *n;

        n = skb_clone(skb, gfp_mask);
        if (n) {
                n->tc_verd = SET_TC_VERD(n->tc_verd, 0);
                n->tc_verd = CLR_TC_OK2MUNGE(n->tc_verd);
                n->tc_verd = CLR_TC_MUNGED(n->tc_verd);
        }
        return n;
}
#endif

#endif