sch_generic.h
#ifndef __NET_SCHED_GENERIC_H
#define __NET_SCHED_GENERIC_H

#include <linux/netdevice.h>
#include <linux/types.h>
#include <linux/rcupdate.h>
#include <linux/module.h>
#include <linux/pkt_sched.h>
#include <linux/pkt_cls.h>
#include <net/gen_stats.h>
#include <net/rtnetlink.h>

struct Qdisc_ops;
struct qdisc_walker;
struct tcf_walker;
struct module;

struct qdisc_rate_table {
        struct tc_ratespec rate;
        u32 data[256];
        struct qdisc_rate_table *next;
        int refcnt;
};

enum qdisc_state_t {
        __QDISC_STATE_SCHED,
        __QDISC_STATE_DEACTIVATED,
};

/*
 * following bits are only changed while qdisc lock is held
 */
enum qdisc___state_t {
        __QDISC___STATE_RUNNING   = 1,
        __QDISC___STATE_THROTTLED = 2,
};

struct qdisc_size_table {
        struct rcu_head rcu;
        struct list_head list;
        struct tc_sizespec szopts;
        int refcnt;
        u16 data[];
};
struct Qdisc {
        int (*enqueue)(struct sk_buff *skb, struct Qdisc *dev);
        struct sk_buff *(*dequeue)(struct Qdisc *dev);
        unsigned flags;
#define TCQ_F_BUILTIN           1
#define TCQ_F_INGRESS           2
#define TCQ_F_CAN_BYPASS        4
#define TCQ_F_MQROOT            8
#define TCQ_F_WARN_NONWC        (1 << 16)
        int padded;
        struct Qdisc_ops *ops;
        struct qdisc_size_table __rcu *stab;
        struct list_head list;
        u32 handle;
        u32 parent;
        atomic_t refcnt;
        struct gnet_stats_rate_est rate_est;
        int (*reshape_fail)(struct sk_buff *skb,
                            struct Qdisc *q);
        void *u32_node;

        /* This field is deprecated, but it is still used by CBQ
         * and it will live until a better solution is invented.
         */
        struct Qdisc *__parent;
        struct netdev_queue *dev_queue;
        struct Qdisc *next_sched;

        struct sk_buff *gso_skb;
        /*
         * For performance's sake on SMP, we put highly modified fields at the end.
         */
        unsigned long state;
        struct sk_buff_head q;
        struct gnet_stats_basic_packed bstats;
        unsigned int __state;
        struct gnet_stats_queue qstats;
        struct rcu_head rcu_head;
        spinlock_t busylock;
        u32 limit;
};
static inline bool qdisc_is_running(const struct Qdisc *qdisc)
{
        return (qdisc->__state & __QDISC___STATE_RUNNING) ? true : false;
}

static inline bool qdisc_run_begin(struct Qdisc *qdisc)
{
        if (qdisc_is_running(qdisc))
                return false;
        qdisc->__state |= __QDISC___STATE_RUNNING;
        return true;
}

static inline void qdisc_run_end(struct Qdisc *qdisc)
{
        qdisc->__state &= ~__QDISC___STATE_RUNNING;
}
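
/*
 * Example (editorial sketch, not part of this header): the typical pairing
 * of qdisc_run_begin()/qdisc_run_end().  The real caller is qdisc_run() in
 * net/pkt_sched.h together with __qdisc_run(); example_try_run() below is a
 * hypothetical name, used only to illustrate that exactly one CPU at a time
 * may own the RUNNING state of a given qdisc.
 */
static inline void example_try_run(struct Qdisc *q)
{
        if (qdisc_run_begin(q)) {
                /* ... dequeue and transmit packets from q here ... */
                qdisc_run_end(q);
        }
        /* else: another CPU already owns the run state; nothing to do */
}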
static inline bool qdisc_is_throttled(const struct Qdisc *qdisc)
{
        return (qdisc->__state & __QDISC___STATE_THROTTLED) ? true : false;
}

static inline void qdisc_throttled(struct Qdisc *qdisc)
{
        qdisc->__state |= __QDISC___STATE_THROTTLED;
}

static inline void qdisc_unthrottled(struct Qdisc *qdisc)
{
        qdisc->__state &= ~__QDISC___STATE_THROTTLED;
}
struct Qdisc_class_ops {
        /* Child qdisc manipulation */
        struct netdev_queue *(*select_queue)(struct Qdisc *, struct tcmsg *);
        int (*graft)(struct Qdisc *, unsigned long cl,
                     struct Qdisc *, struct Qdisc **);
        struct Qdisc *(*leaf)(struct Qdisc *, unsigned long cl);
        void (*qlen_notify)(struct Qdisc *, unsigned long);

        /* Class manipulation routines */
        unsigned long (*get)(struct Qdisc *, u32 classid);
        void (*put)(struct Qdisc *, unsigned long);
        int (*change)(struct Qdisc *, u32, u32,
                      struct nlattr **, unsigned long *);
        int (*delete)(struct Qdisc *, unsigned long);
        void (*walk)(struct Qdisc *, struct qdisc_walker *arg);

        /* Filter manipulation */
        struct tcf_proto **(*tcf_chain)(struct Qdisc *, unsigned long);
        unsigned long (*bind_tcf)(struct Qdisc *, unsigned long,
                                  u32 classid);
        void (*unbind_tcf)(struct Qdisc *, unsigned long);

        /* rtnetlink specific */
        int (*dump)(struct Qdisc *, unsigned long,
                    struct sk_buff *skb, struct tcmsg *);
        int (*dump_stats)(struct Qdisc *, unsigned long,
                          struct gnet_dump *);
};

struct Qdisc_ops {
        struct Qdisc_ops *next;
        const struct Qdisc_class_ops *cl_ops;
        char id[IFNAMSIZ];
        int priv_size;

        int (*enqueue)(struct sk_buff *, struct Qdisc *);
        struct sk_buff *(*dequeue)(struct Qdisc *);
        struct sk_buff *(*peek)(struct Qdisc *);
        unsigned int (*drop)(struct Qdisc *);

        int (*init)(struct Qdisc *, struct nlattr *arg);
        void (*reset)(struct Qdisc *);
        void (*destroy)(struct Qdisc *);
        int (*change)(struct Qdisc *, struct nlattr *arg);
        void (*attach)(struct Qdisc *);

        int (*dump)(struct Qdisc *, struct sk_buff *);
        int (*dump_stats)(struct Qdisc *, struct gnet_dump *);

        struct module *owner;
};

struct tcf_result {
        unsigned long class;
        u32 classid;
};

struct tcf_proto_ops {
        struct tcf_proto_ops *next;
        char kind[IFNAMSIZ];

        int (*classify)(struct sk_buff *, struct tcf_proto *,
                        struct tcf_result *);
        int (*init)(struct tcf_proto *);
        void (*destroy)(struct tcf_proto *);

        unsigned long (*get)(struct tcf_proto *, u32 handle);
        void (*put)(struct tcf_proto *, unsigned long);
        int (*change)(struct tcf_proto *, unsigned long,
                      u32 handle, struct nlattr **,
                      unsigned long *);
        int (*delete)(struct tcf_proto *, unsigned long);
        void (*walk)(struct tcf_proto *, struct tcf_walker *arg);

        /* rtnetlink specific */
        int (*dump)(struct tcf_proto *, unsigned long,
                    struct sk_buff *skb, struct tcmsg *);

        struct module *owner;
};

struct tcf_proto {
        /* Fast access part */
        struct tcf_proto *next;
        void *root;
        int (*classify)(struct sk_buff *, struct tcf_proto *,
                        struct tcf_result *);
        __be16 protocol;

        /* All the rest */
        u32 prio;
        u32 classid;
        struct Qdisc *q;
        void *data;
        struct tcf_proto_ops *ops;
};
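
/*
 * Example (editorial sketch): how a chain of tcf_proto classifiers attached
 * to a qdisc is typically walked.  The real helpers are tc_classify() and
 * tc_classify_compat() in the scheduler core; the loop below is a simplified,
 * hypothetical illustration (example_classify is not a kernel symbol) and it
 * ignores CONFIG_NET_CLS_ACT reclassification.  ETH_P_ALL comes from
 * <linux/if_ether.h>, which is pulled in via <linux/netdevice.h>.
 */
static inline int example_classify(struct sk_buff *skb, struct tcf_proto *tp,
                                   struct tcf_result *res)
{
        int err;

        for (; tp; tp = tp->next) {
                /* a filter only applies to its protocol, or to all protocols */
                if (tp->protocol != skb->protocol &&
                    tp->protocol != htons(ETH_P_ALL))
                        continue;
                err = tp->classify(skb, tp, res);
                if (err >= 0)
                        return err;     /* classification verdict */
        }
        return -1;      /* no filter matched */
}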
struct qdisc_skb_cb {
        unsigned int pkt_len;
        long data[];
};

static inline int qdisc_qlen(struct Qdisc *q)
{
        return q->q.qlen;
}

static inline struct qdisc_skb_cb *qdisc_skb_cb(const struct sk_buff *skb)
{
        return (struct qdisc_skb_cb *)skb->cb;
}

static inline spinlock_t *qdisc_lock(struct Qdisc *qdisc)
{
        return &qdisc->q.lock;
}

static inline struct Qdisc *qdisc_root(struct Qdisc *qdisc)
{
        return qdisc->dev_queue->qdisc;
}

static inline struct Qdisc *qdisc_root_sleeping(struct Qdisc *qdisc)
{
        return qdisc->dev_queue->qdisc_sleeping;
}
/* The qdisc root lock is a mechanism by which the top level
 * of a qdisc tree can be locked from any qdisc node in the
 * forest.  This allows changing the configuration of some
 * aspect of the qdisc tree while blocking out asynchronous
 * qdisc access in the packet processing paths.
 *
 * It is only legal to do this when the root will not change
 * on us.  Otherwise we'll potentially lock the wrong qdisc
 * root.  This is enforced by holding the RTNL semaphore, which
 * all users of this lock accessor must do.
 */
static inline spinlock_t *qdisc_root_lock(struct Qdisc *qdisc)
{
        struct Qdisc *root = qdisc_root(qdisc);

        ASSERT_RTNL();
        return qdisc_lock(root);
}

static inline spinlock_t *qdisc_root_sleeping_lock(struct Qdisc *qdisc)
{
        struct Qdisc *root = qdisc_root_sleeping(qdisc);

        ASSERT_RTNL();
        return qdisc_lock(root);
}
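
/*
 * Example (editorial sketch): the comment above in practice.  A control-path
 * operation takes the root lock of the tree its qdisc belongs to while the
 * RTNL semaphore is held; sch_tree_lock()/sch_tree_unlock() below wrap
 * exactly this pattern.  example_change_limit() is a hypothetical helper,
 * not part of this header.
 */
static inline void example_change_limit(struct Qdisc *q, u32 limit)
{
        spinlock_t *root_lock = qdisc_root_sleeping_lock(q); /* asserts RTNL */

        spin_lock_bh(root_lock);
        q->limit = limit;
        spin_unlock_bh(root_lock);
}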
static inline struct net_device *qdisc_dev(struct Qdisc *qdisc)
{
        return qdisc->dev_queue->dev;
}

static inline void sch_tree_lock(struct Qdisc *q)
{
        spin_lock_bh(qdisc_root_sleeping_lock(q));
}

static inline void sch_tree_unlock(struct Qdisc *q)
{
        spin_unlock_bh(qdisc_root_sleeping_lock(q));
}

#define tcf_tree_lock(tp)       sch_tree_lock((tp)->q)
#define tcf_tree_unlock(tp)     sch_tree_unlock((tp)->q)

extern struct Qdisc noop_qdisc;
extern struct Qdisc_ops noop_qdisc_ops;
extern struct Qdisc_ops pfifo_fast_ops;
extern struct Qdisc_ops mq_qdisc_ops;

struct Qdisc_class_common {
        u32 classid;
        struct hlist_node hnode;
};

struct Qdisc_class_hash {
        struct hlist_head *hash;
        unsigned int hashsize;
        unsigned int hashmask;
        unsigned int hashelems;
};

static inline unsigned int qdisc_class_hash(u32 id, u32 mask)
{
        id ^= id >> 8;
        id ^= id >> 4;
        return id & mask;
}

static inline struct Qdisc_class_common *
qdisc_class_find(struct Qdisc_class_hash *hash, u32 id)
{
        struct Qdisc_class_common *cl;
        struct hlist_node *n;
        unsigned int h;

        h = qdisc_class_hash(id, hash->hashmask);
        hlist_for_each_entry(cl, n, &hash->hash[h], hnode) {
                if (cl->classid == id)
                        return cl;
        }
        return NULL;
}

extern int qdisc_class_hash_init(struct Qdisc_class_hash *);
extern void qdisc_class_hash_insert(struct Qdisc_class_hash *, struct Qdisc_class_common *);
extern void qdisc_class_hash_remove(struct Qdisc_class_hash *, struct Qdisc_class_common *);
extern void qdisc_class_hash_grow(struct Qdisc *, struct Qdisc_class_hash *);
extern void qdisc_class_hash_destroy(struct Qdisc_class_hash *);
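
/*
 * Example (editorial sketch): how a classful qdisc might look up one of its
 * classes with the helpers above.  struct example_class and
 * example_find_class() are hypothetical names, not part of this header;
 * container_of() comes from <linux/kernel.h>.
 */
struct example_class {
        struct Qdisc_class_common common;
        /* ... per-class state ... */
};

static inline struct example_class *
example_find_class(struct Qdisc_class_hash *hash, u32 classid)
{
        struct Qdisc_class_common *cl = qdisc_class_find(hash, classid);

        return cl ? container_of(cl, struct example_class, common) : NULL;
}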
extern void dev_init_scheduler(struct net_device *dev);
extern void dev_shutdown(struct net_device *dev);
extern void dev_activate(struct net_device *dev);
extern void dev_deactivate(struct net_device *dev);
extern void dev_deactivate_many(struct list_head *head);
extern struct Qdisc *dev_graft_qdisc(struct netdev_queue *dev_queue,
                                     struct Qdisc *qdisc);
extern void qdisc_reset(struct Qdisc *qdisc);
extern void qdisc_destroy(struct Qdisc *qdisc);
extern void qdisc_tree_decrease_qlen(struct Qdisc *qdisc, unsigned int n);
extern struct Qdisc *qdisc_alloc(struct netdev_queue *dev_queue,
                                 struct Qdisc_ops *ops);
extern struct Qdisc *qdisc_create_dflt(struct netdev_queue *dev_queue,
                                       struct Qdisc_ops *ops, u32 parentid);
extern void __qdisc_calculate_pkt_len(struct sk_buff *skb,
                                      const struct qdisc_size_table *stab);
extern void tcf_destroy(struct tcf_proto *tp);
extern void tcf_destroy_chain(struct tcf_proto **fl);
/* Reset all TX qdiscs of a device, starting at queue index i. */
static inline void qdisc_reset_all_tx_gt(struct net_device *dev, unsigned int i)
{
        struct Qdisc *qdisc;

        for (; i < dev->num_tx_queues; i++) {
                qdisc = netdev_get_tx_queue(dev, i)->qdisc;
                if (qdisc) {
                        spin_lock_bh(qdisc_lock(qdisc));
                        qdisc_reset(qdisc);
                        spin_unlock_bh(qdisc_lock(qdisc));
                }
        }
}

static inline void qdisc_reset_all_tx(struct net_device *dev)
{
        qdisc_reset_all_tx_gt(dev, 0);
}
/* Are all TX queues of the device empty? */
static inline bool qdisc_all_tx_empty(const struct net_device *dev)
{
        unsigned int i;

        for (i = 0; i < dev->num_tx_queues; i++) {
                struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
                const struct Qdisc *q = txq->qdisc;

                if (q->q.qlen)
                        return false;
        }
        return true;
}

/* Are any of the TX qdiscs changing? */
static inline bool qdisc_tx_changing(struct net_device *dev)
{
        unsigned int i;

        for (i = 0; i < dev->num_tx_queues; i++) {
                struct netdev_queue *txq = netdev_get_tx_queue(dev, i);

                if (txq->qdisc != txq->qdisc_sleeping)
                        return true;
        }
        return false;
}

/* Is the device using the noop qdisc on all queues? */
static inline bool qdisc_tx_is_noop(const struct net_device *dev)
{
        unsigned int i;

        for (i = 0; i < dev->num_tx_queues; i++) {
                struct netdev_queue *txq = netdev_get_tx_queue(dev, i);

                if (txq->qdisc != &noop_qdisc)
                        return false;
        }
        return true;
}

static inline unsigned int qdisc_pkt_len(const struct sk_buff *skb)
{
        return qdisc_skb_cb(skb)->pkt_len;
}

/* additional qdisc xmit flags (NET_XMIT_MASK in linux/netdevice.h) */
enum net_xmit_qdisc_t {
        __NET_XMIT_STOLEN = 0x00010000,
        __NET_XMIT_BYPASS = 0x00020000,
};

#ifdef CONFIG_NET_CLS_ACT
#define net_xmit_drop_count(e)  ((e) & __NET_XMIT_STOLEN ? 0 : 1)
#else
#define net_xmit_drop_count(e)  (1)
#endif
static inline void qdisc_calculate_pkt_len(struct sk_buff *skb,
                                           const struct Qdisc *sch)
{
#ifdef CONFIG_NET_SCHED
        struct qdisc_size_table *stab = rcu_dereference_bh(sch->stab);

        if (stab)
                __qdisc_calculate_pkt_len(skb, stab);
#endif
}

static inline int qdisc_enqueue(struct sk_buff *skb, struct Qdisc *sch)
{
        qdisc_calculate_pkt_len(skb, sch);
        return sch->enqueue(skb, sch);
}

static inline int qdisc_enqueue_root(struct sk_buff *skb, struct Qdisc *sch)
{
        qdisc_skb_cb(skb)->pkt_len = skb->len;
        return qdisc_enqueue(skb, sch) & NET_XMIT_MASK;
}
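
/*
 * Example (editorial sketch): how a classful parent typically interprets the
 * return value of qdisc_enqueue() on a child qdisc.  With CONFIG_NET_CLS_ACT,
 * a __NET_XMIT_STOLEN result is not counted as a drop (see
 * net_xmit_drop_count() above).  example_parent_enqueue() is hypothetical.
 */
static inline int example_parent_enqueue(struct sk_buff *skb, struct Qdisc *sch,
                                         struct Qdisc *child)
{
        int ret = qdisc_enqueue(skb, child);

        if (ret != NET_XMIT_SUCCESS) {
                if (net_xmit_drop_count(ret))
                        sch->qstats.drops++;
                return ret;
        }

        sch->q.qlen++;
        return NET_XMIT_SUCCESS;
}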
static inline void bstats_update(struct gnet_stats_basic_packed *bstats,
                                 const struct sk_buff *skb)
{
        bstats->bytes += qdisc_pkt_len(skb);
        bstats->packets += skb_is_gso(skb) ? skb_shinfo(skb)->gso_segs : 1;
}

static inline void qdisc_bstats_update(struct Qdisc *sch,
                                       const struct sk_buff *skb)
{
        bstats_update(&sch->bstats, skb);
}

static inline int __qdisc_enqueue_tail(struct sk_buff *skb, struct Qdisc *sch,
                                       struct sk_buff_head *list)
{
        __skb_queue_tail(list, skb);
        sch->qstats.backlog += qdisc_pkt_len(skb);

        return NET_XMIT_SUCCESS;
}

static inline int qdisc_enqueue_tail(struct sk_buff *skb, struct Qdisc *sch)
{
        return __qdisc_enqueue_tail(skb, sch, &sch->q);
}

static inline struct sk_buff *__qdisc_dequeue_head(struct Qdisc *sch,
                                                   struct sk_buff_head *list)
{
        struct sk_buff *skb = __skb_dequeue(list);

        if (likely(skb != NULL)) {
                sch->qstats.backlog -= qdisc_pkt_len(skb);
                qdisc_bstats_update(sch, skb);
        }

        return skb;
}

static inline struct sk_buff *qdisc_dequeue_head(struct Qdisc *sch)
{
        return __qdisc_dequeue_head(sch, &sch->q);
}

static inline unsigned int __qdisc_queue_drop_head(struct Qdisc *sch,
                                                   struct sk_buff_head *list)
{
        struct sk_buff *skb = __skb_dequeue(list);

        if (likely(skb != NULL)) {
                unsigned int len = qdisc_pkt_len(skb);

                sch->qstats.backlog -= len;
                kfree_skb(skb);
                return len;
        }

        return 0;
}

static inline unsigned int qdisc_queue_drop_head(struct Qdisc *sch)
{
        return __qdisc_queue_drop_head(sch, &sch->q);
}

static inline struct sk_buff *__qdisc_dequeue_tail(struct Qdisc *sch,
                                                   struct sk_buff_head *list)
{
        struct sk_buff *skb = __skb_dequeue_tail(list);

        if (likely(skb != NULL))
                sch->qstats.backlog -= qdisc_pkt_len(skb);

        return skb;
}

static inline struct sk_buff *qdisc_dequeue_tail(struct Qdisc *sch)
{
        return __qdisc_dequeue_tail(sch, &sch->q);
}

static inline struct sk_buff *qdisc_peek_head(struct Qdisc *sch)
{
        return skb_peek(&sch->q);
}
/* generic pseudo peek method for non-work-conserving qdisc */
static inline struct sk_buff *qdisc_peek_dequeued(struct Qdisc *sch)
{
        /* we can reuse ->gso_skb because peek isn't called for root qdiscs */
        if (!sch->gso_skb) {
                sch->gso_skb = sch->dequeue(sch);
                if (sch->gso_skb)
                        /* it's still part of the queue */
                        sch->q.qlen++;
        }

        return sch->gso_skb;
}

/* use instead of qdisc->dequeue() for all qdiscs queried with ->peek() */
static inline struct sk_buff *qdisc_dequeue_peeked(struct Qdisc *sch)
{
        struct sk_buff *skb = sch->gso_skb;

        if (skb) {
                sch->gso_skb = NULL;
                sch->q.qlen--;
        } else {
                skb = sch->dequeue(sch);
        }

        return skb;
}
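
/*
 * Example (editorial sketch): how a non-work-conserving qdisc typically pairs
 * the two helpers above.  Real users are shapers such as net/sched/sch_tbf.c;
 * example_shaper_dequeue() and its rate check are hypothetical.
 */
static inline struct sk_buff *example_shaper_dequeue(struct Qdisc *sch)
{
        struct sk_buff *skb = qdisc_peek_dequeued(sch);

        if (!skb)
                return NULL;
        /* ... if the rate/token budget does not yet allow this packet,
         *     arm a watchdog and return NULL without dequeueing it ... */
        return qdisc_dequeue_peeked(sch);
}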
static inline void __qdisc_reset_queue(struct Qdisc *sch,
                                       struct sk_buff_head *list)
{
        /*
         * We do not know the backlog in bytes of this list; it
         * is up to the caller to correct it.
         */
        __skb_queue_purge(list);
}

static inline void qdisc_reset_queue(struct Qdisc *sch)
{
        __qdisc_reset_queue(sch, &sch->q);
        sch->qstats.backlog = 0;
}
static inline unsigned int __qdisc_queue_drop(struct Qdisc *sch,
                                              struct sk_buff_head *list)
{
        struct sk_buff *skb = __qdisc_dequeue_tail(sch, list);

        if (likely(skb != NULL)) {
                unsigned int len = qdisc_pkt_len(skb);

                kfree_skb(skb);
                return len;
        }

        return 0;
}

static inline unsigned int qdisc_queue_drop(struct Qdisc *sch)
{
        return __qdisc_queue_drop(sch, &sch->q);
}

static inline int qdisc_drop(struct sk_buff *skb, struct Qdisc *sch)
{
        kfree_skb(skb);
        sch->qstats.drops++;

        return NET_XMIT_DROP;
}

static inline int qdisc_reshape_fail(struct sk_buff *skb, struct Qdisc *sch)
{
        sch->qstats.drops++;

#ifdef CONFIG_NET_CLS_ACT
        if (sch->reshape_fail == NULL || sch->reshape_fail(skb, sch))
                goto drop;

        return NET_XMIT_SUCCESS;

drop:
#endif
        kfree_skb(skb);
        return NET_XMIT_DROP;
}
/* Length to Time (L2T) lookup in a qdisc_rate_table, to determine how
 * long it will take to send a packet given its size.
 */
static inline u32 qdisc_l2t(struct qdisc_rate_table *rtab, unsigned int pktlen)
{
        int slot = pktlen + rtab->rate.cell_align + rtab->rate.overhead;

        if (slot < 0)
                slot = 0;
        slot >>= rtab->rate.cell_log;
        if (slot > 255)
                return rtab->data[255] * (slot >> 8) + rtab->data[slot & 0xFF];
        return rtab->data[slot];
}
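
/*
 * Worked example (editorial note): with rate.cell_log = 3 and no cell_align
 * or overhead, a 100 byte packet maps to slot 100 >> 3 = 12, so its
 * transmission time is simply rtab->data[12].  For slots above 255 the
 * result is approximated from the last table entry, e.g. slot 300 yields
 * rtab->data[255] * (300 >> 8) + rtab->data[300 & 0xFF].
 */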
#ifdef CONFIG_NET_CLS_ACT
static inline struct sk_buff *skb_act_clone(struct sk_buff *skb, gfp_t gfp_mask,
                                            int action)
{
        struct sk_buff *n;

        n = skb_clone(skb, gfp_mask);

        if (n) {
                n->tc_verd = SET_TC_VERD(n->tc_verd, 0);
                n->tc_verd = CLR_TC_OK2MUNGE(n->tc_verd);
                n->tc_verd = CLR_TC_MUNGED(n->tc_verd);
        }
        return n;
}
#endif

#endif