
/* net/sched/sch_dsmark.c - Differentiated Services field marker */

/* Written 1998-2000 by Werner Almesberger, EPFL ICA */

#include <linux/module.h>
#include <linux/init.h>
#include <linux/slab.h>		/* kmalloc()/kfree() */
#include <linux/types.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/skbuff.h>
#include <linux/rtnetlink.h>
#include <linux/bitops.h>
#include <net/pkt_sched.h>
#include <net/dsfield.h>
#include <net/inet_ecn.h>
#include <asm/byteorder.h>
/*
 * classid	class		marking
 * -------	-----		-------
 *   n/a	  0		n/a
 *   x:0	  1		use entry [0]
 *   ...	 ...		...
 *   x:y y>0	 y+1		use entry [y]
 *   ...	 ...		...
 * x:indices-1	indices		use entry [indices-1]
 *   ...	 ...		...
 *   x:y	 y+1		use entry [y & (indices-1)]
 *   ...	 ...		...
 * 0xffff	0x10000		use entry [indices-1]
 */
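/*
 * For example, with indices == 16, classid x:20 maps to class 21 and is
 * marked from entry [20 & 15], i.e. entry [4].
 */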
#define NO_DEFAULT_INDEX	(1 << 16)

struct dsmark_qdisc_data {
	struct Qdisc		*q;
	struct tcf_proto	*filter_list;
	u8			*mask;	/* "owns" the array */
	u8			*value;
	u16			indices;
	u32			default_index;	/* index range is 0...0xffff */
	int			set_tc_index;
};
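/*
 * Class handles (the "unsigned long" arguments below) are index + 1, so
 * that 0 remains available to mean "no class".
 */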
static inline int dsmark_valid_index(struct dsmark_qdisc_data *p, u16 index)
{
	return (index <= p->indices && index > 0);
}

/* ------------------------- Class/flow operations ------------------------- */
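/*
 * Swap in the new child qdisc under the tree lock; if none is given, fall
 * back to a default pfifo (or noop_qdisc if even that cannot be created).
 */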
static int dsmark_graft(struct Qdisc *sch, unsigned long arg,
			struct Qdisc *new, struct Qdisc **old)
{
	struct dsmark_qdisc_data *p = qdisc_priv(sch);

	pr_debug("dsmark_graft(sch %p,[qdisc %p],new %p,old %p)\n",
		 sch, p, new, old);

	if (new == NULL) {
		new = qdisc_create_dflt(qdisc_dev(sch), sch->dev_queue,
					&pfifo_qdisc_ops,
					sch->handle);
		if (new == NULL)
			new = &noop_qdisc;
	}

	sch_tree_lock(sch);
	*old = xchg(&p->q, new);
	qdisc_tree_decrease_qlen(*old, (*old)->q.qlen);
	qdisc_reset(*old);
	sch_tree_unlock(sch);

	return 0;
}

static struct Qdisc *dsmark_leaf(struct Qdisc *sch, unsigned long arg)
{
	struct dsmark_qdisc_data *p = qdisc_priv(sch);
	return p->q;
}

static unsigned long dsmark_get(struct Qdisc *sch, u32 classid)
{
	pr_debug("dsmark_get(sch %p,[qdisc %p],classid %x)\n",
		 sch, qdisc_priv(sch), classid);
	return TC_H_MIN(classid) + 1;
}

static unsigned long dsmark_bind_filter(struct Qdisc *sch,
					unsigned long parent, u32 classid)
{
	return dsmark_get(sch, classid);
}

static void dsmark_put(struct Qdisc *sch, unsigned long cl)
{
}

static const struct nla_policy dsmark_policy[TCA_DSMARK_MAX + 1] = {
	[TCA_DSMARK_INDICES]		= { .type = NLA_U16 },
	[TCA_DSMARK_DEFAULT_INDEX]	= { .type = NLA_U16 },
	[TCA_DSMARK_SET_TC_INDEX]	= { .type = NLA_FLAG },
	[TCA_DSMARK_MASK]		= { .type = NLA_U8 },
	[TCA_DSMARK_VALUE]		= { .type = NLA_U8 },
};
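/*
 * Per-class (mask, value) pairs are configured from userspace, e.g.
 * (illustrative command, device name assumed):
 *
 *	tc class change dev eth0 classid 1:1 dsmark mask 0x3 value 0xb8
 *
 * which would set entry [1] to keep only the ECN bits of the DS field and
 * OR in the EF DSCP (0xb8 == 46 << 2).
 */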
static int dsmark_change(struct Qdisc *sch, u32 classid, u32 parent,
			 struct nlattr **tca, unsigned long *arg)
{
	struct dsmark_qdisc_data *p = qdisc_priv(sch);
	struct nlattr *opt = tca[TCA_OPTIONS];
	struct nlattr *tb[TCA_DSMARK_MAX + 1];
	int err = -EINVAL;
	u8 mask = 0;

	pr_debug("dsmark_change(sch %p,[qdisc %p],classid %x,parent %x),"
		 " arg 0x%lx\n", sch, p, classid, parent, *arg);

	if (!dsmark_valid_index(p, *arg)) {
		err = -ENOENT;
		goto errout;
	}

	if (!opt)
		goto errout;

	err = nla_parse_nested(tb, TCA_DSMARK_MAX, opt, dsmark_policy);
	if (err < 0)
		goto errout;

	if (tb[TCA_DSMARK_MASK])
		mask = nla_get_u8(tb[TCA_DSMARK_MASK]);

	if (tb[TCA_DSMARK_VALUE])
		p->value[*arg - 1] = nla_get_u8(tb[TCA_DSMARK_VALUE]);

	if (tb[TCA_DSMARK_MASK])
		p->mask[*arg - 1] = mask;

	err = 0;

errout:
	return err;
}
static int dsmark_delete(struct Qdisc *sch, unsigned long arg)
{
	struct dsmark_qdisc_data *p = qdisc_priv(sch);

	if (!dsmark_valid_index(p, arg))
		return -EINVAL;

	p->mask[arg - 1] = 0xff;
	p->value[arg - 1] = 0;

	return 0;
}

static void dsmark_walk(struct Qdisc *sch, struct qdisc_walker *walker)
{
	struct dsmark_qdisc_data *p = qdisc_priv(sch);
	int i;

	pr_debug("dsmark_walk(sch %p,[qdisc %p],walker %p)\n", sch, p, walker);

	if (walker->stop)
		return;

	for (i = 0; i < p->indices; i++) {
		if (p->mask[i] == 0xff && !p->value[i])
			goto ignore;
		if (walker->count >= walker->skip) {
			if (walker->fn(sch, i + 1, walker) < 0) {
				walker->stop = 1;
				break;
			}
		}
ignore:
		walker->count++;
	}
}

static inline struct tcf_proto **dsmark_find_tcf(struct Qdisc *sch,
						 unsigned long cl)
{
	struct dsmark_qdisc_data *p = qdisc_priv(sch);
	return &p->filter_list;
}

/* --------------------------- Qdisc operations ---------------------------- */
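/*
 * Classification happens at enqueue time and only stores an index in
 * skb->tc_index; the DS field itself is rewritten at dequeue time, from
 * the (mask, value) entry that index selects.
 */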
static int dsmark_enqueue(struct sk_buff *skb, struct Qdisc *sch)
{
	struct dsmark_qdisc_data *p = qdisc_priv(sch);
	int err;

	pr_debug("dsmark_enqueue(skb %p,sch %p,[qdisc %p])\n", skb, sch, p);

	if (p->set_tc_index) {
		switch (skb->protocol) {
		case htons(ETH_P_IP):
			if (skb_cow_head(skb, sizeof(struct iphdr)))
				goto drop;

			skb->tc_index = ipv4_get_dsfield(ip_hdr(skb))
				& ~INET_ECN_MASK;
			break;

		case htons(ETH_P_IPV6):
			if (skb_cow_head(skb, sizeof(struct ipv6hdr)))
				goto drop;

			skb->tc_index = ipv6_get_dsfield(ipv6_hdr(skb))
				& ~INET_ECN_MASK;
			break;
		default:
			skb->tc_index = 0;
			break;
		}
	}

	if (TC_H_MAJ(skb->priority) == sch->handle)
		skb->tc_index = TC_H_MIN(skb->priority);
	else {
		struct tcf_result res;
		int result = tc_classify(skb, p->filter_list, &res);

		pr_debug("result %d class 0x%04x\n", result, res.classid);

		switch (result) {
#ifdef CONFIG_NET_CLS_ACT
		case TC_ACT_QUEUED:
		case TC_ACT_STOLEN:
			kfree_skb(skb);
			return NET_XMIT_SUCCESS | __NET_XMIT_STOLEN;

		case TC_ACT_SHOT:
			goto drop;
#endif
		case TC_ACT_OK:
			skb->tc_index = TC_H_MIN(res.classid);
			break;

		default:
			if (p->default_index != NO_DEFAULT_INDEX)
				skb->tc_index = p->default_index;
			break;
		}
	}

	err = qdisc_enqueue(skb, p->q);
	if (err != NET_XMIT_SUCCESS) {
		if (net_xmit_drop_count(err))
			sch->qstats.drops++;
		return err;
	}

	sch->bstats.bytes += qdisc_pkt_len(skb);
	sch->bstats.packets++;
	sch->q.qlen++;

	return NET_XMIT_SUCCESS;

drop:
	kfree_skb(skb);
	sch->qstats.drops++;
	return NET_XMIT_SUCCESS | __NET_XMIT_BYPASS;
}
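/*
 * Remarking rule applied below via ipv4/ipv6_change_dsfield():
 *
 *	dsfield = (dsfield & mask[index]) | value[index]
 *
 * i.e. mask selects the bits to keep and value the bits to set.
 */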
static struct sk_buff *dsmark_dequeue(struct Qdisc *sch)
{
	struct dsmark_qdisc_data *p = qdisc_priv(sch);
	struct sk_buff *skb;
	u32 index;

	pr_debug("dsmark_dequeue(sch %p,[qdisc %p])\n", sch, p);

	skb = p->q->ops->dequeue(p->q);
	if (skb == NULL)
		return NULL;

	sch->q.qlen--;

	index = skb->tc_index & (p->indices - 1);

	pr_debug("index %d->%d\n", skb->tc_index, index);

	switch (skb->protocol) {
	case htons(ETH_P_IP):
		ipv4_change_dsfield(ip_hdr(skb), p->mask[index],
				    p->value[index]);
		break;
	case htons(ETH_P_IPV6):
		ipv6_change_dsfield(ipv6_hdr(skb), p->mask[index],
				    p->value[index]);
		break;
	default:
		/*
		 * Only complain if a change was actually attempted.
		 * This way, we can send non-IP traffic through dsmark
		 * and don't need yet another qdisc as a bypass.
		 */
		if (p->mask[index] != 0xff || p->value[index])
			printk(KERN_WARNING
			       "dsmark_dequeue: unsupported protocol %d\n",
			       ntohs(skb->protocol));
		break;
	}

	return skb;
}
static struct sk_buff *dsmark_peek(struct Qdisc *sch)
{
	struct dsmark_qdisc_data *p = qdisc_priv(sch);

	pr_debug("dsmark_peek(sch %p,[qdisc %p])\n", sch, p);

	return p->q->ops->peek(p->q);
}

static int dsmark_requeue(struct sk_buff *skb, struct Qdisc *sch)
{
	struct dsmark_qdisc_data *p = qdisc_priv(sch);
	int err;

	pr_debug("dsmark_requeue(skb %p,sch %p,[qdisc %p])\n", skb, sch, p);

	err = p->q->ops->requeue(skb, p->q);
	if (err != NET_XMIT_SUCCESS) {
		if (net_xmit_drop_count(err))
			sch->qstats.drops++;
		return err;
	}

	sch->q.qlen++;
	sch->qstats.requeues++;

	return NET_XMIT_SUCCESS;
}

static unsigned int dsmark_drop(struct Qdisc *sch)
{
	struct dsmark_qdisc_data *p = qdisc_priv(sch);
	unsigned int len;

	pr_debug("dsmark_drop(sch %p,[qdisc %p])\n", sch, p);

	if (p->q->ops->drop == NULL)
		return 0;

	len = p->q->ops->drop(p->q);
	if (len)
		sch->q.qlen--;

	return len;
}
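/*
 * The qdisc itself is created from userspace, e.g. (illustrative command,
 * device name assumed; "indices" must be a power of two):
 *
 *	tc qdisc add dev eth0 handle 1:0 root dsmark indices 64 \
 *		default_index 0 set_tc_index
 */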
static int dsmark_init(struct Qdisc *sch, struct nlattr *opt)
{
	struct dsmark_qdisc_data *p = qdisc_priv(sch);
	struct nlattr *tb[TCA_DSMARK_MAX + 1];
	int err = -EINVAL;
	u32 default_index = NO_DEFAULT_INDEX;
	u16 indices;
	u8 *mask;

	pr_debug("dsmark_init(sch %p,[qdisc %p],opt %p)\n", sch, p, opt);

	if (!opt)
		goto errout;

	err = nla_parse_nested(tb, TCA_DSMARK_MAX, opt, dsmark_policy);
	if (err < 0)
		goto errout;

	err = -EINVAL;
	if (tb[TCA_DSMARK_INDICES] == NULL)	/* mandatory attribute */
		goto errout;
	indices = nla_get_u16(tb[TCA_DSMARK_INDICES]);

	if (hweight32(indices) != 1)	/* must be a power of two */
		goto errout;

	if (tb[TCA_DSMARK_DEFAULT_INDEX])
		default_index = nla_get_u16(tb[TCA_DSMARK_DEFAULT_INDEX]);
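	/* One allocation backs both per-index arrays: mask[0..indices-1]
	 * is followed immediately by value[0..indices-1]. */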
	mask = kmalloc(indices * 2, GFP_KERNEL);
	if (mask == NULL) {
		err = -ENOMEM;
		goto errout;
	}

	p->mask = mask;
	memset(p->mask, 0xff, indices);

	p->value = p->mask + indices;
	memset(p->value, 0, indices);

	p->indices = indices;
	p->default_index = default_index;
	p->set_tc_index = nla_get_flag(tb[TCA_DSMARK_SET_TC_INDEX]);

	p->q = qdisc_create_dflt(qdisc_dev(sch), sch->dev_queue,
				 &pfifo_qdisc_ops, sch->handle);
	if (p->q == NULL)
		p->q = &noop_qdisc;

	pr_debug("dsmark_init: qdisc %p\n", p->q);

	err = 0;
errout:
	return err;
}
static void dsmark_reset(struct Qdisc *sch)
{
	struct dsmark_qdisc_data *p = qdisc_priv(sch);

	pr_debug("dsmark_reset(sch %p,[qdisc %p])\n", sch, p);
	qdisc_reset(p->q);
	sch->q.qlen = 0;
}

static void dsmark_destroy(struct Qdisc *sch)
{
	struct dsmark_qdisc_data *p = qdisc_priv(sch);

	pr_debug("dsmark_destroy(sch %p,[qdisc %p])\n", sch, p);

	tcf_destroy_chain(&p->filter_list);
	qdisc_destroy(p->q);
	kfree(p->mask);
}
static int dsmark_dump_class(struct Qdisc *sch, unsigned long cl,
			     struct sk_buff *skb, struct tcmsg *tcm)
{
	struct dsmark_qdisc_data *p = qdisc_priv(sch);
	struct nlattr *opts = NULL;

	pr_debug("dsmark_dump_class(sch %p,[qdisc %p],class %ld)\n",
		 sch, p, cl);

	if (!dsmark_valid_index(p, cl))
		return -EINVAL;

	tcm->tcm_handle = TC_H_MAKE(TC_H_MAJ(sch->handle), cl - 1);
	tcm->tcm_info = p->q->handle;

	opts = nla_nest_start(skb, TCA_OPTIONS);
	if (opts == NULL)
		goto nla_put_failure;
	NLA_PUT_U8(skb, TCA_DSMARK_MASK, p->mask[cl - 1]);
	NLA_PUT_U8(skb, TCA_DSMARK_VALUE, p->value[cl - 1]);

	return nla_nest_end(skb, opts);

nla_put_failure:
	nla_nest_cancel(skb, opts);
	return -EMSGSIZE;
}
static int dsmark_dump(struct Qdisc *sch, struct sk_buff *skb)
{
	struct dsmark_qdisc_data *p = qdisc_priv(sch);
	struct nlattr *opts = NULL;

	opts = nla_nest_start(skb, TCA_OPTIONS);
	if (opts == NULL)
		goto nla_put_failure;
	NLA_PUT_U16(skb, TCA_DSMARK_INDICES, p->indices);

	if (p->default_index != NO_DEFAULT_INDEX)
		NLA_PUT_U16(skb, TCA_DSMARK_DEFAULT_INDEX, p->default_index);

	if (p->set_tc_index)
		NLA_PUT_FLAG(skb, TCA_DSMARK_SET_TC_INDEX);

	return nla_nest_end(skb, opts);

nla_put_failure:
	nla_nest_cancel(skb, opts);
	return -EMSGSIZE;
}
static const struct Qdisc_class_ops dsmark_class_ops = {
	.graft		=	dsmark_graft,
	.leaf		=	dsmark_leaf,
	.get		=	dsmark_get,
	.put		=	dsmark_put,
	.change		=	dsmark_change,
	.delete		=	dsmark_delete,
	.walk		=	dsmark_walk,
	.tcf_chain	=	dsmark_find_tcf,
	.bind_tcf	=	dsmark_bind_filter,
	.unbind_tcf	=	dsmark_put,
	.dump		=	dsmark_dump_class,
};

static struct Qdisc_ops dsmark_qdisc_ops __read_mostly = {
	.next		=	NULL,
	.cl_ops		=	&dsmark_class_ops,
	.id		=	"dsmark",
	.priv_size	=	sizeof(struct dsmark_qdisc_data),
	.enqueue	=	dsmark_enqueue,
	.dequeue	=	dsmark_dequeue,
	.peek		=	dsmark_peek,
	.requeue	=	dsmark_requeue,
	.drop		=	dsmark_drop,
	.init		=	dsmark_init,
	.reset		=	dsmark_reset,
	.destroy	=	dsmark_destroy,
	.change		=	NULL,
	.dump		=	dsmark_dump,
	.owner		=	THIS_MODULE,
};

static int __init dsmark_module_init(void)
{
	return register_qdisc(&dsmark_qdisc_ops);
}

static void __exit dsmark_module_exit(void)
{
	unregister_qdisc(&dsmark_qdisc_ops);
}

module_init(dsmark_module_init)
module_exit(dsmark_module_exit)

MODULE_LICENSE("GPL");