/*
 * net/sched/ematch.c	Extended Match API
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 *
 * Authors:	Thomas Graf <tgraf@suug.ch>
 *
 * ==========================================================================
 *
 * An extended match (ematch) is a small classification tool not worth
 * writing a full classifier for. Ematches can be interconnected to form
 * a logic expression and get attached to classifiers to extend their
 * functionality.
 *
 * The userspace part transforms the logic expressions into an array
 * consisting of multiple sequences of interconnected ematches separated
 * by markers. Precedence is implemented by a special ematch kind
 * referencing a sequence beyond the marker of the current sequence
 * causing the current position in the sequence to be pushed onto a stack
 * to allow the current position to be overwritten by the position referenced
 * in the special ematch. Matching continues in the new sequence until a
 * marker is reached causing the position to be restored from the stack.
 *
 * Example:
 *          A AND (B1 OR B2) AND C AND D
 *
 *              ------->-PUSH-------
 *    -->--    /         -->--      \   -->--
 *   /     \  /         /     \      \ /     \
 * +-------+-------+-------+-------+-------+--------+
 * | A AND | B AND | C AND | D END | B1 OR | B2 END |
 * +-------+-------+-------+-------+-------+--------+
 *                 \                      /
 *                  --------<-POP---------
 *
 * where B is a virtual ematch referencing the sequence starting with B1.
 *
 * ==========================================================================
 *
 * How to write an ematch in 60 seconds
 * ------------------------------------
 *
 *   1) Provide a matcher function:
 *      static int my_match(struct sk_buff *skb, struct tcf_ematch *m,
 *                          struct tcf_pkt_info *info)
 *      {
 *      	struct mydata *d = (struct mydata *) m->data;
 *
 *      	if (...matching goes here...)
 *      		return 1;
 *      	else
 *      		return 0;
 *      }
 *
 *   2) Fill out a struct tcf_ematch_ops:
 *      static struct tcf_ematch_ops my_ops = {
 *      	.kind = unique id,
 *      	.datalen = sizeof(struct mydata),
 *      	.match = my_match,
 *      	.owner = THIS_MODULE,
 *      };
 *
 *   3) Register/Unregister your ematch:
 *      static int __init init_my_ematch(void)
 *      {
 *      	return tcf_em_register(&my_ops);
 *      }
 *
 *      static void __exit exit_my_ematch(void)
 *      {
 *      	tcf_em_unregister(&my_ops);
 *      }
 *
 *      module_init(init_my_ematch);
 *      module_exit(exit_my_ematch);
 *
 *   4) By now you should have two more seconds left, barely enough to
 *      open up a beer to watch the compilation going.
 */
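
/* A rough sketch of the optional callbacks, following the pattern used by
 * existing ematches such as em_meta; this is an illustration, not part of
 * the API above. If change() is omitted the core copies the configuration
 * data itself (see tcf_em_validate() below), and if destroy() is omitted
 * the core kfree()s it again in tcf_em_tree_destroy():
 *
 *	static int my_change(struct tcf_proto *tp, void *data, int len,
 *			     struct tcf_ematch *m)
 *	{
 *		... validate data/len and set up m->data ...
 *		return 0;
 *	}
 *
 *	static void my_destroy(struct tcf_proto *tp, struct tcf_ematch *m)
 *	{
 *		... release whatever my_change() allocated ...
 *	}
 *
 *	static int my_dump(struct sk_buff *skb, struct tcf_ematch *m)
 *	{
 *		... append the configuration to skb, return -1 on failure ...
 *	}
 *
 *	static struct tcf_ematch_ops my_ops = {
 *		.kind	 = unique id,
 *		.change	 = my_change,
 *		.match	 = my_match,
 *		.destroy = my_destroy,
 *		.dump	 = my_dump,
 *		.owner	 = THIS_MODULE,
 *	};
 */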
#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/rtnetlink.h>
#include <linux/skbuff.h>
#include <net/pkt_cls.h>

static LIST_HEAD(ematch_ops);
static DEFINE_RWLOCK(ematch_mod_lock);

static inline struct tcf_ematch_ops *tcf_em_lookup(u16 kind)
{
	struct tcf_ematch_ops *e = NULL;

	read_lock(&ematch_mod_lock);
	list_for_each_entry(e, &ematch_ops, link) {
		if (kind == e->kind) {
			if (!try_module_get(e->owner))
				e = NULL;
			read_unlock(&ematch_mod_lock);
			return e;
		}
	}
	read_unlock(&ematch_mod_lock);

	return NULL;
}
/**
 * tcf_em_register - register an extended match
 *
 * @ops: ematch operations lookup table
 *
 * This function must be called by ematches to announce their presence.
 * The given @ops must have kind set to a unique identifier and the
 * callback match() must be implemented. All other callbacks are optional
 * and a fallback implementation is used instead.
 *
 * Returns -EEXIST if an ematch of the same kind has already been registered.
 */
int tcf_em_register(struct tcf_ematch_ops *ops)
{
	int err = -EEXIST;
	struct tcf_ematch_ops *e;

	if (ops->match == NULL)
		return -EINVAL;

	write_lock(&ematch_mod_lock);
	list_for_each_entry(e, &ematch_ops, link)
		if (ops->kind == e->kind)
			goto errout;

	list_add_tail(&ops->link, &ematch_ops);
	err = 0;
errout:
	write_unlock(&ematch_mod_lock);
	return err;
}
EXPORT_SYMBOL(tcf_em_register);
/**
 * tcf_em_unregister - unregister an extended match
 *
 * @ops: ematch operations lookup table
 *
 * This function must be called by ematches to announce their disappearance,
 * for example when the module gets unloaded. The @ops parameter must be
 * the same as the one used for registration.
 */
void tcf_em_unregister(struct tcf_ematch_ops *ops)
{
	write_lock(&ematch_mod_lock);
	list_del(&ops->link);
	write_unlock(&ematch_mod_lock);
}
EXPORT_SYMBOL(tcf_em_unregister);
static inline struct tcf_ematch *tcf_em_get_match(struct tcf_ematch_tree *tree,
						  int index)
{
	return &tree->matches[index];
}
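
/* Each ematch of the TCA_EMATCH_TREE_LIST attribute is itself a netlink
 * attribute whose payload begins with a struct tcf_ematch_hdr followed by
 * the kind specific data. A rough sketch of the layout validated below,
 * assuming the tcf_ematch_hdr definition from <linux/pkt_cls.h>:
 *
 *	struct tcf_ematch_hdr {
 *		__u16	matchid;	(id assigned by userspace)
 *		__u16	kind;		(e.g. TCF_EM_CONTAINER)
 *		__u16	flags;		(TCF_EM_REL_*, TCF_EM_INVERT, ...)
 *		__u16	pad;
 *	};
 *
 *	+-----------------------+--------------------------------+
 *	| struct tcf_ematch_hdr | kind specific data (data_len)  |
 *	+-----------------------+--------------------------------+
 */
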
static int tcf_em_validate(struct tcf_proto *tp,
			   struct tcf_ematch_tree_hdr *tree_hdr,
			   struct tcf_ematch *em, struct nlattr *nla, int idx)
{
	int err = -EINVAL;
	struct tcf_ematch_hdr *em_hdr = nla_data(nla);
	int data_len = nla_len(nla) - sizeof(*em_hdr);
	void *data = (void *) em_hdr + sizeof(*em_hdr);

	if (!TCF_EM_REL_VALID(em_hdr->flags))
		goto errout;

	if (em_hdr->kind == TCF_EM_CONTAINER) {
		/* Special ematch called "container", carries an index
		 * referencing an external ematch sequence. */
		u32 ref;

		if (data_len < sizeof(ref))
			goto errout;
		ref = *(u32 *) data;

		if (ref >= tree_hdr->nmatches)
			goto errout;

		/* We do not allow backward jumps to avoid loops and jumps
		 * to our own position are of course illegal. */
		if (ref <= idx)
			goto errout;

		em->data = ref;
	} else {
		/* Note: This lookup will increase the module refcnt
		 * of the ematch module referenced. In case of a failure,
		 * a destroy function is called by the underlying layer
		 * which automatically releases the reference again, therefore
		 * the module MUST not be given back under any circumstances
		 * here. Be aware, the destroy function assumes that the
		 * module is held if the ops field is non zero. */
		em->ops = tcf_em_lookup(em_hdr->kind);

		if (em->ops == NULL) {
			err = -ENOENT;
#ifdef CONFIG_MODULES
			__rtnl_unlock();
			request_module("ematch-kind-%u", em_hdr->kind);
			rtnl_lock();
			em->ops = tcf_em_lookup(em_hdr->kind);
			if (em->ops) {
				/* We dropped the RTNL mutex in order to
				 * perform the module load. Tell the caller
				 * to replay the request. */
				module_put(em->ops->owner);
				err = -EAGAIN;
			}
#endif
			goto errout;
		}

		/* ematch module provides expected length of data, so we
		 * can do a basic sanity check. */
		if (em->ops->datalen && data_len < em->ops->datalen)
			goto errout;

		if (em->ops->change) {
			err = em->ops->change(tp, data, data_len, em);
			if (err < 0)
				goto errout;
		} else if (data_len > 0) {
			/* ematch module doesn't provide its own change
			 * procedure and expects us to allocate and copy
			 * the ematch data.
			 *
			 * TCF_EM_SIMPLE may be specified stating that the
			 * data only consists of a u32 integer and the module
			 * does not expect a memory reference but rather
			 * the value carried. */
			if (em_hdr->flags & TCF_EM_SIMPLE) {
				if (data_len < sizeof(u32))
					goto errout;
				em->data = *(u32 *) data;
			} else {
				void *v = kmemdup(data, data_len, GFP_KERNEL);
				if (v == NULL) {
					err = -ENOBUFS;
					goto errout;
				}
				em->data = (unsigned long) v;
			}
		}
	}

	em->matchid = em_hdr->matchid;
	em->flags = em_hdr->flags;
	em->datalen = data_len;
	err = 0;

errout:
	return err;
}
static const struct nla_policy em_policy[TCA_EMATCH_TREE_MAX + 1] = {
	[TCA_EMATCH_TREE_HDR]	= { .len = sizeof(struct tcf_ematch_tree_hdr) },
	[TCA_EMATCH_TREE_LIST]	= { .type = NLA_NESTED },
};
/**
 * tcf_em_tree_validate - validate ematch config TLV and build ematch tree
 *
 * @tp: classifier kind handle
 * @nla: ematch tree configuration TLV
 * @tree: destination ematch tree variable to store the resulting
 *        ematch tree.
 *
 * This function validates the given configuration TLV @nla and builds an
 * ematch tree in @tree. The resulting tree must later be copied into
 * the private classifier data using tcf_em_tree_change(). You MUST NOT
 * provide the ematch tree variable of the private classifier data directly,
 * the changes would not be locked properly.
 *
 * Returns a negative error code if the configuration TLV contains errors.
 */
int tcf_em_tree_validate(struct tcf_proto *tp, struct nlattr *nla,
			 struct tcf_ematch_tree *tree)
{
	int idx, list_len, matches_len, err;
	struct nlattr *tb[TCA_EMATCH_TREE_MAX + 1];
	struct nlattr *rt_match, *rt_hdr, *rt_list;
	struct tcf_ematch_tree_hdr *tree_hdr;
	struct tcf_ematch *em;

	memset(tree, 0, sizeof(*tree));
	if (!nla)
		return 0;

	err = nla_parse_nested(tb, TCA_EMATCH_TREE_MAX, nla, em_policy);
	if (err < 0)
		goto errout;

	err = -EINVAL;
	rt_hdr = tb[TCA_EMATCH_TREE_HDR];
	rt_list = tb[TCA_EMATCH_TREE_LIST];

	if (rt_hdr == NULL || rt_list == NULL)
		goto errout;

	tree_hdr = nla_data(rt_hdr);
	memcpy(&tree->hdr, tree_hdr, sizeof(*tree_hdr));

	rt_match = nla_data(rt_list);
	list_len = nla_len(rt_list);
	matches_len = tree_hdr->nmatches * sizeof(*em);

	tree->matches = kzalloc(matches_len, GFP_KERNEL);
	if (tree->matches == NULL)
		goto errout;

	/* We do not use nla_parse_nested here because the maximum
	 * number of attributes is unknown. This saves us the allocation
	 * for a tb buffer which would serve no purpose at all.
	 *
	 * The array of rt attributes is parsed in the order they are
	 * provided, their type must be incremental from 1 to n. Even
	 * if it does not serve any real purpose, a failure of sticking
	 * to this policy will result in parsing failure. */
	for (idx = 0; nla_ok(rt_match, list_len); idx++) {
		err = -EINVAL;

		if (rt_match->nla_type != (idx + 1))
			goto errout_abort;

		if (idx >= tree_hdr->nmatches)
			goto errout_abort;

		if (nla_len(rt_match) < sizeof(struct tcf_ematch_hdr))
			goto errout_abort;

		em = tcf_em_get_match(tree, idx);

		err = tcf_em_validate(tp, tree_hdr, em, rt_match, idx);
		if (err < 0)
			goto errout_abort;

		rt_match = nla_next(rt_match, &list_len);
	}

	/* Check if the number of matches provided by userspace actually
	 * complies with the array of matches. The number was used for
	 * the validation of references and a mismatch could lead to
	 * undefined references during the matching process. */
	if (idx != tree_hdr->nmatches) {
		err = -EINVAL;
		goto errout_abort;
	}

	err = 0;
errout:
	return err;

errout_abort:
	tcf_em_tree_destroy(tp, tree);
	return err;
}
EXPORT_SYMBOL(tcf_em_tree_validate);
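
/* A rough usage sketch for the validate/change pair as seen from a
 * classifier's change() path; TCA_MYCLS_EMATCHES and struct my_filter *f
 * are placeholder names, the pattern follows e.g. cls_basic:
 *
 *	struct tcf_ematch_tree t;
 *	int err;
 *
 *	err = tcf_em_tree_validate(tp, tb[TCA_MYCLS_EMATCHES], &t);
 *	if (err < 0)
 *		return err;
 *
 *	... validate the remaining attributes ...
 *
 *	tcf_em_tree_change(tp, &f->ematches, &t);
 */
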
/**
 * tcf_em_tree_destroy - destroy an ematch tree
 *
 * @tp: classifier kind handle
 * @tree: ematch tree to be deleted
 *
 * This function destroys an ematch tree previously created by
 * tcf_em_tree_validate()/tcf_em_tree_change(). You must ensure that
 * the ematch tree is not in use before calling this function.
 */
void tcf_em_tree_destroy(struct tcf_proto *tp, struct tcf_ematch_tree *tree)
{
	int i;

	if (tree->matches == NULL)
		return;

	for (i = 0; i < tree->hdr.nmatches; i++) {
		struct tcf_ematch *em = tcf_em_get_match(tree, i);

		if (em->ops) {
			if (em->ops->destroy)
				em->ops->destroy(tp, em);
			else if (!tcf_em_is_simple(em))
				kfree((void *) em->data);
			module_put(em->ops->owner);
		}
	}

	tree->hdr.nmatches = 0;
	kfree(tree->matches);
	tree->matches = NULL;
}
EXPORT_SYMBOL(tcf_em_tree_destroy);
/**
 * tcf_em_tree_dump - dump ematch tree into a rtnl message
 *
 * @skb: skb holding the rtnl message
 * @tree: ematch tree to be dumped
 * @tlv: TLV type to be used to encapsulate the tree
 *
 * This function dumps an ematch tree into a rtnl message. It is valid to
 * call this function while the ematch tree is in use.
 *
 * Returns -1 if the skb tailroom is insufficient.
 */
int tcf_em_tree_dump(struct sk_buff *skb, struct tcf_ematch_tree *tree, int tlv)
{
	int i;
	u8 *tail;
	struct nlattr *top_start;
	struct nlattr *list_start;

	top_start = nla_nest_start(skb, tlv);
	if (top_start == NULL)
		goto nla_put_failure;

	NLA_PUT(skb, TCA_EMATCH_TREE_HDR, sizeof(tree->hdr), &tree->hdr);

	list_start = nla_nest_start(skb, TCA_EMATCH_TREE_LIST);
	if (list_start == NULL)
		goto nla_put_failure;

	tail = skb_tail_pointer(skb);
	for (i = 0; i < tree->hdr.nmatches; i++) {
		struct nlattr *match_start = (struct nlattr *)tail;
		struct tcf_ematch *em = tcf_em_get_match(tree, i);
		struct tcf_ematch_hdr em_hdr = {
			.kind = em->ops ? em->ops->kind : TCF_EM_CONTAINER,
			.matchid = em->matchid,
			.flags = em->flags
		};

		NLA_PUT(skb, i + 1, sizeof(em_hdr), &em_hdr);

		if (em->ops && em->ops->dump) {
			if (em->ops->dump(skb, em) < 0)
				goto nla_put_failure;
		} else if (tcf_em_is_container(em) || tcf_em_is_simple(em)) {
			u32 u = em->data;
			nla_put_nohdr(skb, sizeof(u), &u);
		} else if (em->datalen > 0)
			nla_put_nohdr(skb, em->datalen, (void *) em->data);

		tail = skb_tail_pointer(skb);
		match_start->nla_len = tail - (u8 *)match_start;
	}

	nla_nest_end(skb, list_start);
	nla_nest_end(skb, top_start);

	return 0;

nla_put_failure:
	return -1;
}
EXPORT_SYMBOL(tcf_em_tree_dump);
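
/* A rough sketch of the dump side in a classifier's dump() callback,
 * again with TCA_MYCLS_EMATCHES and f as placeholder names:
 *
 *	if (tcf_em_tree_dump(skb, &f->ematches, TCA_MYCLS_EMATCHES) < 0)
 *		goto nla_put_failure;
 */
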
static inline int tcf_em_match(struct sk_buff *skb, struct tcf_ematch *em,
			       struct tcf_pkt_info *info)
{
	int r = em->ops->match(skb, em, info);

	return tcf_em_is_inverted(em) ? !r : r;
}

/* Do not use this function directly, use tcf_em_tree_match instead */
int __tcf_em_tree_match(struct sk_buff *skb, struct tcf_ematch_tree *tree,
			struct tcf_pkt_info *info)
{
	int stackp = 0, match_idx = 0, res = 0;
	struct tcf_ematch *cur_match;
	int stack[CONFIG_NET_EMATCH_STACK];

proceed:
	while (match_idx < tree->hdr.nmatches) {
		cur_match = tcf_em_get_match(tree, match_idx);

		if (tcf_em_is_container(cur_match)) {
			if (unlikely(stackp >= CONFIG_NET_EMATCH_STACK))
				goto stack_overflow;

			stack[stackp++] = match_idx;
			match_idx = cur_match->data;
			goto proceed;
		}

		res = tcf_em_match(skb, cur_match, info);
		if (tcf_em_early_end(cur_match, res))
			break;

		match_idx++;
	}

pop_stack:
	if (stackp > 0) {
		match_idx = stack[--stackp];
		cur_match = tcf_em_get_match(tree, match_idx);

		if (tcf_em_early_end(cur_match, res))
			goto pop_stack;
		else {
			match_idx++;
			goto proceed;
		}
	}

	return res;

stack_overflow:
	if (net_ratelimit())
		printk("Local stack overflow, increase NET_EMATCH_STACK\n");
	return -1;
}
EXPORT_SYMBOL(__tcf_em_tree_match);
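
/* A rough sketch of the matching side as seen from a classifier's
 * classify() path; tcf_em_tree_match() is the inline wrapper from
 * net/pkt_cls.h that only walks non-empty trees, f is a placeholder
 * filter and a NULL tcf_pkt_info is valid (as used by cls_basic):
 *
 *	if (tcf_em_tree_match(skb, &f->ematches, NULL))
 *		... the ematch expression matched, accept this filter ...
 */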