/*
 * net/sched/cls_tcindex.c	Packet classifier for skb->tc_index
 *
 * Written 1998,1999 by Werner Almesberger, EPFL ICA
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/skbuff.h>
#include <linux/errno.h>
#include <net/act_api.h>
#include <net/netlink.h>
#include <net/pkt_cls.h>

/*
 * Not quite sure if we need all the xchgs Alexey uses when accessing things.
 * Can always add them later ... :)
 */

/*
 * Passing parameters to the root seems to be done more awkwardly than really
 * necessary. At least, u32 doesn't seem to use such dirty hacks. To be
 * verified. FIXME.
 */

#define PERFECT_HASH_THRESHOLD	64	/* use perfect hash if not bigger */
#define DEFAULT_HASH_SIZE	64	/* optimized for diffserv */

#define PRIV(tp)	((struct tcindex_data *) (tp)->root)

struct tcindex_filter_result {
        struct tcf_exts exts;
        struct tcf_result res;
};

struct tcindex_filter {
        u16 key;
        struct tcindex_filter_result result;
        struct tcindex_filter *next;
};

struct tcindex_data {
        struct tcindex_filter_result *perfect; /* perfect hash; NULL if none */
        struct tcindex_filter **h;      /* imperfect hash; only used if !perfect;
                                           NULL if unused */
        u16 mask;               /* AND key with mask */
        int shift;              /* shift ANDed key to the right */
        int hash;               /* hash table size; 0 if undefined */
        int alloc_hash;         /* allocated size */
        int fall_through;       /* 0: only classify if explicit match */
};

static struct tcf_ext_map tcindex_ext_map = {
        .police = TCA_TCINDEX_POLICE,
        .action = TCA_TCINDEX_ACT
};
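
/*
 * A result slot counts as occupied once it has either a classid or
 * extensions (actions/policing) attached to it.
 */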
static inline int
tcindex_filter_is_set(struct tcindex_filter_result *r)
{
        return tcf_exts_is_predicative(&r->exts) || r->res.classid;
}
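
/*
 * Look up the filter result for a key: with a perfect hash the key indexes
 * the result array directly, otherwise the matching imperfect-hash chain is
 * searched.  Returns NULL if nothing is set for this key.
 */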
static struct tcindex_filter_result *
tcindex_lookup(struct tcindex_data *p, u16 key)
{
        struct tcindex_filter *f;

        if (p->perfect)
                return tcindex_filter_is_set(p->perfect + key) ?
                        p->perfect + key : NULL;
        else if (p->h) {
                for (f = p->h[key % p->hash]; f; f = f->next)
                        if (f->key == key)
                                return &f->result;
        }

        return NULL;
}
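
/*
 * Classify a packet: derive the key from skb->tc_index with the configured
 * mask and shift.  On a miss, fail unless fall_through is set, in which case
 * a classid is synthesized from the qdisc handle and the key.
 */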
static int tcindex_classify(struct sk_buff *skb, struct tcf_proto *tp,
                            struct tcf_result *res)
{
        struct tcindex_data *p = PRIV(tp);
        struct tcindex_filter_result *f;
        int key = (skb->tc_index & p->mask) >> p->shift;

        pr_debug("tcindex_classify(skb %p,tp %p,res %p),p %p\n",
                 skb, tp, res, p);

        f = tcindex_lookup(p, key);
        if (!f) {
                if (!p->fall_through)
                        return -1;
                res->classid = TC_H_MAKE(TC_H_MAJ(tp->q->handle), key);
                res->class = 0;
                pr_debug("alg 0x%x\n", res->classid);
                return 0;
        }
        *res = f->res;
        pr_debug("map 0x%x\n", res->classid);

        return tcf_exts_exec(skb, &f->exts, res);
}
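
/*
 * Map a filter handle (the tc_index key) to an internal filter reference.
 * Returns 0 if the handle is out of range or no filter is set for it.
 */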
static unsigned long tcindex_get(struct tcf_proto *tp, u32 handle)
{
        struct tcindex_data *p = PRIV(tp);
        struct tcindex_filter_result *r;

        pr_debug("tcindex_get(tp %p,handle 0x%08x)\n", tp, handle);
        if (p->perfect && handle >= p->alloc_hash)
                return 0;
        r = tcindex_lookup(p, handle);
        return r && tcindex_filter_is_set(r) ? (unsigned long) r : 0UL;
}

static void tcindex_put(struct tcf_proto *tp, unsigned long f)
{
        pr_debug("tcindex_put(tp %p,f 0x%lx)\n", tp, f);
}
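
/*
 * Allocate the per-tcf_proto state and set the defaults: match the whole
 * tc_index (mask 0xffff, shift 0), DEFAULT_HASH_SIZE buckets, fall_through
 * enabled.
 */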
static int tcindex_init(struct tcf_proto *tp)
{
        struct tcindex_data *p;

        pr_debug("tcindex_init(tp %p)\n", tp);
        p = kzalloc(sizeof(struct tcindex_data), GFP_KERNEL);
        if (!p)
                return -ENOMEM;

        p->mask = 0xffff;
        p->hash = DEFAULT_HASH_SIZE;
        p->fall_through = 1;

        tp->root = p;
        return 0;
}
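
/*
 * Remove the filter behind @arg.  In a perfect hash the slot itself stays
 * allocated and only its class binding and extensions are released; in the
 * imperfect hash the matching entry is unlinked from its chain (under the
 * tree lock when @lock is set) and freed.
 */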
static int
__tcindex_delete(struct tcf_proto *tp, unsigned long arg, int lock)
{
        struct tcindex_data *p = PRIV(tp);
        struct tcindex_filter_result *r = (struct tcindex_filter_result *) arg;
        struct tcindex_filter *f = NULL;

        pr_debug("tcindex_delete(tp %p,arg 0x%lx),p %p,f %p\n", tp, arg, p, f);
        if (p->perfect) {
                if (!r->res.class)
                        return -ENOENT;
        } else {
                int i;
                struct tcindex_filter **walk = NULL;

                for (i = 0; i < p->hash; i++)
                        for (walk = p->h+i; *walk; walk = &(*walk)->next)
                                if (&(*walk)->result == r)
                                        goto found;
                return -ENOENT;

found:
                f = *walk;
                if (lock)
                        tcf_tree_lock(tp);
                *walk = f->next;
                if (lock)
                        tcf_tree_unlock(tp);
        }
        tcf_unbind_filter(tp, &r->res);
        tcf_exts_destroy(tp, &r->exts);
        kfree(f);
        return 0;
}

static int tcindex_delete(struct tcf_proto *tp, unsigned long arg)
{
        return __tcindex_delete(tp, arg, 1);
}
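
/*
 * A perfect hash is usable only if the table has a slot for every value the
 * masked and shifted key can take, i.e. hash > (mask >> shift).
 */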
static inline int
valid_perfect_hash(struct tcindex_data *p)
{
        return p->hash > (p->mask >> p->shift);
}
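
/*
 * Validate and apply a filter change.  The configuration is copied into a
 * scratch tcindex_data, the attributes are checked against it, the perfect
 * or imperfect hash table is allocated on first use, and only then is the
 * result committed back under the tree lock.
 */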
static int
tcindex_set_parms(struct tcf_proto *tp, unsigned long base, u32 handle,
                  struct tcindex_data *p, struct tcindex_filter_result *r,
                  struct rtattr **tb, struct rtattr *est)
{
        int err, balloc = 0;
        struct tcindex_filter_result new_filter_result, *old_r = r;
        struct tcindex_filter_result cr;
        struct tcindex_data cp;
        struct tcindex_filter *f = NULL; /* make gcc behave */
        struct tcf_exts e;

        err = tcf_exts_validate(tp, tb, est, &e, &tcindex_ext_map);
        if (err < 0)
                return err;

        memcpy(&cp, p, sizeof(cp));
        memset(&new_filter_result, 0, sizeof(new_filter_result));

        if (old_r)
                memcpy(&cr, r, sizeof(cr));
        else
                memset(&cr, 0, sizeof(cr));

        err = -EINVAL;
        if (tb[TCA_TCINDEX_HASH-1]) {
                if (RTA_PAYLOAD(tb[TCA_TCINDEX_HASH-1]) < sizeof(u32))
                        goto errout;
                cp.hash = *(u32 *) RTA_DATA(tb[TCA_TCINDEX_HASH-1]);
        }

        if (tb[TCA_TCINDEX_MASK-1]) {
                if (RTA_PAYLOAD(tb[TCA_TCINDEX_MASK-1]) < sizeof(u16))
                        goto errout;
                cp.mask = *(u16 *) RTA_DATA(tb[TCA_TCINDEX_MASK-1]);
        }

        if (tb[TCA_TCINDEX_SHIFT-1]) {
                if (RTA_PAYLOAD(tb[TCA_TCINDEX_SHIFT-1]) < sizeof(int))
                        goto errout;
                cp.shift = *(int *) RTA_DATA(tb[TCA_TCINDEX_SHIFT-1]);
        }

        err = -EBUSY;
        /* Hash already allocated, make sure that we still meet the
         * requirements for the allocated hash.
         */
        if (cp.perfect) {
                if (!valid_perfect_hash(&cp) ||
                    cp.hash > cp.alloc_hash)
                        goto errout;
        } else if (cp.h && cp.hash != cp.alloc_hash)
                goto errout;

        err = -EINVAL;
        if (tb[TCA_TCINDEX_FALL_THROUGH-1]) {
                if (RTA_PAYLOAD(tb[TCA_TCINDEX_FALL_THROUGH-1]) < sizeof(u32))
                        goto errout;
                cp.fall_through =
                        *(u32 *) RTA_DATA(tb[TCA_TCINDEX_FALL_THROUGH-1]);
        }

        if (!cp.hash) {
                /* Hash not specified, use perfect hash if the upper limit
                 * of the hashing index is below the threshold.
                 */
                if ((cp.mask >> cp.shift) < PERFECT_HASH_THRESHOLD)
                        cp.hash = (cp.mask >> cp.shift)+1;
                else
                        cp.hash = DEFAULT_HASH_SIZE;
        }

        if (!cp.perfect && !cp.h)
                cp.alloc_hash = cp.hash;

        /* Note: this could be as restrictive as if (handle & ~(mask >> shift))
         * but then, we'd fail handles that may become valid after some future
         * mask change. While this is extremely unlikely to ever matter,
         * the check below is safer (and also more backwards-compatible).
         */
        if (cp.perfect || valid_perfect_hash(&cp))
                if (handle >= cp.alloc_hash)
                        goto errout;

        err = -ENOMEM;
        if (!cp.perfect && !cp.h) {
                if (valid_perfect_hash(&cp)) {
                        cp.perfect = kcalloc(cp.hash, sizeof(*r), GFP_KERNEL);
                        if (!cp.perfect)
                                goto errout;
                        balloc = 1;
                } else {
                        cp.h = kcalloc(cp.hash, sizeof(f), GFP_KERNEL);
                        if (!cp.h)
                                goto errout;
                        balloc = 2;
                }
        }

        if (cp.perfect)
                r = cp.perfect + handle;
        else
                r = tcindex_lookup(&cp, handle) ? : &new_filter_result;

        if (r == &new_filter_result) {
                f = kzalloc(sizeof(*f), GFP_KERNEL);
                if (!f)
                        goto errout_alloc;
        }

        if (tb[TCA_TCINDEX_CLASSID-1]) {
                cr.res.classid = *(u32 *) RTA_DATA(tb[TCA_TCINDEX_CLASSID-1]);
                tcf_bind_filter(tp, &cr.res, base);
        }

        tcf_exts_change(tp, &cr.exts, &e);

        tcf_tree_lock(tp);
        if (old_r && old_r != r)
                memset(old_r, 0, sizeof(*old_r));

        memcpy(p, &cp, sizeof(cp));
        memcpy(r, &cr, sizeof(cr));

        if (r == &new_filter_result) {
                struct tcindex_filter **fp;

                f->key = handle;
                f->result = new_filter_result;
                f->next = NULL;
                for (fp = p->h+(handle % p->hash); *fp; fp = &(*fp)->next)
                        /* nothing */;
                *fp = f;
        }
        tcf_tree_unlock(tp);

        return 0;

errout_alloc:
        if (balloc == 1)
                kfree(cp.perfect);
        else if (balloc == 2)
                kfree(cp.h);
errout:
        tcf_exts_destroy(tp, &e);
        return err;
}
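
/*
 * Entry point for filter changes from user space: parse the nested
 * TCA_OPTIONS attributes and hand them to tcindex_set_parms() together with
 * an optional rate estimator attribute.
 */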
static int
tcindex_change(struct tcf_proto *tp, unsigned long base, u32 handle,
               struct rtattr **tca, unsigned long *arg)
{
        struct rtattr *opt = tca[TCA_OPTIONS-1];
        struct rtattr *tb[TCA_TCINDEX_MAX];
        struct tcindex_data *p = PRIV(tp);
        struct tcindex_filter_result *r = (struct tcindex_filter_result *) *arg;

        pr_debug("tcindex_change(tp %p,handle 0x%08x,tca %p,arg %p),opt %p,"
                 "p %p,r %p,*arg 0x%lx\n",
                 tp, handle, tca, arg, opt, p, r, arg ? *arg : 0L);

        if (!opt)
                return 0;

        if (rtattr_parse_nested(tb, TCA_TCINDEX_MAX, opt) < 0)
                return -EINVAL;

        return tcindex_set_parms(tp, base, handle, p, r, tb, tca[TCA_RATE-1]);
}
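
/*
 * Walk all configured filters: every occupied slot of the perfect hash, then
 * every entry of the imperfect hash chains, honouring walker->skip and
 * stopping as soon as the callback returns a negative value.
 */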
static void tcindex_walk(struct tcf_proto *tp, struct tcf_walker *walker)
{
        struct tcindex_data *p = PRIV(tp);
        struct tcindex_filter *f, *next;
        int i;

        pr_debug("tcindex_walk(tp %p,walker %p),p %p\n", tp, walker, p);
        if (p->perfect) {
                for (i = 0; i < p->hash; i++) {
                        if (!p->perfect[i].res.class)
                                continue;
                        if (walker->count >= walker->skip) {
                                if (walker->fn(tp,
                                    (unsigned long) (p->perfect+i), walker)
                                     < 0) {
                                        walker->stop = 1;
                                        return;
                                }
                        }
                        walker->count++;
                }
        }
        if (!p->h)
                return;
        for (i = 0; i < p->hash; i++) {
                for (f = p->h[i]; f; f = next) {
                        next = f->next;
                        if (walker->count >= walker->skip) {
                                if (walker->fn(tp, (unsigned long) &f->result,
                                    walker) < 0) {
                                        walker->stop = 1;
                                        return;
                                }
                        }
                        walker->count++;
                }
        }
}

static int tcindex_destroy_element(struct tcf_proto *tp,
                                   unsigned long arg, struct tcf_walker *walker)
{
        return __tcindex_delete(tp, arg, 0);
}
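
/*
 * Tear down the whole classifier: delete every filter via the walker (using
 * __tcindex_delete() with locking disabled), then free both hash tables and
 * the per-protocol data.
 */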
static void tcindex_destroy(struct tcf_proto *tp)
{
        struct tcindex_data *p = PRIV(tp);
        struct tcf_walker walker;

        pr_debug("tcindex_destroy(tp %p),p %p\n", tp, p);
        walker.count = 0;
        walker.skip = 0;
        walker.fn = &tcindex_destroy_element;
        tcindex_walk(tp, &walker);
        kfree(p->perfect);
        kfree(p->h);
        kfree(p);
        tp->root = NULL;
}
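
/*
 * Dump either the per-protocol parameters (fh == 0: hash size, mask, shift,
 * fall_through) or a single filter: its handle is the array index in the
 * perfect hash or the chain key otherwise, followed by its classid and
 * extensions.
 */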
static int tcindex_dump(struct tcf_proto *tp, unsigned long fh,
                        struct sk_buff *skb, struct tcmsg *t)
{
        struct tcindex_data *p = PRIV(tp);
        struct tcindex_filter_result *r = (struct tcindex_filter_result *) fh;
        unsigned char *b = skb_tail_pointer(skb);
        struct rtattr *rta;

        pr_debug("tcindex_dump(tp %p,fh 0x%lx,skb %p,t %p),p %p,r %p,b %p\n",
                 tp, fh, skb, t, p, r, b);
        pr_debug("p->perfect %p p->h %p\n", p->perfect, p->h);
        rta = (struct rtattr *) b;
        RTA_PUT(skb, TCA_OPTIONS, 0, NULL);
        if (!fh) {
                t->tcm_handle = ~0; /* whatever ... */
                RTA_PUT(skb, TCA_TCINDEX_HASH, sizeof(p->hash), &p->hash);
                RTA_PUT(skb, TCA_TCINDEX_MASK, sizeof(p->mask), &p->mask);
                RTA_PUT(skb, TCA_TCINDEX_SHIFT, sizeof(p->shift), &p->shift);
                RTA_PUT(skb, TCA_TCINDEX_FALL_THROUGH, sizeof(p->fall_through),
                        &p->fall_through);
                rta->rta_len = skb_tail_pointer(skb) - b;
        } else {
                if (p->perfect) {
                        t->tcm_handle = r-p->perfect;
                } else {
                        struct tcindex_filter *f;
                        int i;

                        t->tcm_handle = 0;
                        for (i = 0; !t->tcm_handle && i < p->hash; i++) {
                                for (f = p->h[i]; !t->tcm_handle && f;
                                     f = f->next) {
                                        if (&f->result == r)
                                                t->tcm_handle = f->key;
                                }
                        }
                }
                pr_debug("handle = %d\n", t->tcm_handle);
                if (r->res.class)
                        RTA_PUT(skb, TCA_TCINDEX_CLASSID, 4, &r->res.classid);
                if (tcf_exts_dump(skb, &r->exts, &tcindex_ext_map) < 0)
                        goto rtattr_failure;
                rta->rta_len = skb_tail_pointer(skb) - b;
                if (tcf_exts_dump_stats(skb, &r->exts, &tcindex_ext_map) < 0)
                        goto rtattr_failure;
        }

        return skb->len;

rtattr_failure:
        nlmsg_trim(skb, b);
        return -1;
}

static struct tcf_proto_ops cls_tcindex_ops = {
        .next           = NULL,
        .kind           = "tcindex",
        .classify       = tcindex_classify,
        .init           = tcindex_init,
        .destroy        = tcindex_destroy,
        .get            = tcindex_get,
        .put            = tcindex_put,
        .change         = tcindex_change,
        .delete         = tcindex_delete,
        .walk           = tcindex_walk,
        .dump           = tcindex_dump,
        .owner          = THIS_MODULE,
};

static int __init init_tcindex(void)
{
        return register_tcf_proto_ops(&cls_tcindex_ops);
}

static void __exit exit_tcindex(void)
{
        unregister_tcf_proto_ops(&cls_tcindex_ops);
}

module_init(init_tcindex)
module_exit(exit_tcindex)
MODULE_LICENSE("GPL");