act_police.c

/*
 * net/sched/police.c	Input police filter.
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 *
 * Authors:	Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
 *		J Hadi Salim (action changes)
 */

#include <asm/uaccess.h>
#include <asm/system.h>
#include <linux/bitops.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/socket.h>
#include <linux/sockios.h>
#include <linux/in.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/rtnetlink.h>
#include <linux/init.h>
#include <net/sock.h>
#include <net/act_api.h>
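
/*
 * L2T()/L2T_P() translate a packet length into its cost in tokens, using
 * the rate table (struct qdisc_rate_table) supplied by userspace for the
 * committed rate (R_tab) and the peak rate (P_tab) respectively.
 */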
#define L2T(p,L)   ((p)->R_tab->data[(L)>>(p)->R_tab->rate.cell_log])
#define L2T_P(p,L) ((p)->P_tab->data[(L)>>(p)->P_tab->rate.cell_log])
#define PRIV(a) ((struct tcf_police *) (a)->priv)

/* use generic hash table */
#define MY_TAB_SIZE	16
#define MY_TAB_MASK	15
static u32 idx_gen;
static struct tcf_police *tcf_police_ht[MY_TAB_SIZE];
/* Policer hash table lock */
static DEFINE_RWLOCK(police_lock);

/* Each policer is serialized by its individual spinlock */

static __inline__ unsigned tcf_police_hash(u32 index)
{
	return index & 0xF;
}

static __inline__ struct tcf_police *tcf_police_lookup(u32 index)
{
	struct tcf_police *p;

	read_lock(&police_lock);
	for (p = tcf_police_ht[tcf_police_hash(index)]; p; p = p->next) {
		if (p->index == index)
			break;
	}
	read_unlock(&police_lock);

	return p;
}

#ifdef CONFIG_NET_CLS_ACT
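/*
 * Dump every installed policer for RTM_GETACTION/RTM_DELACTION requests.
 * cb->args[0] records how many entries have already been emitted so a
 * multi-part dump can resume where the previous skb filled up.
 */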
static int tcf_act_police_walker(struct sk_buff *skb, struct netlink_callback *cb,
				 int type, struct tc_action *a)
{
	struct tcf_police *p;
	int err = 0, index = -1, i = 0, s_i = 0, n_i = 0;
	struct rtattr *r;

	read_lock(&police_lock);

	s_i = cb->args[0];

	for (i = 0; i < MY_TAB_SIZE; i++) {
		p = tcf_police_ht[tcf_police_hash(i)];

		for (; p; p = p->next) {
			index++;
			if (index < s_i)
				continue;
			a->priv = p;
			a->order = index;
			r = (struct rtattr *)skb->tail;
			RTA_PUT(skb, a->order, 0, NULL);
			if (type == RTM_DELACTION)
				err = tcf_action_dump_1(skb, a, 0, 1);
			else
				err = tcf_action_dump_1(skb, a, 0, 0);
			if (err < 0) {
				index--;
				skb_trim(skb, (u8 *)r - skb->data);
				goto done;
			}
			r->rta_len = skb->tail - (u8 *)r;
			n_i++;
		}
	}
done:
	read_unlock(&police_lock);
	if (n_i)
		cb->args[0] += n_i;
	return n_i;

rtattr_failure:
	skb_trim(skb, (u8 *)r - skb->data);
	goto done;
}

static inline int
tcf_act_police_hash_search(struct tc_action *a, u32 index)
{
	struct tcf_police *p = tcf_police_lookup(index);

	if (p != NULL) {
		a->priv = p;
		return 1;
	} else {
		return 0;
	}
}
#endif
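
/* Pick a non-zero index that is not currently in use by any policer. */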
static inline u32 tcf_police_new_index(void)
{
	do {
		if (++idx_gen == 0)
			idx_gen = 1;
	} while (tcf_police_lookup(idx_gen));

	return idx_gen;
}
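
/*
 * Unlink a policer from the hash table and free it together with its
 * rate tables and (if configured) its rate estimator.
 */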
void tcf_police_destroy(struct tcf_police *p)
{
	unsigned h = tcf_police_hash(p->index);
	struct tcf_police **p1p;

	for (p1p = &tcf_police_ht[h]; *p1p; p1p = &(*p1p)->next) {
		if (*p1p == p) {
			write_lock_bh(&police_lock);
			*p1p = p->next;
			write_unlock_bh(&police_lock);
#ifdef CONFIG_NET_ESTIMATOR
			gen_kill_estimator(&p->bstats, &p->rate_est);
#endif
			if (p->R_tab)
				qdisc_put_rtab(p->R_tab);
			if (p->P_tab)
				qdisc_put_rtab(p->P_tab);
			kfree(p);
			return;
		}
	}
	BUG_TRAP(0);
}

#ifdef CONFIG_NET_CLS_ACT
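/*
 * Create a new policer, or bind to / update an existing one, from the
 * TCA_POLICE_* netlink attributes.  Returns ACT_P_CREATED when a new
 * instance was allocated, 0 when an existing one was reused.
 */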
static int tcf_act_police_locate(struct rtattr *rta, struct rtattr *est,
				 struct tc_action *a, int ovr, int bind)
{
	unsigned h;
	int ret = 0, err;
	struct rtattr *tb[TCA_POLICE_MAX];
	struct tc_police *parm;
	struct tcf_police *p;
	struct qdisc_rate_table *R_tab = NULL, *P_tab = NULL;

	if (rta == NULL || rtattr_parse_nested(tb, TCA_POLICE_MAX, rta) < 0)
		return -EINVAL;

	if (tb[TCA_POLICE_TBF-1] == NULL ||
	    RTA_PAYLOAD(tb[TCA_POLICE_TBF-1]) != sizeof(*parm))
		return -EINVAL;
	parm = RTA_DATA(tb[TCA_POLICE_TBF-1]);

	if (tb[TCA_POLICE_RESULT-1] != NULL &&
	    RTA_PAYLOAD(tb[TCA_POLICE_RESULT-1]) != sizeof(u32))
		return -EINVAL;

	if (parm->index && (p = tcf_police_lookup(parm->index)) != NULL) {
		a->priv = p;
		if (bind) {
			p->bindcnt += 1;
			p->refcnt += 1;
		}
		if (ovr)
			goto override;
		return ret;
	}

	p = kmalloc(sizeof(*p), GFP_KERNEL);
	if (p == NULL)
		return -ENOMEM;
	memset(p, 0, sizeof(*p));

	ret = ACT_P_CREATED;
	p->refcnt = 1;
	spin_lock_init(&p->lock);
	p->stats_lock = &p->lock;
	if (bind)
		p->bindcnt = 1;

override:
	if (parm->rate.rate) {
		err = -ENOMEM;
		R_tab = qdisc_get_rtab(&parm->rate, tb[TCA_POLICE_RATE-1]);
		if (R_tab == NULL)
			goto failure;
		if (parm->peakrate.rate) {
			P_tab = qdisc_get_rtab(&parm->peakrate,
					       tb[TCA_POLICE_PEAKRATE-1]);
			if (P_tab == NULL) {
				qdisc_put_rtab(R_tab);
				goto failure;
			}
		}
	}
	/* No failure allowed after this point */
	spin_lock_bh(&p->lock);
	if (R_tab != NULL) {
		qdisc_put_rtab(p->R_tab);
		p->R_tab = R_tab;
	}
	if (P_tab != NULL) {
		qdisc_put_rtab(p->P_tab);
		p->P_tab = P_tab;
	}

	if (tb[TCA_POLICE_RESULT-1])
		p->result = *(u32*)RTA_DATA(tb[TCA_POLICE_RESULT-1]);
	p->toks = p->burst = parm->burst;
	p->mtu = parm->mtu;
	if (p->mtu == 0) {
		p->mtu = ~0;
		if (p->R_tab)
			p->mtu = 255<<p->R_tab->rate.cell_log;
	}
	if (p->P_tab)
		p->ptoks = L2T_P(p, p->mtu);
	p->action = parm->action;

#ifdef CONFIG_NET_ESTIMATOR
	if (tb[TCA_POLICE_AVRATE-1])
		p->ewma_rate = *(u32*)RTA_DATA(tb[TCA_POLICE_AVRATE-1]);
	if (est)
		gen_replace_estimator(&p->bstats, &p->rate_est,
				      p->stats_lock, est);
#endif

	spin_unlock_bh(&p->lock);
	if (ret != ACT_P_CREATED)
		return ret;

	PSCHED_GET_TIME(p->t_c);
	p->index = parm->index ? : tcf_police_new_index();
	h = tcf_police_hash(p->index);
	write_lock_bh(&police_lock);
	p->next = tcf_police_ht[h];
	tcf_police_ht[h] = p;
	write_unlock_bh(&police_lock);

	a->priv = p;
	return ret;

failure:
	if (ret == ACT_P_CREATED)
		kfree(p);
	return err;
}

static int tcf_act_police_cleanup(struct tc_action *a, int bind)
{
	struct tcf_police *p = PRIV(a);

	if (p != NULL)
		return tcf_police_release(p, bind);
	return 0;
}
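
/*
 * The per-packet hook: a dual token bucket (TBF) policer.  Tokens accrue
 * with elapsed time, capped at the configured burst (and, for the peak
 * rate bucket, at the cost of one MTU-sized packet).  A packet that fits
 * within both buckets pays its token cost and gets p->result (set from
 * TCA_POLICE_RESULT, 0 == TC_ACT_OK by default); anything larger than the
 * MTU or arriving over rate counts as an overlimit and gets p->action.
 */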
static int tcf_act_police(struct sk_buff *skb, struct tc_action *a,
			  struct tcf_result *res)
{
	psched_time_t now;
	struct tcf_police *p = PRIV(a);
	long toks;
	long ptoks = 0;

	spin_lock(&p->lock);

	p->bstats.bytes += skb->len;
	p->bstats.packets++;

#ifdef CONFIG_NET_ESTIMATOR
	if (p->ewma_rate && p->rate_est.bps >= p->ewma_rate) {
		p->qstats.overlimits++;
		spin_unlock(&p->lock);
		return p->action;
	}
#endif

	if (skb->len <= p->mtu) {
		if (p->R_tab == NULL) {
			spin_unlock(&p->lock);
			return p->result;
		}

		PSCHED_GET_TIME(now);

		toks = PSCHED_TDIFF_SAFE(now, p->t_c, p->burst);

		if (p->P_tab) {
			ptoks = toks + p->ptoks;
			if (ptoks > (long)L2T_P(p, p->mtu))
				ptoks = (long)L2T_P(p, p->mtu);
			ptoks -= L2T_P(p, skb->len);
		}
		toks += p->toks;
		if (toks > (long)p->burst)
			toks = p->burst;
		toks -= L2T(p, skb->len);

		if ((toks|ptoks) >= 0) {
			p->t_c = now;
			p->toks = toks;
			p->ptoks = ptoks;
			spin_unlock(&p->lock);
			return p->result;
		}
	}

	p->qstats.overlimits++;
	spin_unlock(&p->lock);
	return p->action;
}
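
/*
 * Fill in a tc_police structure and the optional RESULT/AVRATE attributes
 * so userspace can read the policer's configuration back.
 */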
static int
tcf_act_police_dump(struct sk_buff *skb, struct tc_action *a, int bind, int ref)
{
	unsigned char *b = skb->tail;
	struct tc_police opt;
	struct tcf_police *p = PRIV(a);

	opt.index = p->index;
	opt.action = p->action;
	opt.mtu = p->mtu;
	opt.burst = p->burst;
	opt.refcnt = p->refcnt - ref;
	opt.bindcnt = p->bindcnt - bind;
	if (p->R_tab)
		opt.rate = p->R_tab->rate;
	else
		memset(&opt.rate, 0, sizeof(opt.rate));
	if (p->P_tab)
		opt.peakrate = p->P_tab->rate;
	else
		memset(&opt.peakrate, 0, sizeof(opt.peakrate));
	RTA_PUT(skb, TCA_POLICE_TBF, sizeof(opt), &opt);
	if (p->result)
		RTA_PUT(skb, TCA_POLICE_RESULT, sizeof(int), &p->result);
#ifdef CONFIG_NET_ESTIMATOR
	if (p->ewma_rate)
		RTA_PUT(skb, TCA_POLICE_AVRATE, 4, &p->ewma_rate);
#endif
	return skb->len;

rtattr_failure:
	skb_trim(skb, b - skb->data);
	return -1;
}

MODULE_AUTHOR("Alexey Kuznetsov");
MODULE_DESCRIPTION("Policing actions");
MODULE_LICENSE("GPL");
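
/*
 * Register the policer with the generic action API so it can be attached
 * to classifiers as an action named "police".  With iproute2 this is
 * typically configured with something along the lines of (exact syntax
 * depends on the tc version):
 *
 *	tc filter add dev eth0 parent 1: protocol ip u32 \
 *		match ip src 0.0.0.0/0 \
 *		action police rate 1mbit burst 10k drop
 */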
static struct tc_action_ops act_police_ops = {
	.kind		= "police",
	.type		= TCA_ID_POLICE,
	.capab		= TCA_CAP_NONE,
	.owner		= THIS_MODULE,
	.act		= tcf_act_police,
	.dump		= tcf_act_police_dump,
	.cleanup	= tcf_act_police_cleanup,
	.lookup		= tcf_act_police_hash_search,
	.init		= tcf_act_police_locate,
	.walk		= tcf_act_police_walker
};

static int __init
police_init_module(void)
{
	return tcf_register_action(&act_police_ops);
}

static void __exit
police_cleanup_module(void)
{
	tcf_unregister_action(&act_police_ops);
}

module_init(police_init_module);
module_exit(police_cleanup_module);

#else /* CONFIG_NET_CLS_ACT */
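
/*
 * Legacy entry points used when CONFIG_NET_CLS_ACT is not set: classifiers
 * call tcf_police_locate()/tcf_police() directly instead of going through
 * the action API.  Build a policer from the TCA_POLICE_* attributes, or
 * take a reference on an existing one when a matching index is given.
 */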
struct tcf_police *tcf_police_locate(struct rtattr *rta, struct rtattr *est)
{
	unsigned h;
	struct tcf_police *p;
	struct rtattr *tb[TCA_POLICE_MAX];
	struct tc_police *parm;

	if (rtattr_parse_nested(tb, TCA_POLICE_MAX, rta) < 0)
		return NULL;

	if (tb[TCA_POLICE_TBF-1] == NULL ||
	    RTA_PAYLOAD(tb[TCA_POLICE_TBF-1]) != sizeof(*parm))
		return NULL;

	parm = RTA_DATA(tb[TCA_POLICE_TBF-1]);

	if (parm->index && (p = tcf_police_lookup(parm->index)) != NULL) {
		p->refcnt++;
		return p;
	}

	p = kmalloc(sizeof(*p), GFP_KERNEL);
	if (p == NULL)
		return NULL;
	memset(p, 0, sizeof(*p));

	p->refcnt = 1;
	spin_lock_init(&p->lock);
	p->stats_lock = &p->lock;
	if (parm->rate.rate) {
		p->R_tab = qdisc_get_rtab(&parm->rate, tb[TCA_POLICE_RATE-1]);
		if (p->R_tab == NULL)
			goto failure;
		if (parm->peakrate.rate) {
			p->P_tab = qdisc_get_rtab(&parm->peakrate,
						  tb[TCA_POLICE_PEAKRATE-1]);
			if (p->P_tab == NULL)
				goto failure;
		}
	}
	if (tb[TCA_POLICE_RESULT-1]) {
		if (RTA_PAYLOAD(tb[TCA_POLICE_RESULT-1]) != sizeof(u32))
			goto failure;
		p->result = *(u32*)RTA_DATA(tb[TCA_POLICE_RESULT-1]);
	}
#ifdef CONFIG_NET_ESTIMATOR
	if (tb[TCA_POLICE_AVRATE-1]) {
		if (RTA_PAYLOAD(tb[TCA_POLICE_AVRATE-1]) != sizeof(u32))
			goto failure;
		p->ewma_rate = *(u32*)RTA_DATA(tb[TCA_POLICE_AVRATE-1]);
	}
#endif
	p->toks = p->burst = parm->burst;
	p->mtu = parm->mtu;
	if (p->mtu == 0) {
		p->mtu = ~0;
		if (p->R_tab)
			p->mtu = 255<<p->R_tab->rate.cell_log;
	}
	if (p->P_tab)
		p->ptoks = L2T_P(p, p->mtu);
	PSCHED_GET_TIME(p->t_c);
	p->index = parm->index ? : tcf_police_new_index();
	p->action = parm->action;
#ifdef CONFIG_NET_ESTIMATOR
	if (est)
		gen_new_estimator(&p->bstats, &p->rate_est, p->stats_lock, est);
#endif
	h = tcf_police_hash(p->index);
	write_lock_bh(&police_lock);
	p->next = tcf_police_ht[h];
	tcf_police_ht[h] = p;
	write_unlock_bh(&police_lock);
	return p;

failure:
	if (p->P_tab)
		qdisc_put_rtab(p->P_tab);
	if (p->R_tab)
		qdisc_put_rtab(p->R_tab);
	kfree(p);
	return NULL;
}
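
/*
 * Same dual token bucket check as tcf_act_police() above, exported for
 * classifiers that embed a policer directly.
 */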
int tcf_police(struct sk_buff *skb, struct tcf_police *p)
{
	psched_time_t now;
	long toks;
	long ptoks = 0;

	spin_lock(&p->lock);

	p->bstats.bytes += skb->len;
	p->bstats.packets++;

#ifdef CONFIG_NET_ESTIMATOR
	if (p->ewma_rate && p->rate_est.bps >= p->ewma_rate) {
		p->qstats.overlimits++;
		spin_unlock(&p->lock);
		return p->action;
	}
#endif

	if (skb->len <= p->mtu) {
		if (p->R_tab == NULL) {
			spin_unlock(&p->lock);
			return p->result;
		}

		PSCHED_GET_TIME(now);

		toks = PSCHED_TDIFF_SAFE(now, p->t_c, p->burst);

		if (p->P_tab) {
			ptoks = toks + p->ptoks;
			if (ptoks > (long)L2T_P(p, p->mtu))
				ptoks = (long)L2T_P(p, p->mtu);
			ptoks -= L2T_P(p, skb->len);
		}
		toks += p->toks;
		if (toks > (long)p->burst)
			toks = p->burst;
		toks -= L2T(p, skb->len);

		if ((toks|ptoks) >= 0) {
			p->t_c = now;
			p->toks = toks;
			p->ptoks = ptoks;
			spin_unlock(&p->lock);
			return p->result;
		}
	}

	p->qstats.overlimits++;
	spin_unlock(&p->lock);
	return p->action;
}
EXPORT_SYMBOL(tcf_police);
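
/* Dump the policer's configuration (mirrors tcf_act_police_dump() above). */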
int tcf_police_dump(struct sk_buff *skb, struct tcf_police *p)
{
	unsigned char *b = skb->tail;
	struct tc_police opt;

	opt.index = p->index;
	opt.action = p->action;
	opt.mtu = p->mtu;
	opt.burst = p->burst;
	if (p->R_tab)
		opt.rate = p->R_tab->rate;
	else
		memset(&opt.rate, 0, sizeof(opt.rate));
	if (p->P_tab)
		opt.peakrate = p->P_tab->rate;
	else
		memset(&opt.peakrate, 0, sizeof(opt.peakrate));
	RTA_PUT(skb, TCA_POLICE_TBF, sizeof(opt), &opt);
	if (p->result)
		RTA_PUT(skb, TCA_POLICE_RESULT, sizeof(int), &p->result);
#ifdef CONFIG_NET_ESTIMATOR
	if (p->ewma_rate)
		RTA_PUT(skb, TCA_POLICE_AVRATE, 4, &p->ewma_rate);
#endif
	return skb->len;

rtattr_failure:
	skb_trim(skb, b - skb->data);
	return -1;
}
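
/*
 * Dump byte/packet counters, the rate estimate and overlimit counts using
 * the TCA_STATS2 format (with TCA_STATS/TCA_XSTATS compatibility).
 */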
int tcf_police_dump_stats(struct sk_buff *skb, struct tcf_police *p)
{
	struct gnet_dump d;

	if (gnet_stats_start_copy_compat(skb, TCA_STATS2, TCA_STATS,
					 TCA_XSTATS, p->stats_lock, &d) < 0)
		goto errout;

	if (gnet_stats_copy_basic(&d, &p->bstats) < 0 ||
#ifdef CONFIG_NET_ESTIMATOR
	    gnet_stats_copy_rate_est(&d, &p->rate_est) < 0 ||
#endif
	    gnet_stats_copy_queue(&d, &p->qstats) < 0)
		goto errout;

	if (gnet_stats_finish_copy(&d) < 0)
		goto errout;

	return 0;

errout:
	return -1;
}

#endif /* CONFIG_NET_CLS_ACT */