/* net/sched/police.c */
  1. /*
  2. * net/sched/police.c Input police filter.
  3. *
  4. * This program is free software; you can redistribute it and/or
  5. * modify it under the terms of the GNU General Public License
  6. * as published by the Free Software Foundation; either version
  7. * 2 of the License, or (at your option) any later version.
  8. *
  9. * Authors: Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
  10. * J Hadi Salim (action changes)
  11. */
  12. #include <asm/uaccess.h>
  13. #include <asm/system.h>
  14. #include <linux/bitops.h>
  15. #include <linux/config.h>
  16. #include <linux/module.h>
  17. #include <linux/types.h>
  18. #include <linux/kernel.h>
  19. #include <linux/sched.h>
  20. #include <linux/string.h>
  21. #include <linux/mm.h>
  22. #include <linux/socket.h>
  23. #include <linux/sockios.h>
  24. #include <linux/in.h>
  25. #include <linux/errno.h>
  26. #include <linux/interrupt.h>
  27. #include <linux/netdevice.h>
  28. #include <linux/skbuff.h>
  29. #include <linux/module.h>
  30. #include <linux/rtnetlink.h>
  31. #include <linux/init.h>
  32. #include <net/sock.h>
  33. #include <net/act_api.h>
  34. #define L2T(p,L) ((p)->R_tab->data[(L)>>(p)->R_tab->rate.cell_log])
  35. #define L2T_P(p,L) ((p)->P_tab->data[(L)>>(p)->P_tab->rate.cell_log])
  36. #define PRIV(a) ((struct tcf_police *) (a)->priv)
  37. /* use generic hash table */
  38. #define MY_TAB_SIZE 16
  39. #define MY_TAB_MASK 15
  40. static u32 idx_gen;
  41. static struct tcf_police *tcf_police_ht[MY_TAB_SIZE];
  42. /* Policer hash table lock */
  43. static DEFINE_RWLOCK(police_lock);
  44. /* Each policer is serialized by its individual spinlock */
  45. static __inline__ unsigned tcf_police_hash(u32 index)
  46. {
  47. return index&0xF;
  48. }
  49. static __inline__ struct tcf_police * tcf_police_lookup(u32 index)
  50. {
  51. struct tcf_police *p;
  52. read_lock(&police_lock);
  53. for (p = tcf_police_ht[tcf_police_hash(index)]; p; p = p->next) {
  54. if (p->index == index)
  55. break;
  56. }
  57. read_unlock(&police_lock);
  58. return p;
  59. }
  60. #ifdef CONFIG_NET_CLS_ACT
/* Walk every policer in the hash table and dump each one into @skb,
 * resuming from the entry offset saved in cb->args[0] by a previous
 * (truncated) pass.  For RTM_DELACTION the per-entry dump is the
 * "list for delete" variant.  Returns the number of entries emitted.
 */
static int tcf_generic_walker(struct sk_buff *skb, struct netlink_callback *cb,
                              int type, struct tc_action *a)
{
	struct tcf_police *p;
	int err = 0, index = -1, i = 0, s_i = 0, n_i = 0;
	struct rtattr *r;

	read_lock(&police_lock);

	s_i = cb->args[0];	/* entry offset to resume from */

	for (i = 0; i < MY_TAB_SIZE; i++) {
		p = tcf_police_ht[tcf_police_hash(i)];

		for (; p; p = p->next) {
			index++;
			if (index < s_i)
				continue;	/* dumped in an earlier pass */
			a->priv = p;
			a->order = index;
			/* Remember attribute start so it can be trimmed off
			 * if this entry does not fit. */
			r = (struct rtattr*) skb->tail;
			RTA_PUT(skb, a->order, 0, NULL);
			if (type == RTM_DELACTION)
				err = tcf_action_dump_1(skb, a, 0, 1);
			else
				err = tcf_action_dump_1(skb, a, 0, 0);
			if (err < 0) {
				/* skb full: back out this entry and stop;
				 * the cursor lets the next pass resume here. */
				index--;
				skb_trim(skb, (u8*)r - skb->data);
				goto done;
			}
			r->rta_len = skb->tail - (u8*)r;
			n_i++;
		}
	}
done:
	read_unlock(&police_lock);
	if (n_i)
		cb->args[0] += n_i;	/* advance resume cursor */
	return n_i;

rtattr_failure:
	/* RTA_PUT jumped here: undo the partial attribute, then finish. */
	skb_trim(skb, (u8*)r - skb->data);
	goto done;
}
  101. static inline int
  102. tcf_hash_search(struct tc_action *a, u32 index)
  103. {
  104. struct tcf_police *p = tcf_police_lookup(index);
  105. if (p != NULL) {
  106. a->priv = p;
  107. return 1;
  108. } else {
  109. return 0;
  110. }
  111. }
  112. #endif
/* Allocate the next unused policer index.  Index 0 means "auto-assign"
 * in the netlink interface, so it is never handed out; the counter
 * wraps past u32 max and skips any index already in the table.
 * NOTE(review): idx_gen is read-modify-written without a lock here —
 * presumably configuration is serialized by rtnl; confirm.
 */
static inline u32 tcf_police_new_index(void)
{
	do {
		if (++idx_gen == 0)
			idx_gen = 1;	/* skip the reserved 0 */
	} while (tcf_police_lookup(idx_gen));
	return idx_gen;
}
/* Unlink policer @p from the hash table and free it along with its
 * rate tables and (if configured) rate estimator.
 * NOTE(review): the bucket list is walked without police_lock — only
 * the unlink itself is under the write lock.  Presumably callers
 * serialize destruction externally; confirm.
 */
void tcf_police_destroy(struct tcf_police *p)
{
	unsigned h = tcf_police_hash(p->index);
	struct tcf_police **p1p;

	for (p1p = &tcf_police_ht[h]; *p1p; p1p = &(*p1p)->next) {
		if (*p1p == p) {
			write_lock_bh(&police_lock);
			*p1p = p->next;
			write_unlock_bh(&police_lock);
#ifdef CONFIG_NET_ESTIMATOR
			gen_kill_estimator(&p->bstats, &p->rate_est);
#endif
			if (p->R_tab)
				qdisc_put_rtab(p->R_tab);
			if (p->P_tab)
				qdisc_put_rtab(p->P_tab);
			kfree(p);
			return;
		}
	}
	/* Policer was not in its bucket: refcounting bug somewhere. */
	BUG_TRAP(0);
}
  143. #ifdef CONFIG_NET_CLS_ACT
  144. static int tcf_act_police_locate(struct rtattr *rta, struct rtattr *est,
  145. struct tc_action *a, int ovr, int bind)
  146. {
  147. unsigned h;
  148. int ret = 0, err;
  149. struct rtattr *tb[TCA_POLICE_MAX];
  150. struct tc_police *parm;
  151. struct tcf_police *p;
  152. struct qdisc_rate_table *R_tab = NULL, *P_tab = NULL;
  153. if (rta == NULL || rtattr_parse_nested(tb, TCA_POLICE_MAX, rta) < 0)
  154. return -EINVAL;
  155. if (tb[TCA_POLICE_TBF-1] == NULL ||
  156. RTA_PAYLOAD(tb[TCA_POLICE_TBF-1]) != sizeof(*parm))
  157. return -EINVAL;
  158. parm = RTA_DATA(tb[TCA_POLICE_TBF-1]);
  159. if (tb[TCA_POLICE_RESULT-1] != NULL &&
  160. RTA_PAYLOAD(tb[TCA_POLICE_RESULT-1]) != sizeof(u32))
  161. return -EINVAL;
  162. if (tb[TCA_POLICE_RESULT-1] != NULL &&
  163. RTA_PAYLOAD(tb[TCA_POLICE_RESULT-1]) != sizeof(u32))
  164. return -EINVAL;
  165. if (parm->index && (p = tcf_police_lookup(parm->index)) != NULL) {
  166. a->priv = p;
  167. if (bind) {
  168. p->bindcnt += 1;
  169. p->refcnt += 1;
  170. }
  171. if (ovr)
  172. goto override;
  173. return ret;
  174. }
  175. p = kmalloc(sizeof(*p), GFP_KERNEL);
  176. if (p == NULL)
  177. return -ENOMEM;
  178. memset(p, 0, sizeof(*p));
  179. ret = ACT_P_CREATED;
  180. p->refcnt = 1;
  181. spin_lock_init(&p->lock);
  182. p->stats_lock = &p->lock;
  183. if (bind)
  184. p->bindcnt = 1;
  185. override:
  186. if (parm->rate.rate) {
  187. err = -ENOMEM;
  188. R_tab = qdisc_get_rtab(&parm->rate, tb[TCA_POLICE_RATE-1]);
  189. if (R_tab == NULL)
  190. goto failure;
  191. if (parm->peakrate.rate) {
  192. P_tab = qdisc_get_rtab(&parm->peakrate,
  193. tb[TCA_POLICE_PEAKRATE-1]);
  194. if (p->P_tab == NULL) {
  195. qdisc_put_rtab(R_tab);
  196. goto failure;
  197. }
  198. }
  199. }
  200. /* No failure allowed after this point */
  201. spin_lock_bh(&p->lock);
  202. if (R_tab != NULL) {
  203. qdisc_put_rtab(p->R_tab);
  204. p->R_tab = R_tab;
  205. }
  206. if (P_tab != NULL) {
  207. qdisc_put_rtab(p->P_tab);
  208. p->P_tab = P_tab;
  209. }
  210. if (tb[TCA_POLICE_RESULT-1])
  211. p->result = *(u32*)RTA_DATA(tb[TCA_POLICE_RESULT-1]);
  212. p->toks = p->burst = parm->burst;
  213. p->mtu = parm->mtu;
  214. if (p->mtu == 0) {
  215. p->mtu = ~0;
  216. if (p->R_tab)
  217. p->mtu = 255<<p->R_tab->rate.cell_log;
  218. }
  219. if (p->P_tab)
  220. p->ptoks = L2T_P(p, p->mtu);
  221. p->action = parm->action;
  222. #ifdef CONFIG_NET_ESTIMATOR
  223. if (tb[TCA_POLICE_AVRATE-1])
  224. p->ewma_rate = *(u32*)RTA_DATA(tb[TCA_POLICE_AVRATE-1]);
  225. if (est)
  226. gen_replace_estimator(&p->bstats, &p->rate_est, p->stats_lock, est);
  227. #endif
  228. spin_unlock_bh(&p->lock);
  229. if (ret != ACT_P_CREATED)
  230. return ret;
  231. PSCHED_GET_TIME(p->t_c);
  232. p->index = parm->index ? : tcf_police_new_index();
  233. h = tcf_police_hash(p->index);
  234. write_lock_bh(&police_lock);
  235. p->next = tcf_police_ht[h];
  236. tcf_police_ht[h] = p;
  237. write_unlock_bh(&police_lock);
  238. a->priv = p;
  239. return ret;
  240. failure:
  241. if (ret == ACT_P_CREATED)
  242. kfree(p);
  243. return err;
  244. }
  245. static int tcf_act_police_cleanup(struct tc_action *a, int bind)
  246. {
  247. struct tcf_police *p = PRIV(a);
  248. if (p != NULL)
  249. return tcf_police_release(p, bind);
  250. return 0;
  251. }
/* Action-API packet hook: police one packet against the configured
 * token buckets.  Returns p->result while the packet conforms,
 * p->action (e.g. drop/reclassify) once it exceeds the budget.
 */
static int tcf_act_police(struct sk_buff **pskb, struct tc_action *a,
                          struct tcf_result *res)
{
	psched_time_t now;
	struct sk_buff *skb = *pskb;
	struct tcf_police *p = PRIV(a);
	long toks;
	long ptoks = 0;

	spin_lock(&p->lock);

	p->bstats.bytes += skb->len;
	p->bstats.packets++;

#ifdef CONFIG_NET_ESTIMATOR
	/* Average-rate policing: estimator above the EWMA limit means
	 * overlimit regardless of bucket state. */
	if (p->ewma_rate && p->rate_est.bps >= p->ewma_rate) {
		p->qstats.overlimits++;
		spin_unlock(&p->lock);
		return p->action;
	}
#endif

	if (skb->len <= p->mtu) {
		if (p->R_tab == NULL) {
			/* No rate configured: any packet within the size
			 * limit gets the configured result. */
			spin_unlock(&p->lock);
			return p->result;
		}

		PSCHED_GET_TIME(now);

		/* Tokens earned since the last update, capped at a burst. */
		toks = PSCHED_TDIFF_SAFE(now, p->t_c, p->burst);

		if (p->P_tab) {
			/* Peak-rate bucket holds at most one MTU of tokens. */
			ptoks = toks + p->ptoks;
			if (ptoks > (long)L2T_P(p, p->mtu))
				ptoks = (long)L2T_P(p, p->mtu);
			ptoks -= L2T_P(p, skb->len);
		}
		toks += p->toks;
		if (toks > (long)p->burst)
			toks = p->burst;
		toks -= L2T(p, skb->len);

		/* Conforms only if both buckets stay non-negative. */
		if ((toks|ptoks) >= 0) {
			p->t_c = now;
			p->toks = toks;
			p->ptoks = ptoks;
			spin_unlock(&p->lock);
			return p->result;
		}
	}

	p->qstats.overlimits++;
	spin_unlock(&p->lock);
	return p->action;
}
  299. static int
  300. tcf_act_police_dump(struct sk_buff *skb, struct tc_action *a, int bind, int ref)
  301. {
  302. unsigned char *b = skb->tail;
  303. struct tc_police opt;
  304. struct tcf_police *p = PRIV(a);
  305. opt.index = p->index;
  306. opt.action = p->action;
  307. opt.mtu = p->mtu;
  308. opt.burst = p->burst;
  309. opt.refcnt = p->refcnt - ref;
  310. opt.bindcnt = p->bindcnt - bind;
  311. if (p->R_tab)
  312. opt.rate = p->R_tab->rate;
  313. else
  314. memset(&opt.rate, 0, sizeof(opt.rate));
  315. if (p->P_tab)
  316. opt.peakrate = p->P_tab->rate;
  317. else
  318. memset(&opt.peakrate, 0, sizeof(opt.peakrate));
  319. RTA_PUT(skb, TCA_POLICE_TBF, sizeof(opt), &opt);
  320. if (p->result)
  321. RTA_PUT(skb, TCA_POLICE_RESULT, sizeof(int), &p->result);
  322. #ifdef CONFIG_NET_ESTIMATOR
  323. if (p->ewma_rate)
  324. RTA_PUT(skb, TCA_POLICE_AVRATE, 4, &p->ewma_rate);
  325. #endif
  326. return skb->len;
  327. rtattr_failure:
  328. skb_trim(skb, b - skb->data);
  329. return -1;
  330. }
MODULE_AUTHOR("Alexey Kuznetsov");
MODULE_DESCRIPTION("Policing actions");
MODULE_LICENSE("GPL");

/* tc_action_ops vtable wiring the policer into the generic action API. */
static struct tc_action_ops act_police_ops = {
	.kind		=	"police",
	.type		=	TCA_ID_POLICE,
	.capab		=	TCA_CAP_NONE,
	.owner		=	THIS_MODULE,
	.act		=	tcf_act_police,
	.dump		=	tcf_act_police_dump,
	.cleanup	=	tcf_act_police_cleanup,
	.lookup		=	tcf_hash_search,
	.init		=	tcf_act_police_locate,
	.walk		=	tcf_generic_walker
};
/* Register the "police" action with the action subsystem on load. */
static int __init
police_init_module(void)
{
	return tcf_register_action(&act_police_ops);
}

/* Unregister on module unload. */
static void __exit
police_cleanup_module(void)
{
	tcf_unregister_action(&act_police_ops);
}

module_init(police_init_module);
module_exit(police_cleanup_module);
  358. #endif
  359. struct tcf_police * tcf_police_locate(struct rtattr *rta, struct rtattr *est)
  360. {
  361. unsigned h;
  362. struct tcf_police *p;
  363. struct rtattr *tb[TCA_POLICE_MAX];
  364. struct tc_police *parm;
  365. if (rtattr_parse_nested(tb, TCA_POLICE_MAX, rta) < 0)
  366. return NULL;
  367. if (tb[TCA_POLICE_TBF-1] == NULL ||
  368. RTA_PAYLOAD(tb[TCA_POLICE_TBF-1]) != sizeof(*parm))
  369. return NULL;
  370. parm = RTA_DATA(tb[TCA_POLICE_TBF-1]);
  371. if (parm->index && (p = tcf_police_lookup(parm->index)) != NULL) {
  372. p->refcnt++;
  373. return p;
  374. }
  375. p = kmalloc(sizeof(*p), GFP_KERNEL);
  376. if (p == NULL)
  377. return NULL;
  378. memset(p, 0, sizeof(*p));
  379. p->refcnt = 1;
  380. spin_lock_init(&p->lock);
  381. p->stats_lock = &p->lock;
  382. if (parm->rate.rate) {
  383. p->R_tab = qdisc_get_rtab(&parm->rate, tb[TCA_POLICE_RATE-1]);
  384. if (p->R_tab == NULL)
  385. goto failure;
  386. if (parm->peakrate.rate) {
  387. p->P_tab = qdisc_get_rtab(&parm->peakrate,
  388. tb[TCA_POLICE_PEAKRATE-1]);
  389. if (p->P_tab == NULL)
  390. goto failure;
  391. }
  392. }
  393. if (tb[TCA_POLICE_RESULT-1]) {
  394. if (RTA_PAYLOAD(tb[TCA_POLICE_RESULT-1]) != sizeof(u32))
  395. goto failure;
  396. p->result = *(u32*)RTA_DATA(tb[TCA_POLICE_RESULT-1]);
  397. }
  398. #ifdef CONFIG_NET_ESTIMATOR
  399. if (tb[TCA_POLICE_AVRATE-1]) {
  400. if (RTA_PAYLOAD(tb[TCA_POLICE_AVRATE-1]) != sizeof(u32))
  401. goto failure;
  402. p->ewma_rate = *(u32*)RTA_DATA(tb[TCA_POLICE_AVRATE-1]);
  403. }
  404. #endif
  405. p->toks = p->burst = parm->burst;
  406. p->mtu = parm->mtu;
  407. if (p->mtu == 0) {
  408. p->mtu = ~0;
  409. if (p->R_tab)
  410. p->mtu = 255<<p->R_tab->rate.cell_log;
  411. }
  412. if (p->P_tab)
  413. p->ptoks = L2T_P(p, p->mtu);
  414. PSCHED_GET_TIME(p->t_c);
  415. p->index = parm->index ? : tcf_police_new_index();
  416. p->action = parm->action;
  417. #ifdef CONFIG_NET_ESTIMATOR
  418. if (est)
  419. gen_new_estimator(&p->bstats, &p->rate_est, p->stats_lock, est);
  420. #endif
  421. h = tcf_police_hash(p->index);
  422. write_lock_bh(&police_lock);
  423. p->next = tcf_police_ht[h];
  424. tcf_police_ht[h] = p;
  425. write_unlock_bh(&police_lock);
  426. return p;
  427. failure:
  428. if (p->R_tab)
  429. qdisc_put_rtab(p->R_tab);
  430. kfree(p);
  431. return NULL;
  432. }
/* Classifier-path packet hook (non-CLS_ACT twin of tcf_act_police):
 * police one packet against the configured token buckets.  Returns
 * p->result while the packet conforms, p->action once it exceeds the
 * budget.
 */
int tcf_police(struct sk_buff *skb, struct tcf_police *p)
{
	psched_time_t now;
	long toks;
	long ptoks = 0;

	spin_lock(&p->lock);

	p->bstats.bytes += skb->len;
	p->bstats.packets++;

#ifdef CONFIG_NET_ESTIMATOR
	/* Average-rate policing: estimator above the EWMA limit means
	 * overlimit regardless of bucket state. */
	if (p->ewma_rate && p->rate_est.bps >= p->ewma_rate) {
		p->qstats.overlimits++;
		spin_unlock(&p->lock);
		return p->action;
	}
#endif

	if (skb->len <= p->mtu) {
		if (p->R_tab == NULL) {
			/* No rate configured: any packet within the size
			 * limit gets the configured result. */
			spin_unlock(&p->lock);
			return p->result;
		}

		PSCHED_GET_TIME(now);

		/* Tokens earned since the last update, capped at a burst. */
		toks = PSCHED_TDIFF_SAFE(now, p->t_c, p->burst);

		if (p->P_tab) {
			/* Peak-rate bucket holds at most one MTU of tokens. */
			ptoks = toks + p->ptoks;
			if (ptoks > (long)L2T_P(p, p->mtu))
				ptoks = (long)L2T_P(p, p->mtu);
			ptoks -= L2T_P(p, skb->len);
		}
		toks += p->toks;
		if (toks > (long)p->burst)
			toks = p->burst;
		toks -= L2T(p, skb->len);

		/* Conforms only if both buckets stay non-negative. */
		if ((toks|ptoks) >= 0) {
			p->t_c = now;
			p->toks = toks;
			p->ptoks = ptoks;
			spin_unlock(&p->lock);
			return p->result;
		}
	}

	p->qstats.overlimits++;
	spin_unlock(&p->lock);
	return p->action;
}
  477. int tcf_police_dump(struct sk_buff *skb, struct tcf_police *p)
  478. {
  479. unsigned char *b = skb->tail;
  480. struct tc_police opt;
  481. opt.index = p->index;
  482. opt.action = p->action;
  483. opt.mtu = p->mtu;
  484. opt.burst = p->burst;
  485. if (p->R_tab)
  486. opt.rate = p->R_tab->rate;
  487. else
  488. memset(&opt.rate, 0, sizeof(opt.rate));
  489. if (p->P_tab)
  490. opt.peakrate = p->P_tab->rate;
  491. else
  492. memset(&opt.peakrate, 0, sizeof(opt.peakrate));
  493. RTA_PUT(skb, TCA_POLICE_TBF, sizeof(opt), &opt);
  494. if (p->result)
  495. RTA_PUT(skb, TCA_POLICE_RESULT, sizeof(int), &p->result);
  496. #ifdef CONFIG_NET_ESTIMATOR
  497. if (p->ewma_rate)
  498. RTA_PUT(skb, TCA_POLICE_AVRATE, 4, &p->ewma_rate);
  499. #endif
  500. return skb->len;
  501. rtattr_failure:
  502. skb_trim(skb, b - skb->data);
  503. return -1;
  504. }
/* Emit basic / rate-estimator / queue statistics for policer @p into
 * @skb via the compat stats block (TCA_STATS2 with TCA_STATS fallback).
 * Returns 0 on success, -1 if the skb ran out of room.
 */
int tcf_police_dump_stats(struct sk_buff *skb, struct tcf_police *p)
{
	struct gnet_dump d;

	if (gnet_stats_start_copy_compat(skb, TCA_STATS2, TCA_STATS,
					 TCA_XSTATS, p->stats_lock, &d) < 0)
		goto errout;

	if (gnet_stats_copy_basic(&d, &p->bstats) < 0 ||
#ifdef CONFIG_NET_ESTIMATOR
	    gnet_stats_copy_rate_est(&d, &p->rate_est) < 0 ||
#endif
	    gnet_stats_copy_queue(&d, &p->qstats) < 0)
		goto errout;

	if (gnet_stats_finish_copy(&d) < 0)
		goto errout;

	return 0;

errout:
	return -1;
}
/* Exported so classifiers built without CONFIG_NET_CLS_ACT can embed
 * policers directly.
 * NOTE(review): tcf_police_hash, tcf_police_lookup and
 * tcf_police_new_index are declared static/inline above — exporting
 * them looks inconsistent; confirm these lines build in this tree.
 */
EXPORT_SYMBOL(tcf_police);
EXPORT_SYMBOL(tcf_police_destroy);
EXPORT_SYMBOL(tcf_police_dump);
EXPORT_SYMBOL(tcf_police_dump_stats);
EXPORT_SYMBOL(tcf_police_hash);
EXPORT_SYMBOL(tcf_police_ht);
EXPORT_SYMBOL(tcf_police_locate);
EXPORT_SYMBOL(tcf_police_lookup);
EXPORT_SYMBOL(tcf_police_new_index);