/* net/ipv4/fib_rules.c */
  1. /*
  2. * INET An implementation of the TCP/IP protocol suite for the LINUX
  3. * operating system. INET is implemented using the BSD Socket
  4. * interface as the means of communication with the user level.
  5. *
  6. * IPv4 Forwarding Information Base: policy rules.
  7. *
  8. * Version: $Id: fib_rules.c,v 1.17 2001/10/31 21:55:54 davem Exp $
  9. *
  10. * Authors: Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
  11. *
  12. * This program is free software; you can redistribute it and/or
  13. * modify it under the terms of the GNU General Public License
  14. * as published by the Free Software Foundation; either version
  15. * 2 of the License, or (at your option) any later version.
  16. *
  17. * Fixes:
  18. * Rani Assaf : local_rule cannot be deleted
  19. * Marc Boucher : routing by fwmark
  20. */
  21. #include <linux/config.h>
  22. #include <asm/uaccess.h>
  23. #include <asm/system.h>
  24. #include <linux/bitops.h>
  25. #include <linux/types.h>
  26. #include <linux/kernel.h>
  27. #include <linux/sched.h>
  28. #include <linux/mm.h>
  29. #include <linux/string.h>
  30. #include <linux/socket.h>
  31. #include <linux/sockios.h>
  32. #include <linux/errno.h>
  33. #include <linux/in.h>
  34. #include <linux/inet.h>
  35. #include <linux/inetdevice.h>
  36. #include <linux/netdevice.h>
  37. #include <linux/if_arp.h>
  38. #include <linux/proc_fs.h>
  39. #include <linux/skbuff.h>
  40. #include <linux/netlink.h>
  41. #include <linux/init.h>
  42. #include <linux/list.h>
  43. #include <linux/rcupdate.h>
  44. #include <net/ip.h>
  45. #include <net/protocol.h>
  46. #include <net/route.h>
  47. #include <net/tcp.h>
  48. #include <net/sock.h>
  49. #include <net/ip_fib.h>
/* Debug printout hook; expands to nothing unless re-defined. */
#define FRprintk(a...)

/*
 * One policy routing rule.  Rules live on the fib_rules hlist sorted by
 * ascending r_preference; readers walk the list under RCU, writers
 * (the netlink handlers) run under the rtnl semaphore.
 */
struct fib_rule
{
	struct hlist_node hlist;	/* linkage on fib_rules; RCU-protected */
	atomic_t	r_clntref;	/* refcount: list membership + pinned lookup results */
	u32		r_preference;	/* priority; lower value is matched first */
	unsigned char	r_table;	/* id of the routing table this rule selects */
	unsigned char	r_action;	/* verdict: RTN_UNICAST/UNREACHABLE/BLACKHOLE/PROHIBIT */
	unsigned char	r_dst_len;	/* destination prefix length in bits */
	unsigned char	r_src_len;	/* source prefix length in bits */
	u32		r_src;		/* source address selector */
	u32		r_srcmask;	/* mask derived from r_src_len */
	u32		r_dst;		/* destination address selector */
	u32		r_dstmask;	/* mask derived from r_dst_len */
	u32		r_srcmap;	/* filled from the RTA_GATEWAY attribute */
	u8		r_flags;	/* copy of rtm_flags from the request */
	u8		r_tos;		/* TOS selector; 0 means wildcard */
#ifdef CONFIG_IP_ROUTE_FWMARK
	u32		r_fwmark;	/* firewall mark selector; 0 means wildcard */
#endif
	int		r_ifindex;	/* input device; 0 = any, -1 = named but not present */
#ifdef CONFIG_NET_CLS_ROUTE
	__u32		r_tclassid;	/* traffic class id reported to the classifier */
#endif
	char		r_ifname[IFNAMSIZ];	/* input device name selector */
	int		r_dead;		/* set once unlinked; gates the RCU free path */
	struct rcu_head	rcu;		/* deferred kfree after the grace period */
};
/*
 * The three permanent rules present from boot, in lookup order:
 * local (pref 0) -> main (pref 0x7FFE) -> default (pref 0x7FFF).
 * Each starts at r_clntref == 2 (presumably: one ref for list
 * membership plus one permanent ref so these static objects never
 * reach the kfree path -- TODO confirm against fib_rule_put()).
 */
static struct fib_rule default_rule = {
	.r_clntref =	ATOMIC_INIT(2),
	.r_preference =	0x7FFF,
	.r_table =	RT_TABLE_DEFAULT,
	.r_action =	RTN_UNICAST,
};

static struct fib_rule main_rule = {
	.r_clntref =	ATOMIC_INIT(2),
	.r_preference =	0x7FFE,
	.r_table =	RT_TABLE_MAIN,
	.r_action =	RTN_UNICAST,
};

/* local_rule can never be deleted (enforced in inet_rtm_delrule). */
static struct fib_rule local_rule = {
	.r_clntref =	ATOMIC_INIT(2),
	.r_table =	RT_TABLE_LOCAL,
	.r_action =	RTN_UNICAST,
};

/* All rules, sorted by ascending r_preference.
 * Readers: RCU.  Writers: rtnl semaphore. */
static struct hlist_head fib_rules;

/* writer func called from netlink -- rtnl_sem held */
static void rtmsg_rule(int, struct fib_rule *);
  98. int inet_rtm_delrule(struct sk_buff *skb, struct nlmsghdr* nlh, void *arg)
  99. {
  100. struct rtattr **rta = arg;
  101. struct rtmsg *rtm = NLMSG_DATA(nlh);
  102. struct fib_rule *r;
  103. struct hlist_node *node;
  104. int err = -ESRCH;
  105. hlist_for_each_entry(r, node, &fib_rules, hlist) {
  106. if ((!rta[RTA_SRC-1] || memcmp(RTA_DATA(rta[RTA_SRC-1]), &r->r_src, 4) == 0) &&
  107. rtm->rtm_src_len == r->r_src_len &&
  108. rtm->rtm_dst_len == r->r_dst_len &&
  109. (!rta[RTA_DST-1] || memcmp(RTA_DATA(rta[RTA_DST-1]), &r->r_dst, 4) == 0) &&
  110. rtm->rtm_tos == r->r_tos &&
  111. #ifdef CONFIG_IP_ROUTE_FWMARK
  112. (!rta[RTA_PROTOINFO-1] || memcmp(RTA_DATA(rta[RTA_PROTOINFO-1]), &r->r_fwmark, 4) == 0) &&
  113. #endif
  114. (!rtm->rtm_type || rtm->rtm_type == r->r_action) &&
  115. (!rta[RTA_PRIORITY-1] || memcmp(RTA_DATA(rta[RTA_PRIORITY-1]), &r->r_preference, 4) == 0) &&
  116. (!rta[RTA_IIF-1] || rtattr_strcmp(rta[RTA_IIF-1], r->r_ifname) == 0) &&
  117. (!rtm->rtm_table || (r && rtm->rtm_table == r->r_table))) {
  118. err = -EPERM;
  119. if (r == &local_rule)
  120. break;
  121. hlist_del_rcu(&r->hlist);
  122. r->r_dead = 1;
  123. rtmsg_rule(RTM_DELRULE, r);
  124. fib_rule_put(r);
  125. err = 0;
  126. break;
  127. }
  128. }
  129. return err;
  130. }
  131. /* Allocate new unique table id */
  132. static struct fib_table *fib_empty_table(void)
  133. {
  134. int id;
  135. for (id = 1; id <= RT_TABLE_MAX; id++)
  136. if (fib_tables[id] == NULL)
  137. return __fib_new_table(id);
  138. return NULL;
  139. }
  140. static inline void fib_rule_put_rcu(struct rcu_head *head)
  141. {
  142. struct fib_rule *r = container_of(head, struct fib_rule, rcu);
  143. kfree(r);
  144. }
  145. void fib_rule_put(struct fib_rule *r)
  146. {
  147. if (atomic_dec_and_test(&r->r_clntref)) {
  148. if (r->r_dead)
  149. call_rcu(&r->rcu, fib_rule_put_rcu);
  150. else
  151. printk("Freeing alive rule %p\n", r);
  152. }
  153. }
/* writer func called from netlink -- rtnl_sem held */

/*
 * RTM_NEWRULE handler: build a fib_rule from the netlink attributes
 * and insert it into fib_rules, keeping the list sorted by ascending
 * r_preference.  Insertion uses the _rcu list helpers so concurrent
 * fib_lookup() readers stay safe.
 */
int inet_rtm_newrule(struct sk_buff *skb, struct nlmsghdr* nlh, void *arg)
{
	struct rtattr **rta = arg;
	struct rtmsg *rtm = NLMSG_DATA(nlh);
	struct fib_rule *r, *new_r, *last = NULL;
	struct hlist_node *node = NULL;
	unsigned char table_id;

	/* IPv4 prefixes are at most 32 bits, and the TOS must not
	 * carry non-TOS bits. */
	if (rtm->rtm_src_len > 32 || rtm->rtm_dst_len > 32 ||
	    (rtm->rtm_tos & ~IPTOS_TOS_MASK))
		return -EINVAL;
	if (rta[RTA_IIF-1] && RTA_PAYLOAD(rta[RTA_IIF-1]) > IFNAMSIZ)
		return -EINVAL;
	table_id = rtm->rtm_table;
	if (table_id == RT_TABLE_UNSPEC) {
		struct fib_table *table;
		/* No table given: allocate a fresh one for unicast rules. */
		if (rtm->rtm_type == RTN_UNICAST) {
			if ((table = fib_empty_table()) == NULL)
				return -ENOBUFS;
			table_id = table->tb_id;
		}
	}

	new_r = kmalloc(sizeof(*new_r), GFP_KERNEL);
	if (!new_r)
		return -ENOMEM;
	memset(new_r, 0, sizeof(*new_r));

	/* Copy the optional selectors; attributes left out keep the
	 * zeroed defaults, which act as wildcards. */
	if (rta[RTA_SRC-1])
		memcpy(&new_r->r_src, RTA_DATA(rta[RTA_SRC-1]), 4);
	if (rta[RTA_DST-1])
		memcpy(&new_r->r_dst, RTA_DATA(rta[RTA_DST-1]), 4);
	if (rta[RTA_GATEWAY-1])
		memcpy(&new_r->r_srcmap, RTA_DATA(rta[RTA_GATEWAY-1]), 4);
	new_r->r_src_len = rtm->rtm_src_len;
	new_r->r_dst_len = rtm->rtm_dst_len;
	new_r->r_srcmask = inet_make_mask(rtm->rtm_src_len);
	new_r->r_dstmask = inet_make_mask(rtm->rtm_dst_len);
	new_r->r_tos = rtm->rtm_tos;
#ifdef CONFIG_IP_ROUTE_FWMARK
	if (rta[RTA_PROTOINFO-1])
		memcpy(&new_r->r_fwmark, RTA_DATA(rta[RTA_PROTOINFO-1]), 4);
#endif
	new_r->r_action = rtm->rtm_type;
	new_r->r_flags = rtm->rtm_flags;
	if (rta[RTA_PRIORITY-1])
		memcpy(&new_r->r_preference, RTA_DATA(rta[RTA_PRIORITY-1]), 4);
	new_r->r_table = table_id;
	if (rta[RTA_IIF-1]) {
		struct net_device *dev;
		rtattr_strlcpy(new_r->r_ifname, rta[RTA_IIF-1], IFNAMSIZ);
		/* -1 marks "named but not present"; fib_rules_attach()
		 * resolves it when such a device registers. */
		new_r->r_ifindex = -1;
		dev = __dev_get_by_name(new_r->r_ifname);
		if (dev)
			new_r->r_ifindex = dev->ifindex;
	}
#ifdef CONFIG_NET_CLS_ROUTE
	if (rta[RTA_FLOW-1])
		memcpy(&new_r->r_tclassid, RTA_DATA(rta[RTA_FLOW-1]), 4);
#endif

	/* No explicit priority requested: pick one just below the
	 * second list entry's preference (the head is the permanent
	 * local rule at preference 0). */
	r = container_of(fib_rules.first, struct fib_rule, hlist);
	if (!new_r->r_preference) {
		if (r && r->hlist.next != NULL) {
			r = container_of(r->hlist.next, struct fib_rule, hlist);
			if (r->r_preference)
				new_r->r_preference = r->r_preference - 1;
		}
	}

	/* Find the insertion point in the preference-sorted list. */
	hlist_for_each_entry(r, node, &fib_rules, hlist) {
		if (r->r_preference > new_r->r_preference)
			break;
		last = r;
	}
	/* Take the list's reference (rule was zeroed, so 0 -> 1). */
	atomic_inc(&new_r->r_clntref);
	if (last)
		hlist_add_after_rcu(&last->hlist, &new_r->hlist);
	else
		hlist_add_before_rcu(&new_r->hlist, &r->hlist);
	rtmsg_rule(RTM_NEWRULE, new_r);
	return 0;
}
  233. #ifdef CONFIG_NET_CLS_ROUTE
  234. u32 fib_rules_tclass(struct fib_result *res)
  235. {
  236. if (res->r)
  237. return res->r->r_tclassid;
  238. return 0;
  239. }
  240. #endif
  241. /* callers should hold rtnl semaphore */
  242. static void fib_rules_detach(struct net_device *dev)
  243. {
  244. struct hlist_node *node;
  245. struct fib_rule *r;
  246. hlist_for_each_entry(r, node, &fib_rules, hlist) {
  247. if (r->r_ifindex == dev->ifindex)
  248. r->r_ifindex = -1;
  249. }
  250. }
  251. /* callers should hold rtnl semaphore */
  252. static void fib_rules_attach(struct net_device *dev)
  253. {
  254. struct hlist_node *node;
  255. struct fib_rule *r;
  256. hlist_for_each_entry(r, node, &fib_rules, hlist) {
  257. if (r->r_ifindex == -1 && strcmp(dev->name, r->r_ifname) == 0)
  258. r->r_ifindex = dev->ifindex;
  259. }
  260. }
/*
 * Policy routing lookup.  Walk fib_rules in priority order under RCU;
 * the first rule whose src/dst/TOS/fwmark/iif selectors all match the
 * flow decides the action.  UNREACHABLE/BLACKHOLE/PROHIBIT rules end
 * the lookup with an error; a UNICAST rule directs the lookup to its
 * table, and a table miss (-EAGAIN) falls through to the next rule.
 * On success res->r carries a counted reference to the matched rule
 * (released later via fib_rule_put()).
 */
int fib_lookup(const struct flowi *flp, struct fib_result *res)
{
	int err;
	struct fib_rule *r, *policy;
	struct fib_table *tb;
	struct hlist_node *node;
	u32 daddr = flp->fl4_dst;
	u32 saddr = flp->fl4_src;

	FRprintk("Lookup: %u.%u.%u.%u <- %u.%u.%u.%u ",
		NIPQUAD(flp->fl4_dst), NIPQUAD(flp->fl4_src));

	rcu_read_lock();
	hlist_for_each_entry_rcu(r, node, &fib_rules, hlist) {
		/* Zero-valued selectors (mask/tos/fwmark/ifindex) act
		 * as wildcards and never reject the flow. */
		if (((saddr^r->r_src) & r->r_srcmask) ||
		    ((daddr^r->r_dst) & r->r_dstmask) ||
		    (r->r_tos && r->r_tos != flp->fl4_tos) ||
#ifdef CONFIG_IP_ROUTE_FWMARK
		    (r->r_fwmark && r->r_fwmark != flp->fl4_fwmark) ||
#endif
		    (r->r_ifindex && r->r_ifindex != flp->iif))
			continue;
		FRprintk("tb %d r %d ", r->r_table, r->r_action);
		switch (r->r_action) {
		case RTN_UNICAST:
			policy = r;
			break;
		case RTN_UNREACHABLE:
			rcu_read_unlock();
			return -ENETUNREACH;
		default:	/* unknown actions behave like blackhole */
		case RTN_BLACKHOLE:
			rcu_read_unlock();
			return -EINVAL;
		case RTN_PROHIBIT:
			rcu_read_unlock();
			return -EACCES;
		}
		if ((tb = fib_get_table(r->r_table)) == NULL)
			continue;
		err = tb->tb_lookup(tb, flp, res);
		if (err == 0) {
			/* Pin the rule for the caller before leaving
			 * the RCU read section. */
			res->r = policy;
			if (policy)
				atomic_inc(&policy->r_clntref);
			rcu_read_unlock();
			return 0;
		}
		/* -EAGAIN means "not in this table, try the next rule";
		 * any other error is final. */
		if (err < 0 && err != -EAGAIN) {
			rcu_read_unlock();
			return err;
		}
	}
	FRprintk("FAILURE\n");
	rcu_read_unlock();
	return -ENETUNREACH;
}
  316. void fib_select_default(const struct flowi *flp, struct fib_result *res)
  317. {
  318. if (res->r && res->r->r_action == RTN_UNICAST &&
  319. FIB_RES_GW(*res) && FIB_RES_NH(*res).nh_scope == RT_SCOPE_LINK) {
  320. struct fib_table *tb;
  321. if ((tb = fib_get_table(res->r->r_table)) != NULL)
  322. tb->tb_select_default(tb, flp, res);
  323. }
  324. }
  325. static int fib_rules_event(struct notifier_block *this, unsigned long event, void *ptr)
  326. {
  327. struct net_device *dev = ptr;
  328. if (event == NETDEV_UNREGISTER)
  329. fib_rules_detach(dev);
  330. else if (event == NETDEV_REGISTER)
  331. fib_rules_attach(dev);
  332. return NOTIFY_DONE;
  333. }
/* Registered at init time; delivers NETDEV_REGISTER/UNREGISTER
 * events to fib_rules_event(). */
static struct notifier_block fib_rules_notifier = {
	.notifier_call = fib_rules_event,
};
/*
 * Encode one rule as an RTM_NEWRULE/RTM_DELRULE netlink message into
 * @skb.  Returns skb->len on success, or -1 if the skb ran out of
 * room -- in which case everything added here is trimmed off again.
 * Note: NLMSG_NEW and RTA_PUT are macros that jump to the failure
 * labels below on overflow.
 */
static __inline__ int inet_fill_rule(struct sk_buff *skb,
				     struct fib_rule *r,
				     u32 pid, u32 seq, int event,
				     unsigned int flags)
{
	struct rtmsg *rtm;
	struct nlmsghdr *nlh;
	unsigned char *b = skb->tail;	/* rollback point on failure */

	nlh = NLMSG_NEW(skb, pid, seq, event, sizeof(*rtm), flags);
	rtm = NLMSG_DATA(nlh);
	rtm->rtm_family = AF_INET;
	rtm->rtm_dst_len = r->r_dst_len;
	rtm->rtm_src_len = r->r_src_len;
	rtm->rtm_tos = r->r_tos;
#ifdef CONFIG_IP_ROUTE_FWMARK
	if (r->r_fwmark)
		RTA_PUT(skb, RTA_PROTOINFO, 4, &r->r_fwmark);
#endif
	rtm->rtm_table = r->r_table;
	rtm->rtm_protocol = 0;
	rtm->rtm_scope = 0;
	rtm->rtm_type = r->r_action;
	rtm->rtm_flags = r->r_flags;
	/* Optional attributes: emitted only when non-default. */
	if (r->r_dst_len)
		RTA_PUT(skb, RTA_DST, 4, &r->r_dst);
	if (r->r_src_len)
		RTA_PUT(skb, RTA_SRC, 4, &r->r_src);
	if (r->r_ifname[0])
		RTA_PUT(skb, RTA_IIF, IFNAMSIZ, &r->r_ifname);
	if (r->r_preference)
		RTA_PUT(skb, RTA_PRIORITY, 4, &r->r_preference);
	if (r->r_srcmap)
		RTA_PUT(skb, RTA_GATEWAY, 4, &r->r_srcmap);
#ifdef CONFIG_NET_CLS_ROUTE
	if (r->r_tclassid)
		RTA_PUT(skb, RTA_FLOW, 4, &r->r_tclassid);
#endif
	nlh->nlmsg_len = skb->tail - b;
	return skb->len;

nlmsg_failure:
rtattr_failure:
	skb_trim(skb, b - skb->data);
	return -1;
}
  381. /* callers should hold rtnl semaphore */
  382. static void rtmsg_rule(int event, struct fib_rule *r)
  383. {
  384. int size = NLMSG_SPACE(sizeof(struct rtmsg) + 128);
  385. struct sk_buff *skb = alloc_skb(size, GFP_KERNEL);
  386. if (!skb)
  387. netlink_set_err(rtnl, 0, RTNLGRP_IPV4_RULE, ENOBUFS);
  388. else if (inet_fill_rule(skb, r, 0, 0, event, 0) < 0) {
  389. kfree_skb(skb);
  390. netlink_set_err(rtnl, 0, RTNLGRP_IPV4_RULE, EINVAL);
  391. } else {
  392. netlink_broadcast(rtnl, skb, 0, RTNLGRP_IPV4_RULE, GFP_KERNEL);
  393. }
  394. }
  395. int inet_dump_rules(struct sk_buff *skb, struct netlink_callback *cb)
  396. {
  397. int idx = 0;
  398. int s_idx = cb->args[0];
  399. struct fib_rule *r;
  400. struct hlist_node *node;
  401. rcu_read_lock();
  402. hlist_for_each_entry(r, node, &fib_rules, hlist) {
  403. if (idx < s_idx)
  404. continue;
  405. if (inet_fill_rule(skb, r, NETLINK_CB(cb->skb).pid,
  406. cb->nlh->nlmsg_seq,
  407. RTM_NEWRULE, NLM_F_MULTI) < 0)
  408. break;
  409. idx++;
  410. }
  411. rcu_read_unlock();
  412. cb->args[0] = idx;
  413. return skb->len;
  414. }
/*
 * Boot-time setup: chain the three built-in rules onto the list in
 * priority order (local, main, default) and register the netdevice
 * notifier so r_ifindex tracks interface (un)registration.  Runs
 * before any concurrency, so the plain (non-RCU) add helpers are
 * sufficient here.
 */
void __init fib_rules_init(void)
{
	INIT_HLIST_HEAD(&fib_rules);
	hlist_add_head(&local_rule.hlist, &fib_rules);
	hlist_add_after(&local_rule.hlist, &main_rule.hlist);
	hlist_add_after(&main_rule.hlist, &default_rule.hlist);
	register_netdevice_notifier(&fib_rules_notifier);
}