fib_rules.c 12 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349350351352353354355356357358359360361362363364365366367368369370371372373374375376377378379380381382383384385386387388389390391392393394395396397398399400401402403404405406407408409410411412413414415416417418419420421422423424425426427428429430431432433434435436437438439440441442443444445446447448449450451452453454455456457458459460461462463464465466467468469470471472473474475476477478479480481
  1. /*
  2. * INET An implementation of the TCP/IP protocol suite for the LINUX
  3. * operating system. INET is implemented using the BSD Socket
  4. * interface as the means of communication with the user level.
  5. *
  6. * IPv4 Forwarding Information Base: policy rules.
  7. *
  8. * Version: $Id: fib_rules.c,v 1.17 2001/10/31 21:55:54 davem Exp $
  9. *
  10. * Authors: Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
  11. *
  12. * This program is free software; you can redistribute it and/or
  13. * modify it under the terms of the GNU General Public License
  14. * as published by the Free Software Foundation; either version
  15. * 2 of the License, or (at your option) any later version.
  16. *
  17. * Fixes:
  18. * Rani Assaf : local_rule cannot be deleted
  19. * Marc Boucher : routing by fwmark
  20. */
  21. #include <asm/uaccess.h>
  22. #include <asm/system.h>
  23. #include <linux/bitops.h>
  24. #include <linux/types.h>
  25. #include <linux/kernel.h>
  26. #include <linux/sched.h>
  27. #include <linux/mm.h>
  28. #include <linux/string.h>
  29. #include <linux/socket.h>
  30. #include <linux/sockios.h>
  31. #include <linux/errno.h>
  32. #include <linux/in.h>
  33. #include <linux/inet.h>
  34. #include <linux/inetdevice.h>
  35. #include <linux/netdevice.h>
  36. #include <linux/if_arp.h>
  37. #include <linux/proc_fs.h>
  38. #include <linux/skbuff.h>
  39. #include <linux/netlink.h>
  40. #include <linux/init.h>
  41. #include <linux/list.h>
  42. #include <linux/rcupdate.h>
  43. #include <net/ip.h>
  44. #include <net/protocol.h>
  45. #include <net/route.h>
  46. #include <net/tcp.h>
  47. #include <net/sock.h>
  48. #include <net/ip_fib.h>
  49. #define FRprintk(a...)
/*
 * One IPv4 policy routing rule.  Rules live on the global fib_rules
 * list, sorted by ascending r_preference; a zero-valued selector
 * (tos, fwmark, ifindex, masks) acts as a wildcard in fib_lookup().
 */
struct fib_rule
{
	struct hlist_node hlist;	/* linkage on the global fib_rules list */
	atomic_t	r_clntref;	/* refcount; dropped via fib_rule_put() */
	u32		r_preference;	/* priority: lower value is matched first */
	unsigned char	r_table;	/* routing table consulted on a match */
	unsigned char	r_action;	/* RTN_UNICAST/RTN_UNREACHABLE/... */
	unsigned char	r_dst_len;	/* destination prefix length in bits */
	unsigned char	r_src_len;	/* source prefix length in bits */
	u32		r_src;		/* source prefix to match */
	u32		r_srcmask;	/* mask built from r_src_len */
	u32		r_dst;		/* destination prefix to match */
	u32		r_dstmask;	/* mask built from r_dst_len */
	u32		r_srcmap;	/* carried in/out as RTA_GATEWAY */
	u8		r_flags;	/* copied from rtm_flags */
	u8		r_tos;		/* TOS to match; 0 = any */
#ifdef CONFIG_IP_ROUTE_FWMARK
	u32		r_fwmark;	/* fwmark to match; 0 = any */
#endif
	int		r_ifindex;	/* input device; -1 while device absent */
#ifdef CONFIG_NET_CLS_ROUTE
	__u32		r_tclassid;	/* traffic class id (RTA_FLOW) */
#endif
	char		r_ifname[IFNAMSIZ];	/* device name, used to re-attach */
	int		r_dead;		/* set once unlinked; enables RCU free */
	struct rcu_head	rcu;		/* deferred kfree via fib_rule_put_rcu() */
};
/*
 * The three built-in rules, in priority order: local (0) -> main
 * (0x7FFE) -> default (0x7FFF).  All selectors are zero (wildcards),
 * so each rule just directs the lookup at its table.
 * NOTE(review): r_clntref starts at 2 — presumably one ref for the
 * list plus one permanent ref so these statics are never handed to
 * kfree; confirm against fib_rule_put() usage.
 */
static struct fib_rule default_rule = {
	.r_clntref =	ATOMIC_INIT(2),
	.r_preference =	0x7FFF,
	.r_table =	RT_TABLE_DEFAULT,
	.r_action =	RTN_UNICAST,
};

static struct fib_rule main_rule = {
	.r_clntref =	ATOMIC_INIT(2),
	.r_preference =	0x7FFE,
	.r_table =	RT_TABLE_MAIN,
	.r_action =	RTN_UNICAST,
};

/* Preference 0 (highest); inet_rtm_delrule() refuses to delete it. */
static struct fib_rule local_rule = {
	.r_clntref =	ATOMIC_INIT(2),
	.r_table =	RT_TABLE_LOCAL,
	.r_action =	RTN_UNICAST,
};
/* Global rule list, sorted by ascending r_preference.  Readers walk it
 * under RCU; writers are serialized by the rtnl semaphore. */
static struct hlist_head fib_rules;

/* writer func called from netlink -- rtnl_sem hold*/
static void rtmsg_rule(int, struct fib_rule *);
  97. int inet_rtm_delrule(struct sk_buff *skb, struct nlmsghdr* nlh, void *arg)
  98. {
  99. struct rtattr **rta = arg;
  100. struct rtmsg *rtm = NLMSG_DATA(nlh);
  101. struct fib_rule *r;
  102. struct hlist_node *node;
  103. int err = -ESRCH;
  104. hlist_for_each_entry(r, node, &fib_rules, hlist) {
  105. if ((!rta[RTA_SRC-1] || memcmp(RTA_DATA(rta[RTA_SRC-1]), &r->r_src, 4) == 0) &&
  106. rtm->rtm_src_len == r->r_src_len &&
  107. rtm->rtm_dst_len == r->r_dst_len &&
  108. (!rta[RTA_DST-1] || memcmp(RTA_DATA(rta[RTA_DST-1]), &r->r_dst, 4) == 0) &&
  109. rtm->rtm_tos == r->r_tos &&
  110. #ifdef CONFIG_IP_ROUTE_FWMARK
  111. (!rta[RTA_PROTOINFO-1] || memcmp(RTA_DATA(rta[RTA_PROTOINFO-1]), &r->r_fwmark, 4) == 0) &&
  112. #endif
  113. (!rtm->rtm_type || rtm->rtm_type == r->r_action) &&
  114. (!rta[RTA_PRIORITY-1] || memcmp(RTA_DATA(rta[RTA_PRIORITY-1]), &r->r_preference, 4) == 0) &&
  115. (!rta[RTA_IIF-1] || rtattr_strcmp(rta[RTA_IIF-1], r->r_ifname) == 0) &&
  116. (!rtm->rtm_table || (r && rtm->rtm_table == r->r_table))) {
  117. err = -EPERM;
  118. if (r == &local_rule)
  119. break;
  120. hlist_del_rcu(&r->hlist);
  121. r->r_dead = 1;
  122. rtmsg_rule(RTM_DELRULE, r);
  123. fib_rule_put(r);
  124. err = 0;
  125. break;
  126. }
  127. }
  128. return err;
  129. }
  130. /* Allocate new unique table id */
  131. static struct fib_table *fib_empty_table(void)
  132. {
  133. int id;
  134. for (id = 1; id <= RT_TABLE_MAX; id++)
  135. if (fib_tables[id] == NULL)
  136. return __fib_new_table(id);
  137. return NULL;
  138. }
  139. static inline void fib_rule_put_rcu(struct rcu_head *head)
  140. {
  141. struct fib_rule *r = container_of(head, struct fib_rule, rcu);
  142. kfree(r);
  143. }
  144. void fib_rule_put(struct fib_rule *r)
  145. {
  146. if (atomic_dec_and_test(&r->r_clntref)) {
  147. if (r->r_dead)
  148. call_rcu(&r->rcu, fib_rule_put_rcu);
  149. else
  150. printk("Freeing alive rule %p\n", r);
  151. }
  152. }
  153. /* writer func called from netlink -- rtnl_sem hold*/
/*
 * RTM_NEWRULE handler: build a fib_rule from the netlink request and
 * insert it into the global list at the position dictated by its
 * preference.  Called with the rtnl semaphore held; readers walk the
 * list under RCU, so insertion uses the _rcu list primitives.
 *
 * Returns 0 on success, -EINVAL on malformed attributes, -ENOBUFS when
 * no free table id exists, -ENOMEM on allocation failure.
 */
int inet_rtm_newrule(struct sk_buff *skb, struct nlmsghdr* nlh, void *arg)
{
	struct rtattr **rta = arg;
	struct rtmsg *rtm = NLMSG_DATA(nlh);
	struct fib_rule *r, *new_r, *last = NULL;
	struct hlist_node *node = NULL;
	unsigned char table_id;

	/* IPv4 prefix lengths are at most 32; TOS must fit the TOS mask. */
	if (rtm->rtm_src_len > 32 || rtm->rtm_dst_len > 32 ||
	    (rtm->rtm_tos & ~IPTOS_TOS_MASK))
		return -EINVAL;
	if (rta[RTA_IIF-1] && RTA_PAYLOAD(rta[RTA_IIF-1]) > IFNAMSIZ)
		return -EINVAL;

	table_id = rtm->rtm_table;
	if (table_id == RT_TABLE_UNSPEC) {
		struct fib_table *table;
		/* No table requested: unicast rules get a fresh, unused one. */
		if (rtm->rtm_type == RTN_UNICAST) {
			if ((table = fib_empty_table()) == NULL)
				return -ENOBUFS;
			table_id = table->tb_id;
		}
	}

	/* kzalloc: attributes left unset stay zero and act as wildcards. */
	new_r = kzalloc(sizeof(*new_r), GFP_KERNEL);
	if (!new_r)
		return -ENOMEM;
	if (rta[RTA_SRC-1])
		memcpy(&new_r->r_src, RTA_DATA(rta[RTA_SRC-1]), 4);
	if (rta[RTA_DST-1])
		memcpy(&new_r->r_dst, RTA_DATA(rta[RTA_DST-1]), 4);
	if (rta[RTA_GATEWAY-1])
		memcpy(&new_r->r_srcmap, RTA_DATA(rta[RTA_GATEWAY-1]), 4);
	new_r->r_src_len = rtm->rtm_src_len;
	new_r->r_dst_len = rtm->rtm_dst_len;
	new_r->r_srcmask = inet_make_mask(rtm->rtm_src_len);
	new_r->r_dstmask = inet_make_mask(rtm->rtm_dst_len);
	new_r->r_tos = rtm->rtm_tos;
#ifdef CONFIG_IP_ROUTE_FWMARK
	if (rta[RTA_PROTOINFO-1])
		memcpy(&new_r->r_fwmark, RTA_DATA(rta[RTA_PROTOINFO-1]), 4);
#endif
	new_r->r_action = rtm->rtm_type;
	new_r->r_flags = rtm->rtm_flags;
	if (rta[RTA_PRIORITY-1])
		memcpy(&new_r->r_preference, RTA_DATA(rta[RTA_PRIORITY-1]), 4);
	new_r->r_table = table_id;
	if (rta[RTA_IIF-1]) {
		struct net_device *dev;
		rtattr_strlcpy(new_r->r_ifname, rta[RTA_IIF-1], IFNAMSIZ);
		new_r->r_ifindex = -1;	/* device may not exist (yet) */
		dev = __dev_get_by_name(new_r->r_ifname);
		if (dev)
			new_r->r_ifindex = dev->ifindex;
	}
#ifdef CONFIG_NET_CLS_ROUTE
	if (rta[RTA_FLOW-1])
		memcpy(&new_r->r_tclassid, RTA_DATA(rta[RTA_FLOW-1]), 4);
#endif

	/* No explicit priority: derive one just below the second entry's
	 * preference, so the rule lands ahead of existing user rules. */
	r = container_of(fib_rules.first, struct fib_rule, hlist);
	if (!new_r->r_preference) {
		if (r && r->hlist.next != NULL) {
			r = container_of(r->hlist.next, struct fib_rule, hlist);
			if (r->r_preference)
				new_r->r_preference = r->r_preference - 1;
		}
	}

	/* Find the last rule whose preference does not exceed the new
	 * one; the new rule is linked immediately after it. */
	hlist_for_each_entry(r, node, &fib_rules, hlist) {
		if (r->r_preference > new_r->r_preference)
			break;
		last = r;
	}
	atomic_inc(&new_r->r_clntref);	/* reference now held by the list */
	if (last)
		hlist_add_after_rcu(&last->hlist, &new_r->hlist);
	else
		hlist_add_before_rcu(&new_r->hlist, &r->hlist);
	rtmsg_rule(RTM_NEWRULE, new_r);
	return 0;
}
  231. #ifdef CONFIG_NET_CLS_ROUTE
  232. u32 fib_rules_tclass(struct fib_result *res)
  233. {
  234. if (res->r)
  235. return res->r->r_tclassid;
  236. return 0;
  237. }
  238. #endif
  239. /* callers should hold rtnl semaphore */
  240. static void fib_rules_detach(struct net_device *dev)
  241. {
  242. struct hlist_node *node;
  243. struct fib_rule *r;
  244. hlist_for_each_entry(r, node, &fib_rules, hlist) {
  245. if (r->r_ifindex == dev->ifindex)
  246. r->r_ifindex = -1;
  247. }
  248. }
  249. /* callers should hold rtnl semaphore */
  250. static void fib_rules_attach(struct net_device *dev)
  251. {
  252. struct hlist_node *node;
  253. struct fib_rule *r;
  254. hlist_for_each_entry(r, node, &fib_rules, hlist) {
  255. if (r->r_ifindex == -1 && strcmp(dev->name, r->r_ifname) == 0)
  256. r->r_ifindex = dev->ifindex;
  257. }
  258. }
/*
 * Policy-routing lookup: walk the rules in priority order under RCU;
 * for each matching unicast rule, consult its table until one yields a
 * route.  On success the matched rule is stored in res->r with an
 * extra reference taken (released later via fib_rule_put).
 *
 * Returns 0 on success, or a negative errno from a terminating rule
 * action (-ENETUNREACH, -EINVAL, -EACCES) or from the table lookup.
 */
int fib_lookup(const struct flowi *flp, struct fib_result *res)
{
	int err;
	struct fib_rule *r, *policy;
	struct fib_table *tb;
	struct hlist_node *node;
	u32 daddr = flp->fl4_dst;
	u32 saddr = flp->fl4_src;

	FRprintk("Lookup: %u.%u.%u.%u <- %u.%u.%u.%u ",
		NIPQUAD(flp->fl4_dst), NIPQUAD(flp->fl4_src));

	rcu_read_lock();
	hlist_for_each_entry_rcu(r, node, &fib_rules, hlist) {
		/* Zero-valued selectors match anything. */
		if (((saddr^r->r_src) & r->r_srcmask) ||
		    ((daddr^r->r_dst) & r->r_dstmask) ||
		    (r->r_tos && r->r_tos != flp->fl4_tos) ||
#ifdef CONFIG_IP_ROUTE_FWMARK
		    (r->r_fwmark && r->r_fwmark != flp->fl4_fwmark) ||
#endif
		    (r->r_ifindex && r->r_ifindex != flp->iif))
			continue;

		FRprintk("tb %d r %d ", r->r_table, r->r_action);
		switch (r->r_action) {
		case RTN_UNICAST:
			/* Only this case proceeds to the table lookup. */
			policy = r;
			break;
		case RTN_UNREACHABLE:
			rcu_read_unlock();
			return -ENETUNREACH;
		default:	/* unknown actions behave like blackhole */
		case RTN_BLACKHOLE:
			rcu_read_unlock();
			return -EINVAL;
		case RTN_PROHIBIT:
			rcu_read_unlock();
			return -EACCES;
		}

		if ((tb = fib_get_table(r->r_table)) == NULL)
			continue;
		err = tb->tb_lookup(tb, flp, res);
		if (err == 0) {
			res->r = policy;
			if (policy)
				atomic_inc(&policy->r_clntref);
			rcu_read_unlock();
			return 0;
		}
		/* -EAGAIN means "try the next rule"; other errors are fatal. */
		if (err < 0 && err != -EAGAIN) {
			rcu_read_unlock();
			return err;
		}
	}
	FRprintk("FAILURE\n");
	rcu_read_unlock();
	return -ENETUNREACH;
}
  314. void fib_select_default(const struct flowi *flp, struct fib_result *res)
  315. {
  316. if (res->r && res->r->r_action == RTN_UNICAST &&
  317. FIB_RES_GW(*res) && FIB_RES_NH(*res).nh_scope == RT_SCOPE_LINK) {
  318. struct fib_table *tb;
  319. if ((tb = fib_get_table(res->r->r_table)) != NULL)
  320. tb->tb_select_default(tb, flp, res);
  321. }
  322. }
  323. static int fib_rules_event(struct notifier_block *this, unsigned long event, void *ptr)
  324. {
  325. struct net_device *dev = ptr;
  326. if (event == NETDEV_UNREGISTER)
  327. fib_rules_detach(dev);
  328. else if (event == NETDEV_REGISTER)
  329. fib_rules_attach(dev);
  330. return NOTIFY_DONE;
  331. }
/* Registered at init time to track device (un)registration. */
static struct notifier_block fib_rules_notifier = {
	.notifier_call =fib_rules_event,
};
/*
 * Encode rule @r as one RTM netlink message appended to @skb.
 * Returns skb->len on success, -1 when the skb runs out of room —
 * NLMSG_NEW and RTA_PUT jump to the failure labels in that case and
 * any partially built message is trimmed away.
 */
static __inline__ int inet_fill_rule(struct sk_buff *skb,
				     struct fib_rule *r,
				     u32 pid, u32 seq, int event,
				     unsigned int flags)
{
	struct rtmsg *rtm;
	struct nlmsghdr *nlh;
	unsigned char *b = skb->tail;	/* rollback point on failure */

	nlh = NLMSG_NEW(skb, pid, seq, event, sizeof(*rtm), flags);
	rtm = NLMSG_DATA(nlh);
	rtm->rtm_family = AF_INET;
	rtm->rtm_dst_len = r->r_dst_len;
	rtm->rtm_src_len = r->r_src_len;
	rtm->rtm_tos = r->r_tos;
#ifdef CONFIG_IP_ROUTE_FWMARK
	if (r->r_fwmark)
		RTA_PUT(skb, RTA_PROTOINFO, 4, &r->r_fwmark);
#endif
	rtm->rtm_table = r->r_table;
	rtm->rtm_protocol = 0;
	rtm->rtm_scope = 0;
	rtm->rtm_type = r->r_action;
	rtm->rtm_flags = r->r_flags;

	/* Only attributes with non-default values are emitted. */
	if (r->r_dst_len)
		RTA_PUT(skb, RTA_DST, 4, &r->r_dst);
	if (r->r_src_len)
		RTA_PUT(skb, RTA_SRC, 4, &r->r_src);
	if (r->r_ifname[0])
		RTA_PUT(skb, RTA_IIF, IFNAMSIZ, &r->r_ifname);
	if (r->r_preference)
		RTA_PUT(skb, RTA_PRIORITY, 4, &r->r_preference);
	if (r->r_srcmap)
		RTA_PUT(skb, RTA_GATEWAY, 4, &r->r_srcmap);
#ifdef CONFIG_NET_CLS_ROUTE
	if (r->r_tclassid)
		RTA_PUT(skb, RTA_FLOW, 4, &r->r_tclassid);
#endif
	nlh->nlmsg_len = skb->tail - b;
	return skb->len;

nlmsg_failure:
rtattr_failure:
	/* Undo the partially built message. */
	skb_trim(skb, b - skb->data);
	return -1;
}
  379. /* callers should hold rtnl semaphore */
  380. static void rtmsg_rule(int event, struct fib_rule *r)
  381. {
  382. int size = NLMSG_SPACE(sizeof(struct rtmsg) + 128);
  383. struct sk_buff *skb = alloc_skb(size, GFP_KERNEL);
  384. if (!skb)
  385. netlink_set_err(rtnl, 0, RTNLGRP_IPV4_RULE, ENOBUFS);
  386. else if (inet_fill_rule(skb, r, 0, 0, event, 0) < 0) {
  387. kfree_skb(skb);
  388. netlink_set_err(rtnl, 0, RTNLGRP_IPV4_RULE, EINVAL);
  389. } else {
  390. netlink_broadcast(rtnl, skb, 0, RTNLGRP_IPV4_RULE, GFP_KERNEL);
  391. }
  392. }
  393. int inet_dump_rules(struct sk_buff *skb, struct netlink_callback *cb)
  394. {
  395. int idx = 0;
  396. int s_idx = cb->args[0];
  397. struct fib_rule *r;
  398. struct hlist_node *node;
  399. rcu_read_lock();
  400. hlist_for_each_entry(r, node, &fib_rules, hlist) {
  401. if (idx < s_idx)
  402. goto next;
  403. if (inet_fill_rule(skb, r, NETLINK_CB(cb->skb).pid,
  404. cb->nlh->nlmsg_seq,
  405. RTM_NEWRULE, NLM_F_MULTI) < 0)
  406. break;
  407. next:
  408. idx++;
  409. }
  410. rcu_read_unlock();
  411. cb->args[0] = idx;
  412. return skb->len;
  413. }
  414. void __init fib_rules_init(void)
  415. {
  416. INIT_HLIST_HEAD(&fib_rules);
  417. hlist_add_head(&local_rule.hlist, &fib_rules);
  418. hlist_add_after(&local_rule.hlist, &main_rule.hlist);
  419. hlist_add_after(&main_rule.hlist, &default_rule.hlist);
  420. register_netdevice_notifier(&fib_rules_notifier);
  421. }