/*
 * DECnet       An implementation of the DECnet protocol suite for the LINUX
 *              operating system.  DECnet is implemented using the BSD Socket
 *              interface as the means of communication with the user level.
 *
 *              DECnet Routing Forwarding Information Base (Rules)
 *
 * Author:      Steve Whitehouse <SteveW@ACM.org>
 *              Mostly copied from Alexey Kuznetsov's ipv4/fib_rules.c
 *
 *
 * Changes:
 *
 */
#include <linux/config.h>
#include <linux/string.h>
#include <linux/net.h>
#include <linux/socket.h>
#include <linux/sockios.h>
#include <linux/init.h>
#include <linux/skbuff.h>
#include <linux/netlink.h>
#include <linux/rtnetlink.h>
#include <linux/proc_fs.h>
#include <linux/netdevice.h>
#include <linux/timer.h>
#include <linux/spinlock.h>
#include <linux/in_route.h>
#include <linux/list.h>
#include <linux/rcupdate.h>
#include <asm/atomic.h>
#include <asm/uaccess.h>
#include <net/neighbour.h>
#include <net/dst.h>
#include <net/flow.h>
#include <net/dn.h>
#include <net/dn_fib.h>
#include <net/dn_neigh.h>
#include <net/dn_dev.h>
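
/*
 * A policy rule.  Packets are matched against the src/dst address and
 * mask pairs, the optional firewall mark and the incoming interface;
 * a match selects the routing table r_table and the action r_action.
 * Rules live on the dn_fib_rules list, sorted by ascending
 * r_preference, are reference counted via r_clntref and are read
 * locklessly under RCU.
 */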
struct dn_fib_rule
{
	struct hlist_node	r_hlist;
	atomic_t		r_clntref;
	u32			r_preference;
	unsigned char		r_table;
	unsigned char		r_action;
	unsigned char		r_dst_len;
	unsigned char		r_src_len;
	__le16			r_src;
	__le16			r_srcmask;
	__le16			r_dst;
	__le16			r_dstmask;
	__le16			r_srcmap;
	u8			r_flags;
#ifdef CONFIG_DECNET_ROUTE_FWMARK
	u32			r_fwmark;
#endif
	int			r_ifindex;
	char			r_ifname[IFNAMSIZ];
	int			r_dead;
	struct rcu_head		rcu;
};
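
/*
 * The statically allocated catch-all rule: lowest priority
 * (preference 0x7fff), directing every lookup at the main table.
 * dn_fib_rtm_delrule() refuses to delete it.
 */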
static struct dn_fib_rule default_rule = {
	.r_clntref =	ATOMIC_INIT(2),
	.r_preference =	0x7fff,
	.r_table =	RT_TABLE_MAIN,
	.r_action =	RTN_UNICAST
};

static struct hlist_head dn_fib_rules;
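
/*
 * RTM_DELRULE handler: find the first rule matching every attribute
 * supplied in the request, unlink it from the list and drop the
 * list's reference.  The default rule may not be deleted (-EPERM);
 * -ESRCH is returned when nothing matches.
 */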
int dn_fib_rtm_delrule(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg)
{
	struct rtattr **rta = arg;
	struct rtmsg *rtm = NLMSG_DATA(nlh);
	struct dn_fib_rule *r;
	struct hlist_node *node;
	int err = -ESRCH;

	hlist_for_each_entry(r, node, &dn_fib_rules, r_hlist) {
		if ((!rta[RTA_SRC-1] || memcmp(RTA_DATA(rta[RTA_SRC-1]), &r->r_src, 2) == 0) &&
		    rtm->rtm_src_len == r->r_src_len &&
		    rtm->rtm_dst_len == r->r_dst_len &&
		    (!rta[RTA_DST-1] || memcmp(RTA_DATA(rta[RTA_DST-1]), &r->r_dst, 2) == 0) &&
#ifdef CONFIG_DECNET_ROUTE_FWMARK
		    (!rta[RTA_PROTOINFO-1] || memcmp(RTA_DATA(rta[RTA_PROTOINFO-1]), &r->r_fwmark, 4) == 0) &&
#endif
		    (!rtm->rtm_type || rtm->rtm_type == r->r_action) &&
		    (!rta[RTA_PRIORITY-1] || memcmp(RTA_DATA(rta[RTA_PRIORITY-1]), &r->r_preference, 4) == 0) &&
		    (!rta[RTA_IIF-1] || rtattr_strcmp(rta[RTA_IIF-1], r->r_ifname) == 0) &&
		    (!rtm->rtm_table || (r && rtm->rtm_table == r->r_table))) {
			err = -EPERM;
			if (r == &default_rule)
				break;

			hlist_del_rcu(&r->r_hlist);
			r->r_dead = 1;
			dn_fib_rule_put(r);
			err = 0;
			break;
		}
	}

	return err;
}
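
/*
 * Drop a reference to a rule.  An unlinked rule (r_dead set) is freed
 * once the last reference goes, after an RCU grace period so that
 * concurrent lockless readers remain safe.
 */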
static inline void dn_fib_rule_put_rcu(struct rcu_head *head)
{
	struct dn_fib_rule *r = container_of(head, struct dn_fib_rule, rcu);
	kfree(r);
}

void dn_fib_rule_put(struct dn_fib_rule *r)
{
	if (atomic_dec_and_test(&r->r_clntref)) {
		if (r->r_dead)
			call_rcu(&r->rcu, dn_fib_rule_put_rcu);
		else
			printk(KERN_DEBUG "Attempt to free alive dn_fib_rule\n");
	}
}
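
/*
 * RTM_NEWRULE handler: validate the request (DECnet addresses are 16
 * bits wide, so prefix lengths above 16 are rejected), allocate and
 * fill in a new rule, then link it into dn_fib_rules in order of
 * ascending preference.  A rule without an explicit priority defaults
 * to one less than the preference of the current second entry, i.e.
 * it sorts just ahead of it.
 */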
int dn_fib_rtm_newrule(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg)
{
	struct rtattr **rta = arg;
	struct rtmsg *rtm = NLMSG_DATA(nlh);
	struct dn_fib_rule *r, *new_r, *last = NULL;
	struct hlist_node *node = NULL;
	unsigned char table_id;

	if (rtm->rtm_src_len > 16 || rtm->rtm_dst_len > 16)
		return -EINVAL;

	if (rta[RTA_IIF-1] && RTA_PAYLOAD(rta[RTA_IIF-1]) > IFNAMSIZ)
		return -EINVAL;

	if (rtm->rtm_type == RTN_NAT)
		return -EINVAL;

	table_id = rtm->rtm_table;
	if (table_id == RT_TABLE_UNSPEC) {
		struct dn_fib_table *tb;
		if (rtm->rtm_type == RTN_UNICAST) {
			if ((tb = dn_fib_empty_table()) == NULL)
				return -ENOBUFS;
			table_id = tb->n;
		}
	}

	new_r = kmalloc(sizeof(*new_r), GFP_KERNEL);
	if (!new_r)
		return -ENOMEM;
	memset(new_r, 0, sizeof(*new_r));

	if (rta[RTA_SRC-1])
		memcpy(&new_r->r_src, RTA_DATA(rta[RTA_SRC-1]), 2);
	if (rta[RTA_DST-1])
		memcpy(&new_r->r_dst, RTA_DATA(rta[RTA_DST-1]), 2);
	if (rta[RTA_GATEWAY-1])
		memcpy(&new_r->r_srcmap, RTA_DATA(rta[RTA_GATEWAY-1]), 2);

	new_r->r_src_len = rtm->rtm_src_len;
	new_r->r_dst_len = rtm->rtm_dst_len;
	new_r->r_srcmask = dnet_make_mask(rtm->rtm_src_len);
	new_r->r_dstmask = dnet_make_mask(rtm->rtm_dst_len);

#ifdef CONFIG_DECNET_ROUTE_FWMARK
	if (rta[RTA_PROTOINFO-1])
		memcpy(&new_r->r_fwmark, RTA_DATA(rta[RTA_PROTOINFO-1]), 4);
#endif

	new_r->r_action = rtm->rtm_type;
	new_r->r_flags = rtm->rtm_flags;
	if (rta[RTA_PRIORITY-1])
		memcpy(&new_r->r_preference, RTA_DATA(rta[RTA_PRIORITY-1]), 4);
	new_r->r_table = table_id;

	if (rta[RTA_IIF-1]) {
		struct net_device *dev;
		rtattr_strlcpy(new_r->r_ifname, rta[RTA_IIF-1], IFNAMSIZ);
		new_r->r_ifindex = -1;
		dev = dev_get_by_name(new_r->r_ifname);
		if (dev) {
			new_r->r_ifindex = dev->ifindex;
			dev_put(dev);
		}
	}

	r = container_of(dn_fib_rules.first, struct dn_fib_rule, r_hlist);
	if (!new_r->r_preference) {
		if (r && r->r_hlist.next != NULL) {
			r = container_of(r->r_hlist.next, struct dn_fib_rule, r_hlist);
			if (r->r_preference)
				new_r->r_preference = r->r_preference - 1;
		}
	}

	hlist_for_each_entry(r, node, &dn_fib_rules, r_hlist) {
		if (r->r_preference > new_r->r_preference)
			break;
		last = r;
	}
	atomic_inc(&new_r->r_clntref);

	if (last)
		hlist_add_after_rcu(&last->r_hlist, &new_r->r_hlist);
	else
		hlist_add_before_rcu(&new_r->r_hlist, &r->r_hlist);

	return 0;
}
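
/*
 * Policy-routing lookup: walk the rule list under RCU, skip rules
 * whose selector does not match the flow, and act on the first match.
 * RTN_UNREACHABLE, RTN_PROHIBIT, RTN_BLACKHOLE (and any unknown
 * action) terminate the lookup with an error; RTN_UNICAST and RTN_NAT
 * consult the rule's table and, on a hit, return the result with a
 * reference taken on the matched rule.  A table miss (-EAGAIN) falls
 * through to the next rule.
 */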
int dn_fib_lookup(const struct flowi *flp, struct dn_fib_res *res)
{
	struct dn_fib_rule *r, *policy;
	struct dn_fib_table *tb;
	__le16 saddr = flp->fld_src;
	__le16 daddr = flp->fld_dst;
	struct hlist_node *node;
	int err;

	rcu_read_lock();

	hlist_for_each_entry_rcu(r, node, &dn_fib_rules, r_hlist) {
		if (((saddr^r->r_src) & r->r_srcmask) ||
		    ((daddr^r->r_dst) & r->r_dstmask) ||
#ifdef CONFIG_DECNET_ROUTE_FWMARK
		    (r->r_fwmark && r->r_fwmark != flp->fld_fwmark) ||
#endif
		    (r->r_ifindex && r->r_ifindex != flp->iif))
			continue;

		switch(r->r_action) {
		case RTN_UNICAST:
		case RTN_NAT:
			policy = r;
			break;
		case RTN_UNREACHABLE:
			rcu_read_unlock();
			return -ENETUNREACH;
		default:
		case RTN_BLACKHOLE:
			rcu_read_unlock();
			return -EINVAL;
		case RTN_PROHIBIT:
			rcu_read_unlock();
			return -EACCES;
		}

		if ((tb = dn_fib_get_table(r->r_table, 0)) == NULL)
			continue;
		err = tb->lookup(tb, flp, res);
		if (err == 0) {
			res->r = policy;
			if (policy)
				atomic_inc(&policy->r_clntref);
			rcu_read_unlock();
			return 0;
		}
		if (err < 0 && err != -EAGAIN) {
			rcu_read_unlock();
			return err;
		}
	}

	rcu_read_unlock();
	return -ESRCH;
}
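
/*
 * Classify a DECnet address by looking it up in the local table;
 * anything not found there is treated as RTN_UNICAST.
 */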
unsigned dnet_addr_type(__le16 addr)
{
	struct flowi fl = { .nl_u = { .dn_u = { .daddr = addr } } };
	struct dn_fib_res res;
	unsigned ret = RTN_UNICAST;
	struct dn_fib_table *tb = dn_fib_tables[RT_TABLE_LOCAL];

	res.r = NULL;

	if (tb) {
		if (!tb->lookup(tb, &fl, &res)) {
			ret = res.type;
			dn_fib_res_put(&res);
		}
	}
	return ret;
}
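
/*
 * Apply the source-address policy of a matched NAT rule: if r_srcmap
 * is itself a NAT address, the masked part of saddr is rewritten
 * (SNAT); if it is a local address, or zero, the whole source address
 * is replaced (masquerade).
 */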
__le16 dn_fib_rules_policy(__le16 saddr, struct dn_fib_res *res, unsigned *flags)
{
	struct dn_fib_rule *r = res->r;

	if (r->r_action == RTN_NAT) {
		int addrtype = dnet_addr_type(r->r_srcmap);

		if (addrtype == RTN_NAT) {
			saddr = (saddr&~r->r_srcmask)|r->r_srcmap;
			*flags |= RTCF_SNAT;
		} else if (addrtype == RTN_LOCAL || r->r_srcmap == 0) {
			saddr = r->r_srcmap;
			*flags |= RTCF_MASQ;
		}
	}
	return saddr;
}
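
/*
 * Keep r_ifindex in step with the set of network devices: forget the
 * index when a device goes away, rebind by name when one appears.
 */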
static void dn_fib_rules_detach(struct net_device *dev)
{
	struct hlist_node *node;
	struct dn_fib_rule *r;

	hlist_for_each_entry(r, node, &dn_fib_rules, r_hlist) {
		if (r->r_ifindex == dev->ifindex)
			r->r_ifindex = -1;
	}
}

static void dn_fib_rules_attach(struct net_device *dev)
{
	struct hlist_node *node;
	struct dn_fib_rule *r;

	hlist_for_each_entry(r, node, &dn_fib_rules, r_hlist) {
		if (r->r_ifindex == -1 && strcmp(dev->name, r->r_ifname) == 0)
			r->r_ifindex = dev->ifindex;
	}
}
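
/*
 * Netdevice notifier: detach rules and bring routes down when a
 * device unregisters; attach rules and bring routes up when one
 * registers.
 */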
static int dn_fib_rules_event(struct notifier_block *this, unsigned long event, void *ptr)
{
	struct net_device *dev = ptr;

	switch(event) {
	case NETDEV_UNREGISTER:
		dn_fib_rules_detach(dev);
		dn_fib_sync_down(0, dev, 1);
		break;
	case NETDEV_REGISTER:
		dn_fib_rules_attach(dev);
		dn_fib_sync_up(dev);
		break;
	}
	return NOTIFY_DONE;
}
static struct notifier_block dn_fib_rules_notifier = {
	.notifier_call = dn_fib_rules_event,
};
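
/*
 * Encode one rule as an RTM_NEWRULE message for a netlink dump.
 * Returns the new skb length on success, or -1 when the message does
 * not fit, in which case the partial message is trimmed off again.
 */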
static int dn_fib_fill_rule(struct sk_buff *skb, struct dn_fib_rule *r,
			    struct netlink_callback *cb, unsigned int flags)
{
	struct rtmsg *rtm;
	struct nlmsghdr *nlh;
	unsigned char *b = skb->tail;

	nlh = NLMSG_NEW_ANSWER(skb, cb, RTM_NEWRULE, sizeof(*rtm), flags);
	rtm = NLMSG_DATA(nlh);
	rtm->rtm_family = AF_DECnet;
	rtm->rtm_dst_len = r->r_dst_len;
	rtm->rtm_src_len = r->r_src_len;
	rtm->rtm_tos = 0;
#ifdef CONFIG_DECNET_ROUTE_FWMARK
	if (r->r_fwmark)
		RTA_PUT(skb, RTA_PROTOINFO, 4, &r->r_fwmark);
#endif
	rtm->rtm_table = r->r_table;
	rtm->rtm_protocol = 0;
	rtm->rtm_scope = 0;
	rtm->rtm_type = r->r_action;
	rtm->rtm_flags = r->r_flags;

	if (r->r_dst_len)
		RTA_PUT(skb, RTA_DST, 2, &r->r_dst);
	if (r->r_src_len)
		RTA_PUT(skb, RTA_SRC, 2, &r->r_src);
	if (r->r_ifname[0])
		RTA_PUT(skb, RTA_IIF, IFNAMSIZ, &r->r_ifname);
	if (r->r_preference)
		RTA_PUT(skb, RTA_PRIORITY, 4, &r->r_preference);
	if (r->r_srcmap)
		RTA_PUT(skb, RTA_GATEWAY, 2, &r->r_srcmap);
	nlh->nlmsg_len = skb->tail - b;
	return skb->len;

nlmsg_failure:
rtattr_failure:
	skb_trim(skb, b - skb->data);
	return -1;
}
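
/*
 * Dump all rules to userspace.  cb->args[0] carries the index of the
 * next rule to emit so that a dump interrupted by a full skb can be
 * resumed; skipped entries must therefore still advance idx.
 */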
int dn_fib_dump_rules(struct sk_buff *skb, struct netlink_callback *cb)
{
	int idx = 0;
	int s_idx = cb->args[0];
	struct dn_fib_rule *r;
	struct hlist_node *node;

	rcu_read_lock();
	hlist_for_each_entry(r, node, &dn_fib_rules, r_hlist) {
		if (idx < s_idx)
			goto next;
		if (dn_fib_fill_rule(skb, r, cb, NLM_F_MULTI) < 0)
			break;
next:
		idx++;
	}
	rcu_read_unlock();
	cb->args[0] = idx;

	return skb->len;
}
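
/*
 * Install the default rule and register for device events at boot;
 * tear the notifier down again on cleanup.
 */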
void __init dn_fib_rules_init(void)
{
	INIT_HLIST_HEAD(&dn_fib_rules);
	hlist_add_head(&default_rule.r_hlist, &dn_fib_rules);
	register_netdevice_notifier(&dn_fib_rules_notifier);
}

void __exit dn_fib_rules_cleanup(void)
{
	unregister_netdevice_notifier(&dn_fib_rules_notifier);
}