/*
 * br_mdb.c - bridge multicast database (MDB) netlink interface:
 * dump, add and delete of multicast group/port entries via rtnetlink.
 */
  1. #include <linux/err.h>
  2. #include <linux/igmp.h>
  3. #include <linux/kernel.h>
  4. #include <linux/netdevice.h>
  5. #include <linux/rculist.h>
  6. #include <linux/skbuff.h>
  7. #include <linux/if_ether.h>
  8. #include <net/ip.h>
  9. #include <net/netlink.h>
  10. #if IS_ENABLED(CONFIG_IPV6)
  11. #include <net/ipv6.h>
  12. #endif
  13. #include "br_private.h"
  14. static int br_rports_fill_info(struct sk_buff *skb, struct netlink_callback *cb,
  15. struct net_device *dev)
  16. {
  17. struct net_bridge *br = netdev_priv(dev);
  18. struct net_bridge_port *p;
  19. struct nlattr *nest;
  20. if (!br->multicast_router || hlist_empty(&br->router_list))
  21. return 0;
  22. nest = nla_nest_start(skb, MDBA_ROUTER);
  23. if (nest == NULL)
  24. return -EMSGSIZE;
  25. hlist_for_each_entry_rcu(p, &br->router_list, rlist) {
  26. if (p && nla_put_u32(skb, MDBA_ROUTER_PORT, p->dev->ifindex))
  27. goto fail;
  28. }
  29. nla_nest_end(skb, nest);
  30. return 0;
  31. fail:
  32. nla_nest_cancel(skb, nest);
  33. return -EMSGSIZE;
  34. }
  35. static int br_mdb_fill_info(struct sk_buff *skb, struct netlink_callback *cb,
  36. struct net_device *dev)
  37. {
  38. struct net_bridge *br = netdev_priv(dev);
  39. struct net_bridge_mdb_htable *mdb;
  40. struct nlattr *nest, *nest2;
  41. int i, err = 0;
  42. int idx = 0, s_idx = cb->args[1];
  43. if (br->multicast_disabled)
  44. return 0;
  45. mdb = rcu_dereference(br->mdb);
  46. if (!mdb)
  47. return 0;
  48. nest = nla_nest_start(skb, MDBA_MDB);
  49. if (nest == NULL)
  50. return -EMSGSIZE;
  51. for (i = 0; i < mdb->max; i++) {
  52. struct net_bridge_mdb_entry *mp;
  53. struct net_bridge_port_group *p, **pp;
  54. struct net_bridge_port *port;
  55. hlist_for_each_entry_rcu(mp, &mdb->mhash[i], hlist[mdb->ver]) {
  56. if (idx < s_idx)
  57. goto skip;
  58. nest2 = nla_nest_start(skb, MDBA_MDB_ENTRY);
  59. if (nest2 == NULL) {
  60. err = -EMSGSIZE;
  61. goto out;
  62. }
  63. for (pp = &mp->ports;
  64. (p = rcu_dereference(*pp)) != NULL;
  65. pp = &p->next) {
  66. port = p->port;
  67. if (port) {
  68. struct br_mdb_entry e;
  69. e.ifindex = port->dev->ifindex;
  70. e.state = p->state;
  71. if (p->addr.proto == htons(ETH_P_IP))
  72. e.addr.u.ip4 = p->addr.u.ip4;
  73. #if IS_ENABLED(CONFIG_IPV6)
  74. if (p->addr.proto == htons(ETH_P_IPV6))
  75. e.addr.u.ip6 = p->addr.u.ip6;
  76. #endif
  77. e.addr.proto = p->addr.proto;
  78. if (nla_put(skb, MDBA_MDB_ENTRY_INFO, sizeof(e), &e)) {
  79. nla_nest_cancel(skb, nest2);
  80. err = -EMSGSIZE;
  81. goto out;
  82. }
  83. }
  84. }
  85. nla_nest_end(skb, nest2);
  86. skip:
  87. idx++;
  88. }
  89. }
  90. out:
  91. cb->args[1] = idx;
  92. nla_nest_end(skb, nest);
  93. return err;
  94. }
  95. static int br_mdb_dump(struct sk_buff *skb, struct netlink_callback *cb)
  96. {
  97. struct net_device *dev;
  98. struct net *net = sock_net(skb->sk);
  99. struct nlmsghdr *nlh = NULL;
  100. int idx = 0, s_idx;
  101. s_idx = cb->args[0];
  102. rcu_read_lock();
  103. /* In theory this could be wrapped to 0... */
  104. cb->seq = net->dev_base_seq + br_mdb_rehash_seq;
  105. for_each_netdev_rcu(net, dev) {
  106. if (dev->priv_flags & IFF_EBRIDGE) {
  107. struct br_port_msg *bpm;
  108. if (idx < s_idx)
  109. goto skip;
  110. nlh = nlmsg_put(skb, NETLINK_CB(cb->skb).portid,
  111. cb->nlh->nlmsg_seq, RTM_GETMDB,
  112. sizeof(*bpm), NLM_F_MULTI);
  113. if (nlh == NULL)
  114. break;
  115. bpm = nlmsg_data(nlh);
  116. bpm->ifindex = dev->ifindex;
  117. if (br_mdb_fill_info(skb, cb, dev) < 0)
  118. goto out;
  119. if (br_rports_fill_info(skb, cb, dev) < 0)
  120. goto out;
  121. cb->args[1] = 0;
  122. nlmsg_end(skb, nlh);
  123. skip:
  124. idx++;
  125. }
  126. }
  127. out:
  128. if (nlh)
  129. nlmsg_end(skb, nlh);
  130. rcu_read_unlock();
  131. cb->args[0] = idx;
  132. return skb->len;
  133. }
  134. static int nlmsg_populate_mdb_fill(struct sk_buff *skb,
  135. struct net_device *dev,
  136. struct br_mdb_entry *entry, u32 pid,
  137. u32 seq, int type, unsigned int flags)
  138. {
  139. struct nlmsghdr *nlh;
  140. struct br_port_msg *bpm;
  141. struct nlattr *nest, *nest2;
  142. nlh = nlmsg_put(skb, pid, seq, type, sizeof(*bpm), NLM_F_MULTI);
  143. if (!nlh)
  144. return -EMSGSIZE;
  145. bpm = nlmsg_data(nlh);
  146. bpm->family = AF_BRIDGE;
  147. bpm->ifindex = dev->ifindex;
  148. nest = nla_nest_start(skb, MDBA_MDB);
  149. if (nest == NULL)
  150. goto cancel;
  151. nest2 = nla_nest_start(skb, MDBA_MDB_ENTRY);
  152. if (nest2 == NULL)
  153. goto end;
  154. if (nla_put(skb, MDBA_MDB_ENTRY_INFO, sizeof(*entry), entry))
  155. goto end;
  156. nla_nest_end(skb, nest2);
  157. nla_nest_end(skb, nest);
  158. return nlmsg_end(skb, nlh);
  159. end:
  160. nla_nest_end(skb, nest);
  161. cancel:
  162. nlmsg_cancel(skb, nlh);
  163. return -EMSGSIZE;
  164. }
  165. static inline size_t rtnl_mdb_nlmsg_size(void)
  166. {
  167. return NLMSG_ALIGN(sizeof(struct br_port_msg))
  168. + nla_total_size(sizeof(struct br_mdb_entry));
  169. }
  170. static void __br_mdb_notify(struct net_device *dev, struct br_mdb_entry *entry,
  171. int type)
  172. {
  173. struct net *net = dev_net(dev);
  174. struct sk_buff *skb;
  175. int err = -ENOBUFS;
  176. skb = nlmsg_new(rtnl_mdb_nlmsg_size(), GFP_ATOMIC);
  177. if (!skb)
  178. goto errout;
  179. err = nlmsg_populate_mdb_fill(skb, dev, entry, 0, 0, type, NTF_SELF);
  180. if (err < 0) {
  181. kfree_skb(skb);
  182. goto errout;
  183. }
  184. rtnl_notify(skb, net, 0, RTNLGRP_MDB, NULL, GFP_ATOMIC);
  185. return;
  186. errout:
  187. rtnl_set_sk_err(net, RTNLGRP_MDB, err);
  188. }
  189. void br_mdb_notify(struct net_device *dev, struct net_bridge_port *port,
  190. struct br_ip *group, int type)
  191. {
  192. struct br_mdb_entry entry;
  193. entry.ifindex = port->dev->ifindex;
  194. entry.addr.proto = group->proto;
  195. entry.addr.u.ip4 = group->u.ip4;
  196. #if IS_ENABLED(CONFIG_IPV6)
  197. entry.addr.u.ip6 = group->u.ip6;
  198. #endif
  199. __br_mdb_notify(dev, &entry, type);
  200. }
/* Validate a userspace-supplied MDB entry before acting on it: the
 * target port ifindex must be set, the group address must be a
 * manageable IPv4 or IPv6 multicast address, and the state must be one
 * of the two supported values.
 */
static bool is_valid_mdb_entry(struct br_mdb_entry *entry)
{
	if (entry->ifindex == 0)
		return false;
	if (entry->addr.proto == htons(ETH_P_IP)) {
		if (!ipv4_is_multicast(entry->addr.u.ip4))
			return false;
		/* 224.0.0.0/24 link-local groups are always flooded and
		 * cannot be managed as MDB entries. */
		if (ipv4_is_local_multicast(entry->addr.u.ip4))
			return false;
#if IS_ENABLED(CONFIG_IPV6)
	} else if (entry->addr.proto == htons(ETH_P_IPV6)) {
		if (!ipv6_is_transient_multicast(&entry->addr.u.ip6))
			return false;
#endif
	} else
		return false;
	if (entry->state != MDB_PERMANENT && entry->state != MDB_TEMPORARY)
		return false;
	return true;
}
/* Parse and validate an RTM_NEWMDB/RTM_DELMDB request.
 *
 * On success *pdev is the bridge device and *pentry points at the entry
 * attribute payload inside the request skb (no copy is made, so the
 * pointer is only valid while the skb is).  Returns 0 or a negative
 * errno.  Uses __dev_get_by_index (no refcount), so the caller must
 * hold RTNL.
 */
static int br_mdb_parse(struct sk_buff *skb, struct nlmsghdr *nlh,
			struct net_device **pdev, struct br_mdb_entry **pentry)
{
	struct net *net = sock_net(skb->sk);
	struct br_mdb_entry *entry;
	struct br_port_msg *bpm;
	struct nlattr *tb[MDBA_SET_ENTRY_MAX+1];
	struct net_device *dev;
	int err;

	err = nlmsg_parse(nlh, sizeof(*bpm), tb, MDBA_SET_ENTRY, NULL);
	if (err < 0)
		return err;

	bpm = nlmsg_data(nlh);
	if (bpm->ifindex == 0) {
		pr_info("PF_BRIDGE: br_mdb_parse() with invalid ifindex\n");
		return -EINVAL;
	}

	dev = __dev_get_by_index(net, bpm->ifindex);
	if (dev == NULL) {
		pr_info("PF_BRIDGE: br_mdb_parse() with unknown ifindex\n");
		return -ENODEV;
	}

	/* The message's ifindex must name a bridge, not a port. */
	if (!(dev->priv_flags & IFF_EBRIDGE)) {
		pr_info("PF_BRIDGE: br_mdb_parse() with non-bridge\n");
		return -EOPNOTSUPP;
	}

	*pdev = dev;

	if (!tb[MDBA_SET_ENTRY] ||
	    nla_len(tb[MDBA_SET_ENTRY]) != sizeof(struct br_mdb_entry)) {
		pr_info("PF_BRIDGE: br_mdb_parse() with invalid attr\n");
		return -EINVAL;
	}

	entry = nla_data(tb[MDBA_SET_ENTRY]);
	if (!is_valid_mdb_entry(entry)) {
		pr_info("PF_BRIDGE: br_mdb_parse() with invalid entry\n");
		return -EINVAL;
	}

	*pentry = entry;
	return 0;
}
/* Add @port as a member of @group in the bridge's MDB and send an
 * RTM_NEWMDB notification.  Must be called with br->multicast_lock
 * held.  Returns 0 on success, -EEXIST if the port already belongs to
 * the group, -ENOMEM on allocation failure, or the error from
 * br_multicast_new_group().
 */
static int br_mdb_add_group(struct net_bridge *br, struct net_bridge_port *port,
			    struct br_ip *group, unsigned char state)
{
	struct net_bridge_mdb_entry *mp;
	struct net_bridge_port_group *p;
	struct net_bridge_port_group __rcu **pp;
	struct net_bridge_mdb_htable *mdb;
	int err;

	mdb = mlock_dereference(br->mdb, br);
	mp = br_mdb_ip_get(mdb, group);
	if (!mp) {
		/* Group not known yet - create a fresh MDB entry for it. */
		mp = br_multicast_new_group(br, port, group);
		err = PTR_ERR(mp);
		if (IS_ERR(mp))
			return err;
	}

	/* Walk the port-group list looking for a duplicate; the list is
	 * ordered by descending port pointer, so stop at the insertion
	 * point when a smaller pointer is reached. */
	for (pp = &mp->ports;
	     (p = mlock_dereference(*pp, br)) != NULL;
	     pp = &p->next) {
		if (p->port == port)
			return -EEXIST;
		if ((unsigned long)p->port < (unsigned long)port)
			break;
	}

	p = br_multicast_new_port_group(port, group, *pp, state);
	if (unlikely(!p))
		return -ENOMEM;
	/* Publish the fully initialized node; readers traverse under RCU. */
	rcu_assign_pointer(*pp, p);

	br_mdb_notify(br->dev, port, group, RTM_NEWMDB);
	return 0;
}
  292. static int __br_mdb_add(struct net *net, struct net_bridge *br,
  293. struct br_mdb_entry *entry)
  294. {
  295. struct br_ip ip;
  296. struct net_device *dev;
  297. struct net_bridge_port *p;
  298. int ret;
  299. if (!netif_running(br->dev) || br->multicast_disabled)
  300. return -EINVAL;
  301. dev = __dev_get_by_index(net, entry->ifindex);
  302. if (!dev)
  303. return -ENODEV;
  304. p = br_port_get_rtnl(dev);
  305. if (!p || p->br != br || p->state == BR_STATE_DISABLED)
  306. return -EINVAL;
  307. ip.proto = entry->addr.proto;
  308. if (ip.proto == htons(ETH_P_IP))
  309. ip.u.ip4 = entry->addr.u.ip4;
  310. #if IS_ENABLED(CONFIG_IPV6)
  311. else
  312. ip.u.ip6 = entry->addr.u.ip6;
  313. #endif
  314. spin_lock_bh(&br->multicast_lock);
  315. ret = br_mdb_add_group(br, p, &ip, entry->state);
  316. spin_unlock_bh(&br->multicast_lock);
  317. return ret;
  318. }
  319. static int br_mdb_add(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg)
  320. {
  321. struct net *net = sock_net(skb->sk);
  322. struct br_mdb_entry *entry;
  323. struct net_device *dev;
  324. struct net_bridge *br;
  325. int err;
  326. err = br_mdb_parse(skb, nlh, &dev, &entry);
  327. if (err < 0)
  328. return err;
  329. br = netdev_priv(dev);
  330. err = __br_mdb_add(net, br, entry);
  331. if (!err)
  332. __br_mdb_notify(dev, entry, RTM_NEWMDB);
  333. return err;
  334. }
/* Remove the port membership described by @entry from the bridge's MDB.
 * Returns 0 on success, -EINVAL if the entry (or bridge multicast) is
 * not active, or -EBUSY while a querier election is still pending.
 * Takes br->multicast_lock itself; caller holds RTNL.
 */
static int __br_mdb_del(struct net_bridge *br, struct br_mdb_entry *entry)
{
	struct net_bridge_mdb_htable *mdb;
	struct net_bridge_mdb_entry *mp;
	struct net_bridge_port_group *p;
	struct net_bridge_port_group __rcu **pp;
	struct br_ip ip;
	int err = -EINVAL;

	if (!netif_running(br->dev) || br->multicast_disabled)
		return -EINVAL;

	/* Refuse deletions while the querier timer runs. */
	if (timer_pending(&br->multicast_querier_timer))
		return -EBUSY;

	ip.proto = entry->addr.proto;
	if (ip.proto == htons(ETH_P_IP))
		ip.u.ip4 = entry->addr.u.ip4;
#if IS_ENABLED(CONFIG_IPV6)
	else
		ip.u.ip6 = entry->addr.u.ip6;
#endif

	spin_lock_bh(&br->multicast_lock);
	mdb = mlock_dereference(br->mdb, br);

	mp = br_mdb_ip_get(mdb, &ip);
	if (!mp)
		goto unlock;

	for (pp = &mp->ports;
	     (p = mlock_dereference(*pp, br)) != NULL;
	     pp = &p->next) {
		if (!p->port || p->port->dev->ifindex != entry->ifindex)
			continue;

		if (p->port->state == BR_STATE_DISABLED)
			goto unlock;

		/* Unlink for readers first, then tear down; the node is
		 * freed after a grace period via call_rcu_bh(). */
		rcu_assign_pointer(*pp, p->next);
		hlist_del_init(&p->mglist);
		del_timer(&p->timer);
		call_rcu_bh(&p->rcu, br_multicast_free_pg);
		err = 0;

		/* Last member gone: let the group entry expire promptly. */
		if (!mp->ports && !mp->mglist &&
		    netif_running(br->dev))
			mod_timer(&mp->timer, jiffies);
		break;
	}

unlock:
	spin_unlock_bh(&br->multicast_lock);
	return err;
}
  380. static int br_mdb_del(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg)
  381. {
  382. struct net_device *dev;
  383. struct br_mdb_entry *entry;
  384. struct net_bridge *br;
  385. int err;
  386. err = br_mdb_parse(skb, nlh, &dev, &entry);
  387. if (err < 0)
  388. return err;
  389. br = netdev_priv(dev);
  390. err = __br_mdb_del(br, entry);
  391. if (!err)
  392. __br_mdb_notify(dev, entry, RTM_DELMDB);
  393. return err;
  394. }
  395. void br_mdb_init(void)
  396. {
  397. rtnl_register(PF_BRIDGE, RTM_GETMDB, NULL, br_mdb_dump, NULL);
  398. rtnl_register(PF_BRIDGE, RTM_NEWMDB, br_mdb_add, NULL, NULL);
  399. rtnl_register(PF_BRIDGE, RTM_DELMDB, br_mdb_del, NULL, NULL);
  400. }
  401. void br_mdb_uninit(void)
  402. {
  403. rtnl_unregister(PF_BRIDGE, RTM_GETMDB);
  404. rtnl_unregister(PF_BRIDGE, RTM_NEWMDB);
  405. rtnl_unregister(PF_BRIDGE, RTM_DELMDB);
  406. }