hard-interface.c

/*
 * Copyright (C) 2007-2011 B.A.T.M.A.N. contributors:
 *
 * Marek Lindner, Simon Wunderlich
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
 * 02110-1301, USA
 *
 */

#include "main.h"
#include "hard-interface.h"
#include "soft-interface.h"
#include "send.h"
#include "translation-table.h"
#include "routing.h"
#include "bat_sysfs.h"
#include "originator.h"
#include "hash.h"

#include <linux/if_arp.h>

/* protect update critical side of if_list - but not the content */
static DEFINE_SPINLOCK(if_list_lock);

static int batman_skb_recv(struct sk_buff *skb,
			   struct net_device *dev,
			   struct packet_type *ptype,
			   struct net_device *orig_dev);

static void hardif_free_rcu(struct rcu_head *rcu)
{
	struct batman_if *batman_if;

	batman_if = container_of(rcu, struct batman_if, rcu);
	dev_put(batman_if->net_dev);
	kref_put(&batman_if->refcount, hardif_free_ref);
}

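/* look up the batman_if belonging to a net_device; the caller must
 * release the returned reference with hardif_free_ref */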
struct batman_if *get_batman_if_by_netdev(struct net_device *net_dev)
{
	struct batman_if *batman_if;

	rcu_read_lock();
	list_for_each_entry_rcu(batman_if, &if_list, list) {
		if (batman_if->net_dev == net_dev)
			goto out;
	}

	batman_if = NULL;

out:
	if (batman_if)
		kref_get(&batman_if->refcount);

	rcu_read_unlock();
	return batman_if;
}

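/* check whether a net_device is usable as a hard interface: it must be
 * a non-loopback ethernet device and not a batman soft interface itself */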
static int is_valid_iface(struct net_device *net_dev)
{
	if (net_dev->flags & IFF_LOOPBACK)
		return 0;

	if (net_dev->type != ARPHRD_ETHER)
		return 0;

	if (net_dev->addr_len != ETH_ALEN)
		return 0;

	/* no batman over batman */
#ifdef HAVE_NET_DEVICE_OPS
	if (net_dev->netdev_ops->ndo_start_xmit == interface_tx)
		return 0;
#else
	if (net_dev->hard_start_xmit == interface_tx)
		return 0;
#endif

	/* Device is being bridged */
	/* if (net_dev->priv_flags & IFF_BRIDGE_PORT)
		return 0; */

	return 1;
}

static struct batman_if *get_active_batman_if(struct net_device *soft_iface)
{
	struct batman_if *batman_if;

	rcu_read_lock();
	list_for_each_entry_rcu(batman_if, &if_list, list) {
		if (batman_if->soft_iface != soft_iface)
			continue;

		if (batman_if->if_status == IF_ACTIVE)
			goto out;
	}

	batman_if = NULL;

out:
	if (batman_if)
		kref_get(&batman_if->refcount);

	rcu_read_unlock();
	return batman_if;
}

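/* copy the primary interface's MAC address into the originator and
 * sender fields of our vis packet */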
static void update_primary_addr(struct bat_priv *bat_priv)
{
	struct vis_packet *vis_packet;

	vis_packet = (struct vis_packet *)
				bat_priv->my_vis_info->skb_packet->data;
	memcpy(vis_packet->vis_orig,
	       bat_priv->primary_if->net_dev->dev_addr, ETH_ALEN);
	memcpy(vis_packet->sender_orig,
	       bat_priv->primary_if->net_dev->dev_addr, ETH_ALEN);
}

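/* swap bat_priv->primary_if to the given interface (dropping the old
 * reference) and flag the new primary's OGMs as first hop */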
static void set_primary_if(struct bat_priv *bat_priv,
			   struct batman_if *batman_if)
{
	struct batman_packet *batman_packet;
	struct batman_if *old_if;

	if (batman_if)
		kref_get(&batman_if->refcount);

	old_if = bat_priv->primary_if;
	bat_priv->primary_if = batman_if;

	if (old_if)
		kref_put(&old_if->refcount, hardif_free_ref);

	if (!bat_priv->primary_if)
		return;

	batman_packet = (struct batman_packet *)(batman_if->packet_buff);
	batman_packet->flags = PRIMARIES_FIRST_HOP;
	batman_packet->ttl = TTL;

	update_primary_addr(bat_priv);

	/***
	 * hacky trick to make sure that we send the HNA information via
	 * our new primary interface
	 */
	atomic_set(&bat_priv->hna_local_changed, 1);
}

static bool hardif_is_iface_up(struct batman_if *batman_if)
{
	if (batman_if->net_dev->flags & IFF_UP)
		return true;

	return false;
}

static void update_mac_addresses(struct batman_if *batman_if)
{
	memcpy(((struct batman_packet *)(batman_if->packet_buff))->orig,
	       batman_if->net_dev->dev_addr, ETH_ALEN);
	memcpy(((struct batman_packet *)(batman_if->packet_buff))->prev_sender,
	       batman_if->net_dev->dev_addr, ETH_ALEN);
}

static void check_known_mac_addr(struct net_device *net_dev)
{
	struct batman_if *batman_if;

	rcu_read_lock();
	list_for_each_entry_rcu(batman_if, &if_list, list) {
		if ((batman_if->if_status != IF_ACTIVE) &&
		    (batman_if->if_status != IF_TO_BE_ACTIVATED))
			continue;

		if (batman_if->net_dev == net_dev)
			continue;

		if (!compare_orig(batman_if->net_dev->dev_addr,
				  net_dev->dev_addr))
			continue;

		pr_warning("The newly added mac address (%pM) already exists "
			   "on: %s\n", net_dev->dev_addr,
			   batman_if->net_dev->name);
		pr_warning("It is strongly recommended to keep mac addresses "
			   "unique to avoid problems!\n");
	}
	rcu_read_unlock();
}

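/* report the smallest usable payload MTU across all active hard
 * interfaces of this soft interface; with fragmentation enabled the
 * full ETH_DATA_LEN is allowed */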
int hardif_min_mtu(struct net_device *soft_iface)
{
	struct bat_priv *bat_priv = netdev_priv(soft_iface);
	struct batman_if *batman_if;
	/* allow big frames if all devices are capable to do so
	 * (have MTU > 1500 + BAT_HEADER_LEN) */
	int min_mtu = ETH_DATA_LEN;

	if (atomic_read(&bat_priv->fragmentation))
		goto out;

	rcu_read_lock();
	list_for_each_entry_rcu(batman_if, &if_list, list) {
		if ((batman_if->if_status != IF_ACTIVE) &&
		    (batman_if->if_status != IF_TO_BE_ACTIVATED))
			continue;

		if (batman_if->soft_iface != soft_iface)
			continue;

		min_mtu = min_t(int, batman_if->net_dev->mtu - BAT_HEADER_LEN,
				min_mtu);
	}
	rcu_read_unlock();
out:
	return min_mtu;
}

/* adjusts the MTU if a new interface with a smaller MTU appeared. */
void update_min_mtu(struct net_device *soft_iface)
{
	int min_mtu;

	min_mtu = hardif_min_mtu(soft_iface);
	if (soft_iface->mtu != min_mtu)
		soft_iface->mtu = min_mtu;
}

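/* move an interface from IF_INACTIVE to IF_TO_BE_ACTIVATED and make it
 * the primary interface if we don't have one yet */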
static void hardif_activate_interface(struct batman_if *batman_if)
{
	struct bat_priv *bat_priv;

	if (batman_if->if_status != IF_INACTIVE)
		return;

	bat_priv = netdev_priv(batman_if->soft_iface);

	update_mac_addresses(batman_if);
	batman_if->if_status = IF_TO_BE_ACTIVATED;

	/**
	 * the first active interface becomes our primary interface or
	 * the next active interface after the old primary interface was removed
	 */
	if (!bat_priv->primary_if)
		set_primary_if(bat_priv, batman_if);

	bat_info(batman_if->soft_iface, "Interface activated: %s\n",
		 batman_if->net_dev->name);

	update_min_mtu(batman_if->soft_iface);

	return;
}

static void hardif_deactivate_interface(struct batman_if *batman_if)
{
	if ((batman_if->if_status != IF_ACTIVE) &&
	    (batman_if->if_status != IF_TO_BE_ACTIVATED))
		return;

	batman_if->if_status = IF_INACTIVE;

	bat_info(batman_if->soft_iface, "Interface deactivated: %s\n",
		 batman_if->net_dev->name);

	update_min_mtu(batman_if->soft_iface);
}

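/* attach a hard interface to the soft interface named iface_name
 * (creating it if necessary), allocate its OGM buffer and register
 * the batman-adv packet handler */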
int hardif_enable_interface(struct batman_if *batman_if, char *iface_name)
{
	struct bat_priv *bat_priv;
	struct batman_packet *batman_packet;

	if (batman_if->if_status != IF_NOT_IN_USE)
		goto out;

	batman_if->soft_iface = dev_get_by_name(&init_net, iface_name);

	if (!batman_if->soft_iface) {
		batman_if->soft_iface = softif_create(iface_name);

		if (!batman_if->soft_iface)
			goto err;

		/* dev_get_by_name() increases the reference counter for us */
		dev_hold(batman_if->soft_iface);
	}

	bat_priv = netdev_priv(batman_if->soft_iface);
	batman_if->packet_len = BAT_PACKET_LEN;
	batman_if->packet_buff = kmalloc(batman_if->packet_len, GFP_ATOMIC);

	if (!batman_if->packet_buff) {
		bat_err(batman_if->soft_iface, "Can't add interface packet "
			"(%s): out of memory\n", batman_if->net_dev->name);
		goto err;
	}

	batman_packet = (struct batman_packet *)(batman_if->packet_buff);
	batman_packet->packet_type = BAT_PACKET;
	batman_packet->version = COMPAT_VERSION;
	batman_packet->flags = 0;
	batman_packet->ttl = 2;
	batman_packet->tq = TQ_MAX_VALUE;
	batman_packet->num_hna = 0;

	batman_if->if_num = bat_priv->num_ifaces;
	bat_priv->num_ifaces++;
	batman_if->if_status = IF_INACTIVE;
	orig_hash_add_if(batman_if, bat_priv->num_ifaces);

	batman_if->batman_adv_ptype.type = __constant_htons(ETH_P_BATMAN);
	batman_if->batman_adv_ptype.func = batman_skb_recv;
	batman_if->batman_adv_ptype.dev = batman_if->net_dev;
	kref_get(&batman_if->refcount);
	dev_add_pack(&batman_if->batman_adv_ptype);

	atomic_set(&batman_if->seqno, 1);
	atomic_set(&batman_if->frag_seqno, 1);
	bat_info(batman_if->soft_iface, "Adding interface: %s\n",
		 batman_if->net_dev->name);

	if (atomic_read(&bat_priv->fragmentation) && batman_if->net_dev->mtu <
		ETH_DATA_LEN + BAT_HEADER_LEN)
		bat_info(batman_if->soft_iface,
			"The MTU of interface %s is too small (%i) to handle "
			"the transport of batman-adv packets. Packets going "
			"over this interface will be fragmented on layer2 "
			"which could impact the performance. Setting the MTU "
			"to %zi would solve the problem.\n",
			batman_if->net_dev->name, batman_if->net_dev->mtu,
			ETH_DATA_LEN + BAT_HEADER_LEN);

	if (!atomic_read(&bat_priv->fragmentation) && batman_if->net_dev->mtu <
		ETH_DATA_LEN + BAT_HEADER_LEN)
		bat_info(batman_if->soft_iface,
			"The MTU of interface %s is too small (%i) to handle "
			"the transport of batman-adv packets. If you experience"
			" problems getting traffic through try increasing the "
			"MTU to %zi.\n",
			batman_if->net_dev->name, batman_if->net_dev->mtu,
			ETH_DATA_LEN + BAT_HEADER_LEN);

	if (hardif_is_iface_up(batman_if))
		hardif_activate_interface(batman_if);
	else
		bat_err(batman_if->soft_iface, "Not using interface %s "
			"(retrying later): interface not active\n",
			batman_if->net_dev->name);

	/* begin scheduling originator messages on that interface */
	schedule_own_packet(batman_if);

out:
	return 0;

err:
	return -ENOMEM;
}

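/* detach a hard interface from its soft interface, undoing the work of
 * hardif_enable_interface(); the soft interface is destroyed once its
 * last hard interface is gone */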
void hardif_disable_interface(struct batman_if *batman_if)
{
	struct bat_priv *bat_priv = netdev_priv(batman_if->soft_iface);

	if (batman_if->if_status == IF_ACTIVE)
		hardif_deactivate_interface(batman_if);

	if (batman_if->if_status != IF_INACTIVE)
		return;

	bat_info(batman_if->soft_iface, "Removing interface: %s\n",
		 batman_if->net_dev->name);
	dev_remove_pack(&batman_if->batman_adv_ptype);
	kref_put(&batman_if->refcount, hardif_free_ref);

	bat_priv->num_ifaces--;
	orig_hash_del_if(batman_if, bat_priv->num_ifaces);

	if (batman_if == bat_priv->primary_if) {
		struct batman_if *new_if;

		new_if = get_active_batman_if(batman_if->soft_iface);
		set_primary_if(bat_priv, new_if);

		if (new_if)
			kref_put(&new_if->refcount, hardif_free_ref);
	}

	kfree(batman_if->packet_buff);
	batman_if->packet_buff = NULL;
	batman_if->if_status = IF_NOT_IN_USE;

	/* delete all references to this batman_if */
	purge_orig_ref(bat_priv);
	purge_outstanding_packets(bat_priv, batman_if);
	dev_put(batman_if->soft_iface);

	/* nobody uses this interface anymore */
	if (!bat_priv->num_ifaces)
		softif_destroy(batman_if->soft_iface);

	batman_if->soft_iface = NULL;
}

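/* set up a batman_if for a freshly registered net_device and add it to
 * if_list; called from the netdevice notifier */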
static struct batman_if *hardif_add_interface(struct net_device *net_dev)
{
	struct batman_if *batman_if;
	int ret;

	ret = is_valid_iface(net_dev);
	if (ret != 1)
		goto out;

	dev_hold(net_dev);

	batman_if = kmalloc(sizeof(struct batman_if), GFP_ATOMIC);
	if (!batman_if) {
		pr_err("Can't add interface (%s): out of memory\n",
		       net_dev->name);
		goto release_dev;
	}

	ret = sysfs_add_hardif(&batman_if->hardif_obj, net_dev);
	if (ret)
		goto free_if;

	batman_if->if_num = -1;
	batman_if->net_dev = net_dev;
	batman_if->soft_iface = NULL;
	batman_if->if_status = IF_NOT_IN_USE;
	INIT_LIST_HEAD(&batman_if->list);
	kref_init(&batman_if->refcount);

	check_known_mac_addr(batman_if->net_dev);

	spin_lock(&if_list_lock);
	list_add_tail_rcu(&batman_if->list, &if_list);
	spin_unlock(&if_list_lock);

	/* extra reference for return */
	kref_get(&batman_if->refcount);
	return batman_if;

free_if:
	kfree(batman_if);
release_dev:
	dev_put(net_dev);
out:
	return NULL;
}

static void hardif_remove_interface(struct batman_if *batman_if)
{
	/* first deactivate interface */
	if (batman_if->if_status != IF_NOT_IN_USE)
		hardif_disable_interface(batman_if);

	if (batman_if->if_status != IF_NOT_IN_USE)
		return;

	batman_if->if_status = IF_TO_BE_REMOVED;
	sysfs_del_hardif(&batman_if->hardif_obj);
	call_rcu(&batman_if->rcu, hardif_free_rcu);
}

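/* pull every batman_if off if_list and remove each of them under
 * rtnl lock */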
void hardif_remove_interfaces(void)
{
	struct batman_if *batman_if, *batman_if_tmp;
	struct list_head if_queue;

	INIT_LIST_HEAD(&if_queue);

	spin_lock(&if_list_lock);
	list_for_each_entry_safe(batman_if, batman_if_tmp, &if_list, list) {
		list_del_rcu(&batman_if->list);
		list_add_tail(&batman_if->list, &if_queue);
	}
	spin_unlock(&if_list_lock);

	rtnl_lock();
	list_for_each_entry_safe(batman_if, batman_if_tmp, &if_queue, list) {
		hardif_remove_interface(batman_if);
	}
	rtnl_unlock();
}

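/* netdevice notifier callback: keep the batman_if state in sync with
 * NETDEV_* events of the underlying net_device */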
static int hard_if_event(struct notifier_block *this,
			 unsigned long event, void *ptr)
{
	struct net_device *net_dev = (struct net_device *)ptr;
	struct batman_if *batman_if = get_batman_if_by_netdev(net_dev);
	struct bat_priv *bat_priv;

	if (!batman_if && event == NETDEV_REGISTER)
		batman_if = hardif_add_interface(net_dev);

	if (!batman_if)
		goto out;

	switch (event) {
	case NETDEV_UP:
		hardif_activate_interface(batman_if);
		break;
	case NETDEV_GOING_DOWN:
	case NETDEV_DOWN:
		hardif_deactivate_interface(batman_if);
		break;
	case NETDEV_UNREGISTER:
		spin_lock(&if_list_lock);
		list_del_rcu(&batman_if->list);
		spin_unlock(&if_list_lock);

		hardif_remove_interface(batman_if);
		break;
	case NETDEV_CHANGEMTU:
		if (batman_if->soft_iface)
			update_min_mtu(batman_if->soft_iface);
		break;
	case NETDEV_CHANGEADDR:
		if (batman_if->if_status == IF_NOT_IN_USE)
			goto hardif_put;

		check_known_mac_addr(batman_if->net_dev);
		update_mac_addresses(batman_if);

		bat_priv = netdev_priv(batman_if->soft_iface);
		if (batman_if == bat_priv->primary_if)
			update_primary_addr(bat_priv);
		break;
	default:
		break;
	}

hardif_put:
	kref_put(&batman_if->refcount, hardif_free_ref);
out:
	return NOTIFY_DONE;
}

/* receive a packet with the batman ethertype coming on a hard
 * interface */
static int batman_skb_recv(struct sk_buff *skb, struct net_device *dev,
			   struct packet_type *ptype,
			   struct net_device *orig_dev)
{
	struct bat_priv *bat_priv;
	struct batman_packet *batman_packet;
	struct batman_if *batman_if;
	int ret;

	batman_if = container_of(ptype, struct batman_if, batman_adv_ptype);
	skb = skb_share_check(skb, GFP_ATOMIC);

	/* skb was released by skb_share_check() */
	if (!skb)
		goto err_out;

	/* packet should hold at least type and version */
	if (unlikely(!pskb_may_pull(skb, 2)))
		goto err_free;

	/* expect a valid ethernet header here. */
	if (unlikely(skb->mac_len != sizeof(struct ethhdr)
				|| !skb_mac_header(skb)))
		goto err_free;

	if (!batman_if->soft_iface)
		goto err_free;

	bat_priv = netdev_priv(batman_if->soft_iface);

	if (atomic_read(&bat_priv->mesh_state) != MESH_ACTIVE)
		goto err_free;

	/* discard frames on not active interfaces */
	if (batman_if->if_status != IF_ACTIVE)
		goto err_free;

	batman_packet = (struct batman_packet *)skb->data;

	if (batman_packet->version != COMPAT_VERSION) {
		bat_dbg(DBG_BATMAN, bat_priv,
			"Drop packet: incompatible batman version (%i)\n",
			batman_packet->version);
		goto err_free;
	}

	/* all receive handlers return whether they received or reused
	 * the supplied skb. if not, we have to free the skb. */
	switch (batman_packet->packet_type) {
		/* batman originator packet */
	case BAT_PACKET:
		ret = recv_bat_packet(skb, batman_if);
		break;

		/* batman icmp packet */
	case BAT_ICMP:
		ret = recv_icmp_packet(skb, batman_if);
		break;

		/* unicast packet */
	case BAT_UNICAST:
		ret = recv_unicast_packet(skb, batman_if);
		break;

		/* fragmented unicast packet */
	case BAT_UNICAST_FRAG:
		ret = recv_ucast_frag_packet(skb, batman_if);
		break;

		/* broadcast packet */
	case BAT_BCAST:
		ret = recv_bcast_packet(skb, batman_if);
		break;

		/* vis packet */
	case BAT_VIS:
		ret = recv_vis_packet(skb, batman_if);
		break;
	default:
		ret = NET_RX_DROP;
	}

	if (ret == NET_RX_DROP)
		kfree_skb(skb);

	/* return NET_RX_SUCCESS in any case as we
	 * most probably dropped the packet for
	 * routing-logical reasons. */
	return NET_RX_SUCCESS;

err_free:
	kfree_skb(skb);
err_out:
	return NET_RX_DROP;
}

struct notifier_block hard_if_notifier = {
	.notifier_call = hard_if_event,
};