routing.c

/*
 * Copyright (C) 2007-2012 B.A.T.M.A.N. contributors:
 *
 * Marek Lindner, Simon Wunderlich
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
 * 02110-1301, USA
 *
 */

#include "main.h"
#include "routing.h"
#include "send.h"
#include "soft-interface.h"
#include "hard-interface.h"
#include "icmp_socket.h"
#include "translation-table.h"
#include "originator.h"
#include "vis.h"
#include "unicast.h"
#include "bridge_loop_avoidance.h"

static int route_unicast_packet(struct sk_buff *skb,
				struct hard_iface *recv_if);

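/* shift the own-broadcast window of hard_iface by one sequence number for
 * every originator and recount how many of our own broadcasts each
 * originator has echoed back within the sliding window
 */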
void slide_own_bcast_window(struct hard_iface *hard_iface)
{
	struct bat_priv *bat_priv = netdev_priv(hard_iface->soft_iface);
	struct hashtable_t *hash = bat_priv->orig_hash;
	struct hlist_node *node;
	struct hlist_head *head;
	struct orig_node *orig_node;
	unsigned long *word;
	uint32_t i;
	size_t word_index;

	for (i = 0; i < hash->size; i++) {
		head = &hash->table[i];

		rcu_read_lock();
		hlist_for_each_entry_rcu(orig_node, node, head, hash_entry) {
			spin_lock_bh(&orig_node->ogm_cnt_lock);
			word_index = hard_iface->if_num * NUM_WORDS;
			word = &(orig_node->bcast_own[word_index]);

			bit_get_packet(bat_priv, word, 1, 0);
			orig_node->bcast_own_sum[hard_iface->if_num] =
				bitmap_weight(word, TQ_LOCAL_WINDOW_SIZE);
			spin_unlock_bh(&orig_node->ogm_cnt_lock);
		}
		rcu_read_unlock();
	}
}

static void _update_route(struct bat_priv *bat_priv,
			  struct orig_node *orig_node,
			  struct neigh_node *neigh_node)
{
	struct neigh_node *curr_router;

	curr_router = orig_node_get_router(orig_node);

	/* route deleted */
	if ((curr_router) && (!neigh_node)) {
		bat_dbg(DBG_ROUTES, bat_priv, "Deleting route towards: %pM\n",
			orig_node->orig);
		tt_global_del_orig(bat_priv, orig_node,
				   "Deleted route towards originator");

	/* route added */
	} else if ((!curr_router) && (neigh_node)) {
		bat_dbg(DBG_ROUTES, bat_priv,
			"Adding route towards: %pM (via %pM)\n",
			orig_node->orig, neigh_node->addr);

	/* route changed */
	} else if (neigh_node && curr_router) {
		bat_dbg(DBG_ROUTES, bat_priv,
			"Changing route towards: %pM (now via %pM - was via %pM)\n",
			orig_node->orig, neigh_node->addr,
			curr_router->addr);
	}

	if (curr_router)
		neigh_node_free_ref(curr_router);

	/* increase refcount of new best neighbor */
	if (neigh_node && !atomic_inc_not_zero(&neigh_node->refcount))
		neigh_node = NULL;

	spin_lock_bh(&orig_node->neigh_list_lock);
	rcu_assign_pointer(orig_node->router, neigh_node);
	spin_unlock_bh(&orig_node->neigh_list_lock);

	/* decrease refcount of previous best neighbor */
	if (curr_router)
		neigh_node_free_ref(curr_router);
}

void update_route(struct bat_priv *bat_priv, struct orig_node *orig_node,
		  struct neigh_node *neigh_node)
{
	struct neigh_node *router = NULL;

	if (!orig_node)
		goto out;

	router = orig_node_get_router(orig_node);

	if (router != neigh_node)
		_update_route(bat_priv, orig_node, neigh_node);

out:
	if (router)
		neigh_node_free_ref(router);
}

/* caller must hold the neigh_list_lock */
void bonding_candidate_del(struct orig_node *orig_node,
			   struct neigh_node *neigh_node)
{
	/* this neighbor is not part of our candidate list */
	if (list_empty(&neigh_node->bonding_list))
		goto out;

	list_del_rcu(&neigh_node->bonding_list);
	INIT_LIST_HEAD(&neigh_node->bonding_list);
	neigh_node_free_ref(neigh_node);
	atomic_dec(&orig_node->bond_candidates);

out:
	return;
}

void bonding_candidate_add(struct orig_node *orig_node,
			   struct neigh_node *neigh_node)
{
	struct hlist_node *node;
	struct neigh_node *tmp_neigh_node, *router = NULL;
	uint8_t interference_candidate = 0;

	spin_lock_bh(&orig_node->neigh_list_lock);

	/* only consider if it has the same primary address ... */
	if (!compare_eth(orig_node->orig,
			 neigh_node->orig_node->primary_addr))
		goto candidate_del;

	router = orig_node_get_router(orig_node);
	if (!router)
		goto candidate_del;

	/* ... and is good enough to be considered */
	if (neigh_node->tq_avg < router->tq_avg - BONDING_TQ_THRESHOLD)
		goto candidate_del;

	/**
	 * check if we have another candidate with the same mac address or
	 * interface. If we do, we won't select this candidate because of
	 * possible interference.
	 */
	hlist_for_each_entry_rcu(tmp_neigh_node, node,
				 &orig_node->neigh_list, list) {
		if (tmp_neigh_node == neigh_node)
			continue;

		/* we only care if the other candidate is even
		 * considered as candidate. */
		if (list_empty(&tmp_neigh_node->bonding_list))
			continue;

		if ((neigh_node->if_incoming == tmp_neigh_node->if_incoming) ||
		    (compare_eth(neigh_node->addr, tmp_neigh_node->addr))) {
			interference_candidate = 1;
			break;
		}
	}

	/* don't care further if it is an interference candidate */
	if (interference_candidate)
		goto candidate_del;

	/* this neighbor already is part of our candidate list */
	if (!list_empty(&neigh_node->bonding_list))
		goto out;

	if (!atomic_inc_not_zero(&neigh_node->refcount))
		goto out;

	list_add_rcu(&neigh_node->bonding_list, &orig_node->bond_list);
	atomic_inc(&orig_node->bond_candidates);
	goto out;

candidate_del:
	bonding_candidate_del(orig_node, neigh_node);

out:
	spin_unlock_bh(&orig_node->neigh_list_lock);

	if (router)
		neigh_node_free_ref(router);
}

/* copy primary address for bonding */
void bonding_save_primary(const struct orig_node *orig_node,
			  struct orig_node *orig_neigh_node,
			  const struct batman_ogm_packet *batman_ogm_packet)
{
	if (!(batman_ogm_packet->flags & PRIMARIES_FIRST_HOP))
		return;

	memcpy(orig_neigh_node->primary_addr, orig_node->orig, ETH_ALEN);
}

/* checks whether the host restarted and is in the protection time.
 * returns:
 *  0 if the packet is to be accepted
 *  1 if the packet is to be ignored.
 */
int window_protected(struct bat_priv *bat_priv, int32_t seq_num_diff,
		     unsigned long *last_reset)
{
	if ((seq_num_diff <= -TQ_LOCAL_WINDOW_SIZE) ||
	    (seq_num_diff >= EXPECTED_SEQNO_RANGE)) {
		if (has_timed_out(*last_reset, RESET_PROTECTION_MS)) {
			*last_reset = jiffies;
			bat_dbg(DBG_BATMAN, bat_priv,
				"old packet received, start protection\n");
			return 0;
		} else {
			return 1;
		}
	}

	return 0;
}

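/* receive handler for originator messages (OGMs): perform basic sanity
 * checks on the frame and hand it to the active routing algorithm's
 * OGM receive function
 */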
int recv_bat_ogm_packet(struct sk_buff *skb, struct hard_iface *hard_iface)
{
	struct bat_priv *bat_priv = netdev_priv(hard_iface->soft_iface);
	struct ethhdr *ethhdr;

	/* drop packet if it has not necessary minimum size */
	if (unlikely(!pskb_may_pull(skb, BATMAN_OGM_LEN)))
		return NET_RX_DROP;

	ethhdr = (struct ethhdr *)skb_mac_header(skb);

	/* packet with broadcast indication but unicast recipient */
	if (!is_broadcast_ether_addr(ethhdr->h_dest))
		return NET_RX_DROP;

	/* packet with broadcast sender address */
	if (is_broadcast_ether_addr(ethhdr->h_source))
		return NET_RX_DROP;

	/* create a copy of the skb, if needed, to modify it. */
	if (skb_cow(skb, 0) < 0)
		return NET_RX_DROP;

	/* keep skb linear */
	if (skb_linearize(skb) < 0)
		return NET_RX_DROP;

	bat_priv->bat_algo_ops->bat_ogm_receive(hard_iface, skb);

	kfree_skb(skb);
	return NET_RX_SUCCESS;
}

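/* handle an icmp packet addressed to this node: hand it to the userspace
 * socket or, if it is an echo request, turn it into an echo reply and
 * send it back towards the originator
 */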
static int recv_my_icmp_packet(struct bat_priv *bat_priv,
			       struct sk_buff *skb, size_t icmp_len)
{
	struct hard_iface *primary_if = NULL;
	struct orig_node *orig_node = NULL;
	struct neigh_node *router = NULL;
	struct icmp_packet_rr *icmp_packet;
	int ret = NET_RX_DROP;

	icmp_packet = (struct icmp_packet_rr *)skb->data;

	/* add data to device queue */
	if (icmp_packet->msg_type != ECHO_REQUEST) {
		bat_socket_receive_packet(icmp_packet, icmp_len);
		goto out;
	}

	primary_if = primary_if_get_selected(bat_priv);
	if (!primary_if)
		goto out;

	/* answer echo request (ping) */
	/* get routing information */
	orig_node = orig_hash_find(bat_priv, icmp_packet->orig);
	if (!orig_node)
		goto out;

	router = orig_node_get_router(orig_node);
	if (!router)
		goto out;

	/* create a copy of the skb, if needed, to modify it. */
	if (skb_cow(skb, sizeof(struct ethhdr)) < 0)
		goto out;

	icmp_packet = (struct icmp_packet_rr *)skb->data;

	memcpy(icmp_packet->dst, icmp_packet->orig, ETH_ALEN);
	memcpy(icmp_packet->orig, primary_if->net_dev->dev_addr, ETH_ALEN);
	icmp_packet->msg_type = ECHO_REPLY;
	icmp_packet->header.ttl = TTL;

	send_skb_packet(skb, router->if_incoming, router->addr);
	ret = NET_RX_SUCCESS;

out:
	if (primary_if)
		hardif_free_ref(primary_if);
	if (router)
		neigh_node_free_ref(router);
	if (orig_node)
		orig_node_free_ref(orig_node);
	return ret;
}

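/* an icmp echo request ran out of ttl before reaching its destination:
 * notify the sender with a TTL_EXCEEDED message
 */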
static int recv_icmp_ttl_exceeded(struct bat_priv *bat_priv,
				  struct sk_buff *skb)
{
	struct hard_iface *primary_if = NULL;
	struct orig_node *orig_node = NULL;
	struct neigh_node *router = NULL;
	struct icmp_packet *icmp_packet;
	int ret = NET_RX_DROP;

	icmp_packet = (struct icmp_packet *)skb->data;

	/* send TTL exceeded if packet is an echo request (traceroute) */
	if (icmp_packet->msg_type != ECHO_REQUEST) {
		pr_debug("Warning - can't forward icmp packet from %pM to %pM: ttl exceeded\n",
			 icmp_packet->orig, icmp_packet->dst);
		goto out;
	}

	primary_if = primary_if_get_selected(bat_priv);
	if (!primary_if)
		goto out;

	/* get routing information */
	orig_node = orig_hash_find(bat_priv, icmp_packet->orig);
	if (!orig_node)
		goto out;

	router = orig_node_get_router(orig_node);
	if (!router)
		goto out;

	/* create a copy of the skb, if needed, to modify it. */
	if (skb_cow(skb, sizeof(struct ethhdr)) < 0)
		goto out;

	icmp_packet = (struct icmp_packet *)skb->data;

	memcpy(icmp_packet->dst, icmp_packet->orig, ETH_ALEN);
	memcpy(icmp_packet->orig, primary_if->net_dev->dev_addr, ETH_ALEN);
	icmp_packet->msg_type = TTL_EXCEEDED;
	icmp_packet->header.ttl = TTL;

	send_skb_packet(skb, router->if_incoming, router->addr);
	ret = NET_RX_SUCCESS;

out:
	if (primary_if)
		hardif_free_ref(primary_if);
	if (router)
		neigh_node_free_ref(router);
	if (orig_node)
		orig_node_free_ref(orig_node);
	return ret;
}

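/* receive handler for batman icmp packets: append record-route data to
 * icmp_packet_rr packets, answer packets addressed to us, generate TTL
 * exceeded replies and forward everything else towards its destination
 */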
int recv_icmp_packet(struct sk_buff *skb, struct hard_iface *recv_if)
{
	struct bat_priv *bat_priv = netdev_priv(recv_if->soft_iface);
	struct icmp_packet_rr *icmp_packet;
	struct ethhdr *ethhdr;
	struct orig_node *orig_node = NULL;
	struct neigh_node *router = NULL;
	int hdr_size = sizeof(struct icmp_packet);
	int ret = NET_RX_DROP;

	/**
	 * we truncate all incoming icmp packets if they don't match our size
	 */
	if (skb->len >= sizeof(struct icmp_packet_rr))
		hdr_size = sizeof(struct icmp_packet_rr);

	/* drop packet if it has not necessary minimum size */
	if (unlikely(!pskb_may_pull(skb, hdr_size)))
		goto out;

	ethhdr = (struct ethhdr *)skb_mac_header(skb);

	/* packet with unicast indication but broadcast recipient */
	if (is_broadcast_ether_addr(ethhdr->h_dest))
		goto out;

	/* packet with broadcast sender address */
	if (is_broadcast_ether_addr(ethhdr->h_source))
		goto out;

	/* not for me */
	if (!is_my_mac(ethhdr->h_dest))
		goto out;

	icmp_packet = (struct icmp_packet_rr *)skb->data;

	/* add record route information if not full */
	if ((hdr_size == sizeof(struct icmp_packet_rr)) &&
	    (icmp_packet->rr_cur < BAT_RR_LEN)) {
		memcpy(&(icmp_packet->rr[icmp_packet->rr_cur]),
		       ethhdr->h_dest, ETH_ALEN);
		icmp_packet->rr_cur++;
	}

	/* packet for me */
	if (is_my_mac(icmp_packet->dst))
		return recv_my_icmp_packet(bat_priv, skb, hdr_size);

	/* TTL exceeded */
	if (icmp_packet->header.ttl < 2)
		return recv_icmp_ttl_exceeded(bat_priv, skb);

	/* get routing information */
	orig_node = orig_hash_find(bat_priv, icmp_packet->dst);
	if (!orig_node)
		goto out;

	router = orig_node_get_router(orig_node);
	if (!router)
		goto out;

	/* create a copy of the skb, if needed, to modify it. */
	if (skb_cow(skb, sizeof(struct ethhdr)) < 0)
		goto out;

	icmp_packet = (struct icmp_packet_rr *)skb->data;

	/* decrement ttl */
	icmp_packet->header.ttl--;

	/* route it */
	send_skb_packet(skb, router->if_incoming, router->addr);
	ret = NET_RX_SUCCESS;

out:
	if (router)
		neigh_node_free_ref(router);
	if (orig_node)
		orig_node_free_ref(orig_node);
	return ret;
}

/* In the bonding case, send the packets in a round
 * robin fashion over the remaining interfaces.
 *
 * This method rotates the bonding list and increases the
 * returned router's refcount. */
static struct neigh_node *find_bond_router(struct orig_node *primary_orig,
					   const struct hard_iface *recv_if)
{
	struct neigh_node *tmp_neigh_node;
	struct neigh_node *router = NULL, *first_candidate = NULL;

	rcu_read_lock();
	list_for_each_entry_rcu(tmp_neigh_node, &primary_orig->bond_list,
				bonding_list) {
		if (!first_candidate)
			first_candidate = tmp_neigh_node;

		/* recv_if == NULL on the first node. */
		if (tmp_neigh_node->if_incoming == recv_if)
			continue;

		if (!atomic_inc_not_zero(&tmp_neigh_node->refcount))
			continue;

		router = tmp_neigh_node;
		break;
	}

	/* use the first candidate if nothing was found. */
	if (!router && first_candidate &&
	    atomic_inc_not_zero(&first_candidate->refcount))
		router = first_candidate;

	if (!router)
		goto out;

	/* selected should point to the next element
	 * after the current router */
	spin_lock_bh(&primary_orig->neigh_list_lock);
	/* this is a list_move(), which unfortunately
	 * does not exist as rcu version */
	list_del_rcu(&primary_orig->bond_list);
	list_add_rcu(&primary_orig->bond_list,
		     &router->bonding_list);
	spin_unlock_bh(&primary_orig->neigh_list_lock);

out:
	rcu_read_unlock();
	return router;
}

/* Interface Alternating: Use the best of the
 * remaining candidates which are not using
 * this interface.
 *
 * Increases the returned router's refcount */
static struct neigh_node *find_ifalter_router(struct orig_node *primary_orig,
					      const struct hard_iface *recv_if)
{
	struct neigh_node *tmp_neigh_node;
	struct neigh_node *router = NULL, *first_candidate = NULL;

	rcu_read_lock();
	list_for_each_entry_rcu(tmp_neigh_node, &primary_orig->bond_list,
				bonding_list) {
		if (!first_candidate)
			first_candidate = tmp_neigh_node;

		/* recv_if == NULL on the first node. */
		if (tmp_neigh_node->if_incoming == recv_if)
			continue;

		if (!atomic_inc_not_zero(&tmp_neigh_node->refcount))
			continue;

		/* if we don't have a router yet
		 * or this one is better, choose it. */
		if ((!router) ||
		    (tmp_neigh_node->tq_avg > router->tq_avg)) {
			/* decrement refcount of
			 * previously selected router */
			if (router)
				neigh_node_free_ref(router);

			router = tmp_neigh_node;
			atomic_inc_not_zero(&router->refcount);
		}

		neigh_node_free_ref(tmp_neigh_node);
	}

	/* use the first candidate if nothing was found. */
	if (!router && first_candidate &&
	    atomic_inc_not_zero(&first_candidate->refcount))
		router = first_candidate;

	rcu_read_unlock();
	return router;
}

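/* receive handler for translation table queries: answer or forward
 * TT_REQUESTs and handle TT_RESPONSEs addressed to us, routing them
 * onwards otherwise
 */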
int recv_tt_query(struct sk_buff *skb, struct hard_iface *recv_if)
{
	struct bat_priv *bat_priv = netdev_priv(recv_if->soft_iface);
	struct tt_query_packet *tt_query;
	uint16_t tt_len;
	struct ethhdr *ethhdr;

	/* drop packet if it has not necessary minimum size */
	if (unlikely(!pskb_may_pull(skb, sizeof(struct tt_query_packet))))
		goto out;

	/* I could need to modify it */
	if (skb_cow(skb, sizeof(struct tt_query_packet)) < 0)
		goto out;

	ethhdr = (struct ethhdr *)skb_mac_header(skb);

	/* packet with unicast indication but broadcast recipient */
	if (is_broadcast_ether_addr(ethhdr->h_dest))
		goto out;

	/* packet with broadcast sender address */
	if (is_broadcast_ether_addr(ethhdr->h_source))
		goto out;

	tt_query = (struct tt_query_packet *)skb->data;

	tt_query->tt_data = ntohs(tt_query->tt_data);

	switch (tt_query->flags & TT_QUERY_TYPE_MASK) {
	case TT_REQUEST:
		/* If we cannot provide an answer the tt_request is
		 * forwarded */
		if (!send_tt_response(bat_priv, tt_query)) {
			bat_dbg(DBG_TT, bat_priv,
				"Routing TT_REQUEST to %pM [%c]\n",
				tt_query->dst,
				(tt_query->flags & TT_FULL_TABLE ? 'F' : '.'));
			tt_query->tt_data = htons(tt_query->tt_data);
			return route_unicast_packet(skb, recv_if);
		}
		break;
	case TT_RESPONSE:
		if (is_my_mac(tt_query->dst)) {
			/* packet needs to be linearized to access the TT
			 * changes */
			if (skb_linearize(skb) < 0)
				goto out;

			tt_len = tt_query->tt_data * sizeof(struct tt_change);

			/* Ensure we have all the claimed data */
			if (unlikely(skb_headlen(skb) <
				     sizeof(struct tt_query_packet) + tt_len))
				goto out;

			handle_tt_response(bat_priv, tt_query);
		} else {
			bat_dbg(DBG_TT, bat_priv,
				"Routing TT_RESPONSE to %pM [%c]\n",
				tt_query->dst,
				(tt_query->flags & TT_FULL_TABLE ? 'F' : '.'));
			tt_query->tt_data = htons(tt_query->tt_data);
			return route_unicast_packet(skb, recv_if);
		}
		break;
	}

out:
	/* returning NET_RX_DROP will make the caller function kfree the skb */
	return NET_RX_DROP;
}

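/* receive handler for roaming advertisements: add the roamed client to
 * the global translation table of the advertising originator and start
 * the roaming phase
 */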
int recv_roam_adv(struct sk_buff *skb, struct hard_iface *recv_if)
{
	struct bat_priv *bat_priv = netdev_priv(recv_if->soft_iface);
	struct roam_adv_packet *roam_adv_packet;
	struct orig_node *orig_node;
	struct ethhdr *ethhdr;

	/* drop packet if it has not necessary minimum size */
	if (unlikely(!pskb_may_pull(skb, sizeof(struct roam_adv_packet))))
		goto out;

	ethhdr = (struct ethhdr *)skb_mac_header(skb);

	/* packet with unicast indication but broadcast recipient */
	if (is_broadcast_ether_addr(ethhdr->h_dest))
		goto out;

	/* packet with broadcast sender address */
	if (is_broadcast_ether_addr(ethhdr->h_source))
		goto out;

	roam_adv_packet = (struct roam_adv_packet *)skb->data;

	if (!is_my_mac(roam_adv_packet->dst))
		return route_unicast_packet(skb, recv_if);

	orig_node = orig_hash_find(bat_priv, roam_adv_packet->src);
	if (!orig_node)
		goto out;

	bat_dbg(DBG_TT, bat_priv,
		"Received ROAMING_ADV from %pM (client %pM)\n",
		roam_adv_packet->src, roam_adv_packet->client);

	tt_global_add(bat_priv, orig_node, roam_adv_packet->client,
		      atomic_read(&orig_node->last_ttvn) + 1, true, false);

	/* Roaming phase starts: I have new information but the ttvn has not
	 * been incremented yet. This flag will make me check all the incoming
	 * packets for the correct destination. */
	bat_priv->tt_poss_change = true;

	orig_node_free_ref(orig_node);
out:
	/* returning NET_RX_DROP will make the caller function kfree the skb */
	return NET_RX_DROP;
}

/* find a suitable router for this originator, and use
 * bonding if possible. increases the found neighbor's
 * refcount. */
struct neigh_node *find_router(struct bat_priv *bat_priv,
			       struct orig_node *orig_node,
			       const struct hard_iface *recv_if)
{
	struct orig_node *primary_orig_node;
	struct orig_node *router_orig;
	struct neigh_node *router;
	static uint8_t zero_mac[ETH_ALEN] = {0, 0, 0, 0, 0, 0};
	int bonding_enabled;

	if (!orig_node)
		return NULL;

	router = orig_node_get_router(orig_node);
	if (!router)
		goto err;

	/* without bonding, the first node should
	 * always choose the default router. */
	bonding_enabled = atomic_read(&bat_priv->bonding);

	rcu_read_lock();
	/* select default router to output */
	router_orig = router->orig_node;
	if (!router_orig)
		goto err_unlock;

	if ((!recv_if) && (!bonding_enabled))
		goto return_router;

	/* if we have something in the primary_addr, we can search
	 * for a potential bonding candidate. */
	if (compare_eth(router_orig->primary_addr, zero_mac))
		goto return_router;

	/* find the orig_node which has the primary interface. might
	 * even be the same as our router_orig in many cases */
	if (compare_eth(router_orig->primary_addr, router_orig->orig)) {
		primary_orig_node = router_orig;
	} else {
		primary_orig_node = orig_hash_find(bat_priv,
						   router_orig->primary_addr);
		if (!primary_orig_node)
			goto return_router;

		orig_node_free_ref(primary_orig_node);
	}

	/* with less than 2 candidates, we can't do any
	 * bonding and prefer the original router. */
	if (atomic_read(&primary_orig_node->bond_candidates) < 2)
		goto return_router;

	/* all nodes between should choose a candidate which
	 * is not on the interface where the packet came in. */
	neigh_node_free_ref(router);

	if (bonding_enabled)
		router = find_bond_router(primary_orig_node, recv_if);
	else
		router = find_ifalter_router(primary_orig_node, recv_if);

return_router:
	if (router && router->if_incoming->if_status != IF_ACTIVE)
		goto err_unlock;

	rcu_read_unlock();
	return router;
err_unlock:
	rcu_read_unlock();
err:
	if (router)
		neigh_node_free_ref(router);
	return NULL;
}

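/* basic sanity checks for incoming unicast payloads;
 * returns 0 if the packet may be processed further, -1 if it has to be
 * dropped
 */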
static int check_unicast_packet(struct sk_buff *skb, int hdr_size)
{
	struct ethhdr *ethhdr;

	/* drop packet if it has not necessary minimum size */
	if (unlikely(!pskb_may_pull(skb, hdr_size)))
		return -1;

	ethhdr = (struct ethhdr *)skb_mac_header(skb);

	/* packet with unicast indication but broadcast recipient */
	if (is_broadcast_ether_addr(ethhdr->h_dest))
		return -1;

	/* packet with broadcast sender address */
	if (is_broadcast_ether_addr(ethhdr->h_source))
		return -1;

	/* not for me */
	if (!is_my_mac(ethhdr->h_dest))
		return -1;

	return 0;
}

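/* forward a unicast packet towards its destination: look up the
 * originator, pick a router via find_router(), fragment or reassemble
 * the payload if necessary, decrement the ttl and send the packet out
 */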
static int route_unicast_packet(struct sk_buff *skb, struct hard_iface *recv_if)
{
	struct bat_priv *bat_priv = netdev_priv(recv_if->soft_iface);
	struct orig_node *orig_node = NULL;
	struct neigh_node *neigh_node = NULL;
	struct unicast_packet *unicast_packet;
	struct ethhdr *ethhdr = (struct ethhdr *)skb_mac_header(skb);
	int ret = NET_RX_DROP;
	struct sk_buff *new_skb;

	unicast_packet = (struct unicast_packet *)skb->data;

	/* TTL exceeded */
	if (unicast_packet->header.ttl < 2) {
		pr_debug("Warning - can't forward unicast packet from %pM to %pM: ttl exceeded\n",
			 ethhdr->h_source, unicast_packet->dest);
		goto out;
	}

	/* get routing information */
	orig_node = orig_hash_find(bat_priv, unicast_packet->dest);
	if (!orig_node)
		goto out;

	/* find_router() increases neigh_nodes refcount if found. */
	neigh_node = find_router(bat_priv, orig_node, recv_if);
	if (!neigh_node)
		goto out;

	/* create a copy of the skb, if needed, to modify it. */
	if (skb_cow(skb, sizeof(struct ethhdr)) < 0)
		goto out;

	unicast_packet = (struct unicast_packet *)skb->data;

	if (unicast_packet->header.packet_type == BAT_UNICAST &&
	    atomic_read(&bat_priv->fragmentation) &&
	    skb->len > neigh_node->if_incoming->net_dev->mtu) {
		ret = frag_send_skb(skb, bat_priv,
				    neigh_node->if_incoming, neigh_node->addr);
		goto out;
	}

	if (unicast_packet->header.packet_type == BAT_UNICAST_FRAG &&
	    frag_can_reassemble(skb, neigh_node->if_incoming->net_dev->mtu)) {

		ret = frag_reassemble_skb(skb, bat_priv, &new_skb);

		if (ret == NET_RX_DROP)
			goto out;

		/* packet was buffered for late merge */
		if (!new_skb) {
			ret = NET_RX_SUCCESS;
			goto out;
		}

		skb = new_skb;
		unicast_packet = (struct unicast_packet *)skb->data;
	}

	/* decrement ttl */
	unicast_packet->header.ttl--;

	/* route it */
	send_skb_packet(skb, neigh_node->if_incoming, neigh_node->addr);
	ret = NET_RX_SUCCESS;

out:
	if (neigh_node)
		neigh_node_free_ref(neigh_node);
	if (orig_node)
		orig_node_free_ref(orig_node);
	return ret;
}

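/* check the translation table version number (ttvn) carried in the packet
 * against the one currently known for the destination; if the translation
 * data became stale, look the client up again and rewrite the packet's
 * destination. returns 0 if the packet has to be dropped, 1 otherwise
 */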
static int check_unicast_ttvn(struct bat_priv *bat_priv,
			      struct sk_buff *skb)
{
	uint8_t curr_ttvn;
	struct orig_node *orig_node;
	struct ethhdr *ethhdr;
	struct hard_iface *primary_if;
	struct unicast_packet *unicast_packet;
	bool tt_poss_change;

	/* I could need to modify it */
	if (skb_cow(skb, sizeof(struct unicast_packet)) < 0)
		return 0;

	unicast_packet = (struct unicast_packet *)skb->data;

	if (is_my_mac(unicast_packet->dest)) {
		tt_poss_change = bat_priv->tt_poss_change;
		curr_ttvn = (uint8_t)atomic_read(&bat_priv->ttvn);
	} else {
		orig_node = orig_hash_find(bat_priv, unicast_packet->dest);

		if (!orig_node)
			return 0;

		curr_ttvn = (uint8_t)atomic_read(&orig_node->last_ttvn);
		tt_poss_change = orig_node->tt_poss_change;
		orig_node_free_ref(orig_node);
	}

	/* Check whether I have to reroute the packet */
	if (seq_before(unicast_packet->ttvn, curr_ttvn) || tt_poss_change) {
		/* Linearize the skb before accessing it */
		if (skb_linearize(skb) < 0)
			return 0;

		ethhdr = (struct ethhdr *)(skb->data +
					   sizeof(struct unicast_packet));

		orig_node = transtable_search(bat_priv, NULL, ethhdr->h_dest);

		if (!orig_node) {
			if (!is_my_client(bat_priv, ethhdr->h_dest))
				return 0;

			primary_if = primary_if_get_selected(bat_priv);
			if (!primary_if)
				return 0;

			memcpy(unicast_packet->dest,
			       primary_if->net_dev->dev_addr, ETH_ALEN);
			hardif_free_ref(primary_if);
		} else {
			memcpy(unicast_packet->dest, orig_node->orig,
			       ETH_ALEN);
			curr_ttvn = (uint8_t)
				atomic_read(&orig_node->last_ttvn);
			orig_node_free_ref(orig_node);
		}

		bat_dbg(DBG_ROUTES, bat_priv,
			"TTVN mismatch (old_ttvn %u new_ttvn %u)! Rerouting unicast packet (for %pM) to %pM\n",
			unicast_packet->ttvn, curr_ttvn, ethhdr->h_dest,
			unicast_packet->dest);

		unicast_packet->ttvn = curr_ttvn;
	}

	return 1;
}

int recv_unicast_packet(struct sk_buff *skb, struct hard_iface *recv_if)
{
	struct bat_priv *bat_priv = netdev_priv(recv_if->soft_iface);
	struct unicast_packet *unicast_packet;
	int hdr_size = sizeof(*unicast_packet);

	if (check_unicast_packet(skb, hdr_size) < 0)
		return NET_RX_DROP;

	if (!check_unicast_ttvn(bat_priv, skb))
		return NET_RX_DROP;

	unicast_packet = (struct unicast_packet *)skb->data;

	/* packet for me */
	if (is_my_mac(unicast_packet->dest)) {
		interface_rx(recv_if->soft_iface, skb, recv_if, hdr_size);
		return NET_RX_SUCCESS;
	}

	return route_unicast_packet(skb, recv_if);
}

int recv_ucast_frag_packet(struct sk_buff *skb, struct hard_iface *recv_if)
{
	struct bat_priv *bat_priv = netdev_priv(recv_if->soft_iface);
	struct unicast_frag_packet *unicast_packet;
	int hdr_size = sizeof(*unicast_packet);
	struct sk_buff *new_skb = NULL;
	int ret;

	if (check_unicast_packet(skb, hdr_size) < 0)
		return NET_RX_DROP;

	if (!check_unicast_ttvn(bat_priv, skb))
		return NET_RX_DROP;

	unicast_packet = (struct unicast_frag_packet *)skb->data;

	/* packet for me */
	if (is_my_mac(unicast_packet->dest)) {

		ret = frag_reassemble_skb(skb, bat_priv, &new_skb);

		if (ret == NET_RX_DROP)
			return NET_RX_DROP;

		/* packet was buffered for late merge */
		if (!new_skb)
			return NET_RX_SUCCESS;

		interface_rx(recv_if->soft_iface, new_skb, recv_if,
			     sizeof(struct unicast_packet));
		return NET_RX_SUCCESS;
	}

	return route_unicast_packet(skb, recv_if);
}

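/* receive handler for batman broadcast packets: run duplicate and window
 * protection checks against the originator's broadcast sequence numbers,
 * queue the packet for rebroadcast and hand it up locally unless it comes
 * from an originator on the same backbone
 */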
int recv_bcast_packet(struct sk_buff *skb, struct hard_iface *recv_if)
{
	struct bat_priv *bat_priv = netdev_priv(recv_if->soft_iface);
	struct orig_node *orig_node = NULL;
	struct bcast_packet *bcast_packet;
	struct ethhdr *ethhdr;
	int hdr_size = sizeof(*bcast_packet);
	int ret = NET_RX_DROP;
	int32_t seq_diff;

	/* drop packet if it has not necessary minimum size */
	if (unlikely(!pskb_may_pull(skb, hdr_size)))
		goto out;

	ethhdr = (struct ethhdr *)skb_mac_header(skb);

	/* packet with broadcast indication but unicast recipient */
	if (!is_broadcast_ether_addr(ethhdr->h_dest))
		goto out;

	/* packet with broadcast sender address */
	if (is_broadcast_ether_addr(ethhdr->h_source))
		goto out;

	/* ignore broadcasts sent by myself */
	if (is_my_mac(ethhdr->h_source))
		goto out;

	bcast_packet = (struct bcast_packet *)skb->data;

	/* ignore broadcasts originated by myself */
	if (is_my_mac(bcast_packet->orig))
		goto out;

	if (bcast_packet->header.ttl < 2)
		goto out;

	orig_node = orig_hash_find(bat_priv, bcast_packet->orig);
	if (!orig_node)
		goto out;

	spin_lock_bh(&orig_node->bcast_seqno_lock);

	/* check whether the packet is a duplicate */
	if (bat_test_bit(orig_node->bcast_bits, orig_node->last_bcast_seqno,
			 ntohl(bcast_packet->seqno)))
		goto spin_unlock;

	seq_diff = ntohl(bcast_packet->seqno) - orig_node->last_bcast_seqno;

	/* check whether the packet is old and the host just restarted. */
	if (window_protected(bat_priv, seq_diff,
			     &orig_node->bcast_seqno_reset))
		goto spin_unlock;

	/* mark broadcast in flood history, update window position
	 * if required. */
	if (bit_get_packet(bat_priv, orig_node->bcast_bits, seq_diff, 1))
		orig_node->last_bcast_seqno = ntohl(bcast_packet->seqno);

	spin_unlock_bh(&orig_node->bcast_seqno_lock);

	/* rebroadcast packet */
	add_bcast_packet_to_list(bat_priv, skb, 1);

	/* don't hand the broadcast up if it is from an originator
	 * from the same backbone.
	 */
	if (bla_is_backbone_gw(skb, orig_node, hdr_size))
		goto out;

	/* broadcast for me */
	interface_rx(recv_if->soft_iface, skb, recv_if, hdr_size);
	ret = NET_RX_SUCCESS;
	goto out;

spin_unlock:
	spin_unlock_bh(&orig_node->bcast_seqno_lock);
out:
	if (orig_node)
		orig_node_free_ref(orig_node);
	return ret;
}

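/* receive handler for vis packets: hand server sync and client update
 * packets addressed to us to the vis code; the skb is always dropped
 * afterwards because the data has been copied
 */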
int recv_vis_packet(struct sk_buff *skb, struct hard_iface *recv_if)
{
	struct vis_packet *vis_packet;
	struct ethhdr *ethhdr;
	struct bat_priv *bat_priv = netdev_priv(recv_if->soft_iface);
	int hdr_size = sizeof(*vis_packet);

	/* keep skb linear */
	if (skb_linearize(skb) < 0)
		return NET_RX_DROP;

	if (unlikely(!pskb_may_pull(skb, hdr_size)))
		return NET_RX_DROP;

	vis_packet = (struct vis_packet *)skb->data;
	ethhdr = (struct ethhdr *)skb_mac_header(skb);

	/* not for me */
	if (!is_my_mac(ethhdr->h_dest))
		return NET_RX_DROP;

	/* ignore own packets */
	if (is_my_mac(vis_packet->vis_orig))
		return NET_RX_DROP;

	if (is_my_mac(vis_packet->sender_orig))
		return NET_RX_DROP;

	switch (vis_packet->vis_type) {
	case VIS_TYPE_SERVER_SYNC:
		receive_server_sync_packet(bat_priv, vis_packet,
					   skb_headlen(skb));
		break;
	case VIS_TYPE_CLIENT_UPDATE:
		receive_client_update_packet(bat_priv, vis_packet,
					     skb_headlen(skb));
		break;
	default:	/* ignore unknown packet */
		break;
	}

	/* We take a copy of the data in the packet, so we should
	 * always free the skbuf. */
	return NET_RX_DROP;
}