/*
 * Copyright (C) 2007-2012 B.A.T.M.A.N. contributors:
 *
 * Marek Lindner, Simon Wunderlich
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
 * 02110-1301, USA
 *
 */

#include "main.h"
#include "routing.h"
#include "send.h"
#include "soft-interface.h"
#include "hard-interface.h"
#include "icmp_socket.h"
#include "translation-table.h"
#include "originator.h"
#include "vis.h"
#include "unicast.h"
#include "bridge_loop_avoidance.h"

static int route_unicast_packet(struct sk_buff *skb,
                                struct hard_iface *recv_if);
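
/* Slide our own OGM window for this interface: shift each originator's
 * bcast_own bitfield one sequence number ahead and refresh bcast_own_sum,
 * the count of our own OGMs echoed back by that originator, which presumably
 * serves as the local reference for link quality.
 */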
void slide_own_bcast_window(struct hard_iface *hard_iface)
{
        struct bat_priv *bat_priv = netdev_priv(hard_iface->soft_iface);
        struct hashtable_t *hash = bat_priv->orig_hash;
        struct hlist_node *node;
        struct hlist_head *head;
        struct orig_node *orig_node;
        unsigned long *word;
        uint32_t i;
        size_t word_index;

        for (i = 0; i < hash->size; i++) {
                head = &hash->table[i];

                rcu_read_lock();
                hlist_for_each_entry_rcu(orig_node, node, head, hash_entry) {
                        spin_lock_bh(&orig_node->ogm_cnt_lock);
                        word_index = hard_iface->if_num * NUM_WORDS;
                        word = &(orig_node->bcast_own[word_index]);

                        bit_get_packet(bat_priv, word, 1, 0);
                        orig_node->bcast_own_sum[hard_iface->if_num] =
                                bitmap_weight(word, TQ_LOCAL_WINDOW_SIZE);
                        spin_unlock_bh(&orig_node->ogm_cnt_lock);
                }
                rcu_read_unlock();
        }
}
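
/* Replace the cached best next hop (router) towards an originator. A NULL
 * neigh_node deletes the route, a missing current router means a new route
 * is added; the new pointer is published with rcu_assign_pointer() under
 * neigh_list_lock and the refcounts of old and new router are rebalanced.
 */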
static void _update_route(struct bat_priv *bat_priv,
                          struct orig_node *orig_node,
                          struct neigh_node *neigh_node)
{
        struct neigh_node *curr_router;

        curr_router = orig_node_get_router(orig_node);

        /* route deleted */
        if ((curr_router) && (!neigh_node)) {
                bat_dbg(DBG_ROUTES, bat_priv, "Deleting route towards: %pM\n",
                        orig_node->orig);
                tt_global_del_orig(bat_priv, orig_node,
                                   "Deleted route towards originator");

        /* route added */
        } else if ((!curr_router) && (neigh_node)) {
                bat_dbg(DBG_ROUTES, bat_priv,
                        "Adding route towards: %pM (via %pM)\n",
                        orig_node->orig, neigh_node->addr);

        /* route changed */
        } else if (neigh_node && curr_router) {
                bat_dbg(DBG_ROUTES, bat_priv,
                        "Changing route towards: %pM (now via %pM - was via %pM)\n",
                        orig_node->orig, neigh_node->addr,
                        curr_router->addr);
        }

        if (curr_router)
                neigh_node_free_ref(curr_router);

        /* increase refcount of new best neighbor */
        if (neigh_node && !atomic_inc_not_zero(&neigh_node->refcount))
                neigh_node = NULL;

        spin_lock_bh(&orig_node->neigh_list_lock);
        rcu_assign_pointer(orig_node->router, neigh_node);
        spin_unlock_bh(&orig_node->neigh_list_lock);

        /* decrease refcount of previous best neighbor */
        if (curr_router)
                neigh_node_free_ref(curr_router);
}

void update_route(struct bat_priv *bat_priv, struct orig_node *orig_node,
                  struct neigh_node *neigh_node)
{
        struct neigh_node *router = NULL;

        if (!orig_node)
                goto out;

        router = orig_node_get_router(orig_node);

        if (router != neigh_node)
                _update_route(bat_priv, orig_node, neigh_node);

out:
        if (router)
                neigh_node_free_ref(router);
}

/* caller must hold the neigh_list_lock */
void bonding_candidate_del(struct orig_node *orig_node,
                           struct neigh_node *neigh_node)
{
        /* this neighbor is not part of our candidate list */
        if (list_empty(&neigh_node->bonding_list))
                goto out;

        list_del_rcu(&neigh_node->bonding_list);
        INIT_LIST_HEAD(&neigh_node->bonding_list);
        neigh_node_free_ref(neigh_node);
        atomic_dec(&orig_node->bond_candidates);

out:
        return;
}
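
/* Consider neigh_node as a bonding candidate for orig_node: it must share
 * the originator's primary address, have a TQ within BONDING_TQ_THRESHOLD of
 * the currently selected router and must not share a MAC address or incoming
 * interface with an existing candidate (possible interference).
 */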
void bonding_candidate_add(struct orig_node *orig_node,
                           struct neigh_node *neigh_node)
{
        struct hlist_node *node;
        struct neigh_node *tmp_neigh_node, *router = NULL;
        uint8_t interference_candidate = 0;

        spin_lock_bh(&orig_node->neigh_list_lock);

        /* only consider if it has the same primary address ... */
        if (!compare_eth(orig_node->orig,
                         neigh_node->orig_node->primary_addr))
                goto candidate_del;

        router = orig_node_get_router(orig_node);
        if (!router)
                goto candidate_del;

        /* ... and is good enough to be considered */
        if (neigh_node->tq_avg < router->tq_avg - BONDING_TQ_THRESHOLD)
                goto candidate_del;

        /**
         * check if we have another candidate with the same mac address or
         * interface. If we do, we won't select this candidate because of
         * possible interference.
         */
        hlist_for_each_entry_rcu(tmp_neigh_node, node,
                                 &orig_node->neigh_list, list) {
                if (tmp_neigh_node == neigh_node)
                        continue;

                /* we only care if the other candidate is even
                 * considered as candidate. */
                if (list_empty(&tmp_neigh_node->bonding_list))
                        continue;

                if ((neigh_node->if_incoming == tmp_neigh_node->if_incoming) ||
                    (compare_eth(neigh_node->addr, tmp_neigh_node->addr))) {
                        interference_candidate = 1;
                        break;
                }
        }

        /* don't care further if it is an interference candidate */
        if (interference_candidate)
                goto candidate_del;

        /* this neighbor already is part of our candidate list */
        if (!list_empty(&neigh_node->bonding_list))
                goto out;

        if (!atomic_inc_not_zero(&neigh_node->refcount))
                goto out;

        list_add_rcu(&neigh_node->bonding_list, &orig_node->bond_list);
        atomic_inc(&orig_node->bond_candidates);
        goto out;

candidate_del:
        bonding_candidate_del(orig_node, neigh_node);

out:
        spin_unlock_bh(&orig_node->neigh_list_lock);

        if (router)
                neigh_node_free_ref(router);
}

/* copy primary address for bonding */
void bonding_save_primary(const struct orig_node *orig_node,
                          struct orig_node *orig_neigh_node,
                          const struct batman_ogm_packet *batman_ogm_packet)
{
        if (!(batman_ogm_packet->flags & PRIMARIES_FIRST_HOP))
                return;

        memcpy(orig_neigh_node->primary_addr, orig_node->orig, ETH_ALEN);
}

/* checks whether the host restarted and is in the protection time.
 * returns:
 *  0 if the packet is to be accepted
 *  1 if the packet is to be ignored.
 */
int window_protected(struct bat_priv *bat_priv, int32_t seq_num_diff,
                     unsigned long *last_reset)
{
        if ((seq_num_diff <= -TQ_LOCAL_WINDOW_SIZE) ||
            (seq_num_diff >= EXPECTED_SEQNO_RANGE)) {
                if (has_timed_out(*last_reset, RESET_PROTECTION_MS)) {
                        *last_reset = jiffies;
                        bat_dbg(DBG_BATMAN, bat_priv,
                                "old packet received, start protection\n");
                        return 0;
                } else {
                        return 1;
                }
        }
        return 0;
}
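
/* Receive handler for OGM packets: sanity-check the Ethernet header, make the
 * skb writable and linear and pass it to the routing algorithm's
 * bat_ogm_receive() hook; the skb is freed here on success, while NET_RX_DROP
 * leaves freeing to the caller.
 */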
int recv_bat_ogm_packet(struct sk_buff *skb, struct hard_iface *hard_iface)
{
        struct bat_priv *bat_priv = netdev_priv(hard_iface->soft_iface);
        struct ethhdr *ethhdr;

        /* drop packet if it does not have the necessary minimum size */
        if (unlikely(!pskb_may_pull(skb, BATMAN_OGM_LEN)))
                return NET_RX_DROP;

        ethhdr = (struct ethhdr *)skb_mac_header(skb);

        /* packet with broadcast indication but unicast recipient */
        if (!is_broadcast_ether_addr(ethhdr->h_dest))
                return NET_RX_DROP;

        /* packet with broadcast sender address */
        if (is_broadcast_ether_addr(ethhdr->h_source))
                return NET_RX_DROP;

        /* create a copy of the skb, if needed, to modify it. */
        if (skb_cow(skb, 0) < 0)
                return NET_RX_DROP;

        /* keep skb linear */
        if (skb_linearize(skb) < 0)
                return NET_RX_DROP;

        bat_priv->bat_algo_ops->bat_ogm_receive(hard_iface, skb);

        kfree_skb(skb);
        return NET_RX_SUCCESS;
}
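
/* Handle an ICMP packet whose final destination is this node: anything other
 * than an echo request is handed to the ICMP socket code
 * (bat_socket_receive_packet()), echo requests are turned into echo replies
 * and sent back towards the originator via its router.
 */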
static int recv_my_icmp_packet(struct bat_priv *bat_priv,
                               struct sk_buff *skb, size_t icmp_len)
{
        struct hard_iface *primary_if = NULL;
        struct orig_node *orig_node = NULL;
        struct neigh_node *router = NULL;
        struct icmp_packet_rr *icmp_packet;
        int ret = NET_RX_DROP;

        icmp_packet = (struct icmp_packet_rr *)skb->data;

        /* add data to device queue */
        if (icmp_packet->msg_type != ECHO_REQUEST) {
                bat_socket_receive_packet(icmp_packet, icmp_len);
                goto out;
        }

        primary_if = primary_if_get_selected(bat_priv);
        if (!primary_if)
                goto out;

        /* answer echo request (ping) */
        /* get routing information */
        orig_node = orig_hash_find(bat_priv, icmp_packet->orig);
        if (!orig_node)
                goto out;

        router = orig_node_get_router(orig_node);
        if (!router)
                goto out;

        /* create a copy of the skb, if needed, to modify it. */
        if (skb_cow(skb, sizeof(struct ethhdr)) < 0)
                goto out;

        icmp_packet = (struct icmp_packet_rr *)skb->data;

        memcpy(icmp_packet->dst, icmp_packet->orig, ETH_ALEN);
        memcpy(icmp_packet->orig, primary_if->net_dev->dev_addr, ETH_ALEN);
        icmp_packet->msg_type = ECHO_REPLY;
        icmp_packet->header.ttl = TTL;

        send_skb_packet(skb, router->if_incoming, router->addr);
        ret = NET_RX_SUCCESS;

out:
        if (primary_if)
                hardif_free_ref(primary_if);
        if (router)
                neigh_node_free_ref(router);
        if (orig_node)
                orig_node_free_ref(orig_node);
        return ret;
}

static int recv_icmp_ttl_exceeded(struct bat_priv *bat_priv,
                                  struct sk_buff *skb)
{
        struct hard_iface *primary_if = NULL;
        struct orig_node *orig_node = NULL;
        struct neigh_node *router = NULL;
        struct icmp_packet *icmp_packet;
        int ret = NET_RX_DROP;

        icmp_packet = (struct icmp_packet *)skb->data;

        /* send TTL exceeded if packet is an echo request (traceroute) */
        if (icmp_packet->msg_type != ECHO_REQUEST) {
                pr_debug("Warning - can't forward icmp packet from %pM to %pM: ttl exceeded\n",
                         icmp_packet->orig, icmp_packet->dst);
                goto out;
        }

        primary_if = primary_if_get_selected(bat_priv);
        if (!primary_if)
                goto out;

        /* get routing information */
        orig_node = orig_hash_find(bat_priv, icmp_packet->orig);
        if (!orig_node)
                goto out;

        router = orig_node_get_router(orig_node);
        if (!router)
                goto out;

        /* create a copy of the skb, if needed, to modify it. */
        if (skb_cow(skb, sizeof(struct ethhdr)) < 0)
                goto out;

        icmp_packet = (struct icmp_packet *)skb->data;

        memcpy(icmp_packet->dst, icmp_packet->orig, ETH_ALEN);
        memcpy(icmp_packet->orig, primary_if->net_dev->dev_addr, ETH_ALEN);
        icmp_packet->msg_type = TTL_EXCEEDED;
        icmp_packet->header.ttl = TTL;

        send_skb_packet(skb, router->if_incoming, router->addr);
        ret = NET_RX_SUCCESS;

out:
        if (primary_if)
                hardif_free_ref(primary_if);
        if (router)
                neigh_node_free_ref(router);
        if (orig_node)
                orig_node_free_ref(orig_node);
        return ret;
}
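
/* Common receive handler for BATMAN ICMP packets: validate the frame, append
 * record-route data while there is room, then either answer locally (packet
 * for me or TTL exceeded) or forward the packet towards its destination with
 * a decremented TTL.
 */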
int recv_icmp_packet(struct sk_buff *skb, struct hard_iface *recv_if)
{
        struct bat_priv *bat_priv = netdev_priv(recv_if->soft_iface);
        struct icmp_packet_rr *icmp_packet;
        struct ethhdr *ethhdr;
        struct orig_node *orig_node = NULL;
        struct neigh_node *router = NULL;
        int hdr_size = sizeof(struct icmp_packet);
        int ret = NET_RX_DROP;

        /**
         * we truncate all incoming icmp packets if they don't match our size
         */
        if (skb->len >= sizeof(struct icmp_packet_rr))
                hdr_size = sizeof(struct icmp_packet_rr);

        /* drop packet if it does not have the necessary minimum size */
        if (unlikely(!pskb_may_pull(skb, hdr_size)))
                goto out;

        ethhdr = (struct ethhdr *)skb_mac_header(skb);

        /* packet with unicast indication but broadcast recipient */
        if (is_broadcast_ether_addr(ethhdr->h_dest))
                goto out;

        /* packet with broadcast sender address */
        if (is_broadcast_ether_addr(ethhdr->h_source))
                goto out;

        /* not for me */
        if (!is_my_mac(ethhdr->h_dest))
                goto out;

        icmp_packet = (struct icmp_packet_rr *)skb->data;

        /* add record route information if not full */
        if ((hdr_size == sizeof(struct icmp_packet_rr)) &&
            (icmp_packet->rr_cur < BAT_RR_LEN)) {
                memcpy(&(icmp_packet->rr[icmp_packet->rr_cur]),
                       ethhdr->h_dest, ETH_ALEN);
                icmp_packet->rr_cur++;
        }

        /* packet for me */
        if (is_my_mac(icmp_packet->dst))
                return recv_my_icmp_packet(bat_priv, skb, hdr_size);

        /* TTL exceeded */
        if (icmp_packet->header.ttl < 2)
                return recv_icmp_ttl_exceeded(bat_priv, skb);

        /* get routing information */
        orig_node = orig_hash_find(bat_priv, icmp_packet->dst);
        if (!orig_node)
                goto out;

        router = orig_node_get_router(orig_node);
        if (!router)
                goto out;

        /* create a copy of the skb, if needed, to modify it. */
        if (skb_cow(skb, sizeof(struct ethhdr)) < 0)
                goto out;

        icmp_packet = (struct icmp_packet_rr *)skb->data;

        /* decrement ttl */
        icmp_packet->header.ttl--;

        /* route it */
        send_skb_packet(skb, router->if_incoming, router->addr);
        ret = NET_RX_SUCCESS;

out:
        if (router)
                neigh_node_free_ref(router);
        if (orig_node)
                orig_node_free_ref(orig_node);
        return ret;
}

/* In the bonding case, send the packets in a round
 * robin fashion over the remaining interfaces.
 *
 * This method rotates the bonding list and increases the
 * returned router's refcount. */
static struct neigh_node *find_bond_router(struct orig_node *primary_orig,
                                           const struct hard_iface *recv_if)
{
        struct neigh_node *tmp_neigh_node;
        struct neigh_node *router = NULL, *first_candidate = NULL;

        rcu_read_lock();
        list_for_each_entry_rcu(tmp_neigh_node, &primary_orig->bond_list,
                                bonding_list) {
                if (!first_candidate)
                        first_candidate = tmp_neigh_node;

                /* recv_if == NULL on the first node. */
                if (tmp_neigh_node->if_incoming == recv_if)
                        continue;

                if (!atomic_inc_not_zero(&tmp_neigh_node->refcount))
                        continue;

                router = tmp_neigh_node;
                break;
        }

        /* use the first candidate if nothing was found. */
        if (!router && first_candidate &&
            atomic_inc_not_zero(&first_candidate->refcount))
                router = first_candidate;

        if (!router)
                goto out;

        /* selected should point to the next element
         * after the current router */
        spin_lock_bh(&primary_orig->neigh_list_lock);
        /* this is a list_move(), which unfortunately
         * does not exist as rcu version */
        list_del_rcu(&primary_orig->bond_list);
        list_add_rcu(&primary_orig->bond_list,
                     &router->bonding_list);
        spin_unlock_bh(&primary_orig->neigh_list_lock);

out:
        rcu_read_unlock();
        return router;
}

/* Interface Alternating: Use the best of the
 * remaining candidates which are not using
 * this interface.
 *
 * Increases the returned router's refcount */
static struct neigh_node *find_ifalter_router(struct orig_node *primary_orig,
                                              const struct hard_iface *recv_if)
{
        struct neigh_node *tmp_neigh_node;
        struct neigh_node *router = NULL, *first_candidate = NULL;

        rcu_read_lock();
        list_for_each_entry_rcu(tmp_neigh_node, &primary_orig->bond_list,
                                bonding_list) {
                if (!first_candidate)
                        first_candidate = tmp_neigh_node;

                /* recv_if == NULL on the first node. */
                if (tmp_neigh_node->if_incoming == recv_if)
                        continue;

                if (!atomic_inc_not_zero(&tmp_neigh_node->refcount))
                        continue;

                /* if we don't have a router yet
                 * or this one is better, choose it. */
                if ((!router) ||
                    (tmp_neigh_node->tq_avg > router->tq_avg)) {
                        /* decrement refcount of
                         * previously selected router */
                        if (router)
                                neigh_node_free_ref(router);

                        router = tmp_neigh_node;
                        atomic_inc_not_zero(&router->refcount);
                }

                neigh_node_free_ref(tmp_neigh_node);
        }

        /* use the first candidate if nothing was found. */
        if (!router && first_candidate &&
            atomic_inc_not_zero(&first_candidate->refcount))
                router = first_candidate;

        rcu_read_unlock();
        return router;
}
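
/* Receive a translation table query. TT_REQUESTs we cannot answer and
 * TT_RESPONSEs not addressed to us are routed onwards like normal unicast
 * packets; a response for us is linearized, length-checked and handed to
 * handle_tt_response().
 */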
int recv_tt_query(struct sk_buff *skb, struct hard_iface *recv_if)
{
        struct bat_priv *bat_priv = netdev_priv(recv_if->soft_iface);
        struct tt_query_packet *tt_query;
        uint16_t tt_len;
        struct ethhdr *ethhdr;

        /* drop packet if it does not have the necessary minimum size */
        if (unlikely(!pskb_may_pull(skb, sizeof(struct tt_query_packet))))
                goto out;

        /* I could need to modify it */
        if (skb_cow(skb, sizeof(struct tt_query_packet)) < 0)
                goto out;

        ethhdr = (struct ethhdr *)skb_mac_header(skb);

        /* packet with unicast indication but broadcast recipient */
        if (is_broadcast_ether_addr(ethhdr->h_dest))
                goto out;

        /* packet with broadcast sender address */
        if (is_broadcast_ether_addr(ethhdr->h_source))
                goto out;

        tt_query = (struct tt_query_packet *)skb->data;

        tt_query->tt_data = ntohs(tt_query->tt_data);

        switch (tt_query->flags & TT_QUERY_TYPE_MASK) {
        case TT_REQUEST:
                /* If we cannot provide an answer the tt_request is
                 * forwarded */
                if (!send_tt_response(bat_priv, tt_query)) {
                        bat_dbg(DBG_TT, bat_priv,
                                "Routing TT_REQUEST to %pM [%c]\n",
                                tt_query->dst,
                                (tt_query->flags & TT_FULL_TABLE ? 'F' : '.'));
                        tt_query->tt_data = htons(tt_query->tt_data);
                        return route_unicast_packet(skb, recv_if);
                }
                break;
        case TT_RESPONSE:
                if (is_my_mac(tt_query->dst)) {
                        /* packet needs to be linearized to access the TT
                         * changes */
                        if (skb_linearize(skb) < 0)
                                goto out;

                        tt_len = tt_query->tt_data * sizeof(struct tt_change);

                        /* Ensure we have all the claimed data */
                        if (unlikely(skb_headlen(skb) <
                                     sizeof(struct tt_query_packet) + tt_len))
                                goto out;

                        handle_tt_response(bat_priv, tt_query);
                } else {
                        bat_dbg(DBG_TT, bat_priv,
                                "Routing TT_RESPONSE to %pM [%c]\n",
                                tt_query->dst,
                                (tt_query->flags & TT_FULL_TABLE ? 'F' : '.'));
                        tt_query->tt_data = htons(tt_query->tt_data);
                        return route_unicast_packet(skb, recv_if);
                }
                break;
        }

out:
        /* returning NET_RX_DROP will make the caller function kfree the skb */
        return NET_RX_DROP;
}
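
/* Receive a roaming advertisement. If it is addressed to us and does not come
 * from a backbone gateway, add the roamed client to the global translation
 * table with the originator's next ttvn and set tt_poss_change, so incoming
 * unicast packets are re-checked against the translation table while the
 * roaming phase lasts.
 */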
int recv_roam_adv(struct sk_buff *skb, struct hard_iface *recv_if)
{
        struct bat_priv *bat_priv = netdev_priv(recv_if->soft_iface);
        struct roam_adv_packet *roam_adv_packet;
        struct orig_node *orig_node;
        struct ethhdr *ethhdr;

        /* drop packet if it does not have the necessary minimum size */
        if (unlikely(!pskb_may_pull(skb, sizeof(struct roam_adv_packet))))
                goto out;

        ethhdr = (struct ethhdr *)skb_mac_header(skb);

        /* packet with unicast indication but broadcast recipient */
        if (is_broadcast_ether_addr(ethhdr->h_dest))
                goto out;

        /* packet with broadcast sender address */
        if (is_broadcast_ether_addr(ethhdr->h_source))
                goto out;

        roam_adv_packet = (struct roam_adv_packet *)skb->data;

        if (!is_my_mac(roam_adv_packet->dst))
                return route_unicast_packet(skb, recv_if);

        /* check if it is a backbone gateway. we don't accept
         * roaming advertisement from it, as it has the same
         * entries as we have.
         */
        if (bla_is_backbone_gw_orig(bat_priv, roam_adv_packet->src))
                goto out;

        orig_node = orig_hash_find(bat_priv, roam_adv_packet->src);
        if (!orig_node)
                goto out;

        bat_dbg(DBG_TT, bat_priv,
                "Received ROAMING_ADV from %pM (client %pM)\n",
                roam_adv_packet->src, roam_adv_packet->client);

        tt_global_add(bat_priv, orig_node, roam_adv_packet->client,
                      atomic_read(&orig_node->last_ttvn) + 1, true, false);

        /* Roaming phase starts: I have new information but the ttvn has not
         * been incremented yet. This flag will make me check all the incoming
         * packets for the correct destination. */
        bat_priv->tt_poss_change = true;

        orig_node_free_ref(orig_node);
out:
        /* returning NET_RX_DROP will make the caller function kfree the skb */
        return NET_RX_DROP;
}

/* find a suitable router for this originator, and use
 * bonding if possible. increases the found neighbor's
 * refcount. */
struct neigh_node *find_router(struct bat_priv *bat_priv,
                               struct orig_node *orig_node,
                               const struct hard_iface *recv_if)
{
        struct orig_node *primary_orig_node;
        struct orig_node *router_orig;
        struct neigh_node *router;
        static uint8_t zero_mac[ETH_ALEN] = {0, 0, 0, 0, 0, 0};
        int bonding_enabled;

        if (!orig_node)
                return NULL;

        router = orig_node_get_router(orig_node);
        if (!router)
                goto err;

        /* without bonding, the first node should
         * always choose the default router. */
        bonding_enabled = atomic_read(&bat_priv->bonding);

        rcu_read_lock();
        /* select default router to output */
        router_orig = router->orig_node;
        if (!router_orig)
                goto err_unlock;

        if ((!recv_if) && (!bonding_enabled))
                goto return_router;

        /* if we have something in the primary_addr, we can search
         * for a potential bonding candidate. */
        if (compare_eth(router_orig->primary_addr, zero_mac))
                goto return_router;

        /* find the orig_node which has the primary interface. might
         * even be the same as our router_orig in many cases */
        if (compare_eth(router_orig->primary_addr, router_orig->orig)) {
                primary_orig_node = router_orig;
        } else {
                primary_orig_node = orig_hash_find(bat_priv,
                                                   router_orig->primary_addr);
                if (!primary_orig_node)
                        goto return_router;

                orig_node_free_ref(primary_orig_node);
        }

        /* with less than 2 candidates, we can't do any
         * bonding and prefer the original router. */
        if (atomic_read(&primary_orig_node->bond_candidates) < 2)
                goto return_router;

        /* all nodes between should choose a candidate which
         * is not on the interface where the packet came in. */
        neigh_node_free_ref(router);

        if (bonding_enabled)
                router = find_bond_router(primary_orig_node, recv_if);
        else
                router = find_ifalter_router(primary_orig_node, recv_if);

return_router:
        if (router && router->if_incoming->if_status != IF_ACTIVE)
                goto err_unlock;

        rcu_read_unlock();
        return router;
err_unlock:
        rcu_read_unlock();
err:
        if (router)
                neigh_node_free_ref(router);
        return NULL;
}
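
/* Sanity checks shared by the unicast receive paths: minimum header size,
 * plausible Ethernet addresses and a destination matching one of our own MAC
 * addresses. Returns 0 if the packet may be processed, -1 otherwise.
 */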
static int check_unicast_packet(struct sk_buff *skb, int hdr_size)
{
        struct ethhdr *ethhdr;

        /* drop packet if it does not have the necessary minimum size */
        if (unlikely(!pskb_may_pull(skb, hdr_size)))
                return -1;

        ethhdr = (struct ethhdr *)skb_mac_header(skb);

        /* packet with unicast indication but broadcast recipient */
        if (is_broadcast_ether_addr(ethhdr->h_dest))
                return -1;

        /* packet with broadcast sender address */
        if (is_broadcast_ether_addr(ethhdr->h_source))
                return -1;

        /* not for me */
        if (!is_my_mac(ethhdr->h_dest))
                return -1;

        return 0;
}
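
/* Forward a unicast packet one hop towards its destination: look up the
 * originator and a suitable router (honouring bonding/interface alternating),
 * fragment oversized plain unicast packets or merge fragments that would fit
 * the outgoing MTU, then decrement the TTL and transmit.
 */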
static int route_unicast_packet(struct sk_buff *skb, struct hard_iface *recv_if)
{
        struct bat_priv *bat_priv = netdev_priv(recv_if->soft_iface);
        struct orig_node *orig_node = NULL;
        struct neigh_node *neigh_node = NULL;
        struct unicast_packet *unicast_packet;
        struct ethhdr *ethhdr = (struct ethhdr *)skb_mac_header(skb);
        int ret = NET_RX_DROP;
        struct sk_buff *new_skb;

        unicast_packet = (struct unicast_packet *)skb->data;

        /* TTL exceeded */
        if (unicast_packet->header.ttl < 2) {
                pr_debug("Warning - can't forward unicast packet from %pM to %pM: ttl exceeded\n",
                         ethhdr->h_source, unicast_packet->dest);
                goto out;
        }

        /* get routing information */
        orig_node = orig_hash_find(bat_priv, unicast_packet->dest);
        if (!orig_node)
                goto out;

        /* find_router() increases neigh_node's refcount if found. */
        neigh_node = find_router(bat_priv, orig_node, recv_if);
        if (!neigh_node)
                goto out;

        /* create a copy of the skb, if needed, to modify it. */
        if (skb_cow(skb, sizeof(struct ethhdr)) < 0)
                goto out;

        unicast_packet = (struct unicast_packet *)skb->data;

        if (unicast_packet->header.packet_type == BAT_UNICAST &&
            atomic_read(&bat_priv->fragmentation) &&
            skb->len > neigh_node->if_incoming->net_dev->mtu) {
                ret = frag_send_skb(skb, bat_priv,
                                    neigh_node->if_incoming, neigh_node->addr);
                goto out;
        }

        if (unicast_packet->header.packet_type == BAT_UNICAST_FRAG &&
            frag_can_reassemble(skb, neigh_node->if_incoming->net_dev->mtu)) {

                ret = frag_reassemble_skb(skb, bat_priv, &new_skb);

                if (ret == NET_RX_DROP)
                        goto out;

                /* packet was buffered for late merge */
                if (!new_skb) {
                        ret = NET_RX_SUCCESS;
                        goto out;
                }

                skb = new_skb;
                unicast_packet = (struct unicast_packet *)skb->data;
        }

        /* decrement ttl */
        unicast_packet->header.ttl--;

        /* route it */
        send_skb_packet(skb, neigh_node->if_incoming, neigh_node->addr);
        ret = NET_RX_SUCCESS;

out:
        if (neigh_node)
                neigh_node_free_ref(neigh_node);
        if (orig_node)
                orig_node_free_ref(orig_node);
        return ret;
}
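
/* Compare the translation table version number (ttvn) carried in a unicast
 * packet with the one currently known for its destination. If the sender used
 * stale information, or a roaming phase is pending (tt_poss_change), look the
 * client up in the translation table and rewrite the packet's destination.
 * Returns 0 if the packet has to be dropped, 1 otherwise.
 */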
static int check_unicast_ttvn(struct bat_priv *bat_priv,
                              struct sk_buff *skb)
{
        uint8_t curr_ttvn;
        struct orig_node *orig_node;
        struct ethhdr *ethhdr;
        struct hard_iface *primary_if;
        struct unicast_packet *unicast_packet;
        bool tt_poss_change;

        /* I could need to modify it */
        if (skb_cow(skb, sizeof(struct unicast_packet)) < 0)
                return 0;

        unicast_packet = (struct unicast_packet *)skb->data;

        if (is_my_mac(unicast_packet->dest)) {
                tt_poss_change = bat_priv->tt_poss_change;
                curr_ttvn = (uint8_t)atomic_read(&bat_priv->ttvn);
        } else {
                orig_node = orig_hash_find(bat_priv, unicast_packet->dest);

                if (!orig_node)
                        return 0;

                curr_ttvn = (uint8_t)atomic_read(&orig_node->last_ttvn);
                tt_poss_change = orig_node->tt_poss_change;
                orig_node_free_ref(orig_node);
        }

        /* Check whether I have to reroute the packet */
        if (seq_before(unicast_packet->ttvn, curr_ttvn) || tt_poss_change) {
                /* Linearize the skb before accessing it */
                if (skb_linearize(skb) < 0)
                        return 0;

                ethhdr = (struct ethhdr *)(skb->data +
                                           sizeof(struct unicast_packet));

                orig_node = transtable_search(bat_priv, NULL, ethhdr->h_dest);

                if (!orig_node) {
                        if (!is_my_client(bat_priv, ethhdr->h_dest))
                                return 0;

                        primary_if = primary_if_get_selected(bat_priv);
                        if (!primary_if)
                                return 0;

                        memcpy(unicast_packet->dest,
                               primary_if->net_dev->dev_addr, ETH_ALEN);
                        hardif_free_ref(primary_if);
                } else {
                        memcpy(unicast_packet->dest, orig_node->orig,
                               ETH_ALEN);
                        curr_ttvn = (uint8_t)
                                atomic_read(&orig_node->last_ttvn);
                        orig_node_free_ref(orig_node);
                }

                bat_dbg(DBG_ROUTES, bat_priv,
                        "TTVN mismatch (old_ttvn %u new_ttvn %u)! Rerouting unicast packet (for %pM) to %pM\n",
                        unicast_packet->ttvn, curr_ttvn, ethhdr->h_dest,
                        unicast_packet->dest);

                unicast_packet->ttvn = curr_ttvn;
        }
        return 1;
}
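
/* Receive a plain unicast packet: after the sanity and ttvn checks it is
 * either delivered to the soft interface (packet for me) or routed onwards.
 */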
int recv_unicast_packet(struct sk_buff *skb, struct hard_iface *recv_if)
{
        struct bat_priv *bat_priv = netdev_priv(recv_if->soft_iface);
        struct unicast_packet *unicast_packet;
        int hdr_size = sizeof(*unicast_packet);

        if (check_unicast_packet(skb, hdr_size) < 0)
                return NET_RX_DROP;

        if (!check_unicast_ttvn(bat_priv, skb))
                return NET_RX_DROP;

        unicast_packet = (struct unicast_packet *)skb->data;

        /* packet for me */
        if (is_my_mac(unicast_packet->dest)) {
                interface_rx(recv_if->soft_iface, skb, recv_if, hdr_size);
                return NET_RX_SUCCESS;
        }

        return route_unicast_packet(skb, recv_if);
}

int recv_ucast_frag_packet(struct sk_buff *skb, struct hard_iface *recv_if)
{
        struct bat_priv *bat_priv = netdev_priv(recv_if->soft_iface);
        struct unicast_frag_packet *unicast_packet;
        int hdr_size = sizeof(*unicast_packet);
        struct sk_buff *new_skb = NULL;
        int ret;

        if (check_unicast_packet(skb, hdr_size) < 0)
                return NET_RX_DROP;

        if (!check_unicast_ttvn(bat_priv, skb))
                return NET_RX_DROP;

        unicast_packet = (struct unicast_frag_packet *)skb->data;

        /* packet for me */
        if (is_my_mac(unicast_packet->dest)) {

                ret = frag_reassemble_skb(skb, bat_priv, &new_skb);

                if (ret == NET_RX_DROP)
                        return NET_RX_DROP;

                /* packet was buffered for late merge */
                if (!new_skb)
                        return NET_RX_SUCCESS;

                interface_rx(recv_if->soft_iface, new_skb, recv_if,
                             sizeof(struct unicast_packet));
                return NET_RX_SUCCESS;
        }

        return route_unicast_packet(skb, recv_if);
}
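
/* Receive a broadcast packet: drop our own and duplicate broadcasts, apply
 * the sequence number window including restart protection, queue the packet
 * for rebroadcast and, unless it originates from our own backbone (bridge
 * loop avoidance), hand it up to the soft interface.
 */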
int recv_bcast_packet(struct sk_buff *skb, struct hard_iface *recv_if)
{
        struct bat_priv *bat_priv = netdev_priv(recv_if->soft_iface);
        struct orig_node *orig_node = NULL;
        struct bcast_packet *bcast_packet;
        struct ethhdr *ethhdr;
        int hdr_size = sizeof(*bcast_packet);
        int ret = NET_RX_DROP;
        int32_t seq_diff;

        /* drop packet if it does not have the necessary minimum size */
        if (unlikely(!pskb_may_pull(skb, hdr_size)))
                goto out;

        ethhdr = (struct ethhdr *)skb_mac_header(skb);

        /* packet with broadcast indication but unicast recipient */
        if (!is_broadcast_ether_addr(ethhdr->h_dest))
                goto out;

        /* packet with broadcast sender address */
        if (is_broadcast_ether_addr(ethhdr->h_source))
                goto out;

        /* ignore broadcasts sent by myself */
        if (is_my_mac(ethhdr->h_source))
                goto out;

        bcast_packet = (struct bcast_packet *)skb->data;

        /* ignore broadcasts originated by myself */
        if (is_my_mac(bcast_packet->orig))
                goto out;

        if (bcast_packet->header.ttl < 2)
                goto out;

        orig_node = orig_hash_find(bat_priv, bcast_packet->orig);
        if (!orig_node)
                goto out;

        spin_lock_bh(&orig_node->bcast_seqno_lock);

        /* check whether the packet is a duplicate */
        if (bat_test_bit(orig_node->bcast_bits, orig_node->last_bcast_seqno,
                         ntohl(bcast_packet->seqno)))
                goto spin_unlock;

        seq_diff = ntohl(bcast_packet->seqno) - orig_node->last_bcast_seqno;

        /* check whether the packet is old and the host just restarted. */
        if (window_protected(bat_priv, seq_diff,
                             &orig_node->bcast_seqno_reset))
                goto spin_unlock;

        /* mark broadcast in flood history, update window position
         * if required. */
        if (bit_get_packet(bat_priv, orig_node->bcast_bits, seq_diff, 1))
                orig_node->last_bcast_seqno = ntohl(bcast_packet->seqno);

        spin_unlock_bh(&orig_node->bcast_seqno_lock);

        /* check whether this has been sent by another originator before */
        if (bla_check_bcast_duplist(bat_priv, bcast_packet, hdr_size))
                goto out;

        /* rebroadcast packet */
        add_bcast_packet_to_list(bat_priv, skb, 1);

        /* don't hand the broadcast up if it is from an originator
         * from the same backbone.
         */
        if (bla_is_backbone_gw(skb, orig_node, hdr_size))
                goto out;

        /* broadcast for me */
        interface_rx(recv_if->soft_iface, skb, recv_if, hdr_size);
        ret = NET_RX_SUCCESS;
        goto out;

spin_unlock:
        spin_unlock_bh(&orig_node->bcast_seqno_lock);
out:
        if (orig_node)
                orig_node_free_ref(orig_node);
        return ret;
}
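
/* Receive a vis packet addressed to us and feed it to the visualization code
 * (server sync or client update); the data is copied there, so NET_RX_DROP is
 * returned and the caller frees the skb.
 */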
int recv_vis_packet(struct sk_buff *skb, struct hard_iface *recv_if)
{
        struct vis_packet *vis_packet;
        struct ethhdr *ethhdr;
        struct bat_priv *bat_priv = netdev_priv(recv_if->soft_iface);
        int hdr_size = sizeof(*vis_packet);

        /* keep skb linear */
        if (skb_linearize(skb) < 0)
                return NET_RX_DROP;

        if (unlikely(!pskb_may_pull(skb, hdr_size)))
                return NET_RX_DROP;

        vis_packet = (struct vis_packet *)skb->data;
        ethhdr = (struct ethhdr *)skb_mac_header(skb);

        /* not for me */
        if (!is_my_mac(ethhdr->h_dest))
                return NET_RX_DROP;

        /* ignore own packets */
        if (is_my_mac(vis_packet->vis_orig))
                return NET_RX_DROP;

        if (is_my_mac(vis_packet->sender_orig))
                return NET_RX_DROP;

        switch (vis_packet->vis_type) {
        case VIS_TYPE_SERVER_SYNC:
                receive_server_sync_packet(bat_priv, vis_packet,
                                           skb_headlen(skb));
                break;
        case VIS_TYPE_CLIENT_UPDATE:
                receive_client_update_packet(bat_priv, vis_packet,
                                             skb_headlen(skb));
                break;
        default:        /* ignore unknown packet */
                break;
        }

        /* We take a copy of the data in the packet, so we should
         * always free the skb. */
        return NET_RX_DROP;
}