routing.c

/*
 * Copyright (C) 2007-2012 B.A.T.M.A.N. contributors:
 *
 * Marek Lindner, Simon Wunderlich
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
 * 02110-1301, USA
 *
 */

#include "main.h"
#include "routing.h"
#include "send.h"
#include "soft-interface.h"
#include "hard-interface.h"
#include "icmp_socket.h"
#include "translation-table.h"
#include "originator.h"
#include "vis.h"
#include "unicast.h"
#include "bridge_loop_avoidance.h"

static int route_unicast_packet(struct sk_buff *skb,
				struct hard_iface *recv_if);

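/* slide_own_bcast_window() - shift our own broadcast sequence number window
 * on the given hard interface by one slot for every known originator and
 * recompute bcast_own_sum (how many of our own broadcasts were heard back)
 * over the last TQ_LOCAL_WINDOW_SIZE sequence numbers.
 */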
void slide_own_bcast_window(struct hard_iface *hard_iface)
{
	struct bat_priv *bat_priv = netdev_priv(hard_iface->soft_iface);
	struct hashtable_t *hash = bat_priv->orig_hash;
	struct hlist_node *node;
	struct hlist_head *head;
	struct orig_node *orig_node;
	unsigned long *word;
	uint32_t i;
	size_t word_index;

	for (i = 0; i < hash->size; i++) {
		head = &hash->table[i];

		rcu_read_lock();
		hlist_for_each_entry_rcu(orig_node, node, head, hash_entry) {
			spin_lock_bh(&orig_node->ogm_cnt_lock);
			word_index = hard_iface->if_num * NUM_WORDS;
			word = &(orig_node->bcast_own[word_index]);

			batadv_bit_get_packet(bat_priv, word, 1, 0);
			orig_node->bcast_own_sum[hard_iface->if_num] =
				bitmap_weight(word, TQ_LOCAL_WINDOW_SIZE);
			spin_unlock_bh(&orig_node->ogm_cnt_lock);
		}
		rcu_read_unlock();
	}
}

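/* _update_route() - switch the best next hop (router) towards orig_node to
 * neigh_node, logging whether the route was added, changed or deleted and
 * adjusting the neighbor refcounts accordingly. When the route is deleted,
 * the originator's global translation table entries are purged as well.
 */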
static void _update_route(struct bat_priv *bat_priv,
			  struct orig_node *orig_node,
			  struct neigh_node *neigh_node)
{
	struct neigh_node *curr_router;

	curr_router = orig_node_get_router(orig_node);

	/* route deleted */
	if ((curr_router) && (!neigh_node)) {
		bat_dbg(DBG_ROUTES, bat_priv, "Deleting route towards: %pM\n",
			orig_node->orig);
		tt_global_del_orig(bat_priv, orig_node,
				   "Deleted route towards originator");

	/* route added */
	} else if ((!curr_router) && (neigh_node)) {
		bat_dbg(DBG_ROUTES, bat_priv,
			"Adding route towards: %pM (via %pM)\n",
			orig_node->orig, neigh_node->addr);

	/* route changed */
	} else if (neigh_node && curr_router) {
		bat_dbg(DBG_ROUTES, bat_priv,
			"Changing route towards: %pM (now via %pM - was via %pM)\n",
			orig_node->orig, neigh_node->addr,
			curr_router->addr);
	}

	if (curr_router)
		neigh_node_free_ref(curr_router);

	/* increase refcount of new best neighbor */
	if (neigh_node && !atomic_inc_not_zero(&neigh_node->refcount))
		neigh_node = NULL;

	spin_lock_bh(&orig_node->neigh_list_lock);
	rcu_assign_pointer(orig_node->router, neigh_node);
	spin_unlock_bh(&orig_node->neigh_list_lock);

	/* decrease refcount of previous best neighbor */
	if (curr_router)
		neigh_node_free_ref(curr_router);
}

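/* update_route() - update the route towards orig_node, but only if the
 * currently selected router differs from neigh_node.
 */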
void update_route(struct bat_priv *bat_priv, struct orig_node *orig_node,
		  struct neigh_node *neigh_node)
{
	struct neigh_node *router = NULL;

	if (!orig_node)
		goto out;

	router = orig_node_get_router(orig_node);

	if (router != neigh_node)
		_update_route(bat_priv, orig_node, neigh_node);

out:
	if (router)
		neigh_node_free_ref(router);
}

/* caller must hold the neigh_list_lock */
void bonding_candidate_del(struct orig_node *orig_node,
			   struct neigh_node *neigh_node)
{
	/* this neighbor is not part of our candidate list */
	if (list_empty(&neigh_node->bonding_list))
		goto out;

	list_del_rcu(&neigh_node->bonding_list);
	INIT_LIST_HEAD(&neigh_node->bonding_list);
	neigh_node_free_ref(neigh_node);
	atomic_dec(&orig_node->bond_candidates);

out:
	return;
}

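/* bonding_candidate_add() - add neigh_node to the bonding candidate list of
 * orig_node if it shares the originator's primary address, its TQ is within
 * BONDING_TQ_THRESHOLD of the currently selected router and no existing
 * candidate already uses the same MAC address or incoming interface
 * (interference avoidance). Otherwise the neighbor is removed from the list.
 */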
void bonding_candidate_add(struct orig_node *orig_node,
			   struct neigh_node *neigh_node)
{
	struct hlist_node *node;
	struct neigh_node *tmp_neigh_node, *router = NULL;
	uint8_t interference_candidate = 0;

	spin_lock_bh(&orig_node->neigh_list_lock);

	/* only consider if it has the same primary address ... */
	if (!compare_eth(orig_node->orig,
			 neigh_node->orig_node->primary_addr))
		goto candidate_del;

	router = orig_node_get_router(orig_node);
	if (!router)
		goto candidate_del;

	/* ... and is good enough to be considered */
	if (neigh_node->tq_avg < router->tq_avg - BONDING_TQ_THRESHOLD)
		goto candidate_del;

	/* check if we have another candidate with the same mac address or
	 * interface. If we do, we won't select this candidate because of
	 * possible interference.
	 */
	hlist_for_each_entry_rcu(tmp_neigh_node, node,
				 &orig_node->neigh_list, list) {
		if (tmp_neigh_node == neigh_node)
			continue;

		/* we only care if the other candidate is even
		 * considered as candidate. */
		if (list_empty(&tmp_neigh_node->bonding_list))
			continue;

		if ((neigh_node->if_incoming == tmp_neigh_node->if_incoming) ||
		    (compare_eth(neigh_node->addr, tmp_neigh_node->addr))) {
			interference_candidate = 1;
			break;
		}
	}

	/* don't care further if it is an interference candidate */
	if (interference_candidate)
		goto candidate_del;

	/* this neighbor already is part of our candidate list */
	if (!list_empty(&neigh_node->bonding_list))
		goto out;

	if (!atomic_inc_not_zero(&neigh_node->refcount))
		goto out;

	list_add_rcu(&neigh_node->bonding_list, &orig_node->bond_list);
	atomic_inc(&orig_node->bond_candidates);
	goto out;

candidate_del:
	bonding_candidate_del(orig_node, neigh_node);

out:
	spin_unlock_bh(&orig_node->neigh_list_lock);

	if (router)
		neigh_node_free_ref(router);
}

/* copy primary address for bonding */
void bonding_save_primary(const struct orig_node *orig_node,
			  struct orig_node *orig_neigh_node,
			  const struct batman_ogm_packet *batman_ogm_packet)
{
	if (!(batman_ogm_packet->flags & PRIMARIES_FIRST_HOP))
		return;

	memcpy(orig_neigh_node->primary_addr, orig_node->orig, ETH_ALEN);
}

/* checks whether the host restarted and is in the protection time.
 * returns:
 *  0 if the packet is to be accepted
 *  1 if the packet is to be ignored.
 */
int window_protected(struct bat_priv *bat_priv, int32_t seq_num_diff,
		     unsigned long *last_reset)
{
	if ((seq_num_diff <= -TQ_LOCAL_WINDOW_SIZE) ||
	    (seq_num_diff >= EXPECTED_SEQNO_RANGE)) {
		if (!has_timed_out(*last_reset, RESET_PROTECTION_MS))
			return 1;

		*last_reset = jiffies;
		bat_dbg(DBG_BATMAN, bat_priv,
			"old packet received, start protection\n");
	}

	return 0;
}

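/* check_management_packet() - sanity checks shared by received management
 * packets: minimum length, broadcast destination, non-broadcast source, and
 * an skb that may be modified (skb_cow) and is linear. Returns true if the
 * packet may be processed further.
 */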
bool check_management_packet(struct sk_buff *skb,
			     struct hard_iface *hard_iface,
			     int header_len)
{
	struct ethhdr *ethhdr;

	/* drop packet if it does not have the necessary minimum size */
	if (unlikely(!pskb_may_pull(skb, header_len)))
		return false;

	ethhdr = (struct ethhdr *)skb_mac_header(skb);

	/* packet with broadcast indication but unicast recipient */
	if (!is_broadcast_ether_addr(ethhdr->h_dest))
		return false;

	/* packet with broadcast sender address */
	if (is_broadcast_ether_addr(ethhdr->h_source))
		return false;

	/* create a copy of the skb, if needed, to modify it. */
	if (skb_cow(skb, 0) < 0)
		return false;

	/* keep skb linear */
	if (skb_linearize(skb) < 0)
		return false;

	return true;
}

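/* recv_my_icmp_packet() - handle an ICMP packet addressed to this node:
 * echo requests are answered with an ECHO_REPLY sent back towards the
 * originator, everything else is handed to the userspace ICMP socket.
 */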
static int recv_my_icmp_packet(struct bat_priv *bat_priv,
			       struct sk_buff *skb, size_t icmp_len)
{
	struct hard_iface *primary_if = NULL;
	struct orig_node *orig_node = NULL;
	struct neigh_node *router = NULL;
	struct icmp_packet_rr *icmp_packet;
	int ret = NET_RX_DROP;

	icmp_packet = (struct icmp_packet_rr *)skb->data;

	/* add data to device queue */
	if (icmp_packet->msg_type != ECHO_REQUEST) {
		bat_socket_receive_packet(icmp_packet, icmp_len);
		goto out;
	}

	primary_if = primary_if_get_selected(bat_priv);
	if (!primary_if)
		goto out;

	/* answer echo request (ping) */
	/* get routing information */
	orig_node = orig_hash_find(bat_priv, icmp_packet->orig);
	if (!orig_node)
		goto out;

	router = orig_node_get_router(orig_node);
	if (!router)
		goto out;

	/* create a copy of the skb, if needed, to modify it. */
	if (skb_cow(skb, ETH_HLEN) < 0)
		goto out;

	icmp_packet = (struct icmp_packet_rr *)skb->data;

	memcpy(icmp_packet->dst, icmp_packet->orig, ETH_ALEN);
	memcpy(icmp_packet->orig, primary_if->net_dev->dev_addr, ETH_ALEN);
	icmp_packet->msg_type = ECHO_REPLY;
	icmp_packet->header.ttl = TTL;

	send_skb_packet(skb, router->if_incoming, router->addr);
	ret = NET_RX_SUCCESS;

out:
	if (primary_if)
		hardif_free_ref(primary_if);
	if (router)
		neigh_node_free_ref(router);
	if (orig_node)
		orig_node_free_ref(orig_node);
	return ret;
}

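/* recv_icmp_ttl_exceeded() - the TTL of a forwarded ICMP echo request has
 * run out: turn the packet around and send a TTL_EXCEEDED message back to
 * its originator (used for traceroute). Non-echo-request packets are only
 * logged and dropped.
 */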
static int recv_icmp_ttl_exceeded(struct bat_priv *bat_priv,
				  struct sk_buff *skb)
{
	struct hard_iface *primary_if = NULL;
	struct orig_node *orig_node = NULL;
	struct neigh_node *router = NULL;
	struct icmp_packet *icmp_packet;
	int ret = NET_RX_DROP;

	icmp_packet = (struct icmp_packet *)skb->data;

	/* send TTL exceeded if packet is an echo request (traceroute) */
	if (icmp_packet->msg_type != ECHO_REQUEST) {
		pr_debug("Warning - can't forward icmp packet from %pM to %pM: ttl exceeded\n",
			 icmp_packet->orig, icmp_packet->dst);
		goto out;
	}

	primary_if = primary_if_get_selected(bat_priv);
	if (!primary_if)
		goto out;

	/* get routing information */
	orig_node = orig_hash_find(bat_priv, icmp_packet->orig);
	if (!orig_node)
		goto out;

	router = orig_node_get_router(orig_node);
	if (!router)
		goto out;

	/* create a copy of the skb, if needed, to modify it. */
	if (skb_cow(skb, ETH_HLEN) < 0)
		goto out;

	icmp_packet = (struct icmp_packet *)skb->data;

	memcpy(icmp_packet->dst, icmp_packet->orig, ETH_ALEN);
	memcpy(icmp_packet->orig, primary_if->net_dev->dev_addr, ETH_ALEN);
	icmp_packet->msg_type = TTL_EXCEEDED;
	icmp_packet->header.ttl = TTL;

	send_skb_packet(skb, router->if_incoming, router->addr);
	ret = NET_RX_SUCCESS;

out:
	if (primary_if)
		hardif_free_ref(primary_if);
	if (router)
		neigh_node_free_ref(router);
	if (orig_node)
		orig_node_free_ref(orig_node);
	return ret;
}

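/* recv_icmp_packet() - receive handler for batman-adv ICMP packets: append
 * record-route information, deliver packets destined for this node, answer
 * with TTL_EXCEEDED when the TTL runs out, and otherwise forward the packet
 * towards its destination.
 */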
int recv_icmp_packet(struct sk_buff *skb, struct hard_iface *recv_if)
{
	struct bat_priv *bat_priv = netdev_priv(recv_if->soft_iface);
	struct icmp_packet_rr *icmp_packet;
	struct ethhdr *ethhdr;
	struct orig_node *orig_node = NULL;
	struct neigh_node *router = NULL;
	int hdr_size = sizeof(struct icmp_packet);
	int ret = NET_RX_DROP;

	/* we truncate all incoming icmp packets if they don't match our size */
	if (skb->len >= sizeof(struct icmp_packet_rr))
		hdr_size = sizeof(struct icmp_packet_rr);

	/* drop packet if it does not have the necessary minimum size */
	if (unlikely(!pskb_may_pull(skb, hdr_size)))
		goto out;

	ethhdr = (struct ethhdr *)skb_mac_header(skb);

	/* packet with unicast indication but broadcast recipient */
	if (is_broadcast_ether_addr(ethhdr->h_dest))
		goto out;

	/* packet with broadcast sender address */
	if (is_broadcast_ether_addr(ethhdr->h_source))
		goto out;

	/* not for me */
	if (!is_my_mac(ethhdr->h_dest))
		goto out;

	icmp_packet = (struct icmp_packet_rr *)skb->data;

	/* add record route information if not full */
	if ((hdr_size == sizeof(struct icmp_packet_rr)) &&
	    (icmp_packet->rr_cur < BAT_RR_LEN)) {
		memcpy(&(icmp_packet->rr[icmp_packet->rr_cur]),
		       ethhdr->h_dest, ETH_ALEN);
		icmp_packet->rr_cur++;
	}

	/* packet for me */
	if (is_my_mac(icmp_packet->dst))
		return recv_my_icmp_packet(bat_priv, skb, hdr_size);

	/* TTL exceeded */
	if (icmp_packet->header.ttl < 2)
		return recv_icmp_ttl_exceeded(bat_priv, skb);

	/* get routing information */
	orig_node = orig_hash_find(bat_priv, icmp_packet->dst);
	if (!orig_node)
		goto out;

	router = orig_node_get_router(orig_node);
	if (!router)
		goto out;

	/* create a copy of the skb, if needed, to modify it. */
	if (skb_cow(skb, ETH_HLEN) < 0)
		goto out;

	icmp_packet = (struct icmp_packet_rr *)skb->data;

	/* decrement ttl */
	icmp_packet->header.ttl--;

	/* route it */
	send_skb_packet(skb, router->if_incoming, router->addr);
	ret = NET_RX_SUCCESS;

out:
	if (router)
		neigh_node_free_ref(router);
	if (orig_node)
		orig_node_free_ref(orig_node);
	return ret;
}

/* In the bonding case, send the packets in a round
 * robin fashion over the remaining interfaces.
 *
 * This method rotates the bonding list and increases the
 * returned router's refcount. */
static struct neigh_node *find_bond_router(struct orig_node *primary_orig,
					   const struct hard_iface *recv_if)
{
	struct neigh_node *tmp_neigh_node;
	struct neigh_node *router = NULL, *first_candidate = NULL;

	rcu_read_lock();
	list_for_each_entry_rcu(tmp_neigh_node, &primary_orig->bond_list,
				bonding_list) {
		if (!first_candidate)
			first_candidate = tmp_neigh_node;

		/* recv_if == NULL on the first node. */
		if (tmp_neigh_node->if_incoming == recv_if)
			continue;

		if (!atomic_inc_not_zero(&tmp_neigh_node->refcount))
			continue;

		router = tmp_neigh_node;
		break;
	}

	/* use the first candidate if nothing was found. */
	if (!router && first_candidate &&
	    atomic_inc_not_zero(&first_candidate->refcount))
		router = first_candidate;

	if (!router)
		goto out;

	/* selected should point to the next element
	 * after the current router */
	spin_lock_bh(&primary_orig->neigh_list_lock);
	/* this is a list_move(), which unfortunately
	 * does not exist as an rcu version */
	list_del_rcu(&primary_orig->bond_list);
	list_add_rcu(&primary_orig->bond_list,
		     &router->bonding_list);
	spin_unlock_bh(&primary_orig->neigh_list_lock);

out:
	rcu_read_unlock();
	return router;
}

/* Interface Alternating: Use the best of the
 * remaining candidates which are not using
 * this interface.
 *
 * Increases the returned router's refcount */
static struct neigh_node *find_ifalter_router(struct orig_node *primary_orig,
					      const struct hard_iface *recv_if)
{
	struct neigh_node *tmp_neigh_node;
	struct neigh_node *router = NULL, *first_candidate = NULL;

	rcu_read_lock();
	list_for_each_entry_rcu(tmp_neigh_node, &primary_orig->bond_list,
				bonding_list) {
		if (!first_candidate)
			first_candidate = tmp_neigh_node;

		/* recv_if == NULL on the first node. */
		if (tmp_neigh_node->if_incoming == recv_if)
			continue;

		if (!atomic_inc_not_zero(&tmp_neigh_node->refcount))
			continue;

		/* if we don't have a router yet
		 * or this one is better, choose it. */
		if ((!router) ||
		    (tmp_neigh_node->tq_avg > router->tq_avg)) {
			/* decrement refcount of
			 * previously selected router */
			if (router)
				neigh_node_free_ref(router);

			router = tmp_neigh_node;
			atomic_inc_not_zero(&router->refcount);
		}

		neigh_node_free_ref(tmp_neigh_node);
	}

	/* use the first candidate if nothing was found. */
	if (!router && first_candidate &&
	    atomic_inc_not_zero(&first_candidate->refcount))
		router = first_candidate;

	rcu_read_unlock();
	return router;
}

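/* recv_tt_query() - receive handler for translation table queries. A
 * TT_REQUEST is answered locally when possible and forwarded otherwise; a
 * TT_RESPONSE addressed to this node is linearized, length-checked and then
 * processed, any other response is routed towards its destination.
 */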
int recv_tt_query(struct sk_buff *skb, struct hard_iface *recv_if)
{
	struct bat_priv *bat_priv = netdev_priv(recv_if->soft_iface);
	struct tt_query_packet *tt_query;
	uint16_t tt_size;
	struct ethhdr *ethhdr;

	/* drop packet if it does not have the necessary minimum size */
	if (unlikely(!pskb_may_pull(skb, sizeof(struct tt_query_packet))))
		goto out;

	/* I may need to modify it */
	if (skb_cow(skb, sizeof(struct tt_query_packet)) < 0)
		goto out;

	ethhdr = (struct ethhdr *)skb_mac_header(skb);

	/* packet with unicast indication but broadcast recipient */
	if (is_broadcast_ether_addr(ethhdr->h_dest))
		goto out;

	/* packet with broadcast sender address */
	if (is_broadcast_ether_addr(ethhdr->h_source))
		goto out;

	tt_query = (struct tt_query_packet *)skb->data;

	switch (tt_query->flags & TT_QUERY_TYPE_MASK) {
	case TT_REQUEST:
		batadv_inc_counter(bat_priv, BAT_CNT_TT_REQUEST_RX);

		/* If we cannot provide an answer the tt_request is
		 * forwarded */
		if (!send_tt_response(bat_priv, tt_query)) {
			bat_dbg(DBG_TT, bat_priv,
				"Routing TT_REQUEST to %pM [%c]\n",
				tt_query->dst,
				(tt_query->flags & TT_FULL_TABLE ? 'F' : '.'));
			return route_unicast_packet(skb, recv_if);
		}
		break;
	case TT_RESPONSE:
		batadv_inc_counter(bat_priv, BAT_CNT_TT_RESPONSE_RX);

		if (is_my_mac(tt_query->dst)) {
			/* packet needs to be linearized to access the TT
			 * changes */
			if (skb_linearize(skb) < 0)
				goto out;
			/* skb_linearize() possibly changed skb->data */
			tt_query = (struct tt_query_packet *)skb->data;

			tt_size = tt_len(ntohs(tt_query->tt_data));

			/* Ensure we have all the claimed data */
			if (unlikely(skb_headlen(skb) <
				     sizeof(struct tt_query_packet) + tt_size))
				goto out;

			handle_tt_response(bat_priv, tt_query);
		} else {
			bat_dbg(DBG_TT, bat_priv,
				"Routing TT_RESPONSE to %pM [%c]\n",
				tt_query->dst,
				(tt_query->flags & TT_FULL_TABLE ? 'F' : '.'));
			return route_unicast_packet(skb, recv_if);
		}
		break;
	}

out:
	/* returning NET_RX_DROP will make the caller function kfree the skb */
	return NET_RX_DROP;
}

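/* recv_roam_adv() - receive handler for roaming advertisements. Packets not
 * addressed to this node are routed onwards; otherwise the announced client
 * is added to the global translation table with the next expected ttvn and
 * the roaming phase is started.
 */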
int recv_roam_adv(struct sk_buff *skb, struct hard_iface *recv_if)
{
	struct bat_priv *bat_priv = netdev_priv(recv_if->soft_iface);
	struct roam_adv_packet *roam_adv_packet;
	struct orig_node *orig_node;
	struct ethhdr *ethhdr;

	/* drop packet if it does not have the necessary minimum size */
	if (unlikely(!pskb_may_pull(skb, sizeof(struct roam_adv_packet))))
		goto out;

	ethhdr = (struct ethhdr *)skb_mac_header(skb);

	/* packet with unicast indication but broadcast recipient */
	if (is_broadcast_ether_addr(ethhdr->h_dest))
		goto out;

	/* packet with broadcast sender address */
	if (is_broadcast_ether_addr(ethhdr->h_source))
		goto out;

	batadv_inc_counter(bat_priv, BAT_CNT_TT_ROAM_ADV_RX);

	roam_adv_packet = (struct roam_adv_packet *)skb->data;

	if (!is_my_mac(roam_adv_packet->dst))
		return route_unicast_packet(skb, recv_if);

	/* check if it is a backbone gateway. we don't accept
	 * roaming advertisement from it, as it has the same
	 * entries as we have.
	 */
	if (batadv_bla_is_backbone_gw_orig(bat_priv, roam_adv_packet->src))
		goto out;

	orig_node = orig_hash_find(bat_priv, roam_adv_packet->src);
	if (!orig_node)
		goto out;

	bat_dbg(DBG_TT, bat_priv,
		"Received ROAMING_ADV from %pM (client %pM)\n",
		roam_adv_packet->src, roam_adv_packet->client);

	tt_global_add(bat_priv, orig_node, roam_adv_packet->client,
		      atomic_read(&orig_node->last_ttvn) + 1, true, false);

	/* Roaming phase starts: I have new information but the ttvn has not
	 * been incremented yet. This flag will make me check all the incoming
	 * packets for the correct destination. */
	bat_priv->tt_poss_change = true;

	orig_node_free_ref(orig_node);
out:
	/* returning NET_RX_DROP will make the caller function kfree the skb */
	return NET_RX_DROP;
}

/* find a suitable router for this originator, and use
 * bonding if possible. increases the found neighbor's
 * refcount. */
struct neigh_node *find_router(struct bat_priv *bat_priv,
			       struct orig_node *orig_node,
			       const struct hard_iface *recv_if)
{
	struct orig_node *primary_orig_node;
	struct orig_node *router_orig;
	struct neigh_node *router;
	static uint8_t zero_mac[ETH_ALEN] = {0, 0, 0, 0, 0, 0};
	int bonding_enabled;

	if (!orig_node)
		return NULL;

	router = orig_node_get_router(orig_node);
	if (!router)
		goto err;

	/* without bonding, the first node should
	 * always choose the default router. */
	bonding_enabled = atomic_read(&bat_priv->bonding);

	rcu_read_lock();
	/* select default router to output */
	router_orig = router->orig_node;
	if (!router_orig)
		goto err_unlock;

	if ((!recv_if) && (!bonding_enabled))
		goto return_router;

	/* if we have something in the primary_addr, we can search
	 * for a potential bonding candidate. */
	if (compare_eth(router_orig->primary_addr, zero_mac))
		goto return_router;

	/* find the orig_node which has the primary interface. might
	 * even be the same as our router_orig in many cases */
	if (compare_eth(router_orig->primary_addr, router_orig->orig)) {
		primary_orig_node = router_orig;
	} else {
		primary_orig_node = orig_hash_find(bat_priv,
						   router_orig->primary_addr);
		if (!primary_orig_node)
			goto return_router;

		orig_node_free_ref(primary_orig_node);
	}

	/* with less than 2 candidates, we can't do any
	 * bonding and prefer the original router. */
	if (atomic_read(&primary_orig_node->bond_candidates) < 2)
		goto return_router;

	/* all nodes between should choose a candidate which
	 * is not on the interface where the packet came in. */
	neigh_node_free_ref(router);

	if (bonding_enabled)
		router = find_bond_router(primary_orig_node, recv_if);
	else
		router = find_ifalter_router(primary_orig_node, recv_if);

return_router:
	if (router && router->if_incoming->if_status != IF_ACTIVE)
		goto err_unlock;

	rcu_read_unlock();
	return router;
err_unlock:
	rcu_read_unlock();
err:
	if (router)
		neigh_node_free_ref(router);
	return NULL;
}

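/* check_unicast_packet() - common sanity checks for received unicast
 * packets: minimum length, unicast destination, non-broadcast source and a
 * destination MAC belonging to this node. Returns 0 if the packet may be
 * processed, -1 if it must be dropped.
 */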
static int check_unicast_packet(struct sk_buff *skb, int hdr_size)
{
	struct ethhdr *ethhdr;

	/* drop packet if it does not have the necessary minimum size */
	if (unlikely(!pskb_may_pull(skb, hdr_size)))
		return -1;

	ethhdr = (struct ethhdr *)skb_mac_header(skb);

	/* packet with unicast indication but broadcast recipient */
	if (is_broadcast_ether_addr(ethhdr->h_dest))
		return -1;

	/* packet with broadcast sender address */
	if (is_broadcast_ether_addr(ethhdr->h_source))
		return -1;

	/* not for me */
	if (!is_my_mac(ethhdr->h_dest))
		return -1;

	return 0;
}

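/* route_unicast_packet() - forward a unicast packet towards its destination:
 * look up the originator, pick a router (honouring bonding / interface
 * alternating), fragment or reassemble the frame if necessary, decrement the
 * TTL, update the forward counters and transmit on the selected interface.
 */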
static int route_unicast_packet(struct sk_buff *skb, struct hard_iface *recv_if)
{
	struct bat_priv *bat_priv = netdev_priv(recv_if->soft_iface);
	struct orig_node *orig_node = NULL;
	struct neigh_node *neigh_node = NULL;
	struct unicast_packet *unicast_packet;
	struct ethhdr *ethhdr = (struct ethhdr *)skb_mac_header(skb);
	int ret = NET_RX_DROP;
	struct sk_buff *new_skb;

	unicast_packet = (struct unicast_packet *)skb->data;

	/* TTL exceeded */
	if (unicast_packet->header.ttl < 2) {
		pr_debug("Warning - can't forward unicast packet from %pM to %pM: ttl exceeded\n",
			 ethhdr->h_source, unicast_packet->dest);
		goto out;
	}

	/* get routing information */
	orig_node = orig_hash_find(bat_priv, unicast_packet->dest);
	if (!orig_node)
		goto out;

	/* find_router() increases neigh_node's refcount if found. */
	neigh_node = find_router(bat_priv, orig_node, recv_if);
	if (!neigh_node)
		goto out;

	/* create a copy of the skb, if needed, to modify it. */
	if (skb_cow(skb, ETH_HLEN) < 0)
		goto out;

	unicast_packet = (struct unicast_packet *)skb->data;

	if (unicast_packet->header.packet_type == BAT_UNICAST &&
	    atomic_read(&bat_priv->fragmentation) &&
	    skb->len > neigh_node->if_incoming->net_dev->mtu) {
		ret = frag_send_skb(skb, bat_priv,
				    neigh_node->if_incoming, neigh_node->addr);
		goto out;
	}

	if (unicast_packet->header.packet_type == BAT_UNICAST_FRAG &&
	    frag_can_reassemble(skb, neigh_node->if_incoming->net_dev->mtu)) {

		ret = frag_reassemble_skb(skb, bat_priv, &new_skb);

		if (ret == NET_RX_DROP)
			goto out;

		/* packet was buffered for late merge */
		if (!new_skb) {
			ret = NET_RX_SUCCESS;
			goto out;
		}

		skb = new_skb;
		unicast_packet = (struct unicast_packet *)skb->data;
	}

	/* decrement ttl */
	unicast_packet->header.ttl--;

	/* Update stats counter */
	batadv_inc_counter(bat_priv, BAT_CNT_FORWARD);
	batadv_add_counter(bat_priv, BAT_CNT_FORWARD_BYTES,
			   skb->len + ETH_HLEN);

	/* route it */
	send_skb_packet(skb, neigh_node->if_incoming, neigh_node->addr);
	ret = NET_RX_SUCCESS;

out:
	if (neigh_node)
		neigh_node_free_ref(neigh_node);
	if (orig_node)
		orig_node_free_ref(orig_node);
	return ret;
}

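/* check_unicast_ttvn() - compare the translation table version number
 * carried in a unicast packet with the current one. If the sender used an
 * outdated table (or a roaming phase is in progress), look the client up
 * again and rewrite the packet destination so it still reaches the correct
 * node. Returns 1 if the packet may be processed further, 0 to drop it.
 */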
static int check_unicast_ttvn(struct bat_priv *bat_priv,
			      struct sk_buff *skb)
{
	uint8_t curr_ttvn;
	struct orig_node *orig_node;
	struct ethhdr *ethhdr;
	struct hard_iface *primary_if;
	struct unicast_packet *unicast_packet;
	bool tt_poss_change;

	/* I may need to modify it */
	if (skb_cow(skb, sizeof(struct unicast_packet)) < 0)
		return 0;

	unicast_packet = (struct unicast_packet *)skb->data;

	if (is_my_mac(unicast_packet->dest)) {
		tt_poss_change = bat_priv->tt_poss_change;
		curr_ttvn = (uint8_t)atomic_read(&bat_priv->ttvn);
	} else {
		orig_node = orig_hash_find(bat_priv, unicast_packet->dest);

		if (!orig_node)
			return 0;

		curr_ttvn = (uint8_t)atomic_read(&orig_node->last_ttvn);
		tt_poss_change = orig_node->tt_poss_change;
		orig_node_free_ref(orig_node);
	}

	/* Check whether I have to reroute the packet */
	if (seq_before(unicast_packet->ttvn, curr_ttvn) || tt_poss_change) {
		/* check if there is enough data before accessing it */
		if (!pskb_may_pull(skb, sizeof(struct unicast_packet) +
				   ETH_HLEN))
			return 0;

		ethhdr = (struct ethhdr *)(skb->data +
					   sizeof(struct unicast_packet));

		/* we don't have an updated route for this client, so we should
		 * not try to reroute the packet!! */
		if (tt_global_client_is_roaming(bat_priv, ethhdr->h_dest))
			return 1;

		orig_node = transtable_search(bat_priv, NULL, ethhdr->h_dest);

		if (!orig_node) {
			if (!is_my_client(bat_priv, ethhdr->h_dest))
				return 0;
			primary_if = primary_if_get_selected(bat_priv);
			if (!primary_if)
				return 0;
			memcpy(unicast_packet->dest,
			       primary_if->net_dev->dev_addr, ETH_ALEN);
			hardif_free_ref(primary_if);
		} else {
			memcpy(unicast_packet->dest, orig_node->orig,
			       ETH_ALEN);
			curr_ttvn = (uint8_t)
				atomic_read(&orig_node->last_ttvn);
			orig_node_free_ref(orig_node);
		}

		bat_dbg(DBG_ROUTES, bat_priv,
			"TTVN mismatch (old_ttvn %u new_ttvn %u)! Rerouting unicast packet (for %pM) to %pM\n",
			unicast_packet->ttvn, curr_ttvn, ethhdr->h_dest,
			unicast_packet->dest);

		unicast_packet->ttvn = curr_ttvn;
	}
	return 1;
}

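/* recv_unicast_packet() - receive handler for unicast packets: validate the
 * frame, fix up an outdated destination via the ttvn check, hand packets for
 * this node up to the soft interface and route everything else onwards.
 */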
int recv_unicast_packet(struct sk_buff *skb, struct hard_iface *recv_if)
{
	struct bat_priv *bat_priv = netdev_priv(recv_if->soft_iface);
	struct unicast_packet *unicast_packet;
	int hdr_size = sizeof(*unicast_packet);

	if (check_unicast_packet(skb, hdr_size) < 0)
		return NET_RX_DROP;

	if (!check_unicast_ttvn(bat_priv, skb))
		return NET_RX_DROP;

	unicast_packet = (struct unicast_packet *)skb->data;

	/* packet for me */
	if (is_my_mac(unicast_packet->dest)) {
		interface_rx(recv_if->soft_iface, skb, recv_if, hdr_size);
		return NET_RX_SUCCESS;
	}

	return route_unicast_packet(skb, recv_if);
}

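/* recv_ucast_frag_packet() - receive handler for fragmented unicast packets:
 * fragments destined for this node are reassembled (or buffered until their
 * counterpart arrives) before being handed up, others are routed onwards.
 */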
int recv_ucast_frag_packet(struct sk_buff *skb, struct hard_iface *recv_if)
{
	struct bat_priv *bat_priv = netdev_priv(recv_if->soft_iface);
	struct unicast_frag_packet *unicast_packet;
	int hdr_size = sizeof(*unicast_packet);
	struct sk_buff *new_skb = NULL;
	int ret;

	if (check_unicast_packet(skb, hdr_size) < 0)
		return NET_RX_DROP;

	if (!check_unicast_ttvn(bat_priv, skb))
		return NET_RX_DROP;

	unicast_packet = (struct unicast_frag_packet *)skb->data;

	/* packet for me */
	if (is_my_mac(unicast_packet->dest)) {

		ret = frag_reassemble_skb(skb, bat_priv, &new_skb);

		if (ret == NET_RX_DROP)
			return NET_RX_DROP;

		/* packet was buffered for late merge */
		if (!new_skb)
			return NET_RX_SUCCESS;

		interface_rx(recv_if->soft_iface, new_skb, recv_if,
			     sizeof(struct unicast_packet));
		return NET_RX_SUCCESS;
	}

	return route_unicast_packet(skb, recv_if);
}

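/* recv_bcast_packet() - receive handler for broadcast packets: run duplicate
 * and window-protection checks against the originator's broadcast sequence
 * number window, rebroadcast the packet, and hand it up to the soft
 * interface unless it comes from an originator on our own backbone.
 */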
int recv_bcast_packet(struct sk_buff *skb, struct hard_iface *recv_if)
{
	struct bat_priv *bat_priv = netdev_priv(recv_if->soft_iface);
	struct orig_node *orig_node = NULL;
	struct bcast_packet *bcast_packet;
	struct ethhdr *ethhdr;
	int hdr_size = sizeof(*bcast_packet);
	int ret = NET_RX_DROP;
	int32_t seq_diff;

	/* drop packet if it does not have the necessary minimum size */
	if (unlikely(!pskb_may_pull(skb, hdr_size)))
		goto out;

	ethhdr = (struct ethhdr *)skb_mac_header(skb);

	/* packet with broadcast indication but unicast recipient */
	if (!is_broadcast_ether_addr(ethhdr->h_dest))
		goto out;

	/* packet with broadcast sender address */
	if (is_broadcast_ether_addr(ethhdr->h_source))
		goto out;

	/* ignore broadcasts sent by myself */
	if (is_my_mac(ethhdr->h_source))
		goto out;

	bcast_packet = (struct bcast_packet *)skb->data;

	/* ignore broadcasts originated by myself */
	if (is_my_mac(bcast_packet->orig))
		goto out;

	if (bcast_packet->header.ttl < 2)
		goto out;

	orig_node = orig_hash_find(bat_priv, bcast_packet->orig);

	if (!orig_node)
		goto out;

	spin_lock_bh(&orig_node->bcast_seqno_lock);

	/* check whether the packet is a duplicate */
	if (bat_test_bit(orig_node->bcast_bits, orig_node->last_bcast_seqno,
			 ntohl(bcast_packet->seqno)))
		goto spin_unlock;

	seq_diff = ntohl(bcast_packet->seqno) - orig_node->last_bcast_seqno;

	/* check whether the packet is old and the host just restarted. */
	if (window_protected(bat_priv, seq_diff,
			     &orig_node->bcast_seqno_reset))
		goto spin_unlock;

	/* mark broadcast in flood history, update window position
	 * if required. */
	if (batadv_bit_get_packet(bat_priv, orig_node->bcast_bits, seq_diff, 1))
		orig_node->last_bcast_seqno = ntohl(bcast_packet->seqno);

	spin_unlock_bh(&orig_node->bcast_seqno_lock);

	/* check whether this has been sent by another originator before */
	if (batadv_bla_check_bcast_duplist(bat_priv, bcast_packet, hdr_size))
		goto out;

	/* rebroadcast packet */
	add_bcast_packet_to_list(bat_priv, skb, 1);

	/* don't hand the broadcast up if it is from an originator
	 * from the same backbone.
	 */
	if (batadv_bla_is_backbone_gw(skb, orig_node, hdr_size))
		goto out;

	/* broadcast for me */
	interface_rx(recv_if->soft_iface, skb, recv_if, hdr_size);
	ret = NET_RX_SUCCESS;
	goto out;

spin_unlock:
	spin_unlock_bh(&orig_node->bcast_seqno_lock);
out:
	if (orig_node)
		orig_node_free_ref(orig_node);
	return ret;
}

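/* recv_vis_packet() - receive handler for vis (network visualization)
 * packets: server sync and client update packets addressed to this node are
 * passed to the vis subsystem; the skb itself is always dropped because its
 * payload is copied.
 */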
int recv_vis_packet(struct sk_buff *skb, struct hard_iface *recv_if)
{
	struct vis_packet *vis_packet;
	struct ethhdr *ethhdr;
	struct bat_priv *bat_priv = netdev_priv(recv_if->soft_iface);
	int hdr_size = sizeof(*vis_packet);

	/* keep skb linear */
	if (skb_linearize(skb) < 0)
		return NET_RX_DROP;

	if (unlikely(!pskb_may_pull(skb, hdr_size)))
		return NET_RX_DROP;

	vis_packet = (struct vis_packet *)skb->data;
	ethhdr = (struct ethhdr *)skb_mac_header(skb);

	/* not for me */
	if (!is_my_mac(ethhdr->h_dest))
		return NET_RX_DROP;

	/* ignore own packets */
	if (is_my_mac(vis_packet->vis_orig))
		return NET_RX_DROP;

	if (is_my_mac(vis_packet->sender_orig))
		return NET_RX_DROP;

	switch (vis_packet->vis_type) {
	case VIS_TYPE_SERVER_SYNC:
		receive_server_sync_packet(bat_priv, vis_packet,
					   skb_headlen(skb));
		break;

	case VIS_TYPE_CLIENT_UPDATE:
		receive_client_update_packet(bat_priv, vis_packet,
					     skb_headlen(skb));
		break;

	default:	/* ignore unknown packet */
		break;
	}

	/* We take a copy of the data in the packet, so we should
	 * always free the skbuff. */
	return NET_RX_DROP;
}