routing.c

/*
 * Copyright (C) 2007-2011 B.A.T.M.A.N. contributors:
 *
 * Marek Lindner, Simon Wunderlich
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
 * 02110-1301, USA
 *
 */

#include "main.h"
#include "routing.h"
#include "send.h"
#include "hash.h"
#include "soft-interface.h"
#include "hard-interface.h"
#include "icmp_socket.h"
#include "translation-table.h"
#include "originator.h"
#include "ring_buffer.h"
#include "vis.h"
#include "aggregation.h"
#include "gateway_common.h"
#include "gateway_client.h"
#include "unicast.h"
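
/* slide the own OGM broadcast window of every known originator one step
 * forward for the given interface and refresh its bcast_own_sum counter */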
void slide_own_bcast_window(struct hard_iface *hard_iface)
{
	struct bat_priv *bat_priv = netdev_priv(hard_iface->soft_iface);
	struct hashtable_t *hash = bat_priv->orig_hash;
	struct hlist_node *node;
	struct hlist_head *head;
	struct orig_node *orig_node;
	unsigned long *word;
	int i;
	size_t word_index;

	for (i = 0; i < hash->size; i++) {
		head = &hash->table[i];

		rcu_read_lock();
		hlist_for_each_entry_rcu(orig_node, node, head, hash_entry) {
			spin_lock_bh(&orig_node->ogm_cnt_lock);
			word_index = hard_iface->if_num * NUM_WORDS;
			word = &(orig_node->bcast_own[word_index]);

			bit_get_packet(bat_priv, word, 1, 0);
			orig_node->bcast_own_sum[hard_iface->if_num] =
				bit_packet_count(word);
			spin_unlock_bh(&orig_node->ogm_cnt_lock);
		}
		rcu_read_unlock();
	}
}
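
/* update the HNA entries announced by an originator whenever the received
 * HNA buffer differs from the one we already know about */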
static void update_HNA(struct bat_priv *bat_priv, struct orig_node *orig_node,
		       unsigned char *hna_buff, int hna_buff_len)
{
	if ((hna_buff_len != orig_node->hna_buff_len) ||
	    ((hna_buff_len > 0) &&
	     (orig_node->hna_buff_len > 0) &&
	     (memcmp(orig_node->hna_buff, hna_buff, hna_buff_len) != 0))) {

		if (orig_node->hna_buff_len > 0)
			hna_global_del_orig(bat_priv, orig_node,
					    "originator changed hna");

		if ((hna_buff_len > 0) && (hna_buff))
			hna_global_add_orig(bat_priv, orig_node,
					    hna_buff, hna_buff_len);
	}
}
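
/* replace the currently selected router towards an originator with the
 * given neighbor (or delete the route if neigh_node is NULL) and keep the
 * global HNA table in sync */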
static void update_route(struct bat_priv *bat_priv,
			 struct orig_node *orig_node,
			 struct neigh_node *neigh_node,
			 unsigned char *hna_buff, int hna_buff_len)
{
	struct neigh_node *curr_router;

	curr_router = orig_node_get_router(orig_node);

	/* route deleted */
	if ((curr_router) && (!neigh_node)) {
		bat_dbg(DBG_ROUTES, bat_priv, "Deleting route towards: %pM\n",
			orig_node->orig);
		hna_global_del_orig(bat_priv, orig_node,
				    "originator timed out");

	/* route added */
	} else if ((!curr_router) && (neigh_node)) {
		bat_dbg(DBG_ROUTES, bat_priv,
			"Adding route towards: %pM (via %pM)\n",
			orig_node->orig, neigh_node->addr);
		hna_global_add_orig(bat_priv, orig_node,
				    hna_buff, hna_buff_len);

	/* route changed */
	} else {
		bat_dbg(DBG_ROUTES, bat_priv,
			"Changing route towards: %pM "
			"(now via %pM - was via %pM)\n",
			orig_node->orig, neigh_node->addr,
			curr_router->addr);
	}

	if (curr_router)
		neigh_node_free_ref(curr_router);

	/* increase refcount of new best neighbor */
	if (neigh_node && !atomic_inc_not_zero(&neigh_node->refcount))
		neigh_node = NULL;

	spin_lock_bh(&orig_node->neigh_list_lock);
	rcu_assign_pointer(orig_node->router, neigh_node);
	spin_unlock_bh(&orig_node->neigh_list_lock);

	/* decrease refcount of previous best neighbor */
	if (curr_router)
		neigh_node_free_ref(curr_router);
}
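
/* decide whether the route towards an originator has to be switched to the
 * given neighbor; if the router stays the same only the HNA information is
 * refreshed */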
void update_routes(struct bat_priv *bat_priv, struct orig_node *orig_node,
		   struct neigh_node *neigh_node, unsigned char *hna_buff,
		   int hna_buff_len)
{
	struct neigh_node *router = NULL;

	if (!orig_node)
		goto out;

	router = orig_node_get_router(orig_node);

	if (router != neigh_node)
		update_route(bat_priv, orig_node, neigh_node,
			     hna_buff, hna_buff_len);
	/* may be just HNA changed */
	else
		update_HNA(bat_priv, orig_node, hna_buff, hna_buff_len);

out:
	if (router)
		neigh_node_free_ref(router);
}
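
/* compute the link quality towards a one-hop neighbor (own transmit quality
 * combined with an asymmetry penalty), apply it to batman_packet->tq and
 * return 1 if the link can be considered bidirectional, 0 otherwise */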
static int is_bidirectional_neigh(struct orig_node *orig_node,
				  struct orig_node *orig_neigh_node,
				  struct batman_packet *batman_packet,
				  struct hard_iface *if_incoming)
{
	struct bat_priv *bat_priv = netdev_priv(if_incoming->soft_iface);
	struct neigh_node *neigh_node = NULL, *tmp_neigh_node;
	struct hlist_node *node;
	unsigned char total_count;
	uint8_t orig_eq_count, neigh_rq_count, tq_own;
	int tq_asym_penalty, ret = 0;

	if (orig_node == orig_neigh_node) {
		rcu_read_lock();
		hlist_for_each_entry_rcu(tmp_neigh_node, node,
					 &orig_node->neigh_list, list) {

			if (!compare_eth(tmp_neigh_node->addr,
					 orig_neigh_node->orig))
				continue;

			if (tmp_neigh_node->if_incoming != if_incoming)
				continue;

			if (!atomic_inc_not_zero(&tmp_neigh_node->refcount))
				continue;

			neigh_node = tmp_neigh_node;
		}
		rcu_read_unlock();

		if (!neigh_node)
			neigh_node = create_neighbor(orig_node,
						     orig_neigh_node,
						     orig_neigh_node->orig,
						     if_incoming);
		if (!neigh_node)
			goto out;

		neigh_node->last_valid = jiffies;
	} else {
		/* find packet count of corresponding one hop neighbor */
		rcu_read_lock();
		hlist_for_each_entry_rcu(tmp_neigh_node, node,
					 &orig_neigh_node->neigh_list, list) {

			if (!compare_eth(tmp_neigh_node->addr,
					 orig_neigh_node->orig))
				continue;

			if (tmp_neigh_node->if_incoming != if_incoming)
				continue;

			if (!atomic_inc_not_zero(&tmp_neigh_node->refcount))
				continue;

			neigh_node = tmp_neigh_node;
		}
		rcu_read_unlock();

		if (!neigh_node)
			neigh_node = create_neighbor(orig_neigh_node,
						     orig_neigh_node,
						     orig_neigh_node->orig,
						     if_incoming);
		if (!neigh_node)
			goto out;
	}

	orig_node->last_valid = jiffies;

	spin_lock_bh(&orig_node->ogm_cnt_lock);
	orig_eq_count = orig_neigh_node->bcast_own_sum[if_incoming->if_num];
	neigh_rq_count = neigh_node->real_packet_count;
	spin_unlock_bh(&orig_node->ogm_cnt_lock);

	/* pay attention to not get a value bigger than 100 % */
	total_count = (orig_eq_count > neigh_rq_count ?
		       neigh_rq_count : orig_eq_count);

	/* if we have too few packets (too little data) we set tq_own to zero;
	 * if we receive too few packets the link is not considered
	 * bidirectional */
	if ((total_count < TQ_LOCAL_BIDRECT_SEND_MINIMUM) ||
	    (neigh_rq_count < TQ_LOCAL_BIDRECT_RECV_MINIMUM))
		tq_own = 0;
	else
		/* neigh_node->real_packet_count is never zero as we
		 * only purge old information when getting new
		 * information */
		tq_own = (TQ_MAX_VALUE * total_count) / neigh_rq_count;

	/*
	 * 1 - ((1-x) ** 3), normalized to TQ_MAX_VALUE: this barely affects
	 * nearly-symmetric links but punishes asymmetric links more heavily.
	 * The result lies between 0 and TQ_MAX_VALUE.
	 */
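	/* a worked example, assuming the usual TQ_LOCAL_WINDOW_SIZE of 64
	 * and TQ_MAX_VALUE of 255: with neigh_rq_count = 32 the penalty
	 * below is 255 - (255 * 32 * 32 * 32) / (64 * 64 * 64) = 224,
	 * i.e. such a link keeps only about 88% of its TQ. */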
	tq_asym_penalty = TQ_MAX_VALUE - (TQ_MAX_VALUE *
				(TQ_LOCAL_WINDOW_SIZE - neigh_rq_count) *
				(TQ_LOCAL_WINDOW_SIZE - neigh_rq_count) *
				(TQ_LOCAL_WINDOW_SIZE - neigh_rq_count)) /
				(TQ_LOCAL_WINDOW_SIZE *
				 TQ_LOCAL_WINDOW_SIZE *
				 TQ_LOCAL_WINDOW_SIZE);

	batman_packet->tq = ((batman_packet->tq * tq_own * tq_asym_penalty) /
			     (TQ_MAX_VALUE * TQ_MAX_VALUE));

	bat_dbg(DBG_BATMAN, bat_priv,
		"bidirectional: "
		"orig = %-15pM neigh = %-15pM => own_bcast = %2i, "
		"real recv = %2i, local tq: %3i, asym_penalty: %3i, "
		"total tq: %3i\n",
		orig_node->orig, orig_neigh_node->orig, total_count,
		neigh_rq_count, tq_own, tq_asym_penalty, batman_packet->tq);

	/* if link has the minimum required transmission quality
	 * consider it bidirectional */
	if (batman_packet->tq >= TQ_TOTAL_BIDRECT_LIMIT)
		ret = 1;

out:
	if (neigh_node)
		neigh_node_free_ref(neigh_node);
	return ret;
}

/* caller must hold the neigh_list_lock */
void bonding_candidate_del(struct orig_node *orig_node,
			   struct neigh_node *neigh_node)
{
	/* this neighbor is not part of our candidate list */
	if (list_empty(&neigh_node->bonding_list))
		goto out;

	list_del_rcu(&neigh_node->bonding_list);
	INIT_LIST_HEAD(&neigh_node->bonding_list);
	neigh_node_free_ref(neigh_node);
	atomic_dec(&orig_node->bond_candidates);

out:
	return;
}
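
/* add the neighbor to the bonding candidate list of its originator if it
 * announces the same primary address, its TQ is close enough to the
 * currently selected router and it does not interfere with an already
 * chosen candidate */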
static void bonding_candidate_add(struct orig_node *orig_node,
				  struct neigh_node *neigh_node)
{
	struct hlist_node *node;
	struct neigh_node *tmp_neigh_node, *router = NULL;
	uint8_t interference_candidate = 0;

	spin_lock_bh(&orig_node->neigh_list_lock);

	/* only consider if it has the same primary address ... */
	if (!compare_eth(orig_node->orig,
			 neigh_node->orig_node->primary_addr))
		goto candidate_del;

	router = orig_node_get_router(orig_node);
	if (!router)
		goto candidate_del;

	/* ... and is good enough to be considered */
	if (neigh_node->tq_avg < router->tq_avg - BONDING_TQ_THRESHOLD)
		goto candidate_del;

	/**
	 * check if we have another candidate with the same mac address or
	 * interface. If we do, we won't select this candidate because of
	 * possible interference.
	 */
	hlist_for_each_entry_rcu(tmp_neigh_node, node,
				 &orig_node->neigh_list, list) {

		if (tmp_neigh_node == neigh_node)
			continue;

		/* we only care if the other candidate is even
		 * considered as candidate. */
		if (list_empty(&tmp_neigh_node->bonding_list))
			continue;

		if ((neigh_node->if_incoming == tmp_neigh_node->if_incoming) ||
		    (compare_eth(neigh_node->addr, tmp_neigh_node->addr))) {
			interference_candidate = 1;
			break;
		}
	}

	/* don't care further if it is an interference candidate */
	if (interference_candidate)
		goto candidate_del;

	/* this neighbor already is part of our candidate list */
	if (!list_empty(&neigh_node->bonding_list))
		goto out;

	if (!atomic_inc_not_zero(&neigh_node->refcount))
		goto out;

	list_add_rcu(&neigh_node->bonding_list, &orig_node->bond_list);
	atomic_inc(&orig_node->bond_candidates);
	goto out;

candidate_del:
	bonding_candidate_del(orig_node, neigh_node);

out:
	spin_unlock_bh(&orig_node->neigh_list_lock);
	if (router)
		neigh_node_free_ref(router);
}

/* copy primary address for bonding */
static void bonding_save_primary(struct orig_node *orig_node,
				 struct orig_node *orig_neigh_node,
				 struct batman_packet *batman_packet)
{
	if (!(batman_packet->flags & PRIMARIES_FIRST_HOP))
		return;

	memcpy(orig_neigh_node->primary_addr, orig_node->orig, ETH_ALEN);
}
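
/* update the originator entry (last-hop neighbor, TQ ring buffer, HNA and
 * gateway information) for a received OGM and switch the route if the
 * sending neighbor turns out to be the better next hop */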
static void update_orig(struct bat_priv *bat_priv,
			struct orig_node *orig_node,
			struct ethhdr *ethhdr,
			struct batman_packet *batman_packet,
			struct hard_iface *if_incoming,
			unsigned char *hna_buff, int hna_buff_len,
			char is_duplicate)
{
	struct neigh_node *neigh_node = NULL, *tmp_neigh_node = NULL;
	struct neigh_node *router = NULL;
	struct orig_node *orig_node_tmp;
	struct hlist_node *node;
	int tmp_hna_buff_len;
	uint8_t bcast_own_sum_orig, bcast_own_sum_neigh;

	bat_dbg(DBG_BATMAN, bat_priv, "update_originator(): "
		"Searching and updating originator entry of received packet\n");

	rcu_read_lock();
	hlist_for_each_entry_rcu(tmp_neigh_node, node,
				 &orig_node->neigh_list, list) {
		if (compare_eth(tmp_neigh_node->addr, ethhdr->h_source) &&
		    (tmp_neigh_node->if_incoming == if_incoming) &&
		    atomic_inc_not_zero(&tmp_neigh_node->refcount)) {
			if (neigh_node)
				neigh_node_free_ref(neigh_node);
			neigh_node = tmp_neigh_node;
			continue;
		}

		if (is_duplicate)
			continue;

		ring_buffer_set(tmp_neigh_node->tq_recv,
				&tmp_neigh_node->tq_index, 0);
		tmp_neigh_node->tq_avg =
			ring_buffer_avg(tmp_neigh_node->tq_recv);
	}

	if (!neigh_node) {
		struct orig_node *orig_tmp;

		orig_tmp = get_orig_node(bat_priv, ethhdr->h_source);
		if (!orig_tmp)
			goto unlock;

		neigh_node = create_neighbor(orig_node, orig_tmp,
					     ethhdr->h_source, if_incoming);

		orig_node_free_ref(orig_tmp);
		if (!neigh_node)
			goto unlock;
	} else
		bat_dbg(DBG_BATMAN, bat_priv,
			"Updating existing last-hop neighbor of originator\n");

	rcu_read_unlock();

	orig_node->flags = batman_packet->flags;
	neigh_node->last_valid = jiffies;

	ring_buffer_set(neigh_node->tq_recv,
			&neigh_node->tq_index,
			batman_packet->tq);
	neigh_node->tq_avg = ring_buffer_avg(neigh_node->tq_recv);

	if (!is_duplicate) {
		orig_node->last_ttl = batman_packet->ttl;
		neigh_node->last_ttl = batman_packet->ttl;
	}

	bonding_candidate_add(orig_node, neigh_node);

	tmp_hna_buff_len = (hna_buff_len > batman_packet->num_hna * ETH_ALEN ?
			    batman_packet->num_hna * ETH_ALEN : hna_buff_len);

	/* if this neighbor already is our next hop there is nothing
	 * to change */
	router = orig_node_get_router(orig_node);
	if (router == neigh_node)
		goto update_hna;

	/* if this neighbor does not offer a better TQ we won't consider it */
	if (router && (router->tq_avg > neigh_node->tq_avg))
		goto update_hna;

	/* if the TQ is the same and the link is not more symmetric we
	 * won't consider it either */
	if (router && (neigh_node->tq_avg == router->tq_avg)) {
		orig_node_tmp = router->orig_node;
		spin_lock_bh(&orig_node_tmp->ogm_cnt_lock);
		bcast_own_sum_orig =
			orig_node_tmp->bcast_own_sum[if_incoming->if_num];
		spin_unlock_bh(&orig_node_tmp->ogm_cnt_lock);

		orig_node_tmp = neigh_node->orig_node;
		spin_lock_bh(&orig_node_tmp->ogm_cnt_lock);
		bcast_own_sum_neigh =
			orig_node_tmp->bcast_own_sum[if_incoming->if_num];
		spin_unlock_bh(&orig_node_tmp->ogm_cnt_lock);

		if (bcast_own_sum_orig >= bcast_own_sum_neigh)
			goto update_hna;
	}

	update_routes(bat_priv, orig_node, neigh_node,
		      hna_buff, tmp_hna_buff_len);
	goto update_gw;

update_hna:
	update_routes(bat_priv, orig_node, router,
		      hna_buff, tmp_hna_buff_len);

update_gw:
	if (orig_node->gw_flags != batman_packet->gw_flags)
		gw_node_update(bat_priv, orig_node, batman_packet->gw_flags);

	orig_node->gw_flags = batman_packet->gw_flags;

	/* restart gateway selection if fast or late switching was enabled */
	if ((orig_node->gw_flags) &&
	    (atomic_read(&bat_priv->gw_mode) == GW_MODE_CLIENT) &&
	    (atomic_read(&bat_priv->gw_sel_class) > 2))
		gw_check_election(bat_priv, orig_node);

	goto out;

unlock:
	rcu_read_unlock();
out:
	if (neigh_node)
		neigh_node_free_ref(neigh_node);
	if (router)
		neigh_node_free_ref(router);
}

/* checks whether the host restarted and is in the protection time.
 * returns:
 *  0 if the packet is to be accepted
 *  1 if the packet is to be ignored.
 */
static int window_protected(struct bat_priv *bat_priv,
			    int32_t seq_num_diff,
			    unsigned long *last_reset)
{
	if ((seq_num_diff <= -TQ_LOCAL_WINDOW_SIZE)
	    || (seq_num_diff >= EXPECTED_SEQNO_RANGE)) {
		if (time_after(jiffies, *last_reset +
			       msecs_to_jiffies(RESET_PROTECTION_MS))) {

			*last_reset = jiffies;
			bat_dbg(DBG_BATMAN, bat_priv,
				"old packet received, start protection\n");

			return 0;
		} else
			return 1;
	}
	return 0;
}

/* processes a batman packet for all interfaces, adjusts the sequence number
 * and finds out whether it is a duplicate.
 * returns:
 *  1 the packet is a duplicate
 *  0 the packet has not yet been received
 * -1 the packet is old and has been received while the seqno window
 *    was protected. Caller should drop it.
 */
static char count_real_packets(struct ethhdr *ethhdr,
			       struct batman_packet *batman_packet,
			       struct hard_iface *if_incoming)
{
	struct bat_priv *bat_priv = netdev_priv(if_incoming->soft_iface);
	struct orig_node *orig_node;
	struct neigh_node *tmp_neigh_node;
	struct hlist_node *node;
	char is_duplicate = 0;
	int32_t seq_diff;
	int need_update = 0;
	int set_mark, ret = -1;

	orig_node = get_orig_node(bat_priv, batman_packet->orig);
	if (!orig_node)
		return 0;

	spin_lock_bh(&orig_node->ogm_cnt_lock);
	seq_diff = batman_packet->seqno - orig_node->last_real_seqno;

	/* signal to the caller that the packet is to be dropped. */
	if (window_protected(bat_priv, seq_diff,
			     &orig_node->batman_seqno_reset))
		goto out;

	rcu_read_lock();
	hlist_for_each_entry_rcu(tmp_neigh_node, node,
				 &orig_node->neigh_list, list) {

		is_duplicate |= get_bit_status(tmp_neigh_node->real_bits,
					       orig_node->last_real_seqno,
					       batman_packet->seqno);

		if (compare_eth(tmp_neigh_node->addr, ethhdr->h_source) &&
		    (tmp_neigh_node->if_incoming == if_incoming))
			set_mark = 1;
		else
			set_mark = 0;

		/* if the window moved, set the update flag. */
		need_update |= bit_get_packet(bat_priv,
					      tmp_neigh_node->real_bits,
					      seq_diff, set_mark);

		tmp_neigh_node->real_packet_count =
			bit_packet_count(tmp_neigh_node->real_bits);
	}
	rcu_read_unlock();

	if (need_update) {
		bat_dbg(DBG_BATMAN, bat_priv,
			"updating last_seqno: old %d, new %d\n",
			orig_node->last_real_seqno, batman_packet->seqno);
		orig_node->last_real_seqno = batman_packet->seqno;
	}

	ret = is_duplicate;

out:
	spin_unlock_bh(&orig_node->ogm_cnt_lock);
	orig_node_free_ref(orig_node);
	return ret;
}
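
/* sanity-check a received OGM, update the affected originator and decide
 * whether the packet has to be rebroadcast: directly as a neighbor packet
 * with the direct link flag or forwarded as a multihop originator packet */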
void receive_bat_packet(struct ethhdr *ethhdr,
			struct batman_packet *batman_packet,
			unsigned char *hna_buff, int hna_buff_len,
			struct hard_iface *if_incoming)
{
	struct bat_priv *bat_priv = netdev_priv(if_incoming->soft_iface);
	struct hard_iface *hard_iface;
	struct orig_node *orig_neigh_node, *orig_node;
	struct neigh_node *router = NULL, *router_router = NULL;
	struct neigh_node *orig_neigh_router = NULL;
	char has_directlink_flag;
	char is_my_addr = 0, is_my_orig = 0, is_my_oldorig = 0;
	char is_broadcast = 0, is_bidirectional, is_single_hop_neigh;
	char is_duplicate;
	uint32_t if_incoming_seqno;

	/* Silently drop when the batman packet is actually not a
	 * correct packet.
	 *
	 * This might happen if a packet is padded (e.g. Ethernet has a
	 * minimum frame length of 64 bytes) and the aggregation logic
	 * interprets the padding as an additional packet.
	 *
	 * TODO: A more sane solution would be to have a bit in the
	 * batman_packet to detect whether the packet is the last
	 * packet in an aggregation. Here we expect that the padding
	 * is always zero (or not 0x01)
	 */
	if (batman_packet->packet_type != BAT_PACKET)
		return;

	/* could be changed by schedule_own_packet() */
	if_incoming_seqno = atomic_read(&if_incoming->seqno);

	has_directlink_flag = (batman_packet->flags & DIRECTLINK ? 1 : 0);

	is_single_hop_neigh = (compare_eth(ethhdr->h_source,
					   batman_packet->orig) ? 1 : 0);

	bat_dbg(DBG_BATMAN, bat_priv,
		"Received BATMAN packet via NB: %pM, IF: %s [%pM] "
		"(from OG: %pM, via prev OG: %pM, seqno %d, tq %d, "
		"TTL %d, V %d, IDF %d)\n",
		ethhdr->h_source, if_incoming->net_dev->name,
		if_incoming->net_dev->dev_addr, batman_packet->orig,
		batman_packet->prev_sender, batman_packet->seqno,
		batman_packet->tq, batman_packet->ttl, batman_packet->version,
		has_directlink_flag);

	rcu_read_lock();
	list_for_each_entry_rcu(hard_iface, &hardif_list, list) {
		if (hard_iface->if_status != IF_ACTIVE)
			continue;

		if (hard_iface->soft_iface != if_incoming->soft_iface)
			continue;

		if (compare_eth(ethhdr->h_source,
				hard_iface->net_dev->dev_addr))
			is_my_addr = 1;

		if (compare_eth(batman_packet->orig,
				hard_iface->net_dev->dev_addr))
			is_my_orig = 1;

		if (compare_eth(batman_packet->prev_sender,
				hard_iface->net_dev->dev_addr))
			is_my_oldorig = 1;

		if (compare_eth(ethhdr->h_source, broadcast_addr))
			is_broadcast = 1;
	}
	rcu_read_unlock();

	if (batman_packet->version != COMPAT_VERSION) {
		bat_dbg(DBG_BATMAN, bat_priv,
			"Drop packet: incompatible batman version (%i)\n",
			batman_packet->version);
		return;
	}

	if (is_my_addr) {
		bat_dbg(DBG_BATMAN, bat_priv,
			"Drop packet: received my own broadcast (sender: %pM"
			")\n",
			ethhdr->h_source);
		return;
	}

	if (is_broadcast) {
		bat_dbg(DBG_BATMAN, bat_priv, "Drop packet: "
			"ignoring all packets with broadcast source addr (sender: %pM"
			")\n", ethhdr->h_source);
		return;
	}

	if (is_my_orig) {
		unsigned long *word;
		int offset;

		orig_neigh_node = get_orig_node(bat_priv, ethhdr->h_source);
		if (!orig_neigh_node)
			return;

		/* neighbor has to indicate direct link and it has to
		 * come via the corresponding interface */
		/* if received seqno equals last send seqno save new
		 * seqno for bidirectional check */
		if (has_directlink_flag &&
		    compare_eth(if_incoming->net_dev->dev_addr,
				batman_packet->orig) &&
		    (batman_packet->seqno - if_incoming_seqno + 2 == 0)) {
			offset = if_incoming->if_num * NUM_WORDS;

			spin_lock_bh(&orig_neigh_node->ogm_cnt_lock);
			word = &(orig_neigh_node->bcast_own[offset]);
			bit_mark(word, 0);
			orig_neigh_node->bcast_own_sum[if_incoming->if_num] =
				bit_packet_count(word);
			spin_unlock_bh(&orig_neigh_node->ogm_cnt_lock);
		}

		bat_dbg(DBG_BATMAN, bat_priv, "Drop packet: "
			"originator packet from myself (via neighbor)\n");
		orig_node_free_ref(orig_neigh_node);
		return;
	}

	if (is_my_oldorig) {
		bat_dbg(DBG_BATMAN, bat_priv,
			"Drop packet: ignoring all rebroadcast echos (sender: "
			"%pM)\n", ethhdr->h_source);
		return;
	}

	orig_node = get_orig_node(bat_priv, batman_packet->orig);
	if (!orig_node)
		return;

	is_duplicate = count_real_packets(ethhdr, batman_packet, if_incoming);

	if (is_duplicate == -1) {
		bat_dbg(DBG_BATMAN, bat_priv,
			"Drop packet: packet within seqno protection time "
			"(sender: %pM)\n", ethhdr->h_source);
		goto out;
	}

	if (batman_packet->tq == 0) {
		bat_dbg(DBG_BATMAN, bat_priv,
			"Drop packet: originator packet with tq equal 0\n");
		goto out;
	}

	router = orig_node_get_router(orig_node);
	if (router)
		router_router = orig_node_get_router(router->orig_node);

	/* avoid temporary routing loops */
	if (router && router_router &&
	    (compare_eth(router->addr, batman_packet->prev_sender)) &&
	    !(compare_eth(batman_packet->orig, batman_packet->prev_sender)) &&
	    (compare_eth(router->addr, router_router->addr))) {
		bat_dbg(DBG_BATMAN, bat_priv,
			"Drop packet: ignoring all rebroadcast packets that "
			"may make me loop (sender: %pM)\n", ethhdr->h_source);
		goto out;
	}

	/* if sender is a direct neighbor the sender mac equals
	 * originator mac */
	orig_neigh_node = (is_single_hop_neigh ?
			   orig_node :
			   get_orig_node(bat_priv, ethhdr->h_source));
	if (!orig_neigh_node)
		goto out;

	orig_neigh_router = orig_node_get_router(orig_neigh_node);

	/* drop packet if sender is not a direct neighbor and if we
	 * don't route towards it */
	if (!is_single_hop_neigh && (!orig_neigh_router)) {
		bat_dbg(DBG_BATMAN, bat_priv,
			"Drop packet: OGM via unknown neighbor!\n");
		goto out_neigh;
	}

	is_bidirectional = is_bidirectional_neigh(orig_node, orig_neigh_node,
						  batman_packet, if_incoming);

	bonding_save_primary(orig_node, orig_neigh_node, batman_packet);

	/* update ranking if it is not a duplicate or has the same
	 * seqno and similar ttl as the non-duplicate */
	if (is_bidirectional &&
	    (!is_duplicate ||
	     ((orig_node->last_real_seqno == batman_packet->seqno) &&
	      (orig_node->last_ttl - 3 <= batman_packet->ttl))))
		update_orig(bat_priv, orig_node, ethhdr, batman_packet,
			    if_incoming, hna_buff, hna_buff_len, is_duplicate);

	/* is single hop (direct) neighbor */
	if (is_single_hop_neigh) {

		/* mark direct link on incoming interface */
		schedule_forward_packet(orig_node, ethhdr, batman_packet,
					1, hna_buff_len, if_incoming);

		bat_dbg(DBG_BATMAN, bat_priv, "Forwarding packet: "
			"rebroadcast neighbor packet with direct link flag\n");
		goto out_neigh;
	}

	/* multihop originator */
	if (!is_bidirectional) {
		bat_dbg(DBG_BATMAN, bat_priv,
			"Drop packet: not received via bidirectional link\n");
		goto out_neigh;
	}

	if (is_duplicate) {
		bat_dbg(DBG_BATMAN, bat_priv,
			"Drop packet: duplicate packet received\n");
		goto out_neigh;
	}

	bat_dbg(DBG_BATMAN, bat_priv,
		"Forwarding packet: rebroadcast originator packet\n");
	schedule_forward_packet(orig_node, ethhdr, batman_packet,
				0, hna_buff_len, if_incoming);

out_neigh:
	if ((orig_neigh_node) && (!is_single_hop_neigh))
		orig_node_free_ref(orig_neigh_node);
out:
	if (router)
		neigh_node_free_ref(router);
	if (router_router)
		neigh_node_free_ref(router_router);
	if (orig_neigh_router)
		neigh_node_free_ref(orig_neigh_router);

	orig_node_free_ref(orig_node);
}
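
/* receive handler for OGM (batman) packets: performs the basic sk_buff
 * sanity checks and hands the (possibly aggregated) data over to
 * receive_aggr_bat_packet() */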
int recv_bat_packet(struct sk_buff *skb, struct hard_iface *hard_iface)
{
	struct ethhdr *ethhdr;

	/* drop packet if it does not have the necessary minimum size */
	if (unlikely(!pskb_may_pull(skb, sizeof(struct batman_packet))))
		return NET_RX_DROP;

	ethhdr = (struct ethhdr *)skb_mac_header(skb);

	/* packet with broadcast indication but unicast recipient */
	if (!is_broadcast_ether_addr(ethhdr->h_dest))
		return NET_RX_DROP;

	/* packet with broadcast sender address */
	if (is_broadcast_ether_addr(ethhdr->h_source))
		return NET_RX_DROP;

	/* create a copy of the skb, if needed, to modify it. */
	if (skb_cow(skb, 0) < 0)
		return NET_RX_DROP;

	/* keep skb linear */
	if (skb_linearize(skb) < 0)
		return NET_RX_DROP;

	ethhdr = (struct ethhdr *)skb_mac_header(skb);

	receive_aggr_bat_packet(ethhdr,
				skb->data,
				skb_headlen(skb),
				hard_iface);

	kfree_skb(skb);
	return NET_RX_SUCCESS;
}
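
/* handle an icmp packet addressed to this node: deliver it to the local
 * bat socket or, for an echo request, turn it into an echo reply and send
 * it back towards the originator */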
static int recv_my_icmp_packet(struct bat_priv *bat_priv,
			       struct sk_buff *skb, size_t icmp_len)
{
	struct orig_node *orig_node = NULL;
	struct neigh_node *router = NULL;
	struct icmp_packet_rr *icmp_packet;
	int ret = NET_RX_DROP;

	icmp_packet = (struct icmp_packet_rr *)skb->data;

	/* add data to device queue */
	if (icmp_packet->msg_type != ECHO_REQUEST) {
		bat_socket_receive_packet(icmp_packet, icmp_len);
		goto out;
	}

	if (!bat_priv->primary_if)
		goto out;

	/* answer echo request (ping) */
	/* get routing information */
	orig_node = orig_hash_find(bat_priv, icmp_packet->orig);
	if (!orig_node)
		goto out;

	router = orig_node_get_router(orig_node);
	if (!router)
		goto out;

	/* create a copy of the skb, if needed, to modify it. */
	if (skb_cow(skb, sizeof(struct ethhdr)) < 0)
		goto out;

	icmp_packet = (struct icmp_packet_rr *)skb->data;

	memcpy(icmp_packet->dst, icmp_packet->orig, ETH_ALEN);
	memcpy(icmp_packet->orig,
	       bat_priv->primary_if->net_dev->dev_addr, ETH_ALEN);
	icmp_packet->msg_type = ECHO_REPLY;
	icmp_packet->ttl = TTL;

	send_skb_packet(skb, router->if_incoming, router->addr);
	ret = NET_RX_SUCCESS;

out:
	if (router)
		neigh_node_free_ref(router);
	if (orig_node)
		orig_node_free_ref(orig_node);
	return ret;
}
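
/* answer an icmp echo request whose ttl expired on the way with a
 * TTL_EXCEEDED message sent back to the originator (traceroute support) */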
static int recv_icmp_ttl_exceeded(struct bat_priv *bat_priv,
				  struct sk_buff *skb)
{
	struct orig_node *orig_node = NULL;
	struct neigh_node *router = NULL;
	struct icmp_packet *icmp_packet;
	int ret = NET_RX_DROP;

	icmp_packet = (struct icmp_packet *)skb->data;

	/* send TTL exceeded if packet is an echo request (traceroute) */
	if (icmp_packet->msg_type != ECHO_REQUEST) {
		pr_debug("Warning - can't forward icmp packet from %pM to "
			 "%pM: ttl exceeded\n", icmp_packet->orig,
			 icmp_packet->dst);
		goto out;
	}

	if (!bat_priv->primary_if)
		goto out;

	/* get routing information */
	orig_node = orig_hash_find(bat_priv, icmp_packet->orig);
	if (!orig_node)
		goto out;

	router = orig_node_get_router(orig_node);
	if (!router)
		goto out;

	/* create a copy of the skb, if needed, to modify it. */
	if (skb_cow(skb, sizeof(struct ethhdr)) < 0)
		goto out;

	icmp_packet = (struct icmp_packet *)skb->data;

	memcpy(icmp_packet->dst, icmp_packet->orig, ETH_ALEN);
	memcpy(icmp_packet->orig,
	       bat_priv->primary_if->net_dev->dev_addr, ETH_ALEN);
	icmp_packet->msg_type = TTL_EXCEEDED;
	icmp_packet->ttl = TTL;

	send_skb_packet(skb, router->if_incoming, router->addr);
	ret = NET_RX_SUCCESS;

out:
	if (router)
		neigh_node_free_ref(router);
	if (orig_node)
		orig_node_free_ref(orig_node);
	return ret;
}
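
/* receive handler for batman icmp packets: record route handling, local
 * delivery, ttl checks and forwarding towards the destination */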
int recv_icmp_packet(struct sk_buff *skb, struct hard_iface *recv_if)
{
	struct bat_priv *bat_priv = netdev_priv(recv_if->soft_iface);
	struct icmp_packet_rr *icmp_packet;
	struct ethhdr *ethhdr;
	struct orig_node *orig_node = NULL;
	struct neigh_node *router = NULL;
	int hdr_size = sizeof(struct icmp_packet);
	int ret = NET_RX_DROP;

	/**
	 * we truncate all incoming icmp packets if they don't match our size
	 */
	if (skb->len >= sizeof(struct icmp_packet_rr))
		hdr_size = sizeof(struct icmp_packet_rr);

	/* drop packet if it does not have the necessary minimum size */
	if (unlikely(!pskb_may_pull(skb, hdr_size)))
		goto out;

	ethhdr = (struct ethhdr *)skb_mac_header(skb);

	/* packet with unicast indication but broadcast recipient */
	if (is_broadcast_ether_addr(ethhdr->h_dest))
		goto out;

	/* packet with broadcast sender address */
	if (is_broadcast_ether_addr(ethhdr->h_source))
		goto out;

	/* not for me */
	if (!is_my_mac(ethhdr->h_dest))
		goto out;

	icmp_packet = (struct icmp_packet_rr *)skb->data;

	/* add record route information if not full */
	if ((hdr_size == sizeof(struct icmp_packet_rr)) &&
	    (icmp_packet->rr_cur < BAT_RR_LEN)) {
		memcpy(&(icmp_packet->rr[icmp_packet->rr_cur]),
		       ethhdr->h_dest, ETH_ALEN);
		icmp_packet->rr_cur++;
	}

	/* packet for me */
	if (is_my_mac(icmp_packet->dst))
		return recv_my_icmp_packet(bat_priv, skb, hdr_size);

	/* TTL exceeded */
	if (icmp_packet->ttl < 2)
		return recv_icmp_ttl_exceeded(bat_priv, skb);

	/* get routing information */
	orig_node = orig_hash_find(bat_priv, icmp_packet->dst);
	if (!orig_node)
		goto out;

	router = orig_node_get_router(orig_node);
	if (!router)
		goto out;

	/* create a copy of the skb, if needed, to modify it. */
	if (skb_cow(skb, sizeof(struct ethhdr)) < 0)
		goto out;

	icmp_packet = (struct icmp_packet_rr *)skb->data;

	/* decrement ttl */
	icmp_packet->ttl--;

	/* route it */
	send_skb_packet(skb, router->if_incoming, router->addr);
	ret = NET_RX_SUCCESS;

out:
	if (router)
		neigh_node_free_ref(router);
	if (orig_node)
		orig_node_free_ref(orig_node);
	return ret;
}

/* In the bonding case, send the packets in a round
 * robin fashion over the remaining interfaces.
 *
 * This method rotates the bonding list and increases the
 * returned router's refcount. */
static struct neigh_node *find_bond_router(struct orig_node *primary_orig,
					   struct hard_iface *recv_if)
{
	struct neigh_node *tmp_neigh_node;
	struct neigh_node *router = NULL, *first_candidate = NULL;

	rcu_read_lock();
	list_for_each_entry_rcu(tmp_neigh_node, &primary_orig->bond_list,
				bonding_list) {
		if (!first_candidate)
			first_candidate = tmp_neigh_node;

		/* recv_if == NULL on the first node. */
		if (tmp_neigh_node->if_incoming == recv_if)
			continue;

		if (!atomic_inc_not_zero(&tmp_neigh_node->refcount))
			continue;

		router = tmp_neigh_node;
		break;
	}

	/* use the first candidate if nothing was found. */
	if (!router && first_candidate &&
	    atomic_inc_not_zero(&first_candidate->refcount))
		router = first_candidate;

	if (!router)
		goto out;

	/* selected should point to the next element
	 * after the current router */
	spin_lock_bh(&primary_orig->neigh_list_lock);
	/* this is a list_move(), which unfortunately
	 * does not exist as rcu version */
	list_del_rcu(&primary_orig->bond_list);
	list_add_rcu(&primary_orig->bond_list,
		     &router->bonding_list);
	spin_unlock_bh(&primary_orig->neigh_list_lock);

out:
	rcu_read_unlock();
	return router;
}

/* Interface Alternating: Use the best of the
 * remaining candidates which are not using
 * this interface.
 *
 * Increases the returned router's refcount */
static struct neigh_node *find_ifalter_router(struct orig_node *primary_orig,
					      struct hard_iface *recv_if)
{
	struct neigh_node *tmp_neigh_node;
	struct neigh_node *router = NULL, *first_candidate = NULL;

	rcu_read_lock();
	list_for_each_entry_rcu(tmp_neigh_node, &primary_orig->bond_list,
				bonding_list) {
		if (!first_candidate)
			first_candidate = tmp_neigh_node;

		/* recv_if == NULL on the first node. */
		if (tmp_neigh_node->if_incoming == recv_if)
			continue;

		if (!atomic_inc_not_zero(&tmp_neigh_node->refcount))
			continue;

		/* if we don't have a router yet
		 * or this one is better, choose it. */
		if ((!router) ||
		    (tmp_neigh_node->tq_avg > router->tq_avg)) {
			/* decrement refcount of
			 * previously selected router */
			if (router)
				neigh_node_free_ref(router);

			router = tmp_neigh_node;
			atomic_inc_not_zero(&router->refcount);
		}

		neigh_node_free_ref(tmp_neigh_node);
	}

	/* use the first candidate if nothing was found. */
	if (!router && first_candidate &&
	    atomic_inc_not_zero(&first_candidate->refcount))
		router = first_candidate;

	rcu_read_unlock();
	return router;
}

/* find a suitable router for this originator, and use
 * bonding if possible. increases the found neighbor's
 * refcount. */
struct neigh_node *find_router(struct bat_priv *bat_priv,
			       struct orig_node *orig_node,
			       struct hard_iface *recv_if)
{
	struct orig_node *primary_orig_node;
	struct orig_node *router_orig;
	struct neigh_node *router;
	static uint8_t zero_mac[ETH_ALEN] = {0, 0, 0, 0, 0, 0};
	int bonding_enabled;

	if (!orig_node)
		return NULL;

	router = orig_node_get_router(orig_node);
	if (!router)
		return NULL;

	/* without bonding, the first node should
	 * always choose the default router. */
	bonding_enabled = atomic_read(&bat_priv->bonding);

	rcu_read_lock();
	/* select default router to output */
	router_orig = router->orig_node;
	if (!router_orig) {
		rcu_read_unlock();
		return NULL;
	}

	if ((!recv_if) && (!bonding_enabled))
		goto return_router;

	/* if we have something in the primary_addr, we can search
	 * for a potential bonding candidate. */
	if (compare_eth(router_orig->primary_addr, zero_mac))
		goto return_router;

	/* find the orig_node which has the primary interface. might
	 * even be the same as our router_orig in many cases */
	if (compare_eth(router_orig->primary_addr, router_orig->orig)) {
		primary_orig_node = router_orig;
	} else {
		primary_orig_node = orig_hash_find(bat_priv,
						   router_orig->primary_addr);
		if (!primary_orig_node)
			goto return_router;

		orig_node_free_ref(primary_orig_node);
	}

	/* with less than 2 candidates, we can't do any
	 * bonding and prefer the original router. */
	if (atomic_read(&primary_orig_node->bond_candidates) < 2)
		goto return_router;

	/* all nodes between should choose a candidate which
	 * is not on the interface where the packet came in. */
	neigh_node_free_ref(router);

	if (bonding_enabled)
		router = find_bond_router(primary_orig_node, recv_if);
	else
		router = find_ifalter_router(primary_orig_node, recv_if);

return_router:
	rcu_read_unlock();
	return router;
}
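
/* common sanity checks for received unicast packets: returns 0 if the
 * packet has the required minimum size and is addressed to us, -1
 * otherwise */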
static int check_unicast_packet(struct sk_buff *skb, int hdr_size)
{
	struct ethhdr *ethhdr;

	/* drop packet if it does not have the necessary minimum size */
	if (unlikely(!pskb_may_pull(skb, hdr_size)))
		return -1;

	ethhdr = (struct ethhdr *)skb_mac_header(skb);

	/* packet with unicast indication but broadcast recipient */
	if (is_broadcast_ether_addr(ethhdr->h_dest))
		return -1;

	/* packet with broadcast sender address */
	if (is_broadcast_ether_addr(ethhdr->h_source))
		return -1;

	/* not for me */
	if (!is_my_mac(ethhdr->h_dest))
		return -1;

	return 0;
}
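
/* forward a unicast packet towards its destination, fragmenting or
 * reassembling it on the way if the outgoing interface requires it */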
int route_unicast_packet(struct sk_buff *skb, struct hard_iface *recv_if)
{
	struct bat_priv *bat_priv = netdev_priv(recv_if->soft_iface);
	struct orig_node *orig_node = NULL;
	struct neigh_node *neigh_node = NULL;
	struct unicast_packet *unicast_packet;
	struct ethhdr *ethhdr = (struct ethhdr *)skb_mac_header(skb);
	int ret = NET_RX_DROP;
	struct sk_buff *new_skb;

	unicast_packet = (struct unicast_packet *)skb->data;

	/* TTL exceeded */
	if (unicast_packet->ttl < 2) {
		pr_debug("Warning - can't forward unicast packet from %pM to "
			 "%pM: ttl exceeded\n", ethhdr->h_source,
			 unicast_packet->dest);
		goto out;
	}

	/* get routing information */
	rcu_read_lock();
	orig_node = orig_hash_find(bat_priv, unicast_packet->dest);
	if (!orig_node)
		goto unlock;

	rcu_read_unlock();

	/* find_router() increases the neighbor's refcount if found. */
	neigh_node = find_router(bat_priv, orig_node, recv_if);
	if (!neigh_node)
		goto out;

	/* create a copy of the skb, if needed, to modify it. */
	if (skb_cow(skb, sizeof(struct ethhdr)) < 0)
		goto out;

	unicast_packet = (struct unicast_packet *)skb->data;

	if (unicast_packet->packet_type == BAT_UNICAST &&
	    atomic_read(&bat_priv->fragmentation) &&
	    skb->len > neigh_node->if_incoming->net_dev->mtu) {
		ret = frag_send_skb(skb, bat_priv,
				    neigh_node->if_incoming, neigh_node->addr);
		goto out;
	}

	if (unicast_packet->packet_type == BAT_UNICAST_FRAG &&
	    frag_can_reassemble(skb, neigh_node->if_incoming->net_dev->mtu)) {

		ret = frag_reassemble_skb(skb, bat_priv, &new_skb);

		if (ret == NET_RX_DROP)
			goto out;

		/* packet was buffered for late merge */
		if (!new_skb) {
			ret = NET_RX_SUCCESS;
			goto out;
		}

		skb = new_skb;
		unicast_packet = (struct unicast_packet *)skb->data;
	}

	/* decrement ttl */
	unicast_packet->ttl--;

	/* route it */
	send_skb_packet(skb, neigh_node->if_incoming, neigh_node->addr);
	ret = NET_RX_SUCCESS;
	goto out;

unlock:
	rcu_read_unlock();
out:
	if (neigh_node)
		neigh_node_free_ref(neigh_node);
	if (orig_node)
		orig_node_free_ref(orig_node);
	return ret;
}
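
/* receive handler for unicast packets: deliver locally if we are the
 * destination, otherwise route the packet further */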
int recv_unicast_packet(struct sk_buff *skb, struct hard_iface *recv_if)
{
	struct unicast_packet *unicast_packet;
	int hdr_size = sizeof(struct unicast_packet);

	if (check_unicast_packet(skb, hdr_size) < 0)
		return NET_RX_DROP;

	unicast_packet = (struct unicast_packet *)skb->data;

	/* packet for me */
	if (is_my_mac(unicast_packet->dest)) {
		interface_rx(recv_if->soft_iface, skb, recv_if, hdr_size);
		return NET_RX_SUCCESS;
	}

	return route_unicast_packet(skb, recv_if);
}
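
/* receive handler for fragmented unicast packets: reassemble fragments
 * destined for us and deliver them locally, otherwise route them further */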
int recv_ucast_frag_packet(struct sk_buff *skb, struct hard_iface *recv_if)
{
	struct bat_priv *bat_priv = netdev_priv(recv_if->soft_iface);
	struct unicast_frag_packet *unicast_packet;
	int hdr_size = sizeof(struct unicast_frag_packet);
	struct sk_buff *new_skb = NULL;
	int ret;

	if (check_unicast_packet(skb, hdr_size) < 0)
		return NET_RX_DROP;

	unicast_packet = (struct unicast_frag_packet *)skb->data;

	/* packet for me */
	if (is_my_mac(unicast_packet->dest)) {

		ret = frag_reassemble_skb(skb, bat_priv, &new_skb);

		if (ret == NET_RX_DROP)
			return NET_RX_DROP;

		/* packet was buffered for late merge */
		if (!new_skb)
			return NET_RX_SUCCESS;

		interface_rx(recv_if->soft_iface, new_skb, recv_if,
			     sizeof(struct unicast_packet));
		return NET_RX_SUCCESS;
	}

	return route_unicast_packet(skb, recv_if);
}
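
/* receive handler for broadcast packets: duplicate and seqno-window checks,
 * queue the packet for rebroadcast and deliver it locally */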
int recv_bcast_packet(struct sk_buff *skb, struct hard_iface *recv_if)
{
	struct bat_priv *bat_priv = netdev_priv(recv_if->soft_iface);
	struct orig_node *orig_node = NULL;
	struct bcast_packet *bcast_packet;
	struct ethhdr *ethhdr;
	int hdr_size = sizeof(struct bcast_packet);
	int ret = NET_RX_DROP;
	int32_t seq_diff;

	/* drop packet if it does not have the necessary minimum size */
	if (unlikely(!pskb_may_pull(skb, hdr_size)))
		goto out;

	ethhdr = (struct ethhdr *)skb_mac_header(skb);

	/* packet with broadcast indication but unicast recipient */
	if (!is_broadcast_ether_addr(ethhdr->h_dest))
		goto out;

	/* packet with broadcast sender address */
	if (is_broadcast_ether_addr(ethhdr->h_source))
		goto out;

	/* ignore broadcasts sent by myself */
	if (is_my_mac(ethhdr->h_source))
		goto out;

	bcast_packet = (struct bcast_packet *)skb->data;

	/* ignore broadcasts originated by myself */
	if (is_my_mac(bcast_packet->orig))
		goto out;

	if (bcast_packet->ttl < 2)
		goto out;

	rcu_read_lock();
	orig_node = orig_hash_find(bat_priv, bcast_packet->orig);
	if (!orig_node)
		goto rcu_unlock;

	rcu_read_unlock();

	spin_lock_bh(&orig_node->bcast_seqno_lock);

	/* check whether the packet is a duplicate */
	if (get_bit_status(orig_node->bcast_bits, orig_node->last_bcast_seqno,
			   ntohl(bcast_packet->seqno)))
		goto spin_unlock;

	seq_diff = ntohl(bcast_packet->seqno) - orig_node->last_bcast_seqno;

	/* check whether the packet is old and the host just restarted. */
	if (window_protected(bat_priv, seq_diff,
			     &orig_node->bcast_seqno_reset))
		goto spin_unlock;

	/* mark broadcast in flood history, update window position
	 * if required. */
	if (bit_get_packet(bat_priv, orig_node->bcast_bits, seq_diff, 1))
		orig_node->last_bcast_seqno = ntohl(bcast_packet->seqno);

	spin_unlock_bh(&orig_node->bcast_seqno_lock);

	/* rebroadcast packet */
	add_bcast_packet_to_list(bat_priv, skb);

	/* broadcast for me */
	interface_rx(recv_if->soft_iface, skb, recv_if, hdr_size);
	ret = NET_RX_SUCCESS;
	goto out;

rcu_unlock:
	rcu_read_unlock();
	goto out;
spin_unlock:
	spin_unlock_bh(&orig_node->bcast_seqno_lock);
out:
	if (orig_node)
		orig_node_free_ref(orig_node);
	return ret;
}
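
/* receive handler for vis packets: hand server sync and client update
 * packets over to the vis code, everything else is ignored */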
int recv_vis_packet(struct sk_buff *skb, struct hard_iface *recv_if)
{
	struct vis_packet *vis_packet;
	struct ethhdr *ethhdr;
	struct bat_priv *bat_priv = netdev_priv(recv_if->soft_iface);
	int hdr_size = sizeof(struct vis_packet);

	/* keep skb linear */
	if (skb_linearize(skb) < 0)
		return NET_RX_DROP;

	if (unlikely(!pskb_may_pull(skb, hdr_size)))
		return NET_RX_DROP;

	vis_packet = (struct vis_packet *)skb->data;
	ethhdr = (struct ethhdr *)skb_mac_header(skb);

	/* not for me */
	if (!is_my_mac(ethhdr->h_dest))
		return NET_RX_DROP;

	/* ignore own packets */
	if (is_my_mac(vis_packet->vis_orig))
		return NET_RX_DROP;

	if (is_my_mac(vis_packet->sender_orig))
		return NET_RX_DROP;

	switch (vis_packet->vis_type) {
	case VIS_TYPE_SERVER_SYNC:
		receive_server_sync_packet(bat_priv, vis_packet,
					   skb_headlen(skb));
		break;

	case VIS_TYPE_CLIENT_UPDATE:
		receive_client_update_packet(bat_priv, vis_packet,
					     skb_headlen(skb));
		break;

	default:	/* ignore unknown packet */
		break;
	}

	/* We take a copy of the data in the packet, so we should
	 * always free the skb. */
	return NET_RX_DROP;
}