routing.c

/*
 * Copyright (C) 2007-2011 B.A.T.M.A.N. contributors:
 *
 * Marek Lindner, Simon Wunderlich
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
 * 02110-1301, USA
 *
 */

#include "main.h"
#include "routing.h"
#include "send.h"
#include "hash.h"
#include "soft-interface.h"
#include "hard-interface.h"
#include "icmp_socket.h"
#include "translation-table.h"
#include "originator.h"
#include "ring_buffer.h"
#include "vis.h"
#include "aggregation.h"
#include "gateway_common.h"
#include "gateway_client.h"
#include "unicast.h"

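/* slide the own broadcast-seqno window of every known originator one
 * position forward on the given interface and refresh the bcast_own_sum
 * counters used for the TQ calculation */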
void slide_own_bcast_window(struct hard_iface *hard_iface)
{
	struct bat_priv *bat_priv = netdev_priv(hard_iface->soft_iface);
	struct hashtable_t *hash = bat_priv->orig_hash;
	struct hlist_node *node;
	struct hlist_head *head;
	struct orig_node *orig_node;
	unsigned long *word;
	int i;
	size_t word_index;

	for (i = 0; i < hash->size; i++) {
		head = &hash->table[i];

		rcu_read_lock();
		hlist_for_each_entry_rcu(orig_node, node, head, hash_entry) {
			spin_lock_bh(&orig_node->ogm_cnt_lock);
			word_index = hard_iface->if_num * NUM_WORDS;
			word = &(orig_node->bcast_own[word_index]);

			bit_get_packet(bat_priv, word, 1, 0);
			orig_node->bcast_own_sum[hard_iface->if_num] =
				bit_packet_count(word);
			spin_unlock_bh(&orig_node->ogm_cnt_lock);
		}
		rcu_read_unlock();
	}
}

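/* apply the translation table changes attached to an OGM; if the changes
 * are missing or the resulting table CRC does not match, request a fresh
 * (possibly full) table from the originator instead */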
static void update_transtable(struct bat_priv *bat_priv,
			      struct orig_node *orig_node,
			      const unsigned char *tt_buff,
			      uint8_t tt_num_changes, uint8_t ttvn,
			      uint16_t tt_crc)
{
	uint8_t orig_ttvn = (uint8_t)atomic_read(&orig_node->last_ttvn);
	bool full_table = true;

	/* the ttvn increased by one -> we can apply the attached changes */
	if (ttvn - orig_ttvn == 1) {
		/* the OGM could not contain the changes due to their size or
		 * because they have already been sent TT_OGM_APPEND_MAX times.
		 * In this case send a tt request */
		if (!tt_num_changes) {
			full_table = false;
			goto request_table;
		}

		tt_update_changes(bat_priv, orig_node, tt_num_changes, ttvn,
				  (struct tt_change *)tt_buff);

		/* Even if we received the precomputed crc with the OGM, we
		 * prefer to recompute it to spot any possible inconsistency
		 * in the global table */
		orig_node->tt_crc = tt_global_crc(bat_priv, orig_node);

		/* The ttvn alone is not enough to guarantee consistency
		 * because a single value could represent different states
		 * (due to the wrap around). Thus a node has to check whether
		 * the resulting table (after applying the changes) is still
		 * consistent or not. E.g. a node could disconnect while its
		 * ttvn is X and reconnect on ttvn = X + TTVN_MAX: in this case
		 * checking the CRC value is mandatory to detect the
		 * inconsistency */
		if (orig_node->tt_crc != tt_crc)
			goto request_table;

		/* Roaming phase is over: tables are in sync again. I can
		 * unset the flag */
		orig_node->tt_poss_change = false;
	} else {
		/* if we missed more than one change or our tables are not
		 * in sync anymore -> request fresh tt data */
		if (ttvn != orig_ttvn || orig_node->tt_crc != tt_crc) {
request_table:
			bat_dbg(DBG_TT, bat_priv, "TT inconsistency for %pM. "
				"Need to retrieve the correct information "
				"(ttvn: %u last_ttvn: %u crc: %u last_crc: "
				"%u num_changes: %u)\n", orig_node->orig, ttvn,
				orig_ttvn, tt_crc, orig_node->tt_crc,
				tt_num_changes);
			send_tt_request(bat_priv, orig_node, ttvn, tt_crc,
					full_table);
			return;
		}
	}
}

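/* switch the stored best next hop of orig_node to neigh_node (or delete
 * the route if neigh_node is NULL), adjusting the refcounts of the old
 * and new router accordingly */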
static void update_route(struct bat_priv *bat_priv,
			 struct orig_node *orig_node,
			 struct neigh_node *neigh_node)
{
	struct neigh_node *curr_router;

	curr_router = orig_node_get_router(orig_node);

	/* route deleted */
	if ((curr_router) && (!neigh_node)) {
		bat_dbg(DBG_ROUTES, bat_priv, "Deleting route towards: %pM\n",
			orig_node->orig);
		tt_global_del_orig(bat_priv, orig_node,
				   "Deleted route towards originator");

	/* route added */
	} else if ((!curr_router) && (neigh_node)) {
		bat_dbg(DBG_ROUTES, bat_priv,
			"Adding route towards: %pM (via %pM)\n",
			orig_node->orig, neigh_node->addr);

	/* route changed */
	} else if (neigh_node && curr_router) {
		bat_dbg(DBG_ROUTES, bat_priv,
			"Changing route towards: %pM "
			"(now via %pM - was via %pM)\n",
			orig_node->orig, neigh_node->addr,
			curr_router->addr);
	}

	if (curr_router)
		neigh_node_free_ref(curr_router);

	/* increase refcount of new best neighbor */
	if (neigh_node && !atomic_inc_not_zero(&neigh_node->refcount))
		neigh_node = NULL;

	spin_lock_bh(&orig_node->neigh_list_lock);
	rcu_assign_pointer(orig_node->router, neigh_node);
	spin_unlock_bh(&orig_node->neigh_list_lock);

	/* decrease refcount of previous best neighbor */
	if (curr_router)
		neigh_node_free_ref(curr_router);
}

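/* update the route towards orig_node only if the selected router actually
 * differs from neigh_node */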
void update_routes(struct bat_priv *bat_priv, struct orig_node *orig_node,
		   struct neigh_node *neigh_node)
{
	struct neigh_node *router = NULL;

	if (!orig_node)
		goto out;

	router = orig_node_get_router(orig_node);

	if (router != neigh_node)
		update_route(bat_priv, orig_node, neigh_node);

out:
	if (router)
		neigh_node_free_ref(router);
}

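/* calculate the transmit quality (TQ) of the link to the neighbor an OGM
 * was received from and decide whether the link counts as bidirectional;
 * returns 1 if it does, 0 otherwise */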
static int is_bidirectional_neigh(struct orig_node *orig_node,
				  struct orig_node *orig_neigh_node,
				  struct batman_packet *batman_packet,
				  struct hard_iface *if_incoming)
{
	struct bat_priv *bat_priv = netdev_priv(if_incoming->soft_iface);
	struct neigh_node *neigh_node = NULL, *tmp_neigh_node;
	struct hlist_node *node;
	uint8_t total_count;
	uint8_t orig_eq_count, neigh_rq_count, tq_own;
	int tq_asym_penalty, ret = 0;

	/* find corresponding one hop neighbor */
	rcu_read_lock();
	hlist_for_each_entry_rcu(tmp_neigh_node, node,
				 &orig_neigh_node->neigh_list, list) {

		if (!compare_eth(tmp_neigh_node->addr, orig_neigh_node->orig))
			continue;

		if (tmp_neigh_node->if_incoming != if_incoming)
			continue;

		if (!atomic_inc_not_zero(&tmp_neigh_node->refcount))
			continue;

		neigh_node = tmp_neigh_node;
		break;
	}
	rcu_read_unlock();

	if (!neigh_node)
		neigh_node = create_neighbor(orig_neigh_node,
					     orig_neigh_node,
					     orig_neigh_node->orig,
					     if_incoming);

	if (!neigh_node)
		goto out;

	/* if orig_node is direct neighbor update neigh_node last_valid */
	if (orig_node == orig_neigh_node)
		neigh_node->last_valid = jiffies;

	orig_node->last_valid = jiffies;

	/* find packet count of corresponding one hop neighbor */
	spin_lock_bh(&orig_node->ogm_cnt_lock);
	orig_eq_count = orig_neigh_node->bcast_own_sum[if_incoming->if_num];
	neigh_rq_count = neigh_node->real_packet_count;
	spin_unlock_bh(&orig_node->ogm_cnt_lock);

	/* pay attention to not get a value bigger than 100 % */
	total_count = (orig_eq_count > neigh_rq_count ?
		       neigh_rq_count : orig_eq_count);

	/* if we have too few packets (too little data) we set tq_own to zero */
	/* if we receive too few packets it is not considered bidirectional */
	if ((total_count < TQ_LOCAL_BIDRECT_SEND_MINIMUM) ||
	    (neigh_rq_count < TQ_LOCAL_BIDRECT_RECV_MINIMUM))
		tq_own = 0;
	else
		/* neigh_node->real_packet_count is never zero as we
		 * only purge old information when getting new
		 * information */
		tq_own = (TQ_MAX_VALUE * total_count) / neigh_rq_count;

	/*
	 * 1 - ((1-x) ** 3), normalized to TQ_MAX_VALUE this does
	 * affect the nearly-symmetric links only a little, but
	 * punishes asymmetric links more. This will give a value
	 * between 0 and TQ_MAX_VALUE
	 */
	tq_asym_penalty = TQ_MAX_VALUE - (TQ_MAX_VALUE *
			  (TQ_LOCAL_WINDOW_SIZE - neigh_rq_count) *
			  (TQ_LOCAL_WINDOW_SIZE - neigh_rq_count) *
			  (TQ_LOCAL_WINDOW_SIZE - neigh_rq_count)) /
			  (TQ_LOCAL_WINDOW_SIZE *
			   TQ_LOCAL_WINDOW_SIZE *
			   TQ_LOCAL_WINDOW_SIZE);

	batman_packet->tq = ((batman_packet->tq * tq_own * tq_asym_penalty) /
			     (TQ_MAX_VALUE * TQ_MAX_VALUE));

	bat_dbg(DBG_BATMAN, bat_priv,
		"bidirectional: "
		"orig = %-15pM neigh = %-15pM => own_bcast = %2i, "
		"real recv = %2i, local tq: %3i, asym_penalty: %3i, "
		"total tq: %3i\n",
		orig_node->orig, orig_neigh_node->orig, total_count,
		neigh_rq_count, tq_own, tq_asym_penalty, batman_packet->tq);

	/* if link has the minimum required transmission quality
	 * consider it bidirectional */
	if (batman_packet->tq >= TQ_TOTAL_BIDRECT_LIMIT)
		ret = 1;

out:
	if (neigh_node)
		neigh_node_free_ref(neigh_node);
	return ret;
}

/* caller must hold the neigh_list_lock */
void bonding_candidate_del(struct orig_node *orig_node,
			   struct neigh_node *neigh_node)
{
	/* this neighbor is not part of our candidate list */
	if (list_empty(&neigh_node->bonding_list))
		goto out;

	list_del_rcu(&neigh_node->bonding_list);
	INIT_LIST_HEAD(&neigh_node->bonding_list);
	neigh_node_free_ref(neigh_node);
	atomic_dec(&orig_node->bond_candidates);

out:
	return;
}

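/* add neigh_node to the bonding candidate list of orig_node if its
 * originator advertises the same primary address, its TQ lies within
 * BONDING_TQ_THRESHOLD of the current router and no other candidate
 * shares its interface or mac address */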
static void bonding_candidate_add(struct orig_node *orig_node,
				  struct neigh_node *neigh_node)
{
	struct hlist_node *node;
	struct neigh_node *tmp_neigh_node, *router = NULL;
	uint8_t interference_candidate = 0;

	spin_lock_bh(&orig_node->neigh_list_lock);

	/* only consider if it has the same primary address ... */
	if (!compare_eth(orig_node->orig,
			 neigh_node->orig_node->primary_addr))
		goto candidate_del;

	router = orig_node_get_router(orig_node);
	if (!router)
		goto candidate_del;

	/* ... and is good enough to be considered */
	if (neigh_node->tq_avg < router->tq_avg - BONDING_TQ_THRESHOLD)
		goto candidate_del;

	/**
	 * check if we have another candidate with the same mac address or
	 * interface. If we do, we won't select this candidate because of
	 * possible interference.
	 */
	hlist_for_each_entry_rcu(tmp_neigh_node, node,
				 &orig_node->neigh_list, list) {

		if (tmp_neigh_node == neigh_node)
			continue;

		/* we only care if the other candidate is even
		 * considered as candidate. */
		if (list_empty(&tmp_neigh_node->bonding_list))
			continue;

		if ((neigh_node->if_incoming == tmp_neigh_node->if_incoming) ||
		    (compare_eth(neigh_node->addr, tmp_neigh_node->addr))) {
			interference_candidate = 1;
			break;
		}
	}

	/* don't care further if it is an interference candidate */
	if (interference_candidate)
		goto candidate_del;

	/* this neighbor already is part of our candidate list */
	if (!list_empty(&neigh_node->bonding_list))
		goto out;

	if (!atomic_inc_not_zero(&neigh_node->refcount))
		goto out;

	list_add_rcu(&neigh_node->bonding_list, &orig_node->bond_list);
	atomic_inc(&orig_node->bond_candidates);
	goto out;

candidate_del:
	bonding_candidate_del(orig_node, neigh_node);

out:
	spin_unlock_bh(&orig_node->neigh_list_lock);

	if (router)
		neigh_node_free_ref(router);
}

/* copy primary address for bonding */
static void bonding_save_primary(const struct orig_node *orig_node,
				 struct orig_node *orig_neigh_node,
				 const struct batman_packet *batman_packet)
{
	if (!(batman_packet->flags & PRIMARIES_FIRST_HOP))
		return;

	memcpy(orig_neigh_node->primary_addr, orig_node->orig, ETH_ALEN);
}

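/* update the originator entry for a received OGM: refresh the TQ ring
 * buffer of the transmitting neighbor, switch the route if this neighbor
 * now offers the best TQ and update the announced translation table and
 * gateway information */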
static void update_orig(struct bat_priv *bat_priv, struct orig_node *orig_node,
			const struct ethhdr *ethhdr,
			const struct batman_packet *batman_packet,
			struct hard_iface *if_incoming,
			const unsigned char *tt_buff, int is_duplicate)
{
	struct neigh_node *neigh_node = NULL, *tmp_neigh_node = NULL;
	struct neigh_node *router = NULL;
	struct orig_node *orig_node_tmp;
	struct hlist_node *node;
	uint8_t bcast_own_sum_orig, bcast_own_sum_neigh;

	bat_dbg(DBG_BATMAN, bat_priv, "update_originator(): "
		"Searching and updating originator entry of received packet\n");

	rcu_read_lock();
	hlist_for_each_entry_rcu(tmp_neigh_node, node,
				 &orig_node->neigh_list, list) {
		if (compare_eth(tmp_neigh_node->addr, ethhdr->h_source) &&
		    (tmp_neigh_node->if_incoming == if_incoming) &&
		    atomic_inc_not_zero(&tmp_neigh_node->refcount)) {
			if (neigh_node)
				neigh_node_free_ref(neigh_node);
			neigh_node = tmp_neigh_node;
			continue;
		}

		if (is_duplicate)
			continue;

		spin_lock_bh(&tmp_neigh_node->tq_lock);
		ring_buffer_set(tmp_neigh_node->tq_recv,
				&tmp_neigh_node->tq_index, 0);
		tmp_neigh_node->tq_avg =
			ring_buffer_avg(tmp_neigh_node->tq_recv);
		spin_unlock_bh(&tmp_neigh_node->tq_lock);
	}

	if (!neigh_node) {
		struct orig_node *orig_tmp;

		orig_tmp = get_orig_node(bat_priv, ethhdr->h_source);
		if (!orig_tmp)
			goto unlock;

		neigh_node = create_neighbor(orig_node, orig_tmp,
					     ethhdr->h_source, if_incoming);

		orig_node_free_ref(orig_tmp);
		if (!neigh_node)
			goto unlock;
	} else
		bat_dbg(DBG_BATMAN, bat_priv,
			"Updating existing last-hop neighbor of originator\n");

	rcu_read_unlock();

	orig_node->flags = batman_packet->flags;
	neigh_node->last_valid = jiffies;

	spin_lock_bh(&neigh_node->tq_lock);
	ring_buffer_set(neigh_node->tq_recv,
			&neigh_node->tq_index,
			batman_packet->tq);
	neigh_node->tq_avg = ring_buffer_avg(neigh_node->tq_recv);
	spin_unlock_bh(&neigh_node->tq_lock);

	if (!is_duplicate) {
		orig_node->last_ttl = batman_packet->ttl;
		neigh_node->last_ttl = batman_packet->ttl;
	}

	bonding_candidate_add(orig_node, neigh_node);

	/* if this neighbor already is our next hop there is nothing
	 * to change */
	router = orig_node_get_router(orig_node);
	if (router == neigh_node)
		goto update_tt;

	/* if this neighbor does not offer a better TQ we won't consider it */
	if (router && (router->tq_avg > neigh_node->tq_avg))
		goto update_tt;

	/* if the TQ is the same and the link not more symmetric we
	 * won't consider it either */
	if (router && (neigh_node->tq_avg == router->tq_avg)) {
		orig_node_tmp = router->orig_node;
		spin_lock_bh(&orig_node_tmp->ogm_cnt_lock);
		bcast_own_sum_orig =
			orig_node_tmp->bcast_own_sum[if_incoming->if_num];
		spin_unlock_bh(&orig_node_tmp->ogm_cnt_lock);

		orig_node_tmp = neigh_node->orig_node;
		spin_lock_bh(&orig_node_tmp->ogm_cnt_lock);
		bcast_own_sum_neigh =
			orig_node_tmp->bcast_own_sum[if_incoming->if_num];
		spin_unlock_bh(&orig_node_tmp->ogm_cnt_lock);

		if (bcast_own_sum_orig >= bcast_own_sum_neigh)
			goto update_tt;
	}

	update_routes(bat_priv, orig_node, neigh_node);

update_tt:
	/* I have to check for transtable changes only if the OGM has been
	 * sent through a primary interface */
	if (((batman_packet->orig != ethhdr->h_source) &&
	     (batman_packet->ttl > 2)) ||
	    (batman_packet->flags & PRIMARIES_FIRST_HOP))
		update_transtable(bat_priv, orig_node, tt_buff,
				  batman_packet->tt_num_changes,
				  batman_packet->ttvn,
				  batman_packet->tt_crc);

	if (orig_node->gw_flags != batman_packet->gw_flags)
		gw_node_update(bat_priv, orig_node, batman_packet->gw_flags);

	orig_node->gw_flags = batman_packet->gw_flags;

	/* restart gateway selection if fast or late switching was enabled */
	if ((orig_node->gw_flags) &&
	    (atomic_read(&bat_priv->gw_mode) == GW_MODE_CLIENT) &&
	    (atomic_read(&bat_priv->gw_sel_class) > 2))
		gw_check_election(bat_priv, orig_node);

	goto out;

unlock:
	rcu_read_unlock();
out:
	if (neigh_node)
		neigh_node_free_ref(neigh_node);
	if (router)
		neigh_node_free_ref(router);
}

/* checks whether the host restarted and is in the protection time.
 * returns:
 *  0 if the packet is to be accepted
 *  1 if the packet is to be ignored.
 */
static int window_protected(struct bat_priv *bat_priv,
			    int32_t seq_num_diff,
			    unsigned long *last_reset)
{
	if ((seq_num_diff <= -TQ_LOCAL_WINDOW_SIZE)
		|| (seq_num_diff >= EXPECTED_SEQNO_RANGE)) {
		if (time_after(jiffies, *last_reset +
			msecs_to_jiffies(RESET_PROTECTION_MS))) {

			*last_reset = jiffies;
			bat_dbg(DBG_BATMAN, bat_priv,
				"old packet received, start protection\n");

			return 0;
		} else
			return 1;
	}
	return 0;
}

/* processes a batman packet for all interfaces, adjusts the sequence number and
 * finds out whether it is a duplicate.
 * returns:
 *   1 the packet is a duplicate
 *   0 the packet has not yet been received
 *  -1 the packet is old and has been received while the seqno window
 *     was protected. Caller should drop it.
 */
static int count_real_packets(const struct ethhdr *ethhdr,
			      const struct batman_packet *batman_packet,
			      const struct hard_iface *if_incoming)
{
	struct bat_priv *bat_priv = netdev_priv(if_incoming->soft_iface);
	struct orig_node *orig_node;
	struct neigh_node *tmp_neigh_node;
	struct hlist_node *node;
	int is_duplicate = 0;
	int32_t seq_diff;
	int need_update = 0;
	int set_mark, ret = -1;

	orig_node = get_orig_node(bat_priv, batman_packet->orig);
	if (!orig_node)
		return 0;

	spin_lock_bh(&orig_node->ogm_cnt_lock);
	seq_diff = batman_packet->seqno - orig_node->last_real_seqno;

	/* signal to the caller that the packet is to be dropped. */
	if (window_protected(bat_priv, seq_diff,
			     &orig_node->batman_seqno_reset))
		goto out;

	rcu_read_lock();
	hlist_for_each_entry_rcu(tmp_neigh_node, node,
				 &orig_node->neigh_list, list) {

		is_duplicate |= get_bit_status(tmp_neigh_node->real_bits,
					       orig_node->last_real_seqno,
					       batman_packet->seqno);

		if (compare_eth(tmp_neigh_node->addr, ethhdr->h_source) &&
		    (tmp_neigh_node->if_incoming == if_incoming))
			set_mark = 1;
		else
			set_mark = 0;

		/* if the window moved, set the update flag. */
		need_update |= bit_get_packet(bat_priv,
					      tmp_neigh_node->real_bits,
					      seq_diff, set_mark);

		tmp_neigh_node->real_packet_count =
			bit_packet_count(tmp_neigh_node->real_bits);
	}
	rcu_read_unlock();

	if (need_update) {
		bat_dbg(DBG_BATMAN, bat_priv,
			"updating last_seqno: old %d, new %d\n",
			orig_node->last_real_seqno, batman_packet->seqno);
		orig_node->last_real_seqno = batman_packet->seqno;
	}

	ret = is_duplicate;

out:
	spin_unlock_bh(&orig_node->ogm_cnt_lock);
	orig_node_free_ref(orig_node);
	return ret;
}

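/* core OGM handler: validates an incoming originator message, updates the
 * own packet window, originator and neighbor state, and decides whether
 * the OGM is rebroadcast or dropped */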
void receive_bat_packet(const struct ethhdr *ethhdr,
			struct batman_packet *batman_packet,
			const unsigned char *tt_buff,
			struct hard_iface *if_incoming)
{
	struct bat_priv *bat_priv = netdev_priv(if_incoming->soft_iface);
	struct hard_iface *hard_iface;
	struct orig_node *orig_neigh_node, *orig_node;
	struct neigh_node *router = NULL, *router_router = NULL;
	struct neigh_node *orig_neigh_router = NULL;
	int has_directlink_flag;
	int is_my_addr = 0, is_my_orig = 0, is_my_oldorig = 0;
	int is_broadcast = 0, is_bidirectional, is_single_hop_neigh;
	int is_duplicate;
	uint32_t if_incoming_seqno;

	/* Silently drop when the batman packet is actually not a
	 * correct packet.
	 *
	 * This might happen if a packet is padded (e.g. Ethernet has a
	 * minimum frame length of 64 bytes) and the aggregation interprets
	 * it as an additional length.
	 *
	 * TODO: A more sane solution would be to have a bit in the
	 * batman_packet to detect whether the packet is the last
	 * packet in an aggregation. Here we expect that the padding
	 * is always zero (or not 0x01)
	 */
	if (batman_packet->packet_type != BAT_PACKET)
		return;

	/* could be changed by schedule_own_packet() */
	if_incoming_seqno = atomic_read(&if_incoming->seqno);

	has_directlink_flag = (batman_packet->flags & DIRECTLINK ? 1 : 0);

	is_single_hop_neigh = (compare_eth(ethhdr->h_source,
					   batman_packet->orig) ? 1 : 0);

	bat_dbg(DBG_BATMAN, bat_priv,
		"Received BATMAN packet via NB: %pM, IF: %s [%pM] "
		"(from OG: %pM, via prev OG: %pM, seqno %d, ttvn %u, "
		"crc %u, changes %u, td %d, TTL %d, V %d, IDF %d)\n",
		ethhdr->h_source, if_incoming->net_dev->name,
		if_incoming->net_dev->dev_addr, batman_packet->orig,
		batman_packet->prev_sender, batman_packet->seqno,
		batman_packet->ttvn, batman_packet->tt_crc,
		batman_packet->tt_num_changes, batman_packet->tq,
		batman_packet->ttl, batman_packet->version,
		has_directlink_flag);

	rcu_read_lock();
	list_for_each_entry_rcu(hard_iface, &hardif_list, list) {
		if (hard_iface->if_status != IF_ACTIVE)
			continue;

		if (hard_iface->soft_iface != if_incoming->soft_iface)
			continue;

		if (compare_eth(ethhdr->h_source,
				hard_iface->net_dev->dev_addr))
			is_my_addr = 1;

		if (compare_eth(batman_packet->orig,
				hard_iface->net_dev->dev_addr))
			is_my_orig = 1;

		if (compare_eth(batman_packet->prev_sender,
				hard_iface->net_dev->dev_addr))
			is_my_oldorig = 1;

		if (is_broadcast_ether_addr(ethhdr->h_source))
			is_broadcast = 1;
	}
	rcu_read_unlock();

	if (batman_packet->version != COMPAT_VERSION) {
		bat_dbg(DBG_BATMAN, bat_priv,
			"Drop packet: incompatible batman version (%i)\n",
			batman_packet->version);
		return;
	}

	if (is_my_addr) {
		bat_dbg(DBG_BATMAN, bat_priv,
			"Drop packet: received my own broadcast (sender: %pM"
			")\n",
			ethhdr->h_source);
		return;
	}

	if (is_broadcast) {
		bat_dbg(DBG_BATMAN, bat_priv, "Drop packet: "
			"ignoring all packets with broadcast source addr (sender: %pM"
			")\n", ethhdr->h_source);
		return;
	}

	if (is_my_orig) {
		unsigned long *word;
		int offset;

		orig_neigh_node = get_orig_node(bat_priv, ethhdr->h_source);
		if (!orig_neigh_node)
			return;

		/* neighbor has to indicate direct link and it has to
		 * come via the corresponding interface */
		/* save packet seqno for bidirectional check */
		if (has_directlink_flag &&
		    compare_eth(if_incoming->net_dev->dev_addr,
				batman_packet->orig)) {
			offset = if_incoming->if_num * NUM_WORDS;

			spin_lock_bh(&orig_neigh_node->ogm_cnt_lock);
			word = &(orig_neigh_node->bcast_own[offset]);
			bit_mark(word,
				 if_incoming_seqno - batman_packet->seqno - 2);
			orig_neigh_node->bcast_own_sum[if_incoming->if_num] =
				bit_packet_count(word);
			spin_unlock_bh(&orig_neigh_node->ogm_cnt_lock);
		}

		bat_dbg(DBG_BATMAN, bat_priv, "Drop packet: "
			"originator packet from myself (via neighbor)\n");
		orig_node_free_ref(orig_neigh_node);
		return;
	}

	if (is_my_oldorig) {
		bat_dbg(DBG_BATMAN, bat_priv,
			"Drop packet: ignoring all rebroadcast echos (sender: "
			"%pM)\n", ethhdr->h_source);
		return;
	}

	orig_node = get_orig_node(bat_priv, batman_packet->orig);
	if (!orig_node)
		return;

	is_duplicate = count_real_packets(ethhdr, batman_packet, if_incoming);

	if (is_duplicate == -1) {
		bat_dbg(DBG_BATMAN, bat_priv,
			"Drop packet: packet within seqno protection time "
			"(sender: %pM)\n", ethhdr->h_source);
		goto out;
	}

	if (batman_packet->tq == 0) {
		bat_dbg(DBG_BATMAN, bat_priv,
			"Drop packet: originator packet with tq equal 0\n");
		goto out;
	}

	router = orig_node_get_router(orig_node);
	if (router)
		router_router = orig_node_get_router(router->orig_node);

	/* avoid temporary routing loops */
	if (router && router_router &&
	    (compare_eth(router->addr, batman_packet->prev_sender)) &&
	    !(compare_eth(batman_packet->orig, batman_packet->prev_sender)) &&
	    (compare_eth(router->addr, router_router->addr))) {
		bat_dbg(DBG_BATMAN, bat_priv,
			"Drop packet: ignoring all rebroadcast packets that "
			"may make me loop (sender: %pM)\n", ethhdr->h_source);
		goto out;
	}

	/* if sender is a direct neighbor the sender mac equals
	 * originator mac */
	orig_neigh_node = (is_single_hop_neigh ?
			   orig_node :
			   get_orig_node(bat_priv, ethhdr->h_source));
	if (!orig_neigh_node)
		goto out;

	orig_neigh_router = orig_node_get_router(orig_neigh_node);

	/* drop packet if sender is not a direct neighbor and if we
	 * don't route towards it */
	if (!is_single_hop_neigh && (!orig_neigh_router)) {
		bat_dbg(DBG_BATMAN, bat_priv,
			"Drop packet: OGM via unknown neighbor!\n");
		goto out_neigh;
	}

	is_bidirectional = is_bidirectional_neigh(orig_node, orig_neigh_node,
						  batman_packet, if_incoming);

	bonding_save_primary(orig_node, orig_neigh_node, batman_packet);

	/* update ranking if it is not a duplicate or has the same
	 * seqno and similar ttl as the non-duplicate */
	if (is_bidirectional &&
	    (!is_duplicate ||
	     ((orig_node->last_real_seqno == batman_packet->seqno) &&
	      (orig_node->last_ttl - 3 <= batman_packet->ttl))))
		update_orig(bat_priv, orig_node, ethhdr, batman_packet,
			    if_incoming, tt_buff, is_duplicate);

	/* is single hop (direct) neighbor */
	if (is_single_hop_neigh) {

		/* mark direct link on incoming interface */
		schedule_forward_packet(orig_node, ethhdr, batman_packet,
					1, if_incoming);

		bat_dbg(DBG_BATMAN, bat_priv, "Forwarding packet: "
			"rebroadcast neighbor packet with direct link flag\n");
		goto out_neigh;
	}

	/* multihop originator */
	if (!is_bidirectional) {
		bat_dbg(DBG_BATMAN, bat_priv,
			"Drop packet: not received via bidirectional link\n");
		goto out_neigh;
	}

	if (is_duplicate) {
		bat_dbg(DBG_BATMAN, bat_priv,
			"Drop packet: duplicate packet received\n");
		goto out_neigh;
	}

	bat_dbg(DBG_BATMAN, bat_priv,
		"Forwarding packet: rebroadcast originator packet\n");
	schedule_forward_packet(orig_node, ethhdr, batman_packet,
				0, if_incoming);

out_neigh:
	if ((orig_neigh_node) && (!is_single_hop_neigh))
		orig_node_free_ref(orig_neigh_node);
out:
	if (router)
		neigh_node_free_ref(router);
	if (router_router)
		neigh_node_free_ref(router_router);
	if (orig_neigh_router)
		neigh_node_free_ref(orig_neigh_router);

	orig_node_free_ref(orig_node);
}

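/* receive handler for BAT_PACKET frames: performs basic sanity checks on
 * the skb and hands the (possibly aggregated) OGMs over to the
 * aggregation code */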
int recv_bat_packet(struct sk_buff *skb, struct hard_iface *hard_iface)
{
	struct ethhdr *ethhdr;

	/* drop packet if it does not have the necessary minimum size */
	if (unlikely(!pskb_may_pull(skb, sizeof(struct batman_packet))))
		return NET_RX_DROP;

	ethhdr = (struct ethhdr *)skb_mac_header(skb);

	/* packet with broadcast indication but unicast recipient */
	if (!is_broadcast_ether_addr(ethhdr->h_dest))
		return NET_RX_DROP;

	/* packet with broadcast sender address */
	if (is_broadcast_ether_addr(ethhdr->h_source))
		return NET_RX_DROP;

	/* create a copy of the skb, if needed, to modify it. */
	if (skb_cow(skb, 0) < 0)
		return NET_RX_DROP;

	/* keep skb linear */
	if (skb_linearize(skb) < 0)
		return NET_RX_DROP;

	ethhdr = (struct ethhdr *)skb_mac_header(skb);

	receive_aggr_bat_packet(ethhdr,
				skb->data,
				skb_headlen(skb),
				hard_iface);

	kfree_skb(skb);
	return NET_RX_SUCCESS;
}

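/* handle an icmp packet addressed to this node: pass it to the local
 * batman socket or, if it is an echo request, send an echo reply back
 * towards the originator */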
static int recv_my_icmp_packet(struct bat_priv *bat_priv,
			       struct sk_buff *skb, size_t icmp_len)
{
	struct hard_iface *primary_if = NULL;
	struct orig_node *orig_node = NULL;
	struct neigh_node *router = NULL;
	struct icmp_packet_rr *icmp_packet;
	int ret = NET_RX_DROP;

	icmp_packet = (struct icmp_packet_rr *)skb->data;

	/* add data to device queue */
	if (icmp_packet->msg_type != ECHO_REQUEST) {
		bat_socket_receive_packet(icmp_packet, icmp_len);
		goto out;
	}

	primary_if = primary_if_get_selected(bat_priv);
	if (!primary_if)
		goto out;

	/* answer echo request (ping) */
	/* get routing information */
	orig_node = orig_hash_find(bat_priv, icmp_packet->orig);
	if (!orig_node)
		goto out;

	router = orig_node_get_router(orig_node);
	if (!router)
		goto out;

	/* create a copy of the skb, if needed, to modify it. */
	if (skb_cow(skb, sizeof(struct ethhdr)) < 0)
		goto out;

	icmp_packet = (struct icmp_packet_rr *)skb->data;

	memcpy(icmp_packet->dst, icmp_packet->orig, ETH_ALEN);
	memcpy(icmp_packet->orig, primary_if->net_dev->dev_addr, ETH_ALEN);
	icmp_packet->msg_type = ECHO_REPLY;
	icmp_packet->ttl = TTL;

	send_skb_packet(skb, router->if_incoming, router->addr);
	ret = NET_RX_SUCCESS;

out:
	if (primary_if)
		hardif_free_ref(primary_if);
	if (router)
		neigh_node_free_ref(router);
	if (orig_node)
		orig_node_free_ref(orig_node);
	return ret;
}

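/* answer an icmp echo request whose ttl has run out with a TTL_EXCEEDED
 * message sent back to its originator */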
static int recv_icmp_ttl_exceeded(struct bat_priv *bat_priv,
				  struct sk_buff *skb)
{
	struct hard_iface *primary_if = NULL;
	struct orig_node *orig_node = NULL;
	struct neigh_node *router = NULL;
	struct icmp_packet *icmp_packet;
	int ret = NET_RX_DROP;

	icmp_packet = (struct icmp_packet *)skb->data;

	/* send TTL exceeded if packet is an echo request (traceroute) */
	if (icmp_packet->msg_type != ECHO_REQUEST) {
		pr_debug("Warning - can't forward icmp packet from %pM to "
			 "%pM: ttl exceeded\n", icmp_packet->orig,
			 icmp_packet->dst);
		goto out;
	}

	primary_if = primary_if_get_selected(bat_priv);
	if (!primary_if)
		goto out;

	/* get routing information */
	orig_node = orig_hash_find(bat_priv, icmp_packet->orig);
	if (!orig_node)
		goto out;

	router = orig_node_get_router(orig_node);
	if (!router)
		goto out;

	/* create a copy of the skb, if needed, to modify it. */
	if (skb_cow(skb, sizeof(struct ethhdr)) < 0)
		goto out;

	icmp_packet = (struct icmp_packet *)skb->data;

	memcpy(icmp_packet->dst, icmp_packet->orig, ETH_ALEN);
	memcpy(icmp_packet->orig, primary_if->net_dev->dev_addr, ETH_ALEN);
	icmp_packet->msg_type = TTL_EXCEEDED;
	icmp_packet->ttl = TTL;

	send_skb_packet(skb, router->if_incoming, router->addr);
	ret = NET_RX_SUCCESS;

out:
	if (primary_if)
		hardif_free_ref(primary_if);
	if (router)
		neigh_node_free_ref(router);
	if (orig_node)
		orig_node_free_ref(orig_node);
	return ret;
}

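/* receive handler for batman icmp packets: appends record route data,
 * handles packets addressed to this node and forwards everything else
 * along the route to its destination */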
int recv_icmp_packet(struct sk_buff *skb, struct hard_iface *recv_if)
{
	struct bat_priv *bat_priv = netdev_priv(recv_if->soft_iface);
	struct icmp_packet_rr *icmp_packet;
	struct ethhdr *ethhdr;
	struct orig_node *orig_node = NULL;
	struct neigh_node *router = NULL;
	int hdr_size = sizeof(struct icmp_packet);
	int ret = NET_RX_DROP;

	/**
	 * we truncate all incoming icmp packets if they don't match our size
	 */
	if (skb->len >= sizeof(struct icmp_packet_rr))
		hdr_size = sizeof(struct icmp_packet_rr);

	/* drop packet if it does not have the necessary minimum size */
	if (unlikely(!pskb_may_pull(skb, hdr_size)))
		goto out;

	ethhdr = (struct ethhdr *)skb_mac_header(skb);

	/* packet with unicast indication but broadcast recipient */
	if (is_broadcast_ether_addr(ethhdr->h_dest))
		goto out;

	/* packet with broadcast sender address */
	if (is_broadcast_ether_addr(ethhdr->h_source))
		goto out;

	/* not for me */
	if (!is_my_mac(ethhdr->h_dest))
		goto out;

	icmp_packet = (struct icmp_packet_rr *)skb->data;

	/* add record route information if not full */
	if ((hdr_size == sizeof(struct icmp_packet_rr)) &&
	    (icmp_packet->rr_cur < BAT_RR_LEN)) {
		memcpy(&(icmp_packet->rr[icmp_packet->rr_cur]),
			ethhdr->h_dest, ETH_ALEN);
		icmp_packet->rr_cur++;
	}

	/* packet for me */
	if (is_my_mac(icmp_packet->dst))
		return recv_my_icmp_packet(bat_priv, skb, hdr_size);

	/* TTL exceeded */
	if (icmp_packet->ttl < 2)
		return recv_icmp_ttl_exceeded(bat_priv, skb);

	/* get routing information */
	orig_node = orig_hash_find(bat_priv, icmp_packet->dst);
	if (!orig_node)
		goto out;

	router = orig_node_get_router(orig_node);
	if (!router)
		goto out;

	/* create a copy of the skb, if needed, to modify it. */
	if (skb_cow(skb, sizeof(struct ethhdr)) < 0)
		goto out;

	icmp_packet = (struct icmp_packet_rr *)skb->data;

	/* decrement ttl */
	icmp_packet->ttl--;

	/* route it */
	send_skb_packet(skb, router->if_incoming, router->addr);
	ret = NET_RX_SUCCESS;

out:
	if (router)
		neigh_node_free_ref(router);
	if (orig_node)
		orig_node_free_ref(orig_node);
	return ret;
}

/* In the bonding case, send the packets in a round
 * robin fashion over the remaining interfaces.
 *
 * This method rotates the bonding list and increases the
 * returned router's refcount. */
static struct neigh_node *find_bond_router(struct orig_node *primary_orig,
					   const struct hard_iface *recv_if)
{
	struct neigh_node *tmp_neigh_node;
	struct neigh_node *router = NULL, *first_candidate = NULL;

	rcu_read_lock();
	list_for_each_entry_rcu(tmp_neigh_node, &primary_orig->bond_list,
				bonding_list) {
		if (!first_candidate)
			first_candidate = tmp_neigh_node;

		/* recv_if == NULL on the first node. */
		if (tmp_neigh_node->if_incoming == recv_if)
			continue;

		if (!atomic_inc_not_zero(&tmp_neigh_node->refcount))
			continue;

		router = tmp_neigh_node;
		break;
	}

	/* use the first candidate if nothing was found. */
	if (!router && first_candidate &&
	    atomic_inc_not_zero(&first_candidate->refcount))
		router = first_candidate;

	if (!router)
		goto out;

	/* selected should point to the next element
	 * after the current router */
	spin_lock_bh(&primary_orig->neigh_list_lock);
	/* this is a list_move(), which unfortunately
	 * does not exist as an rcu version */
	list_del_rcu(&primary_orig->bond_list);
	list_add_rcu(&primary_orig->bond_list,
		     &router->bonding_list);
	spin_unlock_bh(&primary_orig->neigh_list_lock);

out:
	rcu_read_unlock();
	return router;
}

/* Interface Alternating: Use the best of the
 * remaining candidates which are not using
 * this interface.
 *
 * Increases the returned router's refcount */
static struct neigh_node *find_ifalter_router(struct orig_node *primary_orig,
					      const struct hard_iface *recv_if)
{
	struct neigh_node *tmp_neigh_node;
	struct neigh_node *router = NULL, *first_candidate = NULL;

	rcu_read_lock();
	list_for_each_entry_rcu(tmp_neigh_node, &primary_orig->bond_list,
				bonding_list) {
		if (!first_candidate)
			first_candidate = tmp_neigh_node;

		/* recv_if == NULL on the first node. */
		if (tmp_neigh_node->if_incoming == recv_if)
			continue;

		if (!atomic_inc_not_zero(&tmp_neigh_node->refcount))
			continue;

		/* if we don't have a router yet
		 * or this one is better, choose it. */
		if ((!router) ||
		    (tmp_neigh_node->tq_avg > router->tq_avg)) {
			/* decrement refcount of
			 * previously selected router */
			if (router)
				neigh_node_free_ref(router);

			router = tmp_neigh_node;
			atomic_inc_not_zero(&router->refcount);
		}

		neigh_node_free_ref(tmp_neigh_node);
	}

	/* use the first candidate if nothing was found. */
	if (!router && first_candidate &&
	    atomic_inc_not_zero(&first_candidate->refcount))
		router = first_candidate;

	rcu_read_unlock();
	return router;
}

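/* receive handler for translation table queries: answers or forwards
 * TT_REQUEST packets and applies or forwards TT_RESPONSE packets */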
int recv_tt_query(struct sk_buff *skb, struct hard_iface *recv_if)
{
	struct bat_priv *bat_priv = netdev_priv(recv_if->soft_iface);
	struct tt_query_packet *tt_query;
	struct ethhdr *ethhdr;

	/* drop packet if it does not have the necessary minimum size */
	if (unlikely(!pskb_may_pull(skb, sizeof(struct tt_query_packet))))
		goto out;

	/* I could need to modify it */
	if (skb_cow(skb, sizeof(struct tt_query_packet)) < 0)
		goto out;

	ethhdr = (struct ethhdr *)skb_mac_header(skb);

	/* packet with unicast indication but broadcast recipient */
	if (is_broadcast_ether_addr(ethhdr->h_dest))
		goto out;

	/* packet with broadcast sender address */
	if (is_broadcast_ether_addr(ethhdr->h_source))
		goto out;

	tt_query = (struct tt_query_packet *)skb->data;

	tt_query->tt_data = ntohs(tt_query->tt_data);

	switch (tt_query->flags & TT_QUERY_TYPE_MASK) {
	case TT_REQUEST:
		/* If we cannot provide an answer the tt_request is
		 * forwarded */
		if (!send_tt_response(bat_priv, tt_query)) {
			bat_dbg(DBG_TT, bat_priv,
				"Routing TT_REQUEST to %pM [%c]\n",
				tt_query->dst,
				(tt_query->flags & TT_FULL_TABLE ? 'F' : '.'));
			tt_query->tt_data = htons(tt_query->tt_data);
			return route_unicast_packet(skb, recv_if);
		}
		break;
	case TT_RESPONSE:
		/* packet needs to be linearized to access the TT changes */
		if (skb_linearize(skb) < 0)
			goto out;

		if (is_my_mac(tt_query->dst))
			handle_tt_response(bat_priv, tt_query);
		else {
			bat_dbg(DBG_TT, bat_priv,
				"Routing TT_RESPONSE to %pM [%c]\n",
				tt_query->dst,
				(tt_query->flags & TT_FULL_TABLE ? 'F' : '.'));
			tt_query->tt_data = htons(tt_query->tt_data);
			return route_unicast_packet(skb, recv_if);
		}
		break;
	}

out:
	/* returning NET_RX_DROP will make the caller function kfree the skb */
	return NET_RX_DROP;
}

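/* receive handler for roaming advertisements: records the roamed client
 * in the global translation table and marks the start of the roaming
 * phase */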
int recv_roam_adv(struct sk_buff *skb, struct hard_iface *recv_if)
{
	struct bat_priv *bat_priv = netdev_priv(recv_if->soft_iface);
	struct roam_adv_packet *roam_adv_packet;
	struct orig_node *orig_node;
	struct ethhdr *ethhdr;

	/* drop packet if it does not have the necessary minimum size */
	if (unlikely(!pskb_may_pull(skb, sizeof(struct roam_adv_packet))))
		goto out;

	ethhdr = (struct ethhdr *)skb_mac_header(skb);

	/* packet with unicast indication but broadcast recipient */
	if (is_broadcast_ether_addr(ethhdr->h_dest))
		goto out;

	/* packet with broadcast sender address */
	if (is_broadcast_ether_addr(ethhdr->h_source))
		goto out;

	roam_adv_packet = (struct roam_adv_packet *)skb->data;

	if (!is_my_mac(roam_adv_packet->dst))
		return route_unicast_packet(skb, recv_if);

	orig_node = orig_hash_find(bat_priv, roam_adv_packet->src);
	if (!orig_node)
		goto out;

	bat_dbg(DBG_TT, bat_priv, "Received ROAMING_ADV from %pM "
		"(client %pM)\n", roam_adv_packet->src,
		roam_adv_packet->client);

	tt_global_add(bat_priv, orig_node, roam_adv_packet->client,
		      atomic_read(&orig_node->last_ttvn) + 1, true);

	/* Roaming phase starts: I have new information but the ttvn has not
	 * been incremented yet. This flag will make me check all the incoming
	 * packets for the correct destination. */
	bat_priv->tt_poss_change = true;

	orig_node_free_ref(orig_node);
out:
	/* returning NET_RX_DROP will make the caller function kfree the skb */
	return NET_RX_DROP;
}

/* find a suitable router for this originator, and use
 * bonding if possible. increases the found neighbor's
 * refcount. */
struct neigh_node *find_router(struct bat_priv *bat_priv,
			       struct orig_node *orig_node,
			       const struct hard_iface *recv_if)
{
	struct orig_node *primary_orig_node;
	struct orig_node *router_orig;
	struct neigh_node *router;
	static uint8_t zero_mac[ETH_ALEN] = {0, 0, 0, 0, 0, 0};
	int bonding_enabled;

	if (!orig_node)
		return NULL;

	router = orig_node_get_router(orig_node);
	if (!router)
		goto err;

	/* without bonding, the first node should
	 * always choose the default router. */
	bonding_enabled = atomic_read(&bat_priv->bonding);

	rcu_read_lock();
	/* select default router to output */
	router_orig = router->orig_node;
	if (!router_orig)
		goto err_unlock;

	if ((!recv_if) && (!bonding_enabled))
		goto return_router;

	/* if we have something in the primary_addr, we can search
	 * for a potential bonding candidate. */
	if (compare_eth(router_orig->primary_addr, zero_mac))
		goto return_router;

	/* find the orig_node which has the primary interface. might
	 * even be the same as our router_orig in many cases */
	if (compare_eth(router_orig->primary_addr, router_orig->orig)) {
		primary_orig_node = router_orig;
	} else {
		primary_orig_node = orig_hash_find(bat_priv,
						   router_orig->primary_addr);
		if (!primary_orig_node)
			goto return_router;

		orig_node_free_ref(primary_orig_node);
	}

	/* with fewer than 2 candidates, we can't do any
	 * bonding and prefer the original router. */
	if (atomic_read(&primary_orig_node->bond_candidates) < 2)
		goto return_router;

	/* all nodes between should choose a candidate which
	 * is not on the interface where the packet came
	 * in. */
	neigh_node_free_ref(router);

	if (bonding_enabled)
		router = find_bond_router(primary_orig_node, recv_if);
	else
		router = find_ifalter_router(primary_orig_node, recv_if);

return_router:
	if (router && router->if_incoming->if_status != IF_ACTIVE)
		goto err_unlock;

	rcu_read_unlock();
	return router;
err_unlock:
	rcu_read_unlock();
err:
	if (router)
		neigh_node_free_ref(router);
	return NULL;
}

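/* common sanity checks for received unicast packets: minimum length,
 * sane source/destination addresses and being addressed to this node;
 * returns 0 on success, -1 otherwise */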
static int check_unicast_packet(struct sk_buff *skb, int hdr_size)
{
	struct ethhdr *ethhdr;

	/* drop packet if it does not have the necessary minimum size */
	if (unlikely(!pskb_may_pull(skb, hdr_size)))
		return -1;

	ethhdr = (struct ethhdr *)skb_mac_header(skb);

	/* packet with unicast indication but broadcast recipient */
	if (is_broadcast_ether_addr(ethhdr->h_dest))
		return -1;

	/* packet with broadcast sender address */
	if (is_broadcast_ether_addr(ethhdr->h_source))
		return -1;

	/* not for me */
	if (!is_my_mac(ethhdr->h_dest))
		return -1;

	return 0;
}

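/* forward a unicast packet towards its destination, fragmenting it or
 * reassembling previously buffered fragments on the way if necessary */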
int route_unicast_packet(struct sk_buff *skb, struct hard_iface *recv_if)
{
	struct bat_priv *bat_priv = netdev_priv(recv_if->soft_iface);
	struct orig_node *orig_node = NULL;
	struct neigh_node *neigh_node = NULL;
	struct unicast_packet *unicast_packet;
	struct ethhdr *ethhdr = (struct ethhdr *)skb_mac_header(skb);
	int ret = NET_RX_DROP;
	struct sk_buff *new_skb;

	unicast_packet = (struct unicast_packet *)skb->data;

	/* TTL exceeded */
	if (unicast_packet->ttl < 2) {
		pr_debug("Warning - can't forward unicast packet from %pM to "
			 "%pM: ttl exceeded\n", ethhdr->h_source,
			 unicast_packet->dest);
		goto out;
	}

	/* get routing information */
	orig_node = orig_hash_find(bat_priv, unicast_packet->dest);
	if (!orig_node)
		goto out;

	/* find_router() increases neigh_node's refcount if found. */
	neigh_node = find_router(bat_priv, orig_node, recv_if);
	if (!neigh_node)
		goto out;

	/* create a copy of the skb, if needed, to modify it. */
	if (skb_cow(skb, sizeof(struct ethhdr)) < 0)
		goto out;

	unicast_packet = (struct unicast_packet *)skb->data;

	if (unicast_packet->packet_type == BAT_UNICAST &&
	    atomic_read(&bat_priv->fragmentation) &&
	    skb->len > neigh_node->if_incoming->net_dev->mtu) {
		ret = frag_send_skb(skb, bat_priv,
				    neigh_node->if_incoming, neigh_node->addr);
		goto out;
	}

	if (unicast_packet->packet_type == BAT_UNICAST_FRAG &&
	    frag_can_reassemble(skb, neigh_node->if_incoming->net_dev->mtu)) {

		ret = frag_reassemble_skb(skb, bat_priv, &new_skb);

		if (ret == NET_RX_DROP)
			goto out;

		/* packet was buffered for late merge */
		if (!new_skb) {
			ret = NET_RX_SUCCESS;
			goto out;
		}

		skb = new_skb;
		unicast_packet = (struct unicast_packet *)skb->data;
	}

	/* decrement ttl */
	unicast_packet->ttl--;

	/* route it */
	send_skb_packet(skb, neigh_node->if_incoming, neigh_node->addr);
	ret = NET_RX_SUCCESS;

out:
	if (neigh_node)
		neigh_node_free_ref(neigh_node);
	if (orig_node)
		orig_node_free_ref(orig_node);
	return ret;
}

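/* check whether the translation table version number carried in a unicast
 * packet is still current and, if it is not, try to reroute the packet to
 * the originator that now serves the destination client */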
static int check_unicast_ttvn(struct bat_priv *bat_priv,
			       struct sk_buff *skb) {
	uint8_t curr_ttvn;
	struct orig_node *orig_node;
	struct ethhdr *ethhdr;
	struct hard_iface *primary_if;
	struct unicast_packet *unicast_packet;
	bool tt_poss_change;

	/* I could need to modify it */
	if (skb_cow(skb, sizeof(struct unicast_packet)) < 0)
		return 0;

	unicast_packet = (struct unicast_packet *)skb->data;

	if (is_my_mac(unicast_packet->dest)) {
		tt_poss_change = bat_priv->tt_poss_change;
		curr_ttvn = (uint8_t)atomic_read(&bat_priv->ttvn);
	} else {
		orig_node = orig_hash_find(bat_priv, unicast_packet->dest);

		if (!orig_node)
			return 0;

		curr_ttvn = (uint8_t)atomic_read(&orig_node->last_ttvn);
		tt_poss_change = orig_node->tt_poss_change;
		orig_node_free_ref(orig_node);
	}

	/* Check whether I have to reroute the packet */
	if (seq_before(unicast_packet->ttvn, curr_ttvn) || tt_poss_change) {
		/* Linearize the skb before accessing it */
		if (skb_linearize(skb) < 0)
			return 0;

		ethhdr = (struct ethhdr *)(skb->data +
			sizeof(struct unicast_packet));
		orig_node = transtable_search(bat_priv, ethhdr->h_dest);

		if (!orig_node) {
			if (!is_my_client(bat_priv, ethhdr->h_dest))
				return 0;
			primary_if = primary_if_get_selected(bat_priv);
			if (!primary_if)
				return 0;
			memcpy(unicast_packet->dest,
			       primary_if->net_dev->dev_addr, ETH_ALEN);
			hardif_free_ref(primary_if);
		} else {
			memcpy(unicast_packet->dest, orig_node->orig,
			       ETH_ALEN);
			curr_ttvn = (uint8_t)
				atomic_read(&orig_node->last_ttvn);
			orig_node_free_ref(orig_node);
		}

		bat_dbg(DBG_ROUTES, bat_priv, "TTVN mismatch (old_ttvn %u "
			"new_ttvn %u)! Rerouting unicast packet (for %pM) to "
			"%pM\n", unicast_packet->ttvn, curr_ttvn,
			ethhdr->h_dest, unicast_packet->dest);

		unicast_packet->ttvn = curr_ttvn;
	}
	return 1;
}

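/* receive handler for unicast packets: delivers packets addressed to this
 * node to the soft interface and routes all others towards their
 * destination */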
int recv_unicast_packet(struct sk_buff *skb, struct hard_iface *recv_if)
{
	struct bat_priv *bat_priv = netdev_priv(recv_if->soft_iface);
	struct unicast_packet *unicast_packet;
	int hdr_size = sizeof(*unicast_packet);

	if (check_unicast_packet(skb, hdr_size) < 0)
		return NET_RX_DROP;

	if (!check_unicast_ttvn(bat_priv, skb))
		return NET_RX_DROP;

	unicast_packet = (struct unicast_packet *)skb->data;

	/* packet for me */
	if (is_my_mac(unicast_packet->dest)) {
		interface_rx(recv_if->soft_iface, skb, recv_if, hdr_size);
		return NET_RX_SUCCESS;
	}

	return route_unicast_packet(skb, recv_if);
}

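/* receive handler for fragmented unicast packets: reassembles fragments
 * destined for this node and routes the remaining ones like regular
 * unicast traffic */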
int recv_ucast_frag_packet(struct sk_buff *skb, struct hard_iface *recv_if)
{
	struct bat_priv *bat_priv = netdev_priv(recv_if->soft_iface);
	struct unicast_frag_packet *unicast_packet;
	int hdr_size = sizeof(*unicast_packet);
	struct sk_buff *new_skb = NULL;
	int ret;

	if (check_unicast_packet(skb, hdr_size) < 0)
		return NET_RX_DROP;

	if (!check_unicast_ttvn(bat_priv, skb))
		return NET_RX_DROP;

	unicast_packet = (struct unicast_frag_packet *)skb->data;

	/* packet for me */
	if (is_my_mac(unicast_packet->dest)) {

		ret = frag_reassemble_skb(skb, bat_priv, &new_skb);

		if (ret == NET_RX_DROP)
			return NET_RX_DROP;

		/* packet was buffered for late merge */
		if (!new_skb)
			return NET_RX_SUCCESS;

		interface_rx(recv_if->soft_iface, new_skb, recv_if,
			     sizeof(struct unicast_packet));
		return NET_RX_SUCCESS;
	}

	return route_unicast_packet(skb, recv_if);
}

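/* receive handler for broadcast packets: filters duplicates and old
 * packets via the broadcast seqno window, queues the packet for
 * rebroadcast and delivers a copy to the local soft interface */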
int recv_bcast_packet(struct sk_buff *skb, struct hard_iface *recv_if)
{
	struct bat_priv *bat_priv = netdev_priv(recv_if->soft_iface);
	struct orig_node *orig_node = NULL;
	struct bcast_packet *bcast_packet;
	struct ethhdr *ethhdr;
	int hdr_size = sizeof(*bcast_packet);
	int ret = NET_RX_DROP;
	int32_t seq_diff;

	/* drop packet if it does not have the necessary minimum size */
	if (unlikely(!pskb_may_pull(skb, hdr_size)))
		goto out;

	ethhdr = (struct ethhdr *)skb_mac_header(skb);

	/* packet with broadcast indication but unicast recipient */
	if (!is_broadcast_ether_addr(ethhdr->h_dest))
		goto out;

	/* packet with broadcast sender address */
	if (is_broadcast_ether_addr(ethhdr->h_source))
		goto out;

	/* ignore broadcasts sent by myself */
	if (is_my_mac(ethhdr->h_source))
		goto out;

	bcast_packet = (struct bcast_packet *)skb->data;

	/* ignore broadcasts originated by myself */
	if (is_my_mac(bcast_packet->orig))
		goto out;

	if (bcast_packet->ttl < 2)
		goto out;

	orig_node = orig_hash_find(bat_priv, bcast_packet->orig);
	if (!orig_node)
		goto out;

	spin_lock_bh(&orig_node->bcast_seqno_lock);

	/* check whether the packet is a duplicate */
	if (get_bit_status(orig_node->bcast_bits, orig_node->last_bcast_seqno,
			   ntohl(bcast_packet->seqno)))
		goto spin_unlock;

	seq_diff = ntohl(bcast_packet->seqno) - orig_node->last_bcast_seqno;

	/* check whether the packet is old and the host just restarted. */
	if (window_protected(bat_priv, seq_diff,
			     &orig_node->bcast_seqno_reset))
		goto spin_unlock;

	/* mark broadcast in flood history, update window position
	 * if required. */
	if (bit_get_packet(bat_priv, orig_node->bcast_bits, seq_diff, 1))
		orig_node->last_bcast_seqno = ntohl(bcast_packet->seqno);

	spin_unlock_bh(&orig_node->bcast_seqno_lock);

	/* rebroadcast packet */
	add_bcast_packet_to_list(bat_priv, skb, 1);

	/* broadcast for me */
	interface_rx(recv_if->soft_iface, skb, recv_if, hdr_size);
	ret = NET_RX_SUCCESS;
	goto out;

spin_unlock:
	spin_unlock_bh(&orig_node->bcast_seqno_lock);
out:
	if (orig_node)
		orig_node_free_ref(orig_node);
	return ret;
}

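/* receive handler for vis packets: feeds server sync and client update
 * packets into the visualization code */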
int recv_vis_packet(struct sk_buff *skb, struct hard_iface *recv_if)
{
	struct vis_packet *vis_packet;
	struct ethhdr *ethhdr;
	struct bat_priv *bat_priv = netdev_priv(recv_if->soft_iface);
	int hdr_size = sizeof(*vis_packet);

	/* keep skb linear */
	if (skb_linearize(skb) < 0)
		return NET_RX_DROP;

	if (unlikely(!pskb_may_pull(skb, hdr_size)))
		return NET_RX_DROP;

	vis_packet = (struct vis_packet *)skb->data;
	ethhdr = (struct ethhdr *)skb_mac_header(skb);

	/* not for me */
	if (!is_my_mac(ethhdr->h_dest))
		return NET_RX_DROP;

	/* ignore own packets */
	if (is_my_mac(vis_packet->vis_orig))
		return NET_RX_DROP;

	if (is_my_mac(vis_packet->sender_orig))
		return NET_RX_DROP;

	switch (vis_packet->vis_type) {
	case VIS_TYPE_SERVER_SYNC:
		receive_server_sync_packet(bat_priv, vis_packet,
					   skb_headlen(skb));
		break;

	case VIS_TYPE_CLIENT_UPDATE:
		receive_client_update_packet(bat_priv, vis_packet,
					     skb_headlen(skb));
		break;

	default:	/* ignore unknown packet */
		break;
	}

	/* We take a copy of the data in the packet, so we should
	 * always free the skbuf. */
	return NET_RX_DROP;
}