routing.c 41 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
71278127912801281128212831284128512861287128812891290129112921293129412951296129712981299130013011302130313041305130613071308130913101311131213131314131513161317131813191320132113221323132413251326132713281329133013311332133313341335133613371338133913401341134213431344134513461347134813491350135113521353135413551356135713581359136013611362136313641365136613671368136913701371137213731374137513761377137813791380138113821383138413851386138713881389139013911392139313941395139613971398139914001401140214031404140514061407140814091410141114121413141414151416141714181419142014211422142314241425142614271428142914301431143214331434143514361437143814391440144114421443144414451446144714481449145014511452145314541455145614571458145914601461146214631464146514661467146814691470147114721473147414751476147714781479148014811482148314841485148614871488148914901491149214931494
  1. /*
  2. * Copyright (C) 2007-2011 B.A.T.M.A.N. contributors:
  3. *
  4. * Marek Lindner, Simon Wunderlich
  5. *
  6. * This program is free software; you can redistribute it and/or
  7. * modify it under the terms of version 2 of the GNU General Public
  8. * License as published by the Free Software Foundation.
  9. *
  10. * This program is distributed in the hope that it will be useful, but
  11. * WITHOUT ANY WARRANTY; without even the implied warranty of
  12. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
  13. * General Public License for more details.
  14. *
  15. * You should have received a copy of the GNU General Public License
  16. * along with this program; if not, write to the Free Software
  17. * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
  18. * 02110-1301, USA
  19. *
  20. */
  21. #include "main.h"
  22. #include "routing.h"
  23. #include "send.h"
  24. #include "hash.h"
  25. #include "soft-interface.h"
  26. #include "hard-interface.h"
  27. #include "icmp_socket.h"
  28. #include "translation-table.h"
  29. #include "originator.h"
  30. #include "ring_buffer.h"
  31. #include "vis.h"
  32. #include "aggregation.h"
  33. #include "gateway_common.h"
  34. #include "gateway_client.h"
  35. #include "unicast.h"
  36. void slide_own_bcast_window(struct batman_if *batman_if)
  37. {
  38. struct bat_priv *bat_priv = netdev_priv(batman_if->soft_iface);
  39. struct hashtable_t *hash = bat_priv->orig_hash;
  40. struct hlist_node *walk;
  41. struct hlist_head *head;
  42. struct element_t *bucket;
  43. struct orig_node *orig_node;
  44. unsigned long *word;
  45. int i;
  46. size_t word_index;
  47. spin_lock_bh(&bat_priv->orig_hash_lock);
  48. for (i = 0; i < hash->size; i++) {
  49. head = &hash->table[i];
  50. rcu_read_lock();
  51. hlist_for_each_entry_rcu(bucket, walk, head, hlist) {
  52. orig_node = bucket->data;
  53. spin_lock_bh(&orig_node->ogm_cnt_lock);
  54. word_index = batman_if->if_num * NUM_WORDS;
  55. word = &(orig_node->bcast_own[word_index]);
  56. bit_get_packet(bat_priv, word, 1, 0);
  57. orig_node->bcast_own_sum[batman_if->if_num] =
  58. bit_packet_count(word);
  59. spin_unlock_bh(&orig_node->ogm_cnt_lock);
  60. }
  61. rcu_read_unlock();
  62. }
  63. spin_unlock_bh(&bat_priv->orig_hash_lock);
  64. }
  65. static void update_HNA(struct bat_priv *bat_priv, struct orig_node *orig_node,
  66. unsigned char *hna_buff, int hna_buff_len)
  67. {
  68. if ((hna_buff_len != orig_node->hna_buff_len) ||
  69. ((hna_buff_len > 0) &&
  70. (orig_node->hna_buff_len > 0) &&
  71. (memcmp(orig_node->hna_buff, hna_buff, hna_buff_len) != 0))) {
  72. if (orig_node->hna_buff_len > 0)
  73. hna_global_del_orig(bat_priv, orig_node,
  74. "originator changed hna");
  75. if ((hna_buff_len > 0) && (hna_buff))
  76. hna_global_add_orig(bat_priv, orig_node,
  77. hna_buff, hna_buff_len);
  78. }
  79. }
/**
 * update_route - switch the next hop used to reach an originator
 * @bat_priv: private data of the soft interface
 * @orig_node: originator whose router is being replaced
 * @neigh_node: the new next hop (NULL deletes the route)
 * @hna_buff: HNA buffer announced by the originator
 * @hna_buff_len: length of @hna_buff in bytes
 *
 * NOTE(review): the "route changed" branch dereferences both
 * @neigh_node and orig_node->router - callers (update_routes) only
 * invoke this when router != neigh_node, so both being NULL cannot
 * happen; confirm if new call sites are added.
 */
static void update_route(struct bat_priv *bat_priv,
			 struct orig_node *orig_node,
			 struct neigh_node *neigh_node,
			 unsigned char *hna_buff, int hna_buff_len)
{
	struct neigh_node *neigh_node_tmp;

	/* route deleted */
	if ((orig_node->router) && (!neigh_node)) {
		bat_dbg(DBG_ROUTES, bat_priv, "Deleting route towards: %pM\n",
			orig_node->orig);
		hna_global_del_orig(bat_priv, orig_node,
				    "originator timed out");

		/* route added */
	} else if ((!orig_node->router) && (neigh_node)) {
		bat_dbg(DBG_ROUTES, bat_priv,
			"Adding route towards: %pM (via %pM)\n",
			orig_node->orig, neigh_node->addr);
		hna_global_add_orig(bat_priv, orig_node,
				    hna_buff, hna_buff_len);

		/* route changed */
	} else {
		bat_dbg(DBG_ROUTES, bat_priv,
			"Changing route towards: %pM "
			"(now via %pM - was via %pM)\n",
			orig_node->orig, neigh_node->addr,
			orig_node->router->addr);
	}

	/* take a reference on the new router before publishing it,
	 * then drop the reference held on the old one */
	if (neigh_node)
		kref_get(&neigh_node->refcount);
	neigh_node_tmp = orig_node->router;
	orig_node->router = neigh_node;
	if (neigh_node_tmp)
		kref_put(&neigh_node_tmp->refcount, neigh_node_free_ref);
}
  114. void update_routes(struct bat_priv *bat_priv, struct orig_node *orig_node,
  115. struct neigh_node *neigh_node, unsigned char *hna_buff,
  116. int hna_buff_len)
  117. {
  118. if (!orig_node)
  119. return;
  120. if (orig_node->router != neigh_node)
  121. update_route(bat_priv, orig_node, neigh_node,
  122. hna_buff, hna_buff_len);
  123. /* may be just HNA changed */
  124. else
  125. update_HNA(bat_priv, orig_node, hna_buff, hna_buff_len);
  126. }
/**
 * is_bidirectional_neigh - check whether a link can be used both ways
 * @orig_node: originator the OGM was generated by
 * @orig_neigh_node: single hop neighbor the OGM was received from
 * @batman_packet: the OGM under inspection (tq field is rescaled here)
 * @if_incoming: interface the OGM arrived on
 *
 * Finds (or lazily creates) the neigh_node matching the one hop
 * neighbor, derives the local transmit quality (tq_own) and an
 * asymmetry penalty from the send/receive packet counts, folds both
 * into batman_packet->tq and returns 1 when the resulting tq reaches
 * TQ_TOTAL_BIDRECT_LIMIT, 0 otherwise (including on allocation
 * failure).
 */
static int is_bidirectional_neigh(struct orig_node *orig_node,
				  struct orig_node *orig_neigh_node,
				  struct batman_packet *batman_packet,
				  struct batman_if *if_incoming)
{
	struct bat_priv *bat_priv = netdev_priv(if_incoming->soft_iface);
	struct neigh_node *neigh_node = NULL, *tmp_neigh_node = NULL;
	struct hlist_node *node;
	unsigned char total_count;
	int ret = 0;

	if (orig_node == orig_neigh_node) {
		/* OGM from a direct neighbor: look the neighbor up in
		 * the originator's own neighbor list */
		rcu_read_lock();
		hlist_for_each_entry_rcu(tmp_neigh_node, node,
					 &orig_node->neigh_list, list) {
			if (compare_orig(tmp_neigh_node->addr,
					 orig_neigh_node->orig) &&
			    (tmp_neigh_node->if_incoming == if_incoming))
				neigh_node = tmp_neigh_node;
		}

		if (!neigh_node)
			neigh_node = create_neighbor(orig_node,
						     orig_neigh_node,
						     orig_neigh_node->orig,
						     if_incoming);
		/* create_neighbor failed, return 0 */
		if (!neigh_node)
			goto unlock;

		/* pin the neighbor before leaving the RCU section */
		kref_get(&neigh_node->refcount);
		rcu_read_unlock();

		neigh_node->last_valid = jiffies;
	} else {
		/* find packet count of corresponding one hop neighbor */
		rcu_read_lock();
		hlist_for_each_entry_rcu(tmp_neigh_node, node,
					 &orig_neigh_node->neigh_list, list) {
			if (compare_orig(tmp_neigh_node->addr,
					 orig_neigh_node->orig) &&
			    (tmp_neigh_node->if_incoming == if_incoming))
				neigh_node = tmp_neigh_node;
		}

		if (!neigh_node)
			neigh_node = create_neighbor(orig_neigh_node,
						     orig_neigh_node,
						     orig_neigh_node->orig,
						     if_incoming);
		/* create_neighbor failed, return 0 */
		if (!neigh_node)
			goto unlock;

		/* pin the neighbor before leaving the RCU section */
		kref_get(&neigh_node->refcount);
		rcu_read_unlock();
	}

	orig_node->last_valid = jiffies;

	/* pay attention to not get a value bigger than 100 % */
	total_count = (orig_neigh_node->bcast_own_sum[if_incoming->if_num] >
		       neigh_node->real_packet_count ?
		       neigh_node->real_packet_count :
		       orig_neigh_node->bcast_own_sum[if_incoming->if_num]);

	/* if we have too few packets (too less data) we set tq_own to zero */
	/* if we receive too few packets it is not considered bidirectional */
	if ((total_count < TQ_LOCAL_BIDRECT_SEND_MINIMUM) ||
	    (neigh_node->real_packet_count < TQ_LOCAL_BIDRECT_RECV_MINIMUM))
		orig_neigh_node->tq_own = 0;
	else
		/* neigh_node->real_packet_count is never zero as we
		 * only purge old information when getting new
		 * information */
		orig_neigh_node->tq_own = (TQ_MAX_VALUE * total_count) /
			neigh_node->real_packet_count;

	/*
	 * 1 - ((1-x) ** 3), normalized to TQ_MAX_VALUE this does
	 * affect the nearly-symmetric links only a little, but
	 * punishes asymmetric links more. This will give a value
	 * between 0 and TQ_MAX_VALUE
	 */
	orig_neigh_node->tq_asym_penalty =
		TQ_MAX_VALUE -
		(TQ_MAX_VALUE *
		 (TQ_LOCAL_WINDOW_SIZE - neigh_node->real_packet_count) *
		 (TQ_LOCAL_WINDOW_SIZE - neigh_node->real_packet_count) *
		 (TQ_LOCAL_WINDOW_SIZE - neigh_node->real_packet_count)) /
		(TQ_LOCAL_WINDOW_SIZE *
		 TQ_LOCAL_WINDOW_SIZE *
		 TQ_LOCAL_WINDOW_SIZE);

	/* rescale the received tq by own quality and asym penalty */
	batman_packet->tq = ((batman_packet->tq *
			      orig_neigh_node->tq_own *
			      orig_neigh_node->tq_asym_penalty) /
			     (TQ_MAX_VALUE * TQ_MAX_VALUE));

	bat_dbg(DBG_BATMAN, bat_priv,
		"bidirectional: "
		"orig = %-15pM neigh = %-15pM => own_bcast = %2i, "
		"real recv = %2i, local tq: %3i, asym_penalty: %3i, "
		"total tq: %3i\n",
		orig_node->orig, orig_neigh_node->orig, total_count,
		neigh_node->real_packet_count, orig_neigh_node->tq_own,
		orig_neigh_node->tq_asym_penalty, batman_packet->tq);

	/* if link has the minimum required transmission quality
	 * consider it bidirectional */
	if (batman_packet->tq >= TQ_TOTAL_BIDRECT_LIMIT)
		ret = 1;

	goto out;

unlock:
	rcu_read_unlock();
out:
	if (neigh_node)
		kref_put(&neigh_node->refcount, neigh_node_free_ref);
	return ret;
}
  234. /* caller must hold the neigh_list_lock */
  235. void bonding_candidate_del(struct orig_node *orig_node,
  236. struct neigh_node *neigh_node)
  237. {
  238. /* this neighbor is not part of our candidate list */
  239. if (list_empty(&neigh_node->bonding_list))
  240. goto out;
  241. list_del_rcu(&neigh_node->bonding_list);
  242. call_rcu(&neigh_node->rcu_bond, neigh_node_free_rcu_bond);
  243. INIT_LIST_HEAD(&neigh_node->bonding_list);
  244. atomic_dec(&orig_node->bond_candidates);
  245. out:
  246. return;
  247. }
/**
 * bonding_candidate_add - consider a neighbor for interface bonding
 * @orig_node: originator owning the candidate list
 * @neigh_node: neighbor to (possibly) add as bonding candidate
 *
 * A neighbor qualifies when it shares the primary address of the
 * current router, its tq is within BONDING_TQ_THRESHOLD of the best,
 * and no existing candidate uses the same interface or mac address.
 * Neighbors that fail the checks are removed from the list instead.
 *
 * NOTE(review): the list walk relies on neigh_list_lock being held
 * for mutual exclusion rather than an explicit rcu_read_lock here -
 * confirm against the list's other writers.
 */
static void bonding_candidate_add(struct orig_node *orig_node,
				  struct neigh_node *neigh_node)
{
	struct hlist_node *node;
	struct neigh_node *tmp_neigh_node;
	uint8_t best_tq, interference_candidate = 0;

	spin_lock_bh(&orig_node->neigh_list_lock);

	/* only consider if it has the same primary address ... */
	if (!compare_orig(orig_node->orig,
			  neigh_node->orig_node->primary_addr))
		goto candidate_del;

	if (!orig_node->router)
		goto candidate_del;

	best_tq = orig_node->router->tq_avg;

	/* ... and is good enough to be considered */
	if (neigh_node->tq_avg < best_tq - BONDING_TQ_THRESHOLD)
		goto candidate_del;

	/**
	 * check if we have another candidate with the same mac address or
	 * interface. If we do, we won't select this candidate because of
	 * possible interference.
	 */
	hlist_for_each_entry_rcu(tmp_neigh_node, node,
				 &orig_node->neigh_list, list) {
		if (tmp_neigh_node == neigh_node)
			continue;

		/* we only care if the other candidate is even
		 * considered as candidate. */
		if (list_empty(&tmp_neigh_node->bonding_list))
			continue;

		if ((neigh_node->if_incoming == tmp_neigh_node->if_incoming) ||
		    (compare_orig(neigh_node->addr, tmp_neigh_node->addr))) {
			interference_candidate = 1;
			break;
		}
	}

	/* don't care further if it is an interference candidate */
	if (interference_candidate)
		goto candidate_del;

	/* this neighbor already is part of our candidate list */
	if (!list_empty(&neigh_node->bonding_list))
		goto out;

	/* the list holds its own reference on the neighbor */
	list_add_rcu(&neigh_node->bonding_list, &orig_node->bond_list);
	kref_get(&neigh_node->refcount);
	atomic_inc(&orig_node->bond_candidates);
	goto out;

candidate_del:
	bonding_candidate_del(orig_node, neigh_node);

out:
	spin_unlock_bh(&orig_node->neigh_list_lock);
	return;
}
  300. /* copy primary address for bonding */
  301. static void bonding_save_primary(struct orig_node *orig_node,
  302. struct orig_node *orig_neigh_node,
  303. struct batman_packet *batman_packet)
  304. {
  305. if (!(batman_packet->flags & PRIMARIES_FIRST_HOP))
  306. return;
  307. memcpy(orig_neigh_node->primary_addr, orig_node->orig, ETH_ALEN);
  308. }
/**
 * update_orig - update originator state from a freshly accepted OGM
 * @bat_priv: private data of the soft interface
 * @orig_node: originator the OGM belongs to
 * @ethhdr: ethernet header of the received frame
 * @batman_packet: the OGM itself
 * @if_incoming: interface the OGM arrived on
 * @hna_buff: HNA buffer attached to the OGM
 * @hna_buff_len: length of @hna_buff in bytes
 * @is_duplicate: non-zero when the seqno was seen before
 *
 * Refreshes the tq ring buffer of the sending neighbor (creating the
 * neigh_node on first contact), then decides whether that neighbor
 * should replace the current router, and finally syncs HNA and
 * gateway state.
 */
static void update_orig(struct bat_priv *bat_priv,
			struct orig_node *orig_node,
			struct ethhdr *ethhdr,
			struct batman_packet *batman_packet,
			struct batman_if *if_incoming,
			unsigned char *hna_buff, int hna_buff_len,
			char is_duplicate)
{
	struct neigh_node *neigh_node = NULL, *tmp_neigh_node = NULL;
	struct orig_node *orig_node_tmp;
	struct hlist_node *node;
	int tmp_hna_buff_len;
	uint8_t bcast_own_sum_orig, bcast_own_sum_neigh;

	bat_dbg(DBG_BATMAN, bat_priv, "update_originator(): "
		"Searching and updating originator entry of received packet\n");

	rcu_read_lock();
	hlist_for_each_entry_rcu(tmp_neigh_node, node,
				 &orig_node->neigh_list, list) {
		/* remember the sending neighbor; its ring buffer is
		 * updated with the received tq below */
		if (compare_orig(tmp_neigh_node->addr, ethhdr->h_source) &&
		    (tmp_neigh_node->if_incoming == if_incoming)) {
			neigh_node = tmp_neigh_node;
			continue;
		}

		if (is_duplicate)
			continue;

		/* for all other neighbors record a zero for this seqno */
		ring_buffer_set(tmp_neigh_node->tq_recv,
				&tmp_neigh_node->tq_index, 0);
		tmp_neigh_node->tq_avg =
			ring_buffer_avg(tmp_neigh_node->tq_recv);
	}

	if (!neigh_node) {
		struct orig_node *orig_tmp;

		orig_tmp = get_orig_node(bat_priv, ethhdr->h_source);
		if (!orig_tmp)
			goto unlock;

		neigh_node = create_neighbor(orig_node, orig_tmp,
					     ethhdr->h_source, if_incoming);

		/* drop the reference taken by get_orig_node() */
		kref_put(&orig_tmp->refcount, orig_node_free_ref);
		if (!neigh_node)
			goto unlock;
	} else
		bat_dbg(DBG_BATMAN, bat_priv,
			"Updating existing last-hop neighbor of originator\n");

	/* pin the neighbor before leaving the RCU section */
	kref_get(&neigh_node->refcount);
	rcu_read_unlock();

	orig_node->flags = batman_packet->flags;
	neigh_node->last_valid = jiffies;

	ring_buffer_set(neigh_node->tq_recv,
			&neigh_node->tq_index,
			batman_packet->tq);
	neigh_node->tq_avg = ring_buffer_avg(neigh_node->tq_recv);

	if (!is_duplicate) {
		orig_node->last_ttl = batman_packet->ttl;
		neigh_node->last_ttl = batman_packet->ttl;
	}

	bonding_candidate_add(orig_node, neigh_node);

	/* never use more HNA data than the packet announces */
	tmp_hna_buff_len = (hna_buff_len > batman_packet->num_hna * ETH_ALEN ?
			    batman_packet->num_hna * ETH_ALEN : hna_buff_len);

	/* if this neighbor already is our next hop there is nothing
	 * to change */
	if (orig_node->router == neigh_node)
		goto update_hna;

	/* if this neighbor does not offer a better TQ we won't consider it */
	if ((orig_node->router) &&
	    (orig_node->router->tq_avg > neigh_node->tq_avg))
		goto update_hna;

	/* if the TQ is the same and the link not more symetric we
	 * won't consider it either */
	if ((orig_node->router) &&
	    (neigh_node->tq_avg == orig_node->router->tq_avg)) {
		orig_node_tmp = orig_node->router->orig_node;
		spin_lock_bh(&orig_node_tmp->ogm_cnt_lock);
		bcast_own_sum_orig =
			orig_node_tmp->bcast_own_sum[if_incoming->if_num];
		spin_unlock_bh(&orig_node_tmp->ogm_cnt_lock);

		orig_node_tmp = neigh_node->orig_node;
		spin_lock_bh(&orig_node_tmp->ogm_cnt_lock);
		bcast_own_sum_neigh =
			orig_node_tmp->bcast_own_sum[if_incoming->if_num];
		spin_unlock_bh(&orig_node_tmp->ogm_cnt_lock);

		if (bcast_own_sum_orig >= bcast_own_sum_neigh)
			goto update_hna;
	}

	/* the new neighbor wins - make it the router */
	update_routes(bat_priv, orig_node, neigh_node,
		      hna_buff, tmp_hna_buff_len);
	goto update_gw;

update_hna:
	update_routes(bat_priv, orig_node, orig_node->router,
		      hna_buff, tmp_hna_buff_len);

update_gw:
	if (orig_node->gw_flags != batman_packet->gw_flags)
		gw_node_update(bat_priv, orig_node, batman_packet->gw_flags);

	orig_node->gw_flags = batman_packet->gw_flags;

	/* restart gateway selection if fast or late switching was enabled */
	if ((orig_node->gw_flags) &&
	    (atomic_read(&bat_priv->gw_mode) == GW_MODE_CLIENT) &&
	    (atomic_read(&bat_priv->gw_sel_class) > 2))
		gw_check_election(bat_priv, orig_node);

	goto out;

unlock:
	rcu_read_unlock();
out:
	if (neigh_node)
		kref_put(&neigh_node->refcount, neigh_node_free_ref);
}
  414. /* checks whether the host restarted and is in the protection time.
  415. * returns:
  416. * 0 if the packet is to be accepted
  417. * 1 if the packet is to be ignored.
  418. */
  419. static int window_protected(struct bat_priv *bat_priv,
  420. int32_t seq_num_diff,
  421. unsigned long *last_reset)
  422. {
  423. if ((seq_num_diff <= -TQ_LOCAL_WINDOW_SIZE)
  424. || (seq_num_diff >= EXPECTED_SEQNO_RANGE)) {
  425. if (time_after(jiffies, *last_reset +
  426. msecs_to_jiffies(RESET_PROTECTION_MS))) {
  427. *last_reset = jiffies;
  428. bat_dbg(DBG_BATMAN, bat_priv,
  429. "old packet received, start protection\n");
  430. return 0;
  431. } else
  432. return 1;
  433. }
  434. return 0;
  435. }
/* processes a batman packet for all interfaces, adjusts the sequence number and
 * finds out whether it is a duplicate.
 * returns:
 *   1 the packet is a duplicate
 *   0 the packet has not yet been received
 *  -1 the packet is old and has been received while the seqno window
 *     was protected. Caller should drop it.
 */
static char count_real_packets(struct ethhdr *ethhdr,
			       struct batman_packet *batman_packet,
			       struct batman_if *if_incoming)
{
	struct bat_priv *bat_priv = netdev_priv(if_incoming->soft_iface);
	struct orig_node *orig_node;
	struct neigh_node *tmp_neigh_node;
	struct hlist_node *node;
	char is_duplicate = 0;
	int32_t seq_diff;
	int need_update = 0;
	int set_mark;

	orig_node = get_orig_node(bat_priv, batman_packet->orig);
	if (!orig_node)
		return 0;

	seq_diff = batman_packet->seqno - orig_node->last_real_seqno;

	/* signalize caller that the packet is to be dropped. */
	if (window_protected(bat_priv, seq_diff,
			     &orig_node->batman_seqno_reset))
		goto err;

	rcu_read_lock();
	hlist_for_each_entry_rcu(tmp_neigh_node, node,
				 &orig_node->neigh_list, list) {
		/* duplicate if any neighbor already has this seqno marked */
		is_duplicate |= get_bit_status(tmp_neigh_node->real_bits,
					       orig_node->last_real_seqno,
					       batman_packet->seqno);

		/* only the actual sender gets the bit set */
		if (compare_orig(tmp_neigh_node->addr, ethhdr->h_source) &&
		    (tmp_neigh_node->if_incoming == if_incoming))
			set_mark = 1;
		else
			set_mark = 0;

		/* if the window moved, set the update flag. */
		need_update |= bit_get_packet(bat_priv,
					      tmp_neigh_node->real_bits,
					      seq_diff, set_mark);
		tmp_neigh_node->real_packet_count =
			bit_packet_count(tmp_neigh_node->real_bits);
	}
	rcu_read_unlock();

	if (need_update) {
		bat_dbg(DBG_BATMAN, bat_priv,
			"updating last_seqno: old %d, new %d\n",
			orig_node->last_real_seqno, batman_packet->seqno);
		orig_node->last_real_seqno = batman_packet->seqno;
	}

	kref_put(&orig_node->refcount, orig_node_free_ref);
	return is_duplicate;

err:
	kref_put(&orig_node->refcount, orig_node_free_ref);
	return -1;
}
  495. void receive_bat_packet(struct ethhdr *ethhdr,
  496. struct batman_packet *batman_packet,
  497. unsigned char *hna_buff, int hna_buff_len,
  498. struct batman_if *if_incoming)
  499. {
  500. struct bat_priv *bat_priv = netdev_priv(if_incoming->soft_iface);
  501. struct batman_if *batman_if;
  502. struct orig_node *orig_neigh_node, *orig_node;
  503. char has_directlink_flag;
  504. char is_my_addr = 0, is_my_orig = 0, is_my_oldorig = 0;
  505. char is_broadcast = 0, is_bidirectional, is_single_hop_neigh;
  506. char is_duplicate;
  507. uint32_t if_incoming_seqno;
  508. /* Silently drop when the batman packet is actually not a
  509. * correct packet.
  510. *
  511. * This might happen if a packet is padded (e.g. Ethernet has a
  512. * minimum frame length of 64 byte) and the aggregation interprets
  513. * it as an additional length.
  514. *
  515. * TODO: A more sane solution would be to have a bit in the
  516. * batman_packet to detect whether the packet is the last
  517. * packet in an aggregation. Here we expect that the padding
  518. * is always zero (or not 0x01)
  519. */
  520. if (batman_packet->packet_type != BAT_PACKET)
  521. return;
  522. /* could be changed by schedule_own_packet() */
  523. if_incoming_seqno = atomic_read(&if_incoming->seqno);
  524. has_directlink_flag = (batman_packet->flags & DIRECTLINK ? 1 : 0);
  525. is_single_hop_neigh = (compare_orig(ethhdr->h_source,
  526. batman_packet->orig) ? 1 : 0);
  527. bat_dbg(DBG_BATMAN, bat_priv,
  528. "Received BATMAN packet via NB: %pM, IF: %s [%pM] "
  529. "(from OG: %pM, via prev OG: %pM, seqno %d, tq %d, "
  530. "TTL %d, V %d, IDF %d)\n",
  531. ethhdr->h_source, if_incoming->net_dev->name,
  532. if_incoming->net_dev->dev_addr, batman_packet->orig,
  533. batman_packet->prev_sender, batman_packet->seqno,
  534. batman_packet->tq, batman_packet->ttl, batman_packet->version,
  535. has_directlink_flag);
  536. rcu_read_lock();
  537. list_for_each_entry_rcu(batman_if, &if_list, list) {
  538. if (batman_if->if_status != IF_ACTIVE)
  539. continue;
  540. if (batman_if->soft_iface != if_incoming->soft_iface)
  541. continue;
  542. if (compare_orig(ethhdr->h_source,
  543. batman_if->net_dev->dev_addr))
  544. is_my_addr = 1;
  545. if (compare_orig(batman_packet->orig,
  546. batman_if->net_dev->dev_addr))
  547. is_my_orig = 1;
  548. if (compare_orig(batman_packet->prev_sender,
  549. batman_if->net_dev->dev_addr))
  550. is_my_oldorig = 1;
  551. if (compare_orig(ethhdr->h_source, broadcast_addr))
  552. is_broadcast = 1;
  553. }
  554. rcu_read_unlock();
  555. if (batman_packet->version != COMPAT_VERSION) {
  556. bat_dbg(DBG_BATMAN, bat_priv,
  557. "Drop packet: incompatible batman version (%i)\n",
  558. batman_packet->version);
  559. return;
  560. }
  561. if (is_my_addr) {
  562. bat_dbg(DBG_BATMAN, bat_priv,
  563. "Drop packet: received my own broadcast (sender: %pM"
  564. ")\n",
  565. ethhdr->h_source);
  566. return;
  567. }
  568. if (is_broadcast) {
  569. bat_dbg(DBG_BATMAN, bat_priv, "Drop packet: "
  570. "ignoring all packets with broadcast source addr (sender: %pM"
  571. ")\n", ethhdr->h_source);
  572. return;
  573. }
  574. if (is_my_orig) {
  575. unsigned long *word;
  576. int offset;
  577. orig_neigh_node = get_orig_node(bat_priv, ethhdr->h_source);
  578. if (!orig_neigh_node)
  579. return;
  580. /* neighbor has to indicate direct link and it has to
  581. * come via the corresponding interface */
  582. /* if received seqno equals last send seqno save new
  583. * seqno for bidirectional check */
  584. if (has_directlink_flag &&
  585. compare_orig(if_incoming->net_dev->dev_addr,
  586. batman_packet->orig) &&
  587. (batman_packet->seqno - if_incoming_seqno + 2 == 0)) {
  588. offset = if_incoming->if_num * NUM_WORDS;
  589. spin_lock_bh(&orig_neigh_node->ogm_cnt_lock);
  590. word = &(orig_neigh_node->bcast_own[offset]);
  591. bit_mark(word, 0);
  592. orig_neigh_node->bcast_own_sum[if_incoming->if_num] =
  593. bit_packet_count(word);
  594. spin_unlock_bh(&orig_neigh_node->ogm_cnt_lock);
  595. }
  596. bat_dbg(DBG_BATMAN, bat_priv, "Drop packet: "
  597. "originator packet from myself (via neighbor)\n");
  598. kref_put(&orig_neigh_node->refcount, orig_node_free_ref);
  599. return;
  600. }
  601. if (is_my_oldorig) {
  602. bat_dbg(DBG_BATMAN, bat_priv,
  603. "Drop packet: ignoring all rebroadcast echos (sender: "
  604. "%pM)\n", ethhdr->h_source);
  605. return;
  606. }
  607. orig_node = get_orig_node(bat_priv, batman_packet->orig);
  608. if (!orig_node)
  609. return;
  610. is_duplicate = count_real_packets(ethhdr, batman_packet, if_incoming);
  611. if (is_duplicate == -1) {
  612. bat_dbg(DBG_BATMAN, bat_priv,
  613. "Drop packet: packet within seqno protection time "
  614. "(sender: %pM)\n", ethhdr->h_source);
  615. goto out;
  616. }
  617. if (batman_packet->tq == 0) {
  618. bat_dbg(DBG_BATMAN, bat_priv,
  619. "Drop packet: originator packet with tq equal 0\n");
  620. goto out;
  621. }
  622. /* avoid temporary routing loops */
  623. if ((orig_node->router) &&
  624. (orig_node->router->orig_node->router) &&
  625. (compare_orig(orig_node->router->addr,
  626. batman_packet->prev_sender)) &&
  627. !(compare_orig(batman_packet->orig, batman_packet->prev_sender)) &&
  628. (compare_orig(orig_node->router->addr,
  629. orig_node->router->orig_node->router->addr))) {
  630. bat_dbg(DBG_BATMAN, bat_priv,
  631. "Drop packet: ignoring all rebroadcast packets that "
  632. "may make me loop (sender: %pM)\n", ethhdr->h_source);
  633. goto out;
  634. }
  635. /* if sender is a direct neighbor the sender mac equals
  636. * originator mac */
  637. orig_neigh_node = (is_single_hop_neigh ?
  638. orig_node :
  639. get_orig_node(bat_priv, ethhdr->h_source));
  640. if (!orig_neigh_node)
  641. goto out_neigh;
  642. /* drop packet if sender is not a direct neighbor and if we
  643. * don't route towards it */
  644. if (!is_single_hop_neigh && (!orig_neigh_node->router)) {
  645. bat_dbg(DBG_BATMAN, bat_priv,
  646. "Drop packet: OGM via unknown neighbor!\n");
  647. goto out_neigh;
  648. }
  649. is_bidirectional = is_bidirectional_neigh(orig_node, orig_neigh_node,
  650. batman_packet, if_incoming);
  651. bonding_save_primary(orig_node, orig_neigh_node, batman_packet);
  652. /* update ranking if it is not a duplicate or has the same
  653. * seqno and similar ttl as the non-duplicate */
  654. if (is_bidirectional &&
  655. (!is_duplicate ||
  656. ((orig_node->last_real_seqno == batman_packet->seqno) &&
  657. (orig_node->last_ttl - 3 <= batman_packet->ttl))))
  658. update_orig(bat_priv, orig_node, ethhdr, batman_packet,
  659. if_incoming, hna_buff, hna_buff_len, is_duplicate);
  660. /* is single hop (direct) neighbor */
  661. if (is_single_hop_neigh) {
  662. /* mark direct link on incoming interface */
  663. schedule_forward_packet(orig_node, ethhdr, batman_packet,
  664. 1, hna_buff_len, if_incoming);
  665. bat_dbg(DBG_BATMAN, bat_priv, "Forwarding packet: "
  666. "rebroadcast neighbor packet with direct link flag\n");
  667. goto out_neigh;
  668. }
  669. /* multihop originator */
  670. if (!is_bidirectional) {
  671. bat_dbg(DBG_BATMAN, bat_priv,
  672. "Drop packet: not received via bidirectional link\n");
  673. goto out_neigh;
  674. }
  675. if (is_duplicate) {
  676. bat_dbg(DBG_BATMAN, bat_priv,
  677. "Drop packet: duplicate packet received\n");
  678. goto out_neigh;
  679. }
  680. bat_dbg(DBG_BATMAN, bat_priv,
  681. "Forwarding packet: rebroadcast originator packet\n");
  682. schedule_forward_packet(orig_node, ethhdr, batman_packet,
  683. 0, hna_buff_len, if_incoming);
  684. out_neigh:
  685. if (!is_single_hop_neigh)
  686. kref_put(&orig_neigh_node->refcount, orig_node_free_ref);
  687. out:
  688. kref_put(&orig_node->refcount, orig_node_free_ref);
  689. }
/* Receive handler for batman OGM (originator message) frames.
 *
 * Validates the frame (minimum length, broadcast destination, sane source),
 * makes the skb writable and linear, and then hands the payload - which may
 * contain several aggregated OGMs - to receive_aggr_bat_packet() while
 * holding orig_hash_lock.
 *
 * Returns NET_RX_SUCCESS after consuming the skb, or NET_RX_DROP on a
 * malformed frame (the caller is then responsible for the skb).
 */
int recv_bat_packet(struct sk_buff *skb, struct batman_if *batman_if)
{
	struct bat_priv *bat_priv = netdev_priv(batman_if->soft_iface);
	struct ethhdr *ethhdr;

	/* drop packet if it has not necessary minimum size */
	if (unlikely(!pskb_may_pull(skb, sizeof(struct batman_packet))))
		return NET_RX_DROP;

	ethhdr = (struct ethhdr *)skb_mac_header(skb);

	/* packet with broadcast indication but unicast recipient */
	if (!is_broadcast_ether_addr(ethhdr->h_dest))
		return NET_RX_DROP;

	/* packet with broadcast sender address */
	if (is_broadcast_ether_addr(ethhdr->h_source))
		return NET_RX_DROP;

	/* create a copy of the skb, if needed, to modify it. */
	if (skb_cow(skb, 0) < 0)
		return NET_RX_DROP;

	/* keep skb linear */
	if (skb_linearize(skb) < 0)
		return NET_RX_DROP;

	/* skb_cow()/skb_linearize() may have reallocated the buffer,
	 * so re-read the MAC header pointer */
	ethhdr = (struct ethhdr *)skb_mac_header(skb);

	spin_lock_bh(&bat_priv->orig_hash_lock);
	receive_aggr_bat_packet(ethhdr,
				skb->data,
				skb_headlen(skb),
				batman_if);
	spin_unlock_bh(&bat_priv->orig_hash_lock);

	kfree_skb(skb);
	return NET_RX_SUCCESS;
}
/* Handle a batman ICMP packet whose final destination is this node.
 *
 * Anything that is not an ECHO_REQUEST (replies, ttl-exceeded, ...) is
 * delivered to the userspace socket interface. An ECHO_REQUEST is turned
 * into an ECHO_REPLY and sent back towards its originator via the next hop
 * recorded in the originator table.
 *
 * Returns NET_RX_SUCCESS when a reply was handed to send_skb_packet()
 * (which then owns the skb), NET_RX_DROP otherwise.
 */
static int recv_my_icmp_packet(struct bat_priv *bat_priv,
			       struct sk_buff *skb, size_t icmp_len)
{
	struct orig_node *orig_node;
	struct icmp_packet_rr *icmp_packet;
	struct batman_if *batman_if;
	int ret;
	uint8_t dstaddr[ETH_ALEN];

	icmp_packet = (struct icmp_packet_rr *)skb->data;

	/* add data to device queue */
	if (icmp_packet->msg_type != ECHO_REQUEST) {
		bat_socket_receive_packet(icmp_packet, icmp_len);
		return NET_RX_DROP;
	}

	/* cannot answer without a primary interface address to use
	 * as the reply's originator */
	if (!bat_priv->primary_if)
		return NET_RX_DROP;

	/* answer echo request (ping) */
	/* get routing information */
	spin_lock_bh(&bat_priv->orig_hash_lock);
	rcu_read_lock();
	orig_node = ((struct orig_node *)hash_find(bat_priv->orig_hash,
						   compare_orig, choose_orig,
						   icmp_packet->orig));
	rcu_read_unlock();
	ret = NET_RX_DROP;

	if ((orig_node) && (orig_node->router)) {
		/* don't lock while sending the packets ... we therefore
		 * copy the required data before sending */
		batman_if = orig_node->router->if_incoming;
		memcpy(dstaddr, orig_node->router->addr, ETH_ALEN);
		spin_unlock_bh(&bat_priv->orig_hash_lock);

		/* create a copy of the skb, if needed, to modify it. */
		if (skb_cow(skb, sizeof(struct ethhdr)) < 0)
			return NET_RX_DROP;

		/* skb_cow() may have reallocated skb->data */
		icmp_packet = (struct icmp_packet_rr *)skb->data;

		/* swap direction: old originator becomes the destination,
		 * our primary address becomes the originator */
		memcpy(icmp_packet->dst, icmp_packet->orig, ETH_ALEN);
		memcpy(icmp_packet->orig,
		       bat_priv->primary_if->net_dev->dev_addr, ETH_ALEN);
		icmp_packet->msg_type = ECHO_REPLY;
		icmp_packet->ttl = TTL;

		send_skb_packet(skb, batman_if, dstaddr);
		ret = NET_RX_SUCCESS;

	} else
		spin_unlock_bh(&bat_priv->orig_hash_lock);

	return ret;
}
/* Report TTL exhaustion for a batman ICMP packet that expired on this node.
 *
 * Only ECHO_REQUESTs are answered (this is what makes traceroute-style
 * probing work); anything else is just logged and dropped. The packet is
 * rewritten into a TTL_EXCEEDED message addressed back to its originator
 * and sent via the next hop found in the originator table.
 *
 * Returns NET_RX_SUCCESS when the notification was handed to
 * send_skb_packet() (which then owns the skb), NET_RX_DROP otherwise.
 */
static int recv_icmp_ttl_exceeded(struct bat_priv *bat_priv,
				  struct sk_buff *skb)
{
	struct orig_node *orig_node;
	struct icmp_packet *icmp_packet;
	struct batman_if *batman_if;
	int ret;
	uint8_t dstaddr[ETH_ALEN];

	icmp_packet = (struct icmp_packet *)skb->data;

	/* send TTL exceeded if packet is an echo request (traceroute) */
	if (icmp_packet->msg_type != ECHO_REQUEST) {
		pr_debug("Warning - can't forward icmp packet from %pM to "
			 "%pM: ttl exceeded\n", icmp_packet->orig,
			 icmp_packet->dst);
		return NET_RX_DROP;
	}

	/* need the primary interface address as the reply's originator */
	if (!bat_priv->primary_if)
		return NET_RX_DROP;

	/* get routing information */
	spin_lock_bh(&bat_priv->orig_hash_lock);
	rcu_read_lock();
	orig_node = ((struct orig_node *)
		     hash_find(bat_priv->orig_hash, compare_orig, choose_orig,
			       icmp_packet->orig));
	rcu_read_unlock();
	ret = NET_RX_DROP;

	if ((orig_node) && (orig_node->router)) {
		/* don't lock while sending the packets ... we therefore
		 * copy the required data before sending */
		batman_if = orig_node->router->if_incoming;
		memcpy(dstaddr, orig_node->router->addr, ETH_ALEN);
		spin_unlock_bh(&bat_priv->orig_hash_lock);

		/* create a copy of the skb, if needed, to modify it. */
		if (skb_cow(skb, sizeof(struct ethhdr)) < 0)
			return NET_RX_DROP;

		/* skb_cow() may have reallocated skb->data */
		icmp_packet = (struct icmp_packet *) skb->data;

		/* bounce the message back to its originator */
		memcpy(icmp_packet->dst, icmp_packet->orig, ETH_ALEN);
		memcpy(icmp_packet->orig,
		       bat_priv->primary_if->net_dev->dev_addr, ETH_ALEN);
		icmp_packet->msg_type = TTL_EXCEEDED;
		icmp_packet->ttl = TTL;

		send_skb_packet(skb, batman_if, dstaddr);
		ret = NET_RX_SUCCESS;

	} else
		spin_unlock_bh(&bat_priv->orig_hash_lock);

	return ret;
}
/* Receive handler for batman ICMP frames.
 *
 * Validates the frame, appends this node's MAC to the record-route list
 * (when the extended icmp_packet_rr layout is in use and there is room),
 * then dispatches: packets addressed to us go to recv_my_icmp_packet(),
 * packets whose TTL ran out go to recv_icmp_ttl_exceeded(), everything
 * else is forwarded one hop towards its destination with the TTL
 * decremented.
 *
 * Returns NET_RX_SUCCESS when the skb was consumed (sent or delivered),
 * NET_RX_DROP otherwise.
 */
int recv_icmp_packet(struct sk_buff *skb, struct batman_if *recv_if)
{
	struct bat_priv *bat_priv = netdev_priv(recv_if->soft_iface);
	struct icmp_packet_rr *icmp_packet;
	struct ethhdr *ethhdr;
	struct orig_node *orig_node;
	struct batman_if *batman_if;
	int hdr_size = sizeof(struct icmp_packet);
	int ret;
	uint8_t dstaddr[ETH_ALEN];

	/**
	 * we truncate all incoming icmp packets if they don't match our size
	 */
	if (skb->len >= sizeof(struct icmp_packet_rr))
		hdr_size = sizeof(struct icmp_packet_rr);

	/* drop packet if it has not necessary minimum size */
	if (unlikely(!pskb_may_pull(skb, hdr_size)))
		return NET_RX_DROP;

	ethhdr = (struct ethhdr *)skb_mac_header(skb);

	/* packet with unicast indication but broadcast recipient */
	if (is_broadcast_ether_addr(ethhdr->h_dest))
		return NET_RX_DROP;

	/* packet with broadcast sender address */
	if (is_broadcast_ether_addr(ethhdr->h_source))
		return NET_RX_DROP;

	/* not for me */
	if (!is_my_mac(ethhdr->h_dest))
		return NET_RX_DROP;

	icmp_packet = (struct icmp_packet_rr *)skb->data;

	/* add record route information if not full; ethhdr->h_dest is
	 * one of our own interface addresses (checked above), so this
	 * records the hop through this node */
	if ((hdr_size == sizeof(struct icmp_packet_rr)) &&
	    (icmp_packet->rr_cur < BAT_RR_LEN)) {
		memcpy(&(icmp_packet->rr[icmp_packet->rr_cur]),
		       ethhdr->h_dest, ETH_ALEN);
		icmp_packet->rr_cur++;
	}

	/* packet for me */
	if (is_my_mac(icmp_packet->dst))
		return recv_my_icmp_packet(bat_priv, skb, hdr_size);

	/* TTL exceeded */
	if (icmp_packet->ttl < 2)
		return recv_icmp_ttl_exceeded(bat_priv, skb);

	ret = NET_RX_DROP;

	/* get routing information */
	spin_lock_bh(&bat_priv->orig_hash_lock);
	rcu_read_lock();
	orig_node = ((struct orig_node *)
		     hash_find(bat_priv->orig_hash, compare_orig, choose_orig,
			       icmp_packet->dst));
	rcu_read_unlock();

	if ((orig_node) && (orig_node->router)) {
		/* don't lock while sending the packets ... we therefore
		 * copy the required data before sending */
		batman_if = orig_node->router->if_incoming;
		memcpy(dstaddr, orig_node->router->addr, ETH_ALEN);
		spin_unlock_bh(&bat_priv->orig_hash_lock);

		/* create a copy of the skb, if needed, to modify it. */
		if (skb_cow(skb, sizeof(struct ethhdr)) < 0)
			return NET_RX_DROP;

		/* skb_cow() may have reallocated skb->data */
		icmp_packet = (struct icmp_packet_rr *)skb->data;

		/* decrement ttl */
		icmp_packet->ttl--;

		/* route it */
		send_skb_packet(skb, batman_if, dstaddr);
		ret = NET_RX_SUCCESS;

	} else
		spin_unlock_bh(&bat_priv->orig_hash_lock);

	return ret;
}
/* find a suitable router for this originator, and use
 * bonding if possible. increases the found neighbors
 * refcount.
 *
 * @bat_priv: mesh-wide private data
 * @orig_node: destination originator (may be NULL)
 * @recv_if: interface the packet to be routed came in on; NULL when the
 *           packet originates locally
 *
 * Without bonding candidates the originator's default router is returned.
 * With bonding enabled, candidates are cycled round-robin (skipping the
 * incoming interface); with bonding disabled, the best candidate not using
 * the incoming interface is chosen. Returns NULL when no route exists.
 * Caller must release the reference taken on the returned neighbor.
 */
struct neigh_node *find_router(struct bat_priv *bat_priv,
			       struct orig_node *orig_node,
			       struct batman_if *recv_if)
{
	struct orig_node *primary_orig_node;
	struct orig_node *router_orig;
	struct neigh_node *router, *first_candidate, *tmp_neigh_node;
	static uint8_t zero_mac[ETH_ALEN] = {0, 0, 0, 0, 0, 0};
	int bonding_enabled;

	if (!orig_node)
		return NULL;

	if (!orig_node->router)
		return NULL;

	/* without bonding, the first node should
	 * always choose the default router. */
	bonding_enabled = atomic_read(&bat_priv->bonding);

	rcu_read_lock();
	/* select default router to output */
	router = orig_node->router;
	router_orig = orig_node->router->orig_node;
	if (!router_orig) {
		rcu_read_unlock();
		return NULL;
	}

	/* local packet with bonding off: default router is fine */
	if ((!recv_if) && (!bonding_enabled))
		goto return_router;

	/* if we have something in the primary_addr, we can search
	 * for a potential bonding candidate. */
	if (memcmp(router_orig->primary_addr, zero_mac, ETH_ALEN) == 0)
		goto return_router;

	/* find the orig_node which has the primary interface. might
	 * even be the same as our router_orig in many cases */
	if (memcmp(router_orig->primary_addr,
		   router_orig->orig, ETH_ALEN) == 0) {
		primary_orig_node = router_orig;
	} else {
		primary_orig_node = hash_find(bat_priv->orig_hash, compare_orig,
					      choose_orig,
					      router_orig->primary_addr);

		if (!primary_orig_node)
			goto return_router;
	}

	/* with less than 2 candidates, we can't do any
	 * bonding and prefer the original router. */
	if (atomic_read(&primary_orig_node->bond_candidates) < 2)
		goto return_router;

	/* all nodes between should choose a candidate which
	 * is is not on the interface where the packet came
	 * in. */
	first_candidate = NULL;
	router = NULL;

	if (bonding_enabled) {
		/* in the bonding case, send the packets in a round
		 * robin fashion over the remaining interfaces. */
		list_for_each_entry_rcu(tmp_neigh_node,
				&primary_orig_node->bond_list, bonding_list) {
			if (!first_candidate)
				first_candidate = tmp_neigh_node;
			/* recv_if == NULL on the first node. */
			if (tmp_neigh_node->if_incoming != recv_if) {
				router = tmp_neigh_node;
				break;
			}
		}

		/* use the first candidate if nothing was found. */
		if (!router)
			router = first_candidate;

		/* selected should point to the next element
		 * after the current router */
		spin_lock_bh(&primary_orig_node->neigh_list_lock);
		/* this is a list_move(), which unfortunately
		 * does not exist as rcu version */
		list_del_rcu(&primary_orig_node->bond_list);
		list_add_rcu(&primary_orig_node->bond_list,
			     &router->bonding_list);
		spin_unlock_bh(&primary_orig_node->neigh_list_lock);

	} else {
		/* if bonding is disabled, use the best of the
		 * remaining candidates which are not using
		 * this interface. */
		list_for_each_entry_rcu(tmp_neigh_node,
			&primary_orig_node->bond_list, bonding_list) {
			if (!first_candidate)
				first_candidate = tmp_neigh_node;

			/* recv_if == NULL on the first node. */
			if (tmp_neigh_node->if_incoming != recv_if)
				/* if we don't have a router yet
				 * or this one is better, choose it. */
				if ((!router) ||
				(tmp_neigh_node->tq_avg > router->tq_avg)) {
					router = tmp_neigh_node;
				}
		}

		/* use the first candidate if nothing was found. */
		if (!router)
			router = first_candidate;
	}

return_router:
	/* NOTE(review): in the bonding branches 'router' could still be
	 * NULL if the RCU-protected bond_list was emptied concurrently
	 * after the bond_candidates check - kref_get() would then deref
	 * NULL. Verify whether a NULL check is needed here. */
	kref_get(&router->refcount);
	rcu_read_unlock();
	return router;
}
  987. static int check_unicast_packet(struct sk_buff *skb, int hdr_size)
  988. {
  989. struct ethhdr *ethhdr;
  990. /* drop packet if it has not necessary minimum size */
  991. if (unlikely(!pskb_may_pull(skb, hdr_size)))
  992. return -1;
  993. ethhdr = (struct ethhdr *)skb_mac_header(skb);
  994. /* packet with unicast indication but broadcast recipient */
  995. if (is_broadcast_ether_addr(ethhdr->h_dest))
  996. return -1;
  997. /* packet with broadcast sender address */
  998. if (is_broadcast_ether_addr(ethhdr->h_source))
  999. return -1;
  1000. /* not for me */
  1001. if (!is_my_mac(ethhdr->h_dest))
  1002. return -1;
  1003. return 0;
  1004. }
/* Forward a unicast (or unicast-fragment) packet one hop towards its
 * destination.
 *
 * Looks up the destination originator, selects a next hop via
 * find_router() (bonding-aware), and transmits. Oversized BAT_UNICAST
 * packets are fragmented when fragmentation is enabled; BAT_UNICAST_FRAG
 * packets that fit the outgoing MTU are reassembled before forwarding.
 *
 * Returns NET_RX_SUCCESS when the skb was consumed (sent or buffered for
 * a late fragment merge), NET_RX_DROP otherwise.
 */
int route_unicast_packet(struct sk_buff *skb, struct batman_if *recv_if,
			 int hdr_size)
{
	struct bat_priv *bat_priv = netdev_priv(recv_if->soft_iface);
	struct orig_node *orig_node;
	struct neigh_node *router;
	struct batman_if *batman_if;
	uint8_t dstaddr[ETH_ALEN];
	struct unicast_packet *unicast_packet;
	struct ethhdr *ethhdr = (struct ethhdr *)skb_mac_header(skb);
	int ret;
	struct sk_buff *new_skb;

	unicast_packet = (struct unicast_packet *)skb->data;

	/* TTL exceeded */
	if (unicast_packet->ttl < 2) {
		pr_debug("Warning - can't forward unicast packet from %pM to "
			 "%pM: ttl exceeded\n", ethhdr->h_source,
			 unicast_packet->dest);
		return NET_RX_DROP;
	}

	/* get routing information */
	spin_lock_bh(&bat_priv->orig_hash_lock);
	rcu_read_lock();
	orig_node = ((struct orig_node *)
		     hash_find(bat_priv->orig_hash, compare_orig, choose_orig,
			       unicast_packet->dest));
	rcu_read_unlock();

	/* find_router() increases neigh_nodes refcount if found. */
	router = find_router(bat_priv, orig_node, recv_if);

	if (!router) {
		spin_unlock_bh(&bat_priv->orig_hash_lock);
		return NET_RX_DROP;
	}

	/* don't lock while sending the packets ... we therefore
	 * copy the required data before sending
	 *
	 * NOTE(review): find_router()'s contract says it takes a reference
	 * on the returned neighbor, but no matching release is visible on
	 * any path below - looks like a refcount leak; verify against the
	 * neigh_node release rules. */
	batman_if = router->if_incoming;
	memcpy(dstaddr, router->addr, ETH_ALEN);

	spin_unlock_bh(&bat_priv->orig_hash_lock);

	/* create a copy of the skb, if needed, to modify it. */
	if (skb_cow(skb, sizeof(struct ethhdr)) < 0)
		return NET_RX_DROP;

	/* skb_cow() may have reallocated skb->data */
	unicast_packet = (struct unicast_packet *)skb->data;

	/* too big for the outgoing link: fragment instead (frag_send_skb()
	 * consumes the skb) */
	if (unicast_packet->packet_type == BAT_UNICAST &&
	    atomic_read(&bat_priv->fragmentation) &&
	    skb->len > batman_if->net_dev->mtu)
		return frag_send_skb(skb, bat_priv, batman_if,
				     dstaddr);

	/* fragment that would fit the next link unfragmented: try to merge
	 * it with its buffered counterpart before forwarding */
	if (unicast_packet->packet_type == BAT_UNICAST_FRAG &&
	    frag_can_reassemble(skb, batman_if->net_dev->mtu)) {

		ret = frag_reassemble_skb(skb, bat_priv, &new_skb);

		if (ret == NET_RX_DROP)
			return NET_RX_DROP;

		/* packet was buffered for late merge */
		if (!new_skb)
			return NET_RX_SUCCESS;

		skb = new_skb;
		unicast_packet = (struct unicast_packet *)skb->data;
	}

	/* decrement ttl */
	unicast_packet->ttl--;

	/* route it */
	send_skb_packet(skb, batman_if, dstaddr);
	return NET_RX_SUCCESS;
}
  1069. int recv_unicast_packet(struct sk_buff *skb, struct batman_if *recv_if)
  1070. {
  1071. struct unicast_packet *unicast_packet;
  1072. int hdr_size = sizeof(struct unicast_packet);
  1073. if (check_unicast_packet(skb, hdr_size) < 0)
  1074. return NET_RX_DROP;
  1075. unicast_packet = (struct unicast_packet *)skb->data;
  1076. /* packet for me */
  1077. if (is_my_mac(unicast_packet->dest)) {
  1078. interface_rx(recv_if->soft_iface, skb, recv_if, hdr_size);
  1079. return NET_RX_SUCCESS;
  1080. }
  1081. return route_unicast_packet(skb, recv_if, hdr_size);
  1082. }
  1083. int recv_ucast_frag_packet(struct sk_buff *skb, struct batman_if *recv_if)
  1084. {
  1085. struct bat_priv *bat_priv = netdev_priv(recv_if->soft_iface);
  1086. struct unicast_frag_packet *unicast_packet;
  1087. int hdr_size = sizeof(struct unicast_frag_packet);
  1088. struct sk_buff *new_skb = NULL;
  1089. int ret;
  1090. if (check_unicast_packet(skb, hdr_size) < 0)
  1091. return NET_RX_DROP;
  1092. unicast_packet = (struct unicast_frag_packet *)skb->data;
  1093. /* packet for me */
  1094. if (is_my_mac(unicast_packet->dest)) {
  1095. ret = frag_reassemble_skb(skb, bat_priv, &new_skb);
  1096. if (ret == NET_RX_DROP)
  1097. return NET_RX_DROP;
  1098. /* packet was buffered for late merge */
  1099. if (!new_skb)
  1100. return NET_RX_SUCCESS;
  1101. interface_rx(recv_if->soft_iface, new_skb, recv_if,
  1102. sizeof(struct unicast_packet));
  1103. return NET_RX_SUCCESS;
  1104. }
  1105. return route_unicast_packet(skb, recv_if, hdr_size);
  1106. }
/* Receive handler for batman broadcast frames.
 *
 * Validates the frame, filters our own and duplicate broadcasts via the
 * per-originator sequence-number window (all under orig_hash_lock), then
 * queues the packet for rebroadcast and delivers a copy locally.
 *
 * Returns NET_RX_SUCCESS when the packet was accepted, NET_RX_DROP when
 * it was filtered (caller frees the skb).
 */
int recv_bcast_packet(struct sk_buff *skb, struct batman_if *recv_if)
{
	struct bat_priv *bat_priv = netdev_priv(recv_if->soft_iface);
	struct orig_node *orig_node;
	struct bcast_packet *bcast_packet;
	struct ethhdr *ethhdr;
	int hdr_size = sizeof(struct bcast_packet);
	int32_t seq_diff;

	/* drop packet if it has not necessary minimum size */
	if (unlikely(!pskb_may_pull(skb, hdr_size)))
		return NET_RX_DROP;

	ethhdr = (struct ethhdr *)skb_mac_header(skb);

	/* packet with broadcast indication but unicast recipient */
	if (!is_broadcast_ether_addr(ethhdr->h_dest))
		return NET_RX_DROP;

	/* packet with broadcast sender address */
	if (is_broadcast_ether_addr(ethhdr->h_source))
		return NET_RX_DROP;

	/* ignore broadcasts sent by myself */
	if (is_my_mac(ethhdr->h_source))
		return NET_RX_DROP;

	bcast_packet = (struct bcast_packet *)skb->data;

	/* ignore broadcasts originated by myself */
	if (is_my_mac(bcast_packet->orig))
		return NET_RX_DROP;

	/* TTL exhausted: do not rebroadcast */
	if (bcast_packet->ttl < 2)
		return NET_RX_DROP;

	spin_lock_bh(&bat_priv->orig_hash_lock);
	rcu_read_lock();
	orig_node = ((struct orig_node *)
		     hash_find(bat_priv->orig_hash, compare_orig, choose_orig,
			       bcast_packet->orig));
	rcu_read_unlock();

	/* unknown originator: cannot do duplicate detection */
	if (!orig_node) {
		spin_unlock_bh(&bat_priv->orig_hash_lock);
		return NET_RX_DROP;
	}

	/* check whether the packet is a duplicate */
	if (get_bit_status(orig_node->bcast_bits,
			   orig_node->last_bcast_seqno,
			   ntohl(bcast_packet->seqno))) {
		spin_unlock_bh(&bat_priv->orig_hash_lock);
		return NET_RX_DROP;
	}

	seq_diff = ntohl(bcast_packet->seqno) - orig_node->last_bcast_seqno;

	/* check whether the packet is old and the host just restarted. */
	if (window_protected(bat_priv, seq_diff,
			     &orig_node->bcast_seqno_reset)) {
		spin_unlock_bh(&bat_priv->orig_hash_lock);
		return NET_RX_DROP;
	}

	/* mark broadcast in flood history, update window position
	 * if required. */
	if (bit_get_packet(bat_priv, orig_node->bcast_bits, seq_diff, 1))
		orig_node->last_bcast_seqno = ntohl(bcast_packet->seqno);

	spin_unlock_bh(&bat_priv->orig_hash_lock);

	/* rebroadcast packet */
	add_bcast_packet_to_list(bat_priv, skb);

	/* broadcast for me */
	interface_rx(recv_if->soft_iface, skb, recv_if, hdr_size);

	return NET_RX_SUCCESS;
}
  1169. int recv_vis_packet(struct sk_buff *skb, struct batman_if *recv_if)
  1170. {
  1171. struct vis_packet *vis_packet;
  1172. struct ethhdr *ethhdr;
  1173. struct bat_priv *bat_priv = netdev_priv(recv_if->soft_iface);
  1174. int hdr_size = sizeof(struct vis_packet);
  1175. /* keep skb linear */
  1176. if (skb_linearize(skb) < 0)
  1177. return NET_RX_DROP;
  1178. if (unlikely(!pskb_may_pull(skb, hdr_size)))
  1179. return NET_RX_DROP;
  1180. vis_packet = (struct vis_packet *)skb->data;
  1181. ethhdr = (struct ethhdr *)skb_mac_header(skb);
  1182. /* not for me */
  1183. if (!is_my_mac(ethhdr->h_dest))
  1184. return NET_RX_DROP;
  1185. /* ignore own packets */
  1186. if (is_my_mac(vis_packet->vis_orig))
  1187. return NET_RX_DROP;
  1188. if (is_my_mac(vis_packet->sender_orig))
  1189. return NET_RX_DROP;
  1190. switch (vis_packet->vis_type) {
  1191. case VIS_TYPE_SERVER_SYNC:
  1192. receive_server_sync_packet(bat_priv, vis_packet,
  1193. skb_headlen(skb));
  1194. break;
  1195. case VIS_TYPE_CLIENT_UPDATE:
  1196. receive_client_update_packet(bat_priv, vis_packet,
  1197. skb_headlen(skb));
  1198. break;
  1199. default: /* ignore unknown packet */
  1200. break;
  1201. }
  1202. /* We take a copy of the data in the packet, so we should
  1203. always free the skbuf. */
  1204. return NET_RX_DROP;
  1205. }