/* net/batman-adv/routing.c */
/*
 * Copyright (C) 2007-2011 B.A.T.M.A.N. contributors:
 *
 * Marek Lindner, Simon Wunderlich
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
 * 02110-1301, USA
 *
 */
#include "main.h"
#include "routing.h"
#include "send.h"
#include "hash.h"
#include "soft-interface.h"
#include "hard-interface.h"
#include "icmp_socket.h"
#include "translation-table.h"
#include "originator.h"
#include "ring_buffer.h"
#include "vis.h"
#include "aggregation.h"
#include "gateway_common.h"
#include "gateway_client.h"
#include "unicast.h"
/* Slide the "own broadcast" sliding window of the given interface one
 * position forward for every known originator, and refresh the cached
 * per-interface count of own broadcasts echoed back by that originator.
 * Called with no locks held; takes orig_hash_lock itself. */
void slide_own_bcast_window(struct batman_if *batman_if)
{
	struct bat_priv *bat_priv = netdev_priv(batman_if->soft_iface);
	struct hashtable_t *hash = bat_priv->orig_hash;
	struct hlist_node *walk;
	struct hlist_head *head;
	struct element_t *bucket;
	struct orig_node *orig_node;
	unsigned long *word;
	int i;
	size_t word_index;

	spin_lock_bh(&bat_priv->orig_hash_lock);

	for (i = 0; i < hash->size; i++) {
		head = &hash->table[i];

		rcu_read_lock();
		hlist_for_each_entry_rcu(bucket, walk, head, hlist) {
			orig_node = bucket->data;
			/* each interface owns NUM_WORDS words of the
			 * per-originator bcast_own bitfield */
			word_index = batman_if->if_num * NUM_WORDS;
			word = &(orig_node->bcast_own[word_index]);

			/* shift the window by one seqno without marking
			 * a received packet (seq_num_diff=1, set_mark=0) */
			bit_get_packet(bat_priv, word, 1, 0);
			orig_node->bcast_own_sum[batman_if->if_num] =
				bit_packet_count(word);
		}
		rcu_read_unlock();
	}

	spin_unlock_bh(&bat_priv->orig_hash_lock);
}
  63. static void update_HNA(struct bat_priv *bat_priv, struct orig_node *orig_node,
  64. unsigned char *hna_buff, int hna_buff_len)
  65. {
  66. if ((hna_buff_len != orig_node->hna_buff_len) ||
  67. ((hna_buff_len > 0) &&
  68. (orig_node->hna_buff_len > 0) &&
  69. (memcmp(orig_node->hna_buff, hna_buff, hna_buff_len) != 0))) {
  70. if (orig_node->hna_buff_len > 0)
  71. hna_global_del_orig(bat_priv, orig_node,
  72. "originator changed hna");
  73. if ((hna_buff_len > 0) && (hna_buff))
  74. hna_global_add_orig(bat_priv, orig_node,
  75. hna_buff, hna_buff_len);
  76. }
  77. }
/* Install neigh_node as the next hop towards orig_node, releasing the
 * reference held on the previous router. A NULL neigh_node deletes the
 * route. Callers (update_routes) guarantee router != neigh_node, so the
 * "changed" branch below never sees both pointers NULL. */
static void update_route(struct bat_priv *bat_priv,
			 struct orig_node *orig_node,
			 struct neigh_node *neigh_node,
			 unsigned char *hna_buff, int hna_buff_len)
{
	struct neigh_node *neigh_node_tmp;

	/* route deleted */
	if ((orig_node->router) && (!neigh_node)) {
		bat_dbg(DBG_ROUTES, bat_priv, "Deleting route towards: %pM\n",
			orig_node->orig);
		hna_global_del_orig(bat_priv, orig_node,
				    "originator timed out");

		/* route added */
	} else if ((!orig_node->router) && (neigh_node)) {
		bat_dbg(DBG_ROUTES, bat_priv,
			"Adding route towards: %pM (via %pM)\n",
			orig_node->orig, neigh_node->addr);
		hna_global_add_orig(bat_priv, orig_node,
				    hna_buff, hna_buff_len);

		/* route changed */
	} else {
		bat_dbg(DBG_ROUTES, bat_priv,
			"Changing route towards: %pM "
			"(now via %pM - was via %pM)\n",
			orig_node->orig, neigh_node->addr,
			orig_node->router->addr);
	}

	/* take a reference on the new router before dropping the one the
	 * orig_node held on the old router - keeps both alive throughout */
	if (neigh_node)
		kref_get(&neigh_node->refcount);
	neigh_node_tmp = orig_node->router;
	orig_node->router = neigh_node;
	if (neigh_node_tmp)
		kref_put(&neigh_node_tmp->refcount, neigh_node_free_ref);
}
  112. void update_routes(struct bat_priv *bat_priv, struct orig_node *orig_node,
  113. struct neigh_node *neigh_node, unsigned char *hna_buff,
  114. int hna_buff_len)
  115. {
  116. if (!orig_node)
  117. return;
  118. if (orig_node->router != neigh_node)
  119. update_route(bat_priv, orig_node, neigh_node,
  120. hna_buff, hna_buff_len);
  121. /* may be just HNA changed */
  122. else
  123. update_HNA(bat_priv, orig_node, hna_buff, hna_buff_len);
  124. }
/* Decide whether the link towards orig_neigh_node is usable in both
 * directions and scale batman_packet->tq accordingly.
 *
 * Returns 1 if the resulting tq is above TQ_TOTAL_BIDRECT_LIMIT (link is
 * considered bidirectional), 0 otherwise or on allocation failure.
 * Side effects: updates last_valid timestamps, tq_own and tq_asym_penalty
 * of orig_neigh_node, and rewrites batman_packet->tq in place. */
static int is_bidirectional_neigh(struct orig_node *orig_node,
				  struct orig_node *orig_neigh_node,
				  struct batman_packet *batman_packet,
				  struct batman_if *if_incoming)
{
	struct bat_priv *bat_priv = netdev_priv(if_incoming->soft_iface);
	struct neigh_node *neigh_node = NULL, *tmp_neigh_node = NULL;
	struct hlist_node *node;
	unsigned char total_count;
	int ret = 0;

	if (orig_node == orig_neigh_node) {
		/* single hop neighbor: look it up in its own neighbor list */
		rcu_read_lock();
		hlist_for_each_entry_rcu(tmp_neigh_node, node,
					 &orig_node->neigh_list, list) {
			if (compare_orig(tmp_neigh_node->addr,
					 orig_neigh_node->orig) &&
			    (tmp_neigh_node->if_incoming == if_incoming))
				neigh_node = tmp_neigh_node;
		}

		if (!neigh_node)
			neigh_node = create_neighbor(orig_node,
						     orig_neigh_node,
						     orig_neigh_node->orig,
						     if_incoming);
		/* create_neighbor failed, return 0 */
		if (!neigh_node)
			goto unlock;

		/* hold a reference while we use neigh_node outside RCU */
		kref_get(&neigh_node->refcount);
		rcu_read_unlock();

		neigh_node->last_valid = jiffies;
	} else {
		/* find packet count of corresponding one hop neighbor */
		rcu_read_lock();
		hlist_for_each_entry_rcu(tmp_neigh_node, node,
					 &orig_neigh_node->neigh_list, list) {
			if (compare_orig(tmp_neigh_node->addr,
					 orig_neigh_node->orig) &&
			    (tmp_neigh_node->if_incoming == if_incoming))
				neigh_node = tmp_neigh_node;
		}

		if (!neigh_node)
			neigh_node = create_neighbor(orig_neigh_node,
						     orig_neigh_node,
						     orig_neigh_node->orig,
						     if_incoming);
		/* create_neighbor failed, return 0 */
		if (!neigh_node)
			goto unlock;

		kref_get(&neigh_node->refcount);
		rcu_read_unlock();
	}

	orig_node->last_valid = jiffies;

	/* pay attention to not get a value bigger than 100 % */
	total_count = (orig_neigh_node->bcast_own_sum[if_incoming->if_num] >
		       neigh_node->real_packet_count ?
		       neigh_node->real_packet_count :
		       orig_neigh_node->bcast_own_sum[if_incoming->if_num]);

	/* if we have too few packets (too less data) we set tq_own to zero */
	/* if we receive too few packets it is not considered bidirectional */
	if ((total_count < TQ_LOCAL_BIDRECT_SEND_MINIMUM) ||
	    (neigh_node->real_packet_count < TQ_LOCAL_BIDRECT_RECV_MINIMUM))
		orig_neigh_node->tq_own = 0;
	else
		/* neigh_node->real_packet_count is never zero as we
		 * only purge old information when getting new
		 * information */
		orig_neigh_node->tq_own = (TQ_MAX_VALUE * total_count) /
			neigh_node->real_packet_count;

	/*
	 * 1 - ((1-x) ** 3), normalized to TQ_MAX_VALUE this does
	 * affect the nearly-symmetric links only a little, but
	 * punishes asymmetric links more. This will give a value
	 * between 0 and TQ_MAX_VALUE
	 */
	orig_neigh_node->tq_asym_penalty =
		TQ_MAX_VALUE -
		(TQ_MAX_VALUE *
		 (TQ_LOCAL_WINDOW_SIZE - neigh_node->real_packet_count) *
		 (TQ_LOCAL_WINDOW_SIZE - neigh_node->real_packet_count) *
		 (TQ_LOCAL_WINDOW_SIZE - neigh_node->real_packet_count)) /
		(TQ_LOCAL_WINDOW_SIZE *
		 TQ_LOCAL_WINDOW_SIZE *
		 TQ_LOCAL_WINDOW_SIZE);

	/* combine link quality in both directions into the packet's tq */
	batman_packet->tq = ((batman_packet->tq *
			      orig_neigh_node->tq_own *
			      orig_neigh_node->tq_asym_penalty) /
			     (TQ_MAX_VALUE * TQ_MAX_VALUE));

	bat_dbg(DBG_BATMAN, bat_priv,
		"bidirectional: "
		"orig = %-15pM neigh = %-15pM => own_bcast = %2i, "
		"real recv = %2i, local tq: %3i, asym_penalty: %3i, "
		"total tq: %3i\n",
		orig_node->orig, orig_neigh_node->orig, total_count,
		neigh_node->real_packet_count, orig_neigh_node->tq_own,
		orig_neigh_node->tq_asym_penalty, batman_packet->tq);

	/* if link has the minimum required transmission quality
	 * consider it bidirectional */
	if (batman_packet->tq >= TQ_TOTAL_BIDRECT_LIMIT)
		ret = 1;

	goto out;

unlock:
	rcu_read_unlock();
out:
	if (neigh_node)
		kref_put(&neigh_node->refcount, neigh_node_free_ref);
	return ret;
}
/* Update the originator entry for a received (bidirectional) OGM:
 * record the TQ sample for the neighbor the packet arrived through,
 * possibly switch the route to that neighbor, refresh HNA entries and
 * gateway state. is_duplicate suppresses window/TTL updates for
 * already-seen sequence numbers. */
static void update_orig(struct bat_priv *bat_priv,
			struct orig_node *orig_node,
			struct ethhdr *ethhdr,
			struct batman_packet *batman_packet,
			struct batman_if *if_incoming,
			unsigned char *hna_buff, int hna_buff_len,
			char is_duplicate)
{
	struct neigh_node *neigh_node = NULL, *tmp_neigh_node = NULL;
	struct hlist_node *node;
	int tmp_hna_buff_len;

	bat_dbg(DBG_BATMAN, bat_priv, "update_originator(): "
		"Searching and updating originator entry of received packet\n");

	rcu_read_lock();
	hlist_for_each_entry_rcu(tmp_neigh_node, node,
				 &orig_node->neigh_list, list) {
		if (compare_orig(tmp_neigh_node->addr, ethhdr->h_source) &&
		    (tmp_neigh_node->if_incoming == if_incoming)) {
			neigh_node = tmp_neigh_node;
			continue;
		}

		if (is_duplicate)
			continue;

		/* the OGM did not come in via this neighbor: record a
		 * zero TQ sample in its ring buffer */
		ring_buffer_set(tmp_neigh_node->tq_recv,
				&tmp_neigh_node->tq_index, 0);
		tmp_neigh_node->tq_avg =
			ring_buffer_avg(tmp_neigh_node->tq_recv);
	}

	if (!neigh_node) {
		struct orig_node *orig_tmp;

		orig_tmp = get_orig_node(bat_priv, ethhdr->h_source);
		if (!orig_tmp)
			goto unlock;

		neigh_node = create_neighbor(orig_node, orig_tmp,
					     ethhdr->h_source, if_incoming);
		if (!neigh_node)
			goto unlock;
	} else
		bat_dbg(DBG_BATMAN, bat_priv,
			"Updating existing last-hop neighbor of originator\n");

	/* hold a reference while using neigh_node outside the RCU section */
	kref_get(&neigh_node->refcount);
	rcu_read_unlock();

	orig_node->flags = batman_packet->flags;
	neigh_node->last_valid = jiffies;

	ring_buffer_set(neigh_node->tq_recv,
			&neigh_node->tq_index,
			batman_packet->tq);
	neigh_node->tq_avg = ring_buffer_avg(neigh_node->tq_recv);

	if (!is_duplicate) {
		orig_node->last_ttl = batman_packet->ttl;
		neigh_node->last_ttl = batman_packet->ttl;
	}

	/* never trust more HNA data than the packet claims to carry */
	tmp_hna_buff_len = (hna_buff_len > batman_packet->num_hna * ETH_ALEN ?
			    batman_packet->num_hna * ETH_ALEN : hna_buff_len);

	/* if this neighbor already is our next hop there is nothing
	 * to change */
	if (orig_node->router == neigh_node)
		goto update_hna;

	/* if this neighbor does not offer a better TQ we won't consider it */
	if ((orig_node->router) &&
	    (orig_node->router->tq_avg > neigh_node->tq_avg))
		goto update_hna;

	/* if the TQ is the same and the link not more symetric we
	 * won't consider it either */
	if ((orig_node->router) &&
	    ((neigh_node->tq_avg == orig_node->router->tq_avg) &&
	     (orig_node->router->orig_node->bcast_own_sum[if_incoming->if_num]
	      >= neigh_node->orig_node->bcast_own_sum[if_incoming->if_num])))
		goto update_hna;

	update_routes(bat_priv, orig_node, neigh_node,
		      hna_buff, tmp_hna_buff_len);
	goto update_gw;

update_hna:
	/* keep the current router, only refresh the HNA entries */
	update_routes(bat_priv, orig_node, orig_node->router,
		      hna_buff, tmp_hna_buff_len);

update_gw:
	if (orig_node->gw_flags != batman_packet->gw_flags)
		gw_node_update(bat_priv, orig_node, batman_packet->gw_flags);

	orig_node->gw_flags = batman_packet->gw_flags;

	/* restart gateway selection if fast or late switching was enabled */
	if ((orig_node->gw_flags) &&
	    (atomic_read(&bat_priv->gw_mode) == GW_MODE_CLIENT) &&
	    (atomic_read(&bat_priv->gw_sel_class) > 2))
		gw_check_election(bat_priv, orig_node);

	goto out;

unlock:
	rcu_read_unlock();
out:
	if (neigh_node)
		kref_put(&neigh_node->refcount, neigh_node_free_ref);
}
  323. /* checks whether the host restarted and is in the protection time.
  324. * returns:
  325. * 0 if the packet is to be accepted
  326. * 1 if the packet is to be ignored.
  327. */
  328. static int window_protected(struct bat_priv *bat_priv,
  329. int32_t seq_num_diff,
  330. unsigned long *last_reset)
  331. {
  332. if ((seq_num_diff <= -TQ_LOCAL_WINDOW_SIZE)
  333. || (seq_num_diff >= EXPECTED_SEQNO_RANGE)) {
  334. if (time_after(jiffies, *last_reset +
  335. msecs_to_jiffies(RESET_PROTECTION_MS))) {
  336. *last_reset = jiffies;
  337. bat_dbg(DBG_BATMAN, bat_priv,
  338. "old packet received, start protection\n");
  339. return 0;
  340. } else
  341. return 1;
  342. }
  343. return 0;
  344. }
/* processes a batman packet for all interfaces, adjusts the sequence number
 * and finds out whether it is a duplicate.
 * returns:
 *   1 the packet is a duplicate
 *   0 the packet has not yet been received
 *  -1 the packet is old and has been received while the seqno window
 *     was protected. Caller should drop it.
 */
static char count_real_packets(struct ethhdr *ethhdr,
			       struct batman_packet *batman_packet,
			       struct batman_if *if_incoming)
{
	struct bat_priv *bat_priv = netdev_priv(if_incoming->soft_iface);
	struct orig_node *orig_node;
	struct neigh_node *tmp_neigh_node;
	struct hlist_node *node;
	char is_duplicate = 0;
	int32_t seq_diff;
	int need_update = 0;
	int set_mark;

	orig_node = get_orig_node(bat_priv, batman_packet->orig);
	if (!orig_node)
		return 0;

	seq_diff = batman_packet->seqno - orig_node->last_real_seqno;

	/* signalize caller that the packet is to be dropped. */
	if (window_protected(bat_priv, seq_diff,
			     &orig_node->batman_seqno_reset))
		return -1;

	rcu_read_lock();
	hlist_for_each_entry_rcu(tmp_neigh_node, node,
				 &orig_node->neigh_list, list) {
		/* duplicate if any neighbor already saw this seqno */
		is_duplicate |= get_bit_status(tmp_neigh_node->real_bits,
					       orig_node->last_real_seqno,
					       batman_packet->seqno);

		/* only mark the bit for the neighbor/interface the packet
		 * actually came through */
		if (compare_orig(tmp_neigh_node->addr, ethhdr->h_source) &&
		    (tmp_neigh_node->if_incoming == if_incoming))
			set_mark = 1;
		else
			set_mark = 0;

		/* if the window moved, set the update flag. */
		need_update |= bit_get_packet(bat_priv,
					      tmp_neigh_node->real_bits,
					      seq_diff, set_mark);
		tmp_neigh_node->real_packet_count =
			bit_packet_count(tmp_neigh_node->real_bits);
	}
	rcu_read_unlock();

	if (need_update) {
		bat_dbg(DBG_BATMAN, bat_priv,
			"updating last_seqno: old %d, new %d\n",
			orig_node->last_real_seqno, batman_packet->seqno);
		orig_node->last_real_seqno = batman_packet->seqno;
	}

	return is_duplicate;
}
  400. /* copy primary address for bonding */
  401. static void mark_bonding_address(struct orig_node *orig_node,
  402. struct orig_node *orig_neigh_node,
  403. struct batman_packet *batman_packet)
  404. {
  405. if (batman_packet->flags & PRIMARIES_FIRST_HOP)
  406. memcpy(orig_neigh_node->primary_addr,
  407. orig_node->orig, ETH_ALEN);
  408. return;
  409. }
/* mark possible bond.candidates in the neighbor list: neighbors that
 * share the router's primary address, have a TQ within
 * BONDING_TQ_THRESHOLD of the best route and do not interfere with
 * another candidate (same MAC or same incoming interface) are linked
 * into a circular next_bond_candidate list. */
void update_bonding_candidates(struct orig_node *orig_node)
{
	int candidates;
	int interference_candidate;
	int best_tq;
	struct hlist_node *node, *node2;
	struct neigh_node *tmp_neigh_node, *tmp_neigh_node2;
	struct neigh_node *first_candidate, *last_candidate;

	/* update the candidates for this originator */
	if (!orig_node->router) {
		orig_node->bond.candidates = 0;
		return;
	}

	best_tq = orig_node->router->tq_avg;

	/* update bond.candidates */
	candidates = 0;

	/* mark other nodes which also received "PRIMARIES FIRST HOP" packets
	 * as "bonding partner" */

	/* first, zero the list */
	rcu_read_lock();
	hlist_for_each_entry_rcu(tmp_neigh_node, node,
				 &orig_node->neigh_list, list) {
		tmp_neigh_node->next_bond_candidate = NULL;
	}
	rcu_read_unlock();

	first_candidate = NULL;
	last_candidate = NULL;

	rcu_read_lock();
	hlist_for_each_entry_rcu(tmp_neigh_node, node,
				 &orig_node->neigh_list, list) {

		/* only consider if it has the same primary address ... */
		if (memcmp(orig_node->orig,
			   tmp_neigh_node->orig_node->primary_addr,
			   ETH_ALEN) != 0)
			continue;

		/* ... and is good enough to be considered */
		if (tmp_neigh_node->tq_avg < best_tq - BONDING_TQ_THRESHOLD)
			continue;

		/* check if we have another candidate with the same
		 * mac address or interface. If we do, we won't
		 * select this candidate because of possible interference. */
		interference_candidate = 0;
		hlist_for_each_entry_rcu(tmp_neigh_node2, node2,
					 &orig_node->neigh_list, list) {

			if (tmp_neigh_node2 == tmp_neigh_node)
				continue;

			/* we only care if the other candidate is even
			 * considered as candidate. */
			if (!tmp_neigh_node2->next_bond_candidate)
				continue;

			if ((tmp_neigh_node->if_incoming ==
			     tmp_neigh_node2->if_incoming)
			    || (memcmp(tmp_neigh_node->addr,
				       tmp_neigh_node2->addr, ETH_ALEN) == 0)) {
				interference_candidate = 1;
				break;
			}
		}
		/* don't care further if it is an interference candidate */
		if (interference_candidate)
			continue;

		/* the first candidate temporarily points at itself (making
		 * it "considered" for the interference check above); the
		 * ring is closed after the loop */
		if (!first_candidate) {
			first_candidate = tmp_neigh_node;
			tmp_neigh_node->next_bond_candidate = first_candidate;
		} else
			tmp_neigh_node->next_bond_candidate = last_candidate;

		last_candidate = tmp_neigh_node;
		candidates++;
	}
	rcu_read_unlock();

	if (candidates > 0) {
		/* close the circular list */
		first_candidate->next_bond_candidate = last_candidate;
		orig_node->bond.selected = first_candidate;
	}

	orig_node->bond.candidates = candidates;
}
/* Main OGM (originator message) handler: validates a received batman
 * packet, updates the own-broadcast window for echoes of our own OGMs,
 * updates originator/routing state and rebroadcasts the packet where
 * appropriate. Called with orig_hash_lock held (via the aggregation
 * receive path). */
void receive_bat_packet(struct ethhdr *ethhdr,
			struct batman_packet *batman_packet,
			unsigned char *hna_buff, int hna_buff_len,
			struct batman_if *if_incoming)
{
	struct bat_priv *bat_priv = netdev_priv(if_incoming->soft_iface);
	struct batman_if *batman_if;
	struct orig_node *orig_neigh_node, *orig_node;
	char has_directlink_flag;
	char is_my_addr = 0, is_my_orig = 0, is_my_oldorig = 0;
	char is_broadcast = 0, is_bidirectional, is_single_hop_neigh;
	char is_duplicate;
	uint32_t if_incoming_seqno;

	/* Silently drop when the batman packet is actually not a
	 * correct packet.
	 *
	 * This might happen if a packet is padded (e.g. Ethernet has a
	 * minimum frame length of 64 byte) and the aggregation interprets
	 * it as an additional length.
	 *
	 * TODO: A more sane solution would be to have a bit in the
	 * batman_packet to detect whether the packet is the last
	 * packet in an aggregation.  Here we expect that the padding
	 * is always zero (or not 0x01)
	 */
	if (batman_packet->packet_type != BAT_PACKET)
		return;

	/* could be changed by schedule_own_packet() */
	if_incoming_seqno = atomic_read(&if_incoming->seqno);

	has_directlink_flag = (batman_packet->flags & DIRECTLINK ? 1 : 0);

	/* direct neighbors send OGMs with their own address as originator */
	is_single_hop_neigh = (compare_orig(ethhdr->h_source,
					    batman_packet->orig) ? 1 : 0);

	bat_dbg(DBG_BATMAN, bat_priv,
		"Received BATMAN packet via NB: %pM, IF: %s [%pM] "
		"(from OG: %pM, via prev OG: %pM, seqno %d, tq %d, "
		"TTL %d, V %d, IDF %d)\n",
		ethhdr->h_source, if_incoming->net_dev->name,
		if_incoming->net_dev->dev_addr, batman_packet->orig,
		batman_packet->prev_sender, batman_packet->seqno,
		batman_packet->tq, batman_packet->ttl, batman_packet->version,
		has_directlink_flag);

	/* classify the addresses against all our active interfaces that
	 * belong to the same mesh (soft interface) */
	rcu_read_lock();
	list_for_each_entry_rcu(batman_if, &if_list, list) {
		if (batman_if->if_status != IF_ACTIVE)
			continue;

		if (batman_if->soft_iface != if_incoming->soft_iface)
			continue;

		if (compare_orig(ethhdr->h_source,
				 batman_if->net_dev->dev_addr))
			is_my_addr = 1;

		if (compare_orig(batman_packet->orig,
				 batman_if->net_dev->dev_addr))
			is_my_orig = 1;

		if (compare_orig(batman_packet->prev_sender,
				 batman_if->net_dev->dev_addr))
			is_my_oldorig = 1;

		if (compare_orig(ethhdr->h_source, broadcast_addr))
			is_broadcast = 1;
	}
	rcu_read_unlock();

	if (batman_packet->version != COMPAT_VERSION) {
		bat_dbg(DBG_BATMAN, bat_priv,
			"Drop packet: incompatible batman version (%i)\n",
			batman_packet->version);
		return;
	}

	if (is_my_addr) {
		bat_dbg(DBG_BATMAN, bat_priv,
			"Drop packet: received my own broadcast (sender: %pM"
			")\n",
			ethhdr->h_source);
		return;
	}

	if (is_broadcast) {
		bat_dbg(DBG_BATMAN, bat_priv, "Drop packet: "
		"ignoring all packets with broadcast source addr (sender: %pM"
		")\n", ethhdr->h_source);
		return;
	}

	if (is_my_orig) {
		/* our own OGM came back: use it to update the own-broadcast
		 * window towards the echoing neighbor */
		unsigned long *word;
		int offset;

		orig_neigh_node = get_orig_node(bat_priv, ethhdr->h_source);
		if (!orig_neigh_node)
			return;

		/* neighbor has to indicate direct link and it has to
		 * come via the corresponding interface */
		/* if received seqno equals last send seqno save new
		 * seqno for bidirectional check */
		if (has_directlink_flag &&
		    compare_orig(if_incoming->net_dev->dev_addr,
				 batman_packet->orig) &&
		    (batman_packet->seqno - if_incoming_seqno + 2 == 0)) {
			offset = if_incoming->if_num * NUM_WORDS;
			word = &(orig_neigh_node->bcast_own[offset]);
			bit_mark(word, 0);
			orig_neigh_node->bcast_own_sum[if_incoming->if_num] =
				bit_packet_count(word);
		}

		bat_dbg(DBG_BATMAN, bat_priv, "Drop packet: "
			"originator packet from myself (via neighbor)\n");
		return;
	}

	if (is_my_oldorig) {
		bat_dbg(DBG_BATMAN, bat_priv,
			"Drop packet: ignoring all rebroadcast echos (sender: "
			"%pM)\n", ethhdr->h_source);
		return;
	}

	orig_node = get_orig_node(bat_priv, batman_packet->orig);
	if (!orig_node)
		return;

	is_duplicate = count_real_packets(ethhdr, batman_packet, if_incoming);

	if (is_duplicate == -1) {
		bat_dbg(DBG_BATMAN, bat_priv,
			"Drop packet: packet within seqno protection time "
			"(sender: %pM)\n", ethhdr->h_source);
		return;
	}

	if (batman_packet->tq == 0) {
		bat_dbg(DBG_BATMAN, bat_priv,
			"Drop packet: originator packet with tq equal 0\n");
		return;
	}

	/* avoid temporary routing loops */
	if ((orig_node->router) &&
	    (orig_node->router->orig_node->router) &&
	    (compare_orig(orig_node->router->addr,
			  batman_packet->prev_sender)) &&
	    !(compare_orig(batman_packet->orig, batman_packet->prev_sender)) &&
	    (compare_orig(orig_node->router->addr,
			  orig_node->router->orig_node->router->addr))) {
		bat_dbg(DBG_BATMAN, bat_priv,
			"Drop packet: ignoring all rebroadcast packets that "
			"may make me loop (sender: %pM)\n", ethhdr->h_source);
		return;
	}

	/* if sender is a direct neighbor the sender mac equals
	 * originator mac */
	orig_neigh_node = (is_single_hop_neigh ?
			   orig_node :
			   get_orig_node(bat_priv, ethhdr->h_source));
	if (!orig_neigh_node)
		return;

	/* drop packet if sender is not a direct neighbor and if we
	 * don't route towards it */
	if (!is_single_hop_neigh && (!orig_neigh_node->router)) {
		bat_dbg(DBG_BATMAN, bat_priv,
			"Drop packet: OGM via unknown neighbor!\n");
		return;
	}

	is_bidirectional = is_bidirectional_neigh(orig_node, orig_neigh_node,
						  batman_packet, if_incoming);

	/* update ranking if it is not a duplicate or has the same
	 * seqno and similar ttl as the non-duplicate */
	if (is_bidirectional &&
	    (!is_duplicate ||
	     ((orig_node->last_real_seqno == batman_packet->seqno) &&
	      (orig_node->last_ttl - 3 <= batman_packet->ttl))))
		update_orig(bat_priv, orig_node, ethhdr, batman_packet,
			    if_incoming, hna_buff, hna_buff_len, is_duplicate);

	mark_bonding_address(orig_node, orig_neigh_node, batman_packet);
	update_bonding_candidates(orig_node);

	/* is single hop (direct) neighbor */
	if (is_single_hop_neigh) {

		/* mark direct link on incoming interface */
		schedule_forward_packet(orig_node, ethhdr, batman_packet,
					1, hna_buff_len, if_incoming);

		bat_dbg(DBG_BATMAN, bat_priv, "Forwarding packet: "
			"rebroadcast neighbor packet with direct link flag\n");
		return;
	}

	/* multihop originator */
	if (!is_bidirectional) {
		bat_dbg(DBG_BATMAN, bat_priv,
			"Drop packet: not received via bidirectional link\n");
		return;
	}

	if (is_duplicate) {
		bat_dbg(DBG_BATMAN, bat_priv,
			"Drop packet: duplicate packet received\n");
		return;
	}

	bat_dbg(DBG_BATMAN, bat_priv,
		"Forwarding packet: rebroadcast originator packet\n");
	schedule_forward_packet(orig_node, ethhdr, batman_packet,
				0, hna_buff_len, if_incoming);
}
/* rx handler for batman OGM frames: validates the skb, makes it writable
 * and linear, then feeds it to the aggregation receive path under
 * orig_hash_lock. Consumes the skb on success.
 * Returns NET_RX_SUCCESS or NET_RX_DROP. */
int recv_bat_packet(struct sk_buff *skb, struct batman_if *batman_if)
{
	struct bat_priv *bat_priv = netdev_priv(batman_if->soft_iface);
	struct ethhdr *ethhdr;

	/* drop packet if it has not necessary minimum size */
	if (unlikely(!pskb_may_pull(skb, sizeof(struct batman_packet))))
		return NET_RX_DROP;

	ethhdr = (struct ethhdr *)skb_mac_header(skb);

	/* packet with broadcast indication but unicast recipient */
	if (!is_broadcast_ether_addr(ethhdr->h_dest))
		return NET_RX_DROP;

	/* packet with broadcast sender address */
	if (is_broadcast_ether_addr(ethhdr->h_source))
		return NET_RX_DROP;

	/* create a copy of the skb, if needed, to modify it. */
	if (skb_cow(skb, 0) < 0)
		return NET_RX_DROP;

	/* keep skb linear */
	if (skb_linearize(skb) < 0)
		return NET_RX_DROP;

	/* skb_cow()/skb_linearize() may have moved the data - re-read */
	ethhdr = (struct ethhdr *)skb_mac_header(skb);

	spin_lock_bh(&bat_priv->orig_hash_lock);
	receive_aggr_bat_packet(ethhdr,
				skb->data,
				skb_headlen(skb),
				batman_if);
	spin_unlock_bh(&bat_priv->orig_hash_lock);

	kfree_skb(skb);
	return NET_RX_SUCCESS;
}
/* Handle a batman ICMP packet whose final destination is this node.
 *
 * Anything that is not an echo request is delivered to the local batman
 * socket layer. Echo requests are rewritten into echo replies and sent
 * back towards the originator via its best next hop.
 *
 * Returns NET_RX_SUCCESS when a reply was transmitted (skb consumed by
 * send_skb_packet), NET_RX_DROP otherwise — presumably the caller frees
 * the skb on NET_RX_DROP; confirm against recv_icmp_packet's caller.
 */
static int recv_my_icmp_packet(struct bat_priv *bat_priv,
			       struct sk_buff *skb, size_t icmp_len)
{
	struct orig_node *orig_node;
	struct icmp_packet_rr *icmp_packet;
	struct batman_if *batman_if;
	int ret;
	uint8_t dstaddr[ETH_ALEN];

	icmp_packet = (struct icmp_packet_rr *)skb->data;

	/* not an echo request: add data to device queue (local socket) */
	if (icmp_packet->msg_type != ECHO_REQUEST) {
		bat_socket_receive_packet(icmp_packet, icmp_len);
		return NET_RX_DROP;
	}

	/* cannot answer without a primary interface address to reply from */
	if (!bat_priv->primary_if)
		return NET_RX_DROP;

	/* answer echo request (ping) */
	/* get routing information: look up the originator that pinged us */
	spin_lock_bh(&bat_priv->orig_hash_lock);
	rcu_read_lock();
	orig_node = ((struct orig_node *)hash_find(bat_priv->orig_hash,
						   compare_orig, choose_orig,
						   icmp_packet->orig));
	rcu_read_unlock();
	ret = NET_RX_DROP;

	if ((orig_node) && (orig_node->router)) {
		/* don't lock while sending the packets ... we therefore
		 * copy the required data before sending */
		batman_if = orig_node->router->if_incoming;
		memcpy(dstaddr, orig_node->router->addr, ETH_ALEN);
		spin_unlock_bh(&bat_priv->orig_hash_lock);

		/* create a copy of the skb, if needed, to modify it. */
		if (skb_cow(skb, sizeof(struct ethhdr)) < 0)
			return NET_RX_DROP;

		/* skb_cow() may have moved skb->data — re-read the header */
		icmp_packet = (struct icmp_packet_rr *)skb->data;

		/* turn the request into a reply: old originator becomes the
		 * destination, our primary address becomes the originator */
		memcpy(icmp_packet->dst, icmp_packet->orig, ETH_ALEN);
		memcpy(icmp_packet->orig,
		       bat_priv->primary_if->net_dev->dev_addr, ETH_ALEN);
		icmp_packet->msg_type = ECHO_REPLY;
		icmp_packet->ttl = TTL;

		send_skb_packet(skb, batman_if, dstaddr);
		ret = NET_RX_SUCCESS;

	} else
		/* no route back to the originator */
		spin_unlock_bh(&bat_priv->orig_hash_lock);

	return ret;
}
/* Answer an in-transit echo request whose TTL has run out.
 *
 * Mirrors recv_my_icmp_packet(): the packet is rewritten in place into a
 * TTL_EXCEEDED message addressed back to the originator (traceroute
 * support) and sent via the originator's best next hop. Non-echo-request
 * packets are only logged and dropped.
 *
 * Returns NET_RX_SUCCESS when the reply was transmitted, NET_RX_DROP
 * otherwise.
 */
static int recv_icmp_ttl_exceeded(struct bat_priv *bat_priv,
				  struct sk_buff *skb)
{
	struct orig_node *orig_node;
	struct icmp_packet *icmp_packet;
	struct batman_if *batman_if;
	int ret;
	uint8_t dstaddr[ETH_ALEN];

	icmp_packet = (struct icmp_packet *)skb->data;

	/* send TTL exceeded if packet is an echo request (traceroute) */
	if (icmp_packet->msg_type != ECHO_REQUEST) {
		pr_debug("Warning - can't forward icmp packet from %pM to "
			 "%pM: ttl exceeded\n", icmp_packet->orig,
			 icmp_packet->dst);
		return NET_RX_DROP;
	}

	/* need a primary interface address to reply from */
	if (!bat_priv->primary_if)
		return NET_RX_DROP;

	/* get routing information: best next hop towards the originator */
	spin_lock_bh(&bat_priv->orig_hash_lock);
	rcu_read_lock();
	orig_node = ((struct orig_node *)
		     hash_find(bat_priv->orig_hash, compare_orig, choose_orig,
			       icmp_packet->orig));
	rcu_read_unlock();
	ret = NET_RX_DROP;

	if ((orig_node) && (orig_node->router)) {
		/* don't lock while sending the packets ... we therefore
		 * copy the required data before sending */
		batman_if = orig_node->router->if_incoming;
		memcpy(dstaddr, orig_node->router->addr, ETH_ALEN);
		spin_unlock_bh(&bat_priv->orig_hash_lock);

		/* create a copy of the skb, if needed, to modify it. */
		if (skb_cow(skb, sizeof(struct ethhdr)) < 0)
			return NET_RX_DROP;

		/* skb_cow() may have moved skb->data — re-read the header */
		icmp_packet = (struct icmp_packet *) skb->data;

		/* bounce the packet back: originator becomes destination,
		 * our primary address becomes the originator */
		memcpy(icmp_packet->dst, icmp_packet->orig, ETH_ALEN);
		memcpy(icmp_packet->orig,
		       bat_priv->primary_if->net_dev->dev_addr, ETH_ALEN);
		icmp_packet->msg_type = TTL_EXCEEDED;
		icmp_packet->ttl = TTL;

		send_skb_packet(skb, batman_if, dstaddr);
		ret = NET_RX_SUCCESS;

	} else
		/* no route back to the originator */
		spin_unlock_bh(&bat_priv->orig_hash_lock);

	return ret;
}
/* Receive handler for batman ICMP packets.
 *
 * Validates the frame, records this node's address in the record-route
 * list (when the packet carries one and it is not full), then either
 * answers packets addressed to us, generates a TTL_EXCEEDED reply when
 * the TTL is about to expire, or forwards the packet one hop towards
 * its destination.
 *
 * Returns NET_RX_SUCCESS when the packet was answered or forwarded,
 * NET_RX_DROP otherwise.
 */
int recv_icmp_packet(struct sk_buff *skb, struct batman_if *recv_if)
{
	struct bat_priv *bat_priv = netdev_priv(recv_if->soft_iface);
	struct icmp_packet_rr *icmp_packet;
	struct ethhdr *ethhdr;
	struct orig_node *orig_node;
	struct batman_if *batman_if;
	int hdr_size = sizeof(struct icmp_packet);
	int ret;
	uint8_t dstaddr[ETH_ALEN];

	/**
	 * we truncate all incoming icmp packets if they don't match our size
	 */
	if (skb->len >= sizeof(struct icmp_packet_rr))
		hdr_size = sizeof(struct icmp_packet_rr);

	/* drop packet if it has not necessary minimum size */
	if (unlikely(!pskb_may_pull(skb, hdr_size)))
		return NET_RX_DROP;

	ethhdr = (struct ethhdr *)skb_mac_header(skb);

	/* packet with unicast indication but broadcast recipient */
	if (is_broadcast_ether_addr(ethhdr->h_dest))
		return NET_RX_DROP;

	/* packet with broadcast sender address */
	if (is_broadcast_ether_addr(ethhdr->h_source))
		return NET_RX_DROP;

	/* not for me */
	if (!is_my_mac(ethhdr->h_dest))
		return NET_RX_DROP;

	icmp_packet = (struct icmp_packet_rr *)skb->data;

	/* add record route information if not full: h_dest is our own MAC
	 * here (checked above), so we append ourselves to the route */
	if ((hdr_size == sizeof(struct icmp_packet_rr)) &&
	    (icmp_packet->rr_cur < BAT_RR_LEN)) {
		memcpy(&(icmp_packet->rr[icmp_packet->rr_cur]),
		       ethhdr->h_dest, ETH_ALEN);
		icmp_packet->rr_cur++;
	}

	/* packet for me */
	if (is_my_mac(icmp_packet->dst))
		return recv_my_icmp_packet(bat_priv, skb, hdr_size);

	/* TTL exceeded: would hit zero after our decrement below */
	if (icmp_packet->ttl < 2)
		return recv_icmp_ttl_exceeded(bat_priv, skb);

	ret = NET_RX_DROP;

	/* get routing information: best next hop towards the destination */
	spin_lock_bh(&bat_priv->orig_hash_lock);
	rcu_read_lock();
	orig_node = ((struct orig_node *)
		     hash_find(bat_priv->orig_hash, compare_orig, choose_orig,
			       icmp_packet->dst));
	rcu_read_unlock();

	if ((orig_node) && (orig_node->router)) {
		/* don't lock while sending the packets ... we therefore
		 * copy the required data before sending */
		batman_if = orig_node->router->if_incoming;
		memcpy(dstaddr, orig_node->router->addr, ETH_ALEN);
		spin_unlock_bh(&bat_priv->orig_hash_lock);

		/* create a copy of the skb, if needed, to modify it. */
		if (skb_cow(skb, sizeof(struct ethhdr)) < 0)
			return NET_RX_DROP;

		/* skb_cow() may have moved skb->data — re-read the header */
		icmp_packet = (struct icmp_packet_rr *)skb->data;

		/* decrement ttl */
		icmp_packet->ttl--;

		/* route it */
		send_skb_packet(skb, batman_if, dstaddr);
		ret = NET_RX_SUCCESS;

	} else
		spin_unlock_bh(&bat_priv->orig_hash_lock);

	return ret;
}
/* find a suitable router for this originator, and use
 * bonding if possible.
 *
 * recv_if is the interface the packet to be routed arrived on; it is
 * NULL on the first node (locally generated traffic). With bonding
 * enabled the candidates of the router's primary originator are used
 * in a round-robin fashion; with bonding disabled the best remaining
 * candidate not on recv_if is preferred. Falls back to the plain
 * orig_node->router whenever no bonding candidates are usable.
 *
 * NOTE(review): the caller appears to hold orig_hash_lock across this
 * call (see route_unicast_packet) — confirm; the returned neigh_node
 * is only safe to dereference under that lock.
 */
struct neigh_node *find_router(struct bat_priv *bat_priv,
			       struct orig_node *orig_node,
			       struct batman_if *recv_if)
{
	struct orig_node *primary_orig_node;
	struct orig_node *router_orig;
	struct neigh_node *router, *first_candidate, *best_router;
	static uint8_t zero_mac[ETH_ALEN] = {0, 0, 0, 0, 0, 0};
	int bonding_enabled;

	if (!orig_node)
		return NULL;

	if (!orig_node->router)
		return NULL;

	/* without bonding, the first node should
	 * always choose the default router. */
	bonding_enabled = atomic_read(&bat_priv->bonding);

	if ((!recv_if) && (!bonding_enabled))
		return orig_node->router;

	router_orig = orig_node->router->orig_node;

	/* if we have something in the primary_addr, we can search
	 * for a potential bonding candidate. */
	if (memcmp(router_orig->primary_addr, zero_mac, ETH_ALEN) == 0)
		return orig_node->router;

	/* find the orig_node which has the primary interface. might
	 * even be the same as our router_orig in many cases */
	if (memcmp(router_orig->primary_addr,
		   router_orig->orig, ETH_ALEN) == 0) {
		primary_orig_node = router_orig;
	} else {
		rcu_read_lock();
		primary_orig_node = hash_find(bat_priv->orig_hash, compare_orig,
					      choose_orig,
					      router_orig->primary_addr);
		rcu_read_unlock();

		if (!primary_orig_node)
			return orig_node->router;
	}

	/* with less than 2 candidates, we can't do any
	 * bonding and prefer the original router. */
	if (primary_orig_node->bond.candidates < 2)
		return orig_node->router;

	/* all nodes between should choose a candidate which
	 * is is not on the interface where the packet came
	 * in. */
	first_candidate = primary_orig_node->bond.selected;
	router = first_candidate;

	if (bonding_enabled) {
		/* in the bonding case, send the packets in a round
		 * robin fashion over the remaining interfaces.
		 * NOTE(review): if every candidate is on recv_if the loop
		 * wraps around and first_candidate (on recv_if) is used —
		 * confirm this is intended. */
		do {
			/* recv_if == NULL on the first node. */
			if (router->if_incoming != recv_if)
				break;

			router = router->next_bond_candidate;
		} while (router != first_candidate);

		primary_orig_node->bond.selected = router->next_bond_candidate;

	} else {
		/* if bonding is disabled, use the best of the
		 * remaining candidates which are not using
		 * this interface.
		 * NOTE(review): best_router starts as first_candidate even
		 * when first_candidate sits on recv_if, so a candidate on
		 * the incoming interface can still win the tq comparison —
		 * verify against upstream behavior. */
		best_router = first_candidate;

		do {
			/* recv_if == NULL on the first node. */
			if ((router->if_incoming != recv_if) &&
			    (router->tq_avg > best_router->tq_avg))
				best_router = router;

			router = router->next_bond_candidate;
		} while (router != first_candidate);

		router = best_router;
	}

	return router;
}
  941. static int check_unicast_packet(struct sk_buff *skb, int hdr_size)
  942. {
  943. struct ethhdr *ethhdr;
  944. /* drop packet if it has not necessary minimum size */
  945. if (unlikely(!pskb_may_pull(skb, hdr_size)))
  946. return -1;
  947. ethhdr = (struct ethhdr *)skb_mac_header(skb);
  948. /* packet with unicast indication but broadcast recipient */
  949. if (is_broadcast_ether_addr(ethhdr->h_dest))
  950. return -1;
  951. /* packet with broadcast sender address */
  952. if (is_broadcast_ether_addr(ethhdr->h_source))
  953. return -1;
  954. /* not for me */
  955. if (!is_my_mac(ethhdr->h_dest))
  956. return -1;
  957. return 0;
  958. }
/* Forward a unicast packet one hop towards its destination.
 *
 * Looks up the destination originator, picks a (possibly bonded) router
 * via find_router(), then transmits the packet on that router's
 * interface. Oversized BAT_UNICAST packets are fragmented; completed
 * BAT_UNICAST_FRAG packets are reassembled before forwarding.
 *
 * Returns NET_RX_SUCCESS when the packet was sent, buffered for a late
 * fragment merge, or handed to frag_send_skb(); NET_RX_DROP otherwise.
 */
int route_unicast_packet(struct sk_buff *skb, struct batman_if *recv_if,
			 int hdr_size)
{
	struct bat_priv *bat_priv = netdev_priv(recv_if->soft_iface);
	struct orig_node *orig_node;
	struct neigh_node *router;
	struct batman_if *batman_if;
	uint8_t dstaddr[ETH_ALEN];
	struct unicast_packet *unicast_packet;
	struct ethhdr *ethhdr = (struct ethhdr *)skb_mac_header(skb);
	int ret;
	struct sk_buff *new_skb;

	unicast_packet = (struct unicast_packet *)skb->data;

	/* TTL exceeded: would hit zero after our decrement below */
	if (unicast_packet->ttl < 2) {
		pr_debug("Warning - can't forward unicast packet from %pM to "
			 "%pM: ttl exceeded\n", ethhdr->h_source,
			 unicast_packet->dest);
		return NET_RX_DROP;
	}

	/* get routing information */
	spin_lock_bh(&bat_priv->orig_hash_lock);
	rcu_read_lock();
	orig_node = ((struct orig_node *)
		     hash_find(bat_priv->orig_hash, compare_orig, choose_orig,
			       unicast_packet->dest));
	rcu_read_unlock();

	/* find_router() is called under orig_hash_lock; it may return the
	 * plain router or a bonding candidate */
	router = find_router(bat_priv, orig_node, recv_if);

	if (!router) {
		spin_unlock_bh(&bat_priv->orig_hash_lock);
		return NET_RX_DROP;
	}

	/* don't lock while sending the packets ... we therefore
	 * copy the required data before sending */
	batman_if = router->if_incoming;
	memcpy(dstaddr, router->addr, ETH_ALEN);

	spin_unlock_bh(&bat_priv->orig_hash_lock);

	/* create a copy of the skb, if needed, to modify it. */
	if (skb_cow(skb, sizeof(struct ethhdr)) < 0)
		return NET_RX_DROP;

	/* skb_cow() may have moved skb->data — re-read the header */
	unicast_packet = (struct unicast_packet *)skb->data;

	/* packet too big for the next hop's MTU: fragment it and let
	 * frag_send_skb() take ownership of the skb */
	if (unicast_packet->packet_type == BAT_UNICAST &&
	    atomic_read(&bat_priv->fragmentation) &&
	    skb->len > batman_if->net_dev->mtu)
		return frag_send_skb(skb, bat_priv, batman_if,
				     dstaddr);

	/* fragment whose reassembled size fits the next hop's MTU:
	 * merge before forwarding */
	if (unicast_packet->packet_type == BAT_UNICAST_FRAG &&
	    frag_can_reassemble(skb, batman_if->net_dev->mtu)) {

		ret = frag_reassemble_skb(skb, bat_priv, &new_skb);

		if (ret == NET_RX_DROP)
			return NET_RX_DROP;

		/* packet was buffered for late merge */
		if (!new_skb)
			return NET_RX_SUCCESS;

		/* continue with the merged skb */
		skb = new_skb;
		unicast_packet = (struct unicast_packet *)skb->data;
	}

	/* decrement ttl */
	unicast_packet->ttl--;

	/* route it */
	send_skb_packet(skb, batman_if, dstaddr);
	return NET_RX_SUCCESS;
}
  1022. int recv_unicast_packet(struct sk_buff *skb, struct batman_if *recv_if)
  1023. {
  1024. struct unicast_packet *unicast_packet;
  1025. int hdr_size = sizeof(struct unicast_packet);
  1026. if (check_unicast_packet(skb, hdr_size) < 0)
  1027. return NET_RX_DROP;
  1028. unicast_packet = (struct unicast_packet *)skb->data;
  1029. /* packet for me */
  1030. if (is_my_mac(unicast_packet->dest)) {
  1031. interface_rx(recv_if->soft_iface, skb, recv_if, hdr_size);
  1032. return NET_RX_SUCCESS;
  1033. }
  1034. return route_unicast_packet(skb, recv_if, hdr_size);
  1035. }
  1036. int recv_ucast_frag_packet(struct sk_buff *skb, struct batman_if *recv_if)
  1037. {
  1038. struct bat_priv *bat_priv = netdev_priv(recv_if->soft_iface);
  1039. struct unicast_frag_packet *unicast_packet;
  1040. int hdr_size = sizeof(struct unicast_frag_packet);
  1041. struct sk_buff *new_skb = NULL;
  1042. int ret;
  1043. if (check_unicast_packet(skb, hdr_size) < 0)
  1044. return NET_RX_DROP;
  1045. unicast_packet = (struct unicast_frag_packet *)skb->data;
  1046. /* packet for me */
  1047. if (is_my_mac(unicast_packet->dest)) {
  1048. ret = frag_reassemble_skb(skb, bat_priv, &new_skb);
  1049. if (ret == NET_RX_DROP)
  1050. return NET_RX_DROP;
  1051. /* packet was buffered for late merge */
  1052. if (!new_skb)
  1053. return NET_RX_SUCCESS;
  1054. interface_rx(recv_if->soft_iface, new_skb, recv_if,
  1055. sizeof(struct unicast_packet));
  1056. return NET_RX_SUCCESS;
  1057. }
  1058. return route_unicast_packet(skb, recv_if, hdr_size);
  1059. }
/* Receive handler for batman broadcast packets.
 *
 * Validates the frame, rejects our own broadcasts, performs duplicate
 * and restart detection on the originator's broadcast sequence-number
 * window, then rebroadcasts the packet and delivers a copy to the soft
 * interface.
 *
 * Returns NET_RX_SUCCESS when the packet was accepted, NET_RX_DROP
 * otherwise.
 */
int recv_bcast_packet(struct sk_buff *skb, struct batman_if *recv_if)
{
	struct bat_priv *bat_priv = netdev_priv(recv_if->soft_iface);
	struct orig_node *orig_node;
	struct bcast_packet *bcast_packet;
	struct ethhdr *ethhdr;
	int hdr_size = sizeof(struct bcast_packet);
	int32_t seq_diff;

	/* drop packet if it has not necessary minimum size */
	if (unlikely(!pskb_may_pull(skb, hdr_size)))
		return NET_RX_DROP;

	ethhdr = (struct ethhdr *)skb_mac_header(skb);

	/* packet with broadcast indication but unicast recipient */
	if (!is_broadcast_ether_addr(ethhdr->h_dest))
		return NET_RX_DROP;

	/* packet with broadcast sender address */
	if (is_broadcast_ether_addr(ethhdr->h_source))
		return NET_RX_DROP;

	/* ignore broadcasts sent by myself */
	if (is_my_mac(ethhdr->h_source))
		return NET_RX_DROP;

	bcast_packet = (struct bcast_packet *)skb->data;

	/* ignore broadcasts originated by myself */
	if (is_my_mac(bcast_packet->orig))
		return NET_RX_DROP;

	/* TTL would expire on the next hop: stop flooding here */
	if (bcast_packet->ttl < 2)
		return NET_RX_DROP;

	spin_lock_bh(&bat_priv->orig_hash_lock);
	rcu_read_lock();
	orig_node = ((struct orig_node *)
		     hash_find(bat_priv->orig_hash, compare_orig, choose_orig,
			       bcast_packet->orig));
	rcu_read_unlock();

	/* unknown originator: we have no seqno window to check against */
	if (!orig_node) {
		spin_unlock_bh(&bat_priv->orig_hash_lock);
		return NET_RX_DROP;
	}

	/* check whether the packet is a duplicate */
	if (get_bit_status(orig_node->bcast_bits,
			   orig_node->last_bcast_seqno,
			   ntohl(bcast_packet->seqno))) {
		spin_unlock_bh(&bat_priv->orig_hash_lock);
		return NET_RX_DROP;
	}

	seq_diff = ntohl(bcast_packet->seqno) - orig_node->last_bcast_seqno;

	/* check whether the packet is old and the host just restarted. */
	if (window_protected(bat_priv, seq_diff,
			     &orig_node->bcast_seqno_reset)) {
		spin_unlock_bh(&bat_priv->orig_hash_lock);
		return NET_RX_DROP;
	}

	/* mark broadcast in flood history, update window position
	 * if required. */
	if (bit_get_packet(bat_priv, orig_node->bcast_bits, seq_diff, 1))
		orig_node->last_bcast_seqno = ntohl(bcast_packet->seqno);

	spin_unlock_bh(&bat_priv->orig_hash_lock);

	/* rebroadcast packet — presumably add_bcast_packet_to_list() copies
	 * the skb, since it is still used by interface_rx() below; confirm */
	add_bcast_packet_to_list(bat_priv, skb);

	/* broadcast for me */
	interface_rx(recv_if->soft_iface, skb, recv_if, hdr_size);

	return NET_RX_SUCCESS;
}
  1122. int recv_vis_packet(struct sk_buff *skb, struct batman_if *recv_if)
  1123. {
  1124. struct vis_packet *vis_packet;
  1125. struct ethhdr *ethhdr;
  1126. struct bat_priv *bat_priv = netdev_priv(recv_if->soft_iface);
  1127. int hdr_size = sizeof(struct vis_packet);
  1128. /* keep skb linear */
  1129. if (skb_linearize(skb) < 0)
  1130. return NET_RX_DROP;
  1131. if (unlikely(!pskb_may_pull(skb, hdr_size)))
  1132. return NET_RX_DROP;
  1133. vis_packet = (struct vis_packet *)skb->data;
  1134. ethhdr = (struct ethhdr *)skb_mac_header(skb);
  1135. /* not for me */
  1136. if (!is_my_mac(ethhdr->h_dest))
  1137. return NET_RX_DROP;
  1138. /* ignore own packets */
  1139. if (is_my_mac(vis_packet->vis_orig))
  1140. return NET_RX_DROP;
  1141. if (is_my_mac(vis_packet->sender_orig))
  1142. return NET_RX_DROP;
  1143. switch (vis_packet->vis_type) {
  1144. case VIS_TYPE_SERVER_SYNC:
  1145. receive_server_sync_packet(bat_priv, vis_packet,
  1146. skb_headlen(skb));
  1147. break;
  1148. case VIS_TYPE_CLIENT_UPDATE:
  1149. receive_client_update_packet(bat_priv, vis_packet,
  1150. skb_headlen(skb));
  1151. break;
  1152. default: /* ignore unknown packet */
  1153. break;
  1154. }
  1155. /* We take a copy of the data in the packet, so we should
  1156. always free the skbuf. */
  1157. return NET_RX_DROP;
  1158. }