/*
 * Copyright (C) 2007-2011 B.A.T.M.A.N. contributors:
 *
 * Marek Lindner, Simon Wunderlich
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
 * 02110-1301, USA
 *
 */
#include "main.h"
#include "routing.h"
#include "send.h"
#include "hash.h"
#include "soft-interface.h"
#include "hard-interface.h"
#include "icmp_socket.h"
#include "translation-table.h"
#include "originator.h"
#include "ring_buffer.h"
#include "vis.h"
#include "aggregation.h"
#include "gateway_common.h"
#include "gateway_client.h"
#include "unicast.h"
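
/* slide the own-OGM window of this interface one sequence number forward for
 * every originator and recount how many of our own OGMs it echoed back */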
void slide_own_bcast_window(struct batman_if *batman_if)
{
	struct bat_priv *bat_priv = netdev_priv(batman_if->soft_iface);
	struct hashtable_t *hash = bat_priv->orig_hash;
	struct hlist_node *walk;
	struct hlist_head *head;
	struct element_t *bucket;
	struct orig_node *orig_node;
	unsigned long *word;
	int i;
	size_t word_index;

	spin_lock_bh(&bat_priv->orig_hash_lock);

	for (i = 0; i < hash->size; i++) {
		head = &hash->table[i];

		hlist_for_each_entry(bucket, walk, head, hlist) {
			orig_node = bucket->data;
			word_index = batman_if->if_num * NUM_WORDS;
			word = &(orig_node->bcast_own[word_index]);

			bit_get_packet(bat_priv, word, 1, 0);
			orig_node->bcast_own_sum[batman_if->if_num] =
				bit_packet_count(word);
		}
	}

	spin_unlock_bh(&bat_priv->orig_hash_lock);
}
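
/* replace the originator's announced HNA entries in the global table
 * whenever the received HNA buffer differs from the stored one */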
static void update_HNA(struct bat_priv *bat_priv, struct orig_node *orig_node,
		       unsigned char *hna_buff, int hna_buff_len)
{
	if ((hna_buff_len != orig_node->hna_buff_len) ||
	    ((hna_buff_len > 0) &&
	     (orig_node->hna_buff_len > 0) &&
	     (memcmp(orig_node->hna_buff, hna_buff, hna_buff_len) != 0))) {

		if (orig_node->hna_buff_len > 0)
			hna_global_del_orig(bat_priv, orig_node,
					    "originator changed hna");

		if ((hna_buff_len > 0) && (hna_buff))
			hna_global_add_orig(bat_priv, orig_node,
					    hna_buff, hna_buff_len);
	}
}
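
/* switch the next hop (router) towards this originator and keep the global
 * HNA entries in sync: a deleted route purges them, a new route adds them */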
static void update_route(struct bat_priv *bat_priv,
			 struct orig_node *orig_node,
			 struct neigh_node *neigh_node,
			 unsigned char *hna_buff, int hna_buff_len)
{
	/* route deleted */
	if ((orig_node->router) && (!neigh_node)) {

		bat_dbg(DBG_ROUTES, bat_priv, "Deleting route towards: %pM\n",
			orig_node->orig);
		hna_global_del_orig(bat_priv, orig_node,
				    "originator timed out");

	/* route added */
	} else if ((!orig_node->router) && (neigh_node)) {

		bat_dbg(DBG_ROUTES, bat_priv,
			"Adding route towards: %pM (via %pM)\n",
			orig_node->orig, neigh_node->addr);
		hna_global_add_orig(bat_priv, orig_node,
				    hna_buff, hna_buff_len);

	/* route changed */
	} else {
		bat_dbg(DBG_ROUTES, bat_priv,
			"Changing route towards: %pM "
			"(now via %pM - was via %pM)\n",
			orig_node->orig, neigh_node->addr,
			orig_node->router->addr);
	}

	orig_node->router = neigh_node;
}
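
/* apply a new next hop if it differs from the current one; otherwise only
 * the HNA buffer may have changed and is refreshed */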
void update_routes(struct bat_priv *bat_priv, struct orig_node *orig_node,
		   struct neigh_node *neigh_node, unsigned char *hna_buff,
		   int hna_buff_len)
{
	if (!orig_node)
		return;

	if (orig_node->router != neigh_node)
		update_route(bat_priv, orig_node, neigh_node,
			     hna_buff, hna_buff_len);
	/* may be just HNA changed */
	else
		update_HNA(bat_priv, orig_node, hna_buff, hna_buff_len);
}
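
/* check whether the link to this neighbor is usable in both directions:
 * combine the count of our own OGMs echoed back with the packets received
 * from the neighbor into a tq value and compare it against the limit */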
static int is_bidirectional_neigh(struct orig_node *orig_node,
				  struct orig_node *orig_neigh_node,
				  struct batman_packet *batman_packet,
				  struct batman_if *if_incoming)
{
	struct bat_priv *bat_priv = netdev_priv(if_incoming->soft_iface);
	struct neigh_node *neigh_node = NULL, *tmp_neigh_node = NULL;
	unsigned char total_count;

	if (orig_node == orig_neigh_node) {
		list_for_each_entry(tmp_neigh_node,
				    &orig_node->neigh_list,
				    list) {

			if (compare_orig(tmp_neigh_node->addr,
					 orig_neigh_node->orig) &&
			    (tmp_neigh_node->if_incoming == if_incoming))
				neigh_node = tmp_neigh_node;
		}

		if (!neigh_node)
			neigh_node = create_neighbor(orig_node,
						     orig_neigh_node,
						     orig_neigh_node->orig,
						     if_incoming);
		/* create_neighbor failed, return 0 */
		if (!neigh_node)
			return 0;

		neigh_node->last_valid = jiffies;
	} else {
		/* find packet count of corresponding one hop neighbor */
		list_for_each_entry(tmp_neigh_node,
				    &orig_neigh_node->neigh_list, list) {

			if (compare_orig(tmp_neigh_node->addr,
					 orig_neigh_node->orig) &&
			    (tmp_neigh_node->if_incoming == if_incoming))
				neigh_node = tmp_neigh_node;
		}

		if (!neigh_node)
			neigh_node = create_neighbor(orig_neigh_node,
						     orig_neigh_node,
						     orig_neigh_node->orig,
						     if_incoming);
		/* create_neighbor failed, return 0 */
		if (!neigh_node)
			return 0;
	}

	orig_node->last_valid = jiffies;

	/* pay attention to not get a value bigger than 100 % */
	total_count = (orig_neigh_node->bcast_own_sum[if_incoming->if_num] >
		       neigh_node->real_packet_count ?
		       neigh_node->real_packet_count :
		       orig_neigh_node->bcast_own_sum[if_incoming->if_num]);

	/* if we have too few packets (too little data) we set tq_own to zero */
	/* if we receive too few packets it is not considered bidirectional */
	if ((total_count < TQ_LOCAL_BIDRECT_SEND_MINIMUM) ||
	    (neigh_node->real_packet_count < TQ_LOCAL_BIDRECT_RECV_MINIMUM))
		orig_neigh_node->tq_own = 0;
	else
		/* neigh_node->real_packet_count is never zero as we
		 * only purge old information when getting new
		 * information */
		orig_neigh_node->tq_own = (TQ_MAX_VALUE * total_count) /
			neigh_node->real_packet_count;

	/*
	 * 1 - ((1-x) ** 3), normalized to TQ_MAX_VALUE: this
	 * affects the nearly-symmetric links only a little, but
	 * punishes asymmetric links more. This will give a value
	 * between 0 and TQ_MAX_VALUE
	 */
	orig_neigh_node->tq_asym_penalty =
		TQ_MAX_VALUE -
		(TQ_MAX_VALUE *
		 (TQ_LOCAL_WINDOW_SIZE - neigh_node->real_packet_count) *
		 (TQ_LOCAL_WINDOW_SIZE - neigh_node->real_packet_count) *
		 (TQ_LOCAL_WINDOW_SIZE - neigh_node->real_packet_count)) /
		(TQ_LOCAL_WINDOW_SIZE *
		 TQ_LOCAL_WINDOW_SIZE *
		 TQ_LOCAL_WINDOW_SIZE);

	batman_packet->tq = ((batman_packet->tq *
			      orig_neigh_node->tq_own *
			      orig_neigh_node->tq_asym_penalty) /
			     (TQ_MAX_VALUE * TQ_MAX_VALUE));

	bat_dbg(DBG_BATMAN, bat_priv,
		"bidirectional: "
		"orig = %-15pM neigh = %-15pM => own_bcast = %2i, "
		"real recv = %2i, local tq: %3i, asym_penalty: %3i, "
		"total tq: %3i\n",
		orig_node->orig, orig_neigh_node->orig, total_count,
		neigh_node->real_packet_count, orig_neigh_node->tq_own,
		orig_neigh_node->tq_asym_penalty, batman_packet->tq);

	/* if link has the minimum required transmission quality
	 * consider it bidirectional */
	if (batman_packet->tq >= TQ_TOTAL_BIDRECT_LIMIT)
		return 1;

	return 0;
}
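
/* record the OGM in the originator table: refresh the last-hop neighbor,
 * its tq ring buffer and TTL, and switch routes when the neighbor offers a
 * better (or equally good but more symmetric) path */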
static void update_orig(struct bat_priv *bat_priv,
			struct orig_node *orig_node,
			struct ethhdr *ethhdr,
			struct batman_packet *batman_packet,
			struct batman_if *if_incoming,
			unsigned char *hna_buff, int hna_buff_len,
			char is_duplicate)
{
	struct neigh_node *neigh_node = NULL, *tmp_neigh_node = NULL;
	int tmp_hna_buff_len;

	bat_dbg(DBG_BATMAN, bat_priv, "update_originator(): "
		"Searching and updating originator entry of received packet\n");

	list_for_each_entry(tmp_neigh_node, &orig_node->neigh_list, list) {
		if (compare_orig(tmp_neigh_node->addr, ethhdr->h_source) &&
		    (tmp_neigh_node->if_incoming == if_incoming)) {
			neigh_node = tmp_neigh_node;
			continue;
		}

		if (is_duplicate)
			continue;

		ring_buffer_set(tmp_neigh_node->tq_recv,
				&tmp_neigh_node->tq_index, 0);
		tmp_neigh_node->tq_avg =
			ring_buffer_avg(tmp_neigh_node->tq_recv);
	}

	if (!neigh_node) {
		struct orig_node *orig_tmp;

		orig_tmp = get_orig_node(bat_priv, ethhdr->h_source);
		if (!orig_tmp)
			return;

		neigh_node = create_neighbor(orig_node, orig_tmp,
					     ethhdr->h_source, if_incoming);
		if (!neigh_node)
			return;
	} else
		bat_dbg(DBG_BATMAN, bat_priv,
			"Updating existing last-hop neighbor of originator\n");

	orig_node->flags = batman_packet->flags;
	neigh_node->last_valid = jiffies;

	ring_buffer_set(neigh_node->tq_recv,
			&neigh_node->tq_index,
			batman_packet->tq);
	neigh_node->tq_avg = ring_buffer_avg(neigh_node->tq_recv);

	if (!is_duplicate) {
		orig_node->last_ttl = batman_packet->ttl;
		neigh_node->last_ttl = batman_packet->ttl;
	}

	tmp_hna_buff_len = (hna_buff_len > batman_packet->num_hna * ETH_ALEN ?
			    batman_packet->num_hna * ETH_ALEN : hna_buff_len);

	/* if this neighbor already is our next hop there is nothing
	 * to change */
	if (orig_node->router == neigh_node)
		goto update_hna;

	/* if this neighbor does not offer a better TQ we won't consider it */
	if ((orig_node->router) &&
	    (orig_node->router->tq_avg > neigh_node->tq_avg))
		goto update_hna;

	/* if the TQ is the same and the link is not more symmetric we
	 * won't consider it either */
	if ((orig_node->router) &&
	    ((neigh_node->tq_avg == orig_node->router->tq_avg) &&
	     (orig_node->router->orig_node->bcast_own_sum[if_incoming->if_num]
	      >= neigh_node->orig_node->bcast_own_sum[if_incoming->if_num])))
		goto update_hna;

	update_routes(bat_priv, orig_node, neigh_node,
		      hna_buff, tmp_hna_buff_len);
	goto update_gw;

update_hna:
	update_routes(bat_priv, orig_node, orig_node->router,
		      hna_buff, tmp_hna_buff_len);

update_gw:
	if (orig_node->gw_flags != batman_packet->gw_flags)
		gw_node_update(bat_priv, orig_node, batman_packet->gw_flags);

	orig_node->gw_flags = batman_packet->gw_flags;

	/* restart gateway selection if fast or late switching was enabled */
	if ((orig_node->gw_flags) &&
	    (atomic_read(&bat_priv->gw_mode) == GW_MODE_CLIENT) &&
	    (atomic_read(&bat_priv->gw_sel_class) > 2))
		gw_check_election(bat_priv, orig_node);
}

/* checks whether the host restarted and is in the protection time.
 * returns:
 *  0 if the packet is to be accepted
 *  1 if the packet is to be ignored.
 */
static int window_protected(struct bat_priv *bat_priv,
			    int32_t seq_num_diff,
			    unsigned long *last_reset)
{
	if ((seq_num_diff <= -TQ_LOCAL_WINDOW_SIZE)
	    || (seq_num_diff >= EXPECTED_SEQNO_RANGE)) {
		if (time_after(jiffies, *last_reset +
			       msecs_to_jiffies(RESET_PROTECTION_MS))) {

			*last_reset = jiffies;
			bat_dbg(DBG_BATMAN, bat_priv,
				"old packet received, start protection\n");

			return 0;
		} else
			return 1;
	}
	return 0;
}

/* processes a batman packet for all interfaces, adjusts the sequence number
 * and finds out whether it is a duplicate.
 * returns:
 *   1 the packet is a duplicate
 *   0 the packet has not yet been received
 *  -1 the packet is old and has been received while the seqno window
 *     was protected. Caller should drop it.
 */
static char count_real_packets(struct ethhdr *ethhdr,
			       struct batman_packet *batman_packet,
			       struct batman_if *if_incoming)
{
	struct bat_priv *bat_priv = netdev_priv(if_incoming->soft_iface);
	struct orig_node *orig_node;
	struct neigh_node *tmp_neigh_node;
	char is_duplicate = 0;
	int32_t seq_diff;
	int need_update = 0;
	int set_mark;

	orig_node = get_orig_node(bat_priv, batman_packet->orig);
	if (!orig_node)
		return 0;

	seq_diff = batman_packet->seqno - orig_node->last_real_seqno;

	/* signal the caller that the packet is to be dropped. */
	if (window_protected(bat_priv, seq_diff,
			     &orig_node->batman_seqno_reset))
		return -1;

	list_for_each_entry(tmp_neigh_node, &orig_node->neigh_list, list) {

		is_duplicate |= get_bit_status(tmp_neigh_node->real_bits,
					       orig_node->last_real_seqno,
					       batman_packet->seqno);

		if (compare_orig(tmp_neigh_node->addr, ethhdr->h_source) &&
		    (tmp_neigh_node->if_incoming == if_incoming))
			set_mark = 1;
		else
			set_mark = 0;

		/* if the window moved, set the update flag. */
		need_update |= bit_get_packet(bat_priv,
					      tmp_neigh_node->real_bits,
					      seq_diff, set_mark);

		tmp_neigh_node->real_packet_count =
			bit_packet_count(tmp_neigh_node->real_bits);
	}

	if (need_update) {
		bat_dbg(DBG_BATMAN, bat_priv,
			"updating last_seqno: old %d, new %d\n",
			orig_node->last_real_seqno, batman_packet->seqno);
		orig_node->last_real_seqno = batman_packet->seqno;
	}

	return is_duplicate;
}

/* copy primary address for bonding */
static void mark_bonding_address(struct orig_node *orig_node,
				 struct orig_node *orig_neigh_node,
				 struct batman_packet *batman_packet)
{
	if (batman_packet->flags & PRIMARIES_FIRST_HOP)
		memcpy(orig_neigh_node->primary_addr,
		       orig_node->orig, ETH_ALEN);

	return;
}

/* mark possible bond.candidates in the neighbor list */
void update_bonding_candidates(struct orig_node *orig_node)
{
	int candidates;
	int interference_candidate;
	int best_tq;
	struct neigh_node *tmp_neigh_node, *tmp_neigh_node2;
	struct neigh_node *first_candidate, *last_candidate;

	/* update the candidates for this originator */
	if (!orig_node->router) {
		orig_node->bond.candidates = 0;
		return;
	}

	best_tq = orig_node->router->tq_avg;

	/* update bond.candidates */
	candidates = 0;

	/* mark other nodes which also received "PRIMARIES FIRST HOP" packets
	 * as "bonding partner" */

	/* first, zero the list */
	list_for_each_entry(tmp_neigh_node, &orig_node->neigh_list, list) {
		tmp_neigh_node->next_bond_candidate = NULL;
	}

	first_candidate = NULL;
	last_candidate = NULL;
	list_for_each_entry(tmp_neigh_node, &orig_node->neigh_list, list) {

		/* only consider if it has the same primary address ...  */
		if (memcmp(orig_node->orig,
			   tmp_neigh_node->orig_node->primary_addr,
			   ETH_ALEN) != 0)
			continue;

		/* ... and is good enough to be considered */
		if (tmp_neigh_node->tq_avg < best_tq - BONDING_TQ_THRESHOLD)
			continue;

		/* check if we have another candidate with the same
		 * mac address or interface. If we do, we won't
		 * select this candidate because of possible interference. */
		interference_candidate = 0;
		list_for_each_entry(tmp_neigh_node2,
				    &orig_node->neigh_list, list) {

			if (tmp_neigh_node2 == tmp_neigh_node)
				continue;

			/* we only care if the other candidate is even
			 * considered as candidate. */
			if (!tmp_neigh_node2->next_bond_candidate)
				continue;

			if ((tmp_neigh_node->if_incoming ==
			     tmp_neigh_node2->if_incoming)
			    || (memcmp(tmp_neigh_node->addr,
				       tmp_neigh_node2->addr, ETH_ALEN) == 0)) {
				interference_candidate = 1;
				break;
			}
		}
		/* don't care further if it is an interference candidate */
		if (interference_candidate)
			continue;

		if (!first_candidate) {
			first_candidate = tmp_neigh_node;
			tmp_neigh_node->next_bond_candidate = first_candidate;
		} else
			tmp_neigh_node->next_bond_candidate = last_candidate;

		last_candidate = tmp_neigh_node;
		candidates++;
	}

	if (candidates > 0) {
		first_candidate->next_bond_candidate = last_candidate;
		orig_node->bond.selected = first_candidate;
	}

	orig_node->bond.candidates = candidates;
}
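
/* handle a single (possibly de-aggregated) OGM: classify the sender, run the
 * duplicate and bidirectional checks, update the originator table and decide
 * whether the packet gets rebroadcast */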
void receive_bat_packet(struct ethhdr *ethhdr,
			struct batman_packet *batman_packet,
			unsigned char *hna_buff, int hna_buff_len,
			struct batman_if *if_incoming)
{
	struct bat_priv *bat_priv = netdev_priv(if_incoming->soft_iface);
	struct batman_if *batman_if;
	struct orig_node *orig_neigh_node, *orig_node;
	char has_directlink_flag;
	char is_my_addr = 0, is_my_orig = 0, is_my_oldorig = 0;
	char is_broadcast = 0, is_bidirectional, is_single_hop_neigh;
	char is_duplicate;
	uint32_t if_incoming_seqno;

	/* Silently drop when the batman packet is actually not a
	 * correct packet.
	 *
	 * This might happen if a packet is padded (e.g. Ethernet has a
	 * minimum frame length of 64 byte) and the aggregation interprets
	 * it as an additional length.
	 *
	 * TODO: A more sane solution would be to have a bit in the
	 * batman_packet to detect whether the packet is the last
	 * packet in an aggregation. Here we expect that the padding
	 * is always zero (or not 0x01)
	 */
	if (batman_packet->packet_type != BAT_PACKET)
		return;

	/* could be changed by schedule_own_packet() */
	if_incoming_seqno = atomic_read(&if_incoming->seqno);

	has_directlink_flag = (batman_packet->flags & DIRECTLINK ? 1 : 0);

	is_single_hop_neigh = (compare_orig(ethhdr->h_source,
					    batman_packet->orig) ? 1 : 0);

	bat_dbg(DBG_BATMAN, bat_priv,
		"Received BATMAN packet via NB: %pM, IF: %s [%pM] "
		"(from OG: %pM, via prev OG: %pM, seqno %d, tq %d, "
		"TTL %d, V %d, IDF %d)\n",
		ethhdr->h_source, if_incoming->net_dev->name,
		if_incoming->net_dev->dev_addr, batman_packet->orig,
		batman_packet->prev_sender, batman_packet->seqno,
		batman_packet->tq, batman_packet->ttl, batman_packet->version,
		has_directlink_flag);

	rcu_read_lock();
	list_for_each_entry_rcu(batman_if, &if_list, list) {
		if (batman_if->if_status != IF_ACTIVE)
			continue;

		if (batman_if->soft_iface != if_incoming->soft_iface)
			continue;

		if (compare_orig(ethhdr->h_source,
				 batman_if->net_dev->dev_addr))
			is_my_addr = 1;

		if (compare_orig(batman_packet->orig,
				 batman_if->net_dev->dev_addr))
			is_my_orig = 1;

		if (compare_orig(batman_packet->prev_sender,
				 batman_if->net_dev->dev_addr))
			is_my_oldorig = 1;

		if (compare_orig(ethhdr->h_source, broadcast_addr))
			is_broadcast = 1;
	}
	rcu_read_unlock();

	if (batman_packet->version != COMPAT_VERSION) {
		bat_dbg(DBG_BATMAN, bat_priv,
			"Drop packet: incompatible batman version (%i)\n",
			batman_packet->version);
		return;
	}

	if (is_my_addr) {
		bat_dbg(DBG_BATMAN, bat_priv,
			"Drop packet: received my own broadcast (sender: %pM"
			")\n",
			ethhdr->h_source);
		return;
	}

	if (is_broadcast) {
		bat_dbg(DBG_BATMAN, bat_priv, "Drop packet: "
			"ignoring all packets with broadcast source addr (sender: %pM"
			")\n", ethhdr->h_source);
		return;
	}

	if (is_my_orig) {
		unsigned long *word;
		int offset;

		orig_neigh_node = get_orig_node(bat_priv, ethhdr->h_source);
		if (!orig_neigh_node)
			return;

		/* neighbor has to indicate direct link and it has to
		 * come via the corresponding interface */
		/* if received seqno equals last sent seqno save new
		 * seqno for bidirectional check */
		if (has_directlink_flag &&
		    compare_orig(if_incoming->net_dev->dev_addr,
				 batman_packet->orig) &&
		    (batman_packet->seqno - if_incoming_seqno + 2 == 0)) {
			offset = if_incoming->if_num * NUM_WORDS;
			word = &(orig_neigh_node->bcast_own[offset]);
			bit_mark(word, 0);
			orig_neigh_node->bcast_own_sum[if_incoming->if_num] =
				bit_packet_count(word);
		}

		bat_dbg(DBG_BATMAN, bat_priv, "Drop packet: "
			"originator packet from myself (via neighbor)\n");
		return;
	}

	if (is_my_oldorig) {
		bat_dbg(DBG_BATMAN, bat_priv,
			"Drop packet: ignoring all rebroadcast echos (sender: "
			"%pM)\n", ethhdr->h_source);
		return;
	}

	orig_node = get_orig_node(bat_priv, batman_packet->orig);
	if (!orig_node)
		return;

	is_duplicate = count_real_packets(ethhdr, batman_packet, if_incoming);

	if (is_duplicate == -1) {
		bat_dbg(DBG_BATMAN, bat_priv,
			"Drop packet: packet within seqno protection time "
			"(sender: %pM)\n", ethhdr->h_source);
		return;
	}

	if (batman_packet->tq == 0) {
		bat_dbg(DBG_BATMAN, bat_priv,
			"Drop packet: originator packet with tq equal 0\n");
		return;
	}

	/* avoid temporary routing loops */
	if ((orig_node->router) &&
	    (orig_node->router->orig_node->router) &&
	    (compare_orig(orig_node->router->addr,
			  batman_packet->prev_sender)) &&
	    !(compare_orig(batman_packet->orig, batman_packet->prev_sender)) &&
	    (compare_orig(orig_node->router->addr,
			  orig_node->router->orig_node->router->addr))) {
		bat_dbg(DBG_BATMAN, bat_priv,
			"Drop packet: ignoring all rebroadcast packets that "
			"may make me loop (sender: %pM)\n", ethhdr->h_source);
		return;
	}

	/* if sender is a direct neighbor the sender mac equals
	 * originator mac */
	orig_neigh_node = (is_single_hop_neigh ?
			   orig_node :
			   get_orig_node(bat_priv, ethhdr->h_source));
	if (!orig_neigh_node)
		return;

	/* drop packet if sender is not a direct neighbor and if we
	 * don't route towards it */
	if (!is_single_hop_neigh && (!orig_neigh_node->router)) {
		bat_dbg(DBG_BATMAN, bat_priv,
			"Drop packet: OGM via unknown neighbor!\n");
		return;
	}

	is_bidirectional = is_bidirectional_neigh(orig_node, orig_neigh_node,
						  batman_packet, if_incoming);

	/* update ranking if it is not a duplicate or has the same
	 * seqno and similar ttl as the non-duplicate */
	if (is_bidirectional &&
	    (!is_duplicate ||
	     ((orig_node->last_real_seqno == batman_packet->seqno) &&
	      (orig_node->last_ttl - 3 <= batman_packet->ttl))))
		update_orig(bat_priv, orig_node, ethhdr, batman_packet,
			    if_incoming, hna_buff, hna_buff_len, is_duplicate);

	mark_bonding_address(orig_node, orig_neigh_node, batman_packet);
	update_bonding_candidates(orig_node);

	/* is single hop (direct) neighbor */
	if (is_single_hop_neigh) {

		/* mark direct link on incoming interface */
		schedule_forward_packet(orig_node, ethhdr, batman_packet,
					1, hna_buff_len, if_incoming);

		bat_dbg(DBG_BATMAN, bat_priv, "Forwarding packet: "
			"rebroadcast neighbor packet with direct link flag\n");
		return;
	}

	/* multihop originator */
	if (!is_bidirectional) {
		bat_dbg(DBG_BATMAN, bat_priv,
			"Drop packet: not received via bidirectional link\n");
		return;
	}

	if (is_duplicate) {
		bat_dbg(DBG_BATMAN, bat_priv,
			"Drop packet: duplicate packet received\n");
		return;
	}

	bat_dbg(DBG_BATMAN, bat_priv,
		"Forwarding packet: rebroadcast originator packet\n");
	schedule_forward_packet(orig_node, ethhdr, batman_packet,
				0, hna_buff_len, if_incoming);
}
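
/* receive path entry point for OGMs: sanity-check the frame, keep the skb
 * linear and hand it to the aggregation code under the originator hash lock */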
int recv_bat_packet(struct sk_buff *skb, struct batman_if *batman_if)
{
	struct bat_priv *bat_priv = netdev_priv(batman_if->soft_iface);
	struct ethhdr *ethhdr;

	/* drop packet if it doesn't have the necessary minimum size */
	if (unlikely(!pskb_may_pull(skb, sizeof(struct batman_packet))))
		return NET_RX_DROP;

	ethhdr = (struct ethhdr *)skb_mac_header(skb);

	/* packet with broadcast indication but unicast recipient */
	if (!is_broadcast_ether_addr(ethhdr->h_dest))
		return NET_RX_DROP;

	/* packet with broadcast sender address */
	if (is_broadcast_ether_addr(ethhdr->h_source))
		return NET_RX_DROP;

	/* create a copy of the skb, if needed, to modify it. */
	if (skb_cow(skb, 0) < 0)
		return NET_RX_DROP;

	/* keep skb linear */
	if (skb_linearize(skb) < 0)
		return NET_RX_DROP;

	ethhdr = (struct ethhdr *)skb_mac_header(skb);

	spin_lock_bh(&bat_priv->orig_hash_lock);
	receive_aggr_bat_packet(ethhdr,
				skb->data,
				skb_headlen(skb),
				batman_if);
	spin_unlock_bh(&bat_priv->orig_hash_lock);

	kfree_skb(skb);
	return NET_RX_SUCCESS;
}
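
/* the ICMP packet is addressed to this node: pass anything that is not an
 * echo request to the userspace socket, answer echo requests directly */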
static int recv_my_icmp_packet(struct bat_priv *bat_priv,
			       struct sk_buff *skb, size_t icmp_len)
{
	struct orig_node *orig_node;
	struct icmp_packet_rr *icmp_packet;
	struct batman_if *batman_if;
	int ret;
	uint8_t dstaddr[ETH_ALEN];

	icmp_packet = (struct icmp_packet_rr *)skb->data;

	/* add data to device queue */
	if (icmp_packet->msg_type != ECHO_REQUEST) {
		bat_socket_receive_packet(icmp_packet, icmp_len);
		return NET_RX_DROP;
	}

	if (!bat_priv->primary_if)
		return NET_RX_DROP;

	/* answer echo request (ping) */
	/* get routing information */
	spin_lock_bh(&bat_priv->orig_hash_lock);
	orig_node = ((struct orig_node *)hash_find(bat_priv->orig_hash,
						   compare_orig, choose_orig,
						   icmp_packet->orig));
	ret = NET_RX_DROP;

	if ((orig_node) && (orig_node->router)) {

		/* don't lock while sending the packets ... we therefore
		 * copy the required data before sending */
		batman_if = orig_node->router->if_incoming;
		memcpy(dstaddr, orig_node->router->addr, ETH_ALEN);
		spin_unlock_bh(&bat_priv->orig_hash_lock);

		/* create a copy of the skb, if needed, to modify it. */
		if (skb_cow(skb, sizeof(struct ethhdr)) < 0)
			return NET_RX_DROP;

		icmp_packet = (struct icmp_packet_rr *)skb->data;

		memcpy(icmp_packet->dst, icmp_packet->orig, ETH_ALEN);
		memcpy(icmp_packet->orig,
		       bat_priv->primary_if->net_dev->dev_addr, ETH_ALEN);
		icmp_packet->msg_type = ECHO_REPLY;
		icmp_packet->ttl = TTL;

		send_skb_packet(skb, batman_if, dstaddr);
		ret = NET_RX_SUCCESS;

	} else
		spin_unlock_bh(&bat_priv->orig_hash_lock);

	return ret;
}
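
/* the forwarded ICMP packet ran out of TTL: bounce a TTL_EXCEEDED message
 * back to the original sender if it was an echo request */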
static int recv_icmp_ttl_exceeded(struct bat_priv *bat_priv,
				  struct sk_buff *skb)
{
	struct orig_node *orig_node;
	struct icmp_packet *icmp_packet;
	struct batman_if *batman_if;
	int ret;
	uint8_t dstaddr[ETH_ALEN];

	icmp_packet = (struct icmp_packet *)skb->data;

	/* send TTL exceeded if packet is an echo request (traceroute) */
	if (icmp_packet->msg_type != ECHO_REQUEST) {
		pr_debug("Warning - can't forward icmp packet from %pM to "
			 "%pM: ttl exceeded\n", icmp_packet->orig,
			 icmp_packet->dst);
		return NET_RX_DROP;
	}

	if (!bat_priv->primary_if)
		return NET_RX_DROP;

	/* get routing information */
	spin_lock_bh(&bat_priv->orig_hash_lock);
	orig_node = ((struct orig_node *)
		     hash_find(bat_priv->orig_hash, compare_orig, choose_orig,
			       icmp_packet->orig));
	ret = NET_RX_DROP;

	if ((orig_node) && (orig_node->router)) {

		/* don't lock while sending the packets ... we therefore
		 * copy the required data before sending */
		batman_if = orig_node->router->if_incoming;
		memcpy(dstaddr, orig_node->router->addr, ETH_ALEN);
		spin_unlock_bh(&bat_priv->orig_hash_lock);

		/* create a copy of the skb, if needed, to modify it. */
		if (skb_cow(skb, sizeof(struct ethhdr)) < 0)
			return NET_RX_DROP;

		icmp_packet = (struct icmp_packet *)skb->data;

		memcpy(icmp_packet->dst, icmp_packet->orig, ETH_ALEN);
		memcpy(icmp_packet->orig,
		       bat_priv->primary_if->net_dev->dev_addr, ETH_ALEN);
		icmp_packet->msg_type = TTL_EXCEEDED;
		icmp_packet->ttl = TTL;

		send_skb_packet(skb, batman_if, dstaddr);
		ret = NET_RX_SUCCESS;

	} else
		spin_unlock_bh(&bat_priv->orig_hash_lock);

	return ret;
}
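
/* receive path entry point for batman-adv ICMP packets: append record-route
 * data, deliver packets addressed to us and forward the rest */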
int recv_icmp_packet(struct sk_buff *skb, struct batman_if *recv_if)
{
	struct bat_priv *bat_priv = netdev_priv(recv_if->soft_iface);
	struct icmp_packet_rr *icmp_packet;
	struct ethhdr *ethhdr;
	struct orig_node *orig_node;
	struct batman_if *batman_if;
	int hdr_size = sizeof(struct icmp_packet);
	int ret;
	uint8_t dstaddr[ETH_ALEN];

	/**
	 * we truncate all incoming icmp packets if they don't match our size
	 */
	if (skb->len >= sizeof(struct icmp_packet_rr))
		hdr_size = sizeof(struct icmp_packet_rr);

	/* drop packet if it doesn't have the necessary minimum size */
	if (unlikely(!pskb_may_pull(skb, hdr_size)))
		return NET_RX_DROP;

	ethhdr = (struct ethhdr *)skb_mac_header(skb);

	/* packet with unicast indication but broadcast recipient */
	if (is_broadcast_ether_addr(ethhdr->h_dest))
		return NET_RX_DROP;

	/* packet with broadcast sender address */
	if (is_broadcast_ether_addr(ethhdr->h_source))
		return NET_RX_DROP;

	/* not for me */
	if (!is_my_mac(ethhdr->h_dest))
		return NET_RX_DROP;

	icmp_packet = (struct icmp_packet_rr *)skb->data;

	/* add record route information if not full */
	if ((hdr_size == sizeof(struct icmp_packet_rr)) &&
	    (icmp_packet->rr_cur < BAT_RR_LEN)) {
		memcpy(&(icmp_packet->rr[icmp_packet->rr_cur]),
		       ethhdr->h_dest, ETH_ALEN);
		icmp_packet->rr_cur++;
	}

	/* packet for me */
	if (is_my_mac(icmp_packet->dst))
		return recv_my_icmp_packet(bat_priv, skb, hdr_size);

	/* TTL exceeded */
	if (icmp_packet->ttl < 2)
		return recv_icmp_ttl_exceeded(bat_priv, skb);

	ret = NET_RX_DROP;

	/* get routing information */
	spin_lock_bh(&bat_priv->orig_hash_lock);
	orig_node = ((struct orig_node *)
		     hash_find(bat_priv->orig_hash, compare_orig, choose_orig,
			       icmp_packet->dst));

	if ((orig_node) && (orig_node->router)) {

		/* don't lock while sending the packets ... we therefore
		 * copy the required data before sending */
		batman_if = orig_node->router->if_incoming;
		memcpy(dstaddr, orig_node->router->addr, ETH_ALEN);
		spin_unlock_bh(&bat_priv->orig_hash_lock);

		/* create a copy of the skb, if needed, to modify it. */
		if (skb_cow(skb, sizeof(struct ethhdr)) < 0)
			return NET_RX_DROP;

		icmp_packet = (struct icmp_packet_rr *)skb->data;

		/* decrement ttl */
		icmp_packet->ttl--;

		/* route it */
		send_skb_packet(skb, batman_if, dstaddr);
		ret = NET_RX_SUCCESS;

	} else
		spin_unlock_bh(&bat_priv->orig_hash_lock);

	return ret;
}

/* find a suitable router for this originator, and use
 * bonding if possible. */
struct neigh_node *find_router(struct bat_priv *bat_priv,
			       struct orig_node *orig_node,
			       struct batman_if *recv_if)
{
	struct orig_node *primary_orig_node;
	struct orig_node *router_orig;
	struct neigh_node *router, *first_candidate, *best_router;
	static uint8_t zero_mac[ETH_ALEN] = {0, 0, 0, 0, 0, 0};
	int bonding_enabled;

	if (!orig_node)
		return NULL;

	if (!orig_node->router)
		return NULL;

	/* without bonding, the first node should
	 * always choose the default router. */
	bonding_enabled = atomic_read(&bat_priv->bonding);

	if ((!recv_if) && (!bonding_enabled))
		return orig_node->router;

	router_orig = orig_node->router->orig_node;

	/* if we have something in the primary_addr, we can search
	 * for a potential bonding candidate. */
	if (memcmp(router_orig->primary_addr, zero_mac, ETH_ALEN) == 0)
		return orig_node->router;

	/* find the orig_node which has the primary interface. might
	 * even be the same as our router_orig in many cases */
	if (memcmp(router_orig->primary_addr,
		   router_orig->orig, ETH_ALEN) == 0) {
		primary_orig_node = router_orig;
	} else {
		primary_orig_node = hash_find(bat_priv->orig_hash, compare_orig,
					      choose_orig,
					      router_orig->primary_addr);
		if (!primary_orig_node)
			return orig_node->router;
	}

	/* with less than 2 candidates, we can't do any
	 * bonding and prefer the original router. */
	if (primary_orig_node->bond.candidates < 2)
		return orig_node->router;

	/* all nodes between should choose a candidate which
	 * is not on the interface where the packet came
	 * in. */
	first_candidate = primary_orig_node->bond.selected;
	router = first_candidate;

	if (bonding_enabled) {
		/* in the bonding case, send the packets in a round
		 * robin fashion over the remaining interfaces. */
		do {
			/* recv_if == NULL on the first node. */
			if (router->if_incoming != recv_if)
				break;

			router = router->next_bond_candidate;
		} while (router != first_candidate);

		primary_orig_node->bond.selected = router->next_bond_candidate;
	} else {
		/* if bonding is disabled, use the best of the
		 * remaining candidates which are not using
		 * this interface. */
		best_router = first_candidate;

		do {
			/* recv_if == NULL on the first node. */
			if ((router->if_incoming != recv_if) &&
			    (router->tq_avg > best_router->tq_avg))
				best_router = router;

			router = router->next_bond_candidate;
		} while (router != first_candidate);

		router = best_router;
	}
	return router;
}
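
/* basic sanity checks shared by the unicast receive paths: minimum length,
 * sane ethernet addresses and the frame actually being addressed to us */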
static int check_unicast_packet(struct sk_buff *skb, int hdr_size)
{
	struct ethhdr *ethhdr;

	/* drop packet if it doesn't have the necessary minimum size */
	if (unlikely(!pskb_may_pull(skb, hdr_size)))
		return -1;

	ethhdr = (struct ethhdr *)skb_mac_header(skb);

	/* packet with unicast indication but broadcast recipient */
	if (is_broadcast_ether_addr(ethhdr->h_dest))
		return -1;

	/* packet with broadcast sender address */
	if (is_broadcast_ether_addr(ethhdr->h_source))
		return -1;

	/* not for me */
	if (!is_my_mac(ethhdr->h_dest))
		return -1;

	return 0;
}
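
/* forward a unicast packet towards its destination, fragmenting or
 * reassembling it on the way when the outgoing interface requires it */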
int route_unicast_packet(struct sk_buff *skb, struct batman_if *recv_if,
			 int hdr_size)
{
	struct bat_priv *bat_priv = netdev_priv(recv_if->soft_iface);
	struct orig_node *orig_node;
	struct neigh_node *router;
	struct batman_if *batman_if;
	uint8_t dstaddr[ETH_ALEN];
	struct unicast_packet *unicast_packet;
	struct ethhdr *ethhdr = (struct ethhdr *)skb_mac_header(skb);
	int ret;
	struct sk_buff *new_skb;

	unicast_packet = (struct unicast_packet *)skb->data;

	/* TTL exceeded */
	if (unicast_packet->ttl < 2) {
		pr_debug("Warning - can't forward unicast packet from %pM to "
			 "%pM: ttl exceeded\n", ethhdr->h_source,
			 unicast_packet->dest);
		return NET_RX_DROP;
	}

	/* get routing information */
	spin_lock_bh(&bat_priv->orig_hash_lock);
	orig_node = ((struct orig_node *)
		     hash_find(bat_priv->orig_hash, compare_orig, choose_orig,
			       unicast_packet->dest));

	router = find_router(bat_priv, orig_node, recv_if);

	if (!router) {
		spin_unlock_bh(&bat_priv->orig_hash_lock);
		return NET_RX_DROP;
	}

	/* don't lock while sending the packets ... we therefore
	 * copy the required data before sending */
	batman_if = router->if_incoming;
	memcpy(dstaddr, router->addr, ETH_ALEN);

	spin_unlock_bh(&bat_priv->orig_hash_lock);

	/* create a copy of the skb, if needed, to modify it. */
	if (skb_cow(skb, sizeof(struct ethhdr)) < 0)
		return NET_RX_DROP;

	unicast_packet = (struct unicast_packet *)skb->data;

	if (unicast_packet->packet_type == BAT_UNICAST &&
	    atomic_read(&bat_priv->fragmentation) &&
	    skb->len > batman_if->net_dev->mtu)
		return frag_send_skb(skb, bat_priv, batman_if,
				     dstaddr);

	if (unicast_packet->packet_type == BAT_UNICAST_FRAG &&
	    frag_can_reassemble(skb, batman_if->net_dev->mtu)) {

		ret = frag_reassemble_skb(skb, bat_priv, &new_skb);

		if (ret == NET_RX_DROP)
			return NET_RX_DROP;

		/* packet was buffered for late merge */
		if (!new_skb)
			return NET_RX_SUCCESS;

		skb = new_skb;
		unicast_packet = (struct unicast_packet *)skb->data;
	}

	/* decrement ttl */
	unicast_packet->ttl--;

	/* route it */
	send_skb_packet(skb, batman_if, dstaddr);

	return NET_RX_SUCCESS;
}
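
/* receive path entry point for unicast payload packets: deliver packets
 * addressed to us, route everything else */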
int recv_unicast_packet(struct sk_buff *skb, struct batman_if *recv_if)
{
	struct unicast_packet *unicast_packet;
	int hdr_size = sizeof(struct unicast_packet);

	if (check_unicast_packet(skb, hdr_size) < 0)
		return NET_RX_DROP;

	unicast_packet = (struct unicast_packet *)skb->data;

	/* packet for me */
	if (is_my_mac(unicast_packet->dest)) {
		interface_rx(recv_if->soft_iface, skb, recv_if, hdr_size);
		return NET_RX_SUCCESS;
	}

	return route_unicast_packet(skb, recv_if, hdr_size);
}
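
/* receive path entry point for unicast fragments: reassemble fragments
 * addressed to us, route everything else */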
int recv_ucast_frag_packet(struct sk_buff *skb, struct batman_if *recv_if)
{
	struct bat_priv *bat_priv = netdev_priv(recv_if->soft_iface);
	struct unicast_frag_packet *unicast_packet;
	int hdr_size = sizeof(struct unicast_frag_packet);
	struct sk_buff *new_skb = NULL;
	int ret;

	if (check_unicast_packet(skb, hdr_size) < 0)
		return NET_RX_DROP;

	unicast_packet = (struct unicast_frag_packet *)skb->data;

	/* packet for me */
	if (is_my_mac(unicast_packet->dest)) {

		ret = frag_reassemble_skb(skb, bat_priv, &new_skb);

		if (ret == NET_RX_DROP)
			return NET_RX_DROP;

		/* packet was buffered for late merge */
		if (!new_skb)
			return NET_RX_SUCCESS;

		interface_rx(recv_if->soft_iface, new_skb, recv_if,
			     sizeof(struct unicast_packet));
		return NET_RX_SUCCESS;
	}

	return route_unicast_packet(skb, recv_if, hdr_size);
}
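
/* receive path entry point for broadcast packets: run duplicate and window
 * protection checks, queue the rebroadcast and hand a copy up the stack */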
int recv_bcast_packet(struct sk_buff *skb, struct batman_if *recv_if)
{
	struct bat_priv *bat_priv = netdev_priv(recv_if->soft_iface);
	struct orig_node *orig_node;
	struct bcast_packet *bcast_packet;
	struct ethhdr *ethhdr;
	int hdr_size = sizeof(struct bcast_packet);
	int32_t seq_diff;

	/* drop packet if it doesn't have the necessary minimum size */
	if (unlikely(!pskb_may_pull(skb, hdr_size)))
		return NET_RX_DROP;

	ethhdr = (struct ethhdr *)skb_mac_header(skb);

	/* packet with broadcast indication but unicast recipient */
	if (!is_broadcast_ether_addr(ethhdr->h_dest))
		return NET_RX_DROP;

	/* packet with broadcast sender address */
	if (is_broadcast_ether_addr(ethhdr->h_source))
		return NET_RX_DROP;

	/* ignore broadcasts sent by myself */
	if (is_my_mac(ethhdr->h_source))
		return NET_RX_DROP;

	bcast_packet = (struct bcast_packet *)skb->data;

	/* ignore broadcasts originated by myself */
	if (is_my_mac(bcast_packet->orig))
		return NET_RX_DROP;

	if (bcast_packet->ttl < 2)
		return NET_RX_DROP;

	spin_lock_bh(&bat_priv->orig_hash_lock);
	orig_node = ((struct orig_node *)
		     hash_find(bat_priv->orig_hash, compare_orig, choose_orig,
			       bcast_packet->orig));

	if (!orig_node) {
		spin_unlock_bh(&bat_priv->orig_hash_lock);
		return NET_RX_DROP;
	}

	/* check whether the packet is a duplicate */
	if (get_bit_status(orig_node->bcast_bits,
			   orig_node->last_bcast_seqno,
			   ntohl(bcast_packet->seqno))) {
		spin_unlock_bh(&bat_priv->orig_hash_lock);
		return NET_RX_DROP;
	}

	seq_diff = ntohl(bcast_packet->seqno) - orig_node->last_bcast_seqno;

	/* check whether the packet is old and the host just restarted. */
	if (window_protected(bat_priv, seq_diff,
			     &orig_node->bcast_seqno_reset)) {
		spin_unlock_bh(&bat_priv->orig_hash_lock);
		return NET_RX_DROP;
	}

	/* mark broadcast in flood history, update window position
	 * if required. */
	if (bit_get_packet(bat_priv, orig_node->bcast_bits, seq_diff, 1))
		orig_node->last_bcast_seqno = ntohl(bcast_packet->seqno);

	spin_unlock_bh(&bat_priv->orig_hash_lock);

	/* rebroadcast packet */
	add_bcast_packet_to_list(bat_priv, skb);

	/* broadcast for me */
	interface_rx(recv_if->soft_iface, skb, recv_if, hdr_size);

	return NET_RX_SUCCESS;
}
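
/* receive path entry point for vis packets: dispatch server-sync and
 * client-update packets to the visualization code */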
int recv_vis_packet(struct sk_buff *skb, struct batman_if *recv_if)
{
	struct vis_packet *vis_packet;
	struct ethhdr *ethhdr;
	struct bat_priv *bat_priv = netdev_priv(recv_if->soft_iface);
	int hdr_size = sizeof(struct vis_packet);

	/* keep skb linear */
	if (skb_linearize(skb) < 0)
		return NET_RX_DROP;

	if (unlikely(!pskb_may_pull(skb, hdr_size)))
		return NET_RX_DROP;

	vis_packet = (struct vis_packet *)skb->data;
	ethhdr = (struct ethhdr *)skb_mac_header(skb);

	/* not for me */
	if (!is_my_mac(ethhdr->h_dest))
		return NET_RX_DROP;

	/* ignore own packets */
	if (is_my_mac(vis_packet->vis_orig))
		return NET_RX_DROP;

	if (is_my_mac(vis_packet->sender_orig))
		return NET_RX_DROP;

	switch (vis_packet->vis_type) {
	case VIS_TYPE_SERVER_SYNC:
		receive_server_sync_packet(bat_priv, vis_packet,
					   skb_headlen(skb));
		break;

	case VIS_TYPE_CLIENT_UPDATE:
		receive_client_update_packet(bat_priv, vis_packet,
					     skb_headlen(skb));
		break;

	default:	/* ignore unknown packet */
		break;
	}

	/* We take a copy of the data in the packet, so we should
	 * always free the skbuff. */
	return NET_RX_DROP;
}