/* routing.c */
  1. /*
  2. * Copyright (C) 2007-2011 B.A.T.M.A.N. contributors:
  3. *
  4. * Marek Lindner, Simon Wunderlich
  5. *
  6. * This program is free software; you can redistribute it and/or
  7. * modify it under the terms of version 2 of the GNU General Public
  8. * License as published by the Free Software Foundation.
  9. *
  10. * This program is distributed in the hope that it will be useful, but
  11. * WITHOUT ANY WARRANTY; without even the implied warranty of
  12. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
  13. * General Public License for more details.
  14. *
  15. * You should have received a copy of the GNU General Public License
  16. * along with this program; if not, write to the Free Software
  17. * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
  18. * 02110-1301, USA
  19. *
  20. */
  21. #include "main.h"
  22. #include "routing.h"
  23. #include "send.h"
  24. #include "hash.h"
  25. #include "soft-interface.h"
  26. #include "hard-interface.h"
  27. #include "icmp_socket.h"
  28. #include "translation-table.h"
  29. #include "originator.h"
  30. #include "types.h"
  31. #include "ring_buffer.h"
  32. #include "vis.h"
  33. #include "aggregation.h"
  34. #include "gateway_common.h"
  35. #include "gateway_client.h"
  36. #include "unicast.h"
  37. void slide_own_bcast_window(struct batman_if *batman_if)
  38. {
  39. struct bat_priv *bat_priv = netdev_priv(batman_if->soft_iface);
  40. struct hashtable_t *hash = bat_priv->orig_hash;
  41. struct hlist_node *walk;
  42. struct hlist_head *head;
  43. struct element_t *bucket;
  44. struct orig_node *orig_node;
  45. unsigned long *word;
  46. int i;
  47. size_t word_index;
  48. spin_lock_bh(&bat_priv->orig_hash_lock);
  49. for (i = 0; i < hash->size; i++) {
  50. head = &hash->table[i];
  51. hlist_for_each_entry(bucket, walk, head, hlist) {
  52. orig_node = bucket->data;
  53. word_index = batman_if->if_num * NUM_WORDS;
  54. word = &(orig_node->bcast_own[word_index]);
  55. bit_get_packet(bat_priv, word, 1, 0);
  56. orig_node->bcast_own_sum[batman_if->if_num] =
  57. bit_packet_count(word);
  58. }
  59. }
  60. spin_unlock_bh(&bat_priv->orig_hash_lock);
  61. }
  62. static void update_HNA(struct bat_priv *bat_priv, struct orig_node *orig_node,
  63. unsigned char *hna_buff, int hna_buff_len)
  64. {
  65. if ((hna_buff_len != orig_node->hna_buff_len) ||
  66. ((hna_buff_len > 0) &&
  67. (orig_node->hna_buff_len > 0) &&
  68. (memcmp(orig_node->hna_buff, hna_buff, hna_buff_len) != 0))) {
  69. if (orig_node->hna_buff_len > 0)
  70. hna_global_del_orig(bat_priv, orig_node,
  71. "originator changed hna");
  72. if ((hna_buff_len > 0) && (hna_buff))
  73. hna_global_add_orig(bat_priv, orig_node,
  74. hna_buff, hna_buff_len);
  75. }
  76. }
  77. static void update_route(struct bat_priv *bat_priv,
  78. struct orig_node *orig_node,
  79. struct neigh_node *neigh_node,
  80. unsigned char *hna_buff, int hna_buff_len)
  81. {
  82. /* route deleted */
  83. if ((orig_node->router) && (!neigh_node)) {
  84. bat_dbg(DBG_ROUTES, bat_priv, "Deleting route towards: %pM\n",
  85. orig_node->orig);
  86. hna_global_del_orig(bat_priv, orig_node,
  87. "originator timed out");
  88. /* route added */
  89. } else if ((!orig_node->router) && (neigh_node)) {
  90. bat_dbg(DBG_ROUTES, bat_priv,
  91. "Adding route towards: %pM (via %pM)\n",
  92. orig_node->orig, neigh_node->addr);
  93. hna_global_add_orig(bat_priv, orig_node,
  94. hna_buff, hna_buff_len);
  95. /* route changed */
  96. } else {
  97. bat_dbg(DBG_ROUTES, bat_priv,
  98. "Changing route towards: %pM "
  99. "(now via %pM - was via %pM)\n",
  100. orig_node->orig, neigh_node->addr,
  101. orig_node->router->addr);
  102. }
  103. orig_node->router = neigh_node;
  104. }
  105. void update_routes(struct bat_priv *bat_priv, struct orig_node *orig_node,
  106. struct neigh_node *neigh_node, unsigned char *hna_buff,
  107. int hna_buff_len)
  108. {
  109. if (!orig_node)
  110. return;
  111. if (orig_node->router != neigh_node)
  112. update_route(bat_priv, orig_node, neigh_node,
  113. hna_buff, hna_buff_len);
  114. /* may be just HNA changed */
  115. else
  116. update_HNA(bat_priv, orig_node, hna_buff, hna_buff_len);
  117. }
/* is_bidirectional_neigh() - test whether the link to a (one hop) neighbor
 * works in both directions
 *
 * As a side effect this recomputes orig_neigh_node->tq_own and
 * ->tq_asym_penalty and scales batman_packet->tq by both.
 *
 * Returns 1 when the resulting TQ reaches TQ_TOTAL_BIDRECT_LIMIT,
 * 0 otherwise (also 0 when a neighbor entry could not be created).
 */
static int is_bidirectional_neigh(struct orig_node *orig_node,
				  struct orig_node *orig_neigh_node,
				  struct batman_packet *batman_packet,
				  struct batman_if *if_incoming)
{
	struct bat_priv *bat_priv = netdev_priv(if_incoming->soft_iface);
	struct neigh_node *neigh_node = NULL, *tmp_neigh_node = NULL;
	unsigned char total_count;

	if (orig_node == orig_neigh_node) {
		/* OGM came directly from a one hop neighbor: find its
		 * neighbor entry on this interface (or create one) */
		list_for_each_entry(tmp_neigh_node,
				    &orig_node->neigh_list,
				    list) {
			if (compare_orig(tmp_neigh_node->addr,
					 orig_neigh_node->orig) &&
			    (tmp_neigh_node->if_incoming == if_incoming))
				neigh_node = tmp_neigh_node;
		}

		if (!neigh_node)
			neigh_node = create_neighbor(orig_node,
						     orig_neigh_node,
						     orig_neigh_node->orig,
						     if_incoming);
		/* create_neighbor failed, return 0 */
		if (!neigh_node)
			return 0;

		neigh_node->last_valid = jiffies;
	} else {
		/* find packet count of corresponding one hop neighbor */
		list_for_each_entry(tmp_neigh_node,
				    &orig_neigh_node->neigh_list, list) {
			if (compare_orig(tmp_neigh_node->addr,
					 orig_neigh_node->orig) &&
			    (tmp_neigh_node->if_incoming == if_incoming))
				neigh_node = tmp_neigh_node;
		}

		if (!neigh_node)
			neigh_node = create_neighbor(orig_neigh_node,
						     orig_neigh_node,
						     orig_neigh_node->orig,
						     if_incoming);
		/* create_neighbor failed, return 0 */
		if (!neigh_node)
			return 0;
	}

	orig_node->last_valid = jiffies;

	/* pay attention to not get a value bigger than 100 %: take the
	 * smaller of "echoes of our own OGMs" and "OGMs received from
	 * the neighbor" */
	total_count = (orig_neigh_node->bcast_own_sum[if_incoming->if_num] >
		       neigh_node->real_packet_count ?
		       neigh_node->real_packet_count :
		       orig_neigh_node->bcast_own_sum[if_incoming->if_num]);

	/* if we have too few packets (too less data) we set tq_own to zero */
	/* if we receive too few packets it is not considered bidirectional */
	if ((total_count < TQ_LOCAL_BIDRECT_SEND_MINIMUM) ||
	    (neigh_node->real_packet_count < TQ_LOCAL_BIDRECT_RECV_MINIMUM))
		orig_neigh_node->tq_own = 0;
	else
		/* neigh_node->real_packet_count is never zero as we
		 * only purge old information when getting new
		 * information */
		orig_neigh_node->tq_own = (TQ_MAX_VALUE * total_count) /
			neigh_node->real_packet_count;

	/*
	 * 1 - ((1-x) ** 3), normalized to TQ_MAX_VALUE this does
	 * affect the nearly-symmetric links only a little, but
	 * punishes asymmetric links more. This will give a value
	 * between 0 and TQ_MAX_VALUE
	 */
	orig_neigh_node->tq_asym_penalty =
		TQ_MAX_VALUE -
		(TQ_MAX_VALUE *
		 (TQ_LOCAL_WINDOW_SIZE - neigh_node->real_packet_count) *
		 (TQ_LOCAL_WINDOW_SIZE - neigh_node->real_packet_count) *
		 (TQ_LOCAL_WINDOW_SIZE - neigh_node->real_packet_count)) /
		(TQ_LOCAL_WINDOW_SIZE *
		 TQ_LOCAL_WINDOW_SIZE *
		 TQ_LOCAL_WINDOW_SIZE);

	/* scale the forwarded TQ by our own link quality and the
	 * asymmetry penalty */
	batman_packet->tq = ((batman_packet->tq *
			      orig_neigh_node->tq_own *
			      orig_neigh_node->tq_asym_penalty) /
			     (TQ_MAX_VALUE * TQ_MAX_VALUE));

	bat_dbg(DBG_BATMAN, bat_priv,
		"bidirectional: "
		"orig = %-15pM neigh = %-15pM => own_bcast = %2i, "
		"real recv = %2i, local tq: %3i, asym_penalty: %3i, "
		"total tq: %3i\n",
		orig_node->orig, orig_neigh_node->orig, total_count,
		neigh_node->real_packet_count, orig_neigh_node->tq_own,
		orig_neigh_node->tq_asym_penalty, batman_packet->tq);

	/* if link has the minimum required transmission quality
	 * consider it bidirectional */
	if (batman_packet->tq >= TQ_TOTAL_BIDRECT_LIMIT)
		return 1;

	return 0;
}
/* update_orig() - update the originator entry for a received OGM
 *
 * Refreshes the TQ ring buffers of all neighbor entries, selects the best
 * router for orig_node (keeping the current one on ties unless the new
 * candidate is strictly better), and propagates HNA and gateway changes.
 */
static void update_orig(struct bat_priv *bat_priv,
			struct orig_node *orig_node,
			struct ethhdr *ethhdr,
			struct batman_packet *batman_packet,
			struct batman_if *if_incoming,
			unsigned char *hna_buff, int hna_buff_len,
			char is_duplicate)
{
	struct neigh_node *neigh_node = NULL, *tmp_neigh_node = NULL;
	int tmp_hna_buff_len;

	bat_dbg(DBG_BATMAN, bat_priv, "update_originator(): "
		"Searching and updating originator entry of received packet\n");

	list_for_each_entry(tmp_neigh_node, &orig_node->neigh_list, list) {
		/* the sending neighbor is handled after the loop */
		if (compare_orig(tmp_neigh_node->addr, ethhdr->h_source) &&
		    (tmp_neigh_node->if_incoming == if_incoming)) {
			neigh_node = tmp_neigh_node;
			continue;
		}

		if (is_duplicate)
			continue;

		/* non-duplicate OGM via another neighbor: record a zero TQ
		 * sample for that neighbor so its average decays */
		ring_buffer_set(tmp_neigh_node->tq_recv,
				&tmp_neigh_node->tq_index, 0);
		tmp_neigh_node->tq_avg =
			ring_buffer_avg(tmp_neigh_node->tq_recv);
	}

	if (!neigh_node) {
		struct orig_node *orig_tmp;

		orig_tmp = get_orig_node(bat_priv, ethhdr->h_source);
		if (!orig_tmp)
			return;

		neigh_node = create_neighbor(orig_node, orig_tmp,
					     ethhdr->h_source, if_incoming);
		if (!neigh_node)
			return;
	} else
		bat_dbg(DBG_BATMAN, bat_priv,
			"Updating existing last-hop neighbor of originator\n");

	orig_node->flags = batman_packet->flags;
	neigh_node->last_valid = jiffies;

	/* record the received TQ for the sending neighbor */
	ring_buffer_set(neigh_node->tq_recv,
			&neigh_node->tq_index,
			batman_packet->tq);
	neigh_node->tq_avg = ring_buffer_avg(neigh_node->tq_recv);

	if (!is_duplicate) {
		orig_node->last_ttl = batman_packet->ttl;
		neigh_node->last_ttl = batman_packet->ttl;
	}

	/* don't read past the actual HNA payload */
	tmp_hna_buff_len = (hna_buff_len > batman_packet->num_hna * ETH_ALEN ?
			    batman_packet->num_hna * ETH_ALEN : hna_buff_len);

	/* if this neighbor already is our next hop there is nothing
	 * to change */
	if (orig_node->router == neigh_node)
		goto update_hna;

	/* if this neighbor does not offer a better TQ we won't consider it */
	if ((orig_node->router) &&
	    (orig_node->router->tq_avg > neigh_node->tq_avg))
		goto update_hna;

	/* if the TQ is the same and the link not more symetric we
	 * won't consider it either */
	if ((orig_node->router) &&
	    ((neigh_node->tq_avg == orig_node->router->tq_avg) &&
	     (orig_node->router->orig_node->bcast_own_sum[if_incoming->if_num]
	      >= neigh_node->orig_node->bcast_own_sum[if_incoming->if_num])))
		goto update_hna;

	update_routes(bat_priv, orig_node, neigh_node,
		      hna_buff, tmp_hna_buff_len);
	goto update_gw;

update_hna:
	/* keep the current router, only refresh the HNA table */
	update_routes(bat_priv, orig_node, orig_node->router,
		      hna_buff, tmp_hna_buff_len);

update_gw:
	if (orig_node->gw_flags != batman_packet->gw_flags)
		gw_node_update(bat_priv, orig_node, batman_packet->gw_flags);

	orig_node->gw_flags = batman_packet->gw_flags;

	/* restart gateway selection if fast or late switching was enabled */
	if ((orig_node->gw_flags) &&
	    (atomic_read(&bat_priv->gw_mode) == GW_MODE_CLIENT) &&
	    (atomic_read(&bat_priv->gw_sel_class) > 2))
		gw_check_election(bat_priv, orig_node);
}
  292. /* checks whether the host restarted and is in the protection time.
  293. * returns:
  294. * 0 if the packet is to be accepted
  295. * 1 if the packet is to be ignored.
  296. */
  297. static int window_protected(struct bat_priv *bat_priv,
  298. int32_t seq_num_diff,
  299. unsigned long *last_reset)
  300. {
  301. if ((seq_num_diff <= -TQ_LOCAL_WINDOW_SIZE)
  302. || (seq_num_diff >= EXPECTED_SEQNO_RANGE)) {
  303. if (time_after(jiffies, *last_reset +
  304. msecs_to_jiffies(RESET_PROTECTION_MS))) {
  305. *last_reset = jiffies;
  306. bat_dbg(DBG_BATMAN, bat_priv,
  307. "old packet received, start protection\n");
  308. return 0;
  309. } else
  310. return 1;
  311. }
  312. return 0;
  313. }
  314. /* processes a batman packet for all interfaces, adjusts the sequence number and
  315. * finds out whether it is a duplicate.
  316. * returns:
  317. * 1 the packet is a duplicate
  318. * 0 the packet has not yet been received
  319. * -1 the packet is old and has been received while the seqno window
  320. * was protected. Caller should drop it.
  321. */
static char count_real_packets(struct ethhdr *ethhdr,
			       struct batman_packet *batman_packet,
			       struct batman_if *if_incoming)
{
	struct bat_priv *bat_priv = netdev_priv(if_incoming->soft_iface);
	struct orig_node *orig_node;
	struct neigh_node *tmp_neigh_node;
	char is_duplicate = 0;
	int32_t seq_diff;
	int need_update = 0;
	int set_mark;

	orig_node = get_orig_node(bat_priv, batman_packet->orig);
	if (!orig_node)
		return 0;

	seq_diff = batman_packet->seqno - orig_node->last_real_seqno;

	/* signalize caller that the packet is to be dropped. */
	if (window_protected(bat_priv, seq_diff,
			     &orig_node->batman_seqno_reset))
		return -1;

	list_for_each_entry(tmp_neigh_node, &orig_node->neigh_list, list) {
		/* duplicate if ANY neighbor already saw this seqno */
		is_duplicate |= get_bit_status(tmp_neigh_node->real_bits,
					       orig_node->last_real_seqno,
					       batman_packet->seqno);

		/* only mark the bit for the neighbor the OGM came from */
		if (compare_orig(tmp_neigh_node->addr, ethhdr->h_source) &&
		    (tmp_neigh_node->if_incoming == if_incoming))
			set_mark = 1;
		else
			set_mark = 0;

		/* if the window moved, set the update flag. */
		need_update |= bit_get_packet(bat_priv,
					      tmp_neigh_node->real_bits,
					      seq_diff, set_mark);

		/* refresh the cached per-neighbor packet count */
		tmp_neigh_node->real_packet_count =
			bit_packet_count(tmp_neigh_node->real_bits);
	}

	if (need_update) {
		bat_dbg(DBG_BATMAN, bat_priv,
			"updating last_seqno: old %d, new %d\n",
			orig_node->last_real_seqno, batman_packet->seqno);
		orig_node->last_real_seqno = batman_packet->seqno;
	}

	return is_duplicate;
}
  365. /* copy primary address for bonding */
  366. static void mark_bonding_address(struct orig_node *orig_node,
  367. struct orig_node *orig_neigh_node,
  368. struct batman_packet *batman_packet)
  369. {
  370. if (batman_packet->flags & PRIMARIES_FIRST_HOP)
  371. memcpy(orig_neigh_node->primary_addr,
  372. orig_node->orig, ETH_ALEN);
  373. return;
  374. }
  375. /* mark possible bond.candidates in the neighbor list */
/* update_bonding_candidates() - rebuild the list of bonding candidates
 *
 * A neighbor qualifies as a bonding candidate when it announces the same
 * primary address as the originator and its TQ is within
 * BONDING_TQ_THRESHOLD of the best router, and no other candidate shares
 * its MAC address or incoming interface (interference avoidance).
 * Candidates are linked via next_bond_candidate into a circular list.
 */
void update_bonding_candidates(struct orig_node *orig_node)
{
	int candidates;
	int interference_candidate;
	int best_tq;
	struct neigh_node *tmp_neigh_node, *tmp_neigh_node2;
	struct neigh_node *first_candidate, *last_candidate;

	/* update the candidates for this originator */
	if (!orig_node->router) {
		orig_node->bond.candidates = 0;
		return;
	}

	best_tq = orig_node->router->tq_avg;

	/* update bond.candidates */
	candidates = 0;

	/* mark other nodes which also received "PRIMARIES FIRST HOP" packets
	 * as "bonding partner" */

	/* first, zero the list */
	list_for_each_entry(tmp_neigh_node, &orig_node->neigh_list, list) {
		tmp_neigh_node->next_bond_candidate = NULL;
	}

	first_candidate = NULL;
	last_candidate = NULL;
	list_for_each_entry(tmp_neigh_node, &orig_node->neigh_list, list) {

		/* only consider if it has the same primary address ... */
		if (memcmp(orig_node->orig,
			   tmp_neigh_node->orig_node->primary_addr,
			   ETH_ALEN) != 0)
			continue;

		/* ... and is good enough to be considered */
		if (tmp_neigh_node->tq_avg < best_tq - BONDING_TQ_THRESHOLD)
			continue;

		/* check if we have another candidate with the same
		 * mac address or interface. If we do, we won't
		 * select this candidate because of possible interference. */
		interference_candidate = 0;
		list_for_each_entry(tmp_neigh_node2,
				    &orig_node->neigh_list, list) {

			if (tmp_neigh_node2 == tmp_neigh_node)
				continue;

			/* we only care if the other candidate is even
			 * considered as candidate. */
			if (!tmp_neigh_node2->next_bond_candidate)
				continue;

			if ((tmp_neigh_node->if_incoming ==
			     tmp_neigh_node2->if_incoming)
			    || (memcmp(tmp_neigh_node->addr,
				       tmp_neigh_node2->addr, ETH_ALEN) == 0)) {
				interference_candidate = 1;
				break;
			}
		}

		/* don't care further if it is an interference candidate */
		if (interference_candidate)
			continue;

		if (!first_candidate) {
			first_candidate = tmp_neigh_node;
			/* the first candidate points at itself for now;
			 * the link to last_candidate below closes the
			 * circle once all candidates are known. This also
			 * makes the "is considered" test above (non-NULL
			 * next_bond_candidate) pass for it. */
			tmp_neigh_node->next_bond_candidate = first_candidate;
		} else
			tmp_neigh_node->next_bond_candidate = last_candidate;

		last_candidate = tmp_neigh_node;

		candidates++;
	}

	if (candidates > 0) {
		/* close the circular list */
		first_candidate->next_bond_candidate = last_candidate;
		orig_node->bond.selected = first_candidate;
	}

	orig_node->bond.candidates = candidates;
}
/* receive_bat_packet() - process one OGM (originator message)
 *
 * Called (possibly repeatedly, by the aggregation code) with
 * orig_hash_lock held. Applies the protocol's drop rules, updates the
 * seqno windows and ranking, and schedules rebroadcasts.
 */
void receive_bat_packet(struct ethhdr *ethhdr,
			struct batman_packet *batman_packet,
			unsigned char *hna_buff, int hna_buff_len,
			struct batman_if *if_incoming)
{
	struct bat_priv *bat_priv = netdev_priv(if_incoming->soft_iface);
	struct batman_if *batman_if;
	struct orig_node *orig_neigh_node, *orig_node;
	char has_directlink_flag;
	char is_my_addr = 0, is_my_orig = 0, is_my_oldorig = 0;
	char is_broadcast = 0, is_bidirectional, is_single_hop_neigh;
	char is_duplicate;
	uint32_t if_incoming_seqno;

	/* Silently drop when the batman packet is actually not a
	 * correct packet.
	 *
	 * This might happen if a packet is padded (e.g. Ethernet has a
	 * minimum frame length of 64 byte) and the aggregation interprets
	 * it as an additional length.
	 *
	 * TODO: A more sane solution would be to have a bit in the
	 * batman_packet to detect whether the packet is the last
	 * packet in an aggregation.  Here we expect that the padding
	 * is always zero (or not 0x01)
	 */
	if (batman_packet->packet_type != BAT_PACKET)
		return;

	/* could be changed by schedule_own_packet() */
	if_incoming_seqno = atomic_read(&if_incoming->seqno);

	has_directlink_flag = (batman_packet->flags & DIRECTLINK ? 1 : 0);

	/* sender MAC equals originator MAC <=> direct neighbor */
	is_single_hop_neigh = (compare_orig(ethhdr->h_source,
					    batman_packet->orig) ? 1 : 0);

	bat_dbg(DBG_BATMAN, bat_priv,
		"Received BATMAN packet via NB: %pM, IF: %s [%pM] "
		"(from OG: %pM, via prev OG: %pM, seqno %d, tq %d, "
		"TTL %d, V %d, IDF %d)\n",
		ethhdr->h_source, if_incoming->net_dev->name,
		if_incoming->net_dev->dev_addr, batman_packet->orig,
		batman_packet->prev_sender, batman_packet->seqno,
		batman_packet->tq, batman_packet->ttl, batman_packet->version,
		has_directlink_flag);

	/* classify the packet against all of our active interfaces
	 * belonging to the same mesh (soft interface) */
	rcu_read_lock();
	list_for_each_entry_rcu(batman_if, &if_list, list) {
		if (batman_if->if_status != IF_ACTIVE)
			continue;

		if (batman_if->soft_iface != if_incoming->soft_iface)
			continue;

		if (compare_orig(ethhdr->h_source,
				 batman_if->net_dev->dev_addr))
			is_my_addr = 1;

		if (compare_orig(batman_packet->orig,
				 batman_if->net_dev->dev_addr))
			is_my_orig = 1;

		if (compare_orig(batman_packet->prev_sender,
				 batman_if->net_dev->dev_addr))
			is_my_oldorig = 1;

		if (compare_orig(ethhdr->h_source, broadcast_addr))
			is_broadcast = 1;
	}
	rcu_read_unlock();

	if (batman_packet->version != COMPAT_VERSION) {
		bat_dbg(DBG_BATMAN, bat_priv,
			"Drop packet: incompatible batman version (%i)\n",
			batman_packet->version);
		return;
	}

	if (is_my_addr) {
		bat_dbg(DBG_BATMAN, bat_priv,
			"Drop packet: received my own broadcast (sender: %pM"
			")\n",
			ethhdr->h_source);
		return;
	}

	if (is_broadcast) {
		bat_dbg(DBG_BATMAN, bat_priv, "Drop packet: "
		"ignoring all packets with broadcast source addr (sender: %pM"
		")\n", ethhdr->h_source);
		return;
	}

	if (is_my_orig) {
		/* our own OGM echoed back by a neighbor: use it to fill
		 * the bcast_own window (needed for the bidirectional
		 * check), then drop it */
		unsigned long *word;
		int offset;

		orig_neigh_node = get_orig_node(bat_priv, ethhdr->h_source);
		if (!orig_neigh_node)
			return;

		/* neighbor has to indicate direct link and it has to
		 * come via the corresponding interface */
		/* if received seqno equals last send seqno save new
		 * seqno for bidirectional check */
		if (has_directlink_flag &&
		    compare_orig(if_incoming->net_dev->dev_addr,
				 batman_packet->orig) &&
		    (batman_packet->seqno - if_incoming_seqno + 2 == 0)) {
			offset = if_incoming->if_num * NUM_WORDS;
			word = &(orig_neigh_node->bcast_own[offset]);
			bit_mark(word, 0);
			orig_neigh_node->bcast_own_sum[if_incoming->if_num] =
				bit_packet_count(word);
		}

		bat_dbg(DBG_BATMAN, bat_priv, "Drop packet: "
			"originator packet from myself (via neighbor)\n");
		return;
	}

	if (is_my_oldorig) {
		bat_dbg(DBG_BATMAN, bat_priv,
			"Drop packet: ignoring all rebroadcast echos (sender: "
			"%pM)\n", ethhdr->h_source);
		return;
	}

	orig_node = get_orig_node(bat_priv, batman_packet->orig);
	if (!orig_node)
		return;

	is_duplicate = count_real_packets(ethhdr, batman_packet, if_incoming);

	if (is_duplicate == -1) {
		bat_dbg(DBG_BATMAN, bat_priv,
			"Drop packet: packet within seqno protection time "
			"(sender: %pM)\n", ethhdr->h_source);
		return;
	}

	if (batman_packet->tq == 0) {
		bat_dbg(DBG_BATMAN, bat_priv,
			"Drop packet: originator packet with tq equal 0\n");
		return;
	}

	/* avoid temporary routing loops */
	if ((orig_node->router) &&
	    (orig_node->router->orig_node->router) &&
	    (compare_orig(orig_node->router->addr,
			  batman_packet->prev_sender)) &&
	    !(compare_orig(batman_packet->orig, batman_packet->prev_sender)) &&
	    (compare_orig(orig_node->router->addr,
			  orig_node->router->orig_node->router->addr))) {
		bat_dbg(DBG_BATMAN, bat_priv,
			"Drop packet: ignoring all rebroadcast packets that "
			"may make me loop (sender: %pM)\n", ethhdr->h_source);
		return;
	}

	/* if sender is a direct neighbor the sender mac equals
	 * originator mac */
	orig_neigh_node = (is_single_hop_neigh ?
			   orig_node :
			   get_orig_node(bat_priv, ethhdr->h_source));
	if (!orig_neigh_node)
		return;

	/* drop packet if sender is not a direct neighbor and if we
	 * don't route towards it */
	if (!is_single_hop_neigh && (!orig_neigh_node->router)) {
		bat_dbg(DBG_BATMAN, bat_priv,
			"Drop packet: OGM via unknown neighbor!\n");
		return;
	}

	is_bidirectional = is_bidirectional_neigh(orig_node, orig_neigh_node,
						  batman_packet, if_incoming);

	/* update ranking if it is not a duplicate or has the same
	 * seqno and similar ttl as the non-duplicate */
	if (is_bidirectional &&
	    (!is_duplicate ||
	     ((orig_node->last_real_seqno == batman_packet->seqno) &&
	      (orig_node->last_ttl - 3 <= batman_packet->ttl))))
		update_orig(bat_priv, orig_node, ethhdr, batman_packet,
			    if_incoming, hna_buff, hna_buff_len, is_duplicate);

	mark_bonding_address(orig_node, orig_neigh_node, batman_packet);
	update_bonding_candidates(orig_node);

	/* is single hop (direct) neighbor */
	if (is_single_hop_neigh) {

		/* mark direct link on incoming interface */
		schedule_forward_packet(orig_node, ethhdr, batman_packet,
					1, hna_buff_len, if_incoming);

		bat_dbg(DBG_BATMAN, bat_priv, "Forwarding packet: "
			"rebroadcast neighbor packet with direct link flag\n");
		return;
	}

	/* multihop originator */
	if (!is_bidirectional) {
		bat_dbg(DBG_BATMAN, bat_priv,
			"Drop packet: not received via bidirectional link\n");
		return;
	}

	if (is_duplicate) {
		bat_dbg(DBG_BATMAN, bat_priv,
			"Drop packet: duplicate packet received\n");
		return;
	}

	bat_dbg(DBG_BATMAN, bat_priv,
		"Forwarding packet: rebroadcast originator packet\n");
	schedule_forward_packet(orig_node, ethhdr, batman_packet,
				0, hna_buff_len, if_incoming);
}
/* recv_bat_packet() - receive handler for raw batman OGM frames
 *
 * Sanity-checks the skb, makes it writable and linear, then hands the
 * payload to the aggregation layer under orig_hash_lock. Consumes the
 * skb on success; on NET_RX_DROP the caller owns the skb.
 */
int recv_bat_packet(struct sk_buff *skb, struct batman_if *batman_if)
{
	struct bat_priv *bat_priv = netdev_priv(batman_if->soft_iface);
	struct ethhdr *ethhdr;

	/* drop packet if it has not necessary minimum size */
	if (unlikely(!pskb_may_pull(skb, sizeof(struct batman_packet))))
		return NET_RX_DROP;

	ethhdr = (struct ethhdr *)skb_mac_header(skb);

	/* packet with broadcast indication but unicast recipient */
	if (!is_broadcast_ether_addr(ethhdr->h_dest))
		return NET_RX_DROP;

	/* packet with broadcast sender address */
	if (is_broadcast_ether_addr(ethhdr->h_source))
		return NET_RX_DROP;

	/* create a copy of the skb, if needed, to modify it. */
	if (skb_cow(skb, 0) < 0)
		return NET_RX_DROP;

	/* keep skb linear */
	if (skb_linearize(skb) < 0)
		return NET_RX_DROP;

	/* re-read the header: skb_cow/skb_linearize may have
	 * reallocated the data */
	ethhdr = (struct ethhdr *)skb_mac_header(skb);

	spin_lock_bh(&bat_priv->orig_hash_lock);
	receive_aggr_bat_packet(ethhdr,
				skb->data,
				skb_headlen(skb),
				batman_if);
	spin_unlock_bh(&bat_priv->orig_hash_lock);

	kfree_skb(skb);
	return NET_RX_SUCCESS;
}
/* recv_my_icmp_packet() - handle a batman icmp packet addressed to us
 *
 * Non-ECHO_REQUEST messages are delivered to the userspace socket.
 * Echo requests are answered by swapping dst/orig, stamping our primary
 * address and sending the reply towards the requester's router.
 *
 * Returns NET_RX_SUCCESS when the skb was consumed (reply sent),
 * NET_RX_DROP otherwise (caller keeps ownership of the skb).
 */
static int recv_my_icmp_packet(struct bat_priv *bat_priv,
			       struct sk_buff *skb, size_t icmp_len)
{
	struct orig_node *orig_node;
	struct icmp_packet_rr *icmp_packet;
	struct batman_if *batman_if;
	int ret;
	uint8_t dstaddr[ETH_ALEN];

	icmp_packet = (struct icmp_packet_rr *)skb->data;

	/* add data to device queue */
	if (icmp_packet->msg_type != ECHO_REQUEST) {
		bat_socket_receive_packet(icmp_packet, icmp_len);
		return NET_RX_DROP;
	}

	if (!bat_priv->primary_if)
		return NET_RX_DROP;

	/* answer echo request (ping) */
	/* get routing information */
	spin_lock_bh(&bat_priv->orig_hash_lock);
	orig_node = ((struct orig_node *)hash_find(bat_priv->orig_hash,
						   compare_orig, choose_orig,
						   icmp_packet->orig));
	ret = NET_RX_DROP;

	if ((orig_node) && (orig_node->router)) {

		/* don't lock while sending the packets ... we therefore
		 * copy the required data before sending */
		batman_if = orig_node->router->if_incoming;
		memcpy(dstaddr, orig_node->router->addr, ETH_ALEN);
		spin_unlock_bh(&bat_priv->orig_hash_lock);

		/* create a copy of the skb, if needed, to modify it. */
		if (skb_cow(skb, sizeof(struct ethhdr)) < 0)
			return NET_RX_DROP;

		/* skb_cow may have reallocated the data */
		icmp_packet = (struct icmp_packet_rr *)skb->data;

		/* turn the request into a reply in place */
		memcpy(icmp_packet->dst, icmp_packet->orig, ETH_ALEN);
		memcpy(icmp_packet->orig,
		       bat_priv->primary_if->net_dev->dev_addr, ETH_ALEN);
		icmp_packet->msg_type = ECHO_REPLY;
		icmp_packet->ttl = TTL;

		send_skb_packet(skb, batman_if, dstaddr);
		ret = NET_RX_SUCCESS;

	} else
		spin_unlock_bh(&bat_priv->orig_hash_lock);

	return ret;
}
  707. static int recv_icmp_ttl_exceeded(struct bat_priv *bat_priv,
  708. struct sk_buff *skb)
  709. {
  710. struct orig_node *orig_node;
  711. struct icmp_packet *icmp_packet;
  712. struct batman_if *batman_if;
  713. int ret;
  714. uint8_t dstaddr[ETH_ALEN];
  715. icmp_packet = (struct icmp_packet *)skb->data;
  716. /* send TTL exceeded if packet is an echo request (traceroute) */
  717. if (icmp_packet->msg_type != ECHO_REQUEST) {
  718. pr_debug("Warning - can't forward icmp packet from %pM to "
  719. "%pM: ttl exceeded\n", icmp_packet->orig,
  720. icmp_packet->dst);
  721. return NET_RX_DROP;
  722. }
  723. if (!bat_priv->primary_if)
  724. return NET_RX_DROP;
  725. /* get routing information */
  726. spin_lock_bh(&bat_priv->orig_hash_lock);
  727. orig_node = ((struct orig_node *)
  728. hash_find(bat_priv->orig_hash, compare_orig, choose_orig,
  729. icmp_packet->orig));
  730. ret = NET_RX_DROP;
  731. if ((orig_node) && (orig_node->router)) {
  732. /* don't lock while sending the packets ... we therefore
  733. * copy the required data before sending */
  734. batman_if = orig_node->router->if_incoming;
  735. memcpy(dstaddr, orig_node->router->addr, ETH_ALEN);
  736. spin_unlock_bh(&bat_priv->orig_hash_lock);
  737. /* create a copy of the skb, if needed, to modify it. */
  738. if (skb_cow(skb, sizeof(struct ethhdr)) < 0)
  739. return NET_RX_DROP;
  740. icmp_packet = (struct icmp_packet *) skb->data;
  741. memcpy(icmp_packet->dst, icmp_packet->orig, ETH_ALEN);
  742. memcpy(icmp_packet->orig,
  743. bat_priv->primary_if->net_dev->dev_addr, ETH_ALEN);
  744. icmp_packet->msg_type = TTL_EXCEEDED;
  745. icmp_packet->ttl = TTL;
  746. send_skb_packet(skb, batman_if, dstaddr);
  747. ret = NET_RX_SUCCESS;
  748. } else
  749. spin_unlock_bh(&bat_priv->orig_hash_lock);
  750. return ret;
  751. }
  752. int recv_icmp_packet(struct sk_buff *skb, struct batman_if *recv_if)
  753. {
  754. struct bat_priv *bat_priv = netdev_priv(recv_if->soft_iface);
  755. struct icmp_packet_rr *icmp_packet;
  756. struct ethhdr *ethhdr;
  757. struct orig_node *orig_node;
  758. struct batman_if *batman_if;
  759. int hdr_size = sizeof(struct icmp_packet);
  760. int ret;
  761. uint8_t dstaddr[ETH_ALEN];
  762. /**
  763. * we truncate all incoming icmp packets if they don't match our size
  764. */
  765. if (skb->len >= sizeof(struct icmp_packet_rr))
  766. hdr_size = sizeof(struct icmp_packet_rr);
  767. /* drop packet if it has not necessary minimum size */
  768. if (unlikely(!pskb_may_pull(skb, hdr_size)))
  769. return NET_RX_DROP;
  770. ethhdr = (struct ethhdr *)skb_mac_header(skb);
  771. /* packet with unicast indication but broadcast recipient */
  772. if (is_broadcast_ether_addr(ethhdr->h_dest))
  773. return NET_RX_DROP;
  774. /* packet with broadcast sender address */
  775. if (is_broadcast_ether_addr(ethhdr->h_source))
  776. return NET_RX_DROP;
  777. /* not for me */
  778. if (!is_my_mac(ethhdr->h_dest))
  779. return NET_RX_DROP;
  780. icmp_packet = (struct icmp_packet_rr *)skb->data;
  781. /* add record route information if not full */
  782. if ((hdr_size == sizeof(struct icmp_packet_rr)) &&
  783. (icmp_packet->rr_cur < BAT_RR_LEN)) {
  784. memcpy(&(icmp_packet->rr[icmp_packet->rr_cur]),
  785. ethhdr->h_dest, ETH_ALEN);
  786. icmp_packet->rr_cur++;
  787. }
  788. /* packet for me */
  789. if (is_my_mac(icmp_packet->dst))
  790. return recv_my_icmp_packet(bat_priv, skb, hdr_size);
  791. /* TTL exceeded */
  792. if (icmp_packet->ttl < 2)
  793. return recv_icmp_ttl_exceeded(bat_priv, skb);
  794. ret = NET_RX_DROP;
  795. /* get routing information */
  796. spin_lock_bh(&bat_priv->orig_hash_lock);
  797. orig_node = ((struct orig_node *)
  798. hash_find(bat_priv->orig_hash, compare_orig, choose_orig,
  799. icmp_packet->dst));
  800. if ((orig_node) && (orig_node->router)) {
  801. /* don't lock while sending the packets ... we therefore
  802. * copy the required data before sending */
  803. batman_if = orig_node->router->if_incoming;
  804. memcpy(dstaddr, orig_node->router->addr, ETH_ALEN);
  805. spin_unlock_bh(&bat_priv->orig_hash_lock);
  806. /* create a copy of the skb, if needed, to modify it. */
  807. if (skb_cow(skb, sizeof(struct ethhdr)) < 0)
  808. return NET_RX_DROP;
  809. icmp_packet = (struct icmp_packet_rr *)skb->data;
  810. /* decrement ttl */
  811. icmp_packet->ttl--;
  812. /* route it */
  813. send_skb_packet(skb, batman_if, dstaddr);
  814. ret = NET_RX_SUCCESS;
  815. } else
  816. spin_unlock_bh(&bat_priv->orig_hash_lock);
  817. return ret;
  818. }
/* find a suitable router for this originator, and use
 * bonding if possible.
 *
 * Returns the neigh_node to forward through, or NULL when orig_node is
 * unknown or has no router. recv_if is the interface the packet came in
 * on (NULL on the first hop); with bonding the selection avoids sending
 * back out over that same interface.
 *
 * NOTE(review): callers in this file (route_unicast_packet) invoke this
 * while holding bat_priv->orig_hash_lock, which presumably protects the
 * router/bond lists walked here — confirm for any new caller. */
struct neigh_node *find_router(struct bat_priv *bat_priv,
			       struct orig_node *orig_node,
			       struct batman_if *recv_if)
{
	struct orig_node *primary_orig_node;
	struct orig_node *router_orig;
	struct neigh_node *router, *first_candidate, *best_router;
	static uint8_t zero_mac[ETH_ALEN] = {0, 0, 0, 0, 0, 0};
	int bonding_enabled;

	if (!orig_node)
		return NULL;

	if (!orig_node->router)
		return NULL;

	/* without bonding, the first node should
	 * always choose the default router. */
	bonding_enabled = atomic_read(&bat_priv->bonding);

	if ((!recv_if) && (!bonding_enabled))
		return orig_node->router;

	router_orig = orig_node->router->orig_node;

	/* if we have something in the primary_addr, we can search
	 * for a potential bonding candidate. */
	if (memcmp(router_orig->primary_addr, zero_mac, ETH_ALEN) == 0)
		return orig_node->router;

	/* find the orig_node which has the primary interface. might
	 * even be the same as our router_orig in many cases */
	if (memcmp(router_orig->primary_addr,
		   router_orig->orig, ETH_ALEN) == 0) {
		primary_orig_node = router_orig;
	} else {
		primary_orig_node = hash_find(bat_priv->orig_hash, compare_orig,
					      choose_orig,
					      router_orig->primary_addr);

		if (!primary_orig_node)
			return orig_node->router;
	}

	/* with less than 2 candidates, we can't do any
	 * bonding and prefer the original router. */
	if (primary_orig_node->bond.candidates < 2)
		return orig_node->router;

	/* all nodes between should choose a candidate which
	 * is is not on the interface where the packet came
	 * in. */
	first_candidate = primary_orig_node->bond.selected;
	router = first_candidate;

	if (bonding_enabled) {
		/* in the bonding case, send the packets in a round
		 * robin fashion over the remaining interfaces. */
		do {
			/* recv_if == NULL on the first node. */
			if (router->if_incoming != recv_if)
				break;

			router = router->next_bond_candidate;
		} while (router != first_candidate);

		/* advance the round-robin cursor so the next packet
		 * picks a different candidate */
		primary_orig_node->bond.selected = router->next_bond_candidate;
	} else {
		/* if bonding is disabled, use the best of the
		 * remaining candidates which are not using
		 * this interface. */
		best_router = first_candidate;

		do {
			/* recv_if == NULL on the first node. */
			if ((router->if_incoming != recv_if) &&
			    (router->tq_avg > best_router->tq_avg))
				best_router = router;

			router = router->next_bond_candidate;
		} while (router != first_candidate);

		router = best_router;
	}

	return router;
}
  891. static int check_unicast_packet(struct sk_buff *skb, int hdr_size)
  892. {
  893. struct ethhdr *ethhdr;
  894. /* drop packet if it has not necessary minimum size */
  895. if (unlikely(!pskb_may_pull(skb, hdr_size)))
  896. return -1;
  897. ethhdr = (struct ethhdr *)skb_mac_header(skb);
  898. /* packet with unicast indication but broadcast recipient */
  899. if (is_broadcast_ether_addr(ethhdr->h_dest))
  900. return -1;
  901. /* packet with broadcast sender address */
  902. if (is_broadcast_ether_addr(ethhdr->h_source))
  903. return -1;
  904. /* not for me */
  905. if (!is_my_mac(ethhdr->h_dest))
  906. return -1;
  907. return 0;
  908. }
/**
 * route_unicast_packet - forward a unicast (or unicast fragment) packet
 * one hop towards its destination
 * @skb: the packet; ownership passes to the send/frag helpers on the
 *	 success paths
 * @recv_if: hard interface the packet arrived on (used to avoid routing
 *	     back out the same interface when bonding)
 * @hdr_size: size of the batman header in front of the payload
 *
 * Returns NET_RX_SUCCESS or NET_RX_DROP.
 */
int route_unicast_packet(struct sk_buff *skb, struct batman_if *recv_if,
			 int hdr_size)
{
	struct bat_priv *bat_priv = netdev_priv(recv_if->soft_iface);
	struct orig_node *orig_node;
	struct neigh_node *router;
	struct batman_if *batman_if;
	uint8_t dstaddr[ETH_ALEN];
	struct unicast_packet *unicast_packet;
	struct ethhdr *ethhdr = (struct ethhdr *)skb_mac_header(skb);
	int ret;
	struct sk_buff *new_skb;

	unicast_packet = (struct unicast_packet *)skb->data;

	/* TTL exceeded */
	if (unicast_packet->ttl < 2) {
		pr_debug("Warning - can't forward unicast packet from %pM to "
			 "%pM: ttl exceeded\n", ethhdr->h_source,
			 unicast_packet->dest);
		return NET_RX_DROP;
	}

	/* get routing information; find_router is called under
	 * orig_hash_lock, which is held until the next hop data
	 * has been copied out */
	spin_lock_bh(&bat_priv->orig_hash_lock);
	orig_node = ((struct orig_node *)
		     hash_find(bat_priv->orig_hash, compare_orig, choose_orig,
			       unicast_packet->dest));

	router = find_router(bat_priv, orig_node, recv_if);

	if (!router) {
		spin_unlock_bh(&bat_priv->orig_hash_lock);
		return NET_RX_DROP;
	}

	/* don't lock while sending the packets ... we therefore
	 * copy the required data before sending */
	batman_if = router->if_incoming;
	memcpy(dstaddr, router->addr, ETH_ALEN);

	spin_unlock_bh(&bat_priv->orig_hash_lock);

	/* create a copy of the skb, if needed, to modify it. */
	if (skb_cow(skb, sizeof(struct ethhdr)) < 0)
		return NET_RX_DROP;

	/* skb_cow may have moved the data; re-fetch the header pointer */
	unicast_packet = (struct unicast_packet *)skb->data;

	/* packet too big for the outgoing link: hand it to the
	 * fragmentation code, which takes over the skb */
	if (unicast_packet->packet_type == BAT_UNICAST &&
	    atomic_read(&bat_priv->fragmentation) &&
	    skb->len > batman_if->net_dev->mtu)
		return frag_send_skb(skb, bat_priv, batman_if,
				     dstaddr);

	/* an incoming fragment that would fit the outgoing MTU once
	 * merged: try to reassemble before forwarding */
	if (unicast_packet->packet_type == BAT_UNICAST_FRAG &&
	    frag_can_reassemble(skb, batman_if->net_dev->mtu)) {

		ret = frag_reassemble_skb(skb, bat_priv, &new_skb);

		if (ret == NET_RX_DROP)
			return NET_RX_DROP;

		/* packet was buffered for late merge */
		if (!new_skb)
			return NET_RX_SUCCESS;

		/* continue with the merged packet instead */
		skb = new_skb;
		unicast_packet = (struct unicast_packet *)skb->data;
	}

	/* decrement ttl */
	unicast_packet->ttl--;

	/* route it */
	send_skb_packet(skb, batman_if, dstaddr);
	return NET_RX_SUCCESS;
}
  970. int recv_unicast_packet(struct sk_buff *skb, struct batman_if *recv_if)
  971. {
  972. struct unicast_packet *unicast_packet;
  973. int hdr_size = sizeof(struct unicast_packet);
  974. if (check_unicast_packet(skb, hdr_size) < 0)
  975. return NET_RX_DROP;
  976. unicast_packet = (struct unicast_packet *)skb->data;
  977. /* packet for me */
  978. if (is_my_mac(unicast_packet->dest)) {
  979. interface_rx(recv_if->soft_iface, skb, recv_if, hdr_size);
  980. return NET_RX_SUCCESS;
  981. }
  982. return route_unicast_packet(skb, recv_if, hdr_size);
  983. }
  984. int recv_ucast_frag_packet(struct sk_buff *skb, struct batman_if *recv_if)
  985. {
  986. struct bat_priv *bat_priv = netdev_priv(recv_if->soft_iface);
  987. struct unicast_frag_packet *unicast_packet;
  988. int hdr_size = sizeof(struct unicast_frag_packet);
  989. struct sk_buff *new_skb = NULL;
  990. int ret;
  991. if (check_unicast_packet(skb, hdr_size) < 0)
  992. return NET_RX_DROP;
  993. unicast_packet = (struct unicast_frag_packet *)skb->data;
  994. /* packet for me */
  995. if (is_my_mac(unicast_packet->dest)) {
  996. ret = frag_reassemble_skb(skb, bat_priv, &new_skb);
  997. if (ret == NET_RX_DROP)
  998. return NET_RX_DROP;
  999. /* packet was buffered for late merge */
  1000. if (!new_skb)
  1001. return NET_RX_SUCCESS;
  1002. interface_rx(recv_if->soft_iface, new_skb, recv_if,
  1003. sizeof(struct unicast_packet));
  1004. return NET_RX_SUCCESS;
  1005. }
  1006. return route_unicast_packet(skb, recv_if, hdr_size);
  1007. }
/**
 * recv_bcast_packet - receive handler for batman broadcast packets
 *
 * Validates the frame, filters duplicates and out-of-window sequence
 * numbers per originator, then rebroadcasts the packet into the mesh
 * and delivers a copy to the local soft interface.
 *
 * Returns NET_RX_SUCCESS or NET_RX_DROP.
 */
int recv_bcast_packet(struct sk_buff *skb, struct batman_if *recv_if)
{
	struct bat_priv *bat_priv = netdev_priv(recv_if->soft_iface);
	struct orig_node *orig_node;
	struct bcast_packet *bcast_packet;
	struct ethhdr *ethhdr;
	int hdr_size = sizeof(struct bcast_packet);
	int32_t seq_diff;

	/* drop packet if it has not necessary minimum size */
	if (unlikely(!pskb_may_pull(skb, hdr_size)))
		return NET_RX_DROP;

	ethhdr = (struct ethhdr *)skb_mac_header(skb);

	/* packet with broadcast indication but unicast recipient */
	if (!is_broadcast_ether_addr(ethhdr->h_dest))
		return NET_RX_DROP;

	/* packet with broadcast sender address */
	if (is_broadcast_ether_addr(ethhdr->h_source))
		return NET_RX_DROP;

	/* ignore broadcasts sent by myself */
	if (is_my_mac(ethhdr->h_source))
		return NET_RX_DROP;

	bcast_packet = (struct bcast_packet *)skb->data;

	/* ignore broadcasts originated by myself */
	if (is_my_mac(bcast_packet->orig))
		return NET_RX_DROP;

	if (bcast_packet->ttl < 2)
		return NET_RX_DROP;

	/* the per-originator broadcast sequence window is protected
	 * by orig_hash_lock for the whole duplicate check + update */
	spin_lock_bh(&bat_priv->orig_hash_lock);
	orig_node = ((struct orig_node *)
		     hash_find(bat_priv->orig_hash, compare_orig, choose_orig,
			       bcast_packet->orig));

	if (!orig_node) {
		spin_unlock_bh(&bat_priv->orig_hash_lock);
		return NET_RX_DROP;
	}

	/* check whether the packet is a duplicate */
	if (get_bit_status(orig_node->bcast_bits,
			   orig_node->last_bcast_seqno,
			   ntohl(bcast_packet->seqno))) {
		spin_unlock_bh(&bat_priv->orig_hash_lock);
		return NET_RX_DROP;
	}

	seq_diff = ntohl(bcast_packet->seqno) - orig_node->last_bcast_seqno;

	/* check whether the packet is old and the host just restarted. */
	if (window_protected(bat_priv, seq_diff,
			     &orig_node->bcast_seqno_reset)) {
		spin_unlock_bh(&bat_priv->orig_hash_lock);
		return NET_RX_DROP;
	}

	/* mark broadcast in flood history, update window position
	 * if required. */
	if (bit_get_packet(bat_priv, orig_node->bcast_bits, seq_diff, 1))
		orig_node->last_bcast_seqno = ntohl(bcast_packet->seqno);

	spin_unlock_bh(&bat_priv->orig_hash_lock);

	/* rebroadcast packet */
	/* NOTE(review): interface_rx below also consumes skb data, so
	 * add_bcast_packet_to_list presumably queues its own copy of the
	 * skb rather than taking ownership — confirm in its definition */
	add_bcast_packet_to_list(bat_priv, skb);

	/* broadcast for me */
	interface_rx(recv_if->soft_iface, skb, recv_if, hdr_size);

	return NET_RX_SUCCESS;
}
  1068. int recv_vis_packet(struct sk_buff *skb, struct batman_if *recv_if)
  1069. {
  1070. struct vis_packet *vis_packet;
  1071. struct ethhdr *ethhdr;
  1072. struct bat_priv *bat_priv = netdev_priv(recv_if->soft_iface);
  1073. int hdr_size = sizeof(struct vis_packet);
  1074. /* keep skb linear */
  1075. if (skb_linearize(skb) < 0)
  1076. return NET_RX_DROP;
  1077. if (unlikely(!pskb_may_pull(skb, hdr_size)))
  1078. return NET_RX_DROP;
  1079. vis_packet = (struct vis_packet *)skb->data;
  1080. ethhdr = (struct ethhdr *)skb_mac_header(skb);
  1081. /* not for me */
  1082. if (!is_my_mac(ethhdr->h_dest))
  1083. return NET_RX_DROP;
  1084. /* ignore own packets */
  1085. if (is_my_mac(vis_packet->vis_orig))
  1086. return NET_RX_DROP;
  1087. if (is_my_mac(vis_packet->sender_orig))
  1088. return NET_RX_DROP;
  1089. switch (vis_packet->vis_type) {
  1090. case VIS_TYPE_SERVER_SYNC:
  1091. receive_server_sync_packet(bat_priv, vis_packet,
  1092. skb_headlen(skb));
  1093. break;
  1094. case VIS_TYPE_CLIENT_UPDATE:
  1095. receive_client_update_packet(bat_priv, vis_packet,
  1096. skb_headlen(skb));
  1097. break;
  1098. default: /* ignore unknown packet */
  1099. break;
  1100. }
  1101. /* We take a copy of the data in the packet, so we should
  1102. always free the skbuf. */
  1103. return NET_RX_DROP;
  1104. }