/*
 * Copyright (C) 2007-2011 B.A.T.M.A.N. contributors:
 *
 * Marek Lindner, Simon Wunderlich
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
 * 02110-1301, USA
 *
 */

#include "main.h"
#include "routing.h"
#include "send.h"
#include "hash.h"
#include "soft-interface.h"
#include "hard-interface.h"
#include "icmp_socket.h"
#include "translation-table.h"
#include "originator.h"
#include "ring_buffer.h"
#include "vis.h"
#include "aggregation.h"
#include "gateway_common.h"
#include "gateway_client.h"
#include "unicast.h"

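/* slide the own-broadcast window of every originator by one sequence number
 * for the given interface and refresh the cached echo count that feeds the
 * TQ calculation */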
void slide_own_bcast_window(struct batman_if *batman_if)
{
	struct bat_priv *bat_priv = netdev_priv(batman_if->soft_iface);
	struct hashtable_t *hash = bat_priv->orig_hash;
	struct hlist_node *walk;
	struct hlist_head *head;
	struct element_t *bucket;
	struct orig_node *orig_node;
	unsigned long *word;
	int i;
	size_t word_index;

	spin_lock_bh(&bat_priv->orig_hash_lock);

	for (i = 0; i < hash->size; i++) {
		head = &hash->table[i];

		rcu_read_lock();
		hlist_for_each_entry_rcu(bucket, walk, head, hlist) {
			orig_node = bucket->data;

			word_index = batman_if->if_num * NUM_WORDS;
			word = &(orig_node->bcast_own[word_index]);

			bit_get_packet(bat_priv, word, 1, 0);
			orig_node->bcast_own_sum[batman_if->if_num] =
				bit_packet_count(word);
		}
		rcu_read_unlock();
	}

	spin_unlock_bh(&bat_priv->orig_hash_lock);
}

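/* refresh the HNA entries announced by this originator whenever the received
 * HNA buffer differs from the one we have cached */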
static void update_HNA(struct bat_priv *bat_priv, struct orig_node *orig_node,
		       unsigned char *hna_buff, int hna_buff_len)
{
	if ((hna_buff_len != orig_node->hna_buff_len) ||
	    ((hna_buff_len > 0) &&
	     (orig_node->hna_buff_len > 0) &&
	     (memcmp(orig_node->hna_buff, hna_buff, hna_buff_len) != 0))) {

		if (orig_node->hna_buff_len > 0)
			hna_global_del_orig(bat_priv, orig_node,
					    "originator changed hna");

		if ((hna_buff_len > 0) && (hna_buff))
			hna_global_add_orig(bat_priv, orig_node,
					    hna_buff, hna_buff_len);
	}
}

static void update_route(struct bat_priv *bat_priv,
			 struct orig_node *orig_node,
			 struct neigh_node *neigh_node,
			 unsigned char *hna_buff, int hna_buff_len)
{
	struct neigh_node *neigh_node_tmp;

	/* route deleted */
	if ((orig_node->router) && (!neigh_node)) {
		bat_dbg(DBG_ROUTES, bat_priv, "Deleting route towards: %pM\n",
			orig_node->orig);
		hna_global_del_orig(bat_priv, orig_node,
				    "originator timed out");

	/* route added */
	} else if ((!orig_node->router) && (neigh_node)) {
		bat_dbg(DBG_ROUTES, bat_priv,
			"Adding route towards: %pM (via %pM)\n",
			orig_node->orig, neigh_node->addr);
		hna_global_add_orig(bat_priv, orig_node,
				    hna_buff, hna_buff_len);

	/* route changed */
	} else {
		bat_dbg(DBG_ROUTES, bat_priv,
			"Changing route towards: %pM "
			"(now via %pM - was via %pM)\n",
			orig_node->orig, neigh_node->addr,
			orig_node->router->addr);
	}

	if (neigh_node)
		kref_get(&neigh_node->refcount);
	neigh_node_tmp = orig_node->router;
	orig_node->router = neigh_node;
	if (neigh_node_tmp)
		kref_put(&neigh_node_tmp->refcount, neigh_node_free_ref);
}

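/* select (or clear) the next hop towards an originator and keep the global
 * HNA table in sync; passing a NULL neigh_node removes the route */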
void update_routes(struct bat_priv *bat_priv, struct orig_node *orig_node,
		   struct neigh_node *neigh_node, unsigned char *hna_buff,
		   int hna_buff_len)
{
	if (!orig_node)
		return;

	if (orig_node->router != neigh_node)
		update_route(bat_priv, orig_node, neigh_node,
			     hna_buff, hna_buff_len);
	/* may be just HNA changed */
	else
		update_HNA(bat_priv, orig_node, hna_buff, hna_buff_len);
}

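/* judge whether the link to orig_neigh_node can be used in both directions:
 * compare the number of our own OGMs echoed back by the neighbor with the
 * number of OGMs received from it, scale the packet's TQ by the resulting
 * local TQ and asymmetry penalty, and return 1 if it still reaches
 * TQ_TOTAL_BIDRECT_LIMIT */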
static int is_bidirectional_neigh(struct orig_node *orig_node,
				  struct orig_node *orig_neigh_node,
				  struct batman_packet *batman_packet,
				  struct batman_if *if_incoming)
{
	struct bat_priv *bat_priv = netdev_priv(if_incoming->soft_iface);
	struct neigh_node *neigh_node = NULL, *tmp_neigh_node = NULL;
	struct hlist_node *node;
	unsigned char total_count;
	int ret = 0;

	if (orig_node == orig_neigh_node) {
		rcu_read_lock();
		hlist_for_each_entry_rcu(tmp_neigh_node, node,
					 &orig_node->neigh_list, list) {

			if (compare_orig(tmp_neigh_node->addr,
					 orig_neigh_node->orig) &&
			    (tmp_neigh_node->if_incoming == if_incoming))
				neigh_node = tmp_neigh_node;
		}

		if (!neigh_node)
			neigh_node = create_neighbor(orig_node,
						     orig_neigh_node,
						     orig_neigh_node->orig,
						     if_incoming);
		/* create_neighbor failed, return 0 */
		if (!neigh_node)
			goto unlock;

		kref_get(&neigh_node->refcount);
		rcu_read_unlock();

		neigh_node->last_valid = jiffies;
	} else {
		/* find packet count of corresponding one hop neighbor */
		rcu_read_lock();
		hlist_for_each_entry_rcu(tmp_neigh_node, node,
					 &orig_neigh_node->neigh_list, list) {

			if (compare_orig(tmp_neigh_node->addr,
					 orig_neigh_node->orig) &&
			    (tmp_neigh_node->if_incoming == if_incoming))
				neigh_node = tmp_neigh_node;
		}

		if (!neigh_node)
			neigh_node = create_neighbor(orig_neigh_node,
						     orig_neigh_node,
						     orig_neigh_node->orig,
						     if_incoming);
		/* create_neighbor failed, return 0 */
		if (!neigh_node)
			goto unlock;

		kref_get(&neigh_node->refcount);
		rcu_read_unlock();
	}

	orig_node->last_valid = jiffies;

	/* pay attention to not get a value bigger than 100 % */
	total_count = (orig_neigh_node->bcast_own_sum[if_incoming->if_num] >
		       neigh_node->real_packet_count ?
		       neigh_node->real_packet_count :
		       orig_neigh_node->bcast_own_sum[if_incoming->if_num]);

	/* if we have too few packets (too little data) we set tq_own to zero */
	/* if we receive too few packets it is not considered bidirectional */
	if ((total_count < TQ_LOCAL_BIDRECT_SEND_MINIMUM) ||
	    (neigh_node->real_packet_count < TQ_LOCAL_BIDRECT_RECV_MINIMUM))
		orig_neigh_node->tq_own = 0;
	else
		/* neigh_node->real_packet_count is never zero as we
		 * only purge old information when getting new
		 * information */
		orig_neigh_node->tq_own = (TQ_MAX_VALUE * total_count) /
			neigh_node->real_packet_count;

	/*
	 * 1 - ((1-x) ** 3), normalized to TQ_MAX_VALUE this does
	 * affect the nearly-symmetric links only a little, but
	 * punishes asymmetric links more. This will give a value
	 * between 0 and TQ_MAX_VALUE
	 */
	orig_neigh_node->tq_asym_penalty =
		TQ_MAX_VALUE -
		(TQ_MAX_VALUE *
		 (TQ_LOCAL_WINDOW_SIZE - neigh_node->real_packet_count) *
		 (TQ_LOCAL_WINDOW_SIZE - neigh_node->real_packet_count) *
		 (TQ_LOCAL_WINDOW_SIZE - neigh_node->real_packet_count)) /
		(TQ_LOCAL_WINDOW_SIZE *
		 TQ_LOCAL_WINDOW_SIZE *
		 TQ_LOCAL_WINDOW_SIZE);

	batman_packet->tq = ((batman_packet->tq *
			      orig_neigh_node->tq_own *
			      orig_neigh_node->tq_asym_penalty) /
			     (TQ_MAX_VALUE * TQ_MAX_VALUE));

	bat_dbg(DBG_BATMAN, bat_priv,
		"bidirectional: "
		"orig = %-15pM neigh = %-15pM => own_bcast = %2i, "
		"real recv = %2i, local tq: %3i, asym_penalty: %3i, "
		"total tq: %3i\n",
		orig_node->orig, orig_neigh_node->orig, total_count,
		neigh_node->real_packet_count, orig_neigh_node->tq_own,
		orig_neigh_node->tq_asym_penalty, batman_packet->tq);

	/* if link has the minimum required transmission quality
	 * consider it bidirectional */
	if (batman_packet->tq >= TQ_TOTAL_BIDRECT_LIMIT)
		ret = 1;

	goto out;

unlock:
	rcu_read_unlock();
out:
	if (neigh_node)
		kref_put(&neigh_node->refcount, neigh_node_free_ref);
	return ret;
}

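/* merge a newly received OGM into the originator table: update the
 * neighbor's TQ sliding average, switch the route if this neighbor now
 * offers a better or more symmetric TQ, and refresh the HNA buffer and
 * gateway flags announced by the originator */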
static void update_orig(struct bat_priv *bat_priv,
			struct orig_node *orig_node,
			struct ethhdr *ethhdr,
			struct batman_packet *batman_packet,
			struct batman_if *if_incoming,
			unsigned char *hna_buff, int hna_buff_len,
			char is_duplicate)
{
	struct neigh_node *neigh_node = NULL, *tmp_neigh_node = NULL;
	struct hlist_node *node;
	int tmp_hna_buff_len;

	bat_dbg(DBG_BATMAN, bat_priv, "update_originator(): "
		"Searching and updating originator entry of received packet\n");

	rcu_read_lock();
	hlist_for_each_entry_rcu(tmp_neigh_node, node,
				 &orig_node->neigh_list, list) {
		if (compare_orig(tmp_neigh_node->addr, ethhdr->h_source) &&
		    (tmp_neigh_node->if_incoming == if_incoming)) {
			neigh_node = tmp_neigh_node;
			continue;
		}

		if (is_duplicate)
			continue;

		ring_buffer_set(tmp_neigh_node->tq_recv,
				&tmp_neigh_node->tq_index, 0);
		tmp_neigh_node->tq_avg =
			ring_buffer_avg(tmp_neigh_node->tq_recv);
	}

	if (!neigh_node) {
		struct orig_node *orig_tmp;

		orig_tmp = get_orig_node(bat_priv, ethhdr->h_source);
		if (!orig_tmp)
			goto unlock;

		neigh_node = create_neighbor(orig_node, orig_tmp,
					     ethhdr->h_source, if_incoming);

		kref_put(&orig_tmp->refcount, orig_node_free_ref);
		if (!neigh_node)
			goto unlock;
	} else
		bat_dbg(DBG_BATMAN, bat_priv,
			"Updating existing last-hop neighbor of originator\n");

	kref_get(&neigh_node->refcount);
	rcu_read_unlock();

	orig_node->flags = batman_packet->flags;
	neigh_node->last_valid = jiffies;

	ring_buffer_set(neigh_node->tq_recv,
			&neigh_node->tq_index,
			batman_packet->tq);
	neigh_node->tq_avg = ring_buffer_avg(neigh_node->tq_recv);

	if (!is_duplicate) {
		orig_node->last_ttl = batman_packet->ttl;
		neigh_node->last_ttl = batman_packet->ttl;
	}

	tmp_hna_buff_len = (hna_buff_len > batman_packet->num_hna * ETH_ALEN ?
			    batman_packet->num_hna * ETH_ALEN : hna_buff_len);

	/* if this neighbor already is our next hop there is nothing
	 * to change */
	if (orig_node->router == neigh_node)
		goto update_hna;

	/* if this neighbor does not offer a better TQ we won't consider it */
	if ((orig_node->router) &&
	    (orig_node->router->tq_avg > neigh_node->tq_avg))
		goto update_hna;

	/* if the TQ is the same and the link not more symmetric we
	 * won't consider it either */
	if ((orig_node->router) &&
	    ((neigh_node->tq_avg == orig_node->router->tq_avg) &&
	     (orig_node->router->orig_node->bcast_own_sum[if_incoming->if_num]
	      >= neigh_node->orig_node->bcast_own_sum[if_incoming->if_num])))
		goto update_hna;

	update_routes(bat_priv, orig_node, neigh_node,
		      hna_buff, tmp_hna_buff_len);
	goto update_gw;

update_hna:
	update_routes(bat_priv, orig_node, orig_node->router,
		      hna_buff, tmp_hna_buff_len);

update_gw:
	if (orig_node->gw_flags != batman_packet->gw_flags)
		gw_node_update(bat_priv, orig_node, batman_packet->gw_flags);

	orig_node->gw_flags = batman_packet->gw_flags;

	/* restart gateway selection if fast or late switching was enabled */
	if ((orig_node->gw_flags) &&
	    (atomic_read(&bat_priv->gw_mode) == GW_MODE_CLIENT) &&
	    (atomic_read(&bat_priv->gw_sel_class) > 2))
		gw_check_election(bat_priv, orig_node);

	goto out;

unlock:
	rcu_read_unlock();
out:
	if (neigh_node)
		kref_put(&neigh_node->refcount, neigh_node_free_ref);
}

/* checks whether the host restarted and is in the protection time.
 * returns:
 *  0 if the packet is to be accepted
 *  1 if the packet is to be ignored.
 */
static int window_protected(struct bat_priv *bat_priv,
			    int32_t seq_num_diff,
			    unsigned long *last_reset)
{
	if ((seq_num_diff <= -TQ_LOCAL_WINDOW_SIZE)
	    || (seq_num_diff >= EXPECTED_SEQNO_RANGE)) {
		if (time_after(jiffies, *last_reset +
			       msecs_to_jiffies(RESET_PROTECTION_MS))) {

			*last_reset = jiffies;
			bat_dbg(DBG_BATMAN, bat_priv,
				"old packet received, start protection\n");

			return 0;
		} else
			return 1;
	}
	return 0;
}

/* processes a batman packet for all interfaces, adjusts the sequence number and
 * finds out whether it is a duplicate.
 * returns:
 *   1 the packet is a duplicate
 *   0 the packet has not yet been received
 *  -1 the packet is old and has been received while the seqno window
 *     was protected. Caller should drop it.
 */
static char count_real_packets(struct ethhdr *ethhdr,
			       struct batman_packet *batman_packet,
			       struct batman_if *if_incoming)
{
	struct bat_priv *bat_priv = netdev_priv(if_incoming->soft_iface);
	struct orig_node *orig_node;
	struct neigh_node *tmp_neigh_node;
	struct hlist_node *node;
	char is_duplicate = 0;
	int32_t seq_diff;
	int need_update = 0;
	int set_mark;

	orig_node = get_orig_node(bat_priv, batman_packet->orig);
	if (!orig_node)
		return 0;

	seq_diff = batman_packet->seqno - orig_node->last_real_seqno;

	/* signal to the caller that the packet is to be dropped. */
	if (window_protected(bat_priv, seq_diff,
			     &orig_node->batman_seqno_reset))
		goto err;

	rcu_read_lock();
	hlist_for_each_entry_rcu(tmp_neigh_node, node,
				 &orig_node->neigh_list, list) {

		is_duplicate |= get_bit_status(tmp_neigh_node->real_bits,
					       orig_node->last_real_seqno,
					       batman_packet->seqno);

		if (compare_orig(tmp_neigh_node->addr, ethhdr->h_source) &&
		    (tmp_neigh_node->if_incoming == if_incoming))
			set_mark = 1;
		else
			set_mark = 0;

		/* if the window moved, set the update flag. */
		need_update |= bit_get_packet(bat_priv,
					      tmp_neigh_node->real_bits,
					      seq_diff, set_mark);

		tmp_neigh_node->real_packet_count =
			bit_packet_count(tmp_neigh_node->real_bits);
	}
	rcu_read_unlock();

	if (need_update) {
		bat_dbg(DBG_BATMAN, bat_priv,
			"updating last_seqno: old %d, new %d\n",
			orig_node->last_real_seqno, batman_packet->seqno);
		orig_node->last_real_seqno = batman_packet->seqno;
	}

	kref_put(&orig_node->refcount, orig_node_free_ref);
	return is_duplicate;

err:
	kref_put(&orig_node->refcount, orig_node_free_ref);
	return -1;
}

/* copy primary address for bonding */
static void mark_bonding_address(struct orig_node *orig_node,
				 struct orig_node *orig_neigh_node,
				 struct batman_packet *batman_packet)
{
	if (batman_packet->flags & PRIMARIES_FIRST_HOP)
		memcpy(orig_neigh_node->primary_addr,
		       orig_node->orig, ETH_ALEN);

	return;
}

/* mark possible bond.candidates in the neighbor list */
void update_bonding_candidates(struct orig_node *orig_node)
{
	int candidates;
	int interference_candidate;
	int best_tq;
	struct hlist_node *node, *node2;
	struct neigh_node *tmp_neigh_node, *tmp_neigh_node2;
	struct neigh_node *first_candidate, *last_candidate;

	/* update the candidates for this originator */
	if (!orig_node->router) {
		orig_node->bond.candidates = 0;
		return;
	}

	best_tq = orig_node->router->tq_avg;

	/* update bond.candidates */
	candidates = 0;

	/* mark other nodes which also received "PRIMARIES FIRST HOP" packets
	 * as "bonding partner" */

	/* first, zero the list */
	rcu_read_lock();
	hlist_for_each_entry_rcu(tmp_neigh_node, node,
				 &orig_node->neigh_list, list) {
		tmp_neigh_node->next_bond_candidate = NULL;
	}
	rcu_read_unlock();

	first_candidate = NULL;
	last_candidate = NULL;

	rcu_read_lock();
	hlist_for_each_entry_rcu(tmp_neigh_node, node,
				 &orig_node->neigh_list, list) {

		/* only consider if it has the same primary address ... */
		if (memcmp(orig_node->orig,
			   tmp_neigh_node->orig_node->primary_addr,
			   ETH_ALEN) != 0)
			continue;

		/* ... and is good enough to be considered */
		if (tmp_neigh_node->tq_avg < best_tq - BONDING_TQ_THRESHOLD)
			continue;

		/* check if we have another candidate with the same
		 * mac address or interface. If we do, we won't
		 * select this candidate because of possible interference. */
		interference_candidate = 0;
		hlist_for_each_entry_rcu(tmp_neigh_node2, node2,
					 &orig_node->neigh_list, list) {

			if (tmp_neigh_node2 == tmp_neigh_node)
				continue;

			/* we only care if the other candidate is even
			 * considered as candidate. */
			if (!tmp_neigh_node2->next_bond_candidate)
				continue;

			if ((tmp_neigh_node->if_incoming ==
			     tmp_neigh_node2->if_incoming)
			    || (memcmp(tmp_neigh_node->addr,
				       tmp_neigh_node2->addr, ETH_ALEN) == 0)) {
				interference_candidate = 1;
				break;
			}
		}
		/* don't care further if it is an interference candidate */
		if (interference_candidate)
			continue;

		if (!first_candidate) {
			first_candidate = tmp_neigh_node;
			tmp_neigh_node->next_bond_candidate = first_candidate;
		} else
			tmp_neigh_node->next_bond_candidate = last_candidate;

		last_candidate = tmp_neigh_node;
		candidates++;
	}
	rcu_read_unlock();

	if (candidates > 0) {
		first_candidate->next_bond_candidate = last_candidate;
		orig_node->bond.selected = first_candidate;
	}

	orig_node->bond.candidates = candidates;
}

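/* main OGM handler: validate the packet, drop our own or looping
 * rebroadcasts, check the link for bidirectionality, update the routing
 * table via update_orig() and rebroadcast the OGM where appropriate */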
void receive_bat_packet(struct ethhdr *ethhdr,
			struct batman_packet *batman_packet,
			unsigned char *hna_buff, int hna_buff_len,
			struct batman_if *if_incoming)
{
	struct bat_priv *bat_priv = netdev_priv(if_incoming->soft_iface);
	struct batman_if *batman_if;
	struct orig_node *orig_neigh_node, *orig_node;
	char has_directlink_flag;
	char is_my_addr = 0, is_my_orig = 0, is_my_oldorig = 0;
	char is_broadcast = 0, is_bidirectional, is_single_hop_neigh;
	char is_duplicate;
	uint32_t if_incoming_seqno;

	/* Silently drop when the batman packet is actually not a
	 * correct packet.
	 *
	 * This might happen if a packet is padded (e.g. Ethernet has a
	 * minimum frame length of 64 byte) and the aggregation interprets
	 * it as an additional length.
	 *
	 * TODO: A more sane solution would be to have a bit in the
	 * batman_packet to detect whether the packet is the last
	 * packet in an aggregation. Here we expect that the padding
	 * is always zero (or not 0x01)
	 */
	if (batman_packet->packet_type != BAT_PACKET)
		return;

	/* could be changed by schedule_own_packet() */
	if_incoming_seqno = atomic_read(&if_incoming->seqno);

	has_directlink_flag = (batman_packet->flags & DIRECTLINK ? 1 : 0);

	is_single_hop_neigh = (compare_orig(ethhdr->h_source,
					    batman_packet->orig) ? 1 : 0);

	bat_dbg(DBG_BATMAN, bat_priv,
		"Received BATMAN packet via NB: %pM, IF: %s [%pM] "
		"(from OG: %pM, via prev OG: %pM, seqno %d, tq %d, "
		"TTL %d, V %d, IDF %d)\n",
		ethhdr->h_source, if_incoming->net_dev->name,
		if_incoming->net_dev->dev_addr, batman_packet->orig,
		batman_packet->prev_sender, batman_packet->seqno,
		batman_packet->tq, batman_packet->ttl, batman_packet->version,
		has_directlink_flag);

	rcu_read_lock();
	list_for_each_entry_rcu(batman_if, &if_list, list) {
		if (batman_if->if_status != IF_ACTIVE)
			continue;

		if (batman_if->soft_iface != if_incoming->soft_iface)
			continue;

		if (compare_orig(ethhdr->h_source,
				 batman_if->net_dev->dev_addr))
			is_my_addr = 1;

		if (compare_orig(batman_packet->orig,
				 batman_if->net_dev->dev_addr))
			is_my_orig = 1;

		if (compare_orig(batman_packet->prev_sender,
				 batman_if->net_dev->dev_addr))
			is_my_oldorig = 1;

		if (compare_orig(ethhdr->h_source, broadcast_addr))
			is_broadcast = 1;
	}
	rcu_read_unlock();

	if (batman_packet->version != COMPAT_VERSION) {
		bat_dbg(DBG_BATMAN, bat_priv,
			"Drop packet: incompatible batman version (%i)\n",
			batman_packet->version);
		return;
	}

	if (is_my_addr) {
		bat_dbg(DBG_BATMAN, bat_priv,
			"Drop packet: received my own broadcast (sender: %pM)\n",
			ethhdr->h_source);
		return;
	}

	if (is_broadcast) {
		bat_dbg(DBG_BATMAN, bat_priv, "Drop packet: "
			"ignoring all packets with broadcast source addr "
			"(sender: %pM)\n", ethhdr->h_source);
		return;
	}

	if (is_my_orig) {
		unsigned long *word;
		int offset;

		orig_neigh_node = get_orig_node(bat_priv, ethhdr->h_source);
		if (!orig_neigh_node)
			return;

		/* neighbor has to indicate direct link and it has to
		 * come via the corresponding interface */
		/* if received seqno equals last send seqno save new
		 * seqno for bidirectional check */
		if (has_directlink_flag &&
		    compare_orig(if_incoming->net_dev->dev_addr,
				 batman_packet->orig) &&
		    (batman_packet->seqno - if_incoming_seqno + 2 == 0)) {
			offset = if_incoming->if_num * NUM_WORDS;
			word = &(orig_neigh_node->bcast_own[offset]);
			bit_mark(word, 0);
			orig_neigh_node->bcast_own_sum[if_incoming->if_num] =
				bit_packet_count(word);
		}

		bat_dbg(DBG_BATMAN, bat_priv, "Drop packet: "
			"originator packet from myself (via neighbor)\n");
		kref_put(&orig_neigh_node->refcount, orig_node_free_ref);
		return;
	}

	if (is_my_oldorig) {
		bat_dbg(DBG_BATMAN, bat_priv,
			"Drop packet: ignoring all rebroadcast echos (sender: "
			"%pM)\n", ethhdr->h_source);
		return;
	}

	orig_node = get_orig_node(bat_priv, batman_packet->orig);
	if (!orig_node)
		return;

	is_duplicate = count_real_packets(ethhdr, batman_packet, if_incoming);

	if (is_duplicate == -1) {
		bat_dbg(DBG_BATMAN, bat_priv,
			"Drop packet: packet within seqno protection time "
			"(sender: %pM)\n", ethhdr->h_source);
		goto out;
	}

	if (batman_packet->tq == 0) {
		bat_dbg(DBG_BATMAN, bat_priv,
			"Drop packet: originator packet with tq equal 0\n");
		goto out;
	}

	/* avoid temporary routing loops */
	if ((orig_node->router) &&
	    (orig_node->router->orig_node->router) &&
	    (compare_orig(orig_node->router->addr,
			  batman_packet->prev_sender)) &&
	    !(compare_orig(batman_packet->orig, batman_packet->prev_sender)) &&
	    (compare_orig(orig_node->router->addr,
			  orig_node->router->orig_node->router->addr))) {
		bat_dbg(DBG_BATMAN, bat_priv,
			"Drop packet: ignoring all rebroadcast packets that "
			"may make me loop (sender: %pM)\n", ethhdr->h_source);
		goto out;
	}

	/* if sender is a direct neighbor the sender mac equals
	 * originator mac */
	orig_neigh_node = (is_single_hop_neigh ?
			   orig_node :
			   get_orig_node(bat_priv, ethhdr->h_source));
	if (!orig_neigh_node)
		goto out;

	/* drop packet if sender is not a direct neighbor and if we
	 * don't route towards it */
	if (!is_single_hop_neigh && (!orig_neigh_node->router)) {
		bat_dbg(DBG_BATMAN, bat_priv,
			"Drop packet: OGM via unknown neighbor!\n");
		goto out_neigh;
	}

	is_bidirectional = is_bidirectional_neigh(orig_node, orig_neigh_node,
						  batman_packet, if_incoming);

	/* update ranking if it is not a duplicate or has the same
	 * seqno and similar ttl as the non-duplicate */
	if (is_bidirectional &&
	    (!is_duplicate ||
	     ((orig_node->last_real_seqno == batman_packet->seqno) &&
	      (orig_node->last_ttl - 3 <= batman_packet->ttl))))
		update_orig(bat_priv, orig_node, ethhdr, batman_packet,
			    if_incoming, hna_buff, hna_buff_len, is_duplicate);

	mark_bonding_address(orig_node, orig_neigh_node, batman_packet);
	update_bonding_candidates(orig_node);

	/* is single hop (direct) neighbor */
	if (is_single_hop_neigh) {

		/* mark direct link on incoming interface */
		schedule_forward_packet(orig_node, ethhdr, batman_packet,
					1, hna_buff_len, if_incoming);

		bat_dbg(DBG_BATMAN, bat_priv, "Forwarding packet: "
			"rebroadcast neighbor packet with direct link flag\n");
		goto out_neigh;
	}

	/* multihop originator */
	if (!is_bidirectional) {
		bat_dbg(DBG_BATMAN, bat_priv,
			"Drop packet: not received via bidirectional link\n");
		goto out_neigh;
	}

	if (is_duplicate) {
		bat_dbg(DBG_BATMAN, bat_priv,
			"Drop packet: duplicate packet received\n");
		goto out_neigh;
	}

	bat_dbg(DBG_BATMAN, bat_priv,
		"Forwarding packet: rebroadcast originator packet\n");
	schedule_forward_packet(orig_node, ethhdr, batman_packet,
				0, hna_buff_len, if_incoming);

out_neigh:
	if (!is_single_hop_neigh)
		kref_put(&orig_neigh_node->refcount, orig_node_free_ref);
out:
	kref_put(&orig_node->refcount, orig_node_free_ref);
}

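/* entry point for OGM frames coming in on a hard interface: perform basic
 * sanity checks on the ethernet header, make the skb writable and linear,
 * then hand the buffer to the aggregation layer which splits it into the
 * individual batman packets */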
int recv_bat_packet(struct sk_buff *skb, struct batman_if *batman_if)
{
	struct bat_priv *bat_priv = netdev_priv(batman_if->soft_iface);
	struct ethhdr *ethhdr;

	/* drop packet if it does not have the necessary minimum size */
	if (unlikely(!pskb_may_pull(skb, sizeof(struct batman_packet))))
		return NET_RX_DROP;

	ethhdr = (struct ethhdr *)skb_mac_header(skb);

	/* packet with broadcast indication but unicast recipient */
	if (!is_broadcast_ether_addr(ethhdr->h_dest))
		return NET_RX_DROP;

	/* packet with broadcast sender address */
	if (is_broadcast_ether_addr(ethhdr->h_source))
		return NET_RX_DROP;

	/* create a copy of the skb, if needed, to modify it. */
	if (skb_cow(skb, 0) < 0)
		return NET_RX_DROP;

	/* keep skb linear */
	if (skb_linearize(skb) < 0)
		return NET_RX_DROP;

	ethhdr = (struct ethhdr *)skb_mac_header(skb);

	spin_lock_bh(&bat_priv->orig_hash_lock);
	receive_aggr_bat_packet(ethhdr,
				skb->data,
				skb_headlen(skb),
				batman_if);
	spin_unlock_bh(&bat_priv->orig_hash_lock);

	kfree_skb(skb);
	return NET_RX_SUCCESS;
}

static int recv_my_icmp_packet(struct bat_priv *bat_priv,
			       struct sk_buff *skb, size_t icmp_len)
{
	struct orig_node *orig_node;
	struct icmp_packet_rr *icmp_packet;
	struct batman_if *batman_if;
	int ret;
	uint8_t dstaddr[ETH_ALEN];

	icmp_packet = (struct icmp_packet_rr *)skb->data;

	/* add data to device queue */
	if (icmp_packet->msg_type != ECHO_REQUEST) {
		bat_socket_receive_packet(icmp_packet, icmp_len);
		return NET_RX_DROP;
	}

	if (!bat_priv->primary_if)
		return NET_RX_DROP;

	/* answer echo request (ping) */
	/* get routing information */
	spin_lock_bh(&bat_priv->orig_hash_lock);
	rcu_read_lock();
	orig_node = ((struct orig_node *)hash_find(bat_priv->orig_hash,
						   compare_orig, choose_orig,
						   icmp_packet->orig));
	rcu_read_unlock();
	ret = NET_RX_DROP;

	if ((orig_node) && (orig_node->router)) {

		/* don't lock while sending the packets ... we therefore
		 * copy the required data before sending */
		batman_if = orig_node->router->if_incoming;
		memcpy(dstaddr, orig_node->router->addr, ETH_ALEN);
		spin_unlock_bh(&bat_priv->orig_hash_lock);

		/* create a copy of the skb, if needed, to modify it. */
		if (skb_cow(skb, sizeof(struct ethhdr)) < 0)
			return NET_RX_DROP;

		icmp_packet = (struct icmp_packet_rr *)skb->data;

		memcpy(icmp_packet->dst, icmp_packet->orig, ETH_ALEN);
		memcpy(icmp_packet->orig,
		       bat_priv->primary_if->net_dev->dev_addr, ETH_ALEN);
		icmp_packet->msg_type = ECHO_REPLY;
		icmp_packet->ttl = TTL;

		send_skb_packet(skb, batman_if, dstaddr);
		ret = NET_RX_SUCCESS;

	} else
		spin_unlock_bh(&bat_priv->orig_hash_lock);

	return ret;
}

static int recv_icmp_ttl_exceeded(struct bat_priv *bat_priv,
				  struct sk_buff *skb)
{
	struct orig_node *orig_node;
	struct icmp_packet *icmp_packet;
	struct batman_if *batman_if;
	int ret;
	uint8_t dstaddr[ETH_ALEN];

	icmp_packet = (struct icmp_packet *)skb->data;

	/* send TTL exceeded if packet is an echo request (traceroute) */
	if (icmp_packet->msg_type != ECHO_REQUEST) {
		pr_debug("Warning - can't forward icmp packet from %pM to "
			 "%pM: ttl exceeded\n", icmp_packet->orig,
			 icmp_packet->dst);
		return NET_RX_DROP;
	}

	if (!bat_priv->primary_if)
		return NET_RX_DROP;

	/* get routing information */
	spin_lock_bh(&bat_priv->orig_hash_lock);
	rcu_read_lock();
	orig_node = ((struct orig_node *)
		     hash_find(bat_priv->orig_hash, compare_orig, choose_orig,
			       icmp_packet->orig));
	rcu_read_unlock();
	ret = NET_RX_DROP;

	if ((orig_node) && (orig_node->router)) {

		/* don't lock while sending the packets ... we therefore
		 * copy the required data before sending */
		batman_if = orig_node->router->if_incoming;
		memcpy(dstaddr, orig_node->router->addr, ETH_ALEN);
		spin_unlock_bh(&bat_priv->orig_hash_lock);

		/* create a copy of the skb, if needed, to modify it. */
		if (skb_cow(skb, sizeof(struct ethhdr)) < 0)
			return NET_RX_DROP;

		icmp_packet = (struct icmp_packet *)skb->data;

		memcpy(icmp_packet->dst, icmp_packet->orig, ETH_ALEN);
		memcpy(icmp_packet->orig,
		       bat_priv->primary_if->net_dev->dev_addr, ETH_ALEN);
		icmp_packet->msg_type = TTL_EXCEEDED;
		icmp_packet->ttl = TTL;

		send_skb_packet(skb, batman_if, dstaddr);
		ret = NET_RX_SUCCESS;

	} else
		spin_unlock_bh(&bat_priv->orig_hash_lock);

	return ret;
}

int recv_icmp_packet(struct sk_buff *skb, struct batman_if *recv_if)
{
	struct bat_priv *bat_priv = netdev_priv(recv_if->soft_iface);
	struct icmp_packet_rr *icmp_packet;
	struct ethhdr *ethhdr;
	struct orig_node *orig_node;
	struct batman_if *batman_if;
	int hdr_size = sizeof(struct icmp_packet);
	int ret;
	uint8_t dstaddr[ETH_ALEN];

	/**
	 * we truncate all incoming icmp packets if they don't match our size
	 */
	if (skb->len >= sizeof(struct icmp_packet_rr))
		hdr_size = sizeof(struct icmp_packet_rr);

	/* drop packet if it does not have the necessary minimum size */
	if (unlikely(!pskb_may_pull(skb, hdr_size)))
		return NET_RX_DROP;

	ethhdr = (struct ethhdr *)skb_mac_header(skb);

	/* packet with unicast indication but broadcast recipient */
	if (is_broadcast_ether_addr(ethhdr->h_dest))
		return NET_RX_DROP;

	/* packet with broadcast sender address */
	if (is_broadcast_ether_addr(ethhdr->h_source))
		return NET_RX_DROP;

	/* not for me */
	if (!is_my_mac(ethhdr->h_dest))
		return NET_RX_DROP;

	icmp_packet = (struct icmp_packet_rr *)skb->data;

	/* add record route information if not full */
	if ((hdr_size == sizeof(struct icmp_packet_rr)) &&
	    (icmp_packet->rr_cur < BAT_RR_LEN)) {
		memcpy(&(icmp_packet->rr[icmp_packet->rr_cur]),
		       ethhdr->h_dest, ETH_ALEN);
		icmp_packet->rr_cur++;
	}

	/* packet for me */
	if (is_my_mac(icmp_packet->dst))
		return recv_my_icmp_packet(bat_priv, skb, hdr_size);

	/* TTL exceeded */
	if (icmp_packet->ttl < 2)
		return recv_icmp_ttl_exceeded(bat_priv, skb);

	ret = NET_RX_DROP;

	/* get routing information */
	spin_lock_bh(&bat_priv->orig_hash_lock);
	rcu_read_lock();
	orig_node = ((struct orig_node *)
		     hash_find(bat_priv->orig_hash, compare_orig, choose_orig,
			       icmp_packet->dst));
	rcu_read_unlock();

	if ((orig_node) && (orig_node->router)) {

		/* don't lock while sending the packets ... we therefore
		 * copy the required data before sending */
		batman_if = orig_node->router->if_incoming;
		memcpy(dstaddr, orig_node->router->addr, ETH_ALEN);
		spin_unlock_bh(&bat_priv->orig_hash_lock);

		/* create a copy of the skb, if needed, to modify it. */
		if (skb_cow(skb, sizeof(struct ethhdr)) < 0)
			return NET_RX_DROP;

		icmp_packet = (struct icmp_packet_rr *)skb->data;

		/* decrement ttl */
		icmp_packet->ttl--;

		/* route it */
		send_skb_packet(skb, batman_if, dstaddr);
		ret = NET_RX_SUCCESS;

	} else
		spin_unlock_bh(&bat_priv->orig_hash_lock);

	return ret;
}

/* find a suitable router for this originator, and use
 * bonding if possible. */
struct neigh_node *find_router(struct bat_priv *bat_priv,
			       struct orig_node *orig_node,
			       struct batman_if *recv_if)
{
	struct orig_node *primary_orig_node;
	struct orig_node *router_orig;
	struct neigh_node *router, *first_candidate, *best_router;
	static uint8_t zero_mac[ETH_ALEN] = {0, 0, 0, 0, 0, 0};
	int bonding_enabled;

	if (!orig_node)
		return NULL;

	if (!orig_node->router)
		return NULL;

	/* without bonding, the first node should
	 * always choose the default router. */
	bonding_enabled = atomic_read(&bat_priv->bonding);

	if ((!recv_if) && (!bonding_enabled))
		return orig_node->router;

	router_orig = orig_node->router->orig_node;

	/* if we have something in the primary_addr, we can search
	 * for a potential bonding candidate. */
	if (memcmp(router_orig->primary_addr, zero_mac, ETH_ALEN) == 0)
		return orig_node->router;

	/* find the orig_node which has the primary interface. might
	 * even be the same as our router_orig in many cases */
	if (memcmp(router_orig->primary_addr,
		   router_orig->orig, ETH_ALEN) == 0) {
		primary_orig_node = router_orig;
	} else {
		rcu_read_lock();
		primary_orig_node = hash_find(bat_priv->orig_hash, compare_orig,
					      choose_orig,
					      router_orig->primary_addr);
		rcu_read_unlock();

		if (!primary_orig_node)
			return orig_node->router;
	}

	/* with less than 2 candidates, we can't do any
	 * bonding and prefer the original router. */
	if (primary_orig_node->bond.candidates < 2)
		return orig_node->router;

	/* all nodes between should choose a candidate which
	 * is not on the interface where the packet came
	 * in. */
	first_candidate = primary_orig_node->bond.selected;
	router = first_candidate;

	if (bonding_enabled) {
		/* in the bonding case, send the packets in a round
		 * robin fashion over the remaining interfaces. */
		do {
			/* recv_if == NULL on the first node. */
			if (router->if_incoming != recv_if)
				break;

			router = router->next_bond_candidate;
		} while (router != first_candidate);

		primary_orig_node->bond.selected = router->next_bond_candidate;
	} else {
		/* if bonding is disabled, use the best of the
		 * remaining candidates which are not using
		 * this interface. */
		best_router = first_candidate;

		do {
			/* recv_if == NULL on the first node. */
			if ((router->if_incoming != recv_if) &&
			    (router->tq_avg > best_router->tq_avg))
				best_router = router;

			router = router->next_bond_candidate;
		} while (router != first_candidate);

		router = best_router;
	}
	return router;
}

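/* common sanity checks for incoming unicast payloads: minimum length, sane
 * ethernet addresses and a destination that belongs to this node;
 * returns 0 on success, -1 if the packet should be dropped */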
static int check_unicast_packet(struct sk_buff *skb, int hdr_size)
{
	struct ethhdr *ethhdr;

	/* drop packet if it does not have the necessary minimum size */
	if (unlikely(!pskb_may_pull(skb, hdr_size)))
		return -1;

	ethhdr = (struct ethhdr *)skb_mac_header(skb);

	/* packet with unicast indication but broadcast recipient */
	if (is_broadcast_ether_addr(ethhdr->h_dest))
		return -1;

	/* packet with broadcast sender address */
	if (is_broadcast_ether_addr(ethhdr->h_source))
		return -1;

	/* not for me */
	if (!is_my_mac(ethhdr->h_dest))
		return -1;

	return 0;
}

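/* forward a unicast payload towards its destination: look up the next hop
 * via find_router(), fragment the frame if it exceeds the outgoing MTU or
 * merge buffered fragments, then decrement the TTL and send it on */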
int route_unicast_packet(struct sk_buff *skb, struct batman_if *recv_if,
			 int hdr_size)
{
	struct bat_priv *bat_priv = netdev_priv(recv_if->soft_iface);
	struct orig_node *orig_node;
	struct neigh_node *router;
	struct batman_if *batman_if;
	uint8_t dstaddr[ETH_ALEN];
	struct unicast_packet *unicast_packet;
	struct ethhdr *ethhdr = (struct ethhdr *)skb_mac_header(skb);
	int ret;
	struct sk_buff *new_skb;

	unicast_packet = (struct unicast_packet *)skb->data;

	/* TTL exceeded */
	if (unicast_packet->ttl < 2) {
		pr_debug("Warning - can't forward unicast packet from %pM to "
			 "%pM: ttl exceeded\n", ethhdr->h_source,
			 unicast_packet->dest);
		return NET_RX_DROP;
	}

	/* get routing information */
	spin_lock_bh(&bat_priv->orig_hash_lock);
	rcu_read_lock();
	orig_node = ((struct orig_node *)
		     hash_find(bat_priv->orig_hash, compare_orig, choose_orig,
			       unicast_packet->dest));
	rcu_read_unlock();

	router = find_router(bat_priv, orig_node, recv_if);

	if (!router) {
		spin_unlock_bh(&bat_priv->orig_hash_lock);
		return NET_RX_DROP;
	}

	/* don't lock while sending the packets ... we therefore
	 * copy the required data before sending */
	batman_if = router->if_incoming;
	memcpy(dstaddr, router->addr, ETH_ALEN);

	spin_unlock_bh(&bat_priv->orig_hash_lock);

	/* create a copy of the skb, if needed, to modify it. */
	if (skb_cow(skb, sizeof(struct ethhdr)) < 0)
		return NET_RX_DROP;

	unicast_packet = (struct unicast_packet *)skb->data;

	if (unicast_packet->packet_type == BAT_UNICAST &&
	    atomic_read(&bat_priv->fragmentation) &&
	    skb->len > batman_if->net_dev->mtu)
		return frag_send_skb(skb, bat_priv, batman_if,
				     dstaddr);

	if (unicast_packet->packet_type == BAT_UNICAST_FRAG &&
	    frag_can_reassemble(skb, batman_if->net_dev->mtu)) {

		ret = frag_reassemble_skb(skb, bat_priv, &new_skb);

		if (ret == NET_RX_DROP)
			return NET_RX_DROP;

		/* packet was buffered for late merge */
		if (!new_skb)
			return NET_RX_SUCCESS;

		skb = new_skb;
		unicast_packet = (struct unicast_packet *)skb->data;
	}

	/* decrement ttl */
	unicast_packet->ttl--;

	/* route it */
	send_skb_packet(skb, batman_if, dstaddr);

	return NET_RX_SUCCESS;
}

int recv_unicast_packet(struct sk_buff *skb, struct batman_if *recv_if)
{
	struct unicast_packet *unicast_packet;
	int hdr_size = sizeof(struct unicast_packet);

	if (check_unicast_packet(skb, hdr_size) < 0)
		return NET_RX_DROP;

	unicast_packet = (struct unicast_packet *)skb->data;

	/* packet for me */
	if (is_my_mac(unicast_packet->dest)) {
		interface_rx(recv_if->soft_iface, skb, recv_if, hdr_size);
		return NET_RX_SUCCESS;
	}

	return route_unicast_packet(skb, recv_if, hdr_size);
}

int recv_ucast_frag_packet(struct sk_buff *skb, struct batman_if *recv_if)
{
	struct bat_priv *bat_priv = netdev_priv(recv_if->soft_iface);
	struct unicast_frag_packet *unicast_packet;
	int hdr_size = sizeof(struct unicast_frag_packet);
	struct sk_buff *new_skb = NULL;
	int ret;

	if (check_unicast_packet(skb, hdr_size) < 0)
		return NET_RX_DROP;

	unicast_packet = (struct unicast_frag_packet *)skb->data;

	/* packet for me */
	if (is_my_mac(unicast_packet->dest)) {

		ret = frag_reassemble_skb(skb, bat_priv, &new_skb);

		if (ret == NET_RX_DROP)
			return NET_RX_DROP;

		/* packet was buffered for late merge */
		if (!new_skb)
			return NET_RX_SUCCESS;

		interface_rx(recv_if->soft_iface, new_skb, recv_if,
			     sizeof(struct unicast_packet));
		return NET_RX_SUCCESS;
	}

	return route_unicast_packet(skb, recv_if, hdr_size);
}

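/* handle an incoming batman broadcast: discard our own or duplicate
 * broadcasts, track the sender's sequence number window, queue the frame
 * for rebroadcast and deliver a copy to the local soft interface */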
int recv_bcast_packet(struct sk_buff *skb, struct batman_if *recv_if)
{
	struct bat_priv *bat_priv = netdev_priv(recv_if->soft_iface);
	struct orig_node *orig_node;
	struct bcast_packet *bcast_packet;
	struct ethhdr *ethhdr;
	int hdr_size = sizeof(struct bcast_packet);
	int32_t seq_diff;

	/* drop packet if it does not have the necessary minimum size */
	if (unlikely(!pskb_may_pull(skb, hdr_size)))
		return NET_RX_DROP;

	ethhdr = (struct ethhdr *)skb_mac_header(skb);

	/* packet with broadcast indication but unicast recipient */
	if (!is_broadcast_ether_addr(ethhdr->h_dest))
		return NET_RX_DROP;

	/* packet with broadcast sender address */
	if (is_broadcast_ether_addr(ethhdr->h_source))
		return NET_RX_DROP;

	/* ignore broadcasts sent by myself */
	if (is_my_mac(ethhdr->h_source))
		return NET_RX_DROP;

	bcast_packet = (struct bcast_packet *)skb->data;

	/* ignore broadcasts originated by myself */
	if (is_my_mac(bcast_packet->orig))
		return NET_RX_DROP;

	if (bcast_packet->ttl < 2)
		return NET_RX_DROP;

	spin_lock_bh(&bat_priv->orig_hash_lock);
	rcu_read_lock();
	orig_node = ((struct orig_node *)
		     hash_find(bat_priv->orig_hash, compare_orig, choose_orig,
			       bcast_packet->orig));
	rcu_read_unlock();

	if (!orig_node) {
		spin_unlock_bh(&bat_priv->orig_hash_lock);
		return NET_RX_DROP;
	}

	/* check whether the packet is a duplicate */
	if (get_bit_status(orig_node->bcast_bits,
			   orig_node->last_bcast_seqno,
			   ntohl(bcast_packet->seqno))) {
		spin_unlock_bh(&bat_priv->orig_hash_lock);
		return NET_RX_DROP;
	}

	seq_diff = ntohl(bcast_packet->seqno) - orig_node->last_bcast_seqno;

	/* check whether the packet is old and the host just restarted. */
	if (window_protected(bat_priv, seq_diff,
			     &orig_node->bcast_seqno_reset)) {
		spin_unlock_bh(&bat_priv->orig_hash_lock);
		return NET_RX_DROP;
	}

	/* mark broadcast in flood history, update window position
	 * if required. */
	if (bit_get_packet(bat_priv, orig_node->bcast_bits, seq_diff, 1))
		orig_node->last_bcast_seqno = ntohl(bcast_packet->seqno);

	spin_unlock_bh(&bat_priv->orig_hash_lock);

	/* rebroadcast packet */
	add_bcast_packet_to_list(bat_priv, skb);

	/* broadcast for me */
	interface_rx(recv_if->soft_iface, skb, recv_if, hdr_size);

	return NET_RX_SUCCESS;
}

int recv_vis_packet(struct sk_buff *skb, struct batman_if *recv_if)
{
	struct vis_packet *vis_packet;
	struct ethhdr *ethhdr;
	struct bat_priv *bat_priv = netdev_priv(recv_if->soft_iface);
	int hdr_size = sizeof(struct vis_packet);

	/* keep skb linear */
	if (skb_linearize(skb) < 0)
		return NET_RX_DROP;

	if (unlikely(!pskb_may_pull(skb, hdr_size)))
		return NET_RX_DROP;

	vis_packet = (struct vis_packet *)skb->data;
	ethhdr = (struct ethhdr *)skb_mac_header(skb);

	/* not for me */
	if (!is_my_mac(ethhdr->h_dest))
		return NET_RX_DROP;

	/* ignore own packets */
	if (is_my_mac(vis_packet->vis_orig))
		return NET_RX_DROP;

	if (is_my_mac(vis_packet->sender_orig))
		return NET_RX_DROP;

	switch (vis_packet->vis_type) {
	case VIS_TYPE_SERVER_SYNC:
		receive_server_sync_packet(bat_priv, vis_packet,
					   skb_headlen(skb));
		break;

	case VIS_TYPE_CLIENT_UPDATE:
		receive_client_update_packet(bat_priv, vis_packet,
					     skb_headlen(skb));
		break;

	default:	/* ignore unknown packet */
		break;
	}

	/* We take a copy of the data in the packet, so we should
	 * always free the skbuff. */
	return NET_RX_DROP;
}