routing.c 43 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
712781279128012811282128312841285128612871288128912901291129212931294129512961297129812991300130113021303130413051306130713081309131013111312131313141315131613171318131913201321132213231324132513261327132813291330133113321333133413351336133713381339134013411342134313441345134613471348134913501351135213531354135513561357135813591360136113621363136413651366136713681369137013711372137313741375137613771378137913801381138213831384138513861387138813891390139113921393139413951396139713981399140014011402140314041405140614071408140914101411141214131414141514161417141814191420142114221423142414251426142714281429143014311432143314341435143614371438143914401441144214431444144514461447144814491450145114521453145414551456145714581459146014611462146314641465146614671468146914701471147214731474147514761477147814791480148114821483148414851486148714881489149014911492149314941495149614971498149915001501150215031504150515061507150815091510151115121513151415151516151715181519152015211522152315241525152615271528152915301531153215331534153515361537153815391540154115421543154415451546154715481549155015511552155315541555155615571558155915601561156215631564156515661567156815691570157115721573157415751576157715781579158015811582158315841585158615871588158915901591159215931594159515961597159815991600
  1. /*
  2. * Copyright (C) 2007-2011 B.A.T.M.A.N. contributors:
  3. *
  4. * Marek Lindner, Simon Wunderlich
  5. *
  6. * This program is free software; you can redistribute it and/or
  7. * modify it under the terms of version 2 of the GNU General Public
  8. * License as published by the Free Software Foundation.
  9. *
  10. * This program is distributed in the hope that it will be useful, but
  11. * WITHOUT ANY WARRANTY; without even the implied warranty of
  12. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
  13. * General Public License for more details.
  14. *
  15. * You should have received a copy of the GNU General Public License
  16. * along with this program; if not, write to the Free Software
  17. * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
  18. * 02110-1301, USA
  19. *
  20. */
  21. #include "main.h"
  22. #include "routing.h"
  23. #include "send.h"
  24. #include "hash.h"
  25. #include "soft-interface.h"
  26. #include "hard-interface.h"
  27. #include "icmp_socket.h"
  28. #include "translation-table.h"
  29. #include "originator.h"
  30. #include "ring_buffer.h"
  31. #include "vis.h"
  32. #include "aggregation.h"
  33. #include "gateway_common.h"
  34. #include "gateway_client.h"
  35. #include "unicast.h"
  36. void slide_own_bcast_window(struct batman_if *batman_if)
  37. {
  38. struct bat_priv *bat_priv = netdev_priv(batman_if->soft_iface);
  39. struct hashtable_t *hash = bat_priv->orig_hash;
  40. struct hlist_node *walk;
  41. struct hlist_head *head;
  42. struct element_t *bucket;
  43. struct orig_node *orig_node;
  44. unsigned long *word;
  45. int i;
  46. size_t word_index;
  47. spin_lock_bh(&bat_priv->orig_hash_lock);
  48. for (i = 0; i < hash->size; i++) {
  49. head = &hash->table[i];
  50. rcu_read_lock();
  51. hlist_for_each_entry_rcu(bucket, walk, head, hlist) {
  52. orig_node = bucket->data;
  53. spin_lock_bh(&orig_node->ogm_cnt_lock);
  54. word_index = batman_if->if_num * NUM_WORDS;
  55. word = &(orig_node->bcast_own[word_index]);
  56. bit_get_packet(bat_priv, word, 1, 0);
  57. orig_node->bcast_own_sum[batman_if->if_num] =
  58. bit_packet_count(word);
  59. spin_unlock_bh(&orig_node->ogm_cnt_lock);
  60. }
  61. rcu_read_unlock();
  62. }
  63. spin_unlock_bh(&bat_priv->orig_hash_lock);
  64. }
  65. static void update_HNA(struct bat_priv *bat_priv, struct orig_node *orig_node,
  66. unsigned char *hna_buff, int hna_buff_len)
  67. {
  68. if ((hna_buff_len != orig_node->hna_buff_len) ||
  69. ((hna_buff_len > 0) &&
  70. (orig_node->hna_buff_len > 0) &&
  71. (memcmp(orig_node->hna_buff, hna_buff, hna_buff_len) != 0))) {
  72. if (orig_node->hna_buff_len > 0)
  73. hna_global_del_orig(bat_priv, orig_node,
  74. "originator changed hna");
  75. if ((hna_buff_len > 0) && (hna_buff))
  76. hna_global_add_orig(bat_priv, orig_node,
  77. hna_buff, hna_buff_len);
  78. }
  79. }
/* Switch the next hop towards orig_node to neigh_node (NULL deletes the
 * route) and keep the global HNA table in sync.
 *
 * Takes a reference on the new router before publishing it and releases
 * the reference held on the previous one. Only called from update_routes()
 * when old and new router differ, so the "route changed" branch never sees
 * both pointers NULL (which would otherwise dereference NULL below).
 */
static void update_route(struct bat_priv *bat_priv,
			 struct orig_node *orig_node,
			 struct neigh_node *neigh_node,
			 unsigned char *hna_buff, int hna_buff_len)
{
	struct neigh_node *neigh_node_tmp;

	/* route deleted */
	if ((orig_node->router) && (!neigh_node)) {
		bat_dbg(DBG_ROUTES, bat_priv, "Deleting route towards: %pM\n",
			orig_node->orig);
		hna_global_del_orig(bat_priv, orig_node,
				    "originator timed out");

	/* route added */
	} else if ((!orig_node->router) && (neigh_node)) {
		bat_dbg(DBG_ROUTES, bat_priv,
			"Adding route towards: %pM (via %pM)\n",
			orig_node->orig, neigh_node->addr);
		hna_global_add_orig(bat_priv, orig_node,
				    hna_buff, hna_buff_len);

	/* route changed */
	} else {
		bat_dbg(DBG_ROUTES, bat_priv,
			"Changing route towards: %pM "
			"(now via %pM - was via %pM)\n",
			orig_node->orig, neigh_node->addr,
			orig_node->router->addr);
	}

	/* grab a reference for orig_node->router; a refcount that already
	 * hit zero means the neighbor is being freed - treat as no route */
	if (neigh_node && !atomic_inc_not_zero(&neigh_node->refcount))
		neigh_node = NULL;
	neigh_node_tmp = orig_node->router;
	orig_node->router = neigh_node;
	/* release the reference held on the previous router */
	if (neigh_node_tmp)
		neigh_node_free_ref(neigh_node_tmp);
}
  114. void update_routes(struct bat_priv *bat_priv, struct orig_node *orig_node,
  115. struct neigh_node *neigh_node, unsigned char *hna_buff,
  116. int hna_buff_len)
  117. {
  118. if (!orig_node)
  119. return;
  120. if (orig_node->router != neigh_node)
  121. update_route(bat_priv, orig_node, neigh_node,
  122. hna_buff, hna_buff_len);
  123. /* may be just HNA changed */
  124. else
  125. update_HNA(bat_priv, orig_node, hna_buff, hna_buff_len);
  126. }
/* Decide whether the link to the single hop neighbor orig_neigh_node is
 * bidirectional, based on the OGM just received via if_incoming.
 *
 * Side effects: refreshes last_valid timestamps, recomputes the neighbor's
 * tq_own and tq_asym_penalty, and rescales batman_packet->tq with them.
 *
 * Returns 1 if the resulting tq reaches TQ_TOTAL_BIDRECT_LIMIT (link
 * considered bidirectional), 0 otherwise.
 */
static int is_bidirectional_neigh(struct orig_node *orig_node,
				  struct orig_node *orig_neigh_node,
				  struct batman_packet *batman_packet,
				  struct batman_if *if_incoming)
{
	struct bat_priv *bat_priv = netdev_priv(if_incoming->soft_iface);
	struct neigh_node *neigh_node = NULL, *tmp_neigh_node = NULL;
	struct hlist_node *node;
	unsigned char total_count;
	int ret = 0;

	if (orig_node == orig_neigh_node) {
		/* OGM from a direct neighbor: find (or create) its
		 * neighbor entry on the receiving interface */
		rcu_read_lock();
		hlist_for_each_entry_rcu(tmp_neigh_node, node,
					 &orig_node->neigh_list, list) {
			if (compare_orig(tmp_neigh_node->addr,
					 orig_neigh_node->orig) &&
			    (tmp_neigh_node->if_incoming == if_incoming))
				neigh_node = tmp_neigh_node;
		}

		if (!neigh_node)
			neigh_node = create_neighbor(orig_node,
						     orig_neigh_node,
						     orig_neigh_node->orig,
						     if_incoming);
		/* create_neighbor failed, return 0 */
		if (!neigh_node)
			goto unlock;

		/* take a reference before leaving the RCU section; zero
		 * refcount means the neighbor is already being freed */
		if (!atomic_inc_not_zero(&neigh_node->refcount)) {
			neigh_node = NULL;
			goto unlock;
		}
		rcu_read_unlock();

		neigh_node->last_valid = jiffies;
	} else {
		/* find packet count of corresponding one hop neighbor */
		rcu_read_lock();
		hlist_for_each_entry_rcu(tmp_neigh_node, node,
					 &orig_neigh_node->neigh_list, list) {
			if (compare_orig(tmp_neigh_node->addr,
					 orig_neigh_node->orig) &&
			    (tmp_neigh_node->if_incoming == if_incoming))
				neigh_node = tmp_neigh_node;
		}

		if (!neigh_node)
			neigh_node = create_neighbor(orig_neigh_node,
						     orig_neigh_node,
						     orig_neigh_node->orig,
						     if_incoming);
		/* create_neighbor failed, return 0 */
		if (!neigh_node)
			goto unlock;

		/* same refcounting rule as above */
		if (!atomic_inc_not_zero(&neigh_node->refcount)) {
			neigh_node = NULL;
			goto unlock;
		}
		rcu_read_unlock();
	}

	orig_node->last_valid = jiffies;

	/* pay attention to not get a value bigger than 100 % */
	total_count = (orig_neigh_node->bcast_own_sum[if_incoming->if_num] >
		       neigh_node->real_packet_count ?
		       neigh_node->real_packet_count :
		       orig_neigh_node->bcast_own_sum[if_incoming->if_num]);

	/* if we have too few packets (too less data) we set tq_own to zero */
	/* if we receive too few packets it is not considered bidirectional */
	if ((total_count < TQ_LOCAL_BIDRECT_SEND_MINIMUM) ||
	    (neigh_node->real_packet_count < TQ_LOCAL_BIDRECT_RECV_MINIMUM))
		orig_neigh_node->tq_own = 0;
	else
		/* neigh_node->real_packet_count is never zero as we
		 * only purge old information when getting new
		 * information */
		orig_neigh_node->tq_own = (TQ_MAX_VALUE * total_count) /
			neigh_node->real_packet_count;

	/*
	 * 1 - ((1-x) ** 3), normalized to TQ_MAX_VALUE this does
	 * affect the nearly-symmetric links only a little, but
	 * punishes asymmetric links more. This will give a value
	 * between 0 and TQ_MAX_VALUE
	 */
	orig_neigh_node->tq_asym_penalty =
		TQ_MAX_VALUE -
		(TQ_MAX_VALUE *
		 (TQ_LOCAL_WINDOW_SIZE - neigh_node->real_packet_count) *
		 (TQ_LOCAL_WINDOW_SIZE - neigh_node->real_packet_count) *
		 (TQ_LOCAL_WINDOW_SIZE - neigh_node->real_packet_count)) /
		(TQ_LOCAL_WINDOW_SIZE *
		 TQ_LOCAL_WINDOW_SIZE *
		 TQ_LOCAL_WINDOW_SIZE);

	/* fold own local tq and the asymmetry penalty into the packet tq */
	batman_packet->tq = ((batman_packet->tq *
			      orig_neigh_node->tq_own *
			      orig_neigh_node->tq_asym_penalty) /
			     (TQ_MAX_VALUE * TQ_MAX_VALUE));

	bat_dbg(DBG_BATMAN, bat_priv,
		"bidirectional: "
		"orig = %-15pM neigh = %-15pM => own_bcast = %2i, "
		"real recv = %2i, local tq: %3i, asym_penalty: %3i, "
		"total tq: %3i\n",
		orig_node->orig, orig_neigh_node->orig, total_count,
		neigh_node->real_packet_count, orig_neigh_node->tq_own,
		orig_neigh_node->tq_asym_penalty, batman_packet->tq);

	/* if link has the minimum required transmission quality
	 * consider it bidirectional */
	if (batman_packet->tq >= TQ_TOTAL_BIDRECT_LIMIT)
		ret = 1;

	goto out;

unlock:
	rcu_read_unlock();
out:
	if (neigh_node)
		neigh_node_free_ref(neigh_node);
	return ret;
}
  240. /* caller must hold the neigh_list_lock */
  241. void bonding_candidate_del(struct orig_node *orig_node,
  242. struct neigh_node *neigh_node)
  243. {
  244. /* this neighbor is not part of our candidate list */
  245. if (list_empty(&neigh_node->bonding_list))
  246. goto out;
  247. list_del_rcu(&neigh_node->bonding_list);
  248. INIT_LIST_HEAD(&neigh_node->bonding_list);
  249. neigh_node_free_ref(neigh_node);
  250. atomic_dec(&orig_node->bond_candidates);
  251. out:
  252. return;
  253. }
/* Consider neigh_node as a bonding candidate for orig_node: add it to the
 * candidate list when it qualifies, otherwise make sure it is removed.
 * The list holds its own reference on the neighbor while it is a member.
 */
static void bonding_candidate_add(struct orig_node *orig_node,
				  struct neigh_node *neigh_node)
{
	struct hlist_node *node;
	struct neigh_node *tmp_neigh_node;
	uint8_t best_tq, interference_candidate = 0;

	spin_lock_bh(&orig_node->neigh_list_lock);

	/* only consider if it has the same primary address ... */
	if (!compare_orig(orig_node->orig,
			  neigh_node->orig_node->primary_addr))
		goto candidate_del;

	/* without a current router there is no best tq to compare against */
	if (!orig_node->router)
		goto candidate_del;

	best_tq = orig_node->router->tq_avg;

	/* ... and is good enough to be considered */
	if (neigh_node->tq_avg < best_tq - BONDING_TQ_THRESHOLD)
		goto candidate_del;

	/**
	 * check if we have another candidate with the same mac address or
	 * interface. If we do, we won't select this candidate because of
	 * possible interference.
	 */
	hlist_for_each_entry_rcu(tmp_neigh_node, node,
				 &orig_node->neigh_list, list) {
		if (tmp_neigh_node == neigh_node)
			continue;

		/* we only care if the other candidate is even
		 * considered as candidate. */
		if (list_empty(&tmp_neigh_node->bonding_list))
			continue;

		if ((neigh_node->if_incoming == tmp_neigh_node->if_incoming) ||
		    (compare_orig(neigh_node->addr, tmp_neigh_node->addr))) {
			interference_candidate = 1;
			break;
		}
	}

	/* don't care further if it is an interference candidate */
	if (interference_candidate)
		goto candidate_del;

	/* this neighbor already is part of our candidate list */
	if (!list_empty(&neigh_node->bonding_list))
		goto out;

	/* take the list's reference; skip if the neighbor is being freed */
	if (!atomic_inc_not_zero(&neigh_node->refcount))
		goto out;

	list_add_rcu(&neigh_node->bonding_list, &orig_node->bond_list);
	atomic_inc(&orig_node->bond_candidates);
	goto out;

candidate_del:
	bonding_candidate_del(orig_node, neigh_node);

out:
	spin_unlock_bh(&orig_node->neigh_list_lock);
	return;
}
  307. /* copy primary address for bonding */
  308. static void bonding_save_primary(struct orig_node *orig_node,
  309. struct orig_node *orig_neigh_node,
  310. struct batman_packet *batman_packet)
  311. {
  312. if (!(batman_packet->flags & PRIMARIES_FIRST_HOP))
  313. return;
  314. memcpy(orig_neigh_node->primary_addr, orig_node->orig, ETH_ALEN);
  315. }
/* Update the originator entry (and its last-hop neighbor) for a received
 * OGM: refresh tq ring buffers and timestamps, possibly switch the route
 * to a better neighbor, sync the HNA table and the gateway flags.
 *
 * is_duplicate: non-zero if the seqno was already seen for this
 * originator; duplicates refresh state but never punish other neighbors.
 */
static void update_orig(struct bat_priv *bat_priv,
			struct orig_node *orig_node,
			struct ethhdr *ethhdr,
			struct batman_packet *batman_packet,
			struct batman_if *if_incoming,
			unsigned char *hna_buff, int hna_buff_len,
			char is_duplicate)
{
	struct neigh_node *neigh_node = NULL, *tmp_neigh_node = NULL;
	struct orig_node *orig_node_tmp;
	struct hlist_node *node;
	int tmp_hna_buff_len;
	uint8_t bcast_own_sum_orig, bcast_own_sum_neigh;

	bat_dbg(DBG_BATMAN, bat_priv, "update_originator(): "
		"Searching and updating originator entry of received packet\n");

	rcu_read_lock();
	hlist_for_each_entry_rcu(tmp_neigh_node, node,
				 &orig_node->neigh_list, list) {
		/* the sending neighbor: take a reference and remember it */
		if (compare_orig(tmp_neigh_node->addr, ethhdr->h_source) &&
		    (tmp_neigh_node->if_incoming == if_incoming) &&
		    atomic_inc_not_zero(&tmp_neigh_node->refcount)) {
			if (neigh_node)
				neigh_node_free_ref(neigh_node);
			neigh_node = tmp_neigh_node;
			continue;
		}

		if (is_duplicate)
			continue;

		/* every other neighbor gets a 0 pushed into its tq ring
		 * buffer for this (first-seen) seqno */
		ring_buffer_set(tmp_neigh_node->tq_recv,
				&tmp_neigh_node->tq_index, 0);
		tmp_neigh_node->tq_avg =
			ring_buffer_avg(tmp_neigh_node->tq_recv);
	}

	if (!neigh_node) {
		/* sender not known yet - create the neighbor entry */
		struct orig_node *orig_tmp;

		orig_tmp = get_orig_node(bat_priv, ethhdr->h_source);
		if (!orig_tmp)
			goto unlock;

		neigh_node = create_neighbor(orig_node, orig_tmp,
					     ethhdr->h_source, if_incoming);

		kref_put(&orig_tmp->refcount, orig_node_free_ref);
		if (!neigh_node)
			goto unlock;

		/* hold our own reference before leaving the RCU section */
		if (!atomic_inc_not_zero(&neigh_node->refcount)) {
			neigh_node = NULL;
			goto unlock;
		}
	} else
		bat_dbg(DBG_BATMAN, bat_priv,
			"Updating existing last-hop neighbor of originator\n");

	rcu_read_unlock();

	orig_node->flags = batman_packet->flags;
	neigh_node->last_valid = jiffies;

	/* record the received tq for this neighbor */
	ring_buffer_set(neigh_node->tq_recv,
			&neigh_node->tq_index,
			batman_packet->tq);
	neigh_node->tq_avg = ring_buffer_avg(neigh_node->tq_recv);

	if (!is_duplicate) {
		orig_node->last_ttl = batman_packet->ttl;
		neigh_node->last_ttl = batman_packet->ttl;
	}

	bonding_candidate_add(orig_node, neigh_node);

	/* never read past the buffer we were actually handed */
	tmp_hna_buff_len = (hna_buff_len > batman_packet->num_hna * ETH_ALEN ?
			    batman_packet->num_hna * ETH_ALEN : hna_buff_len);

	/* if this neighbor already is our next hop there is nothing
	 * to change */
	if (orig_node->router == neigh_node)
		goto update_hna;

	/* if this neighbor does not offer a better TQ we won't consider it */
	if ((orig_node->router) &&
	    (orig_node->router->tq_avg > neigh_node->tq_avg))
		goto update_hna;

	/* if the TQ is the same and the link not more symetric we
	 * won't consider it either */
	if ((orig_node->router) &&
	    (neigh_node->tq_avg == orig_node->router->tq_avg)) {
		orig_node_tmp = orig_node->router->orig_node;
		spin_lock_bh(&orig_node_tmp->ogm_cnt_lock);
		bcast_own_sum_orig =
			orig_node_tmp->bcast_own_sum[if_incoming->if_num];
		spin_unlock_bh(&orig_node_tmp->ogm_cnt_lock);

		orig_node_tmp = neigh_node->orig_node;
		spin_lock_bh(&orig_node_tmp->ogm_cnt_lock);
		bcast_own_sum_neigh =
			orig_node_tmp->bcast_own_sum[if_incoming->if_num];
		spin_unlock_bh(&orig_node_tmp->ogm_cnt_lock);

		if (bcast_own_sum_orig >= bcast_own_sum_neigh)
			goto update_hna;
	}

	/* the new neighbor wins - switch the route */
	update_routes(bat_priv, orig_node, neigh_node,
		      hna_buff, tmp_hna_buff_len);
	goto update_gw;

update_hna:
	/* keep the current router, only refresh the HNA announcements */
	update_routes(bat_priv, orig_node, orig_node->router,
		      hna_buff, tmp_hna_buff_len);

update_gw:
	if (orig_node->gw_flags != batman_packet->gw_flags)
		gw_node_update(bat_priv, orig_node, batman_packet->gw_flags);

	orig_node->gw_flags = batman_packet->gw_flags;

	/* restart gateway selection if fast or late switching was enabled */
	if ((orig_node->gw_flags) &&
	    (atomic_read(&bat_priv->gw_mode) == GW_MODE_CLIENT) &&
	    (atomic_read(&bat_priv->gw_sel_class) > 2))
		gw_check_election(bat_priv, orig_node);

	goto out;

unlock:
	rcu_read_unlock();
out:
	if (neigh_node)
		neigh_node_free_ref(neigh_node);
}
  427. /* checks whether the host restarted and is in the protection time.
  428. * returns:
  429. * 0 if the packet is to be accepted
  430. * 1 if the packet is to be ignored.
  431. */
  432. static int window_protected(struct bat_priv *bat_priv,
  433. int32_t seq_num_diff,
  434. unsigned long *last_reset)
  435. {
  436. if ((seq_num_diff <= -TQ_LOCAL_WINDOW_SIZE)
  437. || (seq_num_diff >= EXPECTED_SEQNO_RANGE)) {
  438. if (time_after(jiffies, *last_reset +
  439. msecs_to_jiffies(RESET_PROTECTION_MS))) {
  440. *last_reset = jiffies;
  441. bat_dbg(DBG_BATMAN, bat_priv,
  442. "old packet received, start protection\n");
  443. return 0;
  444. } else
  445. return 1;
  446. }
  447. return 0;
  448. }
  449. /* processes a batman packet for all interfaces, adjusts the sequence number and
  450. * finds out whether it is a duplicate.
  451. * returns:
  452. * 1 the packet is a duplicate
  453. * 0 the packet has not yet been received
  454. * -1 the packet is old and has been received while the seqno window
  455. * was protected. Caller should drop it.
  456. */
  457. static char count_real_packets(struct ethhdr *ethhdr,
  458. struct batman_packet *batman_packet,
  459. struct batman_if *if_incoming)
  460. {
  461. struct bat_priv *bat_priv = netdev_priv(if_incoming->soft_iface);
  462. struct orig_node *orig_node;
  463. struct neigh_node *tmp_neigh_node;
  464. struct hlist_node *node;
  465. char is_duplicate = 0;
  466. int32_t seq_diff;
  467. int need_update = 0;
  468. int set_mark;
  469. orig_node = get_orig_node(bat_priv, batman_packet->orig);
  470. if (!orig_node)
  471. return 0;
  472. seq_diff = batman_packet->seqno - orig_node->last_real_seqno;
  473. /* signalize caller that the packet is to be dropped. */
  474. if (window_protected(bat_priv, seq_diff,
  475. &orig_node->batman_seqno_reset))
  476. goto err;
  477. rcu_read_lock();
  478. hlist_for_each_entry_rcu(tmp_neigh_node, node,
  479. &orig_node->neigh_list, list) {
  480. is_duplicate |= get_bit_status(tmp_neigh_node->real_bits,
  481. orig_node->last_real_seqno,
  482. batman_packet->seqno);
  483. if (compare_orig(tmp_neigh_node->addr, ethhdr->h_source) &&
  484. (tmp_neigh_node->if_incoming == if_incoming))
  485. set_mark = 1;
  486. else
  487. set_mark = 0;
  488. /* if the window moved, set the update flag. */
  489. need_update |= bit_get_packet(bat_priv,
  490. tmp_neigh_node->real_bits,
  491. seq_diff, set_mark);
  492. tmp_neigh_node->real_packet_count =
  493. bit_packet_count(tmp_neigh_node->real_bits);
  494. }
  495. rcu_read_unlock();
  496. if (need_update) {
  497. bat_dbg(DBG_BATMAN, bat_priv,
  498. "updating last_seqno: old %d, new %d\n",
  499. orig_node->last_real_seqno, batman_packet->seqno);
  500. orig_node->last_real_seqno = batman_packet->seqno;
  501. }
  502. kref_put(&orig_node->refcount, orig_node_free_ref);
  503. return is_duplicate;
  504. err:
  505. kref_put(&orig_node->refcount, orig_node_free_ref);
  506. return -1;
  507. }
  508. void receive_bat_packet(struct ethhdr *ethhdr,
  509. struct batman_packet *batman_packet,
  510. unsigned char *hna_buff, int hna_buff_len,
  511. struct batman_if *if_incoming)
  512. {
  513. struct bat_priv *bat_priv = netdev_priv(if_incoming->soft_iface);
  514. struct batman_if *batman_if;
  515. struct orig_node *orig_neigh_node, *orig_node;
  516. char has_directlink_flag;
  517. char is_my_addr = 0, is_my_orig = 0, is_my_oldorig = 0;
  518. char is_broadcast = 0, is_bidirectional, is_single_hop_neigh;
  519. char is_duplicate;
  520. uint32_t if_incoming_seqno;
  521. /* Silently drop when the batman packet is actually not a
  522. * correct packet.
  523. *
  524. * This might happen if a packet is padded (e.g. Ethernet has a
  525. * minimum frame length of 64 byte) and the aggregation interprets
  526. * it as an additional length.
  527. *
  528. * TODO: A more sane solution would be to have a bit in the
  529. * batman_packet to detect whether the packet is the last
  530. * packet in an aggregation. Here we expect that the padding
  531. * is always zero (or not 0x01)
  532. */
  533. if (batman_packet->packet_type != BAT_PACKET)
  534. return;
  535. /* could be changed by schedule_own_packet() */
  536. if_incoming_seqno = atomic_read(&if_incoming->seqno);
  537. has_directlink_flag = (batman_packet->flags & DIRECTLINK ? 1 : 0);
  538. is_single_hop_neigh = (compare_orig(ethhdr->h_source,
  539. batman_packet->orig) ? 1 : 0);
  540. bat_dbg(DBG_BATMAN, bat_priv,
  541. "Received BATMAN packet via NB: %pM, IF: %s [%pM] "
  542. "(from OG: %pM, via prev OG: %pM, seqno %d, tq %d, "
  543. "TTL %d, V %d, IDF %d)\n",
  544. ethhdr->h_source, if_incoming->net_dev->name,
  545. if_incoming->net_dev->dev_addr, batman_packet->orig,
  546. batman_packet->prev_sender, batman_packet->seqno,
  547. batman_packet->tq, batman_packet->ttl, batman_packet->version,
  548. has_directlink_flag);
  549. rcu_read_lock();
  550. list_for_each_entry_rcu(batman_if, &if_list, list) {
  551. if (batman_if->if_status != IF_ACTIVE)
  552. continue;
  553. if (batman_if->soft_iface != if_incoming->soft_iface)
  554. continue;
  555. if (compare_orig(ethhdr->h_source,
  556. batman_if->net_dev->dev_addr))
  557. is_my_addr = 1;
  558. if (compare_orig(batman_packet->orig,
  559. batman_if->net_dev->dev_addr))
  560. is_my_orig = 1;
  561. if (compare_orig(batman_packet->prev_sender,
  562. batman_if->net_dev->dev_addr))
  563. is_my_oldorig = 1;
  564. if (compare_orig(ethhdr->h_source, broadcast_addr))
  565. is_broadcast = 1;
  566. }
  567. rcu_read_unlock();
  568. if (batman_packet->version != COMPAT_VERSION) {
  569. bat_dbg(DBG_BATMAN, bat_priv,
  570. "Drop packet: incompatible batman version (%i)\n",
  571. batman_packet->version);
  572. return;
  573. }
  574. if (is_my_addr) {
  575. bat_dbg(DBG_BATMAN, bat_priv,
  576. "Drop packet: received my own broadcast (sender: %pM"
  577. ")\n",
  578. ethhdr->h_source);
  579. return;
  580. }
  581. if (is_broadcast) {
  582. bat_dbg(DBG_BATMAN, bat_priv, "Drop packet: "
  583. "ignoring all packets with broadcast source addr (sender: %pM"
  584. ")\n", ethhdr->h_source);
  585. return;
  586. }
  587. if (is_my_orig) {
  588. unsigned long *word;
  589. int offset;
  590. orig_neigh_node = get_orig_node(bat_priv, ethhdr->h_source);
  591. if (!orig_neigh_node)
  592. return;
  593. /* neighbor has to indicate direct link and it has to
  594. * come via the corresponding interface */
  595. /* if received seqno equals last send seqno save new
  596. * seqno for bidirectional check */
  597. if (has_directlink_flag &&
  598. compare_orig(if_incoming->net_dev->dev_addr,
  599. batman_packet->orig) &&
  600. (batman_packet->seqno - if_incoming_seqno + 2 == 0)) {
  601. offset = if_incoming->if_num * NUM_WORDS;
  602. spin_lock_bh(&orig_neigh_node->ogm_cnt_lock);
  603. word = &(orig_neigh_node->bcast_own[offset]);
  604. bit_mark(word, 0);
  605. orig_neigh_node->bcast_own_sum[if_incoming->if_num] =
  606. bit_packet_count(word);
  607. spin_unlock_bh(&orig_neigh_node->ogm_cnt_lock);
  608. }
  609. bat_dbg(DBG_BATMAN, bat_priv, "Drop packet: "
  610. "originator packet from myself (via neighbor)\n");
  611. kref_put(&orig_neigh_node->refcount, orig_node_free_ref);
  612. return;
  613. }
  614. if (is_my_oldorig) {
  615. bat_dbg(DBG_BATMAN, bat_priv,
  616. "Drop packet: ignoring all rebroadcast echos (sender: "
  617. "%pM)\n", ethhdr->h_source);
  618. return;
  619. }
  620. orig_node = get_orig_node(bat_priv, batman_packet->orig);
  621. if (!orig_node)
  622. return;
  623. is_duplicate = count_real_packets(ethhdr, batman_packet, if_incoming);
  624. if (is_duplicate == -1) {
  625. bat_dbg(DBG_BATMAN, bat_priv,
  626. "Drop packet: packet within seqno protection time "
  627. "(sender: %pM)\n", ethhdr->h_source);
  628. goto out;
  629. }
  630. if (batman_packet->tq == 0) {
  631. bat_dbg(DBG_BATMAN, bat_priv,
  632. "Drop packet: originator packet with tq equal 0\n");
  633. goto out;
  634. }
  635. /* avoid temporary routing loops */
  636. if ((orig_node->router) &&
  637. (orig_node->router->orig_node->router) &&
  638. (compare_orig(orig_node->router->addr,
  639. batman_packet->prev_sender)) &&
  640. !(compare_orig(batman_packet->orig, batman_packet->prev_sender)) &&
  641. (compare_orig(orig_node->router->addr,
  642. orig_node->router->orig_node->router->addr))) {
  643. bat_dbg(DBG_BATMAN, bat_priv,
  644. "Drop packet: ignoring all rebroadcast packets that "
  645. "may make me loop (sender: %pM)\n", ethhdr->h_source);
  646. goto out;
  647. }
  648. /* if sender is a direct neighbor the sender mac equals
  649. * originator mac */
  650. orig_neigh_node = (is_single_hop_neigh ?
  651. orig_node :
  652. get_orig_node(bat_priv, ethhdr->h_source));
  653. if (!orig_neigh_node)
  654. goto out_neigh;
  655. /* drop packet if sender is not a direct neighbor and if we
  656. * don't route towards it */
  657. if (!is_single_hop_neigh && (!orig_neigh_node->router)) {
  658. bat_dbg(DBG_BATMAN, bat_priv,
  659. "Drop packet: OGM via unknown neighbor!\n");
  660. goto out_neigh;
  661. }
  662. is_bidirectional = is_bidirectional_neigh(orig_node, orig_neigh_node,
  663. batman_packet, if_incoming);
  664. bonding_save_primary(orig_node, orig_neigh_node, batman_packet);
  665. /* update ranking if it is not a duplicate or has the same
  666. * seqno and similar ttl as the non-duplicate */
  667. if (is_bidirectional &&
  668. (!is_duplicate ||
  669. ((orig_node->last_real_seqno == batman_packet->seqno) &&
  670. (orig_node->last_ttl - 3 <= batman_packet->ttl))))
  671. update_orig(bat_priv, orig_node, ethhdr, batman_packet,
  672. if_incoming, hna_buff, hna_buff_len, is_duplicate);
  673. /* is single hop (direct) neighbor */
  674. if (is_single_hop_neigh) {
  675. /* mark direct link on incoming interface */
  676. schedule_forward_packet(orig_node, ethhdr, batman_packet,
  677. 1, hna_buff_len, if_incoming);
  678. bat_dbg(DBG_BATMAN, bat_priv, "Forwarding packet: "
  679. "rebroadcast neighbor packet with direct link flag\n");
  680. goto out_neigh;
  681. }
  682. /* multihop originator */
  683. if (!is_bidirectional) {
  684. bat_dbg(DBG_BATMAN, bat_priv,
  685. "Drop packet: not received via bidirectional link\n");
  686. goto out_neigh;
  687. }
  688. if (is_duplicate) {
  689. bat_dbg(DBG_BATMAN, bat_priv,
  690. "Drop packet: duplicate packet received\n");
  691. goto out_neigh;
  692. }
  693. bat_dbg(DBG_BATMAN, bat_priv,
  694. "Forwarding packet: rebroadcast originator packet\n");
  695. schedule_forward_packet(orig_node, ethhdr, batman_packet,
  696. 0, hna_buff_len, if_incoming);
  697. out_neigh:
  698. if (!is_single_hop_neigh)
  699. kref_put(&orig_neigh_node->refcount, orig_node_free_ref);
  700. out:
  701. kref_put(&orig_node->refcount, orig_node_free_ref);
  702. }
  703. int recv_bat_packet(struct sk_buff *skb, struct batman_if *batman_if)
  704. {
  705. struct bat_priv *bat_priv = netdev_priv(batman_if->soft_iface);
  706. struct ethhdr *ethhdr;
  707. /* drop packet if it has not necessary minimum size */
  708. if (unlikely(!pskb_may_pull(skb, sizeof(struct batman_packet))))
  709. return NET_RX_DROP;
  710. ethhdr = (struct ethhdr *)skb_mac_header(skb);
  711. /* packet with broadcast indication but unicast recipient */
  712. if (!is_broadcast_ether_addr(ethhdr->h_dest))
  713. return NET_RX_DROP;
  714. /* packet with broadcast sender address */
  715. if (is_broadcast_ether_addr(ethhdr->h_source))
  716. return NET_RX_DROP;
  717. /* create a copy of the skb, if needed, to modify it. */
  718. if (skb_cow(skb, 0) < 0)
  719. return NET_RX_DROP;
  720. /* keep skb linear */
  721. if (skb_linearize(skb) < 0)
  722. return NET_RX_DROP;
  723. ethhdr = (struct ethhdr *)skb_mac_header(skb);
  724. spin_lock_bh(&bat_priv->orig_hash_lock);
  725. receive_aggr_bat_packet(ethhdr,
  726. skb->data,
  727. skb_headlen(skb),
  728. batman_if);
  729. spin_unlock_bh(&bat_priv->orig_hash_lock);
  730. kfree_skb(skb);
  731. return NET_RX_SUCCESS;
  732. }
  733. static int recv_my_icmp_packet(struct bat_priv *bat_priv,
  734. struct sk_buff *skb, size_t icmp_len)
  735. {
  736. struct orig_node *orig_node = NULL;
  737. struct neigh_node *neigh_node = NULL;
  738. struct icmp_packet_rr *icmp_packet;
  739. struct batman_if *batman_if;
  740. uint8_t dstaddr[ETH_ALEN];
  741. int ret = NET_RX_DROP;
  742. icmp_packet = (struct icmp_packet_rr *)skb->data;
  743. /* add data to device queue */
  744. if (icmp_packet->msg_type != ECHO_REQUEST) {
  745. bat_socket_receive_packet(icmp_packet, icmp_len);
  746. goto out;
  747. }
  748. if (!bat_priv->primary_if)
  749. goto out;
  750. /* answer echo request (ping) */
  751. /* get routing information */
  752. spin_lock_bh(&bat_priv->orig_hash_lock);
  753. rcu_read_lock();
  754. orig_node = ((struct orig_node *)hash_find(bat_priv->orig_hash,
  755. compare_orig, choose_orig,
  756. icmp_packet->orig));
  757. if (!orig_node)
  758. goto unlock;
  759. kref_get(&orig_node->refcount);
  760. neigh_node = orig_node->router;
  761. if (!neigh_node)
  762. goto unlock;
  763. if (!atomic_inc_not_zero(&neigh_node->refcount)) {
  764. neigh_node = NULL;
  765. goto unlock;
  766. }
  767. rcu_read_unlock();
  768. /* don't lock while sending the packets ... we therefore
  769. * copy the required data before sending */
  770. batman_if = orig_node->router->if_incoming;
  771. memcpy(dstaddr, orig_node->router->addr, ETH_ALEN);
  772. spin_unlock_bh(&bat_priv->orig_hash_lock);
  773. /* create a copy of the skb, if needed, to modify it. */
  774. if (skb_cow(skb, sizeof(struct ethhdr)) < 0)
  775. goto out;
  776. icmp_packet = (struct icmp_packet_rr *)skb->data;
  777. memcpy(icmp_packet->dst, icmp_packet->orig, ETH_ALEN);
  778. memcpy(icmp_packet->orig,
  779. bat_priv->primary_if->net_dev->dev_addr, ETH_ALEN);
  780. icmp_packet->msg_type = ECHO_REPLY;
  781. icmp_packet->ttl = TTL;
  782. send_skb_packet(skb, batman_if, dstaddr);
  783. ret = NET_RX_SUCCESS;
  784. goto out;
  785. unlock:
  786. rcu_read_unlock();
  787. spin_unlock_bh(&bat_priv->orig_hash_lock);
  788. out:
  789. if (neigh_node)
  790. neigh_node_free_ref(neigh_node);
  791. if (orig_node)
  792. kref_put(&orig_node->refcount, orig_node_free_ref);
  793. return ret;
  794. }
  795. static int recv_icmp_ttl_exceeded(struct bat_priv *bat_priv,
  796. struct sk_buff *skb)
  797. {
  798. struct orig_node *orig_node = NULL;
  799. struct neigh_node *neigh_node = NULL;
  800. struct icmp_packet *icmp_packet;
  801. struct batman_if *batman_if;
  802. uint8_t dstaddr[ETH_ALEN];
  803. int ret = NET_RX_DROP;
  804. icmp_packet = (struct icmp_packet *)skb->data;
  805. /* send TTL exceeded if packet is an echo request (traceroute) */
  806. if (icmp_packet->msg_type != ECHO_REQUEST) {
  807. pr_debug("Warning - can't forward icmp packet from %pM to "
  808. "%pM: ttl exceeded\n", icmp_packet->orig,
  809. icmp_packet->dst);
  810. goto out;
  811. }
  812. if (!bat_priv->primary_if)
  813. goto out;
  814. /* get routing information */
  815. spin_lock_bh(&bat_priv->orig_hash_lock);
  816. rcu_read_lock();
  817. orig_node = ((struct orig_node *)
  818. hash_find(bat_priv->orig_hash, compare_orig, choose_orig,
  819. icmp_packet->orig));
  820. if (!orig_node)
  821. goto unlock;
  822. kref_get(&orig_node->refcount);
  823. neigh_node = orig_node->router;
  824. if (!neigh_node)
  825. goto unlock;
  826. if (!atomic_inc_not_zero(&neigh_node->refcount)) {
  827. neigh_node = NULL;
  828. goto unlock;
  829. }
  830. rcu_read_unlock();
  831. /* don't lock while sending the packets ... we therefore
  832. * copy the required data before sending */
  833. batman_if = orig_node->router->if_incoming;
  834. memcpy(dstaddr, orig_node->router->addr, ETH_ALEN);
  835. spin_unlock_bh(&bat_priv->orig_hash_lock);
  836. /* create a copy of the skb, if needed, to modify it. */
  837. if (skb_cow(skb, sizeof(struct ethhdr)) < 0)
  838. goto out;
  839. icmp_packet = (struct icmp_packet *)skb->data;
  840. memcpy(icmp_packet->dst, icmp_packet->orig, ETH_ALEN);
  841. memcpy(icmp_packet->orig,
  842. bat_priv->primary_if->net_dev->dev_addr, ETH_ALEN);
  843. icmp_packet->msg_type = TTL_EXCEEDED;
  844. icmp_packet->ttl = TTL;
  845. send_skb_packet(skb, batman_if, dstaddr);
  846. ret = NET_RX_SUCCESS;
  847. goto out;
  848. unlock:
  849. rcu_read_unlock();
  850. spin_unlock_bh(&bat_priv->orig_hash_lock);
  851. out:
  852. if (neigh_node)
  853. neigh_node_free_ref(neigh_node);
  854. if (orig_node)
  855. kref_put(&orig_node->refcount, orig_node_free_ref);
  856. return ret;
  857. }
  858. int recv_icmp_packet(struct sk_buff *skb, struct batman_if *recv_if)
  859. {
  860. struct bat_priv *bat_priv = netdev_priv(recv_if->soft_iface);
  861. struct icmp_packet_rr *icmp_packet;
  862. struct ethhdr *ethhdr;
  863. struct orig_node *orig_node = NULL;
  864. struct neigh_node *neigh_node = NULL;
  865. struct batman_if *batman_if;
  866. int hdr_size = sizeof(struct icmp_packet);
  867. uint8_t dstaddr[ETH_ALEN];
  868. int ret = NET_RX_DROP;
  869. /**
  870. * we truncate all incoming icmp packets if they don't match our size
  871. */
  872. if (skb->len >= sizeof(struct icmp_packet_rr))
  873. hdr_size = sizeof(struct icmp_packet_rr);
  874. /* drop packet if it has not necessary minimum size */
  875. if (unlikely(!pskb_may_pull(skb, hdr_size)))
  876. goto out;
  877. ethhdr = (struct ethhdr *)skb_mac_header(skb);
  878. /* packet with unicast indication but broadcast recipient */
  879. if (is_broadcast_ether_addr(ethhdr->h_dest))
  880. goto out;
  881. /* packet with broadcast sender address */
  882. if (is_broadcast_ether_addr(ethhdr->h_source))
  883. goto out;
  884. /* not for me */
  885. if (!is_my_mac(ethhdr->h_dest))
  886. goto out;
  887. icmp_packet = (struct icmp_packet_rr *)skb->data;
  888. /* add record route information if not full */
  889. if ((hdr_size == sizeof(struct icmp_packet_rr)) &&
  890. (icmp_packet->rr_cur < BAT_RR_LEN)) {
  891. memcpy(&(icmp_packet->rr[icmp_packet->rr_cur]),
  892. ethhdr->h_dest, ETH_ALEN);
  893. icmp_packet->rr_cur++;
  894. }
  895. /* packet for me */
  896. if (is_my_mac(icmp_packet->dst))
  897. return recv_my_icmp_packet(bat_priv, skb, hdr_size);
  898. /* TTL exceeded */
  899. if (icmp_packet->ttl < 2)
  900. return recv_icmp_ttl_exceeded(bat_priv, skb);
  901. /* get routing information */
  902. spin_lock_bh(&bat_priv->orig_hash_lock);
  903. rcu_read_lock();
  904. orig_node = ((struct orig_node *)
  905. hash_find(bat_priv->orig_hash, compare_orig, choose_orig,
  906. icmp_packet->dst));
  907. if (!orig_node)
  908. goto unlock;
  909. kref_get(&orig_node->refcount);
  910. neigh_node = orig_node->router;
  911. if (!neigh_node)
  912. goto unlock;
  913. if (!atomic_inc_not_zero(&neigh_node->refcount)) {
  914. neigh_node = NULL;
  915. goto unlock;
  916. }
  917. rcu_read_unlock();
  918. /* don't lock while sending the packets ... we therefore
  919. * copy the required data before sending */
  920. batman_if = orig_node->router->if_incoming;
  921. memcpy(dstaddr, orig_node->router->addr, ETH_ALEN);
  922. spin_unlock_bh(&bat_priv->orig_hash_lock);
  923. /* create a copy of the skb, if needed, to modify it. */
  924. if (skb_cow(skb, sizeof(struct ethhdr)) < 0)
  925. goto out;
  926. icmp_packet = (struct icmp_packet_rr *)skb->data;
  927. /* decrement ttl */
  928. icmp_packet->ttl--;
  929. /* route it */
  930. send_skb_packet(skb, batman_if, dstaddr);
  931. ret = NET_RX_SUCCESS;
  932. goto out;
  933. unlock:
  934. rcu_read_unlock();
  935. spin_unlock_bh(&bat_priv->orig_hash_lock);
  936. out:
  937. if (neigh_node)
  938. neigh_node_free_ref(neigh_node);
  939. if (orig_node)
  940. kref_put(&orig_node->refcount, orig_node_free_ref);
  941. return ret;
  942. }
/* find a suitable router for this originator, and use
 * bonding if possible. increases the found neighbors
 * refcount.
 *
 * Called with orig_hash_lock held (all callers in this file take it
 * first); takes its own rcu read lock around the router selection.
 * Returns NULL when no usable router exists; otherwise the returned
 * neigh_node carries a reference the caller must drop with
 * neigh_node_free_ref(). */
struct neigh_node *find_router(struct bat_priv *bat_priv,
			       struct orig_node *orig_node,
			       struct batman_if *recv_if)
{
	struct orig_node *primary_orig_node;
	struct orig_node *router_orig;
	struct neigh_node *router, *first_candidate, *tmp_neigh_node;
	static uint8_t zero_mac[ETH_ALEN] = {0, 0, 0, 0, 0, 0};
	int bonding_enabled;

	if (!orig_node)
		return NULL;

	if (!orig_node->router)
		return NULL;

	/* without bonding, the first node should
	 * always choose the default router. */
	bonding_enabled = atomic_read(&bat_priv->bonding);

	rcu_read_lock();
	/* select default router to output */
	router = orig_node->router;
	router_orig = orig_node->router->orig_node;
	/* refcount of 0 means the router is already being destroyed */
	if (!router_orig || !atomic_inc_not_zero(&router->refcount)) {
		rcu_read_unlock();
		return NULL;
	}

	if ((!recv_if) && (!bonding_enabled))
		goto return_router;

	/* if we have something in the primary_addr, we can search
	 * for a potential bonding candidate. */
	if (memcmp(router_orig->primary_addr, zero_mac, ETH_ALEN) == 0)
		goto return_router;

	/* find the orig_node which has the primary interface. might
	 * even be the same as our router_orig in many cases */
	if (memcmp(router_orig->primary_addr,
		   router_orig->orig, ETH_ALEN) == 0) {
		primary_orig_node = router_orig;
	} else {
		/* NOTE(review): primary_orig_node is used without taking a
		 * reference — presumably safe because orig_hash_lock is
		 * held by the caller; confirm against the hash teardown
		 * path. */
		primary_orig_node = hash_find(bat_priv->orig_hash, compare_orig,
					      choose_orig,
					      router_orig->primary_addr);

		if (!primary_orig_node)
			goto return_router;
	}

	/* with less than 2 candidates, we can't do any
	 * bonding and prefer the original router. */
	if (atomic_read(&primary_orig_node->bond_candidates) < 2)
		goto return_router;

	/* all nodes between should choose a candidate which
	 * is not on the interface where the packet came
	 * in. */
	/* drop the reference on the default router before picking a
	 * bonding candidate instead */
	neigh_node_free_ref(router);
	first_candidate = NULL;
	router = NULL;

	if (bonding_enabled) {
		/* in the bonding case, send the packets in a round
		 * robin fashion over the remaining interfaces. */
		list_for_each_entry_rcu(tmp_neigh_node,
				&primary_orig_node->bond_list, bonding_list) {
			if (!first_candidate)
				first_candidate = tmp_neigh_node;

			/* recv_if == NULL on the first node. */
			if (tmp_neigh_node->if_incoming != recv_if &&
			    atomic_inc_not_zero(&tmp_neigh_node->refcount)) {
				router = tmp_neigh_node;
				break;
			}
		}

		/* use the first candidate if nothing was found. */
		if (!router && first_candidate &&
		    atomic_inc_not_zero(&first_candidate->refcount))
			router = first_candidate;

		if (!router) {
			rcu_read_unlock();
			return NULL;
		}

		/* selected should point to the next element
		 * after the current router */
		spin_lock_bh(&primary_orig_node->neigh_list_lock);
		/* this is a list_move(), which unfortunately
		 * does not exist as rcu version */
		list_del_rcu(&primary_orig_node->bond_list);
		list_add_rcu(&primary_orig_node->bond_list,
			     &router->bonding_list);
		spin_unlock_bh(&primary_orig_node->neigh_list_lock);

	} else {
		/* if bonding is disabled, use the best of the
		 * remaining candidates which are not using
		 * this interface. */
		list_for_each_entry_rcu(tmp_neigh_node,
			&primary_orig_node->bond_list, bonding_list) {
			if (!first_candidate)
				first_candidate = tmp_neigh_node;

			/* recv_if == NULL on the first node. */
			if (tmp_neigh_node->if_incoming == recv_if)
				continue;

			if (!atomic_inc_not_zero(&tmp_neigh_node->refcount))
				continue;

			/* if we don't have a router yet
			 * or this one is better, choose it. */
			if ((!router) ||
			    (tmp_neigh_node->tq_avg > router->tq_avg)) {
				/* decrement refcount of
				 * previously selected router */
				if (router)
					neigh_node_free_ref(router);

				router = tmp_neigh_node;
				/* extra reference for the selected router;
				 * the loop reference is dropped below */
				atomic_inc_not_zero(&router->refcount);
			}

			neigh_node_free_ref(tmp_neigh_node);
		}

		/* use the first candidate if nothing was found. */
		if (!router && first_candidate &&
		    atomic_inc_not_zero(&first_candidate->refcount))
			router = first_candidate;
	}
return_router:
	rcu_read_unlock();
	return router;
}
  1064. static int check_unicast_packet(struct sk_buff *skb, int hdr_size)
  1065. {
  1066. struct ethhdr *ethhdr;
  1067. /* drop packet if it has not necessary minimum size */
  1068. if (unlikely(!pskb_may_pull(skb, hdr_size)))
  1069. return -1;
  1070. ethhdr = (struct ethhdr *)skb_mac_header(skb);
  1071. /* packet with unicast indication but broadcast recipient */
  1072. if (is_broadcast_ether_addr(ethhdr->h_dest))
  1073. return -1;
  1074. /* packet with broadcast sender address */
  1075. if (is_broadcast_ether_addr(ethhdr->h_source))
  1076. return -1;
  1077. /* not for me */
  1078. if (!is_my_mac(ethhdr->h_dest))
  1079. return -1;
  1080. return 0;
  1081. }
  1082. int route_unicast_packet(struct sk_buff *skb, struct batman_if *recv_if,
  1083. int hdr_size)
  1084. {
  1085. struct bat_priv *bat_priv = netdev_priv(recv_if->soft_iface);
  1086. struct orig_node *orig_node = NULL;
  1087. struct neigh_node *neigh_node = NULL;
  1088. struct batman_if *batman_if;
  1089. uint8_t dstaddr[ETH_ALEN];
  1090. struct unicast_packet *unicast_packet;
  1091. struct ethhdr *ethhdr = (struct ethhdr *)skb_mac_header(skb);
  1092. int ret = NET_RX_DROP;
  1093. struct sk_buff *new_skb;
  1094. unicast_packet = (struct unicast_packet *)skb->data;
  1095. /* TTL exceeded */
  1096. if (unicast_packet->ttl < 2) {
  1097. pr_debug("Warning - can't forward unicast packet from %pM to "
  1098. "%pM: ttl exceeded\n", ethhdr->h_source,
  1099. unicast_packet->dest);
  1100. goto out;
  1101. }
  1102. /* get routing information */
  1103. spin_lock_bh(&bat_priv->orig_hash_lock);
  1104. rcu_read_lock();
  1105. orig_node = ((struct orig_node *)
  1106. hash_find(bat_priv->orig_hash, compare_orig, choose_orig,
  1107. unicast_packet->dest));
  1108. if (!orig_node)
  1109. goto unlock;
  1110. kref_get(&orig_node->refcount);
  1111. rcu_read_unlock();
  1112. /* find_router() increases neigh_nodes refcount if found. */
  1113. neigh_node = find_router(bat_priv, orig_node, recv_if);
  1114. if (!neigh_node) {
  1115. spin_unlock_bh(&bat_priv->orig_hash_lock);
  1116. goto out;
  1117. }
  1118. /* don't lock while sending the packets ... we therefore
  1119. * copy the required data before sending */
  1120. batman_if = neigh_node->if_incoming;
  1121. memcpy(dstaddr, neigh_node->addr, ETH_ALEN);
  1122. spin_unlock_bh(&bat_priv->orig_hash_lock);
  1123. /* create a copy of the skb, if needed, to modify it. */
  1124. if (skb_cow(skb, sizeof(struct ethhdr)) < 0)
  1125. goto out;
  1126. unicast_packet = (struct unicast_packet *)skb->data;
  1127. if (unicast_packet->packet_type == BAT_UNICAST &&
  1128. atomic_read(&bat_priv->fragmentation) &&
  1129. skb->len > batman_if->net_dev->mtu)
  1130. return frag_send_skb(skb, bat_priv, batman_if,
  1131. dstaddr);
  1132. if (unicast_packet->packet_type == BAT_UNICAST_FRAG &&
  1133. frag_can_reassemble(skb, batman_if->net_dev->mtu)) {
  1134. ret = frag_reassemble_skb(skb, bat_priv, &new_skb);
  1135. if (ret == NET_RX_DROP)
  1136. goto out;
  1137. /* packet was buffered for late merge */
  1138. if (!new_skb) {
  1139. ret = NET_RX_SUCCESS;
  1140. goto out;
  1141. }
  1142. skb = new_skb;
  1143. unicast_packet = (struct unicast_packet *)skb->data;
  1144. }
  1145. /* decrement ttl */
  1146. unicast_packet->ttl--;
  1147. /* route it */
  1148. send_skb_packet(skb, batman_if, dstaddr);
  1149. ret = NET_RX_SUCCESS;
  1150. goto out;
  1151. unlock:
  1152. rcu_read_unlock();
  1153. spin_unlock_bh(&bat_priv->orig_hash_lock);
  1154. out:
  1155. if (neigh_node)
  1156. neigh_node_free_ref(neigh_node);
  1157. if (orig_node)
  1158. kref_put(&orig_node->refcount, orig_node_free_ref);
  1159. return ret;
  1160. }
  1161. int recv_unicast_packet(struct sk_buff *skb, struct batman_if *recv_if)
  1162. {
  1163. struct unicast_packet *unicast_packet;
  1164. int hdr_size = sizeof(struct unicast_packet);
  1165. if (check_unicast_packet(skb, hdr_size) < 0)
  1166. return NET_RX_DROP;
  1167. unicast_packet = (struct unicast_packet *)skb->data;
  1168. /* packet for me */
  1169. if (is_my_mac(unicast_packet->dest)) {
  1170. interface_rx(recv_if->soft_iface, skb, recv_if, hdr_size);
  1171. return NET_RX_SUCCESS;
  1172. }
  1173. return route_unicast_packet(skb, recv_if, hdr_size);
  1174. }
  1175. int recv_ucast_frag_packet(struct sk_buff *skb, struct batman_if *recv_if)
  1176. {
  1177. struct bat_priv *bat_priv = netdev_priv(recv_if->soft_iface);
  1178. struct unicast_frag_packet *unicast_packet;
  1179. int hdr_size = sizeof(struct unicast_frag_packet);
  1180. struct sk_buff *new_skb = NULL;
  1181. int ret;
  1182. if (check_unicast_packet(skb, hdr_size) < 0)
  1183. return NET_RX_DROP;
  1184. unicast_packet = (struct unicast_frag_packet *)skb->data;
  1185. /* packet for me */
  1186. if (is_my_mac(unicast_packet->dest)) {
  1187. ret = frag_reassemble_skb(skb, bat_priv, &new_skb);
  1188. if (ret == NET_RX_DROP)
  1189. return NET_RX_DROP;
  1190. /* packet was buffered for late merge */
  1191. if (!new_skb)
  1192. return NET_RX_SUCCESS;
  1193. interface_rx(recv_if->soft_iface, new_skb, recv_if,
  1194. sizeof(struct unicast_packet));
  1195. return NET_RX_SUCCESS;
  1196. }
  1197. return route_unicast_packet(skb, recv_if, hdr_size);
  1198. }
/* Receive handler for batman broadcast packets: validate the frame,
 * run it through the per-originator duplicate/sequence window, then
 * rebroadcast it and deliver a copy locally.
 * Returns NET_RX_SUCCESS / NET_RX_DROP. */
int recv_bcast_packet(struct sk_buff *skb, struct batman_if *recv_if)
{
	struct bat_priv *bat_priv = netdev_priv(recv_if->soft_iface);
	struct orig_node *orig_node;
	struct bcast_packet *bcast_packet;
	struct ethhdr *ethhdr;
	int hdr_size = sizeof(struct bcast_packet);
	int32_t seq_diff;

	/* drop packet if it has not necessary minimum size */
	if (unlikely(!pskb_may_pull(skb, hdr_size)))
		return NET_RX_DROP;

	ethhdr = (struct ethhdr *)skb_mac_header(skb);

	/* packet with broadcast indication but unicast recipient */
	if (!is_broadcast_ether_addr(ethhdr->h_dest))
		return NET_RX_DROP;

	/* packet with broadcast sender address */
	if (is_broadcast_ether_addr(ethhdr->h_source))
		return NET_RX_DROP;

	/* ignore broadcasts sent by myself */
	if (is_my_mac(ethhdr->h_source))
		return NET_RX_DROP;

	bcast_packet = (struct bcast_packet *)skb->data;

	/* ignore broadcasts originated by myself */
	if (is_my_mac(bcast_packet->orig))
		return NET_RX_DROP;

	/* ttl too low to be rebroadcast any further */
	if (bcast_packet->ttl < 2)
		return NET_RX_DROP;

	spin_lock_bh(&bat_priv->orig_hash_lock);
	rcu_read_lock();
	orig_node = ((struct orig_node *)
		     hash_find(bat_priv->orig_hash, compare_orig, choose_orig,
			       bcast_packet->orig));
	rcu_read_unlock();

	/* NOTE(review): orig_node is used below without taking a
	 * reference — presumably protected by orig_hash_lock being held
	 * until the window bookkeeping is done; confirm against the hash
	 * teardown path. */
	if (!orig_node) {
		spin_unlock_bh(&bat_priv->orig_hash_lock);
		return NET_RX_DROP;
	}

	/* check whether the packet is a duplicate */
	if (get_bit_status(orig_node->bcast_bits,
			   orig_node->last_bcast_seqno,
			   ntohl(bcast_packet->seqno))) {
		spin_unlock_bh(&bat_priv->orig_hash_lock);
		return NET_RX_DROP;
	}

	seq_diff = ntohl(bcast_packet->seqno) - orig_node->last_bcast_seqno;

	/* check whether the packet is old and the host just restarted. */
	if (window_protected(bat_priv, seq_diff,
			     &orig_node->bcast_seqno_reset)) {
		spin_unlock_bh(&bat_priv->orig_hash_lock);
		return NET_RX_DROP;
	}

	/* mark broadcast in flood history, update window position
	 * if required. */
	if (bit_get_packet(bat_priv, orig_node->bcast_bits, seq_diff, 1))
		orig_node->last_bcast_seqno = ntohl(bcast_packet->seqno);

	spin_unlock_bh(&bat_priv->orig_hash_lock);

	/* rebroadcast packet */
	add_bcast_packet_to_list(bat_priv, skb);

	/* broadcast for me */
	interface_rx(recv_if->soft_iface, skb, recv_if, hdr_size);

	return NET_RX_SUCCESS;
}
  1261. int recv_vis_packet(struct sk_buff *skb, struct batman_if *recv_if)
  1262. {
  1263. struct vis_packet *vis_packet;
  1264. struct ethhdr *ethhdr;
  1265. struct bat_priv *bat_priv = netdev_priv(recv_if->soft_iface);
  1266. int hdr_size = sizeof(struct vis_packet);
  1267. /* keep skb linear */
  1268. if (skb_linearize(skb) < 0)
  1269. return NET_RX_DROP;
  1270. if (unlikely(!pskb_may_pull(skb, hdr_size)))
  1271. return NET_RX_DROP;
  1272. vis_packet = (struct vis_packet *)skb->data;
  1273. ethhdr = (struct ethhdr *)skb_mac_header(skb);
  1274. /* not for me */
  1275. if (!is_my_mac(ethhdr->h_dest))
  1276. return NET_RX_DROP;
  1277. /* ignore own packets */
  1278. if (is_my_mac(vis_packet->vis_orig))
  1279. return NET_RX_DROP;
  1280. if (is_my_mac(vis_packet->sender_orig))
  1281. return NET_RX_DROP;
  1282. switch (vis_packet->vis_type) {
  1283. case VIS_TYPE_SERVER_SYNC:
  1284. receive_server_sync_packet(bat_priv, vis_packet,
  1285. skb_headlen(skb));
  1286. break;
  1287. case VIS_TYPE_CLIENT_UPDATE:
  1288. receive_client_update_packet(bat_priv, vis_packet,
  1289. skb_headlen(skb));
  1290. break;
  1291. default: /* ignore unknown packet */
  1292. break;
  1293. }
  1294. /* We take a copy of the data in the packet, so we should
  1295. always free the skbuf. */
  1296. return NET_RX_DROP;
  1297. }