routing.c 48 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
712781279128012811282128312841285128612871288128912901291129212931294129512961297129812991300130113021303130413051306130713081309131013111312131313141315131613171318131913201321132213231324132513261327132813291330133113321333133413351336133713381339134013411342134313441345134613471348134913501351135213531354135513561357135813591360136113621363136413651366136713681369137013711372137313741375137613771378137913801381138213831384138513861387138813891390139113921393139413951396139713981399140014011402140314041405140614071408140914101411141214131414141514161417141814191420142114221423142414251426142714281429143014311432143314341435143614371438143914401441144214431444144514461447144814491450145114521453145414551456145714581459146014611462146314641465146614671468146914701471147214731474147514761477147814791480148114821483148414851486148714881489149014911492149314941495149614971498149915001501150215031504150515061507150815091510151115121513151415151516151715181519152015211522152315241525152615271528152915301531153215331534153515361537153815391540154115421543154415451546154715481549155015511552155315541555155615571558155915601561156215631564156515661567156815691570157115721573157415751576157715781579158015811582158315841585158615871588158915901591159215931594159515961597159815991600160116021603160416051606160716081609161016111612161316141615161616171618161916201621162216231624162516261627162816291630163116321633163416351636163716381639164016411642164316441645164616471648164916501651165216531654165516561657165816591660166116621663166416651666166716681669167016711672167316741675167616771678167916801681168216831684168516861687168816891690169116921693169416951696169716981699170017011702170317041705170617071708170917101711171217131714171517161717171817191720172117221723172417251726172717281729173017311732173317341735173617371738173917401741174217431744174517461747174817491750175117521753
  1. /*
  2. * Copyright (C) 2007-2011 B.A.T.M.A.N. contributors:
  3. *
  4. * Marek Lindner, Simon Wunderlich
  5. *
  6. * This program is free software; you can redistribute it and/or
  7. * modify it under the terms of version 2 of the GNU General Public
  8. * License as published by the Free Software Foundation.
  9. *
  10. * This program is distributed in the hope that it will be useful, but
  11. * WITHOUT ANY WARRANTY; without even the implied warranty of
  12. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
  13. * General Public License for more details.
  14. *
  15. * You should have received a copy of the GNU General Public License
  16. * along with this program; if not, write to the Free Software
  17. * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
  18. * 02110-1301, USA
  19. *
  20. */
  21. #include "main.h"
  22. #include "routing.h"
  23. #include "send.h"
  24. #include "hash.h"
  25. #include "soft-interface.h"
  26. #include "hard-interface.h"
  27. #include "icmp_socket.h"
  28. #include "translation-table.h"
  29. #include "originator.h"
  30. #include "ring_buffer.h"
  31. #include "vis.h"
  32. #include "aggregation.h"
  33. #include "gateway_common.h"
  34. #include "gateway_client.h"
  35. #include "unicast.h"
  36. void slide_own_bcast_window(struct hard_iface *hard_iface)
  37. {
  38. struct bat_priv *bat_priv = netdev_priv(hard_iface->soft_iface);
  39. struct hashtable_t *hash = bat_priv->orig_hash;
  40. struct hlist_node *node;
  41. struct hlist_head *head;
  42. struct orig_node *orig_node;
  43. unsigned long *word;
  44. int i;
  45. size_t word_index;
  46. for (i = 0; i < hash->size; i++) {
  47. head = &hash->table[i];
  48. rcu_read_lock();
  49. hlist_for_each_entry_rcu(orig_node, node, head, hash_entry) {
  50. spin_lock_bh(&orig_node->ogm_cnt_lock);
  51. word_index = hard_iface->if_num * NUM_WORDS;
  52. word = &(orig_node->bcast_own[word_index]);
  53. bit_get_packet(bat_priv, word, 1, 0);
  54. orig_node->bcast_own_sum[hard_iface->if_num] =
  55. bit_packet_count(word);
  56. spin_unlock_bh(&orig_node->ogm_cnt_lock);
  57. }
  58. rcu_read_unlock();
  59. }
  60. }
/**
 * update_transtable - sync our view of an originator's translation table
 * @bat_priv: the bat priv with all the soft interface information
 * @orig_node: the originator the tt data was received from
 * @tt_buff: buffer holding the tt changes attached to the OGM
 * @tt_num_changes: number of tt changes in @tt_buff
 * @ttvn: translation table version number announced in the OGM
 * @tt_crc: global table CRC announced in the OGM
 *
 * If exactly one version was missed the attached changes are applied and
 * verified via CRC; otherwise (or on CRC mismatch) a fresh table is
 * requested from the originator.
 */
static void update_transtable(struct bat_priv *bat_priv,
			      struct orig_node *orig_node,
			      const unsigned char *tt_buff,
			      uint8_t tt_num_changes, uint8_t ttvn,
			      uint16_t tt_crc)
{
	uint8_t orig_ttvn = (uint8_t)atomic_read(&orig_node->last_ttvn);
	bool full_table = true;

	/* the ttvn increased by one -> we can apply the attached changes */
	if (ttvn - orig_ttvn == 1) {
		/* the OGM could not contain the changes because they were too
		 * many to fit in one frame or because they have already been
		 * sent TT_OGM_APPEND_MAX times. In this case send a tt
		 * request (incremental: full_table = false) */
		if (!tt_num_changes) {
			full_table = false;
			goto request_table;
		}

		tt_update_changes(bat_priv, orig_node, tt_num_changes, ttvn,
				  (struct tt_change *)tt_buff);

		/* Even if we received the crc into the OGM, we prefer
		 * to recompute it to spot any possible inconsistency
		 * in the global table */
		orig_node->tt_crc = tt_global_crc(bat_priv, orig_node);

		/* The ttvn alone is not enough to guarantee consistency
		 * because a single value could represent different states
		 * (due to the wrap around). Thus a node has to check whether
		 * the resulting table (after applying the changes) is still
		 * consistent or not. E.g. a node could disconnect while its
		 * ttvn is X and reconnect on ttvn = X + TTVN_MAX: in this case
		 * checking the CRC value is mandatory to detect the
		 * inconsistency */
		if (orig_node->tt_crc != tt_crc)
			goto request_table;

		/* Roaming phase is over: tables are in sync again. I can
		 * unset the flag */
		orig_node->tt_poss_change = false;
	} else {
		/* if we missed more than one change or our tables are not
		 * in sync anymore -> request fresh tt data */
		if (ttvn != orig_ttvn || orig_node->tt_crc != tt_crc) {
			/* NOTE: also reached via goto from the if-branch
			 * above, with full_table possibly set to false */
request_table:
			bat_dbg(DBG_TT, bat_priv, "TT inconsistency for %pM. "
				"Need to retrieve the correct information "
				"(ttvn: %u last_ttvn: %u crc: %u last_crc: "
				"%u num_changes: %u)\n", orig_node->orig, ttvn,
				orig_ttvn, tt_crc, orig_node->tt_crc,
				tt_num_changes);
			send_tt_request(bat_priv, orig_node, ttvn, tt_crc,
					full_table);
			return;
		}
	}
}
/**
 * update_route - switch the best next hop towards an originator
 * @bat_priv: the bat priv with all the soft interface information
 * @orig_node: originator whose route is updated
 * @neigh_node: new best next hop (NULL means the route is deleted)
 *
 * Logs the route add/delete/change, publishes the new router pointer
 * under neigh_list_lock and balances the neighbor reference counts.
 */
static void update_route(struct bat_priv *bat_priv,
			 struct orig_node *orig_node,
			 struct neigh_node *neigh_node)
{
	struct neigh_node *curr_router;

	curr_router = orig_node_get_router(orig_node);

	/* route deleted */
	if ((curr_router) && (!neigh_node)) {
		bat_dbg(DBG_ROUTES, bat_priv, "Deleting route towards: %pM\n",
			orig_node->orig);
		tt_global_del_orig(bat_priv, orig_node,
				   "Deleted route towards originator");

	/* route added */
	} else if ((!curr_router) && (neigh_node)) {
		bat_dbg(DBG_ROUTES, bat_priv,
			"Adding route towards: %pM (via %pM)\n",
			orig_node->orig, neigh_node->addr);

	/* route changed */
	} else if (neigh_node && curr_router) {
		bat_dbg(DBG_ROUTES, bat_priv,
			"Changing route towards: %pM "
			"(now via %pM - was via %pM)\n",
			orig_node->orig, neigh_node->addr,
			curr_router->addr);
	}

	/* drop the reference taken by orig_node_get_router() above;
	 * presumably orig_node->router holds its own reference which is
	 * released by the second free_ref below - the two calls are NOT
	 * a double free (confirm against orig_node_get_router semantics) */
	if (curr_router)
		neigh_node_free_ref(curr_router);

	/* increase refcount of new best neighbor; if it is already being
	 * freed (refcount hit zero), install no router at all */
	if (neigh_node && !atomic_inc_not_zero(&neigh_node->refcount))
		neigh_node = NULL;

	spin_lock_bh(&orig_node->neigh_list_lock);
	rcu_assign_pointer(orig_node->router, neigh_node);
	spin_unlock_bh(&orig_node->neigh_list_lock);

	/* decrease refcount of previous best neighbor */
	if (curr_router)
		neigh_node_free_ref(curr_router);
}
  152. void update_routes(struct bat_priv *bat_priv, struct orig_node *orig_node,
  153. struct neigh_node *neigh_node)
  154. {
  155. struct neigh_node *router = NULL;
  156. if (!orig_node)
  157. goto out;
  158. router = orig_node_get_router(orig_node);
  159. if (router != neigh_node)
  160. update_route(bat_priv, orig_node, neigh_node);
  161. out:
  162. if (router)
  163. neigh_node_free_ref(router);
  164. }
/**
 * is_bidirectional_neigh - evaluate whether a link is usable in both
 *	directions and compute the resulting TQ
 * @orig_node: originator the OGM was generated by
 * @orig_neigh_node: one hop neighbor the OGM was received from
 * @batman_packet: the OGM; its tq field is updated in place with our
 *	own link quality and the asymmetry penalty applied
 * @if_incoming: interface the OGM was received on
 *
 * Returns 1 if the resulting TQ reaches TQ_TOTAL_BIDRECT_LIMIT
 * (link considered bidirectional), 0 otherwise.
 */
static int is_bidirectional_neigh(struct orig_node *orig_node,
				  struct orig_node *orig_neigh_node,
				  struct batman_packet *batman_packet,
				  struct hard_iface *if_incoming)
{
	struct bat_priv *bat_priv = netdev_priv(if_incoming->soft_iface);
	struct neigh_node *neigh_node = NULL, *tmp_neigh_node;
	struct hlist_node *node;
	uint8_t total_count;
	uint8_t orig_eq_count, neigh_rq_count, tq_own;
	int tq_asym_penalty, ret = 0;

	/* find corresponding one hop neighbor */
	rcu_read_lock();
	hlist_for_each_entry_rcu(tmp_neigh_node, node,
				 &orig_neigh_node->neigh_list, list) {
		if (!compare_eth(tmp_neigh_node->addr, orig_neigh_node->orig))
			continue;

		if (tmp_neigh_node->if_incoming != if_incoming)
			continue;

		/* skip neighbors that are already being freed */
		if (!atomic_inc_not_zero(&tmp_neigh_node->refcount))
			continue;

		neigh_node = tmp_neigh_node;
		break;
	}
	rcu_read_unlock();

	/* no entry yet: the neighbor is its own originator here */
	if (!neigh_node)
		neigh_node = create_neighbor(orig_neigh_node,
					     orig_neigh_node,
					     orig_neigh_node->orig,
					     if_incoming);

	if (!neigh_node)
		goto out;

	/* if orig_node is direct neighbour update neigh_node last_valid */
	if (orig_node == orig_neigh_node)
		neigh_node->last_valid = jiffies;

	orig_node->last_valid = jiffies;

	/* find packet count of corresponding one hop neighbor */
	/* NOTE(review): this takes orig_node->ogm_cnt_lock while reading
	 * orig_neigh_node's bcast_own_sum - confirm this is the intended
	 * lock when orig_node != orig_neigh_node */
	spin_lock_bh(&orig_node->ogm_cnt_lock);
	orig_eq_count = orig_neigh_node->bcast_own_sum[if_incoming->if_num];
	neigh_rq_count = neigh_node->real_packet_count;
	spin_unlock_bh(&orig_node->ogm_cnt_lock);

	/* pay attention to not get a value bigger than 100 % */
	total_count = (orig_eq_count > neigh_rq_count ?
		       neigh_rq_count : orig_eq_count);

	/* if we have too few packets (too less data) we set tq_own to zero */
	/* if we receive too few packets it is not considered bidirectional */
	if ((total_count < TQ_LOCAL_BIDRECT_SEND_MINIMUM) ||
	    (neigh_rq_count < TQ_LOCAL_BIDRECT_RECV_MINIMUM))
		tq_own = 0;
	else
		/* neigh_node->real_packet_count is never zero as we
		 * only purge old information when getting new
		 * information */
		tq_own = (TQ_MAX_VALUE * total_count) / neigh_rq_count;

	/*
	 * 1 - ((1-x) ** 3), normalized to TQ_MAX_VALUE this does
	 * affect the nearly-symmetric links only a little, but
	 * punishes asymmetric links more. This will give a value
	 * between 0 and TQ_MAX_VALUE
	 */
	tq_asym_penalty = TQ_MAX_VALUE - (TQ_MAX_VALUE *
			  (TQ_LOCAL_WINDOW_SIZE - neigh_rq_count) *
			  (TQ_LOCAL_WINDOW_SIZE - neigh_rq_count) *
			  (TQ_LOCAL_WINDOW_SIZE - neigh_rq_count)) /
			  (TQ_LOCAL_WINDOW_SIZE *
			   TQ_LOCAL_WINDOW_SIZE *
			   TQ_LOCAL_WINDOW_SIZE);

	/* fold our own link quality and the penalty into the received tq */
	batman_packet->tq = ((batman_packet->tq * tq_own * tq_asym_penalty) /
			     (TQ_MAX_VALUE * TQ_MAX_VALUE));

	bat_dbg(DBG_BATMAN, bat_priv,
		"bidirectional: "
		"orig = %-15pM neigh = %-15pM => own_bcast = %2i, "
		"real recv = %2i, local tq: %3i, asym_penalty: %3i, "
		"total tq: %3i\n",
		orig_node->orig, orig_neigh_node->orig, total_count,
		neigh_rq_count, tq_own, tq_asym_penalty, batman_packet->tq);

	/* if link has the minimum required transmission quality
	 * consider it bidirectional */
	if (batman_packet->tq >= TQ_TOTAL_BIDRECT_LIMIT)
		ret = 1;

out:
	if (neigh_node)
		neigh_node_free_ref(neigh_node);
	return ret;
}
  250. /* caller must hold the neigh_list_lock */
  251. void bonding_candidate_del(struct orig_node *orig_node,
  252. struct neigh_node *neigh_node)
  253. {
  254. /* this neighbor is not part of our candidate list */
  255. if (list_empty(&neigh_node->bonding_list))
  256. goto out;
  257. list_del_rcu(&neigh_node->bonding_list);
  258. INIT_LIST_HEAD(&neigh_node->bonding_list);
  259. neigh_node_free_ref(neigh_node);
  260. atomic_dec(&orig_node->bond_candidates);
  261. out:
  262. return;
  263. }
/**
 * bonding_candidate_add - consider a neighbor as bonding candidate
 * @orig_node: originator the candidate list belongs to
 * @neigh_node: neighbor to consider
 *
 * Adds the neighbor to orig_node's bond_list if it shares the current
 * router's primary address, its TQ is within BONDING_TQ_THRESHOLD of
 * the router's, and no other candidate uses the same interface or MAC
 * address (interference avoidance). Otherwise the neighbor is removed
 * from the candidate list.
 */
static void bonding_candidate_add(struct orig_node *orig_node,
				  struct neigh_node *neigh_node)
{
	struct hlist_node *node;
	struct neigh_node *tmp_neigh_node, *router = NULL;
	uint8_t interference_candidate = 0;

	spin_lock_bh(&orig_node->neigh_list_lock);

	/* only consider if it has the same primary address ... */
	if (!compare_eth(orig_node->orig,
			 neigh_node->orig_node->primary_addr))
		goto candidate_del;

	router = orig_node_get_router(orig_node);
	if (!router)
		goto candidate_del;

	/* ... and is good enough to be considered */
	if (neigh_node->tq_avg < router->tq_avg - BONDING_TQ_THRESHOLD)
		goto candidate_del;

	/**
	 * check if we have another candidate with the same mac address or
	 * interface. If we do, we won't select this candidate because of
	 * possible interference.
	 */
	/* list walked under neigh_list_lock (held above) */
	hlist_for_each_entry_rcu(tmp_neigh_node, node,
				 &orig_node->neigh_list, list) {
		if (tmp_neigh_node == neigh_node)
			continue;

		/* we only care if the other candidate is even
		 * considered as candidate. */
		if (list_empty(&tmp_neigh_node->bonding_list))
			continue;

		if ((neigh_node->if_incoming == tmp_neigh_node->if_incoming) ||
		    (compare_eth(neigh_node->addr, tmp_neigh_node->addr))) {
			interference_candidate = 1;
			break;
		}
	}

	/* don't care further if it is an interference candidate */
	if (interference_candidate)
		goto candidate_del;

	/* this neighbor already is part of our candidate list */
	if (!list_empty(&neigh_node->bonding_list))
		goto out;

	/* skip if the neighbor is already being freed */
	if (!atomic_inc_not_zero(&neigh_node->refcount))
		goto out;

	list_add_rcu(&neigh_node->bonding_list, &orig_node->bond_list);
	atomic_inc(&orig_node->bond_candidates);
	goto out;

candidate_del:
	bonding_candidate_del(orig_node, neigh_node);

out:
	spin_unlock_bh(&orig_node->neigh_list_lock);

	if (router)
		neigh_node_free_ref(router);
}
  318. /* copy primary address for bonding */
  319. static void bonding_save_primary(const struct orig_node *orig_node,
  320. struct orig_node *orig_neigh_node,
  321. const struct batman_packet *batman_packet)
  322. {
  323. if (!(batman_packet->flags & PRIMARIES_FIRST_HOP))
  324. return;
  325. memcpy(orig_neigh_node->primary_addr, orig_node->orig, ETH_ALEN);
  326. }
  327. static void update_orig(struct bat_priv *bat_priv, struct orig_node *orig_node,
  328. const struct ethhdr *ethhdr,
  329. const struct batman_packet *batman_packet,
  330. struct hard_iface *if_incoming,
  331. const unsigned char *tt_buff, int is_duplicate)
  332. {
  333. struct neigh_node *neigh_node = NULL, *tmp_neigh_node = NULL;
  334. struct neigh_node *router = NULL;
  335. struct orig_node *orig_node_tmp;
  336. struct hlist_node *node;
  337. uint8_t bcast_own_sum_orig, bcast_own_sum_neigh;
  338. bat_dbg(DBG_BATMAN, bat_priv, "update_originator(): "
  339. "Searching and updating originator entry of received packet\n");
  340. rcu_read_lock();
  341. hlist_for_each_entry_rcu(tmp_neigh_node, node,
  342. &orig_node->neigh_list, list) {
  343. if (compare_eth(tmp_neigh_node->addr, ethhdr->h_source) &&
  344. (tmp_neigh_node->if_incoming == if_incoming) &&
  345. atomic_inc_not_zero(&tmp_neigh_node->refcount)) {
  346. if (neigh_node)
  347. neigh_node_free_ref(neigh_node);
  348. neigh_node = tmp_neigh_node;
  349. continue;
  350. }
  351. if (is_duplicate)
  352. continue;
  353. spin_lock_bh(&tmp_neigh_node->tq_lock);
  354. ring_buffer_set(tmp_neigh_node->tq_recv,
  355. &tmp_neigh_node->tq_index, 0);
  356. tmp_neigh_node->tq_avg =
  357. ring_buffer_avg(tmp_neigh_node->tq_recv);
  358. spin_unlock_bh(&tmp_neigh_node->tq_lock);
  359. }
  360. if (!neigh_node) {
  361. struct orig_node *orig_tmp;
  362. orig_tmp = get_orig_node(bat_priv, ethhdr->h_source);
  363. if (!orig_tmp)
  364. goto unlock;
  365. neigh_node = create_neighbor(orig_node, orig_tmp,
  366. ethhdr->h_source, if_incoming);
  367. orig_node_free_ref(orig_tmp);
  368. if (!neigh_node)
  369. goto unlock;
  370. } else
  371. bat_dbg(DBG_BATMAN, bat_priv,
  372. "Updating existing last-hop neighbor of originator\n");
  373. rcu_read_unlock();
  374. orig_node->flags = batman_packet->flags;
  375. neigh_node->last_valid = jiffies;
  376. spin_lock_bh(&neigh_node->tq_lock);
  377. ring_buffer_set(neigh_node->tq_recv,
  378. &neigh_node->tq_index,
  379. batman_packet->tq);
  380. neigh_node->tq_avg = ring_buffer_avg(neigh_node->tq_recv);
  381. spin_unlock_bh(&neigh_node->tq_lock);
  382. if (!is_duplicate) {
  383. orig_node->last_ttl = batman_packet->ttl;
  384. neigh_node->last_ttl = batman_packet->ttl;
  385. }
  386. bonding_candidate_add(orig_node, neigh_node);
  387. /* if this neighbor already is our next hop there is nothing
  388. * to change */
  389. router = orig_node_get_router(orig_node);
  390. if (router == neigh_node)
  391. goto update_tt;
  392. /* if this neighbor does not offer a better TQ we won't consider it */
  393. if (router && (router->tq_avg > neigh_node->tq_avg))
  394. goto update_tt;
  395. /* if the TQ is the same and the link not more symetric we
  396. * won't consider it either */
  397. if (router && (neigh_node->tq_avg == router->tq_avg)) {
  398. orig_node_tmp = router->orig_node;
  399. spin_lock_bh(&orig_node_tmp->ogm_cnt_lock);
  400. bcast_own_sum_orig =
  401. orig_node_tmp->bcast_own_sum[if_incoming->if_num];
  402. spin_unlock_bh(&orig_node_tmp->ogm_cnt_lock);
  403. orig_node_tmp = neigh_node->orig_node;
  404. spin_lock_bh(&orig_node_tmp->ogm_cnt_lock);
  405. bcast_own_sum_neigh =
  406. orig_node_tmp->bcast_own_sum[if_incoming->if_num];
  407. spin_unlock_bh(&orig_node_tmp->ogm_cnt_lock);
  408. if (bcast_own_sum_orig >= bcast_own_sum_neigh)
  409. goto update_tt;
  410. }
  411. update_routes(bat_priv, orig_node, neigh_node);
  412. update_tt:
  413. /* I have to check for transtable changes only if the OGM has been
  414. * sent through a primary interface */
  415. if (((batman_packet->orig != ethhdr->h_source) &&
  416. (batman_packet->ttl > 2)) ||
  417. (batman_packet->flags & PRIMARIES_FIRST_HOP))
  418. update_transtable(bat_priv, orig_node, tt_buff,
  419. batman_packet->tt_num_changes,
  420. batman_packet->ttvn,
  421. batman_packet->tt_crc);
  422. if (orig_node->gw_flags != batman_packet->gw_flags)
  423. gw_node_update(bat_priv, orig_node, batman_packet->gw_flags);
  424. orig_node->gw_flags = batman_packet->gw_flags;
  425. /* restart gateway selection if fast or late switching was enabled */
  426. if ((orig_node->gw_flags) &&
  427. (atomic_read(&bat_priv->gw_mode) == GW_MODE_CLIENT) &&
  428. (atomic_read(&bat_priv->gw_sel_class) > 2))
  429. gw_check_election(bat_priv, orig_node);
  430. goto out;
  431. unlock:
  432. rcu_read_unlock();
  433. out:
  434. if (neigh_node)
  435. neigh_node_free_ref(neigh_node);
  436. if (router)
  437. neigh_node_free_ref(router);
  438. }
  439. /* checks whether the host restarted and is in the protection time.
  440. * returns:
  441. * 0 if the packet is to be accepted
  442. * 1 if the packet is to be ignored.
  443. */
  444. static int window_protected(struct bat_priv *bat_priv,
  445. int32_t seq_num_diff,
  446. unsigned long *last_reset)
  447. {
  448. if ((seq_num_diff <= -TQ_LOCAL_WINDOW_SIZE)
  449. || (seq_num_diff >= EXPECTED_SEQNO_RANGE)) {
  450. if (time_after(jiffies, *last_reset +
  451. msecs_to_jiffies(RESET_PROTECTION_MS))) {
  452. *last_reset = jiffies;
  453. bat_dbg(DBG_BATMAN, bat_priv,
  454. "old packet received, start protection\n");
  455. return 0;
  456. } else
  457. return 1;
  458. }
  459. return 0;
  460. }
/* processes a batman packet for all interfaces, adjusts the sequence number and
 * finds out whether it is a duplicate.
 * returns:
 *  1 the packet is a duplicate
 *  0 the packet has not yet been received
 * -1 the packet is old and has been received while the seqno window
 *    was protected. Caller should drop it.
 */
static int count_real_packets(const struct ethhdr *ethhdr,
			      const struct batman_packet *batman_packet,
			      const struct hard_iface *if_incoming)
{
	struct bat_priv *bat_priv = netdev_priv(if_incoming->soft_iface);
	struct orig_node *orig_node;
	struct neigh_node *tmp_neigh_node;
	struct hlist_node *node;
	int is_duplicate = 0;
	int32_t seq_diff;
	int need_update = 0;
	int set_mark, ret = -1;

	/* may create the entry; a failed allocation is treated like a
	 * first reception (returns 0) */
	orig_node = get_orig_node(bat_priv, batman_packet->orig);
	if (!orig_node)
		return 0;

	spin_lock_bh(&orig_node->ogm_cnt_lock);
	seq_diff = batman_packet->seqno - orig_node->last_real_seqno;

	/* signalize caller that the packet is to be dropped. */
	if (window_protected(bat_priv, seq_diff,
			     &orig_node->batman_seqno_reset))
		goto out;

	rcu_read_lock();
	hlist_for_each_entry_rcu(tmp_neigh_node, node,
				 &orig_node->neigh_list, list) {
		/* duplicate if ANY neighbor already has this seqno marked */
		is_duplicate |= get_bit_status(tmp_neigh_node->real_bits,
					       orig_node->last_real_seqno,
					       batman_packet->seqno);

		/* only mark the bit for the neighbor that sent the frame */
		if (compare_eth(tmp_neigh_node->addr, ethhdr->h_source) &&
		    (tmp_neigh_node->if_incoming == if_incoming))
			set_mark = 1;
		else
			set_mark = 0;

		/* if the window moved, set the update flag. */
		need_update |= bit_get_packet(bat_priv,
					      tmp_neigh_node->real_bits,
					      seq_diff, set_mark);
		tmp_neigh_node->real_packet_count =
			bit_packet_count(tmp_neigh_node->real_bits);
	}
	rcu_read_unlock();

	if (need_update) {
		bat_dbg(DBG_BATMAN, bat_priv,
			"updating last_seqno: old %d, new %d\n",
			orig_node->last_real_seqno, batman_packet->seqno);
		orig_node->last_real_seqno = batman_packet->seqno;
	}

	ret = is_duplicate;

out:
	spin_unlock_bh(&orig_node->ogm_cnt_lock);
	orig_node_free_ref(orig_node);
	return ret;
}
  521. void receive_bat_packet(const struct ethhdr *ethhdr,
  522. struct batman_packet *batman_packet,
  523. const unsigned char *tt_buff,
  524. struct hard_iface *if_incoming)
  525. {
  526. struct bat_priv *bat_priv = netdev_priv(if_incoming->soft_iface);
  527. struct hard_iface *hard_iface;
  528. struct orig_node *orig_neigh_node, *orig_node;
  529. struct neigh_node *router = NULL, *router_router = NULL;
  530. struct neigh_node *orig_neigh_router = NULL;
  531. int has_directlink_flag;
  532. int is_my_addr = 0, is_my_orig = 0, is_my_oldorig = 0;
  533. int is_broadcast = 0, is_bidirectional, is_single_hop_neigh;
  534. int is_duplicate;
  535. uint32_t if_incoming_seqno;
  536. /* Silently drop when the batman packet is actually not a
  537. * correct packet.
  538. *
  539. * This might happen if a packet is padded (e.g. Ethernet has a
  540. * minimum frame length of 64 byte) and the aggregation interprets
  541. * it as an additional length.
  542. *
  543. * TODO: A more sane solution would be to have a bit in the
  544. * batman_packet to detect whether the packet is the last
  545. * packet in an aggregation. Here we expect that the padding
  546. * is always zero (or not 0x01)
  547. */
  548. if (batman_packet->packet_type != BAT_PACKET)
  549. return;
  550. /* could be changed by schedule_own_packet() */
  551. if_incoming_seqno = atomic_read(&if_incoming->seqno);
  552. has_directlink_flag = (batman_packet->flags & DIRECTLINK ? 1 : 0);
  553. is_single_hop_neigh = (compare_eth(ethhdr->h_source,
  554. batman_packet->orig) ? 1 : 0);
  555. bat_dbg(DBG_BATMAN, bat_priv,
  556. "Received BATMAN packet via NB: %pM, IF: %s [%pM] "
  557. "(from OG: %pM, via prev OG: %pM, seqno %d, ttvn %u, "
  558. "crc %u, changes %u, td %d, TTL %d, V %d, IDF %d)\n",
  559. ethhdr->h_source, if_incoming->net_dev->name,
  560. if_incoming->net_dev->dev_addr, batman_packet->orig,
  561. batman_packet->prev_sender, batman_packet->seqno,
  562. batman_packet->ttvn, batman_packet->tt_crc,
  563. batman_packet->tt_num_changes, batman_packet->tq,
  564. batman_packet->ttl, batman_packet->version,
  565. has_directlink_flag);
  566. rcu_read_lock();
  567. list_for_each_entry_rcu(hard_iface, &hardif_list, list) {
  568. if (hard_iface->if_status != IF_ACTIVE)
  569. continue;
  570. if (hard_iface->soft_iface != if_incoming->soft_iface)
  571. continue;
  572. if (compare_eth(ethhdr->h_source,
  573. hard_iface->net_dev->dev_addr))
  574. is_my_addr = 1;
  575. if (compare_eth(batman_packet->orig,
  576. hard_iface->net_dev->dev_addr))
  577. is_my_orig = 1;
  578. if (compare_eth(batman_packet->prev_sender,
  579. hard_iface->net_dev->dev_addr))
  580. is_my_oldorig = 1;
  581. if (is_broadcast_ether_addr(ethhdr->h_source))
  582. is_broadcast = 1;
  583. }
  584. rcu_read_unlock();
  585. if (batman_packet->version != COMPAT_VERSION) {
  586. bat_dbg(DBG_BATMAN, bat_priv,
  587. "Drop packet: incompatible batman version (%i)\n",
  588. batman_packet->version);
  589. return;
  590. }
  591. if (is_my_addr) {
  592. bat_dbg(DBG_BATMAN, bat_priv,
  593. "Drop packet: received my own broadcast (sender: %pM"
  594. ")\n",
  595. ethhdr->h_source);
  596. return;
  597. }
  598. if (is_broadcast) {
  599. bat_dbg(DBG_BATMAN, bat_priv, "Drop packet: "
  600. "ignoring all packets with broadcast source addr (sender: %pM"
  601. ")\n", ethhdr->h_source);
  602. return;
  603. }
  604. if (is_my_orig) {
  605. unsigned long *word;
  606. int offset;
  607. orig_neigh_node = get_orig_node(bat_priv, ethhdr->h_source);
  608. if (!orig_neigh_node)
  609. return;
  610. /* neighbor has to indicate direct link and it has to
  611. * come via the corresponding interface */
  612. /* save packet seqno for bidirectional check */
  613. if (has_directlink_flag &&
  614. compare_eth(if_incoming->net_dev->dev_addr,
  615. batman_packet->orig)) {
  616. offset = if_incoming->if_num * NUM_WORDS;
  617. spin_lock_bh(&orig_neigh_node->ogm_cnt_lock);
  618. word = &(orig_neigh_node->bcast_own[offset]);
  619. bit_mark(word,
  620. if_incoming_seqno - batman_packet->seqno - 2);
  621. orig_neigh_node->bcast_own_sum[if_incoming->if_num] =
  622. bit_packet_count(word);
  623. spin_unlock_bh(&orig_neigh_node->ogm_cnt_lock);
  624. }
  625. bat_dbg(DBG_BATMAN, bat_priv, "Drop packet: "
  626. "originator packet from myself (via neighbor)\n");
  627. orig_node_free_ref(orig_neigh_node);
  628. return;
  629. }
  630. if (is_my_oldorig) {
  631. bat_dbg(DBG_BATMAN, bat_priv,
  632. "Drop packet: ignoring all rebroadcast echos (sender: "
  633. "%pM)\n", ethhdr->h_source);
  634. return;
  635. }
  636. orig_node = get_orig_node(bat_priv, batman_packet->orig);
  637. if (!orig_node)
  638. return;
  639. is_duplicate = count_real_packets(ethhdr, batman_packet, if_incoming);
  640. if (is_duplicate == -1) {
  641. bat_dbg(DBG_BATMAN, bat_priv,
  642. "Drop packet: packet within seqno protection time "
  643. "(sender: %pM)\n", ethhdr->h_source);
  644. goto out;
  645. }
  646. if (batman_packet->tq == 0) {
  647. bat_dbg(DBG_BATMAN, bat_priv,
  648. "Drop packet: originator packet with tq equal 0\n");
  649. goto out;
  650. }
  651. router = orig_node_get_router(orig_node);
  652. if (router)
  653. router_router = orig_node_get_router(router->orig_node);
  654. /* avoid temporary routing loops */
  655. if (router && router_router &&
  656. (compare_eth(router->addr, batman_packet->prev_sender)) &&
  657. !(compare_eth(batman_packet->orig, batman_packet->prev_sender)) &&
  658. (compare_eth(router->addr, router_router->addr))) {
  659. bat_dbg(DBG_BATMAN, bat_priv,
  660. "Drop packet: ignoring all rebroadcast packets that "
  661. "may make me loop (sender: %pM)\n", ethhdr->h_source);
  662. goto out;
  663. }
  664. /* if sender is a direct neighbor the sender mac equals
  665. * originator mac */
  666. orig_neigh_node = (is_single_hop_neigh ?
  667. orig_node :
  668. get_orig_node(bat_priv, ethhdr->h_source));
  669. if (!orig_neigh_node)
  670. goto out;
  671. orig_neigh_router = orig_node_get_router(orig_neigh_node);
  672. /* drop packet if sender is not a direct neighbor and if we
  673. * don't route towards it */
  674. if (!is_single_hop_neigh && (!orig_neigh_router)) {
  675. bat_dbg(DBG_BATMAN, bat_priv,
  676. "Drop packet: OGM via unknown neighbor!\n");
  677. goto out_neigh;
  678. }
  679. is_bidirectional = is_bidirectional_neigh(orig_node, orig_neigh_node,
  680. batman_packet, if_incoming);
  681. bonding_save_primary(orig_node, orig_neigh_node, batman_packet);
  682. /* update ranking if it is not a duplicate or has the same
  683. * seqno and similar ttl as the non-duplicate */
  684. if (is_bidirectional &&
  685. (!is_duplicate ||
  686. ((orig_node->last_real_seqno == batman_packet->seqno) &&
  687. (orig_node->last_ttl - 3 <= batman_packet->ttl))))
  688. update_orig(bat_priv, orig_node, ethhdr, batman_packet,
  689. if_incoming, tt_buff, is_duplicate);
  690. /* is single hop (direct) neighbor */
  691. if (is_single_hop_neigh) {
  692. /* mark direct link on incoming interface */
  693. schedule_forward_packet(orig_node, ethhdr, batman_packet,
  694. 1, if_incoming);
  695. bat_dbg(DBG_BATMAN, bat_priv, "Forwarding packet: "
  696. "rebroadcast neighbor packet with direct link flag\n");
  697. goto out_neigh;
  698. }
  699. /* multihop originator */
  700. if (!is_bidirectional) {
  701. bat_dbg(DBG_BATMAN, bat_priv,
  702. "Drop packet: not received via bidirectional link\n");
  703. goto out_neigh;
  704. }
  705. if (is_duplicate) {
  706. bat_dbg(DBG_BATMAN, bat_priv,
  707. "Drop packet: duplicate packet received\n");
  708. goto out_neigh;
  709. }
  710. bat_dbg(DBG_BATMAN, bat_priv,
  711. "Forwarding packet: rebroadcast originator packet\n");
  712. schedule_forward_packet(orig_node, ethhdr, batman_packet,
  713. 0, if_incoming);
  714. out_neigh:
  715. if ((orig_neigh_node) && (!is_single_hop_neigh))
  716. orig_node_free_ref(orig_neigh_node);
  717. out:
  718. if (router)
  719. neigh_node_free_ref(router);
  720. if (router_router)
  721. neigh_node_free_ref(router_router);
  722. if (orig_neigh_router)
  723. neigh_node_free_ref(orig_neigh_router);
  724. orig_node_free_ref(orig_node);
  725. }
  726. int recv_bat_packet(struct sk_buff *skb, struct hard_iface *hard_iface)
  727. {
  728. struct ethhdr *ethhdr;
  729. /* drop packet if it has not necessary minimum size */
  730. if (unlikely(!pskb_may_pull(skb, sizeof(struct batman_packet))))
  731. return NET_RX_DROP;
  732. ethhdr = (struct ethhdr *)skb_mac_header(skb);
  733. /* packet with broadcast indication but unicast recipient */
  734. if (!is_broadcast_ether_addr(ethhdr->h_dest))
  735. return NET_RX_DROP;
  736. /* packet with broadcast sender address */
  737. if (is_broadcast_ether_addr(ethhdr->h_source))
  738. return NET_RX_DROP;
  739. /* create a copy of the skb, if needed, to modify it. */
  740. if (skb_cow(skb, 0) < 0)
  741. return NET_RX_DROP;
  742. /* keep skb linear */
  743. if (skb_linearize(skb) < 0)
  744. return NET_RX_DROP;
  745. ethhdr = (struct ethhdr *)skb_mac_header(skb);
  746. receive_aggr_bat_packet(ethhdr,
  747. skb->data,
  748. skb_headlen(skb),
  749. hard_iface);
  750. kfree_skb(skb);
  751. return NET_RX_SUCCESS;
  752. }
/* Process an ICMP packet whose final destination is this node.
 *
 * Anything that is not an ECHO_REQUEST is handed to the batman socket
 * layer for delivery to userspace.  ECHO_REQUESTs are turned into
 * ECHO_REPLYs in place (dst/orig swapped, ttl reset) and sent back
 * towards the originator via its current router.
 *
 * Returns NET_RX_SUCCESS when the reply was handed to the transmit
 * path, NET_RX_DROP otherwise (the caller frees the skb on drop).
 */
static int recv_my_icmp_packet(struct bat_priv *bat_priv,
			       struct sk_buff *skb, size_t icmp_len)
{
	struct hard_iface *primary_if = NULL;
	struct orig_node *orig_node = NULL;
	struct neigh_node *router = NULL;
	struct icmp_packet_rr *icmp_packet;
	int ret = NET_RX_DROP;

	icmp_packet = (struct icmp_packet_rr *)skb->data;

	/* add data to device queue */
	if (icmp_packet->msg_type != ECHO_REQUEST) {
		bat_socket_receive_packet(icmp_packet, icmp_len);
		goto out;
	}

	primary_if = primary_if_get_selected(bat_priv);
	if (!primary_if)
		goto out;

	/* answer echo request (ping) */
	/* get routing information */
	orig_node = orig_hash_find(bat_priv, icmp_packet->orig);
	if (!orig_node)
		goto out;

	router = orig_node_get_router(orig_node);
	if (!router)
		goto out;

	/* create a copy of the skb, if needed, to modify it. */
	if (skb_cow(skb, sizeof(struct ethhdr)) < 0)
		goto out;

	/* skb_cow() may have reallocated the data buffer */
	icmp_packet = (struct icmp_packet_rr *)skb->data;

	/* bounce the request back as a reply: swap addresses,
	 * flip the message type and reset the ttl */
	memcpy(icmp_packet->dst, icmp_packet->orig, ETH_ALEN);
	memcpy(icmp_packet->orig, primary_if->net_dev->dev_addr, ETH_ALEN);
	icmp_packet->msg_type = ECHO_REPLY;
	icmp_packet->ttl = TTL;

	send_skb_packet(skb, router->if_incoming, router->addr);
	ret = NET_RX_SUCCESS;

out:
	/* drop the references taken above, in any order */
	if (primary_if)
		hardif_free_ref(primary_if);
	if (router)
		neigh_node_free_ref(router);
	if (orig_node)
		orig_node_free_ref(orig_node);
	return ret;
}
/* Generate a TTL_EXCEEDED notification for an ICMP echo request whose
 * ttl ran out while being forwarded (traceroute support).
 *
 * The packet is rewritten in place - dst/orig swapped, message type
 * set to TTL_EXCEEDED, ttl reset - and sent back towards the original
 * sender via its current router.
 *
 * Returns NET_RX_SUCCESS when the notification was handed to the
 * transmit path, NET_RX_DROP otherwise (the caller frees the skb).
 */
static int recv_icmp_ttl_exceeded(struct bat_priv *bat_priv,
				  struct sk_buff *skb)
{
	struct hard_iface *primary_if = NULL;
	struct orig_node *orig_node = NULL;
	struct neigh_node *router = NULL;
	struct icmp_packet *icmp_packet;
	int ret = NET_RX_DROP;

	icmp_packet = (struct icmp_packet *)skb->data;

	/* send TTL exceeded if packet is an echo request (traceroute) */
	if (icmp_packet->msg_type != ECHO_REQUEST) {
		pr_debug("Warning - can't forward icmp packet from %pM to "
			 "%pM: ttl exceeded\n", icmp_packet->orig,
			 icmp_packet->dst);
		goto out;
	}

	primary_if = primary_if_get_selected(bat_priv);
	if (!primary_if)
		goto out;

	/* get routing information */
	orig_node = orig_hash_find(bat_priv, icmp_packet->orig);
	if (!orig_node)
		goto out;

	router = orig_node_get_router(orig_node);
	if (!router)
		goto out;

	/* create a copy of the skb, if needed, to modify it. */
	if (skb_cow(skb, sizeof(struct ethhdr)) < 0)
		goto out;

	/* skb_cow() may have reallocated the data buffer */
	icmp_packet = (struct icmp_packet *)skb->data;

	/* rewrite the packet into the error notification */
	memcpy(icmp_packet->dst, icmp_packet->orig, ETH_ALEN);
	memcpy(icmp_packet->orig, primary_if->net_dev->dev_addr, ETH_ALEN);
	icmp_packet->msg_type = TTL_EXCEEDED;
	icmp_packet->ttl = TTL;

	send_skb_packet(skb, router->if_incoming, router->addr);
	ret = NET_RX_SUCCESS;

out:
	/* release all references taken above */
	if (primary_if)
		hardif_free_ref(primary_if);
	if (router)
		neigh_node_free_ref(router);
	if (orig_node)
		orig_node_free_ref(orig_node);
	return ret;
}
  842. int recv_icmp_packet(struct sk_buff *skb, struct hard_iface *recv_if)
  843. {
  844. struct bat_priv *bat_priv = netdev_priv(recv_if->soft_iface);
  845. struct icmp_packet_rr *icmp_packet;
  846. struct ethhdr *ethhdr;
  847. struct orig_node *orig_node = NULL;
  848. struct neigh_node *router = NULL;
  849. int hdr_size = sizeof(struct icmp_packet);
  850. int ret = NET_RX_DROP;
  851. /**
  852. * we truncate all incoming icmp packets if they don't match our size
  853. */
  854. if (skb->len >= sizeof(struct icmp_packet_rr))
  855. hdr_size = sizeof(struct icmp_packet_rr);
  856. /* drop packet if it has not necessary minimum size */
  857. if (unlikely(!pskb_may_pull(skb, hdr_size)))
  858. goto out;
  859. ethhdr = (struct ethhdr *)skb_mac_header(skb);
  860. /* packet with unicast indication but broadcast recipient */
  861. if (is_broadcast_ether_addr(ethhdr->h_dest))
  862. goto out;
  863. /* packet with broadcast sender address */
  864. if (is_broadcast_ether_addr(ethhdr->h_source))
  865. goto out;
  866. /* not for me */
  867. if (!is_my_mac(ethhdr->h_dest))
  868. goto out;
  869. icmp_packet = (struct icmp_packet_rr *)skb->data;
  870. /* add record route information if not full */
  871. if ((hdr_size == sizeof(struct icmp_packet_rr)) &&
  872. (icmp_packet->rr_cur < BAT_RR_LEN)) {
  873. memcpy(&(icmp_packet->rr[icmp_packet->rr_cur]),
  874. ethhdr->h_dest, ETH_ALEN);
  875. icmp_packet->rr_cur++;
  876. }
  877. /* packet for me */
  878. if (is_my_mac(icmp_packet->dst))
  879. return recv_my_icmp_packet(bat_priv, skb, hdr_size);
  880. /* TTL exceeded */
  881. if (icmp_packet->ttl < 2)
  882. return recv_icmp_ttl_exceeded(bat_priv, skb);
  883. /* get routing information */
  884. orig_node = orig_hash_find(bat_priv, icmp_packet->dst);
  885. if (!orig_node)
  886. goto out;
  887. router = orig_node_get_router(orig_node);
  888. if (!router)
  889. goto out;
  890. /* create a copy of the skb, if needed, to modify it. */
  891. if (skb_cow(skb, sizeof(struct ethhdr)) < 0)
  892. goto out;
  893. icmp_packet = (struct icmp_packet_rr *)skb->data;
  894. /* decrement ttl */
  895. icmp_packet->ttl--;
  896. /* route it */
  897. send_skb_packet(skb, router->if_incoming, router->addr);
  898. ret = NET_RX_SUCCESS;
  899. out:
  900. if (router)
  901. neigh_node_free_ref(router);
  902. if (orig_node)
  903. orig_node_free_ref(orig_node);
  904. return ret;
  905. }
/* In the bonding case, send the packets in a round
 * robin fashion over the remaining interfaces.
 *
 * This method rotates the bonding list and increases the
 * returned router's refcount. */
static struct neigh_node *find_bond_router(struct orig_node *primary_orig,
					   const struct hard_iface *recv_if)
{
	struct neigh_node *tmp_neigh_node;
	struct neigh_node *router = NULL, *first_candidate = NULL;

	rcu_read_lock();
	list_for_each_entry_rcu(tmp_neigh_node, &primary_orig->bond_list,
				bonding_list) {
		/* remember the head of the candidate list as fallback */
		if (!first_candidate)
			first_candidate = tmp_neigh_node;

		/* recv_if == NULL on the first node. */
		if (tmp_neigh_node->if_incoming == recv_if)
			continue;

		/* skip neighbors whose refcount already dropped to zero
		 * (they are about to be freed) */
		if (!atomic_inc_not_zero(&tmp_neigh_node->refcount))
			continue;

		/* take the first acceptable candidate (round robin) */
		router = tmp_neigh_node;
		break;
	}

	/* use the first candidate if nothing was found. */
	if (!router && first_candidate &&
	    atomic_inc_not_zero(&first_candidate->refcount))
		router = first_candidate;

	if (!router)
		goto out;

	/* selected should point to the next element
	 * after the current router */
	spin_lock_bh(&primary_orig->neigh_list_lock);

	/* this is a list_move(), which unfortunately
	 * does not exist as rcu version */
	list_del_rcu(&primary_orig->bond_list);
	list_add_rcu(&primary_orig->bond_list,
		     &router->bonding_list);
	spin_unlock_bh(&primary_orig->neigh_list_lock);

out:
	rcu_read_unlock();
	return router;
}
/* Interface Alternating: Use the best of the
 * remaining candidates which are not using
 * this interface.
 *
 * Increases the returned router's refcount */
static struct neigh_node *find_ifalter_router(struct orig_node *primary_orig,
					      const struct hard_iface *recv_if)
{
	struct neigh_node *tmp_neigh_node;
	struct neigh_node *router = NULL, *first_candidate = NULL;

	rcu_read_lock();
	list_for_each_entry_rcu(tmp_neigh_node, &primary_orig->bond_list,
				bonding_list) {
		/* remember the head of the candidate list as fallback */
		if (!first_candidate)
			first_candidate = tmp_neigh_node;

		/* recv_if == NULL on the first node. */
		if (tmp_neigh_node->if_incoming == recv_if)
			continue;

		/* skip neighbors that are about to be freed */
		if (!atomic_inc_not_zero(&tmp_neigh_node->refcount))
			continue;

		/* if we don't have a router yet
		 * or this one is better, choose it. */
		if ((!router) ||
		    (tmp_neigh_node->tq_avg > router->tq_avg)) {
			/* decrement refcount of
			 * previously selected router */
			if (router)
				neigh_node_free_ref(router);

			router = tmp_neigh_node;
			/* hold an extra reference for the selection; the
			 * loop-iteration reference is dropped just below */
			atomic_inc_not_zero(&router->refcount);
		}

		neigh_node_free_ref(tmp_neigh_node);
	}

	/* use the first candidate if nothing was found. */
	if (!router && first_candidate &&
	    atomic_inc_not_zero(&first_candidate->refcount))
		router = first_candidate;

	rcu_read_unlock();
	return router;
}
/* Receive path for translation-table queries (TT_REQUEST/TT_RESPONSE).
 *
 * Requests we can answer and responses addressed to us are consumed
 * locally; everything else is routed on towards its destination via
 * route_unicast_packet().
 *
 * Returns NET_RX_DROP when the skb was consumed locally or is invalid
 * (the caller frees it), otherwise the result of the forwarding.
 */
int recv_tt_query(struct sk_buff *skb, struct hard_iface *recv_if)
{
	struct bat_priv *bat_priv = netdev_priv(recv_if->soft_iface);
	struct tt_query_packet *tt_query;
	struct ethhdr *ethhdr;

	/* drop packet if it has not necessary minimum size */
	if (unlikely(!pskb_may_pull(skb, sizeof(struct tt_query_packet))))
		goto out;

	/* I could need to modify it */
	if (skb_cow(skb, sizeof(struct tt_query_packet)) < 0)
		goto out;

	ethhdr = (struct ethhdr *)skb_mac_header(skb);

	/* packet with unicast indication but broadcast recipient */
	if (is_broadcast_ether_addr(ethhdr->h_dest))
		goto out;

	/* packet with broadcast sender address */
	if (is_broadcast_ether_addr(ethhdr->h_source))
		goto out;

	tt_query = (struct tt_query_packet *)skb->data;

	/* tt_data travels in network byte order; convert to host order
	 * for local processing and back before forwarding */
	tt_query->tt_data = ntohs(tt_query->tt_data);

	switch (tt_query->flags & TT_QUERY_TYPE_MASK) {
	case TT_REQUEST:
		/* If we cannot provide an answer the tt_request is
		 * forwarded */
		if (!send_tt_response(bat_priv, tt_query)) {
			bat_dbg(DBG_TT, bat_priv,
				"Routing TT_REQUEST to %pM [%c]\n",
				tt_query->dst,
				(tt_query->flags & TT_FULL_TABLE ? 'F' : '.'));
			/* restore network byte order before forwarding */
			tt_query->tt_data = htons(tt_query->tt_data);
			return route_unicast_packet(skb, recv_if);
		}
		break;
	case TT_RESPONSE:
		/* packet needs to be linearised to access the TT changes */
		if (skb_linearize(skb) < 0)
			goto out;

		if (is_my_mac(tt_query->dst))
			handle_tt_response(bat_priv, tt_query);
		else {
			bat_dbg(DBG_TT, bat_priv,
				"Routing TT_RESPONSE to %pM [%c]\n",
				tt_query->dst,
				(tt_query->flags & TT_FULL_TABLE ? 'F' : '.'));
			/* restore network byte order before forwarding */
			tt_query->tt_data = htons(tt_query->tt_data);
			return route_unicast_packet(skb, recv_if);
		}
		break;
	}

out:
	/* returning NET_RX_DROP will make the caller function kfree the skb */
	return NET_RX_DROP;
}
/* Receive path for roaming advertisements.
 *
 * A roaming advertisement announces that a client moved to another
 * originator.  If the advertisement is addressed to us, the client is
 * added to the global translation table with the originator's next
 * ttvn, and tt_poss_change is set so that incoming unicast packets
 * are re-checked for the correct destination until the ttvn catches
 * up.  Advertisements for other nodes are forwarded.
 *
 * Returns NET_RX_DROP when consumed locally or invalid (the caller
 * frees the skb), otherwise the result of the forwarding.
 */
int recv_roam_adv(struct sk_buff *skb, struct hard_iface *recv_if)
{
	struct bat_priv *bat_priv = netdev_priv(recv_if->soft_iface);
	struct roam_adv_packet *roam_adv_packet;
	struct orig_node *orig_node;
	struct ethhdr *ethhdr;

	/* drop packet if it has not necessary minimum size */
	if (unlikely(!pskb_may_pull(skb, sizeof(struct roam_adv_packet))))
		goto out;

	ethhdr = (struct ethhdr *)skb_mac_header(skb);

	/* packet with unicast indication but broadcast recipient */
	if (is_broadcast_ether_addr(ethhdr->h_dest))
		goto out;

	/* packet with broadcast sender address */
	if (is_broadcast_ether_addr(ethhdr->h_source))
		goto out;

	roam_adv_packet = (struct roam_adv_packet *)skb->data;

	/* not addressed to us: forward towards its destination */
	if (!is_my_mac(roam_adv_packet->dst))
		return route_unicast_packet(skb, recv_if);

	orig_node = orig_hash_find(bat_priv, roam_adv_packet->src);
	if (!orig_node)
		goto out;

	bat_dbg(DBG_TT, bat_priv, "Received ROAMING_ADV from %pM "
		"(client %pM)\n", roam_adv_packet->src,
		roam_adv_packet->client);

	tt_global_add(bat_priv, orig_node, roam_adv_packet->client,
		      atomic_read(&orig_node->last_ttvn) + 1, true);

	/* Roaming phase starts: I have new information but the ttvn has not
	 * been incremented yet. This flag will make me check all the incoming
	 * packets for the correct destination. */
	bat_priv->tt_poss_change = true;

	orig_node_free_ref(orig_node);
out:
	/* returning NET_RX_DROP will make the caller function kfree the skb */
	return NET_RX_DROP;
}
/* find a suitable router for this originator, and use
 * bonding if possible. increases the found neighbors
 * refcount.*/
struct neigh_node *find_router(struct bat_priv *bat_priv,
			       struct orig_node *orig_node,
			       const struct hard_iface *recv_if)
{
	struct orig_node *primary_orig_node;
	struct orig_node *router_orig;
	struct neigh_node *router;
	static uint8_t zero_mac[ETH_ALEN] = {0, 0, 0, 0, 0, 0};
	int bonding_enabled;

	if (!orig_node)
		return NULL;

	router = orig_node_get_router(orig_node);
	if (!router)
		goto err;

	/* without bonding, the first node should
	 * always choose the default router. */
	bonding_enabled = atomic_read(&bat_priv->bonding);

	rcu_read_lock();
	/* select default router to output */
	router_orig = router->orig_node;
	if (!router_orig)
		goto err_unlock;

	/* locally generated packet (recv_if == NULL) without bonding:
	 * take the default router */
	if ((!recv_if) && (!bonding_enabled))
		goto return_router;

	/* if we have something in the primary_addr, we can search
	 * for a potential bonding candidate. */
	if (compare_eth(router_orig->primary_addr, zero_mac))
		goto return_router;

	/* find the orig_node which has the primary interface. might
	 * even be the same as our router_orig in many cases */
	if (compare_eth(router_orig->primary_addr, router_orig->orig)) {
		primary_orig_node = router_orig;
	} else {
		primary_orig_node = orig_hash_find(bat_priv,
						   router_orig->primary_addr);
		if (!primary_orig_node)
			goto return_router;

		/* drop the hash reference right away; primary_orig_node is
		 * only used under the rcu read lock below */
		orig_node_free_ref(primary_orig_node);
	}

	/* with less than 2 candidates, we can't do any
	 * bonding and prefer the original router. */
	if (atomic_read(&primary_orig_node->bond_candidates) < 2)
		goto return_router;

	/* all nodes between should choose a candidate which
	 * is is not on the interface where the packet came
	 * in. */
	neigh_node_free_ref(router);

	/* both helpers return a neighbor with an increased refcount */
	if (bonding_enabled)
		router = find_bond_router(primary_orig_node, recv_if);
	else
		router = find_ifalter_router(primary_orig_node, recv_if);

return_router:
	/* never return a router whose interface is not active */
	if (router && router->if_incoming->if_status != IF_ACTIVE)
		goto err_unlock;

	rcu_read_unlock();
	return router;
err_unlock:
	rcu_read_unlock();
err:
	if (router)
		neigh_node_free_ref(router);
	return NULL;
}
  1143. static int check_unicast_packet(struct sk_buff *skb, int hdr_size)
  1144. {
  1145. struct ethhdr *ethhdr;
  1146. /* drop packet if it has not necessary minimum size */
  1147. if (unlikely(!pskb_may_pull(skb, hdr_size)))
  1148. return -1;
  1149. ethhdr = (struct ethhdr *)skb_mac_header(skb);
  1150. /* packet with unicast indication but broadcast recipient */
  1151. if (is_broadcast_ether_addr(ethhdr->h_dest))
  1152. return -1;
  1153. /* packet with broadcast sender address */
  1154. if (is_broadcast_ether_addr(ethhdr->h_source))
  1155. return -1;
  1156. /* not for me */
  1157. if (!is_my_mac(ethhdr->h_dest))
  1158. return -1;
  1159. return 0;
  1160. }
/* Forward a unicast packet one hop towards its destination.
 *
 * Handles ttl expiry, on-the-fly fragmentation when the next hop's
 * MTU is too small, and reassembly of fragments that can be merged
 * here before forwarding.
 *
 * Returns NET_RX_SUCCESS when the skb was handed to the transmit path
 * (or buffered for a later fragment merge), NET_RX_DROP otherwise
 * (the caller frees the skb).
 */
int route_unicast_packet(struct sk_buff *skb, struct hard_iface *recv_if)
{
	struct bat_priv *bat_priv = netdev_priv(recv_if->soft_iface);
	struct orig_node *orig_node = NULL;
	struct neigh_node *neigh_node = NULL;
	struct unicast_packet *unicast_packet;
	struct ethhdr *ethhdr = (struct ethhdr *)skb_mac_header(skb);
	int ret = NET_RX_DROP;
	struct sk_buff *new_skb;

	unicast_packet = (struct unicast_packet *)skb->data;

	/* TTL exceeded */
	if (unicast_packet->ttl < 2) {
		pr_debug("Warning - can't forward unicast packet from %pM to "
			 "%pM: ttl exceeded\n", ethhdr->h_source,
			 unicast_packet->dest);
		goto out;
	}

	/* get routing information */
	orig_node = orig_hash_find(bat_priv, unicast_packet->dest);
	if (!orig_node)
		goto out;

	/* find_router() increases neigh_nodes refcount if found. */
	neigh_node = find_router(bat_priv, orig_node, recv_if);
	if (!neigh_node)
		goto out;

	/* create a copy of the skb, if needed, to modify it. */
	if (skb_cow(skb, sizeof(struct ethhdr)) < 0)
		goto out;

	/* skb_cow() may have reallocated the data buffer */
	unicast_packet = (struct unicast_packet *)skb->data;

	/* packet too big for the next hop's MTU: fragment it
	 * (frag_send_skb() takes over the skb) */
	if (unicast_packet->packet_type == BAT_UNICAST &&
	    atomic_read(&bat_priv->fragmentation) &&
	    skb->len > neigh_node->if_incoming->net_dev->mtu) {
		ret = frag_send_skb(skb, bat_priv,
				    neigh_node->if_incoming, neigh_node->addr);
		goto out;
	}

	/* fragment that fits the outgoing MTU after merging: try to
	 * reassemble it here instead of forwarding the halves */
	if (unicast_packet->packet_type == BAT_UNICAST_FRAG &&
	    frag_can_reassemble(skb, neigh_node->if_incoming->net_dev->mtu)) {

		ret = frag_reassemble_skb(skb, bat_priv, &new_skb);

		if (ret == NET_RX_DROP)
			goto out;

		/* packet was buffered for late merge */
		if (!new_skb) {
			ret = NET_RX_SUCCESS;
			goto out;
		}

		skb = new_skb;
		unicast_packet = (struct unicast_packet *)skb->data;
	}

	/* decrement ttl */
	unicast_packet->ttl--;

	/* route it */
	send_skb_packet(skb, neigh_node->if_incoming, neigh_node->addr);
	ret = NET_RX_SUCCESS;

out:
	if (neigh_node)
		neigh_node_free_ref(neigh_node);
	if (orig_node)
		orig_node_free_ref(orig_node);
	return ret;
}
/* Verify the translation-table version number (ttvn) carried in a
 * unicast packet and re-route the packet if it is stale.
 *
 * If the packet's ttvn is older than the one currently known for the
 * destination (ours if we are the destination, the originator's
 * otherwise), or a roaming phase is in progress (tt_poss_change), the
 * destination client is looked up again in the global translation
 * table and unicast_packet->dest is rewritten accordingly.
 *
 * Returns 1 when the packet may be processed further, 0 when the
 * caller has to drop it.
 */
static int check_unicast_ttvn(struct bat_priv *bat_priv,
			      struct sk_buff *skb) {
	uint8_t curr_ttvn;
	struct orig_node *orig_node;
	struct ethhdr *ethhdr;
	struct hard_iface *primary_if;
	struct unicast_packet *unicast_packet;
	bool tt_poss_change;

	/* I could need to modify it */
	if (skb_cow(skb, sizeof(struct unicast_packet)) < 0)
		return 0;

	unicast_packet = (struct unicast_packet *)skb->data;

	if (is_my_mac(unicast_packet->dest)) {
		/* we are the destination: compare against our own ttvn */
		tt_poss_change = bat_priv->tt_poss_change;
		curr_ttvn = (uint8_t)atomic_read(&bat_priv->ttvn);
	} else {
		/* compare against the last ttvn announced by the
		 * destination originator */
		orig_node = orig_hash_find(bat_priv, unicast_packet->dest);

		if (!orig_node)
			return 0;

		curr_ttvn = (uint8_t)atomic_read(&orig_node->last_ttvn);
		tt_poss_change = orig_node->tt_poss_change;
		orig_node_free_ref(orig_node);
	}

	/* Check whether I have to reroute the packet */
	if (seq_before(unicast_packet->ttvn, curr_ttvn) || tt_poss_change) {
		/* Linearize the skb before accessing it */
		if (skb_linearize(skb) < 0)
			return 0;

		/* the payload's inner ethernet header follows directly
		 * after the batman unicast header */
		ethhdr = (struct ethhdr *)(skb->data +
				sizeof(struct unicast_packet));
		orig_node = transtable_search(bat_priv, ethhdr->h_dest);

		if (!orig_node) {
			/* client not known globally: accept it only if it
			 * is one of our own local clients, and address the
			 * packet to ourselves */
			if (!is_my_client(bat_priv, ethhdr->h_dest))
				return 0;
			primary_if = primary_if_get_selected(bat_priv);
			if (!primary_if)
				return 0;
			memcpy(unicast_packet->dest,
			       primary_if->net_dev->dev_addr, ETH_ALEN);
			hardif_free_ref(primary_if);
		} else {
			/* redirect towards the originator now serving
			 * the client */
			memcpy(unicast_packet->dest, orig_node->orig,
			       ETH_ALEN);
			curr_ttvn = (uint8_t)
				atomic_read(&orig_node->last_ttvn);
			orig_node_free_ref(orig_node);
		}

		bat_dbg(DBG_ROUTES, bat_priv, "TTVN mismatch (old_ttvn %u "
			"new_ttvn %u)! Rerouting unicast packet (for %pM) to "
			"%pM\n", unicast_packet->ttvn, curr_ttvn,
			ethhdr->h_dest, unicast_packet->dest);

		unicast_packet->ttvn = curr_ttvn;
	}
	return 1;
}
  1277. int recv_unicast_packet(struct sk_buff *skb, struct hard_iface *recv_if)
  1278. {
  1279. struct bat_priv *bat_priv = netdev_priv(recv_if->soft_iface);
  1280. struct unicast_packet *unicast_packet;
  1281. int hdr_size = sizeof(*unicast_packet);
  1282. if (check_unicast_packet(skb, hdr_size) < 0)
  1283. return NET_RX_DROP;
  1284. if (!check_unicast_ttvn(bat_priv, skb))
  1285. return NET_RX_DROP;
  1286. unicast_packet = (struct unicast_packet *)skb->data;
  1287. /* packet for me */
  1288. if (is_my_mac(unicast_packet->dest)) {
  1289. interface_rx(recv_if->soft_iface, skb, recv_if, hdr_size);
  1290. return NET_RX_SUCCESS;
  1291. }
  1292. return route_unicast_packet(skb, recv_if);
  1293. }
  1294. int recv_ucast_frag_packet(struct sk_buff *skb, struct hard_iface *recv_if)
  1295. {
  1296. struct bat_priv *bat_priv = netdev_priv(recv_if->soft_iface);
  1297. struct unicast_frag_packet *unicast_packet;
  1298. int hdr_size = sizeof(*unicast_packet);
  1299. struct sk_buff *new_skb = NULL;
  1300. int ret;
  1301. if (check_unicast_packet(skb, hdr_size) < 0)
  1302. return NET_RX_DROP;
  1303. if (!check_unicast_ttvn(bat_priv, skb))
  1304. return NET_RX_DROP;
  1305. unicast_packet = (struct unicast_frag_packet *)skb->data;
  1306. /* packet for me */
  1307. if (is_my_mac(unicast_packet->dest)) {
  1308. ret = frag_reassemble_skb(skb, bat_priv, &new_skb);
  1309. if (ret == NET_RX_DROP)
  1310. return NET_RX_DROP;
  1311. /* packet was buffered for late merge */
  1312. if (!new_skb)
  1313. return NET_RX_SUCCESS;
  1314. interface_rx(recv_if->soft_iface, new_skb, recv_if,
  1315. sizeof(struct unicast_packet));
  1316. return NET_RX_SUCCESS;
  1317. }
  1318. return route_unicast_packet(skb, recv_if);
  1319. }
/* Receive path for batman broadcast packets.
 *
 * Validates the frame, filters duplicates and too-old sequence
 * numbers via the per-originator broadcast seqno window, then
 * rebroadcasts the packet and delivers a copy locally.
 *
 * Returns NET_RX_SUCCESS when the packet was accepted, NET_RX_DROP
 * otherwise (the caller frees the skb on drop).
 */
int recv_bcast_packet(struct sk_buff *skb, struct hard_iface *recv_if)
{
	struct bat_priv *bat_priv = netdev_priv(recv_if->soft_iface);
	struct orig_node *orig_node = NULL;
	struct bcast_packet *bcast_packet;
	struct ethhdr *ethhdr;
	int hdr_size = sizeof(*bcast_packet);
	int ret = NET_RX_DROP;
	int32_t seq_diff;

	/* drop packet if it has not necessary minimum size */
	if (unlikely(!pskb_may_pull(skb, hdr_size)))
		goto out;

	ethhdr = (struct ethhdr *)skb_mac_header(skb);

	/* packet with broadcast indication but unicast recipient */
	if (!is_broadcast_ether_addr(ethhdr->h_dest))
		goto out;

	/* packet with broadcast sender address */
	if (is_broadcast_ether_addr(ethhdr->h_source))
		goto out;

	/* ignore broadcasts sent by myself */
	if (is_my_mac(ethhdr->h_source))
		goto out;

	bcast_packet = (struct bcast_packet *)skb->data;

	/* ignore broadcasts originated by myself */
	if (is_my_mac(bcast_packet->orig))
		goto out;

	/* drop when the ttl would expire at the next hop */
	if (bcast_packet->ttl < 2)
		goto out;

	orig_node = orig_hash_find(bat_priv, bcast_packet->orig);
	if (!orig_node)
		goto out;

	/* the seqno window state is shared, serialize access to it */
	spin_lock_bh(&orig_node->bcast_seqno_lock);

	/* check whether the packet is a duplicate */
	if (get_bit_status(orig_node->bcast_bits, orig_node->last_bcast_seqno,
			   ntohl(bcast_packet->seqno)))
		goto spin_unlock;

	seq_diff = ntohl(bcast_packet->seqno) - orig_node->last_bcast_seqno;

	/* check whether the packet is old and the host just restarted. */
	if (window_protected(bat_priv, seq_diff,
			     &orig_node->bcast_seqno_reset))
		goto spin_unlock;

	/* mark broadcast in flood history, update window position
	 * if required. */
	if (bit_get_packet(bat_priv, orig_node->bcast_bits, seq_diff, 1))
		orig_node->last_bcast_seqno = ntohl(bcast_packet->seqno);

	spin_unlock_bh(&orig_node->bcast_seqno_lock);

	/* rebroadcast packet */
	/* NOTE(review): the skb is also consumed by interface_rx() below,
	 * so this presumably queues a copy - verify against its callee */
	add_bcast_packet_to_list(bat_priv, skb, 1);

	/* broadcast for me */
	interface_rx(recv_if->soft_iface, skb, recv_if, hdr_size);

	ret = NET_RX_SUCCESS;
	goto out;

spin_unlock:
	spin_unlock_bh(&orig_node->bcast_seqno_lock);
out:
	if (orig_node)
		orig_node_free_ref(orig_node);
	return ret;
}
  1379. int recv_vis_packet(struct sk_buff *skb, struct hard_iface *recv_if)
  1380. {
  1381. struct vis_packet *vis_packet;
  1382. struct ethhdr *ethhdr;
  1383. struct bat_priv *bat_priv = netdev_priv(recv_if->soft_iface);
  1384. int hdr_size = sizeof(*vis_packet);
  1385. /* keep skb linear */
  1386. if (skb_linearize(skb) < 0)
  1387. return NET_RX_DROP;
  1388. if (unlikely(!pskb_may_pull(skb, hdr_size)))
  1389. return NET_RX_DROP;
  1390. vis_packet = (struct vis_packet *)skb->data;
  1391. ethhdr = (struct ethhdr *)skb_mac_header(skb);
  1392. /* not for me */
  1393. if (!is_my_mac(ethhdr->h_dest))
  1394. return NET_RX_DROP;
  1395. /* ignore own packets */
  1396. if (is_my_mac(vis_packet->vis_orig))
  1397. return NET_RX_DROP;
  1398. if (is_my_mac(vis_packet->sender_orig))
  1399. return NET_RX_DROP;
  1400. switch (vis_packet->vis_type) {
  1401. case VIS_TYPE_SERVER_SYNC:
  1402. receive_server_sync_packet(bat_priv, vis_packet,
  1403. skb_headlen(skb));
  1404. break;
  1405. case VIS_TYPE_CLIENT_UPDATE:
  1406. receive_client_update_packet(bat_priv, vis_packet,
  1407. skb_headlen(skb));
  1408. break;
  1409. default: /* ignore unknown packet */
  1410. break;
  1411. }
  1412. /* We take a copy of the data in the packet, so we should
  1413. always free the skbuf. */
  1414. return NET_RX_DROP;
  1415. }