routing.c

/*
 * Copyright (C) 2007-2011 B.A.T.M.A.N. contributors:
 *
 * Marek Lindner, Simon Wunderlich
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
 * 02110-1301, USA
 *
 */

#include "main.h"
#include "routing.h"
#include "send.h"
#include "hash.h"
#include "soft-interface.h"
#include "hard-interface.h"
#include "icmp_socket.h"
#include "translation-table.h"
#include "originator.h"
#include "ring_buffer.h"
#include "vis.h"
#include "aggregation.h"
#include "gateway_common.h"
#include "gateway_client.h"
#include "unicast.h"
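
/* advance the bcast_own packet window of the given hard interface by one
 * sequence number for every originator and refresh the corresponding
 * bcast_own_sum counters */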
void slide_own_bcast_window(struct hard_iface *hard_iface)
{
	struct bat_priv *bat_priv = netdev_priv(hard_iface->soft_iface);
	struct hashtable_t *hash = bat_priv->orig_hash;
	struct hlist_node *node;
	struct hlist_head *head;
	struct orig_node *orig_node;
	unsigned long *word;
	int i;
	size_t word_index;

	for (i = 0; i < hash->size; i++) {
		head = &hash->table[i];

		rcu_read_lock();
		hlist_for_each_entry_rcu(orig_node, node, head, hash_entry) {
			spin_lock_bh(&orig_node->ogm_cnt_lock);
			word_index = hard_iface->if_num * NUM_WORDS;
			word = &(orig_node->bcast_own[word_index]);

			bit_get_packet(bat_priv, word, 1, 0);
			orig_node->bcast_own_sum[hard_iface->if_num] =
				bit_packet_count(word);
			spin_unlock_bh(&orig_node->ogm_cnt_lock);
		}
		rcu_read_unlock();
	}
}
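
/* apply the translation table changes announced in an OGM: if exactly one
 * ttvn step was missed the attached changes are applied directly, otherwise
 * a tt_request is sent to resynchronise the global table */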
static void update_transtable(struct bat_priv *bat_priv,
			      struct orig_node *orig_node,
			      const unsigned char *tt_buff,
			      uint8_t tt_num_changes, uint8_t ttvn,
			      uint16_t tt_crc)
{
	uint8_t orig_ttvn = (uint8_t)atomic_read(&orig_node->last_ttvn);
	bool full_table = true;

	/* the ttvn increased by one -> we can apply the attached changes */
	if (ttvn - orig_ttvn == 1) {
		/* the OGM could not contain the changes because they were too
		 * many to fit in one frame or because they have already been
		 * sent TT_OGM_APPEND_MAX times. In this case send a tt
		 * request */
		if (!tt_num_changes) {
			full_table = false;
			goto request_table;
		}

		tt_update_changes(bat_priv, orig_node, tt_num_changes, ttvn,
				  (struct tt_change *)tt_buff);

		/* Even if we received the crc with the OGM, we prefer
		 * to recompute it to spot any possible inconsistency
		 * in the global table */
		orig_node->tt_crc = tt_global_crc(bat_priv, orig_node);

		/* Roaming phase is over: tables are in sync again. I can
		 * unset the flag */
		orig_node->tt_poss_change = false;
	} else {
		/* if we missed more than one change or our tables are not
		 * in sync anymore -> request fresh tt data */
		if (ttvn != orig_ttvn || orig_node->tt_crc != tt_crc) {
request_table:
			bat_dbg(DBG_TT, bat_priv, "TT inconsistency for %pM. "
				"Need to retrieve the correct information "
				"(ttvn: %u last_ttvn: %u crc: %u last_crc: "
				"%u num_changes: %u)\n", orig_node->orig, ttvn,
				orig_ttvn, tt_crc, orig_node->tt_crc,
				tt_num_changes);
			send_tt_request(bat_priv, orig_node, ttvn, tt_crc,
					full_table);
			return;
		}
	}
}
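
/* switch the best next hop of orig_node to neigh_node (or clear it when
 * neigh_node is NULL) while keeping the refcounts of the old and new
 * router balanced */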
static void update_route(struct bat_priv *bat_priv,
			 struct orig_node *orig_node,
			 struct neigh_node *neigh_node)
{
	struct neigh_node *curr_router;

	curr_router = orig_node_get_router(orig_node);

	/* route deleted */
	if ((curr_router) && (!neigh_node)) {
		bat_dbg(DBG_ROUTES, bat_priv, "Deleting route towards: %pM\n",
			orig_node->orig);
		tt_global_del_orig(bat_priv, orig_node,
				   "Deleted route towards originator");

	/* route added */
	} else if ((!curr_router) && (neigh_node)) {
		bat_dbg(DBG_ROUTES, bat_priv,
			"Adding route towards: %pM (via %pM)\n",
			orig_node->orig, neigh_node->addr);

	/* route changed */
	} else if (neigh_node && curr_router) {
		bat_dbg(DBG_ROUTES, bat_priv,
			"Changing route towards: %pM "
			"(now via %pM - was via %pM)\n",
			orig_node->orig, neigh_node->addr,
			curr_router->addr);
	}

	if (curr_router)
		neigh_node_free_ref(curr_router);

	/* increase refcount of new best neighbor */
	if (neigh_node && !atomic_inc_not_zero(&neigh_node->refcount))
		neigh_node = NULL;

	spin_lock_bh(&orig_node->neigh_list_lock);
	rcu_assign_pointer(orig_node->router, neigh_node);
	spin_unlock_bh(&orig_node->neigh_list_lock);

	/* decrease refcount of previous best neighbor */
	if (curr_router)
		neigh_node_free_ref(curr_router);
}

void update_routes(struct bat_priv *bat_priv, struct orig_node *orig_node,
		   struct neigh_node *neigh_node)
{
	struct neigh_node *router = NULL;

	if (!orig_node)
		goto out;

	router = orig_node_get_router(orig_node);

	if (router != neigh_node)
		update_route(bat_priv, orig_node, neigh_node);

out:
	if (router)
		neigh_node_free_ref(router);
}
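
/* check whether the link towards orig_neigh_node can be considered
 * bidirectional: combine our own echo ratio (tq_own) with an asymmetry
 * penalty derived from the receive count and scale the packet's tq by
 * tq_own * tq_asym_penalty / TQ_MAX_VALUE^2. Returns 1 if the resulting
 * tq reaches TQ_TOTAL_BIDRECT_LIMIT, 0 otherwise */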
static int is_bidirectional_neigh(struct orig_node *orig_node,
				  struct orig_node *orig_neigh_node,
				  struct batman_packet *batman_packet,
				  struct hard_iface *if_incoming)
{
	struct bat_priv *bat_priv = netdev_priv(if_incoming->soft_iface);
	struct neigh_node *neigh_node = NULL, *tmp_neigh_node;
	struct hlist_node *node;
	uint8_t total_count;
	uint8_t orig_eq_count, neigh_rq_count, tq_own;
	int tq_asym_penalty, ret = 0;

	/* find corresponding one hop neighbor */
	rcu_read_lock();
	hlist_for_each_entry_rcu(tmp_neigh_node, node,
				 &orig_neigh_node->neigh_list, list) {

		if (!compare_eth(tmp_neigh_node->addr, orig_neigh_node->orig))
			continue;

		if (tmp_neigh_node->if_incoming != if_incoming)
			continue;

		if (!atomic_inc_not_zero(&tmp_neigh_node->refcount))
			continue;

		neigh_node = tmp_neigh_node;
		break;
	}
	rcu_read_unlock();

	if (!neigh_node)
		neigh_node = create_neighbor(orig_neigh_node,
					     orig_neigh_node,
					     orig_neigh_node->orig,
					     if_incoming);

	if (!neigh_node)
		goto out;

	/* if orig_node is direct neighbour update neigh_node last_valid */
	if (orig_node == orig_neigh_node)
		neigh_node->last_valid = jiffies;

	orig_node->last_valid = jiffies;

	/* find packet count of corresponding one hop neighbor */
	spin_lock_bh(&orig_node->ogm_cnt_lock);
	orig_eq_count = orig_neigh_node->bcast_own_sum[if_incoming->if_num];
	neigh_rq_count = neigh_node->real_packet_count;
	spin_unlock_bh(&orig_node->ogm_cnt_lock);

	/* pay attention to not get a value bigger than 100 % */
	total_count = (orig_eq_count > neigh_rq_count ?
		       neigh_rq_count : orig_eq_count);

	/* if we have too few packets (too little data) we set tq_own to zero */
	/* if we receive too few packets it is not considered bidirectional */
	if ((total_count < TQ_LOCAL_BIDRECT_SEND_MINIMUM) ||
	    (neigh_rq_count < TQ_LOCAL_BIDRECT_RECV_MINIMUM))
		tq_own = 0;
	else
		/* neigh_node->real_packet_count is never zero as we
		 * only purge old information when getting new
		 * information */
		tq_own = (TQ_MAX_VALUE * total_count) / neigh_rq_count;

	/*
	 * 1 - ((1-x) ** 3), normalized to TQ_MAX_VALUE this does
	 * affect the nearly-symmetric links only a little, but
	 * punishes asymmetric links more. This will give a value
	 * between 0 and TQ_MAX_VALUE
	 */
	tq_asym_penalty = TQ_MAX_VALUE - (TQ_MAX_VALUE *
			  (TQ_LOCAL_WINDOW_SIZE - neigh_rq_count) *
			  (TQ_LOCAL_WINDOW_SIZE - neigh_rq_count) *
			  (TQ_LOCAL_WINDOW_SIZE - neigh_rq_count)) /
			  (TQ_LOCAL_WINDOW_SIZE *
			   TQ_LOCAL_WINDOW_SIZE *
			   TQ_LOCAL_WINDOW_SIZE);

	batman_packet->tq = ((batman_packet->tq * tq_own * tq_asym_penalty) /
			     (TQ_MAX_VALUE * TQ_MAX_VALUE));

	bat_dbg(DBG_BATMAN, bat_priv,
		"bidirectional: "
		"orig = %-15pM neigh = %-15pM => own_bcast = %2i, "
		"real recv = %2i, local tq: %3i, asym_penalty: %3i, "
		"total tq: %3i\n",
		orig_node->orig, orig_neigh_node->orig, total_count,
		neigh_rq_count, tq_own, tq_asym_penalty, batman_packet->tq);

	/* if link has the minimum required transmission quality
	 * consider it bidirectional */
	if (batman_packet->tq >= TQ_TOTAL_BIDRECT_LIMIT)
		ret = 1;

out:
	if (neigh_node)
		neigh_node_free_ref(neigh_node);
	return ret;
}

/* caller must hold the neigh_list_lock */
void bonding_candidate_del(struct orig_node *orig_node,
			   struct neigh_node *neigh_node)
{
	/* this neighbor is not part of our candidate list */
	if (list_empty(&neigh_node->bonding_list))
		goto out;

	list_del_rcu(&neigh_node->bonding_list);
	INIT_LIST_HEAD(&neigh_node->bonding_list);
	neigh_node_free_ref(neigh_node);
	atomic_dec(&orig_node->bond_candidates);

out:
	return;
}
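
/* add neigh_node to the bonding candidate list of orig_node if it belongs
 * to the router's primary address, its tq is within BONDING_TQ_THRESHOLD of
 * the current router and no other candidate already uses the same mac
 * address or incoming interface */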
static void bonding_candidate_add(struct orig_node *orig_node,
				  struct neigh_node *neigh_node)
{
	struct hlist_node *node;
	struct neigh_node *tmp_neigh_node, *router = NULL;
	uint8_t interference_candidate = 0;

	spin_lock_bh(&orig_node->neigh_list_lock);

	/* only consider if it has the same primary address ... */
	if (!compare_eth(orig_node->orig,
			 neigh_node->orig_node->primary_addr))
		goto candidate_del;

	router = orig_node_get_router(orig_node);
	if (!router)
		goto candidate_del;

	/* ... and is good enough to be considered */
	if (neigh_node->tq_avg < router->tq_avg - BONDING_TQ_THRESHOLD)
		goto candidate_del;

	/**
	 * check if we have another candidate with the same mac address or
	 * interface. If we do, we won't select this candidate because of
	 * possible interference.
	 */
	hlist_for_each_entry_rcu(tmp_neigh_node, node,
				 &orig_node->neigh_list, list) {

		if (tmp_neigh_node == neigh_node)
			continue;

		/* we only care if the other candidate is even
		 * considered as candidate. */
		if (list_empty(&tmp_neigh_node->bonding_list))
			continue;

		if ((neigh_node->if_incoming == tmp_neigh_node->if_incoming) ||
		    (compare_eth(neigh_node->addr, tmp_neigh_node->addr))) {
			interference_candidate = 1;
			break;
		}
	}

	/* don't care further if it is an interference candidate */
	if (interference_candidate)
		goto candidate_del;

	/* this neighbor already is part of our candidate list */
	if (!list_empty(&neigh_node->bonding_list))
		goto out;

	if (!atomic_inc_not_zero(&neigh_node->refcount))
		goto out;

	list_add_rcu(&neigh_node->bonding_list, &orig_node->bond_list);
	atomic_inc(&orig_node->bond_candidates);
	goto out;

candidate_del:
	bonding_candidate_del(orig_node, neigh_node);

out:
	spin_unlock_bh(&orig_node->neigh_list_lock);

	if (router)
		neigh_node_free_ref(router);
}

/* copy primary address for bonding */
static void bonding_save_primary(const struct orig_node *orig_node,
				 struct orig_node *orig_neigh_node,
				 const struct batman_packet *batman_packet)
{
	if (!(batman_packet->flags & PRIMARIES_FIRST_HOP))
		return;

	memcpy(orig_neigh_node->primary_addr, orig_node->orig, ETH_ALEN);
}
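
/* update the originator entry for a received OGM: refresh the last-hop
 * neighbour ranking, possibly switch the route, then trigger translation
 * table and gateway updates */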
static void update_orig(struct bat_priv *bat_priv, struct orig_node *orig_node,
			const struct ethhdr *ethhdr,
			const struct batman_packet *batman_packet,
			struct hard_iface *if_incoming,
			const unsigned char *tt_buff, int is_duplicate)
{
	struct neigh_node *neigh_node = NULL, *tmp_neigh_node = NULL;
	struct neigh_node *router = NULL;
	struct orig_node *orig_node_tmp;
	struct hlist_node *node;
	uint8_t bcast_own_sum_orig, bcast_own_sum_neigh;

	bat_dbg(DBG_BATMAN, bat_priv, "update_originator(): "
		"Searching and updating originator entry of received packet\n");

	rcu_read_lock();
	hlist_for_each_entry_rcu(tmp_neigh_node, node,
				 &orig_node->neigh_list, list) {
		if (compare_eth(tmp_neigh_node->addr, ethhdr->h_source) &&
		    (tmp_neigh_node->if_incoming == if_incoming) &&
		    atomic_inc_not_zero(&tmp_neigh_node->refcount)) {
			if (neigh_node)
				neigh_node_free_ref(neigh_node);
			neigh_node = tmp_neigh_node;
			continue;
		}

		if (is_duplicate)
			continue;

		spin_lock_bh(&tmp_neigh_node->tq_lock);
		ring_buffer_set(tmp_neigh_node->tq_recv,
				&tmp_neigh_node->tq_index, 0);
		tmp_neigh_node->tq_avg =
			ring_buffer_avg(tmp_neigh_node->tq_recv);
		spin_unlock_bh(&tmp_neigh_node->tq_lock);
	}

	if (!neigh_node) {
		struct orig_node *orig_tmp;

		orig_tmp = get_orig_node(bat_priv, ethhdr->h_source);
		if (!orig_tmp)
			goto unlock;

		neigh_node = create_neighbor(orig_node, orig_tmp,
					     ethhdr->h_source, if_incoming);

		orig_node_free_ref(orig_tmp);
		if (!neigh_node)
			goto unlock;
	} else
		bat_dbg(DBG_BATMAN, bat_priv,
			"Updating existing last-hop neighbor of originator\n");

	rcu_read_unlock();

	orig_node->flags = batman_packet->flags;
	neigh_node->last_valid = jiffies;

	spin_lock_bh(&neigh_node->tq_lock);
	ring_buffer_set(neigh_node->tq_recv,
			&neigh_node->tq_index,
			batman_packet->tq);
	neigh_node->tq_avg = ring_buffer_avg(neigh_node->tq_recv);
	spin_unlock_bh(&neigh_node->tq_lock);

	if (!is_duplicate) {
		orig_node->last_ttl = batman_packet->ttl;
		neigh_node->last_ttl = batman_packet->ttl;
	}

	bonding_candidate_add(orig_node, neigh_node);

	/* if this neighbor already is our next hop there is nothing
	 * to change */
	router = orig_node_get_router(orig_node);
	if (router == neigh_node)
		goto update_tt;

	/* if this neighbor does not offer a better TQ we won't consider it */
	if (router && (router->tq_avg > neigh_node->tq_avg))
		goto update_tt;

	/* if the TQ is the same and the link not more symmetric we
	 * won't consider it either */
	if (router && (neigh_node->tq_avg == router->tq_avg)) {
		orig_node_tmp = router->orig_node;
		spin_lock_bh(&orig_node_tmp->ogm_cnt_lock);
		bcast_own_sum_orig =
			orig_node_tmp->bcast_own_sum[if_incoming->if_num];
		spin_unlock_bh(&orig_node_tmp->ogm_cnt_lock);

		orig_node_tmp = neigh_node->orig_node;
		spin_lock_bh(&orig_node_tmp->ogm_cnt_lock);
		bcast_own_sum_neigh =
			orig_node_tmp->bcast_own_sum[if_incoming->if_num];
		spin_unlock_bh(&orig_node_tmp->ogm_cnt_lock);

		if (bcast_own_sum_orig >= bcast_own_sum_neigh)
			goto update_tt;
	}

	update_routes(bat_priv, orig_node, neigh_node);

update_tt:
	/* I have to check for transtable changes only if the OGM has been
	 * sent through a primary interface */
	if (((batman_packet->orig != ethhdr->h_source) &&
	     (batman_packet->ttl > 2)) ||
	    (batman_packet->flags & PRIMARIES_FIRST_HOP))
		update_transtable(bat_priv, orig_node, tt_buff,
				  batman_packet->tt_num_changes,
				  batman_packet->ttvn,
				  batman_packet->tt_crc);

	if (orig_node->gw_flags != batman_packet->gw_flags)
		gw_node_update(bat_priv, orig_node, batman_packet->gw_flags);

	orig_node->gw_flags = batman_packet->gw_flags;

	/* restart gateway selection if fast or late switching was enabled */
	if ((orig_node->gw_flags) &&
	    (atomic_read(&bat_priv->gw_mode) == GW_MODE_CLIENT) &&
	    (atomic_read(&bat_priv->gw_sel_class) > 2))
		gw_check_election(bat_priv, orig_node);

	goto out;

unlock:
	rcu_read_unlock();
out:
	if (neigh_node)
		neigh_node_free_ref(neigh_node);
	if (router)
		neigh_node_free_ref(router);
}

/* checks whether the host restarted and is in the protection time.
 * returns:
 *  0 if the packet is to be accepted
 *  1 if the packet is to be ignored.
 */
static int window_protected(struct bat_priv *bat_priv,
			    int32_t seq_num_diff,
			    unsigned long *last_reset)
{
	if ((seq_num_diff <= -TQ_LOCAL_WINDOW_SIZE)
		|| (seq_num_diff >= EXPECTED_SEQNO_RANGE)) {
		if (time_after(jiffies, *last_reset +
			msecs_to_jiffies(RESET_PROTECTION_MS))) {

			*last_reset = jiffies;
			bat_dbg(DBG_BATMAN, bat_priv,
				"old packet received, start protection\n");

			return 0;
		} else
			return 1;
	}
	return 0;
}

/* processes a batman packet for all interfaces, adjusts the sequence number
 * and finds out whether it is a duplicate.
 * returns:
 *   1 the packet is a duplicate
 *   0 the packet has not yet been received
 *  -1 the packet is old and has been received while the seqno window
 *     was protected. Caller should drop it.
 */
static int count_real_packets(const struct ethhdr *ethhdr,
			      const struct batman_packet *batman_packet,
			      const struct hard_iface *if_incoming)
{
	struct bat_priv *bat_priv = netdev_priv(if_incoming->soft_iface);
	struct orig_node *orig_node;
	struct neigh_node *tmp_neigh_node;
	struct hlist_node *node;
	int is_duplicate = 0;
	int32_t seq_diff;
	int need_update = 0;
	int set_mark, ret = -1;

	orig_node = get_orig_node(bat_priv, batman_packet->orig);
	if (!orig_node)
		return 0;

	spin_lock_bh(&orig_node->ogm_cnt_lock);
	seq_diff = batman_packet->seqno - orig_node->last_real_seqno;

	/* signal the caller that the packet is to be dropped. */
	if (window_protected(bat_priv, seq_diff,
			     &orig_node->batman_seqno_reset))
		goto out;

	rcu_read_lock();
	hlist_for_each_entry_rcu(tmp_neigh_node, node,
				 &orig_node->neigh_list, list) {

		is_duplicate |= get_bit_status(tmp_neigh_node->real_bits,
					       orig_node->last_real_seqno,
					       batman_packet->seqno);

		if (compare_eth(tmp_neigh_node->addr, ethhdr->h_source) &&
		    (tmp_neigh_node->if_incoming == if_incoming))
			set_mark = 1;
		else
			set_mark = 0;

		/* if the window moved, set the update flag. */
		need_update |= bit_get_packet(bat_priv,
					      tmp_neigh_node->real_bits,
					      seq_diff, set_mark);

		tmp_neigh_node->real_packet_count =
			bit_packet_count(tmp_neigh_node->real_bits);
	}
	rcu_read_unlock();

	if (need_update) {
		bat_dbg(DBG_BATMAN, bat_priv,
			"updating last_seqno: old %d, new %d\n",
			orig_node->last_real_seqno, batman_packet->seqno);
		orig_node->last_real_seqno = batman_packet->seqno;
	}

	ret = is_duplicate;

out:
	spin_unlock_bh(&orig_node->ogm_cnt_lock);
	orig_node_free_ref(orig_node);
	return ret;
}
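
/* main OGM receive path: validate the packet, run the duplicate and
 * bidirectionality checks, update the originator ranking and rebroadcast
 * the OGM (with the direct link flag set for single hop neighbours) */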
void receive_bat_packet(const struct ethhdr *ethhdr,
			struct batman_packet *batman_packet,
			const unsigned char *tt_buff,
			struct hard_iface *if_incoming)
{
	struct bat_priv *bat_priv = netdev_priv(if_incoming->soft_iface);
	struct hard_iface *hard_iface;
	struct orig_node *orig_neigh_node, *orig_node;
	struct neigh_node *router = NULL, *router_router = NULL;
	struct neigh_node *orig_neigh_router = NULL;
	int has_directlink_flag;
	int is_my_addr = 0, is_my_orig = 0, is_my_oldorig = 0;
	int is_broadcast = 0, is_bidirectional, is_single_hop_neigh;
	int is_duplicate;
	uint32_t if_incoming_seqno;

	/* Silently drop when the batman packet is actually not a
	 * correct packet.
	 *
	 * This might happen if a packet is padded (e.g. Ethernet has a
	 * minimum frame length of 64 byte) and the aggregation interprets
	 * it as an additional length.
	 *
	 * TODO: A more sane solution would be to have a bit in the
	 * batman_packet to detect whether the packet is the last
	 * packet in an aggregation. Here we expect that the padding
	 * is always zero (or not 0x01)
	 */
	if (batman_packet->packet_type != BAT_PACKET)
		return;

	/* could be changed by schedule_own_packet() */
	if_incoming_seqno = atomic_read(&if_incoming->seqno);

	has_directlink_flag = (batman_packet->flags & DIRECTLINK ? 1 : 0);

	is_single_hop_neigh = (compare_eth(ethhdr->h_source,
					   batman_packet->orig) ? 1 : 0);

	bat_dbg(DBG_BATMAN, bat_priv,
		"Received BATMAN packet via NB: %pM, IF: %s [%pM] "
		"(from OG: %pM, via prev OG: %pM, seqno %d, ttvn %u, "
		"crc %u, changes %u, td %d, TTL %d, V %d, IDF %d)\n",
		ethhdr->h_source, if_incoming->net_dev->name,
		if_incoming->net_dev->dev_addr, batman_packet->orig,
		batman_packet->prev_sender, batman_packet->seqno,
		batman_packet->ttvn, batman_packet->tt_crc,
		batman_packet->tt_num_changes, batman_packet->tq,
		batman_packet->ttl, batman_packet->version,
		has_directlink_flag);

	rcu_read_lock();
	list_for_each_entry_rcu(hard_iface, &hardif_list, list) {
		if (hard_iface->if_status != IF_ACTIVE)
			continue;

		if (hard_iface->soft_iface != if_incoming->soft_iface)
			continue;

		if (compare_eth(ethhdr->h_source,
				hard_iface->net_dev->dev_addr))
			is_my_addr = 1;

		if (compare_eth(batman_packet->orig,
				hard_iface->net_dev->dev_addr))
			is_my_orig = 1;

		if (compare_eth(batman_packet->prev_sender,
				hard_iface->net_dev->dev_addr))
			is_my_oldorig = 1;

		if (is_broadcast_ether_addr(ethhdr->h_source))
			is_broadcast = 1;
	}
	rcu_read_unlock();

	if (batman_packet->version != COMPAT_VERSION) {
		bat_dbg(DBG_BATMAN, bat_priv,
			"Drop packet: incompatible batman version (%i)\n",
			batman_packet->version);
		return;
	}

	if (is_my_addr) {
		bat_dbg(DBG_BATMAN, bat_priv,
			"Drop packet: received my own broadcast (sender: %pM"
			")\n",
			ethhdr->h_source);
		return;
	}

	if (is_broadcast) {
		bat_dbg(DBG_BATMAN, bat_priv, "Drop packet: "
			"ignoring all packets with broadcast source addr "
			"(sender: %pM)\n", ethhdr->h_source);
		return;
	}

	if (is_my_orig) {
		unsigned long *word;
		int offset;

		orig_neigh_node = get_orig_node(bat_priv, ethhdr->h_source);
		if (!orig_neigh_node)
			return;

		/* neighbor has to indicate direct link and it has to
		 * come via the corresponding interface */
		/* save packet seqno for bidirectional check */
		if (has_directlink_flag &&
		    compare_eth(if_incoming->net_dev->dev_addr,
				batman_packet->orig)) {
			offset = if_incoming->if_num * NUM_WORDS;

			spin_lock_bh(&orig_neigh_node->ogm_cnt_lock);
			word = &(orig_neigh_node->bcast_own[offset]);
			bit_mark(word,
				 if_incoming_seqno - batman_packet->seqno - 2);
			orig_neigh_node->bcast_own_sum[if_incoming->if_num] =
				bit_packet_count(word);
			spin_unlock_bh(&orig_neigh_node->ogm_cnt_lock);
		}

		bat_dbg(DBG_BATMAN, bat_priv, "Drop packet: "
			"originator packet from myself (via neighbor)\n");
		orig_node_free_ref(orig_neigh_node);
		return;
	}

	if (is_my_oldorig) {
		bat_dbg(DBG_BATMAN, bat_priv,
			"Drop packet: ignoring all rebroadcast echos (sender: "
			"%pM)\n", ethhdr->h_source);
		return;
	}

	orig_node = get_orig_node(bat_priv, batman_packet->orig);
	if (!orig_node)
		return;

	is_duplicate = count_real_packets(ethhdr, batman_packet, if_incoming);

	if (is_duplicate == -1) {
		bat_dbg(DBG_BATMAN, bat_priv,
			"Drop packet: packet within seqno protection time "
			"(sender: %pM)\n", ethhdr->h_source);
		goto out;
	}

	if (batman_packet->tq == 0) {
		bat_dbg(DBG_BATMAN, bat_priv,
			"Drop packet: originator packet with tq equal 0\n");
		goto out;
	}

	router = orig_node_get_router(orig_node);
	if (router)
		router_router = orig_node_get_router(router->orig_node);

	/* avoid temporary routing loops */
	if (router && router_router &&
	    (compare_eth(router->addr, batman_packet->prev_sender)) &&
	    !(compare_eth(batman_packet->orig, batman_packet->prev_sender)) &&
	    (compare_eth(router->addr, router_router->addr))) {
		bat_dbg(DBG_BATMAN, bat_priv,
			"Drop packet: ignoring all rebroadcast packets that "
			"may make me loop (sender: %pM)\n", ethhdr->h_source);
		goto out;
	}

	/* if sender is a direct neighbor the sender mac equals
	 * originator mac */
	orig_neigh_node = (is_single_hop_neigh ?
			   orig_node :
			   get_orig_node(bat_priv, ethhdr->h_source));
	if (!orig_neigh_node)
		goto out;

	orig_neigh_router = orig_node_get_router(orig_neigh_node);

	/* drop packet if sender is not a direct neighbor and if we
	 * don't route towards it */
	if (!is_single_hop_neigh && (!orig_neigh_router)) {
		bat_dbg(DBG_BATMAN, bat_priv,
			"Drop packet: OGM via unknown neighbor!\n");
		goto out_neigh;
	}

	is_bidirectional = is_bidirectional_neigh(orig_node, orig_neigh_node,
						  batman_packet, if_incoming);

	bonding_save_primary(orig_node, orig_neigh_node, batman_packet);

	/* update ranking if it is not a duplicate or has the same
	 * seqno and similar ttl as the non-duplicate */
	if (is_bidirectional &&
	    (!is_duplicate ||
	     ((orig_node->last_real_seqno == batman_packet->seqno) &&
	      (orig_node->last_ttl - 3 <= batman_packet->ttl))))
		update_orig(bat_priv, orig_node, ethhdr, batman_packet,
			    if_incoming, tt_buff, is_duplicate);

	/* is single hop (direct) neighbor */
	if (is_single_hop_neigh) {

		/* mark direct link on incoming interface */
		schedule_forward_packet(orig_node, ethhdr, batman_packet,
					1, if_incoming);

		bat_dbg(DBG_BATMAN, bat_priv, "Forwarding packet: "
			"rebroadcast neighbor packet with direct link flag\n");
		goto out_neigh;
	}

	/* multihop originator */
	if (!is_bidirectional) {
		bat_dbg(DBG_BATMAN, bat_priv,
			"Drop packet: not received via bidirectional link\n");
		goto out_neigh;
	}

	if (is_duplicate) {
		bat_dbg(DBG_BATMAN, bat_priv,
			"Drop packet: duplicate packet received\n");
		goto out_neigh;
	}

	bat_dbg(DBG_BATMAN, bat_priv,
		"Forwarding packet: rebroadcast originator packet\n");
	schedule_forward_packet(orig_node, ethhdr, batman_packet,
				0, if_incoming);

out_neigh:
	if ((orig_neigh_node) && (!is_single_hop_neigh))
		orig_node_free_ref(orig_neigh_node);
out:
	if (router)
		neigh_node_free_ref(router);
	if (router_router)
		neigh_node_free_ref(router_router);
	if (orig_neigh_router)
		neigh_node_free_ref(orig_neigh_router);

	orig_node_free_ref(orig_node);
}

int recv_bat_packet(struct sk_buff *skb, struct hard_iface *hard_iface)
{
	struct ethhdr *ethhdr;

	/* drop packet if it does not have the necessary minimum size */
	if (unlikely(!pskb_may_pull(skb, sizeof(struct batman_packet))))
		return NET_RX_DROP;

	ethhdr = (struct ethhdr *)skb_mac_header(skb);

	/* packet with broadcast indication but unicast recipient */
	if (!is_broadcast_ether_addr(ethhdr->h_dest))
		return NET_RX_DROP;

	/* packet with broadcast sender address */
	if (is_broadcast_ether_addr(ethhdr->h_source))
		return NET_RX_DROP;

	/* create a copy of the skb, if needed, to modify it. */
	if (skb_cow(skb, 0) < 0)
		return NET_RX_DROP;

	/* keep skb linear */
	if (skb_linearize(skb) < 0)
		return NET_RX_DROP;

	ethhdr = (struct ethhdr *)skb_mac_header(skb);

	receive_aggr_bat_packet(ethhdr,
				skb->data,
				skb_headlen(skb),
				hard_iface);

	kfree_skb(skb);
	return NET_RX_SUCCESS;
}
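
/* handle an icmp packet addressed to this node: hand everything but echo
 * requests to the userspace socket, answer echo requests with an ECHO_REPLY
 * sent back towards the originator */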
static int recv_my_icmp_packet(struct bat_priv *bat_priv,
			       struct sk_buff *skb, size_t icmp_len)
{
	struct hard_iface *primary_if = NULL;
	struct orig_node *orig_node = NULL;
	struct neigh_node *router = NULL;
	struct icmp_packet_rr *icmp_packet;
	int ret = NET_RX_DROP;

	icmp_packet = (struct icmp_packet_rr *)skb->data;

	/* add data to device queue */
	if (icmp_packet->msg_type != ECHO_REQUEST) {
		bat_socket_receive_packet(icmp_packet, icmp_len);
		goto out;
	}

	primary_if = primary_if_get_selected(bat_priv);
	if (!primary_if)
		goto out;

	/* answer echo request (ping) */
	/* get routing information */
	orig_node = orig_hash_find(bat_priv, icmp_packet->orig);
	if (!orig_node)
		goto out;

	router = orig_node_get_router(orig_node);
	if (!router)
		goto out;

	/* create a copy of the skb, if needed, to modify it. */
	if (skb_cow(skb, sizeof(struct ethhdr)) < 0)
		goto out;

	icmp_packet = (struct icmp_packet_rr *)skb->data;

	memcpy(icmp_packet->dst, icmp_packet->orig, ETH_ALEN);
	memcpy(icmp_packet->orig, primary_if->net_dev->dev_addr, ETH_ALEN);
	icmp_packet->msg_type = ECHO_REPLY;
	icmp_packet->ttl = TTL;

	send_skb_packet(skb, router->if_incoming, router->addr);
	ret = NET_RX_SUCCESS;

out:
	if (primary_if)
		hardif_free_ref(primary_if);
	if (router)
		neigh_node_free_ref(router);
	if (orig_node)
		orig_node_free_ref(orig_node);
	return ret;
}

static int recv_icmp_ttl_exceeded(struct bat_priv *bat_priv,
				  struct sk_buff *skb)
{
	struct hard_iface *primary_if = NULL;
	struct orig_node *orig_node = NULL;
	struct neigh_node *router = NULL;
	struct icmp_packet *icmp_packet;
	int ret = NET_RX_DROP;

	icmp_packet = (struct icmp_packet *)skb->data;

	/* send TTL exceeded if packet is an echo request (traceroute) */
	if (icmp_packet->msg_type != ECHO_REQUEST) {
		pr_debug("Warning - can't forward icmp packet from %pM to "
			 "%pM: ttl exceeded\n", icmp_packet->orig,
			 icmp_packet->dst);
		goto out;
	}

	primary_if = primary_if_get_selected(bat_priv);
	if (!primary_if)
		goto out;

	/* get routing information */
	orig_node = orig_hash_find(bat_priv, icmp_packet->orig);
	if (!orig_node)
		goto out;

	router = orig_node_get_router(orig_node);
	if (!router)
		goto out;

	/* create a copy of the skb, if needed, to modify it. */
	if (skb_cow(skb, sizeof(struct ethhdr)) < 0)
		goto out;

	icmp_packet = (struct icmp_packet *)skb->data;

	memcpy(icmp_packet->dst, icmp_packet->orig, ETH_ALEN);
	memcpy(icmp_packet->orig, primary_if->net_dev->dev_addr, ETH_ALEN);
	icmp_packet->msg_type = TTL_EXCEEDED;
	icmp_packet->ttl = TTL;

	send_skb_packet(skb, router->if_incoming, router->addr);
	ret = NET_RX_SUCCESS;

out:
	if (primary_if)
		hardif_free_ref(primary_if);
	if (router)
		neigh_node_free_ref(router);
	if (orig_node)
		orig_node_free_ref(orig_node);
	return ret;
}
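
/* receive handler for batman icmp packets: sanity check the frame, append
 * a record route entry if there is room, then either answer the packet
 * locally, send a TTL_EXCEEDED reply or forward it towards its destination */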
int recv_icmp_packet(struct sk_buff *skb, struct hard_iface *recv_if)
{
	struct bat_priv *bat_priv = netdev_priv(recv_if->soft_iface);
	struct icmp_packet_rr *icmp_packet;
	struct ethhdr *ethhdr;
	struct orig_node *orig_node = NULL;
	struct neigh_node *router = NULL;
	int hdr_size = sizeof(struct icmp_packet);
	int ret = NET_RX_DROP;

	/**
	 * we truncate all incoming icmp packets if they don't match our size
	 */
	if (skb->len >= sizeof(struct icmp_packet_rr))
		hdr_size = sizeof(struct icmp_packet_rr);

	/* drop packet if it does not have the necessary minimum size */
	if (unlikely(!pskb_may_pull(skb, hdr_size)))
		goto out;

	ethhdr = (struct ethhdr *)skb_mac_header(skb);

	/* packet with unicast indication but broadcast recipient */
	if (is_broadcast_ether_addr(ethhdr->h_dest))
		goto out;

	/* packet with broadcast sender address */
	if (is_broadcast_ether_addr(ethhdr->h_source))
		goto out;

	/* not for me */
	if (!is_my_mac(ethhdr->h_dest))
		goto out;

	icmp_packet = (struct icmp_packet_rr *)skb->data;

	/* add record route information if not full */
	if ((hdr_size == sizeof(struct icmp_packet_rr)) &&
	    (icmp_packet->rr_cur < BAT_RR_LEN)) {
		memcpy(&(icmp_packet->rr[icmp_packet->rr_cur]),
		       ethhdr->h_dest, ETH_ALEN);
		icmp_packet->rr_cur++;
	}

	/* packet for me */
	if (is_my_mac(icmp_packet->dst))
		return recv_my_icmp_packet(bat_priv, skb, hdr_size);

	/* TTL exceeded */
	if (icmp_packet->ttl < 2)
		return recv_icmp_ttl_exceeded(bat_priv, skb);

	/* get routing information */
	orig_node = orig_hash_find(bat_priv, icmp_packet->dst);
	if (!orig_node)
		goto out;

	router = orig_node_get_router(orig_node);
	if (!router)
		goto out;

	/* create a copy of the skb, if needed, to modify it. */
	if (skb_cow(skb, sizeof(struct ethhdr)) < 0)
		goto out;

	icmp_packet = (struct icmp_packet_rr *)skb->data;

	/* decrement ttl */
	icmp_packet->ttl--;

	/* route it */
	send_skb_packet(skb, router->if_incoming, router->addr);
	ret = NET_RX_SUCCESS;

out:
	if (router)
		neigh_node_free_ref(router);
	if (orig_node)
		orig_node_free_ref(orig_node);
	return ret;
}

/* In the bonding case, send the packets in a round
 * robin fashion over the remaining interfaces.
 *
 * This method rotates the bonding list and increases the
 * returned router's refcount. */
static struct neigh_node *find_bond_router(struct orig_node *primary_orig,
					   const struct hard_iface *recv_if)
{
	struct neigh_node *tmp_neigh_node;
	struct neigh_node *router = NULL, *first_candidate = NULL;

	rcu_read_lock();
	list_for_each_entry_rcu(tmp_neigh_node, &primary_orig->bond_list,
				bonding_list) {
		if (!first_candidate)
			first_candidate = tmp_neigh_node;

		/* recv_if == NULL on the first node. */
		if (tmp_neigh_node->if_incoming == recv_if)
			continue;

		if (!atomic_inc_not_zero(&tmp_neigh_node->refcount))
			continue;

		router = tmp_neigh_node;
		break;
	}

	/* use the first candidate if nothing was found. */
	if (!router && first_candidate &&
	    atomic_inc_not_zero(&first_candidate->refcount))
		router = first_candidate;

	if (!router)
		goto out;

	/* selected should point to the next element
	 * after the current router */
	spin_lock_bh(&primary_orig->neigh_list_lock);
	/* this is a list_move(), which unfortunately
	 * does not exist as rcu version */
	list_del_rcu(&primary_orig->bond_list);
	list_add_rcu(&primary_orig->bond_list,
		     &router->bonding_list);
	spin_unlock_bh(&primary_orig->neigh_list_lock);

out:
	rcu_read_unlock();
	return router;
}

/* Interface Alternating: Use the best of the
 * remaining candidates which are not using
 * this interface.
 *
 * Increases the returned router's refcount */
static struct neigh_node *find_ifalter_router(struct orig_node *primary_orig,
					      const struct hard_iface *recv_if)
{
	struct neigh_node *tmp_neigh_node;
	struct neigh_node *router = NULL, *first_candidate = NULL;

	rcu_read_lock();
	list_for_each_entry_rcu(tmp_neigh_node, &primary_orig->bond_list,
				bonding_list) {
		if (!first_candidate)
			first_candidate = tmp_neigh_node;

		/* recv_if == NULL on the first node. */
		if (tmp_neigh_node->if_incoming == recv_if)
			continue;

		if (!atomic_inc_not_zero(&tmp_neigh_node->refcount))
			continue;

		/* if we don't have a router yet
		 * or this one is better, choose it. */
		if ((!router) ||
		    (tmp_neigh_node->tq_avg > router->tq_avg)) {
			/* decrement refcount of
			 * previously selected router */
			if (router)
				neigh_node_free_ref(router);

			router = tmp_neigh_node;
			atomic_inc_not_zero(&router->refcount);
		}

		neigh_node_free_ref(tmp_neigh_node);
	}

	/* use the first candidate if nothing was found. */
	if (!router && first_candidate &&
	    atomic_inc_not_zero(&first_candidate->refcount))
		router = first_candidate;

	rcu_read_unlock();
	return router;
}
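
/* receive handler for translation table queries: answer or forward
 * TT_REQUESTs and apply or forward TT_RESPONSEs depending on whether this
 * node is the destination */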
int recv_tt_query(struct sk_buff *skb, struct hard_iface *recv_if)
{
	struct bat_priv *bat_priv = netdev_priv(recv_if->soft_iface);
	struct tt_query_packet *tt_query;
	struct ethhdr *ethhdr;

	/* drop packet if it does not have the necessary minimum size */
	if (unlikely(!pskb_may_pull(skb, sizeof(struct tt_query_packet))))
		goto out;

	/* I could need to modify it */
	if (skb_cow(skb, sizeof(struct tt_query_packet)) < 0)
		goto out;

	ethhdr = (struct ethhdr *)skb_mac_header(skb);

	/* packet with unicast indication but broadcast recipient */
	if (is_broadcast_ether_addr(ethhdr->h_dest))
		goto out;

	/* packet with broadcast sender address */
	if (is_broadcast_ether_addr(ethhdr->h_source))
		goto out;

	tt_query = (struct tt_query_packet *)skb->data;

	tt_query->tt_data = ntohs(tt_query->tt_data);

	switch (tt_query->flags & TT_QUERY_TYPE_MASK) {
	case TT_REQUEST:
		/* If we cannot provide an answer the tt_request is
		 * forwarded */
		if (!send_tt_response(bat_priv, tt_query)) {
			bat_dbg(DBG_TT, bat_priv,
				"Routing TT_REQUEST to %pM [%c]\n",
				tt_query->dst,
				(tt_query->flags & TT_FULL_TABLE ? 'F' : '.'));
			tt_query->tt_data = htons(tt_query->tt_data);
			return route_unicast_packet(skb, recv_if);
		}
		break;
	case TT_RESPONSE:
		/* packet needs to be linearised to access the TT changes */
		if (skb_linearize(skb) < 0)
			goto out;

		if (is_my_mac(tt_query->dst))
			handle_tt_response(bat_priv, tt_query);
		else {
			bat_dbg(DBG_TT, bat_priv,
				"Routing TT_RESPONSE to %pM [%c]\n",
				tt_query->dst,
				(tt_query->flags & TT_FULL_TABLE ? 'F' : '.'));
			tt_query->tt_data = htons(tt_query->tt_data);
			return route_unicast_packet(skb, recv_if);
		}
		break;
	}

out:
	/* returning NET_RX_DROP will make the caller function kfree the skb */
	return NET_RX_DROP;
}
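
/* receive handler for roaming advertisements: forward the packet if it is
 * not addressed to us, otherwise add the roamed client to the global
 * translation table and flag the roaming phase via tt_poss_change */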
int recv_roam_adv(struct sk_buff *skb, struct hard_iface *recv_if)
{
	struct bat_priv *bat_priv = netdev_priv(recv_if->soft_iface);
	struct roam_adv_packet *roam_adv_packet;
	struct orig_node *orig_node;
	struct ethhdr *ethhdr;

	/* drop packet if it does not have the necessary minimum size */
	if (unlikely(!pskb_may_pull(skb, sizeof(struct roam_adv_packet))))
		goto out;

	ethhdr = (struct ethhdr *)skb_mac_header(skb);

	/* packet with unicast indication but broadcast recipient */
	if (is_broadcast_ether_addr(ethhdr->h_dest))
		goto out;

	/* packet with broadcast sender address */
	if (is_broadcast_ether_addr(ethhdr->h_source))
		goto out;

	roam_adv_packet = (struct roam_adv_packet *)skb->data;

	if (!is_my_mac(roam_adv_packet->dst))
		return route_unicast_packet(skb, recv_if);

	orig_node = orig_hash_find(bat_priv, roam_adv_packet->src);
	if (!orig_node)
		goto out;

	bat_dbg(DBG_TT, bat_priv, "Received ROAMING_ADV from %pM "
		"(client %pM)\n", roam_adv_packet->src,
		roam_adv_packet->client);

	tt_global_add(bat_priv, orig_node, roam_adv_packet->client,
		      atomic_read(&orig_node->last_ttvn) + 1, true);

	/* Roaming phase starts: I have new information but the ttvn has not
	 * been incremented yet. This flag will make me check all the incoming
	 * packets for the correct destination. */
	bat_priv->tt_poss_change = true;

	orig_node_free_ref(orig_node);
out:
	/* returning NET_RX_DROP will make the caller function kfree the skb */
	return NET_RX_DROP;
}

/* find a suitable router for this originator, and use
 * bonding if possible. increases the found neighbors
 * refcount.*/
struct neigh_node *find_router(struct bat_priv *bat_priv,
			       struct orig_node *orig_node,
			       const struct hard_iface *recv_if)
{
	struct orig_node *primary_orig_node;
	struct orig_node *router_orig;
	struct neigh_node *router;
	static uint8_t zero_mac[ETH_ALEN] = {0, 0, 0, 0, 0, 0};
	int bonding_enabled;

	if (!orig_node)
		return NULL;

	router = orig_node_get_router(orig_node);
	if (!router)
		goto err;

	/* without bonding, the first node should
	 * always choose the default router. */
	bonding_enabled = atomic_read(&bat_priv->bonding);

	rcu_read_lock();
	/* select default router to output */
	router_orig = router->orig_node;
	if (!router_orig)
		goto err_unlock;

	if ((!recv_if) && (!bonding_enabled))
		goto return_router;

	/* if we have something in the primary_addr, we can search
	 * for a potential bonding candidate. */
	if (compare_eth(router_orig->primary_addr, zero_mac))
		goto return_router;

	/* find the orig_node which has the primary interface. might
	 * even be the same as our router_orig in many cases */
	if (compare_eth(router_orig->primary_addr, router_orig->orig)) {
		primary_orig_node = router_orig;
	} else {
		primary_orig_node = orig_hash_find(bat_priv,
						   router_orig->primary_addr);
		if (!primary_orig_node)
			goto return_router;

		orig_node_free_ref(primary_orig_node);
	}

	/* with less than 2 candidates, we can't do any
	 * bonding and prefer the original router. */
	if (atomic_read(&primary_orig_node->bond_candidates) < 2)
		goto return_router;

	/* all nodes between should choose a candidate which
	 * is not on the interface where the packet came
	 * in. */
	neigh_node_free_ref(router);

	if (bonding_enabled)
		router = find_bond_router(primary_orig_node, recv_if);
	else
		router = find_ifalter_router(primary_orig_node, recv_if);

return_router:
	if (router && router->if_incoming->if_status != IF_ACTIVE)
		goto err_unlock;

	rcu_read_unlock();
	return router;
err_unlock:
	rcu_read_unlock();
err:
	if (router)
		neigh_node_free_ref(router);
	return NULL;
}
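
/* basic sanity checks for received unicast frames: minimum size, sane
 * source/destination addresses and destined for this node. Returns 0 when
 * the packet may be processed, -1 when it should be dropped */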
static int check_unicast_packet(struct sk_buff *skb, int hdr_size)
{
	struct ethhdr *ethhdr;

	/* drop packet if it does not have the necessary minimum size */
	if (unlikely(!pskb_may_pull(skb, hdr_size)))
		return -1;

	ethhdr = (struct ethhdr *)skb_mac_header(skb);

	/* packet with unicast indication but broadcast recipient */
	if (is_broadcast_ether_addr(ethhdr->h_dest))
		return -1;

	/* packet with broadcast sender address */
	if (is_broadcast_ether_addr(ethhdr->h_source))
		return -1;

	/* not for me */
	if (!is_my_mac(ethhdr->h_dest))
		return -1;

	return 0;
}
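
/* forward a unicast packet towards its destination: look up the next hop
 * via find_router(), fragment or reassemble the frame if necessary,
 * decrement the ttl and hand it to the outgoing interface */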
int route_unicast_packet(struct sk_buff *skb, struct hard_iface *recv_if)
{
	struct bat_priv *bat_priv = netdev_priv(recv_if->soft_iface);
	struct orig_node *orig_node = NULL;
	struct neigh_node *neigh_node = NULL;
	struct unicast_packet *unicast_packet;
	struct ethhdr *ethhdr = (struct ethhdr *)skb_mac_header(skb);
	int ret = NET_RX_DROP;
	struct sk_buff *new_skb;

	unicast_packet = (struct unicast_packet *)skb->data;

	/* TTL exceeded */
	if (unicast_packet->ttl < 2) {
		pr_debug("Warning - can't forward unicast packet from %pM to "
			 "%pM: ttl exceeded\n", ethhdr->h_source,
			 unicast_packet->dest);
		goto out;
	}

	/* get routing information */
	orig_node = orig_hash_find(bat_priv, unicast_packet->dest);
	if (!orig_node)
		goto out;

	/* find_router() increases neigh_nodes refcount if found. */
	neigh_node = find_router(bat_priv, orig_node, recv_if);
	if (!neigh_node)
		goto out;

	/* create a copy of the skb, if needed, to modify it. */
	if (skb_cow(skb, sizeof(struct ethhdr)) < 0)
		goto out;

	unicast_packet = (struct unicast_packet *)skb->data;

	if (unicast_packet->packet_type == BAT_UNICAST &&
	    atomic_read(&bat_priv->fragmentation) &&
	    skb->len > neigh_node->if_incoming->net_dev->mtu) {
		ret = frag_send_skb(skb, bat_priv,
				    neigh_node->if_incoming, neigh_node->addr);
		goto out;
	}

	if (unicast_packet->packet_type == BAT_UNICAST_FRAG &&
	    frag_can_reassemble(skb, neigh_node->if_incoming->net_dev->mtu)) {

		ret = frag_reassemble_skb(skb, bat_priv, &new_skb);

		if (ret == NET_RX_DROP)
			goto out;

		/* packet was buffered for late merge */
		if (!new_skb) {
			ret = NET_RX_SUCCESS;
			goto out;
		}

		skb = new_skb;
		unicast_packet = (struct unicast_packet *)skb->data;
	}

	/* decrement ttl */
	unicast_packet->ttl--;

	/* route it */
	send_skb_packet(skb, neigh_node->if_incoming, neigh_node->addr);
	ret = NET_RX_SUCCESS;

out:
	if (neigh_node)
		neigh_node_free_ref(neigh_node);
	if (orig_node)
		orig_node_free_ref(orig_node);
	return ret;
}
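
/* compare the ttvn carried by a unicast packet with the destination's
 * current ttvn; if the packet is outdated or a roaming phase may be in
 * progress, look up the client again and rewrite the packet destination.
 * Returns 1 when the packet may be routed, 0 when it must be dropped */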
static int check_unicast_ttvn(struct bat_priv *bat_priv,
			      struct sk_buff *skb)
{
	uint8_t curr_ttvn;
	struct orig_node *orig_node;
	struct ethhdr *ethhdr;
	struct hard_iface *primary_if;
	struct unicast_packet *unicast_packet;
	bool tt_poss_change;

	/* I could need to modify it */
	if (skb_cow(skb, sizeof(struct unicast_packet)) < 0)
		return 0;

	unicast_packet = (struct unicast_packet *)skb->data;

	if (is_my_mac(unicast_packet->dest)) {
		tt_poss_change = bat_priv->tt_poss_change;
		curr_ttvn = (uint8_t)atomic_read(&bat_priv->ttvn);
	} else {
		orig_node = orig_hash_find(bat_priv, unicast_packet->dest);

		if (!orig_node)
			return 0;

		curr_ttvn = (uint8_t)atomic_read(&orig_node->last_ttvn);
		tt_poss_change = orig_node->tt_poss_change;
		orig_node_free_ref(orig_node);
	}

	/* Check whether I have to reroute the packet */
	if (seq_before(unicast_packet->ttvn, curr_ttvn) || tt_poss_change) {
		/* Linearize the skb before accessing it */
		if (skb_linearize(skb) < 0)
			return 0;

		ethhdr = (struct ethhdr *)(skb->data +
			sizeof(struct unicast_packet));
		orig_node = transtable_search(bat_priv, ethhdr->h_dest);

		if (!orig_node) {
			if (!is_my_client(bat_priv, ethhdr->h_dest))
				return 0;
			primary_if = primary_if_get_selected(bat_priv);
			if (!primary_if)
				return 0;
			memcpy(unicast_packet->dest,
			       primary_if->net_dev->dev_addr, ETH_ALEN);
			hardif_free_ref(primary_if);
		} else {
			memcpy(unicast_packet->dest, orig_node->orig,
			       ETH_ALEN);
			curr_ttvn = (uint8_t)
				atomic_read(&orig_node->last_ttvn);
			orig_node_free_ref(orig_node);
		}

		bat_dbg(DBG_ROUTES, bat_priv, "TTVN mismatch (old_ttvn %u "
			"new_ttvn %u)! Rerouting unicast packet (for %pM) to "
			"%pM\n", unicast_packet->ttvn, curr_ttvn,
			ethhdr->h_dest, unicast_packet->dest);

		unicast_packet->ttvn = curr_ttvn;
	}
	return 1;
}

int recv_unicast_packet(struct sk_buff *skb, struct hard_iface *recv_if)
{
	struct bat_priv *bat_priv = netdev_priv(recv_if->soft_iface);
	struct unicast_packet *unicast_packet;
	int hdr_size = sizeof(*unicast_packet);

	if (check_unicast_packet(skb, hdr_size) < 0)
		return NET_RX_DROP;

	if (!check_unicast_ttvn(bat_priv, skb))
		return NET_RX_DROP;

	unicast_packet = (struct unicast_packet *)skb->data;

	/* packet for me */
	if (is_my_mac(unicast_packet->dest)) {
		interface_rx(recv_if->soft_iface, skb, recv_if, hdr_size);
		return NET_RX_SUCCESS;
	}

	return route_unicast_packet(skb, recv_if);
}

int recv_ucast_frag_packet(struct sk_buff *skb, struct hard_iface *recv_if)
{
	struct bat_priv *bat_priv = netdev_priv(recv_if->soft_iface);
	struct unicast_frag_packet *unicast_packet;
	int hdr_size = sizeof(*unicast_packet);
	struct sk_buff *new_skb = NULL;
	int ret;

	if (check_unicast_packet(skb, hdr_size) < 0)
		return NET_RX_DROP;

	if (!check_unicast_ttvn(bat_priv, skb))
		return NET_RX_DROP;

	unicast_packet = (struct unicast_frag_packet *)skb->data;

	/* packet for me */
	if (is_my_mac(unicast_packet->dest)) {

		ret = frag_reassemble_skb(skb, bat_priv, &new_skb);

		if (ret == NET_RX_DROP)
			return NET_RX_DROP;

		/* packet was buffered for late merge */
		if (!new_skb)
			return NET_RX_SUCCESS;

		interface_rx(recv_if->soft_iface, new_skb, recv_if,
			     sizeof(struct unicast_packet));
		return NET_RX_SUCCESS;
	}

	return route_unicast_packet(skb, recv_if);
}
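
/* receive handler for batman broadcast packets: drop our own and duplicate
 * broadcasts, apply the seqno window protection, then queue the frame for
 * rebroadcast and hand it up to the soft interface */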
int recv_bcast_packet(struct sk_buff *skb, struct hard_iface *recv_if)
{
	struct bat_priv *bat_priv = netdev_priv(recv_if->soft_iface);
	struct orig_node *orig_node = NULL;
	struct bcast_packet *bcast_packet;
	struct ethhdr *ethhdr;
	int hdr_size = sizeof(*bcast_packet);
	int ret = NET_RX_DROP;
	int32_t seq_diff;

	/* drop packet if it does not have the necessary minimum size */
	if (unlikely(!pskb_may_pull(skb, hdr_size)))
		goto out;

	ethhdr = (struct ethhdr *)skb_mac_header(skb);

	/* packet with broadcast indication but unicast recipient */
	if (!is_broadcast_ether_addr(ethhdr->h_dest))
		goto out;

	/* packet with broadcast sender address */
	if (is_broadcast_ether_addr(ethhdr->h_source))
		goto out;

	/* ignore broadcasts sent by myself */
	if (is_my_mac(ethhdr->h_source))
		goto out;

	bcast_packet = (struct bcast_packet *)skb->data;

	/* ignore broadcasts originated by myself */
	if (is_my_mac(bcast_packet->orig))
		goto out;

	if (bcast_packet->ttl < 2)
		goto out;

	orig_node = orig_hash_find(bat_priv, bcast_packet->orig);
	if (!orig_node)
		goto out;

	spin_lock_bh(&orig_node->bcast_seqno_lock);

	/* check whether the packet is a duplicate */
	if (get_bit_status(orig_node->bcast_bits, orig_node->last_bcast_seqno,
			   ntohl(bcast_packet->seqno)))
		goto spin_unlock;

	seq_diff = ntohl(bcast_packet->seqno) - orig_node->last_bcast_seqno;

	/* check whether the packet is old and the host just restarted. */
	if (window_protected(bat_priv, seq_diff,
			     &orig_node->bcast_seqno_reset))
		goto spin_unlock;

	/* mark broadcast in flood history, update window position
	 * if required. */
	if (bit_get_packet(bat_priv, orig_node->bcast_bits, seq_diff, 1))
		orig_node->last_bcast_seqno = ntohl(bcast_packet->seqno);

	spin_unlock_bh(&orig_node->bcast_seqno_lock);

	/* rebroadcast packet */
	add_bcast_packet_to_list(bat_priv, skb, 1);

	/* broadcast for me */
	interface_rx(recv_if->soft_iface, skb, recv_if, hdr_size);
	ret = NET_RX_SUCCESS;
	goto out;

spin_unlock:
	spin_unlock_bh(&orig_node->bcast_seqno_lock);
out:
	if (orig_node)
		orig_node_free_ref(orig_node);
	return ret;
}
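
/* receive handler for vis packets: drop frames not addressed to us or
 * originated by ourselves and dispatch the rest according to their vis
 * type (server sync or client update) */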
int recv_vis_packet(struct sk_buff *skb, struct hard_iface *recv_if)
{
	struct vis_packet *vis_packet;
	struct ethhdr *ethhdr;
	struct bat_priv *bat_priv = netdev_priv(recv_if->soft_iface);
	int hdr_size = sizeof(*vis_packet);

	/* keep skb linear */
	if (skb_linearize(skb) < 0)
		return NET_RX_DROP;

	if (unlikely(!pskb_may_pull(skb, hdr_size)))
		return NET_RX_DROP;

	vis_packet = (struct vis_packet *)skb->data;
	ethhdr = (struct ethhdr *)skb_mac_header(skb);

	/* not for me */
	if (!is_my_mac(ethhdr->h_dest))
		return NET_RX_DROP;

	/* ignore own packets */
	if (is_my_mac(vis_packet->vis_orig))
		return NET_RX_DROP;

	if (is_my_mac(vis_packet->sender_orig))
		return NET_RX_DROP;

	switch (vis_packet->vis_type) {
	case VIS_TYPE_SERVER_SYNC:
		receive_server_sync_packet(bat_priv, vis_packet,
					   skb_headlen(skb));
		break;

	case VIS_TYPE_CLIENT_UPDATE:
		receive_client_update_packet(bat_priv, vis_packet,
					     skb_headlen(skb));
		break;

	default:	/* ignore unknown packet */
		break;
	}

	/* We take a copy of the data in the packet, so we should
	 * always free the skb. */
	return NET_RX_DROP;
}