routing.c

  1. /*
  2. * Copyright (C) 2007-2011 B.A.T.M.A.N. contributors:
  3. *
  4. * Marek Lindner, Simon Wunderlich
  5. *
  6. * This program is free software; you can redistribute it and/or
  7. * modify it under the terms of version 2 of the GNU General Public
  8. * License as published by the Free Software Foundation.
  9. *
  10. * This program is distributed in the hope that it will be useful, but
  11. * WITHOUT ANY WARRANTY; without even the implied warranty of
  12. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
  13. * General Public License for more details.
  14. *
  15. * You should have received a copy of the GNU General Public License
  16. * along with this program; if not, write to the Free Software
  17. * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
  18. * 02110-1301, USA
  19. *
  20. */
  21. #include "main.h"
  22. #include "routing.h"
  23. #include "send.h"
  24. #include "hash.h"
  25. #include "soft-interface.h"
  26. #include "hard-interface.h"
  27. #include "icmp_socket.h"
  28. #include "translation-table.h"
  29. #include "originator.h"
  30. #include "ring_buffer.h"
  31. #include "vis.h"
  32. #include "aggregation.h"
  33. #include "gateway_common.h"
  34. #include "gateway_client.h"
  35. #include "unicast.h"
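/* Slide our own broadcast window for every originator: shift the
 * bcast_own bitfield that belongs to the given interface by one
 * position and recompute bcast_own_sum, i.e. how many of our own OGMs
 * that originator has echoed back within the sliding window. */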
  36. void slide_own_bcast_window(struct hard_iface *hard_iface)
  37. {
  38. struct bat_priv *bat_priv = netdev_priv(hard_iface->soft_iface);
  39. struct hashtable_t *hash = bat_priv->orig_hash;
  40. struct hlist_node *node;
  41. struct hlist_head *head;
  42. struct orig_node *orig_node;
  43. unsigned long *word;
  44. int i;
  45. size_t word_index;
  46. for (i = 0; i < hash->size; i++) {
  47. head = &hash->table[i];
  48. rcu_read_lock();
  49. hlist_for_each_entry_rcu(orig_node, node, head, hash_entry) {
  50. spin_lock_bh(&orig_node->ogm_cnt_lock);
  51. word_index = hard_iface->if_num * NUM_WORDS;
  52. word = &(orig_node->bcast_own[word_index]);
  53. bit_get_packet(bat_priv, word, 1, 0);
  54. orig_node->bcast_own_sum[hard_iface->if_num] =
  55. bit_packet_count(word);
  56. spin_unlock_bh(&orig_node->ogm_cnt_lock);
  57. }
  58. rcu_read_unlock();
  59. }
  60. }
  61. static void update_TT(struct bat_priv *bat_priv, struct orig_node *orig_node,
  62. const unsigned char *tt_buff, int tt_buff_len)
  63. {
  64. if ((tt_buff_len != orig_node->tt_buff_len) ||
  65. ((tt_buff_len > 0) &&
  66. (orig_node->tt_buff_len > 0) &&
  67. (memcmp(orig_node->tt_buff, tt_buff, tt_buff_len) != 0))) {
  68. if (orig_node->tt_buff_len > 0)
  69. tt_global_del_orig(bat_priv, orig_node,
  70. "originator changed tt");
  71. if ((tt_buff_len > 0) && (tt_buff))
  72. tt_global_add_orig(bat_priv, orig_node,
  73. tt_buff, tt_buff_len);
  74. }
  75. }
  76. static void update_route(struct bat_priv *bat_priv, struct orig_node *orig_node,
  77. struct neigh_node *neigh_node,
  78. const unsigned char *tt_buff, int tt_buff_len)
  79. {
  80. struct neigh_node *curr_router;
  81. curr_router = orig_node_get_router(orig_node);
  82. /* route deleted */
  83. if ((curr_router) && (!neigh_node)) {
  84. bat_dbg(DBG_ROUTES, bat_priv, "Deleting route towards: %pM\n",
  85. orig_node->orig);
  86. tt_global_del_orig(bat_priv, orig_node,
  87. "originator timed out");
  88. /* route added */
  89. } else if ((!curr_router) && (neigh_node)) {
  90. bat_dbg(DBG_ROUTES, bat_priv,
  91. "Adding route towards: %pM (via %pM)\n",
  92. orig_node->orig, neigh_node->addr);
  93. tt_global_add_orig(bat_priv, orig_node,
  94. tt_buff, tt_buff_len);
  95. /* route changed */
  96. } else if (neigh_node && curr_router) {
  97. bat_dbg(DBG_ROUTES, bat_priv,
  98. "Changing route towards: %pM "
  99. "(now via %pM - was via %pM)\n",
  100. orig_node->orig, neigh_node->addr,
  101. curr_router->addr);
  102. }
  103. if (curr_router)
  104. neigh_node_free_ref(curr_router);
  105. /* increase refcount of new best neighbor */
  106. if (neigh_node && !atomic_inc_not_zero(&neigh_node->refcount))
  107. neigh_node = NULL;
  108. spin_lock_bh(&orig_node->neigh_list_lock);
  109. rcu_assign_pointer(orig_node->router, neigh_node);
  110. spin_unlock_bh(&orig_node->neigh_list_lock);
  111. /* decrease refcount of previous best neighbor */
  112. if (curr_router)
  113. neigh_node_free_ref(curr_router);
  114. }
  115. void update_routes(struct bat_priv *bat_priv, struct orig_node *orig_node,
  116. struct neigh_node *neigh_node, const unsigned char *tt_buff,
  117. int tt_buff_len)
  118. {
  119. struct neigh_node *router = NULL;
  120. if (!orig_node)
  121. goto out;
  122. router = orig_node_get_router(orig_node);
  123. if (router != neigh_node)
  124. update_route(bat_priv, orig_node, neigh_node,
  125. tt_buff, tt_buff_len);
  126. /* may be just TT changed */
  127. else
  128. update_TT(bat_priv, orig_node, tt_buff, tt_buff_len);
  129. out:
  130. if (router)
  131. neigh_node_free_ref(router);
  132. }
  133. static int is_bidirectional_neigh(struct orig_node *orig_node,
  134. struct orig_node *orig_neigh_node,
  135. struct batman_packet *batman_packet,
  136. struct hard_iface *if_incoming)
  137. {
  138. struct bat_priv *bat_priv = netdev_priv(if_incoming->soft_iface);
  139. struct neigh_node *neigh_node = NULL, *tmp_neigh_node;
  140. struct hlist_node *node;
  141. unsigned char total_count;
  142. uint8_t orig_eq_count, neigh_rq_count, tq_own;
  143. int tq_asym_penalty, ret = 0;
  144. /* find corresponding one hop neighbor */
  145. rcu_read_lock();
  146. hlist_for_each_entry_rcu(tmp_neigh_node, node,
  147. &orig_neigh_node->neigh_list, list) {
  148. if (!compare_eth(tmp_neigh_node->addr, orig_neigh_node->orig))
  149. continue;
  150. if (tmp_neigh_node->if_incoming != if_incoming)
  151. continue;
  152. if (!atomic_inc_not_zero(&tmp_neigh_node->refcount))
  153. continue;
  154. neigh_node = tmp_neigh_node;
  155. break;
  156. }
  157. rcu_read_unlock();
  158. if (!neigh_node)
  159. neigh_node = create_neighbor(orig_neigh_node,
  160. orig_neigh_node,
  161. orig_neigh_node->orig,
  162. if_incoming);
  163. if (!neigh_node)
  164. goto out;
  165. /* if orig_node is direct neighbour update neigh_node last_valid */
  166. if (orig_node == orig_neigh_node)
  167. neigh_node->last_valid = jiffies;
  168. orig_node->last_valid = jiffies;
  169. /* find packet count of corresponding one hop neighbor */
  170. spin_lock_bh(&orig_node->ogm_cnt_lock);
  171. orig_eq_count = orig_neigh_node->bcast_own_sum[if_incoming->if_num];
  172. neigh_rq_count = neigh_node->real_packet_count;
  173. spin_unlock_bh(&orig_node->ogm_cnt_lock);
  174. /* take care not to get a value bigger than 100 % */
  175. total_count = (orig_eq_count > neigh_rq_count ?
  176. neigh_rq_count : orig_eq_count);
  177. /* if we have too few packets (too little data) we set tq_own to zero */
  178. /* if we receive too few packets the link is not considered bidirectional */
  179. if ((total_count < TQ_LOCAL_BIDRECT_SEND_MINIMUM) ||
  180. (neigh_rq_count < TQ_LOCAL_BIDRECT_RECV_MINIMUM))
  181. tq_own = 0;
  182. else
  183. /* neigh_node->real_packet_count is never zero as we
  184. * only purge old information when getting new
  185. * information */
  186. tq_own = (TQ_MAX_VALUE * total_count) / neigh_rq_count;
  187. /*
  188. * 1 - ((1-x) ** 3), normalized to TQ_MAX_VALUE: this only
  189. * slightly affects nearly-symmetric links, but punishes
  190. * asymmetric links much more. The result is a value
  191. * between 0 and TQ_MAX_VALUE.
  192. */
  193. tq_asym_penalty = TQ_MAX_VALUE - (TQ_MAX_VALUE *
  194. (TQ_LOCAL_WINDOW_SIZE - neigh_rq_count) *
  195. (TQ_LOCAL_WINDOW_SIZE - neigh_rq_count) *
  196. (TQ_LOCAL_WINDOW_SIZE - neigh_rq_count)) /
  197. (TQ_LOCAL_WINDOW_SIZE *
  198. TQ_LOCAL_WINDOW_SIZE *
  199. TQ_LOCAL_WINDOW_SIZE);
  200. batman_packet->tq = ((batman_packet->tq * tq_own * tq_asym_penalty) /
  201. (TQ_MAX_VALUE * TQ_MAX_VALUE));
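/* Illustrative numbers (assuming, e.g., TQ_LOCAL_WINDOW_SIZE = 64 and
 * TQ_MAX_VALUE = 255 as defined in main.h): a neighbor from which we
 * received 32 of the last 64 OGMs gets
 *   tq_asym_penalty = 255 - 255 * 32^3 / 64^3 = 255 - 31 = 224,
 * while one with 56 of 64 received OGMs gets
 *   tq_asym_penalty = 255 - 255 * 8^3 / 64^3 = 255 (integer division),
 * i.e. almost no penalty. The final tq combines the received tq, our
 * own link quality tq_own and this penalty, scaled back into the
 * 0..TQ_MAX_VALUE range. */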
  202. bat_dbg(DBG_BATMAN, bat_priv,
  203. "bidirectional: "
  204. "orig = %-15pM neigh = %-15pM => own_bcast = %2i, "
  205. "real recv = %2i, local tq: %3i, asym_penalty: %3i, "
  206. "total tq: %3i\n",
  207. orig_node->orig, orig_neigh_node->orig, total_count,
  208. neigh_rq_count, tq_own, tq_asym_penalty, batman_packet->tq);
  209. /* if the link has the minimum required transmission quality
  210. * consider it bidirectional */
  211. if (batman_packet->tq >= TQ_TOTAL_BIDRECT_LIMIT)
  212. ret = 1;
  213. out:
  214. if (neigh_node)
  215. neigh_node_free_ref(neigh_node);
  216. return ret;
  217. }
  218. /* caller must hold the neigh_list_lock */
  219. void bonding_candidate_del(struct orig_node *orig_node,
  220. struct neigh_node *neigh_node)
  221. {
  222. /* this neighbor is not part of our candidate list */
  223. if (list_empty(&neigh_node->bonding_list))
  224. goto out;
  225. list_del_rcu(&neigh_node->bonding_list);
  226. INIT_LIST_HEAD(&neigh_node->bonding_list);
  227. neigh_node_free_ref(neigh_node);
  228. atomic_dec(&orig_node->bond_candidates);
  229. out:
  230. return;
  231. }
  232. static void bonding_candidate_add(struct orig_node *orig_node,
  233. struct neigh_node *neigh_node)
  234. {
  235. struct hlist_node *node;
  236. struct neigh_node *tmp_neigh_node, *router = NULL;
  237. uint8_t interference_candidate = 0;
  238. spin_lock_bh(&orig_node->neigh_list_lock);
  239. /* only consider if it has the same primary address ... */
  240. if (!compare_eth(orig_node->orig,
  241. neigh_node->orig_node->primary_addr))
  242. goto candidate_del;
  243. router = orig_node_get_router(orig_node);
  244. if (!router)
  245. goto candidate_del;
  246. /* ... and is good enough to be considered */
  247. if (neigh_node->tq_avg < router->tq_avg - BONDING_TQ_THRESHOLD)
  248. goto candidate_del;
  249. /**
  250. * check if we have another candidate with the same mac address or
  251. * interface. If we do, we won't select this candidate because of
  252. * possible interference.
  253. */
  254. hlist_for_each_entry_rcu(tmp_neigh_node, node,
  255. &orig_node->neigh_list, list) {
  256. if (tmp_neigh_node == neigh_node)
  257. continue;
  258. /* we only care if the other candidate is even
  259. * considered as a candidate. */
  260. if (list_empty(&tmp_neigh_node->bonding_list))
  261. continue;
  262. if ((neigh_node->if_incoming == tmp_neigh_node->if_incoming) ||
  263. (compare_eth(neigh_node->addr, tmp_neigh_node->addr))) {
  264. interference_candidate = 1;
  265. break;
  266. }
  267. }
  268. /* don't care further if it is an interference candidate */
  269. if (interference_candidate)
  270. goto candidate_del;
  271. /* this neighbor already is part of our candidate list */
  272. if (!list_empty(&neigh_node->bonding_list))
  273. goto out;
  274. if (!atomic_inc_not_zero(&neigh_node->refcount))
  275. goto out;
  276. list_add_rcu(&neigh_node->bonding_list, &orig_node->bond_list);
  277. atomic_inc(&orig_node->bond_candidates);
  278. goto out;
  279. candidate_del:
  280. bonding_candidate_del(orig_node, neigh_node);
  281. out:
  282. spin_unlock_bh(&orig_node->neigh_list_lock);
  283. if (router)
  284. neigh_node_free_ref(router);
  285. }
  286. /* copy primary address for bonding */
  287. static void bonding_save_primary(const struct orig_node *orig_node,
  288. struct orig_node *orig_neigh_node,
  289. const struct batman_packet *batman_packet)
  290. {
  291. if (!(batman_packet->flags & PRIMARIES_FIRST_HOP))
  292. return;
  293. memcpy(orig_neigh_node->primary_addr, orig_node->orig, ETH_ALEN);
  294. }
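/* Update the originator entry for a received OGM: refresh the sending
 * neighbor's ranking (tq ring buffer, last_valid, bonding candidacy)
 * and switch the route to that neighbor if it offers a better tq_avg
 * than the current router, or an equal tq_avg while echoing more of
 * our own broadcasts on the incoming interface. Gateway flags are
 * synchronized afterwards and may trigger a new gateway election. */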
  295. static void update_orig(struct bat_priv *bat_priv, struct orig_node *orig_node,
  296. const struct ethhdr *ethhdr,
  297. const struct batman_packet *batman_packet,
  298. struct hard_iface *if_incoming,
  299. const unsigned char *tt_buff, int tt_buff_len,
  300. char is_duplicate)
  301. {
  302. struct neigh_node *neigh_node = NULL, *tmp_neigh_node = NULL;
  303. struct neigh_node *router = NULL;
  304. struct orig_node *orig_node_tmp;
  305. struct hlist_node *node;
  306. int tmp_tt_buff_len;
  307. uint8_t bcast_own_sum_orig, bcast_own_sum_neigh;
  308. bat_dbg(DBG_BATMAN, bat_priv, "update_originator(): "
  309. "Searching and updating originator entry of received packet\n");
  310. rcu_read_lock();
  311. hlist_for_each_entry_rcu(tmp_neigh_node, node,
  312. &orig_node->neigh_list, list) {
  313. if (compare_eth(tmp_neigh_node->addr, ethhdr->h_source) &&
  314. (tmp_neigh_node->if_incoming == if_incoming) &&
  315. atomic_inc_not_zero(&tmp_neigh_node->refcount)) {
  316. if (neigh_node)
  317. neigh_node_free_ref(neigh_node);
  318. neigh_node = tmp_neigh_node;
  319. continue;
  320. }
  321. if (is_duplicate)
  322. continue;
  323. spin_lock_bh(&tmp_neigh_node->tq_lock);
  324. ring_buffer_set(tmp_neigh_node->tq_recv,
  325. &tmp_neigh_node->tq_index, 0);
  326. tmp_neigh_node->tq_avg =
  327. ring_buffer_avg(tmp_neigh_node->tq_recv);
  328. spin_unlock_bh(&tmp_neigh_node->tq_lock);
  329. }
  330. if (!neigh_node) {
  331. struct orig_node *orig_tmp;
  332. orig_tmp = get_orig_node(bat_priv, ethhdr->h_source);
  333. if (!orig_tmp)
  334. goto unlock;
  335. neigh_node = create_neighbor(orig_node, orig_tmp,
  336. ethhdr->h_source, if_incoming);
  337. orig_node_free_ref(orig_tmp);
  338. if (!neigh_node)
  339. goto unlock;
  340. } else
  341. bat_dbg(DBG_BATMAN, bat_priv,
  342. "Updating existing last-hop neighbor of originator\n");
  343. rcu_read_unlock();
  344. orig_node->flags = batman_packet->flags;
  345. neigh_node->last_valid = jiffies;
  346. spin_lock_bh(&neigh_node->tq_lock);
  347. ring_buffer_set(neigh_node->tq_recv,
  348. &neigh_node->tq_index,
  349. batman_packet->tq);
  350. neigh_node->tq_avg = ring_buffer_avg(neigh_node->tq_recv);
  351. spin_unlock_bh(&neigh_node->tq_lock);
  352. if (!is_duplicate) {
  353. orig_node->last_ttl = batman_packet->ttl;
  354. neigh_node->last_ttl = batman_packet->ttl;
  355. }
  356. bonding_candidate_add(orig_node, neigh_node);
  357. tmp_tt_buff_len = (tt_buff_len > batman_packet->num_tt * ETH_ALEN ?
  358. batman_packet->num_tt * ETH_ALEN : tt_buff_len);
  359. /* if this neighbor already is our next hop there is nothing
  360. * to change */
  361. router = orig_node_get_router(orig_node);
  362. if (router == neigh_node)
  363. goto update_tt;
  364. /* if this neighbor does not offer a better TQ we won't consider it */
  365. if (router && (router->tq_avg > neigh_node->tq_avg))
  366. goto update_tt;
  367. /* if the TQ is the same and the link is not more symmetric we
  368. * won't consider it either */
  369. if (router && (neigh_node->tq_avg == router->tq_avg)) {
  370. orig_node_tmp = router->orig_node;
  371. spin_lock_bh(&orig_node_tmp->ogm_cnt_lock);
  372. bcast_own_sum_orig =
  373. orig_node_tmp->bcast_own_sum[if_incoming->if_num];
  374. spin_unlock_bh(&orig_node_tmp->ogm_cnt_lock);
  375. orig_node_tmp = neigh_node->orig_node;
  376. spin_lock_bh(&orig_node_tmp->ogm_cnt_lock);
  377. bcast_own_sum_neigh =
  378. orig_node_tmp->bcast_own_sum[if_incoming->if_num];
  379. spin_unlock_bh(&orig_node_tmp->ogm_cnt_lock);
  380. if (bcast_own_sum_orig >= bcast_own_sum_neigh)
  381. goto update_tt;
  382. }
  383. update_routes(bat_priv, orig_node, neigh_node,
  384. tt_buff, tmp_tt_buff_len);
  385. goto update_gw;
  386. update_tt:
  387. update_routes(bat_priv, orig_node, router,
  388. tt_buff, tmp_tt_buff_len);
  389. update_gw:
  390. if (orig_node->gw_flags != batman_packet->gw_flags)
  391. gw_node_update(bat_priv, orig_node, batman_packet->gw_flags);
  392. orig_node->gw_flags = batman_packet->gw_flags;
  393. /* restart gateway selection if fast or late switching was enabled */
  394. if ((orig_node->gw_flags) &&
  395. (atomic_read(&bat_priv->gw_mode) == GW_MODE_CLIENT) &&
  396. (atomic_read(&bat_priv->gw_sel_class) > 2))
  397. gw_check_election(bat_priv, orig_node);
  398. goto out;
  399. unlock:
  400. rcu_read_unlock();
  401. out:
  402. if (neigh_node)
  403. neigh_node_free_ref(neigh_node);
  404. if (router)
  405. neigh_node_free_ref(router);
  406. }
  407. /* checks whether the host restarted and is in the protection time.
  408. * returns:
  409. * 0 if the packet is to be accepted
  410. * 1 if the packet is to be ignored.
  411. */
  412. static int window_protected(struct bat_priv *bat_priv,
  413. int32_t seq_num_diff,
  414. unsigned long *last_reset)
  415. {
  416. if ((seq_num_diff <= -TQ_LOCAL_WINDOW_SIZE)
  417. || (seq_num_diff >= EXPECTED_SEQNO_RANGE)) {
  418. if (time_after(jiffies, *last_reset +
  419. msecs_to_jiffies(RESET_PROTECTION_MS))) {
  420. *last_reset = jiffies;
  421. bat_dbg(DBG_BATMAN, bat_priv,
  422. "old packet received, start protection\n");
  423. return 0;
  424. } else
  425. return 1;
  426. }
  427. return 0;
  428. }
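/* Example: after a reboot a node restarts its OGM seqno at a low value
 * while its neighbors still remember a much higher last seqno, so
 * seq_num_diff falls outside the tolerated window. The first such
 * out-of-range packet seen more than RESET_PROTECTION_MS after the
 * last reset re-arms the timer and is accepted, letting the window
 * resynchronize; further out-of-range packets within the protection
 * time are ignored. */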
  429. /* processes a batman packet for all interfaces, adjusts the sequence number and
  430. * finds out whether it is a duplicate.
  431. * returns:
  432. * 1 the packet is a duplicate
  433. * 0 the packet has not yet been received
  434. * -1 the packet is old and has been received while the seqno window
  435. * was protected. Caller should drop it.
  436. */
  437. static char count_real_packets(const struct ethhdr *ethhdr,
  438. const struct batman_packet *batman_packet,
  439. const struct hard_iface *if_incoming)
  440. {
  441. struct bat_priv *bat_priv = netdev_priv(if_incoming->soft_iface);
  442. struct orig_node *orig_node;
  443. struct neigh_node *tmp_neigh_node;
  444. struct hlist_node *node;
  445. char is_duplicate = 0;
  446. int32_t seq_diff;
  447. int need_update = 0;
  448. int set_mark, ret = -1;
  449. orig_node = get_orig_node(bat_priv, batman_packet->orig);
  450. if (!orig_node)
  451. return 0;
  452. spin_lock_bh(&orig_node->ogm_cnt_lock);
  453. seq_diff = batman_packet->seqno - orig_node->last_real_seqno;
  454. /* signal to the caller that the packet is to be dropped. */
  455. if (window_protected(bat_priv, seq_diff,
  456. &orig_node->batman_seqno_reset))
  457. goto out;
  458. rcu_read_lock();
  459. hlist_for_each_entry_rcu(tmp_neigh_node, node,
  460. &orig_node->neigh_list, list) {
  461. is_duplicate |= get_bit_status(tmp_neigh_node->real_bits,
  462. orig_node->last_real_seqno,
  463. batman_packet->seqno);
  464. if (compare_eth(tmp_neigh_node->addr, ethhdr->h_source) &&
  465. (tmp_neigh_node->if_incoming == if_incoming))
  466. set_mark = 1;
  467. else
  468. set_mark = 0;
  469. /* if the window moved, set the update flag. */
  470. need_update |= bit_get_packet(bat_priv,
  471. tmp_neigh_node->real_bits,
  472. seq_diff, set_mark);
  473. tmp_neigh_node->real_packet_count =
  474. bit_packet_count(tmp_neigh_node->real_bits);
  475. }
  476. rcu_read_unlock();
  477. if (need_update) {
  478. bat_dbg(DBG_BATMAN, bat_priv,
  479. "updating last_seqno: old %d, new %d\n",
  480. orig_node->last_real_seqno, batman_packet->seqno);
  481. orig_node->last_real_seqno = batman_packet->seqno;
  482. }
  483. ret = is_duplicate;
  484. out:
  485. spin_unlock_bh(&orig_node->ogm_cnt_lock);
  486. orig_node_free_ref(orig_node);
  487. return ret;
  488. }
  489. void receive_bat_packet(const struct ethhdr *ethhdr,
  490. struct batman_packet *batman_packet,
  491. const unsigned char *tt_buff, int tt_buff_len,
  492. struct hard_iface *if_incoming)
  493. {
  494. struct bat_priv *bat_priv = netdev_priv(if_incoming->soft_iface);
  495. struct hard_iface *hard_iface;
  496. struct orig_node *orig_neigh_node, *orig_node;
  497. struct neigh_node *router = NULL, *router_router = NULL;
  498. struct neigh_node *orig_neigh_router = NULL;
  499. char has_directlink_flag;
  500. char is_my_addr = 0, is_my_orig = 0, is_my_oldorig = 0;
  501. char is_broadcast = 0, is_bidirectional, is_single_hop_neigh;
  502. char is_duplicate;
  503. uint32_t if_incoming_seqno;
  504. /* Silently drop when the batman packet is actually not a
  505. * correct packet.
  506. *
  507. * This might happen if a packet is padded (e.g. Ethernet has a
  508. * minimum frame length of 64 bytes) and the aggregation interprets
  509. * it as an additional length.
  510. *
  511. * TODO: A more sane solution would be to have a bit in the
  512. * batman_packet to detect whether the packet is the last
  513. * packet in an aggregation. Here we expect that the padding
  514. * is always zero (or not 0x01)
  515. */
  516. if (batman_packet->packet_type != BAT_PACKET)
  517. return;
  518. /* could be changed by schedule_own_packet() */
  519. if_incoming_seqno = atomic_read(&if_incoming->seqno);
  520. has_directlink_flag = (batman_packet->flags & DIRECTLINK ? 1 : 0);
  521. is_single_hop_neigh = (compare_eth(ethhdr->h_source,
  522. batman_packet->orig) ? 1 : 0);
  523. bat_dbg(DBG_BATMAN, bat_priv,
  524. "Received BATMAN packet via NB: %pM, IF: %s [%pM] "
  525. "(from OG: %pM, via prev OG: %pM, seqno %d, tq %d, "
  526. "TTL %d, V %d, IDF %d)\n",
  527. ethhdr->h_source, if_incoming->net_dev->name,
  528. if_incoming->net_dev->dev_addr, batman_packet->orig,
  529. batman_packet->prev_sender, batman_packet->seqno,
  530. batman_packet->tq, batman_packet->ttl, batman_packet->version,
  531. has_directlink_flag);
  532. rcu_read_lock();
  533. list_for_each_entry_rcu(hard_iface, &hardif_list, list) {
  534. if (hard_iface->if_status != IF_ACTIVE)
  535. continue;
  536. if (hard_iface->soft_iface != if_incoming->soft_iface)
  537. continue;
  538. if (compare_eth(ethhdr->h_source,
  539. hard_iface->net_dev->dev_addr))
  540. is_my_addr = 1;
  541. if (compare_eth(batman_packet->orig,
  542. hard_iface->net_dev->dev_addr))
  543. is_my_orig = 1;
  544. if (compare_eth(batman_packet->prev_sender,
  545. hard_iface->net_dev->dev_addr))
  546. is_my_oldorig = 1;
  547. if (is_broadcast_ether_addr(ethhdr->h_source))
  548. is_broadcast = 1;
  549. }
  550. rcu_read_unlock();
  551. if (batman_packet->version != COMPAT_VERSION) {
  552. bat_dbg(DBG_BATMAN, bat_priv,
  553. "Drop packet: incompatible batman version (%i)\n",
  554. batman_packet->version);
  555. return;
  556. }
  557. if (is_my_addr) {
  558. bat_dbg(DBG_BATMAN, bat_priv,
  559. "Drop packet: received my own broadcast (sender: %pM"
  560. ")\n",
  561. ethhdr->h_source);
  562. return;
  563. }
  564. if (is_broadcast) {
  565. bat_dbg(DBG_BATMAN, bat_priv, "Drop packet: "
  566. "ignoring all packets with broadcast source addr (sender: %pM"
  567. ")\n", ethhdr->h_source);
  568. return;
  569. }
  570. if (is_my_orig) {
  571. unsigned long *word;
  572. int offset;
  573. orig_neigh_node = get_orig_node(bat_priv, ethhdr->h_source);
  574. if (!orig_neigh_node)
  575. return;
  576. /* neighbor has to indicate direct link and it has to
  577. * come via the corresponding interface */
  578. /* if received seqno equals last sent seqno save new
  579. * seqno for bidirectional check */
  580. if (has_directlink_flag &&
  581. compare_eth(if_incoming->net_dev->dev_addr,
  582. batman_packet->orig) &&
  583. (batman_packet->seqno - if_incoming_seqno + 2 == 0)) {
  584. offset = if_incoming->if_num * NUM_WORDS;
  585. spin_lock_bh(&orig_neigh_node->ogm_cnt_lock);
  586. word = &(orig_neigh_node->bcast_own[offset]);
  587. bit_mark(word, 0);
  588. orig_neigh_node->bcast_own_sum[if_incoming->if_num] =
  589. bit_packet_count(word);
  590. spin_unlock_bh(&orig_neigh_node->ogm_cnt_lock);
  591. }
  592. bat_dbg(DBG_BATMAN, bat_priv, "Drop packet: "
  593. "originator packet from myself (via neighbor)\n");
  594. orig_node_free_ref(orig_neigh_node);
  595. return;
  596. }
  597. if (is_my_oldorig) {
  598. bat_dbg(DBG_BATMAN, bat_priv,
  599. "Drop packet: ignoring all rebroadcast echos (sender: "
  600. "%pM)\n", ethhdr->h_source);
  601. return;
  602. }
  603. orig_node = get_orig_node(bat_priv, batman_packet->orig);
  604. if (!orig_node)
  605. return;
  606. is_duplicate = count_real_packets(ethhdr, batman_packet, if_incoming);
  607. if (is_duplicate == -1) {
  608. bat_dbg(DBG_BATMAN, bat_priv,
  609. "Drop packet: packet within seqno protection time "
  610. "(sender: %pM)\n", ethhdr->h_source);
  611. goto out;
  612. }
  613. if (batman_packet->tq == 0) {
  614. bat_dbg(DBG_BATMAN, bat_priv,
  615. "Drop packet: originator packet with tq equal 0\n");
  616. goto out;
  617. }
  618. router = orig_node_get_router(orig_node);
  619. if (router)
  620. router_router = orig_node_get_router(router->orig_node);
  621. /* avoid temporary routing loops */
  622. if (router && router_router &&
  623. (compare_eth(router->addr, batman_packet->prev_sender)) &&
  624. !(compare_eth(batman_packet->orig, batman_packet->prev_sender)) &&
  625. (compare_eth(router->addr, router_router->addr))) {
  626. bat_dbg(DBG_BATMAN, bat_priv,
  627. "Drop packet: ignoring all rebroadcast packets that "
  628. "may make me loop (sender: %pM)\n", ethhdr->h_source);
  629. goto out;
  630. }
  631. /* if sender is a direct neighbor the sender mac equals
  632. * originator mac */
  633. orig_neigh_node = (is_single_hop_neigh ?
  634. orig_node :
  635. get_orig_node(bat_priv, ethhdr->h_source));
  636. if (!orig_neigh_node)
  637. goto out;
  638. orig_neigh_router = orig_node_get_router(orig_neigh_node);
  639. /* drop packet if sender is not a direct neighbor and if we
  640. * don't route towards it */
  641. if (!is_single_hop_neigh && (!orig_neigh_router)) {
  642. bat_dbg(DBG_BATMAN, bat_priv,
  643. "Drop packet: OGM via unknown neighbor!\n");
  644. goto out_neigh;
  645. }
  646. is_bidirectional = is_bidirectional_neigh(orig_node, orig_neigh_node,
  647. batman_packet, if_incoming);
  648. bonding_save_primary(orig_node, orig_neigh_node, batman_packet);
  649. /* update ranking if it is not a duplicate or has the same
  650. * seqno and similar ttl as the non-duplicate */
  651. if (is_bidirectional &&
  652. (!is_duplicate ||
  653. ((orig_node->last_real_seqno == batman_packet->seqno) &&
  654. (orig_node->last_ttl - 3 <= batman_packet->ttl))))
  655. update_orig(bat_priv, orig_node, ethhdr, batman_packet,
  656. if_incoming, tt_buff, tt_buff_len, is_duplicate);
  657. /* is single hop (direct) neighbor */
  658. if (is_single_hop_neigh) {
  659. /* mark direct link on incoming interface */
  660. schedule_forward_packet(orig_node, ethhdr, batman_packet,
  661. 1, tt_buff_len, if_incoming);
  662. bat_dbg(DBG_BATMAN, bat_priv, "Forwarding packet: "
  663. "rebroadcast neighbor packet with direct link flag\n");
  664. goto out_neigh;
  665. }
  666. /* multihop originator */
  667. if (!is_bidirectional) {
  668. bat_dbg(DBG_BATMAN, bat_priv,
  669. "Drop packet: not received via bidirectional link\n");
  670. goto out_neigh;
  671. }
  672. if (is_duplicate) {
  673. bat_dbg(DBG_BATMAN, bat_priv,
  674. "Drop packet: duplicate packet received\n");
  675. goto out_neigh;
  676. }
  677. bat_dbg(DBG_BATMAN, bat_priv,
  678. "Forwarding packet: rebroadcast originator packet\n");
  679. schedule_forward_packet(orig_node, ethhdr, batman_packet,
  680. 0, tt_buff_len, if_incoming);
  681. out_neigh:
  682. if ((orig_neigh_node) && (!is_single_hop_neigh))
  683. orig_node_free_ref(orig_neigh_node);
  684. out:
  685. if (router)
  686. neigh_node_free_ref(router);
  687. if (router_router)
  688. neigh_node_free_ref(router_router);
  689. if (orig_neigh_router)
  690. neigh_node_free_ref(orig_neigh_router);
  691. orig_node_free_ref(orig_node);
  692. }
  693. int recv_bat_packet(struct sk_buff *skb, struct hard_iface *hard_iface)
  694. {
  695. struct ethhdr *ethhdr;
  696. /* drop packet if it does not have the necessary minimum size */
  697. if (unlikely(!pskb_may_pull(skb, sizeof(struct batman_packet))))
  698. return NET_RX_DROP;
  699. ethhdr = (struct ethhdr *)skb_mac_header(skb);
  700. /* packet with broadcast indication but unicast recipient */
  701. if (!is_broadcast_ether_addr(ethhdr->h_dest))
  702. return NET_RX_DROP;
  703. /* packet with broadcast sender address */
  704. if (is_broadcast_ether_addr(ethhdr->h_source))
  705. return NET_RX_DROP;
  706. /* create a copy of the skb, if needed, to modify it. */
  707. if (skb_cow(skb, 0) < 0)
  708. return NET_RX_DROP;
  709. /* keep skb linear */
  710. if (skb_linearize(skb) < 0)
  711. return NET_RX_DROP;
  712. ethhdr = (struct ethhdr *)skb_mac_header(skb);
  713. receive_aggr_bat_packet(ethhdr,
  714. skb->data,
  715. skb_headlen(skb),
  716. hard_iface);
  717. kfree_skb(skb);
  718. return NET_RX_SUCCESS;
  719. }
  720. static int recv_my_icmp_packet(struct bat_priv *bat_priv,
  721. struct sk_buff *skb, size_t icmp_len)
  722. {
  723. struct hard_iface *primary_if = NULL;
  724. struct orig_node *orig_node = NULL;
  725. struct neigh_node *router = NULL;
  726. struct icmp_packet_rr *icmp_packet;
  727. int ret = NET_RX_DROP;
  728. icmp_packet = (struct icmp_packet_rr *)skb->data;
  729. /* add data to device queue */
  730. if (icmp_packet->msg_type != ECHO_REQUEST) {
  731. bat_socket_receive_packet(icmp_packet, icmp_len);
  732. goto out;
  733. }
  734. primary_if = primary_if_get_selected(bat_priv);
  735. if (!primary_if)
  736. goto out;
  737. /* answer echo request (ping) */
  738. /* get routing information */
  739. orig_node = orig_hash_find(bat_priv, icmp_packet->orig);
  740. if (!orig_node)
  741. goto out;
  742. router = orig_node_get_router(orig_node);
  743. if (!router)
  744. goto out;
  745. /* create a copy of the skb, if needed, to modify it. */
  746. if (skb_cow(skb, sizeof(struct ethhdr)) < 0)
  747. goto out;
  748. icmp_packet = (struct icmp_packet_rr *)skb->data;
  749. memcpy(icmp_packet->dst, icmp_packet->orig, ETH_ALEN);
  750. memcpy(icmp_packet->orig, primary_if->net_dev->dev_addr, ETH_ALEN);
  751. icmp_packet->msg_type = ECHO_REPLY;
  752. icmp_packet->ttl = TTL;
  753. send_skb_packet(skb, router->if_incoming, router->addr);
  754. ret = NET_RX_SUCCESS;
  755. out:
  756. if (primary_if)
  757. hardif_free_ref(primary_if);
  758. if (router)
  759. neigh_node_free_ref(router);
  760. if (orig_node)
  761. orig_node_free_ref(orig_node);
  762. return ret;
  763. }
  764. static int recv_icmp_ttl_exceeded(struct bat_priv *bat_priv,
  765. struct sk_buff *skb)
  766. {
  767. struct hard_iface *primary_if = NULL;
  768. struct orig_node *orig_node = NULL;
  769. struct neigh_node *router = NULL;
  770. struct icmp_packet *icmp_packet;
  771. int ret = NET_RX_DROP;
  772. icmp_packet = (struct icmp_packet *)skb->data;
  773. /* send TTL exceeded if packet is an echo request (traceroute) */
  774. if (icmp_packet->msg_type != ECHO_REQUEST) {
  775. pr_debug("Warning - can't forward icmp packet from %pM to "
  776. "%pM: ttl exceeded\n", icmp_packet->orig,
  777. icmp_packet->dst);
  778. goto out;
  779. }
  780. primary_if = primary_if_get_selected(bat_priv);
  781. if (!primary_if)
  782. goto out;
  783. /* get routing information */
  784. orig_node = orig_hash_find(bat_priv, icmp_packet->orig);
  785. if (!orig_node)
  786. goto out;
  787. router = orig_node_get_router(orig_node);
  788. if (!router)
  789. goto out;
  790. /* create a copy of the skb, if needed, to modify it. */
  791. if (skb_cow(skb, sizeof(struct ethhdr)) < 0)
  792. goto out;
  793. icmp_packet = (struct icmp_packet *)skb->data;
  794. memcpy(icmp_packet->dst, icmp_packet->orig, ETH_ALEN);
  795. memcpy(icmp_packet->orig, primary_if->net_dev->dev_addr, ETH_ALEN);
  796. icmp_packet->msg_type = TTL_EXCEEDED;
  797. icmp_packet->ttl = TTL;
  798. send_skb_packet(skb, router->if_incoming, router->addr);
  799. ret = NET_RX_SUCCESS;
  800. out:
  801. if (primary_if)
  802. hardif_free_ref(primary_if);
  803. if (router)
  804. neigh_node_free_ref(router);
  805. if (orig_node)
  806. orig_node_free_ref(orig_node);
  807. return ret;
  808. }
  809. int recv_icmp_packet(struct sk_buff *skb, struct hard_iface *recv_if)
  810. {
  811. struct bat_priv *bat_priv = netdev_priv(recv_if->soft_iface);
  812. struct icmp_packet_rr *icmp_packet;
  813. struct ethhdr *ethhdr;
  814. struct orig_node *orig_node = NULL;
  815. struct neigh_node *router = NULL;
  816. int hdr_size = sizeof(struct icmp_packet);
  817. int ret = NET_RX_DROP;
  818. /**
  819. * only use the larger record-route header if the packet can carry it
  820. */
  821. if (skb->len >= sizeof(struct icmp_packet_rr))
  822. hdr_size = sizeof(struct icmp_packet_rr);
  823. /* drop packet if it does not have the necessary minimum size */
  824. if (unlikely(!pskb_may_pull(skb, hdr_size)))
  825. goto out;
  826. ethhdr = (struct ethhdr *)skb_mac_header(skb);
  827. /* packet with unicast indication but broadcast recipient */
  828. if (is_broadcast_ether_addr(ethhdr->h_dest))
  829. goto out;
  830. /* packet with broadcast sender address */
  831. if (is_broadcast_ether_addr(ethhdr->h_source))
  832. goto out;
  833. /* not for me */
  834. if (!is_my_mac(ethhdr->h_dest))
  835. goto out;
  836. icmp_packet = (struct icmp_packet_rr *)skb->data;
  837. /* add record route information if not full */
  838. if ((hdr_size == sizeof(struct icmp_packet_rr)) &&
  839. (icmp_packet->rr_cur < BAT_RR_LEN)) {
  840. memcpy(&(icmp_packet->rr[icmp_packet->rr_cur]),
  841. ethhdr->h_dest, ETH_ALEN);
  842. icmp_packet->rr_cur++;
  843. }
  844. /* packet for me */
  845. if (is_my_mac(icmp_packet->dst))
  846. return recv_my_icmp_packet(bat_priv, skb, hdr_size);
  847. /* TTL exceeded */
  848. if (icmp_packet->ttl < 2)
  849. return recv_icmp_ttl_exceeded(bat_priv, skb);
  850. /* get routing information */
  851. orig_node = orig_hash_find(bat_priv, icmp_packet->dst);
  852. if (!orig_node)
  853. goto out;
  854. router = orig_node_get_router(orig_node);
  855. if (!router)
  856. goto out;
  857. /* create a copy of the skb, if needed, to modify it. */
  858. if (skb_cow(skb, sizeof(struct ethhdr)) < 0)
  859. goto out;
  860. icmp_packet = (struct icmp_packet_rr *)skb->data;
  861. /* decrement ttl */
  862. icmp_packet->ttl--;
  863. /* route it */
  864. send_skb_packet(skb, router->if_incoming, router->addr);
  865. ret = NET_RX_SUCCESS;
  866. out:
  867. if (router)
  868. neigh_node_free_ref(router);
  869. if (orig_node)
  870. orig_node_free_ref(orig_node);
  871. return ret;
  872. }
  873. /* In the bonding case, send the packets in a round
  874. * robin fashion over the remaining interfaces.
  875. *
  876. * This method rotates the bonding list and increases the
  877. * returned router's refcount. */
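/* Illustrative sketch: with candidates A -> B -> C linked after
 * primary_orig->bond_list, one call picks A (skipping candidates
 * received on recv_if) and moves the list head behind A, so the next
 * call starts its search at B, then C, then A again - a simple round
 * robin over the remaining interfaces. */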
  878. static struct neigh_node *find_bond_router(struct orig_node *primary_orig,
  879. const struct hard_iface *recv_if)
  880. {
  881. struct neigh_node *tmp_neigh_node;
  882. struct neigh_node *router = NULL, *first_candidate = NULL;
  883. rcu_read_lock();
  884. list_for_each_entry_rcu(tmp_neigh_node, &primary_orig->bond_list,
  885. bonding_list) {
  886. if (!first_candidate)
  887. first_candidate = tmp_neigh_node;
  888. /* recv_if == NULL on the first node. */
  889. if (tmp_neigh_node->if_incoming == recv_if)
  890. continue;
  891. if (!atomic_inc_not_zero(&tmp_neigh_node->refcount))
  892. continue;
  893. router = tmp_neigh_node;
  894. break;
  895. }
  896. /* use the first candidate if nothing was found. */
  897. if (!router && first_candidate &&
  898. atomic_inc_not_zero(&first_candidate->refcount))
  899. router = first_candidate;
  900. if (!router)
  901. goto out;
  902. /* selected should point to the next element
  903. * after the current router */
  904. spin_lock_bh(&primary_orig->neigh_list_lock);
  905. /* this is a list_move(), which unfortunately
  906. * does not exist as an rcu version */
  907. list_del_rcu(&primary_orig->bond_list);
  908. list_add_rcu(&primary_orig->bond_list,
  909. &router->bonding_list);
  910. spin_unlock_bh(&primary_orig->neigh_list_lock);
  911. out:
  912. rcu_read_unlock();
  913. return router;
  914. }
  915. /* Interface Alternating: Use the best of the
  916. * remaining candidates which are not using
  917. * this interface.
  918. *
  919. * Increases the returned router's refcount */
  920. static struct neigh_node *find_ifalter_router(struct orig_node *primary_orig,
  921. const struct hard_iface *recv_if)
  922. {
  923. struct neigh_node *tmp_neigh_node;
  924. struct neigh_node *router = NULL, *first_candidate = NULL;
  925. rcu_read_lock();
  926. list_for_each_entry_rcu(tmp_neigh_node, &primary_orig->bond_list,
  927. bonding_list) {
  928. if (!first_candidate)
  929. first_candidate = tmp_neigh_node;
  930. /* recv_if == NULL on the first node. */
  931. if (tmp_neigh_node->if_incoming == recv_if)
  932. continue;
  933. if (!atomic_inc_not_zero(&tmp_neigh_node->refcount))
  934. continue;
  935. /* if we don't have a router yet
  936. * or this one is better, choose it. */
  937. if ((!router) ||
  938. (tmp_neigh_node->tq_avg > router->tq_avg)) {
  939. /* decrement refcount of
  940. * previously selected router */
  941. if (router)
  942. neigh_node_free_ref(router);
  943. router = tmp_neigh_node;
  944. atomic_inc_not_zero(&router->refcount);
  945. }
  946. neigh_node_free_ref(tmp_neigh_node);
  947. }
  948. /* use the first candidate if nothing was found. */
  949. if (!router && first_candidate &&
  950. atomic_inc_not_zero(&first_candidate->refcount))
  951. router = first_candidate;
  952. rcu_read_unlock();
  953. return router;
  954. }
  955. /* find a suitable router for this originator, and use
  956. * bonding if possible. increases the found neighbor's
  957. * refcount. */
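/* Decision flow: start from the originator's current best router. If
 * bonding is disabled and the packet originates locally (recv_if ==
 * NULL), or no primary address is known for the router, that router is
 * used as-is. Otherwise the orig_node owning the router's primary
 * address is looked up and, if it advertises at least two bonding
 * candidates, the router is replaced by a round-robin bonding
 * candidate or by the best candidate on another interface. */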
  958. struct neigh_node *find_router(struct bat_priv *bat_priv,
  959. struct orig_node *orig_node,
  960. const struct hard_iface *recv_if)
  961. {
  962. struct orig_node *primary_orig_node;
  963. struct orig_node *router_orig;
  964. struct neigh_node *router;
  965. static uint8_t zero_mac[ETH_ALEN] = {0, 0, 0, 0, 0, 0};
  966. int bonding_enabled;
  967. if (!orig_node)
  968. return NULL;
  969. router = orig_node_get_router(orig_node);
  970. if (!router)
  971. goto err;
  972. /* without bonding, the first node should
  973. * always choose the default router. */
  974. bonding_enabled = atomic_read(&bat_priv->bonding);
  975. rcu_read_lock();
  976. /* select default router to output */
  977. router_orig = router->orig_node;
  978. if (!router_orig)
  979. goto err_unlock;
  980. if ((!recv_if) && (!bonding_enabled))
  981. goto return_router;
  982. /* if we have something in the primary_addr, we can search
  983. * for a potential bonding candidate. */
  984. if (compare_eth(router_orig->primary_addr, zero_mac))
  985. goto return_router;
  986. /* find the orig_node which has the primary interface. might
  987. * even be the same as our router_orig in many cases */
  988. if (compare_eth(router_orig->primary_addr, router_orig->orig)) {
  989. primary_orig_node = router_orig;
  990. } else {
  991. primary_orig_node = orig_hash_find(bat_priv,
  992. router_orig->primary_addr);
  993. if (!primary_orig_node)
  994. goto return_router;
  995. orig_node_free_ref(primary_orig_node);
  996. }
  997. /* with less than 2 candidates, we can't do any
  998. * bonding and prefer the original router. */
  999. if (atomic_read(&primary_orig_node->bond_candidates) < 2)
  1000. goto return_router;
  1001. /* all nodes in between should choose a candidate which
  1002. * is not on the interface where the packet came
  1003. * in. */
  1004. neigh_node_free_ref(router);
  1005. if (bonding_enabled)
  1006. router = find_bond_router(primary_orig_node, recv_if);
  1007. else
  1008. router = find_ifalter_router(primary_orig_node, recv_if);
  1009. return_router:
  1010. if (router && router->if_incoming->if_status != IF_ACTIVE)
  1011. goto err_unlock;
  1012. rcu_read_unlock();
  1013. return router;
  1014. err_unlock:
  1015. rcu_read_unlock();
  1016. err:
  1017. if (router)
  1018. neigh_node_free_ref(router);
  1019. return NULL;
  1020. }
  1021. static int check_unicast_packet(struct sk_buff *skb, int hdr_size)
  1022. {
  1023. struct ethhdr *ethhdr;
  1024. /* drop packet if it does not have the necessary minimum size */
  1025. if (unlikely(!pskb_may_pull(skb, hdr_size)))
  1026. return -1;
  1027. ethhdr = (struct ethhdr *)skb_mac_header(skb);
  1028. /* packet with unicast indication but broadcast recipient */
  1029. if (is_broadcast_ether_addr(ethhdr->h_dest))
  1030. return -1;
  1031. /* packet with broadcast sender address */
  1032. if (is_broadcast_ether_addr(ethhdr->h_source))
  1033. return -1;
  1034. /* not for me */
  1035. if (!is_my_mac(ethhdr->h_dest))
  1036. return -1;
  1037. return 0;
  1038. }
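/* Forward a unicast packet towards its destination: check the
 * remaining ttl, look up the destination originator and pick the next
 * hop via find_router(). Frames larger than the outgoing interface MTU
 * are fragmented when fragmentation is enabled; incoming fragments are
 * merged again where possible before the packet is re-sent with a
 * decremented ttl. */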
  1039. int route_unicast_packet(struct sk_buff *skb, struct hard_iface *recv_if)
  1040. {
  1041. struct bat_priv *bat_priv = netdev_priv(recv_if->soft_iface);
  1042. struct orig_node *orig_node = NULL;
  1043. struct neigh_node *neigh_node = NULL;
  1044. struct unicast_packet *unicast_packet;
  1045. struct ethhdr *ethhdr = (struct ethhdr *)skb_mac_header(skb);
  1046. int ret = NET_RX_DROP;
  1047. struct sk_buff *new_skb;
  1048. unicast_packet = (struct unicast_packet *)skb->data;
  1049. /* TTL exceeded */
  1050. if (unicast_packet->ttl < 2) {
  1051. pr_debug("Warning - can't forward unicast packet from %pM to "
  1052. "%pM: ttl exceeded\n", ethhdr->h_source,
  1053. unicast_packet->dest);
  1054. goto out;
  1055. }
  1056. /* get routing information */
  1057. orig_node = orig_hash_find(bat_priv, unicast_packet->dest);
  1058. if (!orig_node)
  1059. goto out;
  1060. /* find_router() increases neigh_node's refcount if found. */
  1061. neigh_node = find_router(bat_priv, orig_node, recv_if);
  1062. if (!neigh_node)
  1063. goto out;
  1064. /* create a copy of the skb, if needed, to modify it. */
  1065. if (skb_cow(skb, sizeof(struct ethhdr)) < 0)
  1066. goto out;
  1067. unicast_packet = (struct unicast_packet *)skb->data;
  1068. if (unicast_packet->packet_type == BAT_UNICAST &&
  1069. atomic_read(&bat_priv->fragmentation) &&
  1070. skb->len > neigh_node->if_incoming->net_dev->mtu) {
  1071. ret = frag_send_skb(skb, bat_priv,
  1072. neigh_node->if_incoming, neigh_node->addr);
  1073. goto out;
  1074. }
  1075. if (unicast_packet->packet_type == BAT_UNICAST_FRAG &&
  1076. frag_can_reassemble(skb, neigh_node->if_incoming->net_dev->mtu)) {
  1077. ret = frag_reassemble_skb(skb, bat_priv, &new_skb);
  1078. if (ret == NET_RX_DROP)
  1079. goto out;
  1080. /* packet was buffered for late merge */
  1081. if (!new_skb) {
  1082. ret = NET_RX_SUCCESS;
  1083. goto out;
  1084. }
  1085. skb = new_skb;
  1086. unicast_packet = (struct unicast_packet *)skb->data;
  1087. }
  1088. /* decrement ttl */
  1089. unicast_packet->ttl--;
  1090. /* route it */
  1091. send_skb_packet(skb, neigh_node->if_incoming, neigh_node->addr);
  1092. ret = NET_RX_SUCCESS;
  1093. out:
  1094. if (neigh_node)
  1095. neigh_node_free_ref(neigh_node);
  1096. if (orig_node)
  1097. orig_node_free_ref(orig_node);
  1098. return ret;
  1099. }
  1100. int recv_unicast_packet(struct sk_buff *skb, struct hard_iface *recv_if)
  1101. {
  1102. struct unicast_packet *unicast_packet;
  1103. int hdr_size = sizeof(*unicast_packet);
  1104. if (check_unicast_packet(skb, hdr_size) < 0)
  1105. return NET_RX_DROP;
  1106. unicast_packet = (struct unicast_packet *)skb->data;
  1107. /* packet for me */
  1108. if (is_my_mac(unicast_packet->dest)) {
  1109. interface_rx(recv_if->soft_iface, skb, recv_if, hdr_size);
  1110. return NET_RX_SUCCESS;
  1111. }
  1112. return route_unicast_packet(skb, recv_if);
  1113. }
  1114. int recv_ucast_frag_packet(struct sk_buff *skb, struct hard_iface *recv_if)
  1115. {
  1116. struct bat_priv *bat_priv = netdev_priv(recv_if->soft_iface);
  1117. struct unicast_frag_packet *unicast_packet;
  1118. int hdr_size = sizeof(*unicast_packet);
  1119. struct sk_buff *new_skb = NULL;
  1120. int ret;
  1121. if (check_unicast_packet(skb, hdr_size) < 0)
  1122. return NET_RX_DROP;
  1123. unicast_packet = (struct unicast_frag_packet *)skb->data;
  1124. /* packet for me */
  1125. if (is_my_mac(unicast_packet->dest)) {
  1126. ret = frag_reassemble_skb(skb, bat_priv, &new_skb);
  1127. if (ret == NET_RX_DROP)
  1128. return NET_RX_DROP;
  1129. /* packet was buffered for late merge */
  1130. if (!new_skb)
  1131. return NET_RX_SUCCESS;
  1132. interface_rx(recv_if->soft_iface, new_skb, recv_if,
  1133. sizeof(struct unicast_packet));
  1134. return NET_RX_SUCCESS;
  1135. }
  1136. return route_unicast_packet(skb, recv_if);
  1137. }
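/* Handle a received broadcast: sanity-check the frame, drop our own or
 * expired (ttl < 2) broadcasts, then use the per-originator broadcast
 * seqno window to filter duplicates and to protect against seqno
 * resets. Accepted broadcasts are queued for rebroadcast and also
 * handed to the soft interface locally. */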
  1138. int recv_bcast_packet(struct sk_buff *skb, struct hard_iface *recv_if)
  1139. {
  1140. struct bat_priv *bat_priv = netdev_priv(recv_if->soft_iface);
  1141. struct orig_node *orig_node = NULL;
  1142. struct bcast_packet *bcast_packet;
  1143. struct ethhdr *ethhdr;
  1144. int hdr_size = sizeof(*bcast_packet);
  1145. int ret = NET_RX_DROP;
  1146. int32_t seq_diff;
  1147. /* drop packet if it does not have the necessary minimum size */
  1148. if (unlikely(!pskb_may_pull(skb, hdr_size)))
  1149. goto out;
  1150. ethhdr = (struct ethhdr *)skb_mac_header(skb);
  1151. /* packet with broadcast indication but unicast recipient */
  1152. if (!is_broadcast_ether_addr(ethhdr->h_dest))
  1153. goto out;
  1154. /* packet with broadcast sender address */
  1155. if (is_broadcast_ether_addr(ethhdr->h_source))
  1156. goto out;
  1157. /* ignore broadcasts sent by myself */
  1158. if (is_my_mac(ethhdr->h_source))
  1159. goto out;
  1160. bcast_packet = (struct bcast_packet *)skb->data;
  1161. /* ignore broadcasts originated by myself */
  1162. if (is_my_mac(bcast_packet->orig))
  1163. goto out;
  1164. if (bcast_packet->ttl < 2)
  1165. goto out;
  1166. orig_node = orig_hash_find(bat_priv, bcast_packet->orig);
  1167. if (!orig_node)
  1168. goto out;
  1169. spin_lock_bh(&orig_node->bcast_seqno_lock);
  1170. /* check whether the packet is a duplicate */
  1171. if (get_bit_status(orig_node->bcast_bits, orig_node->last_bcast_seqno,
  1172. ntohl(bcast_packet->seqno)))
  1173. goto spin_unlock;
  1174. seq_diff = ntohl(bcast_packet->seqno) - orig_node->last_bcast_seqno;
  1175. /* check whether the packet is old and the host just restarted. */
  1176. if (window_protected(bat_priv, seq_diff,
  1177. &orig_node->bcast_seqno_reset))
  1178. goto spin_unlock;
  1179. /* mark broadcast in flood history, update window position
  1180. * if required. */
  1181. if (bit_get_packet(bat_priv, orig_node->bcast_bits, seq_diff, 1))
  1182. orig_node->last_bcast_seqno = ntohl(bcast_packet->seqno);
  1183. spin_unlock_bh(&orig_node->bcast_seqno_lock);
  1184. /* rebroadcast packet */
  1185. add_bcast_packet_to_list(bat_priv, skb);
  1186. /* broadcast for me */
  1187. interface_rx(recv_if->soft_iface, skb, recv_if, hdr_size);
  1188. ret = NET_RX_SUCCESS;
  1189. goto out;
  1190. spin_unlock:
  1191. spin_unlock_bh(&orig_node->bcast_seqno_lock);
  1192. out:
  1193. if (orig_node)
  1194. orig_node_free_ref(orig_node);
  1195. return ret;
  1196. }
  1197. int recv_vis_packet(struct sk_buff *skb, struct hard_iface *recv_if)
  1198. {
  1199. struct vis_packet *vis_packet;
  1200. struct ethhdr *ethhdr;
  1201. struct bat_priv *bat_priv = netdev_priv(recv_if->soft_iface);
  1202. int hdr_size = sizeof(*vis_packet);
  1203. /* keep skb linear */
  1204. if (skb_linearize(skb) < 0)
  1205. return NET_RX_DROP;
  1206. if (unlikely(!pskb_may_pull(skb, hdr_size)))
  1207. return NET_RX_DROP;
  1208. vis_packet = (struct vis_packet *)skb->data;
  1209. ethhdr = (struct ethhdr *)skb_mac_header(skb);
  1210. /* not for me */
  1211. if (!is_my_mac(ethhdr->h_dest))
  1212. return NET_RX_DROP;
  1213. /* ignore own packets */
  1214. if (is_my_mac(vis_packet->vis_orig))
  1215. return NET_RX_DROP;
  1216. if (is_my_mac(vis_packet->sender_orig))
  1217. return NET_RX_DROP;
  1218. switch (vis_packet->vis_type) {
  1219. case VIS_TYPE_SERVER_SYNC:
  1220. receive_server_sync_packet(bat_priv, vis_packet,
  1221. skb_headlen(skb));
  1222. break;
  1223. case VIS_TYPE_CLIENT_UPDATE:
  1224. receive_client_update_packet(bat_priv, vis_packet,
  1225. skb_headlen(skb));
  1226. break;
  1227. default: /* ignore unknown packet */
  1228. break;
  1229. }
  1230. /* We take a copy of the data in the packet, so we should
  1231. * always free the skb. */
  1232. return NET_RX_DROP;
  1233. }