/* routing.c - B.A.T.M.A.N. advanced routing (extraction artifacts removed) */
  1. /*
  2. * Copyright (C) 2007-2011 B.A.T.M.A.N. contributors:
  3. *
  4. * Marek Lindner, Simon Wunderlich
  5. *
  6. * This program is free software; you can redistribute it and/or
  7. * modify it under the terms of version 2 of the GNU General Public
  8. * License as published by the Free Software Foundation.
  9. *
  10. * This program is distributed in the hope that it will be useful, but
  11. * WITHOUT ANY WARRANTY; without even the implied warranty of
  12. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
  13. * General Public License for more details.
  14. *
  15. * You should have received a copy of the GNU General Public License
  16. * along with this program; if not, write to the Free Software
  17. * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
  18. * 02110-1301, USA
  19. *
  20. */
  21. #include "main.h"
  22. #include "routing.h"
  23. #include "send.h"
  24. #include "hash.h"
  25. #include "soft-interface.h"
  26. #include "hard-interface.h"
  27. #include "icmp_socket.h"
  28. #include "translation-table.h"
  29. #include "originator.h"
  30. #include "ring_buffer.h"
  31. #include "vis.h"
  32. #include "aggregation.h"
  33. #include "gateway_common.h"
  34. #include "gateway_client.h"
  35. #include "unicast.h"
  36. void slide_own_bcast_window(struct batman_if *batman_if)
  37. {
  38. struct bat_priv *bat_priv = netdev_priv(batman_if->soft_iface);
  39. struct hashtable_t *hash = bat_priv->orig_hash;
  40. struct hlist_node *walk;
  41. struct hlist_head *head;
  42. struct element_t *bucket;
  43. struct orig_node *orig_node;
  44. unsigned long *word;
  45. int i;
  46. size_t word_index;
  47. spin_lock_bh(&bat_priv->orig_hash_lock);
  48. for (i = 0; i < hash->size; i++) {
  49. head = &hash->table[i];
  50. rcu_read_lock();
  51. hlist_for_each_entry_rcu(bucket, walk, head, hlist) {
  52. orig_node = bucket->data;
  53. spin_lock_bh(&orig_node->ogm_cnt_lock);
  54. word_index = batman_if->if_num * NUM_WORDS;
  55. word = &(orig_node->bcast_own[word_index]);
  56. bit_get_packet(bat_priv, word, 1, 0);
  57. orig_node->bcast_own_sum[batman_if->if_num] =
  58. bit_packet_count(word);
  59. spin_unlock_bh(&orig_node->ogm_cnt_lock);
  60. }
  61. rcu_read_unlock();
  62. }
  63. spin_unlock_bh(&bat_priv->orig_hash_lock);
  64. }
  65. static void update_HNA(struct bat_priv *bat_priv, struct orig_node *orig_node,
  66. unsigned char *hna_buff, int hna_buff_len)
  67. {
  68. if ((hna_buff_len != orig_node->hna_buff_len) ||
  69. ((hna_buff_len > 0) &&
  70. (orig_node->hna_buff_len > 0) &&
  71. (memcmp(orig_node->hna_buff, hna_buff, hna_buff_len) != 0))) {
  72. if (orig_node->hna_buff_len > 0)
  73. hna_global_del_orig(bat_priv, orig_node,
  74. "originator changed hna");
  75. if ((hna_buff_len > 0) && (hna_buff))
  76. hna_global_add_orig(bat_priv, orig_node,
  77. hna_buff, hna_buff_len);
  78. }
  79. }
  80. static void update_route(struct bat_priv *bat_priv,
  81. struct orig_node *orig_node,
  82. struct neigh_node *neigh_node,
  83. unsigned char *hna_buff, int hna_buff_len)
  84. {
  85. struct neigh_node *neigh_node_tmp;
  86. /* route deleted */
  87. if ((orig_node->router) && (!neigh_node)) {
  88. bat_dbg(DBG_ROUTES, bat_priv, "Deleting route towards: %pM\n",
  89. orig_node->orig);
  90. hna_global_del_orig(bat_priv, orig_node,
  91. "originator timed out");
  92. /* route added */
  93. } else if ((!orig_node->router) && (neigh_node)) {
  94. bat_dbg(DBG_ROUTES, bat_priv,
  95. "Adding route towards: %pM (via %pM)\n",
  96. orig_node->orig, neigh_node->addr);
  97. hna_global_add_orig(bat_priv, orig_node,
  98. hna_buff, hna_buff_len);
  99. /* route changed */
  100. } else {
  101. bat_dbg(DBG_ROUTES, bat_priv,
  102. "Changing route towards: %pM "
  103. "(now via %pM - was via %pM)\n",
  104. orig_node->orig, neigh_node->addr,
  105. orig_node->router->addr);
  106. }
  107. if (neigh_node && !atomic_inc_not_zero(&neigh_node->refcount))
  108. neigh_node = NULL;
  109. neigh_node_tmp = orig_node->router;
  110. orig_node->router = neigh_node;
  111. if (neigh_node_tmp)
  112. neigh_node_free_ref(neigh_node_tmp);
  113. }
  114. void update_routes(struct bat_priv *bat_priv, struct orig_node *orig_node,
  115. struct neigh_node *neigh_node, unsigned char *hna_buff,
  116. int hna_buff_len)
  117. {
  118. if (!orig_node)
  119. return;
  120. if (orig_node->router != neigh_node)
  121. update_route(bat_priv, orig_node, neigh_node,
  122. hna_buff, hna_buff_len);
  123. /* may be just HNA changed */
  124. else
  125. update_HNA(bat_priv, orig_node, hna_buff, hna_buff_len);
  126. }
/* Decide whether the link to a one-hop neighbor is usable in both
 * directions and rescale batman_packet->tq accordingly.
 *
 * Looks up (or creates) the neigh_node matching the one-hop neighbor,
 * derives a local transmit quality from the ratio of our own OGMs
 * echoed back (orig_eq_count) to the OGMs received from the neighbor
 * (neigh_rq_count), applies an asymmetry penalty and multiplies the
 * result into the packet's tq field.
 *
 * Returns 1 when the resulting tq reaches TQ_TOTAL_BIDRECT_LIMIT,
 * 0 otherwise (including create_neighbor()/refcount failure).
 */
static int is_bidirectional_neigh(struct orig_node *orig_node,
				  struct orig_node *orig_neigh_node,
				  struct batman_packet *batman_packet,
				  struct batman_if *if_incoming)
{
	struct bat_priv *bat_priv = netdev_priv(if_incoming->soft_iface);
	struct neigh_node *neigh_node = NULL, *tmp_neigh_node = NULL;
	struct hlist_node *node;
	unsigned char total_count;
	uint8_t orig_eq_count, neigh_rq_count, tq_own;
	int tq_asym_penalty, ret = 0;

	if (orig_node == orig_neigh_node) {
		/* the sender is itself a direct neighbor: look for the
		 * matching neighbor entry on the receiving interface */
		rcu_read_lock();
		hlist_for_each_entry_rcu(tmp_neigh_node, node,
					 &orig_node->neigh_list, list) {
			if (compare_eth(tmp_neigh_node->addr,
					orig_neigh_node->orig) &&
			    (tmp_neigh_node->if_incoming == if_incoming))
				neigh_node = tmp_neigh_node;
		}

		if (!neigh_node)
			neigh_node = create_neighbor(orig_node,
						     orig_neigh_node,
						     orig_neigh_node->orig,
						     if_incoming);
		/* create_neighbor failed, return 0 */
		if (!neigh_node)
			goto unlock;

		/* take a reference before leaving the RCU section */
		if (!atomic_inc_not_zero(&neigh_node->refcount)) {
			neigh_node = NULL;
			goto unlock;
		}
		rcu_read_unlock();

		neigh_node->last_valid = jiffies;
	} else {
		/* find packet count of corresponding one hop neighbor */
		rcu_read_lock();
		hlist_for_each_entry_rcu(tmp_neigh_node, node,
					 &orig_neigh_node->neigh_list, list) {
			if (compare_eth(tmp_neigh_node->addr,
					orig_neigh_node->orig) &&
			    (tmp_neigh_node->if_incoming == if_incoming))
				neigh_node = tmp_neigh_node;
		}

		if (!neigh_node)
			neigh_node = create_neighbor(orig_neigh_node,
						     orig_neigh_node,
						     orig_neigh_node->orig,
						     if_incoming);
		/* create_neighbor failed, return 0 */
		if (!neigh_node)
			goto unlock;

		if (!atomic_inc_not_zero(&neigh_node->refcount)) {
			neigh_node = NULL;
			goto unlock;
		}
		rcu_read_unlock();
	}

	orig_node->last_valid = jiffies;

	/* sample both counters under the originator's ogm_cnt_lock so
	 * they belong to the same window */
	spin_lock_bh(&orig_node->ogm_cnt_lock);
	orig_eq_count = orig_neigh_node->bcast_own_sum[if_incoming->if_num];
	neigh_rq_count = neigh_node->real_packet_count;
	spin_unlock_bh(&orig_node->ogm_cnt_lock);

	/* pay attention to not get a value bigger than 100 % */
	total_count = (orig_eq_count > neigh_rq_count ?
		       neigh_rq_count : orig_eq_count);

	/* if we have too few packets (too less data) we set tq_own to zero */
	/* if we receive too few packets it is not considered bidirectional */
	if ((total_count < TQ_LOCAL_BIDRECT_SEND_MINIMUM) ||
	    (neigh_rq_count < TQ_LOCAL_BIDRECT_RECV_MINIMUM))
		tq_own = 0;
	else
		/* neigh_node->real_packet_count is never zero as we
		 * only purge old information when getting new
		 * information */
		tq_own = (TQ_MAX_VALUE * total_count) / neigh_rq_count;

	/*
	 * 1 - ((1-x) ** 3), normalized to TQ_MAX_VALUE this does
	 * affect the nearly-symmetric links only a little, but
	 * punishes asymmetric links more. This will give a value
	 * between 0 and TQ_MAX_VALUE
	 */
	tq_asym_penalty = TQ_MAX_VALUE - (TQ_MAX_VALUE *
			  (TQ_LOCAL_WINDOW_SIZE - neigh_rq_count) *
			  (TQ_LOCAL_WINDOW_SIZE - neigh_rq_count) *
			  (TQ_LOCAL_WINDOW_SIZE - neigh_rq_count)) /
			  (TQ_LOCAL_WINDOW_SIZE *
			   TQ_LOCAL_WINDOW_SIZE *
			   TQ_LOCAL_WINDOW_SIZE);

	batman_packet->tq = ((batman_packet->tq * tq_own * tq_asym_penalty) /
			     (TQ_MAX_VALUE * TQ_MAX_VALUE));

	bat_dbg(DBG_BATMAN, bat_priv,
		"bidirectional: "
		"orig = %-15pM neigh = %-15pM => own_bcast = %2i, "
		"real recv = %2i, local tq: %3i, asym_penalty: %3i, "
		"total tq: %3i\n",
		orig_node->orig, orig_neigh_node->orig, total_count,
		neigh_rq_count, tq_own, tq_asym_penalty, batman_packet->tq);

	/* if link has the minimum required transmission quality
	 * consider it bidirectional */
	if (batman_packet->tq >= TQ_TOTAL_BIDRECT_LIMIT)
		ret = 1;

	goto out;

unlock:
	rcu_read_unlock();
out:
	if (neigh_node)
		neigh_node_free_ref(neigh_node);
	return ret;
}
  237. /* caller must hold the neigh_list_lock */
  238. void bonding_candidate_del(struct orig_node *orig_node,
  239. struct neigh_node *neigh_node)
  240. {
  241. /* this neighbor is not part of our candidate list */
  242. if (list_empty(&neigh_node->bonding_list))
  243. goto out;
  244. list_del_rcu(&neigh_node->bonding_list);
  245. INIT_LIST_HEAD(&neigh_node->bonding_list);
  246. neigh_node_free_ref(neigh_node);
  247. atomic_dec(&orig_node->bond_candidates);
  248. out:
  249. return;
  250. }
  251. static void bonding_candidate_add(struct orig_node *orig_node,
  252. struct neigh_node *neigh_node)
  253. {
  254. struct hlist_node *node;
  255. struct neigh_node *tmp_neigh_node;
  256. uint8_t best_tq, interference_candidate = 0;
  257. spin_lock_bh(&orig_node->neigh_list_lock);
  258. /* only consider if it has the same primary address ... */
  259. if (!compare_eth(orig_node->orig,
  260. neigh_node->orig_node->primary_addr))
  261. goto candidate_del;
  262. if (!orig_node->router)
  263. goto candidate_del;
  264. best_tq = orig_node->router->tq_avg;
  265. /* ... and is good enough to be considered */
  266. if (neigh_node->tq_avg < best_tq - BONDING_TQ_THRESHOLD)
  267. goto candidate_del;
  268. /**
  269. * check if we have another candidate with the same mac address or
  270. * interface. If we do, we won't select this candidate because of
  271. * possible interference.
  272. */
  273. hlist_for_each_entry_rcu(tmp_neigh_node, node,
  274. &orig_node->neigh_list, list) {
  275. if (tmp_neigh_node == neigh_node)
  276. continue;
  277. /* we only care if the other candidate is even
  278. * considered as candidate. */
  279. if (list_empty(&tmp_neigh_node->bonding_list))
  280. continue;
  281. if ((neigh_node->if_incoming == tmp_neigh_node->if_incoming) ||
  282. (compare_eth(neigh_node->addr, tmp_neigh_node->addr))) {
  283. interference_candidate = 1;
  284. break;
  285. }
  286. }
  287. /* don't care further if it is an interference candidate */
  288. if (interference_candidate)
  289. goto candidate_del;
  290. /* this neighbor already is part of our candidate list */
  291. if (!list_empty(&neigh_node->bonding_list))
  292. goto out;
  293. if (!atomic_inc_not_zero(&neigh_node->refcount))
  294. goto out;
  295. list_add_rcu(&neigh_node->bonding_list, &orig_node->bond_list);
  296. atomic_inc(&orig_node->bond_candidates);
  297. goto out;
  298. candidate_del:
  299. bonding_candidate_del(orig_node, neigh_node);
  300. out:
  301. spin_unlock_bh(&orig_node->neigh_list_lock);
  302. return;
  303. }
  304. /* copy primary address for bonding */
  305. static void bonding_save_primary(struct orig_node *orig_node,
  306. struct orig_node *orig_neigh_node,
  307. struct batman_packet *batman_packet)
  308. {
  309. if (!(batman_packet->flags & PRIMARIES_FIRST_HOP))
  310. return;
  311. memcpy(orig_neigh_node->primary_addr, orig_node->orig, ETH_ALEN);
  312. }
/* Update the originator entry for a received (bidirectional) OGM:
 * refresh/create the last-hop neighbor, fold the packet's tq into the
 * neighbor's ring buffer, re-evaluate bonding candidates, possibly
 * switch the route to a better neighbor and sync HNA/gateway state.
 */
static void update_orig(struct bat_priv *bat_priv,
			struct orig_node *orig_node,
			struct ethhdr *ethhdr,
			struct batman_packet *batman_packet,
			struct batman_if *if_incoming,
			unsigned char *hna_buff, int hna_buff_len,
			char is_duplicate)
{
	struct neigh_node *neigh_node = NULL, *tmp_neigh_node = NULL;
	struct orig_node *orig_node_tmp;
	struct hlist_node *node;
	int tmp_hna_buff_len;
	uint8_t bcast_own_sum_orig, bcast_own_sum_neigh;

	bat_dbg(DBG_BATMAN, bat_priv, "update_originator(): "
		"Searching and updating originator entry of received packet\n");

	rcu_read_lock();
	hlist_for_each_entry_rcu(tmp_neigh_node, node,
				 &orig_node->neigh_list, list) {
		/* the sending neighbor on this interface: keep a
		 * referenced pointer to it for the update below */
		if (compare_eth(tmp_neigh_node->addr, ethhdr->h_source) &&
		    (tmp_neigh_node->if_incoming == if_incoming) &&
		    atomic_inc_not_zero(&tmp_neigh_node->refcount)) {
			if (neigh_node)
				neigh_node_free_ref(neigh_node);
			neigh_node = tmp_neigh_node;
			continue;
		}

		if (is_duplicate)
			continue;

		/* all other neighbors: record a "missed" (tq 0) sample */
		ring_buffer_set(tmp_neigh_node->tq_recv,
				&tmp_neigh_node->tq_index, 0);
		tmp_neigh_node->tq_avg =
			ring_buffer_avg(tmp_neigh_node->tq_recv);
	}

	if (!neigh_node) {
		struct orig_node *orig_tmp;

		/* first OGM from this neighbor on this interface */
		orig_tmp = get_orig_node(bat_priv, ethhdr->h_source);
		if (!orig_tmp)
			goto unlock;

		neigh_node = create_neighbor(orig_node, orig_tmp,
					     ethhdr->h_source, if_incoming);

		kref_put(&orig_tmp->refcount, orig_node_free_ref);
		if (!neigh_node)
			goto unlock;

		if (!atomic_inc_not_zero(&neigh_node->refcount)) {
			neigh_node = NULL;
			goto unlock;
		}
	} else
		bat_dbg(DBG_BATMAN, bat_priv,
			"Updating existing last-hop neighbor of originator\n");

	rcu_read_unlock();

	orig_node->flags = batman_packet->flags;
	neigh_node->last_valid = jiffies;

	/* fold the received tq into the neighbor's ring buffer */
	ring_buffer_set(neigh_node->tq_recv,
			&neigh_node->tq_index,
			batman_packet->tq);
	neigh_node->tq_avg = ring_buffer_avg(neigh_node->tq_recv);

	if (!is_duplicate) {
		orig_node->last_ttl = batman_packet->ttl;
		neigh_node->last_ttl = batman_packet->ttl;
	}

	bonding_candidate_add(orig_node, neigh_node);

	/* never trust more HNA entries than the buffer can hold */
	tmp_hna_buff_len = (hna_buff_len > batman_packet->num_hna * ETH_ALEN ?
			    batman_packet->num_hna * ETH_ALEN : hna_buff_len);

	/* if this neighbor already is our next hop there is nothing
	 * to change */
	if (orig_node->router == neigh_node)
		goto update_hna;

	/* if this neighbor does not offer a better TQ we won't consider it */
	if ((orig_node->router) &&
	    (orig_node->router->tq_avg > neigh_node->tq_avg))
		goto update_hna;

	/* if the TQ is the same and the link not more symetric we
	 * won't consider it either */
	if ((orig_node->router) &&
	    (neigh_node->tq_avg == orig_node->router->tq_avg)) {
		orig_node_tmp = orig_node->router->orig_node;
		spin_lock_bh(&orig_node_tmp->ogm_cnt_lock);
		bcast_own_sum_orig =
			orig_node_tmp->bcast_own_sum[if_incoming->if_num];
		spin_unlock_bh(&orig_node_tmp->ogm_cnt_lock);

		orig_node_tmp = neigh_node->orig_node;
		spin_lock_bh(&orig_node_tmp->ogm_cnt_lock);
		bcast_own_sum_neigh =
			orig_node_tmp->bcast_own_sum[if_incoming->if_num];
		spin_unlock_bh(&orig_node_tmp->ogm_cnt_lock);

		/* tie: keep the current router unless the new neighbor
		 * echoes more of our own broadcasts */
		if (bcast_own_sum_orig >= bcast_own_sum_neigh)
			goto update_hna;
	}

	update_routes(bat_priv, orig_node, neigh_node,
		      hna_buff, tmp_hna_buff_len);
	goto update_gw;

update_hna:
	update_routes(bat_priv, orig_node, orig_node->router,
		      hna_buff, tmp_hna_buff_len);

update_gw:
	if (orig_node->gw_flags != batman_packet->gw_flags)
		gw_node_update(bat_priv, orig_node, batman_packet->gw_flags);

	orig_node->gw_flags = batman_packet->gw_flags;

	/* restart gateway selection if fast or late switching was enabled */
	if ((orig_node->gw_flags) &&
	    (atomic_read(&bat_priv->gw_mode) == GW_MODE_CLIENT) &&
	    (atomic_read(&bat_priv->gw_sel_class) > 2))
		gw_check_election(bat_priv, orig_node);

	goto out;

unlock:
	rcu_read_unlock();
out:
	if (neigh_node)
		neigh_node_free_ref(neigh_node);
}
  424. /* checks whether the host restarted and is in the protection time.
  425. * returns:
  426. * 0 if the packet is to be accepted
  427. * 1 if the packet is to be ignored.
  428. */
  429. static int window_protected(struct bat_priv *bat_priv,
  430. int32_t seq_num_diff,
  431. unsigned long *last_reset)
  432. {
  433. if ((seq_num_diff <= -TQ_LOCAL_WINDOW_SIZE)
  434. || (seq_num_diff >= EXPECTED_SEQNO_RANGE)) {
  435. if (time_after(jiffies, *last_reset +
  436. msecs_to_jiffies(RESET_PROTECTION_MS))) {
  437. *last_reset = jiffies;
  438. bat_dbg(DBG_BATMAN, bat_priv,
  439. "old packet received, start protection\n");
  440. return 0;
  441. } else
  442. return 1;
  443. }
  444. return 0;
  445. }
/* processes a batman packet for all interfaces, adjusts the sequence number and
 * finds out whether it is a duplicate.
 * returns:
 *   1 the packet is a duplicate
 *   0 the packet has not yet been received
 *  -1 the packet is old and has been received while the seqno window
 *     was protected. Caller should drop it.
 *
 * NOTE(review): the -1 sentinel travels through a plain `char` return
 * type; on platforms where `char` is unsigned the caller's `== -1`
 * comparison would never match - TODO confirm / consider `int`.
 */
static char count_real_packets(struct ethhdr *ethhdr,
			       struct batman_packet *batman_packet,
			       struct batman_if *if_incoming)
{
	struct bat_priv *bat_priv = netdev_priv(if_incoming->soft_iface);
	struct orig_node *orig_node;
	struct neigh_node *tmp_neigh_node;
	struct hlist_node *node;
	char is_duplicate = 0;
	int32_t seq_diff;
	int need_update = 0;
	int set_mark, ret = -1;

	orig_node = get_orig_node(bat_priv, batman_packet->orig);
	if (!orig_node)
		return 0;

	spin_lock_bh(&orig_node->ogm_cnt_lock);
	seq_diff = batman_packet->seqno - orig_node->last_real_seqno;

	/* signalize caller that the packet is to be dropped. */
	if (window_protected(bat_priv, seq_diff,
			     &orig_node->batman_seqno_reset))
		goto out;

	rcu_read_lock();
	hlist_for_each_entry_rcu(tmp_neigh_node, node,
				 &orig_node->neigh_list, list) {
		/* duplicate if any neighbor already saw this seqno */
		is_duplicate |= get_bit_status(tmp_neigh_node->real_bits,
					       orig_node->last_real_seqno,
					       batman_packet->seqno);

		/* only mark the bit for the neighbor that actually
		 * delivered this packet on this interface */
		if (compare_eth(tmp_neigh_node->addr, ethhdr->h_source) &&
		    (tmp_neigh_node->if_incoming == if_incoming))
			set_mark = 1;
		else
			set_mark = 0;

		/* if the window moved, set the update flag. */
		need_update |= bit_get_packet(bat_priv,
					      tmp_neigh_node->real_bits,
					      seq_diff, set_mark);

		tmp_neigh_node->real_packet_count =
			bit_packet_count(tmp_neigh_node->real_bits);
	}
	rcu_read_unlock();

	if (need_update) {
		bat_dbg(DBG_BATMAN, bat_priv,
			"updating last_seqno: old %d, new %d\n",
			orig_node->last_real_seqno, batman_packet->seqno);
		orig_node->last_real_seqno = batman_packet->seqno;
	}

	ret = is_duplicate;

out:
	spin_unlock_bh(&orig_node->ogm_cnt_lock);
	kref_put(&orig_node->refcount, orig_node_free_ref);
	return ret;
}
/* Main OGM processing: validate a received batman packet, maintain the
 * seqno windows, run the bidirectional link check, update the routing
 * table and rebroadcast the packet when appropriate.
 */
void receive_bat_packet(struct ethhdr *ethhdr,
			struct batman_packet *batman_packet,
			unsigned char *hna_buff, int hna_buff_len,
			struct batman_if *if_incoming)
{
	struct bat_priv *bat_priv = netdev_priv(if_incoming->soft_iface);
	struct batman_if *batman_if;
	struct orig_node *orig_neigh_node, *orig_node;
	char has_directlink_flag;
	char is_my_addr = 0, is_my_orig = 0, is_my_oldorig = 0;
	char is_broadcast = 0, is_bidirectional, is_single_hop_neigh;
	char is_duplicate;
	uint32_t if_incoming_seqno;

	/* Silently drop when the batman packet is actually not a
	 * correct packet.
	 *
	 * This might happen if a packet is padded (e.g. Ethernet has a
	 * minimum frame length of 64 byte) and the aggregation interprets
	 * it as an additional length.
	 *
	 * TODO: A more sane solution would be to have a bit in the
	 * batman_packet to detect whether the packet is the last
	 * packet in an aggregation.  Here we expect that the padding
	 * is always zero (or not 0x01)
	 */
	if (batman_packet->packet_type != BAT_PACKET)
		return;

	/* could be changed by schedule_own_packet() */
	if_incoming_seqno = atomic_read(&if_incoming->seqno);

	has_directlink_flag = (batman_packet->flags & DIRECTLINK ? 1 : 0);

	/* sender mac == originator mac means a one-hop neighbor */
	is_single_hop_neigh = (compare_eth(ethhdr->h_source,
					   batman_packet->orig) ? 1 : 0);

	bat_dbg(DBG_BATMAN, bat_priv,
		"Received BATMAN packet via NB: %pM, IF: %s [%pM] "
		"(from OG: %pM, via prev OG: %pM, seqno %d, tq %d, "
		"TTL %d, V %d, IDF %d)\n",
		ethhdr->h_source, if_incoming->net_dev->name,
		if_incoming->net_dev->dev_addr, batman_packet->orig,
		batman_packet->prev_sender, batman_packet->seqno,
		batman_packet->tq, batman_packet->ttl, batman_packet->version,
		has_directlink_flag);

	/* classify the addresses against all of our active interfaces
	 * on the same soft interface */
	rcu_read_lock();
	list_for_each_entry_rcu(batman_if, &if_list, list) {
		if (batman_if->if_status != IF_ACTIVE)
			continue;

		if (batman_if->soft_iface != if_incoming->soft_iface)
			continue;

		if (compare_eth(ethhdr->h_source,
				batman_if->net_dev->dev_addr))
			is_my_addr = 1;

		if (compare_eth(batman_packet->orig,
				batman_if->net_dev->dev_addr))
			is_my_orig = 1;

		if (compare_eth(batman_packet->prev_sender,
				batman_if->net_dev->dev_addr))
			is_my_oldorig = 1;

		if (compare_eth(ethhdr->h_source, broadcast_addr))
			is_broadcast = 1;
	}
	rcu_read_unlock();

	if (batman_packet->version != COMPAT_VERSION) {
		bat_dbg(DBG_BATMAN, bat_priv,
			"Drop packet: incompatible batman version (%i)\n",
			batman_packet->version);
		return;
	}

	if (is_my_addr) {
		bat_dbg(DBG_BATMAN, bat_priv,
			"Drop packet: received my own broadcast (sender: %pM"
			")\n",
			ethhdr->h_source);
		return;
	}

	if (is_broadcast) {
		bat_dbg(DBG_BATMAN, bat_priv, "Drop packet: "
			"ignoring all packets with broadcast source addr (sender: %pM"
			")\n", ethhdr->h_source);
		return;
	}

	if (is_my_orig) {
		/* our own OGM echoed back by a neighbor: use it to feed
		 * the bcast_own window for the bidirectional check */
		unsigned long *word;
		int offset;

		orig_neigh_node = get_orig_node(bat_priv, ethhdr->h_source);
		if (!orig_neigh_node)
			return;

		/* neighbor has to indicate direct link and it has to
		 * come via the corresponding interface */
		/* if received seqno equals last send seqno save new
		 * seqno for bidirectional check */
		if (has_directlink_flag &&
		    compare_eth(if_incoming->net_dev->dev_addr,
				batman_packet->orig) &&
		    (batman_packet->seqno - if_incoming_seqno + 2 == 0)) {
			offset = if_incoming->if_num * NUM_WORDS;

			spin_lock_bh(&orig_neigh_node->ogm_cnt_lock);
			word = &(orig_neigh_node->bcast_own[offset]);
			bit_mark(word, 0);
			orig_neigh_node->bcast_own_sum[if_incoming->if_num] =
				bit_packet_count(word);
			spin_unlock_bh(&orig_neigh_node->ogm_cnt_lock);
		}

		bat_dbg(DBG_BATMAN, bat_priv, "Drop packet: "
			"originator packet from myself (via neighbor)\n");
		kref_put(&orig_neigh_node->refcount, orig_node_free_ref);
		return;
	}

	if (is_my_oldorig) {
		bat_dbg(DBG_BATMAN, bat_priv,
			"Drop packet: ignoring all rebroadcast echos (sender: "
			"%pM)\n", ethhdr->h_source);
		return;
	}

	orig_node = get_orig_node(bat_priv, batman_packet->orig);
	if (!orig_node)
		return;

	is_duplicate = count_real_packets(ethhdr, batman_packet, if_incoming);

	if (is_duplicate == -1) {
		bat_dbg(DBG_BATMAN, bat_priv,
			"Drop packet: packet within seqno protection time "
			"(sender: %pM)\n", ethhdr->h_source);
		goto out;
	}

	if (batman_packet->tq == 0) {
		bat_dbg(DBG_BATMAN, bat_priv,
			"Drop packet: originator packet with tq equal 0\n");
		goto out;
	}

	/* avoid temporary routing loops */
	if ((orig_node->router) &&
	    (orig_node->router->orig_node->router) &&
	    (compare_eth(orig_node->router->addr,
			 batman_packet->prev_sender)) &&
	    !(compare_eth(batman_packet->orig, batman_packet->prev_sender)) &&
	    (compare_eth(orig_node->router->addr,
			 orig_node->router->orig_node->router->addr))) {
		bat_dbg(DBG_BATMAN, bat_priv,
			"Drop packet: ignoring all rebroadcast packets that "
			"may make me loop (sender: %pM)\n", ethhdr->h_source);
		goto out;
	}

	/* if sender is a direct neighbor the sender mac equals
	 * originator mac */
	orig_neigh_node = (is_single_hop_neigh ?
			   orig_node :
			   get_orig_node(bat_priv, ethhdr->h_source));
	if (!orig_neigh_node)
		goto out_neigh;

	/* drop packet if sender is not a direct neighbor and if we
	 * don't route towards it */
	if (!is_single_hop_neigh && (!orig_neigh_node->router)) {
		bat_dbg(DBG_BATMAN, bat_priv,
			"Drop packet: OGM via unknown neighbor!\n");
		goto out_neigh;
	}

	is_bidirectional = is_bidirectional_neigh(orig_node, orig_neigh_node,
						  batman_packet, if_incoming);

	bonding_save_primary(orig_node, orig_neigh_node, batman_packet);

	/* update ranking if it is not a duplicate or has the same
	 * seqno and similar ttl as the non-duplicate */
	if (is_bidirectional &&
	    (!is_duplicate ||
	     ((orig_node->last_real_seqno == batman_packet->seqno) &&
	      (orig_node->last_ttl - 3 <= batman_packet->ttl))))
		update_orig(bat_priv, orig_node, ethhdr, batman_packet,
			    if_incoming, hna_buff, hna_buff_len, is_duplicate);

	/* is single hop (direct) neighbor */
	if (is_single_hop_neigh) {
		/* mark direct link on incoming interface */
		schedule_forward_packet(orig_node, ethhdr, batman_packet,
					1, hna_buff_len, if_incoming);

		bat_dbg(DBG_BATMAN, bat_priv, "Forwarding packet: "
			"rebroadcast neighbor packet with direct link flag\n");
		goto out_neigh;
	}

	/* multihop originator */
	if (!is_bidirectional) {
		bat_dbg(DBG_BATMAN, bat_priv,
			"Drop packet: not received via bidirectional link\n");
		goto out_neigh;
	}

	if (is_duplicate) {
		bat_dbg(DBG_BATMAN, bat_priv,
			"Drop packet: duplicate packet received\n");
		goto out_neigh;
	}

	bat_dbg(DBG_BATMAN, bat_priv,
		"Forwarding packet: rebroadcast originator packet\n");
	schedule_forward_packet(orig_node, ethhdr, batman_packet,
				0, hna_buff_len, if_incoming);

out_neigh:
	/* orig_neigh_node only holds its own reference when it is not
	 * the same object as orig_node */
	if (!is_single_hop_neigh)
		kref_put(&orig_neigh_node->refcount, orig_node_free_ref);
out:
	kref_put(&orig_node->refcount, orig_node_free_ref);
}
  701. int recv_bat_packet(struct sk_buff *skb, struct batman_if *batman_if)
  702. {
  703. struct bat_priv *bat_priv = netdev_priv(batman_if->soft_iface);
  704. struct ethhdr *ethhdr;
  705. /* drop packet if it has not necessary minimum size */
  706. if (unlikely(!pskb_may_pull(skb, sizeof(struct batman_packet))))
  707. return NET_RX_DROP;
  708. ethhdr = (struct ethhdr *)skb_mac_header(skb);
  709. /* packet with broadcast indication but unicast recipient */
  710. if (!is_broadcast_ether_addr(ethhdr->h_dest))
  711. return NET_RX_DROP;
  712. /* packet with broadcast sender address */
  713. if (is_broadcast_ether_addr(ethhdr->h_source))
  714. return NET_RX_DROP;
  715. /* create a copy of the skb, if needed, to modify it. */
  716. if (skb_cow(skb, 0) < 0)
  717. return NET_RX_DROP;
  718. /* keep skb linear */
  719. if (skb_linearize(skb) < 0)
  720. return NET_RX_DROP;
  721. ethhdr = (struct ethhdr *)skb_mac_header(skb);
  722. spin_lock_bh(&bat_priv->orig_hash_lock);
  723. receive_aggr_bat_packet(ethhdr,
  724. skb->data,
  725. skb_headlen(skb),
  726. batman_if);
  727. spin_unlock_bh(&bat_priv->orig_hash_lock);
  728. kfree_skb(skb);
  729. return NET_RX_SUCCESS;
  730. }
  731. static int recv_my_icmp_packet(struct bat_priv *bat_priv,
  732. struct sk_buff *skb, size_t icmp_len)
  733. {
  734. struct orig_node *orig_node = NULL;
  735. struct neigh_node *neigh_node = NULL;
  736. struct icmp_packet_rr *icmp_packet;
  737. struct batman_if *batman_if;
  738. uint8_t dstaddr[ETH_ALEN];
  739. int ret = NET_RX_DROP;
  740. icmp_packet = (struct icmp_packet_rr *)skb->data;
  741. /* add data to device queue */
  742. if (icmp_packet->msg_type != ECHO_REQUEST) {
  743. bat_socket_receive_packet(icmp_packet, icmp_len);
  744. goto out;
  745. }
  746. if (!bat_priv->primary_if)
  747. goto out;
  748. /* answer echo request (ping) */
  749. /* get routing information */
  750. spin_lock_bh(&bat_priv->orig_hash_lock);
  751. rcu_read_lock();
  752. orig_node = ((struct orig_node *)hash_find(bat_priv->orig_hash,
  753. compare_orig, choose_orig,
  754. icmp_packet->orig));
  755. if (!orig_node)
  756. goto unlock;
  757. kref_get(&orig_node->refcount);
  758. neigh_node = orig_node->router;
  759. if (!neigh_node)
  760. goto unlock;
  761. if (!atomic_inc_not_zero(&neigh_node->refcount)) {
  762. neigh_node = NULL;
  763. goto unlock;
  764. }
  765. rcu_read_unlock();
  766. /* don't lock while sending the packets ... we therefore
  767. * copy the required data before sending */
  768. batman_if = orig_node->router->if_incoming;
  769. memcpy(dstaddr, orig_node->router->addr, ETH_ALEN);
  770. spin_unlock_bh(&bat_priv->orig_hash_lock);
  771. /* create a copy of the skb, if needed, to modify it. */
  772. if (skb_cow(skb, sizeof(struct ethhdr)) < 0)
  773. goto out;
  774. icmp_packet = (struct icmp_packet_rr *)skb->data;
  775. memcpy(icmp_packet->dst, icmp_packet->orig, ETH_ALEN);
  776. memcpy(icmp_packet->orig,
  777. bat_priv->primary_if->net_dev->dev_addr, ETH_ALEN);
  778. icmp_packet->msg_type = ECHO_REPLY;
  779. icmp_packet->ttl = TTL;
  780. send_skb_packet(skb, batman_if, dstaddr);
  781. ret = NET_RX_SUCCESS;
  782. goto out;
  783. unlock:
  784. rcu_read_unlock();
  785. spin_unlock_bh(&bat_priv->orig_hash_lock);
  786. out:
  787. if (neigh_node)
  788. neigh_node_free_ref(neigh_node);
  789. if (orig_node)
  790. kref_put(&orig_node->refcount, orig_node_free_ref);
  791. return ret;
  792. }
  793. static int recv_icmp_ttl_exceeded(struct bat_priv *bat_priv,
  794. struct sk_buff *skb)
  795. {
  796. struct orig_node *orig_node = NULL;
  797. struct neigh_node *neigh_node = NULL;
  798. struct icmp_packet *icmp_packet;
  799. struct batman_if *batman_if;
  800. uint8_t dstaddr[ETH_ALEN];
  801. int ret = NET_RX_DROP;
  802. icmp_packet = (struct icmp_packet *)skb->data;
  803. /* send TTL exceeded if packet is an echo request (traceroute) */
  804. if (icmp_packet->msg_type != ECHO_REQUEST) {
  805. pr_debug("Warning - can't forward icmp packet from %pM to "
  806. "%pM: ttl exceeded\n", icmp_packet->orig,
  807. icmp_packet->dst);
  808. goto out;
  809. }
  810. if (!bat_priv->primary_if)
  811. goto out;
  812. /* get routing information */
  813. spin_lock_bh(&bat_priv->orig_hash_lock);
  814. rcu_read_lock();
  815. orig_node = ((struct orig_node *)
  816. hash_find(bat_priv->orig_hash, compare_orig, choose_orig,
  817. icmp_packet->orig));
  818. if (!orig_node)
  819. goto unlock;
  820. kref_get(&orig_node->refcount);
  821. neigh_node = orig_node->router;
  822. if (!neigh_node)
  823. goto unlock;
  824. if (!atomic_inc_not_zero(&neigh_node->refcount)) {
  825. neigh_node = NULL;
  826. goto unlock;
  827. }
  828. rcu_read_unlock();
  829. /* don't lock while sending the packets ... we therefore
  830. * copy the required data before sending */
  831. batman_if = orig_node->router->if_incoming;
  832. memcpy(dstaddr, orig_node->router->addr, ETH_ALEN);
  833. spin_unlock_bh(&bat_priv->orig_hash_lock);
  834. /* create a copy of the skb, if needed, to modify it. */
  835. if (skb_cow(skb, sizeof(struct ethhdr)) < 0)
  836. goto out;
  837. icmp_packet = (struct icmp_packet *)skb->data;
  838. memcpy(icmp_packet->dst, icmp_packet->orig, ETH_ALEN);
  839. memcpy(icmp_packet->orig,
  840. bat_priv->primary_if->net_dev->dev_addr, ETH_ALEN);
  841. icmp_packet->msg_type = TTL_EXCEEDED;
  842. icmp_packet->ttl = TTL;
  843. send_skb_packet(skb, batman_if, dstaddr);
  844. ret = NET_RX_SUCCESS;
  845. goto out;
  846. unlock:
  847. rcu_read_unlock();
  848. spin_unlock_bh(&bat_priv->orig_hash_lock);
  849. out:
  850. if (neigh_node)
  851. neigh_node_free_ref(neigh_node);
  852. if (orig_node)
  853. kref_put(&orig_node->refcount, orig_node_free_ref);
  854. return ret;
  855. }
  856. int recv_icmp_packet(struct sk_buff *skb, struct batman_if *recv_if)
  857. {
  858. struct bat_priv *bat_priv = netdev_priv(recv_if->soft_iface);
  859. struct icmp_packet_rr *icmp_packet;
  860. struct ethhdr *ethhdr;
  861. struct orig_node *orig_node = NULL;
  862. struct neigh_node *neigh_node = NULL;
  863. struct batman_if *batman_if;
  864. int hdr_size = sizeof(struct icmp_packet);
  865. uint8_t dstaddr[ETH_ALEN];
  866. int ret = NET_RX_DROP;
  867. /**
  868. * we truncate all incoming icmp packets if they don't match our size
  869. */
  870. if (skb->len >= sizeof(struct icmp_packet_rr))
  871. hdr_size = sizeof(struct icmp_packet_rr);
  872. /* drop packet if it has not necessary minimum size */
  873. if (unlikely(!pskb_may_pull(skb, hdr_size)))
  874. goto out;
  875. ethhdr = (struct ethhdr *)skb_mac_header(skb);
  876. /* packet with unicast indication but broadcast recipient */
  877. if (is_broadcast_ether_addr(ethhdr->h_dest))
  878. goto out;
  879. /* packet with broadcast sender address */
  880. if (is_broadcast_ether_addr(ethhdr->h_source))
  881. goto out;
  882. /* not for me */
  883. if (!is_my_mac(ethhdr->h_dest))
  884. goto out;
  885. icmp_packet = (struct icmp_packet_rr *)skb->data;
  886. /* add record route information if not full */
  887. if ((hdr_size == sizeof(struct icmp_packet_rr)) &&
  888. (icmp_packet->rr_cur < BAT_RR_LEN)) {
  889. memcpy(&(icmp_packet->rr[icmp_packet->rr_cur]),
  890. ethhdr->h_dest, ETH_ALEN);
  891. icmp_packet->rr_cur++;
  892. }
  893. /* packet for me */
  894. if (is_my_mac(icmp_packet->dst))
  895. return recv_my_icmp_packet(bat_priv, skb, hdr_size);
  896. /* TTL exceeded */
  897. if (icmp_packet->ttl < 2)
  898. return recv_icmp_ttl_exceeded(bat_priv, skb);
  899. /* get routing information */
  900. spin_lock_bh(&bat_priv->orig_hash_lock);
  901. rcu_read_lock();
  902. orig_node = ((struct orig_node *)
  903. hash_find(bat_priv->orig_hash, compare_orig, choose_orig,
  904. icmp_packet->dst));
  905. if (!orig_node)
  906. goto unlock;
  907. kref_get(&orig_node->refcount);
  908. neigh_node = orig_node->router;
  909. if (!neigh_node)
  910. goto unlock;
  911. if (!atomic_inc_not_zero(&neigh_node->refcount)) {
  912. neigh_node = NULL;
  913. goto unlock;
  914. }
  915. rcu_read_unlock();
  916. /* don't lock while sending the packets ... we therefore
  917. * copy the required data before sending */
  918. batman_if = orig_node->router->if_incoming;
  919. memcpy(dstaddr, orig_node->router->addr, ETH_ALEN);
  920. spin_unlock_bh(&bat_priv->orig_hash_lock);
  921. /* create a copy of the skb, if needed, to modify it. */
  922. if (skb_cow(skb, sizeof(struct ethhdr)) < 0)
  923. goto out;
  924. icmp_packet = (struct icmp_packet_rr *)skb->data;
  925. /* decrement ttl */
  926. icmp_packet->ttl--;
  927. /* route it */
  928. send_skb_packet(skb, batman_if, dstaddr);
  929. ret = NET_RX_SUCCESS;
  930. goto out;
  931. unlock:
  932. rcu_read_unlock();
  933. spin_unlock_bh(&bat_priv->orig_hash_lock);
  934. out:
  935. if (neigh_node)
  936. neigh_node_free_ref(neigh_node);
  937. if (orig_node)
  938. kref_put(&orig_node->refcount, orig_node_free_ref);
  939. return ret;
  940. }
/* find a suitable router for this originator, and use
 * bonding if possible. increases the found neighbors
 * refcount (the caller must release it with neigh_node_free_ref()).
 *
 * @bat_priv: the bat priv of the soft interface
 * @orig_node: destination originator we need a next hop for (may be NULL)
 * @recv_if:   interface the packet to be routed came in on; NULL on the
 *             first (sending) node
 *
 * Returns the chosen neigh_node with an extra reference held, or NULL. */
struct neigh_node *find_router(struct bat_priv *bat_priv,
			       struct orig_node *orig_node,
			       struct batman_if *recv_if)
{
	struct orig_node *primary_orig_node;
	struct orig_node *router_orig;
	struct neigh_node *router, *first_candidate, *tmp_neigh_node;
	static uint8_t zero_mac[ETH_ALEN] = {0, 0, 0, 0, 0, 0};
	int bonding_enabled;

	if (!orig_node)
		return NULL;

	if (!orig_node->router)
		return NULL;

	/* without bonding, the first node should
	 * always choose the default router. */
	bonding_enabled = atomic_read(&bat_priv->bonding);

	rcu_read_lock();
	/* select default router to output */
	router = orig_node->router;
	router_orig = orig_node->router->orig_node;
	/* only proceed if the router is still alive: a failed
	 * atomic_inc_not_zero() means it is being freed right now */
	if (!router_orig || !atomic_inc_not_zero(&router->refcount)) {
		rcu_read_unlock();
		return NULL;
	}

	/* recv_if == NULL means we are the first hop; without bonding the
	 * default router is always the right answer there */
	if ((!recv_if) && (!bonding_enabled))
		goto return_router;

	/* if we have something in the primary_addr, we can search
	 * for a potential bonding candidate. */
	if (compare_eth(router_orig->primary_addr, zero_mac))
		goto return_router;

	/* find the orig_node which has the primary interface. might
	 * even be the same as our router_orig in many cases */
	if (compare_eth(router_orig->primary_addr, router_orig->orig)) {
		primary_orig_node = router_orig;
	} else {
		/* NOTE(review): hash_find() result is used without taking a
		 * reference - presumably safe because the caller holds
		 * orig_hash_lock; confirm against the callers */
		primary_orig_node = hash_find(bat_priv->orig_hash, compare_orig,
					      choose_orig,
					      router_orig->primary_addr);

		if (!primary_orig_node)
			goto return_router;
	}

	/* with less than 2 candidates, we can't do any
	 * bonding and prefer the original router. */
	if (atomic_read(&primary_orig_node->bond_candidates) < 2)
		goto return_router;


	/* all nodes between should choose a candidate which
	 * is not on the interface where the packet came
	 * in. */

	/* drop the reference on the default router: from here on we pick
	 * from the bonding candidate list instead */
	neigh_node_free_ref(router);
	first_candidate = NULL;
	router = NULL;

	if (bonding_enabled) {
		/* in the bonding case, send the packets in a round
		 * robin fashion over the remaining interfaces. */

		list_for_each_entry_rcu(tmp_neigh_node,
				&primary_orig_node->bond_list, bonding_list) {
			if (!first_candidate)
				first_candidate = tmp_neigh_node;
			/* recv_if == NULL on the first node. */
			if (tmp_neigh_node->if_incoming != recv_if &&
			    atomic_inc_not_zero(&tmp_neigh_node->refcount)) {
				router = tmp_neigh_node;
				break;
			}
		}

		/* use the first candidate if nothing was found. */
		if (!router && first_candidate &&
		    atomic_inc_not_zero(&first_candidate->refcount))
			router = first_candidate;

		if (!router) {
			rcu_read_unlock();
			return NULL;
		}

		/* selected should point to the next element
		 * after the current router */
		spin_lock_bh(&primary_orig_node->neigh_list_lock);
		/* this is a list_move(), which unfortunately
		 * does not exist as rcu version */
		list_del_rcu(&primary_orig_node->bond_list);
		list_add_rcu(&primary_orig_node->bond_list,
			     &router->bonding_list);
		spin_unlock_bh(&primary_orig_node->neigh_list_lock);

	} else {
		/* if bonding is disabled, use the best of the
		 * remaining candidates which are not using
		 * this interface. */
		list_for_each_entry_rcu(tmp_neigh_node,
			&primary_orig_node->bond_list, bonding_list) {
			if (!first_candidate)
				first_candidate = tmp_neigh_node;

			/* recv_if == NULL on the first node. */
			if (tmp_neigh_node->if_incoming == recv_if)
				continue;

			/* skip candidates that are being freed */
			if (!atomic_inc_not_zero(&tmp_neigh_node->refcount))
				continue;

			/* if we don't have a router yet
			 * or this one is better, choose it. */
			if ((!router) ||
			    (tmp_neigh_node->tq_avg > router->tq_avg)) {
				/* decrement refcount of
				 * previously selected router */
				if (router)
					neigh_node_free_ref(router);

				router = tmp_neigh_node;
				/* take a second reference for the selection;
				 * the loop-scan reference is dropped below */
				atomic_inc_not_zero(&router->refcount);
			}

			neigh_node_free_ref(tmp_neigh_node);
		}

		/* use the first candidate if nothing was found. */
		if (!router && first_candidate &&
		    atomic_inc_not_zero(&first_candidate->refcount))
			router = first_candidate;
	}
return_router:
	rcu_read_unlock();
	return router;
}
  1061. static int check_unicast_packet(struct sk_buff *skb, int hdr_size)
  1062. {
  1063. struct ethhdr *ethhdr;
  1064. /* drop packet if it has not necessary minimum size */
  1065. if (unlikely(!pskb_may_pull(skb, hdr_size)))
  1066. return -1;
  1067. ethhdr = (struct ethhdr *)skb_mac_header(skb);
  1068. /* packet with unicast indication but broadcast recipient */
  1069. if (is_broadcast_ether_addr(ethhdr->h_dest))
  1070. return -1;
  1071. /* packet with broadcast sender address */
  1072. if (is_broadcast_ether_addr(ethhdr->h_source))
  1073. return -1;
  1074. /* not for me */
  1075. if (!is_my_mac(ethhdr->h_dest))
  1076. return -1;
  1077. return 0;
  1078. }
  1079. int route_unicast_packet(struct sk_buff *skb, struct batman_if *recv_if,
  1080. int hdr_size)
  1081. {
  1082. struct bat_priv *bat_priv = netdev_priv(recv_if->soft_iface);
  1083. struct orig_node *orig_node = NULL;
  1084. struct neigh_node *neigh_node = NULL;
  1085. struct batman_if *batman_if;
  1086. uint8_t dstaddr[ETH_ALEN];
  1087. struct unicast_packet *unicast_packet;
  1088. struct ethhdr *ethhdr = (struct ethhdr *)skb_mac_header(skb);
  1089. int ret = NET_RX_DROP;
  1090. struct sk_buff *new_skb;
  1091. unicast_packet = (struct unicast_packet *)skb->data;
  1092. /* TTL exceeded */
  1093. if (unicast_packet->ttl < 2) {
  1094. pr_debug("Warning - can't forward unicast packet from %pM to "
  1095. "%pM: ttl exceeded\n", ethhdr->h_source,
  1096. unicast_packet->dest);
  1097. goto out;
  1098. }
  1099. /* get routing information */
  1100. spin_lock_bh(&bat_priv->orig_hash_lock);
  1101. rcu_read_lock();
  1102. orig_node = ((struct orig_node *)
  1103. hash_find(bat_priv->orig_hash, compare_orig, choose_orig,
  1104. unicast_packet->dest));
  1105. if (!orig_node)
  1106. goto unlock;
  1107. kref_get(&orig_node->refcount);
  1108. rcu_read_unlock();
  1109. /* find_router() increases neigh_nodes refcount if found. */
  1110. neigh_node = find_router(bat_priv, orig_node, recv_if);
  1111. if (!neigh_node) {
  1112. spin_unlock_bh(&bat_priv->orig_hash_lock);
  1113. goto out;
  1114. }
  1115. /* don't lock while sending the packets ... we therefore
  1116. * copy the required data before sending */
  1117. batman_if = neigh_node->if_incoming;
  1118. memcpy(dstaddr, neigh_node->addr, ETH_ALEN);
  1119. spin_unlock_bh(&bat_priv->orig_hash_lock);
  1120. /* create a copy of the skb, if needed, to modify it. */
  1121. if (skb_cow(skb, sizeof(struct ethhdr)) < 0)
  1122. goto out;
  1123. unicast_packet = (struct unicast_packet *)skb->data;
  1124. if (unicast_packet->packet_type == BAT_UNICAST &&
  1125. atomic_read(&bat_priv->fragmentation) &&
  1126. skb->len > batman_if->net_dev->mtu)
  1127. return frag_send_skb(skb, bat_priv, batman_if,
  1128. dstaddr);
  1129. if (unicast_packet->packet_type == BAT_UNICAST_FRAG &&
  1130. frag_can_reassemble(skb, batman_if->net_dev->mtu)) {
  1131. ret = frag_reassemble_skb(skb, bat_priv, &new_skb);
  1132. if (ret == NET_RX_DROP)
  1133. goto out;
  1134. /* packet was buffered for late merge */
  1135. if (!new_skb) {
  1136. ret = NET_RX_SUCCESS;
  1137. goto out;
  1138. }
  1139. skb = new_skb;
  1140. unicast_packet = (struct unicast_packet *)skb->data;
  1141. }
  1142. /* decrement ttl */
  1143. unicast_packet->ttl--;
  1144. /* route it */
  1145. send_skb_packet(skb, batman_if, dstaddr);
  1146. ret = NET_RX_SUCCESS;
  1147. goto out;
  1148. unlock:
  1149. rcu_read_unlock();
  1150. spin_unlock_bh(&bat_priv->orig_hash_lock);
  1151. out:
  1152. if (neigh_node)
  1153. neigh_node_free_ref(neigh_node);
  1154. if (orig_node)
  1155. kref_put(&orig_node->refcount, orig_node_free_ref);
  1156. return ret;
  1157. }
  1158. int recv_unicast_packet(struct sk_buff *skb, struct batman_if *recv_if)
  1159. {
  1160. struct unicast_packet *unicast_packet;
  1161. int hdr_size = sizeof(struct unicast_packet);
  1162. if (check_unicast_packet(skb, hdr_size) < 0)
  1163. return NET_RX_DROP;
  1164. unicast_packet = (struct unicast_packet *)skb->data;
  1165. /* packet for me */
  1166. if (is_my_mac(unicast_packet->dest)) {
  1167. interface_rx(recv_if->soft_iface, skb, recv_if, hdr_size);
  1168. return NET_RX_SUCCESS;
  1169. }
  1170. return route_unicast_packet(skb, recv_if, hdr_size);
  1171. }
  1172. int recv_ucast_frag_packet(struct sk_buff *skb, struct batman_if *recv_if)
  1173. {
  1174. struct bat_priv *bat_priv = netdev_priv(recv_if->soft_iface);
  1175. struct unicast_frag_packet *unicast_packet;
  1176. int hdr_size = sizeof(struct unicast_frag_packet);
  1177. struct sk_buff *new_skb = NULL;
  1178. int ret;
  1179. if (check_unicast_packet(skb, hdr_size) < 0)
  1180. return NET_RX_DROP;
  1181. unicast_packet = (struct unicast_frag_packet *)skb->data;
  1182. /* packet for me */
  1183. if (is_my_mac(unicast_packet->dest)) {
  1184. ret = frag_reassemble_skb(skb, bat_priv, &new_skb);
  1185. if (ret == NET_RX_DROP)
  1186. return NET_RX_DROP;
  1187. /* packet was buffered for late merge */
  1188. if (!new_skb)
  1189. return NET_RX_SUCCESS;
  1190. interface_rx(recv_if->soft_iface, new_skb, recv_if,
  1191. sizeof(struct unicast_packet));
  1192. return NET_RX_SUCCESS;
  1193. }
  1194. return route_unicast_packet(skb, recv_if, hdr_size);
  1195. }
/**
 * recv_bcast_packet - receive handler for batman broadcast packets
 * @skb: the received socket buffer
 * @recv_if: the hard interface the packet arrived on
 *
 * Validates the frame, filters our own and duplicate broadcasts via the
 * per-originator sequence number window, then rebroadcasts the packet and
 * delivers a copy locally.
 *
 * Locking: takes orig_hash_lock, an RCU read section for the hash lookup,
 * and the originator's bcast_seqno_lock - the three unlock labels below
 * mirror the three acquisition depths and must stay in this order.
 *
 * Returns NET_RX_SUCCESS or NET_RX_DROP.
 */
int recv_bcast_packet(struct sk_buff *skb, struct batman_if *recv_if)
{
	struct bat_priv *bat_priv = netdev_priv(recv_if->soft_iface);
	struct orig_node *orig_node = NULL;
	struct bcast_packet *bcast_packet;
	struct ethhdr *ethhdr;
	int hdr_size = sizeof(struct bcast_packet);
	int ret = NET_RX_DROP;
	int32_t seq_diff;

	/* drop packet if it has not necessary minimum size */
	if (unlikely(!pskb_may_pull(skb, hdr_size)))
		goto out;

	ethhdr = (struct ethhdr *)skb_mac_header(skb);

	/* packet with broadcast indication but unicast recipient */
	if (!is_broadcast_ether_addr(ethhdr->h_dest))
		goto out;

	/* packet with broadcast sender address */
	if (is_broadcast_ether_addr(ethhdr->h_source))
		goto out;

	/* ignore broadcasts sent by myself */
	if (is_my_mac(ethhdr->h_source))
		goto out;

	bcast_packet = (struct bcast_packet *)skb->data;

	/* ignore broadcasts originated by myself */
	if (is_my_mac(bcast_packet->orig))
		goto out;

	/* TTL would expire after the rebroadcast - drop */
	if (bcast_packet->ttl < 2)
		goto out;

	spin_lock_bh(&bat_priv->orig_hash_lock);
	rcu_read_lock();
	orig_node = ((struct orig_node *)
		     hash_find(bat_priv->orig_hash, compare_orig, choose_orig,
			       bcast_packet->orig));

	if (!orig_node)
		goto rcu_unlock;

	kref_get(&orig_node->refcount);
	rcu_read_unlock();

	spin_lock_bh(&orig_node->bcast_seqno_lock);

	/* check whether the packet is a duplicate */
	if (get_bit_status(orig_node->bcast_bits, orig_node->last_bcast_seqno,
			   ntohl(bcast_packet->seqno)))
		goto spin_unlock;

	seq_diff = ntohl(bcast_packet->seqno) - orig_node->last_bcast_seqno;

	/* check whether the packet is old and the host just restarted. */
	if (window_protected(bat_priv, seq_diff,
			     &orig_node->bcast_seqno_reset))
		goto spin_unlock;

	/* mark broadcast in flood history, update window position
	 * if required. */
	if (bit_get_packet(bat_priv, orig_node->bcast_bits, seq_diff, 1))
		orig_node->last_bcast_seqno = ntohl(bcast_packet->seqno);

	spin_unlock_bh(&orig_node->bcast_seqno_lock);
	spin_unlock_bh(&bat_priv->orig_hash_lock);

	/* rebroadcast packet */
	/* NOTE(review): assumes add_bcast_packet_to_list() queues its own
	 * copy of the skb, since interface_rx() below consumes the
	 * original - confirm in send.c */
	add_bcast_packet_to_list(bat_priv, skb);

	/* broadcast for me */
	interface_rx(recv_if->soft_iface, skb, recv_if, hdr_size);

	ret = NET_RX_SUCCESS;
	goto out;

rcu_unlock:
	rcu_read_unlock();
	spin_unlock_bh(&bat_priv->orig_hash_lock);
	goto out;
spin_unlock:
	spin_unlock_bh(&orig_node->bcast_seqno_lock);
	spin_unlock_bh(&bat_priv->orig_hash_lock);
out:
	if (orig_node)
		kref_put(&orig_node->refcount, orig_node_free_ref);
	return ret;
}
  1267. int recv_vis_packet(struct sk_buff *skb, struct batman_if *recv_if)
  1268. {
  1269. struct vis_packet *vis_packet;
  1270. struct ethhdr *ethhdr;
  1271. struct bat_priv *bat_priv = netdev_priv(recv_if->soft_iface);
  1272. int hdr_size = sizeof(struct vis_packet);
  1273. /* keep skb linear */
  1274. if (skb_linearize(skb) < 0)
  1275. return NET_RX_DROP;
  1276. if (unlikely(!pskb_may_pull(skb, hdr_size)))
  1277. return NET_RX_DROP;
  1278. vis_packet = (struct vis_packet *)skb->data;
  1279. ethhdr = (struct ethhdr *)skb_mac_header(skb);
  1280. /* not for me */
  1281. if (!is_my_mac(ethhdr->h_dest))
  1282. return NET_RX_DROP;
  1283. /* ignore own packets */
  1284. if (is_my_mac(vis_packet->vis_orig))
  1285. return NET_RX_DROP;
  1286. if (is_my_mac(vis_packet->sender_orig))
  1287. return NET_RX_DROP;
  1288. switch (vis_packet->vis_type) {
  1289. case VIS_TYPE_SERVER_SYNC:
  1290. receive_server_sync_packet(bat_priv, vis_packet,
  1291. skb_headlen(skb));
  1292. break;
  1293. case VIS_TYPE_CLIENT_UPDATE:
  1294. receive_client_update_packet(bat_priv, vis_packet,
  1295. skb_headlen(skb));
  1296. break;
  1297. default: /* ignore unknown packet */
  1298. break;
  1299. }
  1300. /* We take a copy of the data in the packet, so we should
  1301. always free the skbuf. */
  1302. return NET_RX_DROP;
  1303. }