routing.c

/*
 * Copyright (C) 2007-2010 B.A.T.M.A.N. contributors:
 *
 * Marek Lindner, Simon Wunderlich
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
 * 02110-1301, USA
 *
 */

#include "main.h"
#include "routing.h"
#include "send.h"
#include "hash.h"
#include "soft-interface.h"
#include "hard-interface.h"
#include "icmp_socket.h"
#include "translation-table.h"
#include "originator.h"
#include "types.h"
#include "ring_buffer.h"
#include "vis.h"
#include "aggregation.h"
#include "gateway_common.h"
#include "gateway_client.h"
#include "unicast.h"
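
/* slide the own broadcast window of every known originator for this
 * interface by one position and refresh the cached packet count */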
void slide_own_bcast_window(struct batman_if *batman_if)
{
        struct bat_priv *bat_priv = netdev_priv(batman_if->soft_iface);
        struct hashtable_t *hash = bat_priv->orig_hash;
        struct hlist_node *walk;
        struct hlist_head *head;
        struct element_t *bucket;
        struct orig_node *orig_node;
        unsigned long *word;
        int i;
        size_t word_index;

        spin_lock_bh(&bat_priv->orig_hash_lock);

        for (i = 0; i < hash->size; i++) {
                head = &hash->table[i];

                hlist_for_each_entry(bucket, walk, head, hlist) {
                        orig_node = bucket->data;

                        word_index = batman_if->if_num * NUM_WORDS;
                        word = &(orig_node->bcast_own[word_index]);

                        bit_get_packet(bat_priv, word, 1, 0);
                        orig_node->bcast_own_sum[batman_if->if_num] =
                                bit_packet_count(word);
                }
        }

        spin_unlock_bh(&bat_priv->orig_hash_lock);
}
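
/* replace the HNA entries announced by this originator whenever the
 * received HNA buffer differs from the one stored so far */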
static void update_HNA(struct bat_priv *bat_priv, struct orig_node *orig_node,
                       unsigned char *hna_buff, int hna_buff_len)
{
        if ((hna_buff_len != orig_node->hna_buff_len) ||
            ((hna_buff_len > 0) &&
             (orig_node->hna_buff_len > 0) &&
             (memcmp(orig_node->hna_buff, hna_buff, hna_buff_len) != 0))) {

                if (orig_node->hna_buff_len > 0)
                        hna_global_del_orig(bat_priv, orig_node,
                                            "originator changed hna");

                if ((hna_buff_len > 0) && (hna_buff))
                        hna_global_add_orig(bat_priv, orig_node,
                                            hna_buff, hna_buff_len);
        }
}

static void update_route(struct bat_priv *bat_priv,
                         struct orig_node *orig_node,
                         struct neigh_node *neigh_node,
                         unsigned char *hna_buff, int hna_buff_len)
{
        /* route deleted */
        if ((orig_node->router) && (!neigh_node)) {

                bat_dbg(DBG_ROUTES, bat_priv, "Deleting route towards: %pM\n",
                        orig_node->orig);
                hna_global_del_orig(bat_priv, orig_node,
                                    "originator timed out");

        /* route added */
        } else if ((!orig_node->router) && (neigh_node)) {

                bat_dbg(DBG_ROUTES, bat_priv,
                        "Adding route towards: %pM (via %pM)\n",
                        orig_node->orig, neigh_node->addr);
                hna_global_add_orig(bat_priv, orig_node,
                                    hna_buff, hna_buff_len);

        /* route changed */
        } else {
                bat_dbg(DBG_ROUTES, bat_priv,
                        "Changing route towards: %pM "
                        "(now via %pM - was via %pM)\n",
                        orig_node->orig, neigh_node->addr,
                        orig_node->router->addr);
        }

        orig_node->router = neigh_node;
}
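
/* update the route towards orig_node; if the next hop is unchanged
 * only the announced HNA entries are refreshed */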
void update_routes(struct bat_priv *bat_priv, struct orig_node *orig_node,
                   struct neigh_node *neigh_node, unsigned char *hna_buff,
                   int hna_buff_len)
{
        if (!orig_node)
                return;

        if (orig_node->router != neigh_node)
                update_route(bat_priv, orig_node, neigh_node,
                             hna_buff, hna_buff_len);
        /* may be just HNA changed */
        else
                update_HNA(bat_priv, orig_node, hna_buff, hna_buff_len);
}
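
/* check whether the link to this neighbor can be considered
 * bidirectional: compare how many of our own OGMs the neighbor echoed
 * back (bcast_own_sum) with how many OGMs we received from it, apply
 * the asymmetry penalty to the packet's TQ and return 1 if the result
 * reaches TQ_TOTAL_BIDRECT_LIMIT, 0 otherwise */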
static int is_bidirectional_neigh(struct orig_node *orig_node,
                                  struct orig_node *orig_neigh_node,
                                  struct batman_packet *batman_packet,
                                  struct batman_if *if_incoming)
{
        struct bat_priv *bat_priv = netdev_priv(if_incoming->soft_iface);
        struct neigh_node *neigh_node = NULL, *tmp_neigh_node = NULL;
        unsigned char total_count;

        if (orig_node == orig_neigh_node) {
                list_for_each_entry(tmp_neigh_node,
                                    &orig_node->neigh_list,
                                    list) {

                        if (compare_orig(tmp_neigh_node->addr,
                                         orig_neigh_node->orig) &&
                            (tmp_neigh_node->if_incoming == if_incoming))
                                neigh_node = tmp_neigh_node;
                }

                if (!neigh_node)
                        neigh_node = create_neighbor(orig_node,
                                                     orig_neigh_node,
                                                     orig_neigh_node->orig,
                                                     if_incoming);
                /* create_neighbor failed, return 0 */
                if (!neigh_node)
                        return 0;

                neigh_node->last_valid = jiffies;
        } else {
                /* find packet count of corresponding one hop neighbor */
                list_for_each_entry(tmp_neigh_node,
                                    &orig_neigh_node->neigh_list, list) {

                        if (compare_orig(tmp_neigh_node->addr,
                                         orig_neigh_node->orig) &&
                            (tmp_neigh_node->if_incoming == if_incoming))
                                neigh_node = tmp_neigh_node;
                }

                if (!neigh_node)
                        neigh_node = create_neighbor(orig_neigh_node,
                                                     orig_neigh_node,
                                                     orig_neigh_node->orig,
                                                     if_incoming);

                /* create_neighbor failed, return 0 */
                if (!neigh_node)
                        return 0;
        }

        orig_node->last_valid = jiffies;

        /* take care not to get a value bigger than 100% */
        total_count = (orig_neigh_node->bcast_own_sum[if_incoming->if_num] >
                       neigh_node->real_packet_count ?
                       neigh_node->real_packet_count :
                       orig_neigh_node->bcast_own_sum[if_incoming->if_num]);

        /* if we have too few packets (too little data) we set tq_own to zero */
        /* if we receive too few packets it is not considered bidirectional */
        if ((total_count < TQ_LOCAL_BIDRECT_SEND_MINIMUM) ||
            (neigh_node->real_packet_count < TQ_LOCAL_BIDRECT_RECV_MINIMUM))
                orig_neigh_node->tq_own = 0;
        else
                /* neigh_node->real_packet_count is never zero as we
                 * only purge old information when getting new
                 * information */
                orig_neigh_node->tq_own = (TQ_MAX_VALUE * total_count) /
                        neigh_node->real_packet_count;

        /*
         * 1 - ((1-x) ** 3), normalized to TQ_MAX_VALUE this does
         * affect the nearly-symmetric links only a little, but
         * punishes asymmetric links more. This will give a value
         * between 0 and TQ_MAX_VALUE
         */
        orig_neigh_node->tq_asym_penalty =
                TQ_MAX_VALUE -
                (TQ_MAX_VALUE *
                 (TQ_LOCAL_WINDOW_SIZE - neigh_node->real_packet_count) *
                 (TQ_LOCAL_WINDOW_SIZE - neigh_node->real_packet_count) *
                 (TQ_LOCAL_WINDOW_SIZE - neigh_node->real_packet_count)) /
                (TQ_LOCAL_WINDOW_SIZE *
                 TQ_LOCAL_WINDOW_SIZE *
                 TQ_LOCAL_WINDOW_SIZE);

        batman_packet->tq = ((batman_packet->tq *
                              orig_neigh_node->tq_own *
                              orig_neigh_node->tq_asym_penalty) /
                             (TQ_MAX_VALUE * TQ_MAX_VALUE));

        bat_dbg(DBG_BATMAN, bat_priv,
                "bidirectional: "
                "orig = %-15pM neigh = %-15pM => own_bcast = %2i, "
                "real recv = %2i, local tq: %3i, asym_penalty: %3i, "
                "total tq: %3i\n",
                orig_node->orig, orig_neigh_node->orig, total_count,
                neigh_node->real_packet_count, orig_neigh_node->tq_own,
                orig_neigh_node->tq_asym_penalty, batman_packet->tq);

        /* if link has the minimum required transmission quality
         * consider it bidirectional */
        if (batman_packet->tq >= TQ_TOTAL_BIDRECT_LIMIT)
                return 1;

        return 0;
}
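
/* update the originator entry with the data carried by the received
 * OGM: refresh the neighbor ranking, switch the route if this neighbor
 * offers a better (or equally good but more symmetric) path and keep
 * the gateway information in sync */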
static void update_orig(struct bat_priv *bat_priv,
                        struct orig_node *orig_node,
                        struct ethhdr *ethhdr,
                        struct batman_packet *batman_packet,
                        struct batman_if *if_incoming,
                        unsigned char *hna_buff, int hna_buff_len,
                        char is_duplicate)
{
        struct neigh_node *neigh_node = NULL, *tmp_neigh_node = NULL;
        int tmp_hna_buff_len;

        bat_dbg(DBG_BATMAN, bat_priv, "update_originator(): "
                "Searching and updating originator entry of received packet\n");

        list_for_each_entry(tmp_neigh_node, &orig_node->neigh_list, list) {
                if (compare_orig(tmp_neigh_node->addr, ethhdr->h_source) &&
                    (tmp_neigh_node->if_incoming == if_incoming)) {
                        neigh_node = tmp_neigh_node;
                        continue;
                }

                if (is_duplicate)
                        continue;

                ring_buffer_set(tmp_neigh_node->tq_recv,
                                &tmp_neigh_node->tq_index, 0);
                tmp_neigh_node->tq_avg =
                        ring_buffer_avg(tmp_neigh_node->tq_recv);
        }

        if (!neigh_node) {
                struct orig_node *orig_tmp;

                orig_tmp = get_orig_node(bat_priv, ethhdr->h_source);
                if (!orig_tmp)
                        return;

                neigh_node = create_neighbor(orig_node, orig_tmp,
                                             ethhdr->h_source, if_incoming);
                if (!neigh_node)
                        return;
        } else
                bat_dbg(DBG_BATMAN, bat_priv,
                        "Updating existing last-hop neighbor of originator\n");

        orig_node->flags = batman_packet->flags;
        neigh_node->last_valid = jiffies;

        ring_buffer_set(neigh_node->tq_recv,
                        &neigh_node->tq_index,
                        batman_packet->tq);
        neigh_node->tq_avg = ring_buffer_avg(neigh_node->tq_recv);

        if (!is_duplicate) {
                orig_node->last_ttl = batman_packet->ttl;
                neigh_node->last_ttl = batman_packet->ttl;
        }

        tmp_hna_buff_len = (hna_buff_len > batman_packet->num_hna * ETH_ALEN ?
                            batman_packet->num_hna * ETH_ALEN : hna_buff_len);

        /* if this neighbor already is our next hop there is nothing
         * to change */
        if (orig_node->router == neigh_node)
                goto update_hna;

        /* if this neighbor does not offer a better TQ we won't consider it */
        if ((orig_node->router) &&
            (orig_node->router->tq_avg > neigh_node->tq_avg))
                goto update_hna;

        /* if the TQ is the same and the link not more symmetric we
         * won't consider it either */
        if ((orig_node->router) &&
            ((neigh_node->tq_avg == orig_node->router->tq_avg) &&
             (orig_node->router->orig_node->bcast_own_sum[if_incoming->if_num]
              >= neigh_node->orig_node->bcast_own_sum[if_incoming->if_num])))
                goto update_hna;

        update_routes(bat_priv, orig_node, neigh_node,
                      hna_buff, tmp_hna_buff_len);
        goto update_gw;

update_hna:
        update_routes(bat_priv, orig_node, orig_node->router,
                      hna_buff, tmp_hna_buff_len);

update_gw:
        if (orig_node->gw_flags != batman_packet->gw_flags)
                gw_node_update(bat_priv, orig_node, batman_packet->gw_flags);

        orig_node->gw_flags = batman_packet->gw_flags;

        /* restart gateway selection if fast or late switching was enabled */
        if ((orig_node->gw_flags) &&
            (atomic_read(&bat_priv->gw_mode) == GW_MODE_CLIENT) &&
            (atomic_read(&bat_priv->gw_sel_class) > 2))
                gw_check_election(bat_priv, orig_node);
}

/* checks whether the host restarted and is in the protection time.
 * returns:
 *  0 if the packet is to be accepted
 *  1 if the packet is to be ignored.
 */
static int window_protected(struct bat_priv *bat_priv,
                            int32_t seq_num_diff,
                            unsigned long *last_reset)
{
        if ((seq_num_diff <= -TQ_LOCAL_WINDOW_SIZE)
            || (seq_num_diff >= EXPECTED_SEQNO_RANGE)) {
                if (time_after(jiffies, *last_reset +
                               msecs_to_jiffies(RESET_PROTECTION_MS))) {

                        *last_reset = jiffies;
                        bat_dbg(DBG_BATMAN, bat_priv,
                                "old packet received, start protection\n");

                        return 0;
                } else
                        return 1;
        }
        return 0;
}

/* processes a batman packet for all interfaces, adjusts the sequence number and
 * finds out whether it is a duplicate.
 * returns:
 *   1 the packet is a duplicate
 *   0 the packet has not yet been received
 *  -1 the packet is old and has been received while the seqno window
 *     was protected. Caller should drop it.
 */
static char count_real_packets(struct ethhdr *ethhdr,
                               struct batman_packet *batman_packet,
                               struct batman_if *if_incoming)
{
        struct bat_priv *bat_priv = netdev_priv(if_incoming->soft_iface);
        struct orig_node *orig_node;
        struct neigh_node *tmp_neigh_node;
        char is_duplicate = 0;
        int32_t seq_diff;
        int need_update = 0;
        int set_mark;

        orig_node = get_orig_node(bat_priv, batman_packet->orig);
        if (!orig_node)
                return 0;

        seq_diff = batman_packet->seqno - orig_node->last_real_seqno;

        /* signal the caller that the packet is to be dropped. */
        if (window_protected(bat_priv, seq_diff,
                             &orig_node->batman_seqno_reset))
                return -1;

        list_for_each_entry(tmp_neigh_node, &orig_node->neigh_list, list) {

                is_duplicate |= get_bit_status(tmp_neigh_node->real_bits,
                                               orig_node->last_real_seqno,
                                               batman_packet->seqno);

                if (compare_orig(tmp_neigh_node->addr, ethhdr->h_source) &&
                    (tmp_neigh_node->if_incoming == if_incoming))
                        set_mark = 1;
                else
                        set_mark = 0;

                /* if the window moved, set the update flag. */
                need_update |= bit_get_packet(bat_priv,
                                              tmp_neigh_node->real_bits,
                                              seq_diff, set_mark);

                tmp_neigh_node->real_packet_count =
                        bit_packet_count(tmp_neigh_node->real_bits);
        }

        if (need_update) {
                bat_dbg(DBG_BATMAN, bat_priv,
                        "updating last_seqno: old %d, new %d\n",
                        orig_node->last_real_seqno, batman_packet->seqno);
                orig_node->last_real_seqno = batman_packet->seqno;
        }

        return is_duplicate;
}

/* copy primary address for bonding */
static void mark_bonding_address(struct bat_priv *bat_priv,
                                 struct orig_node *orig_node,
                                 struct orig_node *orig_neigh_node,
                                 struct batman_packet *batman_packet)
{
        if (batman_packet->flags & PRIMARIES_FIRST_HOP)
                memcpy(orig_neigh_node->primary_addr,
                       orig_node->orig, ETH_ALEN);

        return;
}

/* mark possible bond.candidates in the neighbor list */
void update_bonding_candidates(struct bat_priv *bat_priv,
                               struct orig_node *orig_node)
{
        int candidates;
        int interference_candidate;
        int best_tq;
        struct neigh_node *tmp_neigh_node, *tmp_neigh_node2;
        struct neigh_node *first_candidate, *last_candidate;

        /* update the candidates for this originator */
        if (!orig_node->router) {
                orig_node->bond.candidates = 0;
                return;
        }

        best_tq = orig_node->router->tq_avg;

        /* update bond.candidates */
        candidates = 0;

        /* mark other nodes which also received "PRIMARIES FIRST HOP" packets
         * as "bonding partner" */

        /* first, zero the list */
        list_for_each_entry(tmp_neigh_node, &orig_node->neigh_list, list) {
                tmp_neigh_node->next_bond_candidate = NULL;
        }

        first_candidate = NULL;
        last_candidate = NULL;
        list_for_each_entry(tmp_neigh_node, &orig_node->neigh_list, list) {

                /* only consider if it has the same primary address ... */
                if (memcmp(orig_node->orig,
                           tmp_neigh_node->orig_node->primary_addr,
                           ETH_ALEN) != 0)
                        continue;

                /* ... and is good enough to be considered */
                if (tmp_neigh_node->tq_avg < best_tq - BONDING_TQ_THRESHOLD)
                        continue;

                /* check if we have another candidate with the same
                 * mac address or interface. If we do, we won't
                 * select this candidate because of possible interference. */
                interference_candidate = 0;
                list_for_each_entry(tmp_neigh_node2,
                                    &orig_node->neigh_list, list) {

                        if (tmp_neigh_node2 == tmp_neigh_node)
                                continue;

                        /* we only care if the other candidate is even
                         * considered as candidate. */
                        if (!tmp_neigh_node2->next_bond_candidate)
                                continue;

                        if ((tmp_neigh_node->if_incoming ==
                             tmp_neigh_node2->if_incoming)
                            || (memcmp(tmp_neigh_node->addr,
                                       tmp_neigh_node2->addr, ETH_ALEN) == 0)) {

                                interference_candidate = 1;
                                break;
                        }
                }
                /* don't care further if it is an interference candidate */
                if (interference_candidate)
                        continue;

                if (!first_candidate) {
                        first_candidate = tmp_neigh_node;
                        tmp_neigh_node->next_bond_candidate = first_candidate;
                } else
                        tmp_neigh_node->next_bond_candidate = last_candidate;

                last_candidate = tmp_neigh_node;

                candidates++;
        }

        if (candidates > 0) {
                first_candidate->next_bond_candidate = last_candidate;
                orig_node->bond.selected = first_candidate;
        }

        orig_node->bond.candidates = candidates;
}
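
/* handle a single OGM received on if_incoming: classify the sender,
 * apply the drop rules, update originator ranking and bonding state
 * and schedule the packet for rebroadcast where appropriate */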
void receive_bat_packet(struct ethhdr *ethhdr,
                        struct batman_packet *batman_packet,
                        unsigned char *hna_buff, int hna_buff_len,
                        struct batman_if *if_incoming)
{
        struct bat_priv *bat_priv = netdev_priv(if_incoming->soft_iface);
        struct batman_if *batman_if;
        struct orig_node *orig_neigh_node, *orig_node;
        char has_directlink_flag;
        char is_my_addr = 0, is_my_orig = 0, is_my_oldorig = 0;
        char is_broadcast = 0, is_bidirectional, is_single_hop_neigh;
        char is_duplicate;
        uint32_t if_incoming_seqno;

        /* Silently drop when the batman packet is actually not a
         * correct packet.
         *
         * This might happen if a packet is padded (e.g. Ethernet has a
         * minimum frame length of 64 bytes) and the aggregation interprets
         * it as an additional length.
         *
         * TODO: A more sane solution would be to have a bit in the
         * batman_packet to detect whether the packet is the last
         * packet in an aggregation. Here we expect that the padding
         * is always zero (or not 0x01)
         */
        if (batman_packet->packet_type != BAT_PACKET)
                return;

        /* could be changed by schedule_own_packet() */
        if_incoming_seqno = atomic_read(&if_incoming->seqno);

        has_directlink_flag = (batman_packet->flags & DIRECTLINK ? 1 : 0);

        is_single_hop_neigh = (compare_orig(ethhdr->h_source,
                                            batman_packet->orig) ? 1 : 0);

        bat_dbg(DBG_BATMAN, bat_priv,
                "Received BATMAN packet via NB: %pM, IF: %s [%pM] "
                "(from OG: %pM, via prev OG: %pM, seqno %d, tq %d, "
                "TTL %d, V %d, IDF %d)\n",
                ethhdr->h_source, if_incoming->net_dev->name,
                if_incoming->net_dev->dev_addr, batman_packet->orig,
                batman_packet->prev_sender, batman_packet->seqno,
                batman_packet->tq, batman_packet->ttl, batman_packet->version,
                has_directlink_flag);

        rcu_read_lock();
        list_for_each_entry_rcu(batman_if, &if_list, list) {
                if (batman_if->if_status != IF_ACTIVE)
                        continue;

                if (batman_if->soft_iface != if_incoming->soft_iface)
                        continue;

                if (compare_orig(ethhdr->h_source,
                                 batman_if->net_dev->dev_addr))
                        is_my_addr = 1;

                if (compare_orig(batman_packet->orig,
                                 batman_if->net_dev->dev_addr))
                        is_my_orig = 1;

                if (compare_orig(batman_packet->prev_sender,
                                 batman_if->net_dev->dev_addr))
                        is_my_oldorig = 1;

                if (compare_orig(ethhdr->h_source, broadcast_addr))
                        is_broadcast = 1;
        }
        rcu_read_unlock();

        if (batman_packet->version != COMPAT_VERSION) {
                bat_dbg(DBG_BATMAN, bat_priv,
                        "Drop packet: incompatible batman version (%i)\n",
                        batman_packet->version);
                return;
        }

        if (is_my_addr) {
                bat_dbg(DBG_BATMAN, bat_priv,
                        "Drop packet: received my own broadcast (sender: %pM"
                        ")\n",
                        ethhdr->h_source);
                return;
        }

        if (is_broadcast) {
                bat_dbg(DBG_BATMAN, bat_priv, "Drop packet: "
                        "ignoring all packets with broadcast source addr (sender: %pM"
                        ")\n", ethhdr->h_source);
                return;
        }

        if (is_my_orig) {
                unsigned long *word;
                int offset;

                orig_neigh_node = get_orig_node(bat_priv, ethhdr->h_source);
                if (!orig_neigh_node)
                        return;

                /* neighbor has to indicate direct link and it has to
                 * come via the corresponding interface */
                /* if received seqno equals last send seqno save new
                 * seqno for bidirectional check */
                if (has_directlink_flag &&
                    compare_orig(if_incoming->net_dev->dev_addr,
                                 batman_packet->orig) &&
                    (batman_packet->seqno - if_incoming_seqno + 2 == 0)) {
                        offset = if_incoming->if_num * NUM_WORDS;
                        word = &(orig_neigh_node->bcast_own[offset]);
                        bit_mark(word, 0);
                        orig_neigh_node->bcast_own_sum[if_incoming->if_num] =
                                bit_packet_count(word);
                }

                bat_dbg(DBG_BATMAN, bat_priv, "Drop packet: "
                        "originator packet from myself (via neighbor)\n");
                return;
        }

        if (is_my_oldorig) {
                bat_dbg(DBG_BATMAN, bat_priv,
                        "Drop packet: ignoring all rebroadcast echos (sender: "
                        "%pM)\n", ethhdr->h_source);
                return;
        }

        orig_node = get_orig_node(bat_priv, batman_packet->orig);
        if (!orig_node)
                return;

        is_duplicate = count_real_packets(ethhdr, batman_packet, if_incoming);

        if (is_duplicate == -1) {
                bat_dbg(DBG_BATMAN, bat_priv,
                        "Drop packet: packet within seqno protection time "
                        "(sender: %pM)\n", ethhdr->h_source);
                return;
        }

        if (batman_packet->tq == 0) {
                bat_dbg(DBG_BATMAN, bat_priv,
                        "Drop packet: originator packet with tq equal 0\n");
                return;
        }

        /* avoid temporary routing loops */
        if ((orig_node->router) &&
            (orig_node->router->orig_node->router) &&
            (compare_orig(orig_node->router->addr,
                          batman_packet->prev_sender)) &&
            !(compare_orig(batman_packet->orig, batman_packet->prev_sender)) &&
            (compare_orig(orig_node->router->addr,
                          orig_node->router->orig_node->router->addr))) {
                bat_dbg(DBG_BATMAN, bat_priv,
                        "Drop packet: ignoring all rebroadcast packets that "
                        "may make me loop (sender: %pM)\n", ethhdr->h_source);
                return;
        }

        /* if sender is a direct neighbor the sender mac equals
         * originator mac */
        orig_neigh_node = (is_single_hop_neigh ?
                           orig_node :
                           get_orig_node(bat_priv, ethhdr->h_source));
        if (!orig_neigh_node)
                return;

        /* drop packet if sender is not a direct neighbor and if we
         * don't route towards it */
        if (!is_single_hop_neigh && (!orig_neigh_node->router)) {
                bat_dbg(DBG_BATMAN, bat_priv,
                        "Drop packet: OGM via unknown neighbor!\n");
                return;
        }

        is_bidirectional = is_bidirectional_neigh(orig_node, orig_neigh_node,
                                                  batman_packet, if_incoming);

        /* update ranking if it is not a duplicate or has the same
         * seqno and similar ttl as the non-duplicate */
        if (is_bidirectional &&
            (!is_duplicate ||
             ((orig_node->last_real_seqno == batman_packet->seqno) &&
              (orig_node->last_ttl - 3 <= batman_packet->ttl))))
                update_orig(bat_priv, orig_node, ethhdr, batman_packet,
                            if_incoming, hna_buff, hna_buff_len, is_duplicate);

        mark_bonding_address(bat_priv, orig_node,
                             orig_neigh_node, batman_packet);
        update_bonding_candidates(bat_priv, orig_node);

        /* is single hop (direct) neighbor */
        if (is_single_hop_neigh) {

                /* mark direct link on incoming interface */
                schedule_forward_packet(orig_node, ethhdr, batman_packet,
                                        1, hna_buff_len, if_incoming);

                bat_dbg(DBG_BATMAN, bat_priv, "Forwarding packet: "
                        "rebroadcast neighbor packet with direct link flag\n");
                return;
        }

        /* multihop originator */
        if (!is_bidirectional) {
                bat_dbg(DBG_BATMAN, bat_priv,
                        "Drop packet: not received via bidirectional link\n");
                return;
        }

        if (is_duplicate) {
                bat_dbg(DBG_BATMAN, bat_priv,
                        "Drop packet: duplicate packet received\n");
                return;
        }

        bat_dbg(DBG_BATMAN, bat_priv,
                "Forwarding packet: rebroadcast originator packet\n");
        schedule_forward_packet(orig_node, ethhdr, batman_packet,
                                0, hna_buff_len, if_incoming);
}
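
/* receive handler for batman (OGM) packets: perform basic sanity
 * checks on the skb and hand the possibly aggregated OGMs over to
 * receive_aggr_bat_packet() */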
int recv_bat_packet(struct sk_buff *skb, struct batman_if *batman_if)
{
        struct bat_priv *bat_priv = netdev_priv(batman_if->soft_iface);
        struct ethhdr *ethhdr;

        /* drop packet if it doesn't have the necessary minimum size */
        if (unlikely(!pskb_may_pull(skb, sizeof(struct batman_packet))))
                return NET_RX_DROP;

        ethhdr = (struct ethhdr *)skb_mac_header(skb);

        /* packet with broadcast indication but unicast recipient */
        if (!is_broadcast_ether_addr(ethhdr->h_dest))
                return NET_RX_DROP;

        /* packet with broadcast sender address */
        if (is_broadcast_ether_addr(ethhdr->h_source))
                return NET_RX_DROP;

        /* create a copy of the skb, if needed, to modify it. */
        if (skb_cow(skb, 0) < 0)
                return NET_RX_DROP;

        /* keep skb linear */
        if (skb_linearize(skb) < 0)
                return NET_RX_DROP;

        ethhdr = (struct ethhdr *)skb_mac_header(skb);

        spin_lock_bh(&bat_priv->orig_hash_lock);
        receive_aggr_bat_packet(ethhdr,
                                skb->data,
                                skb_headlen(skb),
                                batman_if);
        spin_unlock_bh(&bat_priv->orig_hash_lock);

        kfree_skb(skb);
        return NET_RX_SUCCESS;
}
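
/* handle an icmp packet addressed to this host: hand it to the
 * userspace socket or answer an echo request directly */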
static int recv_my_icmp_packet(struct bat_priv *bat_priv,
                               struct sk_buff *skb, size_t icmp_len)
{
        struct orig_node *orig_node;
        struct icmp_packet_rr *icmp_packet;
        struct ethhdr *ethhdr;
        struct batman_if *batman_if;
        int ret;
        uint8_t dstaddr[ETH_ALEN];

        icmp_packet = (struct icmp_packet_rr *)skb->data;
        ethhdr = (struct ethhdr *)skb_mac_header(skb);

        /* add data to device queue */
        if (icmp_packet->msg_type != ECHO_REQUEST) {
                bat_socket_receive_packet(icmp_packet, icmp_len);
                return NET_RX_DROP;
        }

        if (!bat_priv->primary_if)
                return NET_RX_DROP;

        /* answer echo request (ping) */
        /* get routing information */
        spin_lock_bh(&bat_priv->orig_hash_lock);
        orig_node = ((struct orig_node *)hash_find(bat_priv->orig_hash,
                                                   compare_orig, choose_orig,
                                                   icmp_packet->orig));
        ret = NET_RX_DROP;

        if ((orig_node) && (orig_node->router)) {

                /* don't lock while sending the packets ... we therefore
                 * copy the required data before sending */
                batman_if = orig_node->router->if_incoming;
                memcpy(dstaddr, orig_node->router->addr, ETH_ALEN);
                spin_unlock_bh(&bat_priv->orig_hash_lock);

                /* create a copy of the skb, if needed, to modify it. */
                if (skb_cow(skb, sizeof(struct ethhdr)) < 0)
                        return NET_RX_DROP;

                icmp_packet = (struct icmp_packet_rr *)skb->data;
                ethhdr = (struct ethhdr *)skb_mac_header(skb);

                memcpy(icmp_packet->dst, icmp_packet->orig, ETH_ALEN);
                memcpy(icmp_packet->orig,
                       bat_priv->primary_if->net_dev->dev_addr, ETH_ALEN);
                icmp_packet->msg_type = ECHO_REPLY;
                icmp_packet->ttl = TTL;

                send_skb_packet(skb, batman_if, dstaddr);
                ret = NET_RX_SUCCESS;

        } else
                spin_unlock_bh(&bat_priv->orig_hash_lock);

        return ret;
}

static int recv_icmp_ttl_exceeded(struct bat_priv *bat_priv,
                                  struct sk_buff *skb, size_t icmp_len)
{
        struct orig_node *orig_node;
        struct icmp_packet *icmp_packet;
        struct ethhdr *ethhdr;
        struct batman_if *batman_if;
        int ret;
        uint8_t dstaddr[ETH_ALEN];

        icmp_packet = (struct icmp_packet *)skb->data;
        ethhdr = (struct ethhdr *)skb_mac_header(skb);

        /* send TTL exceeded if packet is an echo request (traceroute) */
        if (icmp_packet->msg_type != ECHO_REQUEST) {
                pr_debug("Warning - can't forward icmp packet from %pM to "
                         "%pM: ttl exceeded\n", icmp_packet->orig,
                         icmp_packet->dst);
                return NET_RX_DROP;
        }

        if (!bat_priv->primary_if)
                return NET_RX_DROP;

        /* get routing information */
        spin_lock_bh(&bat_priv->orig_hash_lock);
        orig_node = ((struct orig_node *)
                     hash_find(bat_priv->orig_hash, compare_orig, choose_orig,
                               icmp_packet->orig));
        ret = NET_RX_DROP;

        if ((orig_node) && (orig_node->router)) {

                /* don't lock while sending the packets ... we therefore
                 * copy the required data before sending */
                batman_if = orig_node->router->if_incoming;
                memcpy(dstaddr, orig_node->router->addr, ETH_ALEN);
                spin_unlock_bh(&bat_priv->orig_hash_lock);

                /* create a copy of the skb, if needed, to modify it. */
                if (skb_cow(skb, sizeof(struct ethhdr)) < 0)
                        return NET_RX_DROP;

                icmp_packet = (struct icmp_packet *)skb->data;
                ethhdr = (struct ethhdr *)skb_mac_header(skb);

                memcpy(icmp_packet->dst, icmp_packet->orig, ETH_ALEN);
                memcpy(icmp_packet->orig,
                       bat_priv->primary_if->net_dev->dev_addr, ETH_ALEN);
                icmp_packet->msg_type = TTL_EXCEEDED;
                icmp_packet->ttl = TTL;

                send_skb_packet(skb, batman_if, dstaddr);
                ret = NET_RX_SUCCESS;

        } else
                spin_unlock_bh(&bat_priv->orig_hash_lock);

        return ret;
}

int recv_icmp_packet(struct sk_buff *skb, struct batman_if *recv_if)
{
        struct bat_priv *bat_priv = netdev_priv(recv_if->soft_iface);
        struct icmp_packet_rr *icmp_packet;
        struct ethhdr *ethhdr;
        struct orig_node *orig_node;
        struct batman_if *batman_if;
        int hdr_size = sizeof(struct icmp_packet);
        int ret;
        uint8_t dstaddr[ETH_ALEN];

        /**
         * we truncate all incoming icmp packets if they don't match our size
         */
        if (skb->len >= sizeof(struct icmp_packet_rr))
                hdr_size = sizeof(struct icmp_packet_rr);

        /* drop packet if it doesn't have the necessary minimum size */
        if (unlikely(!pskb_may_pull(skb, hdr_size)))
                return NET_RX_DROP;

        ethhdr = (struct ethhdr *)skb_mac_header(skb);

        /* packet with unicast indication but broadcast recipient */
        if (is_broadcast_ether_addr(ethhdr->h_dest))
                return NET_RX_DROP;

        /* packet with broadcast sender address */
        if (is_broadcast_ether_addr(ethhdr->h_source))
                return NET_RX_DROP;

        /* not for me */
        if (!is_my_mac(ethhdr->h_dest))
                return NET_RX_DROP;

        icmp_packet = (struct icmp_packet_rr *)skb->data;

        /* add record route information if not full */
        if ((hdr_size == sizeof(struct icmp_packet_rr)) &&
            (icmp_packet->rr_cur < BAT_RR_LEN)) {
                memcpy(&(icmp_packet->rr[icmp_packet->rr_cur]),
                       ethhdr->h_dest, ETH_ALEN);
                icmp_packet->rr_cur++;
        }

        /* packet for me */
        if (is_my_mac(icmp_packet->dst))
                return recv_my_icmp_packet(bat_priv, skb, hdr_size);

        /* TTL exceeded */
        if (icmp_packet->ttl < 2)
                return recv_icmp_ttl_exceeded(bat_priv, skb, hdr_size);

        ret = NET_RX_DROP;

        /* get routing information */
        spin_lock_bh(&bat_priv->orig_hash_lock);
        orig_node = ((struct orig_node *)
                     hash_find(bat_priv->orig_hash, compare_orig, choose_orig,
                               icmp_packet->dst));

        if ((orig_node) && (orig_node->router)) {

                /* don't lock while sending the packets ... we therefore
                 * copy the required data before sending */
                batman_if = orig_node->router->if_incoming;
                memcpy(dstaddr, orig_node->router->addr, ETH_ALEN);
                spin_unlock_bh(&bat_priv->orig_hash_lock);

                /* create a copy of the skb, if needed, to modify it. */
                if (skb_cow(skb, sizeof(struct ethhdr)) < 0)
                        return NET_RX_DROP;

                icmp_packet = (struct icmp_packet_rr *)skb->data;
                ethhdr = (struct ethhdr *)skb_mac_header(skb);

                /* decrement ttl */
                icmp_packet->ttl--;

                /* route it */
                send_skb_packet(skb, batman_if, dstaddr);
                ret = NET_RX_SUCCESS;

        } else
                spin_unlock_bh(&bat_priv->orig_hash_lock);

        return ret;
}

/* find a suitable router for this originator, and use
 * bonding if possible. */
struct neigh_node *find_router(struct bat_priv *bat_priv,
                               struct orig_node *orig_node,
                               struct batman_if *recv_if)
{
        struct orig_node *primary_orig_node;
        struct orig_node *router_orig;
        struct neigh_node *router, *first_candidate, *best_router;
        static uint8_t zero_mac[ETH_ALEN] = {0, 0, 0, 0, 0, 0};
        int bonding_enabled;

        if (!orig_node)
                return NULL;

        if (!orig_node->router)
                return NULL;

        /* without bonding, the first node should
         * always choose the default router. */
        bonding_enabled = atomic_read(&bat_priv->bonding);

        if ((!recv_if) && (!bonding_enabled))
                return orig_node->router;

        router_orig = orig_node->router->orig_node;

        /* if we have something in the primary_addr, we can search
         * for a potential bonding candidate. */
        if (memcmp(router_orig->primary_addr, zero_mac, ETH_ALEN) == 0)
                return orig_node->router;

        /* find the orig_node which has the primary interface. might
         * even be the same as our router_orig in many cases */
        if (memcmp(router_orig->primary_addr,
                   router_orig->orig, ETH_ALEN) == 0) {
                primary_orig_node = router_orig;
        } else {
                primary_orig_node = hash_find(bat_priv->orig_hash, compare_orig,
                                              choose_orig,
                                              router_orig->primary_addr);
                if (!primary_orig_node)
                        return orig_node->router;
        }

        /* with less than 2 candidates, we can't do any
         * bonding and prefer the original router. */
        if (primary_orig_node->bond.candidates < 2)
                return orig_node->router;

        /* all nodes between should choose a candidate which
         * is not on the interface where the packet came
         * in. */
        first_candidate = primary_orig_node->bond.selected;
        router = first_candidate;

        if (bonding_enabled) {
                /* in the bonding case, send the packets in a round
                 * robin fashion over the remaining interfaces. */
                do {
                        /* recv_if == NULL on the first node. */
                        if (router->if_incoming != recv_if)
                                break;

                        router = router->next_bond_candidate;
                } while (router != first_candidate);

                primary_orig_node->bond.selected = router->next_bond_candidate;
        } else {
                /* if bonding is disabled, use the best of the
                 * remaining candidates which are not using
                 * this interface. */
                best_router = first_candidate;

                do {
                        /* recv_if == NULL on the first node. */
                        if ((router->if_incoming != recv_if) &&
                            (router->tq_avg > best_router->tq_avg))
                                best_router = router;

                        router = router->next_bond_candidate;
                } while (router != first_candidate);

                router = best_router;
        }
        return router;
}
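
/* basic sanity checks for unicast packets: returns 0 if the packet may
 * be processed further, -1 if it has to be dropped */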
static int check_unicast_packet(struct sk_buff *skb, int hdr_size)
{
        struct ethhdr *ethhdr;

        /* drop packet if it doesn't have the necessary minimum size */
        if (unlikely(!pskb_may_pull(skb, hdr_size)))
                return -1;

        ethhdr = (struct ethhdr *)skb_mac_header(skb);

        /* packet with unicast indication but broadcast recipient */
        if (is_broadcast_ether_addr(ethhdr->h_dest))
                return -1;

        /* packet with broadcast sender address */
        if (is_broadcast_ether_addr(ethhdr->h_source))
                return -1;

        /* not for me */
        if (!is_my_mac(ethhdr->h_dest))
                return -1;

        return 0;
}

int route_unicast_packet(struct sk_buff *skb, struct batman_if *recv_if,
                         int hdr_size)
{
        struct bat_priv *bat_priv = netdev_priv(recv_if->soft_iface);
        struct orig_node *orig_node;
        struct neigh_node *router;
        struct batman_if *batman_if;
        uint8_t dstaddr[ETH_ALEN];
        struct unicast_packet *unicast_packet;
        struct ethhdr *ethhdr = (struct ethhdr *)skb_mac_header(skb);
        int ret;
        struct sk_buff *new_skb;

        unicast_packet = (struct unicast_packet *)skb->data;

        /* TTL exceeded */
        if (unicast_packet->ttl < 2) {
                pr_debug("Warning - can't forward unicast packet from %pM to "
                         "%pM: ttl exceeded\n", ethhdr->h_source,
                         unicast_packet->dest);
                return NET_RX_DROP;
        }

        /* get routing information */
        spin_lock_bh(&bat_priv->orig_hash_lock);
        orig_node = ((struct orig_node *)
                     hash_find(bat_priv->orig_hash, compare_orig, choose_orig,
                               unicast_packet->dest));

        router = find_router(bat_priv, orig_node, recv_if);

        if (!router) {
                spin_unlock_bh(&bat_priv->orig_hash_lock);
                return NET_RX_DROP;
        }

        /* don't lock while sending the packets ... we therefore
         * copy the required data before sending */
        batman_if = router->if_incoming;
        memcpy(dstaddr, router->addr, ETH_ALEN);

        spin_unlock_bh(&bat_priv->orig_hash_lock);

        /* create a copy of the skb, if needed, to modify it. */
        if (skb_cow(skb, sizeof(struct ethhdr)) < 0)
                return NET_RX_DROP;

        unicast_packet = (struct unicast_packet *)skb->data;

        if (unicast_packet->packet_type == BAT_UNICAST &&
            atomic_read(&bat_priv->fragmentation) &&
            skb->len > batman_if->net_dev->mtu)
                return frag_send_skb(skb, bat_priv, batman_if,
                                     dstaddr);

        if (unicast_packet->packet_type == BAT_UNICAST_FRAG &&
            2 * skb->len - hdr_size <= batman_if->net_dev->mtu) {

                ret = frag_reassemble_skb(skb, bat_priv, &new_skb);

                if (ret == NET_RX_DROP)
                        return NET_RX_DROP;

                /* packet was buffered for late merge */
                if (!new_skb)
                        return NET_RX_SUCCESS;

                skb = new_skb;
                unicast_packet = (struct unicast_packet *)skb->data;
        }

        /* decrement ttl */
        unicast_packet->ttl--;

        /* route it */
        send_skb_packet(skb, batman_if, dstaddr);

        return NET_RX_SUCCESS;
}

int recv_unicast_packet(struct sk_buff *skb, struct batman_if *recv_if)
{
        struct unicast_packet *unicast_packet;
        int hdr_size = sizeof(struct unicast_packet);

        if (check_unicast_packet(skb, hdr_size) < 0)
                return NET_RX_DROP;

        unicast_packet = (struct unicast_packet *)skb->data;

        /* packet for me */
        if (is_my_mac(unicast_packet->dest)) {
                interface_rx(recv_if->soft_iface, skb, recv_if, hdr_size);
                return NET_RX_SUCCESS;
        }

        return route_unicast_packet(skb, recv_if, hdr_size);
}

int recv_ucast_frag_packet(struct sk_buff *skb, struct batman_if *recv_if)
{
        struct bat_priv *bat_priv = netdev_priv(recv_if->soft_iface);
        struct unicast_frag_packet *unicast_packet;
        int hdr_size = sizeof(struct unicast_frag_packet);
        struct sk_buff *new_skb = NULL;
        int ret;

        if (check_unicast_packet(skb, hdr_size) < 0)
                return NET_RX_DROP;

        unicast_packet = (struct unicast_frag_packet *)skb->data;

        /* packet for me */
        if (is_my_mac(unicast_packet->dest)) {

                ret = frag_reassemble_skb(skb, bat_priv, &new_skb);

                if (ret == NET_RX_DROP)
                        return NET_RX_DROP;

                /* packet was buffered for late merge */
                if (!new_skb)
                        return NET_RX_SUCCESS;

                interface_rx(recv_if->soft_iface, new_skb, recv_if,
                             sizeof(struct unicast_packet));
                return NET_RX_SUCCESS;
        }

        return route_unicast_packet(skb, recv_if, hdr_size);
}
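
/* receive handler for broadcast packets: drop duplicates and packets
 * from hosts still within the seqno protection window, queue the
 * packet for rebroadcast and hand it to the soft-interface */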
int recv_bcast_packet(struct sk_buff *skb, struct batman_if *recv_if)
{
        struct bat_priv *bat_priv = netdev_priv(recv_if->soft_iface);
        struct orig_node *orig_node;
        struct bcast_packet *bcast_packet;
        struct ethhdr *ethhdr;
        int hdr_size = sizeof(struct bcast_packet);
        int32_t seq_diff;

        /* drop packet if it doesn't have the necessary minimum size */
        if (unlikely(!pskb_may_pull(skb, hdr_size)))
                return NET_RX_DROP;

        ethhdr = (struct ethhdr *)skb_mac_header(skb);

        /* packet with broadcast indication but unicast recipient */
        if (!is_broadcast_ether_addr(ethhdr->h_dest))
                return NET_RX_DROP;

        /* packet with broadcast sender address */
        if (is_broadcast_ether_addr(ethhdr->h_source))
                return NET_RX_DROP;

        /* ignore broadcasts sent by myself */
        if (is_my_mac(ethhdr->h_source))
                return NET_RX_DROP;

        bcast_packet = (struct bcast_packet *)skb->data;

        /* ignore broadcasts originated by myself */
        if (is_my_mac(bcast_packet->orig))
                return NET_RX_DROP;

        if (bcast_packet->ttl < 2)
                return NET_RX_DROP;

        spin_lock_bh(&bat_priv->orig_hash_lock);
        orig_node = ((struct orig_node *)
                     hash_find(bat_priv->orig_hash, compare_orig, choose_orig,
                               bcast_packet->orig));

        if (!orig_node) {
                spin_unlock_bh(&bat_priv->orig_hash_lock);
                return NET_RX_DROP;
        }

        /* check whether the packet is a duplicate */
        if (get_bit_status(orig_node->bcast_bits,
                           orig_node->last_bcast_seqno,
                           ntohl(bcast_packet->seqno))) {
                spin_unlock_bh(&bat_priv->orig_hash_lock);
                return NET_RX_DROP;
        }

        seq_diff = ntohl(bcast_packet->seqno) - orig_node->last_bcast_seqno;

        /* check whether the packet is old and the host just restarted. */
        if (window_protected(bat_priv, seq_diff,
                             &orig_node->bcast_seqno_reset)) {
                spin_unlock_bh(&bat_priv->orig_hash_lock);
                return NET_RX_DROP;
        }

        /* mark broadcast in flood history, update window position
         * if required. */
        if (bit_get_packet(bat_priv, orig_node->bcast_bits, seq_diff, 1))
                orig_node->last_bcast_seqno = ntohl(bcast_packet->seqno);

        spin_unlock_bh(&bat_priv->orig_hash_lock);

        /* rebroadcast packet */
        add_bcast_packet_to_list(bat_priv, skb);

        /* broadcast for me */
        interface_rx(recv_if->soft_iface, skb, recv_if, hdr_size);

        return NET_RX_SUCCESS;
}

int recv_vis_packet(struct sk_buff *skb, struct batman_if *recv_if)
{
        struct vis_packet *vis_packet;
        struct ethhdr *ethhdr;
        struct bat_priv *bat_priv = netdev_priv(recv_if->soft_iface);
        int hdr_size = sizeof(struct vis_packet);

        /* keep skb linear */
        if (skb_linearize(skb) < 0)
                return NET_RX_DROP;

        if (unlikely(!pskb_may_pull(skb, hdr_size)))
                return NET_RX_DROP;

        vis_packet = (struct vis_packet *)skb->data;
        ethhdr = (struct ethhdr *)skb_mac_header(skb);

        /* not for me */
        if (!is_my_mac(ethhdr->h_dest))
                return NET_RX_DROP;

        /* ignore own packets */
        if (is_my_mac(vis_packet->vis_orig))
                return NET_RX_DROP;

        if (is_my_mac(vis_packet->sender_orig))
                return NET_RX_DROP;

        switch (vis_packet->vis_type) {
        case VIS_TYPE_SERVER_SYNC:
                receive_server_sync_packet(bat_priv, vis_packet,
                                           skb_headlen(skb));
                break;

        case VIS_TYPE_CLIENT_UPDATE:
                receive_client_update_packet(bat_priv, vis_packet,
                                             skb_headlen(skb));
                break;

        default:        /* ignore unknown packet */
                break;
        }

        /* We take a copy of the data in the packet, so we should
         * always free the skbuff. */
        return NET_RX_DROP;
}