bat_iv_ogm.c

/*
 * Copyright (C) 2007-2012 B.A.T.M.A.N. contributors:
 *
 * Marek Lindner, Simon Wunderlich
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
 * 02110-1301, USA
 *
 */
#include "main.h"
#include "translation-table.h"
#include "ring_buffer.h"
#include "originator.h"
#include "routing.h"
#include "gateway_common.h"
#include "gateway_client.h"
#include "hard-interface.h"
#include "send.h"
#include "bat_algo.h"

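/* allocate a neighbor entry for a newly seen last-hop neighbor and hook it
 * into the originator's neighbor list
 */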
static struct neigh_node *bat_iv_ogm_neigh_new(struct hard_iface *hard_iface,
					       const uint8_t *neigh_addr,
					       struct orig_node *orig_node,
					       struct orig_node *orig_neigh,
					       uint32_t seqno)
{
	struct neigh_node *neigh_node;

	neigh_node = batadv_neigh_node_new(hard_iface, neigh_addr, seqno);
	if (!neigh_node)
		goto out;

	INIT_LIST_HEAD(&neigh_node->bonding_list);

	neigh_node->orig_node = orig_neigh;
	neigh_node->if_incoming = hard_iface;

	spin_lock_bh(&orig_node->neigh_list_lock);
	hlist_add_head_rcu(&neigh_node->list, &orig_node->neigh_list);
	spin_unlock_bh(&orig_node->neigh_list_lock);

out:
	return neigh_node;
}

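/* allocate the per-interface OGM buffer and pre-fill its static fields when
 * the interface is enabled
 */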
static int bat_iv_ogm_iface_enable(struct hard_iface *hard_iface)
{
	struct batman_ogm_packet *batman_ogm_packet;
	uint32_t random_seqno;
	int res = -1;

	/* randomize initial seqno to avoid collision */
	get_random_bytes(&random_seqno, sizeof(random_seqno));
	atomic_set(&hard_iface->seqno, random_seqno);

	hard_iface->packet_len = BATMAN_OGM_HLEN;
	hard_iface->packet_buff = kmalloc(hard_iface->packet_len, GFP_ATOMIC);
	if (!hard_iface->packet_buff)
		goto out;

	batman_ogm_packet = (struct batman_ogm_packet *)hard_iface->packet_buff;
	batman_ogm_packet->header.packet_type = BAT_IV_OGM;
	batman_ogm_packet->header.version = COMPAT_VERSION;
	batman_ogm_packet->header.ttl = 2;
	batman_ogm_packet->flags = NO_FLAGS;
	batman_ogm_packet->tq = TQ_MAX_VALUE;
	batman_ogm_packet->tt_num_changes = 0;
	batman_ogm_packet->ttvn = 0;

	res = 0;

out:
	return res;
}

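/* free the per-interface OGM buffer when the interface is disabled */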
static void bat_iv_ogm_iface_disable(struct hard_iface *hard_iface)
{
	kfree(hard_iface->packet_buff);
	hard_iface->packet_buff = NULL;
}

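/* copy the current interface MAC address into the pre-built OGM */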
static void bat_iv_ogm_iface_update_mac(struct hard_iface *hard_iface)
{
	struct batman_ogm_packet *batman_ogm_packet;

	batman_ogm_packet = (struct batman_ogm_packet *)hard_iface->packet_buff;
	memcpy(batman_ogm_packet->orig,
	       hard_iface->net_dev->dev_addr, ETH_ALEN);
	memcpy(batman_ogm_packet->prev_sender,
	       hard_iface->net_dev->dev_addr, ETH_ALEN);
}

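/* flag the pre-built OGM of the newly selected primary interface */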
static void bat_iv_ogm_primary_iface_set(struct hard_iface *hard_iface)
{
	struct batman_ogm_packet *batman_ogm_packet;

	batman_ogm_packet = (struct batman_ogm_packet *)hard_iface->packet_buff;
	batman_ogm_packet->flags = PRIMARIES_FIRST_HOP;
	batman_ogm_packet->header.ttl = TTL;
}

/* when do we schedule our own ogm to be sent */
static unsigned long bat_iv_ogm_emit_send_time(const struct bat_priv *bat_priv)
{
	return jiffies + msecs_to_jiffies(
		   atomic_read(&bat_priv->orig_interval) -
		   JITTER + (random32() % 2*JITTER));
}

/* when do we schedule an ogm packet to be sent */
static unsigned long bat_iv_ogm_fwd_send_time(void)
{
	return jiffies + msecs_to_jiffies(random32() % (JITTER/2));
}

/* apply hop penalty for a normal link */
static uint8_t hop_penalty(uint8_t tq, const struct bat_priv *bat_priv)
{
	int hop_penalty = atomic_read(&bat_priv->hop_penalty);

	return (tq * (TQ_MAX_VALUE - hop_penalty)) / (TQ_MAX_VALUE);
}

/* is there another aggregated packet here? */
static int bat_iv_ogm_aggr_packet(int buff_pos, int packet_len,
				  int tt_num_changes)
{
	int next_buff_pos = buff_pos + BATMAN_OGM_HLEN + tt_len(tt_num_changes);

	return (next_buff_pos <= packet_len) &&
		(next_buff_pos <= MAX_AGGREGATION_BYTES);
}

/* send a batman ogm to a given interface */
static void bat_iv_ogm_send_to_if(struct forw_packet *forw_packet,
				  struct hard_iface *hard_iface)
{
	struct bat_priv *bat_priv = netdev_priv(hard_iface->soft_iface);
	char *fwd_str;
	uint8_t packet_num;
	int16_t buff_pos;
	struct batman_ogm_packet *batman_ogm_packet;
	struct sk_buff *skb;

	if (hard_iface->if_status != IF_ACTIVE)
		return;

	packet_num = 0;
	buff_pos = 0;
	batman_ogm_packet = (struct batman_ogm_packet *)forw_packet->skb->data;

	/* adjust all flags and log packets */
	while (bat_iv_ogm_aggr_packet(buff_pos, forw_packet->packet_len,
				      batman_ogm_packet->tt_num_changes)) {

		/* we might have aggregated direct link packets with an
		 * ordinary base packet */
		if ((forw_packet->direct_link_flags & (1 << packet_num)) &&
		    (forw_packet->if_incoming == hard_iface))
			batman_ogm_packet->flags |= DIRECTLINK;
		else
			batman_ogm_packet->flags &= ~DIRECTLINK;

		fwd_str = (packet_num > 0 ? "Forwarding" : (forw_packet->own ?
							    "Sending own" :
							    "Forwarding"));
		bat_dbg(DBG_BATMAN, bat_priv,
			"%s %spacket (originator %pM, seqno %u, TQ %d, TTL %d, IDF %s, ttvn %d) on interface %s [%pM]\n",
			fwd_str, (packet_num > 0 ? "aggregated " : ""),
			batman_ogm_packet->orig,
			ntohl(batman_ogm_packet->seqno),
			batman_ogm_packet->tq, batman_ogm_packet->header.ttl,
			(batman_ogm_packet->flags & DIRECTLINK ?
			 "on" : "off"),
			batman_ogm_packet->ttvn, hard_iface->net_dev->name,
			hard_iface->net_dev->dev_addr);

		buff_pos += BATMAN_OGM_HLEN +
			    tt_len(batman_ogm_packet->tt_num_changes);
		packet_num++;
		batman_ogm_packet = (struct batman_ogm_packet *)
					(forw_packet->skb->data + buff_pos);
	}

	/* create clone because function is called more than once */
	skb = skb_clone(forw_packet->skb, GFP_ATOMIC);
	if (skb) {
		batadv_inc_counter(bat_priv, BAT_CNT_MGMT_TX);
		batadv_add_counter(bat_priv, BAT_CNT_MGMT_TX_BYTES,
				   skb->len + ETH_HLEN);
		send_skb_packet(skb, hard_iface, broadcast_addr);
	}
}

/* send a batman ogm packet */
static void bat_iv_ogm_emit(struct forw_packet *forw_packet)
{
	struct hard_iface *hard_iface;
	struct net_device *soft_iface;
	struct bat_priv *bat_priv;
	struct hard_iface *primary_if = NULL;
	struct batman_ogm_packet *batman_ogm_packet;
	unsigned char directlink;

	batman_ogm_packet = (struct batman_ogm_packet *)
						(forw_packet->skb->data);
	directlink = (batman_ogm_packet->flags & DIRECTLINK ? 1 : 0);

	if (!forw_packet->if_incoming) {
		pr_err("Error - can't forward packet: incoming iface not specified\n");
		goto out;
	}

	soft_iface = forw_packet->if_incoming->soft_iface;
	bat_priv = netdev_priv(soft_iface);

	if (forw_packet->if_incoming->if_status != IF_ACTIVE)
		goto out;

	primary_if = primary_if_get_selected(bat_priv);
	if (!primary_if)
		goto out;

	/* multihomed peer assumed */
	/* non-primary OGMs are only broadcasted on their interface */
	if ((directlink && (batman_ogm_packet->header.ttl == 1)) ||
	    (forw_packet->own && (forw_packet->if_incoming != primary_if))) {

		/* FIXME: what about aggregated packets ? */
		bat_dbg(DBG_BATMAN, bat_priv,
			"%s packet (originator %pM, seqno %u, TTL %d) on interface %s [%pM]\n",
			(forw_packet->own ? "Sending own" : "Forwarding"),
			batman_ogm_packet->orig,
			ntohl(batman_ogm_packet->seqno),
			batman_ogm_packet->header.ttl,
			forw_packet->if_incoming->net_dev->name,
			forw_packet->if_incoming->net_dev->dev_addr);

		/* skb is only used once and then forw_packet is freed */
		send_skb_packet(forw_packet->skb, forw_packet->if_incoming,
				broadcast_addr);
		forw_packet->skb = NULL;

		goto out;
	}

	/* broadcast on every interface */
	rcu_read_lock();
	list_for_each_entry_rcu(hard_iface, &hardif_list, list) {
		if (hard_iface->soft_iface != soft_iface)
			continue;

		bat_iv_ogm_send_to_if(forw_packet, hard_iface);
	}
	rcu_read_unlock();

out:
	if (primary_if)
		hardif_free_ref(primary_if);
}

/* return true if new_packet can be aggregated with forw_packet */
static bool bat_iv_ogm_can_aggregate(const struct batman_ogm_packet
							*new_batman_ogm_packet,
				     struct bat_priv *bat_priv,
				     int packet_len, unsigned long send_time,
				     bool directlink,
				     const struct hard_iface *if_incoming,
				     const struct forw_packet *forw_packet)
{
	struct batman_ogm_packet *batman_ogm_packet;
	int aggregated_bytes = forw_packet->packet_len + packet_len;
	struct hard_iface *primary_if = NULL;
	bool res = false;

	batman_ogm_packet = (struct batman_ogm_packet *)forw_packet->skb->data;

	/**
	 * we can aggregate the current packet to this aggregated packet
	 * if:
	 *
	 * - the send time is within our MAX_AGGREGATION_MS time
	 * - the resulting packet won't be bigger than
	 *   MAX_AGGREGATION_BYTES
	 */
	if (time_before(send_time, forw_packet->send_time) &&
	    time_after_eq(send_time + msecs_to_jiffies(MAX_AGGREGATION_MS),
			  forw_packet->send_time) &&
	    (aggregated_bytes <= MAX_AGGREGATION_BYTES)) {

		/**
		 * check aggregation compatibility
		 * -> direct link packets are broadcasted on
		 *    their interface only
		 * -> aggregate packet if the current packet is
		 *    a "global" packet as well as the base
		 *    packet
		 */
		primary_if = primary_if_get_selected(bat_priv);
		if (!primary_if)
			goto out;

		/* packets without direct link flag and high TTL
		 * are flooded through the net */
		if ((!directlink) &&
		    (!(batman_ogm_packet->flags & DIRECTLINK)) &&
		    (batman_ogm_packet->header.ttl != 1) &&

		    /* own packets originating non-primary
		     * interfaces leave only that interface */
		    ((!forw_packet->own) ||
		     (forw_packet->if_incoming == primary_if))) {
			res = true;
			goto out;
		}

		/* if the incoming packet is sent via this one
		 * interface only - we still can aggregate */
		if ((directlink) &&
		    (new_batman_ogm_packet->header.ttl == 1) &&
		    (forw_packet->if_incoming == if_incoming) &&

		    /* packets from direct neighbors or
		     * own secondary interface packets
		     * (= secondary interface packets in general) */
		    (batman_ogm_packet->flags & DIRECTLINK ||
		     (forw_packet->own &&
		      forw_packet->if_incoming != primary_if))) {
			res = true;
			goto out;
		}
	}

out:
	if (primary_if)
		hardif_free_ref(primary_if);
	return res;
}

/* create a new aggregated packet and add this packet to it */
static void bat_iv_ogm_aggregate_new(const unsigned char *packet_buff,
				     int packet_len, unsigned long send_time,
				     bool direct_link,
				     struct hard_iface *if_incoming,
				     int own_packet)
{
	struct bat_priv *bat_priv = netdev_priv(if_incoming->soft_iface);
	struct forw_packet *forw_packet_aggr;
	unsigned char *skb_buff;

	if (!atomic_inc_not_zero(&if_incoming->refcount))
		return;

	/* own packet should always be scheduled */
	if (!own_packet) {
		if (!atomic_dec_not_zero(&bat_priv->batman_queue_left)) {
			bat_dbg(DBG_BATMAN, bat_priv,
				"batman packet queue full\n");
			goto out;
		}
	}

	forw_packet_aggr = kmalloc(sizeof(*forw_packet_aggr), GFP_ATOMIC);
	if (!forw_packet_aggr) {
		if (!own_packet)
			atomic_inc(&bat_priv->batman_queue_left);
		goto out;
	}

	if ((atomic_read(&bat_priv->aggregated_ogms)) &&
	    (packet_len < MAX_AGGREGATION_BYTES))
		forw_packet_aggr->skb = dev_alloc_skb(MAX_AGGREGATION_BYTES +
						      ETH_HLEN);
	else
		forw_packet_aggr->skb = dev_alloc_skb(packet_len + ETH_HLEN);

	if (!forw_packet_aggr->skb) {
		if (!own_packet)
			atomic_inc(&bat_priv->batman_queue_left);
		kfree(forw_packet_aggr);
		goto out;
	}
	skb_reserve(forw_packet_aggr->skb, ETH_HLEN);

	INIT_HLIST_NODE(&forw_packet_aggr->list);

	skb_buff = skb_put(forw_packet_aggr->skb, packet_len);
	forw_packet_aggr->packet_len = packet_len;
	memcpy(skb_buff, packet_buff, packet_len);

	forw_packet_aggr->own = own_packet;
	forw_packet_aggr->if_incoming = if_incoming;
	forw_packet_aggr->num_packets = 0;
	forw_packet_aggr->direct_link_flags = NO_FLAGS;
	forw_packet_aggr->send_time = send_time;

	/* save packet direct link flag status */
	if (direct_link)
		forw_packet_aggr->direct_link_flags |= 1;

	/* add new packet to packet list */
	spin_lock_bh(&bat_priv->forw_bat_list_lock);
	hlist_add_head(&forw_packet_aggr->list, &bat_priv->forw_bat_list);
	spin_unlock_bh(&bat_priv->forw_bat_list_lock);

	/* start timer for this packet */
	INIT_DELAYED_WORK(&forw_packet_aggr->delayed_work,
			  send_outstanding_bat_ogm_packet);
	queue_delayed_work(bat_event_workqueue,
			   &forw_packet_aggr->delayed_work,
			   send_time - jiffies);

	return;
out:
	hardif_free_ref(if_incoming);
}

/* aggregate a new packet into the existing ogm packet */
static void bat_iv_ogm_aggregate(struct forw_packet *forw_packet_aggr,
				 const unsigned char *packet_buff,
				 int packet_len, bool direct_link)
{
	unsigned char *skb_buff;

	skb_buff = skb_put(forw_packet_aggr->skb, packet_len);
	memcpy(skb_buff, packet_buff, packet_len);
	forw_packet_aggr->packet_len += packet_len;
	forw_packet_aggr->num_packets++;

	/* save packet direct link flag status */
	if (direct_link)
		forw_packet_aggr->direct_link_flags |=
			(1 << forw_packet_aggr->num_packets);
}

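/* queue an OGM for transmission - either merge it into a matching pending
 * aggregate or create a new one
 */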
static void bat_iv_ogm_queue_add(struct bat_priv *bat_priv,
				 unsigned char *packet_buff,
				 int packet_len, struct hard_iface *if_incoming,
				 int own_packet, unsigned long send_time)
{
	/**
	 * _aggr -> pointer to the packet we want to aggregate with
	 * _pos -> pointer to the position in the queue
	 */
	struct forw_packet *forw_packet_aggr = NULL, *forw_packet_pos = NULL;
	struct hlist_node *tmp_node;
	struct batman_ogm_packet *batman_ogm_packet;
	bool direct_link;

	batman_ogm_packet = (struct batman_ogm_packet *)packet_buff;
	direct_link = batman_ogm_packet->flags & DIRECTLINK ? 1 : 0;

	/* find position for the packet in the forward queue */
	spin_lock_bh(&bat_priv->forw_bat_list_lock);
	/* own packets are not to be aggregated */
	if ((atomic_read(&bat_priv->aggregated_ogms)) && (!own_packet)) {
		hlist_for_each_entry(forw_packet_pos, tmp_node,
				     &bat_priv->forw_bat_list, list) {
			if (bat_iv_ogm_can_aggregate(batman_ogm_packet,
						     bat_priv, packet_len,
						     send_time, direct_link,
						     if_incoming,
						     forw_packet_pos)) {
				forw_packet_aggr = forw_packet_pos;
				break;
			}
		}
	}

	/* nothing to aggregate with - either aggregation disabled or no
	 * suitable aggregation packet found */
	if (!forw_packet_aggr) {
		/* the following section can run without the lock */
		spin_unlock_bh(&bat_priv->forw_bat_list_lock);

		/**
		 * if we could not aggregate this packet with one of the others
		 * we hold it back for a while, so that it might be aggregated
		 * later on
		 */
		if ((!own_packet) &&
		    (atomic_read(&bat_priv->aggregated_ogms)))
			send_time += msecs_to_jiffies(MAX_AGGREGATION_MS);

		bat_iv_ogm_aggregate_new(packet_buff, packet_len,
					 send_time, direct_link,
					 if_incoming, own_packet);
	} else {
		bat_iv_ogm_aggregate(forw_packet_aggr, packet_buff,
				     packet_len, direct_link);
		spin_unlock_bh(&bat_priv->forw_bat_list_lock);
	}
}

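/* forward a received OGM: decrease the TTL, apply the hop penalty and
 * re-queue it for transmission
 */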
static void bat_iv_ogm_forward(struct orig_node *orig_node,
			       const struct ethhdr *ethhdr,
			       struct batman_ogm_packet *batman_ogm_packet,
			       bool is_single_hop_neigh,
			       bool is_from_best_next_hop,
			       struct hard_iface *if_incoming)
{
	struct bat_priv *bat_priv = netdev_priv(if_incoming->soft_iface);
	uint8_t tt_num_changes;

	if (batman_ogm_packet->header.ttl <= 1) {
		bat_dbg(DBG_BATMAN, bat_priv, "ttl exceeded\n");
		return;
	}

	if (!is_from_best_next_hop) {
		/* Mark the forwarded packet when it is not coming from our
		 * best next hop. We still need to forward the packet for our
		 * neighbor link quality detection to work in case the packet
		 * originated from a single hop neighbor. Otherwise we can
		 * simply drop the ogm.
		 */
		if (is_single_hop_neigh)
			batman_ogm_packet->flags |= NOT_BEST_NEXT_HOP;
		else
			return;
	}

	tt_num_changes = batman_ogm_packet->tt_num_changes;

	batman_ogm_packet->header.ttl--;
	memcpy(batman_ogm_packet->prev_sender, ethhdr->h_source, ETH_ALEN);

	/* apply hop penalty */
	batman_ogm_packet->tq = hop_penalty(batman_ogm_packet->tq, bat_priv);

	bat_dbg(DBG_BATMAN, bat_priv,
		"Forwarding packet: tq: %i, ttl: %i\n",
		batman_ogm_packet->tq, batman_ogm_packet->header.ttl);

	batman_ogm_packet->seqno = htonl(batman_ogm_packet->seqno);
	batman_ogm_packet->tt_crc = htons(batman_ogm_packet->tt_crc);

	/* switch off the primaries first hop flag when forwarding */
	batman_ogm_packet->flags &= ~PRIMARIES_FIRST_HOP;
	if (is_single_hop_neigh)
		batman_ogm_packet->flags |= DIRECTLINK;
	else
		batman_ogm_packet->flags &= ~DIRECTLINK;

	bat_iv_ogm_queue_add(bat_priv, (unsigned char *)batman_ogm_packet,
			     BATMAN_OGM_HLEN + tt_len(tt_num_changes),
			     if_incoming, 0, bat_iv_ogm_fwd_send_time());
}

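/* prepare the own OGM of this interface and queue it for transmission */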
static void bat_iv_ogm_schedule(struct hard_iface *hard_iface,
				int tt_num_changes)
{
	struct bat_priv *bat_priv = netdev_priv(hard_iface->soft_iface);
	struct batman_ogm_packet *batman_ogm_packet;
	struct hard_iface *primary_if;
	int vis_server;

	vis_server = atomic_read(&bat_priv->vis_mode);
	primary_if = primary_if_get_selected(bat_priv);

	batman_ogm_packet = (struct batman_ogm_packet *)hard_iface->packet_buff;

	/* change sequence number to network order */
	batman_ogm_packet->seqno =
		htonl((uint32_t)atomic_read(&hard_iface->seqno));

	batman_ogm_packet->ttvn = atomic_read(&bat_priv->ttvn);
	batman_ogm_packet->tt_crc = htons(bat_priv->tt_crc);
	if (tt_num_changes >= 0)
		batman_ogm_packet->tt_num_changes = tt_num_changes;

	if (vis_server == VIS_TYPE_SERVER_SYNC)
		batman_ogm_packet->flags |= VIS_SERVER;
	else
		batman_ogm_packet->flags &= ~VIS_SERVER;

	if ((hard_iface == primary_if) &&
	    (atomic_read(&bat_priv->gw_mode) == GW_MODE_SERVER))
		batman_ogm_packet->gw_flags =
				(uint8_t)atomic_read(&bat_priv->gw_bandwidth);
	else
		batman_ogm_packet->gw_flags = NO_FLAGS;

	atomic_inc(&hard_iface->seqno);

	slide_own_bcast_window(hard_iface);
	bat_iv_ogm_queue_add(bat_priv, hard_iface->packet_buff,
			     hard_iface->packet_len, hard_iface, 1,
			     bat_iv_ogm_emit_send_time(bat_priv));

	if (primary_if)
		hardif_free_ref(primary_if);
}

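/* update the neighbor ranking and the route towards the originator based on
 * the received OGM
 */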
static void bat_iv_ogm_orig_update(struct bat_priv *bat_priv,
				   struct orig_node *orig_node,
				   const struct ethhdr *ethhdr,
				   const struct batman_ogm_packet
							*batman_ogm_packet,
				   struct hard_iface *if_incoming,
				   const unsigned char *tt_buff,
				   int is_duplicate)
{
	struct neigh_node *neigh_node = NULL, *tmp_neigh_node = NULL;
	struct neigh_node *router = NULL;
	struct orig_node *orig_node_tmp;
	struct hlist_node *node;
	uint8_t bcast_own_sum_orig, bcast_own_sum_neigh;

	bat_dbg(DBG_BATMAN, bat_priv,
		"update_originator(): Searching and updating originator entry of received packet\n");

	rcu_read_lock();
	hlist_for_each_entry_rcu(tmp_neigh_node, node,
				 &orig_node->neigh_list, list) {
		if (compare_eth(tmp_neigh_node->addr, ethhdr->h_source) &&
		    (tmp_neigh_node->if_incoming == if_incoming) &&
		     atomic_inc_not_zero(&tmp_neigh_node->refcount)) {
			if (neigh_node)
				neigh_node_free_ref(neigh_node);
			neigh_node = tmp_neigh_node;
			continue;
		}

		if (is_duplicate)
			continue;

		spin_lock_bh(&tmp_neigh_node->lq_update_lock);
		ring_buffer_set(tmp_neigh_node->tq_recv,
				&tmp_neigh_node->tq_index, 0);
		tmp_neigh_node->tq_avg =
			ring_buffer_avg(tmp_neigh_node->tq_recv);
		spin_unlock_bh(&tmp_neigh_node->lq_update_lock);
	}

	if (!neigh_node) {
		struct orig_node *orig_tmp;

		orig_tmp = get_orig_node(bat_priv, ethhdr->h_source);
		if (!orig_tmp)
			goto unlock;

		neigh_node = bat_iv_ogm_neigh_new(if_incoming, ethhdr->h_source,
						  orig_node, orig_tmp,
						  batman_ogm_packet->seqno);

		orig_node_free_ref(orig_tmp);
		if (!neigh_node)
			goto unlock;
	} else
		bat_dbg(DBG_BATMAN, bat_priv,
			"Updating existing last-hop neighbor of originator\n");

	rcu_read_unlock();

	orig_node->flags = batman_ogm_packet->flags;
	neigh_node->last_seen = jiffies;

	spin_lock_bh(&neigh_node->lq_update_lock);
	ring_buffer_set(neigh_node->tq_recv,
			&neigh_node->tq_index,
			batman_ogm_packet->tq);
	neigh_node->tq_avg = ring_buffer_avg(neigh_node->tq_recv);
	spin_unlock_bh(&neigh_node->lq_update_lock);

	if (!is_duplicate) {
		orig_node->last_ttl = batman_ogm_packet->header.ttl;
		neigh_node->last_ttl = batman_ogm_packet->header.ttl;
	}

	bonding_candidate_add(orig_node, neigh_node);

	/* if this neighbor already is our next hop there is nothing
	 * to change */
	router = orig_node_get_router(orig_node);
	if (router == neigh_node)
		goto update_tt;

	/* if this neighbor does not offer a better TQ we won't consider it */
	if (router && (router->tq_avg > neigh_node->tq_avg))
		goto update_tt;

	/* if the TQ is the same and the link not more symmetric we
	 * won't consider it either */
	if (router && (neigh_node->tq_avg == router->tq_avg)) {
		orig_node_tmp = router->orig_node;
		spin_lock_bh(&orig_node_tmp->ogm_cnt_lock);
		bcast_own_sum_orig =
			orig_node_tmp->bcast_own_sum[if_incoming->if_num];
		spin_unlock_bh(&orig_node_tmp->ogm_cnt_lock);

		orig_node_tmp = neigh_node->orig_node;
		spin_lock_bh(&orig_node_tmp->ogm_cnt_lock);
		bcast_own_sum_neigh =
			orig_node_tmp->bcast_own_sum[if_incoming->if_num];
		spin_unlock_bh(&orig_node_tmp->ogm_cnt_lock);

		if (bcast_own_sum_orig >= bcast_own_sum_neigh)
			goto update_tt;
	}

	update_route(bat_priv, orig_node, neigh_node);

update_tt:
	/* I have to check for transtable changes only if the OGM has been
	 * sent through a primary interface */
	if (((batman_ogm_packet->orig != ethhdr->h_source) &&
	     (batman_ogm_packet->header.ttl > 2)) ||
	    (batman_ogm_packet->flags & PRIMARIES_FIRST_HOP))
		tt_update_orig(bat_priv, orig_node, tt_buff,
			       batman_ogm_packet->tt_num_changes,
			       batman_ogm_packet->ttvn,
			       batman_ogm_packet->tt_crc);

	if (orig_node->gw_flags != batman_ogm_packet->gw_flags)
		gw_node_update(bat_priv, orig_node,
			       batman_ogm_packet->gw_flags);

	orig_node->gw_flags = batman_ogm_packet->gw_flags;

	/* restart gateway selection if fast or late switching was enabled */
	if ((orig_node->gw_flags) &&
	    (atomic_read(&bat_priv->gw_mode) == GW_MODE_CLIENT) &&
	    (atomic_read(&bat_priv->gw_sel_class) > 2))
		gw_check_election(bat_priv, orig_node);

	goto out;

unlock:
	rcu_read_unlock();
out:
	if (neigh_node)
		neigh_node_free_ref(neigh_node);
	if (router)
		neigh_node_free_ref(router);
}

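/* compute the transmit quality (TQ) towards the originator and return
 * whether the link qualifies as bidirectional
 */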
static int bat_iv_ogm_calc_tq(struct orig_node *orig_node,
			      struct orig_node *orig_neigh_node,
			      struct batman_ogm_packet *batman_ogm_packet,
			      struct hard_iface *if_incoming)
{
	struct bat_priv *bat_priv = netdev_priv(if_incoming->soft_iface);
	struct neigh_node *neigh_node = NULL, *tmp_neigh_node;
	struct hlist_node *node;
	uint8_t total_count;
	uint8_t orig_eq_count, neigh_rq_count, tq_own;
	int tq_asym_penalty, ret = 0;

	/* find corresponding one hop neighbor */
	rcu_read_lock();
	hlist_for_each_entry_rcu(tmp_neigh_node, node,
				 &orig_neigh_node->neigh_list, list) {

		if (!compare_eth(tmp_neigh_node->addr, orig_neigh_node->orig))
			continue;

		if (tmp_neigh_node->if_incoming != if_incoming)
			continue;

		if (!atomic_inc_not_zero(&tmp_neigh_node->refcount))
			continue;

		neigh_node = tmp_neigh_node;
		break;
	}
	rcu_read_unlock();

	if (!neigh_node)
		neigh_node = bat_iv_ogm_neigh_new(if_incoming,
						  orig_neigh_node->orig,
						  orig_neigh_node,
						  orig_neigh_node,
						  batman_ogm_packet->seqno);

	if (!neigh_node)
		goto out;

	/* if orig_node is direct neighbor update neigh_node last_seen */
	if (orig_node == orig_neigh_node)
		neigh_node->last_seen = jiffies;

	orig_node->last_seen = jiffies;

	/* find packet count of corresponding one hop neighbor */
	spin_lock_bh(&orig_node->ogm_cnt_lock);
	orig_eq_count = orig_neigh_node->bcast_own_sum[if_incoming->if_num];
	neigh_rq_count = neigh_node->real_packet_count;
	spin_unlock_bh(&orig_node->ogm_cnt_lock);

	/* pay attention to not get a value bigger than 100 % */
	total_count = (orig_eq_count > neigh_rq_count ?
		       neigh_rq_count : orig_eq_count);

	/* if we have too few packets (too little data) we set tq_own to zero */
	/* if we receive too few packets it is not considered bidirectional */
	if ((total_count < TQ_LOCAL_BIDRECT_SEND_MINIMUM) ||
	    (neigh_rq_count < TQ_LOCAL_BIDRECT_RECV_MINIMUM))
		tq_own = 0;
	else
		/* neigh_node->real_packet_count is never zero as we
		 * only purge old information when getting new
		 * information */
		tq_own = (TQ_MAX_VALUE * total_count) / neigh_rq_count;

	/* 1 - ((1-x) ** 3), normalized to TQ_MAX_VALUE this does
	 * affect the nearly-symmetric links only a little, but
	 * punishes asymmetric links more. This will give a value
	 * between 0 and TQ_MAX_VALUE
	 */
	tq_asym_penalty = TQ_MAX_VALUE - (TQ_MAX_VALUE *
				(TQ_LOCAL_WINDOW_SIZE - neigh_rq_count) *
				(TQ_LOCAL_WINDOW_SIZE - neigh_rq_count) *
				(TQ_LOCAL_WINDOW_SIZE - neigh_rq_count)) /
				(TQ_LOCAL_WINDOW_SIZE *
				 TQ_LOCAL_WINDOW_SIZE *
				 TQ_LOCAL_WINDOW_SIZE);

	batman_ogm_packet->tq = ((batman_ogm_packet->tq * tq_own
				  * tq_asym_penalty) /
				 (TQ_MAX_VALUE * TQ_MAX_VALUE));

	bat_dbg(DBG_BATMAN, bat_priv,
		"bidirectional: orig = %-15pM neigh = %-15pM => own_bcast = %2i, real recv = %2i, local tq: %3i, asym_penalty: %3i, total tq: %3i\n",
		orig_node->orig, orig_neigh_node->orig, total_count,
		neigh_rq_count, tq_own, tq_asym_penalty, batman_ogm_packet->tq);

	/* if link has the minimum required transmission quality
	 * consider it bidirectional */
	if (batman_ogm_packet->tq >= TQ_TOTAL_BIDRECT_LIMIT)
		ret = 1;

out:
	if (neigh_node)
		neigh_node_free_ref(neigh_node);
	return ret;
}

/* processes a batman packet for all interfaces, adjusts the sequence number and
 * finds out whether it is a duplicate.
 * returns:
 *   1 the packet is a duplicate
 *   0 the packet has not yet been received
 *  -1 the packet is old and has been received while the seqno window
 *     was protected. Caller should drop it.
 */
static int bat_iv_ogm_update_seqnos(const struct ethhdr *ethhdr,
				    const struct batman_ogm_packet
							*batman_ogm_packet,
				    const struct hard_iface *if_incoming)
{
	struct bat_priv *bat_priv = netdev_priv(if_incoming->soft_iface);
	struct orig_node *orig_node;
	struct neigh_node *tmp_neigh_node;
	struct hlist_node *node;
	int is_duplicate = 0;
	int32_t seq_diff;
	int need_update = 0;
	int set_mark, ret = -1;

	orig_node = get_orig_node(bat_priv, batman_ogm_packet->orig);
	if (!orig_node)
		return 0;

	spin_lock_bh(&orig_node->ogm_cnt_lock);
	seq_diff = batman_ogm_packet->seqno - orig_node->last_real_seqno;

	/* signalize caller that the packet is to be dropped. */
	if (!hlist_empty(&orig_node->neigh_list) &&
	    window_protected(bat_priv, seq_diff,
			     &orig_node->batman_seqno_reset))
		goto out;

	rcu_read_lock();
	hlist_for_each_entry_rcu(tmp_neigh_node, node,
				 &orig_node->neigh_list, list) {

		is_duplicate |= bat_test_bit(tmp_neigh_node->real_bits,
					     orig_node->last_real_seqno,
					     batman_ogm_packet->seqno);

		if (compare_eth(tmp_neigh_node->addr, ethhdr->h_source) &&
		    (tmp_neigh_node->if_incoming == if_incoming))
			set_mark = 1;
		else
			set_mark = 0;

		/* if the window moved, set the update flag. */
		need_update |= bit_get_packet(bat_priv,
					      tmp_neigh_node->real_bits,
					      seq_diff, set_mark);

		tmp_neigh_node->real_packet_count =
			bitmap_weight(tmp_neigh_node->real_bits,
				      TQ_LOCAL_WINDOW_SIZE);
	}
	rcu_read_unlock();

	if (need_update) {
		bat_dbg(DBG_BATMAN, bat_priv,
			"updating last_seqno: old %u, new %u\n",
			orig_node->last_real_seqno, batman_ogm_packet->seqno);
		orig_node->last_real_seqno = batman_ogm_packet->seqno;
	}

	ret = is_duplicate;

out:
	spin_unlock_bh(&orig_node->ogm_cnt_lock);
	orig_node_free_ref(orig_node);
	return ret;
}

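/* validate a single received OGM, update seqno/TQ state and decide whether
 * it has to be rebroadcast
 */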
static void bat_iv_ogm_process(const struct ethhdr *ethhdr,
			       struct batman_ogm_packet *batman_ogm_packet,
			       const unsigned char *tt_buff,
			       struct hard_iface *if_incoming)
{
	struct bat_priv *bat_priv = netdev_priv(if_incoming->soft_iface);
	struct hard_iface *hard_iface;
	struct orig_node *orig_neigh_node, *orig_node;
	struct neigh_node *router = NULL, *router_router = NULL;
	struct neigh_node *orig_neigh_router = NULL;
	int has_directlink_flag;
	int is_my_addr = 0, is_my_orig = 0, is_my_oldorig = 0;
	int is_broadcast = 0, is_bidirectional;
	bool is_single_hop_neigh = false;
	bool is_from_best_next_hop = false;
	int is_duplicate;
	uint32_t if_incoming_seqno;

	/* Silently drop when the batman packet is actually not a
	 * correct packet.
	 *
	 * This might happen if a packet is padded (e.g. Ethernet has a
	 * minimum frame length of 64 byte) and the aggregation interprets
	 * it as an additional length.
	 *
	 * TODO: A more sane solution would be to have a bit in the
	 * batman_ogm_packet to detect whether the packet is the last
	 * packet in an aggregation. Here we expect that the padding
	 * is always zero (or not 0x01)
	 */
	if (batman_ogm_packet->header.packet_type != BAT_IV_OGM)
		return;

	/* could be changed by schedule_own_packet() */
	if_incoming_seqno = atomic_read(&if_incoming->seqno);

	has_directlink_flag = (batman_ogm_packet->flags & DIRECTLINK ? 1 : 0);

	if (compare_eth(ethhdr->h_source, batman_ogm_packet->orig))
		is_single_hop_neigh = true;

	bat_dbg(DBG_BATMAN, bat_priv,
		"Received BATMAN packet via NB: %pM, IF: %s [%pM] (from OG: %pM, via prev OG: %pM, seqno %u, ttvn %u, crc %u, changes %u, td %d, TTL %d, V %d, IDF %d)\n",
		ethhdr->h_source, if_incoming->net_dev->name,
		if_incoming->net_dev->dev_addr, batman_ogm_packet->orig,
		batman_ogm_packet->prev_sender, batman_ogm_packet->seqno,
		batman_ogm_packet->ttvn, batman_ogm_packet->tt_crc,
		batman_ogm_packet->tt_num_changes, batman_ogm_packet->tq,
		batman_ogm_packet->header.ttl,
		batman_ogm_packet->header.version, has_directlink_flag);

	rcu_read_lock();
	list_for_each_entry_rcu(hard_iface, &hardif_list, list) {
		if (hard_iface->if_status != IF_ACTIVE)
			continue;

		if (hard_iface->soft_iface != if_incoming->soft_iface)
			continue;

		if (compare_eth(ethhdr->h_source,
				hard_iface->net_dev->dev_addr))
			is_my_addr = 1;

		if (compare_eth(batman_ogm_packet->orig,
				hard_iface->net_dev->dev_addr))
			is_my_orig = 1;

		if (compare_eth(batman_ogm_packet->prev_sender,
				hard_iface->net_dev->dev_addr))
			is_my_oldorig = 1;

		if (is_broadcast_ether_addr(ethhdr->h_source))
			is_broadcast = 1;
	}
	rcu_read_unlock();

	if (batman_ogm_packet->header.version != COMPAT_VERSION) {
		bat_dbg(DBG_BATMAN, bat_priv,
			"Drop packet: incompatible batman version (%i)\n",
			batman_ogm_packet->header.version);
		return;
	}

	if (is_my_addr) {
		bat_dbg(DBG_BATMAN, bat_priv,
			"Drop packet: received my own broadcast (sender: %pM)\n",
			ethhdr->h_source);
		return;
	}

	if (is_broadcast) {
		bat_dbg(DBG_BATMAN, bat_priv,
			"Drop packet: ignoring all packets with broadcast source addr (sender: %pM)\n",
			ethhdr->h_source);
		return;
	}

	if (is_my_orig) {
		unsigned long *word;
		int offset;

		orig_neigh_node = get_orig_node(bat_priv, ethhdr->h_source);
		if (!orig_neigh_node)
			return;

		/* neighbor has to indicate direct link and it has to
		 * come via the corresponding interface */
		/* save packet seqno for bidirectional check */
		if (has_directlink_flag &&
		    compare_eth(if_incoming->net_dev->dev_addr,
				batman_ogm_packet->orig)) {
			offset = if_incoming->if_num * NUM_WORDS;

			spin_lock_bh(&orig_neigh_node->ogm_cnt_lock);
			word = &(orig_neigh_node->bcast_own[offset]);
			bat_set_bit(word,
				    if_incoming_seqno -
				    batman_ogm_packet->seqno - 2);
			orig_neigh_node->bcast_own_sum[if_incoming->if_num] =
				bitmap_weight(word, TQ_LOCAL_WINDOW_SIZE);
			spin_unlock_bh(&orig_neigh_node->ogm_cnt_lock);
		}

		bat_dbg(DBG_BATMAN, bat_priv,
			"Drop packet: originator packet from myself (via neighbor)\n");
		orig_node_free_ref(orig_neigh_node);
		return;
	}

	if (is_my_oldorig) {
		bat_dbg(DBG_BATMAN, bat_priv,
			"Drop packet: ignoring all rebroadcast echos (sender: %pM)\n",
			ethhdr->h_source);
		return;
	}

	if (batman_ogm_packet->flags & NOT_BEST_NEXT_HOP) {
		bat_dbg(DBG_BATMAN, bat_priv,
			"Drop packet: ignoring all packets not forwarded from the best next hop (sender: %pM)\n",
			ethhdr->h_source);
		return;
	}

	orig_node = get_orig_node(bat_priv, batman_ogm_packet->orig);
	if (!orig_node)
		return;

	is_duplicate = bat_iv_ogm_update_seqnos(ethhdr, batman_ogm_packet,
						if_incoming);

	if (is_duplicate == -1) {
		bat_dbg(DBG_BATMAN, bat_priv,
			"Drop packet: packet within seqno protection time (sender: %pM)\n",
			ethhdr->h_source);
		goto out;
	}

	if (batman_ogm_packet->tq == 0) {
		bat_dbg(DBG_BATMAN, bat_priv,
			"Drop packet: originator packet with tq equal 0\n");
		goto out;
	}

	router = orig_node_get_router(orig_node);
	if (router)
		router_router = orig_node_get_router(router->orig_node);

	if ((router && router->tq_avg != 0) &&
	    (compare_eth(router->addr, ethhdr->h_source)))
		is_from_best_next_hop = true;

	/* avoid temporary routing loops */
	if (router && router_router &&
	    (compare_eth(router->addr, batman_ogm_packet->prev_sender)) &&
	    !(compare_eth(batman_ogm_packet->orig,
			  batman_ogm_packet->prev_sender)) &&
	    (compare_eth(router->addr, router_router->addr))) {
		bat_dbg(DBG_BATMAN, bat_priv,
			"Drop packet: ignoring all rebroadcast packets that may make me loop (sender: %pM)\n",
			ethhdr->h_source);
		goto out;
	}

	/* if sender is a direct neighbor the sender mac equals
	 * originator mac */
	orig_neigh_node = (is_single_hop_neigh ?
			   orig_node :
			   get_orig_node(bat_priv, ethhdr->h_source));
	if (!orig_neigh_node)
		goto out;

	orig_neigh_router = orig_node_get_router(orig_neigh_node);

	/* drop packet if sender is not a direct neighbor and if we
	 * don't route towards it */
	if (!is_single_hop_neigh && (!orig_neigh_router)) {
		bat_dbg(DBG_BATMAN, bat_priv,
			"Drop packet: OGM via unknown neighbor!\n");
		goto out_neigh;
	}

	is_bidirectional = bat_iv_ogm_calc_tq(orig_node, orig_neigh_node,
					      batman_ogm_packet, if_incoming);

	bonding_save_primary(orig_node, orig_neigh_node, batman_ogm_packet);

	/* update ranking if it is not a duplicate or has the same
	 * seqno and similar ttl as the non-duplicate */
	if (is_bidirectional &&
	    (!is_duplicate ||
	     ((orig_node->last_real_seqno == batman_ogm_packet->seqno) &&
	      (orig_node->last_ttl - 3 <= batman_ogm_packet->header.ttl))))
		bat_iv_ogm_orig_update(bat_priv, orig_node, ethhdr,
				       batman_ogm_packet, if_incoming,
				       tt_buff, is_duplicate);

	/* is single hop (direct) neighbor */
	if (is_single_hop_neigh) {

		/* mark direct link on incoming interface */
		bat_iv_ogm_forward(orig_node, ethhdr, batman_ogm_packet,
				   is_single_hop_neigh, is_from_best_next_hop,
				   if_incoming);

		bat_dbg(DBG_BATMAN, bat_priv,
			"Forwarding packet: rebroadcast neighbor packet with direct link flag\n");
		goto out_neigh;
	}

	/* multihop originator */
	if (!is_bidirectional) {
		bat_dbg(DBG_BATMAN, bat_priv,
			"Drop packet: not received via bidirectional link\n");
		goto out_neigh;
	}

	if (is_duplicate) {
		bat_dbg(DBG_BATMAN, bat_priv,
			"Drop packet: duplicate packet received\n");
		goto out_neigh;
	}

	bat_dbg(DBG_BATMAN, bat_priv,
		"Forwarding packet: rebroadcast originator packet\n");
	bat_iv_ogm_forward(orig_node, ethhdr, batman_ogm_packet,
			   is_single_hop_neigh, is_from_best_next_hop,
			   if_incoming);

out_neigh:
	if ((orig_neigh_node) && (!is_single_hop_neigh))
		orig_node_free_ref(orig_neigh_node);
out:
	if (router)
		neigh_node_free_ref(router);
	if (router_router)
		neigh_node_free_ref(router_router);
	if (orig_neigh_router)
		neigh_node_free_ref(orig_neigh_router);

	orig_node_free_ref(orig_node);
}

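/* receive handler: split up aggregated OGMs and process them one by one */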
static int bat_iv_ogm_receive(struct sk_buff *skb,
			      struct hard_iface *if_incoming)
{
	struct bat_priv *bat_priv = netdev_priv(if_incoming->soft_iface);
	struct batman_ogm_packet *batman_ogm_packet;
	struct ethhdr *ethhdr;
	int buff_pos = 0, packet_len;
	unsigned char *tt_buff, *packet_buff;
	bool ret;

	ret = check_management_packet(skb, if_incoming, BATMAN_OGM_HLEN);
	if (!ret)
		return NET_RX_DROP;

	/* did we receive a B.A.T.M.A.N. IV OGM packet on an interface
	 * that does not have B.A.T.M.A.N. IV enabled ?
	 */
	if (bat_priv->bat_algo_ops->bat_ogm_emit != bat_iv_ogm_emit)
		return NET_RX_DROP;

	batadv_inc_counter(bat_priv, BAT_CNT_MGMT_RX);
	batadv_add_counter(bat_priv, BAT_CNT_MGMT_RX_BYTES,
			   skb->len + ETH_HLEN);

	packet_len = skb_headlen(skb);
	ethhdr = (struct ethhdr *)skb_mac_header(skb);
	packet_buff = skb->data;
	batman_ogm_packet = (struct batman_ogm_packet *)packet_buff;

	/* unpack the aggregated packets and process them one by one */
	do {
		/* network to host order for our 32bit seqno and the
		 * orig_interval */
		batman_ogm_packet->seqno = ntohl(batman_ogm_packet->seqno);
		batman_ogm_packet->tt_crc = ntohs(batman_ogm_packet->tt_crc);

		tt_buff = packet_buff + buff_pos + BATMAN_OGM_HLEN;

		bat_iv_ogm_process(ethhdr, batman_ogm_packet,
				   tt_buff, if_incoming);

		buff_pos += BATMAN_OGM_HLEN +
			    tt_len(batman_ogm_packet->tt_num_changes);

		batman_ogm_packet = (struct batman_ogm_packet *)
						(packet_buff + buff_pos);
	} while (bat_iv_ogm_aggr_packet(buff_pos, packet_len,
					batman_ogm_packet->tt_num_changes));

	kfree_skb(skb);
	return NET_RX_SUCCESS;
}

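/* B.A.T.M.A.N. IV callbacks registered with the routing algorithm API */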
static struct bat_algo_ops batman_iv __read_mostly = {
	.name = "BATMAN_IV",
	.bat_iface_enable = bat_iv_ogm_iface_enable,
	.bat_iface_disable = bat_iv_ogm_iface_disable,
	.bat_iface_update_mac = bat_iv_ogm_iface_update_mac,
	.bat_primary_iface_set = bat_iv_ogm_primary_iface_set,
	.bat_ogm_schedule = bat_iv_ogm_schedule,
	.bat_ogm_emit = bat_iv_ogm_emit,
};

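/* register the OGM receive handler and the B.A.T.M.A.N. IV algorithm */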
int __init bat_iv_init(void)
{
	int ret;

	/* batman originator packet */
	ret = recv_handler_register(BAT_IV_OGM, bat_iv_ogm_receive);
	if (ret < 0)
		goto out;

	ret = bat_algo_register(&batman_iv);
	if (ret < 0)
		goto handler_unregister;

	goto out;

handler_unregister:
	recv_handler_unregister(BAT_IV_OGM);
out:
	return ret;
}