/* routing.c */
  1. /*
  2. * Copyright (C) 2007-2011 B.A.T.M.A.N. contributors:
  3. *
  4. * Marek Lindner, Simon Wunderlich
  5. *
  6. * This program is free software; you can redistribute it and/or
  7. * modify it under the terms of version 2 of the GNU General Public
  8. * License as published by the Free Software Foundation.
  9. *
  10. * This program is distributed in the hope that it will be useful, but
  11. * WITHOUT ANY WARRANTY; without even the implied warranty of
  12. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
  13. * General Public License for more details.
  14. *
  15. * You should have received a copy of the GNU General Public License
  16. * along with this program; if not, write to the Free Software
  17. * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
  18. * 02110-1301, USA
  19. *
  20. */
  21. #include "main.h"
  22. #include "routing.h"
  23. #include "send.h"
  24. #include "hash.h"
  25. #include "soft-interface.h"
  26. #include "hard-interface.h"
  27. #include "icmp_socket.h"
  28. #include "translation-table.h"
  29. #include "originator.h"
  30. #include "ring_buffer.h"
  31. #include "vis.h"
  32. #include "aggregation.h"
  33. #include "gateway_common.h"
  34. #include "gateway_client.h"
  35. #include "unicast.h"
/* slide_own_bcast_window() - slide the per-interface "own broadcast" window
 *
 * For every originator in the hash, shift the bitfield recording which of
 * our own broadcasts were echoed back via @batman_if by one sequence
 * number, then refresh the cached bit count (bcast_own_sum) that the TQ
 * computation reads.
 */
void slide_own_bcast_window(struct batman_if *batman_if)
{
	struct bat_priv *bat_priv = netdev_priv(batman_if->soft_iface);
	struct hashtable_t *hash = bat_priv->orig_hash;
	struct hlist_node *walk;
	struct hlist_head *head;
	struct element_t *bucket;
	struct orig_node *orig_node;
	unsigned long *word;
	int i;
	size_t word_index;

	spin_lock_bh(&bat_priv->orig_hash_lock);

	/* walk every hash bucket; entries themselves are RCU-protected */
	for (i = 0; i < hash->size; i++) {
		head = &hash->table[i];

		rcu_read_lock();
		hlist_for_each_entry_rcu(bucket, walk, head, hlist) {
			orig_node = bucket->data;

			/* ogm_cnt_lock guards bcast_own / bcast_own_sum */
			spin_lock_bh(&orig_node->ogm_cnt_lock);
			/* each interface owns NUM_WORDS words of the field */
			word_index = batman_if->if_num * NUM_WORDS;
			word = &(orig_node->bcast_own[word_index]);

			/* advance the window by one seqno without marking a
			 * received packet (args 1, 0 - see bit_get_packet) */
			bit_get_packet(bat_priv, word, 1, 0);
			orig_node->bcast_own_sum[batman_if->if_num] =
				bit_packet_count(word);
			spin_unlock_bh(&orig_node->ogm_cnt_lock);
		}
		rcu_read_unlock();
	}

	spin_unlock_bh(&bat_priv->orig_hash_lock);
}
  65. static void update_HNA(struct bat_priv *bat_priv, struct orig_node *orig_node,
  66. unsigned char *hna_buff, int hna_buff_len)
  67. {
  68. if ((hna_buff_len != orig_node->hna_buff_len) ||
  69. ((hna_buff_len > 0) &&
  70. (orig_node->hna_buff_len > 0) &&
  71. (memcmp(orig_node->hna_buff, hna_buff, hna_buff_len) != 0))) {
  72. if (orig_node->hna_buff_len > 0)
  73. hna_global_del_orig(bat_priv, orig_node,
  74. "originator changed hna");
  75. if ((hna_buff_len > 0) && (hna_buff))
  76. hna_global_add_orig(bat_priv, orig_node,
  77. hna_buff, hna_buff_len);
  78. }
  79. }
  80. static void update_route(struct bat_priv *bat_priv,
  81. struct orig_node *orig_node,
  82. struct neigh_node *neigh_node,
  83. unsigned char *hna_buff, int hna_buff_len)
  84. {
  85. struct neigh_node *neigh_node_tmp;
  86. /* route deleted */
  87. if ((orig_node->router) && (!neigh_node)) {
  88. bat_dbg(DBG_ROUTES, bat_priv, "Deleting route towards: %pM\n",
  89. orig_node->orig);
  90. hna_global_del_orig(bat_priv, orig_node,
  91. "originator timed out");
  92. /* route added */
  93. } else if ((!orig_node->router) && (neigh_node)) {
  94. bat_dbg(DBG_ROUTES, bat_priv,
  95. "Adding route towards: %pM (via %pM)\n",
  96. orig_node->orig, neigh_node->addr);
  97. hna_global_add_orig(bat_priv, orig_node,
  98. hna_buff, hna_buff_len);
  99. /* route changed */
  100. } else {
  101. bat_dbg(DBG_ROUTES, bat_priv,
  102. "Changing route towards: %pM "
  103. "(now via %pM - was via %pM)\n",
  104. orig_node->orig, neigh_node->addr,
  105. orig_node->router->addr);
  106. }
  107. if (neigh_node)
  108. kref_get(&neigh_node->refcount);
  109. neigh_node_tmp = orig_node->router;
  110. orig_node->router = neigh_node;
  111. if (neigh_node_tmp)
  112. kref_put(&neigh_node_tmp->refcount, neigh_node_free_ref);
  113. }
  114. void update_routes(struct bat_priv *bat_priv, struct orig_node *orig_node,
  115. struct neigh_node *neigh_node, unsigned char *hna_buff,
  116. int hna_buff_len)
  117. {
  118. if (!orig_node)
  119. return;
  120. if (orig_node->router != neigh_node)
  121. update_route(bat_priv, orig_node, neigh_node,
  122. hna_buff, hna_buff_len);
  123. /* may be just HNA changed */
  124. else
  125. update_HNA(bat_priv, orig_node, hna_buff, hna_buff_len);
  126. }
/* is_bidirectional_neigh() - decide whether the link to orig_neigh_node via
 * if_incoming works in both directions.
 *
 * Side effect: batman_packet->tq is rescaled by our own transmit quality
 * (tq_own) and the link's asymmetry penalty before the threshold check.
 *
 * Returns 1 if the link is to be considered bidirectional, 0 otherwise
 * (including when no neighbor entry could be created).
 */
static int is_bidirectional_neigh(struct orig_node *orig_node,
				  struct orig_node *orig_neigh_node,
				  struct batman_packet *batman_packet,
				  struct batman_if *if_incoming)
{
	struct bat_priv *bat_priv = netdev_priv(if_incoming->soft_iface);
	struct neigh_node *neigh_node = NULL, *tmp_neigh_node = NULL;
	struct hlist_node *node;
	unsigned char total_count;
	int ret = 0;

	if (orig_node == orig_neigh_node) {
		/* OGM came straight from a single hop neighbor: look up (or
		 * create) the matching neigh_node on orig_node itself */
		rcu_read_lock();
		hlist_for_each_entry_rcu(tmp_neigh_node, node,
					 &orig_node->neigh_list, list) {
			if (compare_orig(tmp_neigh_node->addr,
					 orig_neigh_node->orig) &&
			    (tmp_neigh_node->if_incoming == if_incoming))
				neigh_node = tmp_neigh_node;
		}

		if (!neigh_node)
			neigh_node = create_neighbor(orig_node,
						     orig_neigh_node,
						     orig_neigh_node->orig,
						     if_incoming);
		/* create_neighbor failed, return 0 */
		if (!neigh_node)
			goto unlock;

		/* hold the neighbor beyond the RCU read side; the reference
		 * is dropped on the out: path */
		kref_get(&neigh_node->refcount);
		rcu_read_unlock();

		neigh_node->last_valid = jiffies;
	} else {
		/* find packet count of corresponding one hop neighbor */
		rcu_read_lock();
		hlist_for_each_entry_rcu(tmp_neigh_node, node,
					 &orig_neigh_node->neigh_list, list) {
			if (compare_orig(tmp_neigh_node->addr,
					 orig_neigh_node->orig) &&
			    (tmp_neigh_node->if_incoming == if_incoming))
				neigh_node = tmp_neigh_node;
		}

		if (!neigh_node)
			neigh_node = create_neighbor(orig_neigh_node,
						     orig_neigh_node,
						     orig_neigh_node->orig,
						     if_incoming);
		/* create_neighbor failed, return 0 */
		if (!neigh_node)
			goto unlock;

		kref_get(&neigh_node->refcount);
		rcu_read_unlock();
	}

	orig_node->last_valid = jiffies;

	/* pay attention to not get a value bigger than 100 % */
	total_count = (orig_neigh_node->bcast_own_sum[if_incoming->if_num] >
		       neigh_node->real_packet_count ?
		       neigh_node->real_packet_count :
		       orig_neigh_node->bcast_own_sum[if_incoming->if_num]);

	/* if we have too few packets (too less data) we set tq_own to zero */
	/* if we receive too few packets it is not considered bidirectional */
	if ((total_count < TQ_LOCAL_BIDRECT_SEND_MINIMUM) ||
	    (neigh_node->real_packet_count < TQ_LOCAL_BIDRECT_RECV_MINIMUM))
		orig_neigh_node->tq_own = 0;
	else
		/* neigh_node->real_packet_count is never zero as we
		 * only purge old information when getting new
		 * information */
		orig_neigh_node->tq_own = (TQ_MAX_VALUE * total_count) /
			neigh_node->real_packet_count;

	/*
	 * 1 - ((1-x) ** 3), normalized to TQ_MAX_VALUE this does
	 * affect the nearly-symmetric links only a little, but
	 * punishes asymmetric links more.  This will give a value
	 * between 0 and TQ_MAX_VALUE
	 */
	orig_neigh_node->tq_asym_penalty =
		TQ_MAX_VALUE -
		(TQ_MAX_VALUE *
		 (TQ_LOCAL_WINDOW_SIZE - neigh_node->real_packet_count) *
		 (TQ_LOCAL_WINDOW_SIZE - neigh_node->real_packet_count) *
		 (TQ_LOCAL_WINDOW_SIZE - neigh_node->real_packet_count)) /
		(TQ_LOCAL_WINDOW_SIZE *
		 TQ_LOCAL_WINDOW_SIZE *
		 TQ_LOCAL_WINDOW_SIZE);

	/* fold own-link quality and asymmetry into the received TQ */
	batman_packet->tq = ((batman_packet->tq *
			      orig_neigh_node->tq_own *
			      orig_neigh_node->tq_asym_penalty) /
			     (TQ_MAX_VALUE * TQ_MAX_VALUE));

	bat_dbg(DBG_BATMAN, bat_priv,
		"bidirectional: "
		"orig = %-15pM neigh = %-15pM => own_bcast = %2i, "
		"real recv = %2i, local tq: %3i, asym_penalty: %3i, "
		"total tq: %3i\n",
		orig_node->orig, orig_neigh_node->orig, total_count,
		neigh_node->real_packet_count, orig_neigh_node->tq_own,
		orig_neigh_node->tq_asym_penalty, batman_packet->tq);

	/* if link has the minimum required transmission quality
	 * consider it bidirectional */
	if (batman_packet->tq >= TQ_TOTAL_BIDRECT_LIMIT)
		ret = 1;

	goto out;

unlock:
	rcu_read_unlock();
out:
	if (neigh_node)
		kref_put(&neigh_node->refcount, neigh_node_free_ref);
	return ret;
}
/* update_orig() - update an originator entry from a freshly received OGM
 *
 * Looks up (or creates) the neigh_node the packet came in through, feeds
 * the packet's TQ into that neighbor's ring buffer and - if the neighbor
 * offers a path at least as good as the current router - switches the
 * route.  Finally the HNA table and gateway state are refreshed.
 */
static void update_orig(struct bat_priv *bat_priv,
			struct orig_node *orig_node,
			struct ethhdr *ethhdr,
			struct batman_packet *batman_packet,
			struct batman_if *if_incoming,
			unsigned char *hna_buff, int hna_buff_len,
			char is_duplicate)
{
	struct neigh_node *neigh_node = NULL, *tmp_neigh_node = NULL;
	struct orig_node *orig_node_tmp;
	struct hlist_node *node;
	int tmp_hna_buff_len;
	uint8_t bcast_own_sum_orig, bcast_own_sum_neigh;

	bat_dbg(DBG_BATMAN, bat_priv, "update_originator(): "
		"Searching and updating originator entry of received packet\n");

	rcu_read_lock();
	hlist_for_each_entry_rcu(tmp_neigh_node, node,
				 &orig_node->neigh_list, list) {
		if (compare_orig(tmp_neigh_node->addr, ethhdr->h_source) &&
		    (tmp_neigh_node->if_incoming == if_incoming)) {
			neigh_node = tmp_neigh_node;
			continue;
		}

		if (is_duplicate)
			continue;

		/* a non-duplicate arrived via a different neighbor: record a
		 * zero TQ sample there so that neighbor's average decays */
		ring_buffer_set(tmp_neigh_node->tq_recv,
				&tmp_neigh_node->tq_index, 0);
		tmp_neigh_node->tq_avg =
			ring_buffer_avg(tmp_neigh_node->tq_recv);
	}

	if (!neigh_node) {
		struct orig_node *orig_tmp;

		orig_tmp = get_orig_node(bat_priv, ethhdr->h_source);
		if (!orig_tmp)
			goto unlock;

		neigh_node = create_neighbor(orig_node, orig_tmp,
					     ethhdr->h_source, if_incoming);

		/* drop the reference get_orig_node() handed us */
		kref_put(&orig_tmp->refcount, orig_node_free_ref);
		if (!neigh_node)
			goto unlock;
	} else
		bat_dbg(DBG_BATMAN, bat_priv,
			"Updating existing last-hop neighbor of originator\n");

	/* hold neigh_node beyond the RCU section; released on out: */
	kref_get(&neigh_node->refcount);
	rcu_read_unlock();

	orig_node->flags = batman_packet->flags;
	neigh_node->last_valid = jiffies;

	ring_buffer_set(neigh_node->tq_recv,
			&neigh_node->tq_index,
			batman_packet->tq);
	neigh_node->tq_avg = ring_buffer_avg(neigh_node->tq_recv);

	if (!is_duplicate) {
		orig_node->last_ttl = batman_packet->ttl;
		neigh_node->last_ttl = batman_packet->ttl;
	}

	/* never consume more HNA bytes than the packet claims to carry */
	tmp_hna_buff_len = (hna_buff_len > batman_packet->num_hna * ETH_ALEN ?
			    batman_packet->num_hna * ETH_ALEN : hna_buff_len);

	/* if this neighbor already is our next hop there is nothing
	 * to change */
	if (orig_node->router == neigh_node)
		goto update_hna;

	/* if this neighbor does not offer a better TQ we won't consider it */
	if ((orig_node->router) &&
	    (orig_node->router->tq_avg > neigh_node->tq_avg))
		goto update_hna;

	/* if the TQ is the same and the link not more symetric we
	 * won't consider it either */
	if ((orig_node->router) &&
	    (neigh_node->tq_avg == orig_node->router->tq_avg)) {
		orig_node_tmp = orig_node->router->orig_node;

		spin_lock_bh(&orig_node_tmp->ogm_cnt_lock);
		bcast_own_sum_orig =
			orig_node_tmp->bcast_own_sum[if_incoming->if_num];
		spin_unlock_bh(&orig_node_tmp->ogm_cnt_lock);

		orig_node_tmp = neigh_node->orig_node;
		spin_lock_bh(&orig_node_tmp->ogm_cnt_lock);
		bcast_own_sum_neigh =
			orig_node_tmp->bcast_own_sum[if_incoming->if_num];
		spin_unlock_bh(&orig_node_tmp->ogm_cnt_lock);

		if (bcast_own_sum_orig >= bcast_own_sum_neigh)
			goto update_hna;
	}

	/* the new neighbor won: install it as router */
	update_routes(bat_priv, orig_node, neigh_node,
		      hna_buff, tmp_hna_buff_len);
	goto update_gw;

update_hna:
	/* keep the current router, only refresh the HNA entries */
	update_routes(bat_priv, orig_node, orig_node->router,
		      hna_buff, tmp_hna_buff_len);

update_gw:
	if (orig_node->gw_flags != batman_packet->gw_flags)
		gw_node_update(bat_priv, orig_node, batman_packet->gw_flags);

	orig_node->gw_flags = batman_packet->gw_flags;

	/* restart gateway selection if fast or late switching was enabled */
	if ((orig_node->gw_flags) &&
	    (atomic_read(&bat_priv->gw_mode) == GW_MODE_CLIENT) &&
	    (atomic_read(&bat_priv->gw_sel_class) > 2))
		gw_check_election(bat_priv, orig_node);

	goto out;

unlock:
	rcu_read_unlock();
out:
	if (neigh_node)
		kref_put(&neigh_node->refcount, neigh_node_free_ref);
}
  338. /* checks whether the host restarted and is in the protection time.
  339. * returns:
  340. * 0 if the packet is to be accepted
  341. * 1 if the packet is to be ignored.
  342. */
  343. static int window_protected(struct bat_priv *bat_priv,
  344. int32_t seq_num_diff,
  345. unsigned long *last_reset)
  346. {
  347. if ((seq_num_diff <= -TQ_LOCAL_WINDOW_SIZE)
  348. || (seq_num_diff >= EXPECTED_SEQNO_RANGE)) {
  349. if (time_after(jiffies, *last_reset +
  350. msecs_to_jiffies(RESET_PROTECTION_MS))) {
  351. *last_reset = jiffies;
  352. bat_dbg(DBG_BATMAN, bat_priv,
  353. "old packet received, start protection\n");
  354. return 0;
  355. } else
  356. return 1;
  357. }
  358. return 0;
  359. }
/* processes a batman packet for all interfaces, adjusts the sequence number
 * and finds out whether it is a duplicate.
 * returns:
 *   1 the packet is a duplicate
 *   0 the packet has not yet been received
 *  -1 the packet is old and has been received while the seqno window
 *     was protected. Caller should drop it.
 */
static char count_real_packets(struct ethhdr *ethhdr,
			       struct batman_packet *batman_packet,
			       struct batman_if *if_incoming)
{
	struct bat_priv *bat_priv = netdev_priv(if_incoming->soft_iface);
	struct orig_node *orig_node;
	struct neigh_node *tmp_neigh_node;
	struct hlist_node *node;
	char is_duplicate = 0;
	int32_t seq_diff;
	int need_update = 0;
	int set_mark;

	orig_node = get_orig_node(bat_priv, batman_packet->orig);
	if (!orig_node)
		return 0;

	seq_diff = batman_packet->seqno - orig_node->last_real_seqno;

	/* signalize caller that the packet is to be dropped. */
	if (window_protected(bat_priv, seq_diff,
			     &orig_node->batman_seqno_reset))
		goto err;

	rcu_read_lock();
	hlist_for_each_entry_rcu(tmp_neigh_node, node,
				 &orig_node->neigh_list, list) {
		/* a seqno already seen via any neighbor marks a duplicate */
		is_duplicate |= get_bit_status(tmp_neigh_node->real_bits,
					       orig_node->last_real_seqno,
					       batman_packet->seqno);

		/* only set the bit for the neighbor the OGM arrived from */
		if (compare_orig(tmp_neigh_node->addr, ethhdr->h_source) &&
		    (tmp_neigh_node->if_incoming == if_incoming))
			set_mark = 1;
		else
			set_mark = 0;

		/* if the window moved, set the update flag. */
		need_update |= bit_get_packet(bat_priv,
					      tmp_neigh_node->real_bits,
					      seq_diff, set_mark);
		tmp_neigh_node->real_packet_count =
			bit_packet_count(tmp_neigh_node->real_bits);
	}
	rcu_read_unlock();

	if (need_update) {
		bat_dbg(DBG_BATMAN, bat_priv,
			"updating last_seqno: old %d, new %d\n",
			orig_node->last_real_seqno, batman_packet->seqno);
		orig_node->last_real_seqno = batman_packet->seqno;
	}

	kref_put(&orig_node->refcount, orig_node_free_ref);
	return is_duplicate;

err:
	kref_put(&orig_node->refcount, orig_node_free_ref);
	return -1;
}
  419. /* copy primary address for bonding */
  420. static void mark_bonding_address(struct orig_node *orig_node,
  421. struct orig_node *orig_neigh_node,
  422. struct batman_packet *batman_packet)
  423. {
  424. if (batman_packet->flags & PRIMARIES_FIRST_HOP)
  425. memcpy(orig_neigh_node->primary_addr,
  426. orig_node->orig, ETH_ALEN);
  427. return;
  428. }
/* mark possible bond.candidates in the neighbor list */
void update_bonding_candidates(struct orig_node *orig_node)
{
	int candidates;
	int interference_candidate;
	int best_tq;
	struct hlist_node *node, *node2;
	struct neigh_node *tmp_neigh_node, *tmp_neigh_node2;
	struct neigh_node *first_candidate, *last_candidate;

	/* update the candidates for this originator */
	if (!orig_node->router) {
		orig_node->bond.candidates = 0;
		return;
	}

	best_tq = orig_node->router->tq_avg;

	/* update bond.candidates */
	candidates = 0;

	/* mark other nodes which also received "PRIMARIES FIRST HOP" packets
	 * as "bonding partner" */

	/* first, zero the list */
	rcu_read_lock();
	hlist_for_each_entry_rcu(tmp_neigh_node, node,
				 &orig_node->neigh_list, list) {
		tmp_neigh_node->next_bond_candidate = NULL;
	}
	rcu_read_unlock();

	first_candidate = NULL;
	last_candidate = NULL;

	rcu_read_lock();
	hlist_for_each_entry_rcu(tmp_neigh_node, node,
				 &orig_node->neigh_list, list) {

		/* only consider if it has the same primary address ... */
		if (memcmp(orig_node->orig,
			   tmp_neigh_node->orig_node->primary_addr,
			   ETH_ALEN) != 0)
			continue;

		/* ... and is good enough to be considered */
		if (tmp_neigh_node->tq_avg < best_tq - BONDING_TQ_THRESHOLD)
			continue;

		/* check if we have another candidate with the same
		 * mac address or interface. If we do, we won't
		 * select this candidate because of possible interference. */
		interference_candidate = 0;
		hlist_for_each_entry_rcu(tmp_neigh_node2, node2,
					 &orig_node->neigh_list, list) {
			if (tmp_neigh_node2 == tmp_neigh_node)
				continue;

			/* we only care if the other candidate is even
			 * considered as candidate. */
			if (!tmp_neigh_node2->next_bond_candidate)
				continue;

			if ((tmp_neigh_node->if_incoming ==
			     tmp_neigh_node2->if_incoming)
			    || (memcmp(tmp_neigh_node->addr,
				       tmp_neigh_node2->addr,
				       ETH_ALEN) == 0)) {
				interference_candidate = 1;
				break;
			}
		}

		/* don't care further if it is an interference candidate */
		if (interference_candidate)
			continue;

		/* candidates form a singly linked ring: each new candidate
		 * points at the previous one; the first candidate is linked
		 * onto the last one after the loop */
		if (!first_candidate) {
			first_candidate = tmp_neigh_node;
			tmp_neigh_node->next_bond_candidate = first_candidate;
		} else
			tmp_neigh_node->next_bond_candidate = last_candidate;

		last_candidate = tmp_neigh_node;

		candidates++;
	}
	rcu_read_unlock();

	if (candidates > 0) {
		/* close the ring and remember where to start */
		first_candidate->next_bond_candidate = last_candidate;
		orig_node->bond.selected = first_candidate;
	}

	orig_node->bond.candidates = candidates;
}
  506. void receive_bat_packet(struct ethhdr *ethhdr,
  507. struct batman_packet *batman_packet,
  508. unsigned char *hna_buff, int hna_buff_len,
  509. struct batman_if *if_incoming)
  510. {
  511. struct bat_priv *bat_priv = netdev_priv(if_incoming->soft_iface);
  512. struct batman_if *batman_if;
  513. struct orig_node *orig_neigh_node, *orig_node;
  514. char has_directlink_flag;
  515. char is_my_addr = 0, is_my_orig = 0, is_my_oldorig = 0;
  516. char is_broadcast = 0, is_bidirectional, is_single_hop_neigh;
  517. char is_duplicate;
  518. uint32_t if_incoming_seqno;
  519. /* Silently drop when the batman packet is actually not a
  520. * correct packet.
  521. *
  522. * This might happen if a packet is padded (e.g. Ethernet has a
  523. * minimum frame length of 64 byte) and the aggregation interprets
  524. * it as an additional length.
  525. *
  526. * TODO: A more sane solution would be to have a bit in the
  527. * batman_packet to detect whether the packet is the last
  528. * packet in an aggregation. Here we expect that the padding
  529. * is always zero (or not 0x01)
  530. */
  531. if (batman_packet->packet_type != BAT_PACKET)
  532. return;
  533. /* could be changed by schedule_own_packet() */
  534. if_incoming_seqno = atomic_read(&if_incoming->seqno);
  535. has_directlink_flag = (batman_packet->flags & DIRECTLINK ? 1 : 0);
  536. is_single_hop_neigh = (compare_orig(ethhdr->h_source,
  537. batman_packet->orig) ? 1 : 0);
  538. bat_dbg(DBG_BATMAN, bat_priv,
  539. "Received BATMAN packet via NB: %pM, IF: %s [%pM] "
  540. "(from OG: %pM, via prev OG: %pM, seqno %d, tq %d, "
  541. "TTL %d, V %d, IDF %d)\n",
  542. ethhdr->h_source, if_incoming->net_dev->name,
  543. if_incoming->net_dev->dev_addr, batman_packet->orig,
  544. batman_packet->prev_sender, batman_packet->seqno,
  545. batman_packet->tq, batman_packet->ttl, batman_packet->version,
  546. has_directlink_flag);
  547. rcu_read_lock();
  548. list_for_each_entry_rcu(batman_if, &if_list, list) {
  549. if (batman_if->if_status != IF_ACTIVE)
  550. continue;
  551. if (batman_if->soft_iface != if_incoming->soft_iface)
  552. continue;
  553. if (compare_orig(ethhdr->h_source,
  554. batman_if->net_dev->dev_addr))
  555. is_my_addr = 1;
  556. if (compare_orig(batman_packet->orig,
  557. batman_if->net_dev->dev_addr))
  558. is_my_orig = 1;
  559. if (compare_orig(batman_packet->prev_sender,
  560. batman_if->net_dev->dev_addr))
  561. is_my_oldorig = 1;
  562. if (compare_orig(ethhdr->h_source, broadcast_addr))
  563. is_broadcast = 1;
  564. }
  565. rcu_read_unlock();
  566. if (batman_packet->version != COMPAT_VERSION) {
  567. bat_dbg(DBG_BATMAN, bat_priv,
  568. "Drop packet: incompatible batman version (%i)\n",
  569. batman_packet->version);
  570. return;
  571. }
  572. if (is_my_addr) {
  573. bat_dbg(DBG_BATMAN, bat_priv,
  574. "Drop packet: received my own broadcast (sender: %pM"
  575. ")\n",
  576. ethhdr->h_source);
  577. return;
  578. }
  579. if (is_broadcast) {
  580. bat_dbg(DBG_BATMAN, bat_priv, "Drop packet: "
  581. "ignoring all packets with broadcast source addr (sender: %pM"
  582. ")\n", ethhdr->h_source);
  583. return;
  584. }
  585. if (is_my_orig) {
  586. unsigned long *word;
  587. int offset;
  588. orig_neigh_node = get_orig_node(bat_priv, ethhdr->h_source);
  589. if (!orig_neigh_node)
  590. return;
  591. /* neighbor has to indicate direct link and it has to
  592. * come via the corresponding interface */
  593. /* if received seqno equals last send seqno save new
  594. * seqno for bidirectional check */
  595. if (has_directlink_flag &&
  596. compare_orig(if_incoming->net_dev->dev_addr,
  597. batman_packet->orig) &&
  598. (batman_packet->seqno - if_incoming_seqno + 2 == 0)) {
  599. offset = if_incoming->if_num * NUM_WORDS;
  600. spin_lock_bh(&orig_neigh_node->ogm_cnt_lock);
  601. word = &(orig_neigh_node->bcast_own[offset]);
  602. bit_mark(word, 0);
  603. orig_neigh_node->bcast_own_sum[if_incoming->if_num] =
  604. bit_packet_count(word);
  605. spin_unlock_bh(&orig_neigh_node->ogm_cnt_lock);
  606. }
  607. bat_dbg(DBG_BATMAN, bat_priv, "Drop packet: "
  608. "originator packet from myself (via neighbor)\n");
  609. kref_put(&orig_neigh_node->refcount, orig_node_free_ref);
  610. return;
  611. }
  612. if (is_my_oldorig) {
  613. bat_dbg(DBG_BATMAN, bat_priv,
  614. "Drop packet: ignoring all rebroadcast echos (sender: "
  615. "%pM)\n", ethhdr->h_source);
  616. return;
  617. }
  618. orig_node = get_orig_node(bat_priv, batman_packet->orig);
  619. if (!orig_node)
  620. return;
  621. is_duplicate = count_real_packets(ethhdr, batman_packet, if_incoming);
  622. if (is_duplicate == -1) {
  623. bat_dbg(DBG_BATMAN, bat_priv,
  624. "Drop packet: packet within seqno protection time "
  625. "(sender: %pM)\n", ethhdr->h_source);
  626. goto out;
  627. }
  628. if (batman_packet->tq == 0) {
  629. bat_dbg(DBG_BATMAN, bat_priv,
  630. "Drop packet: originator packet with tq equal 0\n");
  631. goto out;
  632. }
  633. /* avoid temporary routing loops */
  634. if ((orig_node->router) &&
  635. (orig_node->router->orig_node->router) &&
  636. (compare_orig(orig_node->router->addr,
  637. batman_packet->prev_sender)) &&
  638. !(compare_orig(batman_packet->orig, batman_packet->prev_sender)) &&
  639. (compare_orig(orig_node->router->addr,
  640. orig_node->router->orig_node->router->addr))) {
  641. bat_dbg(DBG_BATMAN, bat_priv,
  642. "Drop packet: ignoring all rebroadcast packets that "
  643. "may make me loop (sender: %pM)\n", ethhdr->h_source);
  644. goto out;
  645. }
  646. /* if sender is a direct neighbor the sender mac equals
  647. * originator mac */
  648. orig_neigh_node = (is_single_hop_neigh ?
  649. orig_node :
  650. get_orig_node(bat_priv, ethhdr->h_source));
  651. if (!orig_neigh_node)
  652. goto out_neigh;
  653. /* drop packet if sender is not a direct neighbor and if we
  654. * don't route towards it */
  655. if (!is_single_hop_neigh && (!orig_neigh_node->router)) {
  656. bat_dbg(DBG_BATMAN, bat_priv,
  657. "Drop packet: OGM via unknown neighbor!\n");
  658. goto out_neigh;
  659. }
  660. is_bidirectional = is_bidirectional_neigh(orig_node, orig_neigh_node,
  661. batman_packet, if_incoming);
  662. /* update ranking if it is not a duplicate or has the same
  663. * seqno and similar ttl as the non-duplicate */
  664. if (is_bidirectional &&
  665. (!is_duplicate ||
  666. ((orig_node->last_real_seqno == batman_packet->seqno) &&
  667. (orig_node->last_ttl - 3 <= batman_packet->ttl))))
  668. update_orig(bat_priv, orig_node, ethhdr, batman_packet,
  669. if_incoming, hna_buff, hna_buff_len, is_duplicate);
  670. mark_bonding_address(orig_node, orig_neigh_node, batman_packet);
  671. update_bonding_candidates(orig_node);
  672. /* is single hop (direct) neighbor */
  673. if (is_single_hop_neigh) {
  674. /* mark direct link on incoming interface */
  675. schedule_forward_packet(orig_node, ethhdr, batman_packet,
  676. 1, hna_buff_len, if_incoming);
  677. bat_dbg(DBG_BATMAN, bat_priv, "Forwarding packet: "
  678. "rebroadcast neighbor packet with direct link flag\n");
  679. goto out_neigh;
  680. }
  681. /* multihop originator */
  682. if (!is_bidirectional) {
  683. bat_dbg(DBG_BATMAN, bat_priv,
  684. "Drop packet: not received via bidirectional link\n");
  685. goto out_neigh;
  686. }
  687. if (is_duplicate) {
  688. bat_dbg(DBG_BATMAN, bat_priv,
  689. "Drop packet: duplicate packet received\n");
  690. goto out_neigh;
  691. }
  692. bat_dbg(DBG_BATMAN, bat_priv,
  693. "Forwarding packet: rebroadcast originator packet\n");
  694. schedule_forward_packet(orig_node, ethhdr, batman_packet,
  695. 0, hna_buff_len, if_incoming);
  696. out_neigh:
  697. if (!is_single_hop_neigh)
  698. kref_put(&orig_neigh_node->refcount, orig_node_free_ref);
  699. out:
  700. kref_put(&orig_node->refcount, orig_node_free_ref);
  701. }
  702. int recv_bat_packet(struct sk_buff *skb, struct batman_if *batman_if)
  703. {
  704. struct bat_priv *bat_priv = netdev_priv(batman_if->soft_iface);
  705. struct ethhdr *ethhdr;
  706. /* drop packet if it has not necessary minimum size */
  707. if (unlikely(!pskb_may_pull(skb, sizeof(struct batman_packet))))
  708. return NET_RX_DROP;
  709. ethhdr = (struct ethhdr *)skb_mac_header(skb);
  710. /* packet with broadcast indication but unicast recipient */
  711. if (!is_broadcast_ether_addr(ethhdr->h_dest))
  712. return NET_RX_DROP;
  713. /* packet with broadcast sender address */
  714. if (is_broadcast_ether_addr(ethhdr->h_source))
  715. return NET_RX_DROP;
  716. /* create a copy of the skb, if needed, to modify it. */
  717. if (skb_cow(skb, 0) < 0)
  718. return NET_RX_DROP;
  719. /* keep skb linear */
  720. if (skb_linearize(skb) < 0)
  721. return NET_RX_DROP;
  722. ethhdr = (struct ethhdr *)skb_mac_header(skb);
  723. spin_lock_bh(&bat_priv->orig_hash_lock);
  724. receive_aggr_bat_packet(ethhdr,
  725. skb->data,
  726. skb_headlen(skb),
  727. batman_if);
  728. spin_unlock_bh(&bat_priv->orig_hash_lock);
  729. kfree_skb(skb);
  730. return NET_RX_SUCCESS;
  731. }
/**
 * recv_my_icmp_packet - handle a batman icmp packet addressed to this node
 * @bat_priv: the bat priv with all the soft interface information
 * @skb: the received icmp packet
 * @icmp_len: length of the icmp payload handed to the userspace socket
 *
 * Anything that is not an echo request is queued for the userspace icmp
 * socket. Echo requests are turned into echo replies and sent back
 * towards the originator. Returns NET_RX_SUCCESS only when a reply was
 * actually transmitted; NET_RX_DROP otherwise (the skb is presumably
 * freed by the caller on drop - verify against the rx dispatch path).
 */
static int recv_my_icmp_packet(struct bat_priv *bat_priv,
			       struct sk_buff *skb, size_t icmp_len)
{
	struct orig_node *orig_node;
	struct icmp_packet_rr *icmp_packet;
	struct batman_if *batman_if;
	int ret;
	uint8_t dstaddr[ETH_ALEN];

	icmp_packet = (struct icmp_packet_rr *)skb->data;

	/* add data to device queue */
	if (icmp_packet->msg_type != ECHO_REQUEST) {
		bat_socket_receive_packet(icmp_packet, icmp_len);
		return NET_RX_DROP;
	}

	/* without a primary interface we have no source address to reply
	 * from */
	if (!bat_priv->primary_if)
		return NET_RX_DROP;

	/* answer echo request (ping) */
	/* get routing information */
	spin_lock_bh(&bat_priv->orig_hash_lock);
	rcu_read_lock();
	orig_node = ((struct orig_node *)hash_find(bat_priv->orig_hash,
						   compare_orig, choose_orig,
						   icmp_packet->orig));
	rcu_read_unlock();
	ret = NET_RX_DROP;

	if ((orig_node) && (orig_node->router)) {
		/* don't lock while sending the packets ... we therefore
		 * copy the required data before sending */
		batman_if = orig_node->router->if_incoming;
		memcpy(dstaddr, orig_node->router->addr, ETH_ALEN);
		/* NOTE: lock released here; router data was copied above so
		 * the originator entry may change safely afterwards */
		spin_unlock_bh(&bat_priv->orig_hash_lock);

		/* create a copy of the skb, if needed, to modify it. */
		if (skb_cow(skb, sizeof(struct ethhdr)) < 0)
			return NET_RX_DROP;

		/* skb_cow() may have relocated skb->data - re-fetch */
		icmp_packet = (struct icmp_packet_rr *)skb->data;

		/* turn the request into a reply: swap addresses, reset TTL */
		memcpy(icmp_packet->dst, icmp_packet->orig, ETH_ALEN);
		memcpy(icmp_packet->orig,
		       bat_priv->primary_if->net_dev->dev_addr, ETH_ALEN);
		icmp_packet->msg_type = ECHO_REPLY;
		icmp_packet->ttl = TTL;

		send_skb_packet(skb, batman_if, dstaddr);
		ret = NET_RX_SUCCESS;
	} else
		/* no route back to the pinging node */
		spin_unlock_bh(&bat_priv->orig_hash_lock);

	return ret;
}
/**
 * recv_icmp_ttl_exceeded - answer an icmp echo request whose TTL ran out
 * @bat_priv: the bat priv with all the soft interface information
 * @skb: the received icmp packet
 *
 * Mirrors recv_my_icmp_packet() but sends a TTL_EXCEEDED message back to
 * the originator instead of an echo reply (this is what makes traceroute
 * over batman work). Only echo requests are answered; all other message
 * types are dropped with a debug note.
 */
static int recv_icmp_ttl_exceeded(struct bat_priv *bat_priv,
				  struct sk_buff *skb)
{
	struct orig_node *orig_node;
	struct icmp_packet *icmp_packet;
	struct batman_if *batman_if;
	int ret;
	uint8_t dstaddr[ETH_ALEN];

	icmp_packet = (struct icmp_packet *)skb->data;

	/* send TTL exceeded if packet is an echo request (traceroute) */
	if (icmp_packet->msg_type != ECHO_REQUEST) {
		pr_debug("Warning - can't forward icmp packet from %pM to "
			 "%pM: ttl exceeded\n", icmp_packet->orig,
			 icmp_packet->dst);
		return NET_RX_DROP;
	}

	/* without a primary interface there is no source address for the
	 * error message */
	if (!bat_priv->primary_if)
		return NET_RX_DROP;

	/* get routing information */
	spin_lock_bh(&bat_priv->orig_hash_lock);
	rcu_read_lock();
	orig_node = ((struct orig_node *)
		     hash_find(bat_priv->orig_hash, compare_orig, choose_orig,
			       icmp_packet->orig));
	rcu_read_unlock();
	ret = NET_RX_DROP;

	if ((orig_node) && (orig_node->router)) {
		/* don't lock while sending the packets ... we therefore
		 * copy the required data before sending */
		batman_if = orig_node->router->if_incoming;
		memcpy(dstaddr, orig_node->router->addr, ETH_ALEN);
		/* lock released; required router data copied above */
		spin_unlock_bh(&bat_priv->orig_hash_lock);

		/* create a copy of the skb, if needed, to modify it. */
		if (skb_cow(skb, sizeof(struct ethhdr)) < 0)
			return NET_RX_DROP;

		/* skb_cow() may have relocated skb->data - re-fetch */
		icmp_packet = (struct icmp_packet *) skb->data;

		/* bounce the packet back as a TTL_EXCEEDED notification */
		memcpy(icmp_packet->dst, icmp_packet->orig, ETH_ALEN);
		memcpy(icmp_packet->orig,
		       bat_priv->primary_if->net_dev->dev_addr, ETH_ALEN);
		icmp_packet->msg_type = TTL_EXCEEDED;
		icmp_packet->ttl = TTL;

		send_skb_packet(skb, batman_if, dstaddr);
		ret = NET_RX_SUCCESS;
	} else
		/* no route back to the sender */
		spin_unlock_bh(&bat_priv->orig_hash_lock);

	return ret;
}
  825. int recv_icmp_packet(struct sk_buff *skb, struct batman_if *recv_if)
  826. {
  827. struct bat_priv *bat_priv = netdev_priv(recv_if->soft_iface);
  828. struct icmp_packet_rr *icmp_packet;
  829. struct ethhdr *ethhdr;
  830. struct orig_node *orig_node;
  831. struct batman_if *batman_if;
  832. int hdr_size = sizeof(struct icmp_packet);
  833. int ret;
  834. uint8_t dstaddr[ETH_ALEN];
  835. /**
  836. * we truncate all incoming icmp packets if they don't match our size
  837. */
  838. if (skb->len >= sizeof(struct icmp_packet_rr))
  839. hdr_size = sizeof(struct icmp_packet_rr);
  840. /* drop packet if it has not necessary minimum size */
  841. if (unlikely(!pskb_may_pull(skb, hdr_size)))
  842. return NET_RX_DROP;
  843. ethhdr = (struct ethhdr *)skb_mac_header(skb);
  844. /* packet with unicast indication but broadcast recipient */
  845. if (is_broadcast_ether_addr(ethhdr->h_dest))
  846. return NET_RX_DROP;
  847. /* packet with broadcast sender address */
  848. if (is_broadcast_ether_addr(ethhdr->h_source))
  849. return NET_RX_DROP;
  850. /* not for me */
  851. if (!is_my_mac(ethhdr->h_dest))
  852. return NET_RX_DROP;
  853. icmp_packet = (struct icmp_packet_rr *)skb->data;
  854. /* add record route information if not full */
  855. if ((hdr_size == sizeof(struct icmp_packet_rr)) &&
  856. (icmp_packet->rr_cur < BAT_RR_LEN)) {
  857. memcpy(&(icmp_packet->rr[icmp_packet->rr_cur]),
  858. ethhdr->h_dest, ETH_ALEN);
  859. icmp_packet->rr_cur++;
  860. }
  861. /* packet for me */
  862. if (is_my_mac(icmp_packet->dst))
  863. return recv_my_icmp_packet(bat_priv, skb, hdr_size);
  864. /* TTL exceeded */
  865. if (icmp_packet->ttl < 2)
  866. return recv_icmp_ttl_exceeded(bat_priv, skb);
  867. ret = NET_RX_DROP;
  868. /* get routing information */
  869. spin_lock_bh(&bat_priv->orig_hash_lock);
  870. rcu_read_lock();
  871. orig_node = ((struct orig_node *)
  872. hash_find(bat_priv->orig_hash, compare_orig, choose_orig,
  873. icmp_packet->dst));
  874. rcu_read_unlock();
  875. if ((orig_node) && (orig_node->router)) {
  876. /* don't lock while sending the packets ... we therefore
  877. * copy the required data before sending */
  878. batman_if = orig_node->router->if_incoming;
  879. memcpy(dstaddr, orig_node->router->addr, ETH_ALEN);
  880. spin_unlock_bh(&bat_priv->orig_hash_lock);
  881. /* create a copy of the skb, if needed, to modify it. */
  882. if (skb_cow(skb, sizeof(struct ethhdr)) < 0)
  883. return NET_RX_DROP;
  884. icmp_packet = (struct icmp_packet_rr *)skb->data;
  885. /* decrement ttl */
  886. icmp_packet->ttl--;
  887. /* route it */
  888. send_skb_packet(skb, batman_if, dstaddr);
  889. ret = NET_RX_SUCCESS;
  890. } else
  891. spin_unlock_bh(&bat_priv->orig_hash_lock);
  892. return ret;
  893. }
/* find a suitable router for this originator, and use
 * bonding if possible. */
/**
 * find_router - select the next hop neighbor for @orig_node
 * @bat_priv: the bat priv with all the soft interface information
 * @orig_node: destination originator (may be NULL)
 * @recv_if: interface the to-be-forwarded packet came in on, or NULL
 *           when this node is the first hop
 *
 * Without bonding/interface-alternating constraints this simply returns
 * orig_node->router. With a bonding candidate list present it either
 * round-robins over the candidates (bonding enabled) or picks the best
 * tq_avg candidate not using @recv_if (bonding disabled).
 *
 * NOTE(review): the returned neigh_node is looked up partly under
 * rcu_read_lock but used afterwards - callers appear to rely on the
 * orig_hash_lock being held around this call; confirm at call sites.
 */
struct neigh_node *find_router(struct bat_priv *bat_priv,
			       struct orig_node *orig_node,
			       struct batman_if *recv_if)
{
	struct orig_node *primary_orig_node;
	struct orig_node *router_orig;
	struct neigh_node *router, *first_candidate, *best_router;
	static uint8_t zero_mac[ETH_ALEN] = {0, 0, 0, 0, 0, 0};
	int bonding_enabled;

	if (!orig_node)
		return NULL;

	if (!orig_node->router)
		return NULL;

	/* without bonding, the first node should
	 * always choose the default router. */
	bonding_enabled = atomic_read(&bat_priv->bonding);

	if ((!recv_if) && (!bonding_enabled))
		return orig_node->router;

	router_orig = orig_node->router->orig_node;

	/* if we have something in the primary_addr, we can search
	 * for a potential bonding candidate. */
	if (memcmp(router_orig->primary_addr, zero_mac, ETH_ALEN) == 0)
		return orig_node->router;

	/* find the orig_node which has the primary interface. might
	 * even be the same as our router_orig in many cases */
	if (memcmp(router_orig->primary_addr,
		   router_orig->orig, ETH_ALEN) == 0) {
		primary_orig_node = router_orig;
	} else {
		rcu_read_lock();
		primary_orig_node = hash_find(bat_priv->orig_hash, compare_orig,
					      choose_orig,
					      router_orig->primary_addr);
		rcu_read_unlock();

		if (!primary_orig_node)
			return orig_node->router;
	}

	/* with less than 2 candidates, we can't do any
	 * bonding and prefer the original router. */
	if (primary_orig_node->bond.candidates < 2)
		return orig_node->router;

	/* all nodes between should choose a candidate which
	 * is is not on the interface where the packet came
	 * in. */
	first_candidate = primary_orig_node->bond.selected;
	router = first_candidate;

	if (bonding_enabled) {
		/* in the bonding case, send the packets in a round
		 * robin fashion over the remaining interfaces. */
		do {
			/* recv_if == NULL on the first node. */
			if (router->if_incoming != recv_if)
				break;

			router = router->next_bond_candidate;
		} while (router != first_candidate);

		/* advance the round-robin cursor for the next packet */
		primary_orig_node->bond.selected = router->next_bond_candidate;
	} else {
		/* if bonding is disabled, use the best of the
		 * remaining candidates which are not using
		 * this interface. */
		best_router = first_candidate;

		do {
			/* recv_if == NULL on the first node. */
			if ((router->if_incoming != recv_if) &&
			    (router->tq_avg > best_router->tq_avg))
				best_router = router;

			router = router->next_bond_candidate;
		} while (router != first_candidate);

		router = best_router;
	}

	return router;
}
  968. static int check_unicast_packet(struct sk_buff *skb, int hdr_size)
  969. {
  970. struct ethhdr *ethhdr;
  971. /* drop packet if it has not necessary minimum size */
  972. if (unlikely(!pskb_may_pull(skb, hdr_size)))
  973. return -1;
  974. ethhdr = (struct ethhdr *)skb_mac_header(skb);
  975. /* packet with unicast indication but broadcast recipient */
  976. if (is_broadcast_ether_addr(ethhdr->h_dest))
  977. return -1;
  978. /* packet with broadcast sender address */
  979. if (is_broadcast_ether_addr(ethhdr->h_source))
  980. return -1;
  981. /* not for me */
  982. if (!is_my_mac(ethhdr->h_dest))
  983. return -1;
  984. return 0;
  985. }
/**
 * route_unicast_packet - forward a unicast packet one hop towards its
 *	destination
 * @skb: the packet to route
 * @recv_if: interface the packet was received on (used to avoid sending
 *	bonded traffic back out of the incoming interface)
 * @hdr_size: size of the batman header in front of the payload
 *
 * Looks up the destination originator, picks a router via find_router()
 * (honoring bonding), and transmits. Oversized BAT_UNICAST packets are
 * handed to the fragmentation code; BAT_UNICAST_FRAG packets that can be
 * reassembled at the new MTU are merged before forwarding.
 */
int route_unicast_packet(struct sk_buff *skb, struct batman_if *recv_if,
			 int hdr_size)
{
	struct bat_priv *bat_priv = netdev_priv(recv_if->soft_iface);
	struct orig_node *orig_node;
	struct neigh_node *router;
	struct batman_if *batman_if;
	uint8_t dstaddr[ETH_ALEN];
	struct unicast_packet *unicast_packet;
	struct ethhdr *ethhdr = (struct ethhdr *)skb_mac_header(skb);
	int ret;
	struct sk_buff *new_skb;

	unicast_packet = (struct unicast_packet *)skb->data;

	/* TTL exceeded */
	if (unicast_packet->ttl < 2) {
		pr_debug("Warning - can't forward unicast packet from %pM to "
			 "%pM: ttl exceeded\n", ethhdr->h_source,
			 unicast_packet->dest);
		return NET_RX_DROP;
	}

	/* get routing information */
	spin_lock_bh(&bat_priv->orig_hash_lock);
	rcu_read_lock();
	orig_node = ((struct orig_node *)
		     hash_find(bat_priv->orig_hash, compare_orig, choose_orig,
			       unicast_packet->dest));
	rcu_read_unlock();

	/* find_router() is called with orig_hash_lock held; it may return
	 * NULL when no route exists */
	router = find_router(bat_priv, orig_node, recv_if);

	if (!router) {
		spin_unlock_bh(&bat_priv->orig_hash_lock);
		return NET_RX_DROP;
	}

	/* don't lock while sending the packets ... we therefore
	 * copy the required data before sending */
	batman_if = router->if_incoming;
	memcpy(dstaddr, router->addr, ETH_ALEN);
	spin_unlock_bh(&bat_priv->orig_hash_lock);

	/* create a copy of the skb, if needed, to modify it. */
	if (skb_cow(skb, sizeof(struct ethhdr)) < 0)
		return NET_RX_DROP;

	/* skb_cow() may have relocated skb->data - re-fetch */
	unicast_packet = (struct unicast_packet *)skb->data;

	/* packet too big for the next hop's MTU: fragment it */
	if (unicast_packet->packet_type == BAT_UNICAST &&
	    atomic_read(&bat_priv->fragmentation) &&
	    skb->len > batman_if->net_dev->mtu)
		return frag_send_skb(skb, bat_priv, batman_if,
				     dstaddr);

	/* fragment that fits the outgoing MTU after merging: reassemble
	 * before forwarding */
	if (unicast_packet->packet_type == BAT_UNICAST_FRAG &&
	    frag_can_reassemble(skb, batman_if->net_dev->mtu)) {

		ret = frag_reassemble_skb(skb, bat_priv, &new_skb);

		if (ret == NET_RX_DROP)
			return NET_RX_DROP;

		/* packet was buffered for late merge */
		if (!new_skb)
			return NET_RX_SUCCESS;

		/* continue with the merged skb from here on */
		skb = new_skb;
		unicast_packet = (struct unicast_packet *)skb->data;
	}

	/* decrement ttl */
	unicast_packet->ttl--;

	/* route it */
	send_skb_packet(skb, batman_if, dstaddr);

	return NET_RX_SUCCESS;
}
  1049. int recv_unicast_packet(struct sk_buff *skb, struct batman_if *recv_if)
  1050. {
  1051. struct unicast_packet *unicast_packet;
  1052. int hdr_size = sizeof(struct unicast_packet);
  1053. if (check_unicast_packet(skb, hdr_size) < 0)
  1054. return NET_RX_DROP;
  1055. unicast_packet = (struct unicast_packet *)skb->data;
  1056. /* packet for me */
  1057. if (is_my_mac(unicast_packet->dest)) {
  1058. interface_rx(recv_if->soft_iface, skb, recv_if, hdr_size);
  1059. return NET_RX_SUCCESS;
  1060. }
  1061. return route_unicast_packet(skb, recv_if, hdr_size);
  1062. }
  1063. int recv_ucast_frag_packet(struct sk_buff *skb, struct batman_if *recv_if)
  1064. {
  1065. struct bat_priv *bat_priv = netdev_priv(recv_if->soft_iface);
  1066. struct unicast_frag_packet *unicast_packet;
  1067. int hdr_size = sizeof(struct unicast_frag_packet);
  1068. struct sk_buff *new_skb = NULL;
  1069. int ret;
  1070. if (check_unicast_packet(skb, hdr_size) < 0)
  1071. return NET_RX_DROP;
  1072. unicast_packet = (struct unicast_frag_packet *)skb->data;
  1073. /* packet for me */
  1074. if (is_my_mac(unicast_packet->dest)) {
  1075. ret = frag_reassemble_skb(skb, bat_priv, &new_skb);
  1076. if (ret == NET_RX_DROP)
  1077. return NET_RX_DROP;
  1078. /* packet was buffered for late merge */
  1079. if (!new_skb)
  1080. return NET_RX_SUCCESS;
  1081. interface_rx(recv_if->soft_iface, new_skb, recv_if,
  1082. sizeof(struct unicast_packet));
  1083. return NET_RX_SUCCESS;
  1084. }
  1085. return route_unicast_packet(skb, recv_if, hdr_size);
  1086. }
/**
 * recv_bcast_packet - receive handler for batman broadcast packets
 * @skb: the received packet
 * @recv_if: interface the packet arrived on
 *
 * Validates the frame, filters out our own and duplicate broadcasts via
 * the per-originator sequence number window, then both rebroadcasts the
 * packet and delivers it locally. The whole window check/update runs
 * under orig_hash_lock to keep the bitfield and last_bcast_seqno
 * consistent.
 */
int recv_bcast_packet(struct sk_buff *skb, struct batman_if *recv_if)
{
	struct bat_priv *bat_priv = netdev_priv(recv_if->soft_iface);
	struct orig_node *orig_node;
	struct bcast_packet *bcast_packet;
	struct ethhdr *ethhdr;
	int hdr_size = sizeof(struct bcast_packet);
	int32_t seq_diff;

	/* drop packet if it has not necessary minimum size */
	if (unlikely(!pskb_may_pull(skb, hdr_size)))
		return NET_RX_DROP;

	ethhdr = (struct ethhdr *)skb_mac_header(skb);

	/* packet with broadcast indication but unicast recipient */
	if (!is_broadcast_ether_addr(ethhdr->h_dest))
		return NET_RX_DROP;

	/* packet with broadcast sender address */
	if (is_broadcast_ether_addr(ethhdr->h_source))
		return NET_RX_DROP;

	/* ignore broadcasts sent by myself */
	if (is_my_mac(ethhdr->h_source))
		return NET_RX_DROP;

	bcast_packet = (struct bcast_packet *)skb->data;

	/* ignore broadcasts originated by myself */
	if (is_my_mac(bcast_packet->orig))
		return NET_RX_DROP;

	/* packet has no hops left */
	if (bcast_packet->ttl < 2)
		return NET_RX_DROP;

	spin_lock_bh(&bat_priv->orig_hash_lock);
	rcu_read_lock();
	orig_node = ((struct orig_node *)
		     hash_find(bat_priv->orig_hash, compare_orig, choose_orig,
			       bcast_packet->orig));
	rcu_read_unlock();

	/* unknown originator - we cannot track its sequence numbers */
	if (!orig_node) {
		spin_unlock_bh(&bat_priv->orig_hash_lock);
		return NET_RX_DROP;
	}

	/* check whether the packet is a duplicate */
	if (get_bit_status(orig_node->bcast_bits,
			   orig_node->last_bcast_seqno,
			   ntohl(bcast_packet->seqno))) {
		spin_unlock_bh(&bat_priv->orig_hash_lock);
		return NET_RX_DROP;
	}

	seq_diff = ntohl(bcast_packet->seqno) - orig_node->last_bcast_seqno;

	/* check whether the packet is old and the host just restarted. */
	if (window_protected(bat_priv, seq_diff,
			     &orig_node->bcast_seqno_reset)) {
		spin_unlock_bh(&bat_priv->orig_hash_lock);
		return NET_RX_DROP;
	}

	/* mark broadcast in flood history, update window position
	 * if required. */
	if (bit_get_packet(bat_priv, orig_node->bcast_bits, seq_diff, 1))
		orig_node->last_bcast_seqno = ntohl(bcast_packet->seqno);

	spin_unlock_bh(&bat_priv->orig_hash_lock);

	/* rebroadcast packet */
	add_bcast_packet_to_list(bat_priv, skb);

	/* broadcast for me */
	interface_rx(recv_if->soft_iface, skb, recv_if, hdr_size);

	return NET_RX_SUCCESS;
}
  1149. int recv_vis_packet(struct sk_buff *skb, struct batman_if *recv_if)
  1150. {
  1151. struct vis_packet *vis_packet;
  1152. struct ethhdr *ethhdr;
  1153. struct bat_priv *bat_priv = netdev_priv(recv_if->soft_iface);
  1154. int hdr_size = sizeof(struct vis_packet);
  1155. /* keep skb linear */
  1156. if (skb_linearize(skb) < 0)
  1157. return NET_RX_DROP;
  1158. if (unlikely(!pskb_may_pull(skb, hdr_size)))
  1159. return NET_RX_DROP;
  1160. vis_packet = (struct vis_packet *)skb->data;
  1161. ethhdr = (struct ethhdr *)skb_mac_header(skb);
  1162. /* not for me */
  1163. if (!is_my_mac(ethhdr->h_dest))
  1164. return NET_RX_DROP;
  1165. /* ignore own packets */
  1166. if (is_my_mac(vis_packet->vis_orig))
  1167. return NET_RX_DROP;
  1168. if (is_my_mac(vis_packet->sender_orig))
  1169. return NET_RX_DROP;
  1170. switch (vis_packet->vis_type) {
  1171. case VIS_TYPE_SERVER_SYNC:
  1172. receive_server_sync_packet(bat_priv, vis_packet,
  1173. skb_headlen(skb));
  1174. break;
  1175. case VIS_TYPE_CLIENT_UPDATE:
  1176. receive_client_update_packet(bat_priv, vis_packet,
  1177. skb_headlen(skb));
  1178. break;
  1179. default: /* ignore unknown packet */
  1180. break;
  1181. }
  1182. /* We take a copy of the data in the packet, so we should
  1183. always free the skbuf. */
  1184. return NET_RX_DROP;
  1185. }