gateway_client.c

/*
 * Copyright (C) 2009-2011 B.A.T.M.A.N. contributors:
 *
 * Marek Lindner
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
 * 02110-1301, USA
 *
 */

#include "main.h"
#include "bat_sysfs.h"
#include "gateway_client.h"
#include "gateway_common.h"
#include "hard-interface.h"
#include "originator.h"
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/udp.h>
#include <linux/if_vlan.h>
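
/* drop a gw_node reference; once the refcount reaches zero the node is
 * freed via kfree_rcu
 */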
static void gw_node_free_ref(struct gw_node *gw_node)
{
	if (atomic_dec_and_test(&gw_node->refcount))
		kfree_rcu(gw_node, rcu);
}
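
/* return the currently selected gateway node with its refcount
 * increased, or NULL if no gateway is selected
 */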
static struct gw_node *gw_get_selected_gw_node(struct bat_priv *bat_priv)
{
	struct gw_node *gw_node;

	rcu_read_lock();
	gw_node = rcu_dereference(bat_priv->curr_gw);
	if (!gw_node)
		goto out;

	if (!atomic_inc_not_zero(&gw_node->refcount))
		gw_node = NULL;

out:
	rcu_read_unlock();
	return gw_node;
}
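
/* return the originator of the currently selected gateway with its
 * refcount increased, or NULL if no gateway is selected
 */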
struct orig_node *gw_get_selected_orig(struct bat_priv *bat_priv)
{
	struct gw_node *gw_node;
	struct orig_node *orig_node = NULL;

	gw_node = gw_get_selected_gw_node(bat_priv);
	if (!gw_node)
		goto out;

	rcu_read_lock();
	orig_node = gw_node->orig_node;
	if (!orig_node)
		goto unlock;

	if (!atomic_inc_not_zero(&orig_node->refcount))
		orig_node = NULL;

unlock:
	rcu_read_unlock();
out:
	if (gw_node)
		gw_node_free_ref(gw_node);
	return orig_node;
}
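
/* make new_gw_node the selected gateway: take a reference on it,
 * publish it via rcu_assign_pointer() and drop the reference held on
 * the previously selected node
 */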
static void gw_select(struct bat_priv *bat_priv, struct gw_node *new_gw_node)
{
	struct gw_node *curr_gw_node;

	spin_lock_bh(&bat_priv->gw_list_lock);

	if (new_gw_node && !atomic_inc_not_zero(&new_gw_node->refcount))
		new_gw_node = NULL;

	curr_gw_node = rcu_dereference_protected(bat_priv->curr_gw, 1);
	rcu_assign_pointer(bat_priv->curr_gw, new_gw_node);

	if (curr_gw_node)
		gw_node_free_ref(curr_gw_node);

	spin_unlock_bh(&bat_priv->gw_list_lock);
}
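
/* request a new gateway election - the actual re-selection is done by
 * gw_election()
 */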
void gw_deselect(struct bat_priv *bat_priv)
{
	atomic_set(&bat_priv->gw_reselect, 1);
}
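
/* walk the gateway list and return the best candidate (refcount
 * increased): selection class 1 weighs the link quality against the
 * advertised download bandwidth, all other classes simply pick the
 * gateway with the highest TQ
 */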
static struct gw_node *gw_get_best_gw_node(struct bat_priv *bat_priv)
{
	struct neigh_node *router;
	struct hlist_node *node;
	struct gw_node *gw_node, *curr_gw = NULL;
	uint32_t max_gw_factor = 0, tmp_gw_factor = 0;
	uint8_t max_tq = 0;
	int down, up;

	rcu_read_lock();
	hlist_for_each_entry_rcu(gw_node, node, &bat_priv->gw_list, list) {
		if (gw_node->deleted)
			continue;

		router = orig_node_get_router(gw_node->orig_node);
		if (!router)
			continue;

		if (!atomic_inc_not_zero(&gw_node->refcount))
			goto next;

		switch (atomic_read(&bat_priv->gw_sel_class)) {
		case 1: /* fast connection */
			gw_bandwidth_to_kbit(gw_node->orig_node->gw_flags,
					     &down, &up);

			tmp_gw_factor = (router->tq_avg * router->tq_avg *
					 down * 100 * 100) /
					 (TQ_LOCAL_WINDOW_SIZE *
					  TQ_LOCAL_WINDOW_SIZE * 64);

			if ((tmp_gw_factor > max_gw_factor) ||
			    ((tmp_gw_factor == max_gw_factor) &&
			     (router->tq_avg > max_tq))) {
				if (curr_gw)
					gw_node_free_ref(curr_gw);
				curr_gw = gw_node;
				atomic_inc(&curr_gw->refcount);
			}
			break;

		default: /**
			  * 2:  stable connection (use best statistic)
			  * 3:  fast-switch (use best statistic but change as
			  *     soon as a better gateway appears)
			  * XX: late-switch (use best statistic but change as
			  *     soon as a better gateway appears which has
			  *     $routing_class more tq points)
			  **/
			if (router->tq_avg > max_tq) {
				if (curr_gw)
					gw_node_free_ref(curr_gw);
				curr_gw = gw_node;
				atomic_inc(&curr_gw->refcount);
			}
			break;
		}

		if (router->tq_avg > max_tq)
			max_tq = router->tq_avg;

		if (tmp_gw_factor > max_gw_factor)
			max_gw_factor = tmp_gw_factor;

		gw_node_free_ref(gw_node);

next:
		neigh_node_free_ref(router);
	}
	rcu_read_unlock();

	return curr_gw;
}
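
/* elect a new gateway if this node runs in client mode and a
 * re-election was requested via gw_deselect()
 */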
void gw_election(struct bat_priv *bat_priv)
{
	struct gw_node *curr_gw = NULL, *next_gw = NULL;
	struct neigh_node *router = NULL;
	char gw_addr[18] = { '\0' };

	/**
	 * The batman daemon checks here if we already passed a full originator
	 * cycle in order to make sure we don't choose the first gateway we
	 * hear about. This check is based on the daemon's uptime which we
	 * don't have.
	 **/
	if (atomic_read(&bat_priv->gw_mode) != GW_MODE_CLIENT)
		goto out;

	if (!atomic_dec_not_zero(&bat_priv->gw_reselect))
		goto out;

	curr_gw = gw_get_selected_gw_node(bat_priv);
	next_gw = gw_get_best_gw_node(bat_priv);

	if (curr_gw == next_gw)
		goto out;

	if (next_gw) {
		sprintf(gw_addr, "%pM", next_gw->orig_node->orig);

		router = orig_node_get_router(next_gw->orig_node);
		if (!router) {
			gw_deselect(bat_priv);
			goto out;
		}
	}

	if ((curr_gw) && (!next_gw)) {
		bat_dbg(DBG_BATMAN, bat_priv,
			"Removing selected gateway - no gateway in range\n");
		throw_uevent(bat_priv, UEV_GW, UEV_DEL, NULL);
	} else if ((!curr_gw) && (next_gw)) {
		bat_dbg(DBG_BATMAN, bat_priv,
			"Adding route to gateway %pM (gw_flags: %i, tq: %i)\n",
			next_gw->orig_node->orig,
			next_gw->orig_node->gw_flags,
			router->tq_avg);
		throw_uevent(bat_priv, UEV_GW, UEV_ADD, gw_addr);
	} else {
		bat_dbg(DBG_BATMAN, bat_priv,
			"Changing route to gateway %pM "
			"(gw_flags: %i, tq: %i)\n",
			next_gw->orig_node->orig,
			next_gw->orig_node->gw_flags,
			router->tq_avg);
		throw_uevent(bat_priv, UEV_GW, UEV_CHANGE, gw_addr);
	}

	gw_select(bat_priv, next_gw);

out:
	if (curr_gw)
		gw_node_free_ref(curr_gw);
	if (next_gw)
		gw_node_free_ref(next_gw);
	if (router)
		neigh_node_free_ref(router);
}
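
/* check whether orig_node would be a better gateway than the currently
 * selected one and trigger a re-election if so
 */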
void gw_check_election(struct bat_priv *bat_priv, struct orig_node *orig_node)
{
	struct orig_node *curr_gw_orig;
	struct neigh_node *router_gw = NULL, *router_orig = NULL;
	uint8_t gw_tq_avg, orig_tq_avg;

	curr_gw_orig = gw_get_selected_orig(bat_priv);
	if (!curr_gw_orig)
		goto deselect;

	router_gw = orig_node_get_router(curr_gw_orig);
	if (!router_gw)
		goto deselect;

	/* this node already is the gateway */
	if (curr_gw_orig == orig_node)
		goto out;

	router_orig = orig_node_get_router(orig_node);
	if (!router_orig)
		goto out;

	gw_tq_avg = router_gw->tq_avg;
	orig_tq_avg = router_orig->tq_avg;

	/* the TQ value has to be better */
	if (orig_tq_avg < gw_tq_avg)
		goto out;

	/**
	 * if the routing class is greater than 3 the value tells us how much
	 * greater the TQ value of the new gateway must be
	 **/
	if ((atomic_read(&bat_priv->gw_sel_class) > 3) &&
	    (orig_tq_avg - gw_tq_avg < atomic_read(&bat_priv->gw_sel_class)))
		goto out;

	bat_dbg(DBG_BATMAN, bat_priv,
		"Restarting gateway selection: better gateway found (tq curr: "
		"%i, tq new: %i)\n",
		gw_tq_avg, orig_tq_avg);

deselect:
	gw_deselect(bat_priv);
out:
	if (curr_gw_orig)
		orig_node_free_ref(curr_gw_orig);
	if (router_gw)
		neigh_node_free_ref(router_gw);
	if (router_orig)
		neigh_node_free_ref(router_orig);

	return;
}
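
/* allocate a gw_node for orig_node and add it to the gateway list */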
static void gw_node_add(struct bat_priv *bat_priv,
			struct orig_node *orig_node, uint8_t new_gwflags)
{
	struct gw_node *gw_node;
	int down, up;

	gw_node = kzalloc(sizeof(*gw_node), GFP_ATOMIC);
	if (!gw_node)
		return;

	INIT_HLIST_NODE(&gw_node->list);
	gw_node->orig_node = orig_node;
	atomic_set(&gw_node->refcount, 1);

	spin_lock_bh(&bat_priv->gw_list_lock);
	hlist_add_head_rcu(&gw_node->list, &bat_priv->gw_list);
	spin_unlock_bh(&bat_priv->gw_list_lock);

	gw_bandwidth_to_kbit(new_gwflags, &down, &up);
	bat_dbg(DBG_BATMAN, bat_priv,
		"Found new gateway %pM -> gw_class: %i - %i%s/%i%s\n",
		orig_node->orig, new_gwflags,
		(down > 2048 ? down / 1024 : down),
		(down > 2048 ? "MBit" : "KBit"),
		(up > 2048 ? up / 1024 : up),
		(up > 2048 ? "MBit" : "KBit"));
}
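
/* refresh the list entry of orig_node after its gateway flags changed:
 * mark the entry as deleted if the flags were cleared or create a new
 * entry for a previously unknown gateway
 */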
void gw_node_update(struct bat_priv *bat_priv,
		    struct orig_node *orig_node, uint8_t new_gwflags)
{
	struct hlist_node *node;
	struct gw_node *gw_node, *curr_gw;

	/**
	 * Note: We don't need a NULL check here, since curr_gw never gets
	 * dereferenced. If curr_gw is NULL we also should not exit as we may
	 * have this gateway in our list (duplication check!) even though we
	 * have no currently selected gateway.
	 */
	curr_gw = gw_get_selected_gw_node(bat_priv);

	rcu_read_lock();
	hlist_for_each_entry_rcu(gw_node, node, &bat_priv->gw_list, list) {
		if (gw_node->orig_node != orig_node)
			continue;

		bat_dbg(DBG_BATMAN, bat_priv,
			"Gateway class of originator %pM changed from "
			"%i to %i\n",
			orig_node->orig, gw_node->orig_node->gw_flags,
			new_gwflags);

		gw_node->deleted = 0;

		if (new_gwflags == NO_FLAGS) {
			gw_node->deleted = jiffies;
			bat_dbg(DBG_BATMAN, bat_priv,
				"Gateway %pM removed from gateway list\n",
				orig_node->orig);

			if (gw_node == curr_gw)
				goto deselect;
		}

		goto unlock;
	}

	if (new_gwflags == NO_FLAGS)
		goto unlock;

	gw_node_add(bat_priv, orig_node, new_gwflags);
	goto unlock;

deselect:
	gw_deselect(bat_priv);
unlock:
	rcu_read_unlock();

	if (curr_gw)
		gw_node_free_ref(curr_gw);
}
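
/* mark the gateway entry of orig_node as deleted */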
void gw_node_delete(struct bat_priv *bat_priv, struct orig_node *orig_node)
{
	gw_node_update(bat_priv, orig_node, 0);
}
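
/* remove gateway entries that have been marked as deleted for more
 * than 2 * PURGE_TIMEOUT, or all entries if the mesh is no longer
 * active
 */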
void gw_node_purge(struct bat_priv *bat_priv)
{
	struct gw_node *gw_node, *curr_gw;
	struct hlist_node *node, *node_tmp;
	unsigned long timeout = 2 * PURGE_TIMEOUT * HZ;
	int do_deselect = 0;

	curr_gw = gw_get_selected_gw_node(bat_priv);

	spin_lock_bh(&bat_priv->gw_list_lock);

	hlist_for_each_entry_safe(gw_node, node, node_tmp,
				  &bat_priv->gw_list, list) {
		if (((!gw_node->deleted) ||
		     (time_before(jiffies, gw_node->deleted + timeout))) &&
		    atomic_read(&bat_priv->mesh_state) == MESH_ACTIVE)
			continue;

		if (curr_gw == gw_node)
			do_deselect = 1;

		hlist_del_rcu(&gw_node->list);
		gw_node_free_ref(gw_node);
	}

	spin_unlock_bh(&bat_priv->gw_list_lock);

	/* gw_deselect() needs to acquire the gw_list_lock */
	if (do_deselect)
		gw_deselect(bat_priv);

	if (curr_gw)
		gw_node_free_ref(curr_gw);
}

/**
 * fails if orig_node has no router
 */
static int _write_buffer_text(struct bat_priv *bat_priv, struct seq_file *seq,
			      const struct gw_node *gw_node)
{
	struct gw_node *curr_gw;
	struct neigh_node *router;
	int down, up, ret = -1;

	gw_bandwidth_to_kbit(gw_node->orig_node->gw_flags, &down, &up);

	router = orig_node_get_router(gw_node->orig_node);
	if (!router)
		goto out;

	curr_gw = gw_get_selected_gw_node(bat_priv);

	ret = seq_printf(seq, "%s %pM (%3i) %pM [%10s]: %3i - %i%s/%i%s\n",
			 (curr_gw == gw_node ? "=>" : "  "),
			 gw_node->orig_node->orig,
			 router->tq_avg, router->addr,
			 router->if_incoming->net_dev->name,
			 gw_node->orig_node->gw_flags,
			 (down > 2048 ? down / 1024 : down),
			 (down > 2048 ? "MBit" : "KBit"),
			 (up > 2048 ? up / 1024 : up),
			 (up > 2048 ? "MBit" : "KBit"));

	neigh_node_free_ref(router);

	if (curr_gw)
		gw_node_free_ref(curr_gw);
out:
	return ret;
}
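
/* seq_file handler printing the list of known gateways */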
int gw_client_seq_print_text(struct seq_file *seq, void *offset)
{
	struct net_device *net_dev = (struct net_device *)seq->private;
	struct bat_priv *bat_priv = netdev_priv(net_dev);
	struct hard_iface *primary_if;
	struct gw_node *gw_node;
	struct hlist_node *node;
	int gw_count = 0, ret = 0;

	primary_if = primary_if_get_selected(bat_priv);
	if (!primary_if) {
		ret = seq_printf(seq, "BATMAN mesh %s disabled - please "
				 "specify interfaces to enable it\n",
				 net_dev->name);
		goto out;
	}

	if (primary_if->if_status != IF_ACTIVE) {
		ret = seq_printf(seq, "BATMAN mesh %s disabled - "
				 "primary interface not active\n",
				 net_dev->name);
		goto out;
	}

	seq_printf(seq, "      %-12s (%s/%i) %17s [%10s]: gw_class ... "
		   "[B.A.T.M.A.N. adv %s%s, MainIF/MAC: %s/%pM (%s)]\n",
		   "Gateway", "#", TQ_MAX_VALUE, "Nexthop",
		   "outgoingIF", SOURCE_VERSION, REVISION_VERSION_STR,
		   primary_if->net_dev->name,
		   primary_if->net_dev->dev_addr, net_dev->name);

	rcu_read_lock();
	hlist_for_each_entry_rcu(gw_node, node, &bat_priv->gw_list, list) {
		if (gw_node->deleted)
			continue;

		/* fails if orig_node has no router */
		if (_write_buffer_text(bat_priv, seq, gw_node) < 0)
			continue;

		gw_count++;
	}
	rcu_read_unlock();

	if (gw_count == 0)
		seq_printf(seq, "No gateways in range ...\n");

out:
	if (primary_if)
		hardif_free_ref(primary_if);
	return ret;
}
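
/* check whether skb is a DHCP request (UDP destination port 67 for
 * IPv4, 547 for IPv6): returns -1 if this node acts as a gateway
 * server itself, 1 if the packet should be handled by the selected
 * gateway and 0 otherwise
 */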
int gw_is_target(struct bat_priv *bat_priv, struct sk_buff *skb)
{
	struct ethhdr *ethhdr;
	struct iphdr *iphdr;
	struct ipv6hdr *ipv6hdr;
	struct udphdr *udphdr;
	struct gw_node *curr_gw;
	unsigned int header_len = 0;

	if (atomic_read(&bat_priv->gw_mode) == GW_MODE_OFF)
		return 0;

	/* check for ethernet header */
	if (!pskb_may_pull(skb, header_len + ETH_HLEN))
		return 0;
	ethhdr = (struct ethhdr *)skb->data;
	header_len += ETH_HLEN;

	/* check for initial vlan header */
	if (ntohs(ethhdr->h_proto) == ETH_P_8021Q) {
		if (!pskb_may_pull(skb, header_len + VLAN_HLEN))
			return 0;
		ethhdr = (struct ethhdr *)(skb->data + VLAN_HLEN);
		header_len += VLAN_HLEN;
	}

	/* check for ip header */
	switch (ntohs(ethhdr->h_proto)) {
	case ETH_P_IP:
		if (!pskb_may_pull(skb, header_len + sizeof(*iphdr)))
			return 0;
		iphdr = (struct iphdr *)(skb->data + header_len);
		header_len += iphdr->ihl * 4;

		/* check for udp header */
		if (iphdr->protocol != IPPROTO_UDP)
			return 0;

		break;
	case ETH_P_IPV6:
		if (!pskb_may_pull(skb, header_len + sizeof(*ipv6hdr)))
			return 0;
		ipv6hdr = (struct ipv6hdr *)(skb->data + header_len);
		header_len += sizeof(*ipv6hdr);

		/* check for udp header */
		if (ipv6hdr->nexthdr != IPPROTO_UDP)
			return 0;

		break;
	default:
		return 0;
	}

	if (!pskb_may_pull(skb, header_len + sizeof(*udphdr)))
		return 0;
	udphdr = (struct udphdr *)(skb->data + header_len);
	header_len += sizeof(*udphdr);

	/* check for bootp port */
	if ((ntohs(ethhdr->h_proto) == ETH_P_IP) &&
	    (ntohs(udphdr->dest) != 67))
		return 0;

	if ((ntohs(ethhdr->h_proto) == ETH_P_IPV6) &&
	    (ntohs(udphdr->dest) != 547))
		return 0;

	if (atomic_read(&bat_priv->gw_mode) == GW_MODE_SERVER)
		return -1;

	curr_gw = gw_get_selected_gw_node(bat_priv);
	if (!curr_gw)
		return 0;

	if (curr_gw)
		gw_node_free_ref(curr_gw);
	return 1;
}