vis.c

/*
 * Copyright (C) 2008-2010 B.A.T.M.A.N. contributors:
 *
 * Simon Wunderlich
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
 * 02110-1301, USA
 *
 */

#include "main.h"
#include "send.h"
#include "translation-table.h"
#include "vis.h"
#include "soft-interface.h"
#include "hard-interface.h"
#include "hash.h"
#include "originator.h"

#define MAX_VIS_PACKET_SIZE 1000
/* Returns the smallest signed integer in two's complement with the sizeof x */
#define smallest_signed_int(x) (1u << (7u + 8u * (sizeof(x) - 1u)))

/* Checks if a sequence number x is a predecessor/successor of y.
 * These checks handle overflows/underflows and can correctly check for a
 * predecessor/successor unless the variable sequence number has grown by
 * more than 2**(bitwidth(x)-1)-1.
 * This means that for a uint8_t with the maximum value 255, it would think:
 *  - when adding nothing - it is neither a predecessor nor a successor
 *  - before adding more than 127 to the starting value - it is a predecessor,
 *  - when adding 128 - it is neither a predecessor nor a successor,
 *  - after adding more than 127 to the starting value - it is a successor */
#define seq_before(x, y) ({typeof(x) _dummy = (x - y); \
                          _dummy > smallest_signed_int(_dummy); })
#define seq_after(x, y) seq_before(y, x)
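
/*
 * Worked example of the macros above (illustration only): with uint8_t
 * sequence numbers, seq_before(250, 2) evaluates (uint8_t)(250 - 2) == 248,
 * and 248 > smallest_signed_int() == 128, so 250 counts as a predecessor of
 * 2 across the wraparound; seq_after(2, 250) is therefore true as well. For
 * a difference of exactly 128 neither macro reports true, as described above.
 */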

static void start_vis_timer(struct bat_priv *bat_priv);

/* free the info */
static void free_info(struct kref *ref)
{
        struct vis_info *info = container_of(ref, struct vis_info, refcount);
        struct bat_priv *bat_priv = info->bat_priv;
        struct recvlist_node *entry, *tmp;

        list_del_init(&info->send_list);
        spin_lock_bh(&bat_priv->vis_list_lock);
        list_for_each_entry_safe(entry, tmp, &info->recv_list, list) {
                list_del(&entry->list);
                kfree(entry);
        }
        spin_unlock_bh(&bat_priv->vis_list_lock);
        kfree_skb(info->skb_packet);
        /* the vis_info struct itself was kmalloc'd; free it as well */
        kfree(info);
}

/* Compare two vis packets, used by the hashing algorithm */
static int vis_info_cmp(void *data1, void *data2)
{
        struct vis_info *d1, *d2;
        struct vis_packet *p1, *p2;

        d1 = data1;
        d2 = data2;
        p1 = (struct vis_packet *)d1->skb_packet->data;
        p2 = (struct vis_packet *)d2->skb_packet->data;
        return compare_orig(p1->vis_orig, p2->vis_orig);
}

/* hash function to choose an entry in a hash table of given size */
/* hash algorithm from http://en.wikipedia.org/wiki/Hash_table */
static int vis_info_choose(void *data, int size)
{
        struct vis_info *vis_info = data;
        struct vis_packet *packet;
        unsigned char *key;
        uint32_t hash = 0;
        size_t i;

        packet = (struct vis_packet *)vis_info->skb_packet->data;
        key = packet->vis_orig;
        for (i = 0; i < ETH_ALEN; i++) {
                hash += key[i];
                hash += (hash << 10);
                hash ^= (hash >> 6);
        }

        hash += (hash << 3);
        hash ^= (hash >> 11);
        hash += (hash << 15);

        return hash % size;
}
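
/*
 * Note: the mixing steps in vis_info_choose() are Bob Jenkins' one-at-a-time
 * hash, keyed on the ETH_ALEN bytes of the packet's vis_orig address; the
 * 32 bit result is reduced modulo the table size to pick a bucket.
 */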

/* insert an interface into the list of interfaces of one originator, if it
 * does not already exist in the list */
static void vis_data_insert_interface(const uint8_t *interface,
                                      struct hlist_head *if_list,
                                      bool primary)
{
        struct if_list_entry *entry;
        struct hlist_node *pos;

        hlist_for_each_entry(entry, pos, if_list, list) {
                if (compare_orig(entry->addr, (void *)interface))
                        return;
        }

        /* it's a new address, add it to the list */
        entry = kmalloc(sizeof(*entry), GFP_ATOMIC);
        if (!entry)
                return;

        memcpy(entry->addr, interface, ETH_ALEN);
        entry->primary = primary;
        hlist_add_head(&entry->list, if_list);
}

static ssize_t vis_data_read_prim_sec(char *buff, struct hlist_head *if_list)
{
        struct if_list_entry *entry;
        struct hlist_node *pos;
        size_t len = 0;

        hlist_for_each_entry(entry, pos, if_list, list) {
                if (entry->primary)
                        len += sprintf(buff + len, "PRIMARY, ");
                else
                        len += sprintf(buff + len, "SEC %pM, ", entry->addr);
        }

        return len;
}

static size_t vis_data_count_prim_sec(struct hlist_head *if_list)
{
        struct if_list_entry *entry;
        struct hlist_node *pos;
        size_t count = 0;

        hlist_for_each_entry(entry, pos, if_list, list) {
                if (entry->primary)
                        count += 9;     /* strlen("PRIMARY, ") */
                else
                        count += 23;    /* strlen("SEC ") + 17 byte MAC + ", " */
        }

        return count;
}

/* read an entry */
static ssize_t vis_data_read_entry(char *buff, struct vis_info_entry *entry,
                                   uint8_t *src, bool primary)
{
        /* maximal length: max(4+17+2, 3+17+1+3+2) == 26 */
        if (primary && entry->quality == 0)
                return sprintf(buff, "HNA %pM, ", entry->dest);
        else if (compare_orig(entry->src, src))
                return sprintf(buff, "TQ %pM %d, ", entry->dest,
                               entry->quality);

        return 0;
}

int vis_seq_print_text(struct seq_file *seq, void *offset)
{
        struct hlist_node *walk;
        struct hlist_head *head;
        struct element_t *bucket;
        struct vis_info *info;
        struct vis_packet *packet;
        struct vis_info_entry *entries;
        struct net_device *net_dev = (struct net_device *)seq->private;
        struct bat_priv *bat_priv = netdev_priv(net_dev);
        struct hashtable_t *hash = bat_priv->vis_hash;
        HLIST_HEAD(vis_if_list);
        struct if_list_entry *entry;
        struct hlist_node *pos, *n;
        int i, j;
        int vis_server = atomic_read(&bat_priv->vis_mode);
        size_t buff_pos, buf_size;
        char *buff;
        int compare;

        if ((!bat_priv->primary_if) ||
            (vis_server == VIS_TYPE_CLIENT_UPDATE))
                return 0;

        buf_size = 1;
        /* Estimate length */
        spin_lock_bh(&bat_priv->vis_hash_lock);
        for (i = 0; i < hash->size; i++) {
                head = &hash->table[i];

                hlist_for_each_entry(bucket, walk, head, hlist) {
                        info = bucket->data;
                        packet = (struct vis_packet *)info->skb_packet->data;
                        entries = (struct vis_info_entry *)
                                ((char *)packet + sizeof(struct vis_packet));

                        for (j = 0; j < packet->entries; j++) {
                                if (entries[j].quality == 0)
                                        continue;
                                compare =
                                 compare_orig(entries[j].src, packet->vis_orig);
                                vis_data_insert_interface(entries[j].src,
                                                          &vis_if_list,
                                                          compare);
                        }

                        hlist_for_each_entry(entry, pos, &vis_if_list, list) {
                                buf_size += 18 + 26 * packet->entries;

                                /* add primary/secondary records */
                                if (compare_orig(entry->addr, packet->vis_orig))
                                        buf_size +=
                                          vis_data_count_prim_sec(&vis_if_list);

                                buf_size += 1;
                        }

                        hlist_for_each_entry_safe(entry, pos, n, &vis_if_list,
                                                  list) {
                                hlist_del(&entry->list);
                                kfree(entry);
                        }
                }
        }

        buff = kmalloc(buf_size, GFP_ATOMIC);
        if (!buff) {
                spin_unlock_bh(&bat_priv->vis_hash_lock);
                return -ENOMEM;
        }
        buff[0] = '\0';
        buff_pos = 0;

        for (i = 0; i < hash->size; i++) {
                head = &hash->table[i];

                hlist_for_each_entry(bucket, walk, head, hlist) {
                        info = bucket->data;
                        packet = (struct vis_packet *)info->skb_packet->data;
                        entries = (struct vis_info_entry *)
                                ((char *)packet + sizeof(struct vis_packet));

                        for (j = 0; j < packet->entries; j++) {
                                if (entries[j].quality == 0)
                                        continue;
                                compare =
                                 compare_orig(entries[j].src, packet->vis_orig);
                                vis_data_insert_interface(entries[j].src,
                                                          &vis_if_list,
                                                          compare);
                        }

                        hlist_for_each_entry(entry, pos, &vis_if_list, list) {
                                buff_pos += sprintf(buff + buff_pos, "%pM,",
                                                    entry->addr);
                                /* use j here: reusing i would clobber the
                                 * outer hash bucket index */
                                for (j = 0; j < packet->entries; j++)
                                        buff_pos += vis_data_read_entry(
                                                        buff + buff_pos,
                                                        &entries[j],
                                                        entry->addr,
                                                        entry->primary);

                                /* add primary/secondary records */
                                if (compare_orig(entry->addr, packet->vis_orig))
                                        buff_pos +=
                                         vis_data_read_prim_sec(buff + buff_pos,
                                                                &vis_if_list);

                                buff_pos += sprintf(buff + buff_pos, "\n");
                        }

                        hlist_for_each_entry_safe(entry, pos, n, &vis_if_list,
                                                  list) {
                                hlist_del(&entry->list);
                                kfree(entry);
                        }
                }
        }

        spin_unlock_bh(&bat_priv->vis_hash_lock);

        seq_printf(seq, "%s", buff);
        kfree(buff);

        return 0;
}
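
/*
 * The text generated above (one line per interface appearing in a vis packet)
 * follows directly from the sprintf calls:
 *   <if MAC>,TQ <dest> <quality>, ... HNA <dest>, ... PRIMARY, / SEC <MAC>, ...
 * where the PRIMARY/SEC records are only appended on the line whose interface
 * address matches the packet's vis_orig.
 */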

/* add the info packet to the send list, if it was not
 * already linked in. */
static void send_list_add(struct bat_priv *bat_priv, struct vis_info *info)
{
        if (list_empty(&info->send_list)) {
                kref_get(&info->refcount);
                list_add_tail(&info->send_list, &bat_priv->vis_send_list);
        }
}

/* delete the info packet from the send list, if it was
 * linked in. */
static void send_list_del(struct vis_info *info)
{
        if (!list_empty(&info->send_list)) {
                list_del_init(&info->send_list);
                kref_put(&info->refcount, free_info);
        }
}

/* tries to add one entry to the receive list. */
static void recv_list_add(struct bat_priv *bat_priv,
                          struct list_head *recv_list, char *mac)
{
        struct recvlist_node *entry;

        entry = kmalloc(sizeof(struct recvlist_node), GFP_ATOMIC);
        if (!entry)
                return;

        memcpy(entry->mac, mac, ETH_ALEN);
        spin_lock_bh(&bat_priv->vis_list_lock);
        list_add_tail(&entry->list, recv_list);
        spin_unlock_bh(&bat_priv->vis_list_lock);
}

/* returns 1 if this mac is in the recv_list */
static int recv_list_is_in(struct bat_priv *bat_priv,
                           struct list_head *recv_list, char *mac)
{
        struct recvlist_node *entry;

        spin_lock_bh(&bat_priv->vis_list_lock);
        list_for_each_entry(entry, recv_list, list) {
                if (memcmp(entry->mac, mac, ETH_ALEN) == 0) {
                        spin_unlock_bh(&bat_priv->vis_list_lock);
                        return 1;
                }
        }
        spin_unlock_bh(&bat_priv->vis_list_lock);
        return 0;
}

/* try to add the packet to the vis_hash. return NULL if invalid (e.g. too old,
 * broken.. ). vis hash must be locked outside. is_new is set when the packet
 * is newer than old entries in the hash. */
static struct vis_info *add_packet(struct bat_priv *bat_priv,
                                   struct vis_packet *vis_packet,
                                   int vis_info_len, int *is_new,
                                   int make_broadcast)
{
        struct vis_info *info, *old_info;
        struct vis_packet *search_packet, *old_packet;
        struct vis_info search_elem;
        struct vis_packet *packet;
        int hash_added;

        *is_new = 0;
        /* sanity check */
        if (!bat_priv->vis_hash)
                return NULL;

        /* see if the packet is already in vis_hash */
        search_elem.skb_packet = dev_alloc_skb(sizeof(struct vis_packet));
        if (!search_elem.skb_packet)
                return NULL;
        search_packet = (struct vis_packet *)skb_put(search_elem.skb_packet,
                                                     sizeof(struct vis_packet));

        memcpy(search_packet->vis_orig, vis_packet->vis_orig, ETH_ALEN);
        old_info = hash_find(bat_priv->vis_hash, vis_info_cmp, vis_info_choose,
                             &search_elem);
        kfree_skb(search_elem.skb_packet);

        if (old_info) {
                old_packet = (struct vis_packet *)old_info->skb_packet->data;
                if (!seq_after(ntohl(vis_packet->seqno),
                               ntohl(old_packet->seqno))) {
                        if (old_packet->seqno == vis_packet->seqno) {
                                recv_list_add(bat_priv, &old_info->recv_list,
                                              vis_packet->sender_orig);
                                return old_info;
                        } else {
                                /* newer packet is already in hash. */
                                return NULL;
                        }
                }
                /* remove old entry */
                hash_remove(bat_priv->vis_hash, vis_info_cmp, vis_info_choose,
                            old_info);
                send_list_del(old_info);
                kref_put(&old_info->refcount, free_info);
        }

        info = kmalloc(sizeof(struct vis_info), GFP_ATOMIC);
        if (!info)
                return NULL;

        info->skb_packet = dev_alloc_skb(sizeof(struct vis_packet) +
                                         vis_info_len + sizeof(struct ethhdr));
        if (!info->skb_packet) {
                kfree(info);
                return NULL;
        }
        skb_reserve(info->skb_packet, sizeof(struct ethhdr));
        packet = (struct vis_packet *)skb_put(info->skb_packet,
                                              sizeof(struct vis_packet) +
                                              vis_info_len);

        kref_init(&info->refcount);
        INIT_LIST_HEAD(&info->send_list);
        INIT_LIST_HEAD(&info->recv_list);
        info->first_seen = jiffies;
        info->bat_priv = bat_priv;
        memcpy(packet, vis_packet, sizeof(struct vis_packet) + vis_info_len);

        /* initialize and add new packet. */
        *is_new = 1;

        /* Make it a broadcast packet, if required */
        if (make_broadcast)
                memcpy(packet->target_orig, broadcast_addr, ETH_ALEN);

        /* repair if entries is longer than packet. */
        if (packet->entries * sizeof(struct vis_info_entry) > vis_info_len)
                packet->entries = vis_info_len / sizeof(struct vis_info_entry);

        recv_list_add(bat_priv, &info->recv_list, packet->sender_orig);

        /* try to add it */
        hash_added = hash_add(bat_priv->vis_hash, vis_info_cmp, vis_info_choose,
                              info);
        if (hash_added < 0) {
                /* did not work (for some reason); drop the reference on the
                 * newly created info - old_info is either NULL or was already
                 * released above */
                kref_put(&info->refcount, free_info);
                info = NULL;
        }

        return info;
}

/* handle the server sync packet, forward if needed. */
void receive_server_sync_packet(struct bat_priv *bat_priv,
                                struct vis_packet *vis_packet,
                                int vis_info_len)
{
        struct vis_info *info;
        int is_new, make_broadcast;
        int vis_server = atomic_read(&bat_priv->vis_mode);

        make_broadcast = (vis_server == VIS_TYPE_SERVER_SYNC);

        spin_lock_bh(&bat_priv->vis_hash_lock);
        info = add_packet(bat_priv, vis_packet, vis_info_len,
                          &is_new, make_broadcast);
        if (!info)
                goto end;

        /* only if we are server ourselves and packet is newer than the one in
         * hash. */
        if (vis_server == VIS_TYPE_SERVER_SYNC && is_new)
                send_list_add(bat_priv, info);
end:
        spin_unlock_bh(&bat_priv->vis_hash_lock);
}

/* handle an incoming client update packet and schedule forward if needed. */
void receive_client_update_packet(struct bat_priv *bat_priv,
                                  struct vis_packet *vis_packet,
                                  int vis_info_len)
{
        struct vis_info *info;
        struct vis_packet *packet;
        int is_new;
        int vis_server = atomic_read(&bat_priv->vis_mode);
        int are_target = 0;

        /* clients shall not broadcast. */
        if (is_broadcast_ether_addr(vis_packet->target_orig))
                return;

        /* Are we the target for this VIS packet? */
        if (vis_server == VIS_TYPE_SERVER_SYNC &&
            is_my_mac(vis_packet->target_orig))
                are_target = 1;

        spin_lock_bh(&bat_priv->vis_hash_lock);
        info = add_packet(bat_priv, vis_packet, vis_info_len,
                          &is_new, are_target);
        if (!info)
                goto end;
        /* note that outdated packets will be dropped at this point. */

        packet = (struct vis_packet *)info->skb_packet->data;

        /* send only if we're the target server or ... */
        if (are_target && is_new) {
                packet->vis_type = VIS_TYPE_SERVER_SYNC;        /* upgrade! */
                send_list_add(bat_priv, info);

                /* ... we're not the recipient (and thus need to forward). */
        } else if (!is_my_mac(packet->target_orig)) {
                send_list_add(bat_priv, info);
        }
end:
        spin_unlock_bh(&bat_priv->vis_hash_lock);
}

/* Walk the originators and find the VIS server with the best tq. Set the
 * packet address to its address and return the best_tq.
 *
 * Must be called with the originator hash locked */
static int find_best_vis_server(struct bat_priv *bat_priv,
                                struct vis_info *info)
{
        struct hashtable_t *hash = bat_priv->orig_hash;
        struct hlist_node *walk;
        struct hlist_head *head;
        struct element_t *bucket;
        struct orig_node *orig_node;
        struct vis_packet *packet;
        int best_tq = -1, i;

        packet = (struct vis_packet *)info->skb_packet->data;

        for (i = 0; i < hash->size; i++) {
                head = &hash->table[i];

                hlist_for_each_entry(bucket, walk, head, hlist) {
                        orig_node = bucket->data;
                        if ((orig_node) && (orig_node->router) &&
                            (orig_node->flags & VIS_SERVER) &&
                            (orig_node->router->tq_avg > best_tq)) {
                                best_tq = orig_node->router->tq_avg;
                                memcpy(packet->target_orig, orig_node->orig,
                                       ETH_ALEN);
                        }
                }
        }

        return best_tq;
}

/* Return true if the vis packet is full. */
static bool vis_packet_full(struct vis_info *info)
{
        struct vis_packet *packet;

        packet = (struct vis_packet *)info->skb_packet->data;

        if (MAX_VIS_PACKET_SIZE / sizeof(struct vis_info_entry)
            < packet->entries + 1)
                return true;
        return false;
}
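
/*
 * Capacity note (assuming the on-wire struct vis_info_entry is packed with no
 * padding): an entry carries 6 byte src + 6 byte dest + 1 byte quality =
 * 13 bytes, so MAX_VIS_PACKET_SIZE / sizeof(struct vis_info_entry) permits
 * roughly 1000 / 13 = 76 entries per vis packet.
 */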

/* generates a packet of own vis data,
 * returns 0 on success, -1 if no packet could be generated */
static int generate_vis_packet(struct bat_priv *bat_priv)
{
        struct hashtable_t *hash = bat_priv->orig_hash;
        struct hlist_node *walk;
        struct hlist_head *head;
        struct element_t *bucket;
        struct orig_node *orig_node;
        struct neigh_node *neigh_node;
        struct vis_info *info = (struct vis_info *)bat_priv->my_vis_info;
        struct vis_packet *packet = (struct vis_packet *)info->skb_packet->data;
        struct vis_info_entry *entry;
        struct hna_local_entry *hna_local_entry;
        int best_tq = -1, i;

        info->first_seen = jiffies;
        packet->vis_type = atomic_read(&bat_priv->vis_mode);

        spin_lock_bh(&bat_priv->orig_hash_lock);
        memcpy(packet->target_orig, broadcast_addr, ETH_ALEN);
        packet->ttl = TTL;
        packet->seqno = htonl(ntohl(packet->seqno) + 1);
        packet->entries = 0;
        skb_trim(info->skb_packet, sizeof(struct vis_packet));

        if (packet->vis_type == VIS_TYPE_CLIENT_UPDATE) {
                best_tq = find_best_vis_server(bat_priv, info);

                if (best_tq < 0) {
                        spin_unlock_bh(&bat_priv->orig_hash_lock);
                        return -1;
                }
        }

        for (i = 0; i < hash->size; i++) {
                head = &hash->table[i];

                hlist_for_each_entry(bucket, walk, head, hlist) {
                        orig_node = bucket->data;
                        neigh_node = orig_node->router;

                        if (!neigh_node)
                                continue;

                        if (!compare_orig(neigh_node->addr, orig_node->orig))
                                continue;

                        if (neigh_node->if_incoming->if_status != IF_ACTIVE)
                                continue;

                        if (neigh_node->tq_avg < 1)
                                continue;

                        /* fill one entry into buffer. */
                        entry = (struct vis_info_entry *)
                                skb_put(info->skb_packet, sizeof(*entry));
                        memcpy(entry->src,
                               neigh_node->if_incoming->net_dev->dev_addr,
                               ETH_ALEN);
                        memcpy(entry->dest, orig_node->orig, ETH_ALEN);
                        entry->quality = neigh_node->tq_avg;
                        packet->entries++;

                        if (vis_packet_full(info)) {
                                spin_unlock_bh(&bat_priv->orig_hash_lock);
                                return 0;
                        }
                }
        }

        spin_unlock_bh(&bat_priv->orig_hash_lock);

        hash = bat_priv->hna_local_hash;

        spin_lock_bh(&bat_priv->hna_lhash_lock);
        for (i = 0; i < hash->size; i++) {
                head = &hash->table[i];

                hlist_for_each_entry(bucket, walk, head, hlist) {
                        hna_local_entry = bucket->data;
                        entry = (struct vis_info_entry *)
                                        skb_put(info->skb_packet,
                                                sizeof(*entry));
                        memset(entry->src, 0, ETH_ALEN);
                        memcpy(entry->dest, hna_local_entry->addr, ETH_ALEN);
                        entry->quality = 0; /* 0 means HNA */
                        packet->entries++;

                        if (vis_packet_full(info)) {
                                spin_unlock_bh(&bat_priv->hna_lhash_lock);
                                return 0;
                        }
                }
        }

        spin_unlock_bh(&bat_priv->hna_lhash_lock);
        return 0;
}

/* free old vis packets. Must be called with the vis_hash_lock
 * held */
static void purge_vis_packets(struct bat_priv *bat_priv)
{
        int i;
        struct hashtable_t *hash = bat_priv->vis_hash;
        struct hlist_node *walk, *safe;
        struct hlist_head *head;
        struct element_t *bucket;
        struct vis_info *info;

        for (i = 0; i < hash->size; i++) {
                head = &hash->table[i];

                hlist_for_each_entry_safe(bucket, walk, safe, head, hlist) {
                        info = bucket->data;

                        /* never purge own data. */
                        if (info == bat_priv->my_vis_info)
                                continue;

                        if (time_after(jiffies,
                                       info->first_seen + VIS_TIMEOUT * HZ)) {
                                hlist_del(walk);
                                kfree(bucket);
                                send_list_del(info);
                                kref_put(&info->refcount, free_info);
                        }
                }
        }
}

static void broadcast_vis_packet(struct bat_priv *bat_priv,
                                 struct vis_info *info)
{
        struct hashtable_t *hash = bat_priv->orig_hash;
        struct hlist_node *walk;
        struct hlist_head *head;
        struct element_t *bucket;
        struct orig_node *orig_node;
        struct vis_packet *packet;
        struct sk_buff *skb;
        struct batman_if *batman_if;
        uint8_t dstaddr[ETH_ALEN];
        int i;

        spin_lock_bh(&bat_priv->orig_hash_lock);
        packet = (struct vis_packet *)info->skb_packet->data;

        /* send to all routers in range. */
        for (i = 0; i < hash->size; i++) {
                head = &hash->table[i];

                hlist_for_each_entry(bucket, walk, head, hlist) {
                        orig_node = bucket->data;

                        /* if it's a vis server and reachable, send it. */
                        if ((!orig_node) || (!orig_node->router))
                                continue;
                        if (!(orig_node->flags & VIS_SERVER))
                                continue;
                        /* don't send it if we already received the packet from
                         * this node. */
                        if (recv_list_is_in(bat_priv, &info->recv_list,
                                            orig_node->orig))
                                continue;

                        memcpy(packet->target_orig, orig_node->orig, ETH_ALEN);
                        batman_if = orig_node->router->if_incoming;
                        memcpy(dstaddr, orig_node->router->addr, ETH_ALEN);
                        spin_unlock_bh(&bat_priv->orig_hash_lock);

                        skb = skb_clone(info->skb_packet, GFP_ATOMIC);
                        if (skb)
                                send_skb_packet(skb, batman_if, dstaddr);

                        spin_lock_bh(&bat_priv->orig_hash_lock);
                }
        }

        spin_unlock_bh(&bat_priv->orig_hash_lock);
}

static void unicast_vis_packet(struct bat_priv *bat_priv,
                               struct vis_info *info)
{
        struct orig_node *orig_node;
        struct sk_buff *skb;
        struct vis_packet *packet;
        struct batman_if *batman_if;
        uint8_t dstaddr[ETH_ALEN];

        spin_lock_bh(&bat_priv->orig_hash_lock);
        packet = (struct vis_packet *)info->skb_packet->data;
        orig_node = ((struct orig_node *)hash_find(bat_priv->orig_hash,
                                                   compare_orig, choose_orig,
                                                   packet->target_orig));

        if ((!orig_node) || (!orig_node->router))
                goto out;

        /* don't lock while sending the packets ... we therefore
         * copy the required data before sending */
        batman_if = orig_node->router->if_incoming;
        memcpy(dstaddr, orig_node->router->addr, ETH_ALEN);
        spin_unlock_bh(&bat_priv->orig_hash_lock);

        skb = skb_clone(info->skb_packet, GFP_ATOMIC);
        if (skb)
                send_skb_packet(skb, batman_if, dstaddr);

        return;

out:
        spin_unlock_bh(&bat_priv->orig_hash_lock);
}

/* only send one vis packet. called from send_vis_packets() */
static void send_vis_packet(struct bat_priv *bat_priv, struct vis_info *info)
{
        struct vis_packet *packet;

        packet = (struct vis_packet *)info->skb_packet->data;
        if (packet->ttl < 2) {
                pr_debug("Error - can't send vis packet: ttl exceeded\n");
                return;
        }

        memcpy(packet->sender_orig, bat_priv->primary_if->net_dev->dev_addr,
               ETH_ALEN);
        packet->ttl--;

        if (is_broadcast_ether_addr(packet->target_orig))
                broadcast_vis_packet(bat_priv, info);
        else
                unicast_vis_packet(bat_priv, info);
        packet->ttl++; /* restore TTL */
}
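
/*
 * The ttl is decremented only for the duration of the send and restored
 * afterwards: info->skb_packet stays in the vis hash, so without the restore
 * it would lose one hop of ttl every time the same buffer is scheduled for
 * (re)transmission.
 */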

/* called from timer; send (and maybe generate) vis packet. */
static void send_vis_packets(struct work_struct *work)
{
        struct delayed_work *delayed_work =
                container_of(work, struct delayed_work, work);
        struct bat_priv *bat_priv =
                container_of(delayed_work, struct bat_priv, vis_work);
        struct vis_info *info, *temp;

        spin_lock_bh(&bat_priv->vis_hash_lock);
        purge_vis_packets(bat_priv);

        if (generate_vis_packet(bat_priv) == 0) {
                /* schedule if generation was successful */
                send_list_add(bat_priv, bat_priv->my_vis_info);
        }

        list_for_each_entry_safe(info, temp, &bat_priv->vis_send_list,
                                 send_list) {

                kref_get(&info->refcount);
                spin_unlock_bh(&bat_priv->vis_hash_lock);

                if (bat_priv->primary_if)
                        send_vis_packet(bat_priv, info);

                spin_lock_bh(&bat_priv->vis_hash_lock);
                send_list_del(info);
                kref_put(&info->refcount, free_info);
        }

        spin_unlock_bh(&bat_priv->vis_hash_lock);
        start_vis_timer(bat_priv);
}

/* init the vis server. this may only be called when if_list is already
 * initialized (e.g. bat0 is initialized, interfaces have been added) */
int vis_init(struct bat_priv *bat_priv)
{
        struct vis_packet *packet;
        int hash_added;

        if (bat_priv->vis_hash)
                return 1;

        spin_lock_bh(&bat_priv->vis_hash_lock);

        bat_priv->vis_hash = hash_new(256);
        if (!bat_priv->vis_hash) {
                pr_err("Can't initialize vis_hash\n");
                goto err;
        }

        bat_priv->my_vis_info = kmalloc(MAX_VIS_PACKET_SIZE, GFP_ATOMIC);
        if (!bat_priv->my_vis_info) {
                pr_err("Can't initialize vis packet\n");
                goto err;
        }

        bat_priv->my_vis_info->skb_packet = dev_alloc_skb(
                                                sizeof(struct vis_packet) +
                                                MAX_VIS_PACKET_SIZE +
                                                sizeof(struct ethhdr));
        if (!bat_priv->my_vis_info->skb_packet)
                goto free_info;

        skb_reserve(bat_priv->my_vis_info->skb_packet, sizeof(struct ethhdr));
        packet = (struct vis_packet *)skb_put(
                                        bat_priv->my_vis_info->skb_packet,
                                        sizeof(struct vis_packet));

        /* prefill the vis info */
        bat_priv->my_vis_info->first_seen = jiffies -
                                                msecs_to_jiffies(VIS_INTERVAL);
        INIT_LIST_HEAD(&bat_priv->my_vis_info->recv_list);
        INIT_LIST_HEAD(&bat_priv->my_vis_info->send_list);
        kref_init(&bat_priv->my_vis_info->refcount);
        bat_priv->my_vis_info->bat_priv = bat_priv;
        packet->version = COMPAT_VERSION;
        packet->packet_type = BAT_VIS;
        packet->ttl = TTL;
        packet->seqno = 0;
        packet->entries = 0;

        INIT_LIST_HEAD(&bat_priv->vis_send_list);

        hash_added = hash_add(bat_priv->vis_hash, vis_info_cmp, vis_info_choose,
                              bat_priv->my_vis_info);
        if (hash_added < 0) {
                pr_err("Can't add own vis packet into hash\n");
                /* not in hash, need to remove it manually. */
                kref_put(&bat_priv->my_vis_info->refcount, free_info);
                goto err;
        }

        spin_unlock_bh(&bat_priv->vis_hash_lock);
        start_vis_timer(bat_priv);
        return 1;

free_info:
        kfree(bat_priv->my_vis_info);
        bat_priv->my_vis_info = NULL;
err:
        spin_unlock_bh(&bat_priv->vis_hash_lock);
        vis_quit(bat_priv);
        return 0;
}

/* Decrease the reference count on a hash item info */
static void free_info_ref(void *data, void *arg)
{
        struct vis_info *info = data;

        send_list_del(info);
        kref_put(&info->refcount, free_info);
}

/* shutdown vis-server */
void vis_quit(struct bat_priv *bat_priv)
{
        if (!bat_priv->vis_hash)
                return;

        cancel_delayed_work_sync(&bat_priv->vis_work);

        spin_lock_bh(&bat_priv->vis_hash_lock);
        /* properly remove, kill timers ... */
        hash_delete(bat_priv->vis_hash, free_info_ref, NULL);
        bat_priv->vis_hash = NULL;
        bat_priv->my_vis_info = NULL;
        spin_unlock_bh(&bat_priv->vis_hash_lock);
}

/* schedule packets for (re)transmission */
static void start_vis_timer(struct bat_priv *bat_priv)
{
        INIT_DELAYED_WORK(&bat_priv->vis_work, send_vis_packets);
        queue_delayed_work(bat_event_workqueue, &bat_priv->vis_work,
                           msecs_to_jiffies(VIS_INTERVAL));
}