translation-table.c 14 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349350351352353354355356357358359360361362363364365366367368369370371372373374375376377378379380381382383384385386387388389390391392393394395396397398399400401402403404405406407408409410411412413414415416417418419420421422423424425426427428429430431432433434435436437438439440441442443444445446447448449450451452453454455456457458459460461462463464465466467468469470471472473474475476477478479480481482483484485486487488489490491492493494495496497498499500501502503504505506507508509510511512513514515516517518519520521522523524525526527528529530531532533534535536537538539540541542543544545546547
  1. /*
  2. * Copyright (C) 2007-2011 B.A.T.M.A.N. contributors:
  3. *
  4. * Marek Lindner, Simon Wunderlich
  5. *
  6. * This program is free software; you can redistribute it and/or
  7. * modify it under the terms of version 2 of the GNU General Public
  8. * License as published by the Free Software Foundation.
  9. *
  10. * This program is distributed in the hope that it will be useful, but
  11. * WITHOUT ANY WARRANTY; without even the implied warranty of
  12. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
  13. * General Public License for more details.
  14. *
  15. * You should have received a copy of the GNU General Public License
  16. * along with this program; if not, write to the Free Software
  17. * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
  18. * 02110-1301, USA
  19. *
  20. */
  21. #include "main.h"
  22. #include "translation-table.h"
  23. #include "soft-interface.h"
  24. #include "hash.h"
  25. #include "originator.h"
  26. static void hna_local_purge(struct work_struct *work);
  27. static void _hna_global_del_orig(struct bat_priv *bat_priv,
  28. struct hna_global_entry *hna_global_entry,
  29. char *message);
/* (re)arm the delayed work that periodically purges timed-out local HNA
 * entries; the purge runs every 10 seconds on bat_event_workqueue */
static void hna_local_start_timer(struct bat_priv *bat_priv)
{
	INIT_DELAYED_WORK(&bat_priv->hna_work, hna_local_purge);
	queue_delayed_work(bat_event_workqueue, &bat_priv->hna_work, 10 * HZ);
}
  35. int hna_local_init(struct bat_priv *bat_priv)
  36. {
  37. if (bat_priv->hna_local_hash)
  38. return 1;
  39. bat_priv->hna_local_hash = hash_new(1024);
  40. if (!bat_priv->hna_local_hash)
  41. return 0;
  42. atomic_set(&bat_priv->hna_local_changed, 0);
  43. hna_local_start_timer(bat_priv);
  44. return 1;
  45. }
/**
 * hna_local_add - announce a MAC address as locally reachable
 * @soft_iface: the batman mesh interface the address belongs to
 * @addr: the 6-byte MAC address to announce
 *
 * Refreshes the purge timestamp if the address is already announced;
 * otherwise creates a new local HNA entry (unless the announcement would
 * no longer fit into a batman packet) and removes any matching entry
 * from the global table.
 */
void hna_local_add(struct net_device *soft_iface, uint8_t *addr)
{
	struct bat_priv *bat_priv = netdev_priv(soft_iface);
	struct hna_local_entry *hna_local_entry;
	struct hna_global_entry *hna_global_entry;
	int required_bytes;

	spin_lock_bh(&bat_priv->hna_lhash_lock);
	rcu_read_lock();
	hna_local_entry =
		((struct hna_local_entry *)hash_find(bat_priv->hna_local_hash,
						     compare_orig, choose_orig,
						     addr));
	rcu_read_unlock();
	spin_unlock_bh(&bat_priv->hna_lhash_lock);

	/* known address: just refresh its timestamp so it is not purged */
	if (hna_local_entry) {
		hna_local_entry->last_seen = jiffies;
		return;
	}

	/* only announce as many hosts as possible in the batman-packet and
	   space in batman_packet->num_hna That also should give a limit to
	   MAC-flooding. */
	required_bytes = (bat_priv->num_local_hna + 1) * ETH_ALEN;
	required_bytes += BAT_PACKET_LEN;

	/* num_hna is a single byte on the wire, hence the 255 cap */
	if ((required_bytes > ETH_DATA_LEN) ||
	    (atomic_read(&bat_priv->aggregated_ogms) &&
	     required_bytes > MAX_AGGREGATION_BYTES) ||
	    (bat_priv->num_local_hna + 1 > 255)) {
		bat_dbg(DBG_ROUTES, bat_priv,
			"Can't add new local hna entry (%pM): "
			"number of local hna entries exceeds packet size\n",
			addr);
		return;
	}

	bat_dbg(DBG_ROUTES, bat_priv,
		"Creating new local hna entry: %pM\n", addr);

	hna_local_entry = kmalloc(sizeof(struct hna_local_entry), GFP_ATOMIC);
	if (!hna_local_entry)
		return;

	memcpy(hna_local_entry->addr, addr, ETH_ALEN);
	hna_local_entry->last_seen = jiffies;

	/* the batman interface mac address should never be purged */
	if (compare_eth(addr, soft_iface->dev_addr))
		hna_local_entry->never_purge = 1;
	else
		hna_local_entry->never_purge = 0;

	/* NOTE(review): the lhash lock was dropped between the lookup above
	 * and this insert - a concurrent caller could insert the same address
	 * twice; confirm callers serialize hna_local_add() per address */
	spin_lock_bh(&bat_priv->hna_lhash_lock);

	hash_add(bat_priv->hna_local_hash, compare_orig, choose_orig,
		 hna_local_entry);
	bat_priv->num_local_hna++;
	/* signal hna_local_fill_buffer() that a new announcement is due */
	atomic_set(&bat_priv->hna_local_changed, 1);

	spin_unlock_bh(&bat_priv->hna_lhash_lock);

	/* remove address from global hash if present */
	spin_lock_bh(&bat_priv->hna_ghash_lock);

	rcu_read_lock();
	hna_global_entry = ((struct hna_global_entry *)
			    hash_find(bat_priv->hna_global_hash,
				      compare_orig, choose_orig, addr));
	rcu_read_unlock();

	if (hna_global_entry)
		_hna_global_del_orig(bat_priv, hna_global_entry,
				     "local hna received");

	spin_unlock_bh(&bat_priv->hna_ghash_lock);
}
  109. int hna_local_fill_buffer(struct bat_priv *bat_priv,
  110. unsigned char *buff, int buff_len)
  111. {
  112. struct hashtable_t *hash = bat_priv->hna_local_hash;
  113. struct hna_local_entry *hna_local_entry;
  114. struct element_t *bucket;
  115. int i;
  116. struct hlist_node *walk;
  117. struct hlist_head *head;
  118. int count = 0;
  119. spin_lock_bh(&bat_priv->hna_lhash_lock);
  120. for (i = 0; i < hash->size; i++) {
  121. head = &hash->table[i];
  122. hlist_for_each_entry(bucket, walk, head, hlist) {
  123. if (buff_len < (count + 1) * ETH_ALEN)
  124. break;
  125. hna_local_entry = bucket->data;
  126. memcpy(buff + (count * ETH_ALEN), hna_local_entry->addr,
  127. ETH_ALEN);
  128. count++;
  129. }
  130. }
  131. /* if we did not get all new local hnas see you next time ;-) */
  132. if (count == bat_priv->num_local_hna)
  133. atomic_set(&bat_priv->hna_local_changed, 0);
  134. spin_unlock_bh(&bat_priv->hna_lhash_lock);
  135. return count;
  136. }
  137. int hna_local_seq_print_text(struct seq_file *seq, void *offset)
  138. {
  139. struct net_device *net_dev = (struct net_device *)seq->private;
  140. struct bat_priv *bat_priv = netdev_priv(net_dev);
  141. struct hashtable_t *hash = bat_priv->hna_local_hash;
  142. struct hna_local_entry *hna_local_entry;
  143. int i;
  144. struct hlist_node *walk;
  145. struct hlist_head *head;
  146. struct element_t *bucket;
  147. size_t buf_size, pos;
  148. char *buff;
  149. if (!bat_priv->primary_if) {
  150. return seq_printf(seq, "BATMAN mesh %s disabled - "
  151. "please specify interfaces to enable it\n",
  152. net_dev->name);
  153. }
  154. seq_printf(seq, "Locally retrieved addresses (from %s) "
  155. "announced via HNA:\n",
  156. net_dev->name);
  157. spin_lock_bh(&bat_priv->hna_lhash_lock);
  158. buf_size = 1;
  159. /* Estimate length for: " * xx:xx:xx:xx:xx:xx\n" */
  160. for (i = 0; i < hash->size; i++) {
  161. head = &hash->table[i];
  162. hlist_for_each(walk, head)
  163. buf_size += 21;
  164. }
  165. buff = kmalloc(buf_size, GFP_ATOMIC);
  166. if (!buff) {
  167. spin_unlock_bh(&bat_priv->hna_lhash_lock);
  168. return -ENOMEM;
  169. }
  170. buff[0] = '\0';
  171. pos = 0;
  172. for (i = 0; i < hash->size; i++) {
  173. head = &hash->table[i];
  174. hlist_for_each_entry(bucket, walk, head, hlist) {
  175. hna_local_entry = bucket->data;
  176. pos += snprintf(buff + pos, 22, " * %pM\n",
  177. hna_local_entry->addr);
  178. }
  179. }
  180. spin_unlock_bh(&bat_priv->hna_lhash_lock);
  181. seq_printf(seq, "%s", buff);
  182. kfree(buff);
  183. return 0;
  184. }
  185. static void _hna_local_del(void *data, void *arg)
  186. {
  187. struct bat_priv *bat_priv = (struct bat_priv *)arg;
  188. kfree(data);
  189. bat_priv->num_local_hna--;
  190. atomic_set(&bat_priv->hna_local_changed, 1);
  191. }
/* unlink @hna_local_entry from the local hash and free it; all visible
 * call sites hold hna_lhash_lock while calling this */
static void hna_local_del(struct bat_priv *bat_priv,
			  struct hna_local_entry *hna_local_entry,
			  char *message)
{
	bat_dbg(DBG_ROUTES, bat_priv, "Deleting local hna entry (%pM): %s\n",
		hna_local_entry->addr, message);

	hash_remove(bat_priv->hna_local_hash, compare_orig, choose_orig,
		    hna_local_entry->addr);
	_hna_local_del(hna_local_entry, bat_priv);
}
  202. void hna_local_remove(struct bat_priv *bat_priv,
  203. uint8_t *addr, char *message)
  204. {
  205. struct hna_local_entry *hna_local_entry;
  206. spin_lock_bh(&bat_priv->hna_lhash_lock);
  207. rcu_read_lock();
  208. hna_local_entry = (struct hna_local_entry *)
  209. hash_find(bat_priv->hna_local_hash, compare_orig, choose_orig,
  210. addr);
  211. rcu_read_unlock();
  212. if (hna_local_entry)
  213. hna_local_del(bat_priv, hna_local_entry, message);
  214. spin_unlock_bh(&bat_priv->hna_lhash_lock);
  215. }
  216. static void hna_local_purge(struct work_struct *work)
  217. {
  218. struct delayed_work *delayed_work =
  219. container_of(work, struct delayed_work, work);
  220. struct bat_priv *bat_priv =
  221. container_of(delayed_work, struct bat_priv, hna_work);
  222. struct hashtable_t *hash = bat_priv->hna_local_hash;
  223. struct hna_local_entry *hna_local_entry;
  224. int i;
  225. struct hlist_node *walk, *safe;
  226. struct hlist_head *head;
  227. struct element_t *bucket;
  228. unsigned long timeout;
  229. spin_lock_bh(&bat_priv->hna_lhash_lock);
  230. for (i = 0; i < hash->size; i++) {
  231. head = &hash->table[i];
  232. hlist_for_each_entry_safe(bucket, walk, safe, head, hlist) {
  233. hna_local_entry = bucket->data;
  234. timeout = hna_local_entry->last_seen;
  235. timeout += LOCAL_HNA_TIMEOUT * HZ;
  236. if ((!hna_local_entry->never_purge) &&
  237. time_after(jiffies, timeout))
  238. hna_local_del(bat_priv, hna_local_entry,
  239. "address timed out");
  240. }
  241. }
  242. spin_unlock_bh(&bat_priv->hna_lhash_lock);
  243. hna_local_start_timer(bat_priv);
  244. }
  245. void hna_local_free(struct bat_priv *bat_priv)
  246. {
  247. if (!bat_priv->hna_local_hash)
  248. return;
  249. cancel_delayed_work_sync(&bat_priv->hna_work);
  250. hash_delete(bat_priv->hna_local_hash, _hna_local_del, bat_priv);
  251. bat_priv->hna_local_hash = NULL;
  252. }
  253. int hna_global_init(struct bat_priv *bat_priv)
  254. {
  255. if (bat_priv->hna_global_hash)
  256. return 1;
  257. bat_priv->hna_global_hash = hash_new(1024);
  258. if (!bat_priv->hna_global_hash)
  259. return 0;
  260. return 1;
  261. }
/**
 * hna_global_add_orig - import the HNA list announced by an originator
 * @bat_priv: private mesh data of the soft interface
 * @orig_node: the originator that announced the addresses
 * @hna_buff: packed array of 6-byte MAC addresses
 * @hna_buff_len: length of @hna_buff in bytes
 *
 * Creates (or re-points) one global HNA entry per announced address,
 * removes any colliding entry from the local table, and finally stores a
 * private copy of @hna_buff in @orig_node so hna_global_del_orig() can
 * undo the import later.
 */
void hna_global_add_orig(struct bat_priv *bat_priv,
			 struct orig_node *orig_node,
			 unsigned char *hna_buff, int hna_buff_len)
{
	struct hna_global_entry *hna_global_entry;
	struct hna_local_entry *hna_local_entry;
	int hna_buff_count = 0;
	unsigned char *hna_ptr;

	while ((hna_buff_count + 1) * ETH_ALEN <= hna_buff_len) {
		spin_lock_bh(&bat_priv->hna_ghash_lock);

		hna_ptr = hna_buff + (hna_buff_count * ETH_ALEN);

		rcu_read_lock();
		hna_global_entry = (struct hna_global_entry *)
			hash_find(bat_priv->hna_global_hash, compare_orig,
				  choose_orig, hna_ptr);
		rcu_read_unlock();

		if (!hna_global_entry) {
			/* NOTE(review): the ghash lock is dropped for the
			 * allocation and re-taken before hash_add() - a
			 * concurrent caller could insert the same address in
			 * the gap; confirm callers are serialized */
			spin_unlock_bh(&bat_priv->hna_ghash_lock);

			hna_global_entry =
				kmalloc(sizeof(struct hna_global_entry),
					GFP_ATOMIC);

			/* on OOM give up on the remaining addresses but still
			 * fall through to cache the buffer below */
			if (!hna_global_entry)
				break;

			memcpy(hna_global_entry->addr, hna_ptr, ETH_ALEN);

			bat_dbg(DBG_ROUTES, bat_priv,
				"Creating new global hna entry: "
				"%pM (via %pM)\n",
				hna_global_entry->addr, orig_node->orig);

			spin_lock_bh(&bat_priv->hna_ghash_lock);
			hash_add(bat_priv->hna_global_hash, compare_orig,
				 choose_orig, hna_global_entry);

		}

		/* an existing entry is simply re-pointed at the announcing
		 * originator */
		hna_global_entry->orig_node = orig_node;
		spin_unlock_bh(&bat_priv->hna_ghash_lock);

		/* remove address from local hash if present */
		spin_lock_bh(&bat_priv->hna_lhash_lock);

		hna_ptr = hna_buff + (hna_buff_count * ETH_ALEN);

		rcu_read_lock();
		hna_local_entry = (struct hna_local_entry *)
			hash_find(bat_priv->hna_local_hash, compare_orig,
				  choose_orig, hna_ptr);
		rcu_read_unlock();

		if (hna_local_entry)
			hna_local_del(bat_priv, hna_local_entry,
				      "global hna received");

		spin_unlock_bh(&bat_priv->hna_lhash_lock);

		hna_buff_count++;
	}

	/* initialize, and overwrite if malloc succeeds */
	orig_node->hna_buff = NULL;
	orig_node->hna_buff_len = 0;

	if (hna_buff_len > 0) {
		orig_node->hna_buff = kmalloc(hna_buff_len, GFP_ATOMIC);
		if (orig_node->hna_buff) {
			memcpy(orig_node->hna_buff, hna_buff, hna_buff_len);
			orig_node->hna_buff_len = hna_buff_len;
		}
	}
}
  321. int hna_global_seq_print_text(struct seq_file *seq, void *offset)
  322. {
  323. struct net_device *net_dev = (struct net_device *)seq->private;
  324. struct bat_priv *bat_priv = netdev_priv(net_dev);
  325. struct hashtable_t *hash = bat_priv->hna_global_hash;
  326. struct hna_global_entry *hna_global_entry;
  327. int i;
  328. struct hlist_node *walk;
  329. struct hlist_head *head;
  330. struct element_t *bucket;
  331. size_t buf_size, pos;
  332. char *buff;
  333. if (!bat_priv->primary_if) {
  334. return seq_printf(seq, "BATMAN mesh %s disabled - "
  335. "please specify interfaces to enable it\n",
  336. net_dev->name);
  337. }
  338. seq_printf(seq, "Globally announced HNAs received via the mesh %s\n",
  339. net_dev->name);
  340. spin_lock_bh(&bat_priv->hna_ghash_lock);
  341. buf_size = 1;
  342. /* Estimate length for: " * xx:xx:xx:xx:xx:xx via xx:xx:xx:xx:xx:xx\n"*/
  343. for (i = 0; i < hash->size; i++) {
  344. head = &hash->table[i];
  345. hlist_for_each(walk, head)
  346. buf_size += 43;
  347. }
  348. buff = kmalloc(buf_size, GFP_ATOMIC);
  349. if (!buff) {
  350. spin_unlock_bh(&bat_priv->hna_ghash_lock);
  351. return -ENOMEM;
  352. }
  353. buff[0] = '\0';
  354. pos = 0;
  355. for (i = 0; i < hash->size; i++) {
  356. head = &hash->table[i];
  357. hlist_for_each_entry(bucket, walk, head, hlist) {
  358. hna_global_entry = bucket->data;
  359. pos += snprintf(buff + pos, 44,
  360. " * %pM via %pM\n",
  361. hna_global_entry->addr,
  362. hna_global_entry->orig_node->orig);
  363. }
  364. }
  365. spin_unlock_bh(&bat_priv->hna_ghash_lock);
  366. seq_printf(seq, "%s", buff);
  367. kfree(buff);
  368. return 0;
  369. }
/* unlink @hna_global_entry from the global hash and free it; all visible
 * call sites hold hna_ghash_lock while calling this */
static void _hna_global_del_orig(struct bat_priv *bat_priv,
				 struct hna_global_entry *hna_global_entry,
				 char *message)
{
	bat_dbg(DBG_ROUTES, bat_priv,
		"Deleting global hna entry %pM (via %pM): %s\n",
		hna_global_entry->addr, hna_global_entry->orig_node->orig,
		message);

	hash_remove(bat_priv->hna_global_hash, compare_orig, choose_orig,
		    hna_global_entry->addr);
	kfree(hna_global_entry);
}
  382. void hna_global_del_orig(struct bat_priv *bat_priv,
  383. struct orig_node *orig_node, char *message)
  384. {
  385. struct hna_global_entry *hna_global_entry;
  386. int hna_buff_count = 0;
  387. unsigned char *hna_ptr;
  388. if (orig_node->hna_buff_len == 0)
  389. return;
  390. spin_lock_bh(&bat_priv->hna_ghash_lock);
  391. while ((hna_buff_count + 1) * ETH_ALEN <= orig_node->hna_buff_len) {
  392. hna_ptr = orig_node->hna_buff + (hna_buff_count * ETH_ALEN);
  393. rcu_read_lock();
  394. hna_global_entry = (struct hna_global_entry *)
  395. hash_find(bat_priv->hna_global_hash, compare_orig,
  396. choose_orig, hna_ptr);
  397. rcu_read_unlock();
  398. if ((hna_global_entry) &&
  399. (hna_global_entry->orig_node == orig_node))
  400. _hna_global_del_orig(bat_priv, hna_global_entry,
  401. message);
  402. hna_buff_count++;
  403. }
  404. spin_unlock_bh(&bat_priv->hna_ghash_lock);
  405. orig_node->hna_buff_len = 0;
  406. kfree(orig_node->hna_buff);
  407. orig_node->hna_buff = NULL;
  408. }
/* hash_delete() callback: free one global entry; @arg is unused */
static void hna_global_del(void *data, void *arg)
{
	kfree(data);
}
  413. void hna_global_free(struct bat_priv *bat_priv)
  414. {
  415. if (!bat_priv->hna_global_hash)
  416. return;
  417. hash_delete(bat_priv->hna_global_hash, hna_global_del, NULL);
  418. bat_priv->hna_global_hash = NULL;
  419. }
  420. struct orig_node *transtable_search(struct bat_priv *bat_priv, uint8_t *addr)
  421. {
  422. struct hna_global_entry *hna_global_entry;
  423. spin_lock_bh(&bat_priv->hna_ghash_lock);
  424. rcu_read_lock();
  425. hna_global_entry = (struct hna_global_entry *)
  426. hash_find(bat_priv->hna_global_hash,
  427. compare_orig, choose_orig, addr);
  428. rcu_read_unlock();
  429. spin_unlock_bh(&bat_priv->hna_ghash_lock);
  430. if (!hna_global_entry)
  431. return NULL;
  432. return hna_global_entry->orig_node;
  433. }