/* translation-table.c */
  1. /*
  2. * Copyright (C) 2007-2011 B.A.T.M.A.N. contributors:
  3. *
  4. * Marek Lindner, Simon Wunderlich
  5. *
  6. * This program is free software; you can redistribute it and/or
  7. * modify it under the terms of version 2 of the GNU General Public
  8. * License as published by the Free Software Foundation.
  9. *
  10. * This program is distributed in the hope that it will be useful, but
  11. * WITHOUT ANY WARRANTY; without even the implied warranty of
  12. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
  13. * General Public License for more details.
  14. *
  15. * You should have received a copy of the GNU General Public License
  16. * along with this program; if not, write to the Free Software
  17. * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
  18. * 02110-1301, USA
  19. *
  20. */
  21. #include "main.h"
  22. #include "translation-table.h"
  23. #include "soft-interface.h"
  24. #include "hard-interface.h"
  25. #include "send.h"
  26. #include "hash.h"
  27. #include "originator.h"
  28. #include "routing.h"
  29. #include <linux/crc16.h>
  30. static void _tt_global_del(struct bat_priv *bat_priv,
  31. struct tt_global_entry *tt_global_entry,
  32. const char *message);
  33. static void tt_purge(struct work_struct *work);
  34. /* returns 1 if they are the same mac addr */
  35. static int compare_ltt(const struct hlist_node *node, const void *data2)
  36. {
  37. const void *data1 = container_of(node, struct tt_local_entry,
  38. hash_entry);
  39. return (memcmp(data1, data2, ETH_ALEN) == 0 ? 1 : 0);
  40. }
  41. /* returns 1 if they are the same mac addr */
  42. static int compare_gtt(const struct hlist_node *node, const void *data2)
  43. {
  44. const void *data1 = container_of(node, struct tt_global_entry,
  45. hash_entry);
  46. return (memcmp(data1, data2, ETH_ALEN) == 0 ? 1 : 0);
  47. }
  48. static void tt_start_timer(struct bat_priv *bat_priv)
  49. {
  50. INIT_DELAYED_WORK(&bat_priv->tt_work, tt_purge);
  51. queue_delayed_work(bat_event_workqueue, &bat_priv->tt_work,
  52. msecs_to_jiffies(5000));
  53. }
  54. static struct tt_local_entry *tt_local_hash_find(struct bat_priv *bat_priv,
  55. const void *data)
  56. {
  57. struct hashtable_t *hash = bat_priv->tt_local_hash;
  58. struct hlist_head *head;
  59. struct hlist_node *node;
  60. struct tt_local_entry *tt_local_entry, *tt_local_entry_tmp = NULL;
  61. int index;
  62. if (!hash)
  63. return NULL;
  64. index = choose_orig(data, hash->size);
  65. head = &hash->table[index];
  66. rcu_read_lock();
  67. hlist_for_each_entry_rcu(tt_local_entry, node, head, hash_entry) {
  68. if (!compare_eth(tt_local_entry, data))
  69. continue;
  70. if (!atomic_inc_not_zero(&tt_local_entry->refcount))
  71. continue;
  72. tt_local_entry_tmp = tt_local_entry;
  73. break;
  74. }
  75. rcu_read_unlock();
  76. return tt_local_entry_tmp;
  77. }
  78. static struct tt_global_entry *tt_global_hash_find(struct bat_priv *bat_priv,
  79. const void *data)
  80. {
  81. struct hashtable_t *hash = bat_priv->tt_global_hash;
  82. struct hlist_head *head;
  83. struct hlist_node *node;
  84. struct tt_global_entry *tt_global_entry;
  85. struct tt_global_entry *tt_global_entry_tmp = NULL;
  86. int index;
  87. if (!hash)
  88. return NULL;
  89. index = choose_orig(data, hash->size);
  90. head = &hash->table[index];
  91. rcu_read_lock();
  92. hlist_for_each_entry_rcu(tt_global_entry, node, head, hash_entry) {
  93. if (!compare_eth(tt_global_entry, data))
  94. continue;
  95. if (!atomic_inc_not_zero(&tt_global_entry->refcount))
  96. continue;
  97. tt_global_entry_tmp = tt_global_entry;
  98. break;
  99. }
  100. rcu_read_unlock();
  101. return tt_global_entry_tmp;
  102. }
  103. static bool is_out_of_time(unsigned long starting_time, unsigned long timeout)
  104. {
  105. unsigned long deadline;
  106. deadline = starting_time + msecs_to_jiffies(timeout);
  107. return time_after(jiffies, deadline);
  108. }
/* drop one reference to a local tt entry; the object is released via RCU
 * once the last reference is gone */
static void tt_local_entry_free_ref(struct tt_local_entry *tt_local_entry)
{
	if (atomic_dec_and_test(&tt_local_entry->refcount))
		kfree_rcu(tt_local_entry, rcu);
}
/* drop one reference to a global tt entry; the object is released via RCU
 * once the last reference is gone */
static void tt_global_entry_free_ref(struct tt_global_entry *tt_global_entry)
{
	if (atomic_dec_and_test(&tt_global_entry->refcount))
		kfree_rcu(tt_global_entry, rcu);
}
  119. static void tt_local_event(struct bat_priv *bat_priv, const uint8_t *addr,
  120. uint8_t flags)
  121. {
  122. struct tt_change_node *tt_change_node;
  123. tt_change_node = kmalloc(sizeof(*tt_change_node), GFP_ATOMIC);
  124. if (!tt_change_node)
  125. return;
  126. tt_change_node->change.flags = flags;
  127. memcpy(tt_change_node->change.addr, addr, ETH_ALEN);
  128. spin_lock_bh(&bat_priv->tt_changes_list_lock);
  129. /* track the change in the OGMinterval list */
  130. list_add_tail(&tt_change_node->list, &bat_priv->tt_changes_list);
  131. atomic_inc(&bat_priv->tt_local_changes);
  132. spin_unlock_bh(&bat_priv->tt_changes_list_lock);
  133. atomic_set(&bat_priv->tt_ogm_append_cnt, 0);
  134. }
/* returns the number of bytes needed to carry @changes_num tt changes */
int tt_len(int changes_num)
{
	return changes_num * sizeof(struct tt_change);
}
  139. static int tt_local_init(struct bat_priv *bat_priv)
  140. {
  141. if (bat_priv->tt_local_hash)
  142. return 1;
  143. bat_priv->tt_local_hash = hash_new(1024);
  144. if (!bat_priv->tt_local_hash)
  145. return 0;
  146. return 1;
  147. }
/* Add (or refresh) the client with mac address @addr in the local
 * translation table of @soft_iface. @ifindex is the interface the client
 * was detected on (used to flag wifi clients). */
void tt_local_add(struct net_device *soft_iface, const uint8_t *addr,
		  int ifindex)
{
	struct bat_priv *bat_priv = netdev_priv(soft_iface);
	struct tt_local_entry *tt_local_entry = NULL;
	struct tt_global_entry *tt_global_entry = NULL;

	tt_local_entry = tt_local_hash_find(bat_priv, addr);

	/* already known client: just refresh its timestamp */
	if (tt_local_entry) {
		tt_local_entry->last_seen = jiffies;
		goto out;
	}

	tt_local_entry = kmalloc(sizeof(*tt_local_entry), GFP_ATOMIC);
	if (!tt_local_entry)
		goto out;

	bat_dbg(DBG_TT, bat_priv,
		"Creating new local tt entry: %pM (ttvn: %d)\n", addr,
		(uint8_t)atomic_read(&bat_priv->ttvn));

	memcpy(tt_local_entry->addr, addr, ETH_ALEN);
	tt_local_entry->last_seen = jiffies;
	tt_local_entry->flags = NO_FLAGS;
	if (is_wifi_iface(ifindex))
		tt_local_entry->flags |= TT_CLIENT_WIFI;
	/* one reference for the hash table, one for this function */
	atomic_set(&tt_local_entry->refcount, 2);

	/* the batman interface mac address should never be purged */
	if (compare_eth(addr, soft_iface->dev_addr))
		tt_local_entry->flags |= TT_CLIENT_NOPURGE;

	tt_local_event(bat_priv, addr, tt_local_entry->flags);

	/* The local entry has to be marked as NEW to avoid to send it in
	 * a full table response going out before the next ttvn increment
	 * (consistency check) */
	tt_local_entry->flags |= TT_CLIENT_NEW;

	/* NOTE(review): hash_add() return value is ignored; a failed insert
	 * would leak the entry - confirm against hash_add() semantics */
	hash_add(bat_priv->tt_local_hash, compare_ltt, choose_orig,
		 tt_local_entry, &tt_local_entry->hash_entry);

	/* remove address from global hash if present */
	tt_global_entry = tt_global_hash_find(bat_priv, addr);

	/* Check whether it is a roaming! */
	if (tt_global_entry) {
		/* This node is probably going to update its tt table */
		tt_global_entry->orig_node->tt_poss_change = true;
		/* The global entry has to be marked as PENDING and has to be
		 * kept for consistency purpose */
		tt_global_entry->flags |= TT_CLIENT_PENDING;
		send_roam_adv(bat_priv, tt_global_entry->addr,
			      tt_global_entry->orig_node);
	}
out:
	if (tt_local_entry)
		tt_local_entry_free_ref(tt_local_entry);
	if (tt_global_entry)
		tt_global_entry_free_ref(tt_global_entry);
}
/* Drain the pending tt change list into @buff (at most @buff_len bytes)
 * for OGM appending; changes that do not fit are dropped anyway. A copy of
 * the buffer is kept for answering later tt_requests.
 * NOTE(review): returns the buffer capacity in changes (tot_changes), not
 * the number actually copied - presumably callers size the buffer from
 * tt_local_changes so the two match; confirm at the call sites. */
int tt_changes_fill_buffer(struct bat_priv *bat_priv,
			   unsigned char *buff, int buff_len)
{
	int count = 0, tot_changes = 0;
	struct tt_change_node *entry, *safe;

	if (buff_len > 0)
		tot_changes = buff_len / tt_len(1);

	spin_lock_bh(&bat_priv->tt_changes_list_lock);
	atomic_set(&bat_priv->tt_local_changes, 0);

	list_for_each_entry_safe(entry, safe, &bat_priv->tt_changes_list,
				 list) {
		if (count < tot_changes) {
			memcpy(buff + tt_len(count),
			       &entry->change, sizeof(struct tt_change));
			count++;
		}
		list_del(&entry->list);
		kfree(entry);
	}
	spin_unlock_bh(&bat_priv->tt_changes_list_lock);

	/* Keep the buffer for possible tt_request */
	spin_lock_bh(&bat_priv->tt_buff_lock);
	kfree(bat_priv->tt_buff);
	bat_priv->tt_buff_len = 0;
	bat_priv->tt_buff = NULL;
	/* We check whether this new OGM has no changes due to size
	 * problems */
	if (buff_len > 0) {
		/**
		 * if kmalloc() fails we will reply with the full table
		 * instead of providing the diff
		 */
		bat_priv->tt_buff = kmalloc(buff_len, GFP_ATOMIC);
		if (bat_priv->tt_buff) {
			memcpy(bat_priv->tt_buff, buff, buff_len);
			bat_priv->tt_buff_len = buff_len;
		}
	}
	spin_unlock_bh(&bat_priv->tt_buff_lock);

	return tot_changes;
}
  240. int tt_local_seq_print_text(struct seq_file *seq, void *offset)
  241. {
  242. struct net_device *net_dev = (struct net_device *)seq->private;
  243. struct bat_priv *bat_priv = netdev_priv(net_dev);
  244. struct hashtable_t *hash = bat_priv->tt_local_hash;
  245. struct tt_local_entry *tt_local_entry;
  246. struct hard_iface *primary_if;
  247. struct hlist_node *node;
  248. struct hlist_head *head;
  249. size_t buf_size, pos;
  250. char *buff;
  251. int i, ret = 0;
  252. primary_if = primary_if_get_selected(bat_priv);
  253. if (!primary_if) {
  254. ret = seq_printf(seq, "BATMAN mesh %s disabled - "
  255. "please specify interfaces to enable it\n",
  256. net_dev->name);
  257. goto out;
  258. }
  259. if (primary_if->if_status != IF_ACTIVE) {
  260. ret = seq_printf(seq, "BATMAN mesh %s disabled - "
  261. "primary interface not active\n",
  262. net_dev->name);
  263. goto out;
  264. }
  265. seq_printf(seq, "Locally retrieved addresses (from %s) "
  266. "announced via TT (TTVN: %u):\n",
  267. net_dev->name, (uint8_t)atomic_read(&bat_priv->ttvn));
  268. buf_size = 1;
  269. /* Estimate length for: " * xx:xx:xx:xx:xx:xx\n" */
  270. for (i = 0; i < hash->size; i++) {
  271. head = &hash->table[i];
  272. rcu_read_lock();
  273. __hlist_for_each_rcu(node, head)
  274. buf_size += 21;
  275. rcu_read_unlock();
  276. }
  277. buff = kmalloc(buf_size, GFP_ATOMIC);
  278. if (!buff) {
  279. ret = -ENOMEM;
  280. goto out;
  281. }
  282. buff[0] = '\0';
  283. pos = 0;
  284. for (i = 0; i < hash->size; i++) {
  285. head = &hash->table[i];
  286. rcu_read_lock();
  287. hlist_for_each_entry_rcu(tt_local_entry, node,
  288. head, hash_entry) {
  289. pos += snprintf(buff + pos, 22, " * %pM\n",
  290. tt_local_entry->addr);
  291. }
  292. rcu_read_unlock();
  293. }
  294. seq_printf(seq, "%s", buff);
  295. kfree(buff);
  296. out:
  297. if (primary_if)
  298. hardif_free_ref(primary_if);
  299. return ret;
  300. }
/* Mark @tt_local_entry as pending deletion: a change event carrying the
 * entry's flags plus @flags is queued for the next OGM, while the entry
 * itself stays in the hash for full-table responses. */
static void tt_local_set_pending(struct bat_priv *bat_priv,
				 struct tt_local_entry *tt_local_entry,
				 uint16_t flags)
{
	tt_local_event(bat_priv, tt_local_entry->addr,
		       tt_local_entry->flags | flags);

	/* The local client has to be marked as "pending to be removed" but has
	 * to be kept in the table in order to send it in a full table
	 * response issued before the net ttvn increment (consistency check) */
	tt_local_entry->flags |= TT_CLIENT_PENDING;
}
  312. void tt_local_remove(struct bat_priv *bat_priv, const uint8_t *addr,
  313. const char *message, bool roaming)
  314. {
  315. struct tt_local_entry *tt_local_entry = NULL;
  316. tt_local_entry = tt_local_hash_find(bat_priv, addr);
  317. if (!tt_local_entry)
  318. goto out;
  319. tt_local_set_pending(bat_priv, tt_local_entry, TT_CLIENT_DEL |
  320. (roaming ? TT_CLIENT_ROAM : NO_FLAGS));
  321. bat_dbg(DBG_TT, bat_priv, "Local tt entry (%pM) pending to be removed: "
  322. "%s\n", tt_local_entry->addr, message);
  323. out:
  324. if (tt_local_entry)
  325. tt_local_entry_free_ref(tt_local_entry);
  326. }
/* Scan the local table and mark entries not seen for TT_LOCAL_TIMEOUT
 * (value presumably in seconds, given the *1000 ms conversion) as
 * pending-delete; actual removal happens after the change is advertised. */
static void tt_local_purge(struct bat_priv *bat_priv)
{
	struct hashtable_t *hash = bat_priv->tt_local_hash;
	struct tt_local_entry *tt_local_entry;
	struct hlist_node *node, *node_tmp;
	struct hlist_head *head;
	spinlock_t *list_lock; /* protects write access to the hash lists */
	int i;

	for (i = 0; i < hash->size; i++) {
		head = &hash->table[i];
		list_lock = &hash->list_locks[i];

		spin_lock_bh(list_lock);
		hlist_for_each_entry_safe(tt_local_entry, node, node_tmp,
					  head, hash_entry) {
			/* e.g. the soft-interface's own address */
			if (tt_local_entry->flags & TT_CLIENT_NOPURGE)
				continue;

			/* entry already marked for deletion */
			if (tt_local_entry->flags & TT_CLIENT_PENDING)
				continue;

			if (!is_out_of_time(tt_local_entry->last_seen,
					    TT_LOCAL_TIMEOUT * 1000))
				continue;

			tt_local_set_pending(bat_priv, tt_local_entry,
					     TT_CLIENT_DEL);
			bat_dbg(DBG_TT, bat_priv, "Local tt entry (%pM) "
				"pending to be removed: timed out\n",
				tt_local_entry->addr);
		}
		spin_unlock_bh(list_lock);
	}
}
  358. static void tt_local_table_free(struct bat_priv *bat_priv)
  359. {
  360. struct hashtable_t *hash;
  361. spinlock_t *list_lock; /* protects write access to the hash lists */
  362. struct tt_local_entry *tt_local_entry;
  363. struct hlist_node *node, *node_tmp;
  364. struct hlist_head *head;
  365. int i;
  366. if (!bat_priv->tt_local_hash)
  367. return;
  368. hash = bat_priv->tt_local_hash;
  369. for (i = 0; i < hash->size; i++) {
  370. head = &hash->table[i];
  371. list_lock = &hash->list_locks[i];
  372. spin_lock_bh(list_lock);
  373. hlist_for_each_entry_safe(tt_local_entry, node, node_tmp,
  374. head, hash_entry) {
  375. hlist_del_rcu(node);
  376. tt_local_entry_free_ref(tt_local_entry);
  377. }
  378. spin_unlock_bh(list_lock);
  379. }
  380. hash_destroy(hash);
  381. bat_priv->tt_local_hash = NULL;
  382. }
  383. static int tt_global_init(struct bat_priv *bat_priv)
  384. {
  385. if (bat_priv->tt_global_hash)
  386. return 1;
  387. bat_priv->tt_global_hash = hash_new(1024);
  388. if (!bat_priv->tt_global_hash)
  389. return 0;
  390. return 1;
  391. }
  392. static void tt_changes_list_free(struct bat_priv *bat_priv)
  393. {
  394. struct tt_change_node *entry, *safe;
  395. spin_lock_bh(&bat_priv->tt_changes_list_lock);
  396. list_for_each_entry_safe(entry, safe, &bat_priv->tt_changes_list,
  397. list) {
  398. list_del(&entry->list);
  399. kfree(entry);
  400. }
  401. atomic_set(&bat_priv->tt_local_changes, 0);
  402. spin_unlock_bh(&bat_priv->tt_changes_list_lock);
  403. }
/* caller must hold orig_node refcount */
/* Add or update the global tt entry for client @tt_addr announced by
 * @orig_node with table version @ttvn. Returns 1 on success, 0 on OOM. */
int tt_global_add(struct bat_priv *bat_priv, struct orig_node *orig_node,
		  const unsigned char *tt_addr, uint8_t ttvn, bool roaming,
		  bool wifi)
{
	struct tt_global_entry *tt_global_entry;
	struct orig_node *orig_node_tmp;
	int ret = 0;

	tt_global_entry = tt_global_hash_find(bat_priv, tt_addr);

	if (!tt_global_entry) {
		tt_global_entry =
			kmalloc(sizeof(*tt_global_entry),
				GFP_ATOMIC);
		if (!tt_global_entry)
			goto out;

		memcpy(tt_global_entry->addr, tt_addr, ETH_ALEN);
		/* Assign the new orig_node */
		atomic_inc(&orig_node->refcount);
		tt_global_entry->orig_node = orig_node;
		tt_global_entry->ttvn = ttvn;
		tt_global_entry->flags = NO_FLAGS;
		tt_global_entry->roam_at = 0;
		/* one reference for the hash, one for this function */
		atomic_set(&tt_global_entry->refcount, 2);

		hash_add(bat_priv->tt_global_hash, compare_gtt,
			 choose_orig, tt_global_entry,
			 &tt_global_entry->hash_entry);
		atomic_inc(&orig_node->tt_size);
	} else {
		/* known client: possibly re-bind it to a new originator,
		 * transferring the orig_node reference and tt_size count */
		if (tt_global_entry->orig_node != orig_node) {
			atomic_dec(&tt_global_entry->orig_node->tt_size);
			orig_node_tmp = tt_global_entry->orig_node;
			atomic_inc(&orig_node->refcount);
			tt_global_entry->orig_node = orig_node;
			orig_node_free_ref(orig_node_tmp);
			atomic_inc(&orig_node->tt_size);
		}
		tt_global_entry->ttvn = ttvn;
		tt_global_entry->flags = NO_FLAGS;
		tt_global_entry->roam_at = 0;
	}

	if (wifi)
		tt_global_entry->flags |= TT_CLIENT_WIFI;

	/* NOTE(review): this debug line is also emitted on the update path,
	 * not only when a new entry is created */
	bat_dbg(DBG_TT, bat_priv,
		"Creating new global tt entry: %pM (via %pM)\n",
		tt_global_entry->addr, orig_node->orig);

	/* remove address from local hash if present */
	tt_local_remove(bat_priv, tt_global_entry->addr,
			"global tt received", roaming);
	ret = 1;
out:
	if (tt_global_entry)
		tt_global_entry_free_ref(tt_global_entry);
	return ret;
}
  458. int tt_global_seq_print_text(struct seq_file *seq, void *offset)
  459. {
  460. struct net_device *net_dev = (struct net_device *)seq->private;
  461. struct bat_priv *bat_priv = netdev_priv(net_dev);
  462. struct hashtable_t *hash = bat_priv->tt_global_hash;
  463. struct tt_global_entry *tt_global_entry;
  464. struct hard_iface *primary_if;
  465. struct hlist_node *node;
  466. struct hlist_head *head;
  467. size_t buf_size, pos;
  468. char *buff;
  469. int i, ret = 0;
  470. primary_if = primary_if_get_selected(bat_priv);
  471. if (!primary_if) {
  472. ret = seq_printf(seq, "BATMAN mesh %s disabled - please "
  473. "specify interfaces to enable it\n",
  474. net_dev->name);
  475. goto out;
  476. }
  477. if (primary_if->if_status != IF_ACTIVE) {
  478. ret = seq_printf(seq, "BATMAN mesh %s disabled - "
  479. "primary interface not active\n",
  480. net_dev->name);
  481. goto out;
  482. }
  483. seq_printf(seq,
  484. "Globally announced TT entries received via the mesh %s\n",
  485. net_dev->name);
  486. seq_printf(seq, " %-13s %s %-15s %s\n",
  487. "Client", "(TTVN)", "Originator", "(Curr TTVN)");
  488. buf_size = 1;
  489. /* Estimate length for: " * xx:xx:xx:xx:xx:xx (ttvn) via
  490. * xx:xx:xx:xx:xx:xx (cur_ttvn)\n"*/
  491. for (i = 0; i < hash->size; i++) {
  492. head = &hash->table[i];
  493. rcu_read_lock();
  494. __hlist_for_each_rcu(node, head)
  495. buf_size += 59;
  496. rcu_read_unlock();
  497. }
  498. buff = kmalloc(buf_size, GFP_ATOMIC);
  499. if (!buff) {
  500. ret = -ENOMEM;
  501. goto out;
  502. }
  503. buff[0] = '\0';
  504. pos = 0;
  505. for (i = 0; i < hash->size; i++) {
  506. head = &hash->table[i];
  507. rcu_read_lock();
  508. hlist_for_each_entry_rcu(tt_global_entry, node,
  509. head, hash_entry) {
  510. pos += snprintf(buff + pos, 61,
  511. " * %pM (%3u) via %pM (%3u)\n",
  512. tt_global_entry->addr,
  513. tt_global_entry->ttvn,
  514. tt_global_entry->orig_node->orig,
  515. (uint8_t) atomic_read(
  516. &tt_global_entry->orig_node->
  517. last_ttvn));
  518. }
  519. rcu_read_unlock();
  520. }
  521. seq_printf(seq, "%s", buff);
  522. kfree(buff);
  523. out:
  524. if (primary_if)
  525. hardif_free_ref(primary_if);
  526. return ret;
  527. }
/* Remove @tt_global_entry from the global hash, decrement the originator's
 * advertised table size and drop one entry reference (conceptually the one
 * held by the hash). @message is only used for the debug log.
 * NOTE(review): hash_remove()'s return value is ignored - presumably it
 * returns the removed object; confirm it cannot fail here. */
static void _tt_global_del(struct bat_priv *bat_priv,
			   struct tt_global_entry *tt_global_entry,
			   const char *message)
{
	if (!tt_global_entry)
		goto out;

	bat_dbg(DBG_TT, bat_priv,
		"Deleting global tt entry %pM (via %pM): %s\n",
		tt_global_entry->addr, tt_global_entry->orig_node->orig,
		message);

	atomic_dec(&tt_global_entry->orig_node->tt_size);

	hash_remove(bat_priv->tt_global_hash, compare_gtt, choose_orig,
		    tt_global_entry->addr);
out:
	if (tt_global_entry)
		tt_global_entry_free_ref(tt_global_entry);
}
  545. void tt_global_del(struct bat_priv *bat_priv,
  546. struct orig_node *orig_node, const unsigned char *addr,
  547. const char *message, bool roaming)
  548. {
  549. struct tt_global_entry *tt_global_entry = NULL;
  550. tt_global_entry = tt_global_hash_find(bat_priv, addr);
  551. if (!tt_global_entry)
  552. goto out;
  553. if (tt_global_entry->orig_node == orig_node) {
  554. if (roaming) {
  555. tt_global_entry->flags |= TT_CLIENT_ROAM;
  556. tt_global_entry->roam_at = jiffies;
  557. goto out;
  558. }
  559. _tt_global_del(bat_priv, tt_global_entry, message);
  560. }
  561. out:
  562. if (tt_global_entry)
  563. tt_global_entry_free_ref(tt_global_entry);
  564. }
  565. void tt_global_del_orig(struct bat_priv *bat_priv,
  566. struct orig_node *orig_node, const char *message)
  567. {
  568. struct tt_global_entry *tt_global_entry;
  569. int i;
  570. struct hashtable_t *hash = bat_priv->tt_global_hash;
  571. struct hlist_node *node, *safe;
  572. struct hlist_head *head;
  573. spinlock_t *list_lock; /* protects write access to the hash lists */
  574. for (i = 0; i < hash->size; i++) {
  575. head = &hash->table[i];
  576. list_lock = &hash->list_locks[i];
  577. spin_lock_bh(list_lock);
  578. hlist_for_each_entry_safe(tt_global_entry, node, safe,
  579. head, hash_entry) {
  580. if (tt_global_entry->orig_node == orig_node) {
  581. bat_dbg(DBG_TT, bat_priv,
  582. "Deleting global tt entry %pM "
  583. "(via %pM): originator time out\n",
  584. tt_global_entry->addr,
  585. tt_global_entry->orig_node->orig);
  586. hlist_del_rcu(node);
  587. tt_global_entry_free_ref(tt_global_entry);
  588. }
  589. }
  590. spin_unlock_bh(list_lock);
  591. }
  592. atomic_set(&orig_node->tt_size, 0);
  593. }
  594. static void tt_global_roam_purge(struct bat_priv *bat_priv)
  595. {
  596. struct hashtable_t *hash = bat_priv->tt_global_hash;
  597. struct tt_global_entry *tt_global_entry;
  598. struct hlist_node *node, *node_tmp;
  599. struct hlist_head *head;
  600. spinlock_t *list_lock; /* protects write access to the hash lists */
  601. int i;
  602. for (i = 0; i < hash->size; i++) {
  603. head = &hash->table[i];
  604. list_lock = &hash->list_locks[i];
  605. spin_lock_bh(list_lock);
  606. hlist_for_each_entry_safe(tt_global_entry, node, node_tmp,
  607. head, hash_entry) {
  608. if (!(tt_global_entry->flags & TT_CLIENT_ROAM))
  609. continue;
  610. if (!is_out_of_time(tt_global_entry->roam_at,
  611. TT_CLIENT_ROAM_TIMEOUT * 1000))
  612. continue;
  613. bat_dbg(DBG_TT, bat_priv, "Deleting global "
  614. "tt entry (%pM): Roaming timeout\n",
  615. tt_global_entry->addr);
  616. atomic_dec(&tt_global_entry->orig_node->tt_size);
  617. hlist_del_rcu(node);
  618. tt_global_entry_free_ref(tt_global_entry);
  619. }
  620. spin_unlock_bh(list_lock);
  621. }
  622. }
  623. static void tt_global_table_free(struct bat_priv *bat_priv)
  624. {
  625. struct hashtable_t *hash;
  626. spinlock_t *list_lock; /* protects write access to the hash lists */
  627. struct tt_global_entry *tt_global_entry;
  628. struct hlist_node *node, *node_tmp;
  629. struct hlist_head *head;
  630. int i;
  631. if (!bat_priv->tt_global_hash)
  632. return;
  633. hash = bat_priv->tt_global_hash;
  634. for (i = 0; i < hash->size; i++) {
  635. head = &hash->table[i];
  636. list_lock = &hash->list_locks[i];
  637. spin_lock_bh(list_lock);
  638. hlist_for_each_entry_safe(tt_global_entry, node, node_tmp,
  639. head, hash_entry) {
  640. hlist_del_rcu(node);
  641. tt_global_entry_free_ref(tt_global_entry);
  642. }
  643. spin_unlock_bh(list_lock);
  644. }
  645. hash_destroy(hash);
  646. bat_priv->tt_global_hash = NULL;
  647. }
  648. struct orig_node *transtable_search(struct bat_priv *bat_priv,
  649. const uint8_t *addr)
  650. {
  651. struct tt_global_entry *tt_global_entry;
  652. struct orig_node *orig_node = NULL;
  653. tt_global_entry = tt_global_hash_find(bat_priv, addr);
  654. if (!tt_global_entry)
  655. goto out;
  656. if (!atomic_inc_not_zero(&tt_global_entry->orig_node->refcount))
  657. goto free_tt;
  658. /* A global client marked as PENDING has already moved from that
  659. * originator */
  660. if (tt_global_entry->flags & TT_CLIENT_PENDING)
  661. goto free_tt;
  662. orig_node = tt_global_entry->orig_node;
  663. free_tt:
  664. tt_global_entry_free_ref(tt_global_entry);
  665. out:
  666. return orig_node;
  667. }
/* Calculates the checksum of the local table of a given orig_node */
uint16_t tt_global_crc(struct bat_priv *bat_priv, struct orig_node *orig_node)
{
	uint16_t total = 0, total_one;
	struct hashtable_t *hash = bat_priv->tt_global_hash;
	struct tt_global_entry *tt_global_entry;
	struct hlist_node *node;
	struct hlist_head *head;
	int i, j;

	for (i = 0; i < hash->size; i++) {
		head = &hash->table[i];

		rcu_read_lock();
		hlist_for_each_entry_rcu(tt_global_entry, node,
					 head, hash_entry) {
			/* NOTE(review): compare_eth() compares the first
			 * ETH_ALEN bytes of the two orig_node structs, not
			 * the pointers - presumably valid because the mac
			 * address is the struct's first member; confirm */
			if (compare_eth(tt_global_entry->orig_node,
					orig_node)) {
				/* Roaming clients are in the global table for
				 * consistency only. They don't have to be
				 * taken into account while computing the
				 * global crc */
				if (tt_global_entry->flags & TT_CLIENT_ROAM)
					continue;
				total_one = 0;
				/* CRC16 of the client mac, XOR-folded so the
				 * result is independent of iteration order */
				for (j = 0; j < ETH_ALEN; j++)
					total_one = crc16_byte(total_one,
						tt_global_entry->addr[j]);
				total ^= total_one;
			}
		}
		rcu_read_unlock();
	}

	return total;
}
  701. /* Calculates the checksum of the local table */
  702. uint16_t tt_local_crc(struct bat_priv *bat_priv)
  703. {
  704. uint16_t total = 0, total_one;
  705. struct hashtable_t *hash = bat_priv->tt_local_hash;
  706. struct tt_local_entry *tt_local_entry;
  707. struct hlist_node *node;
  708. struct hlist_head *head;
  709. int i, j;
  710. for (i = 0; i < hash->size; i++) {
  711. head = &hash->table[i];
  712. rcu_read_lock();
  713. hlist_for_each_entry_rcu(tt_local_entry, node,
  714. head, hash_entry) {
  715. /* not yet committed clients have not to be taken into
  716. * account while computing the CRC */
  717. if (tt_local_entry->flags & TT_CLIENT_NEW)
  718. continue;
  719. total_one = 0;
  720. for (j = 0; j < ETH_ALEN; j++)
  721. total_one = crc16_byte(total_one,
  722. tt_local_entry->addr[j]);
  723. total ^= total_one;
  724. }
  725. rcu_read_unlock();
  726. }
  727. return total;
  728. }
  729. static void tt_req_list_free(struct bat_priv *bat_priv)
  730. {
  731. struct tt_req_node *node, *safe;
  732. spin_lock_bh(&bat_priv->tt_req_list_lock);
  733. list_for_each_entry_safe(node, safe, &bat_priv->tt_req_list, list) {
  734. list_del(&node->list);
  735. kfree(node);
  736. }
  737. spin_unlock_bh(&bat_priv->tt_req_list_lock);
  738. }
  739. void tt_save_orig_buffer(struct bat_priv *bat_priv, struct orig_node *orig_node,
  740. const unsigned char *tt_buff, uint8_t tt_num_changes)
  741. {
  742. uint16_t tt_buff_len = tt_len(tt_num_changes);
  743. /* Replace the old buffer only if I received something in the
  744. * last OGM (the OGM could carry no changes) */
  745. spin_lock_bh(&orig_node->tt_buff_lock);
  746. if (tt_buff_len > 0) {
  747. kfree(orig_node->tt_buff);
  748. orig_node->tt_buff_len = 0;
  749. orig_node->tt_buff = kmalloc(tt_buff_len, GFP_ATOMIC);
  750. if (orig_node->tt_buff) {
  751. memcpy(orig_node->tt_buff, tt_buff, tt_buff_len);
  752. orig_node->tt_buff_len = tt_buff_len;
  753. }
  754. }
  755. spin_unlock_bh(&orig_node->tt_buff_lock);
  756. }
  757. static void tt_req_purge(struct bat_priv *bat_priv)
  758. {
  759. struct tt_req_node *node, *safe;
  760. spin_lock_bh(&bat_priv->tt_req_list_lock);
  761. list_for_each_entry_safe(node, safe, &bat_priv->tt_req_list, list) {
  762. if (is_out_of_time(node->issued_at,
  763. TT_REQUEST_TIMEOUT * 1000)) {
  764. list_del(&node->list);
  765. kfree(node);
  766. }
  767. }
  768. spin_unlock_bh(&bat_priv->tt_req_list_lock);
  769. }
  770. /* returns the pointer to the new tt_req_node struct if no request
  771. * has already been issued for this orig_node, NULL otherwise */
  772. static struct tt_req_node *new_tt_req_node(struct bat_priv *bat_priv,
  773. struct orig_node *orig_node)
  774. {
  775. struct tt_req_node *tt_req_node_tmp, *tt_req_node = NULL;
  776. spin_lock_bh(&bat_priv->tt_req_list_lock);
  777. list_for_each_entry(tt_req_node_tmp, &bat_priv->tt_req_list, list) {
  778. if (compare_eth(tt_req_node_tmp, orig_node) &&
  779. !is_out_of_time(tt_req_node_tmp->issued_at,
  780. TT_REQUEST_TIMEOUT * 1000))
  781. goto unlock;
  782. }
  783. tt_req_node = kmalloc(sizeof(*tt_req_node), GFP_ATOMIC);
  784. if (!tt_req_node)
  785. goto unlock;
  786. memcpy(tt_req_node->addr, orig_node->orig, ETH_ALEN);
  787. tt_req_node->issued_at = jiffies;
  788. list_add(&tt_req_node->list, &bat_priv->tt_req_list);
  789. unlock:
  790. spin_unlock_bh(&bat_priv->tt_req_list_lock);
  791. return tt_req_node;
  792. }
  793. /* data_ptr is useless here, but has to be kept to respect the prototype */
  794. static int tt_local_valid_entry(const void *entry_ptr, const void *data_ptr)
  795. {
  796. const struct tt_local_entry *tt_local_entry = entry_ptr;
  797. if (tt_local_entry->flags & TT_CLIENT_NEW)
  798. return 0;
  799. return 1;
  800. }
  801. static int tt_global_valid_entry(const void *entry_ptr, const void *data_ptr)
  802. {
  803. const struct tt_global_entry *tt_global_entry = entry_ptr;
  804. const struct orig_node *orig_node = data_ptr;
  805. if (tt_global_entry->flags & TT_CLIENT_ROAM)
  806. return 0;
  807. return (tt_global_entry->orig_node == orig_node);
  808. }
/* Build a TT_RESPONSE skb advertising table version 'ttvn' with up to
 * tt_len bytes worth of tt_change records taken from 'hash'.
 *
 * Only entries for which valid_cb(entry, cb_data) returns non-zero are
 * included (a NULL valid_cb accepts every entry).  tt_len is clamped so
 * header + payload fit into the soft interface MTU.
 *
 * Returns the skb (ownership passes to the caller) or NULL on
 * allocation failure.
 *
 * NOTE(review): entries are walked through a tt_local_entry pointer even
 * when 'hash' is the global table - this presumably relies on
 * tt_local_entry and tt_global_entry sharing the layout of 'addr' and
 * 'hash_entry'; confirm against the struct definitions. */
static struct sk_buff *tt_response_fill_table(uint16_t tt_len, uint8_t ttvn,
					      struct hashtable_t *hash,
					      struct hard_iface *primary_if,
					      int (*valid_cb)(const void *,
							      const void *),
					      void *cb_data)
{
	struct tt_local_entry *tt_local_entry;
	struct tt_query_packet *tt_response;
	struct tt_change *tt_change;
	struct hlist_node *node;
	struct hlist_head *head;
	struct sk_buff *skb = NULL;
	uint16_t tt_tot, tt_count;
	ssize_t tt_query_size = sizeof(struct tt_query_packet);
	int i;

	/* never build a packet larger than one MTU; round the payload
	 * down to a whole number of tt_change records */
	if (tt_query_size + tt_len > primary_if->soft_iface->mtu) {
		tt_len = primary_if->soft_iface->mtu - tt_query_size;
		tt_len -= tt_len % sizeof(struct tt_change);
	}
	tt_tot = tt_len / sizeof(struct tt_change);

	skb = dev_alloc_skb(tt_query_size + tt_len + ETH_HLEN);
	if (!skb)
		goto out;

	skb_reserve(skb, ETH_HLEN);
	tt_response = (struct tt_query_packet *)skb_put(skb,
						tt_query_size + tt_len);
	tt_response->ttvn = ttvn;
	tt_response->tt_data = htons(tt_tot);

	/* change records start right after the query header */
	tt_change = (struct tt_change *)(skb->data + tt_query_size);
	tt_count = 0;

	rcu_read_lock();
	for (i = 0; i < hash->size; i++) {
		head = &hash->table[i];

		hlist_for_each_entry_rcu(tt_local_entry, node,
					 head, hash_entry) {
			/* stop once the reserved payload is full */
			if (tt_count == tt_tot)
				break;

			if ((valid_cb) && (!valid_cb(tt_local_entry, cb_data)))
				continue;

			memcpy(tt_change->addr, tt_local_entry->addr, ETH_ALEN);
			tt_change->flags = NO_FLAGS;

			tt_count++;
			tt_change++;
		}
	}
	rcu_read_unlock();
out:
	return skb;
}
/* Send a TT_REQUEST to dst_orig_node asking for table version 'ttvn'
 * with checksum 'tt_crc' (the full table when 'full_table' is set).
 *
 * Returns 0 when the request was handed to the lower layer, 1 on any
 * failure (no primary interface, an identical request already pending,
 * allocation failure or no route). */
int send_tt_request(struct bat_priv *bat_priv, struct orig_node *dst_orig_node,
		    uint8_t ttvn, uint16_t tt_crc, bool full_table)
{
	struct sk_buff *skb = NULL;
	struct tt_query_packet *tt_request;
	struct neigh_node *neigh_node = NULL;
	struct hard_iface *primary_if;
	struct tt_req_node *tt_req_node = NULL;
	int ret = 1;

	primary_if = primary_if_get_selected(bat_priv);
	if (!primary_if)
		goto out;

	/* The new tt_req will be issued only if I'm not waiting for a
	 * reply from the same orig_node yet */
	tt_req_node = new_tt_req_node(bat_priv, dst_orig_node);
	if (!tt_req_node)
		goto out;

	skb = dev_alloc_skb(sizeof(struct tt_query_packet) + ETH_HLEN);
	if (!skb)
		goto out;

	skb_reserve(skb, ETH_HLEN);

	tt_request = (struct tt_query_packet *)skb_put(skb,
				sizeof(struct tt_query_packet));

	tt_request->packet_type = BAT_TT_QUERY;
	tt_request->version = COMPAT_VERSION;
	memcpy(tt_request->src, primary_if->net_dev->dev_addr, ETH_ALEN);
	memcpy(tt_request->dst, dst_orig_node->orig, ETH_ALEN);
	tt_request->ttl = TTL;
	tt_request->ttvn = ttvn;
	/* NOTE(review): tt_data carries the raw crc here while responses
	 * use htons() on a count - the receive side compares the raw crc
	 * as well, so the nodes agree; verify byte-order handling in the
	 * receive path */
	tt_request->tt_data = tt_crc;
	tt_request->flags = TT_REQUEST;

	if (full_table)
		tt_request->flags |= TT_FULL_TABLE;

	neigh_node = orig_node_get_router(dst_orig_node);
	if (!neigh_node)
		goto out;

	bat_dbg(DBG_TT, bat_priv, "Sending TT_REQUEST to %pM via %pM "
		"[%c]\n", dst_orig_node->orig, neigh_node->addr,
		(full_table ? 'F' : '.'));

	send_skb_packet(skb, neigh_node->if_incoming, neigh_node->addr);
	ret = 0;

out:
	if (neigh_node)
		neigh_node_free_ref(neigh_node);
	if (primary_if)
		hardif_free_ref(primary_if);
	/* on failure the skb was never handed to the lower layer */
	if (ret)
		kfree_skb(skb);
	/* a failed request must not block future ones: drop its node */
	if (ret && tt_req_node) {
		spin_lock_bh(&bat_priv->tt_req_list_lock);
		list_del(&tt_req_node->list);
		spin_unlock_bh(&bat_priv->tt_req_list_lock);
		kfree(tt_req_node);
	}
	return ret;
}
/* Answer a TT_REQUEST on behalf of another originator (this node acts
 * as a proxy when it already holds the requested table state).
 *
 * Returns true when a TT_RESPONSE was sent; false otherwise, in which
 * case the caller keeps routing the request towards its real target. */
static bool send_other_tt_response(struct bat_priv *bat_priv,
				   struct tt_query_packet *tt_request)
{
	struct orig_node *req_dst_orig_node = NULL, *res_dst_orig_node = NULL;
	struct neigh_node *neigh_node = NULL;
	struct hard_iface *primary_if = NULL;
	uint8_t orig_ttvn, req_ttvn, ttvn;
	int ret = false;
	unsigned char *tt_buff;
	bool full_table;
	uint16_t tt_len, tt_tot;
	struct sk_buff *skb = NULL;
	struct tt_query_packet *tt_response;

	bat_dbg(DBG_TT, bat_priv,
		"Received TT_REQUEST from %pM for "
		"ttvn: %u (%pM) [%c]\n", tt_request->src,
		tt_request->ttvn, tt_request->dst,
		(tt_request->flags & TT_FULL_TABLE ? 'F' : '.'));

	/* Let's get the orig node of the REAL destination */
	req_dst_orig_node = get_orig_node(bat_priv, tt_request->dst);
	if (!req_dst_orig_node)
		goto out;

	/* ...and of the requester, to route the answer back to it */
	res_dst_orig_node = get_orig_node(bat_priv, tt_request->src);
	if (!res_dst_orig_node)
		goto out;

	neigh_node = orig_node_get_router(res_dst_orig_node);
	if (!neigh_node)
		goto out;

	primary_if = primary_if_get_selected(bat_priv);
	if (!primary_if)
		goto out;

	orig_ttvn = (uint8_t)atomic_read(&req_dst_orig_node->last_ttvn);
	req_ttvn = tt_request->ttvn;

	/* I don't have the requested data */
	if (orig_ttvn != req_ttvn ||
	    tt_request->tt_data != req_dst_orig_node->tt_crc)
		goto out;

	/* If the full table has been explicitly requested */
	if (tt_request->flags & TT_FULL_TABLE ||
	    !req_dst_orig_node->tt_buff)
		full_table = true;
	else
		full_table = false;

	/* In this version, fragmentation is not implemented, then
	 * I'll send only one packet with as much TT entries as I can */
	if (!full_table) {
		/* diff answer: replay the cached raw change buffer of the
		 * requested originator */
		spin_lock_bh(&req_dst_orig_node->tt_buff_lock);
		tt_len = req_dst_orig_node->tt_buff_len;
		tt_tot = tt_len / sizeof(struct tt_change);

		skb = dev_alloc_skb(sizeof(struct tt_query_packet) +
				    tt_len + ETH_HLEN);
		if (!skb)
			goto unlock;

		skb_reserve(skb, ETH_HLEN);
		tt_response = (struct tt_query_packet *)skb_put(skb,
				sizeof(struct tt_query_packet) + tt_len);
		tt_response->ttvn = req_ttvn;
		tt_response->tt_data = htons(tt_tot);

		tt_buff = skb->data + sizeof(struct tt_query_packet);
		/* Copy the last orig_node's OGM buffer */
		memcpy(tt_buff, req_dst_orig_node->tt_buff,
		       req_dst_orig_node->tt_buff_len);

		spin_unlock_bh(&req_dst_orig_node->tt_buff_lock);
	} else {
		/* full answer: rebuild the table from the global hash,
		 * filtered to entries announced by that originator */
		tt_len = (uint16_t)atomic_read(&req_dst_orig_node->tt_size) *
						sizeof(struct tt_change);
		ttvn = (uint8_t)atomic_read(&req_dst_orig_node->last_ttvn);

		skb = tt_response_fill_table(tt_len, ttvn,
					     bat_priv->tt_global_hash,
					     primary_if, tt_global_valid_entry,
					     req_dst_orig_node);
		if (!skb)
			goto out;

		tt_response = (struct tt_query_packet *)skb->data;
	}

	tt_response->packet_type = BAT_TT_QUERY;
	tt_response->version = COMPAT_VERSION;
	tt_response->ttl = TTL;
	memcpy(tt_response->src, req_dst_orig_node->orig, ETH_ALEN);
	memcpy(tt_response->dst, tt_request->src, ETH_ALEN);
	tt_response->flags = TT_RESPONSE;

	if (full_table)
		tt_response->flags |= TT_FULL_TABLE;

	bat_dbg(DBG_TT, bat_priv,
		"Sending TT_RESPONSE %pM via %pM for %pM (ttvn: %u)\n",
		res_dst_orig_node->orig, neigh_node->addr,
		req_dst_orig_node->orig, req_ttvn);

	send_skb_packet(skb, neigh_node->if_incoming, neigh_node->addr);
	ret = true;
	goto out;

unlock:
	/* reached only from the allocation failure inside the diff path */
	spin_unlock_bh(&req_dst_orig_node->tt_buff_lock);

out:
	if (res_dst_orig_node)
		orig_node_free_ref(res_dst_orig_node);
	if (req_dst_orig_node)
		orig_node_free_ref(req_dst_orig_node);
	if (neigh_node)
		neigh_node_free_ref(neigh_node);
	if (primary_if)
		hardif_free_ref(primary_if);
	/* the skb is only consumed by send_skb_packet() on success */
	if (!ret)
		kfree_skb(skb);
	return ret;
}
/* Answer a TT_REQUEST that targets one of our own addresses with the
 * local translation table (cached diff buffer or full table).
 *
 * Always returns true: the request was for us, so the caller must never
 * re-route it, regardless of whether an answer could be sent. */
static bool send_my_tt_response(struct bat_priv *bat_priv,
				struct tt_query_packet *tt_request)
{
	struct orig_node *orig_node = NULL;
	struct neigh_node *neigh_node = NULL;
	struct hard_iface *primary_if = NULL;
	uint8_t my_ttvn, req_ttvn, ttvn;
	int ret = false;
	unsigned char *tt_buff;
	bool full_table;
	uint16_t tt_len, tt_tot;
	struct sk_buff *skb = NULL;
	struct tt_query_packet *tt_response;

	bat_dbg(DBG_TT, bat_priv,
		"Received TT_REQUEST from %pM for "
		"ttvn: %u (me) [%c]\n", tt_request->src,
		tt_request->ttvn,
		(tt_request->flags & TT_FULL_TABLE ? 'F' : '.'));

	my_ttvn = (uint8_t)atomic_read(&bat_priv->ttvn);
	req_ttvn = tt_request->ttvn;

	orig_node = get_orig_node(bat_priv, tt_request->src);
	if (!orig_node)
		goto out;

	neigh_node = orig_node_get_router(orig_node);
	if (!neigh_node)
		goto out;

	primary_if = primary_if_get_selected(bat_priv);
	if (!primary_if)
		goto out;

	/* If the full table has been explicitly requested or the gap
	 * is too big send the whole local translation table */
	if (tt_request->flags & TT_FULL_TABLE || my_ttvn != req_ttvn ||
	    !bat_priv->tt_buff)
		full_table = true;
	else
		full_table = false;

	/* In this version, fragmentation is not implemented, then
	 * I'll send only one packet with as much TT entries as I can */
	if (!full_table) {
		/* diff answer: reuse the buffer built at the last commit */
		spin_lock_bh(&bat_priv->tt_buff_lock);
		tt_len = bat_priv->tt_buff_len;
		tt_tot = tt_len / sizeof(struct tt_change);

		skb = dev_alloc_skb(sizeof(struct tt_query_packet) +
				    tt_len + ETH_HLEN);
		if (!skb)
			goto unlock;

		skb_reserve(skb, ETH_HLEN);
		tt_response = (struct tt_query_packet *)skb_put(skb,
				sizeof(struct tt_query_packet) + tt_len);
		tt_response->ttvn = req_ttvn;
		tt_response->tt_data = htons(tt_tot);

		tt_buff = skb->data + sizeof(struct tt_query_packet);
		memcpy(tt_buff, bat_priv->tt_buff,
		       bat_priv->tt_buff_len);
		spin_unlock_bh(&bat_priv->tt_buff_lock);
	} else {
		/* full answer: dump the committed local entries */
		tt_len = (uint16_t)atomic_read(&bat_priv->num_local_tt) *
						sizeof(struct tt_change);
		ttvn = (uint8_t)atomic_read(&bat_priv->ttvn);

		skb = tt_response_fill_table(tt_len, ttvn,
					     bat_priv->tt_local_hash,
					     primary_if, tt_local_valid_entry,
					     NULL);
		if (!skb)
			goto out;

		tt_response = (struct tt_query_packet *)skb->data;
	}

	tt_response->packet_type = BAT_TT_QUERY;
	tt_response->version = COMPAT_VERSION;
	tt_response->ttl = TTL;
	memcpy(tt_response->src, primary_if->net_dev->dev_addr, ETH_ALEN);
	memcpy(tt_response->dst, tt_request->src, ETH_ALEN);
	tt_response->flags = TT_RESPONSE;

	if (full_table)
		tt_response->flags |= TT_FULL_TABLE;

	bat_dbg(DBG_TT, bat_priv,
		"Sending TT_RESPONSE to %pM via %pM [%c]\n",
		orig_node->orig, neigh_node->addr,
		(tt_response->flags & TT_FULL_TABLE ? 'F' : '.'));

	send_skb_packet(skb, neigh_node->if_incoming, neigh_node->addr);
	ret = true;
	goto out;

unlock:
	/* reached only from the allocation failure inside the diff path */
	spin_unlock_bh(&bat_priv->tt_buff_lock);
out:
	if (orig_node)
		orig_node_free_ref(orig_node);
	if (neigh_node)
		neigh_node_free_ref(neigh_node);
	if (primary_if)
		hardif_free_ref(primary_if);
	if (!ret)
		kfree_skb(skb);
	/* This packet was for me, so it doesn't need to be re-routed */
	return true;
}
  1116. bool send_tt_response(struct bat_priv *bat_priv,
  1117. struct tt_query_packet *tt_request)
  1118. {
  1119. if (is_my_mac(tt_request->dst))
  1120. return send_my_tt_response(bat_priv, tt_request);
  1121. else
  1122. return send_other_tt_response(bat_priv, tt_request);
  1123. }
  1124. static void _tt_update_changes(struct bat_priv *bat_priv,
  1125. struct orig_node *orig_node,
  1126. struct tt_change *tt_change,
  1127. uint16_t tt_num_changes, uint8_t ttvn)
  1128. {
  1129. int i;
  1130. for (i = 0; i < tt_num_changes; i++) {
  1131. if ((tt_change + i)->flags & TT_CLIENT_DEL)
  1132. tt_global_del(bat_priv, orig_node,
  1133. (tt_change + i)->addr,
  1134. "tt removed by changes",
  1135. (tt_change + i)->flags & TT_CLIENT_ROAM);
  1136. else
  1137. if (!tt_global_add(bat_priv, orig_node,
  1138. (tt_change + i)->addr, ttvn, false,
  1139. (tt_change + i)->flags &
  1140. TT_CLIENT_WIFI))
  1141. /* In case of problem while storing a
  1142. * global_entry, we stop the updating
  1143. * procedure without committing the
  1144. * ttvn change. This will avoid to send
  1145. * corrupted data on tt_request
  1146. */
  1147. return;
  1148. }
  1149. }
/* Replace the whole set of global entries announced by the sender of a
 * full TT_RESPONSE with the table carried in the packet. */
static void tt_fill_gtable(struct bat_priv *bat_priv,
			   struct tt_query_packet *tt_response)
{
	struct orig_node *orig_node = NULL;

	orig_node = orig_hash_find(bat_priv, tt_response->src);
	if (!orig_node)
		goto out;

	/* Purge the old table first.. */
	tt_global_del_orig(bat_priv, orig_node, "Received full table");

	/* change records start right after the query header.
	 * NOTE(review): tt_data is used directly as an entry count -
	 * presumably the receive path already converted it to host order;
	 * verify against the caller */
	_tt_update_changes(bat_priv, orig_node,
			   (struct tt_change *)(tt_response + 1),
			   tt_response->tt_data, tt_response->ttvn);

	/* the cached diff buffer is stale now that the full table won */
	spin_lock_bh(&orig_node->tt_buff_lock);
	kfree(orig_node->tt_buff);
	orig_node->tt_buff_len = 0;
	orig_node->tt_buff = NULL;
	spin_unlock_bh(&orig_node->tt_buff_lock);

	/* record the table version we are now in sync with */
	atomic_set(&orig_node->last_ttvn, tt_response->ttvn);
out:
	if (orig_node)
		orig_node_free_ref(orig_node);
}
/* Apply a batch of TT changes received for orig_node, cache the raw
 * change buffer (so this node can answer TT_REQUESTs on its behalf) and
 * commit the announced table version. */
void tt_update_changes(struct bat_priv *bat_priv, struct orig_node *orig_node,
		       uint16_t tt_num_changes, uint8_t ttvn,
		       struct tt_change *tt_change)
{
	_tt_update_changes(bat_priv, orig_node, tt_change, tt_num_changes,
			   ttvn);

	tt_save_orig_buffer(bat_priv, orig_node, (unsigned char *)tt_change,
			    tt_num_changes);
	atomic_set(&orig_node->last_ttvn, ttvn);
}
  1182. bool is_my_client(struct bat_priv *bat_priv, const uint8_t *addr)
  1183. {
  1184. struct tt_local_entry *tt_local_entry = NULL;
  1185. bool ret = false;
  1186. tt_local_entry = tt_local_hash_find(bat_priv, addr);
  1187. if (!tt_local_entry)
  1188. goto out;
  1189. /* Check if the client has been logically deleted (but is kept for
  1190. * consistency purpose) */
  1191. if (tt_local_entry->flags & TT_CLIENT_PENDING)
  1192. goto out;
  1193. ret = true;
  1194. out:
  1195. if (tt_local_entry)
  1196. tt_local_entry_free_ref(tt_local_entry);
  1197. return ret;
  1198. }
/* Process a TT_RESPONSE addressed to this node: merge the carried table
 * (full dump or diff), drop the matching pending request and refresh the
 * CRC tracked for the sender. */
void handle_tt_response(struct bat_priv *bat_priv,
			struct tt_query_packet *tt_response)
{
	struct tt_req_node *node, *safe;
	struct orig_node *orig_node = NULL;

	bat_dbg(DBG_TT, bat_priv, "Received TT_RESPONSE from %pM for "
		"ttvn %d t_size: %d [%c]\n",
		tt_response->src, tt_response->ttvn,
		tt_response->tt_data,
		(tt_response->flags & TT_FULL_TABLE ? 'F' : '.'));

	orig_node = orig_hash_find(bat_priv, tt_response->src);
	if (!orig_node)
		goto out;

	if (tt_response->flags & TT_FULL_TABLE)
		tt_fill_gtable(bat_priv, tt_response);
	else
		tt_update_changes(bat_priv, orig_node, tt_response->tt_data,
				  tt_response->ttvn,
				  (struct tt_change *)(tt_response + 1));

	/* Delete the tt_req_node from pending tt_requests list */
	spin_lock_bh(&bat_priv->tt_req_list_lock);
	list_for_each_entry_safe(node, safe, &bat_priv->tt_req_list, list) {
		if (!compare_eth(node->addr, tt_response->src))
			continue;
		list_del(&node->list);
		kfree(node);
	}
	spin_unlock_bh(&bat_priv->tt_req_list_lock);

	/* Recalculate the CRC for this orig_node and store it */
	orig_node->tt_crc = tt_global_crc(bat_priv, orig_node);
	/* Roaming phase is over: tables are in sync again. I can
	 * unset the flag */
	orig_node->tt_poss_change = false;
out:
	if (orig_node)
		orig_node_free_ref(orig_node);
}
  1236. int tt_init(struct bat_priv *bat_priv)
  1237. {
  1238. if (!tt_local_init(bat_priv))
  1239. return 0;
  1240. if (!tt_global_init(bat_priv))
  1241. return 0;
  1242. tt_start_timer(bat_priv);
  1243. return 1;
  1244. }
  1245. static void tt_roam_list_free(struct bat_priv *bat_priv)
  1246. {
  1247. struct tt_roam_node *node, *safe;
  1248. spin_lock_bh(&bat_priv->tt_roam_list_lock);
  1249. list_for_each_entry_safe(node, safe, &bat_priv->tt_roam_list, list) {
  1250. list_del(&node->list);
  1251. kfree(node);
  1252. }
  1253. spin_unlock_bh(&bat_priv->tt_roam_list_lock);
  1254. }
  1255. static void tt_roam_purge(struct bat_priv *bat_priv)
  1256. {
  1257. struct tt_roam_node *node, *safe;
  1258. spin_lock_bh(&bat_priv->tt_roam_list_lock);
  1259. list_for_each_entry_safe(node, safe, &bat_priv->tt_roam_list, list) {
  1260. if (!is_out_of_time(node->first_time,
  1261. ROAMING_MAX_TIME * 1000))
  1262. continue;
  1263. list_del(&node->list);
  1264. kfree(node);
  1265. }
  1266. spin_unlock_bh(&bat_priv->tt_roam_list_lock);
  1267. }
/* This function checks whether the client already reached the
 * maximum number of possible roaming phases. In this case the ROAMING_ADV
 * will not be sent.
 *
 * returns true if the ROAMING_ADV can be sent, false otherwise */
static bool tt_check_roam_count(struct bat_priv *bat_priv,
				uint8_t *client)
{
	struct tt_roam_node *tt_roam_node;
	bool ret = false;

	spin_lock_bh(&bat_priv->tt_roam_list_lock);
	/* The new tt_req will be issued only if I'm not waiting for a
	 * reply from the same orig_node yet */
	list_for_each_entry(tt_roam_node, &bat_priv->tt_roam_list, list) {
		if (!compare_eth(tt_roam_node->addr, client))
			continue;

		/* expired entries do not count against the client; a fresh
		 * node will be allocated below */
		if (is_out_of_time(tt_roam_node->first_time,
				   ROAMING_MAX_TIME * 1000))
			continue;

		/* each allowed roaming phase consumes one counter tick */
		if (!atomic_dec_not_zero(&tt_roam_node->counter))
			/* Sorry, you roamed too many times! */
			goto unlock;
		ret = true;
		break;
	}

	/* first recent roaming event for this client: start tracking it */
	if (!ret) {
		tt_roam_node = kmalloc(sizeof(*tt_roam_node), GFP_ATOMIC);
		if (!tt_roam_node)
			goto unlock;

		tt_roam_node->first_time = jiffies;
		/* minus one: this very event consumes the first tick */
		atomic_set(&tt_roam_node->counter, ROAMING_MAX_COUNT - 1);
		memcpy(tt_roam_node->addr, client, ETH_ALEN);
		list_add(&tt_roam_node->list, &bat_priv->tt_roam_list);
		ret = true;
	}

unlock:
	spin_unlock_bh(&bat_priv->tt_roam_list_lock);
	return ret;
}
/* Advertise to orig_node (the client's previous attachment point) that
 * 'client' has roamed to this node, so traffic can be rerouted without
 * waiting for the next OGM/TT cycle. */
void send_roam_adv(struct bat_priv *bat_priv, uint8_t *client,
		   struct orig_node *orig_node)
{
	struct neigh_node *neigh_node = NULL;
	struct sk_buff *skb = NULL;
	struct roam_adv_packet *roam_adv_packet;
	int ret = 1;
	struct hard_iface *primary_if;

	/* before going on we have to check whether the client has
	 * already roamed to us too many times */
	if (!tt_check_roam_count(bat_priv, client))
		goto out;

	skb = dev_alloc_skb(sizeof(struct roam_adv_packet) + ETH_HLEN);
	if (!skb)
		goto out;

	skb_reserve(skb, ETH_HLEN);

	roam_adv_packet = (struct roam_adv_packet *)skb_put(skb,
					sizeof(struct roam_adv_packet));

	roam_adv_packet->packet_type = BAT_ROAM_ADV;
	roam_adv_packet->version = COMPAT_VERSION;
	roam_adv_packet->ttl = TTL;
	/* the primary interface reference is only needed to stamp our
	 * source address; it is released right after the copy */
	primary_if = primary_if_get_selected(bat_priv);
	if (!primary_if)
		goto out;
	memcpy(roam_adv_packet->src, primary_if->net_dev->dev_addr, ETH_ALEN);
	hardif_free_ref(primary_if);
	memcpy(roam_adv_packet->dst, orig_node->orig, ETH_ALEN);
	memcpy(roam_adv_packet->client, client, ETH_ALEN);

	neigh_node = orig_node_get_router(orig_node);
	if (!neigh_node)
		goto out;

	bat_dbg(DBG_TT, bat_priv,
		"Sending ROAMING_ADV to %pM (client %pM) via %pM\n",
		orig_node->orig, client, neigh_node->addr);

	send_skb_packet(skb, neigh_node->if_incoming, neigh_node->addr);
	ret = 0;

out:
	if (neigh_node)
		neigh_node_free_ref(neigh_node);
	/* skb may still be NULL here; kfree_skb(NULL) is a no-op */
	if (ret)
		kfree_skb(skb);
	return;
}
  1350. static void tt_purge(struct work_struct *work)
  1351. {
  1352. struct delayed_work *delayed_work =
  1353. container_of(work, struct delayed_work, work);
  1354. struct bat_priv *bat_priv =
  1355. container_of(delayed_work, struct bat_priv, tt_work);
  1356. tt_local_purge(bat_priv);
  1357. tt_global_roam_purge(bat_priv);
  1358. tt_req_purge(bat_priv);
  1359. tt_roam_purge(bat_priv);
  1360. tt_start_timer(bat_priv);
  1361. }
/* Tear down all translation-table state.  The delayed worker is
 * cancelled (and waited for) first so nothing touches the tables while
 * they are being freed. */
void tt_free(struct bat_priv *bat_priv)
{
	cancel_delayed_work_sync(&bat_priv->tt_work);

	tt_local_table_free(bat_priv);
	tt_global_table_free(bat_priv);
	tt_req_list_free(bat_priv);
	tt_changes_list_free(bat_priv);
	tt_roam_list_free(bat_priv);

	kfree(bat_priv->tt_buff);
}
  1372. /* This function will reset the specified flags from all the entries in
  1373. * the given hash table and will increment num_local_tt for each involved
  1374. * entry */
  1375. static void tt_local_reset_flags(struct bat_priv *bat_priv, uint16_t flags)
  1376. {
  1377. int i;
  1378. struct hashtable_t *hash = bat_priv->tt_local_hash;
  1379. struct hlist_head *head;
  1380. struct hlist_node *node;
  1381. struct tt_local_entry *tt_local_entry;
  1382. if (!hash)
  1383. return;
  1384. for (i = 0; i < hash->size; i++) {
  1385. head = &hash->table[i];
  1386. rcu_read_lock();
  1387. hlist_for_each_entry_rcu(tt_local_entry, node,
  1388. head, hash_entry) {
  1389. tt_local_entry->flags &= ~flags;
  1390. atomic_inc(&bat_priv->num_local_tt);
  1391. }
  1392. rcu_read_unlock();
  1393. }
  1394. }
/* Purge out all the tt local entries marked with TT_CLIENT_PENDING */
static void tt_local_purge_pending_clients(struct bat_priv *bat_priv)
{
	struct hashtable_t *hash = bat_priv->tt_local_hash;
	struct tt_local_entry *tt_local_entry;
	struct hlist_node *node, *node_tmp;
	struct hlist_head *head;
	spinlock_t *list_lock; /* protects write access to the hash lists */
	int i;

	if (!hash)
		return;

	for (i = 0; i < hash->size; i++) {
		head = &hash->table[i];
		list_lock = &hash->list_locks[i];

		/* deletion requires the per-bucket write lock; the _safe
		 * iterator allows removal while walking */
		spin_lock_bh(list_lock);
		hlist_for_each_entry_safe(tt_local_entry, node, node_tmp,
					  head, hash_entry) {
			if (!(tt_local_entry->flags & TT_CLIENT_PENDING))
				continue;

			bat_dbg(DBG_TT, bat_priv, "Deleting local tt entry "
				"(%pM): pending\n", tt_local_entry->addr);

			atomic_dec(&bat_priv->num_local_tt);
			hlist_del_rcu(node);
			tt_local_entry_free_ref(tt_local_entry);
		}
		spin_unlock_bh(list_lock);
	}
}
/* Commit all pending local table changes: promote NEW clients, purge
 * PENDING ones and advance the local table version number. */
void tt_commit_changes(struct bat_priv *bat_priv)
{
	/* newly added clients become fully valid from now on */
	tt_local_reset_flags(bat_priv, TT_CLIENT_NEW);
	tt_local_purge_pending_clients(bat_priv);

	/* Increment the TTVN only once per OGM interval */
	atomic_inc(&bat_priv->ttvn);
	bat_priv->tt_poss_change = false;
}