translation-table.c

/* Copyright (C) 2007-2012 B.A.T.M.A.N. contributors:
 *
 * Marek Lindner, Simon Wunderlich, Antonio Quartulli
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
 * 02110-1301, USA
 */

#include "main.h"
#include "translation-table.h"
#include "soft-interface.h"
#include "hard-interface.h"
#include "send.h"
#include "hash.h"
#include "originator.h"
#include "routing.h"
#include "bridge_loop_avoidance.h"

#include <linux/crc16.h>

static void batadv_send_roam_adv(struct batadv_priv *bat_priv, uint8_t *client,
				 struct batadv_orig_node *orig_node);
static void batadv_tt_purge(struct work_struct *work);
static void
batadv_tt_global_del_orig_list(struct batadv_tt_global_entry *tt_global_entry);
static void batadv_tt_global_del(struct batadv_priv *bat_priv,
				 struct batadv_orig_node *orig_node,
				 const unsigned char *addr,
				 const char *message, bool roaming);

/* returns 1 if they are the same mac addr */
static int batadv_compare_tt(const struct hlist_node *node, const void *data2)
{
	const void *data1 = container_of(node, struct batadv_tt_common_entry,
					 hash_entry);

	return (memcmp(data1, data2, ETH_ALEN) == 0 ? 1 : 0);
}

static void batadv_tt_start_timer(struct batadv_priv *bat_priv)
{
	INIT_DELAYED_WORK(&bat_priv->tt.work, batadv_tt_purge);
	queue_delayed_work(batadv_event_workqueue, &bat_priv->tt.work,
			   msecs_to_jiffies(5000));
}

static struct batadv_tt_common_entry *
batadv_tt_hash_find(struct batadv_hashtable *hash, const void *data)
{
	struct hlist_head *head;
	struct hlist_node *node;
	struct batadv_tt_common_entry *tt_common_entry;
	struct batadv_tt_common_entry *tt_common_entry_tmp = NULL;
	uint32_t index;

	if (!hash)
		return NULL;

	index = batadv_choose_orig(data, hash->size);
	head = &hash->table[index];

	rcu_read_lock();
	hlist_for_each_entry_rcu(tt_common_entry, node, head, hash_entry) {
		if (!batadv_compare_eth(tt_common_entry, data))
			continue;

		if (!atomic_inc_not_zero(&tt_common_entry->refcount))
			continue;

		tt_common_entry_tmp = tt_common_entry;
		break;
	}
	rcu_read_unlock();

	return tt_common_entry_tmp;
}

static struct batadv_tt_local_entry *
batadv_tt_local_hash_find(struct batadv_priv *bat_priv, const void *data)
{
	struct batadv_tt_common_entry *tt_common_entry;
	struct batadv_tt_local_entry *tt_local_entry = NULL;

	tt_common_entry = batadv_tt_hash_find(bat_priv->tt.local_hash, data);
	if (tt_common_entry)
		tt_local_entry = container_of(tt_common_entry,
					      struct batadv_tt_local_entry,
					      common);
	return tt_local_entry;
}

static struct batadv_tt_global_entry *
batadv_tt_global_hash_find(struct batadv_priv *bat_priv, const void *data)
{
	struct batadv_tt_common_entry *tt_common_entry;
	struct batadv_tt_global_entry *tt_global_entry = NULL;

	tt_common_entry = batadv_tt_hash_find(bat_priv->tt.global_hash, data);
	if (tt_common_entry)
		tt_global_entry = container_of(tt_common_entry,
					       struct batadv_tt_global_entry,
					       common);
	return tt_global_entry;
}

static void
batadv_tt_local_entry_free_ref(struct batadv_tt_local_entry *tt_local_entry)
{
	if (atomic_dec_and_test(&tt_local_entry->common.refcount))
		kfree_rcu(tt_local_entry, common.rcu);
}

static void batadv_tt_global_entry_free_rcu(struct rcu_head *rcu)
{
	struct batadv_tt_common_entry *tt_common_entry;
	struct batadv_tt_global_entry *tt_global_entry;

	tt_common_entry = container_of(rcu, struct batadv_tt_common_entry, rcu);
	tt_global_entry = container_of(tt_common_entry,
				       struct batadv_tt_global_entry, common);

	kfree(tt_global_entry);
}

static void
batadv_tt_global_entry_free_ref(struct batadv_tt_global_entry *tt_global_entry)
{
	if (atomic_dec_and_test(&tt_global_entry->common.refcount)) {
		batadv_tt_global_del_orig_list(tt_global_entry);
		call_rcu(&tt_global_entry->common.rcu,
			 batadv_tt_global_entry_free_rcu);
	}
}

static void batadv_tt_orig_list_entry_free_rcu(struct rcu_head *rcu)
{
	struct batadv_tt_orig_list_entry *orig_entry;

	orig_entry = container_of(rcu, struct batadv_tt_orig_list_entry, rcu);
	batadv_orig_node_free_ref(orig_entry->orig_node);
	kfree(orig_entry);
}

static void
batadv_tt_orig_list_entry_free_ref(struct batadv_tt_orig_list_entry *orig_entry)
{
	if (!atomic_dec_and_test(&orig_entry->refcount))
		return;
	/* to avoid race conditions, immediately decrease the tt counter */
	atomic_dec(&orig_entry->orig_node->tt_size);
	call_rcu(&orig_entry->rcu, batadv_tt_orig_list_entry_free_rcu);
}
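
/* batadv_tt_local_event - queue a local translation table change (client
 * added or deleted) so that it is announced within the next OGM interval.
 * An ADD+DEL or DEL+ADD pair for the same client within one interval cancels
 * out and is dropped from the pending changes list.
 */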
static void batadv_tt_local_event(struct batadv_priv *bat_priv,
				  const uint8_t *addr, uint8_t flags)
{
	struct batadv_tt_change_node *tt_change_node, *entry, *safe;
	bool event_removed = false;
	bool del_op_requested, del_op_entry;

	tt_change_node = kmalloc(sizeof(*tt_change_node), GFP_ATOMIC);
	if (!tt_change_node)
		return;

	tt_change_node->change.flags = flags;
	memcpy(tt_change_node->change.addr, addr, ETH_ALEN);

	del_op_requested = flags & BATADV_TT_CLIENT_DEL;

	/* check for ADD+DEL or DEL+ADD events */
	spin_lock_bh(&bat_priv->tt.changes_list_lock);
	list_for_each_entry_safe(entry, safe, &bat_priv->tt.changes_list,
				 list) {
		if (!batadv_compare_eth(entry->change.addr, addr))
			continue;

		/* DEL+ADD in the same orig interval have no effect and can be
		 * removed to avoid silly behaviour on the receiver side. The
		 * other way around (ADD+DEL) can happen in case of roaming of
		 * a client still in the NEW state. Roaming of NEW clients is
		 * now possible due to automatic recognition of "temporary"
		 * clients
		 */
		del_op_entry = entry->change.flags & BATADV_TT_CLIENT_DEL;
		if (!del_op_requested && del_op_entry)
			goto del;
		if (del_op_requested && !del_op_entry)
			goto del;
		continue;
del:
		list_del(&entry->list);
		kfree(entry);
		kfree(tt_change_node);
		event_removed = true;
		goto unlock;
	}

	/* track the change in the OGM interval list */
	list_add_tail(&tt_change_node->list, &bat_priv->tt.changes_list);

unlock:
	spin_unlock_bh(&bat_priv->tt.changes_list_lock);

	if (event_removed)
		atomic_dec(&bat_priv->tt.local_changes);
	else
		atomic_inc(&bat_priv->tt.local_changes);
}

int batadv_tt_len(int changes_num)
{
	return changes_num * sizeof(struct batadv_tt_change);
}

static int batadv_tt_local_init(struct batadv_priv *bat_priv)
{
	if (bat_priv->tt.local_hash)
		return 0;

	bat_priv->tt.local_hash = batadv_hash_new(1024);

	if (!bat_priv->tt.local_hash)
		return -ENOMEM;

	return 0;
}
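
/* batadv_tt_local_add - register a client MAC address learned on the given
 * soft-interface in the local translation table. If the address was known as
 * a global entry, treat it as a roaming event: notify the originators that
 * previously announced it and mark the global entry as ROAMING.
 */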
void batadv_tt_local_add(struct net_device *soft_iface, const uint8_t *addr,
			 int ifindex)
{
	struct batadv_priv *bat_priv = netdev_priv(soft_iface);
	struct batadv_tt_local_entry *tt_local_entry = NULL;
	struct batadv_tt_global_entry *tt_global_entry = NULL;
	struct hlist_head *head;
	struct hlist_node *node;
	struct batadv_tt_orig_list_entry *orig_entry;
	int hash_added;

	tt_local_entry = batadv_tt_local_hash_find(bat_priv, addr);

	if (tt_local_entry) {
		tt_local_entry->last_seen = jiffies;
		/* possibly unset the BATADV_TT_CLIENT_PENDING flag */
		tt_local_entry->common.flags &= ~BATADV_TT_CLIENT_PENDING;
		goto out;
	}

	tt_local_entry = kmalloc(sizeof(*tt_local_entry), GFP_ATOMIC);
	if (!tt_local_entry)
		goto out;

	batadv_dbg(BATADV_DBG_TT, bat_priv,
		   "Creating new local tt entry: %pM (ttvn: %d)\n", addr,
		   (uint8_t)atomic_read(&bat_priv->tt.vn));

	memcpy(tt_local_entry->common.addr, addr, ETH_ALEN);
	tt_local_entry->common.flags = BATADV_NO_FLAGS;
	if (batadv_is_wifi_iface(ifindex))
		tt_local_entry->common.flags |= BATADV_TT_CLIENT_WIFI;
	atomic_set(&tt_local_entry->common.refcount, 2);
	tt_local_entry->last_seen = jiffies;
	tt_local_entry->common.added_at = tt_local_entry->last_seen;

	/* the batman interface mac address should never be purged */
	if (batadv_compare_eth(addr, soft_iface->dev_addr))
		tt_local_entry->common.flags |= BATADV_TT_CLIENT_NOPURGE;

	/* The local entry has to be marked as NEW to avoid sending it in
	 * a full table response going out before the next ttvn increment
	 * (consistency check)
	 */
	tt_local_entry->common.flags |= BATADV_TT_CLIENT_NEW;

	hash_added = batadv_hash_add(bat_priv->tt.local_hash, batadv_compare_tt,
				     batadv_choose_orig,
				     &tt_local_entry->common,
				     &tt_local_entry->common.hash_entry);

	if (unlikely(hash_added != 0)) {
		/* remove the reference for the hash */
		batadv_tt_local_entry_free_ref(tt_local_entry);
		goto out;
	}

	batadv_tt_local_event(bat_priv, addr, tt_local_entry->common.flags);

	/* remove address from global hash if present */
	tt_global_entry = batadv_tt_global_hash_find(bat_priv, addr);

	/* Check whether it is a roaming event! */
	if (tt_global_entry) {
		/* These nodes are probably going to update their tt table */
		head = &tt_global_entry->orig_list;
		rcu_read_lock();
		hlist_for_each_entry_rcu(orig_entry, node, head, list) {
			orig_entry->orig_node->tt_poss_change = true;

			batadv_send_roam_adv(bat_priv,
					     tt_global_entry->common.addr,
					     orig_entry->orig_node);
		}
		rcu_read_unlock();
		/* The global entry has to be marked as ROAMING and
		 * has to be kept for consistency purposes
		 */
		tt_global_entry->common.flags |= BATADV_TT_CLIENT_ROAM;
		tt_global_entry->roam_at = jiffies;
	}
out:
	if (tt_local_entry)
		batadv_tt_local_entry_free_ref(tt_local_entry);
	if (tt_global_entry)
		batadv_tt_global_entry_free_ref(tt_global_entry);
}

static void batadv_tt_realloc_packet_buff(unsigned char **packet_buff,
					  int *packet_buff_len,
					  int min_packet_len,
					  int new_packet_len)
{
	unsigned char *new_buff;

	new_buff = kmalloc(new_packet_len, GFP_ATOMIC);

	/* keep old buffer if kmalloc should fail */
	if (new_buff) {
		memcpy(new_buff, *packet_buff, min_packet_len);
		kfree(*packet_buff);
		*packet_buff = new_buff;
		*packet_buff_len = new_packet_len;
	}
}

static void batadv_tt_prepare_packet_buff(struct batadv_priv *bat_priv,
					  unsigned char **packet_buff,
					  int *packet_buff_len,
					  int min_packet_len)
{
	struct batadv_hard_iface *primary_if;
	int req_len;

	primary_if = batadv_primary_if_get_selected(bat_priv);

	req_len = min_packet_len;
	req_len += batadv_tt_len(atomic_read(&bat_priv->tt.local_changes));

	/* if we have too many changes for one packet don't send any
	 * and wait for the tt table request which will be fragmented
	 */
	if ((!primary_if) || (req_len > primary_if->soft_iface->mtu))
		req_len = min_packet_len;

	batadv_tt_realloc_packet_buff(packet_buff, packet_buff_len,
				      min_packet_len, req_len);

	if (primary_if)
		batadv_hardif_free_ref(primary_if);
}
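
/* batadv_tt_changes_fill_buff - copy as many pending TT changes as fit into
 * the packet buffer, empty the changes list and keep a copy of the announced
 * diff in tt.last_changeset so it can be re-used for later TT requests.
 * Returns the number of changes copied.
 */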
static int batadv_tt_changes_fill_buff(struct batadv_priv *bat_priv,
				       unsigned char **packet_buff,
				       int *packet_buff_len,
				       int min_packet_len)
{
	struct batadv_tt_change_node *entry, *safe;
	int count = 0, tot_changes = 0, new_len;
	unsigned char *tt_buff;

	batadv_tt_prepare_packet_buff(bat_priv, packet_buff,
				      packet_buff_len, min_packet_len);

	new_len = *packet_buff_len - min_packet_len;
	tt_buff = *packet_buff + min_packet_len;

	if (new_len > 0)
		tot_changes = new_len / batadv_tt_len(1);

	spin_lock_bh(&bat_priv->tt.changes_list_lock);
	atomic_set(&bat_priv->tt.local_changes, 0);

	list_for_each_entry_safe(entry, safe, &bat_priv->tt.changes_list,
				 list) {
		if (count < tot_changes) {
			memcpy(tt_buff + batadv_tt_len(count),
			       &entry->change, sizeof(struct batadv_tt_change));
			count++;
		}
		list_del(&entry->list);
		kfree(entry);
	}
	spin_unlock_bh(&bat_priv->tt.changes_list_lock);

	/* Keep the buffer for possible tt_request */
	spin_lock_bh(&bat_priv->tt.last_changeset_lock);
	kfree(bat_priv->tt.last_changeset);
	bat_priv->tt.last_changeset_len = 0;
	bat_priv->tt.last_changeset = NULL;
	/* check whether this new OGM has no changes due to size problems */
	if (new_len > 0) {
		/* if kmalloc() fails we will reply with the full table
		 * instead of providing the diff
		 */
		bat_priv->tt.last_changeset = kmalloc(new_len, GFP_ATOMIC);
		if (bat_priv->tt.last_changeset) {
			memcpy(bat_priv->tt.last_changeset, tt_buff, new_len);
			bat_priv->tt.last_changeset_len = new_len;
		}
	}
	spin_unlock_bh(&bat_priv->tt.last_changeset_lock);

	return count;
}

int batadv_tt_local_seq_print_text(struct seq_file *seq, void *offset)
{
	struct net_device *net_dev = (struct net_device *)seq->private;
	struct batadv_priv *bat_priv = netdev_priv(net_dev);
	struct batadv_hashtable *hash = bat_priv->tt.local_hash;
	struct batadv_tt_common_entry *tt_common_entry;
	struct batadv_hard_iface *primary_if;
	struct hlist_node *node;
	struct hlist_head *head;
	uint32_t i;
	int ret = 0;

	primary_if = batadv_primary_if_get_selected(bat_priv);
	if (!primary_if) {
		ret = seq_printf(seq,
				 "BATMAN mesh %s disabled - please specify interfaces to enable it\n",
				 net_dev->name);
		goto out;
	}

	if (primary_if->if_status != BATADV_IF_ACTIVE) {
		ret = seq_printf(seq,
				 "BATMAN mesh %s disabled - primary interface not active\n",
				 net_dev->name);
		goto out;
	}

	seq_printf(seq,
		   "Locally retrieved addresses (from %s) announced via TT (TTVN: %u):\n",
		   net_dev->name, (uint8_t)atomic_read(&bat_priv->tt.vn));

	for (i = 0; i < hash->size; i++) {
		head = &hash->table[i];

		rcu_read_lock();
		hlist_for_each_entry_rcu(tt_common_entry, node,
					 head, hash_entry) {
			seq_printf(seq, " * %pM [%c%c%c%c%c]\n",
				   tt_common_entry->addr,
				   (tt_common_entry->flags &
				    BATADV_TT_CLIENT_ROAM ? 'R' : '.'),
				   (tt_common_entry->flags &
				    BATADV_TT_CLIENT_NOPURGE ? 'P' : '.'),
				   (tt_common_entry->flags &
				    BATADV_TT_CLIENT_NEW ? 'N' : '.'),
				   (tt_common_entry->flags &
				    BATADV_TT_CLIENT_PENDING ? 'X' : '.'),
				   (tt_common_entry->flags &
				    BATADV_TT_CLIENT_WIFI ? 'W' : '.'));
		}
		rcu_read_unlock();
	}
out:
	if (primary_if)
		batadv_hardif_free_ref(primary_if);
	return ret;
}

static void
batadv_tt_local_set_pending(struct batadv_priv *bat_priv,
			    struct batadv_tt_local_entry *tt_local_entry,
			    uint16_t flags, const char *message)
{
	batadv_tt_local_event(bat_priv, tt_local_entry->common.addr,
			      tt_local_entry->common.flags | flags);

	/* The local client has to be marked as "pending to be removed" but has
	 * to be kept in the table in order to send it in a full table
	 * response issued before the next ttvn increment (consistency check)
	 */
	tt_local_entry->common.flags |= BATADV_TT_CLIENT_PENDING;

	batadv_dbg(BATADV_DBG_TT, bat_priv,
		   "Local tt entry (%pM) pending to be removed: %s\n",
		   tt_local_entry->common.addr, message);
}

void batadv_tt_local_remove(struct batadv_priv *bat_priv, const uint8_t *addr,
			    const char *message, bool roaming)
{
	struct batadv_tt_local_entry *tt_local_entry = NULL;
	uint16_t flags;

	tt_local_entry = batadv_tt_local_hash_find(bat_priv, addr);
	if (!tt_local_entry)
		goto out;

	flags = BATADV_TT_CLIENT_DEL;
	if (roaming)
		flags |= BATADV_TT_CLIENT_ROAM;

	batadv_tt_local_set_pending(bat_priv, tt_local_entry, flags, message);
out:
	if (tt_local_entry)
		batadv_tt_local_entry_free_ref(tt_local_entry);
}

static void batadv_tt_local_purge_list(struct batadv_priv *bat_priv,
				       struct hlist_head *head)
{
	struct batadv_tt_local_entry *tt_local_entry;
	struct batadv_tt_common_entry *tt_common_entry;
	struct hlist_node *node, *node_tmp;

	hlist_for_each_entry_safe(tt_common_entry, node, node_tmp, head,
				  hash_entry) {
		tt_local_entry = container_of(tt_common_entry,
					      struct batadv_tt_local_entry,
					      common);
		if (tt_local_entry->common.flags & BATADV_TT_CLIENT_NOPURGE)
			continue;

		/* entry already marked for deletion */
		if (tt_local_entry->common.flags & BATADV_TT_CLIENT_PENDING)
			continue;

		if (!batadv_has_timed_out(tt_local_entry->last_seen,
					  BATADV_TT_LOCAL_TIMEOUT))
			continue;

		batadv_tt_local_set_pending(bat_priv, tt_local_entry,
					    BATADV_TT_CLIENT_DEL, "timed out");
	}
}
static void batadv_tt_local_purge(struct batadv_priv *bat_priv)
{
	struct batadv_hashtable *hash = bat_priv->tt.local_hash;
	struct hlist_head *head;
	spinlock_t *list_lock; /* protects write access to the hash lists */
	uint32_t i;

	for (i = 0; i < hash->size; i++) {
		head = &hash->table[i];
		list_lock = &hash->list_locks[i];

		spin_lock_bh(list_lock);
		batadv_tt_local_purge_list(bat_priv, head);
		spin_unlock_bh(list_lock);
	}
}

static void batadv_tt_local_table_free(struct batadv_priv *bat_priv)
{
	struct batadv_hashtable *hash;
	spinlock_t *list_lock; /* protects write access to the hash lists */
	struct batadv_tt_common_entry *tt_common_entry;
	struct batadv_tt_local_entry *tt_local;
	struct hlist_node *node, *node_tmp;
	struct hlist_head *head;
	uint32_t i;

	if (!bat_priv->tt.local_hash)
		return;

	hash = bat_priv->tt.local_hash;

	for (i = 0; i < hash->size; i++) {
		head = &hash->table[i];
		list_lock = &hash->list_locks[i];

		spin_lock_bh(list_lock);
		hlist_for_each_entry_safe(tt_common_entry, node, node_tmp,
					  head, hash_entry) {
			hlist_del_rcu(node);
			tt_local = container_of(tt_common_entry,
						struct batadv_tt_local_entry,
						common);
			batadv_tt_local_entry_free_ref(tt_local);
		}
		spin_unlock_bh(list_lock);
	}

	batadv_hash_destroy(hash);

	bat_priv->tt.local_hash = NULL;
}

static int batadv_tt_global_init(struct batadv_priv *bat_priv)
{
	if (bat_priv->tt.global_hash)
		return 0;

	bat_priv->tt.global_hash = batadv_hash_new(1024);

	if (!bat_priv->tt.global_hash)
		return -ENOMEM;

	return 0;
}

static void batadv_tt_changes_list_free(struct batadv_priv *bat_priv)
{
	struct batadv_tt_change_node *entry, *safe;

	spin_lock_bh(&bat_priv->tt.changes_list_lock);

	list_for_each_entry_safe(entry, safe, &bat_priv->tt.changes_list,
				 list) {
		list_del(&entry->list);
		kfree(entry);
	}

	atomic_set(&bat_priv->tt.local_changes, 0);
	spin_unlock_bh(&bat_priv->tt.changes_list_lock);
}

/* retrieves the orig_tt_list_entry belonging to orig_node from the
 * batadv_tt_global_entry list
 *
 * returns it with an increased refcounter, NULL if not found
 */
static struct batadv_tt_orig_list_entry *
batadv_tt_global_orig_entry_find(const struct batadv_tt_global_entry *entry,
				 const struct batadv_orig_node *orig_node)
{
	struct batadv_tt_orig_list_entry *tmp_orig_entry, *orig_entry = NULL;
	const struct hlist_head *head;
	struct hlist_node *node;

	rcu_read_lock();
	head = &entry->orig_list;
	hlist_for_each_entry_rcu(tmp_orig_entry, node, head, list) {
		if (tmp_orig_entry->orig_node != orig_node)
			continue;
		if (!atomic_inc_not_zero(&tmp_orig_entry->refcount))
			continue;

		orig_entry = tmp_orig_entry;
		break;
	}
	rcu_read_unlock();

	return orig_entry;
}

/* find out if an orig_node is already in the list of a tt_global_entry.
 * returns true if found, false otherwise
 */
static bool
batadv_tt_global_entry_has_orig(const struct batadv_tt_global_entry *entry,
				const struct batadv_orig_node *orig_node)
{
	struct batadv_tt_orig_list_entry *orig_entry;
	bool found = false;

	orig_entry = batadv_tt_global_orig_entry_find(entry, orig_node);
	if (orig_entry) {
		found = true;
		batadv_tt_orig_list_entry_free_ref(orig_entry);
	}

	return found;
}

static void
batadv_tt_global_orig_entry_add(struct batadv_tt_global_entry *tt_global,
				struct batadv_orig_node *orig_node, int ttvn)
{
	struct batadv_tt_orig_list_entry *orig_entry;

	orig_entry = batadv_tt_global_orig_entry_find(tt_global, orig_node);
	if (orig_entry) {
		/* refresh the ttvn: the current value could be a bogus one that
		 * was added during a "temporary client detection"
		 */
		orig_entry->ttvn = ttvn;
		goto out;
	}

	orig_entry = kzalloc(sizeof(*orig_entry), GFP_ATOMIC);
	if (!orig_entry)
		goto out;

	INIT_HLIST_NODE(&orig_entry->list);
	atomic_inc(&orig_node->refcount);
	atomic_inc(&orig_node->tt_size);
	orig_entry->orig_node = orig_node;
	orig_entry->ttvn = ttvn;
	atomic_set(&orig_entry->refcount, 2);

	spin_lock_bh(&tt_global->list_lock);
	hlist_add_head_rcu(&orig_entry->list,
			   &tt_global->orig_list);
	spin_unlock_bh(&tt_global->list_lock);
out:
	if (orig_entry)
		batadv_tt_orig_list_entry_free_ref(orig_entry);
}
/* caller must hold orig_node refcount */
int batadv_tt_global_add(struct batadv_priv *bat_priv,
			 struct batadv_orig_node *orig_node,
			 const unsigned char *tt_addr, uint8_t flags,
			 uint8_t ttvn)
{
	struct batadv_tt_global_entry *tt_global_entry = NULL;
	int ret = 0;
	int hash_added;
	struct batadv_tt_common_entry *common;

	tt_global_entry = batadv_tt_global_hash_find(bat_priv, tt_addr);

	if (!tt_global_entry) {
		tt_global_entry = kzalloc(sizeof(*tt_global_entry), GFP_ATOMIC);
		if (!tt_global_entry)
			goto out;

		common = &tt_global_entry->common;
		memcpy(common->addr, tt_addr, ETH_ALEN);

		common->flags = flags;
		tt_global_entry->roam_at = 0;
		atomic_set(&common->refcount, 2);
		common->added_at = jiffies;

		INIT_HLIST_HEAD(&tt_global_entry->orig_list);
		spin_lock_init(&tt_global_entry->list_lock);

		hash_added = batadv_hash_add(bat_priv->tt.global_hash,
					     batadv_compare_tt,
					     batadv_choose_orig, common,
					     &common->hash_entry);

		if (unlikely(hash_added != 0)) {
			/* remove the reference for the hash */
			batadv_tt_global_entry_free_ref(tt_global_entry);
			goto out_remove;
		}
	} else {
		/* If there is already a global entry, we can use this one for
		 * our processing.
		 * But if we are trying to add a temporary client we can exit
		 * directly because the temporary information should never
		 * override any already known client state (whatever it is)
		 */
		if (flags & BATADV_TT_CLIENT_TEMP)
			goto out;

		/* if the client was temporarily added before receiving the
		 * first OGM announcing it, we have to clear the TEMP flag
		 */
		tt_global_entry->common.flags &= ~BATADV_TT_CLIENT_TEMP;

		/* the change can carry possible "attribute" flags like the
		 * TT_CLIENT_WIFI, therefore they have to be copied in the
		 * client entry
		 */
		tt_global_entry->common.flags |= flags;

		/* If there is the BATADV_TT_CLIENT_ROAM flag set, there is only
		 * one originator left in the list and we previously received a
		 * delete + roaming change for this originator.
		 *
		 * We should first delete the old originator before adding the
		 * new one.
		 */
		if (tt_global_entry->common.flags & BATADV_TT_CLIENT_ROAM) {
			batadv_tt_global_del_orig_list(tt_global_entry);
			tt_global_entry->common.flags &= ~BATADV_TT_CLIENT_ROAM;
			tt_global_entry->roam_at = 0;
		}
	}
	/* add the new orig_entry (if needed) or update it */
	batadv_tt_global_orig_entry_add(tt_global_entry, orig_node, ttvn);

	batadv_dbg(BATADV_DBG_TT, bat_priv,
		   "Creating new global tt entry: %pM (via %pM)\n",
		   tt_global_entry->common.addr, orig_node->orig);

out_remove:
	/* remove address from local hash if present */
	batadv_tt_local_remove(bat_priv, tt_global_entry->common.addr,
			       "global tt received",
			       flags & BATADV_TT_CLIENT_ROAM);
	ret = 1;
out:
	if (tt_global_entry)
		batadv_tt_global_entry_free_ref(tt_global_entry);
	return ret;
}

/* print all orig nodes who announce the address for this global entry.
 * it is assumed that the caller holds rcu_read_lock();
 */
static void
batadv_tt_global_print_entry(struct batadv_tt_global_entry *tt_global_entry,
			     struct seq_file *seq)
{
	struct hlist_head *head;
	struct hlist_node *node;
	struct batadv_tt_orig_list_entry *orig_entry;
	struct batadv_tt_common_entry *tt_common_entry;
	uint16_t flags;
	uint8_t last_ttvn;

	tt_common_entry = &tt_global_entry->common;

	head = &tt_global_entry->orig_list;

	hlist_for_each_entry_rcu(orig_entry, node, head, list) {
		flags = tt_common_entry->flags;
		last_ttvn = atomic_read(&orig_entry->orig_node->last_ttvn);
		seq_printf(seq, " * %pM (%3u) via %pM (%3u) [%c%c%c]\n",
			   tt_global_entry->common.addr, orig_entry->ttvn,
			   orig_entry->orig_node->orig, last_ttvn,
			   (flags & BATADV_TT_CLIENT_ROAM ? 'R' : '.'),
			   (flags & BATADV_TT_CLIENT_WIFI ? 'W' : '.'),
			   (flags & BATADV_TT_CLIENT_TEMP ? 'T' : '.'));
	}
}

int batadv_tt_global_seq_print_text(struct seq_file *seq, void *offset)
{
	struct net_device *net_dev = (struct net_device *)seq->private;
	struct batadv_priv *bat_priv = netdev_priv(net_dev);
	struct batadv_hashtable *hash = bat_priv->tt.global_hash;
	struct batadv_tt_common_entry *tt_common_entry;
	struct batadv_tt_global_entry *tt_global;
	struct batadv_hard_iface *primary_if;
	struct hlist_node *node;
	struct hlist_head *head;
	uint32_t i;
	int ret = 0;

	primary_if = batadv_primary_if_get_selected(bat_priv);
	if (!primary_if) {
		ret = seq_printf(seq,
				 "BATMAN mesh %s disabled - please specify interfaces to enable it\n",
				 net_dev->name);
		goto out;
	}

	if (primary_if->if_status != BATADV_IF_ACTIVE) {
		ret = seq_printf(seq,
				 "BATMAN mesh %s disabled - primary interface not active\n",
				 net_dev->name);
		goto out;
	}

	seq_printf(seq,
		   "Globally announced TT entries received via the mesh %s\n",
		   net_dev->name);
	seq_printf(seq, " %-13s %s %-15s %s %s\n",
		   "Client", "(TTVN)", "Originator", "(Curr TTVN)", "Flags");

	for (i = 0; i < hash->size; i++) {
		head = &hash->table[i];

		rcu_read_lock();
		hlist_for_each_entry_rcu(tt_common_entry, node,
					 head, hash_entry) {
			tt_global = container_of(tt_common_entry,
						 struct batadv_tt_global_entry,
						 common);
			batadv_tt_global_print_entry(tt_global, seq);
		}
		rcu_read_unlock();
	}
out:
	if (primary_if)
		batadv_hardif_free_ref(primary_if);
	return ret;
}
/* deletes the orig list of a tt_global_entry */
static void
batadv_tt_global_del_orig_list(struct batadv_tt_global_entry *tt_global_entry)
{
	struct hlist_head *head;
	struct hlist_node *node, *safe;
	struct batadv_tt_orig_list_entry *orig_entry;

	spin_lock_bh(&tt_global_entry->list_lock);
	head = &tt_global_entry->orig_list;
	hlist_for_each_entry_safe(orig_entry, node, safe, head, list) {
		hlist_del_rcu(node);
		batadv_tt_orig_list_entry_free_ref(orig_entry);
	}
	spin_unlock_bh(&tt_global_entry->list_lock);
}

static void
batadv_tt_global_del_orig_entry(struct batadv_priv *bat_priv,
				struct batadv_tt_global_entry *tt_global_entry,
				struct batadv_orig_node *orig_node,
				const char *message)
{
	struct hlist_head *head;
	struct hlist_node *node, *safe;
	struct batadv_tt_orig_list_entry *orig_entry;

	spin_lock_bh(&tt_global_entry->list_lock);
	head = &tt_global_entry->orig_list;
	hlist_for_each_entry_safe(orig_entry, node, safe, head, list) {
		if (orig_entry->orig_node == orig_node) {
			batadv_dbg(BATADV_DBG_TT, bat_priv,
				   "Deleting %pM from global tt entry %pM: %s\n",
				   orig_node->orig,
				   tt_global_entry->common.addr, message);
			hlist_del_rcu(node);
			batadv_tt_orig_list_entry_free_ref(orig_entry);
		}
	}
	spin_unlock_bh(&tt_global_entry->list_lock);
}

static void
batadv_tt_global_del_struct(struct batadv_priv *bat_priv,
			    struct batadv_tt_global_entry *tt_global_entry,
			    const char *message)
{
	batadv_dbg(BATADV_DBG_TT, bat_priv,
		   "Deleting global tt entry %pM: %s\n",
		   tt_global_entry->common.addr, message);

	batadv_hash_remove(bat_priv->tt.global_hash, batadv_compare_tt,
			   batadv_choose_orig, tt_global_entry->common.addr);
	batadv_tt_global_entry_free_ref(tt_global_entry);
}

/* If the client is to be deleted, we check if it is the last originator entry
 * within tt_global entry. If yes, we set the BATADV_TT_CLIENT_ROAM flag and the
 * timer, otherwise we simply remove the originator scheduled for deletion.
 */
static void
batadv_tt_global_del_roaming(struct batadv_priv *bat_priv,
			     struct batadv_tt_global_entry *tt_global_entry,
			     struct batadv_orig_node *orig_node,
			     const char *message)
{
	bool last_entry = true;
	struct hlist_head *head;
	struct hlist_node *node;
	struct batadv_tt_orig_list_entry *orig_entry;

	/* no local entry exists, case 1:
	 * Check if this is the last one or if other entries exist.
	 */
	rcu_read_lock();
	head = &tt_global_entry->orig_list;
	hlist_for_each_entry_rcu(orig_entry, node, head, list) {
		if (orig_entry->orig_node != orig_node) {
			last_entry = false;
			break;
		}
	}
	rcu_read_unlock();

	if (last_entry) {
		/* it's the last one, mark for roaming. */
		tt_global_entry->common.flags |= BATADV_TT_CLIENT_ROAM;
		tt_global_entry->roam_at = jiffies;
	} else
		/* there is another entry, we can simply delete this
		 * one and can still use the other one.
		 */
		batadv_tt_global_del_orig_entry(bat_priv, tt_global_entry,
						orig_node, message);
}

static void batadv_tt_global_del(struct batadv_priv *bat_priv,
				 struct batadv_orig_node *orig_node,
				 const unsigned char *addr,
				 const char *message, bool roaming)
{
	struct batadv_tt_global_entry *tt_global_entry = NULL;
	struct batadv_tt_local_entry *local_entry = NULL;

	tt_global_entry = batadv_tt_global_hash_find(bat_priv, addr);
	if (!tt_global_entry)
		goto out;

	if (!roaming) {
		batadv_tt_global_del_orig_entry(bat_priv, tt_global_entry,
						orig_node, message);

		if (hlist_empty(&tt_global_entry->orig_list))
			batadv_tt_global_del_struct(bat_priv, tt_global_entry,
						    message);

		goto out;
	}

	/* if we are deleting a global entry due to a roam
	 * event, there are two possibilities:
	 * 1) the client roamed from node A to node B => if there
	 *    is only one originator left for this client, we mark
	 *    it with BATADV_TT_CLIENT_ROAM, we start a timer and we
	 *    wait for node B to claim it. In case of timeout
	 *    the entry is purged.
	 *
	 *    If there are other originators left, we directly delete
	 *    the originator.
	 * 2) the client roamed to us => we can directly delete
	 *    the global entry, since it is useless now.
	 */
	local_entry = batadv_tt_local_hash_find(bat_priv,
						tt_global_entry->common.addr);
	if (local_entry) {
		/* local entry exists, case 2: client roamed to us. */
		batadv_tt_global_del_orig_list(tt_global_entry);
		batadv_tt_global_del_struct(bat_priv, tt_global_entry, message);
	} else
		/* no local entry exists, case 1: check for roaming */
		batadv_tt_global_del_roaming(bat_priv, tt_global_entry,
					     orig_node, message);

out:
	if (tt_global_entry)
		batadv_tt_global_entry_free_ref(tt_global_entry);
	if (local_entry)
		batadv_tt_local_entry_free_ref(local_entry);
}
void batadv_tt_global_del_orig(struct batadv_priv *bat_priv,
			       struct batadv_orig_node *orig_node,
			       const char *message)
{
	struct batadv_tt_global_entry *tt_global;
	struct batadv_tt_common_entry *tt_common_entry;
	uint32_t i;
	struct batadv_hashtable *hash = bat_priv->tt.global_hash;
	struct hlist_node *node, *safe;
	struct hlist_head *head;
	spinlock_t *list_lock; /* protects write access to the hash lists */

	if (!hash)
		return;

	for (i = 0; i < hash->size; i++) {
		head = &hash->table[i];
		list_lock = &hash->list_locks[i];

		spin_lock_bh(list_lock);
		hlist_for_each_entry_safe(tt_common_entry, node, safe,
					  head, hash_entry) {
			tt_global = container_of(tt_common_entry,
						 struct batadv_tt_global_entry,
						 common);

			batadv_tt_global_del_orig_entry(bat_priv, tt_global,
							orig_node, message);

			if (hlist_empty(&tt_global->orig_list)) {
				batadv_dbg(BATADV_DBG_TT, bat_priv,
					   "Deleting global tt entry %pM: %s\n",
					   tt_global->common.addr, message);
				hlist_del_rcu(node);
				batadv_tt_global_entry_free_ref(tt_global);
			}
		}
		spin_unlock_bh(list_lock);
	}
	orig_node->tt_initialised = false;
}

static bool batadv_tt_global_to_purge(struct batadv_tt_global_entry *tt_global,
				      char **msg)
{
	bool purge = false;
	unsigned long roam_timeout = BATADV_TT_CLIENT_ROAM_TIMEOUT;
	unsigned long temp_timeout = BATADV_TT_CLIENT_TEMP_TIMEOUT;

	if ((tt_global->common.flags & BATADV_TT_CLIENT_ROAM) &&
	    batadv_has_timed_out(tt_global->roam_at, roam_timeout)) {
		purge = true;
		*msg = "Roaming timeout\n";
	}

	if ((tt_global->common.flags & BATADV_TT_CLIENT_TEMP) &&
	    batadv_has_timed_out(tt_global->common.added_at, temp_timeout)) {
		purge = true;
		*msg = "Temporary client timeout\n";
	}

	return purge;
}

static void batadv_tt_global_purge(struct batadv_priv *bat_priv)
{
	struct batadv_hashtable *hash = bat_priv->tt.global_hash;
	struct hlist_head *head;
	struct hlist_node *node, *node_tmp;
	spinlock_t *list_lock; /* protects write access to the hash lists */
	uint32_t i;
	char *msg = NULL;
	struct batadv_tt_common_entry *tt_common;
	struct batadv_tt_global_entry *tt_global;

	for (i = 0; i < hash->size; i++) {
		head = &hash->table[i];
		list_lock = &hash->list_locks[i];

		spin_lock_bh(list_lock);
		hlist_for_each_entry_safe(tt_common, node, node_tmp, head,
					  hash_entry) {
			tt_global = container_of(tt_common,
						 struct batadv_tt_global_entry,
						 common);

			if (!batadv_tt_global_to_purge(tt_global, &msg))
				continue;

			batadv_dbg(BATADV_DBG_TT, bat_priv,
				   "Deleting global tt entry (%pM): %s\n",
				   tt_global->common.addr, msg);

			hlist_del_rcu(node);

			batadv_tt_global_entry_free_ref(tt_global);
		}
		spin_unlock_bh(list_lock);
	}
}

static void batadv_tt_global_table_free(struct batadv_priv *bat_priv)
{
	struct batadv_hashtable *hash;
	spinlock_t *list_lock; /* protects write access to the hash lists */
	struct batadv_tt_common_entry *tt_common_entry;
	struct batadv_tt_global_entry *tt_global;
	struct hlist_node *node, *node_tmp;
	struct hlist_head *head;
	uint32_t i;

	if (!bat_priv->tt.global_hash)
		return;

	hash = bat_priv->tt.global_hash;

	for (i = 0; i < hash->size; i++) {
		head = &hash->table[i];
		list_lock = &hash->list_locks[i];

		spin_lock_bh(list_lock);
		hlist_for_each_entry_safe(tt_common_entry, node, node_tmp,
					  head, hash_entry) {
			hlist_del_rcu(node);
			tt_global = container_of(tt_common_entry,
						 struct batadv_tt_global_entry,
						 common);
			batadv_tt_global_entry_free_ref(tt_global);
		}
		spin_unlock_bh(list_lock);
	}

	batadv_hash_destroy(hash);

	bat_priv->tt.global_hash = NULL;
}

static bool
_batadv_is_ap_isolated(struct batadv_tt_local_entry *tt_local_entry,
		       struct batadv_tt_global_entry *tt_global_entry)
{
	bool ret = false;

	if (tt_local_entry->common.flags & BATADV_TT_CLIENT_WIFI &&
	    tt_global_entry->common.flags & BATADV_TT_CLIENT_WIFI)
		ret = true;

	return ret;
}
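
/* batadv_transtable_search - look up the originator announcing the client
 * address addr and return the one reachable via the best (highest TQ) router,
 * with its refcount increased. Returns NULL if the client is unknown or if
 * AP isolation forbids src and addr to communicate.
 */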
struct batadv_orig_node *batadv_transtable_search(struct batadv_priv *bat_priv,
						  const uint8_t *src,
						  const uint8_t *addr)
{
	struct batadv_tt_local_entry *tt_local_entry = NULL;
	struct batadv_tt_global_entry *tt_global_entry = NULL;
	struct batadv_orig_node *orig_node = NULL;
	struct batadv_neigh_node *router = NULL;
	struct hlist_head *head;
	struct hlist_node *node;
	struct batadv_tt_orig_list_entry *orig_entry;
	int best_tq;

	if (src && atomic_read(&bat_priv->ap_isolation)) {
		tt_local_entry = batadv_tt_local_hash_find(bat_priv, src);
		if (!tt_local_entry)
			goto out;
	}

	tt_global_entry = batadv_tt_global_hash_find(bat_priv, addr);
	if (!tt_global_entry)
		goto out;

	/* check whether the clients should not communicate due to AP
	 * isolation
	 */
	if (tt_local_entry &&
	    _batadv_is_ap_isolated(tt_local_entry, tt_global_entry))
		goto out;

	best_tq = 0;

	rcu_read_lock();
	head = &tt_global_entry->orig_list;
	hlist_for_each_entry_rcu(orig_entry, node, head, list) {
		router = batadv_orig_node_get_router(orig_entry->orig_node);
		if (!router)
			continue;

		if (router->tq_avg > best_tq) {
			orig_node = orig_entry->orig_node;
			best_tq = router->tq_avg;
		}
		batadv_neigh_node_free_ref(router);
	}
	/* found anything? */
	if (orig_node && !atomic_inc_not_zero(&orig_node->refcount))
		orig_node = NULL;
	rcu_read_unlock();
out:
	if (tt_global_entry)
		batadv_tt_global_entry_free_ref(tt_global_entry);
	if (tt_local_entry)
		batadv_tt_local_entry_free_ref(tt_local_entry);

	return orig_node;
}

/* Calculates the checksum of the local table of a given orig_node */
static uint16_t batadv_tt_global_crc(struct batadv_priv *bat_priv,
				     struct batadv_orig_node *orig_node)
{
	uint16_t total = 0, total_one;
	struct batadv_hashtable *hash = bat_priv->tt.global_hash;
	struct batadv_tt_common_entry *tt_common;
	struct batadv_tt_global_entry *tt_global;
	struct hlist_node *node;
	struct hlist_head *head;
	uint32_t i;
	int j;

	for (i = 0; i < hash->size; i++) {
		head = &hash->table[i];

		rcu_read_lock();
		hlist_for_each_entry_rcu(tt_common, node, head, hash_entry) {
			tt_global = container_of(tt_common,
						 struct batadv_tt_global_entry,
						 common);
			/* Roaming clients are in the global table for
			 * consistency only. They don't have to be
			 * taken into account while computing the
			 * global crc
			 */
			if (tt_common->flags & BATADV_TT_CLIENT_ROAM)
				continue;

			/* Temporary clients have not been announced yet, so
			 * they have to be skipped while computing the global
			 * crc
			 */
			if (tt_common->flags & BATADV_TT_CLIENT_TEMP)
				continue;

			/* find out if this global entry is announced by this
			 * originator
			 */
			if (!batadv_tt_global_entry_has_orig(tt_global,
							     orig_node))
				continue;

			total_one = 0;
			for (j = 0; j < ETH_ALEN; j++)
				total_one = crc16_byte(total_one,
						       tt_common->addr[j]);
			total ^= total_one;
		}
		rcu_read_unlock();
	}

	return total;
}

/* Calculates the checksum of the local table */
static uint16_t batadv_tt_local_crc(struct batadv_priv *bat_priv)
{
	uint16_t total = 0, total_one;
	struct batadv_hashtable *hash = bat_priv->tt.local_hash;
	struct batadv_tt_common_entry *tt_common;
	struct hlist_node *node;
	struct hlist_head *head;
	uint32_t i;
	int j;

	for (i = 0; i < hash->size; i++) {
		head = &hash->table[i];

		rcu_read_lock();
		hlist_for_each_entry_rcu(tt_common, node, head, hash_entry) {
			/* not yet committed clients do not have to be taken
			 * into account while computing the CRC
			 */
			if (tt_common->flags & BATADV_TT_CLIENT_NEW)
				continue;
			total_one = 0;
			for (j = 0; j < ETH_ALEN; j++)
				total_one = crc16_byte(total_one,
						       tt_common->addr[j]);
			total ^= total_one;
		}
		rcu_read_unlock();
	}

	return total;
}
static void batadv_tt_req_list_free(struct batadv_priv *bat_priv)
{
	struct batadv_tt_req_node *node, *safe;

	spin_lock_bh(&bat_priv->tt.req_list_lock);

	list_for_each_entry_safe(node, safe, &bat_priv->tt.req_list, list) {
		list_del(&node->list);
		kfree(node);
	}

	spin_unlock_bh(&bat_priv->tt.req_list_lock);
}

static void batadv_tt_save_orig_buffer(struct batadv_priv *bat_priv,
				       struct batadv_orig_node *orig_node,
				       const unsigned char *tt_buff,
				       uint8_t tt_num_changes)
{
	uint16_t tt_buff_len = batadv_tt_len(tt_num_changes);

	/* Replace the old buffer only if I received something in the
	 * last OGM (the OGM could carry no changes)
	 */
	spin_lock_bh(&orig_node->tt_buff_lock);
	if (tt_buff_len > 0) {
		kfree(orig_node->tt_buff);
		orig_node->tt_buff_len = 0;
		orig_node->tt_buff = kmalloc(tt_buff_len, GFP_ATOMIC);
		if (orig_node->tt_buff) {
			memcpy(orig_node->tt_buff, tt_buff, tt_buff_len);
			orig_node->tt_buff_len = tt_buff_len;
		}
	}
	spin_unlock_bh(&orig_node->tt_buff_lock);
}

static void batadv_tt_req_purge(struct batadv_priv *bat_priv)
{
	struct batadv_tt_req_node *node, *safe;

	spin_lock_bh(&bat_priv->tt.req_list_lock);
	list_for_each_entry_safe(node, safe, &bat_priv->tt.req_list, list) {
		if (batadv_has_timed_out(node->issued_at,
					 BATADV_TT_REQUEST_TIMEOUT)) {
			list_del(&node->list);
			kfree(node);
		}
	}
	spin_unlock_bh(&bat_priv->tt.req_list_lock);
}

/* returns the pointer to the new tt_req_node struct if no request
 * has already been issued for this orig_node, NULL otherwise
 */
static struct batadv_tt_req_node *
batadv_new_tt_req_node(struct batadv_priv *bat_priv,
		       struct batadv_orig_node *orig_node)
{
	struct batadv_tt_req_node *tt_req_node_tmp, *tt_req_node = NULL;

	spin_lock_bh(&bat_priv->tt.req_list_lock);
	list_for_each_entry(tt_req_node_tmp, &bat_priv->tt.req_list, list) {
		if (batadv_compare_eth(tt_req_node_tmp, orig_node) &&
		    !batadv_has_timed_out(tt_req_node_tmp->issued_at,
					  BATADV_TT_REQUEST_TIMEOUT))
			goto unlock;
	}

	tt_req_node = kmalloc(sizeof(*tt_req_node), GFP_ATOMIC);
	if (!tt_req_node)
		goto unlock;

	memcpy(tt_req_node->addr, orig_node->orig, ETH_ALEN);
	tt_req_node->issued_at = jiffies;

	list_add(&tt_req_node->list, &bat_priv->tt.req_list);
unlock:
	spin_unlock_bh(&bat_priv->tt.req_list_lock);
	return tt_req_node;
}

/* data_ptr is useless here, but has to be kept to respect the prototype */
static int batadv_tt_local_valid_entry(const void *entry_ptr,
				       const void *data_ptr)
{
	const struct batadv_tt_common_entry *tt_common_entry = entry_ptr;

	if (tt_common_entry->flags & BATADV_TT_CLIENT_NEW)
		return 0;
	return 1;
}

static int batadv_tt_global_valid(const void *entry_ptr,
				  const void *data_ptr)
{
	const struct batadv_tt_common_entry *tt_common_entry = entry_ptr;
	const struct batadv_tt_global_entry *tt_global_entry;
	const struct batadv_orig_node *orig_node = data_ptr;

	if (tt_common_entry->flags & BATADV_TT_CLIENT_ROAM ||
	    tt_common_entry->flags & BATADV_TT_CLIENT_TEMP)
		return 0;

	tt_global_entry = container_of(tt_common_entry,
				       struct batadv_tt_global_entry,
				       common);

	return batadv_tt_global_entry_has_orig(tt_global_entry, orig_node);
}
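
/* batadv_tt_response_fill_table - allocate a TT_QUERY response skb and fill
 * it with the table entries accepted by valid_cb; the number of copied
 * entries is limited by tt_len and by the MTU of the primary interface, and
 * is stored in the tt_data field of the packet.
 */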
static struct sk_buff *
batadv_tt_response_fill_table(uint16_t tt_len, uint8_t ttvn,
			      struct batadv_hashtable *hash,
			      struct batadv_hard_iface *primary_if,
			      int (*valid_cb)(const void *, const void *),
			      void *cb_data)
{
	struct batadv_tt_common_entry *tt_common_entry;
	struct batadv_tt_query_packet *tt_response;
	struct batadv_tt_change *tt_change;
	struct hlist_node *node;
	struct hlist_head *head;
	struct sk_buff *skb = NULL;
	uint16_t tt_tot, tt_count;
	ssize_t tt_query_size = sizeof(struct batadv_tt_query_packet);
	uint32_t i;
	size_t len;

	if (tt_query_size + tt_len > primary_if->soft_iface->mtu) {
		tt_len = primary_if->soft_iface->mtu - tt_query_size;
		tt_len -= tt_len % sizeof(struct batadv_tt_change);
	}
	tt_tot = tt_len / sizeof(struct batadv_tt_change);

	len = tt_query_size + tt_len;
	skb = dev_alloc_skb(len + ETH_HLEN);
	if (!skb)
		goto out;

	skb_reserve(skb, ETH_HLEN);
	tt_response = (struct batadv_tt_query_packet *)skb_put(skb, len);
	tt_response->ttvn = ttvn;

	tt_change = (struct batadv_tt_change *)(skb->data + tt_query_size);
	tt_count = 0;

	rcu_read_lock();
	for (i = 0; i < hash->size; i++) {
		head = &hash->table[i];

		hlist_for_each_entry_rcu(tt_common_entry, node,
					 head, hash_entry) {
			if (tt_count == tt_tot)
				break;

			if ((valid_cb) && (!valid_cb(tt_common_entry, cb_data)))
				continue;

			memcpy(tt_change->addr, tt_common_entry->addr,
			       ETH_ALEN);
			tt_change->flags = tt_common_entry->flags;

			tt_count++;
			tt_change++;
		}
	}
	rcu_read_unlock();

	/* store in the message the number of entries we have successfully
	 * copied
	 */
	tt_response->tt_data = htons(tt_count);

out:
	return skb;
}

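/* send a TT_REQUEST for the given ttvn/crc to dst_orig_node (optionally
 * asking for the full table); a request is only issued if none is already
 * pending for this originator. Returns 0 on success, 1 otherwise.
 */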
static int batadv_send_tt_request(struct batadv_priv *bat_priv,
				  struct batadv_orig_node *dst_orig_node,
				  uint8_t ttvn, uint16_t tt_crc,
				  bool full_table)
{
	struct sk_buff *skb = NULL;
	struct batadv_tt_query_packet *tt_request;
	struct batadv_neigh_node *neigh_node = NULL;
	struct batadv_hard_iface *primary_if;
	struct batadv_tt_req_node *tt_req_node = NULL;
	int ret = 1;
	size_t tt_req_len;

	primary_if = batadv_primary_if_get_selected(bat_priv);
	if (!primary_if)
		goto out;

	/* The new tt_req will be issued only if I'm not waiting for a
	 * reply from the same orig_node yet
	 */
	tt_req_node = batadv_new_tt_req_node(bat_priv, dst_orig_node);
	if (!tt_req_node)
		goto out;

	skb = dev_alloc_skb(sizeof(*tt_request) + ETH_HLEN);
	if (!skb)
		goto out;

	skb_reserve(skb, ETH_HLEN);

	tt_req_len = sizeof(*tt_request);
	tt_request = (struct batadv_tt_query_packet *)skb_put(skb, tt_req_len);

	tt_request->header.packet_type = BATADV_TT_QUERY;
	tt_request->header.version = BATADV_COMPAT_VERSION;
	memcpy(tt_request->src, primary_if->net_dev->dev_addr, ETH_ALEN);
	memcpy(tt_request->dst, dst_orig_node->orig, ETH_ALEN);
	tt_request->header.ttl = BATADV_TTL;
	tt_request->ttvn = ttvn;
	tt_request->tt_data = htons(tt_crc);
	tt_request->flags = BATADV_TT_REQUEST;

	if (full_table)
		tt_request->flags |= BATADV_TT_FULL_TABLE;

	neigh_node = batadv_orig_node_get_router(dst_orig_node);
	if (!neigh_node)
		goto out;

	batadv_dbg(BATADV_DBG_TT, bat_priv,
		   "Sending TT_REQUEST to %pM via %pM [%c]\n",
		   dst_orig_node->orig, neigh_node->addr,
		   (full_table ? 'F' : '.'));

	batadv_inc_counter(bat_priv, BATADV_CNT_TT_REQUEST_TX);

	batadv_send_skb_packet(skb, neigh_node->if_incoming, neigh_node->addr);
	ret = 0;

out:
	if (neigh_node)
		batadv_neigh_node_free_ref(neigh_node);
	if (primary_if)
		batadv_hardif_free_ref(primary_if);
	if (ret)
		kfree_skb(skb);
	if (ret && tt_req_node) {
		spin_lock_bh(&bat_priv->tt.req_list_lock);
		list_del(&tt_req_node->list);
		spin_unlock_bh(&bat_priv->tt.req_list_lock);
		kfree(tt_req_node);
	}
	return ret;
}

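/* answer a TT_REQUEST that was addressed to another originator on its behalf,
 * using the locally stored global table data for that originator; returns
 * true if the response was sent, false otherwise
 */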
static bool
batadv_send_other_tt_response(struct batadv_priv *bat_priv,
			      struct batadv_tt_query_packet *tt_request)
{
	struct batadv_orig_node *req_dst_orig_node = NULL;
	struct batadv_orig_node *res_dst_orig_node = NULL;
	struct batadv_neigh_node *neigh_node = NULL;
	struct batadv_hard_iface *primary_if = NULL;
	uint8_t orig_ttvn, req_ttvn, ttvn;
	bool ret = false;
	unsigned char *tt_buff;
	bool full_table;
	uint16_t tt_len, tt_tot;
	struct sk_buff *skb = NULL;
	struct batadv_tt_query_packet *tt_response;
	uint8_t *packet_pos;
	size_t len;

	batadv_dbg(BATADV_DBG_TT, bat_priv,
		   "Received TT_REQUEST from %pM for ttvn: %u (%pM) [%c]\n",
		   tt_request->src, tt_request->ttvn, tt_request->dst,
		   (tt_request->flags & BATADV_TT_FULL_TABLE ? 'F' : '.'));

	/* Let's get the orig node of the REAL destination */
	req_dst_orig_node = batadv_orig_hash_find(bat_priv, tt_request->dst);
	if (!req_dst_orig_node)
		goto out;

	res_dst_orig_node = batadv_orig_hash_find(bat_priv, tt_request->src);
	if (!res_dst_orig_node)
		goto out;

	neigh_node = batadv_orig_node_get_router(res_dst_orig_node);
	if (!neigh_node)
		goto out;

	primary_if = batadv_primary_if_get_selected(bat_priv);
	if (!primary_if)
		goto out;

	orig_ttvn = (uint8_t)atomic_read(&req_dst_orig_node->last_ttvn);
	req_ttvn = tt_request->ttvn;

	/* I don't have the requested data */
	if (orig_ttvn != req_ttvn ||
	    tt_request->tt_data != htons(req_dst_orig_node->tt_crc))
		goto out;

	/* If the full table has been explicitly requested */
	if (tt_request->flags & BATADV_TT_FULL_TABLE ||
	    !req_dst_orig_node->tt_buff)
		full_table = true;
	else
		full_table = false;

	/* In this version, fragmentation is not implemented, so send only one
	 * packet with as many TT entries as fit
	 */
	if (!full_table) {
		spin_lock_bh(&req_dst_orig_node->tt_buff_lock);
		tt_len = req_dst_orig_node->tt_buff_len;
		tt_tot = tt_len / sizeof(struct batadv_tt_change);

		len = sizeof(*tt_response) + tt_len;
		skb = dev_alloc_skb(len + ETH_HLEN);
		if (!skb)
			goto unlock;

		skb_reserve(skb, ETH_HLEN);
		packet_pos = skb_put(skb, len);
		tt_response = (struct batadv_tt_query_packet *)packet_pos;
		tt_response->ttvn = req_ttvn;
		tt_response->tt_data = htons(tt_tot);

		tt_buff = skb->data + sizeof(*tt_response);
		/* Copy the last orig_node's OGM buffer */
		memcpy(tt_buff, req_dst_orig_node->tt_buff,
		       req_dst_orig_node->tt_buff_len);

		spin_unlock_bh(&req_dst_orig_node->tt_buff_lock);
	} else {
		tt_len = (uint16_t)atomic_read(&req_dst_orig_node->tt_size);
		tt_len *= sizeof(struct batadv_tt_change);
		ttvn = (uint8_t)atomic_read(&req_dst_orig_node->last_ttvn);

		skb = batadv_tt_response_fill_table(tt_len, ttvn,
						    bat_priv->tt.global_hash,
						    primary_if,
						    batadv_tt_global_valid,
						    req_dst_orig_node);
		if (!skb)
			goto out;

		tt_response = (struct batadv_tt_query_packet *)skb->data;
	}

	tt_response->header.packet_type = BATADV_TT_QUERY;
	tt_response->header.version = BATADV_COMPAT_VERSION;
	tt_response->header.ttl = BATADV_TTL;
	memcpy(tt_response->src, req_dst_orig_node->orig, ETH_ALEN);
	memcpy(tt_response->dst, tt_request->src, ETH_ALEN);
	tt_response->flags = BATADV_TT_RESPONSE;

	if (full_table)
		tt_response->flags |= BATADV_TT_FULL_TABLE;

	batadv_dbg(BATADV_DBG_TT, bat_priv,
		   "Sending TT_RESPONSE %pM via %pM for %pM (ttvn: %u)\n",
		   res_dst_orig_node->orig, neigh_node->addr,
		   req_dst_orig_node->orig, req_ttvn);

	batadv_inc_counter(bat_priv, BATADV_CNT_TT_RESPONSE_TX);

	batadv_send_skb_packet(skb, neigh_node->if_incoming, neigh_node->addr);
	ret = true;
	goto out;

unlock:
	spin_unlock_bh(&req_dst_orig_node->tt_buff_lock);

out:
	if (res_dst_orig_node)
		batadv_orig_node_free_ref(res_dst_orig_node);
	if (req_dst_orig_node)
		batadv_orig_node_free_ref(req_dst_orig_node);
	if (neigh_node)
		batadv_neigh_node_free_ref(neigh_node);
	if (primary_if)
		batadv_hardif_free_ref(primary_if);
	if (!ret)
		kfree_skb(skb);
	return ret;
}

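/* answer a TT_REQUEST that was addressed to this node with either the last
 * local changeset or the full local translation table; always returns true
 * because the request does not need to be re-routed
 */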
static bool
batadv_send_my_tt_response(struct batadv_priv *bat_priv,
			   struct batadv_tt_query_packet *tt_request)
{
	struct batadv_orig_node *orig_node = NULL;
	struct batadv_neigh_node *neigh_node = NULL;
	struct batadv_hard_iface *primary_if = NULL;
	uint8_t my_ttvn, req_ttvn, ttvn;
	bool ret = false;
	unsigned char *tt_buff;
	bool full_table;
	uint16_t tt_len, tt_tot;
	struct sk_buff *skb = NULL;
	struct batadv_tt_query_packet *tt_response;
	uint8_t *packet_pos;
	size_t len;

	batadv_dbg(BATADV_DBG_TT, bat_priv,
		   "Received TT_REQUEST from %pM for ttvn: %u (me) [%c]\n",
		   tt_request->src, tt_request->ttvn,
		   (tt_request->flags & BATADV_TT_FULL_TABLE ? 'F' : '.'));

	my_ttvn = (uint8_t)atomic_read(&bat_priv->tt.vn);
	req_ttvn = tt_request->ttvn;

	orig_node = batadv_orig_hash_find(bat_priv, tt_request->src);
	if (!orig_node)
		goto out;

	neigh_node = batadv_orig_node_get_router(orig_node);
	if (!neigh_node)
		goto out;

	primary_if = batadv_primary_if_get_selected(bat_priv);
	if (!primary_if)
		goto out;

	/* If the full table has been explicitly requested or the gap
	 * is too big send the whole local translation table
	 */
	if (tt_request->flags & BATADV_TT_FULL_TABLE || my_ttvn != req_ttvn ||
	    !bat_priv->tt.last_changeset)
		full_table = true;
	else
		full_table = false;

	/* In this version, fragmentation is not implemented, so send only one
	 * packet with as many TT entries as fit
	 */
	if (!full_table) {
		spin_lock_bh(&bat_priv->tt.last_changeset_lock);
		tt_len = bat_priv->tt.last_changeset_len;
		tt_tot = tt_len / sizeof(struct batadv_tt_change);

		len = sizeof(*tt_response) + tt_len;
		skb = dev_alloc_skb(len + ETH_HLEN);
		if (!skb)
			goto unlock;

		skb_reserve(skb, ETH_HLEN);
		packet_pos = skb_put(skb, len);
		tt_response = (struct batadv_tt_query_packet *)packet_pos;
		tt_response->ttvn = req_ttvn;
		tt_response->tt_data = htons(tt_tot);

		tt_buff = skb->data + sizeof(*tt_response);
		memcpy(tt_buff, bat_priv->tt.last_changeset,
		       bat_priv->tt.last_changeset_len);
		spin_unlock_bh(&bat_priv->tt.last_changeset_lock);
	} else {
		tt_len = (uint16_t)atomic_read(&bat_priv->tt.local_entry_num);
		tt_len *= sizeof(struct batadv_tt_change);
		ttvn = (uint8_t)atomic_read(&bat_priv->tt.vn);

		skb = batadv_tt_response_fill_table(tt_len, ttvn,
						    bat_priv->tt.local_hash,
						    primary_if,
						    batadv_tt_local_valid_entry,
						    NULL);
		if (!skb)
			goto out;

		tt_response = (struct batadv_tt_query_packet *)skb->data;
	}

	tt_response->header.packet_type = BATADV_TT_QUERY;
	tt_response->header.version = BATADV_COMPAT_VERSION;
	tt_response->header.ttl = BATADV_TTL;
	memcpy(tt_response->src, primary_if->net_dev->dev_addr, ETH_ALEN);
	memcpy(tt_response->dst, tt_request->src, ETH_ALEN);
	tt_response->flags = BATADV_TT_RESPONSE;

	if (full_table)
		tt_response->flags |= BATADV_TT_FULL_TABLE;

	batadv_dbg(BATADV_DBG_TT, bat_priv,
		   "Sending TT_RESPONSE to %pM via %pM [%c]\n",
		   orig_node->orig, neigh_node->addr,
		   (tt_response->flags & BATADV_TT_FULL_TABLE ? 'F' : '.'));

	batadv_inc_counter(bat_priv, BATADV_CNT_TT_RESPONSE_TX);

	batadv_send_skb_packet(skb, neigh_node->if_incoming, neigh_node->addr);
	ret = true;
	goto out;

unlock:
	spin_unlock_bh(&bat_priv->tt.last_changeset_lock);
out:
	if (orig_node)
		batadv_orig_node_free_ref(orig_node);
	if (neigh_node)
		batadv_neigh_node_free_ref(neigh_node);
	if (primary_if)
		batadv_hardif_free_ref(primary_if);
	if (!ret)
		kfree_skb(skb);
	/* This packet was for me, so it doesn't need to be re-routed */
	return true;
}

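/* dispatch an incoming TT_REQUEST: answer it directly if it is addressed to
 * this node (unless the requester is a backbone gateway), otherwise answer on
 * behalf of the requested originator
 */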
bool batadv_send_tt_response(struct batadv_priv *bat_priv,
			     struct batadv_tt_query_packet *tt_request)
{
	if (batadv_is_my_mac(tt_request->dst)) {
		/* don't answer backbone gws! */
		if (batadv_bla_is_backbone_gw_orig(bat_priv, tt_request->src))
			return true;

		return batadv_send_my_tt_response(bat_priv, tt_request);
	} else {
		return batadv_send_other_tt_response(bat_priv, tt_request);
	}
}

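/* apply a list of tt_change entries received for orig_node to the global
 * translation table (adding or deleting clients as requested)
 */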
static void _batadv_tt_update_changes(struct batadv_priv *bat_priv,
				      struct batadv_orig_node *orig_node,
				      struct batadv_tt_change *tt_change,
				      uint16_t tt_num_changes, uint8_t ttvn)
{
	int i;
	int roams;

	for (i = 0; i < tt_num_changes; i++) {
		if ((tt_change + i)->flags & BATADV_TT_CLIENT_DEL) {
			roams = (tt_change + i)->flags & BATADV_TT_CLIENT_ROAM;
			batadv_tt_global_del(bat_priv, orig_node,
					     (tt_change + i)->addr,
					     "tt removed by changes",
					     roams);
		} else {
			if (!batadv_tt_global_add(bat_priv, orig_node,
						  (tt_change + i)->addr,
						  (tt_change + i)->flags, ttvn))
				/* In case of a problem while storing a
				 * global_entry, we stop the updating
				 * procedure without committing the ttvn
				 * change. This avoids sending corrupted data
				 * in reply to a later tt_request.
				 */
				return;
		}
	}
	orig_node->tt_initialised = true;
}

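/* replace the global table entries announced by the sender of a full
 * TT_RESPONSE with the content of that response
 */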
static void batadv_tt_fill_gtable(struct batadv_priv *bat_priv,
				  struct batadv_tt_query_packet *tt_response)
{
	struct batadv_orig_node *orig_node = NULL;

	orig_node = batadv_orig_hash_find(bat_priv, tt_response->src);
	if (!orig_node)
		goto out;

	/* Purge the old table first.. */
	batadv_tt_global_del_orig(bat_priv, orig_node, "Received full table");

	_batadv_tt_update_changes(bat_priv, orig_node,
				  (struct batadv_tt_change *)(tt_response + 1),
				  ntohs(tt_response->tt_data),
				  tt_response->ttvn);

	spin_lock_bh(&orig_node->tt_buff_lock);
	kfree(orig_node->tt_buff);
	orig_node->tt_buff_len = 0;
	orig_node->tt_buff = NULL;
	spin_unlock_bh(&orig_node->tt_buff_lock);

	atomic_set(&orig_node->last_ttvn, tt_response->ttvn);

out:
	if (orig_node)
		batadv_orig_node_free_ref(orig_node);
}

static void batadv_tt_update_changes(struct batadv_priv *bat_priv,
				     struct batadv_orig_node *orig_node,
				     uint16_t tt_num_changes, uint8_t ttvn,
				     struct batadv_tt_change *tt_change)
{
	_batadv_tt_update_changes(bat_priv, orig_node, tt_change,
				  tt_num_changes, ttvn);

	batadv_tt_save_orig_buffer(bat_priv, orig_node,
				   (unsigned char *)tt_change, tt_num_changes);
	atomic_set(&orig_node->last_ttvn, ttvn);
}

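/* returns true if the given address is announced as a local (non-pending)
 * client of this node
 */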
bool batadv_is_my_client(struct batadv_priv *bat_priv, const uint8_t *addr)
{
	struct batadv_tt_local_entry *tt_local_entry = NULL;
	bool ret = false;

	tt_local_entry = batadv_tt_local_hash_find(bat_priv, addr);
	if (!tt_local_entry)
		goto out;
	/* Check if the client has been logically deleted (but is kept for
	 * consistency purposes)
	 */
	if (tt_local_entry->common.flags & BATADV_TT_CLIENT_PENDING)
		goto out;
	ret = true;
out:
	if (tt_local_entry)
		batadv_tt_local_entry_free_ref(tt_local_entry);
	return ret;
}

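/* process an incoming TT_RESPONSE: update the global table for the sending
 * originator, drop the matching pending request and recompute the CRC
 */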
void batadv_handle_tt_response(struct batadv_priv *bat_priv,
			       struct batadv_tt_query_packet *tt_response)
{
	struct batadv_tt_req_node *node, *safe;
	struct batadv_orig_node *orig_node = NULL;
	struct batadv_tt_change *tt_change;

	batadv_dbg(BATADV_DBG_TT, bat_priv,
		   "Received TT_RESPONSE from %pM for ttvn %d t_size: %d [%c]\n",
		   tt_response->src, tt_response->ttvn,
		   ntohs(tt_response->tt_data),
		   (tt_response->flags & BATADV_TT_FULL_TABLE ? 'F' : '.'));

	/* we should have never asked a backbone gw */
	if (batadv_bla_is_backbone_gw_orig(bat_priv, tt_response->src))
		goto out;

	orig_node = batadv_orig_hash_find(bat_priv, tt_response->src);
	if (!orig_node)
		goto out;

	if (tt_response->flags & BATADV_TT_FULL_TABLE) {
		batadv_tt_fill_gtable(bat_priv, tt_response);
	} else {
		tt_change = (struct batadv_tt_change *)(tt_response + 1);
		batadv_tt_update_changes(bat_priv, orig_node,
					 ntohs(tt_response->tt_data),
					 tt_response->ttvn, tt_change);
	}

	/* Delete the tt_req_node from pending tt_requests list */
	spin_lock_bh(&bat_priv->tt.req_list_lock);
	list_for_each_entry_safe(node, safe, &bat_priv->tt.req_list, list) {
		if (!batadv_compare_eth(node->addr, tt_response->src))
			continue;
		list_del(&node->list);
		kfree(node);
	}
	spin_unlock_bh(&bat_priv->tt.req_list_lock);

	/* Recalculate the CRC for this orig_node and store it */
	orig_node->tt_crc = batadv_tt_global_crc(bat_priv, orig_node);
	/* Roaming phase is over: tables are in sync again. I can
	 * unset the flag
	 */
	orig_node->tt_poss_change = false;
out:
	if (orig_node)
		batadv_orig_node_free_ref(orig_node);
}

int batadv_tt_init(struct batadv_priv *bat_priv)
{
	int ret;

	ret = batadv_tt_local_init(bat_priv);
	if (ret < 0)
		return ret;

	ret = batadv_tt_global_init(bat_priv);
	if (ret < 0)
		return ret;

	batadv_tt_start_timer(bat_priv);

	return 1;
}

static void batadv_tt_roam_list_free(struct batadv_priv *bat_priv)
{
	struct batadv_tt_roam_node *node, *safe;

	spin_lock_bh(&bat_priv->tt.roam_list_lock);

	list_for_each_entry_safe(node, safe, &bat_priv->tt.roam_list, list) {
		list_del(&node->list);
		kfree(node);
	}

	spin_unlock_bh(&bat_priv->tt.roam_list_lock);
}

static void batadv_tt_roam_purge(struct batadv_priv *bat_priv)
{
	struct batadv_tt_roam_node *node, *safe;

	spin_lock_bh(&bat_priv->tt.roam_list_lock);
	list_for_each_entry_safe(node, safe, &bat_priv->tt.roam_list, list) {
		if (!batadv_has_timed_out(node->first_time,
					  BATADV_ROAMING_MAX_TIME))
			continue;

		list_del(&node->list);
		kfree(node);
	}
	spin_unlock_bh(&bat_priv->tt.roam_list_lock);
}

/* This function checks whether the client already reached the
 * maximum number of possible roaming phases. In this case the ROAMING_ADV
 * will not be sent.
 *
 * returns true if the ROAMING_ADV can be sent, false otherwise
 */
static bool batadv_tt_check_roam_count(struct batadv_priv *bat_priv,
				       uint8_t *client)
{
	struct batadv_tt_roam_node *tt_roam_node;
	bool ret = false;

	spin_lock_bh(&bat_priv->tt.roam_list_lock);
	/* check whether the client has already roamed recently; if so, only
	 * allow the advertisement while its roaming counter is not exhausted
	 */
	list_for_each_entry(tt_roam_node, &bat_priv->tt.roam_list, list) {
		if (!batadv_compare_eth(tt_roam_node->addr, client))
			continue;

		if (batadv_has_timed_out(tt_roam_node->first_time,
					 BATADV_ROAMING_MAX_TIME))
			continue;

		if (!batadv_atomic_dec_not_zero(&tt_roam_node->counter))
			/* Sorry, you roamed too many times! */
			goto unlock;
		ret = true;
		break;
	}

	if (!ret) {
		tt_roam_node = kmalloc(sizeof(*tt_roam_node), GFP_ATOMIC);
		if (!tt_roam_node)
			goto unlock;

		tt_roam_node->first_time = jiffies;
		atomic_set(&tt_roam_node->counter,
			   BATADV_ROAMING_MAX_COUNT - 1);
		memcpy(tt_roam_node->addr, client, ETH_ALEN);

		list_add(&tt_roam_node->list, &bat_priv->tt.roam_list);
		ret = true;
	}

unlock:
	spin_unlock_bh(&bat_priv->tt.roam_list_lock);
	return ret;
}

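/* send a ROAMING_ADV to orig_node to announce that the given client has
 * roamed to this node, unless the client has already exceeded the roaming
 * limit (see batadv_tt_check_roam_count)
 */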
static void batadv_send_roam_adv(struct batadv_priv *bat_priv, uint8_t *client,
				 struct batadv_orig_node *orig_node)
{
	struct batadv_neigh_node *neigh_node = NULL;
	struct sk_buff *skb = NULL;
	struct batadv_roam_adv_packet *roam_adv_packet;
	int ret = 1;
	struct batadv_hard_iface *primary_if;
	size_t len = sizeof(*roam_adv_packet);

	/* before going on we have to check whether the client has
	 * already roamed to us too many times
	 */
	if (!batadv_tt_check_roam_count(bat_priv, client))
		goto out;

	skb = dev_alloc_skb(sizeof(*roam_adv_packet) + ETH_HLEN);
	if (!skb)
		goto out;

	skb_reserve(skb, ETH_HLEN);

	roam_adv_packet = (struct batadv_roam_adv_packet *)skb_put(skb, len);

	roam_adv_packet->header.packet_type = BATADV_ROAM_ADV;
	roam_adv_packet->header.version = BATADV_COMPAT_VERSION;
	roam_adv_packet->header.ttl = BATADV_TTL;
	roam_adv_packet->reserved = 0;
	primary_if = batadv_primary_if_get_selected(bat_priv);
	if (!primary_if)
		goto out;
	memcpy(roam_adv_packet->src, primary_if->net_dev->dev_addr, ETH_ALEN);
	batadv_hardif_free_ref(primary_if);
	memcpy(roam_adv_packet->dst, orig_node->orig, ETH_ALEN);
	memcpy(roam_adv_packet->client, client, ETH_ALEN);

	neigh_node = batadv_orig_node_get_router(orig_node);
	if (!neigh_node)
		goto out;

	batadv_dbg(BATADV_DBG_TT, bat_priv,
		   "Sending ROAMING_ADV to %pM (client %pM) via %pM\n",
		   orig_node->orig, client, neigh_node->addr);

	batadv_inc_counter(bat_priv, BATADV_CNT_TT_ROAM_ADV_TX);

	batadv_send_skb_packet(skb, neigh_node->if_incoming, neigh_node->addr);
	ret = 0;

out:
	if (neigh_node)
		batadv_neigh_node_free_ref(neigh_node);
	if (ret)
		kfree_skb(skb);
	return;
}

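/* periodic worker: purge timed out local/global entries as well as stale
 * pending requests and roaming events, then re-arm the TT timer
 */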
static void batadv_tt_purge(struct work_struct *work)
{
	struct delayed_work *delayed_work;
	struct batadv_priv_tt *priv_tt;
	struct batadv_priv *bat_priv;

	delayed_work = container_of(work, struct delayed_work, work);
	priv_tt = container_of(delayed_work, struct batadv_priv_tt, work);
	bat_priv = container_of(priv_tt, struct batadv_priv, tt);

	batadv_tt_local_purge(bat_priv);
	batadv_tt_global_purge(bat_priv);
	batadv_tt_req_purge(bat_priv);
	batadv_tt_roam_purge(bat_priv);

	batadv_tt_start_timer(bat_priv);
}

void batadv_tt_free(struct batadv_priv *bat_priv)
{
	cancel_delayed_work_sync(&bat_priv->tt.work);

	batadv_tt_local_table_free(bat_priv);
	batadv_tt_global_table_free(bat_priv);
	batadv_tt_req_list_free(bat_priv);
	batadv_tt_changes_list_free(bat_priv);
	batadv_tt_roam_list_free(bat_priv);

	kfree(bat_priv->tt.last_changeset);
}

/* This function enables or disables the specified flags for all the entries
 * in the given hash table and returns the number of modified entries
 */
static uint16_t batadv_tt_set_flags(struct batadv_hashtable *hash,
				    uint16_t flags, bool enable)
{
	uint32_t i;
	uint16_t changed_num = 0;
	struct hlist_head *head;
	struct hlist_node *node;
	struct batadv_tt_common_entry *tt_common_entry;

	if (!hash)
		goto out;

	for (i = 0; i < hash->size; i++) {
		head = &hash->table[i];

		rcu_read_lock();
		hlist_for_each_entry_rcu(tt_common_entry, node,
					 head, hash_entry) {
			if (enable) {
				if ((tt_common_entry->flags & flags) == flags)
					continue;
				tt_common_entry->flags |= flags;
			} else {
				if (!(tt_common_entry->flags & flags))
					continue;
				tt_common_entry->flags &= ~flags;
			}
			changed_num++;
		}
		rcu_read_unlock();
	}
out:
	return changed_num;
}

/* Purge out all the tt local entries marked with BATADV_TT_CLIENT_PENDING */
static void batadv_tt_local_purge_pending_clients(struct batadv_priv *bat_priv)
{
	struct batadv_hashtable *hash = bat_priv->tt.local_hash;
	struct batadv_tt_common_entry *tt_common;
	struct batadv_tt_local_entry *tt_local;
	struct hlist_node *node, *node_tmp;
	struct hlist_head *head;
	spinlock_t *list_lock; /* protects write access to the hash lists */
	uint32_t i;

	if (!hash)
		return;

	for (i = 0; i < hash->size; i++) {
		head = &hash->table[i];
		list_lock = &hash->list_locks[i];

		spin_lock_bh(list_lock);
		hlist_for_each_entry_safe(tt_common, node, node_tmp, head,
					  hash_entry) {
			if (!(tt_common->flags & BATADV_TT_CLIENT_PENDING))
				continue;

			batadv_dbg(BATADV_DBG_TT, bat_priv,
				   "Deleting local tt entry (%pM): pending\n",
				   tt_common->addr);

			atomic_dec(&bat_priv->tt.local_entry_num);
			hlist_del_rcu(node);
			tt_local = container_of(tt_common,
						struct batadv_tt_local_entry,
						common);
			batadv_tt_local_entry_free_ref(tt_local);
		}
		spin_unlock_bh(list_lock);
	}
}

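/* commit all pending local TT changes: clear the NEW flag, drop PENDING
 * entries, recompute the local CRC, increase the ttvn and fill the packet
 * buffer with the change list; returns -ENOENT if there was nothing to commit
 */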
static int batadv_tt_commit_changes(struct batadv_priv *bat_priv,
				    unsigned char **packet_buff,
				    int *packet_buff_len, int packet_min_len)
{
	uint16_t changed_num = 0;

	if (atomic_read(&bat_priv->tt.local_changes) < 1)
		return -ENOENT;

	changed_num = batadv_tt_set_flags(bat_priv->tt.local_hash,
					  BATADV_TT_CLIENT_NEW, false);

	/* all reset entries have to be counted as local entries */
	atomic_add(changed_num, &bat_priv->tt.local_entry_num);
	batadv_tt_local_purge_pending_clients(bat_priv);
	bat_priv->tt.local_crc = batadv_tt_local_crc(bat_priv);

	/* Increment the TTVN only once per OGM interval */
	atomic_inc(&bat_priv->tt.vn);
	batadv_dbg(BATADV_DBG_TT, bat_priv,
		   "Local changes committed, updating to ttvn %u\n",
		   (uint8_t)atomic_read(&bat_priv->tt.vn));
	bat_priv->tt.poss_change = false;

	/* reset the sending counter */
	atomic_set(&bat_priv->tt.ogm_append_cnt, BATADV_TT_OGM_APPEND_MAX);

	return batadv_tt_changes_fill_buff(bat_priv, packet_buff,
					   packet_buff_len, packet_min_len);
}

/* when calling this function (hard_iface == primary_if) has to be true */
int batadv_tt_append_diff(struct batadv_priv *bat_priv,
			  unsigned char **packet_buff, int *packet_buff_len,
			  int packet_min_len)
{
	int tt_num_changes;

	/* if at least one change happened */
	tt_num_changes = batadv_tt_commit_changes(bat_priv, packet_buff,
						  packet_buff_len,
						  packet_min_len);

	/* if the changes have been sent often enough */
	if ((tt_num_changes < 0) &&
	    (!batadv_atomic_dec_not_zero(&bat_priv->tt.ogm_append_cnt))) {
		batadv_tt_realloc_packet_buff(packet_buff, packet_buff_len,
					      packet_min_len, packet_min_len);
		tt_num_changes = 0;
	}

	return tt_num_changes;
}

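/* returns true if AP isolation is enabled and the src/dst client pair matches
 * the isolation criteria, i.e. traffic between them must not be forwarded
 */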
bool batadv_is_ap_isolated(struct batadv_priv *bat_priv, uint8_t *src,
			   uint8_t *dst)
{
	struct batadv_tt_local_entry *tt_local_entry = NULL;
	struct batadv_tt_global_entry *tt_global_entry = NULL;
	bool ret = false;

	if (!atomic_read(&bat_priv->ap_isolation))
		goto out;

	tt_local_entry = batadv_tt_local_hash_find(bat_priv, dst);
	if (!tt_local_entry)
		goto out;

	tt_global_entry = batadv_tt_global_hash_find(bat_priv, src);
	if (!tt_global_entry)
		goto out;

	if (!_batadv_is_ap_isolated(tt_local_entry, tt_global_entry))
		goto out;

	ret = true;

out:
	if (tt_global_entry)
		batadv_tt_global_entry_free_ref(tt_global_entry);
	if (tt_local_entry)
		batadv_tt_local_entry_free_ref(tt_local_entry);
	return ret;
}

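/* process the TT information attached to an OGM: apply the announced changes
 * if the ttvn advanced by exactly one, otherwise (or on a CRC mismatch)
 * request a fresh table from the originator
 */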
void batadv_tt_update_orig(struct batadv_priv *bat_priv,
			   struct batadv_orig_node *orig_node,
			   const unsigned char *tt_buff, uint8_t tt_num_changes,
			   uint8_t ttvn, uint16_t tt_crc)
{
	uint8_t orig_ttvn = (uint8_t)atomic_read(&orig_node->last_ttvn);
	bool full_table = true;
	struct batadv_tt_change *tt_change;

	/* don't care about a backbone gateway's updates. */
	if (batadv_bla_is_backbone_gw_orig(bat_priv, orig_node->orig))
		return;

	/* orig table not initialised AND first diff is in the OGM OR the ttvn
	 * increased by one -> we can apply the attached changes
	 */
	if ((!orig_node->tt_initialised && ttvn == 1) ||
	    ttvn - orig_ttvn == 1) {
		/* the OGM could not contain the changes due to their size or
		 * because they have already been sent BATADV_TT_OGM_APPEND_MAX
		 * times.
		 * In this case send a tt request
		 */
		if (!tt_num_changes) {
			full_table = false;
			goto request_table;
		}

		tt_change = (struct batadv_tt_change *)tt_buff;
		batadv_tt_update_changes(bat_priv, orig_node, tt_num_changes,
					 ttvn, tt_change);

		/* Even if we received the precomputed crc with the OGM, we
		 * prefer to recompute it to spot any possible inconsistency
		 * in the global table
		 */
		orig_node->tt_crc = batadv_tt_global_crc(bat_priv, orig_node);

		/* The ttvn alone is not enough to guarantee consistency
		 * because a single value could represent different states
		 * (due to the wrap around). Thus a node has to check whether
		 * the resulting table (after applying the changes) is still
		 * consistent or not. E.g. a node could disconnect while its
		 * ttvn is X and reconnect on ttvn = X + TTVN_MAX: in this case
		 * checking the CRC value is mandatory to detect the
		 * inconsistency
		 */
		if (orig_node->tt_crc != tt_crc)
			goto request_table;

		/* Roaming phase is over: tables are in sync again. I can
		 * unset the flag
		 */
		orig_node->tt_poss_change = false;
	} else {
		/* if we missed more than one change or our tables are not
		 * in sync anymore -> request fresh tt data
		 */
		if (!orig_node->tt_initialised || ttvn != orig_ttvn ||
		    orig_node->tt_crc != tt_crc) {
request_table:
			batadv_dbg(BATADV_DBG_TT, bat_priv,
				   "TT inconsistency for %pM. Need to retrieve the correct information (ttvn: %u last_ttvn: %u crc: %u last_crc: %u num_changes: %u)\n",
				   orig_node->orig, ttvn, orig_ttvn, tt_crc,
				   orig_node->tt_crc, tt_num_changes);
			batadv_send_tt_request(bat_priv, orig_node, ttvn,
					       tt_crc, full_table);
			return;
		}
	}
}

/* returns true if we know that the client has moved from its old
 * originator to another one. The entry is still kept for consistency
 * purposes
 */
bool batadv_tt_global_client_is_roaming(struct batadv_priv *bat_priv,
					uint8_t *addr)
{
	struct batadv_tt_global_entry *tt_global_entry;
	bool ret = false;

	tt_global_entry = batadv_tt_global_hash_find(bat_priv, addr);
	if (!tt_global_entry)
		goto out;

	ret = tt_global_entry->common.flags & BATADV_TT_CLIENT_ROAM;
	batadv_tt_global_entry_free_ref(tt_global_entry);
out:
	return ret;
}

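/* add the given client to the global table as a temporary entry announced by
 * orig_node; the entry is not added if the originator is a backbone gateway.
 * Returns true if the entry was added, false otherwise.
 */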
bool batadv_tt_add_temporary_global_entry(struct batadv_priv *bat_priv,
					  struct batadv_orig_node *orig_node,
					  const unsigned char *addr)
{
	bool ret = false;

	/* if the originator is a backbone node (meaning it belongs to the same
	 * LAN of this node) the temporary client must not be added because to
	 * reach such destination the node must use the LAN instead of the mesh
	 */
	if (batadv_bla_is_backbone_gw_orig(bat_priv, orig_node->orig))
		goto out;

	if (!batadv_tt_global_add(bat_priv, orig_node, addr,
				  BATADV_TT_CLIENT_TEMP,
				  atomic_read(&orig_node->last_ttvn)))
		goto out;

	batadv_dbg(BATADV_DBG_TT, bat_priv,
		   "Added temporary global client (addr: %pM orig: %pM)\n",
		   addr, orig_node->orig);
	ret = true;
out:
	return ret;
}