originator.c

/*
 * Copyright (C) 2009-2011 B.A.T.M.A.N. contributors:
 *
 * Marek Lindner, Simon Wunderlich
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
 * 02110-1301, USA
 *
 */

#include "main.h"
#include "originator.h"
#include "hash.h"
#include "translation-table.h"
#include "routing.h"
#include "gateway_client.h"
#include "hard-interface.h"
#include "unicast.h"
#include "soft-interface.h"

static void purge_orig(struct work_struct *work);

static void start_purge_timer(struct bat_priv *bat_priv)
{
	INIT_DELAYED_WORK(&bat_priv->orig_work, purge_orig);
	queue_delayed_work(bat_event_workqueue, &bat_priv->orig_work, 1 * HZ);
}
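
/*
 * purge_orig() (defined below) re-arms this timer when it completes,
 * so the originator table is swept roughly once per second until
 * originator_free() cancels the delayed work.
 */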

int originator_init(struct bat_priv *bat_priv)
{
	if (bat_priv->orig_hash)
		return 1;

	spin_lock_bh(&bat_priv->orig_hash_lock);
	bat_priv->orig_hash = hash_new(1024);

	if (!bat_priv->orig_hash)
		goto err;

	spin_unlock_bh(&bat_priv->orig_hash_lock);
	start_purge_timer(bat_priv);
	return 1;

err:
	spin_unlock_bh(&bat_priv->orig_hash_lock);
	return 0;
}

static void neigh_node_free_rcu(struct rcu_head *rcu)
{
	struct neigh_node *neigh_node;

	neigh_node = container_of(rcu, struct neigh_node, rcu);
	kfree(neigh_node);
}

void neigh_node_free_ref(struct neigh_node *neigh_node)
{
	if (atomic_dec_and_test(&neigh_node->refcount))
		call_rcu(&neigh_node->rcu, neigh_node_free_rcu);
}
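
/*
 * neigh_node teardown follows the usual refcount + RCU idiom: the
 * final neigh_node_free_ref() does not kfree() directly but defers
 * the free through call_rcu(), so readers still walking a neigh_list
 * under rcu_read_lock() can never touch freed memory.
 */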

struct neigh_node *create_neighbor(struct orig_node *orig_node,
				   struct orig_node *orig_neigh_node,
				   uint8_t *neigh,
				   struct batman_if *if_incoming)
{
	struct bat_priv *bat_priv = netdev_priv(if_incoming->soft_iface);
	struct neigh_node *neigh_node;

	bat_dbg(DBG_BATMAN, bat_priv,
		"Creating new last-hop neighbor of originator\n");

	neigh_node = kzalloc(sizeof(struct neigh_node), GFP_ATOMIC);
	if (!neigh_node)
		return NULL;

	INIT_HLIST_NODE(&neigh_node->list);
	INIT_LIST_HEAD(&neigh_node->bonding_list);

	memcpy(neigh_node->addr, neigh, ETH_ALEN);
	neigh_node->orig_node = orig_neigh_node;
	neigh_node->if_incoming = if_incoming;
	atomic_set(&neigh_node->refcount, 1);

	spin_lock_bh(&orig_node->neigh_list_lock);
	hlist_add_head_rcu(&neigh_node->list, &orig_node->neigh_list);
	spin_unlock_bh(&orig_node->neigh_list_lock);

	return neigh_node;
}
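
/*
 * New neighbors are published with hlist_add_head_rcu() while holding
 * neigh_list_lock: lockless readers traversing neigh_list under
 * rcu_read_lock() either miss the new entry or see it fully
 * initialised, while writers still serialise against each other via
 * the spinlock.
 */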

void orig_node_free_ref(struct kref *refcount)
{
	struct hlist_node *node, *node_tmp;
	struct neigh_node *neigh_node, *tmp_neigh_node;
	struct orig_node *orig_node;

	orig_node = container_of(refcount, struct orig_node, refcount);

	spin_lock_bh(&orig_node->neigh_list_lock);

	/* for all bonding members ... */
	list_for_each_entry_safe(neigh_node, tmp_neigh_node,
				 &orig_node->bond_list, bonding_list) {
		list_del_rcu(&neigh_node->bonding_list);
		neigh_node_free_ref(neigh_node);
	}

	/* for all neighbors towards this originator ... */
	hlist_for_each_entry_safe(neigh_node, node, node_tmp,
				  &orig_node->neigh_list, list) {
		hlist_del_rcu(&neigh_node->list);
		neigh_node_free_ref(neigh_node);
	}

	spin_unlock_bh(&orig_node->neigh_list_lock);

	frag_list_free(&orig_node->frag_list);
	hna_global_del_orig(orig_node->bat_priv, orig_node,
			    "originator timed out");

	kfree(orig_node->bcast_own);
	kfree(orig_node->bcast_own_sum);
	kfree(orig_node);
}
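
/*
 * orig_node_free_ref() is the kref release callback: it only ever runs
 * via kref_put(&orig_node->refcount, orig_node_free_ref) once the last
 * reference is dropped, and then releases the neighbor lists, the
 * fragment list, the global HNA entries and the per-interface arrays.
 */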

void originator_free(struct bat_priv *bat_priv)
{
	struct hashtable_t *hash = bat_priv->orig_hash;
	struct hlist_node *walk, *safe;
	struct hlist_head *head;
	struct element_t *bucket;
	spinlock_t *list_lock; /* spinlock to protect write access */
	struct orig_node *orig_node;
	int i;

	if (!hash)
		return;

	cancel_delayed_work_sync(&bat_priv->orig_work);

	spin_lock_bh(&bat_priv->orig_hash_lock);
	bat_priv->orig_hash = NULL;

	for (i = 0; i < hash->size; i++) {
		head = &hash->table[i];
		list_lock = &hash->list_locks[i];

		spin_lock_bh(list_lock);
		hlist_for_each_entry_safe(bucket, walk, safe, head, hlist) {
			orig_node = bucket->data;

			hlist_del_rcu(walk);
			call_rcu(&bucket->rcu, bucket_free_rcu);
			kref_put(&orig_node->refcount, orig_node_free_ref);
		}
		spin_unlock_bh(list_lock);
	}

	hash_destroy(hash);
	spin_unlock_bh(&bat_priv->orig_hash_lock);
}

static void bucket_free_orig_rcu(struct rcu_head *rcu)
{
	struct element_t *bucket;
	struct orig_node *orig_node;

	bucket = container_of(rcu, struct element_t, rcu);
	orig_node = bucket->data;

	kref_put(&orig_node->refcount, orig_node_free_ref);
	kfree(bucket);
}

/* this function finds or creates an originator entry for the given
 * address if it does not exist */
struct orig_node *get_orig_node(struct bat_priv *bat_priv, uint8_t *addr)
{
	struct orig_node *orig_node;
	int size;
	int hash_added;

	rcu_read_lock();
	orig_node = ((struct orig_node *)hash_find(bat_priv->orig_hash,
						   compare_orig, choose_orig,
						   addr));
	rcu_read_unlock();

	if (orig_node) {
		/* increase the reference counter for this originator */
		kref_get(&orig_node->refcount);
		return orig_node;
	}

	bat_dbg(DBG_BATMAN, bat_priv,
		"Creating new originator: %pM\n", addr);

	orig_node = kzalloc(sizeof(struct orig_node), GFP_ATOMIC);
	if (!orig_node)
		return NULL;

	INIT_HLIST_HEAD(&orig_node->neigh_list);
	INIT_LIST_HEAD(&orig_node->bond_list);
	spin_lock_init(&orig_node->ogm_cnt_lock);
	spin_lock_init(&orig_node->neigh_list_lock);
	kref_init(&orig_node->refcount);

	orig_node->bat_priv = bat_priv;
	memcpy(orig_node->orig, addr, ETH_ALEN);
	orig_node->router = NULL;
	orig_node->hna_buff = NULL;
	orig_node->bcast_seqno_reset = jiffies - 1
		- msecs_to_jiffies(RESET_PROTECTION_MS);
	orig_node->batman_seqno_reset = jiffies - 1
		- msecs_to_jiffies(RESET_PROTECTION_MS);

	atomic_set(&orig_node->bond_candidates, 0);

	size = bat_priv->num_ifaces * sizeof(unsigned long) * NUM_WORDS;

	orig_node->bcast_own = kzalloc(size, GFP_ATOMIC);
	if (!orig_node->bcast_own)
		goto free_orig_node;

	size = bat_priv->num_ifaces * sizeof(uint8_t);
	orig_node->bcast_own_sum = kzalloc(size, GFP_ATOMIC);

	INIT_LIST_HEAD(&orig_node->frag_list);
	orig_node->last_frag_packet = 0;

	if (!orig_node->bcast_own_sum)
		goto free_bcast_own;

	hash_added = hash_add(bat_priv->orig_hash, compare_orig, choose_orig,
			      orig_node);
	if (hash_added < 0)
		goto free_bcast_own_sum;

	/* extra reference for return */
	kref_get(&orig_node->refcount);

	return orig_node;

free_bcast_own_sum:
	kfree(orig_node->bcast_own_sum);
free_bcast_own:
	kfree(orig_node->bcast_own);
free_orig_node:
	kfree(orig_node);
	return NULL;
}
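
/*
 * Illustrative usage (hypothetical caller, not part of this file): a
 * receive-path function would look the originator up and drop its
 * reference once done, e.g.:
 *
 *	struct orig_node *orig_node;
 *
 *	orig_node = get_orig_node(bat_priv, ethhdr->h_source);
 *	if (!orig_node)
 *		return;
 *	...
 *	kref_put(&orig_node->refcount, orig_node_free_ref);
 */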

static bool purge_orig_neighbors(struct bat_priv *bat_priv,
				 struct orig_node *orig_node,
				 struct neigh_node **best_neigh_node)
{
	struct hlist_node *node, *node_tmp;
	struct neigh_node *neigh_node;
	bool neigh_purged = false;

	*best_neigh_node = NULL;

	spin_lock_bh(&orig_node->neigh_list_lock);

	/* for all neighbors towards this originator ... */
	hlist_for_each_entry_safe(neigh_node, node, node_tmp,
				  &orig_node->neigh_list, list) {

		if ((time_after(jiffies,
				neigh_node->last_valid + PURGE_TIMEOUT * HZ)) ||
		    (neigh_node->if_incoming->if_status == IF_INACTIVE) ||
		    (neigh_node->if_incoming->if_status == IF_NOT_IN_USE) ||
		    (neigh_node->if_incoming->if_status == IF_TO_BE_REMOVED)) {

			if ((neigh_node->if_incoming->if_status ==
			     IF_INACTIVE) ||
			    (neigh_node->if_incoming->if_status ==
			     IF_NOT_IN_USE) ||
			    (neigh_node->if_incoming->if_status ==
			     IF_TO_BE_REMOVED))
				bat_dbg(DBG_BATMAN, bat_priv,
					"neighbor purge: originator %pM, "
					"neighbor: %pM, iface: %s\n",
					orig_node->orig, neigh_node->addr,
					neigh_node->if_incoming->net_dev->name);
			else
				bat_dbg(DBG_BATMAN, bat_priv,
					"neighbor timeout: originator %pM, "
					"neighbor: %pM, last_valid: %lu\n",
					orig_node->orig, neigh_node->addr,
					(neigh_node->last_valid / HZ));

			neigh_purged = true;

			hlist_del_rcu(&neigh_node->list);
			bonding_candidate_del(orig_node, neigh_node);
			neigh_node_free_ref(neigh_node);
		} else {
			if ((!*best_neigh_node) ||
			    (neigh_node->tq_avg > (*best_neigh_node)->tq_avg))
				*best_neigh_node = neigh_node;
		}
	}

	spin_unlock_bh(&orig_node->neigh_list_lock);
	return neigh_purged;
}
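
/*
 * A neighbor is purged when it has not been heard from for
 * PURGE_TIMEOUT seconds, or when its incoming interface is inactive,
 * unused or about to be removed; among the survivors the one with the
 * highest tq_avg is handed back so the caller can re-select the route.
 */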

static bool purge_orig_node(struct bat_priv *bat_priv,
			    struct orig_node *orig_node)
{
	struct neigh_node *best_neigh_node;

	if (time_after(jiffies,
		       orig_node->last_valid + 2 * PURGE_TIMEOUT * HZ)) {

		bat_dbg(DBG_BATMAN, bat_priv,
			"Originator timeout: originator %pM, last_valid %lu\n",
			orig_node->orig, (orig_node->last_valid / HZ));
		return true;
	} else {
		if (purge_orig_neighbors(bat_priv, orig_node,
					 &best_neigh_node)) {
			update_routes(bat_priv, orig_node,
				      best_neigh_node,
				      orig_node->hna_buff,
				      orig_node->hna_buff_len);
		}
	}

	return false;
}

static void _purge_orig(struct bat_priv *bat_priv)
{
	struct hashtable_t *hash = bat_priv->orig_hash;
	struct hlist_node *walk, *safe;
	struct hlist_head *head;
	struct element_t *bucket;
	spinlock_t *list_lock; /* spinlock to protect write access */
	struct orig_node *orig_node;
	int i;

	if (!hash)
		return;

	spin_lock_bh(&bat_priv->orig_hash_lock);

	/* for all origins... */
	for (i = 0; i < hash->size; i++) {
		head = &hash->table[i];
		list_lock = &hash->list_locks[i];

		spin_lock_bh(list_lock);
		hlist_for_each_entry_safe(bucket, walk, safe, head, hlist) {
			orig_node = bucket->data;

			if (purge_orig_node(bat_priv, orig_node)) {
				if (orig_node->gw_flags)
					gw_node_delete(bat_priv, orig_node);

				hlist_del_rcu(walk);
				call_rcu(&bucket->rcu, bucket_free_orig_rcu);
				continue;
			}

			if (time_after(jiffies, orig_node->last_frag_packet +
					msecs_to_jiffies(FRAG_TIMEOUT)))
				frag_list_free(&orig_node->frag_list);
		}
		spin_unlock_bh(list_lock);
	}

	spin_unlock_bh(&bat_priv->orig_hash_lock);

	gw_node_purge(bat_priv);
	gw_election(bat_priv);

	softif_neigh_purge(bat_priv);
}

static void purge_orig(struct work_struct *work)
{
	struct delayed_work *delayed_work =
		container_of(work, struct delayed_work, work);
	struct bat_priv *bat_priv =
		container_of(delayed_work, struct bat_priv, orig_work);

	_purge_orig(bat_priv);
	start_purge_timer(bat_priv);
}

void purge_orig_ref(struct bat_priv *bat_priv)
{
	_purge_orig(bat_priv);
}
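
/*
 * purge_orig_ref() exposes the sweep to the rest of the module,
 * presumably so a purge can be forced immediately (e.g. after an
 * interface change) instead of waiting up to a second for the next
 * scheduled purge_orig() run.
 */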

int orig_seq_print_text(struct seq_file *seq, void *offset)
{
	struct net_device *net_dev = (struct net_device *)seq->private;
	struct bat_priv *bat_priv = netdev_priv(net_dev);
	struct hashtable_t *hash = bat_priv->orig_hash;
	struct hlist_node *walk, *node;
	struct hlist_head *head;
	struct element_t *bucket;
	struct orig_node *orig_node;
	struct neigh_node *neigh_node;
	int batman_count = 0;
	int last_seen_secs;
	int last_seen_msecs;
	int i;

	if ((!bat_priv->primary_if) ||
	    (bat_priv->primary_if->if_status != IF_ACTIVE)) {
		if (!bat_priv->primary_if)
			return seq_printf(seq, "BATMAN mesh %s disabled - "
					  "please specify interfaces to enable it\n",
					  net_dev->name);

		return seq_printf(seq, "BATMAN mesh %s "
				  "disabled - primary interface not active\n",
				  net_dev->name);
	}

	seq_printf(seq, "[B.A.T.M.A.N. adv %s%s, MainIF/MAC: %s/%pM (%s)]\n",
		   SOURCE_VERSION, REVISION_VERSION_STR,
		   bat_priv->primary_if->net_dev->name,
		   bat_priv->primary_if->net_dev->dev_addr, net_dev->name);
	seq_printf(seq, " %-15s %s (%s/%i) %17s [%10s]: %20s ...\n",
		   "Originator", "last-seen", "#", TQ_MAX_VALUE, "Nexthop",
		   "outgoingIF", "Potential nexthops");

	spin_lock_bh(&bat_priv->orig_hash_lock);

	for (i = 0; i < hash->size; i++) {
		head = &hash->table[i];

		rcu_read_lock();
		hlist_for_each_entry_rcu(bucket, walk, head, hlist) {
			orig_node = bucket->data;

			if (!orig_node->router)
				continue;

			if (orig_node->router->tq_avg == 0)
				continue;

			last_seen_secs = jiffies_to_msecs(jiffies -
						orig_node->last_valid) / 1000;
			last_seen_msecs = jiffies_to_msecs(jiffies -
						orig_node->last_valid) % 1000;

			neigh_node = orig_node->router;
			seq_printf(seq, "%pM %4i.%03is (%3i) %pM [%10s]:",
				   orig_node->orig, last_seen_secs,
				   last_seen_msecs, neigh_node->tq_avg,
				   neigh_node->addr,
				   neigh_node->if_incoming->net_dev->name);

			hlist_for_each_entry_rcu(neigh_node, node,
						 &orig_node->neigh_list, list) {
				seq_printf(seq, " %pM (%3i)", neigh_node->addr,
					   neigh_node->tq_avg);
			}

			seq_printf(seq, "\n");
			batman_count++;
		}
		rcu_read_unlock();
	}

	spin_unlock_bh(&bat_priv->orig_hash_lock);

	if (batman_count == 0)
		seq_printf(seq, "No batman nodes in range ...\n");

	return 0;
}

static int orig_node_add_if(struct orig_node *orig_node, int max_if_num)
{
	void *data_ptr;

	data_ptr = kmalloc(max_if_num * sizeof(unsigned long) * NUM_WORDS,
			   GFP_ATOMIC);
	if (!data_ptr) {
		pr_err("Can't resize orig: out of memory\n");
		return -1;
	}

	memcpy(data_ptr, orig_node->bcast_own,
	       (max_if_num - 1) * sizeof(unsigned long) * NUM_WORDS);
	kfree(orig_node->bcast_own);
	orig_node->bcast_own = data_ptr;

	data_ptr = kmalloc(max_if_num * sizeof(uint8_t), GFP_ATOMIC);
	if (!data_ptr) {
		pr_err("Can't resize orig: out of memory\n");
		return -1;
	}

	memcpy(data_ptr, orig_node->bcast_own_sum,
	       (max_if_num - 1) * sizeof(uint8_t));
	kfree(orig_node->bcast_own_sum);
	orig_node->bcast_own_sum = data_ptr;

	return 0;
}
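
/*
 * bcast_own holds NUM_WORDS unsigned longs per interface (what appears
 * to be the per-interface sequence window used for the TQ calculation)
 * and bcast_own_sum one counter per interface, so both arrays must be
 * reallocated whenever an interface is added. Note the grow path
 * copies only the old (max_if_num - 1) slots and, since kmalloc()
 * rather than kzalloc() is used, leaves the new slot uninitialised.
 */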

int orig_hash_add_if(struct batman_if *batman_if, int max_if_num)
{
	struct bat_priv *bat_priv = netdev_priv(batman_if->soft_iface);
	struct hashtable_t *hash = bat_priv->orig_hash;
	struct hlist_node *walk;
	struct hlist_head *head;
	struct element_t *bucket;
	struct orig_node *orig_node;
	int i, ret;

	/* resize all orig nodes because orig_node->bcast_own(_sum) depend on
	 * if_num */
	spin_lock_bh(&bat_priv->orig_hash_lock);

	for (i = 0; i < hash->size; i++) {
		head = &hash->table[i];

		rcu_read_lock();
		hlist_for_each_entry_rcu(bucket, walk, head, hlist) {
			orig_node = bucket->data;

			spin_lock_bh(&orig_node->ogm_cnt_lock);
			ret = orig_node_add_if(orig_node, max_if_num);
			spin_unlock_bh(&orig_node->ogm_cnt_lock);

			if (ret == -1)
				goto err;
		}
		rcu_read_unlock();
	}

	spin_unlock_bh(&bat_priv->orig_hash_lock);
	return 0;

err:
	rcu_read_unlock();
	spin_unlock_bh(&bat_priv->orig_hash_lock);
	return -ENOMEM;
}

static int orig_node_del_if(struct orig_node *orig_node,
			    int max_if_num, int del_if_num)
{
	void *data_ptr = NULL;
	int chunk_size;

	/* last interface was removed */
	if (max_if_num == 0)
		goto free_bcast_own;

	chunk_size = sizeof(unsigned long) * NUM_WORDS;
	data_ptr = kmalloc(max_if_num * chunk_size, GFP_ATOMIC);
	if (!data_ptr) {
		pr_err("Can't resize orig: out of memory\n");
		return -1;
	}

	/* copy first part */
	memcpy(data_ptr, orig_node->bcast_own, del_if_num * chunk_size);

	/* copy second part */
	memcpy(data_ptr + del_if_num * chunk_size,
	       orig_node->bcast_own + ((del_if_num + 1) * chunk_size),
	       (max_if_num - del_if_num) * chunk_size);

free_bcast_own:
	kfree(orig_node->bcast_own);
	orig_node->bcast_own = data_ptr;

	if (max_if_num == 0)
		goto free_own_sum;

	data_ptr = kmalloc(max_if_num * sizeof(uint8_t), GFP_ATOMIC);
	if (!data_ptr) {
		pr_err("Can't resize orig: out of memory\n");
		return -1;
	}

	memcpy(data_ptr, orig_node->bcast_own_sum,
	       del_if_num * sizeof(uint8_t));

	memcpy(data_ptr + del_if_num * sizeof(uint8_t),
	       orig_node->bcast_own_sum + ((del_if_num + 1) * sizeof(uint8_t)),
	       (max_if_num - del_if_num) * sizeof(uint8_t));

free_own_sum:
	kfree(orig_node->bcast_own_sum);
	orig_node->bcast_own_sum = data_ptr;

	return 0;
}
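
/*
 * The shrink path removes one interface slot from the middle of each
 * array: everything before del_if_num is copied as-is and everything
 * after it is shifted down by one chunk, keeping the arrays packed so
 * that if_num remains usable as a direct index.
 */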

int orig_hash_del_if(struct batman_if *batman_if, int max_if_num)
{
	struct bat_priv *bat_priv = netdev_priv(batman_if->soft_iface);
	struct hashtable_t *hash = bat_priv->orig_hash;
	struct hlist_node *walk;
	struct hlist_head *head;
	struct element_t *bucket;
	struct batman_if *batman_if_tmp;
	struct orig_node *orig_node;
	int i, ret;

	/* resize all orig nodes because orig_node->bcast_own(_sum) depend on
	 * if_num */
	spin_lock_bh(&bat_priv->orig_hash_lock);

	for (i = 0; i < hash->size; i++) {
		head = &hash->table[i];

		rcu_read_lock();
		hlist_for_each_entry_rcu(bucket, walk, head, hlist) {
			orig_node = bucket->data;

			spin_lock_bh(&orig_node->ogm_cnt_lock);
			ret = orig_node_del_if(orig_node, max_if_num,
					       batman_if->if_num);
			spin_unlock_bh(&orig_node->ogm_cnt_lock);

			if (ret == -1)
				goto err;
		}
		rcu_read_unlock();
	}

	/* renumber remaining batman interfaces _inside_ of orig_hash_lock */
	rcu_read_lock();
	list_for_each_entry_rcu(batman_if_tmp, &if_list, list) {
		if (batman_if_tmp->if_status == IF_NOT_IN_USE)
			continue;

		if (batman_if == batman_if_tmp)
			continue;

		if (batman_if->soft_iface != batman_if_tmp->soft_iface)
			continue;

		if (batman_if_tmp->if_num > batman_if->if_num)
			batman_if_tmp->if_num--;
	}
	rcu_read_unlock();

	batman_if->if_num = -1;
	spin_unlock_bh(&bat_priv->orig_hash_lock);
	return 0;

err:
	rcu_read_unlock();
	spin_unlock_bh(&bat_priv->orig_hash_lock);
	return -ENOMEM;
}