send.c

/*
 * Copyright (C) 2007-2011 B.A.T.M.A.N. contributors:
 *
 * Marek Lindner, Simon Wunderlich
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
 * 02110-1301, USA
 *
 */

#include "main.h"
#include "send.h"
#include "routing.h"
#include "translation-table.h"
#include "soft-interface.h"
#include "hard-interface.h"
#include "vis.h"
#include "aggregation.h"
#include "gateway_common.h"
#include "originator.h"

static void send_outstanding_bcast_packet(struct work_struct *work);

/* apply hop penalty for a normal link */
static uint8_t hop_penalty(uint8_t tq, const struct bat_priv *bat_priv)
{
	int hop_penalty = atomic_read(&bat_priv->hop_penalty);

	return (tq * (TQ_MAX_VALUE - hop_penalty)) / (TQ_MAX_VALUE);
}

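/* hop_penalty() example (assuming TQ_MAX_VALUE is 255 and a configured
 * hop penalty of 10): an incoming tq of 200 leaves this node as
 * (200 * (255 - 10)) / 255 = 192, i.e. every hop shaves a few percent
 * off the advertised link quality */
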
/* when do we schedule our own packet to be sent */
static unsigned long own_send_time(const struct bat_priv *bat_priv)
{
	return jiffies + msecs_to_jiffies(
		   atomic_read(&bat_priv->orig_interval) -
		   JITTER + (random32() % (2 * JITTER)));
}

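/* own_send_time() note: the next own OGM is scheduled roughly
 * orig_interval milliseconds from now, randomised by +/- JITTER ms
 * (assuming JITTER is the 20 ms constant from main.h) so that nodes
 * sharing the same interval do not transmit in lock-step */
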
/* when do we schedule a forwarded packet to be sent */
static unsigned long forward_send_time(void)
{
	return jiffies + msecs_to_jiffies(random32() % (JITTER/2));
}

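/* forward_send_time() note: rebroadcasts get a small random delay of
 * 0..JITTER/2 ms so that neighbours which received the same OGM do not
 * all retransmit at the same instant, which also leaves the aggregation
 * code a window in which several OGMs can be merged into one frame */
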
/* send out an already prepared packet to the given address via the
 * specified batman interface */
int send_skb_packet(struct sk_buff *skb, struct hard_iface *hard_iface,
		    const uint8_t *dst_addr)
{
	struct ethhdr *ethhdr;

	if (hard_iface->if_status != IF_ACTIVE)
		goto send_skb_err;

	if (unlikely(!hard_iface->net_dev))
		goto send_skb_err;

	if (!(hard_iface->net_dev->flags & IFF_UP)) {
		pr_warning("Interface %s is not up - can't send packet via "
			   "that interface!\n", hard_iface->net_dev->name);
		goto send_skb_err;
	}

	/* push to the ethernet header. */
	if (my_skb_head_push(skb, sizeof(*ethhdr)) < 0)
		goto send_skb_err;

	skb_reset_mac_header(skb);

	ethhdr = (struct ethhdr *)skb_mac_header(skb);
	memcpy(ethhdr->h_source, hard_iface->net_dev->dev_addr, ETH_ALEN);
	memcpy(ethhdr->h_dest, dst_addr, ETH_ALEN);
	ethhdr->h_proto = __constant_htons(ETH_P_BATMAN);

	skb_set_network_header(skb, ETH_HLEN);
	skb->priority = TC_PRIO_CONTROL;
	skb->protocol = __constant_htons(ETH_P_BATMAN);

	skb->dev = hard_iface->net_dev;

	/* dev_queue_xmit() returns a negative result on error. However on
	 * congestion and traffic shaping, it drops and returns NET_XMIT_DROP
	 * (which is > 0). This will not be treated as an error. */
	return dev_queue_xmit(skb);

send_skb_err:
	kfree_skb(skb);
	return NET_XMIT_DROP;
}

/* Send a packet to a given interface */
static void send_packet_to_if(struct forw_packet *forw_packet,
			      struct hard_iface *hard_iface)
{
	struct bat_priv *bat_priv = netdev_priv(hard_iface->soft_iface);
	char *fwd_str;
	uint8_t packet_num;
	int16_t buff_pos;
	struct batman_ogm_packet *batman_ogm_packet;
	struct sk_buff *skb;

	if (hard_iface->if_status != IF_ACTIVE)
		return;

	packet_num = 0;
	buff_pos = 0;
	batman_ogm_packet = (struct batman_ogm_packet *)forw_packet->skb->data;

	/* adjust all flags and log packets */
	while (aggregated_packet(buff_pos,
				 forw_packet->packet_len,
				 batman_ogm_packet->tt_num_changes)) {

		/* we might have aggregated direct link packets with an
		 * ordinary base packet */
		if ((forw_packet->direct_link_flags & (1 << packet_num)) &&
		    (forw_packet->if_incoming == hard_iface))
			batman_ogm_packet->flags |= DIRECTLINK;
		else
			batman_ogm_packet->flags &= ~DIRECTLINK;

		fwd_str = (packet_num > 0 ? "Forwarding" : (forw_packet->own ?
							    "Sending own" :
							    "Forwarding"));
		bat_dbg(DBG_BATMAN, bat_priv,
			"%s %spacket (originator %pM, seqno %d, TQ %d, TTL %d,"
			" IDF %s, ttvn %d) on interface %s [%pM]\n",
			fwd_str, (packet_num > 0 ? "aggregated " : ""),
			batman_ogm_packet->orig,
			ntohl(batman_ogm_packet->seqno),
			batman_ogm_packet->tq, batman_ogm_packet->ttl,
			(batman_ogm_packet->flags & DIRECTLINK ?
			 "on" : "off"),
			batman_ogm_packet->ttvn, hard_iface->net_dev->name,
			hard_iface->net_dev->dev_addr);

		buff_pos += BATMAN_OGM_LEN +
			    tt_len(batman_ogm_packet->tt_num_changes);
		packet_num++;
		batman_ogm_packet = (struct batman_ogm_packet *)
					(forw_packet->skb->data + buff_pos);
	}

	/* create clone because function is called more than once */
	skb = skb_clone(forw_packet->skb, GFP_ATOMIC);
	if (skb)
		send_skb_packet(skb, hard_iface, broadcast_addr);
}

/* send a batman packet */
static void send_packet(struct forw_packet *forw_packet)
{
	struct hard_iface *hard_iface;
	struct net_device *soft_iface;
	struct bat_priv *bat_priv;
	struct hard_iface *primary_if = NULL;
	struct batman_ogm_packet *batman_ogm_packet =
		(struct batman_ogm_packet *)(forw_packet->skb->data);
	unsigned char directlink;

	directlink = (batman_ogm_packet->flags & DIRECTLINK ? 1 : 0);

	if (!forw_packet->if_incoming) {
		pr_err("Error - can't forward packet: incoming iface not "
		       "specified\n");
		goto out;
	}

	soft_iface = forw_packet->if_incoming->soft_iface;
	bat_priv = netdev_priv(soft_iface);

	if (forw_packet->if_incoming->if_status != IF_ACTIVE)
		goto out;

	primary_if = primary_if_get_selected(bat_priv);
	if (!primary_if)
		goto out;

	/* multihomed peer assumed */
	/* non-primary OGMs are only broadcasted on their interface */
	if ((directlink && (batman_ogm_packet->ttl == 1)) ||
	    (forw_packet->own && (forw_packet->if_incoming != primary_if))) {

		/* FIXME: what about aggregated packets ? */
		bat_dbg(DBG_BATMAN, bat_priv,
			"%s packet (originator %pM, seqno %d, TTL %d) "
			"on interface %s [%pM]\n",
			(forw_packet->own ? "Sending own" : "Forwarding"),
			batman_ogm_packet->orig,
			ntohl(batman_ogm_packet->seqno),
			batman_ogm_packet->ttl,
			forw_packet->if_incoming->net_dev->name,
			forw_packet->if_incoming->net_dev->dev_addr);

		/* skb is only used once and then forw_packet is freed */
		send_skb_packet(forw_packet->skb, forw_packet->if_incoming,
				broadcast_addr);
		forw_packet->skb = NULL;

		goto out;
	}

	/* broadcast on every interface */
	rcu_read_lock();
	list_for_each_entry_rcu(hard_iface, &hardif_list, list) {
		if (hard_iface->soft_iface != soft_iface)
			continue;

		send_packet_to_if(forw_packet, hard_iface);
	}
	rcu_read_unlock();

out:
	if (primary_if)
		hardif_free_ref(primary_if);
}

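/* resize the per-interface OGM buffer to new_len bytes; only the static
 * OGM header (BATMAN_OGM_LEN bytes) is carried over, and the old buffer
 * is left untouched if the allocation fails */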
static void realloc_packet_buffer(struct hard_iface *hard_iface,
				  int new_len)
{
	unsigned char *new_buff;

	new_buff = kmalloc(new_len, GFP_ATOMIC);

	/* keep old buffer if kmalloc should fail */
	if (new_buff) {
		memcpy(new_buff, hard_iface->packet_buff,
		       BATMAN_OGM_LEN);

		kfree(hard_iface->packet_buff);
		hard_iface->packet_buff = new_buff;
		hard_iface->packet_len = new_len;
	}
}

/* when calling this function (hard_iface == primary_if) has to be true */
static void prepare_packet_buffer(struct bat_priv *bat_priv,
				  struct hard_iface *hard_iface)
{
	int new_len;
	struct batman_ogm_packet *batman_ogm_packet;

	new_len = BATMAN_OGM_LEN +
		  tt_len((uint8_t)atomic_read(&bat_priv->tt_local_changes));

	/* if we have too many changes for one packet don't send any
	 * and wait for the tt table request which will be fragmented */
	if (new_len > hard_iface->soft_iface->mtu)
		new_len = BATMAN_OGM_LEN;

	realloc_packet_buffer(hard_iface, new_len);
	batman_ogm_packet = (struct batman_ogm_packet *)hard_iface->packet_buff;

	atomic_set(&bat_priv->tt_crc, tt_local_crc(bat_priv));

	/* reset the sending counter */
	atomic_set(&bat_priv->tt_ogm_append_cnt, TT_OGM_APPEND_MAX);

	batman_ogm_packet->tt_num_changes = tt_changes_fill_buffer(bat_priv,
				hard_iface->packet_buff + BATMAN_OGM_LEN,
				hard_iface->packet_len - BATMAN_OGM_LEN);
}

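/* shrink the packet buffer back to a bare OGM without any appended
 * translation table changes */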
static void reset_packet_buffer(struct bat_priv *bat_priv,
				struct hard_iface *hard_iface)
{
	struct batman_ogm_packet *batman_ogm_packet;

	realloc_packet_buffer(hard_iface, BATMAN_OGM_LEN);

	batman_ogm_packet = (struct batman_ogm_packet *)hard_iface->packet_buff;
	batman_ogm_packet->tt_num_changes = 0;
}

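/* build and queue this interface's own OGM: on the primary interface
 * pending translation table changes are appended (or dropped again once
 * they have been sent TT_OGM_APPEND_MAX times), the sequence number is
 * written in network byte order and then incremented, and the packet is
 * handed to add_bat_packet_to_list() with a jittered send time */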
void schedule_own_packet(struct hard_iface *hard_iface)
{
	struct bat_priv *bat_priv = netdev_priv(hard_iface->soft_iface);
	struct hard_iface *primary_if;
	unsigned long send_time;
	struct batman_ogm_packet *batman_ogm_packet;
	int vis_server;

	if ((hard_iface->if_status == IF_NOT_IN_USE) ||
	    (hard_iface->if_status == IF_TO_BE_REMOVED))
		return;

	vis_server = atomic_read(&bat_priv->vis_mode);
	primary_if = primary_if_get_selected(bat_priv);

	/**
	 * the interface gets activated here to avoid race conditions between
	 * the moment the interface is activated in hardif_activate_interface()
	 * (where the originator mac is set) and outdated packets, especially
	 * ones with uninitialized mac addresses, still sitting in the packet
	 * queue
	 */
	if (hard_iface->if_status == IF_TO_BE_ACTIVATED)
		hard_iface->if_status = IF_ACTIVE;

	if (hard_iface == primary_if) {
		/* if at least one change happened */
		if (atomic_read(&bat_priv->tt_local_changes) > 0) {
			tt_commit_changes(bat_priv);
			prepare_packet_buffer(bat_priv, hard_iface);
		}

		/* if the changes have been sent often enough */
		if (!atomic_dec_not_zero(&bat_priv->tt_ogm_append_cnt))
			reset_packet_buffer(bat_priv, hard_iface);
	}

	/**
	 * NOTE: packet_buff might just have been re-allocated in
	 * prepare_packet_buffer() or in reset_packet_buffer()
	 */
	batman_ogm_packet = (struct batman_ogm_packet *)hard_iface->packet_buff;

	/* change sequence number to network order */
	batman_ogm_packet->seqno =
		htonl((uint32_t)atomic_read(&hard_iface->seqno));

	batman_ogm_packet->ttvn = atomic_read(&bat_priv->ttvn);
	batman_ogm_packet->tt_crc = htons((uint16_t)
					  atomic_read(&bat_priv->tt_crc));

	if (vis_server == VIS_TYPE_SERVER_SYNC)
		batman_ogm_packet->flags |= VIS_SERVER;
	else
		batman_ogm_packet->flags &= ~VIS_SERVER;

	if ((hard_iface == primary_if) &&
	    (atomic_read(&bat_priv->gw_mode) == GW_MODE_SERVER))
		batman_ogm_packet->gw_flags =
			(uint8_t)atomic_read(&bat_priv->gw_bandwidth);
	else
		batman_ogm_packet->gw_flags = NO_FLAGS;

	atomic_inc(&hard_iface->seqno);

	slide_own_bcast_window(hard_iface);
	send_time = own_send_time(bat_priv);
	add_bat_packet_to_list(bat_priv,
			       hard_iface->packet_buff,
			       hard_iface->packet_len,
			       hard_iface, 1, send_time);

	if (primary_if)
		hardif_free_ref(primary_if);
}

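/* queue a received OGM for rebroadcast: decrement its TTL, record the
 * sender as prev_sender, take over tq/ttl from our best neighbour towards
 * the originator where appropriate, apply the hop penalty and hand the
 * packet to add_bat_packet_to_list() with a short random forward delay */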
void schedule_forward_packet(struct orig_node *orig_node,
			     const struct ethhdr *ethhdr,
			     struct batman_ogm_packet *batman_ogm_packet,
			     int directlink,
			     struct hard_iface *if_incoming)
{
	struct bat_priv *bat_priv = netdev_priv(if_incoming->soft_iface);
	struct neigh_node *router;
	uint8_t in_tq, in_ttl, tq_avg = 0;
	unsigned long send_time;
	uint8_t tt_num_changes;

	if (batman_ogm_packet->ttl <= 1) {
		bat_dbg(DBG_BATMAN, bat_priv, "ttl exceeded\n");
		return;
	}

	router = orig_node_get_router(orig_node);

	in_tq = batman_ogm_packet->tq;
	in_ttl = batman_ogm_packet->ttl;
	tt_num_changes = batman_ogm_packet->tt_num_changes;

	batman_ogm_packet->ttl--;
	memcpy(batman_ogm_packet->prev_sender, ethhdr->h_source, ETH_ALEN);

	/* rebroadcast tq of our best ranking neighbor to ensure the rebroadcast
	 * of our best tq value */
	if (router && router->tq_avg != 0) {

		/* rebroadcast ogm of best ranking neighbor as is */
		if (!compare_eth(router->addr, ethhdr->h_source)) {
			batman_ogm_packet->tq = router->tq_avg;

			if (router->last_ttl)
				batman_ogm_packet->ttl = router->last_ttl - 1;
		}

		tq_avg = router->tq_avg;
	}

	if (router)
		neigh_node_free_ref(router);

	/* apply hop penalty */
	batman_ogm_packet->tq = hop_penalty(batman_ogm_packet->tq, bat_priv);

	bat_dbg(DBG_BATMAN, bat_priv,
		"Forwarding packet: tq_orig: %i, tq_avg: %i, "
		"tq_forw: %i, ttl_orig: %i, ttl_forw: %i\n",
		in_tq, tq_avg, batman_ogm_packet->tq, in_ttl - 1,
		batman_ogm_packet->ttl);

	batman_ogm_packet->seqno = htonl(batman_ogm_packet->seqno);
	batman_ogm_packet->tt_crc = htons(batman_ogm_packet->tt_crc);

	/* switch off the primaries' first hop flag when forwarding */
	batman_ogm_packet->flags &= ~PRIMARIES_FIRST_HOP;

	if (directlink)
		batman_ogm_packet->flags |= DIRECTLINK;
	else
		batman_ogm_packet->flags &= ~DIRECTLINK;

	send_time = forward_send_time();
	add_bat_packet_to_list(bat_priv,
			       (unsigned char *)batman_ogm_packet,
			       BATMAN_OGM_LEN + tt_len(tt_num_changes),
			       if_incoming, 0, send_time);
}

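/* release everything attached to a queued forw_packet: the skb (if one is
 * still attached), the reference held on the incoming interface and the
 * structure itself */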
static void forw_packet_free(struct forw_packet *forw_packet)
{
	if (forw_packet->skb)
		kfree_skb(forw_packet->skb);
	if (forw_packet->if_incoming)
		hardif_free_ref(forw_packet->if_incoming);
	kfree(forw_packet);
}

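/* link the forw_packet into the broadcast queue under the list lock and
 * arm the delayed work that transmits it once send_time has elapsed */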
static void _add_bcast_packet_to_list(struct bat_priv *bat_priv,
				      struct forw_packet *forw_packet,
				      unsigned long send_time)
{
	INIT_HLIST_NODE(&forw_packet->list);

	/* add new packet to packet list */
	spin_lock_bh(&bat_priv->forw_bcast_list_lock);
	hlist_add_head(&forw_packet->list, &bat_priv->forw_bcast_list);
	spin_unlock_bh(&bat_priv->forw_bcast_list_lock);

	/* start timer for this packet */
	INIT_DELAYED_WORK(&forw_packet->delayed_work,
			  send_outstanding_bcast_packet);
	queue_delayed_work(bat_event_workqueue, &forw_packet->delayed_work,
			   send_time);
}

/* add a broadcast packet to the queue and setup timers. broadcast packets
 * are sent multiple times to increase probability for being received.
 *
 * This function returns NETDEV_TX_OK on success and NETDEV_TX_BUSY on
 * errors.
 *
 * The skb is not consumed, so the caller should make sure that the
 * skb is freed. */
int add_bcast_packet_to_list(struct bat_priv *bat_priv,
			     const struct sk_buff *skb, unsigned long delay)
{
	struct hard_iface *primary_if = NULL;
	struct forw_packet *forw_packet;
	struct bcast_packet *bcast_packet;
	struct sk_buff *newskb;

	if (!atomic_dec_not_zero(&bat_priv->bcast_queue_left)) {
		bat_dbg(DBG_BATMAN, bat_priv, "bcast packet queue full\n");
		goto out;
	}

	primary_if = primary_if_get_selected(bat_priv);
	if (!primary_if)
		goto out_and_inc;

	forw_packet = kmalloc(sizeof(*forw_packet), GFP_ATOMIC);
	if (!forw_packet)
		goto out_and_inc;

	newskb = skb_copy(skb, GFP_ATOMIC);
	if (!newskb)
		goto packet_free;

	/* as we have a copy now, it is safe to decrease the TTL */
	bcast_packet = (struct bcast_packet *)newskb->data;
	bcast_packet->ttl--;

	skb_reset_mac_header(newskb);

	forw_packet->skb = newskb;
	forw_packet->if_incoming = primary_if;

	/* how often did we send the bcast packet ? */
	forw_packet->num_packets = 0;

	_add_bcast_packet_to_list(bat_priv, forw_packet, delay);
	return NETDEV_TX_OK;

packet_free:
	kfree(forw_packet);
out_and_inc:
	atomic_inc(&bat_priv->bcast_queue_left);
out:
	if (primary_if)
		hardif_free_ref(primary_if);
	return NETDEV_TX_BUSY;
}

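/* delayed-work handler for queued broadcast packets: send one clone of the
 * saved skb on every hard interface belonging to the soft interface, then
 * re-queue the packet until it has been broadcast three times */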
static void send_outstanding_bcast_packet(struct work_struct *work)
{
	struct hard_iface *hard_iface;
	struct delayed_work *delayed_work =
		container_of(work, struct delayed_work, work);
	struct forw_packet *forw_packet =
		container_of(delayed_work, struct forw_packet, delayed_work);
	struct sk_buff *skb1;
	struct net_device *soft_iface = forw_packet->if_incoming->soft_iface;
	struct bat_priv *bat_priv = netdev_priv(soft_iface);

	spin_lock_bh(&bat_priv->forw_bcast_list_lock);
	hlist_del(&forw_packet->list);
	spin_unlock_bh(&bat_priv->forw_bcast_list_lock);

	if (atomic_read(&bat_priv->mesh_state) == MESH_DEACTIVATING)
		goto out;

	/* rebroadcast packet */
	rcu_read_lock();
	list_for_each_entry_rcu(hard_iface, &hardif_list, list) {
		if (hard_iface->soft_iface != soft_iface)
			continue;

		/* send a copy of the saved skb */
		skb1 = skb_clone(forw_packet->skb, GFP_ATOMIC);
		if (skb1)
			send_skb_packet(skb1, hard_iface, broadcast_addr);
	}
	rcu_read_unlock();

	forw_packet->num_packets++;

	/* if we still have some more bcasts to send */
	if (forw_packet->num_packets < 3) {
		_add_bcast_packet_to_list(bat_priv, forw_packet,
					  ((5 * HZ) / 1000));
		return;
	}

out:
	forw_packet_free(forw_packet);
	atomic_inc(&bat_priv->bcast_queue_left);
}

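/* delayed-work handler for queued OGMs: take the packet off the OGM queue,
 * transmit it via send_packet() and, for our own OGMs, schedule the next
 * one right away so the queue never runs dry while the mesh is up */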
void send_outstanding_bat_packet(struct work_struct *work)
{
	struct delayed_work *delayed_work =
		container_of(work, struct delayed_work, work);
	struct forw_packet *forw_packet =
		container_of(delayed_work, struct forw_packet, delayed_work);
	struct bat_priv *bat_priv;

	bat_priv = netdev_priv(forw_packet->if_incoming->soft_iface);
	spin_lock_bh(&bat_priv->forw_bat_list_lock);
	hlist_del(&forw_packet->list);
	spin_unlock_bh(&bat_priv->forw_bat_list_lock);

	if (atomic_read(&bat_priv->mesh_state) == MESH_DEACTIVATING)
		goto out;

	send_packet(forw_packet);

	/**
	 * we have to have at least one packet in the queue
	 * to determine the queue's wake-up time unless we are
	 * shutting down
	 */
	if (forw_packet->own)
		schedule_own_packet(forw_packet->if_incoming);

out:
	/* don't count own packet */
	if (!forw_packet->own)
		atomic_inc(&bat_priv->batman_queue_left);

	forw_packet_free(forw_packet);
}

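/* cancel and free all queued broadcast and OGM packets; if hard_iface is
 * given, only packets scheduled on that interface are purged */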
void purge_outstanding_packets(struct bat_priv *bat_priv,
			       const struct hard_iface *hard_iface)
{
	struct forw_packet *forw_packet;
	struct hlist_node *tmp_node, *safe_tmp_node;
	bool pending;

	if (hard_iface)
		bat_dbg(DBG_BATMAN, bat_priv,
			"purge_outstanding_packets(): %s\n",
			hard_iface->net_dev->name);
	else
		bat_dbg(DBG_BATMAN, bat_priv,
			"purge_outstanding_packets()\n");

	/* free bcast list */
	spin_lock_bh(&bat_priv->forw_bcast_list_lock);
	hlist_for_each_entry_safe(forw_packet, tmp_node, safe_tmp_node,
				  &bat_priv->forw_bcast_list, list) {

		/**
		 * if purge_outstanding_packets() was called with an argument
		 * we delete only packets belonging to the given interface
		 */
		if ((hard_iface) &&
		    (forw_packet->if_incoming != hard_iface))
			continue;

		spin_unlock_bh(&bat_priv->forw_bcast_list_lock);

		/**
		 * send_outstanding_bcast_packet() will lock the list to
		 * delete the item from the list
		 */
		pending = cancel_delayed_work_sync(&forw_packet->delayed_work);
		spin_lock_bh(&bat_priv->forw_bcast_list_lock);

		if (pending) {
			hlist_del(&forw_packet->list);
			forw_packet_free(forw_packet);
		}
	}
	spin_unlock_bh(&bat_priv->forw_bcast_list_lock);

	/* free batman packet list */
	spin_lock_bh(&bat_priv->forw_bat_list_lock);
	hlist_for_each_entry_safe(forw_packet, tmp_node, safe_tmp_node,
				  &bat_priv->forw_bat_list, list) {

		/**
		 * if purge_outstanding_packets() was called with an argument
		 * we delete only packets belonging to the given interface
		 */
		if ((hard_iface) &&
		    (forw_packet->if_incoming != hard_iface))
			continue;

		spin_unlock_bh(&bat_priv->forw_bat_list_lock);

		/**
		 * send_outstanding_bat_packet() will lock the list to
		 * delete the item from the list
		 */
		pending = cancel_delayed_work_sync(&forw_packet->delayed_work);
		spin_lock_bh(&bat_priv->forw_bat_list_lock);

		if (pending) {
			hlist_del(&forw_packet->list);
			forw_packet_free(forw_packet);
		}
	}
	spin_unlock_bh(&bat_priv->forw_bat_list_lock);
}