/*
 * net/tipc/bcast.c: TIPC broadcast code
 *
 * Copyright (c) 2004-2006, Ericsson AB
 * Copyright (c) 2004, Intel Corporation.
 * Copyright (c) 2005, 2010-2011, Wind River Systems
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the names of the copyright holders nor the names of its
 *    contributors may be used to endorse or promote products derived from
 *    this software without specific prior written permission.
 *
 * Alternatively, this software may be distributed under the terms of the
 * GNU General Public License ("GPL") version 2 as published by the Free
 * Software Foundation.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include "core.h"
#include "link.h"
#include "port.h"
#include "bcast.h"
#include "name_distr.h"

#define MAX_PKT_DEFAULT_MCAST   1500    /* bcast link max packet size (fixed) */
#define BCLINK_WIN_DEFAULT      20      /* bcast link window size (default) */

/**
 * struct bcbearer_pair - a pair of bearers used by broadcast link
 * @primary: pointer to primary bearer
 * @secondary: pointer to secondary bearer
 *
 * Bearers must have same priority and same set of reachable destinations
 * to be paired.
 */
struct bcbearer_pair {
        struct tipc_bearer *primary;
        struct tipc_bearer *secondary;
};

/**
 * struct bcbearer - bearer used by broadcast link
 * @bearer: (non-standard) broadcast bearer structure
 * @media: (non-standard) broadcast media structure
 * @bpairs: array of bearer pairs
 * @bpairs_temp: temporary array of bearer pairs used by tipc_bcbearer_sort()
 * @remains: temporary node map used by tipc_bcbearer_send()
 * @remains_new: temporary node map used by tipc_bcbearer_send()
 *
 * Note: The fields labelled "temporary" are incorporated into the bearer
 * to avoid consuming potentially limited stack space through the use of
 * large local variables within multicast routines.  Concurrent access is
 * prevented through use of the spinlock "bc_lock".
 */
struct bcbearer {
        struct tipc_bearer bearer;
        struct media media;
        struct bcbearer_pair bpairs[MAX_BEARERS];
        struct bcbearer_pair bpairs_temp[TIPC_MAX_LINK_PRI + 1];
        struct tipc_node_map remains;
        struct tipc_node_map remains_new;
};

/**
 * struct bclink - link used for broadcast messages
 * @link: (non-standard) broadcast link structure
 * @node: (non-standard) node structure representing b'cast link's peer node
 * @bcast_nodes: map of broadcast-capable nodes
 * @retransmit_to: node that most recently requested a retransmit
 *
 * Handles sequence numbering, fragmentation, bundling, etc.
 */
struct bclink {
        struct link link;
        struct tipc_node node;
        struct tipc_node_map bcast_nodes;
        struct tipc_node *retransmit_to;
};

static struct bcbearer bcast_bearer;
static struct bclink bcast_link;

static struct bcbearer *bcbearer = &bcast_bearer;
static struct bclink *bclink = &bcast_link;
static struct link *bcl = &bcast_link.link;

static DEFINE_SPINLOCK(bc_lock);

const char tipc_bclink_name[] = "broadcast-link";

static void tipc_nmap_diff(struct tipc_node_map *nm_a,
                           struct tipc_node_map *nm_b,
                           struct tipc_node_map *nm_diff);

static u32 buf_seqno(struct sk_buff *buf)
{
        return msg_seqno(buf_msg(buf));
}

static u32 bcbuf_acks(struct sk_buff *buf)
{
        return (u32)(unsigned long)TIPC_SKB_CB(buf)->handle;
}

static void bcbuf_set_acks(struct sk_buff *buf, u32 acks)
{
        TIPC_SKB_CB(buf)->handle = (void *)(unsigned long)acks;
}

static void bcbuf_decr_acks(struct sk_buff *buf)
{
        bcbuf_set_acks(buf, bcbuf_acks(buf) - 1);
}

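/*
 * The pending-ack count for each queued broadcast buffer rides in the
 * sk_buff control block (the otherwise unused "handle" pointer), so no
 * extra per-buffer allocation is needed.  A rough usage sketch, assuming
 * three broadcast-capable peers:
 *
 *      bcbuf_set_acks(buf, 3);         // sent; three acks outstanding
 *      bcbuf_decr_acks(buf);           // one peer acknowledges
 *      if (bcbuf_acks(buf) == 0)       // last ack frees the buffer
 *              buf_discard(buf);
 */
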
void tipc_bclink_add_node(u32 addr)
{
        spin_lock_bh(&bc_lock);
        tipc_nmap_add(&bclink->bcast_nodes, addr);
        spin_unlock_bh(&bc_lock);
}

void tipc_bclink_remove_node(u32 addr)
{
        spin_lock_bh(&bc_lock);
        tipc_nmap_remove(&bclink->bcast_nodes, addr);
        spin_unlock_bh(&bc_lock);
}

static void bclink_set_last_sent(void)
{
        if (bcl->next_out)
                bcl->fsm_msg_cnt = mod(buf_seqno(bcl->next_out) - 1);
        else
                bcl->fsm_msg_cnt = mod(bcl->next_out_no - 1);
}

u32 tipc_bclink_get_last_sent(void)
{
        return bcl->fsm_msg_cnt;
}

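/*
 * Note: the broadcast link runs no link FSM timer, so fsm_msg_cnt appears
 * to be reused here as "sequence number of the last packet sent on the
 * broadcast link": next_out_no - 1 when nothing is waiting in the backlog,
 * otherwise the packet just before the first backlogged one.
 */
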
/**
 * bclink_set_gap - set gap according to contents of current deferred pkt queue
 *
 * Called with 'node' locked, bc_lock unlocked
 */
static void bclink_set_gap(struct tipc_node *n_ptr)
{
        struct sk_buff *buf = n_ptr->bclink.deferred_head;

        n_ptr->bclink.gap_after = n_ptr->bclink.gap_to =
                mod(n_ptr->bclink.last_in);
        if (unlikely(buf != NULL))
                n_ptr->bclink.gap_to = mod(buf_seqno(buf) - 1);
}

/**
 * bclink_ack_allowed - test if ACK or NACK message can be sent at this moment
 *
 * This mechanism endeavours to prevent all nodes in network from trying
 * to ACK or NACK at the same time.
 *
 * Note: TIPC uses a different trigger to distribute ACKs than it does to
 * distribute NACKs, but tries to use the same spacing (divide by 16).
 */
static int bclink_ack_allowed(u32 n)
{
        return (n % TIPC_MIN_LINK_WIN) == tipc_own_tag;
}

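/*
 * Staggering sketch, assuming TIPC_MIN_LINK_WIN is 16: each node responds
 * only when the trigger value lands on its own slot in the 16-step cycle.
 * For instance, a node whose tipc_own_tag is 3:
 *
 *      bclink_ack_allowed(18);         // 18 % 16 == 2 -> 0, stay silent
 *      bclink_ack_allowed(19);         // 19 % 16 == 3 -> 1, may ACK/NACK
 *
 * so roughly one node in sixteen reacts to any given sequence number.
 */
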
/**
 * tipc_bclink_retransmit_to - get most recent node to request retransmission
 *
 * Called with bc_lock locked
 */
struct tipc_node *tipc_bclink_retransmit_to(void)
{
        return bclink->retransmit_to;
}

/**
 * bclink_retransmit_pkt - retransmit broadcast packets
 * @after: sequence number of last packet to *not* retransmit
 * @to: sequence number of last packet to retransmit
 *
 * Called with bc_lock locked
 */
static void bclink_retransmit_pkt(u32 after, u32 to)
{
        struct sk_buff *buf;

        buf = bcl->first_out;
        while (buf && less_eq(buf_seqno(buf), after))
                buf = buf->next;
        tipc_link_retransmit(bcl, buf, mod(to - after));
}

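/*
 * Worked example: a NACK carrying bcgap_after == 100 and bcgap_to == 103
 * requests the half-open range (100, 103], i.e. packets 101, 102 and 103.
 * The loop above skips everything up to and including 100, after which
 * mod(103 - 100) == 3 packets are handed to tipc_link_retransmit().
 */
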
/**
 * tipc_bclink_acknowledge - handle acknowledgement of broadcast packets
 * @n_ptr: node that sent acknowledgement info
 * @acked: broadcast sequence # that has been acknowledged
 *
 * Node is locked, bc_lock unlocked.
 */
void tipc_bclink_acknowledge(struct tipc_node *n_ptr, u32 acked)
{
        struct sk_buff *crs;
        struct sk_buff *next;
        unsigned int released = 0;

        if (less_eq(acked, n_ptr->bclink.acked))
                return;

        spin_lock_bh(&bc_lock);

        /* Skip over packets that node has previously acknowledged */

        crs = bcl->first_out;
        while (crs && less_eq(buf_seqno(crs), n_ptr->bclink.acked))
                crs = crs->next;

        /* Update packets that node is now acknowledging */

        while (crs && less_eq(buf_seqno(crs), acked)) {
                next = crs->next;
                bcbuf_decr_acks(crs);
                if (bcbuf_acks(crs) == 0) {
                        bcl->first_out = next;
                        bcl->out_queue_size--;
                        buf_discard(crs);
                        released = 1;
                }
                crs = next;
        }
        n_ptr->bclink.acked = acked;

        /* Try resolving broadcast link congestion, if necessary */

        if (unlikely(bcl->next_out)) {
                tipc_link_push_queue(bcl);
                bclink_set_last_sent();
        }
        if (unlikely(released && !list_empty(&bcl->waiting_ports)))
                tipc_link_wakeup_ports(bcl, 0);
        spin_unlock_bh(&bc_lock);
}

/**
 * bclink_send_ack - unicast an ACK msg
 *
 * tipc_net_lock and node lock set
 */
static void bclink_send_ack(struct tipc_node *n_ptr)
{
        struct link *l_ptr = n_ptr->active_links[n_ptr->addr & 1];

        if (l_ptr != NULL)
                tipc_link_send_proto_msg(l_ptr, STATE_MSG, 0, 0, 0, 0, 0);
}

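/*
 * Broadcast ACKs piggy-back on ordinary unicast link STATE messages.
 * Indexing active_links[] with the low bit of the peer's address looks
 * like a cheap way of spreading ACK traffic across two parallel links;
 * when only one link is active, both slots point at the same link, so
 * either index works.
 */
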
/**
 * bclink_send_nack - broadcast a NACK msg
 *
 * tipc_net_lock and node lock set
 */
static void bclink_send_nack(struct tipc_node *n_ptr)
{
        struct sk_buff *buf;
        struct tipc_msg *msg;

        if (!less(n_ptr->bclink.gap_after, n_ptr->bclink.gap_to))
                return;

        buf = tipc_buf_acquire(INT_H_SIZE);
        if (buf) {
                msg = buf_msg(buf);
                tipc_msg_init(msg, BCAST_PROTOCOL, STATE_MSG,
                              INT_H_SIZE, n_ptr->addr);
                msg_set_non_seq(msg, 1);
                msg_set_mc_netid(msg, tipc_net_id);
                msg_set_bcast_ack(msg, mod(n_ptr->bclink.last_in));
                msg_set_bcgap_after(msg, n_ptr->bclink.gap_after);
                msg_set_bcgap_to(msg, n_ptr->bclink.gap_to);
                msg_set_bcast_tag(msg, tipc_own_tag);

                tipc_bearer_send(&bcbearer->bearer, buf, NULL);
                bcl->stats.sent_nacks++;
                buf_discard(buf);

                /*
                 * Ensure we don't send another NACK msg to the node
                 * until 16 more deferred messages arrive from it
                 * (i.e. helps prevent all nodes from NACK'ing at same time)
                 */
                n_ptr->bclink.nack_sync = tipc_own_tag;
        }
}

/**
 * tipc_bclink_check_gap - send a NACK if a sequence gap exists
 *
 * tipc_net_lock and node lock set
 */
void tipc_bclink_check_gap(struct tipc_node *n_ptr, u32 last_sent)
{
        if (!n_ptr->bclink.supported ||
            less_eq(last_sent, mod(n_ptr->bclink.last_in)))
                return;

        bclink_set_gap(n_ptr);
        if (n_ptr->bclink.gap_after == n_ptr->bclink.gap_to)
                n_ptr->bclink.gap_to = last_sent;
        bclink_send_nack(n_ptr);
}

/**
 * tipc_bclink_peek_nack - process a NACK msg meant for another node
 *
 * Only tipc_net_lock set.
 */
static void tipc_bclink_peek_nack(u32 dest, u32 sender_tag, u32 gap_after, u32 gap_to)
{
        struct tipc_node *n_ptr = tipc_node_find(dest);
        u32 my_after, my_to;

        if (unlikely(!n_ptr || !tipc_node_is_up(n_ptr)))
                return;
        tipc_node_lock(n_ptr);
        /*
         * Modify gap to suppress unnecessary NACKs from this node
         */
        my_after = n_ptr->bclink.gap_after;
        my_to = n_ptr->bclink.gap_to;

        if (less_eq(gap_after, my_after)) {
                if (less(my_after, gap_to) && less(gap_to, my_to))
                        n_ptr->bclink.gap_after = gap_to;
                else if (less_eq(my_to, gap_to))
                        n_ptr->bclink.gap_to = n_ptr->bclink.gap_after;
        } else if (less_eq(gap_after, my_to)) {
                if (less_eq(my_to, gap_to))
                        n_ptr->bclink.gap_to = gap_after;
        } else {
                /*
                 * Expand gap if missing bufs not in deferred queue:
                 */
                struct sk_buff *buf = n_ptr->bclink.deferred_head;
                u32 prev = n_ptr->bclink.gap_to;

                for (; buf; buf = buf->next) {
                        u32 seqno = buf_seqno(buf);

                        if (mod(seqno - prev) != 1) {
                                buf = NULL;
                                break;
                        }
                        if (seqno == gap_after)
                                break;
                        prev = seqno;
                }
                if (buf == NULL)
                        n_ptr->bclink.gap_to = gap_after;
        }
        /*
         * Some nodes may send a complementary NACK now:
         */
        if (bclink_ack_allowed(sender_tag + 1)) {
                if (n_ptr->bclink.gap_to != n_ptr->bclink.gap_after) {
                        bclink_send_nack(n_ptr);
                        bclink_set_gap(n_ptr);
                }
        }
        tipc_node_unlock(n_ptr);
}

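/*
 * Suppression sketch: suppose this node's recorded gap is (10, 20] and an
 * overheard NACK already requests (8, 15].  The first branch applies
 * (8 <= 10, and 10 < 15 < 20), so the local gap shrinks to (15, 20] and a
 * later NACK from here only asks for packets nobody has requested yet.
 * If the overheard request covers the whole local gap, the gap collapses
 * to empty and this node stays silent.
 */
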
/**
 * tipc_bclink_send_msg - broadcast a packet to all nodes in cluster
 */
int tipc_bclink_send_msg(struct sk_buff *buf)
{
        int res;

        spin_lock_bh(&bc_lock);

        res = tipc_link_send_buf(bcl, buf);
        if (likely(res > 0))
                bclink_set_last_sent();

        bcl->stats.queue_sz_counts++;
        bcl->stats.accu_queue_sz += bcl->out_queue_size;

        spin_unlock_bh(&bc_lock);
        return res;
}

/**
 * tipc_bclink_recv_pkt - receive a broadcast packet, and deliver upwards
 *
 * tipc_net_lock is read_locked, no other locks set
 */
void tipc_bclink_recv_pkt(struct sk_buff *buf)
{
        struct tipc_msg *msg = buf_msg(buf);
        struct tipc_node *node;
        u32 next_in;
        u32 seqno;
        struct sk_buff *deferred;

        /* Screen out unwanted broadcast messages */

        if (msg_mc_netid(msg) != tipc_net_id)
                goto exit;

        node = tipc_node_find(msg_prevnode(msg));
        if (unlikely(!node))
                goto exit;

        tipc_node_lock(node);
        if (unlikely(!node->bclink.supported))
                goto unlock;

        if (unlikely(msg_user(msg) == BCAST_PROTOCOL)) {
                if (msg_type(msg) != STATE_MSG)
                        goto unlock;
                if (msg_destnode(msg) == tipc_own_addr) {
                        tipc_bclink_acknowledge(node, msg_bcast_ack(msg));
                        tipc_node_unlock(node);
                        spin_lock_bh(&bc_lock);
                        bcl->stats.recv_nacks++;
                        bclink->retransmit_to = node;
                        bclink_retransmit_pkt(msg_bcgap_after(msg),
                                              msg_bcgap_to(msg));
                        spin_unlock_bh(&bc_lock);
                } else {
                        tipc_node_unlock(node);
                        tipc_bclink_peek_nack(msg_destnode(msg),
                                              msg_bcast_tag(msg),
                                              msg_bcgap_after(msg),
                                              msg_bcgap_to(msg));
                }
                goto exit;
        }

        /* Handle in-sequence broadcast message */

receive:
        next_in = mod(node->bclink.last_in + 1);
        seqno = msg_seqno(msg);

        if (likely(seqno == next_in)) {
                bcl->stats.recv_info++;
                node->bclink.last_in++;
                bclink_set_gap(node);
                if (unlikely(bclink_ack_allowed(seqno))) {
                        bclink_send_ack(node);
                        bcl->stats.sent_acks++;
                }
                if (likely(msg_isdata(msg))) {
                        tipc_node_unlock(node);
                        if (likely(msg_mcast(msg)))
                                tipc_port_recv_mcast(buf, NULL);
                        else
                                buf_discard(buf);
                } else if (msg_user(msg) == MSG_BUNDLER) {
                        bcl->stats.recv_bundles++;
                        bcl->stats.recv_bundled += msg_msgcnt(msg);
                        tipc_node_unlock(node);
                        tipc_link_recv_bundle(buf);
                } else if (msg_user(msg) == MSG_FRAGMENTER) {
                        bcl->stats.recv_fragments++;
                        if (tipc_link_recv_fragment(&node->bclink.defragm,
                                                    &buf, &msg))
                                bcl->stats.recv_fragmented++;
                        tipc_node_unlock(node);
                        tipc_net_route_msg(buf);
                } else if (msg_user(msg) == NAME_DISTRIBUTOR) {
                        tipc_node_unlock(node);
                        tipc_named_recv(buf);
                } else {
                        tipc_node_unlock(node);
                        buf_discard(buf);
                }
                buf = NULL;
                tipc_node_lock(node);
                deferred = node->bclink.deferred_head;
                if (deferred && (buf_seqno(deferred) == mod(next_in + 1))) {
                        buf = deferred;
                        msg = buf_msg(buf);
                        node->bclink.deferred_head = deferred->next;
                        goto receive;
                }
        } else if (less(next_in, seqno)) {
                u32 gap_after = node->bclink.gap_after;
                u32 gap_to = node->bclink.gap_to;

                if (tipc_link_defer_pkt(&node->bclink.deferred_head,
                                        &node->bclink.deferred_tail,
                                        buf)) {
                        node->bclink.nack_sync++;
                        bcl->stats.deferred_recv++;
                        if (seqno == mod(gap_after + 1))
                                node->bclink.gap_after = seqno;
                        else if (less(gap_after, seqno) && less(seqno, gap_to))
                                node->bclink.gap_to = seqno;
                }
                buf = NULL;
                if (bclink_ack_allowed(node->bclink.nack_sync)) {
                        if (gap_to != gap_after)
                                bclink_send_nack(node);
                        bclink_set_gap(node);
                }
        } else {
                bcl->stats.duplicates++;
        }
unlock:
        tipc_node_unlock(node);
exit:
        buf_discard(buf);
}

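/*
 * Reception flow, roughly: an in-sequence packet (seqno == last_in + 1) is
 * delivered upward at once, and the deferred queue is then polled via the
 * "goto receive" loop, so a single arrival can unblock a whole run of
 * previously out-of-order packets.  A packet from the future is parked in
 * the deferred queue and widens the recorded gap; a packet from the past
 * is counted as a duplicate and dropped.
 */
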
u32 tipc_bclink_acks_missing(struct tipc_node *n_ptr)
{
        return (n_ptr->bclink.supported &&
                (tipc_bclink_get_last_sent() != n_ptr->bclink.acked));
}

/**
 * tipc_bcbearer_send - send a packet through the broadcast pseudo-bearer
 *
 * Send packet over as many bearers as necessary to reach all nodes
 * that have joined the broadcast link.
 *
 * Returns 0 (packet sent successfully) under all circumstances,
 * since the broadcast link's pseudo-bearer never blocks
 */
static int tipc_bcbearer_send(struct sk_buff *buf,
                              struct tipc_bearer *unused1,
                              struct tipc_media_addr *unused2)
{
        int bp_index;

        /*
         * Prepare broadcast link message for reliable transmission,
         * if first time trying to send it;
         * preparation is skipped for broadcast link protocol messages
         * since they are sent in an unreliable manner and don't need it
         */
        if (likely(!msg_non_seq(buf_msg(buf)))) {
                struct tipc_msg *msg;

                bcbuf_set_acks(buf, bclink->bcast_nodes.count);
                msg = buf_msg(buf);
                msg_set_non_seq(msg, 1);
                msg_set_mc_netid(msg, tipc_net_id);
                bcl->stats.sent_info++;

                if (WARN_ON(!bclink->bcast_nodes.count)) {
                        dump_stack();
                        return 0;
                }
        }

        /* Send buffer over bearers until all targets reached */

        bcbearer->remains = bclink->bcast_nodes;

        for (bp_index = 0; bp_index < MAX_BEARERS; bp_index++) {
                struct tipc_bearer *p = bcbearer->bpairs[bp_index].primary;
                struct tipc_bearer *s = bcbearer->bpairs[bp_index].secondary;

                if (!p)
                        break;  /* no more bearers to try */

                tipc_nmap_diff(&bcbearer->remains, &p->nodes, &bcbearer->remains_new);
                if (bcbearer->remains_new.count == bcbearer->remains.count)
                        continue;       /* bearer pair doesn't add anything */

                if (p->blocked ||
                    p->media->send_msg(buf, p, &p->media->bcast_addr)) {
                        /* unable to send on primary bearer */
                        if (!s || s->blocked ||
                            s->media->send_msg(buf, s,
                                               &s->media->bcast_addr)) {
                                /* unable to send on either bearer */
                                continue;
                        }
                }

                if (s) {
                        bcbearer->bpairs[bp_index].primary = s;
                        bcbearer->bpairs[bp_index].secondary = p;
                }

                if (bcbearer->remains_new.count == 0)
                        break;  /* all targets reached */

                bcbearer->remains = bcbearer->remains_new;
        }

        return 0;
}

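/*
 * Two details worth noting above: "remains" tracks which target nodes are
 * still unreached, and a bearer pair is skipped whenever subtracting its
 * node set would not shrink that map.  Also, after each send the primary
 * and secondary bearers of a pair are swapped, a simple round-robin so
 * that successive broadcasts alternate between the two bearers instead of
 * always loading the same one.
 */
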
/**
 * tipc_bcbearer_sort - create sets of bearer pairs used by broadcast bearer
 */
void tipc_bcbearer_sort(void)
{
        struct bcbearer_pair *bp_temp = bcbearer->bpairs_temp;
        struct bcbearer_pair *bp_curr;
        int b_index;
        int pri;

        spin_lock_bh(&bc_lock);

        /* Group bearers by priority (can assume max of two per priority) */

        memset(bp_temp, 0, sizeof(bcbearer->bpairs_temp));

        for (b_index = 0; b_index < MAX_BEARERS; b_index++) {
                struct tipc_bearer *b = &tipc_bearers[b_index];

                if (!b->active || !b->nodes.count)
                        continue;

                if (!bp_temp[b->priority].primary)
                        bp_temp[b->priority].primary = b;
                else
                        bp_temp[b->priority].secondary = b;
        }

        /* Create array of bearer pairs for broadcasting */

        bp_curr = bcbearer->bpairs;
        memset(bcbearer->bpairs, 0, sizeof(bcbearer->bpairs));

        for (pri = TIPC_MAX_LINK_PRI; pri >= 0; pri--) {

                if (!bp_temp[pri].primary)
                        continue;

                bp_curr->primary = bp_temp[pri].primary;

                if (bp_temp[pri].secondary) {
                        if (tipc_nmap_equal(&bp_temp[pri].primary->nodes,
                                            &bp_temp[pri].secondary->nodes)) {
                                bp_curr->secondary = bp_temp[pri].secondary;
                        } else {
                                bp_curr++;
                                bp_curr->primary = bp_temp[pri].secondary;
                        }
                }

                bp_curr++;
        }

        spin_unlock_bh(&bc_lock);
}

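/*
 * Pairing example: two active bearers at the same priority that reach
 * exactly the same set of nodes become one primary/secondary pair, so
 * tipc_bcbearer_send() can fail over between them.  If their node sets
 * differ, each becomes the primary of its own pair, since one could not
 * substitute for the other without losing some destinations.  Pairs end
 * up ordered from highest link priority downward.
 */
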
int tipc_bclink_stats(char *buf, const u32 buf_size)
{
        struct print_buf pb;

        if (!bcl)
                return 0;

        tipc_printbuf_init(&pb, buf, buf_size);

        spin_lock_bh(&bc_lock);

        tipc_printf(&pb, "Link <%s>\n"
                         "  Window:%u packets\n",
                    bcl->name, bcl->queue_limit[0]);
        tipc_printf(&pb, "  RX packets:%u fragments:%u/%u bundles:%u/%u\n",
                    bcl->stats.recv_info,
                    bcl->stats.recv_fragments,
                    bcl->stats.recv_fragmented,
                    bcl->stats.recv_bundles,
                    bcl->stats.recv_bundled);
        tipc_printf(&pb, "  TX packets:%u fragments:%u/%u bundles:%u/%u\n",
                    bcl->stats.sent_info,
                    bcl->stats.sent_fragments,
                    bcl->stats.sent_fragmented,
                    bcl->stats.sent_bundles,
                    bcl->stats.sent_bundled);
        tipc_printf(&pb, "  RX naks:%u defs:%u dups:%u\n",
                    bcl->stats.recv_nacks,
                    bcl->stats.deferred_recv,
                    bcl->stats.duplicates);
        tipc_printf(&pb, "  TX naks:%u acks:%u dups:%u\n",
                    bcl->stats.sent_nacks,
                    bcl->stats.sent_acks,
                    bcl->stats.retransmitted);
        tipc_printf(&pb, "  Congestion bearer:%u link:%u  Send queue max:%u avg:%u\n",
                    bcl->stats.bearer_congs,
                    bcl->stats.link_congs,
                    bcl->stats.max_queue_sz,
                    bcl->stats.queue_sz_counts
                    ? (bcl->stats.accu_queue_sz / bcl->stats.queue_sz_counts)
                    : 0);

        spin_unlock_bh(&bc_lock);
        return tipc_printbuf_validate(&pb);
}

int tipc_bclink_reset_stats(void)
{
        if (!bcl)
                return -ENOPROTOOPT;

        spin_lock_bh(&bc_lock);
        memset(&bcl->stats, 0, sizeof(bcl->stats));
        spin_unlock_bh(&bc_lock);
        return 0;
}

int tipc_bclink_set_queue_limits(u32 limit)
{
        if (!bcl)
                return -ENOPROTOOPT;
        if ((limit < TIPC_MIN_LINK_WIN) || (limit > TIPC_MAX_LINK_WIN))
                return -EINVAL;

        spin_lock_bh(&bc_lock);
        tipc_link_set_queue_limits(bcl, limit);
        spin_unlock_bh(&bc_lock);
        return 0;
}

void tipc_bclink_init(void)
{
        INIT_LIST_HEAD(&bcbearer->bearer.cong_links);
        bcbearer->bearer.media = &bcbearer->media;
        bcbearer->media.send_msg = tipc_bcbearer_send;
        sprintf(bcbearer->media.name, "tipc-broadcast");

        INIT_LIST_HEAD(&bcl->waiting_ports);
        bcl->next_out_no = 1;
        spin_lock_init(&bclink->node.lock);
        bcl->owner = &bclink->node;
        bcl->max_pkt = MAX_PKT_DEFAULT_MCAST;
        tipc_link_set_queue_limits(bcl, BCLINK_WIN_DEFAULT);
        bcl->b_ptr = &bcbearer->bearer;
        bcl->state = WORKING_WORKING;
        strlcpy(bcl->name, tipc_bclink_name, TIPC_MAX_LINK_NAME);
}

void tipc_bclink_stop(void)
{
        spin_lock_bh(&bc_lock);
        tipc_link_stop(bcl);
        spin_unlock_bh(&bc_lock);

        memset(bclink, 0, sizeof(*bclink));
        memset(bcbearer, 0, sizeof(*bcbearer));
}

/**
 * tipc_nmap_add - add a node to a node map
 */
void tipc_nmap_add(struct tipc_node_map *nm_ptr, u32 node)
{
        int n = tipc_node(node);
        int w = n / WSIZE;
        u32 mask = (1 << (n % WSIZE));

        if ((nm_ptr->map[w] & mask) == 0) {
                nm_ptr->count++;
                nm_ptr->map[w] |= mask;
        }
}

/**
 * tipc_nmap_remove - remove a node from a node map
 */
void tipc_nmap_remove(struct tipc_node_map *nm_ptr, u32 node)
{
        int n = tipc_node(node);
        int w = n / WSIZE;
        u32 mask = (1 << (n % WSIZE));

        if ((nm_ptr->map[w] & mask) != 0) {
                nm_ptr->map[w] &= ~mask;
                nm_ptr->count--;
        }
}

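/*
 * Bitmap layout sketch, assuming WSIZE is the number of bits per map word
 * (32 for a u32 map): the node number part of the address picks word
 * n / WSIZE and bit n % WSIZE, so node number 70 would land in word 2,
 * bit 6 (70 == 2 * 32 + 6).  The count field caches the population so
 * membership totals never require rescanning the bitmap.
 */
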
/**
 * tipc_nmap_diff - find differences between node maps
 * @nm_a: input node map A
 * @nm_b: input node map B
 * @nm_diff: output node map A-B (i.e. nodes of A that are not in B)
 */
static void tipc_nmap_diff(struct tipc_node_map *nm_a,
                           struct tipc_node_map *nm_b,
                           struct tipc_node_map *nm_diff)
{
        int stop = ARRAY_SIZE(nm_a->map);
        int w;
        int b;
        u32 map;

        memset(nm_diff, 0, sizeof(*nm_diff));
        for (w = 0; w < stop; w++) {
                map = nm_a->map[w] ^ (nm_a->map[w] & nm_b->map[w]);
                nm_diff->map[w] = map;
                if (map != 0) {
                        for (b = 0; b < WSIZE; b++) {
                                if (map & (1 << b))
                                        nm_diff->count++;
                        }
                }
        }
}

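/*
 * Per word, "a ^ (a & b)" is just "a & ~b": clear from A every bit that is
 * also set in B.  For example, with a == 0xB (1011) and b == 0x3 (0011)
 * the result is 0x8 (1000), and the inner loop then adds one to the count
 * for that single surviving bit.
 */
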
/**
 * tipc_port_list_add - add a port to a port list, ensuring no duplicates
 */
void tipc_port_list_add(struct port_list *pl_ptr, u32 port)
{
        struct port_list *item = pl_ptr;
        int i;
        int item_sz = PLSIZE;
        int cnt = pl_ptr->count;

        for (; ; cnt -= item_sz, item = item->next) {
                if (cnt < PLSIZE)
                        item_sz = cnt;
                for (i = 0; i < item_sz; i++)
                        if (item->ports[i] == port)
                                return;
                if (i < PLSIZE) {
                        item->ports[i] = port;
                        pl_ptr->count++;
                        return;
                }
                if (!item->next) {
                        item->next = kmalloc(sizeof(*item), GFP_ATOMIC);
                        if (!item->next) {
                                warn("Incomplete multicast delivery, no memory\n");
                                return;
                        }
                        item->next->next = NULL;
                }
        }
}

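/*
 * A port_list is an unrolled linked list: the head holds the total count
 * plus up to PLSIZE port numbers inline, and only when an item fills up is
 * another PLSIZE-slot item kmalloc'ed onto the chain.  So, assuming PLSIZE
 * were 32, the 33rd distinct port would trigger the first allocation, and
 * duplicate adds return early without growing anything.
 */
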
/**
 * tipc_port_list_free - free dynamically created entries in port_list chain
 */
void tipc_port_list_free(struct port_list *pl_ptr)
{
        struct port_list *item;
        struct port_list *next;

        for (item = pl_ptr->next; item; item = next) {
                next = item->next;
                kfree(item);
        }
}