tipc: remove redundant #includes
/*
 * net/tipc/bcast.c: TIPC broadcast code
 *
 * Copyright (c) 2004-2006, Ericsson AB
 * Copyright (c) 2004, Intel Corporation.
 * Copyright (c) 2005, Wind River Systems
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the names of the copyright holders nor the names of its
 *    contributors may be used to endorse or promote products derived from
 *    this software without specific prior written permission.
 *
 * Alternatively, this software may be distributed under the terms of the
 * GNU General Public License ("GPL") version 2 as published by the Free
 * Software Foundation.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include "core.h"
#include "link.h"
#include "port.h"
#include "bcast.h"

#define MAX_PKT_DEFAULT_MCAST 1500      /* bcast link max packet size (fixed) */

#define BCLINK_WIN_DEFAULT 20           /* bcast link window size (default) */

/*
 * Loss rate for incoming broadcast frames; used to test retransmission code.
 * Set to N to cause every N'th frame to be discarded; 0 => don't discard any.
 */

#define TIPC_BCAST_LOSS_RATE 0

/**
 * struct bcbearer_pair - a pair of bearers used by broadcast link
 * @primary: pointer to primary bearer
 * @secondary: pointer to secondary bearer
 *
 * Bearers must have same priority and same set of reachable destinations
 * to be paired.
 */

struct bcbearer_pair {
        struct bearer *primary;
        struct bearer *secondary;
};

/**
 * struct bcbearer - bearer used by broadcast link
 * @bearer: (non-standard) broadcast bearer structure
 * @media: (non-standard) broadcast media structure
 * @bpairs: array of bearer pairs
 * @bpairs_temp: temporary array of bearer pairs used by tipc_bcbearer_sort()
 * @remains: temporary node map used by tipc_bcbearer_send()
 * @remains_new: temporary node map used by tipc_bcbearer_send()
 *
 * Note: The fields labelled "temporary" are incorporated into the bearer
 * to avoid consuming potentially limited stack space through the use of
 * large local variables within multicast routines.  Concurrent access is
 * prevented through use of the spinlock "bc_lock".
 */

struct bcbearer {
        struct bearer bearer;
        struct media media;
        struct bcbearer_pair bpairs[MAX_BEARERS];
        struct bcbearer_pair bpairs_temp[TIPC_MAX_LINK_PRI + 1];
        struct tipc_node_map remains;
        struct tipc_node_map remains_new;
};

/**
 * struct bclink - link used for broadcast messages
 * @link: (non-standard) broadcast link structure
 * @node: (non-standard) node structure representing b'cast link's peer node
 *
 * Handles sequence numbering, fragmentation, bundling, etc.
 */

struct bclink {
        struct link link;
        struct tipc_node node;
};


static struct bcbearer *bcbearer;
static struct bclink *bclink;
static struct link *bcl;
static DEFINE_SPINLOCK(bc_lock);

/* broadcast-capable node map */
struct tipc_node_map tipc_bcast_nmap;

const char tipc_bclink_name[] = "broadcast-link";

static void tipc_nmap_diff(struct tipc_node_map *nm_a,
                           struct tipc_node_map *nm_b,
                           struct tipc_node_map *nm_diff);

static u32 buf_seqno(struct sk_buff *buf)
{
        return msg_seqno(buf_msg(buf));
}

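/*
 * The broadcast link re-uses the skb control block's 'handle' field to
 * store the number of nodes that have yet to acknowledge a buffer; the
 * buffer is released once this count drops to zero
 * (see tipc_bclink_acknowledge()).
 */
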
static u32 bcbuf_acks(struct sk_buff *buf)
{
        return (u32)(unsigned long)TIPC_SKB_CB(buf)->handle;
}

static void bcbuf_set_acks(struct sk_buff *buf, u32 acks)
{
        TIPC_SKB_CB(buf)->handle = (void *)(unsigned long)acks;
}

static void bcbuf_decr_acks(struct sk_buff *buf)
{
        bcbuf_set_acks(buf, bcbuf_acks(buf) - 1);
}


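/*
 * Note: the broadcast link re-uses the link structure's 'fsm_msg_cnt' field
 * to record the sequence number of the last packet sent; this is the value
 * returned by tipc_bclink_get_last_sent().
 */
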
static void bclink_set_last_sent(void)
{
        if (bcl->next_out)
                bcl->fsm_msg_cnt = mod(buf_seqno(bcl->next_out) - 1);
        else
                bcl->fsm_msg_cnt = mod(bcl->next_out_no - 1);
}

u32 tipc_bclink_get_last_sent(void)
{
        return bcl->fsm_msg_cnt;
}

/**
 * bclink_set_gap - set gap according to contents of current deferred pkt queue
 *
 * Called with 'node' locked, bc_lock unlocked
 */

static void bclink_set_gap(struct tipc_node *n_ptr)
{
        struct sk_buff *buf = n_ptr->bclink.deferred_head;

        n_ptr->bclink.gap_after = n_ptr->bclink.gap_to =
                mod(n_ptr->bclink.last_in);
        if (unlikely(buf != NULL))
                n_ptr->bclink.gap_to = mod(buf_seqno(buf) - 1);
}

/**
 * bclink_ack_allowed - test if ACK or NACK message can be sent at this moment
 *
 * This mechanism endeavours to prevent all nodes in network from trying
 * to ACK or NACK at the same time.
 *
 * Note: TIPC uses a different trigger to distribute ACKs than it does to
 *       distribute NACKs, but tries to use the same spacing (divide by 16).
 */

static int bclink_ack_allowed(u32 n)
{
        return (n % TIPC_MIN_LINK_WIN) == tipc_own_tag;
}


/**
 * bclink_retransmit_pkt - retransmit broadcast packets
 * @after: sequence number of last packet to *not* retransmit
 * @to: sequence number of last packet to retransmit
 *
 * Called with bc_lock locked
 */

static void bclink_retransmit_pkt(u32 after, u32 to)
{
        struct sk_buff *buf;

        buf = bcl->first_out;
        while (buf && less_eq(buf_seqno(buf), after)) {
                buf = buf->next;
        }
        tipc_link_retransmit(bcl, buf, mod(to - after));
}

/**
 * tipc_bclink_acknowledge - handle acknowledgement of broadcast packets
 * @n_ptr: node that sent acknowledgement info
 * @acked: broadcast sequence # that has been acknowledged
 *
 * Node is locked, bc_lock unlocked.
 */

void tipc_bclink_acknowledge(struct tipc_node *n_ptr, u32 acked)
{
        struct sk_buff *crs;
        struct sk_buff *next;
        unsigned int released = 0;

        if (less_eq(acked, n_ptr->bclink.acked))
                return;

        spin_lock_bh(&bc_lock);

        /* Skip over packets that node has previously acknowledged */

        crs = bcl->first_out;
        while (crs && less_eq(buf_seqno(crs), n_ptr->bclink.acked)) {
                crs = crs->next;
        }

        /* Update packets that node is now acknowledging */

        while (crs && less_eq(buf_seqno(crs), acked)) {
                next = crs->next;
                bcbuf_decr_acks(crs);
                if (bcbuf_acks(crs) == 0) {
                        bcl->first_out = next;
                        bcl->out_queue_size--;
                        buf_discard(crs);
                        released = 1;
                }
                crs = next;
        }
        n_ptr->bclink.acked = acked;

        /* Try resolving broadcast link congestion, if necessary */

        if (unlikely(bcl->next_out)) {
                tipc_link_push_queue(bcl);
                bclink_set_last_sent();
        }
        if (unlikely(released && !list_empty(&bcl->waiting_ports)))
                tipc_link_wakeup_ports(bcl, 0);
        spin_unlock_bh(&bc_lock);
}

/**
 * bclink_send_ack - unicast an ACK msg
 *
 * tipc_net_lock and node lock set
 */

static void bclink_send_ack(struct tipc_node *n_ptr)
{
        struct link *l_ptr = n_ptr->active_links[n_ptr->addr & 1];

        if (l_ptr != NULL)
                tipc_link_send_proto_msg(l_ptr, STATE_MSG, 0, 0, 0, 0, 0);
}

/**
 * bclink_send_nack - broadcast a NACK msg
 *
 * tipc_net_lock and node lock set
 */

static void bclink_send_nack(struct tipc_node *n_ptr)
{
        struct sk_buff *buf;
        struct tipc_msg *msg;

        if (!less(n_ptr->bclink.gap_after, n_ptr->bclink.gap_to))
                return;

        buf = tipc_buf_acquire(INT_H_SIZE);
        if (buf) {
                msg = buf_msg(buf);
                tipc_msg_init(msg, BCAST_PROTOCOL, STATE_MSG,
                              INT_H_SIZE, n_ptr->addr);
                msg_set_mc_netid(msg, tipc_net_id);
                msg_set_bcast_ack(msg, mod(n_ptr->bclink.last_in));
                msg_set_bcgap_after(msg, n_ptr->bclink.gap_after);
                msg_set_bcgap_to(msg, n_ptr->bclink.gap_to);
                msg_set_bcast_tag(msg, tipc_own_tag);

                if (tipc_bearer_send(&bcbearer->bearer, buf, NULL)) {
                        bcl->stats.sent_nacks++;
                        buf_discard(buf);
                } else {
                        tipc_bearer_schedule(bcl->b_ptr, bcl);
                        bcl->proto_msg_queue = buf;
                        bcl->stats.bearer_congs++;
                }

                /*
                 * Ensure we don't send another NACK msg to the node
                 * until 16 more deferred messages arrive from it
                 * (i.e. helps prevent all nodes from NACK'ing at same time)
                 */

                n_ptr->bclink.nack_sync = tipc_own_tag;
        }
}

/**
 * tipc_bclink_check_gap - send a NACK if a sequence gap exists
 *
 * tipc_net_lock and node lock set
 */

void tipc_bclink_check_gap(struct tipc_node *n_ptr, u32 last_sent)
{
        if (!n_ptr->bclink.supported ||
            less_eq(last_sent, mod(n_ptr->bclink.last_in)))
                return;

        bclink_set_gap(n_ptr);
        if (n_ptr->bclink.gap_after == n_ptr->bclink.gap_to)
                n_ptr->bclink.gap_to = last_sent;
        bclink_send_nack(n_ptr);
}

/**
 * tipc_bclink_peek_nack - process a NACK msg meant for another node
 *
 * Only tipc_net_lock set.
 */

static void tipc_bclink_peek_nack(u32 dest, u32 sender_tag, u32 gap_after, u32 gap_to)
{
        struct tipc_node *n_ptr = tipc_node_find(dest);
        u32 my_after, my_to;

        if (unlikely(!n_ptr || !tipc_node_is_up(n_ptr)))
                return;
        tipc_node_lock(n_ptr);
        /*
         * Modify gap to suppress unnecessary NACKs from this node
         */
        my_after = n_ptr->bclink.gap_after;
        my_to = n_ptr->bclink.gap_to;

        if (less_eq(gap_after, my_after)) {
                if (less(my_after, gap_to) && less(gap_to, my_to))
                        n_ptr->bclink.gap_after = gap_to;
                else if (less_eq(my_to, gap_to))
                        n_ptr->bclink.gap_to = n_ptr->bclink.gap_after;
        } else if (less_eq(gap_after, my_to)) {
                if (less_eq(my_to, gap_to))
                        n_ptr->bclink.gap_to = gap_after;
        } else {
                /*
                 * Expand gap if missing bufs not in deferred queue:
                 */
                struct sk_buff *buf = n_ptr->bclink.deferred_head;
                u32 prev = n_ptr->bclink.gap_to;

                for (; buf; buf = buf->next) {
                        u32 seqno = buf_seqno(buf);

                        if (mod(seqno - prev) != 1) {
                                buf = NULL;
                                break;
                        }
                        if (seqno == gap_after)
                                break;
                        prev = seqno;
                }
                if (buf == NULL)
                        n_ptr->bclink.gap_to = gap_after;
        }
        /*
         * Some nodes may send a complementary NACK now:
         */
        if (bclink_ack_allowed(sender_tag + 1)) {
                if (n_ptr->bclink.gap_to != n_ptr->bclink.gap_after) {
                        bclink_send_nack(n_ptr);
                        bclink_set_gap(n_ptr);
                }
        }
        tipc_node_unlock(n_ptr);
}

/**
 * tipc_bclink_send_msg - broadcast a packet to all nodes in cluster
 */

int tipc_bclink_send_msg(struct sk_buff *buf)
{
        int res;

        spin_lock_bh(&bc_lock);

        res = tipc_link_send_buf(bcl, buf);
        if (unlikely(res == -ELINKCONG))
                buf_discard(buf);
        else
                bclink_set_last_sent();

        if (bcl->out_queue_size > bcl->stats.max_queue_sz)
                bcl->stats.max_queue_sz = bcl->out_queue_size;
        bcl->stats.queue_sz_counts++;
        bcl->stats.accu_queue_sz += bcl->out_queue_size;

        spin_unlock_bh(&bc_lock);
        return res;
}

/**
 * tipc_bclink_recv_pkt - receive a broadcast packet, and deliver upwards
 *
 * tipc_net_lock is read_locked, no other locks set
 */

void tipc_bclink_recv_pkt(struct sk_buff *buf)
{
#if (TIPC_BCAST_LOSS_RATE)
        static int rx_count;
#endif
        struct tipc_msg *msg = buf_msg(buf);
        struct tipc_node *node = tipc_node_find(msg_prevnode(msg));
        u32 next_in;
        u32 seqno;
        struct sk_buff *deferred;

        if (unlikely(!node || !tipc_node_is_up(node) || !node->bclink.supported ||
                     (msg_mc_netid(msg) != tipc_net_id))) {
                buf_discard(buf);
                return;
        }

        if (unlikely(msg_user(msg) == BCAST_PROTOCOL)) {
                if (msg_destnode(msg) == tipc_own_addr) {
                        tipc_node_lock(node);
                        tipc_bclink_acknowledge(node, msg_bcast_ack(msg));
                        tipc_node_unlock(node);
                        spin_lock_bh(&bc_lock);
                        bcl->stats.recv_nacks++;
                        bcl->owner->next = node;   /* remember requestor */
                        bclink_retransmit_pkt(msg_bcgap_after(msg),
                                              msg_bcgap_to(msg));
                        bcl->owner->next = NULL;
                        spin_unlock_bh(&bc_lock);
                } else {
                        tipc_bclink_peek_nack(msg_destnode(msg),
                                              msg_bcast_tag(msg),
                                              msg_bcgap_after(msg),
                                              msg_bcgap_to(msg));
                }
                buf_discard(buf);
                return;
        }

#if (TIPC_BCAST_LOSS_RATE)
        if (++rx_count == TIPC_BCAST_LOSS_RATE) {
                rx_count = 0;
                buf_discard(buf);
                return;
        }
#endif

        tipc_node_lock(node);
receive:
        deferred = node->bclink.deferred_head;
        next_in = mod(node->bclink.last_in + 1);
        seqno = msg_seqno(msg);

        if (likely(seqno == next_in)) {
                bcl->stats.recv_info++;
                node->bclink.last_in++;
                bclink_set_gap(node);
                if (unlikely(bclink_ack_allowed(seqno))) {
                        bclink_send_ack(node);
                        bcl->stats.sent_acks++;
                }
                if (likely(msg_isdata(msg))) {
                        tipc_node_unlock(node);
                        tipc_port_recv_mcast(buf, NULL);
                } else if (msg_user(msg) == MSG_BUNDLER) {
                        bcl->stats.recv_bundles++;
                        bcl->stats.recv_bundled += msg_msgcnt(msg);
                        tipc_node_unlock(node);
                        tipc_link_recv_bundle(buf);
                } else if (msg_user(msg) == MSG_FRAGMENTER) {
                        bcl->stats.recv_fragments++;
                        if (tipc_link_recv_fragment(&node->bclink.defragm,
                                                    &buf, &msg))
                                bcl->stats.recv_fragmented++;
                        tipc_node_unlock(node);
                        tipc_net_route_msg(buf);
                } else {
                        tipc_node_unlock(node);
                        tipc_net_route_msg(buf);
                }
                if (deferred && (buf_seqno(deferred) == mod(next_in + 1))) {
                        tipc_node_lock(node);
                        buf = deferred;
                        msg = buf_msg(buf);
                        node->bclink.deferred_head = deferred->next;
                        goto receive;
                }
                return;
        } else if (less(next_in, seqno)) {
                u32 gap_after = node->bclink.gap_after;
                u32 gap_to = node->bclink.gap_to;

                if (tipc_link_defer_pkt(&node->bclink.deferred_head,
                                        &node->bclink.deferred_tail,
                                        buf)) {
                        node->bclink.nack_sync++;
                        bcl->stats.deferred_recv++;
                        if (seqno == mod(gap_after + 1))
                                node->bclink.gap_after = seqno;
                        else if (less(gap_after, seqno) && less(seqno, gap_to))
                                node->bclink.gap_to = seqno;
                }
                if (bclink_ack_allowed(node->bclink.nack_sync)) {
                        if (gap_to != gap_after)
                                bclink_send_nack(node);
                        bclink_set_gap(node);
                }
        } else {
                bcl->stats.duplicates++;
                buf_discard(buf);
        }
        tipc_node_unlock(node);
}

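/**
 * tipc_bclink_acks_missing - test if node has not acked all broadcast packets
 */
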
u32 tipc_bclink_acks_missing(struct tipc_node *n_ptr)
{
        return (n_ptr->bclink.supported &&
                (tipc_bclink_get_last_sent() != n_ptr->bclink.acked));
}


/**
 * tipc_bcbearer_send - send a packet through the broadcast pseudo-bearer
 *
 * Send through as many bearers as necessary to reach all nodes
 * that support TIPC multicasting.
 *
 * Returns 0 if packet sent successfully, non-zero if not
 */

static int tipc_bcbearer_send(struct sk_buff *buf,
                              struct tipc_bearer *unused1,
                              struct tipc_media_addr *unused2)
{
        int bp_index;

        /* Prepare buffer for broadcasting (if first time trying to send it) */

        if (likely(!msg_non_seq(buf_msg(buf)))) {
                struct tipc_msg *msg;

                assert(tipc_bcast_nmap.count != 0);
                bcbuf_set_acks(buf, tipc_bcast_nmap.count);
                msg = buf_msg(buf);
                msg_set_non_seq(msg, 1);
                msg_set_mc_netid(msg, tipc_net_id);
                bcl->stats.sent_info++;
        }

        /* Send buffer over bearers until all targets reached */

        bcbearer->remains = tipc_bcast_nmap;

        for (bp_index = 0; bp_index < MAX_BEARERS; bp_index++) {
                struct bearer *p = bcbearer->bpairs[bp_index].primary;
                struct bearer *s = bcbearer->bpairs[bp_index].secondary;

                if (!p)
                        break;  /* no more bearers to try */

                tipc_nmap_diff(&bcbearer->remains, &p->nodes, &bcbearer->remains_new);
                if (bcbearer->remains_new.count == bcbearer->remains.count)
                        continue;       /* bearer pair doesn't add anything */

                if (p->publ.blocked ||
                    p->media->send_msg(buf, &p->publ, &p->media->bcast_addr)) {
                        /* unable to send on primary bearer */
                        if (!s || s->publ.blocked ||
                            s->media->send_msg(buf, &s->publ,
                                               &s->media->bcast_addr)) {
                                /* unable to send on either bearer */
                                continue;
                        }
                }

                if (s) {
                        bcbearer->bpairs[bp_index].primary = s;
                        bcbearer->bpairs[bp_index].secondary = p;
                }

                if (bcbearer->remains_new.count == 0)
                        return 0;

                bcbearer->remains = bcbearer->remains_new;
        }

        /*
         * Unable to reach all targets (indicate success, since currently
         * there isn't code in place to properly block & unblock the
         * pseudo-bearer used by the broadcast link)
         */

        return TIPC_OK;
}

/**
 * tipc_bcbearer_sort - create sets of bearer pairs used by broadcast bearer
 */

void tipc_bcbearer_sort(void)
{
        struct bcbearer_pair *bp_temp = bcbearer->bpairs_temp;
        struct bcbearer_pair *bp_curr;
        int b_index;
        int pri;

        spin_lock_bh(&bc_lock);

        /* Group bearers by priority (can assume max of two per priority) */

        memset(bp_temp, 0, sizeof(bcbearer->bpairs_temp));

        for (b_index = 0; b_index < MAX_BEARERS; b_index++) {
                struct bearer *b = &tipc_bearers[b_index];

                if (!b->active || !b->nodes.count)
                        continue;

                if (!bp_temp[b->priority].primary)
                        bp_temp[b->priority].primary = b;
                else
                        bp_temp[b->priority].secondary = b;
        }

        /* Create array of bearer pairs for broadcasting */

        bp_curr = bcbearer->bpairs;
        memset(bcbearer->bpairs, 0, sizeof(bcbearer->bpairs));

        for (pri = TIPC_MAX_LINK_PRI; pri >= 0; pri--) {

                if (!bp_temp[pri].primary)
                        continue;

                bp_curr->primary = bp_temp[pri].primary;

                if (bp_temp[pri].secondary) {
                        if (tipc_nmap_equal(&bp_temp[pri].primary->nodes,
                                            &bp_temp[pri].secondary->nodes)) {
                                bp_curr->secondary = bp_temp[pri].secondary;
                        } else {
                                bp_curr++;
                                bp_curr->primary = bp_temp[pri].secondary;
                        }
                }

                bp_curr++;
        }

        spin_unlock_bh(&bc_lock);
}

/**
 * tipc_bcbearer_push - resolve bearer congestion
 *
 * Forces bclink to push out any unsent packets, until all packets are gone
 * or congestion reoccurs.
 * No locks set when function called
 */

void tipc_bcbearer_push(void)
{
        struct bearer *b_ptr;

        spin_lock_bh(&bc_lock);
        b_ptr = &bcbearer->bearer;
        if (b_ptr->publ.blocked) {
                b_ptr->publ.blocked = 0;
                tipc_bearer_lock_push(b_ptr);
        }
        spin_unlock_bh(&bc_lock);
}


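/**
 * tipc_bclink_stats - print broadcast link statistics into the supplied buffer
 */
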
int tipc_bclink_stats(char *buf, const u32 buf_size)
{
        struct print_buf pb;

        if (!bcl)
                return 0;

        tipc_printbuf_init(&pb, buf, buf_size);

        spin_lock_bh(&bc_lock);

        tipc_printf(&pb, "Link <%s>\n"
                         "  Window:%u packets\n",
                    bcl->name, bcl->queue_limit[0]);
        tipc_printf(&pb, "  RX packets:%u fragments:%u/%u bundles:%u/%u\n",
                    bcl->stats.recv_info,
                    bcl->stats.recv_fragments,
                    bcl->stats.recv_fragmented,
                    bcl->stats.recv_bundles,
                    bcl->stats.recv_bundled);
        tipc_printf(&pb, "  TX packets:%u fragments:%u/%u bundles:%u/%u\n",
                    bcl->stats.sent_info,
                    bcl->stats.sent_fragments,
                    bcl->stats.sent_fragmented,
                    bcl->stats.sent_bundles,
                    bcl->stats.sent_bundled);
        tipc_printf(&pb, "  RX naks:%u defs:%u dups:%u\n",
                    bcl->stats.recv_nacks,
                    bcl->stats.deferred_recv,
                    bcl->stats.duplicates);
        tipc_printf(&pb, "  TX naks:%u acks:%u dups:%u\n",
                    bcl->stats.sent_nacks,
                    bcl->stats.sent_acks,
                    bcl->stats.retransmitted);
        tipc_printf(&pb, "  Congestion bearer:%u link:%u  Send queue max:%u avg:%u\n",
                    bcl->stats.bearer_congs,
                    bcl->stats.link_congs,
                    bcl->stats.max_queue_sz,
                    bcl->stats.queue_sz_counts
                    ? (bcl->stats.accu_queue_sz / bcl->stats.queue_sz_counts)
                    : 0);

        spin_unlock_bh(&bc_lock);
        return tipc_printbuf_validate(&pb);
}

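/**
 * tipc_bclink_reset_stats - reset broadcast link statistics
 */
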
int tipc_bclink_reset_stats(void)
{
        if (!bcl)
                return -ENOPROTOOPT;

        spin_lock_bh(&bc_lock);
        memset(&bcl->stats, 0, sizeof(bcl->stats));
        spin_unlock_bh(&bc_lock);
        return 0;
}

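/**
 * tipc_bclink_set_queue_limits - set the broadcast link window size
 */
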
int tipc_bclink_set_queue_limits(u32 limit)
{
        if (!bcl)
                return -ENOPROTOOPT;
        if ((limit < TIPC_MIN_LINK_WIN) || (limit > TIPC_MAX_LINK_WIN))
                return -EINVAL;

        spin_lock_bh(&bc_lock);
        tipc_link_set_queue_limits(bcl, limit);
        spin_unlock_bh(&bc_lock);
        return 0;
}

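/**
 * tipc_bclink_init - create and initialize the broadcast pseudo-bearer and link
 */
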
int tipc_bclink_init(void)
{
        bcbearer = kzalloc(sizeof(*bcbearer), GFP_ATOMIC);
        bclink = kzalloc(sizeof(*bclink), GFP_ATOMIC);
        if (!bcbearer || !bclink) {
                warn("Multicast link creation failed, no memory\n");
                kfree(bcbearer);
                bcbearer = NULL;
                kfree(bclink);
                bclink = NULL;
                return -ENOMEM;
        }

        INIT_LIST_HEAD(&bcbearer->bearer.cong_links);
        bcbearer->bearer.media = &bcbearer->media;
        bcbearer->media.send_msg = tipc_bcbearer_send;
        sprintf(bcbearer->media.name, "tipc-multicast");

        bcl = &bclink->link;
        INIT_LIST_HEAD(&bcl->waiting_ports);
        bcl->next_out_no = 1;
        spin_lock_init(&bclink->node.lock);
        bcl->owner = &bclink->node;
        bcl->max_pkt = MAX_PKT_DEFAULT_MCAST;
        tipc_link_set_queue_limits(bcl, BCLINK_WIN_DEFAULT);
        bcl->b_ptr = &bcbearer->bearer;
        bcl->state = WORKING_WORKING;
        strlcpy(bcl->name, tipc_bclink_name, TIPC_MAX_LINK_NAME);

        return 0;
}

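/**
 * tipc_bclink_stop - shut down the broadcast link and free its resources
 */
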
void tipc_bclink_stop(void)
{
        spin_lock_bh(&bc_lock);
        if (bcbearer) {
                tipc_link_stop(bcl);
                bcl = NULL;
                kfree(bclink);
                bclink = NULL;
                kfree(bcbearer);
                bcbearer = NULL;
        }
        spin_unlock_bh(&bc_lock);
}


/**
 * tipc_nmap_add - add a node to a node map
 */

void tipc_nmap_add(struct tipc_node_map *nm_ptr, u32 node)
{
        int n = tipc_node(node);
        int w = n / WSIZE;
        u32 mask = (1 << (n % WSIZE));

        if ((nm_ptr->map[w] & mask) == 0) {
                nm_ptr->count++;
                nm_ptr->map[w] |= mask;
        }
}

/**
 * tipc_nmap_remove - remove a node from a node map
 */

void tipc_nmap_remove(struct tipc_node_map *nm_ptr, u32 node)
{
        int n = tipc_node(node);
        int w = n / WSIZE;
        u32 mask = (1 << (n % WSIZE));

        if ((nm_ptr->map[w] & mask) != 0) {
                nm_ptr->map[w] &= ~mask;
                nm_ptr->count--;
        }
}

/**
 * tipc_nmap_diff - find differences between node maps
 * @nm_a: input node map A
 * @nm_b: input node map B
 * @nm_diff: output node map A-B (i.e. nodes of A that are not in B)
 */

static void tipc_nmap_diff(struct tipc_node_map *nm_a,
                           struct tipc_node_map *nm_b,
                           struct tipc_node_map *nm_diff)
{
        int stop = ARRAY_SIZE(nm_a->map);
        int w;
        int b;
        u32 map;

        memset(nm_diff, 0, sizeof(*nm_diff));
        for (w = 0; w < stop; w++) {
                map = nm_a->map[w] ^ (nm_a->map[w] & nm_b->map[w]);
                nm_diff->map[w] = map;
                if (map != 0) {
                        for (b = 0; b < WSIZE; b++) {
                                if (map & (1 << b))
                                        nm_diff->count++;
                        }
                }
        }
}

/**
 * tipc_port_list_add - add a port to a port list, ensuring no duplicates
 */

void tipc_port_list_add(struct port_list *pl_ptr, u32 port)
{
        struct port_list *item = pl_ptr;
        int i;
        int item_sz = PLSIZE;
        int cnt = pl_ptr->count;

        for (; ; cnt -= item_sz, item = item->next) {
                if (cnt < PLSIZE)
                        item_sz = cnt;
                for (i = 0; i < item_sz; i++)
                        if (item->ports[i] == port)
                                return;
                if (i < PLSIZE) {
                        item->ports[i] = port;
                        pl_ptr->count++;
                        return;
                }
                if (!item->next) {
                        item->next = kmalloc(sizeof(*item), GFP_ATOMIC);
                        if (!item->next) {
                                warn("Incomplete multicast delivery, no memory\n");
                                return;
                        }
                        item->next->next = NULL;
                }
        }
}

/**
 * tipc_port_list_free - free dynamically created entries in port_list chain
 */

void tipc_port_list_free(struct port_list *pl_ptr)
{
        struct port_list *item;
        struct port_list *next;

        for (item = pl_ptr->next; item; item = next) {
                next = item->next;
                kfree(item);
        }
}
