[SK_BUFF]: Introduce skb_copy_to_linear_data{_offset}
/*
 * net/tipc/bcast.c: TIPC broadcast code
 *
 * Copyright (c) 2004-2006, Ericsson AB
 * Copyright (c) 2004, Intel Corporation.
 * Copyright (c) 2005, Wind River Systems
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the names of the copyright holders nor the names of its
 *    contributors may be used to endorse or promote products derived from
 *    this software without specific prior written permission.
 *
 * Alternatively, this software may be distributed under the terms of the
 * GNU General Public License ("GPL") version 2 as published by the Free
 * Software Foundation.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include "core.h"
#include "msg.h"
#include "dbg.h"
#include "link.h"
#include "net.h"
#include "node.h"
#include "port.h"
#include "addr.h"
#include "node_subscr.h"
#include "name_distr.h"
#include "bearer.h"
#include "name_table.h"
#include "bcast.h"

#define MAX_PKT_DEFAULT_MCAST 1500      /* bcast link max packet size (fixed) */

#define BCLINK_WIN_DEFAULT 20           /* bcast link window size (default) */

#define BCLINK_LOG_BUF_SIZE 0

/*
 * Loss rate for incoming broadcast frames; used to test retransmission code.
 * Set to N to cause every N'th frame to be discarded; 0 => don't discard any.
 */

#define TIPC_BCAST_LOSS_RATE 0

/**
 * struct bcbearer_pair - a pair of bearers used by broadcast link
 * @primary: pointer to primary bearer
 * @secondary: pointer to secondary bearer
 *
 * Bearers must have the same priority and the same set of reachable
 * destinations to be paired.
 */

struct bcbearer_pair {
        struct bearer *primary;
        struct bearer *secondary;
};

/**
 * struct bcbearer - bearer used by broadcast link
 * @bearer: (non-standard) broadcast bearer structure
 * @media: (non-standard) broadcast media structure
 * @bpairs: array of bearer pairs
 * @bpairs_temp: temporary array of bearer pairs used by tipc_bcbearer_sort()
 * @remains: temporary node map used by tipc_bcbearer_send()
 * @remains_new: temporary node map used by tipc_bcbearer_send()
 *
 * Note: The fields labelled "temporary" are incorporated into the bearer
 * to avoid consuming potentially limited stack space through the use of
 * large local variables within multicast routines.  Concurrent access is
 * prevented through use of the spinlock "bc_lock".
 */

struct bcbearer {
        struct bearer bearer;
        struct media media;
        struct bcbearer_pair bpairs[MAX_BEARERS];
        struct bcbearer_pair bpairs_temp[TIPC_MAX_LINK_PRI + 1];
        struct node_map remains;
        struct node_map remains_new;
};

/**
 * struct bclink - link used for broadcast messages
 * @link: (non-standard) broadcast link structure
 * @node: (non-standard) node structure representing b'cast link's peer node
 *
 * Handles sequence numbering, fragmentation, bundling, etc.
 */

struct bclink {
        struct link link;
        struct node node;
};

static struct bcbearer *bcbearer = NULL;
static struct bclink *bclink = NULL;
static struct link *bcl = NULL;
static DEFINE_SPINLOCK(bc_lock);

char tipc_bclink_name[] = "multicast-link";

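/*
 * buf_seqno() returns a buffer's sequence number; the bcbuf_*() helpers
 * below reuse the sk_buff control block's "handle" field to count the
 * number of nodes that have not yet acknowledged the buffer.
 */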
static u32 buf_seqno(struct sk_buff *buf)
{
        return msg_seqno(buf_msg(buf));
}

static u32 bcbuf_acks(struct sk_buff *buf)
{
        return (u32)(unsigned long)TIPC_SKB_CB(buf)->handle;
}

static void bcbuf_set_acks(struct sk_buff *buf, u32 acks)
{
        TIPC_SKB_CB(buf)->handle = (void *)(unsigned long)acks;
}

static void bcbuf_decr_acks(struct sk_buff *buf)
{
        bcbuf_set_acks(buf, bcbuf_acks(buf) - 1);
}

/**
 * bclink_set_gap - set gap according to contents of current deferred pkt queue
 *
 * Called with 'node' locked, bc_lock unlocked
 */

static void bclink_set_gap(struct node *n_ptr)
{
        struct sk_buff *buf = n_ptr->bclink.deferred_head;

        n_ptr->bclink.gap_after = n_ptr->bclink.gap_to =
                mod(n_ptr->bclink.last_in);
        if (unlikely(buf != NULL))
                n_ptr->bclink.gap_to = mod(buf_seqno(buf) - 1);
}

/**
 * bclink_ack_allowed - test if ACK or NACK message can be sent at this moment
 *
 * This mechanism endeavours to prevent all nodes in the network from trying
 * to ACK or NACK at the same time.
 *
 * Note: TIPC uses a different trigger to distribute ACKs than it does to
 *       distribute NACKs, but tries to use the same spacing (divide by 16).
 */

static int bclink_ack_allowed(u32 n)
{
        return ((n % TIPC_MIN_LINK_WIN) == tipc_own_tag);
}

/**
 * bclink_retransmit_pkt - retransmit broadcast packets
 * @after: sequence number of last packet to *not* retransmit
 * @to: sequence number of last packet to retransmit
 *
 * Called with bc_lock locked
 */

static void bclink_retransmit_pkt(u32 after, u32 to)
{
        struct sk_buff *buf;

        buf = bcl->first_out;
        while (buf && less_eq(buf_seqno(buf), after)) {
                buf = buf->next;
        }
        tipc_link_retransmit(bcl, buf, mod(to - after));
}

/**
 * tipc_bclink_acknowledge - handle acknowledgement of broadcast packets
 * @n_ptr: node that sent acknowledgement info
 * @acked: broadcast sequence # that has been acknowledged
 *
 * Node is locked, bc_lock unlocked.
 */

void tipc_bclink_acknowledge(struct node *n_ptr, u32 acked)
{
        struct sk_buff *crs;
        struct sk_buff *next;
        unsigned int released = 0;

        if (less_eq(acked, n_ptr->bclink.acked))
                return;

        spin_lock_bh(&bc_lock);

        /* Skip over packets that node has previously acknowledged */

        crs = bcl->first_out;
        while (crs && less_eq(buf_seqno(crs), n_ptr->bclink.acked)) {
                crs = crs->next;
        }

        /* Update packets that node is now acknowledging */

        while (crs && less_eq(buf_seqno(crs), acked)) {
                next = crs->next;
                bcbuf_decr_acks(crs);
                if (bcbuf_acks(crs) == 0) {
                        bcl->first_out = next;
                        bcl->out_queue_size--;
                        buf_discard(crs);
                        released = 1;
                }
                crs = next;
        }
        n_ptr->bclink.acked = acked;

        /* Try resolving broadcast link congestion, if necessary */

        if (unlikely(bcl->next_out))
                tipc_link_push_queue(bcl);
        if (unlikely(released && !list_empty(&bcl->waiting_ports)))
                tipc_link_wakeup_ports(bcl, 0);
        spin_unlock_bh(&bc_lock);
}

/**
 * bclink_send_ack - unicast an ACK msg
 *
 * tipc_net_lock and node lock set
 */

static void bclink_send_ack(struct node *n_ptr)
{
        struct link *l_ptr = n_ptr->active_links[n_ptr->addr & 1];

        if (l_ptr != NULL)
                tipc_link_send_proto_msg(l_ptr, STATE_MSG, 0, 0, 0, 0, 0);
}

/**
 * bclink_send_nack - broadcast a NACK msg
 *
 * tipc_net_lock and node lock set
 */

static void bclink_send_nack(struct node *n_ptr)
{
        struct sk_buff *buf;
        struct tipc_msg *msg;

        if (!less(n_ptr->bclink.gap_after, n_ptr->bclink.gap_to))
                return;

        buf = buf_acquire(INT_H_SIZE);
        if (buf) {
                msg = buf_msg(buf);
                msg_init(msg, BCAST_PROTOCOL, STATE_MSG,
                         TIPC_OK, INT_H_SIZE, n_ptr->addr);
                msg_set_mc_netid(msg, tipc_net_id);
                msg_set_bcast_ack(msg, mod(n_ptr->bclink.last_in));
                msg_set_bcgap_after(msg, n_ptr->bclink.gap_after);
                msg_set_bcgap_to(msg, n_ptr->bclink.gap_to);
                msg_set_bcast_tag(msg, tipc_own_tag);

                if (tipc_bearer_send(&bcbearer->bearer, buf, NULL)) {
                        bcl->stats.sent_nacks++;
                        buf_discard(buf);
                } else {
                        tipc_bearer_schedule(bcl->b_ptr, bcl);
                        bcl->proto_msg_queue = buf;
                        bcl->stats.bearer_congs++;
                }

                /*
                 * Ensure we don't send another NACK msg to the node
                 * until 16 more deferred messages arrive from it
                 * (i.e. helps prevent all nodes from NACK'ing at same time)
                 */

                n_ptr->bclink.nack_sync = tipc_own_tag;
        }
}

/**
 * tipc_bclink_check_gap - send a NACK if a sequence gap exists
 *
 * tipc_net_lock and node lock set
 */

void tipc_bclink_check_gap(struct node *n_ptr, u32 last_sent)
{
        if (!n_ptr->bclink.supported ||
            less_eq(last_sent, mod(n_ptr->bclink.last_in)))
                return;

        bclink_set_gap(n_ptr);
        if (n_ptr->bclink.gap_after == n_ptr->bclink.gap_to)
                n_ptr->bclink.gap_to = last_sent;
        bclink_send_nack(n_ptr);
}

/**
 * tipc_bclink_peek_nack - process a NACK msg meant for another node
 *
 * Only tipc_net_lock set.
 */

static void tipc_bclink_peek_nack(u32 dest, u32 sender_tag, u32 gap_after, u32 gap_to)
{
        struct node *n_ptr = tipc_node_find(dest);
        u32 my_after, my_to;

        if (unlikely(!n_ptr || !tipc_node_is_up(n_ptr)))
                return;
        tipc_node_lock(n_ptr);
        /*
         * Modify gap to suppress unnecessary NACKs from this node
         */
        my_after = n_ptr->bclink.gap_after;
        my_to = n_ptr->bclink.gap_to;

        if (less_eq(gap_after, my_after)) {
                if (less(my_after, gap_to) && less(gap_to, my_to))
                        n_ptr->bclink.gap_after = gap_to;
                else if (less_eq(my_to, gap_to))
                        n_ptr->bclink.gap_to = n_ptr->bclink.gap_after;
        } else if (less_eq(gap_after, my_to)) {
                if (less_eq(my_to, gap_to))
                        n_ptr->bclink.gap_to = gap_after;
        } else {
                /*
                 * Expand gap if missing bufs not in deferred queue:
                 */
                struct sk_buff *buf = n_ptr->bclink.deferred_head;
                u32 prev = n_ptr->bclink.gap_to;

                for (; buf; buf = buf->next) {
                        u32 seqno = buf_seqno(buf);

                        if (mod(seqno - prev) != 1) {
                                buf = NULL;
                                break;
                        }
                        if (seqno == gap_after)
                                break;
                        prev = seqno;
                }
                if (buf == NULL)
                        n_ptr->bclink.gap_to = gap_after;
        }
        /*
         * Some nodes may send a complementary NACK now:
         */
        if (bclink_ack_allowed(sender_tag + 1)) {
                if (n_ptr->bclink.gap_to != n_ptr->bclink.gap_after) {
                        bclink_send_nack(n_ptr);
                        bclink_set_gap(n_ptr);
                }
        }
        tipc_node_unlock(n_ptr);
}

/**
 * tipc_bclink_send_msg - broadcast a packet to all nodes in cluster
 */

int tipc_bclink_send_msg(struct sk_buff *buf)
{
        int res;

        spin_lock_bh(&bc_lock);

        res = tipc_link_send_buf(bcl, buf);
        if (unlikely(res == -ELINKCONG))
                buf_discard(buf);
        else
                bcl->stats.sent_info++;

        if (bcl->out_queue_size > bcl->stats.max_queue_sz)
                bcl->stats.max_queue_sz = bcl->out_queue_size;
        bcl->stats.queue_sz_counts++;
        bcl->stats.accu_queue_sz += bcl->out_queue_size;

        spin_unlock_bh(&bc_lock);
        return res;
}

/**
 * tipc_bclink_recv_pkt - receive a broadcast packet, and deliver upwards
 *
 * tipc_net_lock is read_locked, no other locks set
 */

void tipc_bclink_recv_pkt(struct sk_buff *buf)
{
#if (TIPC_BCAST_LOSS_RATE)
        static int rx_count = 0;
#endif
        struct tipc_msg *msg = buf_msg(buf);
        struct node *node = tipc_node_find(msg_prevnode(msg));
        u32 next_in;
        u32 seqno;
        struct sk_buff *deferred;

        msg_dbg(msg, "<BC<<<");

        if (unlikely(!node || !tipc_node_is_up(node) || !node->bclink.supported ||
                     (msg_mc_netid(msg) != tipc_net_id))) {
                buf_discard(buf);
                return;
        }

        if (unlikely(msg_user(msg) == BCAST_PROTOCOL)) {
                msg_dbg(msg, "<BCNACK<<<");
                if (msg_destnode(msg) == tipc_own_addr) {
                        tipc_node_lock(node);
                        tipc_bclink_acknowledge(node, msg_bcast_ack(msg));
                        tipc_node_unlock(node);
                        spin_lock_bh(&bc_lock);
                        bcl->stats.recv_nacks++;
                        bcl->owner->next = node;   /* remember requestor */
                        bclink_retransmit_pkt(msg_bcgap_after(msg),
                                              msg_bcgap_to(msg));
                        bcl->owner->next = NULL;
                        spin_unlock_bh(&bc_lock);
                } else {
                        tipc_bclink_peek_nack(msg_destnode(msg),
                                              msg_bcast_tag(msg),
                                              msg_bcgap_after(msg),
                                              msg_bcgap_to(msg));
                }
                buf_discard(buf);
                return;
        }

#if (TIPC_BCAST_LOSS_RATE)
        if (++rx_count == TIPC_BCAST_LOSS_RATE) {
                rx_count = 0;
                buf_discard(buf);
                return;
        }
#endif

        tipc_node_lock(node);
receive:
        deferred = node->bclink.deferred_head;
        next_in = mod(node->bclink.last_in + 1);
        seqno = msg_seqno(msg);

        if (likely(seqno == next_in)) {
                bcl->stats.recv_info++;
                node->bclink.last_in++;
                bclink_set_gap(node);
                if (unlikely(bclink_ack_allowed(seqno))) {
                        bclink_send_ack(node);
                        bcl->stats.sent_acks++;
                }
                if (likely(msg_isdata(msg))) {
                        tipc_node_unlock(node);
                        tipc_port_recv_mcast(buf, NULL);
                } else if (msg_user(msg) == MSG_BUNDLER) {
                        bcl->stats.recv_bundles++;
                        bcl->stats.recv_bundled += msg_msgcnt(msg);
                        tipc_node_unlock(node);
                        tipc_link_recv_bundle(buf);
                } else if (msg_user(msg) == MSG_FRAGMENTER) {
                        bcl->stats.recv_fragments++;
                        if (tipc_link_recv_fragment(&node->bclink.defragm,
                                                    &buf, &msg))
                                bcl->stats.recv_fragmented++;
                        tipc_node_unlock(node);
                        tipc_net_route_msg(buf);
                } else {
                        tipc_node_unlock(node);
                        tipc_net_route_msg(buf);
                }
                if (deferred && (buf_seqno(deferred) == mod(next_in + 1))) {
                        tipc_node_lock(node);
                        buf = deferred;
                        msg = buf_msg(buf);
                        node->bclink.deferred_head = deferred->next;
                        goto receive;
                }
                return;
        } else if (less(next_in, seqno)) {
                u32 gap_after = node->bclink.gap_after;
                u32 gap_to = node->bclink.gap_to;

                if (tipc_link_defer_pkt(&node->bclink.deferred_head,
                                        &node->bclink.deferred_tail,
                                        buf)) {
                        node->bclink.nack_sync++;
                        bcl->stats.deferred_recv++;
                        if (seqno == mod(gap_after + 1))
                                node->bclink.gap_after = seqno;
                        else if (less(gap_after, seqno) && less(seqno, gap_to))
                                node->bclink.gap_to = seqno;
                }
                if (bclink_ack_allowed(node->bclink.nack_sync)) {
                        if (gap_to != gap_after)
                                bclink_send_nack(node);
                        bclink_set_gap(node);
                }
        } else {
                bcl->stats.duplicates++;
                buf_discard(buf);
        }
        tipc_node_unlock(node);
}

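/**
 * tipc_bclink_get_last_sent - get sequence number of last packet sent
 *      over the broadcast link
 */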
u32 tipc_bclink_get_last_sent(void)
{
        u32 last_sent = mod(bcl->next_out_no - 1);

        if (bcl->next_out)
                last_sent = mod(buf_seqno(bcl->next_out) - 1);
        return last_sent;
}

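/**
 * tipc_bclink_acks_missing - test if node hasn't acknowledged all packets
 *      it has been sent over the broadcast link
 */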
u32 tipc_bclink_acks_missing(struct node *n_ptr)
{
        return (n_ptr->bclink.supported &&
                (tipc_bclink_get_last_sent() != n_ptr->bclink.acked));
}

/**
 * tipc_bcbearer_send - send a packet through the broadcast pseudo-bearer
 *
 * Send through as many bearers as necessary to reach all nodes
 * that support TIPC multicasting.
 *
 * Returns 0 if packet sent successfully, non-zero if not
 */

static int tipc_bcbearer_send(struct sk_buff *buf,
                              struct tipc_bearer *unused1,
                              struct tipc_media_addr *unused2)
{
        static int send_count = 0;

        int bp_index;
        int swap_time;

        /* Prepare buffer for broadcasting (if first time trying to send it) */

        if (likely(!msg_non_seq(buf_msg(buf)))) {
                struct tipc_msg *msg;

                assert(tipc_cltr_bcast_nodes.count != 0);
                bcbuf_set_acks(buf, tipc_cltr_bcast_nodes.count);
                msg = buf_msg(buf);
                msg_set_non_seq(msg);
                msg_set_mc_netid(msg, tipc_net_id);
        }

        /* Determine if bearer pairs should be swapped following this attempt */

        if ((swap_time = (++send_count >= 10)))
                send_count = 0;

        /* Send buffer over bearers until all targets reached */

        bcbearer->remains = tipc_cltr_bcast_nodes;

        for (bp_index = 0; bp_index < MAX_BEARERS; bp_index++) {
                struct bearer *p = bcbearer->bpairs[bp_index].primary;
                struct bearer *s = bcbearer->bpairs[bp_index].secondary;

                if (!p)
                        break;  /* no more bearers to try */

                tipc_nmap_diff(&bcbearer->remains, &p->nodes, &bcbearer->remains_new);
                if (bcbearer->remains_new.count == bcbearer->remains.count)
                        continue;       /* bearer pair doesn't add anything */

                if (!p->publ.blocked &&
                    !p->media->send_msg(buf, &p->publ, &p->media->bcast_addr)) {
                        if (swap_time && s && !s->publ.blocked)
                                goto swap;
                        else
                                goto update;
                }

                if (!s || s->publ.blocked ||
                    s->media->send_msg(buf, &s->publ, &s->media->bcast_addr))
                        continue;       /* unable to send using bearer pair */
swap:
                bcbearer->bpairs[bp_index].primary = s;
                bcbearer->bpairs[bp_index].secondary = p;
update:
                if (bcbearer->remains_new.count == 0)
                        return TIPC_OK;

                bcbearer->remains = bcbearer->remains_new;
        }

        /* Unable to reach all targets */

        bcbearer->bearer.publ.blocked = 1;
        bcl->stats.bearer_congs++;
        return ~TIPC_OK;
}

/**
 * tipc_bcbearer_sort - create sets of bearer pairs used by broadcast bearer
 */

void tipc_bcbearer_sort(void)
{
        struct bcbearer_pair *bp_temp = bcbearer->bpairs_temp;
        struct bcbearer_pair *bp_curr;
        int b_index;
        int pri;

        spin_lock_bh(&bc_lock);

        /* Group bearers by priority (can assume max of two per priority) */

        memset(bp_temp, 0, sizeof(bcbearer->bpairs_temp));

        for (b_index = 0; b_index < MAX_BEARERS; b_index++) {
                struct bearer *b = &tipc_bearers[b_index];

                if (!b->active || !b->nodes.count)
                        continue;

                if (!bp_temp[b->priority].primary)
                        bp_temp[b->priority].primary = b;
                else
                        bp_temp[b->priority].secondary = b;
        }

        /* Create array of bearer pairs for broadcasting */

        bp_curr = bcbearer->bpairs;
        memset(bcbearer->bpairs, 0, sizeof(bcbearer->bpairs));

        for (pri = TIPC_MAX_LINK_PRI; pri >= 0; pri--) {

                if (!bp_temp[pri].primary)
                        continue;

                bp_curr->primary = bp_temp[pri].primary;

                if (bp_temp[pri].secondary) {
                        if (tipc_nmap_equal(&bp_temp[pri].primary->nodes,
                                            &bp_temp[pri].secondary->nodes)) {
                                bp_curr->secondary = bp_temp[pri].secondary;
                        } else {
                                bp_curr++;
                                bp_curr->primary = bp_temp[pri].secondary;
                        }
                }

                bp_curr++;
        }

        spin_unlock_bh(&bc_lock);
}

/**
 * tipc_bcbearer_push - resolve bearer congestion
 *
 * Forces bclink to push out any unsent packets, until all packets are gone
 * or congestion reoccurs.
 * No locks set when function called
 */

void tipc_bcbearer_push(void)
{
        struct bearer *b_ptr;

        spin_lock_bh(&bc_lock);
        b_ptr = &bcbearer->bearer;
        if (b_ptr->publ.blocked) {
                b_ptr->publ.blocked = 0;
                tipc_bearer_lock_push(b_ptr);
        }
        spin_unlock_bh(&bc_lock);
}

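/**
 * tipc_bclink_stats - print broadcast link statistics into the given buffer
 *
 * Returns length of printed data
 */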
int tipc_bclink_stats(char *buf, const u32 buf_size)
{
        struct print_buf pb;

        if (!bcl)
                return 0;

        tipc_printbuf_init(&pb, buf, buf_size);

        spin_lock_bh(&bc_lock);

        tipc_printf(&pb, "Link <%s>\n"
                         "  Window:%u packets\n",
                    bcl->name, bcl->queue_limit[0]);
        tipc_printf(&pb, "  RX packets:%u fragments:%u/%u bundles:%u/%u\n",
                    bcl->stats.recv_info,
                    bcl->stats.recv_fragments,
                    bcl->stats.recv_fragmented,
                    bcl->stats.recv_bundles,
                    bcl->stats.recv_bundled);
        tipc_printf(&pb, "  TX packets:%u fragments:%u/%u bundles:%u/%u\n",
                    bcl->stats.sent_info,
                    bcl->stats.sent_fragments,
                    bcl->stats.sent_fragmented,
                    bcl->stats.sent_bundles,
                    bcl->stats.sent_bundled);
        tipc_printf(&pb, "  RX naks:%u defs:%u dups:%u\n",
                    bcl->stats.recv_nacks,
                    bcl->stats.deferred_recv,
                    bcl->stats.duplicates);
        tipc_printf(&pb, "  TX naks:%u acks:%u dups:%u\n",
                    bcl->stats.sent_nacks,
                    bcl->stats.sent_acks,
                    bcl->stats.retransmitted);
        tipc_printf(&pb, "  Congestion bearer:%u link:%u  Send queue max:%u avg:%u\n",
                    bcl->stats.bearer_congs,
                    bcl->stats.link_congs,
                    bcl->stats.max_queue_sz,
                    bcl->stats.queue_sz_counts
                    ? (bcl->stats.accu_queue_sz / bcl->stats.queue_sz_counts)
                    : 0);

        spin_unlock_bh(&bc_lock);
        return tipc_printbuf_validate(&pb);
}

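/**
 * tipc_bclink_reset_stats - reset broadcast link statistics
 */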
int tipc_bclink_reset_stats(void)
{
        if (!bcl)
                return -ENOPROTOOPT;

        spin_lock_bh(&bc_lock);
        memset(&bcl->stats, 0, sizeof(bcl->stats));
        spin_unlock_bh(&bc_lock);
        return TIPC_OK;
}

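/**
 * tipc_bclink_set_queue_limits - set the broadcast link window size
 */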
int tipc_bclink_set_queue_limits(u32 limit)
{
        if (!bcl)
                return -ENOPROTOOPT;
        if ((limit < TIPC_MIN_LINK_WIN) || (limit > TIPC_MAX_LINK_WIN))
                return -EINVAL;

        spin_lock_bh(&bc_lock);
        tipc_link_set_queue_limits(bcl, limit);
        spin_unlock_bh(&bc_lock);
        return TIPC_OK;
}

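/**
 * tipc_bclink_init - create the broadcast pseudo-bearer and broadcast link
 */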
int tipc_bclink_init(void)
{
        bcbearer = kzalloc(sizeof(*bcbearer), GFP_ATOMIC);
        bclink = kzalloc(sizeof(*bclink), GFP_ATOMIC);
        if (!bcbearer || !bclink) {
 nomem:
                warn("Multicast link creation failed, no memory\n");
                kfree(bcbearer);
                bcbearer = NULL;
                kfree(bclink);
                bclink = NULL;
                return -ENOMEM;
        }

        INIT_LIST_HEAD(&bcbearer->bearer.cong_links);
        bcbearer->bearer.media = &bcbearer->media;
        bcbearer->media.send_msg = tipc_bcbearer_send;
        sprintf(bcbearer->media.name, "tipc-multicast");

        bcl = &bclink->link;
        INIT_LIST_HEAD(&bcl->waiting_ports);
        bcl->next_out_no = 1;
        spin_lock_init(&bclink->node.lock);
        bcl->owner = &bclink->node;
        bcl->max_pkt = MAX_PKT_DEFAULT_MCAST;
        tipc_link_set_queue_limits(bcl, BCLINK_WIN_DEFAULT);
        bcl->b_ptr = &bcbearer->bearer;
        bcl->state = WORKING_WORKING;
        sprintf(bcl->name, tipc_bclink_name);

        if (BCLINK_LOG_BUF_SIZE) {
                char *pb = kmalloc(BCLINK_LOG_BUF_SIZE, GFP_ATOMIC);

                if (!pb)
                        goto nomem;
                tipc_printbuf_init(&bcl->print_buf, pb, BCLINK_LOG_BUF_SIZE);
        }

        return TIPC_OK;
}

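/**
 * tipc_bclink_stop - shut down the broadcast link and release its resources
 */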
void tipc_bclink_stop(void)
{
        spin_lock_bh(&bc_lock);
        if (bcbearer) {
                tipc_link_stop(bcl);
                if (BCLINK_LOG_BUF_SIZE)
                        kfree(bcl->print_buf.buf);
                bcl = NULL;
                kfree(bclink);
                bclink = NULL;
                kfree(bcbearer);
                bcbearer = NULL;
        }
        spin_unlock_bh(&bc_lock);
}