2 * net/tipc/link.c: TIPC link code
4 * Copyright (c) 1996-2006, Ericsson AB
5 * Copyright (c) 2004-2005, Wind River Systems
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions are met:
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the
15 * documentation and/or other materials provided with the distribution.
16 * 3. Neither the names of the copyright holders nor the names of its
17 * contributors may be used to endorse or promote products derived from
18 * this software without specific prior written permission.
20 * Alternatively, this software may be distributed under the terms of the
21 * GNU General Public License ("GPL") version 2 as published by the Free
22 * Software Foundation.
24 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
25 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
26 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
27 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
28 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
29 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
30 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
31 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
32 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
33 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
34 * POSSIBILITY OF SUCH DAMAGE.
44 #include "node_subscr.h"
45 #include "name_distr.h"
47 #include "name_table.h"
54 * Limit for deferred reception queue:
57 #define DEF_QUEUE_LIMIT 256u
63 #define STARTING_EVT 856384768 /* link processing trigger */
64 #define TRAFFIC_MSG_EVT 560815u /* traffic message received */
65 #define TIMEOUT_EVT 560817u /* link timer expired */
68 * The following two 'message types' are really just implementation
69 * data conveniently stored in the message header.
70 * They must not be considered part of the protocol.
76 * State value stored in 'exp_msg_count'
79 #define START_CHANGEOVER 100000u
82 * struct link_name - deconstructed link name
83 * @addr_local: network address of node at this end
84 * @if_local: name of interface at this end
85 * @addr_peer: network address of node at far end
86 * @if_peer: name of interface at far end
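 *
 * The four components combine into a full link name of the form
 * "z.c.n:interface-z.c.n:interface", e.g. (an illustrative name only)
 * "1.1.10:eth0-1.1.12:eth0".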
91 char if_local[TIPC_MAX_IF_NAME];
93 char if_peer[TIPC_MAX_IF_NAME];
98 /* LINK EVENT CODE IS NOT SUPPORTED AT PRESENT */
101 * struct link_event - link up/down event notification
107 void (*fcn)(u32, char *, int);
108 char name[TIPC_MAX_LINK_NAME];
113 static void link_handle_out_of_seq_msg(struct link *l_ptr,
114 struct sk_buff *buf);
115 static void link_recv_proto_msg(struct link *l_ptr, struct sk_buff *buf);
116 static int link_recv_changeover_msg(struct link **l_ptr, struct sk_buff **buf);
117 static void link_set_supervision_props(struct link *l_ptr, u32 tolerance);
118 static int link_send_sections_long(struct port *sender,
119 struct iovec const *msg_sect,
120 u32 num_sect, u32 destnode);
121 static void link_check_defragm_bufs(struct link *l_ptr);
122 static void link_state_event(struct link *l_ptr, u32 event);
123 static void link_reset_statistics(struct link *l_ptr);
124 static void link_print(struct link *l_ptr, struct print_buf *buf,
128 * Debugging code used by link routines only
130 * When debugging link problems on a system that has multiple links,
131 * the standard TIPC debugging routines may not be useful since they
132 * allow the output from multiple links to be intermixed. For this reason
133 * routines of the form "dbg_link_XXX()" have been created that will capture
134 * debug info into a link's personal print buffer, which can then be dumped
135 * into the TIPC system log (LOG) upon request.
137 * To enable per-link debugging, use LINK_LOG_BUF_SIZE to specify the size
138 * of the print buffer used by each link. If LINK_LOG_BUF_SIZE is set to 0,
139 * the dbg_link_XXX() routines simply send their output to the standard
140 * debug print buffer (DBG_OUTPUT), if it has been defined; this can be useful
141 * when there is only a single link in the system being debugged.
144 * - When enabled, LINK_LOG_BUF_SIZE should be set to at least 1000 (bytes)
145 * - "l_ptr" must be valid when using dbg_link_XXX() macros
148 #define LINK_LOG_BUF_SIZE 0
150 #define dbg_link(fmt, arg...) do {if (LINK_LOG_BUF_SIZE) tipc_printf(&l_ptr->print_buf, fmt, ## arg); } while(0)
151 #define dbg_link_msg(msg, txt) do {if (LINK_LOG_BUF_SIZE) msg_print(&l_ptr->print_buf, msg, txt); } while(0)
152 #define dbg_link_state(txt) do {if (LINK_LOG_BUF_SIZE) link_print(l_ptr, &l_ptr->print_buf, txt); } while(0)
153 #define dbg_link_dump() do { \
154 if (LINK_LOG_BUF_SIZE) { \
155 tipc_printf(LOG, "\n\nDumping link <%s>:\n", l_ptr->name); \
156 printbuf_move(LOG, &l_ptr->print_buf); \
160 static inline void dbg_print_link(struct link *l_ptr, const char *str)
163 link_print(l_ptr, DBG_OUTPUT, str);
166 static inline void dbg_print_buf_chain(struct sk_buff *root_buf)
169 struct sk_buff *buf = root_buf;
172 msg_dbg(buf_msg(buf), "In chain: ");
179 * Simple inlined link routines
182 static inline unsigned int align(unsigned int i)
184 return (i + 3) & ~3u;
187 static inline int link_working_working(struct link *l_ptr)
189 return (l_ptr->state == WORKING_WORKING);
192 static inline int link_working_unknown(struct link *l_ptr)
194 return (l_ptr->state == WORKING_UNKNOWN);
197 static inline int link_reset_unknown(struct link *l_ptr)
199 return (l_ptr->state == RESET_UNKNOWN);
202 static inline int link_reset_reset(struct link *l_ptr)
204 return (l_ptr->state == RESET_RESET);
207 static inline int link_blocked(struct link *l_ptr)
209 return (l_ptr->exp_msg_count || l_ptr->blocked);
212 static inline int link_congested(struct link *l_ptr)
214 return (l_ptr->out_queue_size >= l_ptr->queue_limit[0]);
217 static inline u32 link_max_pkt(struct link *l_ptr)
219 return l_ptr->max_pkt;
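
/*
 * link_init_max_pkt - set the link's initial MTU values from the bearer MTU
 *
 * The negotiation target is the bearer MTU rounded down to a multiple of 4
 * and capped at MAX_MSG_SIZE; the working value starts at no more than
 * MAX_PKT_DEFAULT and is probed upward toward the target once the link is up.
 */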
222 static inline void link_init_max_pkt(struct link *l_ptr)
226 max_pkt = (l_ptr->b_ptr->publ.mtu & ~3);
227 if (max_pkt > MAX_MSG_SIZE)
228 max_pkt = MAX_MSG_SIZE;
230 l_ptr->max_pkt_target = max_pkt;
231 if (l_ptr->max_pkt_target < MAX_PKT_DEFAULT)
232 l_ptr->max_pkt = l_ptr->max_pkt_target;
234 l_ptr->max_pkt = MAX_PKT_DEFAULT;
236 l_ptr->max_pkt_probes = 0;
239 static inline u32 link_next_sent(struct link *l_ptr)
242 return msg_seqno(buf_msg(l_ptr->next_out));
243 return mod(l_ptr->next_out_no);
246 static inline u32 link_last_sent(struct link *l_ptr)
248 return mod(link_next_sent(l_ptr) - 1);
252 * Simple non-inlined link routines (i.e. referenced outside this file)
255 int link_is_up(struct link *l_ptr)
259 return (link_working_working(l_ptr) || link_working_unknown(l_ptr));
262 int link_is_active(struct link *l_ptr)
264 return ((l_ptr->owner->active_links[0] == l_ptr) ||
265 (l_ptr->owner->active_links[1] == l_ptr));
269 * link_name_validate - validate & (optionally) deconstruct link name
270 * @name: ptr to link name string
271 * @name_parts: ptr to area for link name components (or NULL if not needed)
273 * Returns 1 if link name is valid, otherwise 0.
276 static int link_name_validate(const char *name, struct link_name *name_parts)
278 char name_copy[TIPC_MAX_LINK_NAME];
284 u32 z_local, c_local, n_local;
285 u32 z_peer, c_peer, n_peer;
289 /* copy link name & ensure length is OK */
291 name_copy[TIPC_MAX_LINK_NAME - 1] = 0;
292 /* need above in case non-Posix strncpy() doesn't pad with nulls */
293 strncpy(name_copy, name, TIPC_MAX_LINK_NAME);
294 if (name_copy[TIPC_MAX_LINK_NAME - 1] != 0)
297 /* ensure all component parts of link name are present */
299 addr_local = name_copy;
300 if ((if_local = strchr(addr_local, ':')) == NULL)
303 if ((addr_peer = strchr(if_local, '-')) == NULL)
306 if_local_len = addr_peer - if_local;
307 if ((if_peer = strchr(addr_peer, ':')) == NULL)
310 if_peer_len = strlen(if_peer) + 1;
312 /* validate component parts of link name */
314 if ((sscanf(addr_local, "%u.%u.%u%c",
315 &z_local, &c_local, &n_local, &dummy) != 3) ||
316 (sscanf(addr_peer, "%u.%u.%u%c",
317 &z_peer, &c_peer, &n_peer, &dummy) != 3) ||
318 (z_local > 255) || (c_local > 4095) || (n_local > 4095) ||
319 (z_peer > 255) || (c_peer > 4095) || (n_peer > 4095) ||
320 (if_local_len <= 1) || (if_local_len > TIPC_MAX_IF_NAME) ||
321 (if_peer_len <= 1) || (if_peer_len > TIPC_MAX_IF_NAME) ||
322 (strspn(if_local, tipc_alphabet) != (if_local_len - 1)) ||
323 (strspn(if_peer, tipc_alphabet) != (if_peer_len - 1)))
326 /* return link name components, if necessary */
329 name_parts->addr_local = tipc_addr(z_local, c_local, n_local);
330 strcpy(name_parts->if_local, if_local);
331 name_parts->addr_peer = tipc_addr(z_peer, c_peer, n_peer);
332 strcpy(name_parts->if_peer, if_peer);
338 * link_timeout - handle expiration of link timer
339 * @l_ptr: pointer to link
341 * This routine must not grab "net_lock" to avoid a potential deadlock conflict
342 * with link_delete(). (There is no risk that the node will be deleted by
343 * another thread because link_delete() always cancels the link timer before
344 * node_delete() is called.)
347 static void link_timeout(struct link *l_ptr)
349 node_lock(l_ptr->owner);
351 /* update counters used in statistical profiling of send traffic */
353 l_ptr->stats.accu_queue_sz += l_ptr->out_queue_size;
354 l_ptr->stats.queue_sz_counts++;
356 if (l_ptr->out_queue_size > l_ptr->stats.max_queue_sz)
357 l_ptr->stats.max_queue_sz = l_ptr->out_queue_size;
359 if (l_ptr->first_out) {
360 struct tipc_msg *msg = buf_msg(l_ptr->first_out);
361 u32 length = msg_size(msg);
363 if ((msg_user(msg) == MSG_FRAGMENTER)
364 && (msg_type(msg) == FIRST_FRAGMENT)) {
365 length = msg_size(msg_get_wrapped(msg));
368 l_ptr->stats.msg_lengths_total += length;
369 l_ptr->stats.msg_length_counts++;
371 l_ptr->stats.msg_length_profile[0]++;
372 else if (length <= 256)
373 l_ptr->stats.msg_length_profile[1]++;
374 else if (length <= 1024)
375 l_ptr->stats.msg_length_profile[2]++;
376 else if (length <= 4096)
377 l_ptr->stats.msg_length_profile[3]++;
378 else if (length <= 16384)
379 l_ptr->stats.msg_length_profile[4]++;
380 else if (length <= 32768)
381 l_ptr->stats.msg_length_profile[5]++;
383 l_ptr->stats.msg_length_profile[6]++;
387 /* do all other link processing performed on a periodic basis */
389 link_check_defragm_bufs(l_ptr);
391 link_state_event(l_ptr, TIMEOUT_EVT);
394 link_push_queue(l_ptr);
396 node_unlock(l_ptr->owner);
399 static inline void link_set_timer(struct link *l_ptr, u32 time)
401 k_start_timer(&l_ptr->timer, time);
405 * link_create - create a new link
406 * @b_ptr: pointer to associated bearer
407 * @peer: network address of node at other end of link
408 * @media_addr: media address to use when sending messages over link
410 * Returns pointer to link, or NULL if link creation failed.
413 struct link *link_create(struct bearer *b_ptr, const u32 peer,
414 const struct tipc_media_addr *media_addr)
417 struct tipc_msg *msg;
420 l_ptr = (struct link *)kmalloc(sizeof(*l_ptr), GFP_ATOMIC);
422 warn("Memory squeeze; Failed to create link\n");
425 memset(l_ptr, 0, sizeof(*l_ptr));
428 if_name = strchr(b_ptr->publ.name, ':') + 1;
429 sprintf(l_ptr->name, "%u.%u.%u:%s-%u.%u.%u:",
430 tipc_zone(tipc_own_addr), tipc_cluster(tipc_own_addr),
431 tipc_node(tipc_own_addr),
433 tipc_zone(peer), tipc_cluster(peer), tipc_node(peer));
434 /* note: peer i/f is appended to link name by reset/activate */
435 memcpy(&l_ptr->media_addr, media_addr, sizeof(*media_addr));
436 k_init_timer(&l_ptr->timer, (Handler)link_timeout, (unsigned long)l_ptr);
437 list_add_tail(&l_ptr->link_list, &b_ptr->links);
438 l_ptr->checkpoint = 1;
439 l_ptr->b_ptr = b_ptr;
440 link_set_supervision_props(l_ptr, b_ptr->media->tolerance);
441 l_ptr->state = RESET_UNKNOWN;
443 l_ptr->pmsg = (struct tipc_msg *)&l_ptr->proto_msg;
445 msg_init(msg, LINK_PROTOCOL, RESET_MSG, TIPC_OK, INT_H_SIZE, l_ptr->addr);
446 msg_set_size(msg, sizeof(l_ptr->proto_msg));
447 msg_set_session(msg, tipc_random);
448 msg_set_bearer_id(msg, b_ptr->identity);
449 strcpy((char *)msg_data(msg), if_name);
451 l_ptr->priority = b_ptr->priority;
452 link_set_queue_limits(l_ptr, b_ptr->media->window);
454 link_init_max_pkt(l_ptr);
456 l_ptr->next_out_no = 1;
457 INIT_LIST_HEAD(&l_ptr->waiting_ports);
459 link_reset_statistics(l_ptr);
461 l_ptr->owner = node_attach_link(l_ptr);
467 if (LINK_LOG_BUF_SIZE) {
468 char *pb = kmalloc(LINK_LOG_BUF_SIZE, GFP_ATOMIC);
472 warn("Memory squeeze; Failed to create link\n");
475 printbuf_init(&l_ptr->print_buf, pb, LINK_LOG_BUF_SIZE);
478 k_signal((Handler)link_start, (unsigned long)l_ptr);
480 dbg("link_create(): tolerance = %u,cont intv = %u, abort_limit = %u\n",
481 l_ptr->tolerance, l_ptr->continuity_interval, l_ptr->abort_limit);
487 * link_delete - delete a link
488 * @l_ptr: pointer to link
490 * Note: 'net_lock' is write_locked, bearer is locked.
491 * This routine must not grab the node lock until after link timer cancellation
492 * to avoid a potential deadlock situation.
495 void link_delete(struct link *l_ptr)
498 err("Attempt to delete non-existent link\n");
502 dbg("link_delete()\n");
504 k_cancel_timer(&l_ptr->timer);
506 node_lock(l_ptr->owner);
508 node_detach_link(l_ptr->owner, l_ptr);
510 list_del_init(&l_ptr->link_list);
511 if (LINK_LOG_BUF_SIZE)
512 kfree(l_ptr->print_buf.buf);
513 node_unlock(l_ptr->owner);
514 k_term_timer(&l_ptr->timer);
518 void link_start(struct link *l_ptr)
520 dbg("link_start %x\n", l_ptr);
521 link_state_event(l_ptr, STARTING_EVT);
525 * link_schedule_port - schedule port for deferred sending
526 * @l_ptr: pointer to link
527 * @origport: reference to sending port
528 * @sz: amount of data to be sent
530 * Schedules port for renewed sending of messages after link congestion
534 static int link_schedule_port(struct link *l_ptr, u32 origport, u32 sz)
538 spin_lock_bh(&port_list_lock);
539 p_ptr = port_lock(origport);
543 if (!list_empty(&p_ptr->wait_list))
545 p_ptr->congested_link = l_ptr;
546 p_ptr->publ.congested = 1;
547 p_ptr->waiting_pkts = 1 + ((sz - 1) / link_max_pkt(l_ptr));
548 list_add_tail(&p_ptr->wait_list, &l_ptr->waiting_ports);
549 l_ptr->stats.link_congs++;
553 spin_unlock_bh(&port_list_lock);
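
/*
 * link_wakeup_ports - wake up ports waiting for link congestion to abate
 *
 * Wakes waiting ports until the link's available send window is consumed,
 * or wakes every waiting port if 'all' is non-zero.
 */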
557 void link_wakeup_ports(struct link *l_ptr, int all)
560 struct port *temp_p_ptr;
561 int win = l_ptr->queue_limit[0] - l_ptr->out_queue_size;
567 if (!spin_trylock_bh(&port_list_lock))
569 if (link_congested(l_ptr))
571 list_for_each_entry_safe(p_ptr, temp_p_ptr, &l_ptr->waiting_ports,
575 list_del_init(&p_ptr->wait_list);
576 p_ptr->congested_link = 0;
577 assert(p_ptr->wakeup);
578 spin_lock_bh(p_ptr->publ.lock);
579 p_ptr->publ.congested = 0;
580 p_ptr->wakeup(&p_ptr->publ);
581 win -= p_ptr->waiting_pkts;
582 spin_unlock_bh(p_ptr->publ.lock);
586 spin_unlock_bh(&port_list_lock);
590 * link_release_outqueue - purge link's outbound message queue
591 * @l_ptr: pointer to link
594 static void link_release_outqueue(struct link *l_ptr)
596 struct sk_buff *buf = l_ptr->first_out;
597 struct sk_buff *next;
604 l_ptr->first_out = NULL;
605 l_ptr->out_queue_size = 0;
609 * link_reset_fragments - purge link's inbound message fragments queue
610 * @l_ptr: pointer to link
613 void link_reset_fragments(struct link *l_ptr)
615 struct sk_buff *buf = l_ptr->defragm_buf;
616 struct sk_buff *next;
623 l_ptr->defragm_buf = NULL;
627 * link_stop - purge all inbound and outbound messages associated with link
628 * @l_ptr: pointer to link
631 void link_stop(struct link *l_ptr)
634 struct sk_buff *next;
636 buf = l_ptr->oldest_deferred_in;
643 buf = l_ptr->first_out;
650 link_reset_fragments(l_ptr);
652 buf_discard(l_ptr->proto_msg_queue);
653 l_ptr->proto_msg_queue = NULL;
658 /* LINK EVENT CODE IS NOT SUPPORTED AT PRESENT */
660 static void link_recv_event(struct link_event *ev)
662 ev->fcn(ev->addr, ev->name, ev->up);
666 static void link_send_event(void (*fcn)(u32 a, char *n, int up),
667 struct link *l_ptr, int up)
669 struct link_event *ev;
671 ev = kmalloc(sizeof(*ev), GFP_ATOMIC);
673 warn("Link event allocation failure\n");
676 ev->addr = l_ptr->addr;
679 memcpy(ev->name, l_ptr->name, TIPC_MAX_LINK_NAME);
680 k_signal((Handler)link_recv_event, (unsigned long)ev);
685 #define link_send_event(fcn, l_ptr, up) do { } while (0)
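
/*
 * link_reset - reset link to RESET_UNKNOWN state
 *
 * Bumps the session number, restarts MTU negotiation, notifies the owning
 * node and bearer if the link was up, purges all send and reception queues,
 * and, if another link to the peer is still active, prepares for changeover.
 */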
689 void link_reset(struct link *l_ptr)
692 u32 prev_state = l_ptr->state;
693 u32 checkpoint = l_ptr->next_in_no;
695 msg_set_session(l_ptr->pmsg, msg_session(l_ptr->pmsg) + 1);
697 /* Link is down, accept any session: */
698 l_ptr->peer_session = 0;
700 /* Prepare for max packet size negotiation */
701 link_init_max_pkt(l_ptr);
703 l_ptr->state = RESET_UNKNOWN;
704 dbg_link_state("Resetting Link\n");
706 if ((prev_state == RESET_UNKNOWN) || (prev_state == RESET_RESET))
709 node_link_down(l_ptr->owner, l_ptr);
710 bearer_remove_dest(l_ptr->b_ptr, l_ptr->addr);
712 tipc_printf(CONS, "\nReset link <%s>\n", l_ptr->name);
715 if (node_has_active_links(l_ptr->owner) &&
716 l_ptr->owner->permit_changeover) {
717 l_ptr->reset_checkpoint = checkpoint;
718 l_ptr->exp_msg_count = START_CHANGEOVER;
721 /* Clean up all queues: */
723 link_release_outqueue(l_ptr);
724 buf_discard(l_ptr->proto_msg_queue);
725 l_ptr->proto_msg_queue = NULL;
726 buf = l_ptr->oldest_deferred_in;
728 struct sk_buff *next = buf->next;
732 if (!list_empty(&l_ptr->waiting_ports))
733 link_wakeup_ports(l_ptr, 1);
735 l_ptr->retransm_queue_head = 0;
736 l_ptr->retransm_queue_size = 0;
737 l_ptr->last_out = NULL;
738 l_ptr->first_out = NULL;
739 l_ptr->next_out = NULL;
740 l_ptr->unacked_window = 0;
741 l_ptr->checkpoint = 1;
742 l_ptr->next_out_no = 1;
743 l_ptr->deferred_inqueue_sz = 0;
744 l_ptr->oldest_deferred_in = NULL;
745 l_ptr->newest_deferred_in = NULL;
746 l_ptr->fsm_msg_cnt = 0;
747 l_ptr->stale_count = 0;
748 link_reset_statistics(l_ptr);
750 link_send_event(cfg_link_event, l_ptr, 0);
751 if (!in_own_cluster(l_ptr->addr))
752 link_send_event(disc_link_event, l_ptr, 0);
756 static void link_activate(struct link *l_ptr)
758 l_ptr->next_in_no = 1;
759 node_link_up(l_ptr->owner, l_ptr);
760 bearer_add_dest(l_ptr->b_ptr, l_ptr->addr);
761 link_send_event(cfg_link_event, l_ptr, 1);
762 if (!in_own_cluster(l_ptr->addr))
763 link_send_event(disc_link_event, l_ptr, 1);
767 * link_state_event - link finite state machine
768 * @l_ptr: pointer to link
769 * @event: state machine event to process
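 *
 * The link moves between four states (WORKING_WORKING, WORKING_UNKNOWN,
 * RESET_UNKNOWN and RESET_RESET), driven by traffic arrival, timer expiry,
 * and received ACTIVATE/RESET protocol messages.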
772 static void link_state_event(struct link *l_ptr, unsigned event)
775 u32 cont_intv = l_ptr->continuity_interval;
777 if (!l_ptr->started && (event != STARTING_EVT))
778 return; /* Not yet. */
780 if (link_blocked(l_ptr)) {
781 if (event == TIMEOUT_EVT) {
782 link_set_timer(l_ptr, cont_intv);
784 return; /* Changeover going on */
786 dbg_link("STATE_EV: <%s> ", l_ptr->name);
788 switch (l_ptr->state) {
789 case WORKING_WORKING:
792 case TRAFFIC_MSG_EVT:
800 if (l_ptr->next_in_no != l_ptr->checkpoint) {
801 l_ptr->checkpoint = l_ptr->next_in_no;
802 if (bclink_acks_missing(l_ptr->owner)) {
803 link_send_proto_msg(l_ptr, STATE_MSG,
805 l_ptr->fsm_msg_cnt++;
806 } else if (l_ptr->max_pkt < l_ptr->max_pkt_target) {
807 link_send_proto_msg(l_ptr, STATE_MSG,
809 l_ptr->fsm_msg_cnt++;
811 link_set_timer(l_ptr, cont_intv);
814 dbg_link(" -> WU\n");
815 l_ptr->state = WORKING_UNKNOWN;
816 l_ptr->fsm_msg_cnt = 0;
817 link_send_proto_msg(l_ptr, STATE_MSG, 1, 0, 0, 0, 0);
818 l_ptr->fsm_msg_cnt++;
819 link_set_timer(l_ptr, cont_intv / 4);
822 dbg_link("RES -> RR\n");
824 l_ptr->state = RESET_RESET;
825 l_ptr->fsm_msg_cnt = 0;
826 link_send_proto_msg(l_ptr, ACTIVATE_MSG, 0, 0, 0, 0, 0);
827 l_ptr->fsm_msg_cnt++;
828 link_set_timer(l_ptr, cont_intv);
831 err("Unknown link event %u in WW state\n", event);
834 case WORKING_UNKNOWN:
837 case TRAFFIC_MSG_EVT:
840 dbg_link("ACT -> WW\n");
841 l_ptr->state = WORKING_WORKING;
842 l_ptr->fsm_msg_cnt = 0;
843 link_set_timer(l_ptr, cont_intv);
846 dbg_link("RES -> RR\n");
848 l_ptr->state = RESET_RESET;
849 l_ptr->fsm_msg_cnt = 0;
850 link_send_proto_msg(l_ptr, ACTIVATE_MSG, 0, 0, 0, 0, 0);
851 l_ptr->fsm_msg_cnt++;
852 link_set_timer(l_ptr, cont_intv);
856 if (l_ptr->next_in_no != l_ptr->checkpoint) {
857 dbg_link("-> WW \n");
858 l_ptr->state = WORKING_WORKING;
859 l_ptr->fsm_msg_cnt = 0;
860 l_ptr->checkpoint = l_ptr->next_in_no;
861 if (bclink_acks_missing(l_ptr->owner)) {
862 link_send_proto_msg(l_ptr, STATE_MSG,
864 l_ptr->fsm_msg_cnt++;
866 link_set_timer(l_ptr, cont_intv);
867 } else if (l_ptr->fsm_msg_cnt < l_ptr->abort_limit) {
868 dbg_link("Probing %u/%u,timer = %u ms)\n",
869 l_ptr->fsm_msg_cnt, l_ptr->abort_limit,
871 link_send_proto_msg(l_ptr, STATE_MSG,
873 l_ptr->fsm_msg_cnt++;
874 link_set_timer(l_ptr, cont_intv / 4);
875 } else { /* Link has failed */
876 dbg_link("-> RU (%u probes unanswered)\n",
879 l_ptr->state = RESET_UNKNOWN;
880 l_ptr->fsm_msg_cnt = 0;
881 link_send_proto_msg(l_ptr, RESET_MSG,
883 l_ptr->fsm_msg_cnt++;
884 link_set_timer(l_ptr, cont_intv);
888 err("Unknown link event %u in WU state\n", event);
894 case TRAFFIC_MSG_EVT:
898 other = l_ptr->owner->active_links[0];
899 if (other && link_working_unknown(other)) {
903 dbg_link("ACT -> WW\n");
904 l_ptr->state = WORKING_WORKING;
905 l_ptr->fsm_msg_cnt = 0;
906 link_activate(l_ptr);
907 link_send_proto_msg(l_ptr, STATE_MSG, 1, 0, 0, 0, 0);
908 l_ptr->fsm_msg_cnt++;
909 link_set_timer(l_ptr, cont_intv);
913 dbg_link(" -> RR\n");
914 l_ptr->state = RESET_RESET;
915 l_ptr->fsm_msg_cnt = 0;
916 link_send_proto_msg(l_ptr, ACTIVATE_MSG, 1, 0, 0, 0, 0);
917 l_ptr->fsm_msg_cnt++;
918 link_set_timer(l_ptr, cont_intv);
926 link_send_proto_msg(l_ptr, RESET_MSG, 0, 0, 0, 0, 0);
927 l_ptr->fsm_msg_cnt++;
928 link_set_timer(l_ptr, cont_intv);
931 err("Unknown link event %u in RU state\n", event);
937 case TRAFFIC_MSG_EVT:
941 other = l_ptr->owner->active_links[0];
942 if (other && link_working_unknown(other)) {
946 dbg_link("ACT -> WW\n");
947 l_ptr->state = WORKING_WORKING;
948 l_ptr->fsm_msg_cnt = 0;
949 link_activate(l_ptr);
950 link_send_proto_msg(l_ptr, STATE_MSG, 1, 0, 0, 0, 0);
951 l_ptr->fsm_msg_cnt++;
952 link_set_timer(l_ptr, cont_intv);
959 link_send_proto_msg(l_ptr, ACTIVATE_MSG, 0, 0, 0, 0, 0);
960 l_ptr->fsm_msg_cnt++;
961 link_set_timer(l_ptr, cont_intv);
962 dbg_link("fsm_msg_cnt %u\n", l_ptr->fsm_msg_cnt);
965 err("Unknown link event %u in RR state\n", event);
969 err("Unknown link state %u/%u\n", l_ptr->state, event);
974 * link_bundle_buf(): Append contents of a buffer to
975 * the tail of an existing one.
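 *
 * Bundling only succeeds while the target buffer is still an open bundle
 * (MSG_BUNDLER/OPEN_MSG) with room left for the aligned message.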
978 static int link_bundle_buf(struct link *l_ptr,
979 struct sk_buff *bundler,
982 struct tipc_msg *bundler_msg = buf_msg(bundler);
983 struct tipc_msg *msg = buf_msg(buf);
984 u32 size = msg_size(msg);
985 u32 to_pos = align(msg_size(bundler_msg));
986 u32 rest = link_max_pkt(l_ptr) - to_pos;
988 if (msg_user(bundler_msg) != MSG_BUNDLER)
990 if (msg_type(bundler_msg) != OPEN_MSG)
992 if (rest < align(size))
995 skb_put(bundler, (to_pos - msg_size(bundler_msg)) + size);
996 memcpy(bundler->data + to_pos, buf->data, size);
997 msg_set_size(bundler_msg, to_pos + size);
998 msg_set_msgcnt(bundler_msg, msg_msgcnt(bundler_msg) + 1);
999 dbg("Packed msg # %u(%u octets) into pos %u in buf(#%u)\n",
1000 msg_msgcnt(bundler_msg), size, to_pos, msg_seqno(bundler_msg));
1001 msg_dbg(msg, "PACKD:");
1003 l_ptr->stats.sent_bundled++;
1007 static inline void link_add_to_outqueue(struct link *l_ptr,
1008 struct sk_buff *buf,
1009 struct tipc_msg *msg)
1011 u32 ack = mod(l_ptr->next_in_no - 1);
1012 u32 seqno = mod(l_ptr->next_out_no++);
1014 msg_set_word(msg, 2, ((ack << 16) | seqno));
1015 msg_set_bcast_ack(msg, l_ptr->owner->bclink.last_in);
1017 if (l_ptr->first_out) {
1018 l_ptr->last_out->next = buf;
1019 l_ptr->last_out = buf;
1021 l_ptr->first_out = l_ptr->last_out = buf;
1022 l_ptr->out_queue_size++;
1026 * link_send_buf() is the 'full path' for messages, called from
1027 * inside TIPC when the 'fast path' in tipc_send_buf
1028 * has failed, and from link_send()
1031 int link_send_buf(struct link *l_ptr, struct sk_buff *buf)
1033 struct tipc_msg *msg = buf_msg(buf);
1034 u32 size = msg_size(msg);
1035 u32 dsz = msg_data_sz(msg);
1036 u32 queue_size = l_ptr->out_queue_size;
1037 u32 imp = msg_tot_importance(msg);
1038 u32 queue_limit = l_ptr->queue_limit[imp];
1039 u32 max_packet = link_max_pkt(l_ptr);
1041 msg_set_prevnode(msg, tipc_own_addr); /* If routed message */
1043 /* Match msg importance against queue limits: */
1045 if (unlikely(queue_size >= queue_limit)) {
1046 if (imp <= TIPC_CRITICAL_IMPORTANCE) {
1047 return link_schedule_port(l_ptr, msg_origport(msg),
1050 msg_dbg(msg, "TIPC: Congestion, throwing away\n");
1052 if (imp > CONN_MANAGER) {
1053 warn("Resetting <%s>, send queue full", l_ptr->name);
1059 /* Fragmentation needed ? */
1061 if (size > max_packet)
1062 return link_send_long_buf(l_ptr, buf);
1064 /* Packet can be queued or sent: */
1066 if (queue_size > l_ptr->stats.max_queue_sz)
1067 l_ptr->stats.max_queue_sz = queue_size;
1069 if (likely(!bearer_congested(l_ptr->b_ptr, l_ptr) &&
1070 !link_congested(l_ptr))) {
1071 link_add_to_outqueue(l_ptr, buf, msg);
1073 if (likely(bearer_send(l_ptr->b_ptr, buf, &l_ptr->media_addr))) {
1074 l_ptr->unacked_window = 0;
1076 bearer_schedule(l_ptr->b_ptr, l_ptr);
1077 l_ptr->stats.bearer_congs++;
1078 l_ptr->next_out = buf;
1082 /* Congestion: can message be bundled ?: */
1084 if ((msg_user(msg) != CHANGEOVER_PROTOCOL) &&
1085 (msg_user(msg) != MSG_FRAGMENTER)) {
1087 /* Try adding message to an existing bundle */
1089 if (l_ptr->next_out &&
1090 link_bundle_buf(l_ptr, l_ptr->last_out, buf)) {
1091 bearer_resolve_congestion(l_ptr->b_ptr, l_ptr);
1095 /* Try creating a new bundle */
1097 if (size <= max_packet * 2 / 3) {
1098 struct sk_buff *bundler = buf_acquire(max_packet);
1099 struct tipc_msg bundler_hdr;
1102 msg_init(&bundler_hdr, MSG_BUNDLER, OPEN_MSG,
1103 TIPC_OK, INT_H_SIZE, l_ptr->addr);
1104 memcpy(bundler->data, (unchar *)&bundler_hdr,
1106 skb_trim(bundler, INT_H_SIZE);
1107 link_bundle_buf(l_ptr, bundler, buf);
1110 l_ptr->stats.sent_bundles++;
1114 if (!l_ptr->next_out)
1115 l_ptr->next_out = buf;
1116 link_add_to_outqueue(l_ptr, buf, msg);
1117 bearer_resolve_congestion(l_ptr->b_ptr, l_ptr);
1122 * link_send(): same as link_send_buf(), but the link to use has
1123 * not been selected yet, and the owner node is not locked.
1124 * Called by TIPC internal users, e.g. the name distributor
1127 int link_send(struct sk_buff *buf, u32 dest, u32 selector)
1131 int res = -ELINKCONG;
1133 read_lock_bh(&net_lock);
1134 n_ptr = node_select(dest, selector);
1137 l_ptr = n_ptr->active_links[selector & 1];
1138 dbg("link_send: found link %x for dest %x\n", l_ptr, dest);
1140 res = link_send_buf(l_ptr, buf);
1144 dbg("Attempt to send msg to unknown node:\n");
1145 msg_dbg(buf_msg(buf),">>>");
1148 read_unlock_bh(&net_lock);
1153 * link_send_buf_fast: Entry for data messages where the
1154 * destination link is known and the header is complete,
1155 * including total message length. Very time critical.
1156 * Link is locked. Returns user data length.
1159 static inline int link_send_buf_fast(struct link *l_ptr, struct sk_buff *buf,
1162 struct tipc_msg *msg = buf_msg(buf);
1163 int res = msg_data_sz(msg);
1165 if (likely(!link_congested(l_ptr))) {
1166 if (likely(msg_size(msg) <= link_max_pkt(l_ptr))) {
1167 if (likely(list_empty(&l_ptr->b_ptr->cong_links))) {
1168 link_add_to_outqueue(l_ptr, buf, msg);
1169 if (likely(bearer_send(l_ptr->b_ptr, buf,
1170 &l_ptr->media_addr))) {
1171 l_ptr->unacked_window = 0;
1172 msg_dbg(msg,"SENT_FAST:");
1175 dbg("failed sent fast...\n");
1176 bearer_schedule(l_ptr->b_ptr, l_ptr);
1177 l_ptr->stats.bearer_congs++;
1178 l_ptr->next_out = buf;
1183 *used_max_pkt = link_max_pkt(l_ptr);
1185 return link_send_buf(l_ptr, buf); /* All other cases */
1189 * tipc_send_buf_fast: Entry for data messages where the
1190 * destination node is known and the header is complete,
1191 * including total message length.
1192 * Returns user data length.
1194 int tipc_send_buf_fast(struct sk_buff *buf, u32 destnode)
1199 u32 selector = msg_origport(buf_msg(buf)) & 1;
1202 if (destnode == tipc_own_addr)
1203 return port_recv_msg(buf);
1205 read_lock_bh(&net_lock);
1206 n_ptr = node_select(destnode, selector);
1207 if (likely(n_ptr)) {
1209 l_ptr = n_ptr->active_links[selector];
1210 dbg("send_fast: buf %x selected %x, destnode = %x\n",
1211 buf, l_ptr, destnode);
1212 if (likely(l_ptr)) {
1213 res = link_send_buf_fast(l_ptr, buf, &dummy);
1215 read_unlock_bh(&net_lock);
1220 read_unlock_bh(&net_lock);
1221 res = msg_data_sz(buf_msg(buf));
1222 tipc_reject_msg(buf, TIPC_ERR_NO_NODE);
1228 * link_send_sections_fast: Entry for messages where the
1229 * destination node is known and the header is complete,
1230 * except for total message length.
1231 * Returns user data length or errno.
1233 int link_send_sections_fast(struct port *sender,
1234 struct iovec const *msg_sect,
1238 struct tipc_msg *hdr = &sender->publ.phdr;
1240 struct sk_buff *buf;
1243 u32 selector = msg_origport(hdr) & 1;
1245 assert(destaddr != tipc_own_addr);
1249 * Try building message using port's max_pkt hint.
1250 * (Must not hold any locks while building message.)
1253 res = msg_build(hdr, msg_sect, num_sect, sender->max_pkt,
1254 !sender->user_port, &buf);
1256 read_lock_bh(&net_lock);
1257 node = node_select(destaddr, selector);
1260 l_ptr = node->active_links[selector];
1261 if (likely(l_ptr)) {
1263 res = link_send_buf_fast(l_ptr, buf,
1265 if (unlikely(res < 0))
1269 read_unlock_bh(&net_lock);
1273 /* Exit if build request was invalid */
1275 if (unlikely(res < 0))
1278 /* Exit if link (or bearer) is congested */
1280 if (link_congested(l_ptr) ||
1281 !list_empty(&l_ptr->b_ptr->cong_links)) {
1282 res = link_schedule_port(l_ptr,
1283 sender->publ.ref, res);
1288 * Message size exceeds max_pkt hint; update hint,
1289 * then re-try fast path or fragment the message
1292 sender->max_pkt = link_max_pkt(l_ptr);
1294 read_unlock_bh(&net_lock);
1297 if ((msg_hdr_sz(hdr) + res) <= sender->max_pkt)
1300 return link_send_sections_long(sender, msg_sect,
1301 num_sect, destaddr);
1305 read_unlock_bh(&net_lock);
1307 /* Couldn't find a link to the destination node */
1310 return tipc_reject_msg(buf, TIPC_ERR_NO_NODE);
1312 return port_reject_sections(sender, hdr, msg_sect, num_sect,
1318 * link_send_sections_long(): Entry for long messages where the
1319 * destination node is known and the header is complete,
1320 * including total message length.
1321 * Link and bearer congestion status have been checked to be ok,
1322 * and are ignored if they change.
1324 * Note that fragments do not use the full link MTU so that they won't have
1325 * to undergo refragmentation if link changeover causes them to be sent
1326 * over another link with an additional tunnel header added as prefix.
1327 * (Refragmentation will still occur if the other link has a smaller MTU.)
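 * Each fragment thus reserves INT_H_SIZE bytes for its own fragment header,
 * plus another INT_H_SIZE bytes of headroom for a possible tunnel header.
 *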
1329 * Returns user data length or errno.
1331 static int link_send_sections_long(struct port *sender,
1332 struct iovec const *msg_sect,
1338 struct tipc_msg *hdr = &sender->publ.phdr;
1339 u32 dsz = msg_data_sz(hdr);
1340 u32 max_pkt, fragm_sz, rest;
1341 struct tipc_msg fragm_hdr;
1342 struct sk_buff *buf, *buf_chain, *prev;
1343 u32 fragm_crs, fragm_rest, hsz, sect_rest;
1344 const unchar *sect_crs;
1350 max_pkt = sender->max_pkt - INT_H_SIZE;
1351 /* leave room for tunnel header in case of link changeover */
1352 fragm_sz = max_pkt - INT_H_SIZE;
1353 /* leave room for fragmentation header in each fragment */
1361 /* Prepare reusable fragment header: */
1363 msg_dbg(hdr, ">FRAGMENTING>");
1364 msg_init(&fragm_hdr, MSG_FRAGMENTER, FIRST_FRAGMENT,
1365 TIPC_OK, INT_H_SIZE, msg_destnode(hdr));
1366 msg_set_link_selector(&fragm_hdr, sender->publ.ref);
1367 msg_set_size(&fragm_hdr, max_pkt);
1368 msg_set_fragm_no(&fragm_hdr, 1);
1370 /* Prepare header of first fragment: */
1372 buf_chain = buf = buf_acquire(max_pkt);
1376 memcpy(buf->data, (unchar *)&fragm_hdr, INT_H_SIZE);
1377 hsz = msg_hdr_sz(hdr);
1378 memcpy(buf->data + INT_H_SIZE, (unchar *)hdr, hsz);
1379 msg_dbg(buf_msg(buf), ">BUILD>");
1381 /* Chop up message: */
1383 fragm_crs = INT_H_SIZE + hsz;
1384 fragm_rest = fragm_sz - hsz;
1386 do { /* For all sections */
1390 sect_rest = msg_sect[++curr_sect].iov_len;
1391 sect_crs = (const unchar *)msg_sect[curr_sect].iov_base;
1394 if (sect_rest < fragm_rest)
1399 if (likely(!sender->user_port)) {
1400 if (copy_from_user(buf->data + fragm_crs, sect_crs, sz)) {
1402 for (; buf_chain; buf_chain = buf) {
1403 buf = buf_chain->next;
1404 buf_discard(buf_chain);
1409 memcpy(buf->data + fragm_crs, sect_crs, sz);
1417 if (!fragm_rest && rest) {
1419 /* Initiate new fragment: */
1420 if (rest <= fragm_sz) {
1422 msg_set_type(&fragm_hdr,LAST_FRAGMENT);
1424 msg_set_type(&fragm_hdr, FRAGMENT);
1426 msg_set_size(&fragm_hdr, fragm_sz + INT_H_SIZE);
1427 msg_set_fragm_no(&fragm_hdr, ++fragm_no);
1429 buf = buf_acquire(fragm_sz + INT_H_SIZE);
1435 memcpy(buf->data, (unchar *)&fragm_hdr, INT_H_SIZE);
1436 fragm_crs = INT_H_SIZE;
1437 fragm_rest = fragm_sz;
1438 msg_dbg(buf_msg(buf)," >BUILD>");
1444 * Now we have a buffer chain. Select a link and check
1445 * that packet size is still OK
1447 node = node_select(destaddr, sender->publ.ref & 1);
1450 l_ptr = node->active_links[sender->publ.ref & 1];
1455 if (link_max_pkt(l_ptr) < max_pkt) {
1456 sender->max_pkt = link_max_pkt(l_ptr);
1458 for (; buf_chain; buf_chain = buf) {
1459 buf = buf_chain->next;
1460 buf_discard(buf_chain);
1466 for (; buf_chain; buf_chain = buf) {
1467 buf = buf_chain->next;
1468 buf_discard(buf_chain);
1470 return port_reject_sections(sender, hdr, msg_sect, num_sect,
1474 /* Append whole chain to send queue: */
1477 l_ptr->long_msg_seq_no = mod(l_ptr->long_msg_seq_no + 1);
1478 if (!l_ptr->next_out)
1479 l_ptr->next_out = buf_chain;
1480 l_ptr->stats.sent_fragmented++;
1482 struct sk_buff *next = buf->next;
1483 struct tipc_msg *msg = buf_msg(buf);
1485 l_ptr->stats.sent_fragments++;
1486 msg_set_long_msgno(msg, l_ptr->long_msg_seq_no);
1487 link_add_to_outqueue(l_ptr, buf, msg);
1488 msg_dbg(msg, ">ADD>");
1492 /* Send it, if possible: */
1494 link_push_queue(l_ptr);
1500 * link_push_packet: Push one unsent packet to the media
1502 u32 link_push_packet(struct link *l_ptr)
1504 struct sk_buff *buf = l_ptr->first_out;
1505 u32 r_q_size = l_ptr->retransm_queue_size;
1506 u32 r_q_head = l_ptr->retransm_queue_head;
1508 /* Step to position where retransmission failed, if any, */
1509 /* consider that buffers may have been released in meantime */
1511 if (r_q_size && buf) {
1512 u32 last = lesser(mod(r_q_head + r_q_size),
1513 link_last_sent(l_ptr));
1514 u32 first = msg_seqno(buf_msg(buf));
1516 while (buf && less(first, r_q_head)) {
1517 first = mod(first + 1);
1520 l_ptr->retransm_queue_head = r_q_head = first;
1521 l_ptr->retransm_queue_size = r_q_size = mod(last - first);
1524 /* Continue retransmission now, if there is anything: */
1526 if (r_q_size && buf && !skb_cloned(buf)) {
1527 msg_set_ack(buf_msg(buf), mod(l_ptr->next_in_no - 1));
1528 msg_set_bcast_ack(buf_msg(buf), l_ptr->owner->bclink.last_in);
1529 if (bearer_send(l_ptr->b_ptr, buf, &l_ptr->media_addr)) {
1530 msg_dbg(buf_msg(buf), ">DEF-RETR>");
1531 l_ptr->retransm_queue_head = mod(++r_q_head);
1532 l_ptr->retransm_queue_size = --r_q_size;
1533 l_ptr->stats.retransmitted++;
1536 l_ptr->stats.bearer_congs++;
1537 msg_dbg(buf_msg(buf), "|>DEF-RETR>");
1542 /* Send deferred protocol message, if any: */
1544 buf = l_ptr->proto_msg_queue;
1546 msg_set_ack(buf_msg(buf), mod(l_ptr->next_in_no - 1));
1547 msg_set_bcast_ack(buf_msg(buf),l_ptr->owner->bclink.last_in);
1548 if (bearer_send(l_ptr->b_ptr, buf, &l_ptr->media_addr)) {
1549 msg_dbg(buf_msg(buf), ">DEF-PROT>");
1550 l_ptr->unacked_window = 0;
1552 l_ptr->proto_msg_queue = 0;
1555 msg_dbg(buf_msg(buf), "|>DEF-PROT>");
1556 l_ptr->stats.bearer_congs++;
1561 /* Send one deferred data message, if send window not full: */
1563 buf = l_ptr->next_out;
1565 struct tipc_msg *msg = buf_msg(buf);
1566 u32 next = msg_seqno(msg);
1567 u32 first = msg_seqno(buf_msg(l_ptr->first_out));
1569 if (mod(next - first) < l_ptr->queue_limit[0]) {
1570 msg_set_ack(msg, mod(l_ptr->next_in_no - 1));
1571 msg_set_bcast_ack(msg, l_ptr->owner->bclink.last_in);
1572 if (bearer_send(l_ptr->b_ptr, buf, &l_ptr->media_addr)) {
1573 if (msg_user(msg) == MSG_BUNDLER)
1574 msg_set_type(msg, CLOSED_MSG);
1575 msg_dbg(msg, ">PUSH-DATA>");
1576 l_ptr->next_out = buf->next;
1579 msg_dbg(msg, "|PUSH-DATA|");
1580 l_ptr->stats.bearer_congs++;
1585 return PUSH_FINISHED;
1589 * link_push_queue(): push out the unsent messages of a link where
1590 * congestion has abated. Node is locked.
1592 void link_push_queue(struct link *l_ptr)
1596 if (bearer_congested(l_ptr->b_ptr, l_ptr))
1600 res = link_push_packet(l_ptr);
1602 while (res == TIPC_OK);
1603 if (res == PUSH_FAILED)
1604 bearer_schedule(l_ptr->b_ptr, l_ptr);
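
/*
 * link_retransmit - retransmit up to 'retransmits' packets, starting at 'buf'
 *
 * If the bearer is congested the request is only recorded (via
 * retransm_queue_head/retransm_queue_size) for link_push_packet() to act on
 * later; repeated retransmission of the same packet eventually resets the link.
 */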
1607 void link_retransmit(struct link *l_ptr, struct sk_buff *buf,
1610 struct tipc_msg *msg;
1612 dbg("Retransmitting %u in link %x\n", retransmits, l_ptr);
1614 if (bearer_congested(l_ptr->b_ptr, l_ptr) && buf && !skb_cloned(buf)) {
1615 msg_dbg(buf_msg(buf), ">NO_RETR->BCONG>");
1616 dbg_print_link(l_ptr, " ");
1617 l_ptr->retransm_queue_head = msg_seqno(buf_msg(buf));
1618 l_ptr->retransm_queue_size = retransmits;
1621 while (retransmits && (buf != l_ptr->next_out) && buf && !skb_cloned(buf)) {
1623 msg_set_ack(msg, mod(l_ptr->next_in_no - 1));
1624 msg_set_bcast_ack(msg, l_ptr->owner->bclink.last_in);
1625 if (bearer_send(l_ptr->b_ptr, buf, &l_ptr->media_addr)) {
1626 /* Catch if retransmissions fail repeatedly: */
1627 if (l_ptr->last_retransmitted == msg_seqno(msg)) {
1628 if (++l_ptr->stale_count > 100) {
1629 msg_print(CONS, buf_msg(buf), ">RETR>");
1630 info("...Retransmitted %u times\n",
1631 l_ptr->stale_count);
1632 link_print(l_ptr, CONS, "Resetting Link\n");
1637 l_ptr->stale_count = 0;
1639 l_ptr->last_retransmitted = msg_seqno(msg);
1641 msg_dbg(buf_msg(buf), ">RETR>");
1644 l_ptr->stats.retransmitted++;
1646 bearer_schedule(l_ptr->b_ptr, l_ptr);
1647 l_ptr->stats.bearer_congs++;
1648 l_ptr->retransm_queue_head = msg_seqno(buf_msg(buf));
1649 l_ptr->retransm_queue_size = retransmits;
1653 l_ptr->retransm_queue_head = l_ptr->retransm_queue_size = 0;
1657 * link_recv_non_seq: Receive packets which are outside
1658 * the link sequence flow
1661 static void link_recv_non_seq(struct sk_buff *buf)
1663 struct tipc_msg *msg = buf_msg(buf);
1665 if (msg_user(msg) == LINK_CONFIG)
1668 bclink_recv_pkt(buf);
1672 * link_insert_deferred_queue - insert deferred messages back into receive chain
1675 static struct sk_buff *link_insert_deferred_queue(struct link *l_ptr,
1676 struct sk_buff *buf)
1680 if (l_ptr->oldest_deferred_in == NULL)
1683 seq_no = msg_seqno(buf_msg(l_ptr->oldest_deferred_in));
1684 if (seq_no == mod(l_ptr->next_in_no)) {
1685 l_ptr->newest_deferred_in->next = buf;
1686 buf = l_ptr->oldest_deferred_in;
1687 l_ptr->oldest_deferred_in = NULL;
1688 l_ptr->deferred_inqueue_sz = 0;
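
/*
 * tipc_recv_msg - process chain of message buffers arriving from a bearer
 *
 * Releases packets acknowledged by the peer, dispatches in-sequence messages
 * to their users, defers out-of-sequence ones, and passes link protocol
 * messages to link_recv_proto_msg().
 */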
1693 void tipc_recv_msg(struct sk_buff *head, struct tipc_bearer *tb_ptr)
1695 read_lock_bh(&net_lock);
1697 struct bearer *b_ptr;
1700 struct sk_buff *crs;
1701 struct sk_buff *buf = head;
1702 struct tipc_msg *msg = buf_msg(buf);
1703 u32 seq_no = msg_seqno(msg);
1704 u32 ackd = msg_ack(msg);
1708 b_ptr = (struct bearer *)tb_ptr;
1709 TIPC_SKB_CB(buf)->handle = b_ptr;
1712 if (unlikely(msg_version(msg) != TIPC_VERSION))
1715 if (msg_user(msg) != LINK_PROTOCOL)
1717 msg_dbg(msg,"<REC<");
1719 if (unlikely(msg_non_seq(msg))) {
1720 link_recv_non_seq(buf);
1723 n_ptr = node_find(msg_prevnode(msg));
1724 if (unlikely(!n_ptr))
1728 l_ptr = n_ptr->links[b_ptr->identity];
1729 if (unlikely(!l_ptr)) {
1734 * Release acked messages
1736 if (less(n_ptr->bclink.acked, msg_bcast_ack(msg))) {
1737 if (node_is_up(n_ptr) && n_ptr->bclink.supported)
1738 bclink_acknowledge(n_ptr, msg_bcast_ack(msg));
1741 crs = l_ptr->first_out;
1742 while ((crs != l_ptr->next_out) &&
1743 less_eq(msg_seqno(buf_msg(crs)), ackd)) {
1744 struct sk_buff *next = crs->next;
1751 l_ptr->first_out = crs;
1752 l_ptr->out_queue_size -= released;
1754 if (unlikely(l_ptr->next_out))
1755 link_push_queue(l_ptr);
1756 if (unlikely(!list_empty(&l_ptr->waiting_ports)))
1757 link_wakeup_ports(l_ptr, 0);
1758 if (unlikely(++l_ptr->unacked_window >= TIPC_MIN_LINK_WIN)) {
1759 l_ptr->stats.sent_acks++;
1760 link_send_proto_msg(l_ptr, STATE_MSG, 0, 0, 0, 0, 0);
1764 if (likely(link_working_working(l_ptr))) {
1765 if (likely(seq_no == mod(l_ptr->next_in_no))) {
1766 l_ptr->next_in_no++;
1767 if (unlikely(l_ptr->oldest_deferred_in))
1768 head = link_insert_deferred_queue(l_ptr,
1770 if (likely(msg_is_dest(msg, tipc_own_addr))) {
1772 if (likely(msg_isdata(msg))) {
1777 switch (msg_user(msg)) {
1779 l_ptr->stats.recv_bundles++;
1780 l_ptr->stats.recv_bundled +=
1783 link_recv_bundle(buf);
1785 case ROUTE_DISTRIBUTOR:
1787 cluster_recv_routing_table(buf);
1789 case NAME_DISTRIBUTOR:
1795 port_recv_proto_msg(buf);
1797 case MSG_FRAGMENTER:
1798 l_ptr->stats.recv_fragments++;
1799 if (link_recv_fragment(
1800 &l_ptr->defragm_buf,
1802 l_ptr->stats.recv_fragmented++;
1806 case CHANGEOVER_PROTOCOL:
1807 type = msg_type(msg);
1808 if (link_recv_changeover_msg(
1811 seq_no = msg_seqno(msg);
1812 TIPC_SKB_CB(buf)->handle
1814 if (type == ORIGINAL_MSG)
1816 goto protocol_check;
1825 link_handle_out_of_seq_msg(l_ptr, buf);
1826 head = link_insert_deferred_queue(l_ptr, head);
1831 if (msg_user(msg) == LINK_PROTOCOL) {
1832 link_recv_proto_msg(l_ptr, buf);
1833 head = link_insert_deferred_queue(l_ptr, head);
1837 msg_dbg(msg,"NSEQ<REC<");
1838 link_state_event(l_ptr, TRAFFIC_MSG_EVT);
1840 if (link_working_working(l_ptr)) {
1841 /* Re-insert in front of queue */
1842 msg_dbg(msg,"RECV-REINS:");
1852 read_unlock_bh(&net_lock);
1856 * link_defer_pkt(): Sort a received out-of-sequence packet
1857 * into the deferred reception queue.
1858 * Returns the increase of the queue length, i.e. 0 or 1
1861 u32 link_defer_pkt(struct sk_buff **head,
1862 struct sk_buff **tail,
1863 struct sk_buff *buf)
1865 struct sk_buff *prev = 0;
1866 struct sk_buff *crs = *head;
1867 u32 seq_no = msg_seqno(buf_msg(buf));
1872 if (*head == NULL) {
1873 *head = *tail = buf;
1878 if (less(msg_seqno(buf_msg(*tail)), seq_no)) {
1879 (*tail)->next = buf;
1884 /* Scan through queue and sort it in */
1886 struct tipc_msg *msg = buf_msg(crs);
1888 if (less(seq_no, msg_seqno(msg))) {
1896 if (seq_no == msg_seqno(msg)) {
1904 /* Message is a duplicate of an existing message */
1911 * link_handle_out_of_seq_msg - handle arrival of out-of-sequence packet
1914 static void link_handle_out_of_seq_msg(struct link *l_ptr,
1915 struct sk_buff *buf)
1917 u32 seq_no = msg_seqno(buf_msg(buf));
1919 if (likely(msg_user(buf_msg(buf)) == LINK_PROTOCOL)) {
1920 link_recv_proto_msg(l_ptr, buf);
1924 dbg("rx OOS msg: seq_no %u, expecting %u (%u)\n",
1925 seq_no, mod(l_ptr->next_in_no), l_ptr->next_in_no);
1927 /* Record OOS packet arrival (force mismatch on next timeout) */
1929 l_ptr->checkpoint--;
1932 * Discard packet if a duplicate; otherwise add it to deferred queue
1933 * and notify peer of gap as per protocol specification
1936 if (less(seq_no, mod(l_ptr->next_in_no))) {
1937 l_ptr->stats.duplicates++;
1942 if (link_defer_pkt(&l_ptr->oldest_deferred_in,
1943 &l_ptr->newest_deferred_in, buf)) {
1944 l_ptr->deferred_inqueue_sz++;
1945 l_ptr->stats.deferred_recv++;
1946 if ((l_ptr->deferred_inqueue_sz % 16) == 1)
1947 link_send_proto_msg(l_ptr, STATE_MSG, 0, 0, 0, 0, 0);
1949 l_ptr->stats.duplicates++;
1953 * Send protocol message to the other endpoint.
1955 void link_send_proto_msg(struct link *l_ptr, u32 msg_typ, int probe_msg,
1956 u32 gap, u32 tolerance, u32 priority, u32 ack_mtu)
1958 struct sk_buff *buf = 0;
1959 struct tipc_msg *msg = l_ptr->pmsg;
1960 u32 msg_size = sizeof(l_ptr->proto_msg);
1962 if (link_blocked(l_ptr))
1964 msg_set_type(msg, msg_typ);
1965 msg_set_net_plane(msg, l_ptr->b_ptr->net_plane);
1966 msg_set_bcast_ack(msg, mod(l_ptr->owner->bclink.last_in));
1967 msg_set_last_bcast(msg, bclink_get_last_sent());
1969 if (msg_typ == STATE_MSG) {
1970 u32 next_sent = mod(l_ptr->next_out_no);
1972 if (!link_is_up(l_ptr))
1974 if (l_ptr->next_out)
1975 next_sent = msg_seqno(buf_msg(l_ptr->next_out));
1976 msg_set_next_sent(msg, next_sent);
1977 if (l_ptr->oldest_deferred_in) {
1978 u32 rec = msg_seqno(buf_msg(l_ptr->oldest_deferred_in));
1979 gap = mod(rec - mod(l_ptr->next_in_no));
1981 msg_set_seq_gap(msg, gap);
1983 l_ptr->stats.sent_nacks++;
1984 msg_set_link_tolerance(msg, tolerance);
1985 msg_set_linkprio(msg, priority);
1986 msg_set_max_pkt(msg, ack_mtu);
1987 msg_set_ack(msg, mod(l_ptr->next_in_no - 1));
1988 msg_set_probe(msg, probe_msg != 0);
1990 u32 mtu = l_ptr->max_pkt;
1992 if ((mtu < l_ptr->max_pkt_target) &&
1993 link_working_working(l_ptr) &&
1994 l_ptr->fsm_msg_cnt) {
1995 msg_size = (mtu + (l_ptr->max_pkt_target - mtu)/2 + 2) & ~3;
1996 if (l_ptr->max_pkt_probes == 10) {
1997 l_ptr->max_pkt_target = (msg_size - 4);
1998 l_ptr->max_pkt_probes = 0;
1999 msg_size = (mtu + (l_ptr->max_pkt_target - mtu)/2 + 2) & ~3;
2001 l_ptr->max_pkt_probes++;
2004 l_ptr->stats.sent_probes++;
2006 l_ptr->stats.sent_states++;
2007 } else { /* RESET_MSG or ACTIVATE_MSG */
2008 msg_set_ack(msg, mod(l_ptr->reset_checkpoint - 1));
2009 msg_set_seq_gap(msg, 0);
2010 msg_set_next_sent(msg, 1);
2011 msg_set_link_tolerance(msg, l_ptr->tolerance);
2012 msg_set_linkprio(msg, l_ptr->priority);
2013 msg_set_max_pkt(msg, l_ptr->max_pkt_target);
2016 if (node_has_redundant_links(l_ptr->owner)) {
2017 msg_set_redundant_link(msg);
2019 msg_clear_redundant_link(msg);
2021 msg_set_linkprio(msg, l_ptr->priority);
2023 /* Ensure sequence number will not fit : */
2025 msg_set_seqno(msg, mod(l_ptr->next_out_no + (0xffff/2)));
2029 if (bearer_congested(l_ptr->b_ptr, l_ptr)) {
2030 if (!l_ptr->proto_msg_queue) {
2031 l_ptr->proto_msg_queue =
2032 buf_acquire(sizeof(l_ptr->proto_msg));
2034 buf = l_ptr->proto_msg_queue;
2037 memcpy(buf->data, (unchar *)msg, sizeof(l_ptr->proto_msg));
2040 msg_set_timestamp(msg, jiffies_to_msecs(jiffies));
2042 /* Message can be sent */
2046 buf = buf_acquire(msg_size);
2050 memcpy(buf->data, (unchar *)msg, sizeof(l_ptr->proto_msg));
2051 msg_set_size(buf_msg(buf), msg_size);
2053 if (bearer_send(l_ptr->b_ptr, buf, &l_ptr->media_addr)) {
2054 l_ptr->unacked_window = 0;
2059 /* New congestion */
2060 bearer_schedule(l_ptr->b_ptr, l_ptr);
2061 l_ptr->proto_msg_queue = buf;
2062 l_ptr->stats.bearer_congs++;
2066 * Receive protocol message.
2067 * Note that network plane id propagates through the network, and may
2068 * change at any time. The node with the lowest address rules.
2071 static void link_recv_proto_msg(struct link *l_ptr, struct sk_buff *buf)
2077 struct tipc_msg *msg = buf_msg(buf);
2079 dbg("AT(%u):", jiffies_to_msecs(jiffies));
2081 if (link_blocked(l_ptr))
2084 /* record unnumbered packet arrival (force mismatch on next timeout) */
2086 l_ptr->checkpoint--;
2088 if (l_ptr->b_ptr->net_plane != msg_net_plane(msg))
2089 if (tipc_own_addr > msg_prevnode(msg))
2090 l_ptr->b_ptr->net_plane = msg_net_plane(msg);
2092 l_ptr->owner->permit_changeover = msg_redundant_link(msg);
2094 switch (msg_type(msg)) {
2097 if (!link_working_unknown(l_ptr) && l_ptr->peer_session) {
2098 if (msg_session(msg) == l_ptr->peer_session) {
2099 dbg("Duplicate RESET: %u<->%u\n",
2100 msg_session(msg), l_ptr->peer_session);
2101 break; /* duplicate: ignore */
2106 /* Update link settings according to other endpoint's values */
2108 strcpy((strrchr(l_ptr->name, ':') + 1), (char *)msg_data(msg));
2110 if ((msg_tol = msg_link_tolerance(msg)) &&
2111 (msg_tol > l_ptr->tolerance))
2112 link_set_supervision_props(l_ptr, msg_tol);
2114 if (msg_linkprio(msg) > l_ptr->priority)
2115 l_ptr->priority = msg_linkprio(msg);
2117 max_pkt_info = msg_max_pkt(msg);
2119 if (max_pkt_info < l_ptr->max_pkt_target)
2120 l_ptr->max_pkt_target = max_pkt_info;
2121 if (l_ptr->max_pkt > l_ptr->max_pkt_target)
2122 l_ptr->max_pkt = l_ptr->max_pkt_target;
2124 l_ptr->max_pkt = l_ptr->max_pkt_target;
2126 l_ptr->owner->bclink.supported = (max_pkt_info != 0);
2128 link_state_event(l_ptr, msg_type(msg));
2130 l_ptr->peer_session = msg_session(msg);
2131 l_ptr->peer_bearer_id = msg_bearer_id(msg);
2133 /* Synchronize broadcast sequence numbers */
2134 if (!node_has_redundant_links(l_ptr->owner)) {
2135 l_ptr->owner->bclink.last_in = mod(msg_last_bcast(msg));
2140 if ((msg_tol = msg_link_tolerance(msg)))
2141 link_set_supervision_props(l_ptr, msg_tol);
2143 if (msg_linkprio(msg) &&
2144 (msg_linkprio(msg) != l_ptr->priority)) {
2145 warn("Changing prio <%s>: %u->%u\n",
2146 l_ptr->name, l_ptr->priority, msg_linkprio(msg));
2147 l_ptr->priority = msg_linkprio(msg);
2148 link_reset(l_ptr); /* Enforce change to take effect */
2151 link_state_event(l_ptr, TRAFFIC_MSG_EVT);
2152 l_ptr->stats.recv_states++;
2153 if (link_reset_unknown(l_ptr))
2156 if (less_eq(mod(l_ptr->next_in_no), msg_next_sent(msg))) {
2157 rec_gap = mod(msg_next_sent(msg) -
2158 mod(l_ptr->next_in_no));
2161 max_pkt_ack = msg_max_pkt(msg);
2162 if (max_pkt_ack > l_ptr->max_pkt) {
2163 dbg("Link <%s> updated MTU %u -> %u\n",
2164 l_ptr->name, l_ptr->max_pkt, max_pkt_ack);
2165 l_ptr->max_pkt = max_pkt_ack;
2166 l_ptr->max_pkt_probes = 0;
2170 if (msg_probe(msg)) {
2171 l_ptr->stats.recv_probes++;
2172 if (msg_size(msg) > sizeof(l_ptr->proto_msg)) {
2173 max_pkt_ack = msg_size(msg);
2177 /* Protocol message before retransmits, reduce loss risk */
2179 bclink_check_gap(l_ptr->owner, msg_last_bcast(msg));
2181 if (rec_gap || (msg_probe(msg))) {
2182 link_send_proto_msg(l_ptr, STATE_MSG,
2183 0, rec_gap, 0, 0, max_pkt_ack);
2185 if (msg_seq_gap(msg)) {
2186 msg_dbg(msg, "With Gap:");
2187 l_ptr->stats.recv_nacks++;
2188 link_retransmit(l_ptr, l_ptr->first_out,
2193 msg_dbg(buf_msg(buf), "<DISCARDING UNKNOWN<");
2201 * link_tunnel(): Send one message via a link belonging to
2202 * another bearer. Owner node is locked.
2204 void link_tunnel(struct link *l_ptr,
2205 struct tipc_msg *tunnel_hdr,
2206 struct tipc_msg *msg,
2209 struct link *tunnel;
2210 struct sk_buff *buf;
2211 u32 length = msg_size(msg);
2213 tunnel = l_ptr->owner->active_links[selector & 1];
2214 if (!link_is_up(tunnel))
2216 msg_set_size(tunnel_hdr, length + INT_H_SIZE);
2217 buf = buf_acquire(length + INT_H_SIZE);
2220 memcpy(buf->data, (unchar *)tunnel_hdr, INT_H_SIZE);
2221 memcpy(buf->data + INT_H_SIZE, (unchar *)msg, length);
2222 dbg("%c->%c:", l_ptr->b_ptr->net_plane, tunnel->b_ptr->net_plane);
2223 msg_dbg(buf_msg(buf), ">SEND>");
2225 link_send_buf(tunnel, buf);
2231 * link_changeover(): Send whole message queue via the remaining link.
2232 * Owner node is locked.
2235 void link_changeover(struct link *l_ptr)
2237 u32 msgcount = l_ptr->out_queue_size;
2238 struct sk_buff *crs = l_ptr->first_out;
2239 struct link *tunnel = l_ptr->owner->active_links[0];
2240 int split_bundles = node_has_redundant_links(l_ptr->owner);
2241 struct tipc_msg tunnel_hdr;
2246 if (!l_ptr->owner->permit_changeover)
2249 msg_init(&tunnel_hdr, CHANGEOVER_PROTOCOL,
2250 ORIGINAL_MSG, TIPC_OK, INT_H_SIZE, l_ptr->addr);
2251 msg_set_bearer_id(&tunnel_hdr, l_ptr->peer_bearer_id);
2252 msg_set_msgcnt(&tunnel_hdr, msgcount);
2253 if (!l_ptr->first_out) {
2254 struct sk_buff *buf;
2257 buf = buf_acquire(INT_H_SIZE);
2259 memcpy(buf->data, (unchar *)&tunnel_hdr, INT_H_SIZE);
2260 msg_set_size(&tunnel_hdr, INT_H_SIZE);
2261 dbg("%c->%c:", l_ptr->b_ptr->net_plane,
2262 tunnel->b_ptr->net_plane);
2263 msg_dbg(&tunnel_hdr, "EMPTY>SEND>");
2264 link_send_buf(tunnel, buf);
2266 warn("Memory squeeze; link changeover failed\n");
2271 struct tipc_msg *msg = buf_msg(crs);
2273 if ((msg_user(msg) == MSG_BUNDLER) && split_bundles) {
2274 u32 msgcount = msg_msgcnt(msg);
2275 struct tipc_msg *m = msg_get_wrapped(msg);
2276 unchar* pos = (unchar*)m;
2278 while (msgcount--) {
2279 msg_set_seqno(m,msg_seqno(msg));
2280 link_tunnel(l_ptr, &tunnel_hdr, m,
2281 msg_link_selector(m));
2282 pos += align(msg_size(m));
2283 m = (struct tipc_msg *)pos;
2286 link_tunnel(l_ptr, &tunnel_hdr, msg,
2287 msg_link_selector(msg));
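
/*
 * link_send_duplicate - tunnel a copy of each queued message via 'tunnel'
 *
 * Sends DUPLICATE_MSG changeover copies of this link's entire send queue
 * over the remaining link; the receiving end drops duplicates of messages
 * it has already accepted.
 */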
2293 void link_send_duplicate(struct link *l_ptr, struct link *tunnel)
2295 struct sk_buff *iter;
2296 struct tipc_msg tunnel_hdr;
2298 msg_init(&tunnel_hdr, CHANGEOVER_PROTOCOL,
2299 DUPLICATE_MSG, TIPC_OK, INT_H_SIZE, l_ptr->addr);
2300 msg_set_msgcnt(&tunnel_hdr, l_ptr->out_queue_size);
2301 msg_set_bearer_id(&tunnel_hdr, l_ptr->peer_bearer_id);
2302 iter = l_ptr->first_out;
2304 struct sk_buff *outbuf;
2305 struct tipc_msg *msg = buf_msg(iter);
2306 u32 length = msg_size(msg);
2308 if (msg_user(msg) == MSG_BUNDLER)
2309 msg_set_type(msg, CLOSED_MSG);
2310 msg_set_ack(msg, mod(l_ptr->next_in_no - 1)); /* Update */
2311 msg_set_bcast_ack(msg, l_ptr->owner->bclink.last_in);
2312 msg_set_size(&tunnel_hdr, length + INT_H_SIZE);
2313 outbuf = buf_acquire(length + INT_H_SIZE);
2314 if (outbuf == NULL) {
2315 warn("Memory squeeze; buffer duplication failed\n");
2318 memcpy(outbuf->data, (unchar *)&tunnel_hdr, INT_H_SIZE);
2319 memcpy(outbuf->data + INT_H_SIZE, iter->data, length);
2320 dbg("%c->%c:", l_ptr->b_ptr->net_plane,
2321 tunnel->b_ptr->net_plane);
2322 msg_dbg(buf_msg(outbuf), ">SEND>");
2323 link_send_buf(tunnel, outbuf);
2324 if (!link_is_up(l_ptr))
2333 * buf_extract - extracts embedded TIPC message from another message
2334 * @skb: encapsulating message buffer
2335 * @from_pos: offset to extract from
2337 * Returns a new message buffer containing an embedded message. The
2338 * encapsulating message itself is left unchanged.
2341 static struct sk_buff *buf_extract(struct sk_buff *skb, u32 from_pos)
2343 struct tipc_msg *msg = (struct tipc_msg *)(skb->data + from_pos);
2344 u32 size = msg_size(msg);
2347 eb = buf_acquire(size);
2349 memcpy(eb->data, (unchar *)msg, size);
2354 * link_recv_changeover_msg(): Receive tunneled packet sent
2355 * via other link. Node is locked. Return extracted buffer.
2358 static int link_recv_changeover_msg(struct link **l_ptr,
2359 struct sk_buff **buf)
2361 struct sk_buff *tunnel_buf = *buf;
2362 struct link *dest_link;
2363 struct tipc_msg *msg;
2364 struct tipc_msg *tunnel_msg = buf_msg(tunnel_buf);
2365 u32 msg_typ = msg_type(tunnel_msg);
2366 u32 msg_count = msg_msgcnt(tunnel_msg);
2368 dest_link = (*l_ptr)->owner->links[msg_bearer_id(tunnel_msg)];
2369 assert(dest_link != *l_ptr);
2371 msg_dbg(tunnel_msg, "NOLINK/<REC<");
2374 dbg("%c<-%c:", dest_link->b_ptr->net_plane,
2375 (*l_ptr)->b_ptr->net_plane);
2377 msg = msg_get_wrapped(tunnel_msg);
2379 if (msg_typ == DUPLICATE_MSG) {
2380 if (less(msg_seqno(msg), mod(dest_link->next_in_no))) {
2381 msg_dbg(tunnel_msg, "DROP/<REC<");
2384 *buf = buf_extract(tunnel_buf,INT_H_SIZE);
2386 warn("Memory squeeze; failed to extract msg\n");
2389 msg_dbg(tunnel_msg, "TNL<REC<");
2390 buf_discard(tunnel_buf);
2394 /* First original message ?: */
2396 if (link_is_up(dest_link)) {
2397 msg_dbg(tunnel_msg, "UP/FIRST/<REC<");
2398 link_reset(dest_link);
2399 dest_link->exp_msg_count = msg_count;
2402 } else if (dest_link->exp_msg_count == START_CHANGEOVER) {
2403 msg_dbg(tunnel_msg, "BLK/FIRST/<REC<");
2404 dest_link->exp_msg_count = msg_count;
2409 /* Receive original message */
2411 if (dest_link->exp_msg_count == 0) {
2412 msg_dbg(tunnel_msg, "OVERDUE/DROP/<REC<");
2413 dbg_print_link(dest_link, "LINK:");
2416 dest_link->exp_msg_count--;
2417 if (less(msg_seqno(msg), dest_link->reset_checkpoint)) {
2418 msg_dbg(tunnel_msg, "DROP/DUPL/<REC<");
2421 *buf = buf_extract(tunnel_buf, INT_H_SIZE);
2423 msg_dbg(tunnel_msg, "TNL<REC<");
2424 buf_discard(tunnel_buf);
2427 warn("Memory squeeze; dropped incoming msg\n");
2432 buf_discard(tunnel_buf);
2437 * Bundler functionality:
2439 void link_recv_bundle(struct sk_buff *buf)
2441 u32 msgcount = msg_msgcnt(buf_msg(buf));
2442 u32 pos = INT_H_SIZE;
2443 struct sk_buff *obuf;
2445 msg_dbg(buf_msg(buf), "<BNDL<: ");
2446 while (msgcount--) {
2447 obuf = buf_extract(buf, pos);
2449 char addr_string[16];
2451 warn("Buffer allocation failure;\n");
2452 warn(" incoming message(s) from %s lost\n",
2453 addr_string_fill(addr_string,
2454 msg_orignode(buf_msg(buf))));
2457 pos += align(msg_size(buf_msg(obuf)));
2458 msg_dbg(buf_msg(obuf), " /");
2459 net_route_msg(obuf);
2465 * Fragmentation/defragmentation:
2470 * link_send_long_buf: Entry for buffers needing fragmentation.
2471 * The buffer is complete, including total message length.
2472 * Returns user data length.
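 *
 * Rough sketch of the arithmetic below (illustrative only): each fragment
 * carries fragm_sz = link_max_pkt(l_ptr) - INT_H_SIZE bytes of payload, so a
 * buffer of insize bytes is sent as roughly
 * (insize + fragm_sz - 1) / fragm_sz fragments, the final one marked
 * LAST_FRAGMENT.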
2474 int link_send_long_buf(struct link *l_ptr, struct sk_buff *buf)
2476 struct tipc_msg *inmsg = buf_msg(buf);
2477 struct tipc_msg fragm_hdr;
2478 u32 insize = msg_size(inmsg);
2479 u32 dsz = msg_data_sz(inmsg);
2480 unchar *crs = buf->data;
2482 u32 pack_sz = link_max_pkt(l_ptr);
2483 u32 fragm_sz = pack_sz - INT_H_SIZE;
2485 u32 destaddr = msg_destnode(inmsg);
2487 if (msg_short(inmsg))
2488 destaddr = l_ptr->addr;
2490 if (msg_routed(inmsg))
2491 msg_set_prevnode(inmsg, tipc_own_addr);
2493 /* Prepare reusable fragment header: */
2495 msg_init(&fragm_hdr, MSG_FRAGMENTER, FIRST_FRAGMENT,
2496 TIPC_OK, INT_H_SIZE, destaddr);
2497 msg_set_link_selector(&fragm_hdr, msg_link_selector(inmsg));
2498 msg_set_long_msgno(&fragm_hdr, mod(l_ptr->long_msg_seq_no++));
2499 msg_set_fragm_no(&fragm_hdr, fragm_no);
2500 l_ptr->stats.sent_fragmented++;
2502 /* Chop up message: */
2505 struct sk_buff *fragm;
2507 if (rest <= fragm_sz) {
2509 msg_set_type(&fragm_hdr, LAST_FRAGMENT);
2511 fragm = buf_acquire(fragm_sz + INT_H_SIZE);
2512 if (fragm == NULL) {
2513 warn("Memory squeeze; failed to fragment msg\n");
2517 msg_set_size(&fragm_hdr, fragm_sz + INT_H_SIZE);
2518 memcpy(fragm->data, (unchar *)&fragm_hdr, INT_H_SIZE);
2519 memcpy(fragm->data + INT_H_SIZE, crs, fragm_sz);
2521 /* Send queued messages first, if any: */
2523 l_ptr->stats.sent_fragments++;
2524 link_send_buf(l_ptr, fragm);
2525 if (!link_is_up(l_ptr))
2527 msg_set_fragm_no(&fragm_hdr, ++fragm_no);
2530 msg_set_type(&fragm_hdr, FRAGMENT);
2538 * A pending message being re-assembled must store certain values
2539 * to handle subsequent fragments correctly. The following functions
2540 * help store these values in unused, available fields in the
2541 * pending message. This makes dynamic memory allocation unnecessary.
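 *
 * Field reuse in the pending (reassembly) buffer's header, as implemented by
 * the helpers below:
 *   seqno       - long message sequence number
 *   ack         - fragment size
 *   bcast_ack   - number of fragments still expected
 *   reroute cnt - stale-fragment timer counter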
2544 static inline u32 get_long_msg_seqno(struct sk_buff *buf)
2546 return msg_seqno(buf_msg(buf));
2549 static inline void set_long_msg_seqno(struct sk_buff *buf, u32 seqno)
2551 msg_set_seqno(buf_msg(buf), seqno);
2554 static inline u32 get_fragm_size(struct sk_buff *buf)
2556 return msg_ack(buf_msg(buf));
2559 static inline void set_fragm_size(struct sk_buff *buf, u32 sz)
2561 msg_set_ack(buf_msg(buf), sz);
2564 static inline u32 get_expected_frags(struct sk_buff *buf)
2566 return msg_bcast_ack(buf_msg(buf));
2569 static inline void set_expected_frags(struct sk_buff *buf, u32 exp)
2571 msg_set_bcast_ack(buf_msg(buf), exp);
2574 static inline u32 get_timer_cnt(struct sk_buff *buf)
2576 return msg_reroute_cnt(buf_msg(buf));
2579 static inline void incr_timer_cnt(struct sk_buff *buf)
2581 msg_incr_reroute_cnt(buf_msg(buf));
2585 * link_recv_fragment(): Called with node lock on. Returns 1, with *fb and
2586 * *m updated to reference the reassembled buffer, once the message is complete.
2588 int link_recv_fragment(struct sk_buff **pending, struct sk_buff **fb,
2589 struct tipc_msg **m)
2591 struct sk_buff *prev = 0;
2592 struct sk_buff *fbuf = *fb;
2593 struct tipc_msg *fragm = buf_msg(fbuf);
2594 struct sk_buff *pbuf = *pending;
2595 u32 long_msg_seq_no = msg_long_msgno(fragm);
2598 msg_dbg(fragm, "FRG<REC<");
2600 /* Is there an incomplete message waiting for this fragment? */
2602 while (pbuf && ((msg_seqno(buf_msg(pbuf)) != long_msg_seq_no)
2603 || (msg_orignode(fragm) != msg_orignode(buf_msg(pbuf))))) {
2608 if (!pbuf && (msg_type(fragm) == FIRST_FRAGMENT)) {
2609 struct tipc_msg *imsg = (struct tipc_msg *)msg_data(fragm);
2610 u32 msg_sz = msg_size(imsg);
2611 u32 fragm_sz = msg_data_sz(fragm);
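/* Round up: a trailing partial fragment still counts as one fragment */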
2612 u32 exp_fragm_cnt = msg_sz/fragm_sz + !!(msg_sz % fragm_sz);
2613 u32 max = TIPC_MAX_USER_MSG_SIZE + LONG_H_SIZE;
2614 if (msg_type(imsg) == TIPC_MCAST_MSG)
2615 max = TIPC_MAX_USER_MSG_SIZE + MCAST_H_SIZE;
2616 if (msg_size(imsg) > max) {
2617 msg_dbg(fragm, "<REC<Oversized: ");
2621 pbuf = buf_acquire(msg_size(imsg));
2623 pbuf->next = *pending;
2625 memcpy(pbuf->data, (unchar *)imsg, msg_data_sz(fragm));
2627 /* Prepare buffer for subsequent fragments. */
2629 set_long_msg_seqno(pbuf, long_msg_seq_no);
2630 set_fragm_size(pbuf, fragm_sz);
2631 set_expected_frags(pbuf, exp_fragm_cnt - 1);
2633 warn("Memory squeeze; got no defragmenting buffer\n");
2637 } else if (pbuf && (msg_type(fragm) != FIRST_FRAGMENT)) {
2638 u32 dsz = msg_data_sz(fragm);
2639 u32 fsz = get_fragm_size(pbuf);
2640 u32 crs = ((msg_fragm_no(fragm) - 1) * fsz);
2641 u32 exp_frags = get_expected_frags(pbuf) - 1;
2642 memcpy(pbuf->data + crs, msg_data(fragm), dsz);
2645 /* Is message complete? */
2647 if (exp_frags == 0) {
2649 prev->next = pbuf->next;
2651 *pending = pbuf->next;
2652 msg_reset_reroute_cnt(buf_msg(pbuf));
2657 set_expected_frags(pbuf, exp_frags);
2660 dbg(" Discarding orphan fragment %x\n", fbuf);
2661 msg_dbg(fragm, "ORPHAN:");
2662 dbg("Pending long buffers:\n");
2663 dbg_print_buf_chain(*pending);
2669 * link_check_defragm_bufs - flush stale incoming message fragments
2670 * @l_ptr: pointer to link
2673 static void link_check_defragm_bufs(struct link *l_ptr)
2675 struct sk_buff *prev = 0;
2676 struct sk_buff *next = 0;
2677 struct sk_buff *buf = l_ptr->defragm_buf;
2681 if (!link_working_working(l_ptr))
2684 u32 cnt = get_timer_cnt(buf);
2688 incr_timer_cnt(buf);
2691 dbg(" Discarding incomplete long buffer\n");
2692 msg_dbg(buf_msg(buf), "LONG:");
2693 dbg_print_link(l_ptr, "curr:");
2694 dbg("Pending long buffers:\n");
2695 dbg_print_buf_chain(l_ptr->defragm_buf);
2697 prev->next = buf->next;
2699 l_ptr->defragm_buf = buf->next;
2708 static void link_set_supervision_props(struct link *l_ptr, u32 tolerance)
2710 l_ptr->tolerance = tolerance;
2711 l_ptr->continuity_interval =
2712 ((tolerance / 4) > 500) ? 500 : tolerance / 4;
2713 l_ptr->abort_limit = tolerance / (l_ptr->continuity_interval / 4);
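
/*
 * Worked example for link_set_supervision_props() above (illustrative
 * figures only): tolerance = 1500 ms gives continuity_interval = 375 ms
 * (1500/4, capped at 500 ms) and abort_limit = 1500 / (375/4) = 16, i.e.
 * on the order of 16 unanswered continuity checks before the link gives up.
 */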
2717 void link_set_queue_limits(struct link *l_ptr, u32 window)
2719 /* Data messages from this node, including FIRST_FRAGM */
2720 l_ptr->queue_limit[DATA_LOW] = window;
2721 l_ptr->queue_limit[DATA_MEDIUM] = (window / 3) * 4;
2722 l_ptr->queue_limit[DATA_HIGH] = (window / 3) * 5;
2723 l_ptr->queue_limit[DATA_CRITICAL] = (window / 3) * 6;
2724 /* Transiting data messages, including FIRST_FRAGM */
2725 l_ptr->queue_limit[DATA_LOW + 4] = 300;
2726 l_ptr->queue_limit[DATA_MEDIUM + 4] = 600;
2727 l_ptr->queue_limit[DATA_HIGH + 4] = 900;
2728 l_ptr->queue_limit[DATA_CRITICAL + 4] = 1200;
2729 l_ptr->queue_limit[CONN_MANAGER] = 1200;
2730 l_ptr->queue_limit[ROUTE_DISTRIBUTOR] = 1200;
2731 l_ptr->queue_limit[CHANGEOVER_PROTOCOL] = 2500;
2732 l_ptr->queue_limit[NAME_DISTRIBUTOR] = 3000;
2733 /* FRAGMENT and LAST_FRAGMENT packets */
2734 l_ptr->queue_limit[MSG_FRAGMENTER] = 4000;
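
/*
 * Worked example for link_set_queue_limits() above (illustrative value
 * only): window = 50 gives limits of 50/64/80/96 packets for
 * DATA_LOW/DATA_MEDIUM/DATA_HIGH/DATA_CRITICAL traffic originating at this
 * node (integer division: 50/3 = 16, then *4, *5, *6); the limits for
 * transit and protocol messages keep the fixed values assigned above.
 */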
2738 * link_find_link - locate link by name
2739 * @name: ptr to link name string
2740 * @node: ptr to area to be filled with ptr to associated node
2742 * Caller must hold 'net_lock' to ensure node and bearer are not deleted;
2743 * this also prevents link deletion.
2745 * Returns pointer to link (or 0 if invalid link name).
2748 static struct link *link_find_link(const char *name, struct node **node)
2750 struct link_name link_name_parts;
2751 struct bearer *b_ptr;
2754 if (!link_name_validate(name, &link_name_parts))
2757 b_ptr = bearer_find_interface(link_name_parts.if_local);
2761 *node = node_find(link_name_parts.addr_peer);
2765 l_ptr = (*node)->links[b_ptr->identity];
2766 if (!l_ptr || strcmp(l_ptr->name, name))
2772 struct sk_buff *link_cmd_config(const void *req_tlv_area, int req_tlv_space,
2775 struct tipc_link_config *args;
2781 if (!TLV_CHECK(req_tlv_area, req_tlv_space, TIPC_TLV_LINK_CONFIG))
2782 return cfg_reply_error_string(TIPC_CFG_TLV_ERROR);
2784 args = (struct tipc_link_config *)TLV_DATA(req_tlv_area);
2785 new_value = ntohl(args->value);
2787 if (!strcmp(args->name, bc_link_name)) {
2788 if ((cmd == TIPC_CMD_SET_LINK_WINDOW) &&
2789 (bclink_set_queue_limits(new_value) == 0))
2790 return cfg_reply_none();
2791 return cfg_reply_error_string(TIPC_CFG_NOT_SUPPORTED
2792 " (cannot change setting on broadcast link)");
2795 read_lock_bh(&net_lock);
2796 l_ptr = link_find_link(args->name, &node);
2798 read_unlock_bh(&net_lock);
2799 return cfg_reply_error_string("link not found");
2805 case TIPC_CMD_SET_LINK_TOL:
2806 if ((new_value >= TIPC_MIN_LINK_TOL) &&
2807 (new_value <= TIPC_MAX_LINK_TOL)) {
2808 link_set_supervision_props(l_ptr, new_value);
2809 link_send_proto_msg(l_ptr, STATE_MSG,
2810 0, 0, new_value, 0, 0);
2814 case TIPC_CMD_SET_LINK_PRI:
2815 if (new_value < TIPC_NUM_LINK_PRI) {
2816 l_ptr->priority = new_value;
2817 link_send_proto_msg(l_ptr, STATE_MSG,
2818 0, 0, 0, new_value, 0);
2822 case TIPC_CMD_SET_LINK_WINDOW:
2823 if ((new_value >= TIPC_MIN_LINK_WIN) &&
2824 (new_value <= TIPC_MAX_LINK_WIN)) {
2825 link_set_queue_limits(l_ptr, new_value);
2832 read_unlock_bh(&net_lock);
2834 return cfg_reply_error_string("cannot change link setting");
2836 return cfg_reply_none();
2840 * link_reset_statistics - reset link statistics
2841 * @l_ptr: pointer to link
2844 static void link_reset_statistics(struct link *l_ptr)
2846 memset(&l_ptr->stats, 0, sizeof(l_ptr->stats));
2847 l_ptr->stats.sent_info = l_ptr->next_out_no;
2848 l_ptr->stats.recv_info = l_ptr->next_in_no;
2851 struct sk_buff *link_cmd_reset_stats(const void *req_tlv_area, int req_tlv_space)
2857 if (!TLV_CHECK(req_tlv_area, req_tlv_space, TIPC_TLV_LINK_NAME))
2858 return cfg_reply_error_string(TIPC_CFG_TLV_ERROR);
2860 link_name = (char *)TLV_DATA(req_tlv_area);
2861 if (!strcmp(link_name, bc_link_name)) {
2862 if (bclink_reset_stats())
2863 return cfg_reply_error_string("link not found");
2864 return cfg_reply_none();
2867 read_lock_bh(&net_lock);
2868 l_ptr = link_find_link(link_name, &node);
2870 read_unlock_bh(&net_lock);
2871 return cfg_reply_error_string("link not found");
2875 link_reset_statistics(l_ptr);
2877 read_unlock_bh(&net_lock);
2878 return cfg_reply_none();
2882 * percent - convert count to a percentage of total (rounded to the nearest integer)
2885 static u32 percent(u32 count, u32 total)
2887 return (count * 100 + (total / 2)) / total;
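
/*
 * For instance, percent(1, 3) = (100 + 1) / 3 = 33 and
 * percent(2, 3) = (200 + 1) / 3 = 67, i.e. results are rounded to the
 * nearest whole percent.
 */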
2891 * link_stats - print link statistics
2893 * @buf: print buffer area
2894 * @buf_size: size of print buffer area
2896 * Returns length of print buffer data string (or 0 if error)
2899 static int link_stats(const char *name, char *buf, const u32 buf_size)
2901 struct print_buf pb;
2905 u32 profile_total = 0;
2907 if (!strcmp(name, bc_link_name))
2908 return bclink_stats(buf, buf_size);
2910 printbuf_init(&pb, buf, buf_size);
2912 read_lock_bh(&net_lock);
2913 l_ptr = link_find_link(name, &node);
2915 read_unlock_bh(&net_lock);
2920 if (link_is_active(l_ptr))
2922 else if (link_is_up(l_ptr))
2926 tipc_printf(&pb, "Link <%s>\n"
2927 " %s MTU:%u Priority:%u Tolerance:%u ms"
2928 " Window:%u packets\n",
2929 l_ptr->name, status, link_max_pkt(l_ptr),
2930 l_ptr->priority, l_ptr->tolerance, l_ptr->queue_limit[0]);
2931 tipc_printf(&pb, " RX packets:%u fragments:%u/%u bundles:%u/%u\n",
2932 l_ptr->next_in_no - l_ptr->stats.recv_info,
2933 l_ptr->stats.recv_fragments,
2934 l_ptr->stats.recv_fragmented,
2935 l_ptr->stats.recv_bundles,
2936 l_ptr->stats.recv_bundled);
2937 tipc_printf(&pb, " TX packets:%u fragments:%u/%u bundles:%u/%u\n",
2938 l_ptr->next_out_no - l_ptr->stats.sent_info,
2939 l_ptr->stats.sent_fragments,
2940 l_ptr->stats.sent_fragmented,
2941 l_ptr->stats.sent_bundles,
2942 l_ptr->stats.sent_bundled);
2943 profile_total = l_ptr->stats.msg_length_counts;
2946 tipc_printf(&pb, " TX profile sample:%u packets average:%u octets\n"
2947 " 0-64:%u%% -256:%u%% -1024:%u%% -4096:%u%% "
2948 "-16384:%u%% -32768:%u%% -66000:%u%%\n",
2949 l_ptr->stats.msg_length_counts,
2950 l_ptr->stats.msg_lengths_total / profile_total,
2951 percent(l_ptr->stats.msg_length_profile[0], profile_total),
2952 percent(l_ptr->stats.msg_length_profile[1], profile_total),
2953 percent(l_ptr->stats.msg_length_profile[2], profile_total),
2954 percent(l_ptr->stats.msg_length_profile[3], profile_total),
2955 percent(l_ptr->stats.msg_length_profile[4], profile_total),
2956 percent(l_ptr->stats.msg_length_profile[5], profile_total),
2957 percent(l_ptr->stats.msg_length_profile[6], profile_total));
2958 tipc_printf(&pb, " RX states:%u probes:%u naks:%u defs:%u dups:%u\n",
2959 l_ptr->stats.recv_states,
2960 l_ptr->stats.recv_probes,
2961 l_ptr->stats.recv_nacks,
2962 l_ptr->stats.deferred_recv,
2963 l_ptr->stats.duplicates);
2964 tipc_printf(&pb, " TX states:%u probes:%u naks:%u acks:%u dups:%u\n",
2965 l_ptr->stats.sent_states,
2966 l_ptr->stats.sent_probes,
2967 l_ptr->stats.sent_nacks,
2968 l_ptr->stats.sent_acks,
2969 l_ptr->stats.retransmitted);
2970 tipc_printf(&pb, " Congestion bearer:%u link:%u Send queue max:%u avg:%u\n",
2971 l_ptr->stats.bearer_congs,
2972 l_ptr->stats.link_congs,
2973 l_ptr->stats.max_queue_sz,
2974 l_ptr->stats.queue_sz_counts
2975 ? (l_ptr->stats.accu_queue_sz / l_ptr->stats.queue_sz_counts)
2979 read_unlock_bh(&net_lock);
2980 return printbuf_validate(&pb);
2983 #define MAX_LINK_STATS_INFO 2000
2985 struct sk_buff *link_cmd_show_stats(const void *req_tlv_area, int req_tlv_space)
2987 struct sk_buff *buf;
2988 struct tlv_desc *rep_tlv;
2991 if (!TLV_CHECK(req_tlv_area, req_tlv_space, TIPC_TLV_LINK_NAME))
2992 return cfg_reply_error_string(TIPC_CFG_TLV_ERROR);
2994 buf = cfg_reply_alloc(TLV_SPACE(MAX_LINK_STATS_INFO));
2998 rep_tlv = (struct tlv_desc *)buf->data;
3000 str_len = link_stats((char *)TLV_DATA(req_tlv_area),
3001 (char *)TLV_DATA(rep_tlv), MAX_LINK_STATS_INFO);
3004 return cfg_reply_error_string("link not found");
3007 skb_put(buf, TLV_SPACE(str_len));
3008 TLV_SET(rep_tlv, TIPC_TLV_ULTRA_STRING, NULL, str_len);
3014 int link_control(const char *name, u32 op, u32 val)
3022 a = link_name2addr(name, &bearer_id);
3023 read_lock_bh(&net_lock);
3024 node = node_find(a);
3027 l_ptr = node->links[bearer_id];
3029 if (op == TIPC_REMOVE_LINK) {
3030 struct bearer *b_ptr = l_ptr->b_ptr;
3031 spin_lock_bh(&b_ptr->publ.lock);
3033 spin_unlock_bh(&b_ptr->publ.lock);
3035 if (op == TIPC_CMD_BLOCK_LINK) {
3039 if (op == TIPC_CMD_UNBLOCK_LINK) {
3046 read_unlock_bh(&net_lock);
3052 * link_get_max_pkt - get maximum packet size to use when sending to destination
3053 * @dest: network address of destination node
3054 * @selector: used to select from set of active links
3056 * If no active link can be found, uses default maximum packet size.
3059 u32 link_get_max_pkt(u32 dest, u32 selector)
3063 u32 res = MAX_PKT_DEFAULT;
3065 if (dest == tipc_own_addr)
3066 return MAX_MSG_SIZE;
3068 read_lock_bh(&net_lock);
3069 n_ptr = node_select(dest, selector);
3072 l_ptr = n_ptr->active_links[selector & 1];
3074 res = link_max_pkt(l_ptr);
3077 read_unlock_bh(&net_lock);
3082 static void link_dump_rec_queue(struct link *l_ptr)
3084 struct sk_buff *crs;
3086 if (!l_ptr->oldest_deferred_in) {
3087 info("Reception queue empty\n");
3090 info("Contents of Reception queue:\n");
3091 crs = l_ptr->oldest_deferred_in;
3093 if (crs->data == (void *)0x0000a3a3) {
3094 info("buffer %x invalid\n", crs);
3097 msg_dbg(buf_msg(crs), "In rec queue: \n");
3103 static void link_dump_send_queue(struct link *l_ptr)
3105 if (l_ptr->next_out) {
3106 info("\nContents of unsent queue:\n");
3107 dbg_print_buf_chain(l_ptr->next_out);
3109 info("\nContents of send queue:\n");
3110 if (l_ptr->first_out) {
3111 dbg_print_buf_chain(l_ptr->first_out);
3113 info("Empty send queue\n");
3116 static void link_print(struct link *l_ptr, struct print_buf *buf,
3119 tipc_printf(buf, str);
3120 if (link_reset_reset(l_ptr) || link_reset_unknown(l_ptr))
3122 tipc_printf(buf, "Link %x<%s>:",
3123 l_ptr->addr, l_ptr->b_ptr->publ.name);
3124 tipc_printf(buf, ": NXO(%u):", mod(l_ptr->next_out_no));
3125 tipc_printf(buf, "NXI(%u):", mod(l_ptr->next_in_no));
3126 tipc_printf(buf, "SQUE");
3127 if (l_ptr->first_out) {
3128 tipc_printf(buf, "[%u..", msg_seqno(buf_msg(l_ptr->first_out)));
3129 if (l_ptr->next_out)
3130 tipc_printf(buf, "%u..",
3131 msg_seqno(buf_msg(l_ptr->next_out)));
3132 tipc_printf(buf, "%u]",
3134 (l_ptr->last_out)), l_ptr->out_queue_size);
3135 if ((mod(msg_seqno(buf_msg(l_ptr->last_out)) -
3136 msg_seqno(buf_msg(l_ptr->first_out)))
3137 != (l_ptr->out_queue_size - 1))
3138 || (l_ptr->last_out->next != 0)) {
3139 tipc_printf(buf, "\nSend queue inconsistency\n");
3140 tipc_printf(buf, "first_out= %x ", l_ptr->first_out);
3141 tipc_printf(buf, "next_out= %x ", l_ptr->next_out);
3142 tipc_printf(buf, "last_out= %x ", l_ptr->last_out);
3143 link_dump_send_queue(l_ptr);
3146 tipc_printf(buf, "[]");
3147 tipc_printf(buf, "SQSIZ(%u)", l_ptr->out_queue_size);
3148 if (l_ptr->oldest_deferred_in) {
3149 u32 o = msg_seqno(buf_msg(l_ptr->oldest_deferred_in));
3150 u32 n = msg_seqno(buf_msg(l_ptr->newest_deferred_in));
3151 tipc_printf(buf, ":RQUE[%u..%u]", o, n);
3152 if (l_ptr->deferred_inqueue_sz != mod((n + 1) - o)) {
3153 tipc_printf(buf, ":RQSIZ(%u)",
3154 l_ptr->deferred_inqueue_sz);
3157 if (link_working_unknown(l_ptr))
3158 tipc_printf(buf, ":WU");
3159 if (link_reset_reset(l_ptr))
3160 tipc_printf(buf, ":RR");
3161 if (link_reset_unknown(l_ptr))
3162 tipc_printf(buf, ":RU");
3163 if (link_working_working(l_ptr))
3164 tipc_printf(buf, ":WW");
3165 tipc_printf(buf, "\n");