/*
 * net/tipc/cluster.c: TIPC cluster management routines
 *
 * Copyright (c) 2000-2006, Ericsson AB
 * Copyright (c) 2005, Wind River Systems
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the names of the copyright holders nor the names of its
 *    contributors may be used to endorse or promote products derived from
 *    this software without specific prior written permission.
 *
 * Alternatively, this software may be distributed under the terms of the
 * GNU General Public License ("GPL") version 2 as published by the Free
 * Software Foundation.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */
#include "core.h"
#include "cluster.h"
#include "addr.h"
#include "node_subscr.h"
#include "link.h"
#include "node.h"
#include "net.h"
#include "msg.h"
void cluster_multicast(struct cluster *c_ptr, struct sk_buff *buf,
		       u32 lower, u32 upper);
struct sk_buff *cluster_prepare_routing_msg(u32 data_size, u32 dest);

struct node **local_nodes = NULL;
struct node_map cluster_bcast_nodes = {0, {0,}};
u32 highest_allowed_slave = 0;
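
/*
 * Illustrative sketch, not part of the TIPC code itself: a cluster's
 * nodes[] array uses a split index space.  Regular nodes occupy slots
 * 1..tipc_max_nodes, while slave nodes of the own cluster occupy slots
 * from LOWEST_SLAVE (defined in cluster.h) up to highest_allowed_slave;
 * cluster_create() below sizes the array accordingly.  The stand-alone
 * model below uses made-up EX_* bounds as stand-ins for those limits.
 */
#if 0	/* example only, never compiled */
#include <stdio.h>

#define EX_MAX_NODES	8	/* stand-in for tipc_max_nodes */
#define EX_LOWEST_SLAVE	16	/* stand-in for LOWEST_SLAVE */
#define EX_MAX_SLAVES	4	/* stand-in for tipc_max_slaves */

int main(void)
{
	/* One pointer slot per possible regular node and slave node */
	void *nodes[EX_LOWEST_SLAVE + EX_MAX_SLAVES + 1] = { NULL };
	int n;

	nodes[3] = "regular node 3";		/* slots 1..EX_MAX_NODES */
	nodes[EX_LOWEST_SLAVE + 1] = "slave 1";	/* slave slots start high */

	for (n = 1; n <= EX_MAX_NODES; n++)
		if (nodes[n])
			printf("regular slot %d: %s\n", n, (char *)nodes[n]);
	for (n = EX_LOWEST_SLAVE; n <= EX_LOWEST_SLAVE + EX_MAX_SLAVES; n++)
		if (nodes[n])
			printf("slave slot %d: %s\n", n, (char *)nodes[n]);
	return 0;
}
#endif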
struct cluster *cluster_create(u32 addr)
{
	struct _zone *z_ptr;
	struct cluster *c_ptr;
	int max_nodes;
	int alloc;

	c_ptr = kmalloc(sizeof(*c_ptr), GFP_ATOMIC);
	if (c_ptr == NULL)
		return NULL;
	memset(c_ptr, 0, sizeof(*c_ptr));

	c_ptr->addr = tipc_addr(tipc_zone(addr), tipc_cluster(addr), 0);
	if (in_own_cluster(addr))
		max_nodes = LOWEST_SLAVE + tipc_max_slaves;
	else
		max_nodes = tipc_max_nodes + 1;
	alloc = sizeof(void *) * (max_nodes + 1);
	c_ptr->nodes = kmalloc(alloc, GFP_ATOMIC);
	if (c_ptr->nodes == NULL) {
		kfree(c_ptr);
		return NULL;
	}
	memset(c_ptr->nodes, 0, alloc);
	if (in_own_cluster(addr))
		local_nodes = c_ptr->nodes;
	c_ptr->highest_slave = LOWEST_SLAVE - 1;
	c_ptr->highest_node = 0;

	z_ptr = zone_find(tipc_zone(addr));
	if (z_ptr == NULL)
		z_ptr = zone_create(addr);
	if (z_ptr == NULL) {
		kfree(c_ptr->nodes);
		kfree(c_ptr);
		return NULL;
	}
	zone_attach_cluster(z_ptr, c_ptr);
	return c_ptr;
}
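
/*
 * Illustrative sketch, not part of the TIPC code itself: cluster_create()
 * above stores tipc_addr(zone, cluster, 0), i.e. the cluster's address is
 * the member address with a zero node field.  The field widths used below
 * (zone in bits 24-31, cluster in 12-23, node in 0-11, as in addr.h) are
 * stated here as an assumption of this sketch.
 */
#if 0	/* example only, never compiled */
#include <stdio.h>

typedef unsigned int u32;

static u32 ex_tipc_addr(u32 z, u32 c, u32 n)
{
	return (z << 24) | (c << 12) | n;	/* pack <Z.C.N> into one u32 */
}

int main(void)
{
	u32 node_addr = ex_tipc_addr(1, 1, 5);	/* node <1.1.5> */
	u32 clus_addr = ex_tipc_addr(1, 1, 0);	/* its cluster, <1.1.0> */

	printf("node 0x%08x, cluster 0x%08x, node field %u\n",
	       node_addr, clus_addr, node_addr & 0xfff);
	return 0;
}
#endif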
void cluster_delete(struct cluster *c_ptr)
{
	u32 n_num;

	if (!c_ptr)
		return;
	for (n_num = 1; n_num <= c_ptr->highest_node; n_num++) {
		node_delete(c_ptr->nodes[n_num]);
	}
	for (n_num = LOWEST_SLAVE; n_num <= c_ptr->highest_slave; n_num++) {
		node_delete(c_ptr->nodes[n_num]);
	}
	kfree(c_ptr->nodes);
	kfree(c_ptr);
}
u32 cluster_next_node(struct cluster *c_ptr, u32 addr)
{
	struct node *n_ptr;
	u32 n_num = tipc_node(addr) + 1;

	for (; n_num <= c_ptr->highest_node; n_num++) {
		n_ptr = c_ptr->nodes[n_num];
		if (n_ptr && node_has_active_links(n_ptr))
			return n_ptr->addr;
	}
	for (n_num = 1; n_num < tipc_node(addr); n_num++) {
		n_ptr = c_ptr->nodes[n_num];
		if (n_ptr && node_has_active_links(n_ptr))
			return n_ptr->addr;
	}
	return 0;
}
void cluster_attach_node(struct cluster *c_ptr, struct node *n_ptr)
{
	u32 n_num = tipc_node(n_ptr->addr);
	u32 max_n_num = tipc_max_nodes;

	if (in_own_cluster(n_ptr->addr))
		max_n_num = highest_allowed_slave;
	assert(n_num <= max_n_num);
	assert(c_ptr->nodes[n_num] == NULL);
	c_ptr->nodes[n_num] = n_ptr;
	if (n_num > c_ptr->highest_node)
		c_ptr->highest_node = n_num;
}
/*
 * cluster_select_router - select router to a cluster
 *
 * Uses deterministic and fair algorithm.
 */

u32 cluster_select_router(struct cluster *c_ptr, u32 ref)
{
	u32 n_num;
	u32 ulim = c_ptr->highest_node;
	u32 mask;
	u32 tstart;

	assert(!in_own_cluster(c_ptr->addr));
	if (!ulim)
		return 0;

	/* Start entry must be random */
	mask = tipc_max_nodes;
	while (mask > ulim)
		mask >>= 1;
	tstart = (ref & mask) ? (ref & mask) : 1;
	n_num = tstart;

	/* Lookup upwards with wrap-around */
	do {
		if (node_is_up(c_ptr->nodes[n_num]))
			break;
	} while (++n_num <= ulim);
	if (n_num > ulim) {
		n_num = 1;
		do {
			if (node_is_up(c_ptr->nodes[n_num]))
				break;
		} while (++n_num < tstart);
		if (n_num >= tstart)
			return 0;
	}
	assert(n_num <= ulim);
	return node_select_router(c_ptr->nodes[n_num], ref);
}
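
/*
 * Illustrative sketch, not part of the TIPC code itself: both
 * cluster_select_router() above and cluster_select_node() below spread
 * load by hashing the caller-supplied reference/selector into a start
 * slot (masked down to the table size, never zero) and then scanning
 * upward with wrap-around until a usable entry is found.  Below is a
 * stand-alone model of that scan with a plain int array standing in for
 * the node table.
 */
#if 0	/* example only, never compiled */
#include <stdio.h>

/* Return the index of the first "up" slot at or after the hashed start
 * position, wrapping around to slot 1; return 0 if nothing is up. */
static unsigned int pick_slot(const int *up, unsigned int highest,
			      unsigned int selector)
{
	unsigned int mask = 32;		/* stand-in for tipc_max_nodes */
	unsigned int start, n;

	if (!highest)
		return 0;
	while (mask > highest)
		mask >>= 1;
	start = (selector & mask) ? (selector & mask) : 1;

	for (n = start; n <= highest; n++)
		if (up[n])
			return n;
	for (n = 1; n < start; n++)
		if (up[n])
			return n;
	return 0;
}

int main(void)
{
	int up[8] = { 0, 0, 0, 1, 0, 1, 0, 0 };	/* slots 3 and 5 are up */
	unsigned int sel;

	for (sel = 1; sel <= 6; sel++)
		printf("selector %u -> slot %u\n", sel, pick_slot(up, 7, sel));
	return 0;
}
#endif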
/*
 * cluster_select_node - select destination node within a remote cluster
 *
 * Uses deterministic and fair algorithm.
 */

struct node *cluster_select_node(struct cluster *c_ptr, u32 selector)
{
	u32 n_num;
	u32 mask = tipc_max_nodes;
	u32 start_entry;

	assert(!in_own_cluster(c_ptr->addr));
	if (!c_ptr->highest_node)
		return NULL;

	/* Start entry must be random */
	while (mask > c_ptr->highest_node) {
		mask >>= 1;
	}
	start_entry = (selector & mask) ? selector & mask : 1u;
	assert(start_entry <= c_ptr->highest_node);

	/* Lookup upwards with wrap-around */
	for (n_num = start_entry; n_num <= c_ptr->highest_node; n_num++) {
		if (node_has_active_links(c_ptr->nodes[n_num]))
			return c_ptr->nodes[n_num];
	}
	for (n_num = 1; n_num < start_entry; n_num++) {
		if (node_has_active_links(c_ptr->nodes[n_num]))
			return c_ptr->nodes[n_num];
	}
	return NULL;
}
/*
 * Routing table management: See description in node.c
 */
struct sk_buff *cluster_prepare_routing_msg(u32 data_size, u32 dest)
{
	u32 size = INT_H_SIZE + data_size;
	struct sk_buff *buf = buf_acquire(size);
	struct tipc_msg *msg;

	if (buf) {
		msg = buf_msg(buf);
		memset((char *)msg, 0, size);
		msg_init(msg, ROUTE_DISTRIBUTOR, 0, TIPC_OK, INT_H_SIZE, dest);
	}
	return buf;
}
void cluster_bcast_new_route(struct cluster *c_ptr, u32 dest,
			     u32 lower, u32 upper)
{
	struct sk_buff *buf = cluster_prepare_routing_msg(0, c_ptr->addr);
	struct tipc_msg *msg;

	if (buf) {
		msg = buf_msg(buf);
		msg_set_remote_node(msg, dest);
		msg_set_type(msg, ROUTE_ADDITION);
		cluster_multicast(c_ptr, buf, lower, upper);
	} else {
		warn("Memory squeeze: broadcast of new route failed\n");
	}
}
void cluster_bcast_lost_route(struct cluster *c_ptr, u32 dest,
			      u32 lower, u32 upper)
{
	struct sk_buff *buf = cluster_prepare_routing_msg(0, c_ptr->addr);
	struct tipc_msg *msg;

	if (buf) {
		msg = buf_msg(buf);
		msg_set_remote_node(msg, dest);
		msg_set_type(msg, ROUTE_REMOVAL);
		cluster_multicast(c_ptr, buf, lower, upper);
	} else {
		warn("Memory squeeze: broadcast of lost route failed\n");
	}
}
void cluster_send_slave_routes(struct cluster *c_ptr, u32 dest)
{
	struct sk_buff *buf;
	struct tipc_msg *msg;
	u32 highest = c_ptr->highest_slave;
	u32 n_num;

	assert(!is_slave(dest));
	assert(in_own_cluster(dest));
	assert(in_own_cluster(c_ptr->addr));
	if (highest <= LOWEST_SLAVE)
		return;
	buf = cluster_prepare_routing_msg(highest - LOWEST_SLAVE + 1,
					  c_ptr->addr);
	if (buf) {
		msg = buf_msg(buf);
		msg_set_remote_node(msg, c_ptr->addr);
		msg_set_type(msg, SLAVE_ROUTING_TABLE);
		for (n_num = LOWEST_SLAVE; n_num <= highest; n_num++) {
			if (c_ptr->nodes[n_num] &&
			    node_has_active_links(c_ptr->nodes[n_num])) {
				msg_set_dataoctet(msg, n_num);
			}
		}
		link_send(buf, dest, dest);
	} else {
		warn("Memory squeeze: sending of slave routes failed\n");
	}
}
void cluster_send_ext_routes(struct cluster *c_ptr, u32 dest)
{
	struct sk_buff *buf;
	struct tipc_msg *msg;
	u32 highest = c_ptr->highest_node;
	u32 n_num;

	if (in_own_cluster(c_ptr->addr))
		return;
	assert(!is_slave(dest));
	assert(in_own_cluster(dest));
	buf = cluster_prepare_routing_msg(highest + 1, c_ptr->addr);
	if (buf) {
		msg = buf_msg(buf);
		msg_set_remote_node(msg, c_ptr->addr);
		msg_set_type(msg, EXT_ROUTING_TABLE);
		for (n_num = 1; n_num <= highest; n_num++) {
			if (c_ptr->nodes[n_num] &&
			    node_has_active_links(c_ptr->nodes[n_num])) {
				msg_set_dataoctet(msg, n_num);
			}
		}
		link_send(buf, dest, dest);
	} else {
		warn("Memory squeeze: sending of external routes failed\n");
	}
}
void cluster_send_local_routes(struct cluster *c_ptr, u32 dest)
{
	struct sk_buff *buf;
	struct tipc_msg *msg;
	u32 highest = c_ptr->highest_node;
	u32 n_num;

	assert(is_slave(dest));
	assert(in_own_cluster(c_ptr->addr));
	buf = cluster_prepare_routing_msg(highest, c_ptr->addr);
	if (buf) {
		msg = buf_msg(buf);
		msg_set_remote_node(msg, c_ptr->addr);
		msg_set_type(msg, LOCAL_ROUTING_TABLE);
		for (n_num = 1; n_num <= highest; n_num++) {
			if (c_ptr->nodes[n_num] &&
			    node_has_active_links(c_ptr->nodes[n_num])) {
				msg_set_dataoctet(msg, n_num);
			}
		}
		link_send(buf, dest, dest);
	} else {
		warn("Memory squeeze: sending of local routes failed\n");
	}
}
void cluster_recv_routing_table(struct sk_buff *buf)
{
	struct tipc_msg *msg = buf_msg(buf);
	struct cluster *c_ptr;
	struct node *n_ptr;
	unsigned char *node_table;
	u32 table_size;
	u32 router;
	u32 rem_node = msg_remote_node(msg);
	u32 z_num;
	u32 c_num;
	u32 n_num;

	c_ptr = cluster_find(rem_node);
	if (!c_ptr) {
		c_ptr = cluster_create(rem_node);
		if (!c_ptr) {
			buf_discard(buf);
			return;
		}
	}

	node_table = buf->data + msg_hdr_sz(msg);
	table_size = msg_size(msg) - msg_hdr_sz(msg);
	router = msg_prevnode(msg);
	z_num = tipc_zone(rem_node);
	c_num = tipc_cluster(rem_node);

	switch (msg_type(msg)) {
	case LOCAL_ROUTING_TABLE:
		assert(is_slave(tipc_own_addr));
		/* fall through: local and external tables share one format */
	case EXT_ROUTING_TABLE:
		for (n_num = 1; n_num < table_size; n_num++) {
			if (node_table[n_num]) {
				u32 addr = tipc_addr(z_num, c_num, n_num);
				n_ptr = c_ptr->nodes[n_num];
				if (!n_ptr)
					n_ptr = node_create(addr);
				if (n_ptr)
					node_add_router(n_ptr, router);
			}
		}
		break;
	case SLAVE_ROUTING_TABLE:
		assert(!is_slave(tipc_own_addr));
		assert(in_own_cluster(c_ptr->addr));
		for (n_num = 1; n_num < table_size; n_num++) {
			if (node_table[n_num]) {
				u32 slave_num = n_num + LOWEST_SLAVE;
				u32 addr = tipc_addr(z_num, c_num, slave_num);
				n_ptr = c_ptr->nodes[slave_num];
				if (!n_ptr)
					n_ptr = node_create(addr);
				if (n_ptr)
					node_add_router(n_ptr, router);
			}
		}
		break;
	case ROUTE_ADDITION:
		if (!is_slave(tipc_own_addr)) {
			assert(!in_own_cluster(c_ptr->addr)
			       || is_slave(rem_node));
		} else {
			assert(in_own_cluster(c_ptr->addr)
			       && !is_slave(rem_node));
		}
		n_ptr = c_ptr->nodes[tipc_node(rem_node)];
		if (!n_ptr)
			n_ptr = node_create(rem_node);
		if (n_ptr)
			node_add_router(n_ptr, router);
		break;
	case ROUTE_REMOVAL:
		if (!is_slave(tipc_own_addr)) {
			assert(!in_own_cluster(c_ptr->addr)
			       || is_slave(rem_node));
		} else {
			assert(in_own_cluster(c_ptr->addr)
			       && !is_slave(rem_node));
		}
		n_ptr = c_ptr->nodes[tipc_node(rem_node)];
		if (n_ptr)
			node_remove_router(n_ptr, router);
		break;
	default:
		assert(!"Illegal routing manager message received\n");
	}
	buf_discard(buf);
}
void cluster_remove_as_router(struct cluster *c_ptr, u32 router)
{
	u32 start_entry;
	u32 tstop;
	u32 n_num;

	if (is_slave(router))
		return;	/* Slave nodes cannot be routers */

	if (in_own_cluster(c_ptr->addr)) {
		start_entry = LOWEST_SLAVE;
		tstop = c_ptr->highest_slave;
	} else {
		start_entry = 1;
		tstop = c_ptr->highest_node;
	}

	for (n_num = start_entry; n_num <= tstop; n_num++) {
		if (c_ptr->nodes[n_num]) {
			node_remove_router(c_ptr->nodes[n_num], router);
		}
	}
}
/*
 * cluster_multicast - multicast message to local nodes
 */

void cluster_multicast(struct cluster *c_ptr, struct sk_buff *buf,
		       u32 lower, u32 upper)
{
	struct sk_buff *buf_copy;
	struct node *n_ptr;
	u32 n_num;
	u32 tstop;

	assert(lower <= upper);
	assert(((lower >= 1) && (lower <= tipc_max_nodes)) ||
	       ((lower >= LOWEST_SLAVE) && (lower <= highest_allowed_slave)));
	assert(((upper >= 1) && (upper <= tipc_max_nodes)) ||
	       ((upper >= LOWEST_SLAVE) && (upper <= highest_allowed_slave)));
	assert(in_own_cluster(c_ptr->addr));

	tstop = is_slave(upper) ? c_ptr->highest_slave : c_ptr->highest_node;
	if (tstop > upper)
		tstop = upper;
	for (n_num = lower; n_num <= tstop; n_num++) {
		n_ptr = c_ptr->nodes[n_num];
		if (n_ptr && node_has_active_links(n_ptr)) {
			buf_copy = skb_copy(buf, GFP_ATOMIC);
			if (buf_copy == NULL)
				break;
			msg_set_destnode(buf_msg(buf_copy), n_ptr->addr);
			link_send(buf_copy, n_ptr->addr, n_ptr->addr);
		}
	}
	buf_discard(buf);
}
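
/*
 * Illustrative sketch, not part of the TIPC code itself: in the loop
 * above, each live node in the [lower, upper] range gets its own copy of
 * the buffer so that the destination-node field of the header can be
 * rewritten per recipient before the copy is handed to link_send().
 * Below is a stand-alone model of that per-destination copy; the message
 * struct and addresses are invented for the sketch.
 */
#if 0	/* example only, never compiled */
#include <stdio.h>

struct ex_msg {
	unsigned int destnode;
	char payload[32];
};

int main(void)
{
	struct ex_msg original = { 0, "route update" };
	unsigned int dests[] = { 0x01001003, 0x01001005 };
	size_t i;

	for (i = 0; i < sizeof(dests) / sizeof(dests[0]); i++) {
		struct ex_msg copy = original;	/* one copy per recipient */

		copy.destnode = dests[i];	/* rewrite destination only */
		printf("send to 0x%08x: %s\n", copy.destnode, copy.payload);
	}
	return 0;
}
#endif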
/*
 * cluster_broadcast - broadcast message to all nodes within cluster
 */

void cluster_broadcast(struct sk_buff *buf)
{
	struct sk_buff *buf_copy;
	struct cluster *c_ptr;
	struct node *n_ptr;
	u32 n_num;
	u32 tstart;
	u32 tstop;
	u32 node_type;

	if (tipc_mode == TIPC_NET_MODE) {
		c_ptr = cluster_find(tipc_own_addr);
		assert(in_own_cluster(c_ptr->addr));	/* For now */

		/* Send to standard nodes, then repeat loop sending to slaves */
		tstart = 1;
		tstop = c_ptr->highest_node;
		for (node_type = 1; node_type <= 2; node_type++) {
			for (n_num = tstart; n_num <= tstop; n_num++) {
				n_ptr = c_ptr->nodes[n_num];
				if (n_ptr && node_has_active_links(n_ptr)) {
					buf_copy = skb_copy(buf, GFP_ATOMIC);
					if (buf_copy == NULL)
						goto exit;
					msg_set_destnode(buf_msg(buf_copy),
							 n_ptr->addr);
					link_send(buf_copy, n_ptr->addr,
						  n_ptr->addr);
				}
			}
			tstart = LOWEST_SLAVE;
			tstop = c_ptr->highest_slave;
		}
	}
exit:
	buf_discard(buf);
}
int cluster_init(void)
{
	highest_allowed_slave = LOWEST_SLAVE + tipc_max_slaves;
	return cluster_create(tipc_own_addr) ? TIPC_OK : -ENOMEM;
}