/*
 * net/tipc/name_distr.c: TIPC name distribution code
 *
 * Copyright (c) 2000-2006, Ericsson AB
 * Copyright (c) 2005, Wind River Systems
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the names of the copyright holders nor the names of its
 *    contributors may be used to endorse or promote products derived from
 *    this software without specific prior written permission.
 *
 * Alternatively, this software may be distributed under the terms of the
 * GNU General Public License ("GPL") version 2 as published by the Free
 * Software Foundation.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include "core.h"
#include "cluster.h"
#include "dbg.h"
#include "link.h"
#include "msg.h"
#include "name_distr.h"

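/* Turn off this file's debug output by overriding DBG_OUTPUT */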
#undef  DBG_OUTPUT
#define DBG_OUTPUT NULL

#define ITEM_SIZE sizeof(struct distr_item)

/**
 * struct distr_item - publication info distributed to other nodes
 * @type: name sequence type
 * @lower: name sequence lower bound
 * @upper: name sequence upper bound
 * @ref: publishing port reference
 * @key: publication key
 *
 * ===> All fields are stored in network byte order. <===
 *
 * The first three fields identify the name sequence (or name) being published.
 * The reference field uniquely identifies the port that published the name
 * sequence, and the key field uniquely identifies the publication, in the
 * event that a port has multiple publications of the same name sequence.
 *
 * Note: There is no field that identifies the publishing node because it is
 * the same for all items contained within a publication message.
 */

struct distr_item {
        u32 type;
        u32 lower;
        u32 upper;
        u32 ref;
        u32 key;
};


/**
 * List of externally visible publications by this node --
 * that is, all publications having a scope other than TIPC_NODE_SCOPE.
 */

static LIST_HEAD(publ_root);
static u32 publ_cnt = 0;

/**
 * publ_to_item - add publication info to a publication message
 */

static void publ_to_item(struct distr_item *i, struct publication *p)
{
        i->type = htonl(p->type);
        i->lower = htonl(p->lower);
        i->upper = htonl(p->upper);
        i->ref = htonl(p->ref);
        i->key = htonl(p->key);
        dbg("publ_to_item: %u, %u, %u\n", p->type, p->lower, p->upper);
}

/**
 * named_prepare_buf - allocate & initialize a publication message
 */

static struct sk_buff *named_prepare_buf(u32 type, u32 size, u32 dest)
{
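        /* Reserve room for a long message header followed by "size" bytes of item data */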
        struct sk_buff *buf = buf_acquire(LONG_H_SIZE + size);
        struct tipc_msg *msg;

        if (buf != NULL) {
                msg = buf_msg(buf);
                msg_init(msg, NAME_DISTRIBUTOR, type, TIPC_OK,
                         LONG_H_SIZE, dest);
                msg_set_size(msg, LONG_H_SIZE + size);
        }
        return buf;
}

/**
 * tipc_named_publish - tell other nodes about a new publication by this node
 */

void tipc_named_publish(struct publication *publ)
{
        struct sk_buff *buf;
        struct distr_item *item;

        list_add(&publ->local_list, &publ_root);
        publ_cnt++;

        buf = named_prepare_buf(PUBLICATION, ITEM_SIZE, 0);
        if (!buf) {
                warn("Memory squeeze; failed to distribute publication\n");
                return;
        }

        item = (struct distr_item *)msg_data(buf_msg(buf));
        publ_to_item(item, publ);
        dbg("tipc_named_publish: broadcasting publish msg\n");
        tipc_cltr_broadcast(buf);
}

/**
 * tipc_named_withdraw - tell other nodes about a withdrawn publication by this node
 */

void tipc_named_withdraw(struct publication *publ)
{
        struct sk_buff *buf;
        struct distr_item *item;

        list_del(&publ->local_list);
        publ_cnt--;

        buf = named_prepare_buf(WITHDRAWAL, ITEM_SIZE, 0);
        if (!buf) {
                warn("Memory squeeze; failed to distribute withdrawal\n");
                return;
        }

        item = (struct distr_item *)msg_data(buf_msg(buf));
        publ_to_item(item, publ);
        dbg("tipc_named_withdraw: broadcasting withdraw msg\n");
        tipc_cltr_broadcast(buf);
}

/**
 * tipc_named_node_up - tell specified node about all publications by this node
 */

void tipc_named_node_up(unsigned long node)
{
        struct publication *publ;
        struct distr_item *item = NULL;
        struct sk_buff *buf = NULL;
        u32 left = 0;
        u32 rest;
        u32 max_item_buf;

        assert(in_own_cluster(node));
        read_lock_bh(&tipc_nametbl_lock);
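        /* Determine the largest payload that still holds a whole number of
         * items, and the total amount of publication info to be sent
         */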
        max_item_buf = TIPC_MAX_USER_MSG_SIZE / ITEM_SIZE;
        max_item_buf *= ITEM_SIZE;
        rest = publ_cnt * ITEM_SIZE;

        list_for_each_entry(publ, &publ_root, local_list) {
                if (!buf) {
                        left = (rest <= max_item_buf) ? rest : max_item_buf;
                        rest -= left;
                        buf = named_prepare_buf(PUBLICATION, left, node);
                        if (buf == NULL) {
                                warn("Memory squeeze; could not send publication\n");
                                goto exit;
                        }
                        item = (struct distr_item *)msg_data(buf_msg(buf));
                }
                publ_to_item(item, publ);
                item++;
                left -= ITEM_SIZE;
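                /* Current buffer is full: send it, then start a fresh one on the next pass */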
                if (!left) {
                        msg_set_link_selector(buf_msg(buf), node);
                        dbg("tipc_named_node_up: sending publish msg to "
                            "<%u.%u.%u>\n", tipc_zone(node),
                            tipc_cluster(node), tipc_node(node));
                        tipc_link_send(buf, node, node);
                        buf = NULL;
                }
        }
exit:
        read_unlock_bh(&tipc_nametbl_lock);
}

/**
 * node_is_down - remove publication associated with a failed node
 *
 * Invoked for each publication issued by a newly failed node.
 * Removes publication structure from name table & deletes it.
 * In rare cases the link may have come back up again when this
 * function is called, and we have two items representing the same
 * publication. Nudge this item's key to distinguish it from the other.
 * (Note: Publication's node subscription is already unsubscribed.)
 */

static void node_is_down(struct publication *publ)
{
        struct publication *p;
        write_lock_bh(&tipc_nametbl_lock);
        dbg("node_is_down: withdrawing %u, %u, %u\n",
            publ->type, publ->lower, publ->upper);
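        /* Alter the key so this entry can be told apart from a duplicate
         * created if the failed node has already come back up (see above)
         */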
        publ->key += 1222345;
        p = tipc_nametbl_remove_publ(publ->type, publ->lower,
                                     publ->node, publ->ref, publ->key);
        assert(p == publ);
        write_unlock_bh(&tipc_nametbl_lock);
        kfree(publ);
}

/**
 * tipc_named_recv - process name table update message sent by another node
 */

void tipc_named_recv(struct sk_buff *buf)
{
        struct publication *publ;
        struct tipc_msg *msg = buf_msg(buf);
        struct distr_item *item = (struct distr_item *)msg_data(msg);
        u32 count = msg_data_sz(msg) / ITEM_SIZE;

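        /* Apply each publication or withdrawal item carried in the message */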
        write_lock_bh(&tipc_nametbl_lock);
        while (count--) {
                if (msg_type(msg) == PUBLICATION) {
                        dbg("tipc_named_recv: got publication for %u, %u, %u\n",
                            ntohl(item->type), ntohl(item->lower),
                            ntohl(item->upper));
                        publ = tipc_nametbl_insert_publ(ntohl(item->type),
                                                        ntohl(item->lower),
                                                        ntohl(item->upper),
                                                        TIPC_CLUSTER_SCOPE,
                                                        msg_orignode(msg),
                                                        ntohl(item->ref),
                                                        ntohl(item->key));
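                        /* Watch the publishing node so the entry is purged if that node fails */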
                        if (publ) {
                                tipc_nodesub_subscribe(&publ->subscr,
                                                       msg_orignode(msg),
                                                       publ,
                                                       (net_ev_handler)node_is_down);
                        }
                } else if (msg_type(msg) == WITHDRAWAL) {
                        dbg("tipc_named_recv: got withdrawal for %u, %u, %u\n",
                            ntohl(item->type), ntohl(item->lower),
                            ntohl(item->upper));
                        publ = tipc_nametbl_remove_publ(ntohl(item->type),
                                                        ntohl(item->lower),
                                                        msg_orignode(msg),
                                                        ntohl(item->ref),
                                                        ntohl(item->key));

                        if (publ) {
                                tipc_nodesub_unsubscribe(&publ->subscr);
                                kfree(publ);
                        }
                } else {
                        warn("tipc_named_recv: unknown msg\n");
                }
                item++;
        }
        write_unlock_bh(&tipc_nametbl_lock);
        buf_discard(buf);
}

/**
 * tipc_named_reinit - re-initialize local publication list
 *
 * This routine is called whenever TIPC networking is (re)enabled.
 * All existing publications by this node that have "cluster" or "zone" scope
 * are updated to reflect the node's current network address.
 * (If the node's address is unchanged, the update loop terminates immediately.)
 */

void tipc_named_reinit(void)
{
        struct publication *publ;

        write_lock_bh(&tipc_nametbl_lock);
        list_for_each_entry(publ, &publ_root, local_list) {
                if (publ->node == tipc_own_addr)
                        break;
                publ->node = tipc_own_addr;
        }
        write_unlock_bh(&tipc_nametbl_lock);
}