net/rxrpc/peer.c (linux-2.6)
/* peer.c: Rx RPC peer management
 *
 * Copyright (C) 2002 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <rxrpc/rxrpc.h>
#include <rxrpc/transport.h>
#include <rxrpc/peer.h>
#include <rxrpc/connection.h>
#include <rxrpc/call.h>
#include <rxrpc/message.h>
#include <linux/udp.h>
#include <linux/ip.h>
#include <net/sock.h>
#include <asm/uaccess.h>
#include <asm/div64.h>
#include "internal.h"

__RXACCT_DECL(atomic_t rxrpc_peer_count);
LIST_HEAD(rxrpc_peers);
DECLARE_RWSEM(rxrpc_peers_sem);
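/* how long (in seconds) a dead peer may linger in the graveyard before being
 * destroyed: 12 hours */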
unsigned long rxrpc_peer_timeout = 12 * 60 * 60;

static void rxrpc_peer_do_timeout(struct rxrpc_peer *peer);
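
/* recover the owning peer record from its embedded timeout timer; the
 * list_entry() below is really container_of(), mapping the timer pointer back
 * to the enclosing struct rxrpc_peer */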
static void __rxrpc_peer_timeout(rxrpc_timer_t *timer)
{
	struct rxrpc_peer *peer =
		list_entry(timer, struct rxrpc_peer, timeout);

	_debug("Rx PEER TIMEOUT [%p{u=%d}]", peer, atomic_read(&peer->usage));

	rxrpc_peer_do_timeout(peer);
}

static const struct rxrpc_timer_ops rxrpc_peer_timer_ops = {
	.timed_out	= __rxrpc_peer_timeout,
};

/*****************************************************************************/
/*
 * create a peer record
 */
static int __rxrpc_create_peer(struct rxrpc_transport *trans, __be32 addr,
			       struct rxrpc_peer **_peer)
{
	struct rxrpc_peer *peer;

	_enter("%p,%08x", trans, ntohl(addr));

	/* allocate and initialise a peer record; kzalloc() returns the
	 * memory already zeroed */
	peer = kzalloc(sizeof(struct rxrpc_peer), GFP_KERNEL);
	if (!peer) {
		_leave(" = -ENOMEM");
		return -ENOMEM;
	}

	atomic_set(&peer->usage, 1);

	INIT_LIST_HEAD(&peer->link);
	INIT_LIST_HEAD(&peer->proc_link);
	INIT_LIST_HEAD(&peer->conn_idlist);
	INIT_LIST_HEAD(&peer->conn_active);
	INIT_LIST_HEAD(&peer->conn_graveyard);
	spin_lock_init(&peer->conn_gylock);
	init_waitqueue_head(&peer->conn_gy_waitq);
	rwlock_init(&peer->conn_idlock);
	rwlock_init(&peer->conn_lock);
	atomic_set(&peer->conn_count, 0);
	spin_lock_init(&peer->lock);
	rxrpc_timer_init(&peer->timeout, &rxrpc_peer_timer_ops);

	peer->addr.s_addr = addr;

	peer->trans = trans;
	peer->ops = trans->peer_ops;

	__RXACCT(atomic_inc(&rxrpc_peer_count));
	*_peer = peer;
	_leave(" = 0 (%p)", peer);

	return 0;
} /* end __rxrpc_create_peer() */

/*****************************************************************************/
/*
 * find a peer record on the specified transport
 * - returns (if successful) with peer record usage incremented
 * - resurrects it from the graveyard if found there
 */
int rxrpc_peer_lookup(struct rxrpc_transport *trans, __be32 addr,
		      struct rxrpc_peer **_peer)
{
	struct rxrpc_peer *peer, *candidate = NULL;
	struct list_head *_p;
	int ret;

	_enter("%p{%hu},%08x", trans, trans->port, ntohl(addr));

	/* [common case] search the transport's active list first */
	read_lock(&trans->peer_lock);
	list_for_each(_p, &trans->peer_active) {
		peer = list_entry(_p, struct rxrpc_peer, link);
		if (peer->addr.s_addr == addr)
			goto found_active;
	}
	read_unlock(&trans->peer_lock);

	/* [uncommon case] not active - create a candidate for a new record */
	ret = __rxrpc_create_peer(trans, addr, &candidate);
	if (ret < 0) {
		_leave(" = %d", ret);
		return ret;
	}

	/* search the active list again, just in case it appeared whilst we
	 * were busy */
	write_lock(&trans->peer_lock);
	list_for_each(_p, &trans->peer_active) {
		peer = list_entry(_p, struct rxrpc_peer, link);
		if (peer->addr.s_addr == addr)
			goto found_active_second_chance;
	}

	/* search the transport's graveyard list */
	spin_lock(&trans->peer_gylock);
	list_for_each(_p, &trans->peer_graveyard) {
		peer = list_entry(_p, struct rxrpc_peer, link);
		if (peer->addr.s_addr == addr)
			goto found_in_graveyard;
	}
	spin_unlock(&trans->peer_gylock);

	/* we can now add the new candidate to the list
	 * - tell the application layer that this peer has been added
	 */
	rxrpc_get_transport(trans);
	peer = candidate;
	candidate = NULL;

	if (peer->ops && peer->ops->adding) {
		ret = peer->ops->adding(peer);
		if (ret < 0) {
			write_unlock(&trans->peer_lock);
			__RXACCT(atomic_dec(&rxrpc_peer_count));
			kfree(peer);
			rxrpc_put_transport(trans);
			_leave(" = %d", ret);
			return ret;
		}
	}

	atomic_inc(&trans->peer_count);

 make_active:
	list_add_tail(&peer->link, &trans->peer_active);

 success_uwfree:
	write_unlock(&trans->peer_lock);

	if (candidate) {
		__RXACCT(atomic_dec(&rxrpc_peer_count));
		kfree(candidate);
	}

	if (list_empty(&peer->proc_link)) {
		down_write(&rxrpc_peers_sem);
		list_add_tail(&peer->proc_link, &rxrpc_peers);
		up_write(&rxrpc_peers_sem);
	}

 success:
	*_peer = peer;

	_leave(" = 0 (%p{u=%d cc=%d})",
	       peer,
	       atomic_read(&peer->usage),
	       atomic_read(&peer->conn_count));
	return 0;

	/* handle the peer being found in the active list straight off */
 found_active:
	rxrpc_get_peer(peer);
	read_unlock(&trans->peer_lock);
	goto success;

	/* handle resurrecting a peer from the graveyard */
 found_in_graveyard:
	rxrpc_get_peer(peer);
	rxrpc_get_transport(peer->trans);
	rxrpc_krxtimod_del_timer(&peer->timeout);
	list_del_init(&peer->link);
	spin_unlock(&trans->peer_gylock);
	goto make_active;

	/* handle finding the peer on the second time through the active
	 * list */
 found_active_second_chance:
	rxrpc_get_peer(peer);
	goto success_uwfree;

} /* end rxrpc_peer_lookup() */
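
/*
 * Illustrative sketch (not part of the original file): a successful
 * rxrpc_peer_lookup() returns with the peer's usage count already held,
 * so a hypothetical caller pairs it with rxrpc_put_peer():
 *
 *	struct rxrpc_peer *peer;
 *	int ret;
 *
 *	ret = rxrpc_peer_lookup(trans, htonl(0x7f000001), &peer);
 *	if (ret < 0)
 *		return ret;
 *	... use the peer record ...
 *	rxrpc_put_peer(peer);
 */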

/*****************************************************************************/
/*
 * finish with a peer record
 * - it gets sent to the graveyard from where it can be resurrected or timed
 *   out
 */
void rxrpc_put_peer(struct rxrpc_peer *peer)
{
	struct rxrpc_transport *trans = peer->trans;

	_enter("%p{cc=%d a=%08x}",
	       peer,
	       atomic_read(&peer->conn_count),
	       ntohl(peer->addr.s_addr));

	/* sanity check */
	BUG_ON(atomic_read(&peer->usage) <= 0);
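
	/* take both the active-list lock and the graveyard lock so that
	 * dropping the last reference and moving the peer to the graveyard
	 * appear as a single atomic step to rxrpc_peer_lookup() */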
	write_lock(&trans->peer_lock);
	spin_lock(&trans->peer_gylock);
	if (likely(!atomic_dec_and_test(&peer->usage))) {
		spin_unlock(&trans->peer_gylock);
		write_unlock(&trans->peer_lock);
		_leave("");
		return;
	}

	/* move to graveyard queue */
	list_del(&peer->link);
	write_unlock(&trans->peer_lock);

	list_add_tail(&peer->link, &trans->peer_graveyard);

	BUG_ON(!list_empty(&peer->conn_active));

	rxrpc_krxtimod_add_timer(&peer->timeout, rxrpc_peer_timeout * HZ);

	spin_unlock(&trans->peer_gylock);

	rxrpc_put_transport(trans);

	_leave(" [killed]");
} /* end rxrpc_put_peer() */

/*****************************************************************************/
/*
 * handle a peer timing out in the graveyard
 * - called from krxtimod
 */
static void rxrpc_peer_do_timeout(struct rxrpc_peer *peer)
{
	struct rxrpc_transport *trans = peer->trans;

	_enter("%p{u=%d cc=%d a=%08x}",
	       peer,
	       atomic_read(&peer->usage),
	       atomic_read(&peer->conn_count),
	       ntohl(peer->addr.s_addr));

	BUG_ON(atomic_read(&peer->usage) < 0);

	/* remove from graveyard if still dead */
	spin_lock(&trans->peer_gylock);
	if (atomic_read(&peer->usage) == 0)
		list_del_init(&peer->link);
	else
		peer = NULL;
	spin_unlock(&trans->peer_gylock);

	if (!peer) {
		_leave("");
		return; /* resurrected */
	}

	/* clear all connections on this peer */
	rxrpc_conn_clearall(peer);

	BUG_ON(!list_empty(&peer->conn_active));
	BUG_ON(!list_empty(&peer->conn_graveyard));

	/* inform the application layer */
	if (peer->ops && peer->ops->discarding)
		peer->ops->discarding(peer);

	if (!list_empty(&peer->proc_link)) {
		down_write(&rxrpc_peers_sem);
		list_del(&peer->proc_link);
		up_write(&rxrpc_peers_sem);
	}

	__RXACCT(atomic_dec(&rxrpc_peer_count));
	kfree(peer);

	/* if the graveyard is now empty, wake up anyone waiting for that */
	if (atomic_dec_and_test(&trans->peer_count))
		wake_up(&trans->peer_gy_waitq);

	_leave(" [destroyed]");
} /* end rxrpc_peer_do_timeout() */

/*****************************************************************************/
/*
 * clear all peer records from a transport endpoint
 */
void rxrpc_peer_clearall(struct rxrpc_transport *trans)
{
	DECLARE_WAITQUEUE(myself, current);
	struct rxrpc_peer *peer;
	int err;

	_enter("%p", trans);

	/* there shouldn't be any active peers remaining */
	BUG_ON(!list_empty(&trans->peer_active));

	/* manually timeout all peers in the graveyard */
	spin_lock(&trans->peer_gylock);
	while (!list_empty(&trans->peer_graveyard)) {
		peer = list_entry(trans->peer_graveyard.next,
				  struct rxrpc_peer, link);
		_debug("Clearing peer %p", peer);
		err = rxrpc_krxtimod_del_timer(&peer->timeout);
		spin_unlock(&trans->peer_gylock);

		if (err == 0)
			rxrpc_peer_do_timeout(peer);

		spin_lock(&trans->peer_gylock);
	}
	spin_unlock(&trans->peer_gylock);

	/* wait for the peer graveyard to be completely cleared;
	 * rxrpc_peer_do_timeout() wakes peer_gy_waitq once the transport's
	 * peer count falls to zero */
	set_current_state(TASK_UNINTERRUPTIBLE);
	add_wait_queue(&trans->peer_gy_waitq, &myself);

	while (atomic_read(&trans->peer_count) != 0) {
		schedule();
		set_current_state(TASK_UNINTERRUPTIBLE);
	}

	remove_wait_queue(&trans->peer_gy_waitq, &myself);
	set_current_state(TASK_RUNNING);

	_leave("");
} /* end rxrpc_peer_clearall() */

/*****************************************************************************/
/*
 * calculate and cache the Round-Trip-Time for a message and its response
 */
void rxrpc_peer_calculate_rtt(struct rxrpc_peer *peer,
			      struct rxrpc_message *msg,
			      struct rxrpc_message *resp)
{
	unsigned long long rtt;
	int loop;

	_enter("%p,%p,%p", peer, msg, resp);

	/* calculate the latest RTT in microseconds */
	rtt = resp->stamp.tv_sec - msg->stamp.tv_sec;
	rtt *= 1000000UL;
	rtt += resp->stamp.tv_usec - msg->stamp.tv_usec;

	/* add to the fixed-size cache of recent samples; rtt_point wraps
	 * around, so the cache behaves as a ring buffer */
	peer->rtt_cache[peer->rtt_point] = rtt;
	peer->rtt_point++;
	peer->rtt_point %= RXRPC_RTT_CACHE_SIZE;

	if (peer->rtt_usage < RXRPC_RTT_CACHE_SIZE)
		peer->rtt_usage++;

	/* recalculate the cached RTT as the mean of the stored samples */
	rtt = 0;
	for (loop = peer->rtt_usage - 1; loop >= 0; loop--)
		rtt += peer->rtt_cache[loop];

	do_div(rtt, peer->rtt_usage);
	peer->rtt = rtt;

	_leave(" RTT=%lu.%03lums",
	       (unsigned long) (peer->rtt / 1000),
	       (unsigned long) (peer->rtt % 1000));

} /* end rxrpc_peer_calculate_rtt() */
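
/*
 * Worked example (illustrative, not from the original file): with two cached
 * samples of 30000us and 34000us, rtt_usage is 2, the summing loop gives
 * 64000, and do_div() leaves peer->rtt = 32000us, which the trace line above
 * reports as "RTT=32.000ms".
 */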