/*
 *  linux/net/sunrpc/xprt.c
 *
 *  This is a generic RPC call interface supporting congestion avoidance
 *  and asynchronous calls.
 *
 *  The interface works like this:
 *
 *  -   When a process places a call, it allocates a request slot if
 *      one is available. Otherwise, it sleeps on the backlog queue
 *      (xprt_reserve).
 *  -   Next, the caller puts together the RPC message, stuffs it into
 *      the request struct, and calls xprt_transmit().
 *  -   xprt_transmit sends the message and installs the caller on the
 *      transport's wait list. At the same time, it installs a timer that
 *      is run after the packet's timeout has expired.
 *  -   When a packet arrives, the data_ready handler walks the list of
 *      pending requests for that transport. If a matching XID is found, the
 *      caller is woken up, and the timer removed.
 *  -   When no reply arrives within the timeout interval, the timer is
 *      fired by the kernel and runs xprt_timer(). It either adjusts the
 *      timeout values (minor timeout) or wakes up the caller with a status
 *      of -ETIMEDOUT.
 *  -   When the caller receives a notification from RPC that a reply arrived,
 *      it should release the RPC slot, and process the reply.
 *      If the call timed out, it may choose to retry the operation by
 *      adjusting the initial timeout value, and simply calling rpc_call
 *      again.
 *
 *  Support for async RPC is done through a set of RPC-specific scheduling
 *  primitives that `transparently' work for processes as well as async
 *  tasks that rely on callbacks.
 *
 *  Copyright (C) 1995-1997, Olaf Kirch <okir@monad.swb.de>
 *
 *  Transport switch API copyright (C) 2005, Chuck Lever <cel@netapp.com>
 */

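/*
 * Illustrative request life cycle (editorial sketch, not compilable code;
 * argument marshalling and task scheduling live in clnt.c and sched.c):
 *
 *      xprt_reserve(task);             -- get a slot or sleep on the backlog
 *      xprt_prepare_transmit(task);    -- serialize access to the transport
 *      xprt_transmit(task);            -- send; queue for receive; arm timer
 *      (reply arrives: data_ready matches the XID and wakes the task)
 *      xprt_release(task);             -- return the slot, wake the backlog
 */
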
#include <linux/module.h>

#include <linux/types.h>
#include <linux/interrupt.h>
#include <linux/workqueue.h>
#include <linux/net.h>

#include <linux/sunrpc/clnt.h>
#include <linux/sunrpc/metrics.h>

/*
 * Local variables
 */

#ifdef RPC_DEBUG
# define RPCDBG_FACILITY        RPCDBG_XPRT
#endif

/*
 * Local functions
 */
static void     xprt_request_init(struct rpc_task *, struct rpc_xprt *);
static inline void      do_xprt_reserve(struct rpc_task *);
static void     xprt_connect_status(struct rpc_task *task);
static int      __xprt_get_cong(struct rpc_xprt *, struct rpc_task *);

/*
 * The transport code maintains an estimate on the maximum number of out-
 * standing RPC requests, using a smoothed version of the congestion
 * avoidance implemented in 44BSD. This is basically the Van Jacobson
 * congestion algorithm: If a retransmit occurs, the congestion window is
 * halved; otherwise, it is incremented by 1/cwnd when
 *
 *      -       a reply is received and
 *      -       a full number of requests are outstanding and
 *      -       the congestion window hasn't been updated recently.
 */
#define RPC_CWNDSHIFT           (8U)
#define RPC_CWNDSCALE           (1U << RPC_CWNDSHIFT)
#define RPC_INITCWND            RPC_CWNDSCALE
#define RPC_MAXCWND(xprt)       ((xprt)->max_reqs << RPC_CWNDSHIFT)

#define RPCXPRT_CONGESTED(xprt) ((xprt)->cong >= (xprt)->cwnd)

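/*
 * Worked example (editorial note): the window is kept in fixed point,
 * RPC_CWNDSCALE (256) units per request slot.  For example, with
 * max_reqs = 16, RPC_INITCWND = 256 admits one request in flight
 * initially, and RPC_MAXCWND(xprt) = 16 << 8 = 4096 caps the window at
 * all sixteen slots.  RPCXPRT_CONGESTED() then compares the outstanding
 * congestion "weight" (256 per sent-but-unanswered request) against
 * that window.
 */
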
/**
 * xprt_reserve_xprt - serialize write access to transports
 * @task: task that is requesting access to the transport
 *
 * This prevents mixing the payload of separate requests, and prevents
 * transport connects from colliding with writes.  No congestion control
 * is provided.
 */
int xprt_reserve_xprt(struct rpc_task *task)
{
        struct rpc_xprt *xprt = task->tk_xprt;
        struct rpc_rqst *req = task->tk_rqstp;

        if (test_and_set_bit(XPRT_LOCKED, &xprt->state)) {
                if (task == xprt->snd_task)
                        return 1;
                if (task == NULL)
                        return 0;
                goto out_sleep;
        }
        xprt->snd_task = task;
        if (req) {
                req->rq_bytes_sent = 0;
                req->rq_ntrans++;
        }
        return 1;

out_sleep:
        dprintk("RPC: %4d failed to lock transport %p\n",
                        task->tk_pid, xprt);
        task->tk_timeout = 0;
        task->tk_status = -EAGAIN;
        if (req && req->rq_ntrans)
                rpc_sleep_on(&xprt->resend, task, NULL, NULL);
        else
                rpc_sleep_on(&xprt->sending, task, NULL, NULL);
        return 0;
}

static void xprt_clear_locked(struct rpc_xprt *xprt)
{
        xprt->snd_task = NULL;
        if (!test_bit(XPRT_CLOSE_WAIT, &xprt->state) || xprt->shutdown) {
                smp_mb__before_clear_bit();
                clear_bit(XPRT_LOCKED, &xprt->state);
                smp_mb__after_clear_bit();
        } else
                schedule_work(&xprt->task_cleanup);
}

/**
 * xprt_reserve_xprt_cong - serialize write access to transports
 * @task: task that is requesting access to the transport
 *
 * Same as xprt_reserve_xprt, but Van Jacobson congestion control is
 * integrated into the decision of whether a request is allowed to be
 * woken up and given access to the transport.
 */
int xprt_reserve_xprt_cong(struct rpc_task *task)
{
        struct rpc_xprt *xprt = task->tk_xprt;
        struct rpc_rqst *req = task->tk_rqstp;

        if (test_and_set_bit(XPRT_LOCKED, &xprt->state)) {
                if (task == xprt->snd_task)
                        return 1;
                goto out_sleep;
        }
        if (__xprt_get_cong(xprt, task)) {
                xprt->snd_task = task;
                if (req) {
                        req->rq_bytes_sent = 0;
                        req->rq_ntrans++;
                }
                return 1;
        }
        xprt_clear_locked(xprt);
out_sleep:
        dprintk("RPC: %4d failed to lock transport %p\n", task->tk_pid, xprt);
        task->tk_timeout = 0;
        task->tk_status = -EAGAIN;
        if (req && req->rq_ntrans)
                rpc_sleep_on(&xprt->resend, task, NULL, NULL);
        else
                rpc_sleep_on(&xprt->sending, task, NULL, NULL);
        return 0;
}

static inline int xprt_lock_write(struct rpc_xprt *xprt, struct rpc_task *task)
{
        int retval;

        spin_lock_bh(&xprt->transport_lock);
        retval = xprt->ops->reserve_xprt(task);
        spin_unlock_bh(&xprt->transport_lock);
        return retval;
}

static void __xprt_lock_write_next(struct rpc_xprt *xprt)
{
        struct rpc_task *task;
        struct rpc_rqst *req;

        if (test_and_set_bit(XPRT_LOCKED, &xprt->state))
                return;

        task = rpc_wake_up_next(&xprt->resend);
        if (!task) {
                task = rpc_wake_up_next(&xprt->sending);
                if (!task)
                        goto out_unlock;
        }

        req = task->tk_rqstp;
        xprt->snd_task = task;
        if (req) {
                req->rq_bytes_sent = 0;
                req->rq_ntrans++;
        }
        return;

out_unlock:
        xprt_clear_locked(xprt);
}

static void __xprt_lock_write_next_cong(struct rpc_xprt *xprt)
{
        struct rpc_task *task;

        if (test_and_set_bit(XPRT_LOCKED, &xprt->state))
                return;
        if (RPCXPRT_CONGESTED(xprt))
                goto out_unlock;
        task = rpc_wake_up_next(&xprt->resend);
        if (!task) {
                task = rpc_wake_up_next(&xprt->sending);
                if (!task)
                        goto out_unlock;
        }
        if (__xprt_get_cong(xprt, task)) {
                struct rpc_rqst *req = task->tk_rqstp;
                xprt->snd_task = task;
                if (req) {
                        req->rq_bytes_sent = 0;
                        req->rq_ntrans++;
                }
                return;
        }
out_unlock:
        xprt_clear_locked(xprt);
}

/**
 * xprt_release_xprt - allow other requests to use a transport
 * @xprt: transport with other tasks potentially waiting
 * @task: task that is releasing access to the transport
 *
 * Note that "task" can be NULL.  No congestion control is provided.
 */
void xprt_release_xprt(struct rpc_xprt *xprt, struct rpc_task *task)
{
        if (xprt->snd_task == task) {
                xprt_clear_locked(xprt);
                __xprt_lock_write_next(xprt);
        }
}

/**
 * xprt_release_xprt_cong - allow other requests to use a transport
 * @xprt: transport with other tasks potentially waiting
 * @task: task that is releasing access to the transport
 *
 * Note that "task" can be NULL.  Another task is awoken to use the
 * transport if the transport's congestion window allows it.
 */
void xprt_release_xprt_cong(struct rpc_xprt *xprt, struct rpc_task *task)
{
        if (xprt->snd_task == task) {
                xprt_clear_locked(xprt);
                __xprt_lock_write_next_cong(xprt);
        }
}

static inline void xprt_release_write(struct rpc_xprt *xprt, struct rpc_task *task)
{
        spin_lock_bh(&xprt->transport_lock);
        xprt->ops->release_xprt(xprt, task);
        spin_unlock_bh(&xprt->transport_lock);
}

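/*
 * Editorial sketch: the reserve/release pairs above are invoked through
 * a transport's method table rather than called directly.  A hypothetical
 * transport might wire them up as follows; datagram transports typically
 * choose the congestion-controlled variants, stream transports the plain
 * ones:
 *
 *      static struct rpc_xprt_ops example_dgram_ops = {
 *              .reserve_xprt           = xprt_reserve_xprt_cong,
 *              .release_xprt           = xprt_release_xprt_cong,
 *              .release_request        = xprt_release_rqst_cong,
 *              ...
 *      };
 */
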
/*
 * Van Jacobson congestion avoidance. Check if the congestion window
 * overflowed. Put the task to sleep if this is the case.
 */
static int
__xprt_get_cong(struct rpc_xprt *xprt, struct rpc_task *task)
{
        struct rpc_rqst *req = task->tk_rqstp;

        if (req->rq_cong)
                return 1;
        dprintk("RPC: %4d xprt_cwnd_limited cong = %ld cwnd = %ld\n",
                        task->tk_pid, xprt->cong, xprt->cwnd);
        if (RPCXPRT_CONGESTED(xprt))
                return 0;
        req->rq_cong = 1;
        xprt->cong += RPC_CWNDSCALE;
        return 1;
}

/*
 * Adjust the congestion window, and wake up the next task
 * that has been sleeping due to congestion
 */
static void
__xprt_put_cong(struct rpc_xprt *xprt, struct rpc_rqst *req)
{
        if (!req->rq_cong)
                return;
        req->rq_cong = 0;
        xprt->cong -= RPC_CWNDSCALE;
        __xprt_lock_write_next_cong(xprt);
}

/**
 * xprt_release_rqst_cong - housekeeping when request is complete
 * @task: RPC request that recently completed
 *
 * Useful for transports that require congestion control.
 */
void xprt_release_rqst_cong(struct rpc_task *task)
{
        __xprt_put_cong(task->tk_xprt, task->tk_rqstp);
}

/**
 * xprt_adjust_cwnd - adjust transport congestion window
 * @task: recently completed RPC request used to adjust window
 * @result: result code of completed RPC request
 *
 * We use a time-smoothed congestion estimator to avoid heavy oscillation.
 */
void xprt_adjust_cwnd(struct rpc_task *task, int result)
{
        struct rpc_rqst *req = task->tk_rqstp;
        struct rpc_xprt *xprt = task->tk_xprt;
        unsigned long cwnd = xprt->cwnd;

        if (result >= 0 && cwnd <= xprt->cong) {
                /* The (cwnd >> 1) term makes sure
                 * the result gets rounded properly. */
                cwnd += (RPC_CWNDSCALE * RPC_CWNDSCALE + (cwnd >> 1)) / cwnd;
                if (cwnd > RPC_MAXCWND(xprt))
                        cwnd = RPC_MAXCWND(xprt);
                __xprt_lock_write_next_cong(xprt);
        } else if (result == -ETIMEDOUT) {
                cwnd >>= 1;
                if (cwnd < RPC_CWNDSCALE)
                        cwnd = RPC_CWNDSCALE;
        }
        dprintk("RPC:      cong %ld, cwnd was %ld, now %ld\n",
                        xprt->cong, xprt->cwnd, cwnd);
        xprt->cwnd = cwnd;
        __xprt_put_cong(xprt, req);
}

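/*
 * Worked example (editorial note): with RPC_CWNDSCALE = 256, the additive
 * increase above amounts to roughly one slot per window of replies.  At
 * cwnd = 256 (one slot) a reply adds (256*256 + 128)/256 = 256, doubling
 * the window; at cwnd = 4096 (sixteen slots) a reply adds only
 * (65536 + 2048)/4096 = 16, i.e. 1/16 of a slot.  A timeout halves cwnd,
 * but never below one slot.
 */
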
/**
 * xprt_wake_pending_tasks - wake all tasks on a transport's pending queue
 * @xprt: transport with waiting tasks
 * @status: result code to plant in each task before waking it
 *
 */
void xprt_wake_pending_tasks(struct rpc_xprt *xprt, int status)
{
        if (status < 0)
                rpc_wake_up_status(&xprt->pending, status);
        else
                rpc_wake_up(&xprt->pending);
}

/**
 * xprt_wait_for_buffer_space - wait for transport output buffer to clear
 * @task: task to be put to sleep
 *
 */
void xprt_wait_for_buffer_space(struct rpc_task *task)
{
        struct rpc_rqst *req = task->tk_rqstp;
        struct rpc_xprt *xprt = req->rq_xprt;

        task->tk_timeout = req->rq_timeout;
        rpc_sleep_on(&xprt->pending, task, NULL, NULL);
}

/**
 * xprt_write_space - wake the task waiting for transport output buffer space
 * @xprt: transport with waiting tasks
 *
 * Can be called in a soft IRQ context, so xprt_write_space never sleeps.
 */
void xprt_write_space(struct rpc_xprt *xprt)
{
        if (unlikely(xprt->shutdown))
                return;

        spin_lock_bh(&xprt->transport_lock);
        if (xprt->snd_task) {
                dprintk("RPC:      write space: waking waiting task on xprt %p\n",
                                xprt);
                rpc_wake_up_task(xprt->snd_task);
        }
        spin_unlock_bh(&xprt->transport_lock);
}

/**
 * xprt_set_retrans_timeout_def - set a request's retransmit timeout
 * @task: task whose timeout is to be set
 *
 * Set a request's retransmit timeout based on the transport's
 * default timeout parameters.  Used by transports that don't adjust
 * the retransmit timeout based on round-trip time estimation.
 */
void xprt_set_retrans_timeout_def(struct rpc_task *task)
{
        task->tk_timeout = task->tk_rqstp->rq_timeout;
}

/**
 * xprt_set_retrans_timeout_rtt - set a request's retransmit timeout
 * @task: task whose timeout is to be set
 *
 * Set a request's retransmit timeout using the RTT estimator.
 */
void xprt_set_retrans_timeout_rtt(struct rpc_task *task)
{
        int timer = task->tk_msg.rpc_proc->p_timer;
        struct rpc_rtt *rtt = task->tk_client->cl_rtt;
        struct rpc_rqst *req = task->tk_rqstp;
        unsigned long max_timeout = req->rq_xprt->timeout.to_maxval;

        task->tk_timeout = rpc_calc_rto(rtt, timer);
        task->tk_timeout <<= rpc_ntimeo(rtt, timer) + req->rq_retries;
        if (task->tk_timeout > max_timeout || task->tk_timeout == 0)
                task->tk_timeout = max_timeout;
}

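/*
 * Worked example (editorial note): rpc_calc_rto() supplies the smoothed
 * base estimate, which is then doubled once per backoff recorded for this
 * timer class (rpc_ntimeo) and once per retransmission of this request
 * (rq_retries).  If the base estimate were 200ms and the request had
 * already been retransmitted twice, the next timeout would be
 * 200ms << 2 = 800ms, clamped to the transport's to_maxval.
 */
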
static void xprt_reset_majortimeo(struct rpc_rqst *req)
{
        struct rpc_timeout *to = &req->rq_xprt->timeout;

        req->rq_majortimeo = req->rq_timeout;
        if (to->to_exponential)
                req->rq_majortimeo <<= to->to_retries;
        else
                req->rq_majortimeo += to->to_increment * to->to_retries;
        if (req->rq_majortimeo > to->to_maxval || req->rq_majortimeo == 0)
                req->rq_majortimeo = to->to_maxval;
        req->rq_majortimeo += jiffies;
}

/**
 * xprt_adjust_timeout - adjust timeout values for next retransmit
 * @req: RPC request containing parameters to use for the adjustment
 *
 */
int xprt_adjust_timeout(struct rpc_rqst *req)
{
        struct rpc_xprt *xprt = req->rq_xprt;
        struct rpc_timeout *to = &xprt->timeout;
        int status = 0;

        if (time_before(jiffies, req->rq_majortimeo)) {
                if (to->to_exponential)
                        req->rq_timeout <<= 1;
                else
                        req->rq_timeout += to->to_increment;
                if (to->to_maxval && req->rq_timeout >= to->to_maxval)
                        req->rq_timeout = to->to_maxval;
                req->rq_retries++;
                pprintk("RPC: %lu retrans\n", jiffies);
        } else {
                req->rq_timeout = to->to_initval;
                req->rq_retries = 0;
                xprt_reset_majortimeo(req);
                /* Reset the RTT counters == "slow start" */
                spin_lock_bh(&xprt->transport_lock);
                rpc_init_rtt(req->rq_task->tk_client->cl_rtt, to->to_initval);
                spin_unlock_bh(&xprt->transport_lock);
                pprintk("RPC: %lu timeout\n", jiffies);
                status = -ETIMEDOUT;
        }

        if (req->rq_timeout == 0) {
                printk(KERN_WARNING "xprt_adjust_timeout: rq_timeout = 0!\n");
                req->rq_timeout = 5 * HZ;
        }
        return status;
}

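/*
 * Worked example (editorial note): with the linear parameters
 * to_initval = 5s, to_increment = 5s, to_retries = 2, the minor timeouts
 * run 5s, 10s, 15s, and rq_majortimeo sits 5s + 5s*2 = 15s past the
 * first transmission.  Until that point each expiry merely bumps
 * rq_timeout (a minor timeout); after it, the timeout is reset to
 * to_initval, the RTT estimator is reinitialized, and -ETIMEDOUT is
 * returned (a major timeout).
 */
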
static void xprt_autoclose(struct work_struct *work)
{
        struct rpc_xprt *xprt =
                container_of(work, struct rpc_xprt, task_cleanup);

        xprt_disconnect(xprt);
        xprt->ops->close(xprt);
        xprt_release_write(xprt, NULL);
}

/**
 * xprt_disconnect - mark a transport as disconnected
 * @xprt: transport to flag for disconnect
 *
 */
void xprt_disconnect(struct rpc_xprt *xprt)
{
        dprintk("RPC:      disconnected transport %p\n", xprt);
        spin_lock_bh(&xprt->transport_lock);
        xprt_clear_connected(xprt);
        xprt_wake_pending_tasks(xprt, -ENOTCONN);
        spin_unlock_bh(&xprt->transport_lock);
}

static void
xprt_init_autodisconnect(unsigned long data)
{
        struct rpc_xprt *xprt = (struct rpc_xprt *)data;

        spin_lock(&xprt->transport_lock);
        if (!list_empty(&xprt->recv) || xprt->shutdown)
                goto out_abort;
        if (test_and_set_bit(XPRT_LOCKED, &xprt->state))
                goto out_abort;
        spin_unlock(&xprt->transport_lock);
        if (xprt_connecting(xprt))
                xprt_release_write(xprt, NULL);
        else
                schedule_work(&xprt->task_cleanup);
        return;
out_abort:
        spin_unlock(&xprt->transport_lock);
}

/**
 * xprt_connect - schedule a transport connect operation
 * @task: RPC task that is requesting the connect
 *
 */
void xprt_connect(struct rpc_task *task)
{
        struct rpc_xprt *xprt = task->tk_xprt;

        dprintk("RPC: %4d xprt_connect xprt %p %s connected\n", task->tk_pid,
                        xprt, (xprt_connected(xprt) ? "is" : "is not"));

        if (!xprt_bound(xprt)) {
                task->tk_status = -EIO;
                return;
        }
        if (!xprt_lock_write(xprt, task))
                return;
        if (xprt_connected(xprt))
                xprt_release_write(xprt, task);
        else {
                if (task->tk_rqstp)
                        task->tk_rqstp->rq_bytes_sent = 0;

                task->tk_timeout = xprt->connect_timeout;
                rpc_sleep_on(&xprt->pending, task, xprt_connect_status, NULL);
                xprt->stat.connect_start = jiffies;
                xprt->ops->connect(task);
        }
        return;
}

static void xprt_connect_status(struct rpc_task *task)
{
        struct rpc_xprt *xprt = task->tk_xprt;

        if (task->tk_status >= 0) {
                xprt->stat.connect_count++;
                xprt->stat.connect_time += (long)jiffies - xprt->stat.connect_start;
                dprintk("RPC: %4d xprt_connect_status: connection established\n",
                                task->tk_pid);
                return;
        }

        switch (task->tk_status) {
        case -ECONNREFUSED:
        case -ECONNRESET:
                dprintk("RPC: %4d xprt_connect_status: server %s refused connection\n",
                                task->tk_pid, task->tk_client->cl_server);
                break;
        case -ENOTCONN:
                dprintk("RPC: %4d xprt_connect_status: connection broken\n",
                                task->tk_pid);
                break;
        case -ETIMEDOUT:
                dprintk("RPC: %4d xprt_connect_status: connect attempt timed out\n",
                                task->tk_pid);
                break;
        default:
                dprintk("RPC: %4d xprt_connect_status: error %d connecting to server %s\n",
                                task->tk_pid, -task->tk_status, task->tk_client->cl_server);
                xprt_release_write(xprt, task);
                task->tk_status = -EIO;
        }
}

/**
 * xprt_lookup_rqst - find an RPC request corresponding to an XID
 * @xprt: transport on which the original request was transmitted
 * @xid: RPC XID of incoming reply
 *
 */
struct rpc_rqst *xprt_lookup_rqst(struct rpc_xprt *xprt, __be32 xid)
{
        struct list_head *pos;

        list_for_each(pos, &xprt->recv) {
                struct rpc_rqst *entry = list_entry(pos, struct rpc_rqst, rq_list);
                if (entry->rq_xid == xid)
                        return entry;
        }
        xprt->stat.bad_xids++;
        return NULL;
}

/**
 * xprt_update_rtt - update an RPC client's RTT state after receiving a reply
 * @task: RPC request that recently completed
 *
 */
void xprt_update_rtt(struct rpc_task *task)
{
        struct rpc_rqst *req = task->tk_rqstp;
        struct rpc_rtt *rtt = task->tk_client->cl_rtt;
        unsigned timer = task->tk_msg.rpc_proc->p_timer;

        if (timer) {
                if (req->rq_ntrans == 1)
                        rpc_update_rtt(rtt, timer,
                                        (long)jiffies - req->rq_xtime);
                rpc_set_timeo(rtt, timer, req->rq_ntrans - 1);
        }
}

/**
 * xprt_complete_rqst - called when reply processing is complete
 * @task: RPC request that recently completed
 * @copied: actual number of bytes received from the transport
 *
 * Caller holds transport lock.
 */
void xprt_complete_rqst(struct rpc_task *task, int copied)
{
        struct rpc_rqst *req = task->tk_rqstp;

        dprintk("RPC: %5u xid %08x complete (%d bytes received)\n",
                        task->tk_pid, ntohl(req->rq_xid), copied);

        task->tk_xprt->stat.recvs++;
        task->tk_rtt = (long)jiffies - req->rq_xtime;

        list_del_init(&req->rq_list);
        /* Ensure all writes are done before we update req->rq_received */
        smp_wmb();
        req->rq_received = req->rq_private_buf.len = copied;
        rpc_wake_up_task(task);
}

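/*
 * Editorial sketch of the receive path: a transport's data_ready callback
 * is expected to match a reply to its request roughly like this (copying
 * the reply into rq_private_buf is transport-specific and elided):
 *
 *      spin_lock(&xprt->transport_lock);
 *      req = xprt_lookup_rqst(xprt, xid);
 *      if (req) {
 *              xprt_update_rtt(req->rq_task);
 *              ... copy 'copied' bytes of reply data ...
 *              xprt_complete_rqst(req->rq_task, copied);
 *      }
 *      spin_unlock(&xprt->transport_lock);
 */
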
static void xprt_timer(struct rpc_task *task)
{
        struct rpc_rqst *req = task->tk_rqstp;
        struct rpc_xprt *xprt = req->rq_xprt;

        dprintk("RPC: %4d xprt_timer\n", task->tk_pid);

        spin_lock(&xprt->transport_lock);
        if (!req->rq_received) {
                if (xprt->ops->timer)
                        xprt->ops->timer(task);
                task->tk_status = -ETIMEDOUT;
        }
        task->tk_timeout = 0;
        rpc_wake_up_task(task);
        spin_unlock(&xprt->transport_lock);
}

/**
 * xprt_prepare_transmit - reserve the transport before sending a request
 * @task: RPC task about to send a request
 *
 */
int xprt_prepare_transmit(struct rpc_task *task)
{
        struct rpc_rqst *req = task->tk_rqstp;
        struct rpc_xprt *xprt = req->rq_xprt;
        int err = 0;

        dprintk("RPC: %4d xprt_prepare_transmit\n", task->tk_pid);

        spin_lock_bh(&xprt->transport_lock);
        if (req->rq_received && !req->rq_bytes_sent) {
                err = req->rq_received;
                goto out_unlock;
        }
        if (!xprt->ops->reserve_xprt(task)) {
                err = -EAGAIN;
                goto out_unlock;
        }

        if (!xprt_connected(xprt)) {
                err = -ENOTCONN;
                goto out_unlock;
        }
out_unlock:
        spin_unlock_bh(&xprt->transport_lock);
        return err;
}

void xprt_end_transmit(struct rpc_task *task)
{
        xprt_release_write(task->tk_xprt, task);
}

/**
 * xprt_transmit - send an RPC request on a transport
 * @task: controlling RPC task
 *
 * We have to copy the iovec because sendmsg fiddles with its contents.
 */
void xprt_transmit(struct rpc_task *task)
{
        struct rpc_rqst *req = task->tk_rqstp;
        struct rpc_xprt *xprt = req->rq_xprt;
        int status;

        dprintk("RPC: %4d xprt_transmit(%u)\n", task->tk_pid, req->rq_slen);

        if (!req->rq_received) {
                if (list_empty(&req->rq_list)) {
                        spin_lock_bh(&xprt->transport_lock);
                        /* Update the softirq receive buffer */
                        memcpy(&req->rq_private_buf, &req->rq_rcv_buf,
                                        sizeof(req->rq_private_buf));
                        /* Add request to the receive list */
                        list_add_tail(&req->rq_list, &xprt->recv);
                        spin_unlock_bh(&xprt->transport_lock);
                        xprt_reset_majortimeo(req);
                        /* Turn off autodisconnect */
                        del_singleshot_timer_sync(&xprt->timer);
                }
        } else if (!req->rq_bytes_sent)
                return;

        status = xprt->ops->send_request(task);
        if (status == 0) {
                dprintk("RPC: %4d xmit complete\n", task->tk_pid);
                spin_lock_bh(&xprt->transport_lock);

                xprt->ops->set_retrans_timeout(task);

                xprt->stat.sends++;
                xprt->stat.req_u += xprt->stat.sends - xprt->stat.recvs;
                xprt->stat.bklog_u += xprt->backlog.qlen;

                /* Don't race with disconnect */
                if (!xprt_connected(xprt))
                        task->tk_status = -ENOTCONN;
                else if (!req->rq_received)
                        rpc_sleep_on(&xprt->pending, task, NULL, xprt_timer);
                spin_unlock_bh(&xprt->transport_lock);
                return;
        }

        /* Note: at this point, task->tk_sleeping has not yet been set,
         *       hence there is no danger of the waking up task being put on
         *       schedq, and being picked up by a parallel run of rpciod().
         */
        task->tk_status = status;
        if (status == -ECONNREFUSED)
                rpc_sleep_on(&xprt->sending, task, NULL, NULL);
}

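/*
 * Editorial sketch of the send side as a caller (the RPC client state
 * machine, for instance) might drive it, error handling elided:
 *
 *      if (xprt_prepare_transmit(task) == 0) {
 *              xprt_transmit(task);     -- queue on xprt->recv, send, arm timer
 *              xprt_end_transmit(task); -- drop the write lock
 *      }
 *
 * On success the task then sleeps on xprt->pending until the receive path
 * matches the reply's XID or xprt_timer() fires.
 */
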
static inline void do_xprt_reserve(struct rpc_task *task)
{
        struct rpc_xprt *xprt = task->tk_xprt;

        task->tk_status = 0;
        if (task->tk_rqstp)
                return;
        if (!list_empty(&xprt->free)) {
                struct rpc_rqst *req = list_entry(xprt->free.next, struct rpc_rqst, rq_list);
                list_del_init(&req->rq_list);
                task->tk_rqstp = req;
                xprt_request_init(task, xprt);
                return;
        }
        dprintk("RPC:      waiting for request slot\n");
        task->tk_status = -EAGAIN;
        task->tk_timeout = 0;
        rpc_sleep_on(&xprt->backlog, task, NULL, NULL);
}

/**
 * xprt_reserve - allocate an RPC request slot
 * @task: RPC task requesting a slot allocation
 *
 * If no more slots are available, place the task on the transport's
 * backlog queue.
 */
void xprt_reserve(struct rpc_task *task)
{
        struct rpc_xprt *xprt = task->tk_xprt;

        task->tk_status = -EIO;
        spin_lock(&xprt->reserve_lock);
        do_xprt_reserve(task);
        spin_unlock(&xprt->reserve_lock);
}

static inline __be32 xprt_alloc_xid(struct rpc_xprt *xprt)
{
        return xprt->xid++;
}

static inline void xprt_init_xid(struct rpc_xprt *xprt)
{
        xprt->xid = net_random();
}

static void xprt_request_init(struct rpc_task *task, struct rpc_xprt *xprt)
{
        struct rpc_rqst *req = task->tk_rqstp;

        req->rq_timeout = xprt->timeout.to_initval;
        req->rq_task    = task;
        req->rq_xprt    = xprt;
        req->rq_buffer  = NULL;
        req->rq_bufsize = 0;
        req->rq_xid     = xprt_alloc_xid(xprt);
        req->rq_release_snd_buf = NULL;
        xprt_reset_majortimeo(req);
        dprintk("RPC: %4d reserved req %p xid %08x\n", task->tk_pid,
                        req, ntohl(req->rq_xid));
}

/**
 * xprt_release - release an RPC request slot
 * @task: task which is finished with the slot
 *
 */
void xprt_release(struct rpc_task *task)
{
        struct rpc_xprt *xprt = task->tk_xprt;
        struct rpc_rqst *req;

        if (!(req = task->tk_rqstp))
                return;
        rpc_count_iostats(task);
        spin_lock_bh(&xprt->transport_lock);
        xprt->ops->release_xprt(xprt, task);
        if (xprt->ops->release_request)
                xprt->ops->release_request(task);
        if (!list_empty(&req->rq_list))
                list_del(&req->rq_list);
        xprt->last_used = jiffies;
        if (list_empty(&xprt->recv))
                mod_timer(&xprt->timer,
                                xprt->last_used + xprt->idle_timeout);
        spin_unlock_bh(&xprt->transport_lock);
        xprt->ops->buf_free(task);
        task->tk_rqstp = NULL;
        if (req->rq_release_snd_buf)
                req->rq_release_snd_buf(req);
        memset(req, 0, sizeof(*req));   /* mark unused */

        dprintk("RPC: %4d release request %p\n", task->tk_pid, req);

        spin_lock(&xprt->reserve_lock);
        list_add(&req->rq_list, &xprt->free);
        rpc_wake_up_next(&xprt->backlog);
        spin_unlock(&xprt->reserve_lock);
}

/**
 * xprt_set_timeout - set constant RPC timeout
 * @to: RPC timeout parameters to set up
 * @retr: number of retries
 * @incr: amount of increase after each retry
 *
 */
void xprt_set_timeout(struct rpc_timeout *to, unsigned int retr, unsigned long incr)
{
        to->to_initval = to->to_increment = incr;
        to->to_maxval  = to->to_initval + (incr * retr);
        to->to_retries = retr;
        to->to_exponential = 0;
}

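/*
 * Usage example (editorial note): xprt_set_timeout(&to, 5, 2 * HZ) yields
 * to_initval = to_increment = 2s, to_retries = 5 and to_maxval = 12s,
 * i.e. a linear backoff of 2s, 4s, 6s, ... capped at 12s.  Note the
 * chained assignment above: to_initval and to_increment both receive incr.
 */
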
/**
 * xprt_create_transport - create an RPC transport
 * @proto: requested transport protocol
 * @ap: remote peer address
 * @size: length of address
 * @to: timeout parameters
 *
 */
struct rpc_xprt *xprt_create_transport(int proto, struct sockaddr *ap, size_t size, struct rpc_timeout *to)
{
        int result;
        struct rpc_xprt *xprt;
        struct rpc_rqst *req;

        if ((xprt = kzalloc(sizeof(struct rpc_xprt), GFP_KERNEL)) == NULL) {
                dprintk("RPC:      xprt_create_transport: no memory\n");
                return ERR_PTR(-ENOMEM);
        }
        if (size <= sizeof(xprt->addr)) {
                memcpy(&xprt->addr, ap, size);
                xprt->addrlen = size;
        } else {
                kfree(xprt);
                dprintk("RPC:      xprt_create_transport: address too large\n");
                return ERR_PTR(-EBADF);
        }

        switch (proto) {
        case IPPROTO_UDP:
                result = xs_setup_udp(xprt, to);
                break;
        case IPPROTO_TCP:
                result = xs_setup_tcp(xprt, to);
                break;
        default:
                printk(KERN_ERR "RPC: unrecognized transport protocol: %d\n",
                                proto);
                kfree(xprt);    /* don't leak xprt on the unknown-protocol path */
                return ERR_PTR(-EIO);
        }
        if (result) {
                kfree(xprt);
                dprintk("RPC:      xprt_create_transport: failed, %d\n", result);
                return ERR_PTR(result);
        }

        kref_init(&xprt->kref);
        spin_lock_init(&xprt->transport_lock);
        spin_lock_init(&xprt->reserve_lock);

        INIT_LIST_HEAD(&xprt->free);
        INIT_LIST_HEAD(&xprt->recv);
        INIT_WORK(&xprt->task_cleanup, xprt_autoclose);
        init_timer(&xprt->timer);
        xprt->timer.function = xprt_init_autodisconnect;
        xprt->timer.data = (unsigned long) xprt;
        xprt->last_used = jiffies;
        xprt->cwnd = RPC_INITCWND;

        rpc_init_wait_queue(&xprt->binding, "xprt_binding");
        rpc_init_wait_queue(&xprt->pending, "xprt_pending");
        rpc_init_wait_queue(&xprt->sending, "xprt_sending");
        rpc_init_wait_queue(&xprt->resend, "xprt_resend");
        rpc_init_priority_wait_queue(&xprt->backlog, "xprt_backlog");

        /* initialize free list */
        for (req = &xprt->slot[xprt->max_reqs-1]; req >= &xprt->slot[0]; req--)
                list_add(&req->rq_list, &xprt->free);

        xprt_init_xid(xprt);

        dprintk("RPC:      created transport %p with %u slots\n", xprt,
                        xprt->max_reqs);

        return xprt;
}

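/*
 * Usage sketch (editorial note; 'addr' and 'addrlen' are hypothetical
 * caller state, error handling abbreviated):
 *
 *      struct rpc_timeout to;
 *      struct rpc_xprt *xprt;
 *
 *      xprt_set_timeout(&to, 5, 2 * HZ);
 *      xprt = xprt_create_transport(IPPROTO_UDP, addr, addrlen, &to);
 *      if (IS_ERR(xprt))
 *              return PTR_ERR(xprt);
 *      ...
 *      xprt_put(xprt);         -- drops the initial kref, destroying the xprt
 */
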
/**
 * xprt_destroy - destroy an RPC transport, killing off all requests.
 * @kref: kref for the transport to destroy
 *
 */
static void xprt_destroy(struct kref *kref)
{
        struct rpc_xprt *xprt = container_of(kref, struct rpc_xprt, kref);

        dprintk("RPC:      destroying transport %p\n", xprt);
        xprt->shutdown = 1;
        del_timer_sync(&xprt->timer);
        xprt->ops->destroy(xprt);
        kfree(xprt);
}

/**
 * xprt_put - release a reference to an RPC transport.
 * @xprt: pointer to the transport
 *
 */
void xprt_put(struct rpc_xprt *xprt)
{
        kref_put(&xprt->kref, xprt_destroy);
}

/**
 * xprt_get - return a reference to an RPC transport.
 * @xprt: pointer to the transport
 *
 */
struct rpc_xprt *xprt_get(struct rpc_xprt *xprt)
{
        kref_get(&xprt->kref);
        return xprt;
}