/*
 *  linux/net/sunrpc/xprt.c
 *
 *  This is a generic RPC call interface supporting congestion avoidance,
 *  and asynchronous calls.
 *
 *  The interface works like this:
 *
 *  -	When a process places a call, it allocates a request slot if
 *	one is available. Otherwise, it sleeps on the backlog queue
 *	(xprt_reserve).
 *  -	Next, the caller puts together the RPC message, stuffs it into
 *	the request struct, and calls xprt_transmit().
 *  -	xprt_transmit sends the message and installs the caller on the
 *	transport's wait list. At the same time, it installs a timer that
 *	is run after the packet's timeout has expired.
 *  -	When a packet arrives, the data_ready handler walks the list of
 *	pending requests for that transport. If a matching XID is found, the
 *	caller is woken up, and the timer removed.
 *  -	When no reply arrives within the timeout interval, the timer is
 *	fired by the kernel and runs xprt_timer(). It either adjusts the
 *	timeout values (minor timeout) or wakes up the caller with a status
 *	of -ETIMEDOUT (major timeout).
 *  -	When the caller receives a notification from RPC that a reply arrived,
 *	it should release the RPC slot, and process the reply.
 *	If the call timed out, it may choose to retry the operation by
 *	adjusting the initial timeout value, and simply calling rpc_call
 *	again.
 *
 *  Support for async RPC is done through a set of RPC-specific scheduling
 *  primitives that `transparently' work for processes as well as async
 *  tasks that rely on callbacks.
 *
 *  Copyright (C) 1995-1997, Olaf Kirch <okir@monad.swb.de>
 *
 *  Transport switch API copyright (C) 2005, Chuck Lever <cel@netapp.com>
 */
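/*
 * A minimal sketch of the flow described above, using only the entry
 * points defined in this file (error handling and the rpc_task state
 * machine in clnt.c are elided; real callers never drive these
 * functions directly like this):
 *
 *	xprt_reserve(task);		// grab a request slot, or sleep
 *					// on xprt->backlog
 *	// ... encode the RPC message into task->tk_rqstp ...
 *	if (xprt_prepare_transmit(task) == 0)
 *		xprt_transmit(task);	// send, then wait on xprt->pending
 *	// ... data_ready matches the XID, xprt_complete_rqst() wakes us ...
 *	xprt_release(task);		// return the slot to xprt->free
 */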
#include <linux/module.h>

#include <linux/types.h>
#include <linux/interrupt.h>
#include <linux/workqueue.h>
#include <linux/random.h>

#include <linux/sunrpc/clnt.h>
#ifdef RPC_DEBUG
# undef  RPC_DEBUG_DATA
# define RPCDBG_FACILITY	RPCDBG_XPRT
#endif
/*
 * Local functions
 */
static void	xprt_request_init(struct rpc_task *, struct rpc_xprt *);
static inline void	do_xprt_reserve(struct rpc_task *);
static void	xprt_connect_status(struct rpc_task *task);
static int	__xprt_get_cong(struct rpc_xprt *, struct rpc_task *);
static int	xprt_clear_backlog(struct rpc_xprt *xprt);
/*
 * Serialize write access to transports, in order to prevent different
 * requests from interfering with each other.
 * Also prevents transport connects from colliding with writes.
 */
static int
__xprt_lock_write(struct rpc_xprt *xprt, struct rpc_task *task)
{
	struct rpc_rqst *req = task->tk_rqstp;

	if (test_and_set_bit(XPRT_LOCKED, &xprt->state)) {
		if (task == xprt->snd_task)
			return 1;
		goto out_sleep;
	}
	if (xprt->nocong || __xprt_get_cong(xprt, task)) {
		xprt->snd_task = task;
		if (req) {
			req->rq_bytes_sent = 0;
			req->rq_ntrans++;
		}
		return 1;
	}
	smp_mb__before_clear_bit();
	clear_bit(XPRT_LOCKED, &xprt->state);
	smp_mb__after_clear_bit();
out_sleep:
	dprintk("RPC: %4d failed to lock transport %p\n", task->tk_pid, xprt);
	task->tk_timeout = 0;
	task->tk_status = -EAGAIN;
	if (req && req->rq_ntrans)
		rpc_sleep_on(&xprt->resend, task, NULL, NULL);
	else
		rpc_sleep_on(&xprt->sending, task, NULL, NULL);
	return 0;
}
static inline int
xprt_lock_write(struct rpc_xprt *xprt, struct rpc_task *task)
{
	int retval;

	spin_lock_bh(&xprt->transport_lock);
	retval = __xprt_lock_write(xprt, task);
	spin_unlock_bh(&xprt->transport_lock);
	return retval;
}
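/*
 * A note on the locking convention visible here: transport_lock is
 * taken with the _bh variants in process context because the receive
 * and timer paths take the same lock from softirq context; compare
 * xprt_timer() below, which can use plain spin_lock() since it already
 * runs with bottom halves disabled.
 */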
static void
__xprt_lock_write_next(struct rpc_xprt *xprt)
{
	struct rpc_task *task;

	if (test_and_set_bit(XPRT_LOCKED, &xprt->state))
		return;
	if (!xprt->nocong && RPCXPRT_CONGESTED(xprt))
		goto out_unlock;
	task = rpc_wake_up_next(&xprt->resend);
	if (!task) {
		task = rpc_wake_up_next(&xprt->sending);
		if (!task)
			goto out_unlock;
	}
	if (xprt->nocong || __xprt_get_cong(xprt, task)) {
		struct rpc_rqst *req = task->tk_rqstp;
		xprt->snd_task = task;
		if (req) {
			req->rq_bytes_sent = 0;
			req->rq_ntrans++;
		}
		return;
	}
out_unlock:
	smp_mb__before_clear_bit();
	clear_bit(XPRT_LOCKED, &xprt->state);
	smp_mb__after_clear_bit();
}
/*
 * Releases the transport for use by other requests.
 */
static void
__xprt_release_write(struct rpc_xprt *xprt, struct rpc_task *task)
{
	if (xprt->snd_task == task) {
		xprt->snd_task = NULL;
		smp_mb__before_clear_bit();
		clear_bit(XPRT_LOCKED, &xprt->state);
		smp_mb__after_clear_bit();
		__xprt_lock_write_next(xprt);
	}
}
static inline void
xprt_release_write(struct rpc_xprt *xprt, struct rpc_task *task)
{
	spin_lock_bh(&xprt->transport_lock);
	__xprt_release_write(xprt, task);
	spin_unlock_bh(&xprt->transport_lock);
}
/*
 * Van Jacobson congestion avoidance. Check if the congestion window
 * overflowed. Put the task to sleep if this is the case.
 */
static int
__xprt_get_cong(struct rpc_xprt *xprt, struct rpc_task *task)
{
	struct rpc_rqst *req = task->tk_rqstp;

	if (req->rq_cong)
		return 1;
	dprintk("RPC: %4d xprt_cwnd_limited cong = %ld cwnd = %ld\n",
			task->tk_pid, xprt->cong, xprt->cwnd);
	if (RPCXPRT_CONGESTED(xprt))
		return 0;
	req->rq_cong = 1;
	xprt->cong += RPC_CWNDSCALE;
	return 1;
}
/*
 * Adjust the congestion window, and wake up the next task
 * that has been sleeping due to congestion
 */
static void
__xprt_put_cong(struct rpc_xprt *xprt, struct rpc_rqst *req)
{
	if (!req->rq_cong)
		return;
	req->rq_cong = 0;
	xprt->cong -= RPC_CWNDSCALE;
	__xprt_lock_write_next(xprt);
}
/*
 * Adjust RPC congestion window.
 * We use a time-smoothed congestion estimator to avoid heavy oscillation.
 */
static void
xprt_adjust_cwnd(struct rpc_xprt *xprt, int result)
{
	unsigned long cwnd = xprt->cwnd;

	if (result >= 0 && cwnd <= xprt->cong) {
		/* The (cwnd >> 1) term makes sure
		 * the result gets rounded properly. */
		cwnd += (RPC_CWNDSCALE * RPC_CWNDSCALE + (cwnd >> 1)) / cwnd;
		if (cwnd > RPC_MAXCWND(xprt))
			cwnd = RPC_MAXCWND(xprt);
		__xprt_lock_write_next(xprt);
	} else if (result == -ETIMEDOUT) {
		cwnd >>= 1;
		if (cwnd < RPC_CWNDSCALE)
			cwnd = RPC_CWNDSCALE;
	}
	dprintk("RPC: cong %ld, cwnd was %ld, now %ld\n",
			xprt->cong, xprt->cwnd, cwnd);
	xprt->cwnd = cwnd;
}
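/*
 * Worked example of the estimator above (illustrative numbers,
 * assuming the usual RPC_CWNDSCALE of 1 << 8 = 256): with cwnd = 1024,
 * i.e. four requests in flight, each good reply adds roughly
 * (256 * 256 + 512) / 1024 = 64, so it takes about four replies to
 * grow the window by one full request (additive increase), while a
 * single -ETIMEDOUT halves cwnd (multiplicative decrease); this is the
 * classic Van Jacobson AIMD scheme.
 */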
/**
 * xprt_wake_pending_tasks - wake all tasks on a transport's pending queue
 * @xprt: transport with waiting tasks
 * @status: result code to plant in each task before waking it
 *
 */
void xprt_wake_pending_tasks(struct rpc_xprt *xprt, int status)
{
	if (status < 0)
		rpc_wake_up_status(&xprt->pending, status);
	else
		rpc_wake_up(&xprt->pending);
}
/**
 * xprt_wait_for_buffer_space - wait for transport output buffer to clear
 * @task: task to be put to sleep
 *
 */
void xprt_wait_for_buffer_space(struct rpc_task *task)
{
	struct rpc_rqst *req = task->tk_rqstp;
	struct rpc_xprt *xprt = req->rq_xprt;

	task->tk_timeout = req->rq_timeout;
	rpc_sleep_on(&xprt->pending, task, NULL, NULL);
}
/**
 * xprt_write_space - wake the task waiting for transport output buffer space
 * @xprt: transport with waiting tasks
 *
 * Can be called in a soft IRQ context, so xprt_write_space never sleeps.
 */
void xprt_write_space(struct rpc_xprt *xprt)
{
	if (unlikely(xprt->shutdown))
		return;

	spin_lock_bh(&xprt->transport_lock);
	if (xprt->snd_task) {
		dprintk("RPC: write space: waking waiting task on xprt %p\n",
				xprt);
		rpc_wake_up_task(xprt->snd_task);
	}
	spin_unlock_bh(&xprt->transport_lock);
}
static void xprt_reset_majortimeo(struct rpc_rqst *req)
{
	struct rpc_timeout *to = &req->rq_xprt->timeout;

	req->rq_majortimeo = req->rq_timeout;
	if (to->to_exponential)
		req->rq_majortimeo <<= to->to_retries;
	else
		req->rq_majortimeo += to->to_increment * to->to_retries;
	if (req->rq_majortimeo > to->to_maxval || req->rq_majortimeo == 0)
		req->rq_majortimeo = to->to_maxval;
	req->rq_majortimeo += jiffies;
}
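/*
 * Illustrative numbers for the major timeout computed above: starting
 * from rq_timeout = 10s with to_retries = 3, an exponential transport
 * gets rq_majortimeo = 10s << 3 = 80s, while a linear one gets
 * 10s + 3 * to_increment; both are capped at to_maxval and then
 * converted to an absolute jiffies value.
 */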
/**
 * xprt_adjust_timeout - adjust timeout values for next retransmit
 * @req: RPC request containing parameters to use for the adjustment
 *
 */
int xprt_adjust_timeout(struct rpc_rqst *req)
{
	struct rpc_xprt *xprt = req->rq_xprt;
	struct rpc_timeout *to = &xprt->timeout;
	int status = 0;

	if (time_before(jiffies, req->rq_majortimeo)) {
		if (to->to_exponential)
			req->rq_timeout <<= 1;
		else
			req->rq_timeout += to->to_increment;
		if (to->to_maxval && req->rq_timeout >= to->to_maxval)
			req->rq_timeout = to->to_maxval;
		req->rq_retries++;
		pprintk("RPC: %lu retrans\n", jiffies);
	} else {
		req->rq_timeout = to->to_initval;
		req->rq_retries = 0;
		xprt_reset_majortimeo(req);
		/* Reset the RTT counters == "slow start" */
		spin_lock_bh(&xprt->transport_lock);
		rpc_init_rtt(req->rq_task->tk_client->cl_rtt, to->to_initval);
		spin_unlock_bh(&xprt->transport_lock);
		pprintk("RPC: %lu timeout\n", jiffies);
		status = -ETIMEDOUT;
	}

	if (req->rq_timeout == 0) {
		printk(KERN_WARNING "xprt_adjust_timeout: rq_timeout = 0!\n");
		req->rq_timeout = 5 * HZ;
	}
	return status;
}
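/*
 * Example of the per-retransmit (minor) backoff above, with
 * illustrative values to_initval = 5s and to_exponential set:
 * successive retransmits wait 5s, 10s, 20s, and so on, capped at
 * to_maxval, until rq_majortimeo expires. At that point rq_timeout
 * snaps back to to_initval, the RTT estimator is reset to "slow
 * start", and -ETIMEDOUT is returned so the caller can decide whether
 * to retry or to fail a soft mount.
 */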
static void xprt_autoclose(void *args)
{
	struct rpc_xprt *xprt = (struct rpc_xprt *)args;

	xprt_disconnect(xprt);
	xprt->ops->close(xprt);
	xprt_release_write(xprt, NULL);
}
/**
 * xprt_disconnect - mark a transport as disconnected
 * @xprt: transport to flag for disconnect
 *
 */
void xprt_disconnect(struct rpc_xprt *xprt)
{
	dprintk("RPC: disconnected transport %p\n", xprt);
	spin_lock_bh(&xprt->transport_lock);
	xprt_clear_connected(xprt);
	xprt_wake_pending_tasks(xprt, -ENOTCONN);
	spin_unlock_bh(&xprt->transport_lock);
}
static void
xprt_init_autodisconnect(unsigned long data)
{
	struct rpc_xprt *xprt = (struct rpc_xprt *)data;

	spin_lock(&xprt->transport_lock);
	if (!list_empty(&xprt->recv) || xprt->shutdown)
		goto out_abort;
	if (test_and_set_bit(XPRT_LOCKED, &xprt->state))
		goto out_abort;
	spin_unlock(&xprt->transport_lock);
	if (xprt_connecting(xprt))
		xprt_release_write(xprt, NULL);
	else
		schedule_work(&xprt->task_cleanup);
	return;
out_abort:
	spin_unlock(&xprt->transport_lock);
}
/**
 * xprt_connect - schedule a transport connect operation
 * @task: RPC task that is requesting the connect
 *
 */
void xprt_connect(struct rpc_task *task)
{
	struct rpc_xprt *xprt = task->tk_xprt;

	dprintk("RPC: %4d xprt_connect xprt %p %s connected\n", task->tk_pid,
			xprt, (xprt_connected(xprt) ? "is" : "is not"));

	if (xprt->shutdown) {
		task->tk_status = -EIO;
		return;
	}
	if (!xprt->addr.sin_port) {
		task->tk_status = -EIO;
		return;
	}
	if (!xprt_lock_write(xprt, task))
		return;
	if (xprt_connected(xprt))
		xprt_release_write(xprt, task);
	else {
		if (task->tk_rqstp)
			task->tk_rqstp->rq_bytes_sent = 0;

		task->tk_timeout = RPC_CONNECT_TIMEOUT;
		rpc_sleep_on(&xprt->pending, task, xprt_connect_status, NULL);
		xprt->ops->connect(task);
	}
}
static void xprt_connect_status(struct rpc_task *task)
{
	struct rpc_xprt *xprt = task->tk_xprt;

	if (task->tk_status >= 0) {
		dprintk("RPC: %4d xprt_connect_status: connection established\n",
				task->tk_pid);
		return;
	}

	switch (task->tk_status) {
	case -ECONNREFUSED:
	case -ECONNRESET:
		dprintk("RPC: %4d xprt_connect_status: server %s refused connection\n",
				task->tk_pid, task->tk_client->cl_server);
		break;
	case -ENOTCONN:
		dprintk("RPC: %4d xprt_connect_status: connection broken\n",
				task->tk_pid);
		break;
	case -ETIMEDOUT:
		dprintk("RPC: %4d xprt_connect_status: connect attempt timed out\n",
				task->tk_pid);
		break;
	default:
		dprintk("RPC: %4d xprt_connect_status: error %d connecting to server %s\n",
				task->tk_pid, -task->tk_status, task->tk_client->cl_server);
		xprt_release_write(xprt, task);
		task->tk_status = -EIO;
		return;
	}

	/* if soft mounted, just cause this RPC to fail */
	if (RPC_IS_SOFT(task)) {
		xprt_release_write(xprt, task);
		task->tk_status = -EIO;
	}
}
/**
 * xprt_lookup_rqst - find an RPC request corresponding to an XID
 * @xprt: transport on which the original request was transmitted
 * @xid: RPC XID of incoming reply
 *
 */
struct rpc_rqst *xprt_lookup_rqst(struct rpc_xprt *xprt, u32 xid)
{
	struct list_head *pos;
	struct rpc_rqst *req = NULL;

	list_for_each(pos, &xprt->recv) {
		struct rpc_rqst *entry = list_entry(pos, struct rpc_rqst, rq_list);
		if (entry->rq_xid == xid) {
			req = entry;
			break;
		}
	}
	return req;
}
/**
 * xprt_complete_rqst - called when reply processing is complete
 * @xprt: controlling transport
 * @req: RPC request that just completed
 * @copied: actual number of bytes received from the transport
 *
 */
void xprt_complete_rqst(struct rpc_xprt *xprt, struct rpc_rqst *req, int copied)
{
	struct rpc_task *task = req->rq_task;
	struct rpc_clnt *clnt = task->tk_client;

	/* Adjust congestion window */
	if (!xprt->nocong) {
		unsigned timer = task->tk_msg.rpc_proc->p_timer;
		xprt_adjust_cwnd(xprt, copied);
		__xprt_put_cong(xprt, req);
		if (timer) {
			if (req->rq_ntrans == 1)
				rpc_update_rtt(clnt->cl_rtt, timer,
						(long)jiffies - req->rq_xtime);
			rpc_set_timeo(clnt->cl_rtt, timer, req->rq_ntrans - 1);
		}
	}

#ifdef RPC_PROFILE
	/* Profile only reads for now */
	if (copied > 1024) {
		static unsigned long nextstat;
		static unsigned long pkt_rtt, pkt_len, pkt_cnt;

		pkt_cnt++;
		pkt_len += req->rq_slen + copied;
		pkt_rtt += jiffies - req->rq_xtime;
		if (time_before(nextstat, jiffies)) {
			printk("RPC: %lu %ld cwnd\n", jiffies, xprt->cwnd);
			printk("RPC: %ld %ld %ld %ld stat\n",
					jiffies, pkt_cnt, pkt_len, pkt_rtt);
			pkt_rtt = pkt_len = pkt_cnt = 0;
			nextstat = jiffies + 5 * HZ;
		}
	}
#endif

	dprintk("RPC: %4d has input (%d bytes)\n", task->tk_pid, copied);
	list_del_init(&req->rq_list);
	req->rq_received = req->rq_private_buf.len = copied;

	/* ... and wake up the process. */
	rpc_wake_up_task(task);
}
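/*
 * The rq_ntrans == 1 test above implements Karn's rule: an RTT sample
 * is only fed to the estimator when the request was transmitted
 * exactly once, because a reply to a retransmitted request cannot be
 * matched unambiguously to any particular transmission.
 */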
/*
 * RPC receive timeout handler.
 */
static void
xprt_timer(struct rpc_task *task)
{
	struct rpc_rqst *req = task->tk_rqstp;
	struct rpc_xprt *xprt = req->rq_xprt;

	spin_lock(&xprt->transport_lock);
	if (req->rq_received)
		goto out;

	xprt_adjust_cwnd(req->rq_xprt, -ETIMEDOUT);
	__xprt_put_cong(xprt, req);

	dprintk("RPC: %4d xprt_timer (%s request)\n",
			task->tk_pid, req ? "pending" : "backlogged");

	task->tk_status = -ETIMEDOUT;
out:
	task->tk_timeout = 0;
	rpc_wake_up_task(task);
	spin_unlock(&xprt->transport_lock);
}
/**
 * xprt_prepare_transmit - reserve the transport before sending a request
 * @task: RPC task about to send a request
 *
 */
int xprt_prepare_transmit(struct rpc_task *task)
{
	struct rpc_rqst *req = task->tk_rqstp;
	struct rpc_xprt *xprt = req->rq_xprt;
	int err = 0;

	dprintk("RPC: %4d xprt_prepare_transmit\n", task->tk_pid);

	if (xprt->shutdown)
		return -EIO;

	spin_lock_bh(&xprt->transport_lock);
	if (req->rq_received && !req->rq_bytes_sent) {
		err = req->rq_received;
		goto out_unlock;
	}
	if (!__xprt_lock_write(xprt, task)) {
		err = -EAGAIN;
		goto out_unlock;
	}
	if (!xprt_connected(xprt)) {
		err = -ENOTCONN;
		goto out_unlock;
	}
out_unlock:
	spin_unlock_bh(&xprt->transport_lock);
	return err;
}
/**
 * xprt_transmit - send an RPC request on a transport
 * @task: controlling RPC task
 *
 * We have to copy the iovec because sendmsg fiddles with its contents.
 */
void xprt_transmit(struct rpc_task *task)
{
	struct rpc_clnt *clnt = task->tk_client;
	struct rpc_rqst *req = task->tk_rqstp;
	struct rpc_xprt *xprt = req->rq_xprt;
	int status;

	dprintk("RPC: %4d xprt_transmit(%u)\n", task->tk_pid, req->rq_slen);

	if (!req->rq_received) {
		if (list_empty(&req->rq_list)) {
			spin_lock_bh(&xprt->transport_lock);
			/* Update the softirq receive buffer */
			memcpy(&req->rq_private_buf, &req->rq_rcv_buf,
					sizeof(req->rq_private_buf));
			/* Add request to the receive list */
			list_add_tail(&req->rq_list, &xprt->recv);
			spin_unlock_bh(&xprt->transport_lock);
			xprt_reset_majortimeo(req);
			/* Turn off autodisconnect */
			del_singleshot_timer_sync(&xprt->timer);
		}
	} else if (!req->rq_bytes_sent)
		return;

	status = xprt->ops->send_request(task);
	if (status != 0) {
		/* Note: at this point, task->tk_sleeping has not yet been set,
		 *	 hence there is no danger of the waking up task being put on
		 *	 schedq, and being picked up by a parallel run of rpciod().
		 */
		task->tk_status = status;

		switch (status) {
		case -ECONNREFUSED:
			task->tk_timeout = RPC_REESTABLISH_TIMEOUT;
			rpc_sleep_on(&xprt->sending, task, NULL, NULL);
		case -EAGAIN:
		case -ENOTCONN:
			return;
		default:
			if (xprt->stream)
				xprt_disconnect(xprt);
		}
		xprt_release_write(xprt, task);
		return;
	}

	dprintk("RPC: %4d xmit complete\n", task->tk_pid);
	/* Set the task's receive timeout value */
	spin_lock_bh(&xprt->transport_lock);
	if (!xprt->nocong) {
		int timer = task->tk_msg.rpc_proc->p_timer;
		task->tk_timeout = rpc_calc_rto(clnt->cl_rtt, timer);
		task->tk_timeout <<= rpc_ntimeo(clnt->cl_rtt, timer) + req->rq_retries;
		if (task->tk_timeout > xprt->timeout.to_maxval || task->tk_timeout == 0)
			task->tk_timeout = xprt->timeout.to_maxval;
	} else
		task->tk_timeout = req->rq_timeout;
	/* Don't race with disconnect */
	if (!xprt_connected(xprt))
		task->tk_status = -ENOTCONN;
	else if (!req->rq_received)
		rpc_sleep_on(&xprt->pending, task, NULL, xprt_timer);
	__xprt_release_write(xprt, task);
	spin_unlock_bh(&xprt->transport_lock);
}
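/*
 * Illustrative numbers for the receive timeout chosen above on
 * transports that use congestion avoidance: if rpc_calc_rto()
 * estimates an 800ms timeout for this procedure class and the request
 * has already been retransmitted twice, the task waits
 * 800ms << 2 = 3.2s, capped at to_maxval. Transports that opt out
 * (xprt->nocong) just use the fixed rq_timeout instead.
 */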
static inline void do_xprt_reserve(struct rpc_task *task)
{
	struct rpc_xprt *xprt = task->tk_xprt;

	task->tk_status = 0;
	if (task->tk_rqstp)
		return;
	if (!list_empty(&xprt->free)) {
		struct rpc_rqst *req = list_entry(xprt->free.next, struct rpc_rqst, rq_list);
		list_del_init(&req->rq_list);
		task->tk_rqstp = req;
		xprt_request_init(task, xprt);
		return;
	}
	dprintk("RPC: waiting for request slot\n");
	task->tk_status = -EAGAIN;
	task->tk_timeout = 0;
	rpc_sleep_on(&xprt->backlog, task, NULL, NULL);
}
/**
 * xprt_reserve - allocate an RPC request slot
 * @task: RPC task requesting a slot allocation
 *
 * If no more slots are available, place the task on the transport's
 * backlog queue.
 */
void xprt_reserve(struct rpc_task *task)
{
	struct rpc_xprt *xprt = task->tk_xprt;

	task->tk_status = -EIO;
	if (!xprt->shutdown) {
		spin_lock(&xprt->reserve_lock);
		do_xprt_reserve(task);
		spin_unlock(&xprt->reserve_lock);
	}
}
static inline u32 xprt_alloc_xid(struct rpc_xprt *xprt)
{
	return xprt->xid++;
}

static inline void xprt_init_xid(struct rpc_xprt *xprt)
{
	get_random_bytes(&xprt->xid, sizeof(xprt->xid));
}
static void xprt_request_init(struct rpc_task *task, struct rpc_xprt *xprt)
{
	struct rpc_rqst *req = task->tk_rqstp;

	req->rq_timeout = xprt->timeout.to_initval;
	req->rq_task = task;
	req->rq_xprt = xprt;
	req->rq_xid = xprt_alloc_xid(xprt);
	dprintk("RPC: %4d reserved req %p xid %08x\n", task->tk_pid,
			req, ntohl(req->rq_xid));
}
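/*
 * XIDs are what distinguish replies on the wire: xprt_init_xid() seeds
 * xprt->xid with random bytes when the transport is created, and
 * xprt_alloc_xid() then hands out sequential values, so each request
 * gets a unique XID that xprt_lookup_rqst() can match against an
 * incoming reply.
 */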
/**
 * xprt_release - release an RPC request slot
 * @task: task which is finished with the slot
 *
 */
void xprt_release(struct rpc_task *task)
{
	struct rpc_xprt *xprt = task->tk_xprt;
	struct rpc_rqst *req;

	if (!(req = task->tk_rqstp))
		return;
	spin_lock_bh(&xprt->transport_lock);
	__xprt_release_write(xprt, task);
	__xprt_put_cong(xprt, req);
	if (!list_empty(&req->rq_list))
		list_del(&req->rq_list);
	xprt->last_used = jiffies;
	if (list_empty(&xprt->recv) && !xprt->shutdown)
		mod_timer(&xprt->timer,
				xprt->last_used + RPC_IDLE_DISCONNECT_TIMEOUT);
	spin_unlock_bh(&xprt->transport_lock);
	task->tk_rqstp = NULL;
	memset(req, 0, sizeof(*req));	/* mark unused */

	dprintk("RPC: %4d release request %p\n", task->tk_pid, req);

	spin_lock(&xprt->reserve_lock);
	list_add(&req->rq_list, &xprt->free);
	xprt_clear_backlog(xprt);
	spin_unlock(&xprt->reserve_lock);
}
/**
 * xprt_set_timeout - set constant RPC timeout
 * @to: RPC timeout parameters to set up
 * @retr: number of retries
 * @incr: amount of increase after each retry
 *
 */
void xprt_set_timeout(struct rpc_timeout *to, unsigned int retr, unsigned long incr)
{
	to->to_initval =
	to->to_increment = incr;
	to->to_maxval = to->to_initval + (incr * retr);
	to->to_retries = retr;
	to->to_exponential = 0;
}
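/*
 * Worked example (illustrative values): xprt_set_timeout(to, 2, 5 * HZ)
 * yields to_initval = to_increment = 5s and
 * to_maxval = 5s + 2 * 5s = 15s, i.e. a linear 5s/10s/15s retransmit
 * schedule with two retries.
 */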
static struct rpc_xprt *xprt_setup(int proto, struct sockaddr_in *ap, struct rpc_timeout *to)
{
	int result;
	struct rpc_xprt *xprt;
	struct rpc_rqst *req;

	if ((xprt = kmalloc(sizeof(struct rpc_xprt), GFP_KERNEL)) == NULL)
		return ERR_PTR(-ENOMEM);
	memset(xprt, 0, sizeof(*xprt)); /* Nnnngh! */
	xprt->addr = *ap;

	switch (proto) {
	case IPPROTO_UDP:
		result = xs_setup_udp(xprt, to);
		break;
	case IPPROTO_TCP:
		result = xs_setup_tcp(xprt, to);
		break;
	default:
		printk(KERN_ERR "RPC: unrecognized transport protocol: %d\n",
				proto);
		result = -EIO;
		break;
	}
	if (result) {
		kfree(xprt);
		return ERR_PTR(result);
	}

	spin_lock_init(&xprt->transport_lock);
	spin_lock_init(&xprt->reserve_lock);
	init_waitqueue_head(&xprt->cong_wait);

	INIT_LIST_HEAD(&xprt->free);
	INIT_LIST_HEAD(&xprt->recv);
	INIT_WORK(&xprt->task_cleanup, xprt_autoclose, xprt);
	init_timer(&xprt->timer);
	xprt->timer.function = xprt_init_autodisconnect;
	xprt->timer.data = (unsigned long) xprt;
	xprt->last_used = jiffies;

	rpc_init_wait_queue(&xprt->pending, "xprt_pending");
	rpc_init_wait_queue(&xprt->sending, "xprt_sending");
	rpc_init_wait_queue(&xprt->resend, "xprt_resend");
	rpc_init_priority_wait_queue(&xprt->backlog, "xprt_backlog");

	/* initialize free list */
	for (req = &xprt->slot[xprt->max_reqs-1]; req >= &xprt->slot[0]; req--)
		list_add(&req->rq_list, &xprt->free);

	xprt_init_xid(xprt);

	dprintk("RPC: created transport %p with %u slots\n", xprt,
			xprt->max_reqs);

	return xprt;
}
/**
 * xprt_create_proto - create an RPC client transport
 * @proto: requested transport protocol
 * @sap: remote peer's address
 * @to: timeout parameters for new transport
 *
 */
struct rpc_xprt *xprt_create_proto(int proto, struct sockaddr_in *sap, struct rpc_timeout *to)
{
	struct rpc_xprt *xprt;

	xprt = xprt_setup(proto, sap, to);
	if (IS_ERR(xprt))
		dprintk("RPC: xprt_create_proto failed\n");
	else
		dprintk("RPC: xprt_create_proto created xprt %p\n", xprt);

	return xprt;
}
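/*
 * Hypothetical caller sketch (the NFS mount path does something
 * similar; the local variable names are illustrative):
 *
 *	struct rpc_timeout timeo;
 *	struct rpc_xprt *xprt;
 *
 *	xprt_set_timeout(&timeo, 2, 5 * HZ);
 *	xprt = xprt_create_proto(IPPROTO_UDP, &server_addr, &timeo);
 *	if (IS_ERR(xprt))
 *		return PTR_ERR(xprt);
 */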
static void xprt_shutdown(struct rpc_xprt *xprt)
{
	xprt->shutdown = 1;
	rpc_wake_up(&xprt->sending);
	rpc_wake_up(&xprt->resend);
	xprt_wake_pending_tasks(xprt, -EIO);
	rpc_wake_up(&xprt->backlog);
	wake_up(&xprt->cong_wait);
	del_timer_sync(&xprt->timer);
}
static int xprt_clear_backlog(struct rpc_xprt *xprt) {
	rpc_wake_up_next(&xprt->backlog);
	wake_up(&xprt->cong_wait);
	return 1;
}
/**
 * xprt_destroy - destroy an RPC transport, killing off all requests.
 * @xprt: transport to destroy
 *
 */
int xprt_destroy(struct rpc_xprt *xprt)
{
	dprintk("RPC: destroying transport %p\n", xprt);
	xprt_shutdown(xprt);
	xprt->ops->destroy(xprt);
	kfree(xprt);

	return 0;
}