/*
 * linux/net/sunrpc/clnt.c
 *
 * This file contains the high-level RPC interface.
 * It is modeled as a finite state machine to support both synchronous
 * and asynchronous requests.
 *
 * -	RPC header generation and argument serialization.
 * -	Credential refresh.
 * -	TCP connect handling.
 * -	Retry of operation when it is suspected the operation failed because
 *	of uid squashing on the server, or when the credentials were stale
 *	and need to be refreshed, or when a packet was damaged in transit.
 *	This may have to be moved to the VFS layer.
 *
 * NB: BSD uses a more intelligent approach to guessing when a request
 * or reply has been lost by keeping the RTO estimate for each procedure.
 * We currently make do with a constant timeout value.
 *
 * Copyright (C) 1992,1993 Rick Sladkey <jrs@world.std.com>
 * Copyright (C) 1995,1996 Olaf Kirch <okir@monad.swb.de>
 */
#include <asm/system.h>

#include <linux/module.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/utsname.h>

#include <linux/sunrpc/clnt.h>
#include <linux/workqueue.h>
#include <linux/sunrpc/rpc_pipe_fs.h>

#include <linux/nfs.h>
#define RPC_SLACK_SPACE		(1024)	/* total overkill */

#ifdef RPC_DEBUG
# define RPCDBG_FACILITY	RPCDBG_CALL
#endif

static DECLARE_WAIT_QUEUE_HEAD(destroy_wait);
static void	call_start(struct rpc_task *task);
static void	call_reserve(struct rpc_task *task);
static void	call_reserveresult(struct rpc_task *task);
static void	call_allocate(struct rpc_task *task);
static void	call_encode(struct rpc_task *task);
static void	call_decode(struct rpc_task *task);
static void	call_bind(struct rpc_task *task);
static void	call_bind_status(struct rpc_task *task);
static void	call_transmit(struct rpc_task *task);
static void	call_status(struct rpc_task *task);
static void	call_transmit_status(struct rpc_task *task);
static void	call_refresh(struct rpc_task *task);
static void	call_refreshresult(struct rpc_task *task);
static void	call_timeout(struct rpc_task *task);
static void	call_connect(struct rpc_task *task);
static void	call_connect_status(struct rpc_task *task);
static u32 *	call_header(struct rpc_task *task);
static u32 *	call_verify(struct rpc_task *task);
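
/*
 * Overview of the state machine implemented by the call_* routines
 * below (their numbered comments follow this sequence). The normal
 * path is
 *
 *	call_start -> call_reserve -> call_allocate -> call_bind
 *	  -> call_connect -> call_transmit (which invokes call_encode)
 *	  -> call_status -> call_decode
 *
 * The various *_status and *result states sort out the outcome of
 * their asynchronous counterparts; call_refresh handles stale
 * credentials, and call_timeout handles retransmission.
 */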
static int
rpc_setup_pipedir(struct rpc_clnt *clnt, char *dir_name)
{
	static uint32_t clntid;
	int error;

	if (dir_name == NULL)
		return 0;
	for (;;) {
		snprintf(clnt->cl_pathname, sizeof(clnt->cl_pathname),
				"%s/clnt%x", dir_name,
				(unsigned int)clntid++);
		clnt->cl_pathname[sizeof(clnt->cl_pathname) - 1] = '\0';
		clnt->cl_dentry = rpc_mkdir(clnt->cl_pathname, clnt);
		if (!IS_ERR(clnt->cl_dentry))
			return 0;
		error = PTR_ERR(clnt->cl_dentry);
		if (error != -EEXIST) {
			printk(KERN_INFO "RPC: Couldn't create pipefs entry %s, error %d\n",
					clnt->cl_pathname, error);
			return error;
		}
	}
}
/*
 * Create an RPC client
 * FIXME: This should also take a flags argument (as in task->tk_flags).
 * It's called (among others) from pmap_create_client, which may in
 * turn be called by an async task. In this case, rpciod should not be
 * made to sleep too long.
 */
static struct rpc_clnt *
rpc_new_client(struct rpc_xprt *xprt, char *servname,
		struct rpc_program *program, u32 vers,
		rpc_authflavor_t flavor)
{
	struct rpc_version	*version;
	struct rpc_clnt		*clnt = NULL;
	struct rpc_auth		*auth;
	int err;
	int len;

	dprintk("RPC: creating %s client for %s (xprt %p)\n",
		program->name, servname, xprt);

	err = -EINVAL;
	if (vers >= program->nrvers || !(version = program->version[vers]))
		goto out_err;

	err = -ENOMEM;
	clnt = kmalloc(sizeof(*clnt), GFP_KERNEL);
	if (!clnt)
		goto out_err;
	memset(clnt, 0, sizeof(*clnt));
	atomic_set(&clnt->cl_users, 0);
	atomic_set(&clnt->cl_count, 1);
	clnt->cl_parent = clnt;

	clnt->cl_server = clnt->cl_inline_name;
	len = strlen(servname) + 1;
	if (len > sizeof(clnt->cl_inline_name)) {
		char *buf = kmalloc(len, GFP_KERNEL);
		if (buf != NULL)
			clnt->cl_server = buf;
		else
			len = sizeof(clnt->cl_inline_name);
	}
	strlcpy(clnt->cl_server, servname, len);

	clnt->cl_xprt     = xprt;
	clnt->cl_procinfo = version->procs;
	clnt->cl_maxproc  = version->nrprocs;
	clnt->cl_protname = program->name;
	clnt->cl_pmap     = &clnt->cl_pmap_default;
	clnt->cl_port     = xprt->addr.sin_port;
	clnt->cl_prog     = program->number;
	clnt->cl_vers     = version->number;
	clnt->cl_prot     = xprt->prot;
	clnt->cl_stats    = program->stats;
	rpc_init_wait_queue(&clnt->cl_pmap_default.pm_bindwait, "bindwait");

	if (!clnt->cl_port)
		clnt->cl_autobind = 1;

	clnt->cl_rtt = &clnt->cl_rtt_default;
	rpc_init_rtt(&clnt->cl_rtt_default, xprt->timeout.to_initval);

	err = rpc_setup_pipedir(clnt, program->pipe_dir_name);
	if (err < 0)
		goto out_no_path;

	auth = rpcauth_create(flavor, clnt);
	if (IS_ERR(auth)) {
		printk(KERN_INFO "RPC: Couldn't create auth handle (flavor %u)\n",
				flavor);
		err = PTR_ERR(auth);
		goto out_no_auth;
	}

	/* save the nodename */
	clnt->cl_nodelen = strlen(system_utsname.nodename);
	if (clnt->cl_nodelen > UNX_MAXNODENAME)
		clnt->cl_nodelen = UNX_MAXNODENAME;
	memcpy(clnt->cl_nodename, system_utsname.nodename, clnt->cl_nodelen);
	return clnt;

out_no_auth:
	rpc_rmdir(clnt->cl_pathname);
out_no_path:
	if (clnt->cl_server != clnt->cl_inline_name)
		kfree(clnt->cl_server);
	kfree(clnt);
out_err:
	xprt_destroy(xprt);
	return ERR_PTR(err);
}
/**
 * rpc_create_client - Create an RPC client
 * @xprt: pointer to xprt struct
 * @servname: name of server
 * @info: rpc_program
 * @version: rpc_program version
 * @authflavor: rpc_auth flavour to use
 *
 * Creates an RPC client structure, then pings the server in order to
 * determine if it is up, and if it supports this program and version.
 *
 * This function should never be called by asynchronous tasks such as
 * the portmapper.
 */
struct rpc_clnt *rpc_create_client(struct rpc_xprt *xprt, char *servname,
		struct rpc_program *info, u32 version, rpc_authflavor_t authflavor)
{
	struct rpc_clnt *clnt;
	int err;

	clnt = rpc_new_client(xprt, servname, info, version, authflavor);
	if (IS_ERR(clnt))
		return clnt;
	err = rpc_ping(clnt, RPC_TASK_SOFT|RPC_TASK_NOINTR);
	if (err == 0)
		return clnt;
	rpc_shutdown_client(clnt);
	return ERR_PTR(err);
}
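
/*
 * Example (illustrative sketch, not part of the original file): a
 * typical synchronous user builds a transport first, then creates the
 * client. "my_program" and MY_VERSION are hypothetical, and the
 * transport helper is assumed to be this era's xprt_create_proto():
 *
 *	struct rpc_xprt *xprt = xprt_create_proto(IPPROTO_UDP, &addr, NULL);
 *	struct rpc_clnt *clnt;
 *
 *	if (IS_ERR(xprt))
 *		return PTR_ERR(xprt);
 *	clnt = rpc_create_client(xprt, "example-server", &my_program,
 *				 MY_VERSION, RPC_AUTH_UNIX);
 *	if (IS_ERR(clnt))
 *		return PTR_ERR(clnt);
 */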
/*
 * This function clones the RPC client structure. It allows us to share the
 * same transport while varying parameters such as the authentication
 * flavour.
 */
struct rpc_clnt *
rpc_clone_client(struct rpc_clnt *clnt)
{
	struct rpc_clnt *new;

	new = kmalloc(sizeof(*new), GFP_KERNEL);
	if (!new)
		goto out_no_clnt;
	memcpy(new, clnt, sizeof(*new));
	atomic_set(&new->cl_count, 1);
	atomic_set(&new->cl_users, 0);
	new->cl_parent = clnt;
	atomic_inc(&clnt->cl_count);
	/* Duplicate portmapper */
	rpc_init_wait_queue(&new->cl_pmap_default.pm_bindwait, "bindwait");
	/* Turn off autobind on clones */
	new->cl_autobind = 0;
	new->cl_oneshot = 0;
	new->cl_dead = 0;
	rpc_init_rtt(&new->cl_rtt_default, clnt->cl_xprt->timeout.to_initval);
	if (new->cl_auth)
		atomic_inc(&new->cl_auth->au_count);
	new->cl_pmap = &new->cl_pmap_default;
	rpc_init_wait_queue(&new->cl_pmap_default.pm_bindwait, "bindwait");
	return new;
out_no_clnt:
	printk(KERN_INFO "RPC: out of memory in %s\n", __FUNCTION__);
	return ERR_PTR(-ENOMEM);
}
/*
 * Properly shut down an RPC client, terminating all outstanding
 * requests. Note that we must be certain that cl_oneshot and
 * cl_dead are cleared, or else the client would be destroyed
 * when the last task releases it.
 */
int
rpc_shutdown_client(struct rpc_clnt *clnt)
{
	dprintk("RPC: shutting down %s client for %s, tasks=%d\n",
			clnt->cl_protname, clnt->cl_server,
			atomic_read(&clnt->cl_users));

	while (atomic_read(&clnt->cl_users) > 0) {
		/* Don't let rpc_release_client destroy us */
		clnt->cl_oneshot = 0;
		clnt->cl_dead = 0;
		rpc_killall_tasks(clnt);
		wait_event_timeout(destroy_wait,
			!atomic_read(&clnt->cl_users), 1*HZ);
	}

	if (atomic_read(&clnt->cl_users) < 0) {
		printk(KERN_ERR "RPC: rpc_shutdown_client clnt %p tasks=%d\n",
				clnt, atomic_read(&clnt->cl_users));
		BUG();
	}

	return rpc_destroy_client(clnt);
}
/*
 * Delete an RPC client
 */
int
rpc_destroy_client(struct rpc_clnt *clnt)
{
	if (!atomic_dec_and_test(&clnt->cl_count))
		return 1;
	BUG_ON(atomic_read(&clnt->cl_users) != 0);

	dprintk("RPC: destroying %s client for %s\n",
			clnt->cl_protname, clnt->cl_server);
	if (clnt->cl_auth) {
		rpcauth_destroy(clnt->cl_auth);
		clnt->cl_auth = NULL;
	}
	if (clnt->cl_parent != clnt) {
		rpc_destroy_client(clnt->cl_parent);
		goto out_free;
	}
	if (clnt->cl_pathname[0])
		rpc_rmdir(clnt->cl_pathname);
	if (clnt->cl_xprt) {
		xprt_destroy(clnt->cl_xprt);
		clnt->cl_xprt = NULL;
	}
out_free:
	if (clnt->cl_server != clnt->cl_inline_name)
		kfree(clnt->cl_server);
	kfree(clnt);
	return 0;
}
/*
 * Release an RPC client
 */
void
rpc_release_client(struct rpc_clnt *clnt)
{
	dprintk("RPC: rpc_release_client(%p, %d)\n",
			clnt, atomic_read(&clnt->cl_users));

	if (!atomic_dec_and_test(&clnt->cl_users))
		return;
	wake_up(&destroy_wait);
	if (clnt->cl_oneshot || clnt->cl_dead)
		rpc_destroy_client(clnt);
}
/**
 * rpc_bind_new_program - bind a new RPC program to an existing client
 * @old: old rpc_client
 * @program: rpc program to set
 * @vers: rpc program version
 *
 * Clones the rpc client and sets up a new RPC program. This is mainly
 * of use for enabling different RPC programs to share the same transport.
 * The Sun NFSv2/v3 ACL protocol can do this.
 */
struct rpc_clnt *rpc_bind_new_program(struct rpc_clnt *old,
				      struct rpc_program *program,
				      int vers)
{
	struct rpc_clnt *clnt;
	struct rpc_version *version;
	int err;

	BUG_ON(vers >= program->nrvers || !program->version[vers]);
	version = program->version[vers];
	clnt = rpc_clone_client(old);
	if (IS_ERR(clnt))
		goto out;
	clnt->cl_procinfo = version->procs;
	clnt->cl_maxproc  = version->nrprocs;
	clnt->cl_protname = program->name;
	clnt->cl_prog     = program->number;
	clnt->cl_vers     = version->number;
	clnt->cl_stats    = program->stats;
	err = rpc_ping(clnt, RPC_TASK_SOFT|RPC_TASK_NOINTR);
	if (err != 0) {
		rpc_shutdown_client(clnt);
		clnt = ERR_PTR(err);
	}
out:
	return clnt;
}
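
/*
 * Example (sketch): how a second program, such as the ACL sideband
 * protocol mentioned above, could share an existing client's
 * transport. "nfs_clnt" and "nfsacl_program" are illustrative names:
 *
 *	struct rpc_clnt *acl_clnt;
 *
 *	acl_clnt = rpc_bind_new_program(nfs_clnt, &nfsacl_program, 3);
 *	if (IS_ERR(acl_clnt))
 *		return PTR_ERR(acl_clnt);
 */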
/*
 * Default callback for async RPC calls
 */
static void
rpc_default_callback(struct rpc_task *task, void *data)
{
}

static const struct rpc_call_ops rpc_default_ops = {
	.rpc_call_done = rpc_default_callback,
};
/*
 * Export the signal mask handling for synchronous code that
 * sleeps on RPC calls
 */
#define RPC_INTR_SIGNALS (sigmask(SIGHUP) | sigmask(SIGINT) | sigmask(SIGQUIT) | sigmask(SIGTERM))

static void rpc_save_sigmask(sigset_t *oldset, int intr)
{
	unsigned long	sigallow = sigmask(SIGKILL);
	sigset_t sigmask;

	/* Block all signals except those listed in sigallow */
	if (intr)
		sigallow |= RPC_INTR_SIGNALS;
	siginitsetinv(&sigmask, sigallow);
	sigprocmask(SIG_BLOCK, &sigmask, oldset);
}

static inline void rpc_task_sigmask(struct rpc_task *task, sigset_t *oldset)
{
	rpc_save_sigmask(oldset, !RPC_TASK_UNINTERRUPTIBLE(task));
}

static inline void rpc_restore_sigmask(sigset_t *oldset)
{
	sigprocmask(SIG_SETMASK, oldset, NULL);
}

void rpc_clnt_sigmask(struct rpc_clnt *clnt, sigset_t *oldset)
{
	rpc_save_sigmask(oldset, clnt->cl_intr);
}

void rpc_clnt_sigunmask(struct rpc_clnt *clnt, sigset_t *oldset)
{
	rpc_restore_sigmask(oldset);
}
/*
 * New rpc_call implementation
 */
int rpc_call_sync(struct rpc_clnt *clnt, struct rpc_message *msg, int flags)
{
	struct rpc_task	*task;
	sigset_t	oldset;
	int		status;

	/* If this client is slain all further I/O fails */
	if (clnt->cl_dead)
		return -EIO;

	BUG_ON(flags & RPC_TASK_ASYNC);

	status = -ENOMEM;
	task = rpc_new_task(clnt, flags, &rpc_default_ops, NULL);
	if (task == NULL)
		goto out;

	/* Mask signals on RPC calls _and_ GSS_AUTH upcalls */
	rpc_task_sigmask(task, &oldset);

	rpc_call_setup(task, msg, 0);

	/* Set up the call info struct and execute the task */
	status = task->tk_status;
	if (status == 0) {
		atomic_inc(&task->tk_count);
		status = rpc_execute(task);
		if (status == 0)
			status = task->tk_status;
	}
	rpc_restore_sigmask(&oldset);
	rpc_release_task(task);
out:
	return status;
}
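
/*
 * Example (sketch): a synchronous call fills in an rpc_message and
 * blocks until the reply is decoded. "my_procinfo", "args" and
 * "result" are hypothetical; rpc_ping() at the end of this file is a
 * real in-file user of this interface:
 *
 *	struct rpc_message msg = {
 *		.rpc_proc = &my_procinfo,
 *		.rpc_argp = &args,
 *		.rpc_resp = &result,
 *	};
 *	int err = rpc_call_sync(clnt, &msg, 0);
 */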
/*
 * New rpc_call implementation
 */
int
rpc_call_async(struct rpc_clnt *clnt, struct rpc_message *msg, int flags,
	       const struct rpc_call_ops *tk_ops, void *data)
{
	struct rpc_task	*task;
	sigset_t	oldset;
	int		status;

	/* If this client is slain all further I/O fails */
	if (clnt->cl_dead)
		return -EIO;

	flags |= RPC_TASK_ASYNC;

	/* Create/initialize a new RPC task */
	status = -ENOMEM;
	if (!(task = rpc_new_task(clnt, flags, tk_ops, data)))
		goto out;

	/* Mask signals on GSS_AUTH upcalls */
	rpc_task_sigmask(task, &oldset);

	rpc_call_setup(task, msg, 0);

	/* Set up the call info struct and execute the task */
	status = task->tk_status;
	if (status == 0)
		rpc_execute(task);
	else
		rpc_release_task(task);

	rpc_restore_sigmask(&oldset);
out:
	return status;
}
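
/*
 * Example (sketch): an asynchronous caller supplies rpc_call_ops whose
 * rpc_call_done member runs on completion. "my_call_done" and
 * "calldata" are illustrative names:
 *
 *	static const struct rpc_call_ops my_ops = {
 *		.rpc_call_done = my_call_done,
 *	};
 *
 *	err = rpc_call_async(clnt, &msg, 0, &my_ops, calldata);
 */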
void
rpc_call_setup(struct rpc_task *task, struct rpc_message *msg, int flags)
{
	task->tk_msg = *msg;
	task->tk_flags |= flags;
	/* Bind the user cred */
	if (task->tk_msg.rpc_cred != NULL)
		rpcauth_holdcred(task);
	else
		rpcauth_bindcred(task);

	if (task->tk_status == 0)
		task->tk_action = call_start;
	else
		task->tk_action = rpc_exit_task;
}
void
rpc_setbufsize(struct rpc_clnt *clnt, unsigned int sndsize, unsigned int rcvsize)
{
	struct rpc_xprt *xprt = clnt->cl_xprt;
	if (xprt->ops->set_buffer_size)
		xprt->ops->set_buffer_size(xprt, sndsize, rcvsize);
}
/*
 * Return size of largest payload RPC client can support, in bytes
 *
 * For stream transports, this is one RPC record fragment (see RFC
 * 1831), as we don't support multi-record requests yet. For datagram
 * transports, this is the size of an IP packet minus the IP, UDP, and
 * RPC header sizes.
 */
size_t rpc_max_payload(struct rpc_clnt *clnt)
{
	return clnt->cl_xprt->max_payload;
}
EXPORT_SYMBOL(rpc_max_payload);
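
/*
 * Example (sketch): callers clamp their transfer sizes to this value,
 * e.g.
 *
 *	if (xfer_size > rpc_max_payload(clnt))
 *		xfer_size = rpc_max_payload(clnt);
 */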
/**
 * rpc_force_rebind - force transport to check that remote port is unchanged
 * @clnt: client to rebind
 */
void rpc_force_rebind(struct rpc_clnt *clnt)
{
	if (clnt->cl_autobind)
		clnt->cl_port = 0;
}
EXPORT_SYMBOL(rpc_force_rebind);
/*
 * Restart an (async) RPC call. Usually called from within the
 * exit handler.
 */
void
rpc_restart_call(struct rpc_task *task)
{
	if (RPC_ASSASSINATED(task))
		return;

	task->tk_action = call_start;
}
/*
 * 0.	Initial state
 *
 *	Other FSM states can be visited zero or more times, but
 *	this state is visited exactly once for each RPC.
 */
static void
call_start(struct rpc_task *task)
{
	struct rpc_clnt	*clnt = task->tk_client;

	dprintk("RPC: %4d call_start %s%d proc %d (%s)\n", task->tk_pid,
		clnt->cl_protname, clnt->cl_vers, task->tk_msg.rpc_proc->p_proc,
		(RPC_IS_ASYNC(task) ? "async" : "sync"));

	/* Increment call count */
	task->tk_msg.rpc_proc->p_count++;
	clnt->cl_stats->rpccnt++;
	task->tk_action = call_reserve;
}
/*
 * 1.	Reserve an RPC call slot
 */
static void
call_reserve(struct rpc_task *task)
{
	dprintk("RPC: %4d call_reserve\n", task->tk_pid);

	if (!rpcauth_uptodatecred(task)) {
		task->tk_action = call_refresh;
		return;
	}

	task->tk_status  = 0;
	task->tk_action  = call_reserveresult;
	xprt_reserve(task);
}
/*
 * 1b.	Grok the result of xprt_reserve()
 */
static void
call_reserveresult(struct rpc_task *task)
{
	int status = task->tk_status;

	dprintk("RPC: %4d call_reserveresult (status %d)\n",
				task->tk_pid, task->tk_status);
	/*
	 * After a call to xprt_reserve(), we must have either
	 * a request slot or else an error status.
	 */
	task->tk_status = 0;
	if (status >= 0) {
		if (task->tk_rqstp) {
			task->tk_action = call_allocate;
			return;
		}
		printk(KERN_ERR "%s: status=%d, but no request slot, exiting\n",
				__FUNCTION__, status);
		rpc_exit(task, -EIO);
		return;
	}
	/*
	 * Even though there was an error, we may have acquired
	 * a request slot somehow. Make sure not to leak it.
	 */
	if (task->tk_rqstp) {
		printk(KERN_ERR "%s: status=%d, request allocated anyway\n",
				__FUNCTION__, status);
		xprt_release(task);
	}

	switch (status) {
	case -EAGAIN:	/* woken up; retry */
		task->tk_action = call_reserve;
		return;
	case -EIO:	/* probably a shutdown */
		break;
	default:
		printk(KERN_ERR "%s: unrecognized error %d, exiting\n",
				__FUNCTION__, status);
		break;
	}
	rpc_exit(task, status);
}
/*
 * 2.	Allocate the buffer. For details, see sched.c:rpc_malloc.
 *	(Note: buffer memory is freed in xprt_release).
 */
static void
call_allocate(struct rpc_task *task)
{
	struct rpc_rqst *req = task->tk_rqstp;
	struct rpc_xprt *xprt = task->tk_xprt;
	unsigned int	bufsiz;

	dprintk("RPC: %4d call_allocate (status %d)\n",
				task->tk_pid, task->tk_status);
	task->tk_action = call_bind;
	if (req->rq_buffer)
		return;

	/* FIXME: compute buffer requirements more exactly using
	 * auth->au_wslack */
	bufsiz = task->tk_msg.rpc_proc->p_bufsiz + RPC_SLACK_SPACE;

	if (xprt->ops->buf_alloc(task, bufsiz << 1) != NULL)
		return;
	printk(KERN_INFO "RPC: buffer allocation failed for task %p\n", task);

	if (RPC_IS_ASYNC(task) || !signalled()) {
		xprt_release(task);
		task->tk_action = call_reserve;
		rpc_delay(task, HZ>>4);
		return;
	}

	rpc_exit(task, -ERESTARTSYS);
}
static inline int
rpc_task_need_encode(struct rpc_task *task)
{
	return task->tk_rqstp->rq_snd_buf.len == 0;
}

static inline void
rpc_task_force_reencode(struct rpc_task *task)
{
	task->tk_rqstp->rq_snd_buf.len = 0;
}
/*
 * 3.	Encode arguments of an RPC call
 */
static void
call_encode(struct rpc_task *task)
{
	struct rpc_rqst	*req = task->tk_rqstp;
	struct xdr_buf *sndbuf = &req->rq_snd_buf;
	struct xdr_buf *rcvbuf = &req->rq_rcv_buf;
	unsigned int	bufsiz;
	kxdrproc_t	encode;
	u32		*p;

	dprintk("RPC: %4d call_encode (status %d)\n",
				task->tk_pid, task->tk_status);

	/* Default buffer setup */
	bufsiz = req->rq_bufsize >> 1;
	sndbuf->head[0].iov_base = (void *)req->rq_buffer;
	sndbuf->head[0].iov_len  = bufsiz;
	sndbuf->tail[0].iov_len  = 0;
	sndbuf->page_len	 = 0;
	sndbuf->len		 = 0;
	sndbuf->buflen		 = bufsiz;
	rcvbuf->head[0].iov_base = (void *)((char *)req->rq_buffer + bufsiz);
	rcvbuf->head[0].iov_len  = bufsiz;
	rcvbuf->tail[0].iov_len  = 0;
	rcvbuf->page_len	 = 0;
	rcvbuf->len		 = 0;
	rcvbuf->buflen		 = bufsiz;

	/* Encode header and provided arguments */
	encode = task->tk_msg.rpc_proc->p_encode;
	if (!(p = call_header(task))) {
		printk(KERN_INFO "RPC: call_header failed, exit EIO\n");
		rpc_exit(task, -EIO);
		return;
	}
	if (encode == NULL)
		return;

	task->tk_status = rpcauth_wrap_req(task, encode, req, p,
			task->tk_msg.rpc_argp);
	if (task->tk_status == -ENOMEM) {
		/* XXX: Is this sane? */
		rpc_delay(task, 3*HZ);
		task->tk_status = -EAGAIN;
	}
}
/*
 * 4.	Get the server port number if not yet set
 */
static void
call_bind(struct rpc_task *task)
{
	struct rpc_clnt	*clnt = task->tk_client;

	dprintk("RPC: %4d call_bind (status %d)\n",
				task->tk_pid, task->tk_status);

	task->tk_action = call_connect;
	if (!clnt->cl_port) {
		task->tk_action = call_bind_status;
		task->tk_timeout = task->tk_xprt->bind_timeout;
		rpc_getport(task, clnt);
	}
}
/*
 * 4a.	Sort out bind result
 */
static void
call_bind_status(struct rpc_task *task)
{
	int status = -EACCES;

	if (task->tk_status >= 0) {
		dprintk("RPC: %4d call_bind_status (status %d)\n",
			task->tk_pid, task->tk_status);
		task->tk_status = 0;
		task->tk_action = call_connect;
		return;
	}
	switch (task->tk_status) {
	case -EACCES:
		dprintk("RPC: %4d remote rpcbind: RPC program/version unavailable\n",
			task->tk_pid);
		rpc_delay(task, 3*HZ);
		goto retry_bind;
	case -ETIMEDOUT:
		dprintk("RPC: %4d rpcbind request timed out\n", task->tk_pid);
		if (RPC_IS_SOFT(task)) {
			status = -EIO;
			break;
		}
		goto retry_bind;
	case -EPFNOSUPPORT:
		dprintk("RPC: %4d remote rpcbind service unavailable\n", task->tk_pid);
		break;
	case -EPROTONOSUPPORT:
		dprintk("RPC: %4d remote rpcbind version 2 unavailable\n", task->tk_pid);
		break;
	default:
		dprintk("RPC: %4d unrecognized rpcbind error (%d)\n",
			task->tk_pid, -task->tk_status);
		status = -EIO;
	}
	rpc_exit(task, status);
	return;
retry_bind:
	task->tk_status = 0;
	task->tk_action = call_bind;
}
/*
 * 4b.	Connect to the RPC server
 */
static void
call_connect(struct rpc_task *task)
{
	struct rpc_xprt *xprt = task->tk_xprt;

	dprintk("RPC: %4d call_connect xprt %p %s connected\n",
			task->tk_pid, xprt,
			(xprt_connected(xprt) ? "is" : "is not"));

	task->tk_action = call_transmit;
	if (!xprt_connected(xprt)) {
		task->tk_action = call_connect_status;
		if (task->tk_status < 0)
			return;
		xprt_connect(task);
	}
}
/*
 * 4c.	Sort out connect result
 */
static void
call_connect_status(struct rpc_task *task)
{
	struct rpc_clnt *clnt = task->tk_client;
	int status = task->tk_status;

	dprintk("RPC: %5u call_connect_status (status %d)\n",
				task->tk_pid, task->tk_status);

	task->tk_status = 0;
	if (status >= 0) {
		clnt->cl_stats->netreconn++;
		task->tk_action = call_transmit;
		return;
	}

	/* Something failed: remote service port may have changed */
	rpc_force_rebind(clnt);

	switch (status) {
	case -ENOTCONN:
	case -ETIMEDOUT:
	case -EAGAIN:
		task->tk_action = call_bind;
		break;
	default:
		rpc_exit(task, -EIO);
		break;
	}
}
/*
 * 5.	Transmit the RPC request, and wait for reply
 */
static void
call_transmit(struct rpc_task *task)
{
	dprintk("RPC: %4d call_transmit (status %d)\n",
				task->tk_pid, task->tk_status);

	task->tk_action = call_status;
	if (task->tk_status < 0)
		return;
	task->tk_status = xprt_prepare_transmit(task);
	if (task->tk_status != 0)
		return;
	/* Encode here so that rpcsec_gss can use correct sequence number. */
	if (rpc_task_need_encode(task)) {
		task->tk_rqstp->rq_bytes_sent = 0;
		call_encode(task);
		/* Did the encode result in an error condition? */
		if (task->tk_status != 0)
			goto out_nosend;
	}
	task->tk_action = call_transmit_status;
	xprt_transmit(task);
	if (task->tk_status < 0)
		return;
	if (!task->tk_msg.rpc_proc->p_decode) {
		task->tk_action = rpc_exit_task;
		rpc_wake_up_task(task);
	}
	return;
out_nosend:
	/* release socket write lock before attempting to handle error */
	xprt_abort_transmit(task);
	rpc_task_force_reencode(task);
}
/*
 * 6.	Sort out the RPC call status
 */
static void
call_status(struct rpc_task *task)
{
	struct rpc_clnt	*clnt = task->tk_client;
	struct rpc_rqst	*req = task->tk_rqstp;
	int		status;

	if (req->rq_received > 0 && !req->rq_bytes_sent)
		task->tk_status = req->rq_received;

	dprintk("RPC: %4d call_status (status %d)\n",
				task->tk_pid, task->tk_status);

	status = task->tk_status;
	if (status >= 0) {
		task->tk_action = call_decode;
		return;
	}

	task->tk_status = 0;
	switch(status) {
	case -ETIMEDOUT:
		task->tk_action = call_timeout;
		break;
	case -ECONNREFUSED:
	case -ENOTCONN:
		rpc_force_rebind(clnt);
		task->tk_action = call_bind;
		break;
	case -EAGAIN:
		task->tk_action = call_transmit;
		break;
	case -EIO:
		/* shutdown or soft timeout */
		rpc_exit(task, status);
		break;
	default:
		printk("%s: RPC call returned error %d\n",
				clnt->cl_protname, -status);
		rpc_exit(task, status);
	}
}
/*
 * 6a.	Handle transmission errors.
 */
static void
call_transmit_status(struct rpc_task *task)
{
	if (task->tk_status != -EAGAIN)
		rpc_task_force_reencode(task);
	call_status(task);
}
/*
 * 6b.	Handle RPC timeout
 *	We do not release the request slot, so we keep using the
 *	same XID for all retransmits.
 */
static void
call_timeout(struct rpc_task *task)
{
	struct rpc_clnt	*clnt = task->tk_client;

	if (xprt_adjust_timeout(task->tk_rqstp) == 0) {
		dprintk("RPC: %4d call_timeout (minor)\n", task->tk_pid);
		goto retry;
	}

	dprintk("RPC: %4d call_timeout (major)\n", task->tk_pid);
	if (RPC_IS_SOFT(task)) {
		printk(KERN_NOTICE "%s: server %s not responding, timed out\n",
				clnt->cl_protname, clnt->cl_server);
		rpc_exit(task, -EIO);
		return;
	}

	if (!(task->tk_flags & RPC_CALL_MAJORSEEN)) {
		task->tk_flags |= RPC_CALL_MAJORSEEN;
		printk(KERN_NOTICE "%s: server %s not responding, still trying\n",
			clnt->cl_protname, clnt->cl_server);
	}
	rpc_force_rebind(clnt);

retry:
	clnt->cl_stats->rpcretrans++;
	task->tk_action = call_bind;
	task->tk_status = 0;
}
/*
 * 7.	Decode the RPC reply
 */
static void
call_decode(struct rpc_task *task)
{
	struct rpc_clnt	*clnt = task->tk_client;
	struct rpc_rqst	*req = task->tk_rqstp;
	kxdrproc_t	decode = task->tk_msg.rpc_proc->p_decode;
	u32		*p;

	dprintk("RPC: %4d call_decode (status %d)\n",
				task->tk_pid, task->tk_status);

	if (task->tk_flags & RPC_CALL_MAJORSEEN) {
		printk(KERN_NOTICE "%s: server %s OK\n",
			clnt->cl_protname, clnt->cl_server);
		task->tk_flags &= ~RPC_CALL_MAJORSEEN;
	}

	if (task->tk_status < 12) {
		if (!RPC_IS_SOFT(task)) {
			task->tk_action = call_bind;
			clnt->cl_stats->rpcretrans++;
			goto out_retry;
		}
		printk(KERN_WARNING "%s: too small RPC reply size (%d bytes)\n",
			clnt->cl_protname, task->tk_status);
		rpc_exit(task, -EIO);
		return;
	}

	req->rq_rcv_buf.len = req->rq_private_buf.len;

	/* Check that the softirq receive buffer is valid */
	WARN_ON(memcmp(&req->rq_rcv_buf, &req->rq_private_buf,
				sizeof(req->rq_rcv_buf)) != 0);

	/* Verify the RPC header */
	p = call_verify(task);
	if (IS_ERR(p)) {
		if (p == ERR_PTR(-EAGAIN))
			goto out_retry;
		return;
	}

	task->tk_action = rpc_exit_task;

	if (decode)
		task->tk_status = rpcauth_unwrap_resp(task, decode, req, p,
						      task->tk_msg.rpc_resp);
	dprintk("RPC: %4d call_decode result %d\n", task->tk_pid,
					task->tk_status);
	return;
out_retry:
	req->rq_received = req->rq_private_buf.len = 0;
	task->tk_status = 0;
}
/*
 * 8.	Refresh the credentials if rejected by the server
 */
static void
call_refresh(struct rpc_task *task)
{
	dprintk("RPC: %4d call_refresh\n", task->tk_pid);

	xprt_release(task);	/* Must do to obtain new XID */
	task->tk_action = call_refreshresult;
	task->tk_status = 0;
	task->tk_client->cl_stats->rpcauthrefresh++;
	rpcauth_refreshcred(task);
}
/*
 * 8a.	Process the results of a credential refresh
 */
static void
call_refreshresult(struct rpc_task *task)
{
	int status = task->tk_status;
	dprintk("RPC: %4d call_refreshresult (status %d)\n",
				task->tk_pid, task->tk_status);

	task->tk_status = 0;
	task->tk_action = call_reserve;
	if (status >= 0 && rpcauth_uptodatecred(task))
		return;
	if (status == -EACCES) {
		rpc_exit(task, -EACCES);
		return;
	}
	task->tk_action = call_refresh;
	if (status != -ETIMEDOUT)
		rpc_delay(task, 3*HZ);
}
/*
 * Call header serialization
 */
static u32 *
call_header(struct rpc_task *task)
{
	struct rpc_clnt *clnt = task->tk_client;
	struct rpc_rqst	*req = task->tk_rqstp;
	u32		*p = req->rq_svec[0].iov_base;

	/* FIXME: check buffer size? */

	p = xprt_skip_transport_header(task->tk_xprt, p);
	*p++ = req->rq_xid;		/* XID */
	*p++ = htonl(RPC_CALL);		/* CALL */
	*p++ = htonl(RPC_VERSION);	/* RPC version */
	*p++ = htonl(clnt->cl_prog);	/* program number */
	*p++ = htonl(clnt->cl_vers);	/* program version */
	*p++ = htonl(task->tk_msg.rpc_proc->p_proc);	/* procedure */
	p = rpcauth_marshcred(task, p);
	req->rq_slen = xdr_adjust_iovec(&req->rq_svec[0], p);
	return p;
}
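
/*
 * The header built above is six 32-bit big-endian words (per RFC
 * 1831), followed by the marshalled credential and verifier:
 *
 *	XID | CALL | RPC version (2) | program | version | procedure
 */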
/*
 * Reply header verification
 */
static u32 *
call_verify(struct rpc_task *task)
{
	struct kvec *iov = &task->tk_rqstp->rq_rcv_buf.head[0];
	int len = task->tk_rqstp->rq_rcv_buf.len >> 2;
	u32	*p = iov->iov_base, n;
	int error = -EACCES;

	if ((len -= 3) < 0)
		goto out_overflow;
	p += 1;	/* skip XID */

	if ((n = ntohl(*p++)) != RPC_REPLY) {
		printk(KERN_WARNING "call_verify: not an RPC reply: %x\n", n);
		goto out_garbage;
	}
	if ((n = ntohl(*p++)) != RPC_MSG_ACCEPTED) {
		if (--len < 0)
			goto out_overflow;
		switch ((n = ntohl(*p++))) {
		case RPC_AUTH_ERROR:
			break;
		case RPC_MISMATCH:
			dprintk("%s: RPC call version mismatch!\n", __FUNCTION__);
			error = -EPROTONOSUPPORT;
			goto out_err;
		default:
			dprintk("%s: RPC call rejected, unknown error: %x\n", __FUNCTION__, n);
			goto out_eio;
		}
		if (--len < 0)
			goto out_overflow;
		switch ((n = ntohl(*p++))) {
		case RPC_AUTH_REJECTEDCRED:
		case RPC_AUTH_REJECTEDVERF:
		case RPCSEC_GSS_CREDPROBLEM:
		case RPCSEC_GSS_CTXPROBLEM:
			if (!task->tk_cred_retry)
				break;
			task->tk_cred_retry--;
			dprintk("RPC: %4d call_verify: retry stale creds\n",
							task->tk_pid);
			rpcauth_invalcred(task);
			task->tk_action = call_refresh;
			return ERR_PTR(-EAGAIN);
		case RPC_AUTH_BADCRED:
		case RPC_AUTH_BADVERF:
			/* possibly garbled cred/verf? */
			if (!task->tk_garb_retry)
				break;
			task->tk_garb_retry--;
			dprintk("RPC: %4d call_verify: retry garbled creds\n",
							task->tk_pid);
			task->tk_action = call_bind;
			return ERR_PTR(-EAGAIN);
		case RPC_AUTH_TOOWEAK:
			printk(KERN_NOTICE "call_verify: server requires stronger "
			       "authentication.\n");
			break;
		default:
			printk(KERN_WARNING "call_verify: unknown auth error: %x\n", n);
			error = -EIO;
		}
		dprintk("RPC: %4d call_verify: call rejected %d\n",
						task->tk_pid, n);
		goto out_err;
	}
	if (!(p = rpcauth_checkverf(task, p))) {
		printk(KERN_WARNING "call_verify: auth check failed\n");
		goto out_garbage;		/* bad verifier, retry */
	}
	len = p - (u32 *)iov->iov_base - 1;
	switch ((n = ntohl(*p++))) {
	case RPC_SUCCESS:
		return p;
	case RPC_PROG_UNAVAIL:
		dprintk("RPC: call_verify: program %u is unsupported by server %s\n",
				(unsigned int)task->tk_client->cl_prog,
				task->tk_client->cl_server);
		error = -EPFNOSUPPORT;
		goto out_err;
	case RPC_PROG_MISMATCH:
		dprintk("RPC: call_verify: program %u, version %u unsupported by server %s\n",
				(unsigned int)task->tk_client->cl_prog,
				(unsigned int)task->tk_client->cl_vers,
				task->tk_client->cl_server);
		error = -EPROTONOSUPPORT;
		goto out_err;
	case RPC_PROC_UNAVAIL:
		dprintk("RPC: call_verify: proc %p unsupported by program %u, version %u on server %s\n",
				task->tk_msg.rpc_proc,
				task->tk_client->cl_prog,
				task->tk_client->cl_vers,
				task->tk_client->cl_server);
		error = -EOPNOTSUPP;
		goto out_err;
	case RPC_GARBAGE_ARGS:
		dprintk("RPC: %4d %s: server saw garbage\n", task->tk_pid, __FUNCTION__);
		break;			/* retry */
	default:
		printk(KERN_WARNING "call_verify: server accept status: %x\n", n);
		/* Also retry */
	}

out_garbage:
	task->tk_client->cl_stats->rpcgarbage++;
	if (task->tk_garb_retry) {
		task->tk_garb_retry--;
		dprintk("RPC %s: retrying %4d\n", __FUNCTION__, task->tk_pid);
		task->tk_action = call_bind;
		return ERR_PTR(-EAGAIN);
	}
	printk(KERN_WARNING "RPC %s: retry failed, exit EIO\n", __FUNCTION__);
out_eio:
	error = -EIO;
out_err:
	rpc_exit(task, error);
	return ERR_PTR(error);
out_overflow:
	printk(KERN_WARNING "RPC %s: server reply was truncated.\n", __FUNCTION__);
	goto out_garbage;
}
static int rpcproc_encode_null(void *rqstp, u32 *data, void *obj)
{
	return 0;
}

static int rpcproc_decode_null(void *rqstp, u32 *data, void *obj)
{
	return 0;
}

static struct rpc_procinfo rpcproc_null = {
	.p_encode = rpcproc_encode_null,
	.p_decode = rpcproc_decode_null,
};

int rpc_ping(struct rpc_clnt *clnt, int flags)
{
	struct rpc_message msg = {
		.rpc_proc = &rpcproc_null,
	};
	int err;
	msg.rpc_cred = authnull_ops.lookup_cred(NULL, NULL, 0);
	err = rpc_call_sync(clnt, &msg, flags);
	put_rpccred(msg.rpc_cred);
	return err;
}