/*
 * Copyright (c) 2002 The Regents of the University of Michigan.
 * All rights reserved.
 *
 * Kendrick Smith <kmsmith@umich.edu>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
 * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
 * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Implementation of the NFSv4 "renew daemon", which wakes up periodically to
 * send a RENEW, to keep state alive on the server.  The daemon is implemented
 * as an rpc_task, not a real kernel thread, so it always runs in rpciod's
 * context.  There is one renewd per nfs4_client.
 *
 * TODO: If the send queue gets backlogged (e.g., if the server goes down),
 * we will keep filling the queue with periodic RENEW requests.  We need a
 * mechanism for ensuring that if renewd successfully sends off a request,
 * then it only wakes up when the request is finished.  Maybe use the
 * child task framework of the RPC layer?
 */
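
/*
 * Illustrative sketch, not part of this file: the cl_renewd work item is
 * assumed to be wired up by the NFSv4 state-management code roughly as
 * below, using the old three-argument INIT_WORK() of this era, so that
 * nfs4_renew_state() runs as the delayed-work handler with the nfs4_client
 * as its argument:
 *
 *	INIT_WORK(&clp->cl_renewd, nfs4_renew_state, clp);
 */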

#include <linux/sched.h>
#include <linux/smp_lock.h>
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/sunrpc/sched.h>
#include <linux/sunrpc/clnt.h>

#include <linux/nfs.h>
#include <linux/nfs4.h>
#include <linux/nfs_fs.h>

#define NFSDBG_FACILITY	NFSDBG_PROC

void
nfs4_renew_state(void *data)
{
	struct nfs4_client *clp = (struct nfs4_client *)data;
	long lease, timeout;
	unsigned long last, now;

	down_read(&clp->cl_sem);
	dprintk("%s: start\n", __FUNCTION__);
	/* Are there any active superblocks? */
	if (list_empty(&clp->cl_superblocks))
		goto out;
	spin_lock(&clp->cl_lock);
	lease = clp->cl_lease_time;
	last = clp->cl_last_renewal;
	now = jiffies;
	/* Next wakeup: two thirds of the lease period after the last renewal. */
	timeout = (2 * lease) / 3 + (long)last - (long)now;
	/* Are we close to a lease timeout? */
	if (time_after(now, last + lease/3)) {
		spin_unlock(&clp->cl_lock);
		/* Queue an asynchronous RENEW. */
		nfs4_proc_async_renew(clp);
		timeout = (2 * lease) / 3;
		spin_lock(&clp->cl_lock);
	} else
		dprintk("%s: failed to call renewd. Reason: lease not expired\n",
				__FUNCTION__);
	if (timeout < 5 * HZ)    /* safeguard */
		timeout = 5 * HZ;
	dprintk("%s: requeueing work. Lease period = %ld\n",
			__FUNCTION__, (timeout + HZ - 1) / HZ);
	cancel_delayed_work(&clp->cl_renewd);
	schedule_delayed_work(&clp->cl_renewd, timeout);
	spin_unlock(&clp->cl_lock);
out:
	up_read(&clp->cl_sem);
	dprintk("%s: done\n", __FUNCTION__);
}
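
/*
 * Worked example of the timing above (illustrative; assumes a 90-second
 * lease and HZ=1000): cl_lease_time is then 90000 jiffies, so renewd
 * re-arms itself 60 seconds after the last renewal, sends the RENEW once
 * more than 30 seconds of the lease have elapsed, and thereby leaves a
 * full third of the lease as slack for the reply to arrive.
 */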

/* Must be called with clp->cl_sem locked for writes */
void
nfs4_schedule_state_renewal(struct nfs4_client *clp)
{
	long timeout;

	spin_lock(&clp->cl_lock);
	timeout = (2 * clp->cl_lease_time) / 3 + (long)clp->cl_last_renewal
		- (long)jiffies;
	if (timeout < 5 * HZ)
		timeout = 5 * HZ;
	dprintk("%s: requeueing work. Lease period = %ld\n",
			__FUNCTION__, (timeout + HZ - 1) / HZ);
	cancel_delayed_work(&clp->cl_renewd);
	schedule_delayed_work(&clp->cl_renewd, timeout);
	spin_unlock(&clp->cl_lock);
}
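
/*
 * Presumed usage, as a sketch only (the callers live outside this file):
 * the state-setup path would arm renewd once a lease has been obtained,
 * e.g.
 *
 *	down_write(&clp->cl_sem);
 *	clp->cl_last_renewal = jiffies;
 *	nfs4_schedule_state_renewal(clp);
 *	up_write(&clp->cl_sem);
 */
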
void
nfs4_renewd_prepare_shutdown(struct nfs_server *server)
{
	struct nfs4_client *clp = server->nfs4_state;

	if (!clp)
		return;
	flush_scheduled_work();
	down_write(&clp->cl_sem);
	if (!list_empty(&server->nfs4_siblings))
		list_del_init(&server->nfs4_siblings);
	up_write(&clp->cl_sem);
}

/* Note: takes clp->cl_sem for reading itself. */
void
nfs4_kill_renewd(struct nfs4_client *clp)
{
	down_read(&clp->cl_sem);
	if (!list_empty(&clp->cl_superblocks)) {
		up_read(&clp->cl_sem);
		return;
	}
	cancel_delayed_work(&clp->cl_renewd);
	up_read(&clp->cl_sem);
	flush_scheduled_work();
}
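
/*
 * Shutdown ordering, as understood from the code above (callers are not
 * shown in this file): cancel_delayed_work() only removes a renewd run
 * that has not started yet; the trailing flush_scheduled_work() then
 * waits out any nfs4_renew_state() instance that is already executing.
 * Since nfs4_kill_renewd() is presumably reached only after the last
 * superblock has been unlinked, such an instance sees an empty
 * cl_superblocks list and does not re-arm itself.
 */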