2 * linux/fs/nfs/delegation.c
4 * Copyright (C) 2004 Trond Myklebust
6 * NFS file delegation management
9 #include <linux/config.h>
10 #include <linux/completion.h>
11 #include <linux/kthread.h>
12 #include <linux/module.h>
13 #include <linux/sched.h>
14 #include <linux/spinlock.h>
16 #include <linux/nfs4.h>
17 #include <linux/nfs_fs.h>
18 #include <linux/nfs_xdr.h>
21 #include "delegation.h"
/*
 * Allocate an uninitialized nfs_delegation.  May sleep (GFP_KERNEL).
 * Returns NULL on allocation failure; callers must check before use.
 */
23 static struct nfs_delegation *nfs_alloc_delegation(void)
25 return (struct nfs_delegation *)kmalloc(sizeof(struct nfs_delegation), GFP_KERNEL);
/*
 * Tear down a delegation: drop the RPC credential reference taken when
 * the delegation was set up.
 * NOTE(review): the kfree() of the delegation itself is not visible in
 * this view -- confirm it follows put_rpccred() in the full body.
 */
28 static void nfs_free_delegation(struct nfs_delegation *delegation)
31 put_rpccred(delegation->cred);
/*
 * Reclaim all byte-range/flock locks on @state->inode that were granted
 * locally under a delegation, by re-asserting each one with the server
 * (nfs4_lock_delegation_recall).  Only locks belonging to @ctx's open
 * file are claimed; others on the inode are skipped.
 *
 * Error handling: unexpected errors are logged, then fall through to
 * the recovery cases -- on NFS4ERR_EXPIRED / NFS4ERR_STALE_CLIENTID
 * full state recovery is scheduled for the server.
 */
35 static int nfs_delegation_claim_locks(struct nfs_open_context *ctx, struct nfs4_state *state)
37 struct inode *inode = state->inode;
41 for (fl = inode->i_flock; fl != 0; fl = fl->fl_next) {
	/* Only POSIX and BSD-flock locks are recallable here. */
42 if (!(fl->fl_flags & (FL_POSIX|FL_FLOCK)))
	/* Skip locks that belong to a different open context. */
44 if ((struct nfs_open_context *)fl->fl_file->private_data != ctx)
46 status = nfs4_lock_delegation_recall(state, fl);
	/* default: log, then deliberately fall through to recovery. */
51 printk(KERN_ERR "%s: unhandled error %d.\n",
52 __FUNCTION__, status);
53 case -NFS4ERR_EXPIRED:
	/* intentional fallthrough */
54 /* kill_proc(fl->fl_pid, SIGLOST, 1); */
55 case -NFS4ERR_STALE_CLIENTID:
56 nfs4_schedule_state_recovery(NFS_SERVER(inode)->nfs4_state);
/*
 * Convert every delegated open on @inode back into real OPEN state on
 * the server, prior to returning the delegation.
 *
 * Walks nfsi->open_files under inode->i_lock; for each context whose
 * nfs4_state is still marked NFS_DELEGATED_STATE, takes a reference on
 * the context, drops i_lock (the recall RPCs sleep), re-opens via
 * nfs4_open_delegation_recall() and then reclaims locks.
 * NOTE(review): the list-walk restart after dropping i_lock is not
 * visible in this view -- confirm the loop restarts rather than
 * continuing with a stale list cursor.
 */
65 static void nfs_delegation_claim_opens(struct inode *inode)
67 struct nfs_inode *nfsi = NFS_I(inode);
68 struct nfs_open_context *ctx;
69 struct nfs4_state *state;
73 spin_lock(&inode->i_lock);
74 list_for_each_entry(ctx, &nfsi->open_files, list) {
78 if (!test_bit(NFS_DELEGATED_STATE, &state->flags))
	/* Pin the context so it survives dropping i_lock. */
80 get_nfs_open_context(ctx);
81 spin_unlock(&inode->i_lock);
82 err = nfs4_open_delegation_recall(ctx->dentry, state);
84 err = nfs_delegation_claim_locks(ctx, state);
85 put_nfs_open_context(ctx);
90 spin_unlock(&inode->i_lock);
94 * Set up a delegation on an inode
/*
 * Refresh an existing delegation after reboot-recovery reclaim: copy
 * the new stateid, type and maxsize from the OPEN result into the
 * delegation already attached to the inode, take a fresh credential
 * reference, and clear the NEED_RECLAIM flag.
 * No-op if the inode holds no delegation.
 */
96 void nfs_inode_reclaim_delegation(struct inode *inode, struct rpc_cred *cred, struct nfs_openres *res)
98 struct nfs_delegation *delegation = NFS_I(inode)->delegation;
100 if (delegation == NULL)
102 memcpy(delegation->stateid.data, res->delegation.data,
103 sizeof(delegation->stateid.data));
104 delegation->type = res->delegation_type;
105 delegation->maxsize = res->maxsize;
	/* NOTE(review): release of the old delegation->cred is not
	 * visible here -- confirm it is dropped before this overwrite. */
107 delegation->cred = get_rpccred(cred);
108 delegation->flags &= ~NFS_DELEGATION_NEED_RECLAIM;
109 NFS_I(inode)->delegation_state = delegation->type;
114 * Set up a delegation on an inode
/*
 * Attach a freshly granted delegation to @inode.
 *
 * Revalidates attributes/page cache first so cached data is consistent
 * before we start trusting the delegation.  The new nfs_delegation is
 * filled in from the OPEN result and, under clp->cl_lock, either
 * installed (inode had none) or compared against the existing one --
 * a mismatch means the server handed out a duplicate, which is logged.
 * NOTE(review): the duplicate/already-present cleanup path (freeing the
 * unused allocation) is not visible in this view -- confirm it exists.
 */
116 int nfs_inode_set_delegation(struct inode *inode, struct rpc_cred *cred, struct nfs_openres *res)
118 struct nfs4_client *clp = NFS_SERVER(inode)->nfs4_state;
119 struct nfs_inode *nfsi = NFS_I(inode);
120 struct nfs_delegation *delegation;
123 /* Ensure we first revalidate the attributes and page cache! */
124 if ((nfsi->cache_validity & (NFS_INO_REVAL_PAGECACHE|NFS_INO_INVALID_ATTR)))
125 __nfs_revalidate_inode(NFS_SERVER(inode), inode);
127 delegation = nfs_alloc_delegation();
128 if (delegation == NULL)
130 memcpy(delegation->stateid.data, res->delegation.data,
131 sizeof(delegation->stateid.data));
132 delegation->type = res->delegation_type;
133 delegation->maxsize = res->maxsize;
134 delegation->cred = get_rpccred(cred);
135 delegation->inode = inode;
	/* cl_lock protects cl_delegations and nfsi->delegation. */
137 spin_lock(&clp->cl_lock);
138 if (nfsi->delegation == NULL) {
139 list_add(&delegation->super_list, &clp->cl_delegations);
140 nfsi->delegation = delegation;
141 nfsi->delegation_state = delegation->type;
	/* Inode already delegated: cross-check for a server bug. */
144 if (memcmp(&delegation->stateid, &nfsi->delegation->stateid,
145 sizeof(delegation->stateid)) != 0 ||
146 delegation->type != nfsi->delegation->type) {
147 printk("%s: server %u.%u.%u.%u, handed out a duplicate delegation!\n",
148 __FUNCTION__, NIPQUAD(clp->cl_addr));
152 spin_unlock(&clp->cl_lock);
/*
 * Perform the actual DELEGRETURN RPC for @delegation, then free it.
 * The inode is revalidated first so attributes the delegation was
 * masking are refreshed from the server.  Consumes @delegation.
 */
157 static int nfs_do_return_delegation(struct inode *inode, struct nfs_delegation *delegation)
161 __nfs_revalidate_inode(NFS_SERVER(inode), inode);
163 res = nfs4_proc_delegreturn(inode, delegation->cred, &delegation->stateid);
164 nfs_free_delegation(delegation);
168 /* Sync all data to disk upon delegation return */
169 static void nfs_msync_inode(struct inode *inode)
171 filemap_fdatawrite(inode->i_mapping);
173 filemap_fdatawait(inode->i_mapping);
177 * Basic procedure for returning a delegation to the server
177 * Basic procedure for returning a delegation to the server
/*
 * Detach @inode's delegation (if any) and return it to the server.
 *
 * Ordering: flush dirty data, take cl_sem (read) to serialize against
 * lease recovery, take nfsi->rwsem (write) to block new delegated
 * opens, then unlink the delegation under cl_lock.  The open/lock
 * state is reclaimed (nfs_delegation_claim_opens) before the locks are
 * dropped, data is flushed once more, and only then is the
 * DELEGRETURN RPC issued outside all locks.
 */
179 int __nfs_inode_return_delegation(struct inode *inode)
181 struct nfs4_client *clp = NFS_SERVER(inode)->nfs4_state;
182 struct nfs_inode *nfsi = NFS_I(inode);
183 struct nfs_delegation *delegation;
186 nfs_msync_inode(inode);
187 down_read(&clp->cl_sem);
188 /* Guard against new delegated open calls */
189 down_write(&nfsi->rwsem);
190 spin_lock(&clp->cl_lock);
191 delegation = nfsi->delegation;
192 if (delegation != NULL) {
193 list_del_init(&delegation->super_list);
194 nfsi->delegation = NULL;
195 nfsi->delegation_state = 0;
197 spin_unlock(&clp->cl_lock);
198 nfs_delegation_claim_opens(inode);
199 up_write(&nfsi->rwsem);
200 up_read(&clp->cl_sem);
201 nfs_msync_inode(inode);
	/* RPC happens outside cl_sem/rwsem -- delegation is already
	 * unlinked, so nobody else can see it. */
203 if (delegation != NULL)
204 res = nfs_do_return_delegation(inode, delegation);
209 * Return all delegations associated to a super block
209 * Return all delegations associated to a super block
/*
 * Return every delegation whose inode lives on @sb (e.g. at umount).
 * For each match, igrab() pins the inode, cl_lock is dropped for the
 * sleeping return, and the scan restarts from the list head since the
 * list may have changed while unlocked.
 * NOTE(review): the restart goto, iput() and the skip when igrab()
 * fails are elided from this view -- confirm in the full body.
 */
211 void nfs_return_all_delegations(struct super_block *sb)
213 struct nfs4_client *clp = NFS_SB(sb)->nfs4_state;
214 struct nfs_delegation *delegation;
220 spin_lock(&clp->cl_lock);
221 list_for_each_entry(delegation, &clp->cl_delegations, super_list) {
222 if (delegation->inode->i_sb != sb)
224 inode = igrab(delegation->inode);
227 spin_unlock(&clp->cl_lock);
228 nfs_inode_return_delegation(inode);
232 spin_unlock(&clp->cl_lock);
/*
 * Kthread body (started by nfs_expire_all_delegations): return every
 * delegation held from @ptr's nfs4_client after the lease has expired.
 *
 * Bails out early if state recovery is in progress or the lease is no
 * longer marked expired.  Same drop-lock/return/restart pattern as
 * nfs_return_all_delegations.  On exit, drops the client reference and
 * the module reference taken by the caller.
 */
235 int nfs_do_expire_all_delegations(void *ptr)
237 struct nfs4_client *clp = ptr;
238 struct nfs_delegation *delegation;
	/* Allow the thread to be killed rather than loop forever. */
242 allow_signal(SIGKILL);
244 spin_lock(&clp->cl_lock);
245 if (test_bit(NFS4CLNT_STATE_RECOVER, &clp->cl_state) != 0)
247 if (test_bit(NFS4CLNT_LEASE_EXPIRED, &clp->cl_state) == 0)
249 list_for_each_entry(delegation, &clp->cl_delegations, super_list) {
250 inode = igrab(delegation->inode);
253 spin_unlock(&clp->cl_lock);
254 err = nfs_inode_return_delegation(inode);
260 spin_unlock(&clp->cl_lock);
261 nfs4_put_client(clp);
262 module_put_and_exit(0);
/*
 * Spawn a kernel thread to return all of @clp's delegations after the
 * lease expired.  Module and client references are taken up front and
 * handed to the thread; they are released here only if kthread_run()
 * fails (the error-check line is elided from this view).
 */
265 void nfs_expire_all_delegations(struct nfs4_client *clp)
267 struct task_struct *task;
269 __module_get(THIS_MODULE);
270 atomic_inc(&clp->cl_count);
271 task = kthread_run(nfs_do_expire_all_delegations, clp,
272 "%u.%u.%u.%u-delegreturn",
273 NIPQUAD(clp->cl_addr));
	/* Failure path: undo the references the thread would have
	 * released itself. */
276 nfs4_put_client(clp);
277 module_put(THIS_MODULE);
281 * Return all delegations following an NFS4ERR_CB_PATH_DOWN error.
281 * Return all delegations following an NFS4ERR_CB_PATH_DOWN error.
/*
 * The server can no longer reach our callback channel, so delegations
 * cannot be recalled -- proactively return them all.  Same pin-inode,
 * drop-lock, return, restart pattern as nfs_return_all_delegations
 * (restart/iput lines elided from this view).
 */
283 void nfs_handle_cb_pathdown(struct nfs4_client *clp)
285 struct nfs_delegation *delegation;
291 spin_lock(&clp->cl_lock);
292 list_for_each_entry(delegation, &clp->cl_delegations, super_list) {
293 inode = igrab(delegation->inode);
296 spin_unlock(&clp->cl_lock);
297 nfs_inode_return_delegation(inode);
301 spin_unlock(&clp->cl_lock);
/*
 * Arguments handed to recall_thread() by
 * nfs_async_inode_return_delegation().  @started is completed by the
 * thread once it has copied/validated its arguments, so the spawner's
 * stack-allocated struct can safely go out of scope.
 */
304 struct recall_threadargs {
306 struct nfs4_client *clp;
307 const nfs4_stateid *stateid;
309 struct completion started;
/*
 * Kernel-thread body for an asynchronous delegation recall (server
 * issued CB_RECALL).  Mirrors __nfs_inode_return_delegation, but only
 * detaches the delegation if its stateid matches the one the server
 * asked us to recall; otherwise reports -ENOENT via args->result.
 * complete(&args->started) releases the spawner, which is blocked on
 * the stack-allocated args -- args must not be touched after that
 * except where already copied.
 */
313 static int recall_thread(void *data)
315 struct recall_threadargs *args = (struct recall_threadargs *)data;
316 struct inode *inode = igrab(args->inode);
317 struct nfs4_client *clp = NFS_SERVER(inode)->nfs4_state;
318 struct nfs_inode *nfsi = NFS_I(inode);
319 struct nfs_delegation *delegation;
321 daemonize("nfsv4-delegreturn");
323 nfs_msync_inode(inode);
324 down_read(&clp->cl_sem);
325 down_write(&nfsi->rwsem);
326 spin_lock(&clp->cl_lock);
327 delegation = nfsi->delegation;
	/* Only detach if this is the delegation being recalled. */
328 if (delegation != NULL && memcmp(delegation->stateid.data,
330 sizeof(delegation->stateid.data)) == 0) {
331 list_del_init(&delegation->super_list);
332 nfsi->delegation = NULL;
333 nfsi->delegation_state = 0;
337 args->result = -ENOENT;
339 spin_unlock(&clp->cl_lock);
	/* Spawner may return now; args is dead after this point. */
340 complete(&args->started);
341 nfs_delegation_claim_opens(inode);
342 up_write(&nfsi->rwsem);
343 up_read(&clp->cl_sem);
344 nfs_msync_inode(inode);
346 if (delegation != NULL)
347 nfs_do_return_delegation(inode, delegation);
349 module_put_and_exit(0);
353 * Asynchronous delegation recall!
353 * Asynchronous delegation recall!
/*
 * Spawn recall_thread() to return @inode's delegation (matching
 * @stateid) without blocking the callback.  Waits only until the
 * thread signals data.started, i.e. until it has finished using the
 * stack-allocated args.  On kernel_thread() failure the module
 * reference is dropped here (error-check line elided from this view).
 */
355 int nfs_async_inode_return_delegation(struct inode *inode, const nfs4_stateid *stateid)
357 struct recall_threadargs data = {
363 init_completion(&data.started);
364 __module_get(THIS_MODULE);
365 status = kernel_thread(recall_thread, &data, CLONE_KERNEL);
368 wait_for_completion(&data.started);
371 module_put(THIS_MODULE);
376 * Retrieve the inode associated with a delegation
376 * Retrieve the inode associated with a delegation
/*
 * Look up, by filehandle, the inode of a delegation held from @clp
 * (used by the callback server to map a CB_RECALL to an inode).
 * Returns a referenced inode (igrab) or NULL if no delegation matches.
 */
378 struct inode *nfs_delegation_find_inode(struct nfs4_client *clp, const struct nfs_fh *fhandle)
380 struct nfs_delegation *delegation;
381 struct inode *res = NULL;
382 spin_lock(&clp->cl_lock);
383 list_for_each_entry(delegation, &clp->cl_delegations, super_list) {
384 if (nfs_compare_fh(fhandle, &NFS_I(delegation->inode)->fh) == 0) {
385 res = igrab(delegation->inode);
389 spin_unlock(&clp->cl_lock);
394 * Mark all delegations as needing to be reclaimed
394 * Mark all delegations as needing to be reclaimed
/*
 * At the start of reboot recovery, flag every delegation held from
 * @clp so that nfs_delegation_reap_unclaimed() can later reap any that
 * were not successfully reclaimed.
 */
396 void nfs_delegation_mark_reclaim(struct nfs4_client *clp)
398 struct nfs_delegation *delegation;
399 spin_lock(&clp->cl_lock);
400 list_for_each_entry(delegation, &clp->cl_delegations, super_list)
401 delegation->flags |= NFS_DELEGATION_NEED_RECLAIM;
402 spin_unlock(&clp->cl_lock);
406 * Reap all unclaimed delegations after reboot recovery is done
406 * Reap all unclaimed delegations after reboot recovery is done
/*
 * Drop every delegation still flagged NFS_DELEGATION_NEED_RECLAIM
 * (i.e. the server refused or we failed to reclaim it after reboot).
 * Victims are moved to a private list under cl_lock and detached from
 * their inodes, then freed outside the lock -- nfs_free_delegation()
 * can sleep, so it must not run with cl_lock held.
 */
408 void nfs_delegation_reap_unclaimed(struct nfs4_client *clp)
410 struct nfs_delegation *delegation, *n;
412 spin_lock(&clp->cl_lock);
	/* _safe: entries are unlinked while iterating. */
413 list_for_each_entry_safe(delegation, n, &clp->cl_delegations, super_list) {
414 if ((delegation->flags & NFS_DELEGATION_NEED_RECLAIM) == 0)
416 list_move(&delegation->super_list, &head);
417 NFS_I(delegation->inode)->delegation = NULL;
418 NFS_I(delegation->inode)->delegation_state = 0;
420 spin_unlock(&clp->cl_lock);
421 while(!list_empty(&head)) {
422 delegation = list_entry(head.next, struct nfs_delegation, super_list);
423 list_del(&delegation->super_list);
424 nfs_free_delegation(delegation);