/*
 *  fs/nfs/nfs4state.c
 *
 *  NFSv4 client state handling.
 *
 *  Copyright (c) 2002 The Regents of the University of Michigan.
 *  All rights reserved.
 *
 *  Kendrick Smith <kmsmith@umich.edu>
 *
 *  Redistribution and use in source and binary forms, with or without
 *  modification, are permitted provided that the following conditions
 *  are met:
 *
 *  1. Redistributions of source code must retain the above copyright
 *     notice, this list of conditions and the following disclaimer.
 *  2. Redistributions in binary form must reproduce the above copyright
 *     notice, this list of conditions and the following disclaimer in the
 *     documentation and/or other materials provided with the distribution.
 *  3. Neither the name of the University nor the names of its
 *     contributors may be used to endorse or promote products derived
 *     from this software without specific prior written permission.
 *
 *  THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
 *  WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
 *  MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 *  DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 *  FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 *  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 *  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
 *  BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
 *  LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
 *  NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
 *  SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Implementation of the NFSv4 state model. For the time being,
 * this is minimal, but will be made much more complex in a
 * subsequent patch.
 */

#include <linux/slab.h>
#include <linux/smp_lock.h>
#include <linux/nfs_fs.h>
#include <linux/nfs_idmap.h>
#include <linux/kthread.h>
#include <linux/module.h>
#include <linux/workqueue.h>
#include <linux/bitops.h>

#include "nfs4_fs.h"
#include "callback.h"
#include "delegation.h"
#include "internal.h"

#define OPENOWNER_POOL_SIZE	8

const nfs4_stateid zero_stateid;

static LIST_HEAD(nfs4_clientid_list);
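
/*
 * Per-server NFSv4 setup and teardown: init_nfsv4_state() clears the
 * nfs_client back-pointer and sibling list at mount time, and
 * destroy_nfsv4_state() below drops them again on unmount.
 */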
void
init_nfsv4_state(struct nfs_server *server)
{
	server->nfs_client = NULL;
	INIT_LIST_HEAD(&server->nfs4_siblings);
}

void
destroy_nfsv4_state(struct nfs_server *server)
{
	kfree(server->mnt_path);
	server->mnt_path = NULL;
	if (server->nfs_client) {
		nfs_put_client(server->nfs_client);
		server->nfs_client = NULL;
	}
}

static int nfs4_init_client(struct nfs_client *clp, struct rpc_cred *cred)
{
	int status = nfs4_proc_setclientid(clp, NFS4_CALLBACK,
			nfs_callback_tcpport, cred);
	if (status == 0)
		status = nfs4_proc_setclientid_confirm(clp, cred);
	if (status == 0)
		nfs4_schedule_state_renewal(clp);
	return status;
}
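
/*
 * Hand out the next open_owner/lock_owner id for this client.
 * Callers hold clp->cl_lock.
 */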
u32
nfs4_alloc_lockowner_id(struct nfs_client *clp)
{
	return clp->cl_lockowner_id++;
}

static struct nfs4_state_owner *
nfs4_client_grab_unused(struct nfs_client *clp, struct rpc_cred *cred)
{
	struct nfs4_state_owner *sp = NULL;

	if (!list_empty(&clp->cl_unused)) {
		sp = list_entry(clp->cl_unused.next, struct nfs4_state_owner, so_list);
		atomic_inc(&sp->so_count);
		sp->so_cred = cred;
		list_move(&sp->so_list, &clp->cl_state_owners);
		clp->cl_nunused--;
	}
	return sp;
}
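
/*
 * Return a referenced credential belonging to a state owner that still
 * has open state, for use when renewing the lease; NULL if none exists.
 */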
struct rpc_cred *nfs4_get_renew_cred(struct nfs_client *clp)
{
	struct nfs4_state_owner *sp;
	struct rpc_cred *cred = NULL;

	list_for_each_entry(sp, &clp->cl_state_owners, so_list) {
		if (list_empty(&sp->so_states))
			continue;
		cred = get_rpccred(sp->so_cred);
		break;
	}
	return cred;
}
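
/*
 * Return a referenced credential suitable for SETCLIENTID: the one
 * attached to the first state owner on the list, or NULL.
 */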
struct rpc_cred *nfs4_get_setclientid_cred(struct nfs_client *clp)
{
	struct nfs4_state_owner *sp;

	if (!list_empty(&clp->cl_state_owners)) {
		sp = list_entry(clp->cl_state_owners.next,
				struct nfs4_state_owner, so_list);
		return get_rpccred(sp->so_cred);
	}
	return NULL;
}

static struct nfs4_state_owner *
nfs4_find_state_owner(struct nfs_client *clp, struct rpc_cred *cred)
{
	struct nfs4_state_owner *sp, *res = NULL;

	list_for_each_entry(sp, &clp->cl_state_owners, so_list) {
		if (sp->so_cred != cred)
			continue;
		atomic_inc(&sp->so_count);
		/* Move to the head of the list */
		list_move(&sp->so_list, &clp->cl_state_owners);
		res = sp;
		break;
	}
	return res;
}

/*
 * nfs4_alloc_state_owner(): this is called on the OPEN or CREATE path to
 * create a new state_owner.
 */
static struct nfs4_state_owner *
nfs4_alloc_state_owner(void)
{
	struct nfs4_state_owner *sp;

	sp = kzalloc(sizeof(*sp), GFP_KERNEL);
	if (!sp)
		return NULL;
	spin_lock_init(&sp->so_lock);
	INIT_LIST_HEAD(&sp->so_states);
	INIT_LIST_HEAD(&sp->so_delegations);
	rpc_init_wait_queue(&sp->so_sequence.wait, "Seqid_waitqueue");
	sp->so_seqid.sequence = &sp->so_sequence;
	spin_lock_init(&sp->so_sequence.lock);
	INIT_LIST_HEAD(&sp->so_sequence.list);
	atomic_set(&sp->so_count, 1);
	return sp;
}

void
nfs4_drop_state_owner(struct nfs4_state_owner *sp)
{
	struct nfs_client *clp = sp->so_client;

	spin_lock(&clp->cl_lock);
	list_del_init(&sp->so_list);
	spin_unlock(&clp->cl_lock);
}

/*
 * Note: must be called with clp->cl_sem held in order to prevent races
 *       with reboot recovery!
 */
struct nfs4_state_owner *nfs4_get_state_owner(struct nfs_server *server, struct rpc_cred *cred)
{
	struct nfs_client *clp = server->nfs_client;
	struct nfs4_state_owner *sp, *new;

	get_rpccred(cred);
	new = nfs4_alloc_state_owner();
	spin_lock(&clp->cl_lock);
	sp = nfs4_find_state_owner(clp, cred);
	if (sp == NULL)
		sp = nfs4_client_grab_unused(clp, cred);
	if (sp == NULL && new != NULL) {
		list_add(&new->so_list, &clp->cl_state_owners);
		new->so_client = clp;
		new->so_id = nfs4_alloc_lockowner_id(clp);
		new->so_cred = cred;
		sp = new;
		new = NULL;
	}
	spin_unlock(&clp->cl_lock);
	kfree(new);
	if (sp != NULL)
		return sp;
	put_rpccred(cred);
	return NULL;
}

/*
 * Must be called with clp->cl_sem held in order to avoid races
 * with state recovery...
 */
void nfs4_put_state_owner(struct nfs4_state_owner *sp)
{
	struct nfs_client *clp = sp->so_client;
	struct rpc_cred *cred = sp->so_cred;

	if (!atomic_dec_and_lock(&sp->so_count, &clp->cl_lock))
		return;
	if (clp->cl_nunused >= OPENOWNER_POOL_SIZE)
		goto out_free;
	if (list_empty(&sp->so_list))
		goto out_free;
	list_move(&sp->so_list, &clp->cl_unused);
	clp->cl_nunused++;
	spin_unlock(&clp->cl_lock);
	put_rpccred(cred);
	sp->so_cred = NULL;
	return;
out_free:
	list_del(&sp->so_list);
	spin_unlock(&clp->cl_lock);
	put_rpccred(cred);
	kfree(sp);
}
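
/*
 * Allocate an nfs4_state that will track the OPEN stateid for one
 * (inode, state_owner) pair.
 */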
static struct nfs4_state *
nfs4_alloc_open_state(void)
{
	struct nfs4_state *state;

	state = kzalloc(sizeof(*state), GFP_KERNEL);
	if (!state)
		return NULL;
	atomic_set(&state->count, 1);
	INIT_LIST_HEAD(&state->lock_states);
	spin_lock_init(&state->state_lock);
	return state;
}
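
/*
 * Update state->state and keep the owner's so_states list ordered so
 * that writeable states sit at the front (the reclaim code depends on
 * this ordering).  Callers hold owner->so_lock and inode->i_lock.
 */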
void
nfs4_state_set_mode_locked(struct nfs4_state *state, mode_t mode)
{
	if (state->state == mode)
		return;
	/* NB! List reordering - see the reclaim code for why. */
	if ((mode & FMODE_WRITE) != (state->state & FMODE_WRITE)) {
		if (mode & FMODE_WRITE)
			list_move(&state->open_states, &state->owner->so_states);
		else
			list_move_tail(&state->open_states, &state->owner->so_states);
	}
	if (mode == 0)
		list_del_init(&state->inode_states);
	state->state = mode;
}
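
/*
 * Look up the nfs4_state for this (inode, owner) pair, skipping entries
 * that are being freed, and take a reference on a match.
 * The caller must hold inode->i_lock.
 */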
static struct nfs4_state *
__nfs4_find_state_byowner(struct inode *inode, struct nfs4_state_owner *owner)
{
	struct nfs_inode *nfsi = NFS_I(inode);
	struct nfs4_state *state;

	list_for_each_entry(state, &nfsi->open_states, inode_states) {
		/* Is this in the process of being freed? */
		if (state->state == 0)
			continue;
		if (state->owner == owner) {
			atomic_inc(&state->count);
			return state;
		}
	}
	return NULL;
}

static void
nfs4_free_open_state(struct nfs4_state *state)
{
	kfree(state);
}

struct nfs4_state *
nfs4_get_open_state(struct inode *inode, struct nfs4_state_owner *owner)
{
	struct nfs4_state *state, *new;
	struct nfs_inode *nfsi = NFS_I(inode);

	spin_lock(&inode->i_lock);
	state = __nfs4_find_state_byowner(inode, owner);
	spin_unlock(&inode->i_lock);
	if (state)
		goto out;
	new = nfs4_alloc_open_state();
	spin_lock(&owner->so_lock);
	spin_lock(&inode->i_lock);
	state = __nfs4_find_state_byowner(inode, owner);
	if (state == NULL && new != NULL) {
		state = new;
		state->owner = owner;
		atomic_inc(&owner->so_count);
		list_add(&state->inode_states, &nfsi->open_states);
		state->inode = igrab(inode);
		spin_unlock(&inode->i_lock);
		/* Note: The reclaim code dictates that we add stateless
		 * and read-only stateids to the end of the list */
		list_add_tail(&state->open_states, &owner->so_states);
		spin_unlock(&owner->so_lock);
	} else {
		spin_unlock(&inode->i_lock);
		spin_unlock(&owner->so_lock);
		if (new)
			nfs4_free_open_state(new);
	}
out:
	return state;
}

/*
 * Beware! Caller must be holding exactly one
 * reference to clp->cl_sem!
 */
void nfs4_put_open_state(struct nfs4_state *state)
{
	struct inode *inode = state->inode;
	struct nfs4_state_owner *owner = state->owner;

	if (!atomic_dec_and_lock(&state->count, &owner->so_lock))
		return;
	spin_lock(&inode->i_lock);
	if (!list_empty(&state->inode_states))
		list_del(&state->inode_states);
	list_del(&state->open_states);
	spin_unlock(&inode->i_lock);
	spin_unlock(&owner->so_lock);
	iput(inode);
	nfs4_free_open_state(state);
	nfs4_put_state_owner(owner);
}

/*
 * Close the current file.
 */
void nfs4_close_state(struct nfs4_state *state, mode_t mode)
{
	struct inode *inode = state->inode;
	struct nfs4_state_owner *owner = state->owner;
	int oldstate, newstate = 0;

	atomic_inc(&owner->so_count);
	/* Protect against nfs4_find_state() */
	spin_lock(&owner->so_lock);
	spin_lock(&inode->i_lock);
	switch (mode & (FMODE_READ | FMODE_WRITE)) {
		case FMODE_READ:
			state->n_rdonly--;
			break;
		case FMODE_WRITE:
			state->n_wronly--;
			break;
		case FMODE_READ|FMODE_WRITE:
			state->n_rdwr--;
	}
	oldstate = newstate = state->state;
	if (state->n_rdwr == 0) {
		if (state->n_rdonly == 0)
			newstate &= ~FMODE_READ;
		if (state->n_wronly == 0)
			newstate &= ~FMODE_WRITE;
	}
	if (test_bit(NFS_DELEGATED_STATE, &state->flags)) {
		nfs4_state_set_mode_locked(state, newstate);
		oldstate = newstate;
	}
	spin_unlock(&inode->i_lock);
	spin_unlock(&owner->so_lock);

	if (oldstate != newstate && nfs4_do_close(inode, state) == 0)
		return;
	nfs4_put_open_state(state);
	nfs4_put_state_owner(owner);
}

/*
 * Search the state->lock_states for an existing lock_owner
 * that is compatible with current->files
 */
static struct nfs4_lock_state *
__nfs4_find_lock_state(struct nfs4_state *state, fl_owner_t fl_owner)
{
	struct nfs4_lock_state *pos;

	list_for_each_entry(pos, &state->lock_states, ls_locks) {
		if (pos->ls_owner != fl_owner)
			continue;
		atomic_inc(&pos->ls_count);
		return pos;
	}
	return NULL;
}

/*
 * Allocate and initialize a new lock_state structure for the given
 * open state and lock owner.
 */
static struct nfs4_lock_state *nfs4_alloc_lock_state(struct nfs4_state *state, fl_owner_t fl_owner)
{
	struct nfs4_lock_state *lsp;
	struct nfs_client *clp = state->owner->so_client;

	lsp = kzalloc(sizeof(*lsp), GFP_KERNEL);
	if (lsp == NULL)
		return NULL;
	lsp->ls_seqid.sequence = &state->owner->so_sequence;
	atomic_set(&lsp->ls_count, 1);
	lsp->ls_owner = fl_owner;
	spin_lock(&clp->cl_lock);
	lsp->ls_id = nfs4_alloc_lockowner_id(clp);
	spin_unlock(&clp->cl_lock);
	INIT_LIST_HEAD(&lsp->ls_locks);
	return lsp;
}

/*
 * Return a compatible lock_state. If no initialized lock_state structure
 * exists, return an uninitialized one.
 *
 * The caller must be holding clp->cl_sem
 */
static struct nfs4_lock_state *nfs4_get_lock_state(struct nfs4_state *state, fl_owner_t owner)
{
	struct nfs4_lock_state *lsp, *new = NULL;

	for (;;) {
		spin_lock(&state->state_lock);
		lsp = __nfs4_find_lock_state(state, owner);
		if (lsp != NULL)
			break;
		if (new != NULL) {
			new->ls_state = state;
			list_add(&new->ls_locks, &state->lock_states);
			set_bit(LK_STATE_IN_USE, &state->flags);
			lsp = new;
			new = NULL;
			break;
		}
		spin_unlock(&state->state_lock);
		new = nfs4_alloc_lock_state(state, owner);
		if (new == NULL)
			return NULL;
	}
	spin_unlock(&state->state_lock);
	kfree(new);
	return lsp;
}

/*
 * Release reference to lock_state, and free it if we see that
 * it is no longer in use
 */
void nfs4_put_lock_state(struct nfs4_lock_state *lsp)
{
	struct nfs4_state *state;

	if (lsp == NULL)
		return;
	state = lsp->ls_state;
	if (!atomic_dec_and_lock(&lsp->ls_count, &state->state_lock))
		return;
	list_del(&lsp->ls_locks);
	if (list_empty(&state->lock_states))
		clear_bit(LK_STATE_IN_USE, &state->flags);
	spin_unlock(&state->state_lock);
	kfree(lsp);
}
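
/*
 * file_lock_operations glue: keep the nfs4_lock_state reference count in
 * step with VFS copies and releases of the struct file_lock.
 */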
static void nfs4_fl_copy_lock(struct file_lock *dst, struct file_lock *src)
{
	struct nfs4_lock_state *lsp = src->fl_u.nfs4_fl.owner;

	dst->fl_u.nfs4_fl.owner = lsp;
	atomic_inc(&lsp->ls_count);
}

static void nfs4_fl_release_lock(struct file_lock *fl)
{
	nfs4_put_lock_state(fl->fl_u.nfs4_fl.owner);
}

static struct file_lock_operations nfs4_fl_lock_ops = {
	.fl_copy_lock = nfs4_fl_copy_lock,
	.fl_release_private = nfs4_fl_release_lock,
};
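
/*
 * Attach an nfs4_lock_state to the VFS file_lock via fl_u.nfs4_fl.owner
 * and install nfs4_fl_lock_ops, so that later copies and frees of the
 * file_lock keep the lock_state's reference count balanced.
 */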
int nfs4_set_lock_state(struct nfs4_state *state, struct file_lock *fl)
{
	struct nfs4_lock_state *lsp;

	if (fl->fl_ops != NULL)
		return 0;
	lsp = nfs4_get_lock_state(state, fl->fl_owner);
	if (lsp == NULL)
		return -ENOMEM;
	fl->fl_u.nfs4_fl.owner = lsp;
	fl->fl_ops = &nfs4_fl_lock_ops;
	return 0;
}

/*
 * Byte-range lock aware utility to initialize the stateid of read/write
 * requests.
 */
void nfs4_copy_stateid(nfs4_stateid *dst, struct nfs4_state *state, fl_owner_t fl_owner)
{
	struct nfs4_lock_state *lsp;

	memcpy(dst, &state->stateid, sizeof(*dst));
	if (test_bit(LK_STATE_IN_USE, &state->flags) == 0)
		return;

	spin_lock(&state->state_lock);
	lsp = __nfs4_find_lock_state(state, fl_owner);
	if (lsp != NULL && (lsp->ls_flags & NFS_LOCK_INITIALIZED) != 0)
		memcpy(dst, &lsp->ls_stateid, sizeof(*dst));
	spin_unlock(&state->state_lock);
	nfs4_put_lock_state(lsp);
}
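
/*
 * Seqid bookkeeping.  Each OPEN/OPEN_DOWNGRADE/CLOSE/LOCK/LOCKU carries a
 * sequence id that the server requires to be strictly ordered per owner.
 * Illustrative call order (names as used in this file):
 *
 *	seqid = nfs_alloc_seqid(&sp->so_seqid);
 *	nfs_wait_on_sequence(seqid, task);	(in the rpc_call_prepare step)
 *	...send the RPC, receive the reply...
 *	nfs_increment_open_seqid(status, seqid);
 *	nfs_free_seqid(seqid);
 */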
struct nfs_seqid *nfs_alloc_seqid(struct nfs_seqid_counter *counter)
{
	struct rpc_sequence *sequence = counter->sequence;
	struct nfs_seqid *new;

	new = kmalloc(sizeof(*new), GFP_KERNEL);
	if (new != NULL) {
		new->sequence = counter;
		spin_lock(&sequence->lock);
		list_add_tail(&new->list, &sequence->list);
		spin_unlock(&sequence->lock);
	}
	return new;
}
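
/*
 * Remove the seqid from its owner's sequence list, wake up the next
 * waiter, and free it.
 */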
void nfs_free_seqid(struct nfs_seqid *seqid)
{
	struct rpc_sequence *sequence = seqid->sequence->sequence;

	spin_lock(&sequence->lock);
	list_del(&seqid->list);
	spin_unlock(&sequence->lock);
	rpc_wake_up(&sequence->wait);
	kfree(seqid);
}

/*
 * Increment the seqid if the OPEN/OPEN_DOWNGRADE/CLOSE succeeded, or
 * failed with a seqid incrementing error -
 * see comments nfs_fs.h:seqid_mutating_error()
 */
static inline void nfs_increment_seqid(int status, struct nfs_seqid *seqid)
{
	switch (status) {
		case 0:
			break;
		case -NFS4ERR_BAD_SEQID:
		case -NFS4ERR_STALE_CLIENTID:
		case -NFS4ERR_STALE_STATEID:
		case -NFS4ERR_BAD_STATEID:
		case -NFS4ERR_BADXDR:
		case -NFS4ERR_RESOURCE:
		case -NFS4ERR_NOFILEHANDLE:
			/* Non-seqid mutating errors */
			return;
	}
	/*
	 * Note: no locking needed as we are guaranteed to be first
	 * on the sequence list
	 */
	seqid->sequence->counter++;
}

void nfs_increment_open_seqid(int status, struct nfs_seqid *seqid)
{
	if (status == -NFS4ERR_BAD_SEQID) {
		struct nfs4_state_owner *sp = container_of(seqid->sequence,
				struct nfs4_state_owner, so_seqid);
		nfs4_drop_state_owner(sp);
	}
	return nfs_increment_seqid(status, seqid);
}

/*
 * Increment the seqid if the LOCK/LOCKU succeeded, or
 * failed with a seqid incrementing error -
 * see comments nfs_fs.h:seqid_mutating_error()
 */
void nfs_increment_lock_seqid(int status, struct nfs_seqid *seqid)
{
	return nfs_increment_seqid(status, seqid);
}
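
/*
 * Serialise seqid-bearing RPCs for one owner: if this seqid is not at the
 * head of the sequence list, put the task to sleep on the sequence wait
 * queue until the earlier operations have completed.
 */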
int nfs_wait_on_sequence(struct nfs_seqid *seqid, struct rpc_task *task)
{
	struct rpc_sequence *sequence = seqid->sequence->sequence;
	int status = 0;

	if (sequence->list.next == &seqid->list)
		goto out;
	spin_lock(&sequence->lock);
	if (sequence->list.next != &seqid->list) {
		rpc_sleep_on(&sequence->wait, task, NULL, NULL);
		status = -EAGAIN;
	}
	spin_unlock(&sequence->lock);
out:
	return status;
}

static int reclaimer(void *);

static inline void nfs4_clear_recover_bit(struct nfs_client *clp)
{
	smp_mb__before_clear_bit();
	clear_bit(NFS4CLNT_STATE_RECOVER, &clp->cl_state);
	smp_mb__after_clear_bit();
	wake_up_bit(&clp->cl_state, NFS4CLNT_STATE_RECOVER);
	rpc_wake_up(&clp->cl_rpcwaitq);
}

/*
 * State recovery routine
 */
static void nfs4_recover_state(struct nfs_client *clp)
{
	struct task_struct *task;

	__module_get(THIS_MODULE);
	atomic_inc(&clp->cl_count);
	task = kthread_run(reclaimer, clp, "%u.%u.%u.%u-reclaim",
			NIPQUAD(clp->cl_addr.sin_addr));
	if (!IS_ERR(task))
		return;
	nfs4_clear_recover_bit(clp);
	nfs_put_client(clp);
	module_put(THIS_MODULE);
}

/*
 * Schedule a state recovery attempt
 */
void nfs4_schedule_state_recovery(struct nfs_client *clp)
{
	if (!clp)
		return;
	if (test_and_set_bit(NFS4CLNT_STATE_RECOVER, &clp->cl_state) == 0)
		nfs4_recover_state(clp);
}

static int nfs4_reclaim_locks(struct nfs4_state_recovery_ops *ops, struct nfs4_state *state)
{
	struct inode *inode = state->inode;
	struct file_lock *fl;
	int status = 0;

	for (fl = inode->i_flock; fl != NULL; fl = fl->fl_next) {
		if (!(fl->fl_flags & (FL_POSIX|FL_FLOCK)))
			continue;
		if (((struct nfs_open_context *)fl->fl_file->private_data)->state != state)
			continue;
		status = ops->recover_lock(state, fl);
		if (status >= 0)
			continue;
		switch (status) {
			default:
				printk(KERN_ERR "%s: unhandled error %d. Zeroing state\n",
						__FUNCTION__, status);
			case -NFS4ERR_EXPIRED:
			case -NFS4ERR_NO_GRACE:
			case -NFS4ERR_RECLAIM_BAD:
			case -NFS4ERR_RECLAIM_CONFLICT:
				/* kill_proc(fl->fl_pid, SIGLOST, 1); */
				break;
			case -NFS4ERR_STALE_CLIENTID:
				goto out_err;
		}
	}
	return 0;
out_err:
	return status;
}

static int nfs4_reclaim_open_state(struct nfs4_state_recovery_ops *ops, struct nfs4_state_owner *sp)
{
	struct nfs4_state *state;
	struct nfs4_lock_state *lock;
	int status = 0;

	/* Note: we rely on the sp->so_states list being ordered
	 * so that we always reclaim open(O_RDWR) and/or open(O_WRITE)
	 * states first.
	 * This is needed to ensure that the server won't give us any
	 * read delegations that we have to return if, say, we are
	 * recovering after a network partition or a reboot from a
	 * server that doesn't support a grace period.
	 */
	list_for_each_entry(state, &sp->so_states, open_states) {
		if (state->state == 0)
			continue;
		status = ops->recover_open(sp, state);
		if (status >= 0) {
			status = nfs4_reclaim_locks(ops, state);
			if (status < 0)
				goto out_err;
			list_for_each_entry(lock, &state->lock_states, ls_locks) {
				if (!(lock->ls_flags & NFS_LOCK_INITIALIZED))
					printk("%s: Lock reclaim failed!\n",
							__FUNCTION__);
			}
			continue;
		}
		switch (status) {
			default:
				printk(KERN_ERR "%s: unhandled error %d. Zeroing state\n",
						__FUNCTION__, status);
			case -NFS4ERR_RECLAIM_BAD:
			case -NFS4ERR_RECLAIM_CONFLICT:
				/*
				 * Open state on this file cannot be recovered
				 * All we can do is revert to using the zero stateid.
				 */
				memset(state->stateid.data, 0,
					sizeof(state->stateid.data));
				/* Mark the file as being 'closed' */
				state->state = 0;
				break;
			case -NFS4ERR_EXPIRED:
			case -NFS4ERR_NO_GRACE:
			case -NFS4ERR_STALE_CLIENTID:
				goto out_err;
		}
	}
	return 0;
out_err:
	return status;
}
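
/*
 * About to re-establish the clientid: zero every open owner and lock
 * owner sequence id and forget lock stateids, since the server will be
 * discarding its old state.
 */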
static void nfs4_state_mark_reclaim(struct nfs_client *clp)
{
	struct nfs4_state_owner *sp;
	struct nfs4_state *state;
	struct nfs4_lock_state *lock;

	/* Reset all sequence ids to zero */
	list_for_each_entry(sp, &clp->cl_state_owners, so_list) {
		sp->so_seqid.counter = 0;
		sp->so_seqid.flags = 0;
		spin_lock(&sp->so_lock);
		list_for_each_entry(state, &sp->so_states, open_states) {
			list_for_each_entry(lock, &state->lock_states, ls_locks) {
				lock->ls_seqid.counter = 0;
				lock->ls_seqid.flags = 0;
				lock->ls_flags &= ~NFS_LOCK_INITIALIZED;
			}
		}
		spin_unlock(&sp->so_lock);
	}
}
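
/*
 * The state recovery thread.  With clp->cl_sem held for writing it first
 * tries to RENEW the existing lease; if the clientid is stale it falls
 * back to SETCLIENTID, marks everything for reclaim, and then replays
 * open, lock and delegation state through the chosen recovery ops.
 */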
static int reclaimer(void *ptr)
{
	struct nfs_client *clp = ptr;
	struct nfs4_state_owner *sp;
	struct nfs4_state_recovery_ops *ops;
	struct rpc_cred *cred;
	int status = 0;

	allow_signal(SIGKILL);

	/* Ensure exclusive access to NFSv4 state */
	lock_kernel();
	down_write(&clp->cl_sem);
	/* Are there any NFS mounts out there? */
	if (list_empty(&clp->cl_superblocks))
		goto out;
restart_loop:
	ops = &nfs4_network_partition_recovery_ops;
	/* Are there any open files on this volume? */
	cred = nfs4_get_renew_cred(clp);
	if (cred != NULL) {
		/* Yes there are: try to renew the old lease */
		status = nfs4_proc_renew(clp, cred);
		switch (status) {
			case 0:
			case -NFS4ERR_CB_PATH_DOWN:
				put_rpccred(cred);
				goto out;
			case -NFS4ERR_STALE_CLIENTID:
			case -NFS4ERR_LEASE_MOVED:
				ops = &nfs4_reboot_recovery_ops;
		}
	} else {
		/* "reboot" to ensure we clear all state on the server */
		clp->cl_boot_time = CURRENT_TIME;
		cred = nfs4_get_setclientid_cred(clp);
	}
	/* We're going to have to re-establish a clientid */
	nfs4_state_mark_reclaim(clp);
	status = -ENOENT;
	if (cred != NULL) {
		status = nfs4_init_client(clp, cred);
		put_rpccred(cred);
	}
	if (status)
		goto out_error;
	/* Mark all delegations for reclaim */
	nfs_delegation_mark_reclaim(clp);
	/* Note: list is protected by exclusive lock on clp->cl_sem */
	list_for_each_entry(sp, &clp->cl_state_owners, so_list) {
		status = nfs4_reclaim_open_state(ops, sp);
		if (status < 0) {
			if (status == -NFS4ERR_NO_GRACE) {
				ops = &nfs4_network_partition_recovery_ops;
				status = nfs4_reclaim_open_state(ops, sp);
			}
			if (status == -NFS4ERR_STALE_CLIENTID)
				goto restart_loop;
			if (status == -NFS4ERR_EXPIRED)
				goto restart_loop;
		}
	}
	nfs_delegation_reap_unclaimed(clp);
out:
	up_write(&clp->cl_sem);
	unlock_kernel();
	if (status == -NFS4ERR_CB_PATH_DOWN)
		nfs_handle_cb_pathdown(clp);
	nfs4_clear_recover_bit(clp);
	nfs_put_client(clp);
	module_put_and_exit(0);
	return 0;
out_error:
	printk(KERN_WARNING "Error: state recovery failed on NFSv4 server %u.%u.%u.%u with error %d\n",
			NIPQUAD(clp->cl_addr.sin_addr), -status);
	set_bit(NFS4CLNT_LEASE_EXPIRED, &clp->cl_state);
	goto out;
}