/*
 *  Client-side support for the NFSv4 state model.
 *
 *  Copyright (c) 2002 The Regents of the University of Michigan.
 *  All rights reserved.
 *
 *  Kendrick Smith <kmsmith@umich.edu>
 *
 *  Redistribution and use in source and binary forms, with or without
 *  modification, are permitted provided that the following conditions
 *  are met:
 *
 *  1. Redistributions of source code must retain the above copyright
 *     notice, this list of conditions and the following disclaimer.
 *  2. Redistributions in binary form must reproduce the above copyright
 *     notice, this list of conditions and the following disclaimer in the
 *     documentation and/or other materials provided with the distribution.
 *  3. Neither the name of the University nor the names of its
 *     contributors may be used to endorse or promote products derived
 *     from this software without specific prior written permission.
 *
 *  THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
 *  WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
 *  MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 *  DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 *  FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 *  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 *  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
 *  BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
 *  LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
 *  NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
 *  SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Implementation of the NFSv4 state model.  For the time being,
 * this is minimal, but will be made much more complex in a
 * subsequent patch.
 */

#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/smp_lock.h>
#include <linux/nfs_fs.h>
#include <linux/nfs_idmap.h>
#include <linux/kthread.h>
#include <linux/module.h>
#include <linux/random.h>
#include <linux/workqueue.h>
#include <linux/bitops.h>

#include "nfs4_fs.h"
#include "callback.h"
#include "delegation.h"
#include "internal.h"

#define OPENOWNER_POOL_SIZE	8

const nfs4_stateid zero_stateid;

static LIST_HEAD(nfs4_clientid_list);
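
/*
 * Establish a client ID with the server: send SETCLIENTID, confirm it
 * with SETCLIENTID_CONFIRM and, on success, start the lease renewal
 * timer for this nfs_client.
 */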
static int nfs4_init_client(struct nfs_client *clp, struct rpc_cred *cred)
{
	int status = nfs4_proc_setclientid(clp, NFS4_CALLBACK,
				nfs_callback_tcpport, cred);
	if (status == 0)
		status = nfs4_proc_setclientid_confirm(clp, cred);
	if (status == 0)
		nfs4_schedule_state_renewal(clp);
	return status;
}
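
/*
 * Pick a credential for lease renewal: walk the state owners attached
 * to this client and return a reference to the credential of the first
 * owner that still holds open state, or NULL if there is none.
 */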
struct rpc_cred *nfs4_get_renew_cred(struct nfs_client *clp)
{
	struct nfs4_state_owner *sp;
	struct rb_node *pos;
	struct rpc_cred *cred = NULL;

	for (pos = rb_first(&clp->cl_state_owners); pos != NULL; pos = rb_next(pos)) {
		sp = rb_entry(pos, struct nfs4_state_owner, so_client_node);
		if (list_empty(&sp->so_states))
			continue;
		cred = get_rpccred(sp->so_cred);
		break;
	}
	return cred;
}

static struct rpc_cred *nfs4_get_setclientid_cred(struct nfs_client *clp)
{
	struct nfs4_state_owner *sp;
	struct rb_node *pos;

	pos = rb_first(&clp->cl_state_owners);
	if (pos != NULL) {
		sp = rb_entry(pos, struct nfs4_state_owner, so_client_node);
		return get_rpccred(sp->so_cred);
	}
	return NULL;
}
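
/*
 * Open-owner and lock-owner identifiers are 64-bit values allocated from
 * a per-client red-black tree keyed on the id.  A random starting value
 * keeps the distribution more or less flat; on a collision the allocator
 * probes linearly upwards (wrapping back to the minimum value) until a
 * free id is found, then retries the tree insertion.
 */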
static void nfs_alloc_unique_id(struct rb_root *root, struct nfs_unique_id *new,
		__u64 minval, int maxbits)
{
	struct rb_node **p, *parent;
	struct nfs_unique_id *pos;
	__u64 mask = ~0ULL;

	if (maxbits < 64)
		mask = (1ULL << maxbits) - 1ULL;

	/* Ensure distribution is more or less flat */
	get_random_bytes(&new->id, sizeof(new->id));
	new->id &= mask;
	if (new->id < minval)
		new->id += minval;
retry:
	p = &root->rb_node;
	parent = NULL;

	while (*p != NULL) {
		parent = *p;
		pos = rb_entry(parent, struct nfs_unique_id, rb_node);

		if (new->id < pos->id)
			p = &(*p)->rb_left;
		else if (new->id > pos->id)
			p = &(*p)->rb_right;
		else
			goto id_exists;
	}
	rb_link_node(&new->rb_node, parent, p);
	rb_insert_color(&new->rb_node, root);
	return;
id_exists:
	for (;;) {
		new->id++;
		if (new->id < minval || (new->id & mask) != new->id) {
			new->id = minval;
			break;
		}
		parent = rb_next(parent);
		if (parent == NULL)
			break;
		pos = rb_entry(parent, struct nfs_unique_id, rb_node);
		if (new->id < pos->id)
			break;
	}
	goto retry;
}

static void nfs_free_unique_id(struct rb_root *root, struct nfs_unique_id *id)
{
	rb_erase(&id->rb_node, root);
}

static struct nfs4_state_owner *
nfs4_find_state_owner(struct nfs_server *server, struct rpc_cred *cred)
{
	struct nfs_client *clp = server->nfs_client;
	struct rb_node **p = &clp->cl_state_owners.rb_node,
		       *parent = NULL;
	struct nfs4_state_owner *sp, *res = NULL;

	while (*p != NULL) {
		parent = *p;
		sp = rb_entry(parent, struct nfs4_state_owner, so_client_node);

		if (server < sp->so_server) {
			p = &parent->rb_left;
			continue;
		}
		if (server > sp->so_server) {
			p = &parent->rb_right;
			continue;
		}
		if (cred < sp->so_cred)
			p = &parent->rb_left;
		else if (cred > sp->so_cred)
			p = &parent->rb_right;
		else {
			atomic_inc(&sp->so_count);
			res = sp;
			break;
		}
	}
	return res;
}

static struct nfs4_state_owner *
nfs4_insert_state_owner(struct nfs_client *clp, struct nfs4_state_owner *new)
{
	struct rb_node **p = &clp->cl_state_owners.rb_node,
		       *parent = NULL;
	struct nfs4_state_owner *sp;

	while (*p != NULL) {
		parent = *p;
		sp = rb_entry(parent, struct nfs4_state_owner, so_client_node);

		if (new->so_server < sp->so_server) {
			p = &parent->rb_left;
			continue;
		}
		if (new->so_server > sp->so_server) {
			p = &parent->rb_right;
			continue;
		}
		if (new->so_cred < sp->so_cred)
			p = &parent->rb_left;
		else if (new->so_cred > sp->so_cred)
			p = &parent->rb_right;
		else {
			atomic_inc(&sp->so_count);
			return sp;
		}
	}
	nfs_alloc_unique_id(&clp->cl_openowner_id, &new->so_owner_id, 1, 64);
	rb_link_node(&new->so_client_node, parent, p);
	rb_insert_color(&new->so_client_node, &clp->cl_state_owners);
	return new;
}

static void
nfs4_remove_state_owner(struct nfs_client *clp, struct nfs4_state_owner *sp)
{
	if (!RB_EMPTY_NODE(&sp->so_client_node))
		rb_erase(&sp->so_client_node, &clp->cl_state_owners);
	nfs_free_unique_id(&clp->cl_openowner_id, &sp->so_owner_id);
}

/*
 * nfs4_alloc_state_owner(): this is called on the OPEN or CREATE path to
 * create a new state_owner.
 */
static struct nfs4_state_owner *
nfs4_alloc_state_owner(void)
{
	struct nfs4_state_owner *sp;

	sp = kzalloc(sizeof(*sp), GFP_KERNEL);
	if (!sp)
		return NULL;
	spin_lock_init(&sp->so_lock);
	INIT_LIST_HEAD(&sp->so_states);
	INIT_LIST_HEAD(&sp->so_delegations);
	rpc_init_wait_queue(&sp->so_sequence.wait, "Seqid_waitqueue");
	sp->so_seqid.sequence = &sp->so_sequence;
	spin_lock_init(&sp->so_sequence.lock);
	INIT_LIST_HEAD(&sp->so_sequence.list);
	atomic_set(&sp->so_count, 1);
	return sp;
}

static void
nfs4_drop_state_owner(struct nfs4_state_owner *sp)
{
	if (!RB_EMPTY_NODE(&sp->so_client_node)) {
		struct nfs_client *clp = sp->so_client;

		spin_lock(&clp->cl_lock);
		rb_erase(&sp->so_client_node, &clp->cl_state_owners);
		RB_CLEAR_NODE(&sp->so_client_node);
		spin_unlock(&clp->cl_lock);
	}
}
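
/*
 * Dropping a state owner unhashes it from the client's tree without
 * freeing it, so the next OPEN using the same credential allocates a
 * fresh owner (and hence a fresh owner id and seqid).  This is used when
 * the server returns NFS4ERR_BAD_SEQID and the existing owner can no
 * longer be trusted; see nfs_increment_open_seqid() below.
 */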

/*
 * Note: must be called with clp->cl_sem held in order to prevent races
 *       with reboot recovery!
 */
struct nfs4_state_owner *nfs4_get_state_owner(struct nfs_server *server, struct rpc_cred *cred)
{
	struct nfs_client *clp = server->nfs_client;
	struct nfs4_state_owner *sp, *new;

	spin_lock(&clp->cl_lock);
	sp = nfs4_find_state_owner(server, cred);
	spin_unlock(&clp->cl_lock);
	if (sp != NULL)
		return sp;
	new = nfs4_alloc_state_owner();
	if (new == NULL)
		return NULL;
	new->so_client = clp;
	new->so_server = server;
	new->so_cred = cred;
	spin_lock(&clp->cl_lock);
	sp = nfs4_insert_state_owner(clp, new);
	spin_unlock(&clp->cl_lock);
	if (sp == new)
		get_rpccred(cred);
	else {
		rpc_destroy_wait_queue(&new->so_sequence.wait);
		kfree(new);
	}
	return sp;
}

/*
 * Must be called with clp->cl_sem held in order to avoid races
 * with state recovery...
 */
void nfs4_put_state_owner(struct nfs4_state_owner *sp)
{
	struct nfs_client *clp = sp->so_client;
	struct rpc_cred *cred = sp->so_cred;

	if (!atomic_dec_and_lock(&sp->so_count, &clp->cl_lock))
		return;
	nfs4_remove_state_owner(clp, sp);
	spin_unlock(&clp->cl_lock);
	rpc_destroy_wait_queue(&sp->so_sequence.wait);
	put_rpccred(cred);
	kfree(sp);
}

static struct nfs4_state *
nfs4_alloc_open_state(void)
{
	struct nfs4_state *state;

	state = kzalloc(sizeof(*state), GFP_KERNEL);
	if (!state)
		return NULL;
	atomic_set(&state->count, 1);
	INIT_LIST_HEAD(&state->lock_states);
	spin_lock_init(&state->state_lock);
	seqlock_init(&state->seqlock);
	return state;
}

void
nfs4_state_set_mode_locked(struct nfs4_state *state, mode_t mode)
{
	if (state->state == mode)
		return;
	/* NB! List reordering - see the reclaim code for why. */
	if ((mode & FMODE_WRITE) != (state->state & FMODE_WRITE)) {
		if (mode & FMODE_WRITE)
			list_move(&state->open_states, &state->owner->so_states);
		else
			list_move_tail(&state->open_states, &state->owner->so_states);
	}
	state->state = mode;
}
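
/*
 * The reordering above keeps states that are open for write at the head
 * of the owner's so_states list and read-only states at the tail, so
 * that the reclaim code (see nfs4_reclaim_open_state()) recovers
 * writeable opens first and the server is not tempted to hand out read
 * delegations that would immediately have to be returned.
 */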

static struct nfs4_state *
__nfs4_find_state_byowner(struct inode *inode, struct nfs4_state_owner *owner)
{
	struct nfs_inode *nfsi = NFS_I(inode);
	struct nfs4_state *state;

	list_for_each_entry(state, &nfsi->open_states, inode_states) {
		if (state->owner != owner)
			continue;
		if (atomic_inc_not_zero(&state->count))
			return state;
	}
	return NULL;
}

static void
nfs4_free_open_state(struct nfs4_state *state)
{
	kfree(state);
}

struct nfs4_state *
nfs4_get_open_state(struct inode *inode, struct nfs4_state_owner *owner)
{
	struct nfs4_state *state, *new;
	struct nfs_inode *nfsi = NFS_I(inode);

	spin_lock(&inode->i_lock);
	state = __nfs4_find_state_byowner(inode, owner);
	spin_unlock(&inode->i_lock);
	if (state)
		goto out;
	new = nfs4_alloc_open_state();
	spin_lock(&owner->so_lock);
	spin_lock(&inode->i_lock);
	state = __nfs4_find_state_byowner(inode, owner);
	if (state == NULL && new != NULL) {
		state = new;
		state->owner = owner;
		atomic_inc(&owner->so_count);
		list_add(&state->inode_states, &nfsi->open_states);
		state->inode = igrab(inode);
		spin_unlock(&inode->i_lock);
		/* Note: The reclaim code dictates that we add stateless
		 * and read-only stateids to the end of the list */
		list_add_tail(&state->open_states, &owner->so_states);
		spin_unlock(&owner->so_lock);
	} else {
		spin_unlock(&inode->i_lock);
		spin_unlock(&owner->so_lock);
		if (new)
			nfs4_free_open_state(new);
	}
out:
	return state;
}

/*
 * Beware! Caller must be holding exactly one
 * reference to clp->cl_sem!
 */
void nfs4_put_open_state(struct nfs4_state *state)
{
	struct inode *inode = state->inode;
	struct nfs4_state_owner *owner = state->owner;

	if (!atomic_dec_and_lock(&state->count, &owner->so_lock))
		return;
	spin_lock(&inode->i_lock);
	list_del(&state->inode_states);
	list_del(&state->open_states);
	spin_unlock(&inode->i_lock);
	spin_unlock(&owner->so_lock);
	iput(inode);
	nfs4_free_open_state(state);
	nfs4_put_state_owner(owner);
}

/*
 * Close the current file.
 */
static void __nfs4_close(struct path *path, struct nfs4_state *state, mode_t mode, int wait)
{
	struct nfs4_state_owner *owner = state->owner;
	int call_close = 0;
	int newstate;

	atomic_inc(&owner->so_count);
	/* Protect against nfs4_find_state() */
	spin_lock(&owner->so_lock);
	switch (mode & (FMODE_READ | FMODE_WRITE)) {
		case FMODE_READ:
			state->n_rdonly--;
			break;
		case FMODE_WRITE:
			state->n_wronly--;
			break;
		case FMODE_READ|FMODE_WRITE:
			state->n_rdwr--;
	}
	newstate = FMODE_READ|FMODE_WRITE;
	if (state->n_rdwr == 0) {
		if (state->n_rdonly == 0) {
			newstate &= ~FMODE_READ;
			call_close |= test_bit(NFS_O_RDONLY_STATE, &state->flags);
			call_close |= test_bit(NFS_O_RDWR_STATE, &state->flags);
		}
		if (state->n_wronly == 0) {
			newstate &= ~FMODE_WRITE;
			call_close |= test_bit(NFS_O_WRONLY_STATE, &state->flags);
			call_close |= test_bit(NFS_O_RDWR_STATE, &state->flags);
		}
		if (newstate == 0)
			clear_bit(NFS_DELEGATED_STATE, &state->flags);
	}
	nfs4_state_set_mode_locked(state, newstate);
	spin_unlock(&owner->so_lock);

	if (!call_close) {
		nfs4_put_open_state(state);
		nfs4_put_state_owner(owner);
	} else
		nfs4_do_close(path, state, wait);
}

void nfs4_close_state(struct path *path, struct nfs4_state *state, mode_t mode)
{
	__nfs4_close(path, state, mode, 0);
}

void nfs4_close_sync(struct path *path, struct nfs4_state *state, mode_t mode)
{
	__nfs4_close(path, state, mode, 1);
}
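
/*
 * __nfs4_close() only pushes state to the server, via nfs4_do_close(),
 * when the last local user of a share mode goes away: the
 * n_rdonly/n_wronly/n_rdwr counters track the remaining users, and
 * call_close is set only if a stateid flag (NFS_O_*_STATE) says the
 * server still holds that open mode.  Otherwise the state and owner
 * references are simply dropped.  nfs4_close_sync() differs from
 * nfs4_close_state() only in that it waits for the resulting RPC to
 * complete.
 */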

/*
 * Search the state->lock_states for an existing lock_owner
 * that is compatible with current->files
 */
static struct nfs4_lock_state *
__nfs4_find_lock_state(struct nfs4_state *state, fl_owner_t fl_owner)
{
	struct nfs4_lock_state *pos;

	list_for_each_entry(pos, &state->lock_states, ls_locks) {
		if (pos->ls_owner != fl_owner)
			continue;
		atomic_inc(&pos->ls_count);
		return pos;
	}
	return NULL;
}

/*
 * Return a compatible lock_state. If no initialized lock_state structure
 * exists, return an uninitialized one.
 */
static struct nfs4_lock_state *nfs4_alloc_lock_state(struct nfs4_state *state, fl_owner_t fl_owner)
{
	struct nfs4_lock_state *lsp;
	struct nfs_client *clp = state->owner->so_client;

	lsp = kzalloc(sizeof(*lsp), GFP_KERNEL);
	if (lsp == NULL)
		return NULL;
	rpc_init_wait_queue(&lsp->ls_sequence.wait, "lock_seqid_waitqueue");
	spin_lock_init(&lsp->ls_sequence.lock);
	INIT_LIST_HEAD(&lsp->ls_sequence.list);
	lsp->ls_seqid.sequence = &lsp->ls_sequence;
	atomic_set(&lsp->ls_count, 1);
	lsp->ls_owner = fl_owner;
	spin_lock(&clp->cl_lock);
	nfs_alloc_unique_id(&clp->cl_lockowner_id, &lsp->ls_id, 1, 64);
	spin_unlock(&clp->cl_lock);
	INIT_LIST_HEAD(&lsp->ls_locks);
	return lsp;
}

static void nfs4_free_lock_state(struct nfs4_lock_state *lsp)
{
	struct nfs_client *clp = lsp->ls_state->owner->so_client;

	spin_lock(&clp->cl_lock);
	nfs_free_unique_id(&clp->cl_lockowner_id, &lsp->ls_id);
	spin_unlock(&clp->cl_lock);
	rpc_destroy_wait_queue(&lsp->ls_sequence.wait);
	kfree(lsp);
}

/*
 * Return a compatible lock_state. If no initialized lock_state structure
 * exists, return an uninitialized one.
 *
 * The caller must be holding clp->cl_sem
 */
static struct nfs4_lock_state *nfs4_get_lock_state(struct nfs4_state *state, fl_owner_t owner)
{
	struct nfs4_lock_state *lsp, *new = NULL;

	for (;;) {
		spin_lock(&state->state_lock);
		lsp = __nfs4_find_lock_state(state, owner);
		if (lsp != NULL)
			break;
		if (new != NULL) {
			new->ls_state = state;
			list_add(&new->ls_locks, &state->lock_states);
			set_bit(LK_STATE_IN_USE, &state->flags);
			lsp = new;
			new = NULL;
			break;
		}
		spin_unlock(&state->state_lock);
		new = nfs4_alloc_lock_state(state, owner);
		if (new == NULL)
			return NULL;
	}
	spin_unlock(&state->state_lock);
	if (new != NULL)
		nfs4_free_lock_state(new);
	return lsp;
}
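
/*
 * Note the allocate-then-recheck pattern above: state_lock has to be
 * dropped in order to allocate a new lock_state, so the list is searched
 * again after reacquiring it, and the speculative allocation is freed if
 * another task won the race and linked its own lock_state in the
 * meantime.
 */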

/*
 * Release reference to lock_state, and free it if we see that
 * it is no longer in use
 */
void nfs4_put_lock_state(struct nfs4_lock_state *lsp)
{
	struct nfs4_state *state;

	if (lsp == NULL)
		return;
	state = lsp->ls_state;
	if (!atomic_dec_and_lock(&lsp->ls_count, &state->state_lock))
		return;
	list_del(&lsp->ls_locks);
	if (list_empty(&state->lock_states))
		clear_bit(LK_STATE_IN_USE, &state->flags);
	spin_unlock(&state->state_lock);
	nfs4_free_lock_state(lsp);
}

static void nfs4_fl_copy_lock(struct file_lock *dst, struct file_lock *src)
{
	struct nfs4_lock_state *lsp = src->fl_u.nfs4_fl.owner;

	dst->fl_u.nfs4_fl.owner = lsp;
	atomic_inc(&lsp->ls_count);
}

static void nfs4_fl_release_lock(struct file_lock *fl)
{
	nfs4_put_lock_state(fl->fl_u.nfs4_fl.owner);
}

static struct file_lock_operations nfs4_fl_lock_ops = {
	.fl_copy_lock = nfs4_fl_copy_lock,
	.fl_release_private = nfs4_fl_release_lock,
};

int nfs4_set_lock_state(struct nfs4_state *state, struct file_lock *fl)
{
	struct nfs4_lock_state *lsp;

	if (fl->fl_ops != NULL)
		return 0;
	lsp = nfs4_get_lock_state(state, fl->fl_owner);
	if (lsp == NULL)
		return -ENOMEM;
	fl->fl_u.nfs4_fl.owner = lsp;
	fl->fl_ops = &nfs4_fl_lock_ops;
	return 0;
}
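
/*
 * Once nfs4_set_lock_state() has attached nfs4_fl_lock_ops, the VFS lock
 * code keeps the nfs4_lock_state reference count in step with the
 * struct file_lock: fl_copy_lock takes an extra reference whenever the
 * lock is duplicated, and fl_release_private drops it when the lock is
 * freed.
 */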

/*
 * Byte-range lock aware utility to initialize the stateid of read/write
 * requests.
 */
void nfs4_copy_stateid(nfs4_stateid *dst, struct nfs4_state *state, fl_owner_t fl_owner)
{
	struct nfs4_lock_state *lsp;
	int seq;

	do {
		seq = read_seqbegin(&state->seqlock);
		memcpy(dst, &state->stateid, sizeof(*dst));
	} while (read_seqretry(&state->seqlock, seq));
	if (test_bit(LK_STATE_IN_USE, &state->flags) == 0)
		return;

	spin_lock(&state->state_lock);
	lsp = __nfs4_find_lock_state(state, fl_owner);
	if (lsp != NULL && (lsp->ls_flags & NFS_LOCK_INITIALIZED) != 0)
		memcpy(dst, &lsp->ls_stateid, sizeof(*dst));
	spin_unlock(&state->state_lock);
	nfs4_put_lock_state(lsp);
}
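
/*
 * The open stateid is copied under the seqlock so that a concurrent
 * stateid update (e.g. from an OPEN reply) cannot leave us with a torn
 * copy.  If the fl_owner also has an established lock state on this
 * file, the lock stateid overrides the open stateid, so that READ and
 * WRITE requests issued while holding a byte-range lock carry it as the
 * protocol expects.
 */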

struct nfs_seqid *nfs_alloc_seqid(struct nfs_seqid_counter *counter)
{
	struct nfs_seqid *new;

	new = kmalloc(sizeof(*new), GFP_KERNEL);
	if (new != NULL) {
		new->sequence = counter;
		INIT_LIST_HEAD(&new->list);
	}
	return new;
}

void nfs_free_seqid(struct nfs_seqid *seqid)
{
	if (!list_empty(&seqid->list)) {
		struct rpc_sequence *sequence = seqid->sequence->sequence;

		spin_lock(&sequence->lock);
		list_del(&seqid->list);
		spin_unlock(&sequence->lock);
		rpc_wake_up(&sequence->wait);
	}
	kfree(seqid);
}

/*
 * Increment the seqid if the OPEN/OPEN_DOWNGRADE/CLOSE succeeded, or
 * failed with a seqid incrementing error -
 * see comments nfs_fs.h:seqid_mutating_error()
 */
static void nfs_increment_seqid(int status, struct nfs_seqid *seqid)
{
	BUG_ON(list_first_entry(&seqid->sequence->sequence->list, struct nfs_seqid, list) != seqid);
	switch (status) {
		case 0:
			break;
		case -NFS4ERR_BAD_SEQID:
			if (seqid->sequence->flags & NFS_SEQID_CONFIRMED)
				return;
			printk(KERN_WARNING "NFS: v4 server returned a bad"
					" sequence-id error on an"
					" unconfirmed sequence %p!\n",
					seqid->sequence);
			/* fall through to the non-mutating cases below */
		case -NFS4ERR_STALE_CLIENTID:
		case -NFS4ERR_STALE_STATEID:
		case -NFS4ERR_BAD_STATEID:
		case -NFS4ERR_BADXDR:
		case -NFS4ERR_RESOURCE:
		case -NFS4ERR_NOFILEHANDLE:
			/* Non-seqid mutating errors */
			return;
	}
	/*
	 * Note: no locking needed as we are guaranteed to be first
	 * on the sequence list
	 */
	seqid->sequence->counter++;
}

void nfs_increment_open_seqid(int status, struct nfs_seqid *seqid)
{
	if (status == -NFS4ERR_BAD_SEQID) {
		struct nfs4_state_owner *sp = container_of(seqid->sequence,
				struct nfs4_state_owner, so_seqid);
		nfs4_drop_state_owner(sp);
	}
	nfs_increment_seqid(status, seqid);
}

/*
 * Increment the seqid if the LOCK/LOCKU succeeded, or
 * failed with a seqid incrementing error -
 * see comments nfs_fs.h:seqid_mutating_error()
 */
void nfs_increment_lock_seqid(int status, struct nfs_seqid *seqid)
{
	nfs_increment_seqid(status, seqid);
}

int nfs_wait_on_sequence(struct nfs_seqid *seqid, struct rpc_task *task)
{
	struct rpc_sequence *sequence = seqid->sequence->sequence;
	int status = 0;

	spin_lock(&sequence->lock);
	if (list_empty(&seqid->list))
		list_add_tail(&seqid->list, &sequence->list);
	if (list_first_entry(&sequence->list, struct nfs_seqid, list) == seqid)
		goto unlock;
	rpc_sleep_on(&sequence->wait, task, NULL);
	status = -EAGAIN;
unlock:
	spin_unlock(&sequence->lock);
	return status;
}
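
/*
 * nfs_wait_on_sequence() serializes seqid-mutating operations (OPEN,
 * CLOSE, LOCK, LOCKU) for a given owner: each pending operation queues
 * its nfs_seqid on the owner's rpc_sequence list, and only the task at
 * the head is allowed to proceed; the rest sleep on the wait queue and
 * are woken in turn by nfs_free_seqid().  A typical rpc_call_prepare
 * callback therefore looks roughly like the following (illustrative
 * sketch only, not a verbatim copy of any caller, and the calldata
 * field names are those callers' own):
 *
 *	if (nfs_wait_on_sequence(calldata->arg.seqid, task) != 0)
 *		return;
 *
 * where returning without encoding anything leaves the task asleep
 * until it reaches the head of the sequence list.
 */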

static int reclaimer(void *);

static inline void nfs4_clear_recover_bit(struct nfs_client *clp)
{
	smp_mb__before_clear_bit();
	clear_bit(NFS4CLNT_STATE_RECOVER, &clp->cl_state);
	smp_mb__after_clear_bit();
	wake_up_bit(&clp->cl_state, NFS4CLNT_STATE_RECOVER);
	rpc_wake_up(&clp->cl_rpcwaitq);
}

/*
 * State recovery routine
 */
static void nfs4_recover_state(struct nfs_client *clp)
{
	struct task_struct *task;

	__module_get(THIS_MODULE);
	atomic_inc(&clp->cl_count);
	task = kthread_run(reclaimer, clp, "%s-reclaim",
				rpc_peeraddr2str(clp->cl_rpcclient,
							RPC_DISPLAY_ADDR));
	if (!IS_ERR(task))
		return;
	nfs4_clear_recover_bit(clp);
	nfs_put_client(clp);
	module_put(THIS_MODULE);
}

/*
 * Schedule a state recovery attempt
 */
void nfs4_schedule_state_recovery(struct nfs_client *clp)
{
	if (!clp)
		return;
	if (test_and_set_bit(NFS4CLNT_STATE_RECOVER, &clp->cl_state) == 0)
		nfs4_recover_state(clp);
}
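
/*
 * The NFS4CLNT_STATE_RECOVER bit ensures that at most one reclaimer
 * thread runs per nfs_client: it is set here with test_and_set_bit()
 * and cleared again by nfs4_clear_recover_bit() when the reclaimer
 * exits, which also wakes up any RPC tasks waiting for recovery to
 * finish.
 */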

static int nfs4_reclaim_locks(struct nfs4_state_recovery_ops *ops, struct nfs4_state *state)
{
	struct inode *inode = state->inode;
	struct file_lock *fl;
	int status = 0;

	for (fl = inode->i_flock; fl != NULL; fl = fl->fl_next) {
		if (!(fl->fl_flags & (FL_POSIX|FL_FLOCK)))
			continue;
		if (nfs_file_open_context(fl->fl_file)->state != state)
			continue;
		status = ops->recover_lock(state, fl);
		if (status >= 0)
			continue;
		switch (status) {
			default:
				printk(KERN_ERR "%s: unhandled error %d. Zeroing state\n",
						__FUNCTION__, status);
			case -NFS4ERR_EXPIRED:
			case -NFS4ERR_NO_GRACE:
			case -NFS4ERR_RECLAIM_BAD:
			case -NFS4ERR_RECLAIM_CONFLICT:
				/* kill_proc(fl->fl_pid, SIGLOST, 1); */
				break;
			case -NFS4ERR_STALE_CLIENTID:
				goto out_err;
		}
	}
	return 0;
out_err:
	return status;
}

static int nfs4_reclaim_open_state(struct nfs4_state_recovery_ops *ops, struct nfs4_state_owner *sp)
{
	struct nfs4_state *state;
	struct nfs4_lock_state *lock;
	int status = 0;

	/* Note: we rely on the sp->so_states list being ordered
	 * so that we always reclaim open(O_RDWR) and/or open(O_WRITE)
	 * states first.
	 * This is needed to ensure that the server won't give us any
	 * read delegations that we have to return if, say, we are
	 * recovering after a network partition or a reboot from a
	 * server that doesn't support a grace period.
	 */
	list_for_each_entry(state, &sp->so_states, open_states) {
		if (state->state == 0)
			continue;
		status = ops->recover_open(sp, state);
		if (status >= 0) {
			status = nfs4_reclaim_locks(ops, state);
			if (status < 0)
				goto out_err;
			list_for_each_entry(lock, &state->lock_states, ls_locks) {
				if (!(lock->ls_flags & NFS_LOCK_INITIALIZED))
					printk("%s: Lock reclaim failed!\n",
							__FUNCTION__);
			}
			continue;
		}
		switch (status) {
			default:
				printk(KERN_ERR "%s: unhandled error %d. Zeroing state\n",
						__FUNCTION__, status);
			case -ENOENT:
			case -NFS4ERR_RECLAIM_BAD:
			case -NFS4ERR_RECLAIM_CONFLICT:
				/*
				 * Open state on this file cannot be recovered
				 * All we can do is revert to using the zero stateid.
				 */
				memset(state->stateid.data, 0,
					sizeof(state->stateid.data));
				/* Mark the file as being 'closed' */
				state->state = 0;
				break;
			case -NFS4ERR_EXPIRED:
			case -NFS4ERR_NO_GRACE:
			case -NFS4ERR_STALE_CLIENTID:
				goto out_err;
		}
	}
	return 0;
out_err:
	return status;
}

static void nfs4_state_mark_reclaim(struct nfs_client *clp)
{
	struct nfs4_state_owner *sp;
	struct rb_node *pos;
	struct nfs4_state *state;
	struct nfs4_lock_state *lock;

	/* Reset all sequence ids to zero */
	for (pos = rb_first(&clp->cl_state_owners); pos != NULL; pos = rb_next(pos)) {
		sp = rb_entry(pos, struct nfs4_state_owner, so_client_node);
		sp->so_seqid.counter = 0;
		sp->so_seqid.flags = 0;
		spin_lock(&sp->so_lock);
		list_for_each_entry(state, &sp->so_states, open_states) {
			clear_bit(NFS_DELEGATED_STATE, &state->flags);
			clear_bit(NFS_O_RDONLY_STATE, &state->flags);
			clear_bit(NFS_O_WRONLY_STATE, &state->flags);
			clear_bit(NFS_O_RDWR_STATE, &state->flags);
			list_for_each_entry(lock, &state->lock_states, ls_locks) {
				lock->ls_seqid.counter = 0;
				lock->ls_seqid.flags = 0;
				lock->ls_flags &= ~NFS_LOCK_INITIALIZED;
			}
		}
		spin_unlock(&sp->so_lock);
	}
}
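
/*
 * Marking state for reclaim resets every open-owner and lock-owner seqid
 * to zero and clears the per-stateid NFS_O_*_STATE, delegation and
 * lock-initialized flags, so that all opens and locks are re-established
 * from scratch once the new clientid has been confirmed.
 */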

static int reclaimer(void *ptr)
{
	struct nfs_client *clp = ptr;
	struct nfs4_state_owner *sp;
	struct rb_node *pos;
	struct nfs4_state_recovery_ops *ops;
	struct rpc_cred *cred;
	int status = 0;

	allow_signal(SIGKILL);

	/* Ensure exclusive access to NFSv4 state */
	lock_kernel();
	down_write(&clp->cl_sem);
	/* Are there any NFS mounts out there? */
	if (list_empty(&clp->cl_superblocks))
		goto out;
restart_loop:
	ops = &nfs4_network_partition_recovery_ops;
	/* Are there any open files on this volume? */
	cred = nfs4_get_renew_cred(clp);
	if (cred != NULL) {
		/* Yes there are: try to renew the old lease */
		status = nfs4_proc_renew(clp, cred);
		switch (status) {
			case 0:
			case -NFS4ERR_CB_PATH_DOWN:
				put_rpccred(cred);
				goto out;
			case -NFS4ERR_STALE_CLIENTID:
			case -NFS4ERR_LEASE_MOVED:
				ops = &nfs4_reboot_recovery_ops;
		}
	} else {
		/* "reboot" to ensure we clear all state on the server */
		clp->cl_boot_time = CURRENT_TIME;
		cred = nfs4_get_setclientid_cred(clp);
	}
	/* We're going to have to re-establish a clientid */
	nfs4_state_mark_reclaim(clp);
	status = -ENOENT;
	if (cred != NULL) {
		status = nfs4_init_client(clp, cred);
		put_rpccred(cred);
	}
	if (status)
		goto out_error;
	/* Mark all delegations for reclaim */
	nfs_delegation_mark_reclaim(clp);
	/* Note: list is protected by exclusive lock on cl->cl_sem */
	for (pos = rb_first(&clp->cl_state_owners); pos != NULL; pos = rb_next(pos)) {
		sp = rb_entry(pos, struct nfs4_state_owner, so_client_node);
		status = nfs4_reclaim_open_state(ops, sp);
		if (status < 0) {
			if (status == -NFS4ERR_NO_GRACE) {
				ops = &nfs4_network_partition_recovery_ops;
				status = nfs4_reclaim_open_state(ops, sp);
			}
			if (status == -NFS4ERR_STALE_CLIENTID)
				goto restart_loop;
			if (status == -NFS4ERR_EXPIRED)
				goto restart_loop;
		}
	}
	nfs_delegation_reap_unclaimed(clp);
out:
	up_write(&clp->cl_sem);
	unlock_kernel();
	if (status == -NFS4ERR_CB_PATH_DOWN)
		nfs_handle_cb_pathdown(clp);
	nfs4_clear_recover_bit(clp);
	nfs_put_client(clp);
	module_put_and_exit(0);
	return 0;
out_error:
	printk(KERN_WARNING "Error: state recovery failed on NFSv4 server %s"
			" with error %d\n", clp->cl_hostname, -status);
	set_bit(NFS4CLNT_LEASE_EXPIRED, &clp->cl_state);
	goto out;
}