/*
 *  Client-side XDR for NFSv4.
 *
 *  Copyright (c) 2002 The Regents of the University of Michigan.
 *  All rights reserved.
 *
 *  Kendrick Smith <kmsmith@umich.edu>
 *
 *  Redistribution and use in source and binary forms, with or without
 *  modification, are permitted provided that the following conditions
 *  are met:
 *
 *  1. Redistributions of source code must retain the above copyright
 *     notice, this list of conditions and the following disclaimer.
 *  2. Redistributions in binary form must reproduce the above copyright
 *     notice, this list of conditions and the following disclaimer in the
 *     documentation and/or other materials provided with the distribution.
 *  3. Neither the name of the University nor the names of its
 *     contributors may be used to endorse or promote products derived
 *     from this software without specific prior written permission.
 *
 *  THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
 *  WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
 *  MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 *  DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 *  FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 *  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 *  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
 *  BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
 *  LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
 *  NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
 *  SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Implementation of the NFSv4 state model.  For the time being,
 * this is minimal, but will be made much more complex in a
 * subsequent patch.
 */
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/smp_lock.h>
#include <linux/nfs_fs.h>
#include <linux/nfs_idmap.h>
#include <linux/kthread.h>
#include <linux/module.h>
#include <linux/random.h>
#include <linux/workqueue.h>
#include <linux/bitops.h>

#include "nfs4_fs.h"
#include "callback.h"
#include "delegation.h"
#include "internal.h"

#define OPENOWNER_POOL_SIZE	8

const nfs4_stateid zero_stateid;

static LIST_HEAD(nfs4_clientid_list);
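/*
 * Establish the clientid: send SETCLIENTID with our callback port,
 * confirm it with SETCLIENTID_CONFIRM, and start lease renewal on
 * success.
 */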
static int nfs4_init_client(struct nfs_client *clp, struct rpc_cred *cred)
{
	unsigned short port = nfs_callback_tcpport;
	int status;

	if (clp->cl_addr.ss_family == AF_INET6)
		port = nfs_callback_tcpport6;
	status = nfs4_proc_setclientid(clp, NFS4_CALLBACK, port, cred);
	if (status == 0)
		status = nfs4_proc_setclientid_confirm(clp, cred);
	if (status == 0)
		nfs4_schedule_state_renewal(clp);
	return status;
}
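/*
 * Credential selection helpers: prefer the machine credential when one
 * is configured; otherwise fall back to the credential of an existing
 * state owner.
 */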
static struct rpc_cred *nfs4_get_machine_cred_locked(struct nfs_client *clp)
{
	struct rpc_cred *cred = NULL;

	if (clp->cl_machine_cred != NULL)
		cred = get_rpccred(clp->cl_machine_cred);
	return cred;
}

static void nfs4_clear_machine_cred(struct nfs_client *clp)
{
	struct rpc_cred *cred;

	spin_lock(&clp->cl_lock);
	cred = clp->cl_machine_cred;
	clp->cl_machine_cred = NULL;
	spin_unlock(&clp->cl_lock);
	if (cred != NULL)
		put_rpccred(cred);
}
struct rpc_cred *nfs4_get_renew_cred_locked(struct nfs_client *clp)
{
	struct nfs4_state_owner *sp;
	struct rb_node *pos;
	struct rpc_cred *cred = NULL;

	for (pos = rb_first(&clp->cl_state_owners); pos != NULL; pos = rb_next(pos)) {
		sp = rb_entry(pos, struct nfs4_state_owner, so_client_node);
		if (list_empty(&sp->so_states))
			continue;
		cred = get_rpccred(sp->so_cred);
		break;
	}
	return cred;
}

static struct rpc_cred *nfs4_get_renew_cred(struct nfs_client *clp)
{
	struct rpc_cred *cred;

	spin_lock(&clp->cl_lock);
	cred = nfs4_get_renew_cred_locked(clp);
	spin_unlock(&clp->cl_lock);
	return cred;
}
static struct rpc_cred *nfs4_get_setclientid_cred(struct nfs_client *clp)
{
	struct nfs4_state_owner *sp;
	struct rb_node *pos;
	struct rpc_cred *cred;

	spin_lock(&clp->cl_lock);
	cred = nfs4_get_machine_cred_locked(clp);
	if (cred != NULL)
		goto out;
	pos = rb_first(&clp->cl_state_owners);
	if (pos != NULL) {
		sp = rb_entry(pos, struct nfs4_state_owner, so_client_node);
		cred = get_rpccred(sp->so_cred);
	}
out:
	spin_unlock(&clp->cl_lock);
	return cred;
}
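/*
 * Allocate a unique 64-bit identifier (used for open_owner and lock_owner
 * ids).  The starting point is randomized to keep the distribution flat;
 * on collision we advance to the next free slot in the rb-tree, wrapping
 * back to minval when the id overflows maxbits.
 */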
static void nfs_alloc_unique_id(struct rb_root *root, struct nfs_unique_id *new,
		__u64 minval, int maxbits)
{
	struct rb_node **p, *parent;
	struct nfs_unique_id *pos;
	__u64 mask = ~0ULL;

	if (maxbits < 64)
		mask = (1ULL << maxbits) - 1ULL;

	/* Ensure distribution is more or less flat */
	get_random_bytes(&new->id, sizeof(new->id));
	new->id &= mask;
	if (new->id < minval)
		new->id += minval;
retry:
	p = &root->rb_node;
	parent = NULL;

	while (*p != NULL) {
		parent = *p;
		pos = rb_entry(parent, struct nfs_unique_id, rb_node);

		if (new->id < pos->id)
			p = &(*p)->rb_left;
		else if (new->id > pos->id)
			p = &(*p)->rb_right;
		else
			goto id_exists;
	}
	rb_link_node(&new->rb_node, parent, p);
	rb_insert_color(&new->rb_node, root);
	return;
id_exists:
	for (;;) {
		new->id++;
		if (new->id < minval || (new->id & mask) != new->id) {
			new->id = minval;
			break;
		}
		parent = rb_next(parent);
		if (parent == NULL)
			break;
		pos = rb_entry(parent, struct nfs_unique_id, rb_node);
		if (new->id < pos->id)
			break;
	}
	goto retry;
}

static void nfs_free_unique_id(struct rb_root *root, struct nfs_unique_id *id)
{
	rb_erase(&id->rb_node, root);
}
static struct nfs4_state_owner *
nfs4_find_state_owner(struct nfs_server *server, struct rpc_cred *cred)
{
	struct nfs_client *clp = server->nfs_client;
	struct rb_node **p = &clp->cl_state_owners.rb_node,
		       *parent = NULL;
	struct nfs4_state_owner *sp, *res = NULL;

	while (*p != NULL) {
		parent = *p;
		sp = rb_entry(parent, struct nfs4_state_owner, so_client_node);

		if (server < sp->so_server) {
			p = &parent->rb_left;
			continue;
		}
		if (server > sp->so_server) {
			p = &parent->rb_right;
			continue;
		}
		if (cred < sp->so_cred)
			p = &parent->rb_left;
		else if (cred > sp->so_cred)
			p = &parent->rb_right;
		else {
			atomic_inc(&sp->so_count);
			res = sp;
			break;
		}
	}
	return res;
}
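/*
 * Insert a state owner into the client's cl_state_owners tree, keyed on
 * (server, cred).  If an entry already exists, take a reference on it and
 * return the existing owner instead of the new one.
 */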
static struct nfs4_state_owner *
nfs4_insert_state_owner(struct nfs_client *clp, struct nfs4_state_owner *new)
{
	struct rb_node **p = &clp->cl_state_owners.rb_node,
		       *parent = NULL;
	struct nfs4_state_owner *sp;

	while (*p != NULL) {
		parent = *p;
		sp = rb_entry(parent, struct nfs4_state_owner, so_client_node);

		if (new->so_server < sp->so_server) {
			p = &parent->rb_left;
			continue;
		}
		if (new->so_server > sp->so_server) {
			p = &parent->rb_right;
			continue;
		}
		if (new->so_cred < sp->so_cred)
			p = &parent->rb_left;
		else if (new->so_cred > sp->so_cred)
			p = &parent->rb_right;
		else {
			atomic_inc(&sp->so_count);
			return sp;
		}
	}
	nfs_alloc_unique_id(&clp->cl_openowner_id, &new->so_owner_id, 1, 64);
	rb_link_node(&new->so_client_node, parent, p);
	rb_insert_color(&new->so_client_node, &clp->cl_state_owners);
	return new;
}

static void
nfs4_remove_state_owner(struct nfs_client *clp, struct nfs4_state_owner *sp)
{
	if (!RB_EMPTY_NODE(&sp->so_client_node))
		rb_erase(&sp->so_client_node, &clp->cl_state_owners);
	nfs_free_unique_id(&clp->cl_openowner_id, &sp->so_owner_id);
}
/*
 * nfs4_alloc_state_owner(): this is called on the OPEN or CREATE path to
 * create a new state_owner.
 */
static struct nfs4_state_owner *
nfs4_alloc_state_owner(void)
{
	struct nfs4_state_owner *sp;

	sp = kzalloc(sizeof(*sp), GFP_KERNEL);
	if (!sp)
		return NULL;
	spin_lock_init(&sp->so_lock);
	INIT_LIST_HEAD(&sp->so_states);
	INIT_LIST_HEAD(&sp->so_delegations);
	rpc_init_wait_queue(&sp->so_sequence.wait, "Seqid_waitqueue");
	sp->so_seqid.sequence = &sp->so_sequence;
	spin_lock_init(&sp->so_sequence.lock);
	INIT_LIST_HEAD(&sp->so_sequence.list);
	atomic_set(&sp->so_count, 1);
	return sp;
}
static void
nfs4_drop_state_owner(struct nfs4_state_owner *sp)
{
	if (!RB_EMPTY_NODE(&sp->so_client_node)) {
		struct nfs_client *clp = sp->so_client;

		spin_lock(&clp->cl_lock);
		rb_erase(&sp->so_client_node, &clp->cl_state_owners);
		RB_CLEAR_NODE(&sp->so_client_node);
		spin_unlock(&clp->cl_lock);
	}
}
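/*
 * Look up the state owner for this (server, cred) pair, allocating and
 * inserting a new one if none exists yet.
 */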
struct nfs4_state_owner *nfs4_get_state_owner(struct nfs_server *server, struct rpc_cred *cred)
{
	struct nfs_client *clp = server->nfs_client;
	struct nfs4_state_owner *sp, *new;

	spin_lock(&clp->cl_lock);
	sp = nfs4_find_state_owner(server, cred);
	spin_unlock(&clp->cl_lock);
	if (sp != NULL)
		return sp;
	new = nfs4_alloc_state_owner();
	if (new == NULL)
		return NULL;
	new->so_client = clp;
	new->so_server = server;
	new->so_cred = cred;
	spin_lock(&clp->cl_lock);
	sp = nfs4_insert_state_owner(clp, new);
	spin_unlock(&clp->cl_lock);
	if (sp == new)
		get_rpccred(cred);
	else {
		rpc_destroy_wait_queue(&new->so_sequence.wait);
		kfree(new);
	}
	return sp;
}
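/*
 * Drop a reference to a state owner; once the last reference goes away
 * the owner is unhashed from the client and freed.
 */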
void nfs4_put_state_owner(struct nfs4_state_owner *sp)
{
	struct nfs_client *clp = sp->so_client;
	struct rpc_cred *cred = sp->so_cred;

	if (!atomic_dec_and_lock(&sp->so_count, &clp->cl_lock))
		return;
	nfs4_remove_state_owner(clp, sp);
	spin_unlock(&clp->cl_lock);
	rpc_destroy_wait_queue(&sp->so_sequence.wait);
	put_rpccred(cred);
	kfree(sp);
}
static struct nfs4_state *
nfs4_alloc_open_state(void)
{
	struct nfs4_state *state;

	state = kzalloc(sizeof(*state), GFP_KERNEL);
	if (!state)
		return NULL;
	atomic_set(&state->count, 1);
	INIT_LIST_HEAD(&state->lock_states);
	spin_lock_init(&state->state_lock);
	seqlock_init(&state->seqlock);
	return state;
}
static void
nfs4_state_set_mode_locked(struct nfs4_state *state, fmode_t fmode)
{
	if (state->state == fmode)
		return;
	/* NB! List reordering - see the reclaim code for why.  */
	if ((fmode & FMODE_WRITE) != (state->state & FMODE_WRITE)) {
		if (fmode & FMODE_WRITE)
			list_move(&state->open_states, &state->owner->so_states);
		else
			list_move_tail(&state->open_states, &state->owner->so_states);
	}
	state->state = fmode;
}
static struct nfs4_state *
__nfs4_find_state_byowner(struct inode *inode, struct nfs4_state_owner *owner)
{
	struct nfs_inode *nfsi = NFS_I(inode);
	struct nfs4_state *state;

	list_for_each_entry(state, &nfsi->open_states, inode_states) {
		if (state->owner != owner)
			continue;
		if (atomic_inc_not_zero(&state->count))
			return state;
	}
	return NULL;
}

static void
nfs4_free_open_state(struct nfs4_state *state)
{
	kfree(state);
}
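/*
 * Find the nfs4_state for this (inode, owner) pair, allocating a new one
 * if needed.  The second lookup under both locks handles a race with
 * another task inserting the same state while we allocated.
 */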
struct nfs4_state *
nfs4_get_open_state(struct inode *inode, struct nfs4_state_owner *owner)
{
	struct nfs4_state *state, *new;
	struct nfs_inode *nfsi = NFS_I(inode);

	spin_lock(&inode->i_lock);
	state = __nfs4_find_state_byowner(inode, owner);
	spin_unlock(&inode->i_lock);
	if (state)
		goto out;
	new = nfs4_alloc_open_state();
	spin_lock(&owner->so_lock);
	spin_lock(&inode->i_lock);
	state = __nfs4_find_state_byowner(inode, owner);
	if (state == NULL && new != NULL) {
		state = new;
		state->owner = owner;
		atomic_inc(&owner->so_count);
		list_add(&state->inode_states, &nfsi->open_states);
		state->inode = igrab(inode);
		spin_unlock(&inode->i_lock);
		/* Note: The reclaim code dictates that we add stateless
		 * and read-only stateids to the end of the list */
		list_add_tail(&state->open_states, &owner->so_states);
		spin_unlock(&owner->so_lock);
	} else {
		spin_unlock(&inode->i_lock);
		spin_unlock(&owner->so_lock);
		if (new)
			nfs4_free_open_state(new);
	}
out:
	return state;
}
void nfs4_put_open_state(struct nfs4_state *state)
{
	struct inode *inode = state->inode;
	struct nfs4_state_owner *owner = state->owner;

	if (!atomic_dec_and_lock(&state->count, &owner->so_lock))
		return;
	spin_lock(&inode->i_lock);
	list_del(&state->inode_states);
	list_del(&state->open_states);
	spin_unlock(&inode->i_lock);
	spin_unlock(&owner->so_lock);
	iput(inode);
	nfs4_free_open_state(state);
	nfs4_put_state_owner(owner);
}
/*
 * Close the current file.
 */
static void __nfs4_close(struct path *path, struct nfs4_state *state, fmode_t fmode, int wait)
{
	struct nfs4_state_owner *owner = state->owner;
	int call_close = 0;
	fmode_t newstate;

	atomic_inc(&owner->so_count);
	/* Protect against nfs4_find_state() */
	spin_lock(&owner->so_lock);
	switch (fmode & (FMODE_READ | FMODE_WRITE)) {
		case FMODE_READ:
			state->n_rdonly--;
			break;
		case FMODE_WRITE:
			state->n_wronly--;
			break;
		case FMODE_READ|FMODE_WRITE:
			state->n_rdwr--;
	}
	newstate = FMODE_READ|FMODE_WRITE;
	if (state->n_rdwr == 0) {
		if (state->n_rdonly == 0) {
			newstate &= ~FMODE_READ;
			call_close |= test_bit(NFS_O_RDONLY_STATE, &state->flags);
			call_close |= test_bit(NFS_O_RDWR_STATE, &state->flags);
		}
		if (state->n_wronly == 0) {
			newstate &= ~FMODE_WRITE;
			call_close |= test_bit(NFS_O_WRONLY_STATE, &state->flags);
			call_close |= test_bit(NFS_O_RDWR_STATE, &state->flags);
		}
		if (newstate == 0)
			clear_bit(NFS_DELEGATED_STATE, &state->flags);
	}
	nfs4_state_set_mode_locked(state, newstate);
	spin_unlock(&owner->so_lock);

	if (!call_close) {
		nfs4_put_open_state(state);
		nfs4_put_state_owner(owner);
	} else
		nfs4_do_close(path, state, wait);
}
void nfs4_close_state(struct path *path, struct nfs4_state *state, fmode_t fmode)
{
	__nfs4_close(path, state, fmode, 0);
}

void nfs4_close_sync(struct path *path, struct nfs4_state *state, fmode_t fmode)
{
	__nfs4_close(path, state, fmode, 1);
}
/*
 * Search the state->lock_states for an existing lock_owner
 * that is compatible with current->files
 */
static struct nfs4_lock_state *
__nfs4_find_lock_state(struct nfs4_state *state, fl_owner_t fl_owner)
{
	struct nfs4_lock_state *pos;
	list_for_each_entry(pos, &state->lock_states, ls_locks) {
		if (pos->ls_owner != fl_owner)
			continue;
		atomic_inc(&pos->ls_count);
		return pos;
	}
	return NULL;
}
/*
 * Allocate a new lock_state structure for the given open state and
 * lock owner, and assign it a unique lock_owner id.
 */
static struct nfs4_lock_state *nfs4_alloc_lock_state(struct nfs4_state *state, fl_owner_t fl_owner)
{
	struct nfs4_lock_state *lsp;
	struct nfs_client *clp = state->owner->so_client;

	lsp = kzalloc(sizeof(*lsp), GFP_KERNEL);
	if (lsp == NULL)
		return NULL;
	rpc_init_wait_queue(&lsp->ls_sequence.wait, "lock_seqid_waitqueue");
	spin_lock_init(&lsp->ls_sequence.lock);
	INIT_LIST_HEAD(&lsp->ls_sequence.list);
	lsp->ls_seqid.sequence = &lsp->ls_sequence;
	atomic_set(&lsp->ls_count, 1);
	lsp->ls_owner = fl_owner;
	spin_lock(&clp->cl_lock);
	nfs_alloc_unique_id(&clp->cl_lockowner_id, &lsp->ls_id, 1, 64);
	spin_unlock(&clp->cl_lock);
	INIT_LIST_HEAD(&lsp->ls_locks);
	return lsp;
}
static void nfs4_free_lock_state(struct nfs4_lock_state *lsp)
{
	struct nfs_client *clp = lsp->ls_state->owner->so_client;

	spin_lock(&clp->cl_lock);
	nfs_free_unique_id(&clp->cl_lockowner_id, &lsp->ls_id);
	spin_unlock(&clp->cl_lock);
	rpc_destroy_wait_queue(&lsp->ls_sequence.wait);
	kfree(lsp);
}
/*
 * Return a compatible lock_state. If no initialized lock_state structure
 * exists, return an uninitialized one.
 */
static struct nfs4_lock_state *nfs4_get_lock_state(struct nfs4_state *state, fl_owner_t owner)
{
	struct nfs4_lock_state *lsp, *new = NULL;

	for (;;) {
		spin_lock(&state->state_lock);
		lsp = __nfs4_find_lock_state(state, owner);
		if (lsp != NULL)
			break;
		if (new != NULL) {
			new->ls_state = state;
			list_add(&new->ls_locks, &state->lock_states);
			set_bit(LK_STATE_IN_USE, &state->flags);
			lsp = new;
			new = NULL;
			break;
		}
		spin_unlock(&state->state_lock);
		new = nfs4_alloc_lock_state(state, owner);
		if (new == NULL)
			return NULL;
	}
	spin_unlock(&state->state_lock);
	if (new != NULL)
		nfs4_free_lock_state(new);
	return lsp;
}
/*
 * Release reference to lock_state, and free it if we see that
 * it is no longer in use
 */
void nfs4_put_lock_state(struct nfs4_lock_state *lsp)
{
	struct nfs4_state *state;

	if (lsp == NULL)
		return;
	state = lsp->ls_state;
	if (!atomic_dec_and_lock(&lsp->ls_count, &state->state_lock))
		return;
	list_del(&lsp->ls_locks);
	if (list_empty(&state->lock_states))
		clear_bit(LK_STATE_IN_USE, &state->flags);
	spin_unlock(&state->state_lock);
	nfs4_free_lock_state(lsp);
}
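/*
 * file_lock_operations callbacks: keep the nfs4_lock_state refcount in
 * step when the VFS copies or releases a struct file_lock.
 */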
static void nfs4_fl_copy_lock(struct file_lock *dst, struct file_lock *src)
{
	struct nfs4_lock_state *lsp = src->fl_u.nfs4_fl.owner;

	dst->fl_u.nfs4_fl.owner = lsp;
	atomic_inc(&lsp->ls_count);
}

static void nfs4_fl_release_lock(struct file_lock *fl)
{
	nfs4_put_lock_state(fl->fl_u.nfs4_fl.owner);
}

static struct file_lock_operations nfs4_fl_lock_ops = {
	.fl_copy_lock = nfs4_fl_copy_lock,
	.fl_release_private = nfs4_fl_release_lock,
};
int nfs4_set_lock_state(struct nfs4_state *state, struct file_lock *fl)
{
	struct nfs4_lock_state *lsp;

	if (fl->fl_ops != NULL)
		return 0;
	lsp = nfs4_get_lock_state(state, fl->fl_owner);
	if (lsp == NULL)
		return -ENOMEM;
	fl->fl_u.nfs4_fl.owner = lsp;
	fl->fl_ops = &nfs4_fl_lock_ops;
	return 0;
}
/*
 * Byte-range lock aware utility to initialize the stateid of read/write
 * requests.
 */
void nfs4_copy_stateid(nfs4_stateid *dst, struct nfs4_state *state, fl_owner_t fl_owner)
{
	struct nfs4_lock_state *lsp;
	int seq;

	do {
		seq = read_seqbegin(&state->seqlock);
		memcpy(dst, &state->stateid, sizeof(*dst));
	} while (read_seqretry(&state->seqlock, seq));
	if (test_bit(LK_STATE_IN_USE, &state->flags) == 0)
		return;

	spin_lock(&state->state_lock);
	lsp = __nfs4_find_lock_state(state, fl_owner);
	if (lsp != NULL && (lsp->ls_flags & NFS_LOCK_INITIALIZED) != 0)
		memcpy(dst, &lsp->ls_stateid, sizeof(*dst));
	spin_unlock(&state->state_lock);
	nfs4_put_lock_state(lsp);
}
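/*
 * Sequence-id bookkeeping: in-flight OPEN/CLOSE/LOCK requests queue an
 * nfs_seqid on their owner's rpc_sequence list so that seqid-mutating
 * operations are serialized per owner.
 */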
struct nfs_seqid *nfs_alloc_seqid(struct nfs_seqid_counter *counter)
{
	struct nfs_seqid *new;

	new = kmalloc(sizeof(*new), GFP_KERNEL);
	if (new != NULL) {
		new->sequence = counter;
		INIT_LIST_HEAD(&new->list);
	}
	return new;
}
void nfs_free_seqid(struct nfs_seqid *seqid)
{
	if (!list_empty(&seqid->list)) {
		struct rpc_sequence *sequence = seqid->sequence->sequence;

		spin_lock(&sequence->lock);
		list_del(&seqid->list);
		spin_unlock(&sequence->lock);
		rpc_wake_up(&sequence->wait);
	}
	kfree(seqid);
}
/*
 * Increment the seqid if the OPEN/OPEN_DOWNGRADE/CLOSE succeeded, or
 * failed with a seqid incrementing error -
 * see comments nfs_fs.h:seqid_mutating_error()
 */
static void nfs_increment_seqid(int status, struct nfs_seqid *seqid)
{
	BUG_ON(list_first_entry(&seqid->sequence->sequence->list, struct nfs_seqid, list) != seqid);
	switch (status) {
		case 0:
			break;
		case -NFS4ERR_BAD_SEQID:
			if (seqid->sequence->flags & NFS_SEQID_CONFIRMED)
				return;
			printk(KERN_WARNING "NFS: v4 server returned a bad"
					" sequence-id error on an"
					" unconfirmed sequence %p!\n",
					seqid->sequence);
		case -NFS4ERR_STALE_CLIENTID:
		case -NFS4ERR_STALE_STATEID:
		case -NFS4ERR_BAD_STATEID:
		case -NFS4ERR_BADXDR:
		case -NFS4ERR_RESOURCE:
		case -NFS4ERR_NOFILEHANDLE:
			/* Non-seqid mutating errors */
			return;
	}
	/*
	 * Note: no locking needed as we are guaranteed to be first
	 * on the sequence list
	 */
	seqid->sequence->counter++;
}
void nfs_increment_open_seqid(int status, struct nfs_seqid *seqid)
{
	if (status == -NFS4ERR_BAD_SEQID) {
		struct nfs4_state_owner *sp = container_of(seqid->sequence,
				struct nfs4_state_owner, so_seqid);
		nfs4_drop_state_owner(sp);
	}
	nfs_increment_seqid(status, seqid);
}

/*
 * Increment the seqid if the LOCK/LOCKU succeeded, or
 * failed with a seqid incrementing error -
 * see comments nfs_fs.h:seqid_mutating_error()
 */
void nfs_increment_lock_seqid(int status, struct nfs_seqid *seqid)
{
	nfs_increment_seqid(status, seqid);
}
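/*
 * Queue the seqid on its owner's sequence list.  If it is not at the head
 * of the queue, put the RPC task to sleep and return -EAGAIN so the
 * caller waits its turn.
 */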
int nfs_wait_on_sequence(struct nfs_seqid *seqid, struct rpc_task *task)
{
	struct rpc_sequence *sequence = seqid->sequence->sequence;
	int status = 0;

	spin_lock(&sequence->lock);
	if (list_empty(&seqid->list))
		list_add_tail(&seqid->list, &sequence->list);
	if (list_first_entry(&sequence->list, struct nfs_seqid, list) == seqid)
		goto unlock;
	rpc_sleep_on(&sequence->wait, task, NULL);
	status = -EAGAIN;
unlock:
	spin_unlock(&sequence->lock);
	return status;
}
static int nfs4_run_state_manager(void *);

static void nfs4_clear_state_manager_bit(struct nfs_client *clp)
{
	smp_mb__before_clear_bit();
	clear_bit(NFS4CLNT_MANAGER_RUNNING, &clp->cl_state);
	smp_mb__after_clear_bit();
	wake_up_bit(&clp->cl_state, NFS4CLNT_MANAGER_RUNNING);
	rpc_wake_up(&clp->cl_rpcwaitq);
}
/*
 * Schedule the nfs_client asynchronous state management routine
 */
void nfs4_schedule_state_manager(struct nfs_client *clp)
{
	struct task_struct *task;

	if (test_and_set_bit(NFS4CLNT_MANAGER_RUNNING, &clp->cl_state) != 0)
		return;
	__module_get(THIS_MODULE);
	atomic_inc(&clp->cl_count);
	task = kthread_run(nfs4_run_state_manager, clp, "%s-manager",
				rpc_peeraddr2str(clp->cl_rpcclient,
							RPC_DISPLAY_ADDR));
	if (!IS_ERR(task))
		return;
	nfs4_clear_state_manager_bit(clp);
	nfs_put_client(clp);
	module_put(THIS_MODULE);
}
/*
 * Schedule a state recovery attempt
 */
void nfs4_schedule_state_recovery(struct nfs_client *clp)
{
	if (!clp)
		return;
	if (!test_bit(NFS4CLNT_LEASE_EXPIRED, &clp->cl_state))
		set_bit(NFS4CLNT_CHECK_LEASE, &clp->cl_state);
	nfs4_schedule_state_manager(clp);
}
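/*
 * Mark open state for recovery: "reboot" reclaim runs inside the server's
 * grace period after a restart, while "nograce" reclaim re-establishes
 * state from scratch when no grace period is available.
 */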
static int nfs4_state_mark_reclaim_reboot(struct nfs_client *clp, struct nfs4_state *state)
{
	set_bit(NFS_STATE_RECLAIM_REBOOT, &state->flags);
	/* Don't recover state that expired before the reboot */
	if (test_bit(NFS_STATE_RECLAIM_NOGRACE, &state->flags)) {
		clear_bit(NFS_STATE_RECLAIM_REBOOT, &state->flags);
		return 0;
	}
	set_bit(NFS_OWNER_RECLAIM_REBOOT, &state->owner->so_flags);
	set_bit(NFS4CLNT_RECLAIM_REBOOT, &clp->cl_state);
	return 1;
}

int nfs4_state_mark_reclaim_nograce(struct nfs_client *clp, struct nfs4_state *state)
{
	set_bit(NFS_STATE_RECLAIM_NOGRACE, &state->flags);
	clear_bit(NFS_STATE_RECLAIM_REBOOT, &state->flags);
	set_bit(NFS_OWNER_RECLAIM_NOGRACE, &state->owner->so_flags);
	set_bit(NFS4CLNT_RECLAIM_NOGRACE, &clp->cl_state);
	return 1;
}
static int nfs4_reclaim_locks(struct nfs4_state *state, const struct nfs4_state_recovery_ops *ops)
{
	struct inode *inode = state->inode;
	struct nfs_inode *nfsi = NFS_I(inode);
	struct file_lock *fl;
	int status = 0;

	down_write(&nfsi->rwsem);
	for (fl = inode->i_flock; fl != NULL; fl = fl->fl_next) {
		if (!(fl->fl_flags & (FL_POSIX|FL_FLOCK)))
			continue;
		if (nfs_file_open_context(fl->fl_file)->state != state)
			continue;
		status = ops->recover_lock(state, fl);
		if (status >= 0)
			continue;
		switch (status) {
			default:
				printk(KERN_ERR "%s: unhandled error %d. Zeroing state\n",
						__func__, status);
			case -NFS4ERR_EXPIRED:
			case -NFS4ERR_NO_GRACE:
			case -NFS4ERR_RECLAIM_BAD:
			case -NFS4ERR_RECLAIM_CONFLICT:
				/* kill_proc(fl->fl_pid, SIGLOST, 1); */
				break;
			case -NFS4ERR_STALE_CLIENTID:
				goto out_err;
		}
	}
	up_write(&nfsi->rwsem);
	return 0;
out_err:
	up_write(&nfsi->rwsem);
	return status;
}
static int nfs4_reclaim_open_state(struct nfs4_state_owner *sp, const struct nfs4_state_recovery_ops *ops)
{
	struct nfs4_state *state;
	struct nfs4_lock_state *lock;
	int status = 0;

	/* Note: we rely on the sp->so_states list being ordered
	 * so that we always reclaim open(O_RDWR) and/or open(O_WRITE)
	 * states first.
	 * This is needed to ensure that the server won't give us any
	 * read delegations that we have to return if, say, we are
	 * recovering after a network partition or a reboot from a
	 * server that doesn't support a grace period.
	 */
restart:
	spin_lock(&sp->so_lock);
	list_for_each_entry(state, &sp->so_states, open_states) {
		if (!test_and_clear_bit(ops->state_flag_bit, &state->flags))
			continue;
		if (state->state == 0)
			continue;
		atomic_inc(&state->count);
		spin_unlock(&sp->so_lock);
		status = ops->recover_open(sp, state);
		if (status >= 0) {
			status = nfs4_reclaim_locks(state, ops);
			if (status >= 0) {
				list_for_each_entry(lock, &state->lock_states, ls_locks) {
					if (!(lock->ls_flags & NFS_LOCK_INITIALIZED))
						printk("%s: Lock reclaim failed!\n",
							__func__);
				}
				nfs4_put_open_state(state);
				goto restart;
			}
		}
		switch (status) {
			default:
				printk(KERN_ERR "%s: unhandled error %d. Zeroing state\n",
						__func__, status);
			case -ENOENT:
			case -ESTALE:
				/*
				 * Open state on this file cannot be recovered
				 * All we can do is revert to using the zero stateid.
				 */
				memset(state->stateid.data, 0,
					sizeof(state->stateid.data));
				/* Mark the file as being 'closed' */
				state->state = 0;
				break;
			case -NFS4ERR_RECLAIM_BAD:
			case -NFS4ERR_RECLAIM_CONFLICT:
				nfs4_state_mark_reclaim_nograce(sp->so_client, state);
				break;
			case -NFS4ERR_EXPIRED:
			case -NFS4ERR_NO_GRACE:
				nfs4_state_mark_reclaim_nograce(sp->so_client, state);
			case -NFS4ERR_STALE_CLIENTID:
				goto out_err;
		}
		nfs4_put_open_state(state);
		goto restart;
	}
	spin_unlock(&sp->so_lock);
	return 0;
out_err:
	nfs4_put_open_state(state);
	return status;
}
static void nfs4_clear_open_state(struct nfs4_state *state)
{
	struct nfs4_lock_state *lock;

	clear_bit(NFS_DELEGATED_STATE, &state->flags);
	clear_bit(NFS_O_RDONLY_STATE, &state->flags);
	clear_bit(NFS_O_WRONLY_STATE, &state->flags);
	clear_bit(NFS_O_RDWR_STATE, &state->flags);
	list_for_each_entry(lock, &state->lock_states, ls_locks) {
		lock->ls_seqid.flags = 0;
		lock->ls_flags &= ~NFS_LOCK_INITIALIZED;
	}
}
static void nfs4_state_mark_reclaim_helper(struct nfs_client *clp, int (*mark_reclaim)(struct nfs_client *clp, struct nfs4_state *state))
{
	struct nfs4_state_owner *sp;
	struct rb_node *pos;
	struct nfs4_state *state;

	/* Reset all sequence ids to zero */
	for (pos = rb_first(&clp->cl_state_owners); pos != NULL; pos = rb_next(pos)) {
		sp = rb_entry(pos, struct nfs4_state_owner, so_client_node);
		sp->so_seqid.flags = 0;
		spin_lock(&sp->so_lock);
		list_for_each_entry(state, &sp->so_states, open_states) {
			if (mark_reclaim(clp, state))
				nfs4_clear_open_state(state);
		}
		spin_unlock(&sp->so_lock);
	}
}
static void nfs4_state_start_reclaim_reboot(struct nfs_client *clp)
{
	/* Mark all delegations for reclaim */
	nfs_delegation_mark_reclaim(clp);
	nfs4_state_mark_reclaim_helper(clp, nfs4_state_mark_reclaim_reboot);
}
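/*
 * The grace period is over: any state still flagged for reboot reclaim was
 * never re-established, so hand it to nograce recovery, and reap
 * delegations that were not reclaimed.
 */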
static void nfs4_state_end_reclaim_reboot(struct nfs_client *clp)
{
	struct nfs4_state_owner *sp;
	struct rb_node *pos;
	struct nfs4_state *state;

	if (!test_and_clear_bit(NFS4CLNT_RECLAIM_REBOOT, &clp->cl_state))
		return;

	for (pos = rb_first(&clp->cl_state_owners); pos != NULL; pos = rb_next(pos)) {
		sp = rb_entry(pos, struct nfs4_state_owner, so_client_node);
		spin_lock(&sp->so_lock);
		list_for_each_entry(state, &sp->so_states, open_states) {
			if (!test_and_clear_bit(NFS_STATE_RECLAIM_REBOOT, &state->flags))
				continue;
			nfs4_state_mark_reclaim_nograce(clp, state);
		}
		spin_unlock(&sp->so_lock);
	}

	nfs_delegation_reap_unclaimed(clp);
}
static void nfs_delegation_clear_all(struct nfs_client *clp)
{
	nfs_delegation_mark_reclaim(clp);
	nfs_delegation_reap_unclaimed(clp);
}

static void nfs4_state_start_reclaim_nograce(struct nfs_client *clp)
{
	nfs_delegation_clear_all(clp);
	nfs4_state_mark_reclaim_helper(clp, nfs4_state_mark_reclaim_nograce);
}

static void nfs4_state_end_reclaim_nograce(struct nfs_client *clp)
{
	clear_bit(NFS4CLNT_RECLAIM_NOGRACE, &clp->cl_state);
}
static void nfs4_recovery_handle_error(struct nfs_client *clp, int error)
{
	switch (error) {
		case -NFS4ERR_CB_PATH_DOWN:
			nfs_handle_cb_pathdown(clp);
			break;
		case -NFS4ERR_STALE_CLIENTID:
		case -NFS4ERR_LEASE_MOVED:
			set_bit(NFS4CLNT_LEASE_EXPIRED, &clp->cl_state);
			nfs4_state_start_reclaim_reboot(clp);
			break;
		case -NFS4ERR_EXPIRED:
			set_bit(NFS4CLNT_LEASE_EXPIRED, &clp->cl_state);
			nfs4_state_start_reclaim_nograce(clp);
	}
}
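/*
 * Run the given recovery ops on every state owner that is flagged for
 * reclaim, restarting the walk each time cl_lock has to be dropped.
 */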
static int nfs4_do_reclaim(struct nfs_client *clp, const struct nfs4_state_recovery_ops *ops)
{
	struct rb_node *pos;
	int status = 0;

restart:
	spin_lock(&clp->cl_lock);
	for (pos = rb_first(&clp->cl_state_owners); pos != NULL; pos = rb_next(pos)) {
		struct nfs4_state_owner *sp = rb_entry(pos, struct nfs4_state_owner, so_client_node);
		if (!test_and_clear_bit(ops->owner_flag_bit, &sp->so_flags))
			continue;
		atomic_inc(&sp->so_count);
		spin_unlock(&clp->cl_lock);
		status = nfs4_reclaim_open_state(sp, ops);
		if (status < 0) {
			set_bit(ops->owner_flag_bit, &sp->so_flags);
			nfs4_put_state_owner(sp);
			nfs4_recovery_handle_error(clp, status);
			return status;
		}
		nfs4_put_state_owner(sp);
		goto restart;
	}
	spin_unlock(&clp->cl_lock);
	return status;
}
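/*
 * Probe the lease with a RENEW using the best credential available; the
 * result is fed to nfs4_recovery_handle_error() to decide whether further
 * recovery is needed.
 */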
static int nfs4_check_lease(struct nfs_client *clp)
{
	struct rpc_cred *cred;
	int status = -NFS4ERR_EXPIRED;

	/* Is the client already known to have an expired lease? */
	if (test_bit(NFS4CLNT_LEASE_EXPIRED, &clp->cl_state))
		goto out;
	cred = nfs4_get_renew_cred(clp);
	if (cred == NULL) {
		cred = nfs4_get_setclientid_cred(clp);
		if (cred == NULL)
			goto out;
	}
	status = nfs4_proc_renew(clp, cred);
	put_rpccred(cred);
out:
	nfs4_recovery_handle_error(clp, status);
	return status;
}
static int nfs4_reclaim_lease(struct nfs_client *clp)
{
	struct rpc_cred *cred;
	int status = -ENOENT;

	cred = nfs4_get_setclientid_cred(clp);
	if (cred != NULL) {
		status = nfs4_init_client(clp, cred);
		put_rpccred(cred);
		/* Handle case where the user hasn't set up machine creds */
		if (status == -EACCES && cred == clp->cl_machine_cred) {
			nfs4_clear_machine_cred(clp);
			status = -EAGAIN;
		}
	}
	return status;
}
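/*
 * Main loop of the state manager thread.  Each pass handles at most one
 * item of work (lease re-establishment, lease check, reboot reclaim,
 * nograce reclaim, or delegation return) and loops until cl_state is clean.
 */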
static void nfs4_state_manager(struct nfs_client *clp)
{
	int status = 0;

	/* Ensure exclusive access to NFSv4 state */
	for (;;) {
		if (test_and_clear_bit(NFS4CLNT_LEASE_EXPIRED, &clp->cl_state)) {
			/* We're going to have to re-establish a clientid */
			status = nfs4_reclaim_lease(clp);
			if (status) {
				set_bit(NFS4CLNT_LEASE_EXPIRED, &clp->cl_state);
				if (status == -EAGAIN)
					continue;
				goto out_error;
			}
			clear_bit(NFS4CLNT_CHECK_LEASE, &clp->cl_state);
		}

		if (test_and_clear_bit(NFS4CLNT_CHECK_LEASE, &clp->cl_state)) {
			status = nfs4_check_lease(clp);
			if (status != 0)
				continue;
		}

		/* First recover reboot state... */
		if (test_and_clear_bit(NFS4CLNT_RECLAIM_REBOOT, &clp->cl_state)) {
			status = nfs4_do_reclaim(clp, &nfs4_reboot_recovery_ops);
			if (status == -NFS4ERR_STALE_CLIENTID)
				continue;
			nfs4_state_end_reclaim_reboot(clp);
			continue;
		}

		/* Now recover expired state... */
		if (test_and_clear_bit(NFS4CLNT_RECLAIM_NOGRACE, &clp->cl_state)) {
			status = nfs4_do_reclaim(clp, &nfs4_nograce_recovery_ops);
			if (status < 0) {
				set_bit(NFS4CLNT_RECLAIM_NOGRACE, &clp->cl_state);
				if (status == -NFS4ERR_STALE_CLIENTID)
					continue;
				if (status == -NFS4ERR_EXPIRED)
					continue;
				goto out_error;
			} else
				nfs4_state_end_reclaim_nograce(clp);
			continue;
		}

		if (test_and_clear_bit(NFS4CLNT_DELEGRETURN, &clp->cl_state)) {
			nfs_client_return_marked_delegations(clp);
			continue;
		}

		nfs4_clear_state_manager_bit(clp);
		/* Did we race with an attempt to give us more work? */
		if (clp->cl_state == 0)
			break;
		if (test_and_set_bit(NFS4CLNT_MANAGER_RUNNING, &clp->cl_state) != 0)
			break;
	}
	return;
out_error:
	printk(KERN_WARNING "Error: state manager failed on NFSv4 server %s"
			" with error %d\n", clp->cl_hostname, -status);
	if (test_bit(NFS4CLNT_RECLAIM_REBOOT, &clp->cl_state))
		nfs4_state_end_reclaim_reboot(clp);
	nfs4_clear_state_manager_bit(clp);
}
static int nfs4_run_state_manager(void *ptr)
{
	struct nfs_client *clp = ptr;

	allow_signal(SIGKILL);
	nfs4_state_manager(clp);
	nfs_put_client(clp);
	module_put_and_exit(0);
	return 0;
}