/*
 *  fs/nfs/nfs4state.c
 *
 *  Client-side support for the NFSv4 state model.
 *
 *  Copyright (c) 2002 The Regents of the University of Michigan.
 *  All rights reserved.
 *
 *  Kendrick Smith <kmsmith@umich.edu>
 *
 *  Redistribution and use in source and binary forms, with or without
 *  modification, are permitted provided that the following conditions
 *  are met:
 *
 *  1. Redistributions of source code must retain the above copyright
 *     notice, this list of conditions and the following disclaimer.
 *  2. Redistributions in binary form must reproduce the above copyright
 *     notice, this list of conditions and the following disclaimer in the
 *     documentation and/or other materials provided with the distribution.
 *  3. Neither the name of the University nor the names of its
 *     contributors may be used to endorse or promote products derived
 *     from this software without specific prior written permission.
 *
 *  THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
 *  WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
 *  MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 *  DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 *  FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 *  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 *  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
 *  BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
 *  LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
 *  NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
 *  SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Implementation of the NFSv4 state model.  For the time being,
 * this is minimal, but will be made much more complex in a
 * subsequent patch.
 */

#include <linux/config.h>
#include <linux/slab.h>
#include <linux/smp_lock.h>
#include <linux/nfs_fs.h>
#include <linux/nfs_idmap.h>
#include <linux/workqueue.h>
#include <linux/bitops.h>

#include "nfs4_fs.h"
#include "callback.h"
#include "delegation.h"

#define OPENOWNER_POOL_SIZE     8

const nfs4_stateid zero_stateid;

static DEFINE_SPINLOCK(state_spinlock);
static LIST_HEAD(nfs4_clientid_list);

static void nfs4_recover_state(void *);

void
init_nfsv4_state(struct nfs_server *server)
{
        server->nfs4_state = NULL;
        INIT_LIST_HEAD(&server->nfs4_siblings);
}

void
destroy_nfsv4_state(struct nfs_server *server)
{
        if (server->mnt_path) {
                kfree(server->mnt_path);
                server->mnt_path = NULL;
        }
        if (server->nfs4_state) {
                nfs4_put_client(server->nfs4_state);
                server->nfs4_state = NULL;
        }
}

/*
 * nfs4_get_client(): returns an empty client structure
 * nfs4_put_client(): drops reference to client structure
 *
 * Since these are allocated/deallocated very rarely, we don't
 * bother putting them in a slab cache...
 */
static struct nfs4_client *
nfs4_alloc_client(struct in_addr *addr)
{
        struct nfs4_client *clp;

        if (nfs_callback_up() < 0)
                return NULL;
        clp = kzalloc(sizeof(*clp), GFP_KERNEL);
        if (clp == NULL) {
                nfs_callback_down();
                return NULL;
        }
        memcpy(&clp->cl_addr, addr, sizeof(clp->cl_addr));
        init_rwsem(&clp->cl_sem);
        INIT_LIST_HEAD(&clp->cl_delegations);
        INIT_LIST_HEAD(&clp->cl_state_owners);
        INIT_LIST_HEAD(&clp->cl_unused);
        spin_lock_init(&clp->cl_lock);
        atomic_set(&clp->cl_count, 1);
        INIT_WORK(&clp->cl_recoverd, nfs4_recover_state, clp);
        INIT_WORK(&clp->cl_renewd, nfs4_renew_state, clp);
        INIT_LIST_HEAD(&clp->cl_superblocks);
        init_waitqueue_head(&clp->cl_waitq);
        rpc_init_wait_queue(&clp->cl_rpcwaitq, "NFS4 client");
        clp->cl_rpcclient = ERR_PTR(-EINVAL);
        clp->cl_boot_time = CURRENT_TIME;
        clp->cl_state = 1 << NFS4CLNT_OK;
        return clp;
}

static void
nfs4_free_client(struct nfs4_client *clp)
{
        struct nfs4_state_owner *sp;

        while (!list_empty(&clp->cl_unused)) {
                sp = list_entry(clp->cl_unused.next,
                                struct nfs4_state_owner,
                                so_list);
                list_del(&sp->so_list);
                kfree(sp);
        }
        BUG_ON(!list_empty(&clp->cl_state_owners));
        if (clp->cl_cred)
                put_rpccred(clp->cl_cred);
        nfs_idmap_delete(clp);
        if (!IS_ERR(clp->cl_rpcclient))
                rpc_shutdown_client(clp->cl_rpcclient);
        kfree(clp);
        nfs_callback_down();
}

static struct nfs4_client *__nfs4_find_client(struct in_addr *addr)
{
        struct nfs4_client *clp;
        list_for_each_entry(clp, &nfs4_clientid_list, cl_servers) {
                if (memcmp(&clp->cl_addr, addr, sizeof(clp->cl_addr)) == 0) {
                        atomic_inc(&clp->cl_count);
                        return clp;
                }
        }
        return NULL;
}

struct nfs4_client *nfs4_find_client(struct in_addr *addr)
{
        struct nfs4_client *clp;
        spin_lock(&state_spinlock);
        clp = __nfs4_find_client(addr);
        spin_unlock(&state_spinlock);
        return clp;
}

struct nfs4_client *
nfs4_get_client(struct in_addr *addr)
{
        struct nfs4_client *clp, *new = NULL;

        spin_lock(&state_spinlock);
        for (;;) {
                clp = __nfs4_find_client(addr);
                if (clp != NULL)
                        break;
                clp = new;
                if (clp != NULL) {
                        list_add(&clp->cl_servers, &nfs4_clientid_list);
                        new = NULL;
                        break;
                }
                spin_unlock(&state_spinlock);
                new = nfs4_alloc_client(addr);
                spin_lock(&state_spinlock);
                if (new == NULL)
                        break;
        }
        spin_unlock(&state_spinlock);
        if (new)
                nfs4_free_client(new);
        return clp;
}

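/*
 * Usage note (added commentary, not upstream documentation):
 * nfs4_get_client() either finds an existing per-server client under
 * state_spinlock or allocates a new one with the lock dropped, then
 * retries the lookup so that concurrent mounts of the same server end up
 * sharing a single nfs4_client.  A minimal sketch of how a caller might
 * balance the pair, assuming a hypothetical mount helper:
 *
 *      clp = nfs4_get_client(&server_addr);
 *      if (clp == NULL)
 *              return -ENOMEM;
 *      ...
 *      nfs4_put_client(clp);    (drops the reference taken above)
 */
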
void
nfs4_put_client(struct nfs4_client *clp)
{
        if (!atomic_dec_and_lock(&clp->cl_count, &state_spinlock))
                return;
        list_del(&clp->cl_servers);
        spin_unlock(&state_spinlock);
        BUG_ON(!list_empty(&clp->cl_superblocks));
        wake_up_all(&clp->cl_waitq);
        rpc_wake_up(&clp->cl_rpcwaitq);
        nfs4_kill_renewd(clp);
        nfs4_free_client(clp);
}

static int __nfs4_init_client(struct nfs4_client *clp)
{
        int status = nfs4_proc_setclientid(clp, NFS4_CALLBACK, nfs_callback_tcpport);
        if (status == 0)
                status = nfs4_proc_setclientid_confirm(clp);
        if (status == 0)
                nfs4_schedule_state_renewal(clp);
        return status;
}

int nfs4_init_client(struct nfs4_client *clp)
{
        return nfs4_map_errors(__nfs4_init_client(clp));
}

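/*
 * Added note: the two-step SETCLIENTID / SETCLIENTID_CONFIRM exchange
 * above is the NFSv4.0 client-identification handshake (RFC 3530); only
 * after the confirm succeeds is the lease considered established, which
 * is why lease renewal is scheduled last.
 */
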
u32
nfs4_alloc_lockowner_id(struct nfs4_client *clp)
{
        return clp->cl_lockowner_id++;
}

static struct nfs4_state_owner *
nfs4_client_grab_unused(struct nfs4_client *clp, struct rpc_cred *cred)
{
        struct nfs4_state_owner *sp = NULL;

        if (!list_empty(&clp->cl_unused)) {
                sp = list_entry(clp->cl_unused.next, struct nfs4_state_owner, so_list);
                atomic_inc(&sp->so_count);
                sp->so_cred = cred;
                list_move(&sp->so_list, &clp->cl_state_owners);
                clp->cl_nunused--;
        }
        return sp;
}

static struct nfs4_state_owner *
nfs4_find_state_owner(struct nfs4_client *clp, struct rpc_cred *cred)
{
        struct nfs4_state_owner *sp, *res = NULL;

        list_for_each_entry(sp, &clp->cl_state_owners, so_list) {
                if (sp->so_cred != cred)
                        continue;
                atomic_inc(&sp->so_count);
                /* Move to the head of the list */
                list_move(&sp->so_list, &clp->cl_state_owners);
                res = sp;
                break;
        }
        return res;
}

/*
 * nfs4_alloc_state_owner(): this is called on the OPEN or CREATE path to
 * create a new state_owner.
 */
static struct nfs4_state_owner *
nfs4_alloc_state_owner(void)
{
        struct nfs4_state_owner *sp;

        sp = kzalloc(sizeof(*sp), GFP_KERNEL);
        if (!sp)
                return NULL;
        spin_lock_init(&sp->so_lock);
        INIT_LIST_HEAD(&sp->so_states);
        INIT_LIST_HEAD(&sp->so_delegations);
        rpc_init_wait_queue(&sp->so_sequence.wait, "Seqid_waitqueue");
        sp->so_seqid.sequence = &sp->so_sequence;
        spin_lock_init(&sp->so_sequence.lock);
        INIT_LIST_HEAD(&sp->so_sequence.list);
        atomic_set(&sp->so_count, 1);
        return sp;
}

void
nfs4_drop_state_owner(struct nfs4_state_owner *sp)
{
        struct nfs4_client *clp = sp->so_client;
        spin_lock(&clp->cl_lock);
        list_del_init(&sp->so_list);
        spin_unlock(&clp->cl_lock);
}

/*
 * Note: must be called with clp->cl_sem held in order to prevent races
 *       with reboot recovery!
 */
struct nfs4_state_owner *nfs4_get_state_owner(struct nfs_server *server, struct rpc_cred *cred)
{
        struct nfs4_client *clp = server->nfs4_state;
        struct nfs4_state_owner *sp, *new;

        get_rpccred(cred);
        new = nfs4_alloc_state_owner();
        spin_lock(&clp->cl_lock);
        sp = nfs4_find_state_owner(clp, cred);
        if (sp == NULL)
                sp = nfs4_client_grab_unused(clp, cred);
        if (sp == NULL && new != NULL) {
                list_add(&new->so_list, &clp->cl_state_owners);
                new->so_client = clp;
                new->so_id = nfs4_alloc_lockowner_id(clp);
                new->so_cred = cred;
                sp = new;
                new = NULL;
        }
        spin_unlock(&clp->cl_lock);
        if (new)
                kfree(new);
        if (sp != NULL)
                return sp;
        put_rpccred(cred);
        return NULL;
}

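/*
 * Added usage sketch (not upstream documentation): the OPEN path looks up
 * or creates a state_owner for the (server, credential) pair and must
 * balance it with nfs4_put_state_owner() when it is done.  Roughly, with
 * clp->cl_sem held:
 *
 *      sp = nfs4_get_state_owner(server, cred);
 *      if (sp == NULL)
 *              return -ENOMEM;
 *      ... issue the OPEN on behalf of sp ...
 *      nfs4_put_state_owner(sp);
 */
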
/*
 * Must be called with clp->cl_sem held in order to avoid races
 * with state recovery...
 */
void nfs4_put_state_owner(struct nfs4_state_owner *sp)
{
        struct nfs4_client *clp = sp->so_client;
        struct rpc_cred *cred = sp->so_cred;

        if (!atomic_dec_and_lock(&sp->so_count, &clp->cl_lock))
                return;
        if (clp->cl_nunused >= OPENOWNER_POOL_SIZE)
                goto out_free;
        if (list_empty(&sp->so_list))
                goto out_free;
        list_move(&sp->so_list, &clp->cl_unused);
        clp->cl_nunused++;
        spin_unlock(&clp->cl_lock);
        put_rpccred(cred);
        cred = NULL;
        return;
out_free:
        list_del(&sp->so_list);
        spin_unlock(&clp->cl_lock);
        put_rpccred(cred);
        kfree(sp);
}

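/*
 * Added note: rather than freeing every released open-owner, up to
 * OPENOWNER_POOL_SIZE of them are parked on clp->cl_unused so that
 * nfs4_client_grab_unused() can recycle them, avoiding a fresh allocation
 * on the next OPEN.
 */
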
static struct nfs4_state *
nfs4_alloc_open_state(void)
{
        struct nfs4_state *state;

        state = kmalloc(sizeof(*state), GFP_KERNEL);
        if (!state)
                return NULL;
        state->state = 0;
        state->nreaders = 0;
        state->nwriters = 0;
        state->flags = 0;
        memset(state->stateid.data, 0, sizeof(state->stateid.data));
        atomic_set(&state->count, 1);
        INIT_LIST_HEAD(&state->lock_states);
        spin_lock_init(&state->state_lock);
        return state;
}

static struct nfs4_state *
__nfs4_find_state(struct inode *inode, struct rpc_cred *cred, mode_t mode)
{
        struct nfs_inode *nfsi = NFS_I(inode);
        struct nfs4_state *state;

        mode &= (FMODE_READ|FMODE_WRITE);
        list_for_each_entry(state, &nfsi->open_states, inode_states) {
                if (state->owner->so_cred != cred)
                        continue;
                if ((mode & FMODE_READ) != 0 && state->nreaders == 0)
                        continue;
                if ((mode & FMODE_WRITE) != 0 && state->nwriters == 0)
                        continue;
                if ((state->state & mode) != mode)
                        continue;
                atomic_inc(&state->count);
                if (mode & FMODE_READ)
                        state->nreaders++;
                if (mode & FMODE_WRITE)
                        state->nwriters++;
                return state;
        }
        return NULL;
}

static struct nfs4_state *
__nfs4_find_state_byowner(struct inode *inode, struct nfs4_state_owner *owner)
{
        struct nfs_inode *nfsi = NFS_I(inode);
        struct nfs4_state *state;

        list_for_each_entry(state, &nfsi->open_states, inode_states) {
                /* Is this in the process of being freed? */
                if (state->nreaders == 0 && state->nwriters == 0)
                        continue;
                if (state->owner == owner) {
                        atomic_inc(&state->count);
                        return state;
                }
        }
        return NULL;
}

struct nfs4_state *
nfs4_find_state(struct inode *inode, struct rpc_cred *cred, mode_t mode)
{
        struct nfs4_state *state;

        spin_lock(&inode->i_lock);
        state = __nfs4_find_state(inode, cred, mode);
        spin_unlock(&inode->i_lock);
        return state;
}

static void
nfs4_free_open_state(struct nfs4_state *state)
{
        kfree(state);
}

struct nfs4_state *
nfs4_get_open_state(struct inode *inode, struct nfs4_state_owner *owner)
{
        struct nfs4_state *state, *new;
        struct nfs_inode *nfsi = NFS_I(inode);

        spin_lock(&inode->i_lock);
        state = __nfs4_find_state_byowner(inode, owner);
        spin_unlock(&inode->i_lock);
        if (state)
                goto out;
        new = nfs4_alloc_open_state();
        spin_lock(&owner->so_lock);
        spin_lock(&inode->i_lock);
        state = __nfs4_find_state_byowner(inode, owner);
        if (state == NULL && new != NULL) {
                state = new;
                state->owner = owner;
                atomic_inc(&owner->so_count);
                list_add(&state->inode_states, &nfsi->open_states);
                state->inode = igrab(inode);
                spin_unlock(&inode->i_lock);
                /* Note: The reclaim code dictates that we add stateless
                 * and read-only stateids to the end of the list */
                list_add_tail(&state->open_states, &owner->so_states);
                spin_unlock(&owner->so_lock);
        } else {
                spin_unlock(&inode->i_lock);
                spin_unlock(&owner->so_lock);
                if (new)
                        nfs4_free_open_state(new);
        }
out:
        return state;
}

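/*
 * Added note: nfs4_get_open_state() follows the usual optimistic
 * lookup-or-insert pattern: try the lookup under i_lock alone, allocate
 * outside the locks if it misses, then retake so_lock + i_lock and repeat
 * the lookup before inserting, freeing the spare allocation if another
 * thread won the race.
 */
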
/*
 * Beware! Caller must be holding exactly one
 * reference to clp->cl_sem!
 */
void nfs4_put_open_state(struct nfs4_state *state)
{
        struct inode *inode = state->inode;
        struct nfs4_state_owner *owner = state->owner;

        if (!atomic_dec_and_lock(&state->count, &owner->so_lock))
                return;
        spin_lock(&inode->i_lock);
        if (!list_empty(&state->inode_states))
                list_del(&state->inode_states);
        list_del(&state->open_states);
        spin_unlock(&inode->i_lock);
        spin_unlock(&owner->so_lock);
        iput(inode);
        BUG_ON(state->state != 0);
        nfs4_free_open_state(state);
        nfs4_put_state_owner(owner);
}

/*
 * Close the current file.
 */
void nfs4_close_state(struct nfs4_state *state, mode_t mode)
{
        struct inode *inode = state->inode;
        struct nfs4_state_owner *owner = state->owner;
        int newstate;

        atomic_inc(&owner->so_count);
        /* Protect against nfs4_find_state() */
        spin_lock(&owner->so_lock);
        spin_lock(&inode->i_lock);
        if (mode & FMODE_READ)
                state->nreaders--;
        if (mode & FMODE_WRITE)
                state->nwriters--;
        if (state->nwriters == 0) {
                if (state->nreaders == 0)
                        list_del_init(&state->inode_states);
                /* See reclaim code */
                list_move_tail(&state->open_states, &owner->so_states);
        }
        spin_unlock(&inode->i_lock);
        spin_unlock(&owner->so_lock);
        newstate = 0;
        if (state->state != 0) {
                if (state->nreaders)
                        newstate |= FMODE_READ;
                if (state->nwriters)
                        newstate |= FMODE_WRITE;
                if (state->state == newstate)
                        goto out;
                if (test_bit(NFS_DELEGATED_STATE, &state->flags)) {
                        state->state = newstate;
                        goto out;
                }
                if (nfs4_do_close(inode, state, newstate) == 0)
                        return;
        }
out:
        nfs4_put_open_state(state);
        nfs4_put_state_owner(owner);
}

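/*
 * Added note: nfs4_close_state() recomputes the share mode that should
 * remain after this close ("newstate").  If nothing changes on the wire
 * (the remaining mode equals the current one, or the open is covered by a
 * delegation) no RPC is sent; otherwise nfs4_do_close() is asked to bring
 * the server's view down to newstate, which is presumed to result in
 * either an OPEN_DOWNGRADE or a full CLOSE - that detail lives in
 * nfs4proc.c and is only assumed here.
 */
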
/*
 * Search the state->lock_states for an existing lock_owner
 * that is compatible with current->files
 */
static struct nfs4_lock_state *
__nfs4_find_lock_state(struct nfs4_state *state, fl_owner_t fl_owner)
{
        struct nfs4_lock_state *pos;
        list_for_each_entry(pos, &state->lock_states, ls_locks) {
                if (pos->ls_owner != fl_owner)
                        continue;
                atomic_inc(&pos->ls_count);
                return pos;
        }
        return NULL;
}

/*
 * Return a compatible lock_state. If no initialized lock_state structure
 * exists, return an uninitialized one.
 */
static struct nfs4_lock_state *nfs4_alloc_lock_state(struct nfs4_state *state, fl_owner_t fl_owner)
{
        struct nfs4_lock_state *lsp;
        struct nfs4_client *clp = state->owner->so_client;

        lsp = kzalloc(sizeof(*lsp), GFP_KERNEL);
        if (lsp == NULL)
                return NULL;
        lsp->ls_seqid.sequence = &state->owner->so_sequence;
        atomic_set(&lsp->ls_count, 1);
        lsp->ls_owner = fl_owner;
        spin_lock(&clp->cl_lock);
        lsp->ls_id = nfs4_alloc_lockowner_id(clp);
        spin_unlock(&clp->cl_lock);
        INIT_LIST_HEAD(&lsp->ls_locks);
        return lsp;
}

/*
 * Return a compatible lock_state. If no initialized lock_state structure
 * exists, return an uninitialized one.
 *
 * The caller must be holding clp->cl_sem
 */
static struct nfs4_lock_state *nfs4_get_lock_state(struct nfs4_state *state, fl_owner_t owner)
{
        struct nfs4_lock_state *lsp, *new = NULL;

        for (;;) {
                spin_lock(&state->state_lock);
                lsp = __nfs4_find_lock_state(state, owner);
                if (lsp != NULL)
                        break;
                if (new != NULL) {
                        new->ls_state = state;
                        list_add(&new->ls_locks, &state->lock_states);
                        set_bit(LK_STATE_IN_USE, &state->flags);
                        lsp = new;
                        new = NULL;
                        break;
                }
                spin_unlock(&state->state_lock);
                new = nfs4_alloc_lock_state(state, owner);
                if (new == NULL)
                        return NULL;
        }
        spin_unlock(&state->state_lock);
        kfree(new);
        return lsp;
}

/*
 * Release reference to lock_state, and free it if we see that
 * it is no longer in use
 */
void nfs4_put_lock_state(struct nfs4_lock_state *lsp)
{
        struct nfs4_state *state;

        if (lsp == NULL)
                return;
        state = lsp->ls_state;
        if (!atomic_dec_and_lock(&lsp->ls_count, &state->state_lock))
                return;
        list_del(&lsp->ls_locks);
        if (list_empty(&state->lock_states))
                clear_bit(LK_STATE_IN_USE, &state->flags);
        spin_unlock(&state->state_lock);
        kfree(lsp);
}

static void nfs4_fl_copy_lock(struct file_lock *dst, struct file_lock *src)
{
        struct nfs4_lock_state *lsp = src->fl_u.nfs4_fl.owner;

        dst->fl_u.nfs4_fl.owner = lsp;
        atomic_inc(&lsp->ls_count);
}

static void nfs4_fl_release_lock(struct file_lock *fl)
{
        nfs4_put_lock_state(fl->fl_u.nfs4_fl.owner);
}

static struct file_lock_operations nfs4_fl_lock_ops = {
        .fl_copy_lock = nfs4_fl_copy_lock,
        .fl_release_private = nfs4_fl_release_lock,
};

int nfs4_set_lock_state(struct nfs4_state *state, struct file_lock *fl)
{
        struct nfs4_lock_state *lsp;

        if (fl->fl_ops != NULL)
                return 0;
        lsp = nfs4_get_lock_state(state, fl->fl_owner);
        if (lsp == NULL)
                return -ENOMEM;
        fl->fl_u.nfs4_fl.owner = lsp;
        fl->fl_ops = &nfs4_fl_lock_ops;
        return 0;
}

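/*
 * Added note: nfs4_set_lock_state() ties a VFS file_lock to its NFSv4
 * lock_state by stashing the lsp in fl->fl_u.nfs4_fl.owner and installing
 * nfs4_fl_lock_ops, so that lock splitting (fl_copy_lock) takes an extra
 * reference and tear-down (fl_release_private) drops it via
 * nfs4_put_lock_state().
 */
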
/*
 * Byte-range lock aware utility to initialize the stateid of read/write
 * requests.
 */
void nfs4_copy_stateid(nfs4_stateid *dst, struct nfs4_state *state, fl_owner_t fl_owner)
{
        struct nfs4_lock_state *lsp;

        memcpy(dst, &state->stateid, sizeof(*dst));
        if (test_bit(LK_STATE_IN_USE, &state->flags) == 0)
                return;

        spin_lock(&state->state_lock);
        lsp = __nfs4_find_lock_state(state, fl_owner);
        if (lsp != NULL && (lsp->ls_flags & NFS_LOCK_INITIALIZED) != 0)
                memcpy(dst, &lsp->ls_stateid, sizeof(*dst));
        spin_unlock(&state->state_lock);
        nfs4_put_lock_state(lsp);
}

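/*
 * Added note: this implements the NFSv4 stateid selection rule for READ
 * and WRITE - when the process holds byte-range locks on the file, the
 * lock stateid is preferred over the open stateid, so the default copy of
 * state->stateid above is overwritten whenever an initialized lock_state
 * exists for this lock owner.
 */
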
struct nfs_seqid *nfs_alloc_seqid(struct nfs_seqid_counter *counter)
{
        struct nfs_seqid *new;

        new = kmalloc(sizeof(*new), GFP_KERNEL);
        if (new != NULL) {
                new->sequence = counter;
                INIT_LIST_HEAD(&new->list);
        }
        return new;
}

void nfs_free_seqid(struct nfs_seqid *seqid)
{
        struct rpc_sequence *sequence = seqid->sequence->sequence;

        if (!list_empty(&seqid->list)) {
                spin_lock(&sequence->lock);
                list_del(&seqid->list);
                spin_unlock(&sequence->lock);
        }
        rpc_wake_up_next(&sequence->wait);
        kfree(seqid);
}

/*
 * Increment the seqid if the OPEN/OPEN_DOWNGRADE/CLOSE succeeded, or
 * failed with a seqid incrementing error -
 * see comments nfs_fs.h:seqid_mutating_error()
 */
static inline void nfs_increment_seqid(int status, struct nfs_seqid *seqid)
{
        switch (status) {
                case 0:
                        break;
                case -NFS4ERR_BAD_SEQID:
                case -NFS4ERR_STALE_CLIENTID:
                case -NFS4ERR_STALE_STATEID:
                case -NFS4ERR_BAD_STATEID:
                case -NFS4ERR_BADXDR:
                case -NFS4ERR_RESOURCE:
                case -NFS4ERR_NOFILEHANDLE:
                        /* Non-seqid mutating errors */
                        return;
        }
        /*
         * Note: no locking needed as we are guaranteed to be first
         * on the sequence list
         */
        seqid->sequence->counter++;
}

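/*
 * Added note: NFSv4.0 requires the client to bump its open-owner or
 * lock-owner sequence number after every seqid-bearing operation the
 * server may have processed.  The errors filtered out above are the ones
 * a server reports before it examines the seqid, so for them the counter
 * must stay unchanged; any other outcome, success or failure, is treated
 * as seqid-mutating.
 */
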
void nfs_increment_open_seqid(int status, struct nfs_seqid *seqid)
{
        if (status == -NFS4ERR_BAD_SEQID) {
                struct nfs4_state_owner *sp = container_of(seqid->sequence,
                                struct nfs4_state_owner, so_seqid);
                nfs4_drop_state_owner(sp);
        }
        return nfs_increment_seqid(status, seqid);
}

/*
 * Increment the seqid if the LOCK/LOCKU succeeded, or
 * failed with a seqid incrementing error -
 * see comments nfs_fs.h:seqid_mutating_error()
 */
void nfs_increment_lock_seqid(int status, struct nfs_seqid *seqid)
{
        return nfs_increment_seqid(status, seqid);
}

int nfs_wait_on_sequence(struct nfs_seqid *seqid, struct rpc_task *task)
{
        struct rpc_sequence *sequence = seqid->sequence->sequence;
        int status = 0;

        if (sequence->list.next == &seqid->list)
                goto out;
        spin_lock(&sequence->lock);
        if (!list_empty(&sequence->list)) {
                rpc_sleep_on(&sequence->wait, task, NULL, NULL);
                status = -EAGAIN;
        } else
                list_add(&seqid->list, &sequence->list);
        spin_unlock(&sequence->lock);
out:
        return status;
}

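/*
 * Added note: nfs_wait_on_sequence() is what serializes seqid-bearing
 * RPCs for a single owner.  Only the nfs_seqid at the head of the
 * rpc_sequence list may proceed; any other task is put to sleep on
 * sequence->wait and gets -EAGAIN so the RPC state machine retries later,
 * and nfs_free_seqid() wakes the next waiter when the slot frees up.
 */
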
static int reclaimer(void *);
struct reclaimer_args {
        struct nfs4_client *clp;
        struct completion complete;
};

/*
 * State recovery routine
 */
static void
nfs4_recover_state(void *data)
{
        struct nfs4_client *clp = (struct nfs4_client *)data;
        struct reclaimer_args args = {
                .clp = clp,
        };
        might_sleep();

        init_completion(&args.complete);

        if (kernel_thread(reclaimer, &args, CLONE_KERNEL) < 0)
                goto out_failed_clear;
        wait_for_completion(&args.complete);
        return;
out_failed_clear:
        set_bit(NFS4CLNT_OK, &clp->cl_state);
        wake_up_all(&clp->cl_waitq);
        rpc_wake_up(&clp->cl_rpcwaitq);
}

/*
 * Schedule a state recovery attempt
 */
void
nfs4_schedule_state_recovery(struct nfs4_client *clp)
{
        if (!clp)
                return;
        if (test_and_clear_bit(NFS4CLNT_OK, &clp->cl_state))
                schedule_work(&clp->cl_recoverd);
}

static int nfs4_reclaim_locks(struct nfs4_state_recovery_ops *ops, struct nfs4_state *state)
{
        struct inode *inode = state->inode;
        struct file_lock *fl;
        int status = 0;

        for (fl = inode->i_flock; fl != NULL; fl = fl->fl_next) {
                if (!(fl->fl_flags & FL_POSIX))
                        continue;
                if (((struct nfs_open_context *)fl->fl_file->private_data)->state != state)
                        continue;
                status = ops->recover_lock(state, fl);
                if (status >= 0)
                        continue;
                switch (status) {
                        default:
                                printk(KERN_ERR "%s: unhandled error %d. Zeroing state\n",
                                                __FUNCTION__, status);
                        case -NFS4ERR_EXPIRED:
                        case -NFS4ERR_NO_GRACE:
                        case -NFS4ERR_RECLAIM_BAD:
                        case -NFS4ERR_RECLAIM_CONFLICT:
                                /* kill_proc(fl->fl_owner, SIGLOST, 1); */
                                break;
                        case -NFS4ERR_STALE_CLIENTID:
                                goto out_err;
                }
        }
        return 0;
out_err:
        return status;
}

static int nfs4_reclaim_open_state(struct nfs4_state_recovery_ops *ops, struct nfs4_state_owner *sp)
{
        struct nfs4_state *state;
        struct nfs4_lock_state *lock;
        int status = 0;

        /* Note: we rely on the sp->so_states list being ordered
         * so that we always reclaim open(O_RDWR) and/or open(O_WRITE)
         * states first.
         * This is needed to ensure that the server won't give us any
         * read delegations that we have to return if, say, we are
         * recovering after a network partition or a reboot from a
         * server that doesn't support a grace period.
         */
        list_for_each_entry(state, &sp->so_states, open_states) {
                if (state->state == 0)
                        continue;
                status = ops->recover_open(sp, state);
                if (status >= 0) {
                        status = nfs4_reclaim_locks(ops, state);
                        if (status < 0)
                                goto out_err;
                        list_for_each_entry(lock, &state->lock_states, ls_locks) {
                                if (!(lock->ls_flags & NFS_LOCK_INITIALIZED))
                                        printk("%s: Lock reclaim failed!\n",
                                                        __FUNCTION__);
                        }
                        continue;
                }
                switch (status) {
                        default:
                                printk(KERN_ERR "%s: unhandled error %d. Zeroing state\n",
                                                __FUNCTION__, status);
                        case -ENOENT:
                        case -NFS4ERR_RECLAIM_BAD:
                        case -NFS4ERR_RECLAIM_CONFLICT:
                                /*
                                 * Open state on this file cannot be recovered
                                 * All we can do is revert to using the zero stateid.
                                 */
                                memset(state->stateid.data, 0,
                                        sizeof(state->stateid.data));
                                /* Mark the file as being 'closed' */
                                state->state = 0;
                                break;
                        case -NFS4ERR_EXPIRED:
                        case -NFS4ERR_NO_GRACE:
                        case -NFS4ERR_STALE_CLIENTID:
                                goto out_err;
                }
        }
        return 0;
out_err:
        return status;
}

static void nfs4_state_mark_reclaim(struct nfs4_client *clp)
{
        struct nfs4_state_owner *sp;
        struct nfs4_state *state;
        struct nfs4_lock_state *lock;

        /* Reset all sequence ids to zero */
        list_for_each_entry(sp, &clp->cl_state_owners, so_list) {
                sp->so_seqid.counter = 0;
                sp->so_seqid.flags = 0;
                spin_lock(&sp->so_lock);
                list_for_each_entry(state, &sp->so_states, open_states) {
                        list_for_each_entry(lock, &state->lock_states, ls_locks) {
                                lock->ls_seqid.counter = 0;
                                lock->ls_seqid.flags = 0;
                                lock->ls_flags &= ~NFS_LOCK_INITIALIZED;
                        }
                }
                spin_unlock(&sp->so_lock);
        }
}

static int reclaimer(void *ptr)
{
        struct reclaimer_args *args = (struct reclaimer_args *)ptr;
        struct nfs4_client *clp = args->clp;
        struct nfs4_state_owner *sp;
        struct nfs4_state_recovery_ops *ops;
        int status = 0;

        daemonize("%u.%u.%u.%u-reclaim", NIPQUAD(clp->cl_addr));
        allow_signal(SIGKILL);

        atomic_inc(&clp->cl_count);
        complete(&args->complete);

        /* Ensure exclusive access to NFSv4 state */
        lock_kernel();
        down_write(&clp->cl_sem);
        /* Are there any NFS mounts out there? */
        if (list_empty(&clp->cl_superblocks))
                goto out;
restart_loop:
        status = nfs4_proc_renew(clp);
        switch (status) {
                case 0:
                case -NFS4ERR_CB_PATH_DOWN:
                        goto out;
                case -NFS4ERR_STALE_CLIENTID:
                case -NFS4ERR_LEASE_MOVED:
                        ops = &nfs4_reboot_recovery_ops;
                        break;
                default:
                        ops = &nfs4_network_partition_recovery_ops;
        }
        nfs4_state_mark_reclaim(clp);
        status = __nfs4_init_client(clp);
        if (status)
                goto out_error;
        /* Mark all delegations for reclaim */
        nfs_delegation_mark_reclaim(clp);
        /* Note: list is protected by exclusive lock on cl->cl_sem */
        list_for_each_entry(sp, &clp->cl_state_owners, so_list) {
                status = nfs4_reclaim_open_state(ops, sp);
                if (status < 0) {
                        if (status == -NFS4ERR_NO_GRACE) {
                                ops = &nfs4_network_partition_recovery_ops;
                                status = nfs4_reclaim_open_state(ops, sp);
                        }
                        if (status == -NFS4ERR_STALE_CLIENTID)
                                goto restart_loop;
                        if (status == -NFS4ERR_EXPIRED)
                                goto restart_loop;
                }
        }
        nfs_delegation_reap_unclaimed(clp);
out:
        set_bit(NFS4CLNT_OK, &clp->cl_state);
        up_write(&clp->cl_sem);
        unlock_kernel();
        wake_up_all(&clp->cl_waitq);
        rpc_wake_up(&clp->cl_rpcwaitq);
        if (status == -NFS4ERR_CB_PATH_DOWN)
                nfs_handle_cb_pathdown(clp);
        nfs4_put_client(clp);
        return 0;
out_error:
        printk(KERN_WARNING "Error: state recovery failed on NFSv4 server %u.%u.%u.%u with error %d\n",
                                NIPQUAD(clp->cl_addr.s_addr), -status);
        goto out;
}

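/*
 * Added summary of the recovery flow implemented above: the reclaimer
 * thread probes the lease with RENEW, picks reboot or network-partition
 * recovery ops based on the result, resets all sequence ids, re-establishes
 * the clientid via SETCLIENTID/SETCLIENTID_CONFIRM, and then walks every
 * state_owner reclaiming opens, locks and delegations, falling back to
 * no-grace recovery and restarting from RENEW if the clientid goes stale
 * again mid-recovery.
 */
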
/*
 * Local variables:
 *  c-basic-offset: 8
 * End:
 */