Commit | Line | Data |
---|---|---|
1da177e4 LT |
1 | /* |
2 | * linux/fs/lockd/clntlock.c | |
3 | * | |
4 | * Lock handling for the client side NLM implementation | |
5 | * | |
6 | * Copyright (C) 1996, Olaf Kirch <okir@monad.swb.de> | |
7 | */ | |
8 | ||
9 | #include <linux/module.h> | |
10 | #include <linux/types.h> | |
11 | #include <linux/time.h> | |
12 | #include <linux/nfs_fs.h> | |
13 | #include <linux/sunrpc/clnt.h> | |
14 | #include <linux/sunrpc/svc.h> | |
15 | #include <linux/lockd/lockd.h> | |
16 | #include <linux/smp_lock.h> | |
17 | ||
18 | #define NLMDBG_FACILITY NLMDBG_CLIENT | |
19 | ||
20 | /* | |
21 | * Local function prototypes | |
22 | */ | |
23 | static int reclaimer(void *ptr); | |
24 | ||
25 | /* | |
26 | * The following functions handle blocking and granting from the | |
27 | * client perspective. | |
28 | */ | |
29 | ||
/*
 * This is the representation of a blocked client lock: one entry exists
 * on the global nlm_blocked list for every lock we are waiting on.
 */
struct nlm_wait {
	struct list_head	b_list;		/* linked list (of all blocked client locks) */
	wait_queue_head_t	b_wait;		/* where to wait on */
	struct nlm_host *	b_host;		/* host holding the lock (matched against GRANTED caller) */
	struct file_lock *	b_lock;		/* local file lock */
	unsigned short		b_reclaim;	/* got to reclaim lock */
	__be32			b_status;	/* grant callback status; starts as nlm_lck_blocked */
};
41 | ||
/* All client-side blocks waiting for a server GRANTED callback */
static LIST_HEAD(nlm_blocked);
43 | |
44 | /* | |
ecdbf769 | 45 | * Queue up a lock for blocking so that the GRANTED request can see it |
1da177e4 | 46 | */ |
3a649b88 | 47 | struct nlm_wait *nlmclnt_prepare_block(struct nlm_host *host, struct file_lock *fl) |
ecdbf769 TM |
48 | { |
49 | struct nlm_wait *block; | |
50 | ||
ecdbf769 | 51 | block = kmalloc(sizeof(*block), GFP_KERNEL); |
3a649b88 TM |
52 | if (block != NULL) { |
53 | block->b_host = host; | |
54 | block->b_lock = fl; | |
55 | init_waitqueue_head(&block->b_wait); | |
e8c5c045 | 56 | block->b_status = nlm_lck_blocked; |
3a649b88 TM |
57 | list_add(&block->b_list, &nlm_blocked); |
58 | } | |
59 | return block; | |
ecdbf769 TM |
60 | } |
61 | ||
3a649b88 | 62 | void nlmclnt_finish_block(struct nlm_wait *block) |
1da177e4 | 63 | { |
ecdbf769 TM |
64 | if (block == NULL) |
65 | return; | |
ecdbf769 TM |
66 | list_del(&block->b_list); |
67 | kfree(block); | |
68 | } | |
1da177e4 | 69 | |
/*
 * Block on a lock: sleep until the GRANTED callback flips b_status,
 * the timeout expires, or we are interrupted by a signal.
 *
 * Returns 0 with req->a_res.status set to the callback status (still
 * nlm_lck_blocked if we merely timed out, so the caller re-polls), or
 * -EAGAIN if the server asked us to block without a block entry, or
 * -ERESTARTSYS on signal.
 */
int nlmclnt_block(struct nlm_wait *block, struct nlm_rqst *req, long timeout)
{
	long ret;

	/* A broken server might ask us to block even if we didn't
	 * request it. Just say no!
	 */
	if (block == NULL)
		return -EAGAIN;

	/* Go to sleep waiting for GRANT callback. Some servers seem
	 * to lose callbacks, however, so we're going to poll from
	 * time to time just to make sure.
	 *
	 * For now, the retry frequency is pretty high; normally
	 * a 1 minute timeout would do. See the comment before
	 * nlmclnt_lock for an explanation.
	 */
	ret = wait_event_interruptible_timeout(block->b_wait,
			block->b_status != nlm_lck_blocked,
			timeout);
	if (ret < 0)
		return -ERESTARTSYS;
	/* Propagate the callback status (or nlm_lck_blocked on timeout). */
	req->a_res.status = block->b_status;
	return 0;
}
99 | ||
/*
 * The server lockd has called us back to tell us the lock was granted.
 *
 * Walks every pending block and wakes each one whose lock range, lock
 * owner "pid" (svid), server address and file handle all match the
 * callback arguments. Returns nlm_granted if at least one waiter
 * matched, nlm_lck_denied otherwise.
 */
__be32 nlmclnt_grant(const struct sockaddr_in *addr, const struct nlm_lock *lock)
{
	const struct file_lock *fl = &lock->fl;
	const struct nfs_fh *fh = &lock->fh;
	struct nlm_wait	*block;
	__be32 res = nlm_lck_denied;

	/*
	 * Look up blocked request based on arguments.
	 * Warning: must not use cookie to match it!
	 */
	list_for_each_entry(block, &nlm_blocked, b_list) {
		struct file_lock *fl_blocked = block->b_lock;

		if (fl_blocked->fl_start != fl->fl_start)
			continue;
		if (fl_blocked->fl_end != fl->fl_end)
			continue;
		/*
		 * Careful! The NLM server will return the 32-bit "pid" that
		 * we put on the wire: in this case the lockowner "pid".
		 */
		if (fl_blocked->fl_u.nfs_fl.owner->pid != lock->svid)
			continue;
		if (!nlm_cmp_addr(&block->b_host->h_addr, addr))
			continue;
		if (nfs_compare_fh(NFS_FH(fl_blocked->fl_file->f_path.dentry->d_inode) ,fh) != 0)
			continue;
		/* Alright, we found a lock. Set the return status
		 * and wake up the caller
		 */
		block->b_status = nlm_granted;
		wake_up(&block->b_wait);
		res = nlm_granted;
	}
	return res;
}
140 | ||
/*
 * The following procedures deal with the recovery of locks after a
 * server crash.
 */

/*
 * Reclaim all locks on server host. We do this by spawning a separate
 * reclaimer thread. Only the first caller spawns it: h_reclaiming is
 * used as a counter so nested reboot notifications don't start a
 * second thread. The module reference pins lockd in memory until the
 * reclaimer does module_put_and_exit().
 */
void
nlmclnt_recovery(struct nlm_host *host)
{
	if (!host->h_reclaiming++) {
		nlm_get_host(host);
		__module_get(THIS_MODULE);
		/* NOTE(review): if kernel_thread() fails, only the module
		 * reference is dropped — the nlm_get_host() reference looks
		 * leaked here; verify against nlm_release_host() callers. */
		if (kernel_thread(reclaimer, host, CLONE_FS | CLONE_FILES) < 0)
			module_put(THIS_MODULE);
	}
}
160 | ||
/*
 * Thread body that re-establishes every lock we held on a rebooted
 * server: replays host->h_granted via nlmclnt_reclaim(), restarting
 * from scratch if the server reboots yet again mid-reclaim, then wakes
 * all blocked waiters with nlm_lck_denied_grace_period so they retry.
 */
static int
reclaimer(void *ptr)
{
	struct nlm_host	  *host = (struct nlm_host *) ptr;
	struct nlm_wait	  *block;
	struct file_lock *fl, *next;
	u32 nsmstate;

	daemonize("%s-reclaim", host->h_name);
	allow_signal(SIGKILL);

	down_write(&host->h_rwsem);

	/* This one ensures that our parent doesn't terminate while the
	 * reclaim is in progress */
	lock_kernel();
	lockd_up(0); /* note: this cannot fail as lockd is already running */

	dprintk("lockd: reclaiming locks for host %s\n", host->h_name);

restart:
	/* Snapshot the server's NSM state so we can detect another reboot
	 * happening while we are still replaying locks. */
	nsmstate = host->h_nsmstate;

	/* Force a portmap getport - the peer's lockd will
	 * most likely end up on a different port.
	 */
	host->h_nextrebind = jiffies;
	nlm_rebind_host(host);

	/* First, reclaim all locks that have been granted. */
	list_splice_init(&host->h_granted, &host->h_reclaim);
	list_for_each_entry_safe(fl, next, &host->h_reclaim, fl_u.nfs_fl.list) {
		list_del_init(&fl->fl_u.nfs_fl.list);

		/* Why are we leaking memory here? --okir */
		if (signalled())
			continue;
		if (nlmclnt_reclaim(host, fl) != 0)
			continue;
		/* Successfully reclaimed: put it back on the granted list. */
		list_add_tail(&fl->fl_u.nfs_fl.list, &host->h_granted);
		if (host->h_nsmstate != nsmstate) {
			/* Argh! The server rebooted again! */
			goto restart;
		}
	}

	host->h_reclaiming = 0;
	up_write(&host->h_rwsem);
	dprintk("NLM: done reclaiming locks for host %s\n", host->h_name);

	/* Now, wake up all processes that sleep on a blocked lock */
	list_for_each_entry(block, &nlm_blocked, b_list) {
		if (block->b_host == host) {
			block->b_status = nlm_lck_denied_grace_period;
			wake_up(&block->b_wait);
		}
	}

	/* Release host handle after use */
	nlm_release_host(host);
	lockd_down();
	unlock_kernel();
	module_put_and_exit(0);
}