Commit | Line | Data |
---|---|---|
1da177e4 LT |
1 | /* |
2 | * linux/fs/lockd/clntproc.c | |
3 | * | |
4 | * RPC procedures for the client side NLM implementation | |
5 | * | |
6 | * Copyright (C) 1996, Olaf Kirch <okir@monad.swb.de> | |
7 | */ | |
8 | ||
1da177e4 LT |
9 | #include <linux/module.h> |
10 | #include <linux/types.h> | |
11 | #include <linux/errno.h> | |
12 | #include <linux/fs.h> | |
13 | #include <linux/nfs_fs.h> | |
14 | #include <linux/utsname.h> | |
7dfb7103 | 15 | #include <linux/freezer.h> |
1da177e4 LT |
16 | #include <linux/sunrpc/clnt.h> |
17 | #include <linux/sunrpc/svc.h> | |
18 | #include <linux/lockd/lockd.h> | |
1da177e4 LT |
19 | |
20 | #define NLMDBG_FACILITY NLMDBG_CLIENT | |
21 | #define NLMCLNT_GRACE_WAIT (5*HZ) | |
ecdbf769 | 22 | #define NLMCLNT_POLL_TIMEOUT (30*HZ) |
aaaa9942 | 23 | #define NLMCLNT_MAX_RETRIES 3 |
1da177e4 LT |
24 | |
25 | static int nlmclnt_test(struct nlm_rqst *, struct file_lock *); | |
26 | static int nlmclnt_lock(struct nlm_rqst *, struct file_lock *); | |
27 | static int nlmclnt_unlock(struct nlm_rqst *, struct file_lock *); | |
e8c5c045 | 28 | static int nlm_stat_to_errno(__be32 stat); |
1da177e4 | 29 | static void nlmclnt_locks_init_private(struct file_lock *fl, struct nlm_host *host); |
16fb2425 | 30 | static int nlmclnt_cancel(struct nlm_host *, int , struct file_lock *); |
1da177e4 | 31 | |
963d8fe5 TM |
32 | static const struct rpc_call_ops nlmclnt_unlock_ops; |
33 | static const struct rpc_call_ops nlmclnt_cancel_ops; | |
34 | ||
1da177e4 LT |
35 | /* |
36 | * Cookie counter for NLM requests | |
37 | */ | |
031d869d | 38 | static atomic_t nlm_cookie = ATOMIC_INIT(0x1234); |
1da177e4 | 39 | |
031d869d | 40 | void nlmclnt_next_cookie(struct nlm_cookie *c) |
1da177e4 | 41 | { |
031d869d OK |
42 | u32 cookie = atomic_inc_return(&nlm_cookie); |
43 | ||
44 | memcpy(c->data, &cookie, 4); | |
1da177e4 | 45 | c->len=4; |
1da177e4 LT |
46 | } |
47 | ||
48 | static struct nlm_lockowner *nlm_get_lockowner(struct nlm_lockowner *lockowner) | |
49 | { | |
50 | atomic_inc(&lockowner->count); | |
51 | return lockowner; | |
52 | } | |
53 | ||
54 | static void nlm_put_lockowner(struct nlm_lockowner *lockowner) | |
55 | { | |
56 | if (!atomic_dec_and_lock(&lockowner->count, &lockowner->host->h_lock)) | |
57 | return; | |
58 | list_del(&lockowner->list); | |
59 | spin_unlock(&lockowner->host->h_lock); | |
60 | nlm_release_host(lockowner->host); | |
61 | kfree(lockowner); | |
62 | } | |
63 | ||
64 | static inline int nlm_pidbusy(struct nlm_host *host, uint32_t pid) | |
65 | { | |
66 | struct nlm_lockowner *lockowner; | |
67 | list_for_each_entry(lockowner, &host->h_lockowners, list) { | |
68 | if (lockowner->pid == pid) | |
69 | return -EBUSY; | |
70 | } | |
71 | return 0; | |
72 | } | |
73 | ||
74 | static inline uint32_t __nlm_alloc_pid(struct nlm_host *host) | |
75 | { | |
76 | uint32_t res; | |
77 | do { | |
78 | res = host->h_pidcount++; | |
79 | } while (nlm_pidbusy(host, res) < 0); | |
80 | return res; | |
81 | } | |
82 | ||
83 | static struct nlm_lockowner *__nlm_find_lockowner(struct nlm_host *host, fl_owner_t owner) | |
84 | { | |
85 | struct nlm_lockowner *lockowner; | |
86 | list_for_each_entry(lockowner, &host->h_lockowners, list) { | |
87 | if (lockowner->owner != owner) | |
88 | continue; | |
89 | return nlm_get_lockowner(lockowner); | |
90 | } | |
91 | return NULL; | |
92 | } | |
93 | ||
94 | static struct nlm_lockowner *nlm_find_lockowner(struct nlm_host *host, fl_owner_t owner) | |
95 | { | |
96 | struct nlm_lockowner *res, *new = NULL; | |
97 | ||
98 | spin_lock(&host->h_lock); | |
99 | res = __nlm_find_lockowner(host, owner); | |
100 | if (res == NULL) { | |
101 | spin_unlock(&host->h_lock); | |
f52720ca | 102 | new = kmalloc(sizeof(*new), GFP_KERNEL); |
1da177e4 LT |
103 | spin_lock(&host->h_lock); |
104 | res = __nlm_find_lockowner(host, owner); | |
105 | if (res == NULL && new != NULL) { | |
106 | res = new; | |
107 | atomic_set(&new->count, 1); | |
108 | new->owner = owner; | |
109 | new->pid = __nlm_alloc_pid(host); | |
110 | new->host = nlm_get_host(host); | |
111 | list_add(&new->list, &host->h_lockowners); | |
112 | new = NULL; | |
113 | } | |
114 | } | |
115 | spin_unlock(&host->h_lock); | |
f99d49ad | 116 | kfree(new); |
1da177e4 LT |
117 | return res; |
118 | } | |
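
nlm_find_lockowner() above uses the usual "drop the lock, allocate, retake and re-check" pattern so that kmalloc(GFP_KERNEL) is never called while host->h_lock is held, and a racing allocation is simply freed. A minimal stand-alone sketch of the same pattern (all names here are illustrative, not taken from lockd):

```c
#include <linux/slab.h>
#include <linux/list.h>
#include <linux/spinlock.h>

struct item  { int key; struct list_head list; };
struct table { spinlock_t lock; struct list_head items; };

static struct item *__lookup(struct table *t, int key)
{
	struct item *it;
	list_for_each_entry(it, &t->items, list)
		if (it->key == key)
			return it;
	return NULL;
}

static struct item *find_or_create(struct table *t, int key)
{
	struct item *res, *new = NULL;

	spin_lock(&t->lock);
	res = __lookup(t, key);
	if (res == NULL) {
		spin_unlock(&t->lock);			/* never sleep under a spinlock */
		new = kmalloc(sizeof(*new), GFP_KERNEL);
		spin_lock(&t->lock);
		res = __lookup(t, key);			/* re-check: another task may have won */
		if (res == NULL && new != NULL) {
			new->key = key;
			list_add(&new->list, &t->items);
			res = new;
			new = NULL;			/* consumed, don't free below */
		}
	}
	spin_unlock(&t->lock);
	kfree(new);					/* kfree(NULL) is a no-op */
	return res;
}
```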
119 | ||
120 | /* | |
121 | * Initialize arguments for TEST/LOCK/UNLOCK/CANCEL calls | |
122 | */ | |
123 | static void nlmclnt_setlockargs(struct nlm_rqst *req, struct file_lock *fl) | |
124 | { | |
125 | struct nlm_args *argp = &req->a_args; | |
126 | struct nlm_lock *lock = &argp->lock; | |
127 | ||
128 | nlmclnt_next_cookie(&argp->cookie); | |
129 | argp->state = nsm_local_state; | |
225a719f | 130 | memcpy(&lock->fh, NFS_FH(fl->fl_file->f_path.dentry->d_inode), sizeof(struct nfs_fh)); |
e9ff3990 | 131 | lock->caller = utsname()->nodename; |
1da177e4 | 132 | lock->oh.data = req->a_owner; |
7bab377f TM |
133 | lock->oh.len = snprintf(req->a_owner, sizeof(req->a_owner), "%u@%s", |
134 | (unsigned int)fl->fl_u.nfs_fl.owner->pid, | |
e9ff3990 | 135 | utsname()->nodename); |
7bab377f | 136 | lock->svid = fl->fl_u.nfs_fl.owner->pid; |
3a649b88 TM |
137 | lock->fl.fl_start = fl->fl_start; |
138 | lock->fl.fl_end = fl->fl_end; | |
139 | lock->fl.fl_type = fl->fl_type; | |
1da177e4 LT |
140 | } |
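
For reference, the owner handle built by nlmclnt_setlockargs() is just the textual "pid@hostname" of the client-side lockowner, with the same pid repeated in lock->svid. A tiny illustrative snippet (the pid value and hostname are made up):

```c
#include <stdio.h>

/* Illustrative only: the shape of the owner string assembled above. */
int main(void)
{
	char owner[32];
	int len = snprintf(owner, sizeof(owner), "%u@%s", 7u, "wombat");

	/* owner is now "7@wombat", len == 8: this is what lock->oh.data
	 * points at (via req->a_owner), lock->oh.len == len, and
	 * lock->svid carries the same lockowner pid (7). */
	printf("%s (%d)\n", owner, len);
	return 0;
}
```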
141 | ||
142 | static void nlmclnt_release_lockargs(struct nlm_rqst *req) | |
143 | { | |
3a649b88 | 144 | BUG_ON(req->a_args.lock.fl.fl_ops != NULL); |
1da177e4 LT |
145 | } |
146 | ||
1093a60e CL |
147 | /** |
148 | * nlmclnt_proc - Perform a single client-side lock request | |
149 | * @host: address of a valid nlm_host context representing the NLM server | |
150 | * @cmd: fcntl-style file lock operation to perform | |
151 | * @fl: address of arguments for the lock operation | |
152 | * | |
1da177e4 | 153 | */ |
1093a60e | 154 | int nlmclnt_proc(struct nlm_host *host, int cmd, struct file_lock *fl) |
1da177e4 | 155 | { |
92737230 | 156 | struct nlm_rqst *call; |
1093a60e | 157 | int status; |
1da177e4 | 158 | |
1093a60e | 159 | nlm_get_host(host); |
92737230 TM |
160 | call = nlm_alloc_call(host); |
161 | if (call == NULL) | |
162 | return -ENOMEM; | |
1da177e4 | 163 | |
92737230 TM |
164 | nlmclnt_locks_init_private(fl, host); |
165 | /* Set up the argument struct */ | |
166 | nlmclnt_setlockargs(call, fl); | |
1da177e4 | 167 | |
1da177e4 LT |
168 | if (IS_SETLK(cmd) || IS_SETLKW(cmd)) { |
169 | if (fl->fl_type != F_UNLCK) { | |
170 | call->a_args.block = IS_SETLKW(cmd) ? 1 : 0; | |
171 | status = nlmclnt_lock(call, fl); | |
172 | } else | |
173 | status = nlmclnt_unlock(call, fl); | |
174 | } else if (IS_GETLK(cmd)) | |
175 | status = nlmclnt_test(call, fl); | |
176 | else | |
177 | status = -EINVAL; | |
178 | ||
92737230 TM |
179 | fl->fl_ops->fl_release_private(fl); |
180 | fl->fl_ops = NULL; | |
181 | ||
1da177e4 | 182 | dprintk("lockd: clnt proc returns %d\n", status); |
1da177e4 LT |
183 | return status; |
184 | } | |
1093a60e | 185 | EXPORT_SYMBOL_GPL(nlmclnt_proc); |
1da177e4 LT |
186 | |
187 | /* | |
188 | * Allocate an NLM RPC call struct | |
92737230 TM |
189 | * |
190 | * Note: the caller must hold a reference to host. In case of failure, | |
191 | * this reference will be released. | |
1da177e4 | 192 | */ |
92737230 | 193 | struct nlm_rqst *nlm_alloc_call(struct nlm_host *host) |
1da177e4 LT |
194 | { |
195 | struct nlm_rqst *call; | |
196 | ||
36943fa4 TM |
197 | for(;;) { |
198 | call = kzalloc(sizeof(*call), GFP_KERNEL); | |
199 | if (call != NULL) { | |
5e7f37a7 | 200 | atomic_set(&call->a_count, 1); |
1da177e4 LT |
201 | locks_init_lock(&call->a_args.lock.fl); |
202 | locks_init_lock(&call->a_res.lock.fl); | |
92737230 | 203 | call->a_host = host; |
1da177e4 LT |
204 | return call; |
205 | } | |
36943fa4 TM |
206 | if (signalled()) |
207 | break; | |
92737230 | 208 | printk("nlm_alloc_call: failed, waiting for memory\n"); |
041e0e3b | 209 | schedule_timeout_interruptible(5*HZ); |
1da177e4 | 210 | } |
92737230 | 211 | nlm_release_host(host); |
1da177e4 LT |
212 | return NULL; |
213 | } | |
214 | ||
92737230 TM |
215 | void nlm_release_call(struct nlm_rqst *call) |
216 | { | |
5e7f37a7 TM |
217 | if (!atomic_dec_and_test(&call->a_count)) |
218 | return; | |
92737230 TM |
219 | nlm_release_host(call->a_host); |
220 | nlmclnt_release_lockargs(call); | |
221 | kfree(call); | |
222 | } | |
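
The comment above nlm_alloc_call() spells out the reference contract: the caller passes in a host reference, which is consumed on failure and otherwise released later through nlm_release_call(). A short sketch of that contract, modelled on nlmclnt_cancel() further down (the surrounding error handling is illustrative):

```c
/* Sketch of the nlm_alloc_call()/nlm_release_call() reference contract. */
static int example_request(struct nlm_host *host, struct file_lock *fl)
{
	struct nlm_rqst *req;

	req = nlm_alloc_call(nlm_get_host(host));	/* hand over one host reference */
	if (req == NULL)
		return -ENOMEM;				/* nlm_alloc_call() already dropped it */

	nlmclnt_setlockargs(req, fl);
	/* ... issue the RPC here ... */

	nlm_release_call(req);				/* drops the request and its host ref */
	return 0;
}
```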
223 | ||
224 | static void nlmclnt_rpc_release(void *data) | |
225 | { | |
a86dc496 | 226 | lock_kernel(); |
65fdf7d2 | 227 | nlm_release_call(data); |
a86dc496 | 228 | unlock_kernel(); |
92737230 TM |
229 | } |
230 | ||
1da177e4 LT |
231 | static int nlm_wait_on_grace(wait_queue_head_t *queue) |
232 | { | |
233 | DEFINE_WAIT(wait); | |
234 | int status = -EINTR; | |
235 | ||
236 | prepare_to_wait(queue, &wait, TASK_INTERRUPTIBLE); | |
237 | if (!signalled ()) { | |
238 | schedule_timeout(NLMCLNT_GRACE_WAIT); | |
3e1d1d28 | 239 | try_to_freeze(); |
1da177e4 LT |
240 | if (!signalled ()) |
241 | status = 0; | |
242 | } | |
243 | finish_wait(queue, &wait); | |
244 | return status; | |
245 | } | |
246 | ||
247 | /* | |
248 | * Generic NLM call | |
249 | */ | |
250 | static int | |
d11d10cc | 251 | nlmclnt_call(struct rpc_cred *cred, struct nlm_rqst *req, u32 proc) |
1da177e4 LT |
252 | { |
253 | struct nlm_host *host = req->a_host; | |
254 | struct rpc_clnt *clnt; | |
255 | struct nlm_args *argp = &req->a_args; | |
256 | struct nlm_res *resp = &req->a_res; | |
257 | struct rpc_message msg = { | |
258 | .rpc_argp = argp, | |
259 | .rpc_resp = resp, | |
d11d10cc | 260 | .rpc_cred = cred, |
1da177e4 LT |
261 | }; |
262 | int status; | |
263 | ||
264 | dprintk("lockd: call procedure %d on %s\n", | |
265 | (int)proc, host->h_name); | |
266 | ||
267 | do { | |
268 | if (host->h_reclaiming && !argp->reclaim) | |
269 | goto in_grace_period; | |
270 | ||
271 | /* If we have no RPC client yet, create one. */ | |
272 | if ((clnt = nlm_bind_host(host)) == NULL) | |
273 | return -ENOLCK; | |
274 | msg.rpc_proc = &clnt->cl_procinfo[proc]; | |
275 | ||
276 | /* Perform the RPC call. If an error occurs, try again */ | |
277 | if ((status = rpc_call_sync(clnt, &msg, 0)) < 0) { | |
278 | dprintk("lockd: rpc_call returned error %d\n", -status); | |
279 | switch (status) { | |
280 | case -EPROTONOSUPPORT: | |
281 | status = -EINVAL; | |
282 | break; | |
283 | case -ECONNREFUSED: | |
284 | case -ETIMEDOUT: | |
285 | case -ENOTCONN: | |
286 | nlm_rebind_host(host); | |
287 | status = -EAGAIN; | |
288 | break; | |
289 | case -ERESTARTSYS: | |
290 | return signalled () ? -EINTR : status; | |
291 | default: | |
292 | break; | |
293 | } | |
294 | break; | |
295 | } else | |
e8c5c045 | 296 | if (resp->status == nlm_lck_denied_grace_period) { |
1da177e4 LT |
297 | dprintk("lockd: server in grace period\n"); |
298 | if (argp->reclaim) { | |
299 | printk(KERN_WARNING | |
300 | "lockd: spurious grace period reject?!\n"); | |
301 | return -ENOLCK; | |
302 | } | |
303 | } else { | |
304 | if (!argp->reclaim) { | |
305 | /* We appear to be out of the grace period */ | |
306 | wake_up_all(&host->h_gracewait); | |
307 | } | |
308 | dprintk("lockd: server returns status %d\n", resp->status); | |
309 | return 0; /* Okay, call complete */ | |
310 | } | |
311 | ||
312 | in_grace_period: | |
313 | /* | |
314 | * The server has rebooted and appears to be in the grace | |
315 | * period during which locks are only allowed to be | |
316 | * reclaimed. | |
317 | * We can only back off and try again later. | |
318 | */ | |
319 | status = nlm_wait_on_grace(&host->h_gracewait); | |
320 | } while (status == 0); | |
321 | ||
322 | return status; | |
323 | } | |
324 | ||
325 | /* | |
326 | * Generic NLM call, async version. | |
327 | */ | |
dc9d8d04 | 328 | static struct rpc_task *__nlm_async_call(struct nlm_rqst *req, u32 proc, struct rpc_message *msg, const struct rpc_call_ops *tk_ops) |
1da177e4 LT |
329 | { |
330 | struct nlm_host *host = req->a_host; | |
331 | struct rpc_clnt *clnt; | |
dc9d8d04 TM |
332 | struct rpc_task_setup task_setup_data = { |
333 | .rpc_message = msg, | |
334 | .callback_ops = tk_ops, | |
335 | .callback_data = req, | |
336 | .flags = RPC_TASK_ASYNC, | |
337 | }; | |
1da177e4 LT |
338 | |
339 | dprintk("lockd: call procedure %d on %s (async)\n", | |
340 | (int)proc, host->h_name); | |
341 | ||
342 | /* If we have no RPC client yet, create one. */ | |
92737230 TM |
343 | clnt = nlm_bind_host(host); |
344 | if (clnt == NULL) | |
345 | goto out_err; | |
d4716624 | 346 | msg->rpc_proc = &clnt->cl_procinfo[proc]; |
dc9d8d04 | 347 | task_setup_data.rpc_client = clnt; |
1da177e4 | 348 | |
1da177e4 | 349 | /* bootstrap and kick off the async RPC call */ |
dc9d8d04 | 350 | return rpc_run_task(&task_setup_data); |
92737230 | 351 | out_err: |
a995e9eb | 352 | tk_ops->rpc_release(req); |
dc9d8d04 TM |
353 | return ERR_PTR(-ENOLCK); |
354 | } | |
355 | ||
356 | static int nlm_do_async_call(struct nlm_rqst *req, u32 proc, struct rpc_message *msg, const struct rpc_call_ops *tk_ops) | |
357 | { | |
358 | struct rpc_task *task; | |
359 | ||
360 | task = __nlm_async_call(req, proc, msg, tk_ops); | |
361 | if (IS_ERR(task)) | |
362 | return PTR_ERR(task); | |
363 | rpc_put_task(task); | |
364 | return 0; | |
1da177e4 LT |
365 | } |
366 | ||
dc9d8d04 TM |
367 | /* |
368 | * NLM asynchronous call. | |
369 | */ | |
d4716624 TM |
370 | int nlm_async_call(struct nlm_rqst *req, u32 proc, const struct rpc_call_ops *tk_ops) |
371 | { | |
372 | struct rpc_message msg = { | |
373 | .rpc_argp = &req->a_args, | |
374 | .rpc_resp = &req->a_res, | |
375 | }; | |
dc9d8d04 | 376 | return nlm_do_async_call(req, proc, &msg, tk_ops); |
d4716624 TM |
377 | } |
378 | ||
379 | int nlm_async_reply(struct nlm_rqst *req, u32 proc, const struct rpc_call_ops *tk_ops) | |
380 | { | |
381 | struct rpc_message msg = { | |
382 | .rpc_argp = &req->a_res, | |
383 | }; | |
dc9d8d04 TM |
384 | return nlm_do_async_call(req, proc, &msg, tk_ops); |
385 | } | |
386 | ||
387 | /* | |
388 | * NLM client asynchronous call. | |
389 | * | |
390 | * Note that although the calls are asynchronous, and are therefore | |
391 | * guaranteed to complete, we still always attempt to wait for | |
392 | * completion in order to be able to correctly track the lock | |
393 | * state. | |
394 | */ | |
d11d10cc | 395 | static int nlmclnt_async_call(struct rpc_cred *cred, struct nlm_rqst *req, u32 proc, const struct rpc_call_ops *tk_ops) |
dc9d8d04 TM |
396 | { |
397 | struct rpc_message msg = { | |
398 | .rpc_argp = &req->a_args, | |
399 | .rpc_resp = &req->a_res, | |
d11d10cc | 400 | .rpc_cred = cred, |
dc9d8d04 TM |
401 | }; |
402 | struct rpc_task *task; | |
403 | int err; | |
404 | ||
405 | task = __nlm_async_call(req, proc, &msg, tk_ops); | |
406 | if (IS_ERR(task)) | |
407 | return PTR_ERR(task); | |
408 | err = rpc_wait_for_completion_task(task); | |
409 | rpc_put_task(task); | |
410 | return err; | |
d4716624 TM |
411 | } |
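
The difference between these helpers: nlm_async_call() and nlm_async_reply() fire the RPC and drop the task reference immediately, while nlmclnt_async_call() also waits for completion so the client can keep its lock state consistent. Callers supply a struct rpc_call_ops; a hedged sketch of the minimal callback pair such a caller might define (the names are invented, only the two hooks shown matter):

```c
/* Illustrative rpc_call_ops for use with the async helpers above. */
static void example_call_done(struct rpc_task *task, void *data)
{
	struct nlm_rqst *req = data;

	if (task->tk_status < 0)
		dprintk("lockd: async call failed (err = %d)\n", -task->tk_status);
	else
		dprintk("lockd: async call status %u\n",
			ntohl(req->a_res.status));
}

static const struct rpc_call_ops example_call_ops = {
	.rpc_call_done	= example_call_done,	/* runs once the RPC completes */
	.rpc_release	= nlmclnt_rpc_release,	/* releases the nlm_rqst afterwards */
};

/* e.g.:  status = nlmclnt_async_call(cred, req, NLMPROC_UNLOCK, &example_call_ops); */
```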
412 | ||
1da177e4 LT |
413 | /* |
414 | * TEST for the presence of a conflicting lock | |
415 | */ | |
416 | static int | |
417 | nlmclnt_test(struct nlm_rqst *req, struct file_lock *fl) | |
418 | { | |
419 | int status; | |
420 | ||
d11d10cc | 421 | status = nlmclnt_call(nfs_file_cred(fl->fl_file), req, NLMPROC_TEST); |
1da177e4 | 422 | if (status < 0) |
92737230 | 423 | goto out; |
1da177e4 | 424 | |
92737230 | 425 | switch (req->a_res.status) { |
e8c5c045 | 426 | case nlm_granted: |
92737230 TM |
427 | fl->fl_type = F_UNLCK; |
428 | break; | |
e8c5c045 | 429 | case nlm_lck_denied: |
92737230 TM |
430 | /* |
431 | * Report the conflicting lock back to the application. | |
432 | */ | |
433 | fl->fl_start = req->a_res.lock.fl.fl_start; | |
d67d1c7b | 434 | fl->fl_end = req->a_res.lock.fl.fl_end; |
92737230 TM |
435 | fl->fl_type = req->a_res.lock.fl.fl_type; |
436 | fl->fl_pid = 0; | |
437 | break; | |
438 | default: | |
439 | status = nlm_stat_to_errno(req->a_res.status); | |
1da177e4 | 440 | } |
92737230 TM |
441 | out: |
442 | nlm_release_call(req); | |
443 | return status; | |
1da177e4 LT |
444 | } |
445 | ||
446 | static void nlmclnt_locks_copy_lock(struct file_lock *new, struct file_lock *fl) | |
447 | { | |
4c060b53 TM |
448 | new->fl_u.nfs_fl.state = fl->fl_u.nfs_fl.state; |
449 | new->fl_u.nfs_fl.owner = nlm_get_lockowner(fl->fl_u.nfs_fl.owner); | |
450 | list_add_tail(&new->fl_u.nfs_fl.list, &fl->fl_u.nfs_fl.owner->host->h_granted); | |
1da177e4 LT |
451 | } |
452 | ||
453 | static void nlmclnt_locks_release_private(struct file_lock *fl) | |
454 | { | |
4c060b53 | 455 | list_del(&fl->fl_u.nfs_fl.list); |
1da177e4 | 456 | nlm_put_lockowner(fl->fl_u.nfs_fl.owner); |
1da177e4 LT |
457 | } |
458 | ||
459 | static struct file_lock_operations nlmclnt_lock_ops = { | |
460 | .fl_copy_lock = nlmclnt_locks_copy_lock, | |
461 | .fl_release_private = nlmclnt_locks_release_private, | |
462 | }; | |
463 | ||
464 | static void nlmclnt_locks_init_private(struct file_lock *fl, struct nlm_host *host) | |
465 | { | |
466 | BUG_ON(fl->fl_ops != NULL); | |
467 | fl->fl_u.nfs_fl.state = 0; | |
1da177e4 | 468 | fl->fl_u.nfs_fl.owner = nlm_find_lockowner(host, fl->fl_owner); |
4c060b53 | 469 | INIT_LIST_HEAD(&fl->fl_u.nfs_fl.list); |
1da177e4 LT |
470 | fl->fl_ops = &nlmclnt_lock_ops; |
471 | } | |
472 | ||
9b073574 | 473 | static int do_vfs_lock(struct file_lock *fl) |
1da177e4 LT |
474 | { |
475 | int res = 0; | |
476 | switch (fl->fl_flags & (FL_POSIX|FL_FLOCK)) { | |
477 | case FL_POSIX: | |
478 | res = posix_lock_file_wait(fl->fl_file, fl); | |
479 | break; | |
480 | case FL_FLOCK: | |
481 | res = flock_lock_file_wait(fl->fl_file, fl); | |
482 | break; | |
483 | default: | |
484 | BUG(); | |
485 | } | |
9b073574 | 486 | return res; |
1da177e4 LT |
487 | } |
488 | ||
489 | /* | |
490 | * LOCK: Try to create a lock | |
491 | * | |
492 | * Programmer Harassment Alert | |
493 | * | |
494 | * When given a blocking lock request in a sync RPC call, the HPUX lockd | |
495 | * will faithfully return LCK_BLOCKED but never cares to notify us when | |
496 | * the lock could be granted. This way, our local process could hang | |
497 | * around forever waiting for the callback. | |
498 | * | |
499 | * Solution A: Implement busy-waiting | |
500 | * Solution B: Use the async version of the call (NLM_LOCK_{MSG,RES}) | |
501 | * | |
502 | * For now I am implementing solution A, because I hate the idea of | |
503 | * re-implementing lockd for a third time in two months. The async | |
504 | * calls shouldn't be too hard to do, however. | |
505 | * | |
506 | * This is one of the lovely things about standards in the NFS area: | |
507 | * they're so soft and squishy you can't really blame HP for doing this. | |
508 | */ | |
509 | static int | |
510 | nlmclnt_lock(struct nlm_rqst *req, struct file_lock *fl) | |
511 | { | |
d11d10cc | 512 | struct rpc_cred *cred = nfs_file_cred(fl->fl_file); |
1da177e4 LT |
513 | struct nlm_host *host = req->a_host; |
514 | struct nlm_res *resp = &req->a_res; | |
3a649b88 | 515 | struct nlm_wait *block = NULL; |
01c3b861 | 516 | unsigned char fl_flags = fl->fl_flags; |
5f50c0c6 | 517 | unsigned char fl_type; |
3a649b88 | 518 | int status = -ENOLCK; |
1da177e4 | 519 | |
501c1ed3 | 520 | if (nsm_monitor(host) < 0) |
1da177e4 | 521 | goto out; |
501c1ed3 | 522 | |
01c3b861 TM |
523 | fl->fl_flags |= FL_ACCESS; |
524 | status = do_vfs_lock(fl); | |
4a9af59f | 525 | fl->fl_flags = fl_flags; |
01c3b861 TM |
526 | if (status < 0) |
527 | goto out; | |
1da177e4 | 528 | |
3a649b88 | 529 | block = nlmclnt_prepare_block(host, fl); |
28df955a | 530 | again: |
5f50c0c6 TM |
531 | /* |
532 | * Initialise resp->status to a valid non-zero value, | |
533 | * since 0 == nlm_lck_granted | |
534 | */ | |
535 | resp->status = nlm_lck_blocked; | |
ecdbf769 | 536 | for(;;) { |
28df955a TM |
537 | /* Reboot protection */ |
538 | fl->fl_u.nfs_fl.state = host->h_state; | |
d11d10cc | 539 | status = nlmclnt_call(cred, req, NLMPROC_LOCK); |
ecdbf769 | 540 | if (status < 0) |
ecdbf769 | 541 | break; |
ecdbf769 | 542 | /* Did a reclaimer thread notify us of a server reboot? */ |
e8c5c045 | 543 | if (resp->status == nlm_lck_denied_grace_period) |
ecdbf769 | 544 | continue; |
e8c5c045 | 545 | if (resp->status != nlm_lck_blocked) |
ecdbf769 | 546 | break; |
3a649b88 TM |
547 | /* Wait on an NLM blocking lock */ |
548 | status = nlmclnt_block(block, req, NLMCLNT_POLL_TIMEOUT); | |
3a649b88 | 549 | if (status < 0) |
5f50c0c6 | 550 | break; |
e8c5c045 | 551 | if (resp->status != nlm_lck_blocked) |
3a649b88 | 552 | break; |
ecdbf769 | 553 | } |
1da177e4 | 554 | |
5f50c0c6 TM |
555 | /* if we were interrupted while blocking, then cancel the lock request |
556 | * and exit | |
557 | */ | |
558 | if (resp->status == nlm_lck_blocked) { | |
559 | if (!req->a_args.block) | |
560 | goto out_unlock; | |
561 | if (nlmclnt_cancel(host, req->a_args.block, fl) == 0) | |
562 | goto out_unblock; | |
563 | } | |
564 | ||
e8c5c045 | 565 | if (resp->status == nlm_granted) { |
28df955a TM |
566 | down_read(&host->h_rwsem); |
567 | /* Check whether or not the server has rebooted */ | |
568 | if (fl->fl_u.nfs_fl.state != host->h_state) { | |
569 | up_read(&host->h_rwsem); | |
570 | goto again; | |
571 | } | |
4c060b53 | 572 | /* Ensure the resulting lock will get added to granted list */ |
4a9af59f | 573 | fl->fl_flags |= FL_SLEEP; |
9b073574 | 574 | if (do_vfs_lock(fl) < 0) |
8e24eea7 | 575 | printk(KERN_WARNING "%s: VFS is out of sync with lock manager!\n", __func__); |
28df955a | 576 | up_read(&host->h_rwsem); |
4a9af59f | 577 | fl->fl_flags = fl_flags; |
5f50c0c6 | 578 | status = 0; |
1da177e4 | 579 | } |
5f50c0c6 TM |
580 | if (status < 0) |
581 | goto out_unlock; | |
cc77b152 MS |
582 | /* |
583 | * EAGAIN doesn't make sense for sleeping locks, and in some | |
584 | * cases NLM_LCK_DENIED is returned for a permanent error. So | |
585 | * turn it into an ENOLCK. | |
586 | */ | |
587 | if (resp->status == nlm_lck_denied && (fl_flags & FL_SLEEP)) | |
588 | status = -ENOLCK; | |
589 | else | |
590 | status = nlm_stat_to_errno(resp->status); | |
ecdbf769 | 591 | out_unblock: |
3a649b88 | 592 | nlmclnt_finish_block(block); |
1da177e4 | 593 | out: |
92737230 | 594 | nlm_release_call(req); |
1da177e4 | 595 | return status; |
5f50c0c6 TM |
596 | out_unlock: |
597 | /* Fatal error: ensure that we remove the lock altogether */ | |
598 | dprintk("lockd: lock attempt ended in fatal error.\n" | |
599 | " Attempting to unlock.\n"); | |
600 | nlmclnt_finish_block(block); | |
601 | fl_type = fl->fl_type; | |
602 | fl->fl_type = F_UNLCK; | |
603 | down_read(&host->h_rwsem); | |
604 | do_vfs_lock(fl); | |
605 | up_read(&host->h_rwsem); | |
606 | fl->fl_type = fl_type; | |
607 | fl->fl_flags = fl_flags; | |
d11d10cc | 608 | nlmclnt_async_call(cred, req, NLMPROC_UNLOCK, &nlmclnt_unlock_ops); |
5f50c0c6 | 609 | return status; |
1da177e4 LT |
610 | } |
611 | ||
612 | /* | |
613 | * RECLAIM: Try to reclaim a lock | |
614 | */ | |
615 | int | |
616 | nlmclnt_reclaim(struct nlm_host *host, struct file_lock *fl) | |
617 | { | |
618 | struct nlm_rqst reqst, *req; | |
619 | int status; | |
620 | ||
621 | req = &reqst; | |
622 | memset(req, 0, sizeof(*req)); | |
623 | locks_init_lock(&req->a_args.lock.fl); | |
624 | locks_init_lock(&req->a_res.lock.fl); | |
625 | req->a_host = host; | |
626 | req->a_flags = 0; | |
627 | ||
628 | /* Set up the argument struct */ | |
629 | nlmclnt_setlockargs(req, fl); | |
630 | req->a_args.reclaim = 1; | |
631 | ||
d11d10cc TM |
632 | status = nlmclnt_call(nfs_file_cred(fl->fl_file), req, NLMPROC_LOCK); |
633 | if (status >= 0 && req->a_res.status == nlm_granted) | |
1da177e4 LT |
634 | return 0; |
635 | ||
636 | printk(KERN_WARNING "lockd: failed to reclaim lock for pid %d " | |
637 | "(errno %d, status %d)\n", fl->fl_pid, | |
e8c5c045 | 638 | status, ntohl(req->a_res.status)); |
1da177e4 LT |
639 | |
640 | /* | |
641 | * FIXME: This is a serious failure. We can | |
642 | * | |
643 | * a. Ignore the problem | |
644 | * b. Send the owning process some signal (Linux doesn't have | |
645 | * SIGLOST, though...) | |
646 | * c. Retry the operation | |
647 | * | |
648 | * Until someone comes up with a simple implementation | |
649 | * for b or c, I'll choose option a. | |
650 | */ | |
651 | ||
652 | return -ENOLCK; | |
653 | } | |
654 | ||
655 | /* | |
656 | * UNLOCK: remove an existing lock | |
657 | */ | |
658 | static int | |
659 | nlmclnt_unlock(struct nlm_rqst *req, struct file_lock *fl) | |
660 | { | |
28df955a | 661 | struct nlm_host *host = req->a_host; |
1da177e4 | 662 | struct nlm_res *resp = &req->a_res; |
4a9af59f TM |
663 | int status; |
664 | unsigned char fl_flags = fl->fl_flags; | |
1da177e4 | 665 | |
30f4e20a TM |
666 | /* |
667 | * Note: the server is supposed to either grant us the unlock | |
668 | * request, or to deny it with NLM_LCK_DENIED_GRACE_PERIOD. In either | |
669 | * case, we want to unlock. | |
670 | */ | |
9b073574 | 671 | fl->fl_flags |= FL_EXISTS; |
28df955a | 672 | down_read(&host->h_rwsem); |
4a9af59f TM |
673 | status = do_vfs_lock(fl); |
674 | up_read(&host->h_rwsem); | |
675 | fl->fl_flags = fl_flags; | |
676 | if (status == -ENOENT) { | |
677 | status = 0; | |
9b073574 TM |
678 | goto out; |
679 | } | |
30f4e20a | 680 | |
dc9d8d04 | 681 | atomic_inc(&req->a_count); |
d11d10cc TM |
682 | status = nlmclnt_async_call(nfs_file_cred(fl->fl_file), req, |
683 | NLMPROC_UNLOCK, &nlmclnt_unlock_ops); | |
1da177e4 | 684 | if (status < 0) |
92737230 | 685 | goto out; |
1da177e4 | 686 | |
e8c5c045 | 687 | if (resp->status == nlm_granted) |
92737230 | 688 | goto out; |
1da177e4 | 689 | |
e8c5c045 | 690 | if (resp->status != nlm_lck_denied_nolocks) |
1da177e4 | 691 | printk("lockd: unexpected unlock status: %d\n", resp->status); |
1da177e4 | 692 | /* What to do now? I'm out of my depth... */ |
92737230 TM |
693 | status = -ENOLCK; |
694 | out: | |
695 | nlm_release_call(req); | |
696 | return status; | |
1da177e4 LT |
697 | } |
698 | ||
963d8fe5 | 699 | static void nlmclnt_unlock_callback(struct rpc_task *task, void *data) |
1da177e4 | 700 | { |
963d8fe5 | 701 | struct nlm_rqst *req = data; |
e8c5c045 | 702 | u32 status = ntohl(req->a_res.status); |
1da177e4 LT |
703 | |
704 | if (RPC_ASSASSINATED(task)) | |
705 | goto die; | |
706 | ||
707 | if (task->tk_status < 0) { | |
708 | dprintk("lockd: unlock failed (err = %d)\n", -task->tk_status); | |
709 | goto retry_rebind; | |
710 | } | |
711 | if (status == NLM_LCK_DENIED_GRACE_PERIOD) { | |
712 | rpc_delay(task, NLMCLNT_GRACE_WAIT); | |
713 | goto retry_unlock; | |
714 | } | |
715 | if (status != NLM_LCK_GRANTED) | |
716 | printk(KERN_WARNING "lockd: unexpected unlock status: %d\n", status); | |
717 | die: | |
1da177e4 LT |
718 | return; |
719 | retry_rebind: | |
a86dc496 | 720 | lock_kernel(); |
1da177e4 | 721 | nlm_rebind_host(req->a_host); |
a86dc496 | 722 | unlock_kernel(); |
1da177e4 LT |
723 | retry_unlock: |
724 | rpc_restart_call(task); | |
725 | } | |
726 | ||
963d8fe5 TM |
727 | static const struct rpc_call_ops nlmclnt_unlock_ops = { |
728 | .rpc_call_done = nlmclnt_unlock_callback, | |
92737230 | 729 | .rpc_release = nlmclnt_rpc_release, |
963d8fe5 TM |
730 | }; |
731 | ||
1da177e4 LT |
732 | /* |
733 | * Cancel a blocked lock request. | |
734 | * We always use an async RPC call for this in order not to hang a | |
735 | * process that has been Ctrl-C'ed. | |
736 | */ | |
16fb2425 | 737 | static int nlmclnt_cancel(struct nlm_host *host, int block, struct file_lock *fl) |
1da177e4 LT |
738 | { |
739 | struct nlm_rqst *req; | |
6b4b3a75 TM |
740 | int status; |
741 | ||
742 | dprintk("lockd: blocking lock attempt was interrupted by a signal.\n" | |
743 | " Attempting to cancel lock.\n"); | |
1da177e4 | 744 | |
92737230 | 745 | req = nlm_alloc_call(nlm_get_host(host)); |
1da177e4 LT |
746 | if (!req) |
747 | return -ENOMEM; | |
1da177e4 LT |
748 | req->a_flags = RPC_TASK_ASYNC; |
749 | ||
750 | nlmclnt_setlockargs(req, fl); | |
16fb2425 | 751 | req->a_args.block = block; |
1da177e4 | 752 | |
6b4b3a75 | 753 | atomic_inc(&req->a_count); |
d11d10cc TM |
754 | status = nlmclnt_async_call(nfs_file_cred(fl->fl_file), req, |
755 | NLMPROC_CANCEL, &nlmclnt_cancel_ops); | |
6b4b3a75 TM |
756 | if (status == 0 && req->a_res.status == nlm_lck_denied) |
757 | status = -ENOLCK; | |
758 | nlm_release_call(req); | |
759 | return status; | |
1da177e4 LT |
760 | } |
761 | ||
963d8fe5 | 762 | static void nlmclnt_cancel_callback(struct rpc_task *task, void *data) |
1da177e4 | 763 | { |
963d8fe5 | 764 | struct nlm_rqst *req = data; |
e8c5c045 | 765 | u32 status = ntohl(req->a_res.status); |
1da177e4 LT |
766 | |
767 | if (RPC_ASSASSINATED(task)) | |
768 | goto die; | |
769 | ||
770 | if (task->tk_status < 0) { | |
771 | dprintk("lockd: CANCEL call error %d, retrying.\n", | |
772 | task->tk_status); | |
773 | goto retry_cancel; | |
774 | } | |
775 | ||
c041b5ff | 776 | dprintk("lockd: cancel status %u (task %u)\n", |
e8c5c045 | 777 | status, task->tk_pid); |
1da177e4 | 778 | |
e8c5c045 | 779 | switch (status) { |
1da177e4 LT |
780 | case NLM_LCK_GRANTED: |
781 | case NLM_LCK_DENIED_GRACE_PERIOD: | |
35576cba | 782 | case NLM_LCK_DENIED: |
1da177e4 LT |
783 | /* Everything's good */ |
784 | break; | |
785 | case NLM_LCK_DENIED_NOLOCKS: | |
786 | dprintk("lockd: CANCEL failed (server has no locks)\n"); | |
787 | goto retry_cancel; | |
788 | default: | |
789 | printk(KERN_NOTICE "lockd: weird return %d for CANCEL call\n", | |
e8c5c045 | 790 | status); |
1da177e4 LT |
791 | } |
792 | ||
793 | die: | |
1da177e4 LT |
794 | return; |
795 | ||
796 | retry_cancel: | |
aaaa9942 TM |
797 | /* Don't ever retry more than 3 times */ |
798 | if (req->a_retries++ >= NLMCLNT_MAX_RETRIES) | |
799 | goto die; | |
a86dc496 | 800 | lock_kernel(); |
1da177e4 | 801 | nlm_rebind_host(req->a_host); |
a86dc496 | 802 | unlock_kernel(); |
1da177e4 LT |
803 | rpc_restart_call(task); |
804 | rpc_delay(task, 30 * HZ); | |
805 | } | |
806 | ||
963d8fe5 TM |
807 | static const struct rpc_call_ops nlmclnt_cancel_ops = { |
808 | .rpc_call_done = nlmclnt_cancel_callback, | |
92737230 | 809 | .rpc_release = nlmclnt_rpc_release, |
963d8fe5 TM |
810 | }; |
811 | ||
1da177e4 LT |
812 | /* |
813 | * Convert an NLM status code to a generic kernel errno | |
814 | */ | |
815 | static int | |
e8c5c045 | 816 | nlm_stat_to_errno(__be32 status) |
1da177e4 | 817 | { |
e8c5c045 | 818 | switch(ntohl(status)) { |
1da177e4 LT |
819 | case NLM_LCK_GRANTED: |
820 | return 0; | |
821 | case NLM_LCK_DENIED: | |
822 | return -EAGAIN; | |
823 | case NLM_LCK_DENIED_NOLOCKS: | |
824 | case NLM_LCK_DENIED_GRACE_PERIOD: | |
825 | return -ENOLCK; | |
826 | case NLM_LCK_BLOCKED: | |
827 | printk(KERN_NOTICE "lockd: unexpected status NLM_BLOCKED\n"); | |
828 | return -ENOLCK; | |
829 | #ifdef CONFIG_LOCKD_V4 | |
830 | case NLM_DEADLCK: | |
831 | return -EDEADLK; | |
832 | case NLM_ROFS: | |
833 | return -EROFS; | |
834 | case NLM_STALE_FH: | |
835 | return -ESTALE; | |
836 | case NLM_FBIG: | |
837 | return -EOVERFLOW; | |
838 | case NLM_FAILED: | |
839 | return -ENOLCK; | |
840 | #endif | |
841 | } | |
842 | printk(KERN_NOTICE "lockd: unexpected server status %d\n", status); | |
843 | return -ENOLCK; | |
844 | } |