/* -*- mode: c; c-basic-offset: 8; -*-
 * vim: noexpandtab sw=8 ts=8 sts=0:
 *
 * dlmlock.c
 *
 * underlying calls for lock creation
 *
 * Copyright (C) 2004 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 */
#include <linux/module.h>
#include <linux/fs.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/highmem.h>
#include <linux/utsname.h>
#include <linux/init.h>
#include <linux/sysctl.h>
#include <linux/random.h>
#include <linux/blkdev.h>
#include <linux/socket.h>
#include <linux/inet.h>
#include <linux/spinlock.h>
#include <linux/delay.h>
44 #include "cluster/heartbeat.h"
45 #include "cluster/nodemanager.h"
46 #include "cluster/tcp.h"
49 #include "dlmcommon.h"
51 #include "dlmconvert.h"
53 #define MLOG_MASK_PREFIX ML_DLM
54 #include "cluster/masklog.h"
static struct kmem_cache *dlm_lock_cache = NULL;

static DEFINE_SPINLOCK(dlm_cookie_lock);
static u64 dlm_next_cookie = 1;
static enum dlm_status dlm_send_remote_lock_request(struct dlm_ctxt *dlm,
                                               struct dlm_lock_resource *res,
                                               struct dlm_lock *lock, int flags);
static void dlm_init_lock(struct dlm_lock *newlock, int type,
                          u8 node, u64 cookie);
static void dlm_lock_release(struct kref *kref);
static void dlm_lock_detach_lockres(struct dlm_lock *lock);
int dlm_init_lock_cache(void)
{
        dlm_lock_cache = kmem_cache_create("o2dlm_lock",
                                           sizeof(struct dlm_lock),
                                           0, SLAB_HWCACHE_ALIGN, NULL);
        if (dlm_lock_cache == NULL)
                return -ENOMEM;
        return 0;
}

void dlm_destroy_lock_cache(void)
{
        if (dlm_lock_cache)
                kmem_cache_destroy(dlm_lock_cache);
}
/* Tell us whether we can grant a new lock request.
 * locking:
 *   caller needs:  res->spinlock
 *   taken:         none
 *   held on exit:  none
 * returns: 1 if the lock can be granted, 0 otherwise.
 */
static int dlm_can_grant_new_lock(struct dlm_lock_resource *res,
                                  struct dlm_lock *lock)
{
        struct list_head *iter;
        struct dlm_lock *tmplock;

        list_for_each(iter, &res->granted) {
                tmplock = list_entry(iter, struct dlm_lock, list);

                if (!dlm_lock_compatible(tmplock->ml.type, lock->ml.type))
                        return 0;
        }

        list_for_each(iter, &res->converting) {
                tmplock = list_entry(iter, struct dlm_lock, list);

                if (!dlm_lock_compatible(tmplock->ml.type, lock->ml.type))
                        return 0;
        }

        return 1;
}
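/*
 * Added note (not in the original file): dlm_lock_compatible() encodes the
 * usual DLM mode-compatibility rules for the modes accepted by dlmlock()
 * below: NLMODE is compatible with everything, PRMODE is compatible with
 * PRMODE and NLMODE, and EXMODE is compatible only with NLMODE.  For
 * example, a new PRMODE request can be granted alongside existing PRMODE
 * holders on res->granted, but must wait if any EXMODE lock is already
 * granted or is queued on res->converting.
 */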
/* performs lock creation at the lockres master site
 * locking:
 *   caller needs:  none
 *   taken:         takes and drops res->spinlock
 *   held on exit:  none
 * returns: DLM_NORMAL, DLM_NOTQUEUED
 */
static enum dlm_status dlmlock_master(struct dlm_ctxt *dlm,
                                      struct dlm_lock_resource *res,
                                      struct dlm_lock *lock, int flags)
{
        int call_ast = 0, kick_thread = 0;
        enum dlm_status status = DLM_NORMAL;

        mlog_entry("type=%d\n", lock->ml.type);

        spin_lock(&res->spinlock);
        /* if called from dlm_create_lock_handler, need to
         * ensure it will not sleep in dlm_wait_on_lockres */
        status = __dlm_lockres_state_to_status(res);
        if (status != DLM_NORMAL &&
            lock->ml.node != dlm->node_num) {
                /* erf.  state changed after lock was dropped. */
                spin_unlock(&res->spinlock);
                dlm_error(status);
                return status;
        }
        __dlm_wait_on_lockres(res);
        __dlm_lockres_reserve_ast(res);

        if (dlm_can_grant_new_lock(res, lock)) {
                mlog(0, "I can grant this lock right away\n");
                /* got it right away */
                lock->lksb->status = DLM_NORMAL;
                status = DLM_NORMAL;
                dlm_lock_get(lock);
                list_add_tail(&lock->list, &res->granted);

                /* for the recovery lock, we can't allow the ast
                 * to be queued since the dlmthread is already
                 * frozen.  but the recovery lock is always locked
                 * with LKM_NOQUEUE so we do not need the ast in
                 * this special case */
                if (!dlm_is_recovery_lock(res->lockname.name,
                                          res->lockname.len)) {
                        kick_thread = 1;
                        call_ast = 1;
                } else {
                        mlog(0, "%s: returning DLM_NORMAL to "
                             "node %u for reco lock\n", dlm->name,
                             lock->ml.node);
                }
        } else {
                /* for NOQUEUE request, unless we get the
                 * lock right away, return DLM_NOTQUEUED */
                if (flags & LKM_NOQUEUE) {
                        status = DLM_NOTQUEUED;
                        if (dlm_is_recovery_lock(res->lockname.name,
                                                 res->lockname.len)) {
                                mlog(0, "%s: returning NOTQUEUED to "
                                     "node %u for reco lock\n", dlm->name,
                                     lock->ml.node);
                        }
                } else {
                        dlm_lock_get(lock);
                        list_add_tail(&lock->list, &res->blocked);
                        kick_thread = 1;
                }
        }
        /* reduce the inflight count, this may result in the lockres
         * being purged below during calc_usage */
        if (lock->ml.node == dlm->node_num)
                dlm_lockres_drop_inflight_ref(dlm, res);

        spin_unlock(&res->spinlock);
        wake_up(&res->wq);

        /* either queue the ast or release it */
        if (call_ast)
                dlm_queue_ast(dlm, lock);
        else
                dlm_lockres_release_ast(dlm, res);

        dlm_lockres_calc_usage(dlm, res);
        if (kick_thread)
                dlm_kick_thread(dlm, res);

        return status;
}
void dlm_revert_pending_lock(struct dlm_lock_resource *res,
                             struct dlm_lock *lock)
{
        /* remove from local queue if it failed */
        list_del_init(&lock->list);
        lock->lksb->flags &= ~DLM_LKSB_GET_LVB;
}
/*
 * locking:
 *   caller needs:  none
 *   taken:         takes and drops res->spinlock
 *   held on exit:  none
 * returns: DLM_DENIED, DLM_RECOVERING, or net status
 */
static enum dlm_status dlmlock_remote(struct dlm_ctxt *dlm,
                                      struct dlm_lock_resource *res,
                                      struct dlm_lock *lock, int flags)
{
        enum dlm_status status = DLM_DENIED;
        int lockres_changed = 1;

        mlog_entry("type=%d\n", lock->ml.type);
        mlog(0, "lockres %.*s, flags = 0x%x\n", res->lockname.len,
             res->lockname.name, flags);

        spin_lock(&res->spinlock);

        /* will exit this call with spinlock held */
        __dlm_wait_on_lockres(res);
        res->state |= DLM_LOCK_RES_IN_PROGRESS;

        /* add lock to local (secondary) queue */
        dlm_lock_get(lock);
        list_add_tail(&lock->list, &res->blocked);
        lock->lock_pending = 1;
        spin_unlock(&res->spinlock);

        /* spec seems to say that you will get DLM_NORMAL when the lock
         * has been queued, meaning we need to wait for a reply here. */
        status = dlm_send_remote_lock_request(dlm, res, lock, flags);

        spin_lock(&res->spinlock);
        res->state &= ~DLM_LOCK_RES_IN_PROGRESS;
        lock->lock_pending = 0;
        if (status != DLM_NORMAL) {
                if (status == DLM_RECOVERING &&
                    dlm_is_recovery_lock(res->lockname.name,
                                         res->lockname.len)) {
                        /* recovery lock was mastered by dead node.
                         * we need to have calc_usage shoot down this
                         * lockres and completely remaster it. */
                        mlog(0, "%s: recovery lock was owned by "
                             "dead node %u, remaster it now.\n",
                             dlm->name, res->owner);
                } else if (status != DLM_NOTQUEUED) {
                        /*
                         * DO NOT call calc_usage, as this would unhash
                         * the remote lockres before we ever get to use
                         * it.  treat as if we never made any change to
                         * the lockres.
                         */
                        lockres_changed = 0;
                        dlm_error(status);
                }
                dlm_revert_pending_lock(res, lock);
                dlm_lock_put(lock);
        } else if (dlm_is_recovery_lock(res->lockname.name,
                                        res->lockname.len)) {
                /* special case for the $RECOVERY lock.
                 * there will never be an AST delivered to put
                 * this lock on the proper secondary queue
                 * (granted), so do it manually. */
                mlog(0, "%s: $RECOVERY lock for this node (%u) is "
                     "mastered by %u; got lock, manually granting (no ast)\n",
                     dlm->name, dlm->node_num, res->owner);
                list_move_tail(&lock->list, &res->granted);
        }
        spin_unlock(&res->spinlock);

        if (lockres_changed)
                dlm_lockres_calc_usage(dlm, res);

        wake_up(&res->wq);
        return status;
}
/* for remote lock creation.
 * locking:
 *   caller needs:  none, but need res->state & DLM_LOCK_RES_IN_PROGRESS
 *   taken:         none
 *   held on exit:  none
 * returns: DLM_NOLOCKMGR, or net status
 */
static enum dlm_status dlm_send_remote_lock_request(struct dlm_ctxt *dlm,
                                               struct dlm_lock_resource *res,
                                               struct dlm_lock *lock, int flags)
{
        struct dlm_create_lock create;
        int tmpret, status = 0;
        enum dlm_status ret;

        mlog_entry_void();

        memset(&create, 0, sizeof(create));
        create.node_idx = dlm->node_num;
        create.requested_type = lock->ml.type;
        create.cookie = lock->ml.cookie;
        create.namelen = res->lockname.len;
        create.flags = cpu_to_be32(flags);
        memcpy(create.name, res->lockname.name, create.namelen);

        tmpret = o2net_send_message(DLM_CREATE_LOCK_MSG, dlm->key, &create,
                                    sizeof(create), res->owner, &status);
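        /*
         * Added note (not in the original file): tmpret is the o2net
         * transport result (0 on success, negative errno on failure), while
         * "status" is filled in with the remote node's handler return value
         * and is already an enum dlm_status, so it is copied straight into
         * "ret" below.
         */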
        if (tmpret >= 0) {
                // successfully sent and received
                ret = status;  // this is already a dlm_status
                if (ret == DLM_REJECTED) {
                        mlog(ML_ERROR, "%s:%.*s: BUG.  this is a stale lockres "
                             "no longer owned by %u.  that node is coming back "
                             "up currently.\n", dlm->name, create.namelen,
                             create.name, res->owner);
                        dlm_print_one_lock_resource(res);
                        BUG();
                }
        } else {
                mlog_errno(tmpret);
                if (dlm_is_host_down(tmpret)) {
                        ret = DLM_RECOVERING;
                        mlog(0, "node %u died so returning DLM_RECOVERING "
                             "from lock message!\n", res->owner);
                } else {
                        ret = dlm_err_to_dlm_status(tmpret);
                }
        }

        return ret;
}
void dlm_lock_get(struct dlm_lock *lock)
{
        kref_get(&lock->lock_refs);
}

void dlm_lock_put(struct dlm_lock *lock)
{
        kref_put(&lock->lock_refs, dlm_lock_release);
}
static void dlm_lock_release(struct kref *kref)
{
        struct dlm_lock *lock;

        lock = container_of(kref, struct dlm_lock, lock_refs);

        BUG_ON(!list_empty(&lock->list));
        BUG_ON(!list_empty(&lock->ast_list));
        BUG_ON(!list_empty(&lock->bast_list));
        BUG_ON(lock->ast_pending);
        BUG_ON(lock->bast_pending);

        dlm_lock_detach_lockres(lock);

        if (lock->lksb_kernel_allocated) {
                mlog(0, "freeing kernel-allocated lksb\n");
                kfree(lock->lksb);
        }
        kmem_cache_free(dlm_lock_cache, lock);
}
/* associate a lock with its lockres, getting a ref on the lockres */
void dlm_lock_attach_lockres(struct dlm_lock *lock,
                             struct dlm_lock_resource *res)
{
        dlm_lockres_get(res);
        lock->lockres = res;
}

/* drop ref on lockres, if there is still one associated with lock */
static void dlm_lock_detach_lockres(struct dlm_lock *lock)
{
        struct dlm_lock_resource *res;

        res = lock->lockres;
        if (res) {
                lock->lockres = NULL;
                mlog(0, "removing lock's lockres reference\n");
                dlm_lockres_put(res);
        }
}
static void dlm_init_lock(struct dlm_lock *newlock, int type,
                          u8 node, u64 cookie)
{
        INIT_LIST_HEAD(&newlock->list);
        INIT_LIST_HEAD(&newlock->ast_list);
        INIT_LIST_HEAD(&newlock->bast_list);
        spin_lock_init(&newlock->spinlock);
        newlock->ml.type = type;
        newlock->ml.convert_type = LKM_IVMODE;
        newlock->ml.highest_blocked = LKM_IVMODE;
        newlock->ml.node = node;
        newlock->ml.pad1 = 0;
        newlock->ml.list = 0;
        newlock->ml.flags = 0;
        newlock->ast = NULL;
        newlock->bast = NULL;
        newlock->astdata = NULL;
        newlock->ml.cookie = cpu_to_be64(cookie);
        newlock->ast_pending = 0;
        newlock->bast_pending = 0;
        newlock->convert_pending = 0;
        newlock->lock_pending = 0;
        newlock->unlock_pending = 0;
        newlock->cancel_pending = 0;
        newlock->lksb_kernel_allocated = 0;

        kref_init(&newlock->lock_refs);
}
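/*
 * Added note (not in the original file): when the caller of dlm_new_lock()
 * passes lksb == NULL (as dlm_create_lock_handler() does for remote
 * requests), a dlm_lockstatus is allocated on the lock's behalf and flagged
 * via lksb_kernel_allocated so that dlm_lock_release() knows to kfree() it;
 * a caller-supplied lksb is never freed by the dlm.
 */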
struct dlm_lock * dlm_new_lock(int type, u8 node, u64 cookie,
                               struct dlm_lockstatus *lksb)
{
        struct dlm_lock *lock;
        int kernel_allocated = 0;

        lock = (struct dlm_lock *) kmem_cache_zalloc(dlm_lock_cache, GFP_NOFS);
        if (!lock)
                return NULL;

        if (!lksb) {
                /* zero memory only if kernel-allocated */
                lksb = kzalloc(sizeof(*lksb), GFP_NOFS);
                if (!lksb) {
                        kmem_cache_free(dlm_lock_cache, lock);
                        return NULL;
                }
                kernel_allocated = 1;
        }

        dlm_init_lock(lock, type, node, cookie);
        if (kernel_allocated)
                lock->lksb_kernel_allocated = 1;
        lock->lksb = lksb;
        lksb->lockid = lock;
        return lock;
}
/* handler for lock creation net message
 * locking:
 *   caller needs:  none
 *   taken:         takes and drops res->spinlock
 *   held on exit:  none
 * returns: DLM_NORMAL, DLM_SYSERR, DLM_IVLOCKID, DLM_NOTQUEUED
 */
int dlm_create_lock_handler(struct o2net_msg *msg, u32 len, void *data,
                            void **ret_data)
{
        struct dlm_ctxt *dlm = data;
        struct dlm_create_lock *create = (struct dlm_create_lock *)msg->buf;
        struct dlm_lock_resource *res = NULL;
        struct dlm_lock *newlock = NULL;
        struct dlm_lockstatus *lksb = NULL;
        enum dlm_status status = DLM_NORMAL;
        char *name;
        unsigned int namelen;

        BUG_ON(!dlm);

        mlog_entry_void();

        if (!dlm_grab(dlm))
                return DLM_REJECTED;

        name = create->name;
        namelen = create->namelen;
        status = DLM_REJECTED;
        if (!dlm_domain_fully_joined(dlm)) {
                mlog(ML_ERROR, "Domain %s not fully joined, but node %u is "
                     "sending a create_lock message for lock %.*s!\n",
                     dlm->name, create->node_idx, namelen, name);
                dlm_error(status);
                goto leave;
        }

        status = DLM_IVBUFLEN;
        if (namelen > DLM_LOCKID_NAME_MAX) {
                dlm_error(status);
                goto leave;
        }

        status = DLM_SYSERR;
        newlock = dlm_new_lock(create->requested_type,
                               create->node_idx,
                               be64_to_cpu(create->cookie), NULL);
        if (!newlock) {
                dlm_error(status);
                goto leave;
        }

        lksb = newlock->lksb;

        if (be32_to_cpu(create->flags) & LKM_GET_LVB) {
                lksb->flags |= DLM_LKSB_GET_LVB;
                mlog(0, "set DLM_LKSB_GET_LVB flag\n");
        }

        status = DLM_IVLOCKID;
        res = dlm_lookup_lockres(dlm, name, namelen);
        if (!res) {
                dlm_error(status);
                goto leave;
        }

        spin_lock(&res->spinlock);
        status = __dlm_lockres_state_to_status(res);
        spin_unlock(&res->spinlock);

        if (status != DLM_NORMAL) {
                mlog(0, "lockres recovering/migrating/in-progress\n");
                goto leave;
        }

        dlm_lock_attach_lockres(newlock, res);

        status = dlmlock_master(dlm, res, newlock, be32_to_cpu(create->flags));
leave:
        if (status != DLM_NORMAL)
                if (newlock)
                        dlm_lock_put(newlock);

        if (res)
                dlm_lockres_put(res);

        dlm_put(dlm);

        return status;
}
/* fetch next node-local (u8 nodenum + u56 cookie) into u64 */
static inline void dlm_get_next_cookie(u8 node_num, u64 *cookie)
{
        u64 tmpnode = node_num;

        /* shift single byte of node num into top 8 bits */
        tmpnode <<= 56;

        spin_lock(&dlm_cookie_lock);
        *cookie = (dlm_next_cookie | tmpnode);
        if (++dlm_next_cookie & 0xff00000000000000ull) {
                mlog(0, "This node's cookie will now wrap!\n");
                dlm_next_cookie = 1;
        }
        spin_unlock(&dlm_cookie_lock);
}
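/*
 * Added example (not in the original file): with node_num = 3 and
 * dlm_next_cookie = 0x2a, the cookie handed out above is
 * 0x030000000000002a -- the node number occupies the top 8 bits and the
 * per-node sequence number the low 56 bits, so cookies generated by
 * different nodes can never collide.
 */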
enum dlm_status dlmlock(struct dlm_ctxt *dlm, int mode,
                        struct dlm_lockstatus *lksb, int flags,
                        const char *name, int namelen, dlm_astlockfunc_t *ast,
                        void *data, dlm_bastlockfunc_t *bast)
{
        enum dlm_status status;
        struct dlm_lock_resource *res = NULL;
        struct dlm_lock *lock = NULL;
        int convert = 0, recovery = 0;

        /* yes this function is a mess.
         * TODO: clean this up.  lots of common code in the
         * lock and convert paths, especially in the retry blocks */
        if (!lksb) {
                dlm_error(DLM_BADARGS);
                return DLM_BADARGS;
        }

        status = DLM_BADPARAM;
        if (mode != LKM_EXMODE && mode != LKM_PRMODE && mode != LKM_NLMODE) {
                dlm_error(status);
                goto error;
        }

        if (flags & ~LKM_VALID_FLAGS) {
                dlm_error(status);
                goto error;
        }

        convert = (flags & LKM_CONVERT);
        recovery = (flags & LKM_RECOVERY);

        if (recovery &&
            (!dlm_is_recovery_lock(name, namelen) || convert) ) {
                dlm_error(status);
                goto error;
        }
        if (convert && (flags & LKM_LOCAL)) {
                mlog(ML_ERROR, "strange LOCAL convert request!\n");
                goto error;
        }
        if (convert) {
                /* CONVERT request */

                /* if converting, must pass in a valid dlm_lock */
                lock = lksb->lockid;
                if (!lock) {
                        mlog(ML_ERROR, "NULL lock pointer in convert "
                             "request\n");
                        goto error;
                }

                res = lock->lockres;
                if (!res) {
                        mlog(ML_ERROR, "NULL lockres pointer in convert "
                             "request\n");
                        goto error;
                }
                dlm_lockres_get(res);

                /* XXX: for ocfs2 purposes, the ast/bast/astdata/lksb are
                 * static after the original lock call.  convert requests will
                 * ensure that everything is the same, or return DLM_BADARGS.
                 * this means that DLM_DENIED_NOASTS will never be returned.
                 */
                if (lock->lksb != lksb || lock->ast != ast ||
                    lock->bast != bast || lock->astdata != data) {
                        status = DLM_BADARGS;
                        mlog(ML_ERROR, "new args:  lksb=%p, ast=%p, bast=%p, "
                             "astdata=%p\n", lksb, ast, bast, data);
                        mlog(ML_ERROR, "orig args: lksb=%p, ast=%p, bast=%p, "
                             "astdata=%p\n", lock->lksb, lock->ast,
                             lock->bast, lock->astdata);
                        goto error;
                }
retry_convert:
                dlm_wait_for_recovery(dlm);

                if (res->owner == dlm->node_num)
                        status = dlmconvert_master(dlm, res, lock, flags, mode);
                else
                        status = dlmconvert_remote(dlm, res, lock, flags, mode);
                if (status == DLM_RECOVERING || status == DLM_MIGRATING ||
                    status == DLM_FORWARD) {
                        /* for now, see how this works without sleeping
                         * and just retry right away.  I suspect the reco
                         * or migration will complete fast enough that
                         * no waiting will be necessary */
                        mlog(0, "retrying convert with migration/recovery/"
                             "in-progress\n");
                        msleep(100);
                        goto retry_convert;
                }
        } else {
                u64 tmpcookie;
                /* LOCK request */
                status = DLM_BADARGS;
                if (!name) {
                        dlm_error(status);
                        goto error;
                }

                status = DLM_IVBUFLEN;
                if (namelen > DLM_LOCKID_NAME_MAX || namelen < 1) {
                        dlm_error(status);
                        goto error;
                }

                dlm_get_next_cookie(dlm->node_num, &tmpcookie);
                lock = dlm_new_lock(mode, dlm->node_num, tmpcookie, lksb);
                if (!lock) {
                        dlm_error(status);
                        goto error;
                }

                if (!recovery)
                        dlm_wait_for_recovery(dlm);

                /* find or create the lock resource */
                res = dlm_get_lock_resource(dlm, name, namelen, flags);
                if (!res) {
                        status = DLM_IVLOCKID;
                        dlm_error(status);
                        goto error;
                }

                mlog(0, "type=%d, flags = 0x%x\n", mode, flags);
                mlog(0, "creating lock: lock=%p res=%p\n", lock, res);

                dlm_lock_attach_lockres(lock, res);
                lock->ast = ast;
                lock->bast = bast;
                lock->astdata = data;

retry_lock:
                if (flags & LKM_VALBLK) {
                        mlog(0, "LKM_VALBLK passed by caller\n");

                        /* LVB requests for non PR, PW or EX locks are
                         * ignored. */
                        if (mode < LKM_PRMODE)
                                flags &= ~LKM_VALBLK;
                        else {
                                flags |= LKM_GET_LVB;
                                lock->lksb->flags |= DLM_LKSB_GET_LVB;
                        }
                }

                if (res->owner == dlm->node_num)
                        status = dlmlock_master(dlm, res, lock, flags);
                else
                        status = dlmlock_remote(dlm, res, lock, flags);

                if (status == DLM_RECOVERING || status == DLM_MIGRATING ||
                    status == DLM_FORWARD) {
                        mlog(0, "retrying lock with migration/"
                             "recovery/in progress\n");
                        msleep(100);
                        /* no waiting for dlm_reco_thread */
                        if (recovery) {
                                if (status != DLM_RECOVERING)
                                        goto retry_lock;

                                mlog(0, "%s: got RECOVERING "
                                     "for $RECOVERY lock, master "
                                     "was %u\n", dlm->name,
                                     res->owner);
                                /* wait to see the node go down, then
                                 * drop down and allow the lockres to
                                 * get cleaned up.  need to remaster. */
                                dlm_wait_for_node_death(dlm, res->owner,
                                                DLM_NODE_DEATH_WAIT_MAX);
                        } else {
                                dlm_wait_for_recovery(dlm);
                                goto retry_lock;
                        }
                }

                if (status != DLM_NORMAL) {
                        lock->lksb->flags &= ~DLM_LKSB_GET_LVB;
                        if (status != DLM_NOTQUEUED)
                                dlm_error(status);
                        goto error;
                }
        }
error:
        if (status != DLM_NORMAL) {
                if (lock && !convert)
                        dlm_lock_put(lock);
                // this is kind of unnecessary
                lksb->status = status;
        }

        /* put lockres ref from the convert path
         * or from dlm_get_lock_resource */
        if (res)
                dlm_lockres_put(res);

        return status;
}
EXPORT_SYMBOL_GPL(dlmlock);
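/*
 * Illustrative usage sketch (added by the editor, not part of the original
 * file): how a hypothetical in-kernel caller might take a new EXMODE lock
 * with dlmlock().  The lock name, callbacks and helper below are made up;
 * real callers (e.g. ocfs2's dlmglue) manage their own lksb and callbacks.
 * Kept under #if 0 so it is never compiled.
 */
#if 0
static void example_ast(void *astdata)
{
        /* called once the lock has been granted; astdata is the pointer
         * passed as the "data" argument to dlmlock() */
}

static void example_bast(void *astdata, int blocked_type)
{
        /* another node wants a mode incompatible with ours; downconvert
         * or release the lock soon */
}

static enum dlm_status example_take_lock(struct dlm_ctxt *dlm,
                                         struct dlm_lockstatus *lksb)
{
        enum dlm_status st;
        const char *name = "example_resource";

        st = dlmlock(dlm, LKM_EXMODE, lksb, LKM_NOQUEUE,
                     name, strlen(name), example_ast, lksb, example_bast);
        if (st != DLM_NORMAL)
                mlog(ML_ERROR, "dlmlock failed: %d\n", st);
        return st;
}
#endif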