/* -*- mode: c; c-basic-offset: 8; -*-
 * vim: noexpandtab sw=8 ts=8 sts=0:
 *
 * dlmast.c
 *
 * AST and BAST functionality for local and remote nodes
 *
 * Copyright (C) 2004 Oracle. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/highmem.h>
#include <linux/utsname.h>
#include <linux/init.h>
#include <linux/sysctl.h>
#include <linux/random.h>
#include <linux/blkdev.h>
#include <linux/socket.h>
#include <linux/inet.h>
#include <linux/spinlock.h>

#include "cluster/heartbeat.h"
#include "cluster/nodemanager.h"
#include "cluster/tcp.h"
#include "cluster/endian.h"

#include "dlmcommon.h"

#define MLOG_MASK_PREFIX ML_DLM
#include "cluster/masklog.h"

static void dlm_update_lvb(struct dlm_ctxt *dlm, struct dlm_lock_resource *res,
                           struct dlm_lock *lock);
static int dlm_should_cancel_bast(struct dlm_ctxt *dlm, struct dlm_lock *lock);

/* Should be called as an ast gets queued to see if the new
 * lock level will obsolete a pending bast.
 * For example, if dlm_thread queued a bast for an EX lock that
 * was blocking another EX, but before sending the bast the
 * lock owner downconverted to NL, the bast is now obsolete.
 * Only the ast should be sent.
 * This is needed because the lock and convert paths can queue
 * asts out-of-band (not waiting for dlm_thread) in order to
 * allow for LKM_NOQUEUE to get immediate responses. */
static int dlm_should_cancel_bast(struct dlm_ctxt *dlm, struct dlm_lock *lock)
{
        assert_spin_locked(&dlm->ast_lock);
        assert_spin_locked(&lock->spinlock);

        if (lock->ml.highest_blocked == LKM_IVMODE)
                return 0;
        BUG_ON(lock->ml.highest_blocked == LKM_NLMODE);

        if (lock->bast_pending &&
            list_empty(&lock->bast_list))
                /* old bast already sent, ok */
                return 0;

        if (lock->ml.type == LKM_EXMODE)
                /* EX blocks anything left, any bast still valid */
                return 0;
        else if (lock->ml.type == LKM_NLMODE)
                /* NL blocks nothing, no reason to send any bast, cancel it */
                return 1;
        else if (lock->ml.highest_blocked != LKM_EXMODE)
                /* PR only blocks EX */
                return 1;

        return 0;
}
static void __dlm_queue_ast(struct dlm_ctxt *dlm, struct dlm_lock *lock)
{
        assert_spin_locked(&dlm->ast_lock);

        if (!list_empty(&lock->ast_list)) {
                mlog(ML_ERROR, "ast list not empty!! pending=%d, newlevel=%d\n",
                     lock->ast_pending, lock->ml.type);
                BUG();
        }
        BUG_ON(!list_empty(&lock->ast_list));
        if (lock->ast_pending)
                mlog(0, "lock has an ast getting flushed right now\n");

        /* putting lock on list, add a ref */
        dlm_lock_get(lock);
        spin_lock(&lock->spinlock);

        /* check to see if this ast obsoletes the bast */
        if (dlm_should_cancel_bast(dlm, lock)) {
                struct dlm_lock_resource *res = lock->lockres;
                mlog(0, "%s: cancelling bast for %.*s\n",
                     dlm->name, res->lockname.len, res->lockname.name);
                lock->bast_pending = 0;
                list_del_init(&lock->bast_list);
                lock->ml.highest_blocked = LKM_IVMODE;
                /* removing lock from list, remove a ref. guaranteed
                 * this won't be the last ref because of the get above,
                 * so res->spinlock will not be taken here */
                dlm_lock_put(lock);
                /* free up the reserved bast that we are cancelling.
                 * guaranteed that this will not be the last reserved
                 * ast because *both* an ast and a bast were reserved
                 * to get to this point. the res->spinlock will not be
                 * taken here */
                dlm_lockres_release_ast(dlm, res);
        }
        list_add_tail(&lock->ast_list, &dlm->pending_asts);
        lock->ast_pending = 1;
        spin_unlock(&lock->spinlock);
}
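
/* Locking wrapper around __dlm_queue_ast() for callers that do not
 * already hold dlm->ast_lock. */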
void dlm_queue_ast(struct dlm_ctxt *dlm, struct dlm_lock *lock)
{
        spin_lock(&dlm->ast_lock);
        __dlm_queue_ast(dlm, lock);
        spin_unlock(&dlm->ast_lock);
}
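
/* Queue a bast for this lock on dlm->pending_basts for dlm_thread to
 * deliver.  Takes a reference on the lock while it sits on the list.
 * The caller must hold dlm->ast_lock. */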
static void __dlm_queue_bast(struct dlm_ctxt *dlm, struct dlm_lock *lock)
{
        assert_spin_locked(&dlm->ast_lock);

        BUG_ON(!list_empty(&lock->bast_list));
        if (lock->bast_pending)
                mlog(0, "lock has a bast getting flushed right now\n");

        /* putting lock on list, add a ref */
        dlm_lock_get(lock);
        spin_lock(&lock->spinlock);
        list_add_tail(&lock->bast_list, &dlm->pending_basts);
        lock->bast_pending = 1;
        spin_unlock(&lock->spinlock);
}
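
/* Locking wrapper around __dlm_queue_bast() for callers that do not
 * already hold dlm->ast_lock. */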
void dlm_queue_bast(struct dlm_ctxt *dlm, struct dlm_lock *lock)
{
        spin_lock(&dlm->ast_lock);
        __dlm_queue_bast(dlm, lock);
        spin_unlock(&dlm->ast_lock);
}
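
/* Copy the lvb between the lksb and the lockres, in the direction named
 * by the DLM_LKSB_GET_LVB/DLM_LKSB_PUT_LVB flags, but only if this node
 * masters the lockres.  The lvb flags on the lksb are cleared either way. */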
static void dlm_update_lvb(struct dlm_ctxt *dlm, struct dlm_lock_resource *res,
                           struct dlm_lock *lock)
{
        struct dlm_lockstatus *lksb = lock->lksb;

        BUG_ON(!lksb);

        /* only updates if this node masters the lockres */
        if (res->owner == dlm->node_num) {
                spin_lock(&res->spinlock);
                /* check the lksb flags for the direction */
                if (lksb->flags & DLM_LKSB_GET_LVB) {
                        mlog(0, "getting lvb from lockres for %s node\n",
                             lock->ml.node == dlm->node_num ? "master" :
                             "remote");
                        memcpy(lksb->lvb, res->lvb, DLM_LVB_LEN);
                } else if (lksb->flags & DLM_LKSB_PUT_LVB) {
                        mlog(0, "setting lvb from lockres for %s node\n",
                             lock->ml.node == dlm->node_num ? "master" :
                             "remote");
                        memcpy(res->lvb, lksb->lvb, DLM_LVB_LEN);
                }
                spin_unlock(&res->spinlock);
        }

        /* reset any lvb flags on the lksb */
        lksb->flags &= ~(DLM_LKSB_PUT_LVB|DLM_LKSB_GET_LVB);
}
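
/* Deliver an ast for a lock held by this node: sync the lvb first, then
 * call the ast function registered on the lock. */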
void dlm_do_local_ast(struct dlm_ctxt *dlm, struct dlm_lock_resource *res,
                      struct dlm_lock *lock)
{
        dlm_astlockfunc_t *fn;
        struct dlm_lockstatus *lksb;

        lksb = lock->lksb;
        fn = lock->ast;
        BUG_ON(lock->ml.node != dlm->node_num);

        dlm_update_lvb(dlm, res, lock);
        (*fn)(lock->astdata);
}
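
/* Deliver an ast for a lock held by another node: sync the lvb, then
 * send a proxy ast message to the node that owns the lock. */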
int dlm_do_remote_ast(struct dlm_ctxt *dlm, struct dlm_lock_resource *res,
                      struct dlm_lock *lock)
{
        int ret;
        int lksbflags;
        struct dlm_lockstatus *lksb;

        lksb = lock->lksb;
        BUG_ON(lock->ml.node == dlm->node_num);

        lksbflags = lksb->flags;
        dlm_update_lvb(dlm, res, lock);

        /* lock request came from another node
         * go do the ast over there */
        ret = dlm_send_proxy_ast(dlm, res, lock, lksbflags);
        return ret;
}
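
/* Deliver a bast for a lock held by this node by calling its registered
 * bast function with the blocked type. */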
void dlm_do_local_bast(struct dlm_ctxt *dlm, struct dlm_lock_resource *res,
                       struct dlm_lock *lock, int blocked_type)
{
        dlm_bastlockfunc_t *fn = lock->bast;

        BUG_ON(lock->ml.node != dlm->node_num);
        BUG_ON(!fn);
        (*fn)(lock->astdata, blocked_type);
}
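
/* Network handler for DLM_PROXY_AST_MSG.  Runs on the node that holds the
 * lock: the lockres master sends this message (see dlm_send_proxy_ast_msg
 * below) when an ast or bast must fire for a lock held remotely.  Looks
 * the lock up by cookie on the converting, blocked or granted queue; for
 * an ast, moves it to the granted list and copies in the lvb if one was
 * requested; then calls dlm_do_local_ast() or dlm_do_local_bast(). */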
int dlm_proxy_ast_handler(struct o2net_msg *msg, u32 len, void *data)
{
        int ret;
        unsigned int locklen;
        struct dlm_ctxt *dlm = data;
        struct dlm_lock_resource *res = NULL;
        struct dlm_lock *lock = NULL;
        struct dlm_proxy_ast *past = (struct dlm_proxy_ast *) msg->buf;
        char *name;
        struct list_head *iter, *head = NULL;
        u64 cookie;
        u32 flags;

        if (!dlm_grab(dlm)) {
                dlm_error(DLM_REJECTED);
                return DLM_REJECTED;
        }

        mlog_bug_on_msg(!dlm_domain_fully_joined(dlm),
                        "Domain %s not fully joined!\n", dlm->name);

        name = past->name;
        locklen = past->namelen;
        cookie = be64_to_cpu(past->cookie);
        flags = be32_to_cpu(past->flags);
        if (locklen > DLM_LOCKID_NAME_MAX) {
                ret = DLM_IVBUFLEN;
                mlog(ML_ERROR, "Invalid name length in proxy ast handler!\n");
                goto leave;
        }

        if ((flags & (LKM_PUT_LVB|LKM_GET_LVB)) ==
            (LKM_PUT_LVB|LKM_GET_LVB)) {
                mlog(ML_ERROR, "both PUT and GET lvb specified\n");
                ret = DLM_BADARGS;
                goto leave;
        }

        mlog(0, "lvb: %s\n", flags & LKM_PUT_LVB ? "put lvb" :
             (flags & LKM_GET_LVB ? "get lvb" : "none"));

        mlog(0, "type=%d, blocked_type=%d\n", past->type, past->blocked_type);

        if (past->type != DLM_AST &&
            past->type != DLM_BAST) {
                mlog(ML_ERROR, "Unknown ast type! %d, cookie=%u:%llu, "
                     "name=%.*s\n", past->type,
                     dlm_get_lock_cookie_node(cookie),
                     dlm_get_lock_cookie_seq(cookie),
                     locklen, name);
                ret = DLM_IVLOCKID;
                goto leave;
        }

        res = dlm_lookup_lockres(dlm, name, locklen);
        if (!res) {
                mlog(ML_ERROR, "got %sast for unknown lockres! "
                     "cookie=%u:%llu, name=%.*s, namelen=%u\n",
                     past->type == DLM_AST ? "" : "b",
                     dlm_get_lock_cookie_node(cookie),
                     dlm_get_lock_cookie_seq(cookie),
                     locklen, name, locklen);
                ret = DLM_IVLOCKID;
                goto leave;
        }
        /* cannot get a proxy ast message if this node owns it */
        BUG_ON(res->owner == dlm->node_num);

        mlog(0, "lockres %.*s\n", res->lockname.len, res->lockname.name);

        spin_lock(&res->spinlock);
        if (res->state & DLM_LOCK_RES_RECOVERING) {
                mlog(0, "responding with DLM_RECOVERING!\n");
                ret = DLM_RECOVERING;
                goto unlock_out;
        }
        if (res->state & DLM_LOCK_RES_MIGRATING) {
                mlog(0, "responding with DLM_MIGRATING!\n");
                ret = DLM_MIGRATING;
                goto unlock_out;
        }
        /* try convert queue for both ast/bast */
        head = &res->converting;
        lock = NULL;
        list_for_each(iter, head) {
                lock = list_entry(iter, struct dlm_lock, list);
                if (be64_to_cpu(lock->ml.cookie) == cookie)
                        goto do_ast;
        }

        /* if not on convert, try blocked for ast, granted for bast */
        if (past->type == DLM_AST)
                head = &res->blocked;
        else
                head = &res->granted;

        list_for_each(iter, head) {
                lock = list_entry(iter, struct dlm_lock, list);
                if (be64_to_cpu(lock->ml.cookie) == cookie)
                        goto do_ast;
        }

        mlog(ML_ERROR, "got %sast for unknown lock! cookie=%u:%llu, "
             "name=%.*s, namelen=%u\n",
             past->type == DLM_AST ? "" : "b",
             dlm_get_lock_cookie_node(cookie),
             dlm_get_lock_cookie_seq(cookie),
             locklen, name, locklen);

        ret = DLM_NORMAL;
unlock_out:
        spin_unlock(&res->spinlock);
        goto leave;
do_ast:
        ret = DLM_NORMAL;
        if (past->type == DLM_AST) {
                /* do not alter lock refcount. switching lists. */
                list_move_tail(&lock->list, &res->granted);
                mlog(0, "ast: adding to granted list... type=%d, "
                     "convert_type=%d\n", lock->ml.type, lock->ml.convert_type);
                if (lock->ml.convert_type != LKM_IVMODE) {
                        lock->ml.type = lock->ml.convert_type;
                        lock->ml.convert_type = LKM_IVMODE;
                } else {
                        /* should already be on the granted list */
                }

                lock->lksb->status = DLM_NORMAL;

                /* if we requested the lvb, fetch it into our lksb now */
                if (flags & LKM_GET_LVB) {
                        BUG_ON(!(lock->lksb->flags & DLM_LKSB_GET_LVB));
                        memcpy(lock->lksb->lvb, past->lvb, DLM_LVB_LEN);
                }
        }
        spin_unlock(&res->spinlock);

        if (past->type == DLM_AST)
                dlm_do_local_ast(dlm, res, lock);
        else
                dlm_do_local_bast(dlm, res, lock, past->blocked_type);

leave:
        if (res)
                dlm_lockres_put(res);

        dlm_put(dlm);
        return ret;
}
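
/* Build and send a DLM_PROXY_AST_MSG for this lock to the node that holds
 * it, appending the lvb when DLM_LKSB_GET_LVB was requested, and check the
 * status returned by the remote node's proxy ast handler. */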
int dlm_send_proxy_ast_msg(struct dlm_ctxt *dlm, struct dlm_lock_resource *res,
                           struct dlm_lock *lock, int msg_type,
                           int blocked_type, int flags)
{
        int ret = 0;
        struct dlm_proxy_ast past;
        struct kvec vec[2];
        size_t veclen = 1;
        int status;

        mlog_entry("res %.*s, to=%u, type=%d, blocked_type=%d\n",
                   res->lockname.len, res->lockname.name, lock->ml.node,
                   msg_type, blocked_type);

        memset(&past, 0, sizeof(struct dlm_proxy_ast));
        past.node_idx = dlm->node_num;
        past.type = msg_type;
        past.blocked_type = blocked_type;
        past.namelen = res->lockname.len;
        memcpy(past.name, res->lockname.name, past.namelen);
        past.cookie = lock->ml.cookie;

        vec[0].iov_len = sizeof(struct dlm_proxy_ast);
        vec[0].iov_base = &past;
        if (flags & DLM_LKSB_GET_LVB) {
                mlog(0, "returning requested LVB data\n");
                be32_add_cpu(&past.flags, LKM_GET_LVB);
                vec[1].iov_len = DLM_LVB_LEN;
                vec[1].iov_base = lock->lksb->lvb;
                veclen++;
        }

        ret = o2net_send_message_vec(DLM_PROXY_AST_MSG, dlm->key, vec, veclen,
                                     lock->ml.node, &status);
        if (ret < 0)
                mlog_errno(ret);
        else {
                if (status == DLM_RECOVERING) {
                        mlog(ML_ERROR, "sent AST to node %u, it thinks this "
                             "node is dead!\n", lock->ml.node);
                        BUG();
                } else if (status == DLM_MIGRATING) {
                        mlog(ML_ERROR, "sent AST to node %u, it returned "
                             "DLM_MIGRATING!\n", lock->ml.node);
                        BUG();
                } else if (status != DLM_NORMAL) {
                        mlog(ML_ERROR, "AST to node %u returned %d!\n",
                             lock->ml.node, status);
                        /* ignore this error, but log it */
                }
                ret = 0;
        }

        return ret;
}