/* -*- mode: c; c-basic-offset: 8; -*-
 * vim: noexpandtab sw=8 ts=8 sts=0:
 *
 * dlmrecovery.c
 *
 * recovery stuff
 *
 * Copyright (C) 2004 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 *
 */
#include <linux/module.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/highmem.h>
#include <linux/utsname.h>
#include <linux/init.h>
#include <linux/sysctl.h>
#include <linux/random.h>
#include <linux/blkdev.h>
#include <linux/socket.h>
#include <linux/inet.h>
#include <linux/timer.h>
#include <linux/kthread.h>
#include <linux/delay.h>

#include "cluster/heartbeat.h"
#include "cluster/nodemanager.h"
#include "cluster/tcp.h"

#include "dlmcommon.h"
#include "dlmdomain.h"

#define MLOG_MASK_PREFIX (ML_DLM|ML_DLM_RECOVERY)
#include "cluster/masklog.h"
static void dlm_do_local_recovery_cleanup(struct dlm_ctxt *dlm, u8 dead_node);

static int dlm_recovery_thread(void *data);
void dlm_complete_recovery_thread(struct dlm_ctxt *dlm);
int dlm_launch_recovery_thread(struct dlm_ctxt *dlm);
void dlm_kick_recovery_thread(struct dlm_ctxt *dlm);
static int dlm_do_recovery(struct dlm_ctxt *dlm);

static int dlm_pick_recovery_master(struct dlm_ctxt *dlm);
static int dlm_remaster_locks(struct dlm_ctxt *dlm, u8 dead_node);
static int dlm_init_recovery_area(struct dlm_ctxt *dlm, u8 dead_node);
static int dlm_request_all_locks(struct dlm_ctxt *dlm,
				 u8 request_from, u8 dead_node);
static void dlm_destroy_recovery_area(struct dlm_ctxt *dlm, u8 dead_node);

static inline int dlm_num_locks_in_lockres(struct dlm_lock_resource *res);
static void dlm_init_migratable_lockres(struct dlm_migratable_lockres *mres,
					const char *lockname, int namelen,
					int total_locks, u64 cookie,
					u8 flags, u8 master);
static int dlm_send_mig_lockres_msg(struct dlm_ctxt *dlm,
				    struct dlm_migratable_lockres *mres,
				    u8 send_to,
				    struct dlm_lock_resource *res,
				    int total_locks);
static int dlm_process_recovery_data(struct dlm_ctxt *dlm,
				     struct dlm_lock_resource *res,
				     struct dlm_migratable_lockres *mres);
static int dlm_send_finalize_reco_message(struct dlm_ctxt *dlm);
static int dlm_send_all_done_msg(struct dlm_ctxt *dlm,
				 u8 dead_node, u8 send_to);
static int dlm_send_begin_reco_message(struct dlm_ctxt *dlm, u8 dead_node);
static void dlm_move_reco_locks_to_list(struct dlm_ctxt *dlm,
					struct list_head *list, u8 dead_node);
static void dlm_finish_local_lockres_recovery(struct dlm_ctxt *dlm,
					      u8 dead_node, u8 new_master);
static void dlm_reco_ast(void *astdata);
static void dlm_reco_bast(void *astdata, int blocked_type);
static void dlm_reco_unlock_ast(void *astdata, enum dlm_status st);
static void dlm_request_all_locks_worker(struct dlm_work_item *item,
					 void *data);
static void dlm_mig_lockres_worker(struct dlm_work_item *item, void *data);

static u64 dlm_get_next_mig_cookie(void);

static spinlock_t dlm_reco_state_lock = SPIN_LOCK_UNLOCKED;
static spinlock_t dlm_mig_cookie_lock = SPIN_LOCK_UNLOCKED;
static u64 dlm_mig_cookie = 1;
static u64 dlm_get_next_mig_cookie(void)
{
	u64 c;
	spin_lock(&dlm_mig_cookie_lock);
	c = dlm_mig_cookie;
	/* wrap back around to 1, skipping zero */
	if (dlm_mig_cookie == (~0ULL))
		dlm_mig_cookie = 1;
	else
		dlm_mig_cookie++;
	spin_unlock(&dlm_mig_cookie_lock);
	return c;
}
static inline void dlm_reset_recovery(struct dlm_ctxt *dlm)
{
	spin_lock(&dlm->spinlock);
	clear_bit(dlm->reco.dead_node, dlm->recovery_map);
	dlm->reco.dead_node = O2NM_INVALID_NODE_NUM;
	dlm->reco.new_master = O2NM_INVALID_NODE_NUM;
	spin_unlock(&dlm->spinlock);
}
/* Worker function used during recovery. */
void dlm_dispatch_work(void *data)
{
	struct dlm_ctxt *dlm = (struct dlm_ctxt *)data;
	LIST_HEAD(tmp_list);
	struct list_head *iter, *iter2;
	struct dlm_work_item *item;
	dlm_workfunc_t *workfunc;

	spin_lock(&dlm->work_lock);
	list_splice_init(&dlm->work_list, &tmp_list);
	spin_unlock(&dlm->work_lock);

	list_for_each_safe(iter, iter2, &tmp_list) {
		item = list_entry(iter, struct dlm_work_item, list);
		workfunc = item->func;
		list_del_init(&item->list);

		/* already have a ref on dlm to avoid having
		 * it disappear.  just double-check. */
		BUG_ON(item->dlm != dlm);

		/* this is allowed to sleep and
		 * call network stuff */
		workfunc(item, item->data);

		dlm_put(dlm);
		kfree(item);
	}
}
void dlm_kick_recovery_thread(struct dlm_ctxt *dlm)
{
	/* wake the recovery thread
	 * this will wake the reco thread in one of three places
	 * 1) sleeping with no recovery happening
	 * 2) sleeping with recovery mastered elsewhere
	 * 3) recovery mastered here, waiting on reco data */

	wake_up(&dlm->dlm_reco_thread_wq);
}
/* Launch the recovery thread */
int dlm_launch_recovery_thread(struct dlm_ctxt *dlm)
{
	mlog(0, "starting dlm recovery thread...\n");

	dlm->dlm_reco_thread_task = kthread_run(dlm_recovery_thread, dlm,
						"dlm_reco_thread");
	if (IS_ERR(dlm->dlm_reco_thread_task)) {
		mlog_errno(PTR_ERR(dlm->dlm_reco_thread_task));
		dlm->dlm_reco_thread_task = NULL;
		return -EINVAL;
	}

	return 0;
}
void dlm_complete_recovery_thread(struct dlm_ctxt *dlm)
{
	if (dlm->dlm_reco_thread_task) {
		mlog(0, "waiting for dlm recovery thread to exit\n");
		kthread_stop(dlm->dlm_reco_thread_task);
		dlm->dlm_reco_thread_task = NULL;
	}
}
/*
 * this is lame, but here's how recovery works...
 * 1) all recovery threads cluster wide will work on recovering
 *    one node at a time
 * 2) negotiate who will take over all the locks for the dead node.
 *    that's right... ALL the locks.
 * 3) once a new master is chosen, everyone scans all locks
 *    and moves aside those mastered by the dead node
 * 4) each of these locks should be locked until recovery is done
 * 5) the new master collects up all of secondary lock queue info
 *    one lock at a time, forcing each node to communicate back
 *    before continuing
 * 6) each secondary lock queue responds with the full known lock info
 * 7) once the new master has run all its locks, it sends an ALLDONE!
 *    message to everyone
 * 8) upon receiving this message, the secondary queue node unlocks
 *    and responds to the ALLDONE
 * 9) once the new master gets responses from everyone, it unlocks
 *    everything and recovery for this dead node is done
 *10) go back to 2) while there are still dead nodes
 */
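/*
 * A rough map from the steps above to the code in this file (a
 * reader's guide, not an exhaustive call graph):
 *   2)   dlm_pick_recovery_master() / dlm_send_begin_reco_message()
 *   3-4) dlm_do_local_recovery_cleanup() ->
 *        dlm_move_lockres_to_recovery_list()
 *   5)   dlm_remaster_locks() -> dlm_request_all_locks()
 *   6)   dlm_request_all_locks_worker() -> dlm_send_one_lockres() ->
 *        dlm_send_all_done_msg()
 *   7-9) dlm_send_finalize_reco_message() ->
 *        dlm_finish_local_lockres_recovery()
 */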
#define DLM_RECO_THREAD_TIMEOUT_MS (5 * 1000)

static int dlm_recovery_thread(void *data)
{
	int status;
	struct dlm_ctxt *dlm = data;
	unsigned long timeout = msecs_to_jiffies(DLM_RECO_THREAD_TIMEOUT_MS);

	mlog(0, "dlm thread running for %s...\n", dlm->name);

	while (!kthread_should_stop()) {
		if (dlm_joined(dlm)) {
			status = dlm_do_recovery(dlm);
			if (status == -EAGAIN) {
				/* do not sleep, recheck immediately. */
				continue;
			}
			if (status < 0)
				mlog_errno(status);
		}

		wait_event_interruptible_timeout(dlm->dlm_reco_thread_wq,
						 kthread_should_stop(),
						 timeout);
	}

	mlog(0, "quitting DLM recovery thread\n");
	return 0;
}
/* returns true when the recovery master has contacted us */
static int dlm_reco_master_ready(struct dlm_ctxt *dlm)
{
	int ready;
	spin_lock(&dlm->spinlock);
	ready = (dlm->reco.new_master != O2NM_INVALID_NODE_NUM);
	spin_unlock(&dlm->spinlock);
	return ready;
}
/* returns true if node is no longer in the domain
 * could be dead or just not joined */
int dlm_is_node_dead(struct dlm_ctxt *dlm, u8 node)
{
	int dead;
	spin_lock(&dlm->spinlock);
	dead = !test_bit(node, dlm->domain_map);
	spin_unlock(&dlm->spinlock);
	return dead;
}
int dlm_wait_for_node_death(struct dlm_ctxt *dlm, u8 node, int timeout)
{
	if (timeout) {
		mlog(ML_NOTICE, "%s: waiting %dms for notification of "
		     "death of node %u\n", dlm->name, timeout, node);
		wait_event_timeout(dlm->dlm_reco_thread_wq,
				   dlm_is_node_dead(dlm, node),
				   msecs_to_jiffies(timeout));
	} else {
		mlog(ML_NOTICE, "%s: waiting indefinitely for notification "
		     "of death of node %u\n", dlm->name, node);
		wait_event(dlm->dlm_reco_thread_wq,
			   dlm_is_node_dead(dlm, node));
	}
	/* for now, return 0 */
	return 0;
}
/* callers of the top-level api calls (dlmlock/dlmunlock) should
 * block on the dlm->reco.event when recovery is in progress.
 * the dlm recovery thread will set this state when it begins
 * recovering a dead node (as the new master or not) and clear
 * the state and wake as soon as all affected lock resources have
 * been marked with the RECOVERY flag */
static int dlm_in_recovery(struct dlm_ctxt *dlm)
{
	int in_recovery;
	spin_lock(&dlm->spinlock);
	in_recovery = !!(dlm->reco.state & DLM_RECO_STATE_ACTIVE);
	spin_unlock(&dlm->spinlock);
	return in_recovery;
}

void dlm_wait_for_recovery(struct dlm_ctxt *dlm)
{
	wait_event(dlm->reco.event, !dlm_in_recovery(dlm));
}
static void dlm_begin_recovery(struct dlm_ctxt *dlm)
{
	spin_lock(&dlm->spinlock);
	BUG_ON(dlm->reco.state & DLM_RECO_STATE_ACTIVE);
	dlm->reco.state |= DLM_RECO_STATE_ACTIVE;
	spin_unlock(&dlm->spinlock);
}

static void dlm_end_recovery(struct dlm_ctxt *dlm)
{
	spin_lock(&dlm->spinlock);
	BUG_ON(!(dlm->reco.state & DLM_RECO_STATE_ACTIVE));
	dlm->reco.state &= ~DLM_RECO_STATE_ACTIVE;
	spin_unlock(&dlm->spinlock);
	wake_up(&dlm->reco.event);
}
static int dlm_do_recovery(struct dlm_ctxt *dlm)
{
	int status = 0;
	int ret;

	spin_lock(&dlm->spinlock);

	/* check to see if the new master has died */
	if (dlm->reco.new_master != O2NM_INVALID_NODE_NUM &&
	    test_bit(dlm->reco.new_master, dlm->recovery_map)) {
		mlog(0, "new master %u died while recovering %u!\n",
		     dlm->reco.new_master, dlm->reco.dead_node);
		/* unset the new_master, leave dead_node */
		dlm->reco.new_master = O2NM_INVALID_NODE_NUM;
	}

	/* select a target to recover */
	if (dlm->reco.dead_node == O2NM_INVALID_NODE_NUM) {
		int bit;

		bit = find_next_bit(dlm->recovery_map, O2NM_MAX_NODES+1, 0);
		if (bit >= O2NM_MAX_NODES || bit < 0)
			dlm->reco.dead_node = O2NM_INVALID_NODE_NUM;
		else
			dlm->reco.dead_node = bit;
	} else if (!test_bit(dlm->reco.dead_node, dlm->recovery_map)) {
		/* BUG? */
		mlog(ML_ERROR, "dead_node %u no longer in recovery map!\n",
		     dlm->reco.dead_node);
		dlm->reco.dead_node = O2NM_INVALID_NODE_NUM;
	}

	if (dlm->reco.dead_node == O2NM_INVALID_NODE_NUM) {
		// mlog(0, "nothing to recover!  sleeping now!\n");
		spin_unlock(&dlm->spinlock);
		/* return to main thread loop and sleep. */
		return 0;
	}
	mlog(0, "recovery thread found node %u in the recovery map!\n",
	     dlm->reco.dead_node);
	spin_unlock(&dlm->spinlock);

	/* take write barrier */
	/* (stops the list reshuffling thread, proxy ast handling) */
	dlm_begin_recovery(dlm);

	if (dlm->reco.new_master == dlm->node_num)
		goto master_here;

	if (dlm->reco.new_master == O2NM_INVALID_NODE_NUM) {
		/* choose a new master, returns 0 if this node
		 * is the master, -EEXIST if it's another node.
		 * this does not return until a new master is chosen
		 * or recovery completes entirely. */
		ret = dlm_pick_recovery_master(dlm);
		if (!ret) {
			/* already notified everyone.  go. */
			goto master_here;
		}
		mlog(0, "another node will master this recovery session.\n");
	}
	mlog(0, "dlm=%s, new_master=%u, this node=%u, dead_node=%u\n",
	     dlm->name, dlm->reco.new_master,
	     dlm->node_num, dlm->reco.dead_node);

	/* it is safe to start everything back up here
	 * because all of the dead node's lock resources
	 * have been marked as in-recovery */
	dlm_end_recovery(dlm);

	/* sleep out in main dlm_recovery_thread loop. */
	return 0;

master_here:
	mlog(0, "mastering recovery of %s:%u here(this=%u)!\n",
	     dlm->name, dlm->reco.dead_node, dlm->node_num);

	status = dlm_remaster_locks(dlm, dlm->reco.dead_node);
	if (status < 0) {
		mlog(ML_ERROR, "error %d remastering locks for node %u, "
		     "retrying.\n", status, dlm->reco.dead_node);
		/* yield a bit to allow any final network messages
		 * to get handled on remaining nodes */
		msleep(100);
	} else {
		/* success!  see if any other nodes need recovery */
		mlog(0, "DONE mastering recovery of %s:%u here(this=%u)!\n",
		     dlm->name, dlm->reco.dead_node, dlm->node_num);
		dlm_reset_recovery(dlm);
	}
	dlm_end_recovery(dlm);

	/* continue and look for another dead node */
	return -EAGAIN;
}
static int dlm_remaster_locks(struct dlm_ctxt *dlm, u8 dead_node)
{
	int status = 0;
	struct dlm_reco_node_data *ndata;
	struct list_head *iter;
	int all_nodes_done;
	int destroy = 0;
	int pass = 0;

	status = dlm_init_recovery_area(dlm, dead_node);
	if (status < 0)
		goto leave;

	/* safe to access the node data list without a lock, since this
	 * process is the only one to change the list */
	list_for_each(iter, &dlm->reco.node_data) {
		ndata = list_entry(iter, struct dlm_reco_node_data, list);
		BUG_ON(ndata->state != DLM_RECO_NODE_DATA_INIT);
		ndata->state = DLM_RECO_NODE_DATA_REQUESTING;

		mlog(0, "requesting lock info from node %u\n",
		     ndata->node_num);

		if (ndata->node_num == dlm->node_num) {
			ndata->state = DLM_RECO_NODE_DATA_DONE;
			continue;
		}

		status = dlm_request_all_locks(dlm, ndata->node_num, dead_node);
		if (status < 0) {
			mlog_errno(status);
			if (dlm_is_host_down(status))
				ndata->state = DLM_RECO_NODE_DATA_DEAD;
			else {
				destroy = 1;
				goto leave;
			}
		}

		switch (ndata->state) {
			case DLM_RECO_NODE_DATA_INIT:
			case DLM_RECO_NODE_DATA_FINALIZE_SENT:
			case DLM_RECO_NODE_DATA_REQUESTED:
				BUG();
				break;
			case DLM_RECO_NODE_DATA_DEAD:
				mlog(0, "node %u died after requesting "
				     "recovery info for node %u\n",
				     ndata->node_num, dead_node);
				/* start all over */
				destroy = 1;
				status = -EAGAIN;
				goto leave;
			case DLM_RECO_NODE_DATA_REQUESTING:
				ndata->state = DLM_RECO_NODE_DATA_REQUESTED;
				mlog(0, "now receiving recovery data from "
				     "node %u for dead node %u\n",
				     ndata->node_num, dead_node);
				break;
			case DLM_RECO_NODE_DATA_RECEIVING:
				mlog(0, "already receiving recovery data from "
				     "node %u for dead node %u\n",
				     ndata->node_num, dead_node);
				break;
			case DLM_RECO_NODE_DATA_DONE:
				mlog(0, "already DONE receiving recovery data "
				     "from node %u for dead node %u\n",
				     ndata->node_num, dead_node);
				break;
		}
	}

	mlog(0, "done requesting all lock info\n");

	/* nodes should be sending reco data now
	 * just need to wait */

	while (1) {
		/* check all the nodes now to see if we are
		 * done, or if anyone died */
		all_nodes_done = 1;
		spin_lock(&dlm_reco_state_lock);
		list_for_each(iter, &dlm->reco.node_data) {
			ndata = list_entry(iter, struct dlm_reco_node_data, list);

			mlog(0, "checking recovery state of node %u\n",
			     ndata->node_num);
			switch (ndata->state) {
				case DLM_RECO_NODE_DATA_INIT:
				case DLM_RECO_NODE_DATA_REQUESTING:
					mlog(ML_ERROR, "bad ndata state for "
					     "node %u: state=%d\n",
					     ndata->node_num, ndata->state);
					BUG();
					break;
				case DLM_RECO_NODE_DATA_DEAD:
					mlog(ML_NOTICE, "node %u died after "
					     "requesting recovery info for "
					     "node %u\n", ndata->node_num,
					     dead_node);
					spin_unlock(&dlm_reco_state_lock);
					/* start all over */
					destroy = 1;
					status = -EAGAIN;
					/* instead of spinning like crazy here,
					 * wait for the domain map to catch up
					 * with the network state.  otherwise this
					 * can be hit hundreds of times before
					 * the node is really seen as dead. */
					wait_event_timeout(dlm->dlm_reco_thread_wq,
							   dlm_is_node_dead(dlm,
								ndata->node_num),
							   msecs_to_jiffies(1000));
					mlog(0, "waited 1 sec for %u, "
					     "dead? %s\n", ndata->node_num,
					     dlm_is_node_dead(dlm, ndata->node_num) ?
					     "yes" : "no");
					goto leave;
				case DLM_RECO_NODE_DATA_RECEIVING:
				case DLM_RECO_NODE_DATA_REQUESTED:
					all_nodes_done = 0;
					break;
				case DLM_RECO_NODE_DATA_DONE:
					break;
				case DLM_RECO_NODE_DATA_FINALIZE_SENT:
					break;
			}
		}
		spin_unlock(&dlm_reco_state_lock);

		mlog(0, "pass #%d, all_nodes_done?: %s\n", ++pass,
		     all_nodes_done ? "yes" : "no");
		if (all_nodes_done) {
			int ret;

			/* all nodes are now in DLM_RECO_NODE_DATA_DONE state
			 * just send a finalize message to everyone and
			 * clean up */
			mlog(0, "all nodes are done! send finalize\n");
			ret = dlm_send_finalize_reco_message(dlm);
			if (ret < 0)
				mlog_errno(ret);

			spin_lock(&dlm->spinlock);
			dlm_finish_local_lockres_recovery(dlm, dead_node,
							  dlm->node_num);
			spin_unlock(&dlm->spinlock);
			mlog(0, "should be done with recovery!\n");

			mlog(0, "finishing recovery of %s at %lu, "
			     "dead=%u, this=%u, new=%u\n", dlm->name,
			     jiffies, dlm->reco.dead_node,
			     dlm->node_num, dlm->reco.new_master);
			destroy = 1;
			status = ret;
			/* rescan everything marked dirty along the way */
			dlm_kick_thread(dlm, NULL);
			break;
		}
		/* wait to be signalled, with periodic timeout
		 * to check for node death */
		wait_event_interruptible_timeout(dlm->dlm_reco_thread_wq,
					 kthread_should_stop(),
					 msecs_to_jiffies(DLM_RECO_THREAD_TIMEOUT_MS));
	}

leave:
	if (destroy)
		dlm_destroy_recovery_area(dlm, dead_node);

	return status;
}
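/*
 * Per-node recovery data states as dlm_remaster_locks() normally
 * drives them (a sketch; a node death can force DEAD at any point,
 * which restarts the whole pass with -EAGAIN):
 *
 *   INIT -> REQUESTING -> REQUESTED -> RECEIVING -> DONE
 *
 * FINALIZE_SENT is entered on the remote nodes once the finalize
 * message goes out after every node reaches DONE.
 */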
static int dlm_init_recovery_area(struct dlm_ctxt *dlm, u8 dead_node)
{
	int num = 0;
	struct dlm_reco_node_data *ndata;

	spin_lock(&dlm->spinlock);
	memcpy(dlm->reco.node_map, dlm->domain_map, sizeof(dlm->domain_map));
	/* nodes can only be removed (by dying) after dropping
	 * this lock, and death will be trapped later, so this should do */
	spin_unlock(&dlm->spinlock);

	while (1) {
		num = find_next_bit(dlm->reco.node_map, O2NM_MAX_NODES, num);
		if (num >= O2NM_MAX_NODES) {
			break;
		}
		BUG_ON(num == dead_node);

		ndata = kcalloc(1, sizeof(*ndata), GFP_KERNEL);
		if (!ndata) {
			dlm_destroy_recovery_area(dlm, dead_node);
			return -ENOMEM;
		}
		ndata->node_num = num;
		ndata->state = DLM_RECO_NODE_DATA_INIT;
		spin_lock(&dlm_reco_state_lock);
		list_add_tail(&ndata->list, &dlm->reco.node_data);
		spin_unlock(&dlm_reco_state_lock);

		num++;
	}

	return 0;
}
static void dlm_destroy_recovery_area(struct dlm_ctxt *dlm, u8 dead_node)
{
	struct list_head *iter, *iter2;
	struct dlm_reco_node_data *ndata;
	LIST_HEAD(tmplist);

	spin_lock(&dlm_reco_state_lock);
	list_splice_init(&dlm->reco.node_data, &tmplist);
	spin_unlock(&dlm_reco_state_lock);

	list_for_each_safe(iter, iter2, &tmplist) {
		ndata = list_entry(iter, struct dlm_reco_node_data, list);
		list_del_init(&ndata->list);
		kfree(ndata);
	}
}
static int dlm_request_all_locks(struct dlm_ctxt *dlm, u8 request_from,
				 u8 dead_node)
{
	struct dlm_lock_request lr;
	int ret;

	mlog(0, "dlm_request_all_locks: dead node is %u, sending request "
	     "to %u\n", dead_node, request_from);

	memset(&lr, 0, sizeof(lr));
	lr.node_idx = dlm->node_num;
	lr.dead_node = dead_node;

	ret = o2net_send_message(DLM_LOCK_REQUEST_MSG, dlm->key,
				 &lr, sizeof(lr), request_from, NULL);
	/* negative status is handled by caller */
	if (ret < 0)
		mlog_errno(ret);

	/* return from here, then
	 * sleep until all received or error */
	return ret;
}
int dlm_request_all_locks_handler(struct o2net_msg *msg, u32 len, void *data)
{
	struct dlm_ctxt *dlm = data;
	struct dlm_lock_request *lr = (struct dlm_lock_request *)msg->buf;
	char *buf = NULL;
	struct dlm_work_item *item = NULL;

	if (!dlm_grab(dlm))
		return -EINVAL;

	BUG_ON(lr->dead_node != dlm->reco.dead_node);

	item = kcalloc(1, sizeof(*item), GFP_KERNEL);
	if (!item) {
		dlm_put(dlm);
		return -ENOMEM;
	}

	/* this will get freed by dlm_request_all_locks_worker */
	buf = (char *) __get_free_page(GFP_KERNEL);
	if (!buf) {
		kfree(item);
		dlm_put(dlm);
		return -ENOMEM;
	}

	/* queue up work for dlm_request_all_locks_worker */
	dlm_grab(dlm);  /* get an extra ref for the work item */
	dlm_init_work_item(dlm, item, dlm_request_all_locks_worker, buf);
	item->u.ral.reco_master = lr->node_idx;
	item->u.ral.dead_node = lr->dead_node;
	spin_lock(&dlm->work_lock);
	list_add_tail(&item->list, &dlm->work_list);
	spin_unlock(&dlm->work_lock);
	schedule_work(&dlm->dispatched_work);

	dlm_put(dlm);
	return 0;
}
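/*
 * Why defer to a worker at all?  The o2net handler above runs in a
 * context that must not block, while dumping every lock on every
 * lockres involves long lockres scans and more network traffic.  So
 * the handler only queues a dlm_work_item, and dlm_dispatch_work()
 * later runs it in process context, where sleeping and sending
 * messages are allowed (see the comment in dlm_dispatch_work()).
 */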
static void dlm_request_all_locks_worker(struct dlm_work_item *item, void *data)
{
	struct dlm_migratable_lockres *mres;
	struct dlm_lock_resource *res;
	struct dlm_ctxt *dlm;
	LIST_HEAD(resources);
	struct list_head *iter;
	int ret;
	u8 dead_node, reco_master;

	dlm = item->dlm;
	dead_node = item->u.ral.dead_node;
	reco_master = item->u.ral.reco_master;
	mres = (struct dlm_migratable_lockres *)data;

	if (dead_node != dlm->reco.dead_node ||
	    reco_master != dlm->reco.new_master) {
		/* show extra debug info if the recovery state is messed */
		mlog(ML_ERROR, "%s: bad reco state: reco(dead=%u, master=%u), "
		     "request(dead=%u, master=%u)\n",
		     dlm->name, dlm->reco.dead_node, dlm->reco.new_master,
		     dead_node, reco_master);
		mlog(ML_ERROR, "%s: name=%.*s master=%u locks=%u/%u flags=%u "
		     "entry[0]={c=%u:%llu,l=%u,f=%u,t=%d,ct=%d,hb=%d,n=%u}\n",
		     dlm->name, mres->lockname_len, mres->lockname, mres->master,
		     mres->num_locks, mres->total_locks, mres->flags,
		     dlm_get_lock_cookie_node(mres->ml[0].cookie),
		     dlm_get_lock_cookie_seq(mres->ml[0].cookie),
		     mres->ml[0].list, mres->ml[0].flags,
		     mres->ml[0].type, mres->ml[0].convert_type,
		     mres->ml[0].highest_blocked, mres->ml[0].node);
	}
	BUG_ON(dead_node != dlm->reco.dead_node);
	BUG_ON(reco_master != dlm->reco.new_master);

	/* lock resources should have already been moved to the
	 * dlm->reco.resources list.  now move items from that list
	 * to a temp list if the dead owner matches.  note that the
	 * whole cluster recovers only one node at a time, so we
	 * can safely move UNKNOWN lock resources for each recovery
	 * session. */
	dlm_move_reco_locks_to_list(dlm, &resources, dead_node);

	/* now we can begin blasting lockreses without the dlm lock */
	list_for_each(iter, &resources) {
		res = list_entry(iter, struct dlm_lock_resource, recovering);
		ret = dlm_send_one_lockres(dlm, res, mres, reco_master,
					   DLM_MRES_RECOVERY);
		if (ret < 0)
			mlog_errno(ret);
	}

	/* move the resources back to the list */
	spin_lock(&dlm->spinlock);
	list_splice_init(&resources, &dlm->reco.resources);
	spin_unlock(&dlm->spinlock);

	ret = dlm_send_all_done_msg(dlm, dead_node, reco_master);
	if (ret < 0)
		mlog_errno(ret);

	free_page((unsigned long)data);
}
static int dlm_send_all_done_msg(struct dlm_ctxt *dlm, u8 dead_node, u8 send_to)
{
	int ret, tmpret;
	struct dlm_reco_data_done done_msg;

	memset(&done_msg, 0, sizeof(done_msg));
	done_msg.node_idx = dlm->node_num;
	done_msg.dead_node = dead_node;
	mlog(0, "sending DATA DONE message to %u, "
	     "my node=%u, dead node=%u\n", send_to, done_msg.node_idx,
	     done_msg.dead_node);

	ret = o2net_send_message(DLM_RECO_DATA_DONE_MSG, dlm->key, &done_msg,
				 sizeof(done_msg), send_to, &tmpret);
	/* negative status is ignored by the caller */
	if (ret >= 0)
		ret = tmpret;
	return ret;
}
int dlm_reco_data_done_handler(struct o2net_msg *msg, u32 len, void *data)
{
	struct dlm_ctxt *dlm = data;
	struct dlm_reco_data_done *done = (struct dlm_reco_data_done *)msg->buf;
	struct list_head *iter;
	struct dlm_reco_node_data *ndata = NULL;
	int ret = -EINVAL;

	if (!dlm_grab(dlm))
		return -EINVAL;

	mlog(0, "got DATA DONE: dead_node=%u, reco.dead_node=%u, "
	     "node_idx=%u, this node=%u\n", done->dead_node,
	     dlm->reco.dead_node, done->node_idx, dlm->node_num);
	BUG_ON(done->dead_node != dlm->reco.dead_node);

	spin_lock(&dlm_reco_state_lock);
	list_for_each(iter, &dlm->reco.node_data) {
		ndata = list_entry(iter, struct dlm_reco_node_data, list);
		if (ndata->node_num != done->node_idx)
			continue;

		switch (ndata->state) {
			/* should have moved beyond INIT but not to FINALIZE yet */
			case DLM_RECO_NODE_DATA_INIT:
			case DLM_RECO_NODE_DATA_DEAD:
			case DLM_RECO_NODE_DATA_FINALIZE_SENT:
				mlog(ML_ERROR, "bad ndata state for node %u:"
				     " state=%d\n", ndata->node_num,
				     ndata->state);
				BUG();
				break;
			/* these states are possible at this point, anywhere along
			 * the line of recovery */
			case DLM_RECO_NODE_DATA_DONE:
			case DLM_RECO_NODE_DATA_RECEIVING:
			case DLM_RECO_NODE_DATA_REQUESTED:
			case DLM_RECO_NODE_DATA_REQUESTING:
				mlog(0, "node %u is DONE sending "
				     "recovery data!\n",
				     ndata->node_num);

				ndata->state = DLM_RECO_NODE_DATA_DONE;
				ret = 0;
				break;
		}
	}
	spin_unlock(&dlm_reco_state_lock);

	/* wake the recovery thread, some node is done */
	if (!ret)
		dlm_kick_recovery_thread(dlm);

	if (ret < 0)
		mlog(ML_ERROR, "failed to find recovery node data for node "
		     "%u\n", done->node_idx);
	dlm_put(dlm);

	mlog(0, "leaving reco data done handler, ret=%d\n", ret);
	return ret;
}
static void dlm_move_reco_locks_to_list(struct dlm_ctxt *dlm,
					struct list_head *list,
					u8 dead_node)
{
	struct dlm_lock_resource *res;
	struct list_head *iter, *iter2;
	struct dlm_lock *lock;

	spin_lock(&dlm->spinlock);
	list_for_each_safe(iter, iter2, &dlm->reco.resources) {
		res = list_entry(iter, struct dlm_lock_resource, recovering);
		/* always prune any $RECOVERY entries for dead nodes,
		 * otherwise hangs can occur during later recovery */
		if (dlm_is_recovery_lock(res->lockname.name,
					 res->lockname.len)) {
			spin_lock(&res->spinlock);
			list_for_each_entry(lock, &res->granted, list) {
				if (lock->ml.node == dead_node) {
					mlog(0, "AHA! there was "
					     "a $RECOVERY lock for dead "
					     "node %u (%s)!\n",
					     dead_node, dlm->name);
					list_del_init(&lock->list);
					dlm_lock_put(lock);
					break;
				}
			}
			spin_unlock(&res->spinlock);
			continue;
		}

		if (res->owner == dead_node) {
			mlog(0, "found lockres owned by dead node while "
			     "doing recovery for node %u. sending it.\n",
			     dead_node);
			list_del_init(&res->recovering);
			list_add_tail(&res->recovering, list);
		} else if (res->owner == DLM_LOCK_RES_OWNER_UNKNOWN) {
			mlog(0, "found UNKNOWN owner while doing recovery "
			     "for node %u. sending it.\n", dead_node);
			list_del_init(&res->recovering);
			list_add_tail(&res->recovering, list);
		}
	}
	spin_unlock(&dlm->spinlock);
}
static inline int dlm_num_locks_in_lockres(struct dlm_lock_resource *res)
{
	int num_locks = 0, i;
	struct list_head *iter, *queue = &res->granted;

	/* walk granted, converting and blocked: the three queues are
	 * adjacent members of dlm_lock_resource, so queue++ steps
	 * from one list_head to the next */
	for (i=0; i<3; i++) {
		list_for_each(iter, queue)
			num_locks++;
		queue++;
	}

	return num_locks;
}
static int dlm_send_mig_lockres_msg(struct dlm_ctxt *dlm,
				    struct dlm_migratable_lockres *mres,
				    u8 send_to,
				    struct dlm_lock_resource *res,
				    int total_locks)
{
	u64 mig_cookie = be64_to_cpu(mres->mig_cookie);
	int mres_total_locks = be32_to_cpu(mres->total_locks);
	int sz, ret = 0, status = 0;
	u8 orig_flags = mres->flags,
	   orig_master = mres->master;

	BUG_ON(mres->num_locks > DLM_MAX_MIGRATABLE_LOCKS);
	if (!mres->num_locks)
		return 0;

	sz = sizeof(struct dlm_migratable_lockres) +
		(mres->num_locks * sizeof(struct dlm_migratable_lock));

	/* add an all-done flag if we reached the last lock */
	orig_flags = mres->flags;
	BUG_ON(total_locks > mres_total_locks);
	if (total_locks == mres_total_locks)
		mres->flags |= DLM_MRES_ALL_DONE;

	/* send it */
	ret = o2net_send_message(DLM_MIG_LOCKRES_MSG, dlm->key, mres,
				 sz, send_to, &status);
	if (ret < 0) {
		/* XXX: negative status is not handled.
		 * this will end up killing this node. */
		mlog_errno(ret);
	} else {
		/* might get an -ENOMEM back here */
		ret = status;
		if (ret < 0) {
			mlog_errno(ret);

			if (ret == -EFAULT) {
				mlog(ML_ERROR, "node %u told me to kill "
				     "myself!\n", send_to);
				BUG();
			}
		}
	}

	/* zero and reinit the message buffer */
	dlm_init_migratable_lockres(mres, res->lockname.name,
				    res->lockname.len, mres_total_locks,
				    mig_cookie, orig_flags, orig_master);
	return ret;
}
static void dlm_init_migratable_lockres(struct dlm_migratable_lockres *mres,
					const char *lockname, int namelen,
					int total_locks, u64 cookie,
					u8 flags, u8 master)
{
	/* mres here is one full page */
	memset(mres, 0, PAGE_SIZE);
	mres->lockname_len = namelen;
	memcpy(mres->lockname, lockname, namelen);
	mres->num_locks = 0;
	mres->total_locks = cpu_to_be32(total_locks);
	mres->mig_cookie = cpu_to_be64(cookie);
	mres->flags = flags;
	mres->master = master;
}
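/*
 * Wire-format note (inferred from the code above and from
 * dlm_send_mig_lockres_msg()): one dlm_migratable_lockres occupies a
 * single page and carries at most DLM_MAX_MIGRATABLE_LOCKS entries.
 * A lockres with more locks than that is split across several
 * messages; total_locks plus a shared mig_cookie let the receiver
 * stitch the pieces of one migration back together, and the last
 * piece is tagged DLM_MRES_ALL_DONE.
 */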
/* returns 1 if this lock fills the network structure,
 * 0 otherwise */
static int dlm_add_lock_to_array(struct dlm_lock *lock,
				 struct dlm_migratable_lockres *mres, int queue)
{
	struct dlm_migratable_lock *ml;
	int lock_num = mres->num_locks;

	ml = &(mres->ml[lock_num]);
	ml->cookie = lock->ml.cookie;
	ml->type = lock->ml.type;
	ml->convert_type = lock->ml.convert_type;
	ml->highest_blocked = lock->ml.highest_blocked;
	ml->list = queue;
	if (lock->lksb) {
		ml->flags = lock->lksb->flags;
		/* send our current lvb */
		if (ml->type == LKM_EXMODE ||
		    ml->type == LKM_PRMODE) {
			/* if it is already set, this had better be a PR
			 * and it has to match */
			if (mres->lvb[0] && (ml->type == LKM_EXMODE ||
			    memcmp(mres->lvb, lock->lksb->lvb, DLM_LVB_LEN))) {
				mlog(ML_ERROR, "mismatched lvbs!\n");
				__dlm_print_one_lock_resource(lock->lockres);
				BUG();
			}
			memcpy(mres->lvb, lock->lksb->lvb, DLM_LVB_LEN);
		}
	}
	ml->node = lock->ml.node;
	mres->num_locks++;
	/* we reached the max, send this network message */
	if (mres->num_locks == DLM_MAX_MIGRATABLE_LOCKS)
		return 1;
	return 0;
}
int dlm_send_one_lockres(struct dlm_ctxt *dlm, struct dlm_lock_resource *res,
			 struct dlm_migratable_lockres *mres,
			 u8 send_to, u8 flags)
{
	struct list_head *queue, *iter;
	int total_locks, i;
	u64 mig_cookie = 0;
	struct dlm_lock *lock;
	int ret = 0;

	BUG_ON(!(flags & (DLM_MRES_RECOVERY|DLM_MRES_MIGRATION)));

	mlog(0, "sending to %u\n", send_to);

	total_locks = dlm_num_locks_in_lockres(res);
	if (total_locks > DLM_MAX_MIGRATABLE_LOCKS) {
		/* rare, but possible */
		mlog(0, "argh.  lockres has %d locks.  this will "
		     "require more than one network packet to "
		     "migrate\n", total_locks);
		mig_cookie = dlm_get_next_mig_cookie();
	}

	dlm_init_migratable_lockres(mres, res->lockname.name,
				    res->lockname.len, total_locks,
				    mig_cookie, flags, res->owner);

	total_locks = 0;
	for (i=DLM_GRANTED_LIST; i<=DLM_BLOCKED_LIST; i++) {
		queue = dlm_list_idx_to_ptr(res, i);
		list_for_each(iter, queue) {
			lock = list_entry(iter, struct dlm_lock, list);

			/* add another lock. */
			total_locks++;
			if (!dlm_add_lock_to_array(lock, mres, i))
				continue;

			/* this filled the lock message,
			 * we must send it immediately. */
			ret = dlm_send_mig_lockres_msg(dlm, mres, send_to,
						       res, total_locks);
			if (ret < 0) {
				mlog(ML_ERROR, "dlm_send_mig_lockres_msg "
				     "returned %d, TODO\n", ret);
				BUG();
			}
		}
	}
	/* flush any remaining locks */
	ret = dlm_send_mig_lockres_msg(dlm, mres, send_to, res, total_locks);
	if (ret < 0) {
		mlog(ML_ERROR, "dlm_send_mig_lockres_msg returned %d, "
		     "TODO\n", ret);
		BUG();
	}
	return ret;
}
/*
 * this message will contain no more than one page worth of
 * recovery data, and it will work on only one lockres.
 * there may be many locks in this page, and we may need to wait
 * for additional packets to complete all the locks (rare, but
 * possible).
 *
 * NOTE: the allocation error cases here are scary
 * we really cannot afford to fail an alloc in recovery
 * do we spin?  returning an error only delays the problem really
 */
int dlm_mig_lockres_handler(struct o2net_msg *msg, u32 len, void *data)
{
	struct dlm_ctxt *dlm = data;
	struct dlm_migratable_lockres *mres =
		(struct dlm_migratable_lockres *)msg->buf;
	int ret = 0;
	u8 real_master;
	char *buf = NULL;
	struct dlm_work_item *item = NULL;
	struct dlm_lock_resource *res = NULL;

	if (!dlm_grab(dlm))
		return -EINVAL;

	BUG_ON(!(mres->flags & (DLM_MRES_RECOVERY|DLM_MRES_MIGRATION)));

	real_master = mres->master;
	if (real_master == DLM_LOCK_RES_OWNER_UNKNOWN) {
		/* cannot migrate a lockres with no master */
		BUG_ON(!(mres->flags & DLM_MRES_RECOVERY));
	}

	mlog(0, "%s message received from node %u\n",
	     (mres->flags & DLM_MRES_RECOVERY) ?
	     "recovery" : "migration", mres->master);
	if (mres->flags & DLM_MRES_ALL_DONE)
		mlog(0, "all done flag.  all lockres data received!\n");

	ret = -ENOMEM;
	buf = kmalloc(be16_to_cpu(msg->data_len), GFP_KERNEL);
	item = kcalloc(1, sizeof(*item), GFP_KERNEL);
	if (!buf || !item)
		goto leave;

	/* lookup the lock to see if we have a secondary queue for this
	 * already... just add the locks in and this will have its owner
	 * and RECOVERY flag changed when it completes. */
	res = dlm_lookup_lockres(dlm, mres->lockname, mres->lockname_len);
	if (res) {
		/* this will get a ref on res */
		/* mark it as recovering/migrating and hash it */
		spin_lock(&res->spinlock);
		if (mres->flags & DLM_MRES_RECOVERY) {
			res->state |= DLM_LOCK_RES_RECOVERING;
		} else {
			if (res->state & DLM_LOCK_RES_MIGRATING) {
				/* this is at least the second
				 * lockres message */
				mlog(0, "lock %.*s is already migrating\n",
				     mres->lockname_len,
				     mres->lockname);
			} else if (res->state & DLM_LOCK_RES_RECOVERING) {
				/* caller should BUG */
				mlog(ML_ERROR, "node is attempting to migrate "
				     "lock %.*s, but marked as recovering!\n",
				     mres->lockname_len, mres->lockname);
				ret = -EFAULT;
				spin_unlock(&res->spinlock);
				goto leave;
			}
			res->state |= DLM_LOCK_RES_MIGRATING;
		}
		spin_unlock(&res->spinlock);
	} else {
		/* need to allocate, just like if it was
		 * mastered here normally */
		res = dlm_new_lockres(dlm, mres->lockname, mres->lockname_len);
		if (!res)
			goto leave;

		/* to match the ref that we would have gotten if
		 * dlm_lookup_lockres had succeeded */
		dlm_lockres_get(res);

		/* mark it as recovering/migrating and hash it */
		if (mres->flags & DLM_MRES_RECOVERY)
			res->state |= DLM_LOCK_RES_RECOVERING;
		else
			res->state |= DLM_LOCK_RES_MIGRATING;

		spin_lock(&dlm->spinlock);
		__dlm_insert_lockres(dlm, res);
		spin_unlock(&dlm->spinlock);

		/* now that the new lockres is inserted,
		 * make it usable by other processes */
		spin_lock(&res->spinlock);
		res->state &= ~DLM_LOCK_RES_IN_PROGRESS;
		spin_unlock(&res->spinlock);

		/* add an extra ref for just-allocated lockres
		 * otherwise the lockres will be purged immediately */
		dlm_lockres_get(res);
	}

	/* at this point we have allocated everything we need,
	 * and we have a hashed lockres with an extra ref and
	 * the proper res->state flags. */
	ret = 0;
	if (mres->master == DLM_LOCK_RES_OWNER_UNKNOWN) {
		/* migration cannot have an unknown master */
		BUG_ON(!(mres->flags & DLM_MRES_RECOVERY));
		mlog(0, "recovery has passed me a lockres with an "
		     "unknown owner.. will need to requery: "
		     "%.*s\n", mres->lockname_len, mres->lockname);
	} else {
		spin_lock(&res->spinlock);
		dlm_change_lockres_owner(dlm, res, dlm->node_num);
		spin_unlock(&res->spinlock);
	}

	/* queue up work for dlm_mig_lockres_worker */
	dlm_grab(dlm);  /* get an extra ref for the work item */
	memcpy(buf, msg->buf, be16_to_cpu(msg->data_len));  /* copy the whole message */
	dlm_init_work_item(dlm, item, dlm_mig_lockres_worker, buf);
	item->u.ml.lockres = res; /* already have a ref */
	item->u.ml.real_master = real_master;
	spin_lock(&dlm->work_lock);
	list_add_tail(&item->list, &dlm->work_list);
	spin_unlock(&dlm->work_lock);
	schedule_work(&dlm->dispatched_work);

leave:
	dlm_put(dlm);
	if (ret < 0) {
		if (buf)
			kfree(buf);
		if (item)
			kfree(item);
	}

	return ret;
}
static void dlm_mig_lockres_worker(struct dlm_work_item *item, void *data)
{
	struct dlm_ctxt *dlm;
	struct dlm_migratable_lockres *mres;
	int ret = 0;
	struct dlm_lock_resource *res;
	u8 real_master;

	/* note: data is the copied message buffer here, so the dlm
	 * must come from the work item, not from data */
	dlm = item->dlm;
	mres = (struct dlm_migratable_lockres *)data;

	res = item->u.ml.lockres;
	real_master = item->u.ml.real_master;

	if (real_master == DLM_LOCK_RES_OWNER_UNKNOWN) {
		/* this case is super-rare. only occurs if
		 * node death happens during migration. */
again:
		ret = dlm_lockres_master_requery(dlm, res, &real_master);
		if (ret < 0) {
			mlog(0, "dlm_lockres_master_requery ret=%d\n",
			     ret);
			goto again;
		}
		if (real_master == DLM_LOCK_RES_OWNER_UNKNOWN) {
			mlog(0, "lockres %.*s not claimed.  "
			     "this node will take it.\n",
			     res->lockname.len, res->lockname.name);
		} else {
			mlog(0, "master needs to respond to sender "
			     "that node %u still owns %.*s\n",
			     real_master, res->lockname.len,
			     res->lockname.name);
			/* cannot touch this lockres */
			goto leave;
		}
	}

	ret = dlm_process_recovery_data(dlm, res, mres);
	if (ret < 0)
		mlog(0, "dlm_process_recovery_data returned %d\n", ret);
	else
		mlog(0, "dlm_process_recovery_data succeeded\n");

	if ((mres->flags & (DLM_MRES_MIGRATION|DLM_MRES_ALL_DONE)) ==
			   (DLM_MRES_MIGRATION|DLM_MRES_ALL_DONE)) {
		ret = dlm_finish_migration(dlm, res, mres->master);
		if (ret < 0)
			mlog_errno(ret);
	}

leave:
	kfree(data);
}
int dlm_lockres_master_requery(struct dlm_ctxt *dlm,
			       struct dlm_lock_resource *res, u8 *real_master)
{
	struct dlm_node_iter iter;
	int nodenum;
	int ret = 0;

	*real_master = DLM_LOCK_RES_OWNER_UNKNOWN;

	/* we only reach here if one of the two nodes in a
	 * migration died while the migration was in progress.
	 * at this point we need to requery the master.  we
	 * know that the new_master got as far as creating
	 * an mle on at least one node, but we do not know
	 * if any nodes had actually cleared the mle and set
	 * the master to the new_master.  the old master
	 * is supposed to set the owner to UNKNOWN in the
	 * event of a new_master death, so the only possible
	 * responses that we can get from nodes here are
	 * that the master is new_master, or that the master
	 * is UNKNOWN.
	 * if all nodes come back with UNKNOWN then we know
	 * the lock needs remastering here.
	 * if any node comes back with a valid master, check
	 * to see if that master is the one that we are
	 * recovering.  if so, then the new_master died and
	 * we need to remaster this lock.  if not, then the
	 * new_master survived and that node will respond to
	 * other nodes about the owner.
	 * if there is an owner, this node needs to dump this
	 * lockres and alert the sender that this lockres
	 * belongs elsewhere. */
	spin_lock(&dlm->spinlock);
	dlm_node_iter_init(dlm->domain_map, &iter);
	spin_unlock(&dlm->spinlock);

	while ((nodenum = dlm_node_iter_next(&iter)) >= 0) {
		/* do not send to self */
		if (nodenum == dlm->node_num)
			continue;
		ret = dlm_do_master_requery(dlm, res, nodenum, real_master);
		if (ret < 0) {
			mlog_errno(ret);
			if (!dlm_is_host_down(ret))
				BUG();
			/* host is down, so answer for that node would be
			 * DLM_LOCK_RES_OWNER_UNKNOWN.  continue. */
		}
		if (*real_master != DLM_LOCK_RES_OWNER_UNKNOWN) {
			mlog(0, "lock master is %u\n", *real_master);
			break;
		}
	}
	return ret;
}
int dlm_do_master_requery(struct dlm_ctxt *dlm, struct dlm_lock_resource *res,
			  u8 nodenum, u8 *real_master)
{
	int ret;
	struct dlm_master_requery req;
	int status = DLM_LOCK_RES_OWNER_UNKNOWN;

	memset(&req, 0, sizeof(req));
	req.node_idx = dlm->node_num;
	req.namelen = res->lockname.len;
	memcpy(req.name, res->lockname.name, res->lockname.len);

	ret = o2net_send_message(DLM_MASTER_REQUERY_MSG, dlm->key,
				 &req, sizeof(req), nodenum, &status);
	/* XXX: negative status not handled properly here. */
	if (ret < 0)
		mlog_errno(ret);
	else {
		BUG_ON(status < 0);
		BUG_ON(status > DLM_LOCK_RES_OWNER_UNKNOWN);
		*real_master = (u8) (status & 0xff);
		mlog(0, "node %u responded to master requery with %u\n",
		     nodenum, *real_master);
		ret = 0;
	}
	return ret;
}
/* this function cannot error, so unless the sending
 * or receiving of the message failed, the owner can
 * be trusted */
int dlm_master_requery_handler(struct o2net_msg *msg, u32 len, void *data)
{
	struct dlm_ctxt *dlm = data;
	struct dlm_master_requery *req = (struct dlm_master_requery *)msg->buf;
	struct dlm_lock_resource *res = NULL;
	int master = DLM_LOCK_RES_OWNER_UNKNOWN;
	u32 flags = DLM_ASSERT_MASTER_REQUERY;

	if (!dlm_grab(dlm)) {
		/* since the domain has gone away on this
		 * node, the proper response is UNKNOWN */
		return master;
	}

	spin_lock(&dlm->spinlock);
	res = __dlm_lookup_lockres(dlm, req->name, req->namelen);
	if (res) {
		spin_lock(&res->spinlock);
		master = res->owner;
		if (master == dlm->node_num) {
			int ret = dlm_dispatch_assert_master(dlm, res,
							     0, 0, flags);
			if (ret < 0) {
				mlog_errno(-ENOMEM);
				/* retry!? */
				BUG();
			}
		}
		spin_unlock(&res->spinlock);
	}
	spin_unlock(&dlm->spinlock);

	dlm_put(dlm);
	return master;
}
static inline struct list_head *
dlm_list_num_to_pointer(struct dlm_lock_resource *res, int list_num)
{
	struct list_head *ret;
	BUG_ON(list_num < 0);
	BUG_ON(list_num > 2);
	/* granted, converting and blocked are laid out consecutively
	 * in the lockres, so index off the first of the three */
	ret = &(res->granted);
	ret += list_num;
	return ret;
}
/* TODO: do ast flush business
 * TODO: do MIGRATING and RECOVERING spinning
 */

/*
 * NOTE about in-flight requests during migration:
 *
 * Before attempting the migrate, the master has marked the lockres as
 * MIGRATING and then flushed all of its pending ASTS.  So any in-flight
 * requests either got queued before the MIGRATING flag got set, in which
 * case the lock data will reflect the change and a return message is on
 * the way, or the request failed to get in before MIGRATING got set.  In
 * this case, the caller will be told to spin and wait for the MIGRATING
 * flag to be dropped, then recheck the master.
 * This holds true for the convert, cancel and unlock cases, and since lvb
 * updates are tied to these same messages, it applies to lvb updates as
 * well.  For the lock case, there is no way a lock can be on the master
 * queue and not be on the secondary queue since the lock is always added
 * locally first.  This means that the new target node will never be sent
 * a lock that it doesn't already have on the list.
 * In total, this means that the local lock is correct and should not be
 * updated to match the one sent by the master.  Any messages sent back
 * from the master before the MIGRATING flag will bring the lock properly
 * up-to-date, and the change will be ordered properly for the waiter.
 * We will *not* attempt to modify the lock underneath the waiter.
 */
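/*
 * Hypothetical timeline to make the note above concrete: node A is
 * migrating a lockres to node B.  A sets MIGRATING and flushes its
 * pending ASTs.  An unlock request from node C that arrived before
 * the flag was set is fully reflected in the lock data A ships to B;
 * one that arrives after is bounced back, so C spin-waits on the
 * MIGRATING flag and then re-queries the (new) master.  Either way,
 * B never has to reconcile a half-applied request.
 */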
static int dlm_process_recovery_data(struct dlm_ctxt *dlm,
				     struct dlm_lock_resource *res,
				     struct dlm_migratable_lockres *mres)
{
	struct dlm_migratable_lock *ml;
	struct list_head *queue;
	struct dlm_lock *newlock = NULL;
	struct dlm_lockstatus *lksb = NULL;
	int ret = 0;
	int i;
	struct list_head *iter;
	struct dlm_lock *lock = NULL;

	mlog(0, "running %d locks for this lockres\n", mres->num_locks);
	for (i=0; i<mres->num_locks; i++) {
		ml = &(mres->ml[i]);
		BUG_ON(ml->highest_blocked != LKM_IVMODE);
		newlock = NULL;
		lksb = NULL;

		queue = dlm_list_num_to_pointer(res, ml->list);

		/* if the lock is for the local node it needs to
		 * be moved to the proper location within the queue.
		 * do not allocate a new lock structure. */
		if (ml->node == dlm->node_num) {
			/* MIGRATION ONLY! */
			BUG_ON(!(mres->flags & DLM_MRES_MIGRATION));

			lock = NULL;
			spin_lock(&res->spinlock);
			list_for_each(iter, queue) {
				lock = list_entry(iter, struct dlm_lock, list);
				if (lock->ml.cookie != ml->cookie)
					lock = NULL;
				else
					break;
			}

			/* lock is always created locally first, and
			 * destroyed locally last.  it must be on the list */
			if (!lock) {
				u64 c = ml->cookie;
				mlog(ML_ERROR, "could not find local lock "
				     "with cookie %u:%llu!\n",
				     dlm_get_lock_cookie_node(c),
				     dlm_get_lock_cookie_seq(c));
				BUG();
			}
			BUG_ON(lock->ml.node != ml->node);

			/* see NOTE above about why we do not update
			 * to match the master here */

			/* move the lock to its proper place */
			/* do not alter lock refcount.  switching lists. */
			list_del_init(&lock->list);
			list_add_tail(&lock->list, queue);
			spin_unlock(&res->spinlock);

			mlog(0, "just reordered a local lock!\n");
			continue;
		}

		/* lock is for another node. */
		newlock = dlm_new_lock(ml->type, ml->node,
				       be64_to_cpu(ml->cookie), NULL);
		if (!newlock) {
			ret = -ENOMEM;
			goto leave;
		}
		lksb = newlock->lksb;
		dlm_lock_attach_lockres(newlock, res);

		if (ml->convert_type != LKM_IVMODE) {
			BUG_ON(queue != &res->converting);
			newlock->ml.convert_type = ml->convert_type;
		}
		lksb->flags |= (ml->flags &
				(DLM_LKSB_PUT_LVB|DLM_LKSB_GET_LVB));

		if (mres->lvb[0]) {
			if (lksb->flags & DLM_LKSB_PUT_LVB) {
				/* other node was trying to update
				 * lvb when node died.  recreate the
				 * lksb with the updated lvb. */
				memcpy(lksb->lvb, mres->lvb, DLM_LVB_LEN);
			} else {
				/* otherwise, the node is sending its
				 * most recent valid lvb info */
				BUG_ON(ml->type != LKM_EXMODE &&
				       ml->type != LKM_PRMODE);
				if (res->lvb[0] && (ml->type == LKM_EXMODE ||
				    memcmp(res->lvb, mres->lvb, DLM_LVB_LEN))) {
					mlog(ML_ERROR, "received bad lvb!\n");
					__dlm_print_one_lock_resource(res);
					BUG();
				}
				memcpy(res->lvb, mres->lvb, DLM_LVB_LEN);
			}
		}

		/* NOTE:
		 * wrt lock queue ordering and recovery:
		 *    1. order of locks on granted queue is
		 *       preserved exactly.
		 *    2. order of locks on converting queue is
		 *       LOST with the node death.  sorry charlie.
		 *    3. order of locks on the blocked queue is
		 *       preserved exactly.
		 * order of locks does not affect integrity, it
		 * just means that a lock request may get pushed
		 * back in line as a result of the node death.
		 * also note that for a given node the lock order
		 * for its secondary queue locks is preserved
		 * relative to each other, but clearly *not*
		 * preserved relative to locks from other nodes.
		 */
		spin_lock(&res->spinlock);
		dlm_lock_get(newlock);
		list_add_tail(&newlock->list, queue);
		spin_unlock(&res->spinlock);
	}
	mlog(0, "done running all the locks\n");

leave:
	if (ret < 0) {
		mlog_errno(ret);
		if (newlock)
			dlm_lock_put(newlock);
	}

	return ret;
}
void dlm_move_lockres_to_recovery_list(struct dlm_ctxt *dlm,
				       struct dlm_lock_resource *res)
{
	int i;
	struct list_head *queue, *iter, *iter2;
	struct dlm_lock *lock;

	res->state |= DLM_LOCK_RES_RECOVERING;
	if (!list_empty(&res->recovering))
		list_del_init(&res->recovering);
	list_add_tail(&res->recovering, &dlm->reco.resources);

	/* find any pending locks and put them back on proper list */
	for (i=DLM_BLOCKED_LIST; i>=DLM_GRANTED_LIST; i--) {
		queue = dlm_list_idx_to_ptr(res, i);
		list_for_each_safe(iter, iter2, queue) {
			lock = list_entry(iter, struct dlm_lock, list);

			if (lock->convert_pending) {
				/* move converting lock back to granted */
				BUG_ON(i != DLM_CONVERTING_LIST);
				mlog(0, "node died with convert pending "
				     "on %.*s. move back to granted list.\n",
				     res->lockname.len, res->lockname.name);
				dlm_revert_pending_convert(res, lock);
				lock->convert_pending = 0;
			} else if (lock->lock_pending) {
				/* remove pending lock requests completely */
				BUG_ON(i != DLM_BLOCKED_LIST);
				mlog(0, "node died with lock pending "
				     "on %.*s. remove from blocked list and skip.\n",
				     res->lockname.len, res->lockname.name);
				/* lock will be floating until ref in
				 * dlmlock_remote is freed after the network
				 * call returns.  ok for it to not be on any
				 * list since no ast can be called
				 * (the master is dead). */
				dlm_revert_pending_lock(res, lock);
				lock->lock_pending = 0;
			} else if (lock->unlock_pending) {
				/* if an unlock was in progress, treat as
				 * if this had completed successfully
				 * before sending this lock state to the
				 * new master.  note that the dlm_unlock
				 * call is still responsible for calling
				 * the unlockast.  that will happen after
				 * the network call times out.  for now,
				 * just move lists to prepare the new
				 * recovery master. */
				BUG_ON(i != DLM_GRANTED_LIST);
				mlog(0, "node died with unlock pending "
				     "on %.*s. remove from granted list and skip.\n",
				     res->lockname.len, res->lockname.name);
				dlm_commit_pending_unlock(res, lock);
				lock->unlock_pending = 0;
			} else if (lock->cancel_pending) {
				/* if a cancel was in progress, treat as
				 * if this had completed successfully
				 * before sending this lock state to the
				 * new master */
				BUG_ON(i != DLM_CONVERTING_LIST);
				mlog(0, "node died with cancel pending "
				     "on %.*s. move back to granted list.\n",
				     res->lockname.len, res->lockname.name);
				dlm_commit_pending_cancel(res, lock);
				lock->cancel_pending = 0;
			}
		}
	}
}
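/*
 * Summary of the pending-flag handling above (each case is applied
 * before this lock state is shipped to the new recovery master):
 *
 *   lock_pending    -> dlm_revert_pending_lock():    drop the request
 *   convert_pending -> dlm_revert_pending_convert(): back to granted
 *   unlock_pending  -> dlm_commit_pending_unlock():  treat as unlocked
 *   cancel_pending  -> dlm_commit_pending_cancel():  back to granted
 */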
/* removes all recovered locks from the recovery list.
 * sets the res->owner to the new master.
 * unsets the RECOVERY flag and wakes waiters. */
static void dlm_finish_local_lockres_recovery(struct dlm_ctxt *dlm,
					      u8 dead_node, u8 new_master)
{
	int i;
	struct list_head *iter, *iter2;
	struct hlist_node *hash_iter;
	struct hlist_head *bucket;

	struct dlm_lock_resource *res;

	assert_spin_locked(&dlm->spinlock);

	list_for_each_safe(iter, iter2, &dlm->reco.resources) {
		res = list_entry(iter, struct dlm_lock_resource, recovering);
		if (res->owner == dead_node) {
			list_del_init(&res->recovering);
			spin_lock(&res->spinlock);
			dlm_change_lockres_owner(dlm, res, new_master);
			res->state &= ~DLM_LOCK_RES_RECOVERING;
			__dlm_dirty_lockres(dlm, res);
			spin_unlock(&res->spinlock);
			wake_up(&res->wq);
		}
	}

	/* this will become unnecessary eventually, but
	 * for now we need to run the whole hash, clear
	 * the RECOVERING state and set the owner
	 * if necessary */
	for (i = 0; i < DLM_HASH_BUCKETS; i++) {
		bucket = &(dlm->lockres_hash[i]);
		hlist_for_each_entry(res, hash_iter, bucket, hash_node) {
			if (res->state & DLM_LOCK_RES_RECOVERING) {
				if (res->owner == dead_node) {
					mlog(0, "(this=%u) res %.*s owner=%u "
					     "was not on recovering list, but "
					     "clearing state anyway\n",
					     dlm->node_num, res->lockname.len,
					     res->lockname.name, new_master);
				} else if (res->owner == dlm->node_num) {
					mlog(0, "(this=%u) res %.*s owner=%u "
					     "was not on recovering list, "
					     "owner is THIS node, clearing\n",
					     dlm->node_num, res->lockname.len,
					     res->lockname.name, new_master);
				} else
					continue;

				if (!list_empty(&res->recovering)) {
					mlog(0, "%s:%.*s: lockres was "
					     "marked RECOVERING, owner=%u\n",
					     dlm->name, res->lockname.len,
					     res->lockname.name, res->owner);
					list_del_init(&res->recovering);
				}
				spin_lock(&res->spinlock);
				dlm_change_lockres_owner(dlm, res, new_master);
				res->state &= ~DLM_LOCK_RES_RECOVERING;
				__dlm_dirty_lockres(dlm, res);
				spin_unlock(&res->spinlock);
				wake_up(&res->wq);
			}
		}
	}
}
static inline int dlm_lvb_needs_invalidation(struct dlm_lock *lock, int local)
{
	if (local) {
		if (lock->ml.type != LKM_EXMODE &&
		    lock->ml.type != LKM_PRMODE)
			return 1;
	} else if (lock->ml.type == LKM_EXMODE)
		return 1;
	return 0;
}
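/*
 * Put differently (see also the comments in dlm_revalidate_lvb()
 * below): when this node masters the lockres, the lvb must go if the
 * dead node held an EX; when this node is only a secondary, the lvb
 * can no longer be trusted unless one of our own locks is still
 * granted in EX or PR mode.
 */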
static void dlm_revalidate_lvb(struct dlm_ctxt *dlm,
			       struct dlm_lock_resource *res, u8 dead_node)
{
	struct list_head *iter, *queue;
	struct dlm_lock *lock;
	int blank_lvb = 0, local = 0;
	int i;
	u8 search_node;

	assert_spin_locked(&dlm->spinlock);
	assert_spin_locked(&res->spinlock);

	if (res->owner == dlm->node_num)
		/* if this node owned the lockres, and if the dead node
		 * had an EX when it died, blank out the lvb */
		search_node = dead_node;
	else {
		/* if this is a secondary lockres, and we had no EX or PR
		 * locks granted, we can no longer trust the lvb */
		search_node = dlm->node_num;
		local = 1;  /* check local state for valid lvb */
	}

	for (i=DLM_GRANTED_LIST; i<=DLM_CONVERTING_LIST; i++) {
		queue = dlm_list_idx_to_ptr(res, i);
		list_for_each(iter, queue) {
			lock = list_entry(iter, struct dlm_lock, list);
			if (lock->ml.node == search_node) {
				if (dlm_lvb_needs_invalidation(lock, local)) {
					/* zero the lksb lvb and lockres lvb */
					blank_lvb = 1;
					memset(lock->lksb->lvb, 0, DLM_LVB_LEN);
				}
			}
		}
	}

	if (blank_lvb) {
		mlog(0, "clearing %.*s lvb, dead node %u had EX\n",
		     res->lockname.len, res->lockname.name, dead_node);
		memset(res->lvb, 0, DLM_LVB_LEN);
	}
}
static void dlm_free_dead_locks(struct dlm_ctxt *dlm,
				struct dlm_lock_resource *res, u8 dead_node)
{
	struct list_head *iter, *tmpiter;
	struct dlm_lock *lock;

	/* this node is the lockres master:
	 * 1) remove any stale locks for the dead node
	 * 2) if the dead node had an EX when it died, blank out the lvb
	 */
	assert_spin_locked(&dlm->spinlock);
	assert_spin_locked(&res->spinlock);

	/* TODO: check pending_asts, pending_basts here */
	list_for_each_safe(iter, tmpiter, &res->granted) {
		lock = list_entry(iter, struct dlm_lock, list);
		if (lock->ml.node == dead_node) {
			list_del_init(&lock->list);
			dlm_lock_put(lock);
		}
	}
	list_for_each_safe(iter, tmpiter, &res->converting) {
		lock = list_entry(iter, struct dlm_lock, list);
		if (lock->ml.node == dead_node) {
			list_del_init(&lock->list);
			dlm_lock_put(lock);
		}
	}
	list_for_each_safe(iter, tmpiter, &res->blocked) {
		lock = list_entry(iter, struct dlm_lock, list);
		if (lock->ml.node == dead_node) {
			list_del_init(&lock->list);
			dlm_lock_put(lock);
		}
	}

	/* do not kick thread yet */
	__dlm_dirty_lockres(dlm, res);
}
/* if this node is the recovery master, and there are no
 * locks for a given lockres owned by this node that are in
 * either PR or EX mode, zero out the lvb before requesting.
 */

static void dlm_do_local_recovery_cleanup(struct dlm_ctxt *dlm, u8 dead_node)
{
	struct hlist_node *iter;
	struct dlm_lock_resource *res;
	int i;
	struct hlist_head *bucket;
	struct dlm_lock *lock;

	/* purge any stale mles */
	dlm_clean_master_list(dlm, dead_node);

	/*
	 * now clean up all lock resources.  there are two rules:
	 *
	 * 1) if the dead node was the master, move the lockres
	 *    to the recovering list.  set the RECOVERING flag.
	 *    this lockres needs to be cleaned up before it can
	 *    be used again.
	 *
	 * 2) if this node was the master, remove all locks from
	 *    each of the lockres queues that were owned by the
	 *    dead node.  once recovery finishes, the dlm thread
	 *    can be kicked again to see if any ASTs or BASTs
	 *    need to be fired as a result.
	 */
	for (i = 0; i < DLM_HASH_BUCKETS; i++) {
		bucket = &(dlm->lockres_hash[i]);
		hlist_for_each_entry(res, iter, bucket, hash_node) {
			/* always prune any $RECOVERY entries for dead nodes,
			 * otherwise hangs can occur during later recovery */
			if (dlm_is_recovery_lock(res->lockname.name,
						 res->lockname.len)) {
				spin_lock(&res->spinlock);
				list_for_each_entry(lock, &res->granted, list) {
					if (lock->ml.node == dead_node) {
						mlog(0, "AHA! there was "
						     "a $RECOVERY lock for dead "
						     "node %u (%s)!\n",
						     dead_node, dlm->name);
						list_del_init(&lock->list);
						dlm_lock_put(lock);
						break;
					}
				}
				spin_unlock(&res->spinlock);
				continue;
			}
			spin_lock(&res->spinlock);
			/* zero the lvb if necessary */
			dlm_revalidate_lvb(dlm, res, dead_node);
			if (res->owner == dead_node)
				dlm_move_lockres_to_recovery_list(dlm, res);
			else if (res->owner == dlm->node_num) {
				dlm_free_dead_locks(dlm, res, dead_node);
				__dlm_lockres_calc_usage(dlm, res);
			}
			spin_unlock(&res->spinlock);
		}
	}
}
static void __dlm_hb_node_down(struct dlm_ctxt *dlm, int idx)
{
	assert_spin_locked(&dlm->spinlock);

	/* check to see if the node is already considered dead */
	if (!test_bit(idx, dlm->live_nodes_map)) {
		mlog(0, "for domain %s, node %d is already dead. "
		     "another node likely did recovery already.\n",
		     dlm->name, idx);
		return;
	}

	/* check to see if we do not care about this node */
	if (!test_bit(idx, dlm->domain_map)) {
		/* This also catches the case that we get a node down
		 * but haven't joined the domain yet. */
		mlog(0, "node %u already removed from domain!\n", idx);
		return;
	}

	clear_bit(idx, dlm->live_nodes_map);

	/* Clean up join state on node death. */
	if (dlm->joining_node == idx) {
		mlog(0, "Clearing join state for node %u\n", idx);
		__dlm_set_joining_node(dlm, DLM_LOCK_RES_OWNER_UNKNOWN);
	}

	/* make sure local cleanup occurs before the heartbeat events */
	if (!test_bit(idx, dlm->recovery_map))
		dlm_do_local_recovery_cleanup(dlm, idx);

	/* notify anything attached to the heartbeat events */
	dlm_hb_event_notify_attached(dlm, idx, 0);

	mlog(0, "node %u being removed from domain map!\n", idx);
	clear_bit(idx, dlm->domain_map);
	/* wake up migration waiters if a node goes down.
	 * perhaps later we can genericize this for other waiters. */
	wake_up(&dlm->migration_wq);

	if (test_bit(idx, dlm->recovery_map))
		mlog(0, "domain %s, node %u already added "
		     "to recovery map!\n", dlm->name, idx);
	else
		set_bit(idx, dlm->recovery_map);
}
void dlm_hb_node_down_cb(struct o2nm_node *node, int idx, void *data)
{
	struct dlm_ctxt *dlm = data;

	if (!dlm_grab(dlm))
		return;

	spin_lock(&dlm->spinlock);
	__dlm_hb_node_down(dlm, idx);
	spin_unlock(&dlm->spinlock);

	dlm_put(dlm);
}

void dlm_hb_node_up_cb(struct o2nm_node *node, int idx, void *data)
{
	struct dlm_ctxt *dlm = data;

	if (!dlm_grab(dlm))
		return;

	spin_lock(&dlm->spinlock);
	set_bit(idx, dlm->live_nodes_map);
	/* do NOT notify mle attached to the heartbeat events.
	 * new nodes are not interesting in mastery until joined. */
	spin_unlock(&dlm->spinlock);

	dlm_put(dlm);
}
static void dlm_reco_ast(void *astdata)
{
	struct dlm_ctxt *dlm = astdata;
	mlog(0, "ast for recovery lock fired!, this=%u, dlm=%s\n",
	     dlm->node_num, dlm->name);
}
static void dlm_reco_bast(void *astdata, int blocked_type)
{
	struct dlm_ctxt *dlm = astdata;
	mlog(0, "bast for recovery lock fired!, this=%u, dlm=%s\n",
	     dlm->node_num, dlm->name);
}
static void dlm_reco_unlock_ast(void *astdata, enum dlm_status st)
{
	mlog(0, "unlockast for recovery lock fired!\n");
}
/*
 * dlm_pick_recovery_master will continually attempt to use
 * dlmlock() on the special "$RECOVERY" lockres with the
 * LKM_NOQUEUE flag to get an EX.  every thread that enters
 * this function on each node racing to become the recovery
 * master will not stop attempting this until either:
 * a) this node gets the EX (and becomes the recovery master),
 * or b) dlm->reco.new_master gets set to some nodenum
 * != O2NM_INVALID_NODE_NUM (another node will do the reco).
 * so each time a recovery master is needed, the entire cluster
 * will sync at this point.  if the new master dies, that will
 * be detected in dlm_do_recovery */
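/*
 * Outcome summary (matching the caller's expectations documented in
 * dlm_do_recovery()): returns 0 when this node won the $RECOVERY EX
 * and has already sent begin_reco to everyone, or -EEXIST when
 * another node took (or already finished) the recovery; in the
 * latter case the code below waits briefly for reco.new_master to
 * become valid before returning.
 */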
static int dlm_pick_recovery_master(struct dlm_ctxt *dlm)
{
	enum dlm_status ret;
	struct dlm_lockstatus lksb;
	int status = -EINVAL;

	mlog(0, "starting recovery of %s at %lu, dead=%u, this=%u\n",
	     dlm->name, jiffies, dlm->reco.dead_node, dlm->node_num);
again:
	memset(&lksb, 0, sizeof(lksb));

	ret = dlmlock(dlm, LKM_EXMODE, &lksb, LKM_NOQUEUE|LKM_RECOVERY,
		      DLM_RECOVERY_LOCK_NAME, dlm_reco_ast, dlm, dlm_reco_bast);

	mlog(0, "%s: dlmlock($RECOVERY) returned %d, lksb=%d\n",
	     dlm->name, ret, lksb.status);

	if (ret == DLM_NORMAL) {
		mlog(0, "dlm=%s dlmlock says I got it (this=%u)\n",
		     dlm->name, dlm->node_num);

		/* got the EX lock. check to see if another node
		 * just became the reco master */
		if (dlm_reco_master_ready(dlm)) {
			mlog(0, "%s: got reco EX lock, but %u will "
			     "do the recovery\n", dlm->name,
			     dlm->reco.new_master);
			status = -EEXIST;
		} else {
			status = 0;

			/* see if recovery was already finished elsewhere */
			spin_lock(&dlm->spinlock);
			if (dlm->reco.dead_node == O2NM_INVALID_NODE_NUM) {
				status = -EINVAL;
				mlog(0, "%s: got reco EX lock, but "
				     "node got recovered already\n", dlm->name);
				if (dlm->reco.new_master != O2NM_INVALID_NODE_NUM) {
					mlog(ML_ERROR, "%s: new master is %u "
					     "but no dead node!\n",
					     dlm->name, dlm->reco.new_master);
					BUG();
				}
			}
			spin_unlock(&dlm->spinlock);
		}

		/* if this node has actually become the recovery master,
		 * set the master and send the messages to begin recovery */
		if (!status) {
			mlog(0, "%s: dead=%u, this=%u, sending "
			     "begin_reco now\n", dlm->name,
			     dlm->reco.dead_node, dlm->node_num);
			status = dlm_send_begin_reco_message(dlm,
				      dlm->reco.dead_node);
			/* this always succeeds */
			BUG_ON(status);

			/* set the new_master to this node */
			spin_lock(&dlm->spinlock);
			dlm->reco.new_master = dlm->node_num;
			spin_unlock(&dlm->spinlock);
		}

		/* recovery lock is a special case. ast will not get fired,
		 * so just go ahead and unlock it. */
		ret = dlmunlock(dlm, &lksb, 0, dlm_reco_unlock_ast, dlm);
		if (ret == DLM_DENIED) {
			mlog(0, "got DLM_DENIED, trying LKM_CANCEL\n");
			ret = dlmunlock(dlm, &lksb, LKM_CANCEL,
					dlm_reco_unlock_ast, dlm);
		}
		if (ret != DLM_NORMAL) {
			/* this would really suck. this could only happen
			 * if there was a network error during the unlock
			 * because of node death. this means the unlock
			 * is actually "done" and the lock structure is
			 * even freed. we can continue, but only
			 * because this specific lock name is special. */
			mlog(ML_ERROR, "dlmunlock returned %d\n", ret);
		}
	} else if (ret == DLM_NOTQUEUED) {
		mlog(0, "dlm=%s dlmlock says another node got it (this=%u)\n",
		     dlm->name, dlm->node_num);
		/* another node is master. wait on
		 * reco.new_master != O2NM_INVALID_NODE_NUM
		 * for at most one second */
		wait_event_timeout(dlm->dlm_reco_thread_wq,
				   dlm_reco_master_ready(dlm),
				   msecs_to_jiffies(1000));
		if (!dlm_reco_master_ready(dlm)) {
			mlog(0, "%s: reco master taking a while\n",
			     dlm->name);
			goto again;
		}
		/* another node has informed this one that it is reco master */
		mlog(0, "%s: reco master %u is ready to recover %u\n",
		     dlm->name, dlm->reco.new_master, dlm->reco.dead_node);
		status = -EEXIST;
	} else {
		struct dlm_lock_resource *res;

		/* dlmlock returned something other than NOTQUEUED or NORMAL */
		mlog(ML_ERROR, "%s: got %s from dlmlock($RECOVERY), "
		     "lksb.status=%s\n", dlm->name, dlm_errname(ret),
		     dlm_errname(lksb.status));
		res = dlm_lookup_lockres(dlm, DLM_RECOVERY_LOCK_NAME,
					 DLM_RECOVERY_LOCK_NAME_LEN);
		if (res) {
			dlm_print_one_lock_resource(res);
			dlm_lockres_put(res);
		} else {
			mlog(ML_ERROR, "recovery lock not found\n");
		}
	}

	return status;
}
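
/*
 * dlm_reco_master_ready(), used above, is defined elsewhere in this
 * file. For the reader's convenience, it amounts to roughly the
 * following check under dlm->spinlock (sketch, not a second
 * definition):
 */
#if 0	/* illustrative sketch, not compiled */
static int dlm_reco_master_ready(struct dlm_ctxt *dlm)
{
	int ready;

	spin_lock(&dlm->spinlock);
	/* some node has claimed the recovery master role */
	ready = (dlm->reco.new_master != O2NM_INVALID_NODE_NUM);
	spin_unlock(&dlm->spinlock);
	return ready;
}
#endif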
static int dlm_send_begin_reco_message(struct dlm_ctxt *dlm, u8 dead_node)
{
	struct dlm_begin_reco br;
	int ret = 0;
	struct dlm_node_iter iter;
	int nodenum;
	int status;

	mlog_entry("%u\n", dead_node);

	mlog(0, "dead node is %u\n", dead_node);

	spin_lock(&dlm->spinlock);
	dlm_node_iter_init(dlm->domain_map, &iter);
	spin_unlock(&dlm->spinlock);

	clear_bit(dead_node, iter.node_map);

	memset(&br, 0, sizeof(br));
	br.node_idx = dlm->node_num;
	br.dead_node = dead_node;

	while ((nodenum = dlm_node_iter_next(&iter)) >= 0) {
		ret = 0;
		if (nodenum == dead_node) {
			mlog(0, "not sending begin reco to dead node "
			     "%u\n", dead_node);
			continue;
		}
		if (nodenum == dlm->node_num) {
			mlog(0, "not sending begin reco to self\n");
			continue;
		}
retry:
		mlog(0, "attempting to send begin reco msg to %d\n",
		     nodenum);
		ret = o2net_send_message(DLM_BEGIN_RECO_MSG, dlm->key,
					 &br, sizeof(br), nodenum, &status);
		/* negative status is handled ok by caller here */
		if (ret >= 0)
			ret = status;
		if (dlm_is_host_down(ret)) {
			/* node is down. not involved in recovery
			 * so just keep going */
			mlog(0, "%s: node %u was down when sending "
			     "begin reco msg (%d)\n", dlm->name, nodenum, ret);
			ret = 0;
		}
		if (ret < 0) {
			struct dlm_lock_resource *res;
			/* this is now a serious problem, possibly ENOMEM
			 * in the network stack. must retry */
			mlog(ML_ERROR, "begin reco of dlm %s to node %u "
			     "returned %d\n", dlm->name, nodenum, ret);
			res = dlm_lookup_lockres(dlm, DLM_RECOVERY_LOCK_NAME,
						 DLM_RECOVERY_LOCK_NAME_LEN);
			if (res) {
				dlm_print_one_lock_resource(res);
				dlm_lockres_put(res);
			} else {
				mlog(ML_ERROR, "recovery lock not found\n");
			}
			/* sleep for a bit in hopes that we can avoid
			 * another ENOMEM */
			msleep(100);
			goto retry;
		}
	}

	return ret;
}
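
/*
 * The begin-reco payload is tiny: the sender asserting itself as
 * recovery master plus the node being recovered, padded for
 * alignment. A sketch of the wire struct from dlmcommon.h follows;
 * the padding field names are an approximation:
 */
#if 0	/* illustrative sketch, not compiled */
struct dlm_begin_reco
{
	u8 node_idx;	/* node claiming the recovery master role */
	u8 dead_node;	/* node whose locks are being recovered */
	__be16 pad1;
	__be32 pad2;
};
#endif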
int dlm_begin_reco_handler(struct o2net_msg *msg, u32 len, void *data)
{
	struct dlm_ctxt *dlm = data;
	struct dlm_begin_reco *br = (struct dlm_begin_reco *)msg->buf;

	/* ok to return 0, domain has gone away */
	if (!dlm_grab(dlm))
		return 0;

	mlog(0, "node %u wants to recover node %u\n",
	     br->node_idx, br->dead_node);

	dlm_fire_domain_eviction_callbacks(dlm, br->dead_node);

	spin_lock(&dlm->spinlock);
	if (dlm->reco.new_master != O2NM_INVALID_NODE_NUM) {
		if (test_bit(dlm->reco.new_master, dlm->recovery_map)) {
			mlog(0, "%s: new_master %u died, changing "
			     "to %u\n", dlm->name, dlm->reco.new_master,
			     br->node_idx);
		} else {
			mlog(0, "%s: new_master %u NOT DEAD, changing "
			     "to %u\n", dlm->name, dlm->reco.new_master,
			     br->node_idx);
			/* may not have seen the new master as dead yet */
		}
	}
	if (dlm->reco.dead_node != O2NM_INVALID_NODE_NUM) {
		mlog(ML_NOTICE, "%s: dead_node previously set to %u, "
		     "node %u changing it to %u\n", dlm->name,
		     dlm->reco.dead_node, br->node_idx, br->dead_node);
	}
	dlm->reco.new_master = br->node_idx;
	dlm->reco.dead_node = br->dead_node;
	if (!test_bit(br->dead_node, dlm->recovery_map)) {
		mlog(0, "recovery master %u sees %u as dead, but this "
		     "node has not yet. marking %u as dead\n",
		     br->node_idx, br->dead_node, br->dead_node);
		if (!test_bit(br->dead_node, dlm->domain_map) ||
		    !test_bit(br->dead_node, dlm->live_nodes_map))
			mlog(0, "%u not in domain/live_nodes map "
			     "so setting it in reco map manually\n",
			     br->dead_node);
		/* force the recovery cleanup in __dlm_hb_node_down
		 * both of these will be cleared in a moment */
		set_bit(br->dead_node, dlm->domain_map);
		set_bit(br->dead_node, dlm->live_nodes_map);
		__dlm_hb_node_down(dlm, br->dead_node);
	}
	spin_unlock(&dlm->spinlock);

	dlm_kick_recovery_thread(dlm);

	dlm_put(dlm);
	return 0;
}
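
/*
 * A sketch of how this handler is wired into o2net from the domain
 * setup path (the handler-list field name on dlm_ctxt is an
 * assumption here, and this fragment would live inside a setup
 * function):
 */
#if 0	/* illustrative sketch, not compiled */
	status = o2net_register_handler(DLM_BEGIN_RECO_MSG, dlm->key,
					sizeof(struct dlm_begin_reco),
					dlm_begin_reco_handler,
					dlm, &dlm->dlm_domain_handlers);
	if (status)
		goto bail;
#endif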
static int dlm_send_finalize_reco_message(struct dlm_ctxt *dlm)
{
	int ret = 0;
	struct dlm_finalize_reco fr;
	struct dlm_node_iter iter;
	int nodenum;
	int status;

	mlog(0, "finishing recovery for node %s:%u\n",
	     dlm->name, dlm->reco.dead_node);

	spin_lock(&dlm->spinlock);
	dlm_node_iter_init(dlm->domain_map, &iter);
	spin_unlock(&dlm->spinlock);

	memset(&fr, 0, sizeof(fr));
	fr.node_idx = dlm->node_num;
	fr.dead_node = dlm->reco.dead_node;

	while ((nodenum = dlm_node_iter_next(&iter)) >= 0) {
		if (nodenum == dlm->node_num)
			continue;
		ret = o2net_send_message(DLM_FINALIZE_RECO_MSG, dlm->key,
					 &fr, sizeof(fr), nodenum, &status);
		if (ret >= 0) {
			ret = status;
			if (dlm_is_host_down(ret)) {
				/* this has no effect on this recovery
				 * session, so set the status to zero to
				 * finish out the last recovery */
				mlog(ML_ERROR, "node %u went down after this "
				     "node finished recovery.\n", nodenum);
				ret = 0;
			}
		}
		if (ret < 0) {
			mlog_errno(ret);
			break;
		}
	}

	return ret;
}
int dlm_finalize_reco_handler(struct o2net_msg *msg, u32 len, void *data)
{
	struct dlm_ctxt *dlm = data;
	struct dlm_finalize_reco *fr = (struct dlm_finalize_reco *)msg->buf;

	/* ok to return 0, domain has gone away */
	if (!dlm_grab(dlm))
		return 0;

	mlog(0, "node %u finalizing recovery of node %u\n",
	     fr->node_idx, fr->dead_node);

	spin_lock(&dlm->spinlock);

	if (dlm->reco.new_master != fr->node_idx) {
		mlog(ML_ERROR, "node %u sent recovery finalize msg, but node "
		     "%u is supposed to be the new master, dead=%u\n",
		     fr->node_idx, dlm->reco.new_master, fr->dead_node);
		BUG();
	}
	if (dlm->reco.dead_node != fr->dead_node) {
		mlog(ML_ERROR, "node %u sent recovery finalize msg for dead "
		     "node %u, but node %u is supposed to be dead\n",
		     fr->node_idx, fr->dead_node, dlm->reco.dead_node);
		BUG();
	}

	dlm_finish_local_lockres_recovery(dlm, fr->dead_node, fr->node_idx);

	spin_unlock(&dlm->spinlock);

	dlm_reset_recovery(dlm);

	dlm_kick_recovery_thread(dlm);

	dlm_put(dlm);
	return 0;
}
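
/*
 * dlm_reset_recovery(), called above, is defined elsewhere in this
 * file. At minimum it clears the reco state under dlm->spinlock so
 * the next dead node can be picked up; roughly (sketch only):
 */
#if 0	/* illustrative sketch, not compiled */
static void dlm_reset_recovery(struct dlm_ctxt *dlm)
{
	spin_lock(&dlm->spinlock);
	/* no recovery in progress: no dead node, no recovery master */
	dlm->reco.dead_node = O2NM_INVALID_NODE_NUM;
	dlm->reco.new_master = O2NM_INVALID_NODE_NUM;
	spin_unlock(&dlm->spinlock);
}
#endif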