/* -*- mode: c; c-basic-offset: 8; -*-
 * vim: noexpandtab sw=8 ts=8 sts=0:
 *
 * dlmrecovery.c
 *
 * recovery stuff
 *
 * Copyright (C) 2004 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 *
 */
#include <linux/module.h>
#include <linux/fs.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/highmem.h>
#include <linux/utsname.h>
#include <linux/init.h>
#include <linux/sysctl.h>
#include <linux/random.h>
#include <linux/blkdev.h>
#include <linux/socket.h>
#include <linux/inet.h>
#include <linux/timer.h>
#include <linux/kthread.h>
#include <linux/delay.h>


#include "cluster/heartbeat.h"
#include "cluster/nodemanager.h"
#include "cluster/tcp.h"

#include "dlmapi.h"
#include "dlmcommon.h"
#include "dlmdomain.h"

#define MLOG_MASK_PREFIX (ML_DLM|ML_DLM_RECOVERY)
#include "cluster/masklog.h"
static void dlm_do_local_recovery_cleanup(struct dlm_ctxt *dlm, u8 dead_node);

static int dlm_recovery_thread(void *data);
void dlm_complete_recovery_thread(struct dlm_ctxt *dlm);
int dlm_launch_recovery_thread(struct dlm_ctxt *dlm);
void dlm_kick_recovery_thread(struct dlm_ctxt *dlm);
static int dlm_do_recovery(struct dlm_ctxt *dlm);

static int dlm_pick_recovery_master(struct dlm_ctxt *dlm);
static int dlm_remaster_locks(struct dlm_ctxt *dlm, u8 dead_node);
static int dlm_init_recovery_area(struct dlm_ctxt *dlm, u8 dead_node);
static int dlm_request_all_locks(struct dlm_ctxt *dlm,
				 u8 request_from, u8 dead_node);
static void dlm_destroy_recovery_area(struct dlm_ctxt *dlm, u8 dead_node);

static inline int dlm_num_locks_in_lockres(struct dlm_lock_resource *res);
static void dlm_init_migratable_lockres(struct dlm_migratable_lockres *mres,
					const char *lockname, int namelen,
					int total_locks, u64 cookie,
					u8 flags, u8 master);
static int dlm_send_mig_lockres_msg(struct dlm_ctxt *dlm,
				    struct dlm_migratable_lockres *mres,
				    u8 send_to,
				    struct dlm_lock_resource *res,
				    int total_locks);
static int dlm_process_recovery_data(struct dlm_ctxt *dlm,
				     struct dlm_lock_resource *res,
				     struct dlm_migratable_lockres *mres);
static int dlm_send_finalize_reco_message(struct dlm_ctxt *dlm);
static int dlm_send_all_done_msg(struct dlm_ctxt *dlm,
				 u8 dead_node, u8 send_to);
static int dlm_send_begin_reco_message(struct dlm_ctxt *dlm, u8 dead_node);
static void dlm_move_reco_locks_to_list(struct dlm_ctxt *dlm,
					struct list_head *list, u8 dead_node);
static void dlm_finish_local_lockres_recovery(struct dlm_ctxt *dlm,
					      u8 dead_node, u8 new_master);
static void dlm_reco_ast(void *astdata);
static void dlm_reco_bast(void *astdata, int blocked_type);
static void dlm_reco_unlock_ast(void *astdata, enum dlm_status st);
static void dlm_request_all_locks_worker(struct dlm_work_item *item,
					 void *data);
static void dlm_mig_lockres_worker(struct dlm_work_item *item, void *data);
static int dlm_lockres_master_requery(struct dlm_ctxt *dlm,
				      struct dlm_lock_resource *res,
				      u8 *real_master);

static u64 dlm_get_next_mig_cookie(void);

static DEFINE_SPINLOCK(dlm_reco_state_lock);
static DEFINE_SPINLOCK(dlm_mig_cookie_lock);
static u64 dlm_mig_cookie = 1;
static u64 dlm_get_next_mig_cookie(void)
{
	u64 c;

	spin_lock(&dlm_mig_cookie_lock);
	c = dlm_mig_cookie;
	if (dlm_mig_cookie == (~0ULL))
		dlm_mig_cookie = 1;
	else
		dlm_mig_cookie++;
	spin_unlock(&dlm_mig_cookie_lock);
	return c;
}
static inline void dlm_set_reco_dead_node(struct dlm_ctxt *dlm,
					  u8 dead_node)
{
	assert_spin_locked(&dlm->spinlock);
	if (dlm->reco.dead_node != dead_node)
		mlog(0, "%s: changing dead_node from %u to %u\n",
		     dlm->name, dlm->reco.dead_node, dead_node);
	dlm->reco.dead_node = dead_node;
}

static inline void dlm_set_reco_master(struct dlm_ctxt *dlm,
				       u8 master)
{
	assert_spin_locked(&dlm->spinlock);
	mlog(0, "%s: changing new_master from %u to %u\n",
	     dlm->name, dlm->reco.new_master, master);
	dlm->reco.new_master = master;
}

static inline void __dlm_reset_recovery(struct dlm_ctxt *dlm)
{
	assert_spin_locked(&dlm->spinlock);
	clear_bit(dlm->reco.dead_node, dlm->recovery_map);
	dlm_set_reco_dead_node(dlm, O2NM_INVALID_NODE_NUM);
	dlm_set_reco_master(dlm, O2NM_INVALID_NODE_NUM);
}

static inline void dlm_reset_recovery(struct dlm_ctxt *dlm)
{
	spin_lock(&dlm->spinlock);
	__dlm_reset_recovery(dlm);
	spin_unlock(&dlm->spinlock);
}
/* Worker function used during recovery. */
void dlm_dispatch_work(struct work_struct *work)
{
	struct dlm_ctxt *dlm =
		container_of(work, struct dlm_ctxt, dispatched_work);
	LIST_HEAD(tmp_list);
	struct list_head *iter, *iter2;
	struct dlm_work_item *item;
	dlm_workfunc_t *workfunc;
	int tot = 0;

	spin_lock(&dlm->work_lock);
	list_splice_init(&dlm->work_list, &tmp_list);
	spin_unlock(&dlm->work_lock);

	list_for_each_safe(iter, iter2, &tmp_list) {
		tot++;
	}
	mlog(0, "%s: work thread has %d work items\n", dlm->name, tot);

	list_for_each_safe(iter, iter2, &tmp_list) {
		item = list_entry(iter, struct dlm_work_item, list);
		workfunc = item->func;
		list_del_init(&item->list);

		/* already have ref on dlm to avoid having
		 * it disappear.  just double-check. */
		BUG_ON(item->dlm != dlm);

		/* this is allowed to sleep and
		 * call network stuff */
		workfunc(item, item->data);

		dlm_put(dlm);
		kfree(item);
	}
}
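
/*
 * Illustrative sketch (editor's addition, not in the original file):
 * this is roughly how the message handlers further down hand work off
 * to dlm_dispatch_work().  The helper name dlm_queue_work_sketch() is
 * made up; the real call sites are in dlm_request_all_locks_handler()
 * and dlm_mig_lockres_handler().
 */
#if 0
static void dlm_queue_work_sketch(struct dlm_ctxt *dlm,
				  struct dlm_work_item *item,
				  dlm_workfunc_t *func, void *data)
{
	dlm_grab(dlm);			/* extra ref held by the work item */
	dlm_init_work_item(dlm, item, func, data);
	spin_lock(&dlm->work_lock);
	list_add_tail(&item->list, &dlm->work_list);
	spin_unlock(&dlm->work_lock);
	queue_work(dlm->dlm_worker, &dlm->dispatched_work);
}
#endif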
void dlm_kick_recovery_thread(struct dlm_ctxt *dlm)
{
	/* wake the recovery thread
	 * this will wake the reco thread in one of three places
	 * 1) sleeping with no recovery happening
	 * 2) sleeping with recovery mastered elsewhere
	 * 3) recovery mastered here, waiting on reco data */

	wake_up(&dlm->dlm_reco_thread_wq);
}

/* Launch the recovery thread */
int dlm_launch_recovery_thread(struct dlm_ctxt *dlm)
{
	mlog(0, "starting dlm recovery thread...\n");

	dlm->dlm_reco_thread_task = kthread_run(dlm_recovery_thread, dlm,
						"dlm_reco_thread");
	if (IS_ERR(dlm->dlm_reco_thread_task)) {
		mlog_errno(PTR_ERR(dlm->dlm_reco_thread_task));
		dlm->dlm_reco_thread_task = NULL;
		return -EINVAL;
	}

	return 0;
}

void dlm_complete_recovery_thread(struct dlm_ctxt *dlm)
{
	if (dlm->dlm_reco_thread_task) {
		mlog(0, "waiting for dlm recovery thread to exit\n");
		kthread_stop(dlm->dlm_reco_thread_task);
		dlm->dlm_reco_thread_task = NULL;
	}
}
/*
 * this is lame, but here's how recovery works...
 * 1) all recovery threads cluster wide will work on recovering
 *    one node at a time
 * 2) negotiate who will take over all the locks for the dead node.
 *    that's right... ALL the locks.
 * 3) once a new master is chosen, everyone scans all locks
 *    and moves aside those mastered by the dead guy
 * 4) each of these locks should be locked until recovery is done
 * 5) the new master collects up all of secondary lock queue info
 *    one lock at a time, forcing each node to communicate back
 *    before continuing
 * 6) each secondary lock queue responds with the full known lock info
 * 7) once the new master has run all its locks, it sends an ALLDONE!
 *    message to everyone
 * 8) upon receiving this message, the secondary queue node unlocks
 *    and responds to the ALLDONE
 * 9) once the new master gets responses from everyone, it unlocks
 *    everything and recovery for this dead node is done
 * 10) go back to 2) while there are still dead nodes
 */
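
/*
 * Illustrative sketch (editor's addition, not in the original file):
 * the master-side sequence from steps 2)-9) above, expressed with the
 * helpers declared at the top of this file.  The per-node state
 * machine and all error/death handling are omitted; see
 * dlm_remaster_locks() below for the real implementation.
 */
#if 0
static void dlm_reco_sequence_sketch(struct dlm_ctxt *dlm, u8 dead_node)
{
	struct dlm_reco_node_data *ndata;

	dlm_init_recovery_area(dlm, dead_node);		/* step 3 prep */
	list_for_each_entry(ndata, &dlm->reco.node_data, list)
		dlm_request_all_locks(dlm, ndata->node_num,
				      dead_node);	/* step 5 */
	/* ... wait for DLM_RECO_DATA_DONE from every node (step 6) ... */
	dlm_send_finalize_reco_message(dlm);		/* steps 7-9 */
	dlm_destroy_recovery_area(dlm, dead_node);
}
#endif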
static void dlm_print_reco_node_status(struct dlm_ctxt *dlm)
{
	struct dlm_reco_node_data *ndata;
	struct dlm_lock_resource *res;

	mlog(ML_NOTICE, "%s(%d): recovery info, state=%s, dead=%u, master=%u\n",
	     dlm->name, dlm->dlm_reco_thread_task->pid,
	     dlm->reco.state & DLM_RECO_STATE_ACTIVE ? "ACTIVE" : "inactive",
	     dlm->reco.dead_node, dlm->reco.new_master);

	list_for_each_entry(ndata, &dlm->reco.node_data, list) {
		char *st = "unknown";
		switch (ndata->state) {
			case DLM_RECO_NODE_DATA_INIT:
				st = "init";
				break;
			case DLM_RECO_NODE_DATA_REQUESTING:
				st = "requesting";
				break;
			case DLM_RECO_NODE_DATA_DEAD:
				st = "dead";
				break;
			case DLM_RECO_NODE_DATA_RECEIVING:
				st = "receiving";
				break;
			case DLM_RECO_NODE_DATA_REQUESTED:
				st = "requested";
				break;
			case DLM_RECO_NODE_DATA_DONE:
				st = "done";
				break;
			case DLM_RECO_NODE_DATA_FINALIZE_SENT:
				st = "finalize-sent";
				break;
			default:
				st = "bad";
				break;
		}
		mlog(ML_NOTICE, "%s: reco state, node %u, state=%s\n",
		     dlm->name, ndata->node_num, st);
	}

	list_for_each_entry(res, &dlm->reco.resources, recovering) {
		mlog(ML_NOTICE, "%s: lockres %.*s on recovering list\n",
		     dlm->name, res->lockname.len, res->lockname.name);
	}
}
#define DLM_RECO_THREAD_TIMEOUT_MS (5 * 1000)

static int dlm_recovery_thread(void *data)
{
	int status;
	struct dlm_ctxt *dlm = data;
	unsigned long timeout = msecs_to_jiffies(DLM_RECO_THREAD_TIMEOUT_MS);

	mlog(0, "dlm thread running for %s...\n", dlm->name);

	while (!kthread_should_stop()) {
		if (dlm_joined(dlm)) {
			status = dlm_do_recovery(dlm);
			if (status == -EAGAIN) {
				/* do not sleep, recheck immediately. */
				continue;
			}
			if (status < 0)
				mlog_errno(status);
		}

		wait_event_interruptible_timeout(dlm->dlm_reco_thread_wq,
						 kthread_should_stop(),
						 timeout);
	}

	mlog(0, "quitting DLM recovery thread\n");
	return 0;
}
/* returns true when the recovery master has contacted us */
static int dlm_reco_master_ready(struct dlm_ctxt *dlm)
{
	int ready;
	spin_lock(&dlm->spinlock);
	ready = (dlm->reco.new_master != O2NM_INVALID_NODE_NUM);
	spin_unlock(&dlm->spinlock);
	return ready;
}

/* returns true if node is no longer in the domain
 * could be dead or just not joined */
int dlm_is_node_dead(struct dlm_ctxt *dlm, u8 node)
{
	int dead;
	spin_lock(&dlm->spinlock);
	dead = !test_bit(node, dlm->domain_map);
	spin_unlock(&dlm->spinlock);
	return dead;
}

/* returns true if node has been cleared from the recovery map,
 * i.e. its locks have been remastered or it was never up */
static int dlm_is_node_recovered(struct dlm_ctxt *dlm, u8 node)
{
	int recovered;
	spin_lock(&dlm->spinlock);
	recovered = !test_bit(node, dlm->recovery_map);
	spin_unlock(&dlm->spinlock);
	return recovered;
}
int dlm_wait_for_node_death(struct dlm_ctxt *dlm, u8 node, int timeout)
{
	if (timeout) {
		mlog(ML_NOTICE, "%s: waiting %dms for notification of "
		     "death of node %u\n", dlm->name, timeout, node);
		wait_event_timeout(dlm->dlm_reco_thread_wq,
				   dlm_is_node_dead(dlm, node),
				   msecs_to_jiffies(timeout));
	} else {
		mlog(ML_NOTICE, "%s: waiting indefinitely for notification "
		     "of death of node %u\n", dlm->name, node);
		wait_event(dlm->dlm_reco_thread_wq,
			   dlm_is_node_dead(dlm, node));
	}
	/* for now, return 0 */
	return 0;
}

int dlm_wait_for_node_recovery(struct dlm_ctxt *dlm, u8 node, int timeout)
{
	if (timeout) {
		mlog(0, "%s: waiting %dms for notification of "
		     "recovery of node %u\n", dlm->name, timeout, node);
		wait_event_timeout(dlm->dlm_reco_thread_wq,
				   dlm_is_node_recovered(dlm, node),
				   msecs_to_jiffies(timeout));
	} else {
		mlog(0, "%s: waiting indefinitely for notification "
		     "of recovery of node %u\n", dlm->name, node);
		wait_event(dlm->dlm_reco_thread_wq,
			   dlm_is_node_recovered(dlm, node));
	}
	/* for now, return 0 */
	return 0;
}
/* callers of the top-level api calls (dlmlock/dlmunlock) should
 * block on the dlm->reco.event when recovery is in progress.
 * the dlm recovery thread will set this state when it begins
 * recovering a dead node (as the new master or not) and clear
 * the state and wake as soon as all affected lock resources have
 * been marked with the RECOVERY flag */
static int dlm_in_recovery(struct dlm_ctxt *dlm)
{
	int in_recovery;
	spin_lock(&dlm->spinlock);
	in_recovery = !!(dlm->reco.state & DLM_RECO_STATE_ACTIVE);
	spin_unlock(&dlm->spinlock);
	return in_recovery;
}


void dlm_wait_for_recovery(struct dlm_ctxt *dlm)
{
	if (dlm_in_recovery(dlm)) {
		mlog(0, "%s: reco thread %d in recovery: "
		     "state=%d, master=%u, dead=%u\n",
		     dlm->name, dlm->dlm_reco_thread_task->pid,
		     dlm->reco.state, dlm->reco.new_master,
		     dlm->reco.dead_node);
	}
	wait_event(dlm->reco.event, !dlm_in_recovery(dlm));
}

static void dlm_begin_recovery(struct dlm_ctxt *dlm)
{
	spin_lock(&dlm->spinlock);
	BUG_ON(dlm->reco.state & DLM_RECO_STATE_ACTIVE);
	dlm->reco.state |= DLM_RECO_STATE_ACTIVE;
	spin_unlock(&dlm->spinlock);
}

static void dlm_end_recovery(struct dlm_ctxt *dlm)
{
	spin_lock(&dlm->spinlock);
	BUG_ON(!(dlm->reco.state & DLM_RECO_STATE_ACTIVE));
	dlm->reco.state &= ~DLM_RECO_STATE_ACTIVE;
	spin_unlock(&dlm->spinlock);
	wake_up(&dlm->reco.event);
}
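
/*
 * Illustrative sketch (editor's addition, not in the original file):
 * how a top-level entry point uses the gate above.  The function name
 * is made up; the real callers are the dlmlock/dlmunlock paths.
 */
#if 0
static enum dlm_status dlm_api_entry_sketch(struct dlm_ctxt *dlm)
{
	/* block until no dead node is being actively recovered... */
	dlm_wait_for_recovery(dlm);
	/* ...then proceed; individual lockreses still marked
	 * RECOVERING are waited on by the lock paths themselves. */
	return DLM_NORMAL;
}
#endif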
static int dlm_do_recovery(struct dlm_ctxt *dlm)
{
	int status = 0;
	int ret;

	spin_lock(&dlm->spinlock);

	/* check to see if the new master has died */
	if (dlm->reco.new_master != O2NM_INVALID_NODE_NUM &&
	    test_bit(dlm->reco.new_master, dlm->recovery_map)) {
		mlog(0, "new master %u died while recovering %u!\n",
		     dlm->reco.new_master, dlm->reco.dead_node);
		/* unset the new_master, leave dead_node */
		dlm_set_reco_master(dlm, O2NM_INVALID_NODE_NUM);
	}

	/* select a target to recover */
	if (dlm->reco.dead_node == O2NM_INVALID_NODE_NUM) {
		int bit;

		bit = find_next_bit (dlm->recovery_map, O2NM_MAX_NODES+1, 0);
		if (bit >= O2NM_MAX_NODES || bit < 0)
			dlm_set_reco_dead_node(dlm, O2NM_INVALID_NODE_NUM);
		else
			dlm_set_reco_dead_node(dlm, bit);
	} else if (!test_bit(dlm->reco.dead_node, dlm->recovery_map)) {
		/* BUG? */
		mlog(ML_ERROR, "dead_node %u no longer in recovery map!\n",
		     dlm->reco.dead_node);
		dlm_set_reco_dead_node(dlm, O2NM_INVALID_NODE_NUM);
	}

	if (dlm->reco.dead_node == O2NM_INVALID_NODE_NUM) {
		// mlog(0, "nothing to recover!  sleeping now!\n");
		spin_unlock(&dlm->spinlock);
		/* return to main thread loop and sleep. */
		return 0;
	}
	mlog(0, "%s(%d):recovery thread found node %u in the recovery map!\n",
	     dlm->name, dlm->dlm_reco_thread_task->pid,
	     dlm->reco.dead_node);
	spin_unlock(&dlm->spinlock);

	/* take write barrier */
	/* (stops the list reshuffling thread, proxy ast handling) */
	dlm_begin_recovery(dlm);

	if (dlm->reco.new_master == dlm->node_num)
		goto master_here;

	if (dlm->reco.new_master == O2NM_INVALID_NODE_NUM) {
		/* choose a new master, returns 0 if this node
		 * is the master, -EEXIST if it's another node.
		 * this does not return until a new master is chosen
		 * or recovery completes entirely. */
		ret = dlm_pick_recovery_master(dlm);
		if (!ret) {
			/* already notified everyone.  go. */
			goto master_here;
		}
		mlog(0, "another node will master this recovery session.\n");
	}
	mlog(0, "dlm=%s (%d), new_master=%u, this node=%u, dead_node=%u\n",
	     dlm->name, dlm->dlm_reco_thread_task->pid, dlm->reco.new_master,
	     dlm->node_num, dlm->reco.dead_node);

	/* it is safe to start everything back up here
	 * because all of the dead node's lock resources
	 * have been marked as in-recovery */
	dlm_end_recovery(dlm);

	/* sleep out in main dlm_recovery_thread loop. */
	return 0;

master_here:
	mlog(0, "(%d) mastering recovery of %s:%u here(this=%u)!\n",
	     dlm->dlm_reco_thread_task->pid,
	     dlm->name, dlm->reco.dead_node, dlm->node_num);

	status = dlm_remaster_locks(dlm, dlm->reco.dead_node);
	if (status < 0) {
		/* we should never hit this anymore */
		mlog(ML_ERROR, "error %d remastering locks for node %u, "
		     "retrying.\n", status, dlm->reco.dead_node);
		/* yield a bit to allow any final network messages
		 * to get handled on remaining nodes */
		msleep(100);
	} else {
		/* success!  see if any other nodes need recovery */
		mlog(0, "DONE mastering recovery of %s:%u here(this=%u)!\n",
		     dlm->name, dlm->reco.dead_node, dlm->node_num);
		dlm_reset_recovery(dlm);
	}
	dlm_end_recovery(dlm);

	/* continue and look for another dead node */
	return -EAGAIN;
}
static int dlm_remaster_locks(struct dlm_ctxt *dlm, u8 dead_node)
{
	int status = 0;
	struct dlm_reco_node_data *ndata;
	struct list_head *iter;
	int all_nodes_done;
	int destroy = 0;
	int pass = 0;

	do {
		/* we have become recovery master.  there is no escaping
		 * this, so just keep trying until we get it. */
		status = dlm_init_recovery_area(dlm, dead_node);
		if (status < 0) {
			mlog(ML_ERROR, "%s: failed to alloc recovery area, "
			     "retrying\n", dlm->name);
			msleep(1000);
		}
	} while (status != 0);

	/* safe to access the node data list without a lock, since this
	 * process is the only one to change the list */
	list_for_each(iter, &dlm->reco.node_data) {
		ndata = list_entry (iter, struct dlm_reco_node_data, list);
		BUG_ON(ndata->state != DLM_RECO_NODE_DATA_INIT);
		ndata->state = DLM_RECO_NODE_DATA_REQUESTING;

		mlog(0, "requesting lock info from node %u\n",
		     ndata->node_num);

		if (ndata->node_num == dlm->node_num) {
			ndata->state = DLM_RECO_NODE_DATA_DONE;
			continue;
		}

		do {
			status = dlm_request_all_locks(dlm, ndata->node_num,
						       dead_node);
			if (status < 0) {
				mlog_errno(status);
				if (dlm_is_host_down(status)) {
					/* node died, ignore it for recovery */
					status = 0;
					ndata->state = DLM_RECO_NODE_DATA_DEAD;
					/* wait for the domain map to catch up
					 * with the network state. */
					wait_event_timeout(dlm->dlm_reco_thread_wq,
							   dlm_is_node_dead(dlm,
								ndata->node_num),
							   msecs_to_jiffies(1000));
					mlog(0, "waited 1 sec for %u, "
					     "dead? %s\n", ndata->node_num,
					     dlm_is_node_dead(dlm, ndata->node_num) ?
					     "yes" : "no");
				} else {
					/* -ENOMEM on the other node */
					mlog(0, "%s: node %u returned "
					     "%d during recovery, retrying "
					     "after a short wait\n",
					     dlm->name, ndata->node_num,
					     status);
					msleep(100);
				}
			}
		} while (status != 0);

		spin_lock(&dlm_reco_state_lock);
		switch (ndata->state) {
			case DLM_RECO_NODE_DATA_INIT:
			case DLM_RECO_NODE_DATA_FINALIZE_SENT:
			case DLM_RECO_NODE_DATA_REQUESTED:
				BUG();
				break;
			case DLM_RECO_NODE_DATA_DEAD:
				mlog(0, "node %u died after requesting "
				     "recovery info for node %u\n",
				     ndata->node_num, dead_node);
				/* fine.  don't need this node's info.
				 * continue without it. */
				break;
			case DLM_RECO_NODE_DATA_REQUESTING:
				ndata->state = DLM_RECO_NODE_DATA_REQUESTED;
				mlog(0, "now receiving recovery data from "
				     "node %u for dead node %u\n",
				     ndata->node_num, dead_node);
				break;
			case DLM_RECO_NODE_DATA_RECEIVING:
				mlog(0, "already receiving recovery data from "
				     "node %u for dead node %u\n",
				     ndata->node_num, dead_node);
				break;
			case DLM_RECO_NODE_DATA_DONE:
				mlog(0, "already DONE receiving recovery data "
				     "from node %u for dead node %u\n",
				     ndata->node_num, dead_node);
				break;
		}
		spin_unlock(&dlm_reco_state_lock);
	}

	mlog(0, "done requesting all lock info\n");

	/* nodes should be sending reco data now
	 * just need to wait */

	while (1) {
		/* check all the nodes now to see if we are
		 * done, or if anyone died */
		all_nodes_done = 1;
		spin_lock(&dlm_reco_state_lock);
		list_for_each(iter, &dlm->reco.node_data) {
			ndata = list_entry (iter, struct dlm_reco_node_data, list);

			mlog(0, "checking recovery state of node %u\n",
			     ndata->node_num);
			switch (ndata->state) {
				case DLM_RECO_NODE_DATA_INIT:
				case DLM_RECO_NODE_DATA_REQUESTING:
					mlog(ML_ERROR, "bad ndata state for "
					     "node %u: state=%d\n",
					     ndata->node_num, ndata->state);
					BUG();
					break;
				case DLM_RECO_NODE_DATA_DEAD:
					mlog(0, "node %u died after "
					     "requesting recovery info for "
					     "node %u\n", ndata->node_num,
					     dead_node);
					break;
				case DLM_RECO_NODE_DATA_RECEIVING:
				case DLM_RECO_NODE_DATA_REQUESTED:
					mlog(0, "%s: node %u still in state %s\n",
					     dlm->name, ndata->node_num,
					     ndata->state==DLM_RECO_NODE_DATA_RECEIVING ?
					     "receiving" : "requested");
					all_nodes_done = 0;
					break;
				case DLM_RECO_NODE_DATA_DONE:
					mlog(0, "%s: node %u state is done\n",
					     dlm->name, ndata->node_num);
					break;
				case DLM_RECO_NODE_DATA_FINALIZE_SENT:
					mlog(0, "%s: node %u state is finalize\n",
					     dlm->name, ndata->node_num);
					break;
			}
		}
		spin_unlock(&dlm_reco_state_lock);

		mlog(0, "pass #%d, all_nodes_done?: %s\n", ++pass,
		     all_nodes_done?"yes":"no");
		if (all_nodes_done) {
			int ret;

			/* all nodes are now in DLM_RECO_NODE_DATA_DONE state
			 * just send a finalize message to everyone and
			 * clean up */
			mlog(0, "all nodes are done! send finalize\n");
			ret = dlm_send_finalize_reco_message(dlm);
			if (ret < 0)
				mlog_errno(ret);

			spin_lock(&dlm->spinlock);
			dlm_finish_local_lockres_recovery(dlm, dead_node,
							  dlm->node_num);
			spin_unlock(&dlm->spinlock);
			mlog(0, "should be done with recovery!\n");

			mlog(0, "finishing recovery of %s at %lu, "
			     "dead=%u, this=%u, new=%u\n", dlm->name,
			     jiffies, dlm->reco.dead_node,
			     dlm->node_num, dlm->reco.new_master);
			destroy = 1;
			status = 0;
			/* rescan everything marked dirty along the way */
			dlm_kick_thread(dlm, NULL);
			break;
		}
		/* wait to be signalled, with periodic timeout
		 * to check for node death */
		wait_event_interruptible_timeout(dlm->dlm_reco_thread_wq,
					 kthread_should_stop(),
					 msecs_to_jiffies(DLM_RECO_THREAD_TIMEOUT_MS));

	}

	if (destroy)
		dlm_destroy_recovery_area(dlm, dead_node);

	return status;
}
static int dlm_init_recovery_area(struct dlm_ctxt *dlm, u8 dead_node)
{
	int num = 0;
	struct dlm_reco_node_data *ndata;

	spin_lock(&dlm->spinlock);
	memcpy(dlm->reco.node_map, dlm->domain_map, sizeof(dlm->domain_map));
	/* nodes can only be removed (by dying) after dropping
	 * this lock, and death will be trapped later, so this should do */
	spin_unlock(&dlm->spinlock);

	while (1) {
		num = find_next_bit (dlm->reco.node_map, O2NM_MAX_NODES, num);
		if (num >= O2NM_MAX_NODES) {
			break;
		}
		BUG_ON(num == dead_node);

		ndata = kzalloc(sizeof(*ndata), GFP_NOFS);
		if (!ndata) {
			dlm_destroy_recovery_area(dlm, dead_node);
			return -ENOMEM;
		}
		ndata->node_num = num;
		ndata->state = DLM_RECO_NODE_DATA_INIT;
		spin_lock(&dlm_reco_state_lock);
		list_add_tail(&ndata->list, &dlm->reco.node_data);
		spin_unlock(&dlm_reco_state_lock);
		num++;
	}

	return 0;
}
static void dlm_destroy_recovery_area(struct dlm_ctxt *dlm, u8 dead_node)
{
	struct list_head *iter, *iter2;
	struct dlm_reco_node_data *ndata;
	LIST_HEAD(tmplist);

	spin_lock(&dlm_reco_state_lock);
	list_splice_init(&dlm->reco.node_data, &tmplist);
	spin_unlock(&dlm_reco_state_lock);

	list_for_each_safe(iter, iter2, &tmplist) {
		ndata = list_entry (iter, struct dlm_reco_node_data, list);
		list_del_init(&ndata->list);
		kfree(ndata);
	}
}
static int dlm_request_all_locks(struct dlm_ctxt *dlm, u8 request_from,
				 u8 dead_node)
{
	struct dlm_lock_request lr;
	int ret;

	mlog(0, "dlm_request_all_locks: dead node is %u, sending request "
	     "to %u\n", dead_node, request_from);

	memset(&lr, 0, sizeof(lr));
	lr.node_idx = dlm->node_num;
	lr.dead_node = dead_node;

	/* send message; negative status is handled by caller */
	ret = o2net_send_message(DLM_LOCK_REQUEST_MSG, dlm->key,
				 &lr, sizeof(lr), request_from, NULL);
	if (ret < 0)
		mlog_errno(ret);

	/* return from here, then
	 * sleep until all received or error */
	return ret;
}
int dlm_request_all_locks_handler(struct o2net_msg *msg, u32 len, void *data,
				  void **ret_data)
{
	struct dlm_ctxt *dlm = data;
	struct dlm_lock_request *lr = (struct dlm_lock_request *)msg->buf;
	char *buf = NULL;
	struct dlm_work_item *item = NULL;

	if (!dlm_grab(dlm))
		return -EINVAL;

	if (lr->dead_node != dlm->reco.dead_node) {
		mlog(ML_ERROR, "%s: node %u sent dead_node=%u, but local "
		     "dead_node is %u\n", dlm->name, lr->node_idx,
		     lr->dead_node, dlm->reco.dead_node);
		dlm_print_reco_node_status(dlm);
		/* this is a hack */
		dlm_put(dlm);
		return -ENOMEM;
	}
	BUG_ON(lr->dead_node != dlm->reco.dead_node);

	item = kzalloc(sizeof(*item), GFP_NOFS);
	if (!item) {
		dlm_put(dlm);
		return -ENOMEM;
	}

	/* this will get freed by dlm_request_all_locks_worker */
	buf = (char *) __get_free_page(GFP_NOFS);
	if (!buf) {
		kfree(item);
		dlm_put(dlm);
		return -ENOMEM;
	}

	/* queue up work for dlm_request_all_locks_worker */
	dlm_grab(dlm);  /* get an extra ref for the work item */
	dlm_init_work_item(dlm, item, dlm_request_all_locks_worker, buf);
	item->u.ral.reco_master = lr->node_idx;
	item->u.ral.dead_node = lr->dead_node;
	spin_lock(&dlm->work_lock);
	list_add_tail(&item->list, &dlm->work_list);
	spin_unlock(&dlm->work_lock);
	queue_work(dlm->dlm_worker, &dlm->dispatched_work);

	dlm_put(dlm);
	return 0;
}
static void dlm_request_all_locks_worker(struct dlm_work_item *item, void *data)
{
	struct dlm_migratable_lockres *mres;
	struct dlm_lock_resource *res;
	struct dlm_ctxt *dlm;
	LIST_HEAD(resources);
	struct list_head *iter;
	int ret;
	u8 dead_node, reco_master;
	int skip_all_done = 0;

	dlm = item->dlm;
	dead_node = item->u.ral.dead_node;
	reco_master = item->u.ral.reco_master;
	mres = (struct dlm_migratable_lockres *)data;

	mlog(0, "%s: recovery worker started, dead=%u, master=%u\n",
	     dlm->name, dead_node, reco_master);

	if (dead_node != dlm->reco.dead_node ||
	    reco_master != dlm->reco.new_master) {
		/* worker could have been created before the recovery master
		 * died.  if so, do not continue, but do not error. */
		if (dlm->reco.new_master == O2NM_INVALID_NODE_NUM) {
			mlog(ML_NOTICE, "%s: will not send recovery state, "
			     "recovery master %u died, thread=(dead=%u,mas=%u)"
			     " current=(dead=%u,mas=%u)\n", dlm->name,
			     reco_master, dead_node, reco_master,
			     dlm->reco.dead_node, dlm->reco.new_master);
		} else {
			mlog(ML_NOTICE, "%s: reco state invalid: reco(dead=%u, "
			     "master=%u), request(dead=%u, master=%u)\n",
			     dlm->name, dlm->reco.dead_node,
			     dlm->reco.new_master, dead_node, reco_master);
		}
		goto leave;
	}

	/* lock resources should have already been moved to the
	 * dlm->reco.resources list.  now move items from that list
	 * to a temp list if the dead owner matches.  note that the
	 * whole cluster recovers only one node at a time, so we
	 * can safely move UNKNOWN lock resources for each recovery
	 * session. */
	dlm_move_reco_locks_to_list(dlm, &resources, dead_node);

	/* now we can begin blasting lockreses without the dlm lock */

	/* any errors returned will be due to the new_master dying,
	 * the dlm_reco_thread should detect this */
	list_for_each(iter, &resources) {
		res = list_entry (iter, struct dlm_lock_resource, recovering);
		ret = dlm_send_one_lockres(dlm, res, mres, reco_master,
					   DLM_MRES_RECOVERY);
		if (ret < 0) {
			mlog(ML_ERROR, "%s: node %u went down while sending "
			     "recovery state for dead node %u, ret=%d\n", dlm->name,
			     reco_master, dead_node, ret);
			skip_all_done = 1;
			break;
		}
	}

	/* move the resources back to the list */
	spin_lock(&dlm->spinlock);
	list_splice_init(&resources, &dlm->reco.resources);
	spin_unlock(&dlm->spinlock);

	if (!skip_all_done) {
		ret = dlm_send_all_done_msg(dlm, dead_node, reco_master);
		if (ret < 0) {
			mlog(ML_ERROR, "%s: node %u went down while sending "
			     "recovery all-done for dead node %u, ret=%d\n",
			     dlm->name, reco_master, dead_node, ret);
		}
	}
leave:
	free_page((unsigned long)data);
}
static int dlm_send_all_done_msg(struct dlm_ctxt *dlm, u8 dead_node, u8 send_to)
{
	int ret, tmpret;
	struct dlm_reco_data_done done_msg;

	memset(&done_msg, 0, sizeof(done_msg));
	done_msg.node_idx = dlm->node_num;
	done_msg.dead_node = dead_node;
	mlog(0, "sending DATA DONE message to %u, "
	     "my node=%u, dead node=%u\n", send_to, done_msg.node_idx,
	     done_msg.dead_node);

	ret = o2net_send_message(DLM_RECO_DATA_DONE_MSG, dlm->key, &done_msg,
				 sizeof(done_msg), send_to, &tmpret);
	if (ret < 0) {
		if (!dlm_is_host_down(ret)) {
			mlog_errno(ret);
			mlog(ML_ERROR, "%s: unknown error sending data-done "
			     "to %u\n", dlm->name, send_to);
			BUG();
		}
	} else
		ret = tmpret;
	return ret;
}
int dlm_reco_data_done_handler(struct o2net_msg *msg, u32 len, void *data,
			       void **ret_data)
{
	struct dlm_ctxt *dlm = data;
	struct dlm_reco_data_done *done = (struct dlm_reco_data_done *)msg->buf;
	struct list_head *iter;
	struct dlm_reco_node_data *ndata = NULL;
	int ret = -EINVAL;

	if (!dlm_grab(dlm))
		return -EINVAL;

	mlog(0, "got DATA DONE: dead_node=%u, reco.dead_node=%u, "
	     "node_idx=%u, this node=%u\n", done->dead_node,
	     dlm->reco.dead_node, done->node_idx, dlm->node_num);

	mlog_bug_on_msg((done->dead_node != dlm->reco.dead_node),
			"Got DATA DONE: dead_node=%u, reco.dead_node=%u, "
			"node_idx=%u, this node=%u\n", done->dead_node,
			dlm->reco.dead_node, done->node_idx, dlm->node_num);

	spin_lock(&dlm_reco_state_lock);
	list_for_each(iter, &dlm->reco.node_data) {
		ndata = list_entry (iter, struct dlm_reco_node_data, list);
		if (ndata->node_num != done->node_idx)
			continue;

		switch (ndata->state) {
			/* should have moved beyond INIT but not to FINALIZE yet */
			case DLM_RECO_NODE_DATA_INIT:
			case DLM_RECO_NODE_DATA_DEAD:
			case DLM_RECO_NODE_DATA_FINALIZE_SENT:
				mlog(ML_ERROR, "bad ndata state for node %u:"
				     " state=%d\n", ndata->node_num,
				     ndata->state);
				BUG();
				break;
			/* these states are possible at this point, anywhere along
			 * the line of recovery */
			case DLM_RECO_NODE_DATA_DONE:
			case DLM_RECO_NODE_DATA_RECEIVING:
			case DLM_RECO_NODE_DATA_REQUESTED:
			case DLM_RECO_NODE_DATA_REQUESTING:
				mlog(0, "node %u is DONE sending "
				     "recovery data!\n",
				     ndata->node_num);

				ndata->state = DLM_RECO_NODE_DATA_DONE;
				ret = 0;
				break;
		}
	}
	spin_unlock(&dlm_reco_state_lock);

	/* wake the recovery thread, some node is done */
	if (!ret)
		dlm_kick_recovery_thread(dlm);

	if (ret < 0)
		mlog(ML_ERROR, "failed to find recovery node data for node "
		     "%u\n", done->node_idx);
	dlm_put(dlm);

	mlog(0, "leaving reco data done handler, ret=%d\n", ret);
	return ret;
}
static void dlm_move_reco_locks_to_list(struct dlm_ctxt *dlm,
					struct list_head *list,
					u8 dead_node)
{
	struct dlm_lock_resource *res;
	struct list_head *iter, *iter2;
	struct dlm_lock *lock;

	spin_lock(&dlm->spinlock);
	list_for_each_safe(iter, iter2, &dlm->reco.resources) {
		res = list_entry (iter, struct dlm_lock_resource, recovering);
		/* always prune any $RECOVERY entries for dead nodes,
		 * otherwise hangs can occur during later recovery */
		if (dlm_is_recovery_lock(res->lockname.name,
					 res->lockname.len)) {
			spin_lock(&res->spinlock);
			list_for_each_entry(lock, &res->granted, list) {
				if (lock->ml.node == dead_node) {
					mlog(0, "AHA! there was "
					     "a $RECOVERY lock for dead "
					     "node %u (%s)!\n",
					     dead_node, dlm->name);
					list_del_init(&lock->list);
					dlm_lock_put(lock);
					break;
				}
			}
			spin_unlock(&res->spinlock);
			continue;
		}

		if (res->owner == dead_node) {
			mlog(0, "found lockres owned by dead node while "
			     "doing recovery for node %u. sending it.\n",
			     dead_node);
			list_move_tail(&res->recovering, list);
		} else if (res->owner == DLM_LOCK_RES_OWNER_UNKNOWN) {
			mlog(0, "found UNKNOWN owner while doing recovery "
			     "for node %u. sending it.\n", dead_node);
			list_move_tail(&res->recovering, list);
		}
	}
	spin_unlock(&dlm->spinlock);
}
static inline int dlm_num_locks_in_lockres(struct dlm_lock_resource *res)
{
	int total_locks = 0;
	struct list_head *iter, *queue = &res->granted;
	int i;

	/* walks granted, converting and blocked in turn; relies on the
	 * three list heads being adjacent in struct dlm_lock_resource */
	for (i=0; i<3; i++) {
		list_for_each(iter, queue)
			total_locks++;
		queue++;
	}
	return total_locks;
}
static int dlm_send_mig_lockres_msg(struct dlm_ctxt *dlm,
				    struct dlm_migratable_lockres *mres,
				    u8 send_to,
				    struct dlm_lock_resource *res,
				    int total_locks)
{
	u64 mig_cookie = be64_to_cpu(mres->mig_cookie);
	int mres_total_locks = be32_to_cpu(mres->total_locks);
	int sz, ret = 0, status = 0;
	u8 orig_flags = mres->flags,
	   orig_master = mres->master;

	BUG_ON(mres->num_locks > DLM_MAX_MIGRATABLE_LOCKS);
	if (!mres->num_locks)
		return 0;

	sz = sizeof(struct dlm_migratable_lockres) +
		(mres->num_locks * sizeof(struct dlm_migratable_lock));

	/* add an all-done flag if we reached the last lock */
	orig_flags = mres->flags;
	BUG_ON(total_locks > mres_total_locks);
	if (total_locks == mres_total_locks)
		mres->flags |= DLM_MRES_ALL_DONE;

	mlog(0, "%s:%.*s: sending mig lockres (%s) to %u\n",
	     dlm->name, res->lockname.len, res->lockname.name,
	     orig_flags & DLM_MRES_MIGRATION ? "migrate" : "recovery",
	     send_to);

	/* send it */
	ret = o2net_send_message(DLM_MIG_LOCKRES_MSG, dlm->key, mres,
				 sz, send_to, &status);
	if (ret < 0) {
		/* XXX: negative status is not handled.
		 * this will end up killing this node. */
		mlog_errno(ret);
	} else {
		/* might get an -ENOMEM back here */
		ret = status;
		if (ret < 0) {
			mlog_errno(ret);

			if (ret == -EFAULT) {
				mlog(ML_ERROR, "node %u told me to kill "
				     "myself!\n", send_to);
				BUG();
			}
		}
	}

	/* zero and reinit the message buffer */
	dlm_init_migratable_lockres(mres, res->lockname.name,
				    res->lockname.len, mres_total_locks,
				    mig_cookie, orig_flags, orig_master);
	return ret;
}
static void dlm_init_migratable_lockres(struct dlm_migratable_lockres *mres,
					const char *lockname, int namelen,
					int total_locks, u64 cookie,
					u8 flags, u8 master)
{
	/* mres here is one full page */
	memset(mres, 0, PAGE_SIZE);
	mres->lockname_len = namelen;
	memcpy(mres->lockname, lockname, namelen);
	mres->num_locks = 0;
	mres->total_locks = cpu_to_be32(total_locks);
	mres->mig_cookie = cpu_to_be64(cookie);
	mres->flags = flags;
	mres->master = master;
}
/* returns 1 if this lock fills the network structure,
 * 0 otherwise */
static int dlm_add_lock_to_array(struct dlm_lock *lock,
				 struct dlm_migratable_lockres *mres, int queue)
{
	struct dlm_migratable_lock *ml;
	int lock_num = mres->num_locks;

	ml = &(mres->ml[lock_num]);
	ml->cookie = lock->ml.cookie;
	ml->type = lock->ml.type;
	ml->convert_type = lock->ml.convert_type;
	ml->highest_blocked = lock->ml.highest_blocked;
	ml->list = queue;
	if (lock->lksb) {
		ml->flags = lock->lksb->flags;
		/* send our current lvb */
		if (ml->type == LKM_EXMODE ||
		    ml->type == LKM_PRMODE) {
			/* if it is already set, this had better be a PR
			 * and it has to match */
			if (!dlm_lvb_is_empty(mres->lvb) &&
			    (ml->type == LKM_EXMODE ||
			     memcmp(mres->lvb, lock->lksb->lvb, DLM_LVB_LEN))) {
				mlog(ML_ERROR, "mismatched lvbs!\n");
				__dlm_print_one_lock_resource(lock->lockres);
				BUG();
			}
			memcpy(mres->lvb, lock->lksb->lvb, DLM_LVB_LEN);
		}
	}
	ml->node = lock->ml.node;
	mres->num_locks++;
	/* we reached the max, send this network message */
	if (mres->num_locks == DLM_MAX_MIGRATABLE_LOCKS)
		return 1;
	return 0;
}
static void dlm_add_dummy_lock(struct dlm_ctxt *dlm,
			       struct dlm_migratable_lockres *mres)
{
	struct dlm_lock dummy;
	memset(&dummy, 0, sizeof(dummy));
	dummy.ml.cookie = 0;
	dummy.ml.type = LKM_IVMODE;
	dummy.ml.convert_type = LKM_IVMODE;
	dummy.ml.highest_blocked = LKM_IVMODE;
	dummy.lksb = NULL;
	dummy.ml.node = dlm->node_num;
	dlm_add_lock_to_array(&dummy, mres, DLM_BLOCKED_LIST);
}

static inline int dlm_is_dummy_lock(struct dlm_ctxt *dlm,
				    struct dlm_migratable_lock *ml,
				    u8 *nodenum)
{
	if (unlikely(ml->cookie == 0 &&
	    ml->type == LKM_IVMODE &&
	    ml->convert_type == LKM_IVMODE &&
	    ml->highest_blocked == LKM_IVMODE &&
	    ml->list == DLM_BLOCKED_LIST)) {
		*nodenum = ml->node;
		return 1;
	}
	return 0;
}
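
/*
 * Illustrative sketch (editor's addition, not in the original file):
 * the dummy lock above is a sentinel carrying only a node number, so
 * a receiver can recover the refmap bit for a lockres that has no
 * real locks.  Encoding and decoding round-trip like this:
 */
#if 0
static void dlm_dummy_lock_sketch(struct dlm_ctxt *dlm,
				  struct dlm_migratable_lockres *mres)
{
	u8 from;

	dlm_add_dummy_lock(dlm, mres);			/* sender side */
	if (dlm_is_dummy_lock(dlm, &mres->ml[0], &from)) /* receiver side */
		mlog(0, "mastery reference only, from node %u\n", from);
}
#endif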
int dlm_send_one_lockres(struct dlm_ctxt *dlm, struct dlm_lock_resource *res,
			 struct dlm_migratable_lockres *mres,
			 u8 send_to, u8 flags)
{
	struct list_head *queue, *iter;
	int total_locks, i;
	u64 mig_cookie = 0;
	struct dlm_lock *lock;
	int ret = 0;

	BUG_ON(!(flags & (DLM_MRES_RECOVERY|DLM_MRES_MIGRATION)));

	mlog(0, "sending to %u\n", send_to);

	total_locks = dlm_num_locks_in_lockres(res);
	if (total_locks > DLM_MAX_MIGRATABLE_LOCKS) {
		/* rare, but possible */
		mlog(0, "argh.  lockres has %d locks.  this will "
		     "require more than one network packet to "
		     "migrate\n", total_locks);
		mig_cookie = dlm_get_next_mig_cookie();
	}

	dlm_init_migratable_lockres(mres, res->lockname.name,
				    res->lockname.len, total_locks,
				    mig_cookie, flags, res->owner);

	total_locks = 0;
	for (i=DLM_GRANTED_LIST; i<=DLM_BLOCKED_LIST; i++) {
		queue = dlm_list_idx_to_ptr(res, i);
		list_for_each(iter, queue) {
			lock = list_entry (iter, struct dlm_lock, list);

			/* add another lock. */
			total_locks++;
			if (!dlm_add_lock_to_array(lock, mres, i))
				continue;

			/* this filled the lock message,
			 * we must send it immediately. */
			ret = dlm_send_mig_lockres_msg(dlm, mres, send_to,
						       res, total_locks);
			if (ret < 0)
				goto error;
		}
	}
	if (total_locks == 0) {
		/* send a dummy lock to indicate a mastery reference only */
		mlog(0, "%s:%.*s: sending dummy lock to %u, %s\n",
		     dlm->name, res->lockname.len, res->lockname.name,
		     send_to, flags & DLM_MRES_RECOVERY ? "recovery" :
		     "migration");
		dlm_add_dummy_lock(dlm, mres);
	}
	/* flush any remaining locks */
	ret = dlm_send_mig_lockres_msg(dlm, mres, send_to, res, total_locks);
	if (ret < 0)
		goto error;
	return ret;

error:
	mlog(ML_ERROR, "%s: dlm_send_mig_lockres_msg returned %d\n",
	     dlm->name, ret);
	if (!dlm_is_host_down(ret))
		BUG();
	mlog(0, "%s: node %u went down while sending %s "
	     "lockres %.*s\n", dlm->name, send_to,
	     flags & DLM_MRES_RECOVERY ? "recovery" : "migration",
	     res->lockname.len, res->lockname.name);
	return ret;
}
/*
 * this message will contain no more than one page worth of
 * recovery data, and it will work on only one lockres.
 * there may be many locks in this page, and we may need to wait
 * for additional packets to complete all the locks (rare, but
 * possible).
 */
/*
 * NOTE: the allocation error cases here are scary
 * we really cannot afford to fail an alloc in recovery
 * do we spin?  returning an error only delays the problem really
 */

int dlm_mig_lockres_handler(struct o2net_msg *msg, u32 len, void *data,
			    void **ret_data)
{
	struct dlm_ctxt *dlm = data;
	struct dlm_migratable_lockres *mres =
		(struct dlm_migratable_lockres *)msg->buf;
	int ret = 0;
	u8 real_master;
	char *buf = NULL;
	struct dlm_work_item *item = NULL;
	struct dlm_lock_resource *res = NULL;

	if (!dlm_grab(dlm))
		return -EINVAL;

	BUG_ON(!(mres->flags & (DLM_MRES_RECOVERY|DLM_MRES_MIGRATION)));

	real_master = mres->master;
	if (real_master == DLM_LOCK_RES_OWNER_UNKNOWN) {
		/* cannot migrate a lockres with no master */
		BUG_ON(!(mres->flags & DLM_MRES_RECOVERY));
	}

	mlog(0, "%s message received from node %u\n",
	     (mres->flags & DLM_MRES_RECOVERY) ?
	     "recovery" : "migration", mres->master);
	if (mres->flags & DLM_MRES_ALL_DONE)
		mlog(0, "all done flag.  all lockres data received!\n");

	ret = -ENOMEM;
	buf = kmalloc(be16_to_cpu(msg->data_len), GFP_NOFS);
	item = kzalloc(sizeof(*item), GFP_NOFS);
	if (!buf || !item)
		goto leave;

	/* lookup the lock to see if we have a secondary queue for this
	 * already...  just add the locks in and this will have its owner
	 * and RECOVERY flag changed when it completes. */
	res = dlm_lookup_lockres(dlm, mres->lockname, mres->lockname_len);
	if (res) {
		/* this will get a ref on res */
		/* mark it as recovering/migrating and hash it */
		spin_lock(&res->spinlock);
		if (mres->flags & DLM_MRES_RECOVERY) {
			res->state |= DLM_LOCK_RES_RECOVERING;
		} else {
			if (res->state & DLM_LOCK_RES_MIGRATING) {
				/* this is at least the second
				 * lockres message */
				mlog(0, "lock %.*s is already migrating\n",
				     mres->lockname_len,
				     mres->lockname);
			} else if (res->state & DLM_LOCK_RES_RECOVERING) {
				/* caller should BUG */
				mlog(ML_ERROR, "node is attempting to migrate "
				     "lock %.*s, but marked as recovering!\n",
				     mres->lockname_len, mres->lockname);
				ret = -EFAULT;
				spin_unlock(&res->spinlock);
				goto leave;
			}
			res->state |= DLM_LOCK_RES_MIGRATING;
		}
		spin_unlock(&res->spinlock);
	} else {
		/* need to allocate, just like if it was
		 * mastered here normally  */
		res = dlm_new_lockres(dlm, mres->lockname, mres->lockname_len);
		if (!res)
			goto leave;

		/* to match the ref that we would have gotten if
		 * dlm_lookup_lockres had succeeded */
		dlm_lockres_get(res);

		/* mark it as recovering/migrating and hash it */
		if (mres->flags & DLM_MRES_RECOVERY)
			res->state |= DLM_LOCK_RES_RECOVERING;
		else
			res->state |= DLM_LOCK_RES_MIGRATING;

		spin_lock(&dlm->spinlock);
		__dlm_insert_lockres(dlm, res);
		spin_unlock(&dlm->spinlock);

		/* now that the new lockres is inserted,
		 * make it usable by other processes */
		spin_lock(&res->spinlock);
		res->state &= ~DLM_LOCK_RES_IN_PROGRESS;
		spin_unlock(&res->spinlock);
		wake_up(&res->wq);

		/* add an extra ref for just-allocated lockres
		 * otherwise the lockres will be purged immediately */
		dlm_lockres_get(res);
	}

	/* at this point we have allocated everything we need,
	 * and we have a hashed lockres with an extra ref and
	 * the proper res->state flags. */
	ret = 0;
	spin_lock(&res->spinlock);
	/* drop this either when master requery finds a different master
	 * or when a lock is added by the recovery worker */
	dlm_lockres_grab_inflight_ref(dlm, res);
	if (mres->master == DLM_LOCK_RES_OWNER_UNKNOWN) {
		/* migration cannot have an unknown master */
		BUG_ON(!(mres->flags & DLM_MRES_RECOVERY));
		mlog(0, "recovery has passed me a lockres with an "
		     "unknown owner.. will need to requery: "
		     "%.*s\n", mres->lockname_len, mres->lockname);
	} else {
		/* take a reference now to pin the lockres, drop it
		 * when locks are added in the worker */
		dlm_change_lockres_owner(dlm, res, dlm->node_num);
	}
	spin_unlock(&res->spinlock);

	/* queue up work for dlm_mig_lockres_worker */
	dlm_grab(dlm);  /* get an extra ref for the work item */
	memcpy(buf, msg->buf, be16_to_cpu(msg->data_len));  /* copy the whole message */
	dlm_init_work_item(dlm, item, dlm_mig_lockres_worker, buf);
	item->u.ml.lockres = res; /* already have a ref */
	item->u.ml.real_master = real_master;
	spin_lock(&dlm->work_lock);
	list_add_tail(&item->list, &dlm->work_list);
	spin_unlock(&dlm->work_lock);
	queue_work(dlm->dlm_worker, &dlm->dispatched_work);

leave:
	dlm_put(dlm);
	if (ret < 0) {
		kfree(buf);
		kfree(item);
	}

	return ret;
}
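
/*
 * Illustrative sketch (editor's addition, not in the original file):
 * the handler above depends on a full lockres message fitting in the
 * single page allocated for it.  A compile-time statement of that
 * assumption could look like this (DLM_MIG_LOCKRES_MAX_LEN comes from
 * dlmcommon.h).
 */
#if 0
static void dlm_mig_lockres_size_check(void)
{
	BUILD_BUG_ON(DLM_MIG_LOCKRES_MAX_LEN > PAGE_SIZE);
}
#endif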
static void dlm_mig_lockres_worker(struct dlm_work_item *item, void *data)
{
	struct dlm_ctxt *dlm;
	struct dlm_migratable_lockres *mres;
	int ret = 0;
	struct dlm_lock_resource *res;
	u8 real_master;

	dlm = item->dlm;
	mres = (struct dlm_migratable_lockres *)data;

	res = item->u.ml.lockres;
	real_master = item->u.ml.real_master;

	if (real_master == DLM_LOCK_RES_OWNER_UNKNOWN) {
		/* this case is super-rare.  only occurs if
		 * node death happens during migration. */
again:
		ret = dlm_lockres_master_requery(dlm, res, &real_master);
		if (ret < 0) {
			mlog(0, "dlm_lockres_master_requery ret=%d\n",
			     ret);
			goto again;
		}
		if (real_master == DLM_LOCK_RES_OWNER_UNKNOWN) {
			mlog(0, "lockres %.*s not claimed.  "
			     "this node will take it.\n",
			     res->lockname.len, res->lockname.name);
		} else {
			spin_lock(&res->spinlock);
			dlm_lockres_drop_inflight_ref(dlm, res);
			spin_unlock(&res->spinlock);
			mlog(0, "master needs to respond to sender "
			     "that node %u still owns %.*s\n",
			     real_master, res->lockname.len,
			     res->lockname.name);
			/* cannot touch this lockres */
			goto leave;
		}
	}

	ret = dlm_process_recovery_data(dlm, res, mres);
	if (ret < 0)
		mlog(0, "dlm_process_recovery_data returned %d\n", ret);
	else
		mlog(0, "dlm_process_recovery_data succeeded\n");

	if ((mres->flags & (DLM_MRES_MIGRATION|DLM_MRES_ALL_DONE)) ==
			   (DLM_MRES_MIGRATION|DLM_MRES_ALL_DONE)) {
		ret = dlm_finish_migration(dlm, res, mres->master);
		if (ret < 0)
			mlog_errno(ret);
	}

leave:
	kfree(data);
}
static int dlm_lockres_master_requery(struct dlm_ctxt *dlm,
				      struct dlm_lock_resource *res,
				      u8 *real_master)
{
	struct dlm_node_iter iter;
	int nodenum;
	int ret = 0;

	*real_master = DLM_LOCK_RES_OWNER_UNKNOWN;

	/* we only reach here if one of the two nodes in a
	 * migration died while the migration was in progress.
	 * at this point we need to requery the master.  we
	 * know that the new_master got as far as creating
	 * an mle on at least one node, but we do not know
	 * if any nodes had actually cleared the mle and set
	 * the master to the new_master.  the old master
	 * is supposed to set the owner to UNKNOWN in the
	 * event of a new_master death, so the only possible
	 * responses that we can get from nodes here are
	 * that the master is new_master, or that the master
	 * is UNKNOWN.
	 * if all nodes come back with UNKNOWN then we know
	 * the lock needs remastering here.
	 * if any node comes back with a valid master, check
	 * to see if that master is the one that we are
	 * recovering.  if so, then the new_master died and
	 * we need to remaster this lock.  if not, then the
	 * new_master survived and that node will respond to
	 * other nodes about the owner.
	 * if there is an owner, this node needs to dump this
	 * lockres and alert the sender that this lockres
	 * was rejected. */
	spin_lock(&dlm->spinlock);
	dlm_node_iter_init(dlm->domain_map, &iter);
	spin_unlock(&dlm->spinlock);

	while ((nodenum = dlm_node_iter_next(&iter)) >= 0) {
		/* do not send to self */
		if (nodenum == dlm->node_num)
			continue;
		ret = dlm_do_master_requery(dlm, res, nodenum, real_master);
		if (ret < 0) {
			mlog_errno(ret);
			if (!dlm_is_host_down(ret))
				BUG();
			/* host is down, so answer for that node would be
			 * DLM_LOCK_RES_OWNER_UNKNOWN.  continue. */
		}
		if (*real_master != DLM_LOCK_RES_OWNER_UNKNOWN) {
			mlog(0, "lock master is %u\n", *real_master);
			break;
		}
	}
	return ret;
}
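
/*
 * Illustrative sketch (editor's addition, not in the original file):
 * what a caller does with the requery result described above; this
 * mirrors the UNKNOWN/claimed branches in dlm_mig_lockres_worker().
 */
#if 0
static void dlm_requery_outcome_sketch(struct dlm_ctxt *dlm,
				       struct dlm_lock_resource *res)
{
	u8 real_master;

	if (dlm_lockres_master_requery(dlm, res, &real_master) < 0)
		return;
	if (real_master == DLM_LOCK_RES_OWNER_UNKNOWN)
		mlog(0, "unclaimed; this node remasters the lockres\n");
	else
		mlog(0, "node %u survived as master; leave it alone\n",
		     real_master);
}
#endif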
int dlm_do_master_requery(struct dlm_ctxt *dlm, struct dlm_lock_resource *res,
			  u8 nodenum, u8 *real_master)
{
	int ret = -EINVAL;
	struct dlm_master_requery req;
	int status = DLM_LOCK_RES_OWNER_UNKNOWN;

	memset(&req, 0, sizeof(req));
	req.node_idx = dlm->node_num;
	req.namelen = res->lockname.len;
	memcpy(req.name, res->lockname.name, res->lockname.len);

	ret = o2net_send_message(DLM_MASTER_REQUERY_MSG, dlm->key,
				 &req, sizeof(req), nodenum, &status);
	/* XXX: negative status not handled properly here. */
	if (ret < 0)
		mlog_errno(ret);
	else {
		BUG_ON(status < 0);
		BUG_ON(status > DLM_LOCK_RES_OWNER_UNKNOWN);
		*real_master = (u8) (status & 0xff);
		mlog(0, "node %u responded to master requery with %u\n",
		     nodenum, *real_master);
		ret = 0;
	}
	return ret;
}
/* this function cannot error, so unless the sending
 * or receiving of the message failed, the owner can
 * be trusted */
int dlm_master_requery_handler(struct o2net_msg *msg, u32 len, void *data,
			       void **ret_data)
{
	struct dlm_ctxt *dlm = data;
	struct dlm_master_requery *req = (struct dlm_master_requery *)msg->buf;
	struct dlm_lock_resource *res = NULL;
	unsigned int hash;
	int master = DLM_LOCK_RES_OWNER_UNKNOWN;
	u32 flags = DLM_ASSERT_MASTER_REQUERY;

	if (!dlm_grab(dlm)) {
		/* since the domain has gone away on this
		 * node, the proper response is UNKNOWN */
		return master;
	}

	hash = dlm_lockid_hash(req->name, req->namelen);

	spin_lock(&dlm->spinlock);
	res = __dlm_lookup_lockres(dlm, req->name, req->namelen, hash);
	if (res) {
		spin_lock(&res->spinlock);
		master = res->owner;
		if (master == dlm->node_num) {
			int ret = dlm_dispatch_assert_master(dlm, res,
							     0, 0, flags);
			if (ret < 0) {
				mlog_errno(-ENOMEM);
				/* retry!? */
				BUG();
			}
		}
		spin_unlock(&res->spinlock);
	}
	spin_unlock(&dlm->spinlock);

	dlm_put(dlm);
	return master;
}
static inline struct list_head *
dlm_list_num_to_pointer(struct dlm_lock_resource *res, int list_num)
{
	struct list_head *ret;
	BUG_ON(list_num < 0);
	BUG_ON(list_num > 2);
	ret = &(res->granted);
	ret += list_num;
	return ret;
}
/* TODO: do ast flush business
 * TODO: do MIGRATING and RECOVERING spinning
 */

/*
 * NOTE about in-flight requests during migration:
 *
 * Before attempting the migrate, the master has marked the lockres as
 * MIGRATING and then flushed all of its pending ASTS.  So any in-flight
 * requests either got queued before the MIGRATING flag got set, in which
 * case the lock data will reflect the change and a return message is on
 * the way, or the request failed to get in before MIGRATING got set.  In
 * this case, the caller will be told to spin and wait for the MIGRATING
 * flag to be dropped, then recheck the master.
 * This holds true for the convert, cancel and unlock cases, and since lvb
 * updates are tied to these same messages, it applies to lvb updates as
 * well.  For the lock case, there is no way a lock can be on the master
 * queue and not be on the secondary queue since the lock is always added
 * locally first.  This means that the new target node will never be sent
 * a lock that he doesn't already have on the list.
 * In total, this means that the local lock is correct and should not be
 * updated to match the one sent by the master.  Any messages sent back
 * from the master before the MIGRATING flag will bring the lock properly
 * up-to-date, and the change will be ordered properly for the waiter.
 * We will *not* attempt to modify the lock underneath the waiter.
 */
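
/*
 * Illustrative sketch (editor's addition, not in the original file):
 * the "spin and wait for MIGRATING to be dropped" behaviour described
 * above, as a caller would see it.  The real lock paths sleep on
 * res->wq via __dlm_wait_on_lockres_flags() rather than polling.
 */
#if 0
static void dlm_wait_migrating_sketch(struct dlm_lock_resource *res)
{
	spin_lock(&res->spinlock);
	while (res->state & DLM_LOCK_RES_MIGRATING) {
		spin_unlock(&res->spinlock);
		schedule();		/* real code sleeps on res->wq */
		spin_lock(&res->spinlock);
	}
	spin_unlock(&res->spinlock);
}
#endif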
static int dlm_process_recovery_data(struct dlm_ctxt *dlm,
				     struct dlm_lock_resource *res,
				     struct dlm_migratable_lockres *mres)
{
	struct dlm_migratable_lock *ml;
	struct list_head *queue;
	struct list_head *tmpq = NULL;
	struct dlm_lock *newlock = NULL;
	struct dlm_lockstatus *lksb = NULL;
	int ret = 0;
	int i, j, bad;
	struct list_head *iter;
	struct dlm_lock *lock = NULL;
	u8 from = O2NM_MAX_NODES;
	unsigned int added = 0;

	mlog(0, "running %d locks for this lockres\n", mres->num_locks);
	for (i=0; i<mres->num_locks; i++) {
		ml = &(mres->ml[i]);

		if (dlm_is_dummy_lock(dlm, ml, &from)) {
			/* placeholder, just need to set the refmap bit */
			BUG_ON(mres->num_locks != 1);
			mlog(0, "%s:%.*s: dummy lock for %u\n",
			     dlm->name, mres->lockname_len, mres->lockname,
			     from);
			spin_lock(&res->spinlock);
			dlm_lockres_set_refmap_bit(from, res);
			spin_unlock(&res->spinlock);
			added++;
			break;
		}
		BUG_ON(ml->highest_blocked != LKM_IVMODE);
		newlock = NULL;
		lksb = NULL;

		queue = dlm_list_num_to_pointer(res, ml->list);
		tmpq = NULL;

		/* if the lock is for the local node it needs to
		 * be moved to the proper location within the queue.
		 * do not allocate a new lock structure. */
		if (ml->node == dlm->node_num) {
			/* MIGRATION ONLY! */
			BUG_ON(!(mres->flags & DLM_MRES_MIGRATION));

			spin_lock(&res->spinlock);
			for (j = DLM_GRANTED_LIST; j <= DLM_BLOCKED_LIST; j++) {
				tmpq = dlm_list_idx_to_ptr(res, j);
				list_for_each(iter, tmpq) {
					lock = list_entry (iter, struct dlm_lock, list);
					if (lock->ml.cookie != ml->cookie)
						lock = NULL;
					else
						break;
				}
				if (lock)
					break;
			}

			/* lock is always created locally first, and
			 * destroyed locally last.  it must be on the list */
			if (!lock) {
				__be64 c = ml->cookie;
				mlog(ML_ERROR, "could not find local lock "
					       "with cookie %u:%llu!\n",
				     dlm_get_lock_cookie_node(be64_to_cpu(c)),
				     dlm_get_lock_cookie_seq(be64_to_cpu(c)));
				__dlm_print_one_lock_resource(res);
				BUG();
			}
			BUG_ON(lock->ml.node != ml->node);

			if (tmpq != queue) {
				mlog(0, "lock was on %u instead of %u for %.*s\n",
				     j, ml->list, res->lockname.len, res->lockname.name);
				spin_unlock(&res->spinlock);
				continue;
			}

			/* see NOTE above about why we do not update
			 * to match the master here */

			/* move the lock to its proper place */
			/* do not alter lock refcount.  switching lists. */
			list_move_tail(&lock->list, queue);
			spin_unlock(&res->spinlock);
			added++;

			mlog(0, "just reordered a local lock!\n");
			continue;
		}

		/* lock is for another node. */
		newlock = dlm_new_lock(ml->type, ml->node,
				       be64_to_cpu(ml->cookie), NULL);
		if (!newlock) {
			ret = -ENOMEM;
			goto leave;
		}
		lksb = newlock->lksb;
		dlm_lock_attach_lockres(newlock, res);

		if (ml->convert_type != LKM_IVMODE) {
			BUG_ON(queue != &res->converting);
			newlock->ml.convert_type = ml->convert_type;
		}
		lksb->flags |= (ml->flags &
				(DLM_LKSB_PUT_LVB|DLM_LKSB_GET_LVB));

		if (ml->type == LKM_NLMODE)
			goto skip_lvb;

		if (!dlm_lvb_is_empty(mres->lvb)) {
			if (lksb->flags & DLM_LKSB_PUT_LVB) {
				/* other node was trying to update
				 * lvb when node died.  recreate the
				 * lksb with the updated lvb. */
				memcpy(lksb->lvb, mres->lvb, DLM_LVB_LEN);
				/* the lock resource lvb update must happen
				 * NOW, before the spinlock is dropped.
				 * we no longer wait for the AST to update
				 * the lvb. */
				memcpy(res->lvb, mres->lvb, DLM_LVB_LEN);
			} else {
				/* otherwise, the node is sending its
				 * most recent valid lvb info */
				BUG_ON(ml->type != LKM_EXMODE &&
				       ml->type != LKM_PRMODE);
				if (!dlm_lvb_is_empty(res->lvb) &&
				    (ml->type == LKM_EXMODE ||
				     memcmp(res->lvb, mres->lvb, DLM_LVB_LEN))) {
					int i;
					mlog(ML_ERROR, "%s:%.*s: received bad "
					     "lvb! type=%d\n", dlm->name,
					     res->lockname.len,
					     res->lockname.name, ml->type);
					printk("lockres lvb=[");
					for (i=0; i<DLM_LVB_LEN; i++)
						printk("%02x", res->lvb[i]);
					printk("]\nmigrated lvb=[");
					for (i=0; i<DLM_LVB_LEN; i++)
						printk("%02x", mres->lvb[i]);
					printk("]\n");
					dlm_print_one_lock_resource(res);
					BUG();
				}
				memcpy(res->lvb, mres->lvb, DLM_LVB_LEN);
			}
		}
skip_lvb:

		/* NOTE:
		 * wrt lock queue ordering and recovery:
		 *    1. order of locks on granted queue is
		 *       meaningless.
		 *    2. order of locks on converting queue is
		 *       LOST with the node death.  sorry charlie.
		 *    3. order of locks on the blocked queue is
		 *       also meaningless.
		 * order of locks does not affect integrity, it
		 * just means that a lock request may get pushed
		 * back in line as a result of the node death.
		 * also note that for a given node the lock order
		 * for its secondary queue locks is preserved
		 * relative to each other, but clearly *not*
		 * preserved relative to locks from other nodes.
		 */
		bad = 0;
		spin_lock(&res->spinlock);
		list_for_each_entry(lock, queue, list) {
			if (lock->ml.cookie == ml->cookie) {
				__be64 c = lock->ml.cookie;
				mlog(ML_ERROR, "%s:%.*s: %u:%llu: lock already "
				     "exists on this lockres!\n", dlm->name,
				     res->lockname.len, res->lockname.name,
				     dlm_get_lock_cookie_node(be64_to_cpu(c)),
				     dlm_get_lock_cookie_seq(be64_to_cpu(c)));

				mlog(ML_NOTICE, "sent lock: type=%d, conv=%d, "
				     "node=%u, cookie=%u:%llu, queue=%d\n",
				     ml->type, ml->convert_type, ml->node,
				     dlm_get_lock_cookie_node(be64_to_cpu(ml->cookie)),
				     dlm_get_lock_cookie_seq(be64_to_cpu(ml->cookie)),
				     ml->list);

				__dlm_print_one_lock_resource(res);
				bad = 1;
				break;
			}
		}
		if (!bad) {
			dlm_lock_get(newlock);
			list_add_tail(&newlock->list, queue);
			mlog(0, "%s:%.*s: added lock for node %u, "
			     "setting refmap bit\n", dlm->name,
			     res->lockname.len, res->lockname.name, ml->node);
			dlm_lockres_set_refmap_bit(ml->node, res);
			added++;
		}
		spin_unlock(&res->spinlock);
	}
	mlog(0, "done running all the locks\n");

leave:
	/* balance the ref taken when the work was queued */
	spin_lock(&res->spinlock);
	dlm_lockres_drop_inflight_ref(dlm, res);
	spin_unlock(&res->spinlock);

	if (ret < 0) {
		mlog_errno(ret);
		if (newlock)
			dlm_lock_put(newlock);
	}

	return ret;
}
void dlm_move_lockres_to_recovery_list(struct dlm_ctxt *dlm,
				       struct dlm_lock_resource *res)
{
	int i;
	struct list_head *queue, *iter, *iter2;
	struct dlm_lock *lock;

	res->state |= DLM_LOCK_RES_RECOVERING;
	if (!list_empty(&res->recovering)) {
		mlog(0,
		     "Recovering res %s:%.*s, is already on recovery list!\n",
		     dlm->name, res->lockname.len, res->lockname.name);
		list_del_init(&res->recovering);
	}
	/* We need to hold a reference while on the recovery list */
	dlm_lockres_get(res);
	list_add_tail(&res->recovering, &dlm->reco.resources);

	/* find any pending locks and put them back on proper list */
	for (i=DLM_BLOCKED_LIST; i>=DLM_GRANTED_LIST; i--) {
		queue = dlm_list_idx_to_ptr(res, i);
		list_for_each_safe(iter, iter2, queue) {
			lock = list_entry (iter, struct dlm_lock, list);

			if (lock->convert_pending) {
				/* move converting lock back to granted */
				BUG_ON(i != DLM_CONVERTING_LIST);
				mlog(0, "node died with convert pending "
				     "on %.*s. move back to granted list.\n",
				     res->lockname.len, res->lockname.name);
				dlm_revert_pending_convert(res, lock);
				lock->convert_pending = 0;
			} else if (lock->lock_pending) {
				/* remove pending lock requests completely */
				BUG_ON(i != DLM_BLOCKED_LIST);
				mlog(0, "node died with lock pending "
				     "on %.*s. remove from blocked list and skip.\n",
				     res->lockname.len, res->lockname.name);
				/* lock will be floating until ref in
				 * dlmlock_remote is freed after the network
				 * call returns.  ok for it to not be on any
				 * list since no ast can be called
				 * (the master is dead). */
				dlm_revert_pending_lock(res, lock);
				lock->lock_pending = 0;
			} else if (lock->unlock_pending) {
				/* if an unlock was in progress, treat as
				 * if this had completed successfully
				 * before sending this lock state to the
				 * new master.  note that the dlm_unlock
				 * call is still responsible for calling
				 * the unlockast.  that will happen after
				 * the network call times out.  for now,
				 * just move lists to prepare the new
				 * recovery master.  */
				BUG_ON(i != DLM_GRANTED_LIST);
				mlog(0, "node died with unlock pending "
				     "on %.*s. remove from blocked list and skip.\n",
				     res->lockname.len, res->lockname.name);
				dlm_commit_pending_unlock(res, lock);
				lock->unlock_pending = 0;
			} else if (lock->cancel_pending) {
				/* if a cancel was in progress, treat as
				 * if this had completed successfully
				 * before sending this lock state to the
				 * new master */
				BUG_ON(i != DLM_CONVERTING_LIST);
				mlog(0, "node died with cancel pending "
				     "on %.*s. move back to granted list.\n",
				     res->lockname.len, res->lockname.name);
				dlm_commit_pending_cancel(res, lock);
				lock->cancel_pending = 0;
			}
		}
	}
}
2009 /* removes all recovered locks from the recovery list.
2010 * sets the res->owner to the new master.
2011 * unsets the RECOVERY flag and wakes waiters. */
2012 static void dlm_finish_local_lockres_recovery(struct dlm_ctxt *dlm,
2013 u8 dead_node, u8 new_master)
2016 struct list_head *iter, *iter2;
2017 struct hlist_node *hash_iter;
2018 struct hlist_head *bucket;
2020 struct dlm_lock_resource *res;
2024 assert_spin_locked(&dlm->spinlock);
	list_for_each_safe(iter, iter2, &dlm->reco.resources) {
		res = list_entry (iter, struct dlm_lock_resource, recovering);
		if (res->owner == dead_node) {
			list_del_init(&res->recovering);
			spin_lock(&res->spinlock);
			/* new_master has our reference from
			 * the lock state sent during recovery */
			dlm_change_lockres_owner(dlm, res, new_master);
			res->state &= ~DLM_LOCK_RES_RECOVERING;
			if (__dlm_lockres_has_locks(res))
				__dlm_dirty_lockres(dlm, res);
			spin_unlock(&res->spinlock);
			wake_up(&res->wq);
			dlm_lockres_put(res);
		}
	}

	/* this will become unnecessary eventually, but
	 * for now we need to run the whole hash, clear
	 * the RECOVERING state and set the owner
	 * if necessary */
	for (i = 0; i < DLM_HASH_BUCKETS; i++) {
		bucket = dlm_lockres_hash(dlm, i);
		hlist_for_each_entry(res, hash_iter, bucket, hash_node) {
			if (res->state & DLM_LOCK_RES_RECOVERING) {
				if (res->owner == dead_node) {
					mlog(0, "(this=%u) res %.*s owner=%u "
					     "was not on recovering list, but "
					     "clearing state anyway\n",
					     dlm->node_num, res->lockname.len,
					     res->lockname.name, new_master);
				} else if (res->owner == dlm->node_num) {
					mlog(0, "(this=%u) res %.*s owner=%u "
					     "was not on recovering list, "
					     "owner is THIS node, clearing\n",
					     dlm->node_num, res->lockname.len,
					     res->lockname.name, new_master);
				} else
					continue;

				if (!list_empty(&res->recovering)) {
					mlog(0, "%s:%.*s: lockres was "
					     "marked RECOVERING, owner=%u\n",
					     dlm->name, res->lockname.len,
					     res->lockname.name, res->owner);
					list_del_init(&res->recovering);
					dlm_lockres_put(res);
				}
				spin_lock(&res->spinlock);
				/* new_master has our reference from
				 * the lock state sent during recovery */
				dlm_change_lockres_owner(dlm, res, new_master);
				res->state &= ~DLM_LOCK_RES_RECOVERING;
				if (__dlm_lockres_has_locks(res))
					__dlm_dirty_lockres(dlm, res);
				spin_unlock(&res->spinlock);
				wake_up(&res->wq);
			}
		}
	}
}
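
/* Decide whether a lock's LVB can still be trusted after a node
 * death.  With local set we are inspecting this node's own locks on
 * a lockres it does not master: anything below PR cannot carry a
 * valid cached LVB.  Otherwise we are the master inspecting the dead
 * node's locks: only an EX holder could have written the LVB. */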
static inline int dlm_lvb_needs_invalidation(struct dlm_lock *lock, int local)
{
	if (local) {
		if (lock->ml.type != LKM_EXMODE &&
		    lock->ml.type != LKM_PRMODE)
			return 1;
	} else if (lock->ml.type == LKM_EXMODE)
		return 1;
	return 0;
}
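
/* Walk the granted and converting queues and blank any LVB that the
 * rule above says is no longer trustworthy.  If any per-lock lksb
 * copy is zeroed, the lockres copy is zeroed as well so the stale
 * value cannot resurface later. */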
static void dlm_revalidate_lvb(struct dlm_ctxt *dlm,
			       struct dlm_lock_resource *res, u8 dead_node)
{
	struct list_head *iter, *queue;
	struct dlm_lock *lock;
	int blank_lvb = 0, local = 0;
	int i;
	u8 search_node;

	assert_spin_locked(&dlm->spinlock);
	assert_spin_locked(&res->spinlock);

	if (res->owner == dlm->node_num)
		/* if this node owned the lockres, and if the dead node
		 * had an EX when he died, blank out the lvb */
		search_node = dead_node;
	else {
		/* if this is a secondary lockres, and we had no EX or PR
		 * locks granted, we can no longer trust the lvb */
		search_node = dlm->node_num;
		local = 1;  /* check local state for valid lvb */
	}

	for (i=DLM_GRANTED_LIST; i<=DLM_CONVERTING_LIST; i++) {
		queue = dlm_list_idx_to_ptr(res, i);
		list_for_each(iter, queue) {
			lock = list_entry (iter, struct dlm_lock, list);
			if (lock->ml.node == search_node) {
				if (dlm_lvb_needs_invalidation(lock, local)) {
					/* zero the lksb lvb and lockres lvb */
					blank_lvb = 1;
					memset(lock->lksb->lvb, 0, DLM_LVB_LEN);
				}
			}
		}
	}

	if (blank_lvb) {
		mlog(0, "clearing %.*s lvb, dead node %u had EX\n",
		     res->lockname.len, res->lockname.name, dead_node);
		memset(res->lvb, 0, DLM_LVB_LEN);
	}
}
static void dlm_free_dead_locks(struct dlm_ctxt *dlm,
				struct dlm_lock_resource *res, u8 dead_node)
{
	struct list_head *iter, *tmpiter;
	struct dlm_lock *lock;
	unsigned int freed = 0;

	/* this node is the lockres master:
	 * 1) remove any stale locks for the dead node
	 * 2) if the dead node had an EX when he died, blank out the lvb
	 */
	assert_spin_locked(&dlm->spinlock);
	assert_spin_locked(&res->spinlock);

	/* TODO: check pending_asts, pending_basts here */
	list_for_each_safe(iter, tmpiter, &res->granted) {
		lock = list_entry (iter, struct dlm_lock, list);
		if (lock->ml.node == dead_node) {
			list_del_init(&lock->list);
			dlm_lock_put(lock);
			freed++;
		}
	}
	list_for_each_safe(iter, tmpiter, &res->converting) {
		lock = list_entry (iter, struct dlm_lock, list);
		if (lock->ml.node == dead_node) {
			list_del_init(&lock->list);
			dlm_lock_put(lock);
			freed++;
		}
	}
	list_for_each_safe(iter, tmpiter, &res->blocked) {
		lock = list_entry (iter, struct dlm_lock, list);
		if (lock->ml.node == dead_node) {
			list_del_init(&lock->list);
			dlm_lock_put(lock);
			freed++;
		}
	}

	if (freed) {
		mlog(0, "%s:%.*s: freed %u locks for dead node %u, "
		     "dropping ref from lockres\n", dlm->name,
		     res->lockname.len, res->lockname.name, freed, dead_node);
		BUG_ON(!test_bit(dead_node, res->refmap));
		dlm_lockres_clear_refmap_bit(dead_node, res);
	} else if (test_bit(dead_node, res->refmap)) {
		mlog(0, "%s:%.*s: dead node %u had a ref, but had "
		     "no locks and had not purged before dying\n", dlm->name,
		     res->lockname.len, res->lockname.name, dead_node);
		dlm_lockres_clear_refmap_bit(dead_node, res);
	}

	/* do not kick thread yet */
	__dlm_dirty_lockres(dlm, res);
}

/* if this node is the recovery master, and there are no
 * locks for a given lockres owned by this node that are in
 * either PR or EX mode, zero out the lvb before requesting.
 *
 */

static void dlm_do_local_recovery_cleanup(struct dlm_ctxt *dlm, u8 dead_node)
{
	struct hlist_node *iter;
	struct dlm_lock_resource *res;
	int i;
	struct hlist_head *bucket;
	struct dlm_lock *lock;

	/* purge any stale mles */
	dlm_clean_master_list(dlm, dead_node);

	/*
	 * now clean up all lock resources.  there are two rules:
	 *
	 * 1) if the dead node was the master, move the lockres
	 *    to the recovering list.  set the RECOVERING flag.
	 *    this lockres needs to be cleaned up before it can
	 *    be used further.
	 *
	 * 2) if this node was the master, remove all locks from
	 *    each of the lockres queues that were owned by the
	 *    dead node.  once recovery finishes, the dlm thread
	 *    can be kicked again to see if any ASTs or BASTs
	 *    need to be fired as a result.
	 */
	for (i = 0; i < DLM_HASH_BUCKETS; i++) {
		bucket = dlm_lockres_hash(dlm, i);
		hlist_for_each_entry(res, iter, bucket, hash_node) {
			/* always prune any $RECOVERY entries for dead nodes,
			 * otherwise hangs can occur during later recovery */
			if (dlm_is_recovery_lock(res->lockname.name,
						 res->lockname.len)) {
				spin_lock(&res->spinlock);
				list_for_each_entry(lock, &res->granted, list) {
					if (lock->ml.node == dead_node) {
						mlog(0, "AHA! there was "
						     "a $RECOVERY lock for dead "
						     "node %u (%s)!\n",
						     dead_node, dlm->name);
						list_del_init(&lock->list);
						dlm_lock_put(lock);
						break;
					}
				}
				spin_unlock(&res->spinlock);
				continue;
			}
			spin_lock(&res->spinlock);
			/* zero the lvb if necessary */
			dlm_revalidate_lvb(dlm, res, dead_node);
			if (res->owner == dead_node) {
				if (res->state & DLM_LOCK_RES_DROPPING_REF)
					mlog(0, "%s:%.*s: owned by "
					     "dead node %u, this node was "
					     "dropping its ref when it died. "
					     "continue, dropping the flag.\n",
					     dlm->name, res->lockname.len,
					     res->lockname.name, dead_node);

				/* the wake_up for this will happen when the
				 * RECOVERING flag is dropped later */
				res->state &= ~DLM_LOCK_RES_DROPPING_REF;

				dlm_move_lockres_to_recovery_list(dlm, res);
			} else if (res->owner == dlm->node_num) {
				dlm_free_dead_locks(dlm, res, dead_node);
				__dlm_lockres_calc_usage(dlm, res);
			}
			spin_unlock(&res->spinlock);
		}
	}
}
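
/* Core of the node-death path, called with dlm->spinlock held.
 * Ordering matters here: local lockres cleanup must run before the
 * heartbeat callbacks are notified, and a node is moved out of the
 * live/domain maps and into the recovery map exactly once. */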
static void __dlm_hb_node_down(struct dlm_ctxt *dlm, int idx)
{
	assert_spin_locked(&dlm->spinlock);

	if (dlm->reco.new_master == idx) {
		mlog(0, "%s: recovery master %d just died\n",
		     dlm->name, idx);
		if (dlm->reco.state & DLM_RECO_STATE_FINALIZE) {
			/* finalize1 was reached, so it is safe to clear
			 * the new_master and dead_node.  that recovery
			 * is complete. */
			mlog(0, "%s: dead master %d had reached "
			     "finalize1 state, clearing\n", dlm->name, idx);
			dlm->reco.state &= ~DLM_RECO_STATE_FINALIZE;
			__dlm_reset_recovery(dlm);
		}
	}

	/* check to see if the node is already considered dead */
	if (!test_bit(idx, dlm->live_nodes_map)) {
		mlog(0, "for domain %s, node %d is already dead. "
		     "another node likely did recovery already.\n",
		     dlm->name, idx);
		return;
	}

	/* check to see if we do not care about this node */
	if (!test_bit(idx, dlm->domain_map)) {
		/* This also catches the case that we get a node down
		 * but haven't joined the domain yet. */
		mlog(0, "node %u already removed from domain!\n", idx);
		return;
	}

	clear_bit(idx, dlm->live_nodes_map);

	/* Clean up join state on node death. */
	if (dlm->joining_node == idx) {
		mlog(0, "Clearing join state for node %u\n", idx);
		__dlm_set_joining_node(dlm, DLM_LOCK_RES_OWNER_UNKNOWN);
	}

	/* make sure local cleanup occurs before the heartbeat events */
	if (!test_bit(idx, dlm->recovery_map))
		dlm_do_local_recovery_cleanup(dlm, idx);

	/* notify anything attached to the heartbeat events */
	dlm_hb_event_notify_attached(dlm, idx, 0);

	mlog(0, "node %u being removed from domain map!\n", idx);
	clear_bit(idx, dlm->domain_map);

	/* wake up migration waiters if a node goes down.
	 * perhaps later we can genericize this for other waiters. */
	wake_up(&dlm->migration_wq);

	if (test_bit(idx, dlm->recovery_map))
		mlog(0, "domain %s, node %u already added "
		     "to recovery map!\n", dlm->name, idx);
	else
		set_bit(idx, dlm->recovery_map);
}
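
/* Heartbeat up/down callbacks.  These run outside the dlm spinlock,
 * so each grabs a domain reference with dlm_grab() before touching
 * shared state and drops it with dlm_put() afterward.  A sketch of
 * how they might be hooked up at domain init time, assuming the
 * o2hb callback API declared in cluster/heartbeat.h:
 *
 *	o2hb_setup_callback(&dlm->dlm_hb_down, O2HB_NODE_DOWN_CB,
 *			    dlm_hb_node_down_cb, dlm, DLM_HB_NODE_DOWN_PRI);
 *	status = o2hb_register_callback(&dlm->dlm_hb_down);
 */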
void dlm_hb_node_down_cb(struct o2nm_node *node, int idx, void *data)
{
	struct dlm_ctxt *dlm = data;

	if (!dlm_grab(dlm))
		return;

	spin_lock(&dlm->spinlock);
	__dlm_hb_node_down(dlm, idx);
	spin_unlock(&dlm->spinlock);

	dlm_put(dlm);
}

void dlm_hb_node_up_cb(struct o2nm_node *node, int idx, void *data)
{
	struct dlm_ctxt *dlm = data;

	if (!dlm_grab(dlm))
		return;

	spin_lock(&dlm->spinlock);
	set_bit(idx, dlm->live_nodes_map);
	/* do NOT notify mle attached to the heartbeat events.
	 * new nodes are not interesting in mastery until joined. */
	spin_unlock(&dlm->spinlock);

	dlm_put(dlm);
}
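
/* The $RECOVERY lock is never handed out to a caller, so its ast and
 * bast callbacks only need to exist to satisfy dlmlock(); they do
 * nothing but log that they fired. */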
static void dlm_reco_ast(void *astdata)
{
	struct dlm_ctxt *dlm = astdata;
	mlog(0, "ast for recovery lock fired!, this=%u, dlm=%s\n",
	     dlm->node_num, dlm->name);
}
static void dlm_reco_bast(void *astdata, int blocked_type)
{
	struct dlm_ctxt *dlm = astdata;
	mlog(0, "bast for recovery lock fired!, this=%u, dlm=%s\n",
	     dlm->node_num, dlm->name);
}
static void dlm_reco_unlock_ast(void *astdata, enum dlm_status st)
{
	mlog(0, "unlockast for recovery lock fired!\n");
}

/*
 * dlm_pick_recovery_master will continually attempt to use
 * dlmlock() on the special "$RECOVERY" lockres with the
 * LKM_NOQUEUE flag to get an EX.  every thread that enters
 * this function on each node racing to become the recovery
 * master will not stop attempting this until either:
 * a) this node gets the EX (and becomes the recovery master),
 * or b) dlm->reco.new_master gets set to some nodenum
 * != O2NM_INVALID_NODE_NUM (another node will do the reco).
 * so each time a recovery master is needed, the entire cluster
 * will sync at this point.  if the new master dies, that will
 * be detected in dlm_do_recovery */
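/* Return values seen below: 0 when this node won the EX and has sent
 * begin_reco, -EEXIST when another node is (or will be) the recovery
 * master, and -EINVAL if the dead node was already recovered while
 * we held the lock. */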
static int dlm_pick_recovery_master(struct dlm_ctxt *dlm)
{
	enum dlm_status ret;
	struct dlm_lockstatus lksb;
	int status = -EINVAL;

	mlog(0, "starting recovery of %s at %lu, dead=%u, this=%u\n",
	     dlm->name, jiffies, dlm->reco.dead_node, dlm->node_num);
again:
	memset(&lksb, 0, sizeof(lksb));

	ret = dlmlock(dlm, LKM_EXMODE, &lksb, LKM_NOQUEUE|LKM_RECOVERY,
		      DLM_RECOVERY_LOCK_NAME, DLM_RECOVERY_LOCK_NAME_LEN,
		      dlm_reco_ast, dlm, dlm_reco_bast);

	mlog(0, "%s: dlmlock($RECOVERY) returned %d, lksb=%d\n",
	     dlm->name, ret, lksb.status);

	if (ret == DLM_NORMAL) {
		mlog(0, "dlm=%s dlmlock says I got it (this=%u)\n",
		     dlm->name, dlm->node_num);

		/* got the EX lock.  check to see if another node
		 * just became the reco master */
		if (dlm_reco_master_ready(dlm)) {
			mlog(0, "%s: got reco EX lock, but %u will "
			     "do the recovery\n", dlm->name,
			     dlm->reco.new_master);
			status = -EEXIST;
		} else {
			status = 0;

			/* see if recovery was already finished elsewhere */
			spin_lock(&dlm->spinlock);
			if (dlm->reco.dead_node == O2NM_INVALID_NODE_NUM) {
				status = -EINVAL;
				mlog(0, "%s: got reco EX lock, but "
				     "node got recovered already\n", dlm->name);
				if (dlm->reco.new_master != O2NM_INVALID_NODE_NUM) {
					mlog(ML_ERROR, "%s: new master is %u "
					     "but no dead node!\n",
					     dlm->name, dlm->reco.new_master);
					BUG();
				}
			}
			spin_unlock(&dlm->spinlock);
		}

		/* if this node has actually become the recovery master,
		 * set the master and send the messages to begin recovery */
		if (!status) {
			mlog(0, "%s: dead=%u, this=%u, sending "
			     "begin_reco now\n", dlm->name,
			     dlm->reco.dead_node, dlm->node_num);
			status = dlm_send_begin_reco_message(dlm,
				      dlm->reco.dead_node);
			/* this always succeeds */
			BUG_ON(status);

			/* set the new_master to this node */
			spin_lock(&dlm->spinlock);
			dlm_set_reco_master(dlm, dlm->node_num);
			spin_unlock(&dlm->spinlock);
		}

		/* recovery lock is a special case.  ast will not get fired,
		 * so just go ahead and unlock it. */
		ret = dlmunlock(dlm, &lksb, 0, dlm_reco_unlock_ast, dlm);
		if (ret == DLM_DENIED) {
			mlog(0, "got DLM_DENIED, trying LKM_CANCEL\n");
			ret = dlmunlock(dlm, &lksb, LKM_CANCEL, dlm_reco_unlock_ast, dlm);
		}
		if (ret != DLM_NORMAL) {
			/* this would really suck. this could only happen
			 * if there was a network error during the unlock
			 * because of node death.  this means the unlock
			 * is actually "done" and the lock structure is
			 * even freed.  we can continue, but only
			 * because this specific lock name is special. */
			mlog(ML_ERROR, "dlmunlock returned %d\n", ret);
		}
	} else if (ret == DLM_NOTQUEUED) {
		mlog(0, "dlm=%s dlmlock says another node got it (this=%u)\n",
		     dlm->name, dlm->node_num);
		/* another node is master.  wait on
		 * reco.new_master != O2NM_INVALID_NODE_NUM
		 * for at most one second */
		wait_event_timeout(dlm->dlm_reco_thread_wq,
				   dlm_reco_master_ready(dlm),
				   msecs_to_jiffies(1000));
		if (!dlm_reco_master_ready(dlm)) {
			mlog(0, "%s: reco master taking awhile\n",
			     dlm->name);
			goto again;
		}
		/* another node has informed this one that it is reco master */
		mlog(0, "%s: reco master %u is ready to recover %u\n",
		     dlm->name, dlm->reco.new_master, dlm->reco.dead_node);
		status = -EEXIST;
	} else if (ret == DLM_RECOVERING) {
		mlog(0, "dlm=%s dlmlock says master node died (this=%u)\n",
		     dlm->name, dlm->node_num);
		goto again;
	} else {
		struct dlm_lock_resource *res;

		/* dlmlock returned something other than NOTQUEUED or NORMAL */
		mlog(ML_ERROR, "%s: got %s from dlmlock($RECOVERY), "
		     "lksb.status=%s\n", dlm->name, dlm_errname(ret),
		     dlm_errname(lksb.status));
		res = dlm_lookup_lockres(dlm, DLM_RECOVERY_LOCK_NAME,
					 DLM_RECOVERY_LOCK_NAME_LEN);
		if (res) {
			dlm_print_one_lock_resource(res);
			dlm_lockres_put(res);
		} else {
			mlog(ML_ERROR, "recovery lock not found\n");
		}
		BUG();
	}

	return status;
}
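
/* Broadcast the dead node to every other domain member.  A node that
 * is still finishing a previous recovery answers EAGAIN, in which
 * case the send is retried after a short backoff; a node that has
 * itself died is simply skipped. */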
static int dlm_send_begin_reco_message(struct dlm_ctxt *dlm, u8 dead_node)
{
	struct dlm_begin_reco br;
	int ret = 0;
	struct dlm_node_iter iter;
	int nodenum;
	int status;

	mlog_entry("%u\n", dead_node);

	mlog(0, "%s: dead node is %u\n", dlm->name, dead_node);

	spin_lock(&dlm->spinlock);
	dlm_node_iter_init(dlm->domain_map, &iter);
	spin_unlock(&dlm->spinlock);

	clear_bit(dead_node, iter.node_map);

	memset(&br, 0, sizeof(br));
	br.node_idx = dlm->node_num;
	br.dead_node = dead_node;

	while ((nodenum = dlm_node_iter_next(&iter)) >= 0) {

		if (nodenum == dead_node) {
			mlog(0, "not sending begin reco to dead node "
				  "%u\n", dead_node);
			continue;
		}
		if (nodenum == dlm->node_num) {
			mlog(0, "not sending begin reco to self\n");
			continue;
		}
retry:
		ret = -EINVAL;
		mlog(0, "attempting to send begin reco msg to %d\n",
			  nodenum);
		ret = o2net_send_message(DLM_BEGIN_RECO_MSG, dlm->key,
					 &br, sizeof(br), nodenum, &status);
		/* negative status is handled ok by caller here */
		if (ret >= 0)
			ret = status;
		if (dlm_is_host_down(ret)) {
			/* node is down.  not involved in recovery
			 * so just keep going */
			mlog(0, "%s: node %u was down when sending "
			     "begin reco msg (%d)\n", dlm->name, nodenum, ret);
			ret = 0;
		}
		if (ret < 0) {
			struct dlm_lock_resource *res;
			/* this is now a serious problem, possibly ENOMEM
			 * in the network stack.  must retry */
			mlog(ML_ERROR, "begin reco of dlm %s to node %u "
			    " returned %d\n", dlm->name, nodenum, ret);
			res = dlm_lookup_lockres(dlm, DLM_RECOVERY_LOCK_NAME,
						 DLM_RECOVERY_LOCK_NAME_LEN);
			if (res) {
				dlm_print_one_lock_resource(res);
				dlm_lockres_put(res);
			} else {
				mlog(ML_ERROR, "recovery lock not found\n");
			}
			/* sleep for a bit in hopes that we can avoid
			 * another ENOMEM */
			msleep(100);
			goto retry;
		} else if (ret == EAGAIN) {
			mlog(0, "%s: trying to start recovery of node "
			     "%u, but node %u is waiting for last recovery "
			     "to complete, backoff for a bit\n", dlm->name,
			     dead_node, nodenum);
			/* TODO Look into replacing msleep with cond_resched() */
			msleep(100);
			goto retry;
		}
	}

	return ret;
}
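
/* Handler side of the begin_reco message.  If this node is still
 * finalizing an earlier recovery it answers EAGAIN instead of
 * overwriting reco state the previous master may still need; the
 * sender backs off and retries. */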
int dlm_begin_reco_handler(struct o2net_msg *msg, u32 len, void *data,
			   void **ret_data)
{
	struct dlm_ctxt *dlm = data;
	struct dlm_begin_reco *br = (struct dlm_begin_reco *)msg->buf;

	/* ok to return 0, domain has gone away */
	if (!dlm_grab(dlm))
		return 0;

	spin_lock(&dlm->spinlock);
	if (dlm->reco.state & DLM_RECO_STATE_FINALIZE) {
		mlog(0, "%s: node %u wants to recover node %u (%u:%u) "
		     "but this node is in finalize state, waiting on finalize2\n",
		     dlm->name, br->node_idx, br->dead_node,
		     dlm->reco.dead_node, dlm->reco.new_master);
		spin_unlock(&dlm->spinlock);
		return EAGAIN;
	}
	spin_unlock(&dlm->spinlock);

	mlog(0, "%s: node %u wants to recover node %u (%u:%u)\n",
	     dlm->name, br->node_idx, br->dead_node,
	     dlm->reco.dead_node, dlm->reco.new_master);

	dlm_fire_domain_eviction_callbacks(dlm, br->dead_node);

	spin_lock(&dlm->spinlock);
	if (dlm->reco.new_master != O2NM_INVALID_NODE_NUM) {
		if (test_bit(dlm->reco.new_master, dlm->recovery_map)) {
			mlog(0, "%s: new_master %u died, changing "
			     "to %u\n", dlm->name, dlm->reco.new_master,
			     br->node_idx);
		} else {
			mlog(0, "%s: new_master %u NOT DEAD, changing "
			     "to %u\n", dlm->name, dlm->reco.new_master,
			     br->node_idx);
			/* may not have seen the new master as dead yet */
		}
	}
	if (dlm->reco.dead_node != O2NM_INVALID_NODE_NUM) {
		mlog(ML_NOTICE, "%s: dead_node previously set to %u, "
		     "node %u changing it to %u\n", dlm->name,
		     dlm->reco.dead_node, br->node_idx, br->dead_node);
	}
	dlm_set_reco_master(dlm, br->node_idx);
	dlm_set_reco_dead_node(dlm, br->dead_node);
	if (!test_bit(br->dead_node, dlm->recovery_map)) {
		mlog(0, "recovery master %u sees %u as dead, but this "
		     "node has not yet.  marking %u as dead\n",
		     br->node_idx, br->dead_node, br->dead_node);
		if (!test_bit(br->dead_node, dlm->domain_map) ||
		    !test_bit(br->dead_node, dlm->live_nodes_map))
			mlog(0, "%u not in domain/live_nodes map "
			     "so setting it in reco map manually\n",
			     br->dead_node);
		/* force the recovery cleanup in __dlm_hb_node_down
		 * both of these will be cleared in a moment */
		set_bit(br->dead_node, dlm->domain_map);
		set_bit(br->dead_node, dlm->live_nodes_map);
		__dlm_hb_node_down(dlm, br->dead_node);
	}
	spin_unlock(&dlm->spinlock);

	dlm_kick_recovery_thread(dlm);

	mlog(0, "%s: recovery started by node %u, for %u (%u:%u)\n",
	     dlm->name, br->node_idx, br->dead_node,
	     dlm->reco.dead_node, dlm->reco.new_master);

	dlm_put(dlm);
	return 0;
}
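
/* Recovery is finalized in two stages so that every node has seen
 * stage one (and repointed its lockres owners) before any node is
 * told to fully reset its recovery state in stage two.  Roughly:
 *
 *	new master                 each other node
 *	  finalize1  ----------->    reassign owners, set FINALIZE
 *	  (all acked)
 *	  finalize2  ----------->    clear FINALIZE, reset recovery
 */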
#define DLM_FINALIZE_STAGE2  0x01
static int dlm_send_finalize_reco_message(struct dlm_ctxt *dlm)
{
	int ret = 0;
	struct dlm_finalize_reco fr;
	struct dlm_node_iter iter;
	int nodenum;
	int status;
	int stage = 1;

	mlog(0, "finishing recovery for node %s:%u, "
	     "stage %d\n", dlm->name, dlm->reco.dead_node, stage);

	spin_lock(&dlm->spinlock);
	dlm_node_iter_init(dlm->domain_map, &iter);
	spin_unlock(&dlm->spinlock);

stage2:
	memset(&fr, 0, sizeof(fr));
	fr.node_idx = dlm->node_num;
	fr.dead_node = dlm->reco.dead_node;
	if (stage == 2)
		fr.flags |= DLM_FINALIZE_STAGE2;

	while ((nodenum = dlm_node_iter_next(&iter)) >= 0) {
		if (nodenum == dlm->node_num)
			continue;
		ret = o2net_send_message(DLM_FINALIZE_RECO_MSG, dlm->key,
					 &fr, sizeof(fr), nodenum, &status);
		if (ret >= 0)
			ret = status;
		if (ret < 0) {
			mlog_errno(ret);
			if (dlm_is_host_down(ret)) {
				/* this has no effect on this recovery
				 * session, so set the status to zero to
				 * finish out the last recovery */
				mlog(ML_ERROR, "node %u went down after this "
				     "node finished recovery.\n", nodenum);
				ret = 0;
				continue;
			}
			break;
		}
	}
	if (stage == 1) {
		/* reset the node_iter back to the top and send finalize2 */
		iter.curnode = -1;
		stage = 2;
		goto stage2;
	}

	return ret;
}
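
/* Handler side of the finalize messages.  Stage one must arrive
 * exactly once before stage two; any other ordering indicates a
 * protocol violation and is fatal here. */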
int dlm_finalize_reco_handler(struct o2net_msg *msg, u32 len, void *data,
			      void **ret_data)
{
	struct dlm_ctxt *dlm = data;
	struct dlm_finalize_reco *fr = (struct dlm_finalize_reco *)msg->buf;
	int stage = 1;

	/* ok to return 0, domain has gone away */
	if (!dlm_grab(dlm))
		return 0;

	if (fr->flags & DLM_FINALIZE_STAGE2)
		stage = 2;

	mlog(0, "%s: node %u finalizing recovery stage%d of "
	     "node %u (%u:%u)\n", dlm->name, fr->node_idx, stage,
	     fr->dead_node, dlm->reco.dead_node, dlm->reco.new_master);

	spin_lock(&dlm->spinlock);

	if (dlm->reco.new_master != fr->node_idx) {
		mlog(ML_ERROR, "node %u sent recovery finalize msg, but node "
		     "%u is supposed to be the new master, dead=%u\n",
		     fr->node_idx, dlm->reco.new_master, fr->dead_node);
		BUG();
	}
	if (dlm->reco.dead_node != fr->dead_node) {
		mlog(ML_ERROR, "node %u sent recovery finalize msg for dead "
		     "node %u, but node %u is supposed to be dead\n",
		     fr->node_idx, fr->dead_node, dlm->reco.dead_node);
		BUG();
	}

	switch (stage) {
		case 1:
			dlm_finish_local_lockres_recovery(dlm, fr->dead_node, fr->node_idx);
			if (dlm->reco.state & DLM_RECO_STATE_FINALIZE) {
				mlog(ML_ERROR, "%s: received finalize1 from "
				     "new master %u for dead node %u, but "
				     "this node has already received it!\n",
				     dlm->name, fr->node_idx, fr->dead_node);
				dlm_print_reco_node_status(dlm);
				BUG();
			}
			dlm->reco.state |= DLM_RECO_STATE_FINALIZE;
			spin_unlock(&dlm->spinlock);
			break;
		case 2:
			if (!(dlm->reco.state & DLM_RECO_STATE_FINALIZE)) {
				mlog(ML_ERROR, "%s: received finalize2 from "
				     "new master %u for dead node %u, but "
				     "this node did not have finalize1!\n",
				     dlm->name, fr->node_idx, fr->dead_node);
				dlm_print_reco_node_status(dlm);
				BUG();
			}
			dlm->reco.state &= ~DLM_RECO_STATE_FINALIZE;
			spin_unlock(&dlm->spinlock);
			dlm_reset_recovery(dlm);
			dlm_kick_recovery_thread(dlm);
			break;
		default:
			BUG();
	}

	mlog(0, "%s: recovery done, reco master was %u, dead now %u, master now %u\n",
	     dlm->name, fr->node_idx, dlm->reco.dead_node, dlm->reco.new_master);

	dlm_put(dlm);
	return 0;
}