1 /* -*- mode: c; c-basic-offset: 8; -*-
2  * vim: noexpandtab sw=8 ts=8 sts=0:
3  *
4  * dlmmaster.c
5  *
6  * standalone DLM module
7  *
8  * Copyright (C) 2004 Oracle.  All rights reserved.
9  *
10  * This program is free software; you can redistribute it and/or
11  * modify it under the terms of the GNU General Public
12  * License as published by the Free Software Foundation; either
13  * version 2 of the License, or (at your option) any later version.
14  *
15  * This program is distributed in the hope that it will be useful,
16  * but WITHOUT ANY WARRANTY; without even the implied warranty of
17  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
18  * General Public License for more details.
19  *
20  * You should have received a copy of the GNU General Public
21  * License along with this program; if not, write to the
22  * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
23  * Boston, MA 02111-1307, USA.
24  *
25  */
26
27
28 #include <linux/module.h>
29 #include <linux/fs.h>
30 #include <linux/types.h>
31 #include <linux/slab.h>
32 #include <linux/highmem.h>
33 #include <linux/utsname.h>
34 #include <linux/init.h>
35 #include <linux/sysctl.h>
36 #include <linux/random.h>
37 #include <linux/blkdev.h>
38 #include <linux/socket.h>
39 #include <linux/inet.h>
40 #include <linux/spinlock.h>
41 #include <linux/delay.h>
42
43
44 #include "cluster/heartbeat.h"
45 #include "cluster/nodemanager.h"
46 #include "cluster/tcp.h"
47
48 #include "dlmapi.h"
49 #include "dlmcommon.h"
50 #include "dlmdomain.h"
51
52 #define MLOG_MASK_PREFIX (ML_DLM|ML_DLM_MASTER)
53 #include "cluster/masklog.h"
54
55 enum dlm_mle_type {
56         DLM_MLE_BLOCK,
57         DLM_MLE_MASTER,
58         DLM_MLE_MIGRATION
59 };
60
61 struct dlm_lock_name
62 {
63         u8 len;
64         u8 name[DLM_LOCKID_NAME_MAX];
65 };
66
67 struct dlm_master_list_entry
68 {
69         struct list_head list;
70         struct list_head hb_events;
71         struct dlm_ctxt *dlm;
72         spinlock_t spinlock;
73         wait_queue_head_t wq;
74         atomic_t woken;
75         struct kref mle_refs;
76         int inuse;
77         unsigned long maybe_map[BITS_TO_LONGS(O2NM_MAX_NODES)];
78         unsigned long vote_map[BITS_TO_LONGS(O2NM_MAX_NODES)];
79         unsigned long response_map[BITS_TO_LONGS(O2NM_MAX_NODES)];
80         unsigned long node_map[BITS_TO_LONGS(O2NM_MAX_NODES)];
81         u8 master;
82         u8 new_master;
83         enum dlm_mle_type type;
84         struct o2hb_callback_func mle_hb_up;
85         struct o2hb_callback_func mle_hb_down;
86         union {
87                 struct dlm_lock_resource *res;
88                 struct dlm_lock_name name;
89         } u;
90 };
91
92 static void dlm_mle_node_down(struct dlm_ctxt *dlm,
93                               struct dlm_master_list_entry *mle,
94                               struct o2nm_node *node,
95                               int idx);
96 static void dlm_mle_node_up(struct dlm_ctxt *dlm,
97                             struct dlm_master_list_entry *mle,
98                             struct o2nm_node *node,
99                             int idx);
100
101 static void dlm_assert_master_worker(struct dlm_work_item *item, void *data);
102 static int dlm_do_assert_master(struct dlm_ctxt *dlm,
103                                 struct dlm_lock_resource *res,
104                                 void *nodemap, u32 flags);
105 static void dlm_deref_lockres_worker(struct dlm_work_item *item, void *data);
106
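/* Compare an mle against a (dlm, name) pair: for BLOCK and MIGRATION
 * entries the name is kept in mle->u.name, for MASTER entries it lives
 * in the attached lock resource. */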
107 static inline int dlm_mle_equal(struct dlm_ctxt *dlm,
108                                 struct dlm_master_list_entry *mle,
109                                 const char *name,
110                                 unsigned int namelen)
111 {
112         struct dlm_lock_resource *res;
113
114         if (dlm != mle->dlm)
115                 return 0;
116
117         if (mle->type == DLM_MLE_BLOCK ||
118             mle->type == DLM_MLE_MIGRATION) {
119                 if (namelen != mle->u.name.len ||
120                     memcmp(name, mle->u.name.name, namelen) != 0)
121                         return 0;
122         } else {
123                 res = mle->u.res;
124                 if (namelen != res->lockname.len ||
125                     memcmp(res->lockname.name, name, namelen) != 0)
126                         return 0;
127         }
128         return 1;
129 }
130
131 #define dlm_print_nodemap(m)  _dlm_print_nodemap(m,#m)
132 static void _dlm_print_nodemap(unsigned long *map, const char *mapname)
133 {
134         int i;
135         printk("%s=[ ", mapname);
136         for (i=0; i<O2NM_MAX_NODES; i++)
137                 if (test_bit(i, map))
138                         printk("%d ", i);
139         printk("]");
140 }
141
142 static void dlm_print_one_mle(struct dlm_master_list_entry *mle)
143 {
144         int refs;
145         char *type;
146         char attached;
147         u8 master;
148         unsigned int namelen;
149         const char *name;
150         struct kref *k;
151         unsigned long *maybe = mle->maybe_map,
152                       *vote = mle->vote_map,
153                       *resp = mle->response_map,
154                       *node = mle->node_map;
155
156         k = &mle->mle_refs;
157         if (mle->type == DLM_MLE_BLOCK)
158                 type = "BLK";
159         else if (mle->type == DLM_MLE_MASTER)
160                 type = "MAS";
161         else
162                 type = "MIG";
163         refs = atomic_read(&k->refcount);
164         master = mle->master;
165         attached = (list_empty(&mle->hb_events) ? 'N' : 'Y');
166
167         if (mle->type != DLM_MLE_MASTER) {
168                 namelen = mle->u.name.len;
169                 name = mle->u.name.name;
170         } else {
171                 namelen = mle->u.res->lockname.len;
172                 name = mle->u.res->lockname.name;
173         }
174
175         mlog(ML_NOTICE, "%.*s: %3s refs=%3d mas=%3u new=%3u evt=%c inuse=%d ",
176                   namelen, name, type, refs, master, mle->new_master, attached,
177                   mle->inuse);
178         dlm_print_nodemap(maybe);
179         printk(", ");
180         dlm_print_nodemap(vote);
181         printk(", ");
182         dlm_print_nodemap(resp);
183         printk(", ");
184         dlm_print_nodemap(node);
185         printk(", ");
186         printk("\n");
187 }
188
189 #if 0
190 /* The code below is compiled out (#if 0) but kept here because it aids debugging */
191
192 static void dlm_dump_mles(struct dlm_ctxt *dlm)
193 {
194         struct dlm_master_list_entry *mle;
195         struct list_head *iter;
196         
197         mlog(ML_NOTICE, "dumping all mles for domain %s:\n", dlm->name);
198         spin_lock(&dlm->master_lock);
199         list_for_each(iter, &dlm->master_list) {
200                 mle = list_entry(iter, struct dlm_master_list_entry, list);
201                 dlm_print_one_mle(mle);
202         }
203         spin_unlock(&dlm->master_lock);
204 }
205
206 int dlm_dump_all_mles(const char __user *data, unsigned int len)
207 {
208         struct list_head *iter;
209         struct dlm_ctxt *dlm;
210
211         spin_lock(&dlm_domain_lock);
212         list_for_each(iter, &dlm_domains) {
213                 dlm = list_entry (iter, struct dlm_ctxt, list);
214                 mlog(ML_NOTICE, "found dlm: %p, name=%s\n", dlm, dlm->name);
215                 dlm_dump_mles(dlm);
216         }
217         spin_unlock(&dlm_domain_lock);
218         return len;
219 }
220 EXPORT_SYMBOL_GPL(dlm_dump_all_mles);
221
222 #endif  /*  0  */
223
224
225 static struct kmem_cache *dlm_mle_cache = NULL;
226
227
228 static void dlm_mle_release(struct kref *kref);
229 static void dlm_init_mle(struct dlm_master_list_entry *mle,
230                         enum dlm_mle_type type,
231                         struct dlm_ctxt *dlm,
232                         struct dlm_lock_resource *res,
233                         const char *name,
234                         unsigned int namelen);
235 static void dlm_put_mle(struct dlm_master_list_entry *mle);
236 static void __dlm_put_mle(struct dlm_master_list_entry *mle);
237 static int dlm_find_mle(struct dlm_ctxt *dlm,
238                         struct dlm_master_list_entry **mle,
239                         char *name, unsigned int namelen);
240
241 static int dlm_do_master_request(struct dlm_lock_resource *res,
242                                  struct dlm_master_list_entry *mle, int to);
243
244
245 static int dlm_wait_for_lock_mastery(struct dlm_ctxt *dlm,
246                                      struct dlm_lock_resource *res,
247                                      struct dlm_master_list_entry *mle,
248                                      int *blocked);
249 static int dlm_restart_lock_mastery(struct dlm_ctxt *dlm,
250                                     struct dlm_lock_resource *res,
251                                     struct dlm_master_list_entry *mle,
252                                     int blocked);
253 static int dlm_add_migration_mle(struct dlm_ctxt *dlm,
254                                  struct dlm_lock_resource *res,
255                                  struct dlm_master_list_entry *mle,
256                                  struct dlm_master_list_entry **oldmle,
257                                  const char *name, unsigned int namelen,
258                                  u8 new_master, u8 master);
259
260 static u8 dlm_pick_migration_target(struct dlm_ctxt *dlm,
261                                     struct dlm_lock_resource *res);
262 static void dlm_remove_nonlocal_locks(struct dlm_ctxt *dlm,
263                                       struct dlm_lock_resource *res);
264 static int dlm_mark_lockres_migrating(struct dlm_ctxt *dlm,
265                                        struct dlm_lock_resource *res,
266                                        u8 target);
267 static int dlm_pre_master_reco_lockres(struct dlm_ctxt *dlm,
268                                        struct dlm_lock_resource *res);
269
270
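/* Classify an errno from the messaging layer: returns 1 if it indicates
 * that the remote node is dead or unreachable, 0 otherwise. */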
271 int dlm_is_host_down(int errno)
272 {
273         switch (errno) {
274                 case -EBADF:
275                 case -ECONNREFUSED:
276                 case -ENOTCONN:
277                 case -ECONNRESET:
278                 case -EPIPE:
279                 case -EHOSTDOWN:
280                 case -EHOSTUNREACH:
281                 case -ETIMEDOUT:
282                 case -ECONNABORTED:
283                 case -ENETDOWN:
284                 case -ENETUNREACH:
285                 case -ENETRESET:
286                 case -ESHUTDOWN:
287                 case -ENOPROTOOPT:
288                 case -EINVAL:   /* if returned from our tcp code,
289                                    this means there is no socket */
290                         return 1;
291         }
292         return 0;
293 }
294
295
296 /*
297  * MASTER LIST FUNCTIONS
298  */
299
300
301 /*
302  * regarding master list entries and heartbeat callbacks:
303  *
304  * in order to avoid sleeping and allocation that occurs in
305  * heartbeat, master list entries are simply attached to the
306  * dlm's established heartbeat callbacks.  the mle is attached
307  * when it is created, and since the dlm->spinlock is held at
308  * that time, any heartbeat event will be properly discovered
309  * by the mle.  the mle needs to be detached from the
310  * dlm->mle_hb_events list as soon as heartbeat events are no
311  * longer useful to the mle, and before the mle is freed.
312  *
313  * as a general rule, heartbeat events are no longer needed by
314  * the mle once an "answer" regarding the lock master has been
315  * received.
316  */
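/* Typical lifecycle (sketch): the mle is attached by dlm_init_mle() while
 * dlm->spinlock is held, and detached with dlm_mle_detach_hb_events()
 * once the master is known, before the final reference is dropped. */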
317 static inline void __dlm_mle_attach_hb_events(struct dlm_ctxt *dlm,
318                                               struct dlm_master_list_entry *mle)
319 {
320         assert_spin_locked(&dlm->spinlock);
321
322         list_add_tail(&mle->hb_events, &dlm->mle_hb_events);
323 }
324
325
326 static inline void __dlm_mle_detach_hb_events(struct dlm_ctxt *dlm,
327                                               struct dlm_master_list_entry *mle)
328 {
329         if (!list_empty(&mle->hb_events))
330                 list_del_init(&mle->hb_events);
331 }
332
333
334 static inline void dlm_mle_detach_hb_events(struct dlm_ctxt *dlm,
335                                             struct dlm_master_list_entry *mle)
336 {
337         spin_lock(&dlm->spinlock);
338         __dlm_mle_detach_hb_events(dlm, mle);
339         spin_unlock(&dlm->spinlock);
340 }
341
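/* Pin an mle that is actively in use: bumps both the inuse count and the
 * kref so that the assert master path cannot drop the last reference out
 * from under us.  dlm_put_mle_inuse() undoes both. */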
342 static void dlm_get_mle_inuse(struct dlm_master_list_entry *mle)
343 {
344         struct dlm_ctxt *dlm;
345         dlm = mle->dlm;
346
347         assert_spin_locked(&dlm->spinlock);
348         assert_spin_locked(&dlm->master_lock);
349         mle->inuse++;
350         kref_get(&mle->mle_refs);
351 }
352
353 static void dlm_put_mle_inuse(struct dlm_master_list_entry *mle)
354 {
355         struct dlm_ctxt *dlm;
356         dlm = mle->dlm;
357
358         spin_lock(&dlm->spinlock);
359         spin_lock(&dlm->master_lock);
360         mle->inuse--;
361         __dlm_put_mle(mle);
362         spin_unlock(&dlm->master_lock);
363         spin_unlock(&dlm->spinlock);
364
365 }
366
367 /* remove from list and free */
368 static void __dlm_put_mle(struct dlm_master_list_entry *mle)
369 {
370         struct dlm_ctxt *dlm;
371         dlm = mle->dlm;
372
373         assert_spin_locked(&dlm->spinlock);
374         assert_spin_locked(&dlm->master_lock);
375         if (!atomic_read(&mle->mle_refs.refcount)) {
376                 /* this may or may not crash, but who cares.
377                  * it's a BUG. */
378                 mlog(ML_ERROR, "bad mle: %p\n", mle);
379                 dlm_print_one_mle(mle);
380                 BUG();
381         } else
382                 kref_put(&mle->mle_refs, dlm_mle_release);
383 }
384
385
386 /* must not have any spinlocks coming in */
387 static void dlm_put_mle(struct dlm_master_list_entry *mle)
388 {
389         struct dlm_ctxt *dlm;
390         dlm = mle->dlm;
391
392         spin_lock(&dlm->spinlock);
393         spin_lock(&dlm->master_lock);
394         __dlm_put_mle(mle);
395         spin_unlock(&dlm->master_lock);
396         spin_unlock(&dlm->spinlock);
397 }
398
399 static inline void dlm_get_mle(struct dlm_master_list_entry *mle)
400 {
401         kref_get(&mle->mle_refs);
402 }
403
404 static void dlm_init_mle(struct dlm_master_list_entry *mle,
405                         enum dlm_mle_type type,
406                         struct dlm_ctxt *dlm,
407                         struct dlm_lock_resource *res,
408                         const char *name,
409                         unsigned int namelen)
410 {
411         assert_spin_locked(&dlm->spinlock);
412
413         mle->dlm = dlm;
414         mle->type = type;
415         INIT_LIST_HEAD(&mle->list);
416         INIT_LIST_HEAD(&mle->hb_events);
417         memset(mle->maybe_map, 0, sizeof(mle->maybe_map));
418         spin_lock_init(&mle->spinlock);
419         init_waitqueue_head(&mle->wq);
420         atomic_set(&mle->woken, 0);
421         kref_init(&mle->mle_refs);
422         memset(mle->response_map, 0, sizeof(mle->response_map));
423         mle->master = O2NM_MAX_NODES;
424         mle->new_master = O2NM_MAX_NODES;
425         mle->inuse = 0;
426
427         if (mle->type == DLM_MLE_MASTER) {
428                 BUG_ON(!res);
429                 mle->u.res = res;
430         } else if (mle->type == DLM_MLE_BLOCK) {
431                 BUG_ON(!name);
432                 memcpy(mle->u.name.name, name, namelen);
433                 mle->u.name.len = namelen;
434         } else /* DLM_MLE_MIGRATION */ {
435                 BUG_ON(!name);
436                 memcpy(mle->u.name.name, name, namelen);
437                 mle->u.name.len = namelen;
438         }
439
440         /* copy off the node_map and register hb callbacks on our copy */
441         memcpy(mle->node_map, dlm->domain_map, sizeof(mle->node_map));
442         memcpy(mle->vote_map, dlm->domain_map, sizeof(mle->vote_map));
443         clear_bit(dlm->node_num, mle->vote_map);
444         clear_bit(dlm->node_num, mle->node_map);
445
446         /* attach the mle to the domain node up/down events */
447         __dlm_mle_attach_hb_events(dlm, mle);
448 }
449
450
451 /* returns 1 if found, 0 if not */
452 static int dlm_find_mle(struct dlm_ctxt *dlm,
453                         struct dlm_master_list_entry **mle,
454                         char *name, unsigned int namelen)
455 {
456         struct dlm_master_list_entry *tmpmle;
457         struct list_head *iter;
458
459         assert_spin_locked(&dlm->master_lock);
460
461         list_for_each(iter, &dlm->master_list) {
462                 tmpmle = list_entry(iter, struct dlm_master_list_entry, list);
463                 if (!dlm_mle_equal(dlm, tmpmle, name, namelen))
464                         continue;
465                 dlm_get_mle(tmpmle);
466                 *mle = tmpmle;
467                 return 1;
468         }
469         return 0;
470 }
471
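/* Heartbeat notification fan-out: walk every mle currently attached to
 * dlm->mle_hb_events and update its node map for the node that just
 * came up or went down. */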
472 void dlm_hb_event_notify_attached(struct dlm_ctxt *dlm, int idx, int node_up)
473 {
474         struct dlm_master_list_entry *mle;
475         struct list_head *iter;
476
477         assert_spin_locked(&dlm->spinlock);
478         
479         list_for_each(iter, &dlm->mle_hb_events) {
480                 mle = list_entry(iter, struct dlm_master_list_entry, 
481                                  hb_events);
482                 if (node_up)
483                         dlm_mle_node_up(dlm, mle, NULL, idx);
484                 else
485                         dlm_mle_node_down(dlm, mle, NULL, idx);
486         }
487 }
488
489 static void dlm_mle_node_down(struct dlm_ctxt *dlm,
490                               struct dlm_master_list_entry *mle,
491                               struct o2nm_node *node, int idx)
492 {
493         spin_lock(&mle->spinlock);
494
495         if (!test_bit(idx, mle->node_map))
496                 mlog(0, "node %u already removed from nodemap!\n", idx);
497         else
498                 clear_bit(idx, mle->node_map);
499
500         spin_unlock(&mle->spinlock);
501 }
502
503 static void dlm_mle_node_up(struct dlm_ctxt *dlm,
504                             struct dlm_master_list_entry *mle,
505                             struct o2nm_node *node, int idx)
506 {
507         spin_lock(&mle->spinlock);
508
509         if (test_bit(idx, mle->node_map))
510                 mlog(0, "node %u already in node map!\n", idx);
511         else
512                 set_bit(idx, mle->node_map);
513
514         spin_unlock(&mle->spinlock);
515 }
516
517
518 int dlm_init_mle_cache(void)
519 {
520         dlm_mle_cache = kmem_cache_create("dlm_mle_cache",
521                                           sizeof(struct dlm_master_list_entry),
522                                           0, SLAB_HWCACHE_ALIGN,
523                                           NULL, NULL);
524         if (dlm_mle_cache == NULL)
525                 return -ENOMEM;
526         return 0;
527 }
528
529 void dlm_destroy_mle_cache(void)
530 {
531         if (dlm_mle_cache)
532                 kmem_cache_destroy(dlm_mle_cache);
533 }
534
535 static void dlm_mle_release(struct kref *kref)
536 {
537         struct dlm_master_list_entry *mle;
538         struct dlm_ctxt *dlm;
539
540         mlog_entry_void();
541
542         mle = container_of(kref, struct dlm_master_list_entry, mle_refs);
543         dlm = mle->dlm;
544
545         if (mle->type != DLM_MLE_MASTER) {
546                 mlog(0, "calling mle_release for %.*s, type %d\n",
547                      mle->u.name.len, mle->u.name.name, mle->type);
548         } else {
549                 mlog(0, "calling mle_release for %.*s, type %d\n",
550                      mle->u.res->lockname.len,
551                      mle->u.res->lockname.name, mle->type);
552         }
553         assert_spin_locked(&dlm->spinlock);
554         assert_spin_locked(&dlm->master_lock);
555
556         /* remove from list if not already */
557         if (!list_empty(&mle->list))
558                 list_del_init(&mle->list);
559
560         /* detach the mle from the domain node up/down events */
561         __dlm_mle_detach_hb_events(dlm, mle);
562
563         /* NOTE: kfree under spinlock here.
564          * if this is bad, we can move this to a freelist. */
565         kmem_cache_free(dlm_mle_cache, mle);
566 }
567
568
569 /*
570  * LOCK RESOURCE FUNCTIONS
571  */
572
573 static void dlm_set_lockres_owner(struct dlm_ctxt *dlm,
574                                   struct dlm_lock_resource *res,
575                                   u8 owner)
576 {
577         assert_spin_locked(&res->spinlock);
578
579         mlog_entry("%.*s, %u\n", res->lockname.len, res->lockname.name, owner);
580
581         if (owner == dlm->node_num)
582                 atomic_inc(&dlm->local_resources);
583         else if (owner == DLM_LOCK_RES_OWNER_UNKNOWN)
584                 atomic_inc(&dlm->unknown_resources);
585         else
586                 atomic_inc(&dlm->remote_resources);
587
588         res->owner = owner;
589 }
590
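/* Like dlm_set_lockres_owner(), but first drops the old owner's
 * contribution to the local/unknown/remote resource counters.
 * No-op if the owner is unchanged. */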
591 void dlm_change_lockres_owner(struct dlm_ctxt *dlm,
592                               struct dlm_lock_resource *res, u8 owner)
593 {
594         assert_spin_locked(&res->spinlock);
595
596         if (owner == res->owner)
597                 return;
598
599         if (res->owner == dlm->node_num)
600                 atomic_dec(&dlm->local_resources);
601         else if (res->owner == DLM_LOCK_RES_OWNER_UNKNOWN)
602                 atomic_dec(&dlm->unknown_resources);
603         else
604                 atomic_dec(&dlm->remote_resources);
605
606         dlm_set_lockres_owner(dlm, res, owner);
607 }
608
609
610 static void dlm_lockres_release(struct kref *kref)
611 {
612         struct dlm_lock_resource *res;
613
614         res = container_of(kref, struct dlm_lock_resource, refs);
615
616         /* This should not happen -- all lockres' have a name
617          * associated with them at init time. */
618         BUG_ON(!res->lockname.name);
619
620         mlog(0, "destroying lockres %.*s\n", res->lockname.len,
621              res->lockname.name);
622
623         if (!hlist_unhashed(&res->hash_node) ||
624             !list_empty(&res->granted) ||
625             !list_empty(&res->converting) ||
626             !list_empty(&res->blocked) ||
627             !list_empty(&res->dirty) ||
628             !list_empty(&res->recovering) ||
629             !list_empty(&res->purge)) {
630                 mlog(ML_ERROR,
631                      "Going to BUG for resource %.*s."
632                      "  We're on a list! [%c%c%c%c%c%c%c]\n",
633                      res->lockname.len, res->lockname.name,
634                      !hlist_unhashed(&res->hash_node) ? 'H' : ' ',
635                      !list_empty(&res->granted) ? 'G' : ' ',
636                      !list_empty(&res->converting) ? 'C' : ' ',
637                      !list_empty(&res->blocked) ? 'B' : ' ',
638                      !list_empty(&res->dirty) ? 'D' : ' ',
639                      !list_empty(&res->recovering) ? 'R' : ' ',
640                      !list_empty(&res->purge) ? 'P' : ' ');
641
642                 dlm_print_one_lock_resource(res);
643         }
644
645         /* By the time we're ready to blow this guy away, we shouldn't
646          * be on any lists. */
647         BUG_ON(!hlist_unhashed(&res->hash_node));
648         BUG_ON(!list_empty(&res->granted));
649         BUG_ON(!list_empty(&res->converting));
650         BUG_ON(!list_empty(&res->blocked));
651         BUG_ON(!list_empty(&res->dirty));
652         BUG_ON(!list_empty(&res->recovering));
653         BUG_ON(!list_empty(&res->purge));
654
655         kfree(res->lockname.name);
656
657         kfree(res);
658 }
659
660 void dlm_lockres_put(struct dlm_lock_resource *res)
661 {
662         kref_put(&res->refs, dlm_lockres_release);
663 }
664
665 static void dlm_init_lockres(struct dlm_ctxt *dlm,
666                              struct dlm_lock_resource *res,
667                              const char *name, unsigned int namelen)
668 {
669         char *qname;
670
671         /* If we memset here, we lose our reference to the kmalloc'd
672          * res->lockname.name, so be sure to init every field
673          * correctly! */
674
675         qname = (char *) res->lockname.name;
676         memcpy(qname, name, namelen);
677
678         res->lockname.len = namelen;
679         res->lockname.hash = dlm_lockid_hash(name, namelen);
680
681         init_waitqueue_head(&res->wq);
682         spin_lock_init(&res->spinlock);
683         INIT_HLIST_NODE(&res->hash_node);
684         INIT_LIST_HEAD(&res->granted);
685         INIT_LIST_HEAD(&res->converting);
686         INIT_LIST_HEAD(&res->blocked);
687         INIT_LIST_HEAD(&res->dirty);
688         INIT_LIST_HEAD(&res->recovering);
689         INIT_LIST_HEAD(&res->purge);
690         atomic_set(&res->asts_reserved, 0);
691         res->migration_pending = 0;
692         res->inflight_locks = 0;
693
694         kref_init(&res->refs);
695
696         /* just for consistency */
697         spin_lock(&res->spinlock);
698         dlm_set_lockres_owner(dlm, res, DLM_LOCK_RES_OWNER_UNKNOWN);
699         spin_unlock(&res->spinlock);
700
701         res->state = DLM_LOCK_RES_IN_PROGRESS;
702
703         res->last_used = 0;
704
705         memset(res->lvb, 0, DLM_LVB_LEN);
706         memset(res->refmap, 0, sizeof(res->refmap));
707 }
708
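/* Allocate a lockres and a private copy of the lock name; returns NULL
 * if either allocation fails.  The caller holds the initial reference. */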
709 struct dlm_lock_resource *dlm_new_lockres(struct dlm_ctxt *dlm,
710                                    const char *name,
711                                    unsigned int namelen)
712 {
713         struct dlm_lock_resource *res;
714
715         res = kmalloc(sizeof(struct dlm_lock_resource), GFP_NOFS);
716         if (!res)
717                 return NULL;
718
719         res->lockname.name = kmalloc(namelen, GFP_NOFS);
720         if (!res->lockname.name) {
721                 kfree(res);
722                 return NULL;
723         }
724
725         dlm_init_lockres(dlm, res, name, namelen);
726         return res;
727 }
728
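/* Account for an inflight lock/reference on this lockres: the first one
 * also sets this node's bit in the refmap.  The matching drop below
 * clears the bit again once the count returns to zero. */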
729 void __dlm_lockres_grab_inflight_ref(struct dlm_ctxt *dlm,
730                                    struct dlm_lock_resource *res,
731                                    int new_lockres,
732                                    const char *file,
733                                    int line)
734 {
735         if (!new_lockres)
736                 assert_spin_locked(&res->spinlock);
737
738         if (!test_bit(dlm->node_num, res->refmap)) {
739                 BUG_ON(res->inflight_locks != 0);
740                 dlm_lockres_set_refmap_bit(dlm->node_num, res);
741         }
742         res->inflight_locks++;
743         mlog(0, "%s:%.*s: inflight++: now %u\n",
744              dlm->name, res->lockname.len, res->lockname.name,
745              res->inflight_locks);
746 }
747
748 void __dlm_lockres_drop_inflight_ref(struct dlm_ctxt *dlm,
749                                    struct dlm_lock_resource *res,
750                                    const char *file,
751                                    int line)
752 {
753         assert_spin_locked(&res->spinlock);
754
755         BUG_ON(res->inflight_locks == 0);
756         res->inflight_locks--;
757         mlog(0, "%s:%.*s: inflight--: now %u\n",
758              dlm->name, res->lockname.len, res->lockname.name,
759              res->inflight_locks);
760         if (res->inflight_locks == 0)
761                 dlm_lockres_clear_refmap_bit(dlm->node_num, res);
762         wake_up(&res->wq);
763 }
764
765 /*
766  * lookup a lock resource by name.
767  * may already exist in the hashtable.
768  * lockid is null terminated
769  *
770  * if not, allocate enough for the lockres and for
771  * the temporary structure used in doing the mastering.
772  *
773  * also, do a lookup in the dlm->master_list to see
774  * if another node has begun mastering the same lock.
775  * if so, there should be a block entry in there
776  * for this name, and we should *not* attempt to master
777  * the lock here.   need to wait around for that node
778  * to assert_master (or die).
779  *
780  */
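/* The returned lockres carries a reference; the caller (dlmlock, for
 * example) is responsible for dropping it with dlm_lockres_put(). */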
781 struct dlm_lock_resource * dlm_get_lock_resource(struct dlm_ctxt *dlm,
782                                           const char *lockid,
783                                           int namelen,
784                                           int flags)
785 {
786         struct dlm_lock_resource *tmpres=NULL, *res=NULL;
787         struct dlm_master_list_entry *mle = NULL;
788         struct dlm_master_list_entry *alloc_mle = NULL;
789         int blocked = 0;
790         int ret, nodenum;
791         struct dlm_node_iter iter;
792         unsigned int hash;
793         int tries = 0;
794         int bit, wait_on_recovery = 0;
795         int drop_inflight_if_nonlocal = 0;
796
797         BUG_ON(!lockid);
798
799         hash = dlm_lockid_hash(lockid, namelen);
800
801         mlog(0, "get lockres %s (len %d)\n", lockid, namelen);
802
803 lookup:
804         spin_lock(&dlm->spinlock);
805         tmpres = __dlm_lookup_lockres_full(dlm, lockid, namelen, hash);
806         if (tmpres) {
807                 int dropping_ref = 0;
808
809                 spin_lock(&tmpres->spinlock);
810                 if (tmpres->owner == dlm->node_num) {
811                         BUG_ON(tmpres->state & DLM_LOCK_RES_DROPPING_REF);
812                         dlm_lockres_grab_inflight_ref(dlm, tmpres);
813                 } else if (tmpres->state & DLM_LOCK_RES_DROPPING_REF)
814                         dropping_ref = 1;
815                 spin_unlock(&tmpres->spinlock);
816                 spin_unlock(&dlm->spinlock);
817
818                 /* wait until done messaging the master, drop our ref to allow
819                  * the lockres to be purged, start over. */
820                 if (dropping_ref) {
821                         spin_lock(&tmpres->spinlock);
822                         __dlm_wait_on_lockres_flags(tmpres, DLM_LOCK_RES_DROPPING_REF);
823                         spin_unlock(&tmpres->spinlock);
824                         dlm_lockres_put(tmpres);
825                         tmpres = NULL;
826                         goto lookup;
827                 }
828
829                 mlog(0, "found in hash!\n");
830                 if (res)
831                         dlm_lockres_put(res);
832                 res = tmpres;
833                 goto leave;
834         }
835
836         if (!res) {
837                 spin_unlock(&dlm->spinlock);
838                 mlog(0, "allocating a new resource\n");
839                 /* nothing found and we need to allocate one. */
840                 alloc_mle = (struct dlm_master_list_entry *)
841                         kmem_cache_alloc(dlm_mle_cache, GFP_NOFS);
842                 if (!alloc_mle)
843                         goto leave;
844                 res = dlm_new_lockres(dlm, lockid, namelen);
845                 if (!res)
846                         goto leave;
847                 goto lookup;
848         }
849
850         mlog(0, "no lockres found, allocated our own: %p\n", res);
851
852         if (flags & LKM_LOCAL) {
853                 /* caller knows it's safe to assume it's not mastered elsewhere
854                  * DONE!  return right away */
855                 spin_lock(&res->spinlock);
856                 dlm_change_lockres_owner(dlm, res, dlm->node_num);
857                 __dlm_insert_lockres(dlm, res);
858                 dlm_lockres_grab_inflight_ref(dlm, res);
859                 spin_unlock(&res->spinlock);
860                 spin_unlock(&dlm->spinlock);
861                 /* lockres still marked IN_PROGRESS */
862                 goto wake_waiters;
863         }
864
865         /* check master list to see if another node has started mastering it */
866         spin_lock(&dlm->master_lock);
867
868         /* if we found a block, wait for lock to be mastered by another node */
869         blocked = dlm_find_mle(dlm, &mle, (char *)lockid, namelen);
870         if (blocked) {
871                 int mig;
872                 if (mle->type == DLM_MLE_MASTER) {
873                         mlog(ML_ERROR, "master entry for nonexistent lock!\n");
874                         BUG();
875                 }
876                 mig = (mle->type == DLM_MLE_MIGRATION);
877                 /* if there is a migration in progress, let the migration
878                  * finish before continuing.  we can wait for the absence
879                  * of the MIGRATION mle: either the migrate finished or
880                  * one of the nodes died and the mle was cleaned up.
881                  * if there is a BLOCK here, but it already has a master
882                  * set, we are too late.  the master does not have a ref
883                  * for us in the refmap.  detach the mle and drop it.
884                  * either way, go back to the top and start over. */
885                 if (mig || mle->master != O2NM_MAX_NODES) {
886                         BUG_ON(mig && mle->master == dlm->node_num);
887                         /* we arrived too late.  the master does not
888                          * have a ref for us. retry. */
889                         mlog(0, "%s:%.*s: late on %s\n",
890                              dlm->name, namelen, lockid,
891                              mig ?  "MIGRATION" : "BLOCK");
892                         spin_unlock(&dlm->master_lock);
893                         spin_unlock(&dlm->spinlock);
894
895                         /* master is known, detach */
896                         if (!mig)
897                                 dlm_mle_detach_hb_events(dlm, mle);
898                         dlm_put_mle(mle);
899                         mle = NULL;
900                         /* this is lame, but we can't wait on either
901                          * the mle or lockres waitqueue here */
902                         if (mig)
903                                 msleep(100);
904                         goto lookup;
905                 }
906         } else {
907                 /* go ahead and try to master lock on this node */
908                 mle = alloc_mle;
909                 /* make sure this does not get freed below */
910                 alloc_mle = NULL;
911                 dlm_init_mle(mle, DLM_MLE_MASTER, dlm, res, NULL, 0);
912                 set_bit(dlm->node_num, mle->maybe_map);
913                 list_add(&mle->list, &dlm->master_list);
914
915                 /* still holding the dlm spinlock, check the recovery map
916                  * to see if there are any nodes that still need to be 
917                  * considered.  these will not appear in the mle nodemap
918                  * but they might own this lockres.  wait on them. */
919                 bit = find_next_bit(dlm->recovery_map, O2NM_MAX_NODES, 0);
920                 if (bit < O2NM_MAX_NODES) {
921                         mlog(ML_NOTICE, "%s:%.*s: at least one node (%d) to "
922                              "recover before lock mastery can begin\n",
923                              dlm->name, namelen, (char *)lockid, bit);
924                         wait_on_recovery = 1;
925                 }
926         }
927
928         /* at this point there is either a DLM_MLE_BLOCK or a
929          * DLM_MLE_MASTER on the master list, so it's safe to add the
930          * lockres to the hashtable.  anyone who finds the lock will
931          * still have to wait on the IN_PROGRESS. */
932
933         /* finally add the lockres to its hash bucket */
934         __dlm_insert_lockres(dlm, res);
935         /* since this lockres is new it does not require the spinlock */
936         dlm_lockres_grab_inflight_ref_new(dlm, res);
937
938         /* if this node does not become the master make sure to drop
939          * this inflight reference below */
940         drop_inflight_if_nonlocal = 1;
941
942         /* get an extra ref on the mle in case this is a BLOCK
943          * if so, the creator of the BLOCK may try to put the last
944          * ref at this time in the assert master handler, so we
945          * need an extra one to keep from a bad ptr deref. */
946         dlm_get_mle_inuse(mle);
947         spin_unlock(&dlm->master_lock);
948         spin_unlock(&dlm->spinlock);
949
950 redo_request:
951         while (wait_on_recovery) {
952                 /* any cluster changes that occurred after dropping the
953                  * dlm spinlock would be detectable by a change on the mle,
954                  * so we only need to clear out the recovery map once. */
955                 if (dlm_is_recovery_lock(lockid, namelen)) {
956                         mlog(ML_NOTICE, "%s: recovery map is not empty, but "
957                              "must master $RECOVERY lock now\n", dlm->name);
958                         if (!dlm_pre_master_reco_lockres(dlm, res))
959                                 wait_on_recovery = 0;
960                         else {
961                                 mlog(0, "%s: waiting 500ms for heartbeat state "
962                                     "change\n", dlm->name);
963                                 msleep(500);
964                         }
965                         continue;
966                 } 
967
968                 dlm_kick_recovery_thread(dlm);
969                 msleep(1000);
970                 dlm_wait_for_recovery(dlm);
971
972                 spin_lock(&dlm->spinlock);
973                 bit = find_next_bit(dlm->recovery_map, O2NM_MAX_NODES, 0);
974                 if (bit < O2NM_MAX_NODES) {
975                         mlog(ML_NOTICE, "%s:%.*s: at least one node (%d) to "
976                              "recover before lock mastery can begin\n",
977                              dlm->name, namelen, (char *)lockid, bit);
978                         wait_on_recovery = 1;
979                 } else
980                         wait_on_recovery = 0;
981                 spin_unlock(&dlm->spinlock);
982
983                 if (wait_on_recovery)
984                         dlm_wait_for_node_recovery(dlm, bit, 10000);
985         }
986
987         /* must wait for lock to be mastered elsewhere */
988         if (blocked)
989                 goto wait;
990
991         ret = -EINVAL;
992         dlm_node_iter_init(mle->vote_map, &iter);
993         while ((nodenum = dlm_node_iter_next(&iter)) >= 0) {
994                 ret = dlm_do_master_request(res, mle, nodenum);
995                 if (ret < 0)
996                         mlog_errno(ret);
997                 if (mle->master != O2NM_MAX_NODES) {
998                         /* found a master! */
999                         if (mle->master <= nodenum)
1000                                 break;
1001                         /* if our master request has not reached the master
1002                          * yet, keep going until it does.  this is how the
1003                          * master will know that asserts are needed back to
1004                          * the lower nodes. */
1005                         mlog(0, "%s:%.*s: requests only up to %u but master "
1006                              "is %u, keep going\n", dlm->name, namelen,
1007                              lockid, nodenum, mle->master);
1008                 }
1009         }
1010
1011 wait:
1012         /* keep going until the response map includes all nodes */
1013         ret = dlm_wait_for_lock_mastery(dlm, res, mle, &blocked);
1014         if (ret < 0) {
1015                 wait_on_recovery = 1;
1016                 mlog(0, "%s:%.*s: node map changed, redo the "
1017                      "master request now, blocked=%d\n",
1018                      dlm->name, res->lockname.len,
1019                      res->lockname.name, blocked);
1020                 if (++tries > 20) {
1021                         mlog(ML_ERROR, "%s:%.*s: spinning on "
1022                              "dlm_wait_for_lock_mastery, blocked=%d\n", 
1023                              dlm->name, res->lockname.len, 
1024                              res->lockname.name, blocked);
1025                         dlm_print_one_lock_resource(res);
1026                         dlm_print_one_mle(mle);
1027                         tries = 0;
1028                 }
1029                 goto redo_request;
1030         }
1031
1032         mlog(0, "lockres mastered by %u\n", res->owner);
1033         /* make sure we never continue without this */
1034         BUG_ON(res->owner == O2NM_MAX_NODES);
1035
1036         /* master is known, detach if not already detached */
1037         dlm_mle_detach_hb_events(dlm, mle);
1038         dlm_put_mle(mle);
1039         /* put the extra ref */
1040         dlm_put_mle_inuse(mle);
1041
1042 wake_waiters:
1043         spin_lock(&res->spinlock);
1044         if (res->owner != dlm->node_num && drop_inflight_if_nonlocal)
1045                 dlm_lockres_drop_inflight_ref(dlm, res);
1046         res->state &= ~DLM_LOCK_RES_IN_PROGRESS;
1047         spin_unlock(&res->spinlock);
1048         wake_up(&res->wq);
1049
1050 leave:
1051         /* need to free the unused mle */
1052         if (alloc_mle)
1053                 kmem_cache_free(dlm_mle_cache, alloc_mle);
1054
1055         return res;
1056 }
1057
1058
1059 #define DLM_MASTERY_TIMEOUT_MS   5000
1060
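/* Spin in the mastery protocol until res->owner is known: restart if the
 * node map changes, sleep (up to DLM_MASTERY_TIMEOUT_MS) while waiting
 * for responses, and assert mastery ourselves once every node has voted
 * and this node holds the lowest node number set in the maybe_map. */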
1061 static int dlm_wait_for_lock_mastery(struct dlm_ctxt *dlm,
1062                                      struct dlm_lock_resource *res,
1063                                      struct dlm_master_list_entry *mle,
1064                                      int *blocked)
1065 {
1066         u8 m;
1067         int ret, bit;
1068         int map_changed, voting_done;
1069         int assert, sleep;
1070
1071 recheck:
1072         ret = 0;
1073         assert = 0;
1074
1075         /* check if another node has already become the owner */
1076         spin_lock(&res->spinlock);
1077         if (res->owner != DLM_LOCK_RES_OWNER_UNKNOWN) {
1078                 mlog(0, "%s:%.*s: owner is suddenly %u\n", dlm->name,
1079                      res->lockname.len, res->lockname.name, res->owner);
1080                 spin_unlock(&res->spinlock);
1081                 /* this will cause the master to re-assert across
1082                  * the whole cluster, freeing up mles */
1083                 if (res->owner != dlm->node_num) {
1084                         ret = dlm_do_master_request(res, mle, res->owner);
1085                         if (ret < 0) {
1086                                 /* give recovery a chance to run */
1087                                 mlog(ML_ERROR, "link to %u went down?: %d\n", res->owner, ret);
1088                                 msleep(500);
1089                                 goto recheck;
1090                         }
1091                 }
1092                 ret = 0;
1093                 goto leave;
1094         }
1095         spin_unlock(&res->spinlock);
1096
1097         spin_lock(&mle->spinlock);
1098         m = mle->master;
1099         map_changed = (memcmp(mle->vote_map, mle->node_map,
1100                               sizeof(mle->vote_map)) != 0);
1101         voting_done = (memcmp(mle->vote_map, mle->response_map,
1102                              sizeof(mle->vote_map)) == 0);
1103
1104         /* restart if we hit any errors */
1105         if (map_changed) {
1106                 int b;
1107                 mlog(0, "%s: %.*s: node map changed, restarting\n",
1108                      dlm->name, res->lockname.len, res->lockname.name);
1109                 ret = dlm_restart_lock_mastery(dlm, res, mle, *blocked);
1110                 b = (mle->type == DLM_MLE_BLOCK);
1111                 if ((*blocked && !b) || (!*blocked && b)) {
1112                         mlog(0, "%s:%.*s: status change: old=%d new=%d\n", 
1113                              dlm->name, res->lockname.len, res->lockname.name,
1114                              *blocked, b);
1115                         *blocked = b;
1116                 }
1117                 spin_unlock(&mle->spinlock);
1118                 if (ret < 0) {
1119                         mlog_errno(ret);
1120                         goto leave;
1121                 }
1122                 mlog(0, "%s:%.*s: restart lock mastery succeeded, "
1123                      "rechecking now\n", dlm->name, res->lockname.len,
1124                      res->lockname.name);
1125                 goto recheck;
1126         } else {
1127                 if (!voting_done) {
1128                         mlog(0, "map not changed and voting not done "
1129                              "for %s:%.*s\n", dlm->name, res->lockname.len,
1130                              res->lockname.name);
1131                 }
1132         }
1133
1134         if (m != O2NM_MAX_NODES) {
1135                 /* another node has done an assert!
1136                  * all done! */
1137                 sleep = 0;
1138         } else {
1139                 sleep = 1;
1140                 /* have all nodes responded? */
1141                 if (voting_done && !*blocked) {
1142                         bit = find_next_bit(mle->maybe_map, O2NM_MAX_NODES, 0);
1143                         if (dlm->node_num <= bit) {
1144                                 /* my node number is lowest.
1145                                  * now tell other nodes that I am
1146                                  * mastering this. */
1147                                 mle->master = dlm->node_num;
1148                                 /* ref was grabbed in get_lock_resource
1149                                  * will be dropped in dlmlock_master */
1150                                 assert = 1;
1151                                 sleep = 0;
1152                         }
1153                         /* if voting is done, but we have not received
1154                          * an assert master yet, we must sleep */
1155                 }
1156         }
1157
1158         spin_unlock(&mle->spinlock);
1159
1160         /* sleep if we haven't finished voting yet */
1161         if (sleep) {
1162                 unsigned long timeo = msecs_to_jiffies(DLM_MASTERY_TIMEOUT_MS);
1163
1164                 /*
1165                 if (atomic_read(&mle->mle_refs.refcount) < 2)
1166                         mlog(ML_ERROR, "mle (%p) refs=%d, name=%.*s\n", mle,
1167                         atomic_read(&mle->mle_refs.refcount),
1168                         res->lockname.len, res->lockname.name);
1169                 */
1170                 atomic_set(&mle->woken, 0);
1171                 (void)wait_event_timeout(mle->wq,
1172                                          (atomic_read(&mle->woken) == 1),
1173                                          timeo);
1174                 if (res->owner == O2NM_MAX_NODES) {
1175                         mlog(0, "%s:%.*s: waiting again\n", dlm->name,
1176                              res->lockname.len, res->lockname.name);
1177                         goto recheck;
1178                 }
1179                 mlog(0, "done waiting, master is %u\n", res->owner);
1180                 ret = 0;
1181                 goto leave;
1182         }
1183
1184         ret = 0;   /* done */
1185         if (assert) {
1186                 m = dlm->node_num;
1187                 mlog(0, "about to master %.*s here, this=%u\n",
1188                      res->lockname.len, res->lockname.name, m);
1189                 ret = dlm_do_assert_master(dlm, res, mle->vote_map, 0);
1190                 if (ret) {
1191                         /* This is a failure in the network path,
1192                          * not in the response to the assert_master
1193                          * (any nonzero response is a BUG on this node).
1194                          * Most likely a socket just got disconnected
1195                          * due to node death. */
1196                         mlog_errno(ret);
1197                 }
1198                 /* no longer need to restart lock mastery.
1199                  * all living nodes have been contacted. */
1200                 ret = 0;
1201         }
1202
1203         /* set the lockres owner */
1204         spin_lock(&res->spinlock);
1205         /* mastery reference obtained either during
1206          * assert_master_handler or in get_lock_resource */
1207         dlm_change_lockres_owner(dlm, res, m);
1208         spin_unlock(&res->spinlock);
1209
1210 leave:
1211         return ret;
1212 }
1213
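/* Iterator over the bits that differ between two node bitmaps: each call
 * to dlm_bitmap_diff_iter_next() returns the next changed node number and
 * whether that node went up or down relative to the original map. */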
1214 struct dlm_bitmap_diff_iter
1215 {
1216         int curnode;
1217         unsigned long *orig_bm;
1218         unsigned long *cur_bm;
1219         unsigned long diff_bm[BITS_TO_LONGS(O2NM_MAX_NODES)];
1220 };
1221
1222 enum dlm_node_state_change
1223 {
1224         NODE_DOWN = -1,
1225         NODE_NO_CHANGE = 0,
1226         NODE_UP
1227 };
1228
1229 static void dlm_bitmap_diff_iter_init(struct dlm_bitmap_diff_iter *iter,
1230                                       unsigned long *orig_bm,
1231                                       unsigned long *cur_bm)
1232 {
1233         unsigned long p1, p2;
1234         int i;
1235
1236         iter->curnode = -1;
1237         iter->orig_bm = orig_bm;
1238         iter->cur_bm = cur_bm;
1239
1240         for (i = 0; i < BITS_TO_LONGS(O2NM_MAX_NODES); i++) {
1241                 p1 = *(iter->orig_bm + i);
1242                 p2 = *(iter->cur_bm + i);
1243                 iter->diff_bm[i] = (p1 & ~p2) | (p2 & ~p1);
1244         }
1245 }
1246
1247 static int dlm_bitmap_diff_iter_next(struct dlm_bitmap_diff_iter *iter,
1248                                      enum dlm_node_state_change *state)
1249 {
1250         int bit;
1251
1252         if (iter->curnode >= O2NM_MAX_NODES)
1253                 return -ENOENT;
1254
1255         bit = find_next_bit(iter->diff_bm, O2NM_MAX_NODES,
1256                             iter->curnode+1);
1257         if (bit >= O2NM_MAX_NODES) {
1258                 iter->curnode = O2NM_MAX_NODES;
1259                 return -ENOENT;
1260         }
1261
1262         /* if it was there in the original then this node died */
1263         if (test_bit(bit, iter->orig_bm))
1264                 *state = NODE_DOWN;
1265         else
1266                 *state = NODE_UP;
1267
1268         iter->curnode = bit;
1269         return bit;
1270 }
1271
1272
1273 static int dlm_restart_lock_mastery(struct dlm_ctxt *dlm,
1274                                     struct dlm_lock_resource *res,
1275                                     struct dlm_master_list_entry *mle,
1276                                     int blocked)
1277 {
1278         struct dlm_bitmap_diff_iter bdi;
1279         enum dlm_node_state_change sc;
1280         int node;
1281         int ret = 0;
1282
1283         mlog(0, "something happened such that the "
1284              "master process may need to be restarted!\n");
1285
1286         assert_spin_locked(&mle->spinlock);
1287
1288         dlm_bitmap_diff_iter_init(&bdi, mle->vote_map, mle->node_map);
1289         node = dlm_bitmap_diff_iter_next(&bdi, &sc);
1290         while (node >= 0) {
1291                 if (sc == NODE_UP) {
1292                         /* a node came up.  clear any old vote from
1293                          * the response map and set it in the vote map
1294                          * then restart the mastery. */
1295                         mlog(ML_NOTICE, "node %d up while restarting\n", node);
1296
1297                         /* redo the master request, but only for the new node */
1298                         mlog(0, "sending request to new node\n");
1299                         clear_bit(node, mle->response_map);
1300                         set_bit(node, mle->vote_map);
1301                 } else {
1302                         mlog(ML_ERROR, "node down! %d\n", node);
1303                         if (blocked) {
1304                                 int lowest = find_next_bit(mle->maybe_map,
1305                                                        O2NM_MAX_NODES, 0);
1306
1307                                 /* act like it was never there */
1308                                 clear_bit(node, mle->maybe_map);
1309
1310                                 if (node == lowest) {
1311                                         mlog(0, "expected master %u died"
1312                                             " while this node was blocked "
1313                                             "waiting on it!\n", node);
1314                                         lowest = find_next_bit(mle->maybe_map,
1315                                                         O2NM_MAX_NODES,
1316                                                         lowest+1);
1317                                         if (lowest < O2NM_MAX_NODES) {
1318                                                 mlog(0, "%s:%.*s:still "
1319                                                      "blocked. waiting on %u "
1320                                                      "now\n", dlm->name,
1321                                                      res->lockname.len,
1322                                                      res->lockname.name,
1323                                                      lowest);
1324                                         } else {
1325                                                 /* mle is an MLE_BLOCK, but
1326                                                  * there is now nothing left to
1327                                                  * block on.  we need to return
1328                                                  * all the way back out and try
1329                                                  * again with an MLE_MASTER.
1330                                                  * dlm_do_local_recovery_cleanup
1331                                                  * has already run, so the mle
1332                                                  * refcount is ok */
1333                                                 mlog(0, "%s:%.*s: no "
1334                                                      "longer blocking. try to "
1335                                                      "master this here\n",
1336                                                      dlm->name,
1337                                                      res->lockname.len,
1338                                                      res->lockname.name);
1339                                                 mle->type = DLM_MLE_MASTER;
1340                                                 mle->u.res = res;
1341                                         }
1342                                 }
1343                         }
1344
1345                         /* now blank out everything, as if we had never
1346                          * contacted anyone */
1347                         memset(mle->maybe_map, 0, sizeof(mle->maybe_map));
1348                         memset(mle->response_map, 0, sizeof(mle->response_map));
1349                         /* reset the vote_map to the current node_map */
1350                         memcpy(mle->vote_map, mle->node_map,
1351                                sizeof(mle->node_map));
1352                         /* put myself into the maybe map */
1353                         if (mle->type != DLM_MLE_BLOCK)
1354                                 set_bit(dlm->node_num, mle->maybe_map);
1355                 }
1356                 ret = -EAGAIN;
1357                 node = dlm_bitmap_diff_iter_next(&bdi, &sc);
1358         }
1359         return ret;
1360 }
1361
1362
1363 /*
1364  * DLM_MASTER_REQUEST_MSG
1365  *
1366  * returns: 0 on success,
1367  *          -errno on a network error
1368  *
1369  * on error, the caller should assume the target node is "dead"
1370  *
1371  */
1372
1373 static int dlm_do_master_request(struct dlm_lock_resource *res,
1374                                  struct dlm_master_list_entry *mle, int to)
1375 {
1376         struct dlm_ctxt *dlm = mle->dlm;
1377         struct dlm_master_request request;
1378         int ret, response=0, resend;
1379
1380         memset(&request, 0, sizeof(request));
1381         request.node_idx = dlm->node_num;
1382
1383         BUG_ON(mle->type == DLM_MLE_MIGRATION);
1384
1385         if (mle->type != DLM_MLE_MASTER) {
1386                 request.namelen = mle->u.name.len;
1387                 memcpy(request.name, mle->u.name.name, request.namelen);
1388         } else {
1389                 request.namelen = mle->u.res->lockname.len;
1390                 memcpy(request.name, mle->u.res->lockname.name,
1391                         request.namelen);
1392         }
1393
1394 again:
1395         ret = o2net_send_message(DLM_MASTER_REQUEST_MSG, dlm->key, &request,
1396                                  sizeof(request), to, &response);
1397         if (ret < 0)  {
1398                 if (ret == -ESRCH) {
1399                         /* should never happen */
1400                         mlog(ML_ERROR, "TCP stack not ready!\n");
1401                         BUG();
1402                 } else if (ret == -EINVAL) {
1403                         mlog(ML_ERROR, "bad args passed to o2net!\n");
1404                         BUG();
1405                 } else if (ret == -ENOMEM) {
1406                         mlog(ML_ERROR, "out of memory while trying to send "
1407                              "network message!  retrying\n");
1408                         /* this is totally crude */
1409                         msleep(50);
1410                         goto again;
1411                 } else if (!dlm_is_host_down(ret)) {
1412                         /* not a network error. bad. */
1413                         mlog_errno(ret);
1414                         mlog(ML_ERROR, "unhandled error!\n");
1415                         BUG();
1416                 }
1417                 /* all other errors should be network errors,
1418                  * and likely indicate node death */
1419                 mlog(ML_ERROR, "link to %d went down!\n", to);
1420                 goto out;
1421         }
1422
1423         ret = 0;
1424         resend = 0;
1425         spin_lock(&mle->spinlock);
1426         switch (response) {
1427                 case DLM_MASTER_RESP_YES:
1428                         set_bit(to, mle->response_map);
1429                         mlog(0, "node %u is the master, response=YES\n", to);
1430                         mlog(0, "%s:%.*s: master node %u now knows I have a "
1431                              "reference\n", dlm->name, res->lockname.len,
1432                              res->lockname.name, to);
1433                         mle->master = to;
1434                         break;
1435                 case DLM_MASTER_RESP_NO:
1436                         mlog(0, "node %u not master, response=NO\n", to);
1437                         set_bit(to, mle->response_map);
1438                         break;
1439                 case DLM_MASTER_RESP_MAYBE:
1440                         mlog(0, "node %u not master, response=MAYBE\n", to);
1441                         set_bit(to, mle->response_map);
1442                         set_bit(to, mle->maybe_map);
1443                         break;
1444                 case DLM_MASTER_RESP_ERROR:
1445                         mlog(0, "node %u hit an error, resending\n", to);
1446                         resend = 1;
1447                         response = 0;
1448                         break;
1449                 default:
1450                         mlog(ML_ERROR, "bad response! %u\n", response);
1451                         BUG();
1452         }
1453         spin_unlock(&mle->spinlock);
1454         if (resend) {
1455                 /* this is also totally crude */
1456                 msleep(50);
1457                 goto again;
1458         }
1459
1460 out:
1461         return ret;
1462 }
1463
1464 /*
1465  * locks that can be taken here:
1466  * dlm->spinlock
1467  * res->spinlock
1468  * mle->spinlock
1469  * dlm->master_list
1470  *
1471  * if possible, TRIM THIS DOWN!!!
1472  */
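     /* Replies YES when this node already owns the lockres (setting the
      * requester's refmap bit and dispatching assert_master cleanup work),
      * NO when another owner is known or this node is merely blocked on the
      * name, MAYBE when this node is itself still trying to master it, and
      * ERROR when the lockres is recovering/migrating or an mle cannot be
      * allocated. */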
1473 int dlm_master_request_handler(struct o2net_msg *msg, u32 len, void *data,
1474                                void **ret_data)
1475 {
1476         u8 response = DLM_MASTER_RESP_MAYBE;
1477         struct dlm_ctxt *dlm = data;
1478         struct dlm_lock_resource *res = NULL;
1479         struct dlm_master_request *request = (struct dlm_master_request *) msg->buf;
1480         struct dlm_master_list_entry *mle = NULL, *tmpmle = NULL;
1481         char *name;
1482         unsigned int namelen, hash;
1483         int found, ret;
1484         int set_maybe;
1485         int dispatch_assert = 0;
1486
1487         if (!dlm_grab(dlm))
1488                 return DLM_MASTER_RESP_NO;
1489
1490         if (!dlm_domain_fully_joined(dlm)) {
1491                 response = DLM_MASTER_RESP_NO;
1492                 goto send_response;
1493         }
1494
1495         name = request->name;
1496         namelen = request->namelen;
1497         hash = dlm_lockid_hash(name, namelen);
1498
1499         if (namelen > DLM_LOCKID_NAME_MAX) {
1500                 response = DLM_IVBUFLEN;
1501                 goto send_response;
1502         }
1503
1504 way_up_top:
1505         spin_lock(&dlm->spinlock);
1506         res = __dlm_lookup_lockres(dlm, name, namelen, hash);
1507         if (res) {
1508                 spin_unlock(&dlm->spinlock);
1509
1510                 /* take care of the easy cases up front */
1511                 spin_lock(&res->spinlock);
1512                 if (res->state & (DLM_LOCK_RES_RECOVERING|
1513                                   DLM_LOCK_RES_MIGRATING)) {
1514                         spin_unlock(&res->spinlock);
1515                         mlog(0, "returning DLM_MASTER_RESP_ERROR since res is "
1516                              "being recovered/migrated\n");
1517                         response = DLM_MASTER_RESP_ERROR;
1518                         if (mle)
1519                                 kmem_cache_free(dlm_mle_cache, mle);
1520                         goto send_response;
1521                 }
1522
1523                 if (res->owner == dlm->node_num) {
1524                         mlog(0, "%s:%.*s: setting bit %u in refmap\n",
1525                              dlm->name, namelen, name, request->node_idx);
1526                         dlm_lockres_set_refmap_bit(request->node_idx, res);
1527                         spin_unlock(&res->spinlock);
1528                         response = DLM_MASTER_RESP_YES;
1529                         if (mle)
1530                                 kmem_cache_free(dlm_mle_cache, mle);
1531
1532                         /* this node is the owner.
1533                          * there is some extra work that needs to
1534                          * happen now.  the requesting node has
1535                          * caused all nodes up to this one to
1536                          * create mles.  this node now needs to
1537                          * go back and clean those up. */
1538                         dispatch_assert = 1;
1539                         goto send_response;
1540                 } else if (res->owner != DLM_LOCK_RES_OWNER_UNKNOWN) {
1541                         spin_unlock(&res->spinlock);
1542                         // mlog(0, "node %u is the master\n", res->owner);
1543                         response = DLM_MASTER_RESP_NO;
1544                         if (mle)
1545                                 kmem_cache_free(dlm_mle_cache, mle);
1546                         goto send_response;
1547                 }
1548
1549                 /* ok, there is no owner.  either this node is
1550                  * being blocked, or it is actively trying to
1551                  * master this lock. */
1552                 if (!(res->state & DLM_LOCK_RES_IN_PROGRESS)) {
1553                         mlog(ML_ERROR, "lock with no owner should be "
1554                              "in-progress!\n");
1555                         BUG();
1556                 }
1557
1558                 // mlog(0, "lockres is in progress...\n");
1559                 spin_lock(&dlm->master_lock);
1560                 found = dlm_find_mle(dlm, &tmpmle, name, namelen);
1561                 if (!found) {
1562                         mlog(ML_ERROR, "no mle found for this lock!\n");
1563                         BUG();
1564                 }
1565                 set_maybe = 1;
1566                 spin_lock(&tmpmle->spinlock);
1567                 if (tmpmle->type == DLM_MLE_BLOCK) {
1568                         // mlog(0, "this node is waiting for "
1569                         // "lockres to be mastered\n");
1570                         response = DLM_MASTER_RESP_NO;
1571                 } else if (tmpmle->type == DLM_MLE_MIGRATION) {
1572                         mlog(0, "node %u is master, but trying to migrate to "
1573                              "node %u.\n", tmpmle->master, tmpmle->new_master);
1574                         if (tmpmle->master == dlm->node_num) {
1575                                 mlog(ML_ERROR, "no owner on lockres, but this "
1576                                      "node is trying to migrate it to %u?!\n",
1577                                      tmpmle->new_master);
1578                                 BUG();
1579                         } else {
1580                                 /* the real master can respond on its own */
1581                                 response = DLM_MASTER_RESP_NO;
1582                         }
1583                 } else if (tmpmle->master != DLM_LOCK_RES_OWNER_UNKNOWN) {
1584                         set_maybe = 0;
1585                         if (tmpmle->master == dlm->node_num) {
1586                                 response = DLM_MASTER_RESP_YES;
1587                                 /* this node will be the owner.
1588                                  * go back and clean the mles on any
1589                                  * other nodes */
1590                                 dispatch_assert = 1;
1591                                 dlm_lockres_set_refmap_bit(request->node_idx, res);
1592                                 mlog(0, "%s:%.*s: setting bit %u in refmap\n",
1593                                      dlm->name, namelen, name,
1594                                      request->node_idx);
1595                         } else
1596                                 response = DLM_MASTER_RESP_NO;
1597                 } else {
1598                         // mlog(0, "this node is attempting to "
1599                         // "master lockres\n");
1600                         response = DLM_MASTER_RESP_MAYBE;
1601                 }
1602                 if (set_maybe)
1603                         set_bit(request->node_idx, tmpmle->maybe_map);
1604                 spin_unlock(&tmpmle->spinlock);
1605
1606                 spin_unlock(&dlm->master_lock);
1607                 spin_unlock(&res->spinlock);
1608
1609                 /* keep the mle attached to heartbeat events */
1610                 dlm_put_mle(tmpmle);
1611                 if (mle)
1612                         kmem_cache_free(dlm_mle_cache, mle);
1613                 goto send_response;
1614         }
1615
1616         /*
1617          * lockres doesn't exist on this node
1618          * if there is an MLE_BLOCK, return NO
1619          * if there is an MLE_MASTER, return MAYBE
1620          * otherwise, add an MLE_BLOCK, return NO
1621          */
1622         spin_lock(&dlm->master_lock);
1623         found = dlm_find_mle(dlm, &tmpmle, name, namelen);
1624         if (!found) {
1625                 /* this lockid has never been seen on this node yet */
1626                 // mlog(0, "no mle found\n");
1627                 if (!mle) {
1628                         spin_unlock(&dlm->master_lock);
1629                         spin_unlock(&dlm->spinlock);
1630
1631                         mle = (struct dlm_master_list_entry *)
1632                                 kmem_cache_alloc(dlm_mle_cache, GFP_NOFS);
1633                         if (!mle) {
1634                                 response = DLM_MASTER_RESP_ERROR;
1635                                 mlog_errno(-ENOMEM);
1636                                 goto send_response;
1637                         }
1638                         goto way_up_top;
1639                 }
1640
1641                 // mlog(0, "this is second time thru, already allocated, "
1642                 // "add the block.\n");
1643                 dlm_init_mle(mle, DLM_MLE_BLOCK, dlm, NULL, name, namelen);
1644                 set_bit(request->node_idx, mle->maybe_map);
1645                 list_add(&mle->list, &dlm->master_list);
1646                 response = DLM_MASTER_RESP_NO;
1647         } else {
1648                 // mlog(0, "mle was found\n");
1649                 set_maybe = 1;
1650                 spin_lock(&tmpmle->spinlock);
1651                 if (tmpmle->master == dlm->node_num) {
1652                         mlog(ML_ERROR, "no lockres, but an mle with this node as master!\n");
1653                         BUG();
1654                 }
1655                 if (tmpmle->type == DLM_MLE_BLOCK)
1656                         response = DLM_MASTER_RESP_NO;
1657                 else if (tmpmle->type == DLM_MLE_MIGRATION) {
1658                         mlog(0, "migration mle was found (%u->%u)\n",
1659                              tmpmle->master, tmpmle->new_master);
1660                         /* real master can respond on its own */
1661                         response = DLM_MASTER_RESP_NO;
1662                 } else
1663                         response = DLM_MASTER_RESP_MAYBE;
1664                 if (set_maybe)
1665                         set_bit(request->node_idx, tmpmle->maybe_map);
1666                 spin_unlock(&tmpmle->spinlock);
1667         }
1668         spin_unlock(&dlm->master_lock);
1669         spin_unlock(&dlm->spinlock);
1670
1671         if (found) {
1672                 /* keep the mle attached to heartbeat events */
1673                 dlm_put_mle(tmpmle);
1674         }
1675 send_response:
1676
1677         if (dispatch_assert) {
1678                 if (response != DLM_MASTER_RESP_YES)
1679                         mlog(ML_ERROR, "invalid response %d\n", response);
1680                 if (!res) {
1681                         mlog(ML_ERROR, "bad lockres while trying to assert!\n");
1682                         BUG();
1683                 }
1684                 mlog(0, "%u is the owner of %.*s, cleaning everyone else\n",
1685                              dlm->node_num, res->lockname.len, res->lockname.name);
1686                 ret = dlm_dispatch_assert_master(dlm, res, 0, request->node_idx, 
1687                                                  DLM_ASSERT_MASTER_MLE_CLEANUP);
1688                 if (ret < 0) {
1689                         mlog(ML_ERROR, "failed to dispatch assert master work\n");
1690                         response = DLM_MASTER_RESP_ERROR;
1691                 }
1692         }
1693
1694         dlm_put(dlm);
1695         return response;
1696 }
1697
1698 /*
1699  * DLM_ASSERT_MASTER_MSG
1700  */
1701
1702
1703 /*
1704  * NOTE: this can be used for debugging
1705  * can periodically run all locks owned by this node
1706  * and re-assert across the cluster...
1707  */
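     /* Broadcasts DLM_ASSERT_MASTER_MSG for res to every node set in nodemap.
      * DLM_LOCK_RES_SETREF_INPROG is held across the broadcast so deref
      * messages are deferred until the refmap has settled.  A MASTERY_REF
      * reply sets the responder's bit in the refmap; a REASSERT reply causes
      * the whole nodemap to be walked again.  Nodes that die mid-broadcast
      * are skipped and their error status is returned once the remaining
      * nodes have been contacted. */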
1708 int dlm_do_assert_master(struct dlm_ctxt *dlm,
1709                          struct dlm_lock_resource *res,
1710                          void *nodemap, u32 flags)
1711 {
1712         struct dlm_assert_master assert;
1713         int to, tmpret;
1714         struct dlm_node_iter iter;
1715         int ret = 0;
1716         int reassert;
1717         const char *lockname = res->lockname.name;
1718         unsigned int namelen = res->lockname.len;
1719
1720         BUG_ON(namelen > O2NM_MAX_NAME_LEN);
1721
1722         spin_lock(&res->spinlock);
1723         res->state |= DLM_LOCK_RES_SETREF_INPROG;
1724         spin_unlock(&res->spinlock);
1725
1726 again:
1727         reassert = 0;
1728
1729         /* note that if this nodemap is empty, it returns 0 */
1730         dlm_node_iter_init(nodemap, &iter);
1731         while ((to = dlm_node_iter_next(&iter)) >= 0) {
1732                 int r = 0;
1733                 struct dlm_master_list_entry *mle = NULL;
1734
1735                 mlog(0, "sending assert master to %d (%.*s)\n", to,
1736                      namelen, lockname);
1737                 memset(&assert, 0, sizeof(assert));
1738                 assert.node_idx = dlm->node_num;
1739                 assert.namelen = namelen;
1740                 memcpy(assert.name, lockname, namelen);
1741                 assert.flags = cpu_to_be32(flags);
1742
1743                 tmpret = o2net_send_message(DLM_ASSERT_MASTER_MSG, dlm->key,
1744                                             &assert, sizeof(assert), to, &r);
1745                 if (tmpret < 0) {
1746                         mlog(0, "assert_master returned %d!\n", tmpret);
1747                         if (!dlm_is_host_down(tmpret)) {
1748                                 mlog(ML_ERROR, "unhandled error=%d!\n", tmpret);
1749                                 BUG();
1750                         }
1751                         /* a node died.  finish out the rest of the nodes. */
1752                         mlog(0, "link to %d went down!\n", to);
1753                         /* any nonzero status return will do */
1754                         ret = tmpret;
1755                         r = 0;
1756                 } else if (r < 0) {
1757                         /* ok, something is horribly messed up.  kill thyself. */
1758                         mlog(ML_ERROR,"during assert master of %.*s to %u, "
1759                              "got %d.\n", namelen, lockname, to, r);
1760                         spin_lock(&dlm->spinlock);
1761                         spin_lock(&dlm->master_lock);
1762                         if (dlm_find_mle(dlm, &mle, (char *)lockname,
1763                                          namelen)) {
1764                                 dlm_print_one_mle(mle);
1765                                 __dlm_put_mle(mle);
1766                         }
1767                         spin_unlock(&dlm->master_lock);
1768                         spin_unlock(&dlm->spinlock);
1769                         BUG();
1770                 }
1771
1772                 if (r & DLM_ASSERT_RESPONSE_REASSERT &&
1773                     !(r & DLM_ASSERT_RESPONSE_MASTERY_REF)) {
1774                         mlog(ML_ERROR, "%.*s: very strange, "
1775                              "master MLE but no lockres on %u\n",
1776                              namelen, lockname, to);
1777                 }
1778
1779                 if (r & DLM_ASSERT_RESPONSE_REASSERT) {
1780                         mlog(0, "%.*s: node %u created mles on other "
1781                              "nodes and requests a re-assert\n",
1782                              namelen, lockname, to);
1783                         reassert = 1;
1784                 }
1785                 if (r & DLM_ASSERT_RESPONSE_MASTERY_REF) {
1786                         mlog(0, "%.*s: node %u has a reference to this "
1787                              "lockres, set the bit in the refmap\n",
1788                              namelen, lockname, to);
1789                         spin_lock(&res->spinlock);
1790                         dlm_lockres_set_refmap_bit(to, res);
1791                         spin_unlock(&res->spinlock);
1792                 }
1793         }
1794
1795         if (reassert)
1796                 goto again;
1797
1798         spin_lock(&res->spinlock);
1799         res->state &= ~DLM_LOCK_RES_SETREF_INPROG;
1800         spin_unlock(&res->spinlock);
1801         wake_up(&res->wq);
1802
1803         return ret;
1804 }
1805
1806 /*
1807  * locks that can be taken here:
1808  * dlm->spinlock
1809  * res->spinlock
1810  * mle->spinlock
1811  * dlm->master_list
1812  *
1813  * if possible, TRIM THIS DOWN!!!
1814  */
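     /* The asserting node is checked against any local mle and against the
      * current lockres owner; an inconsistent assert dumps the lockres and
      * returns -EINVAL, shooting down the sender.  A valid assert records the
      * new master, finishes off a pending migration when the mle is a
      * MIGRATION mle, detaches and drops the mle, and replies with the
      * DLM_ASSERT_RESPONSE_REASSERT and/or DLM_ASSERT_RESPONSE_MASTERY_REF
      * bits as appropriate. */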
1815 int dlm_assert_master_handler(struct o2net_msg *msg, u32 len, void *data,
1816                               void **ret_data)
1817 {
1818         struct dlm_ctxt *dlm = data;
1819         struct dlm_master_list_entry *mle = NULL;
1820         struct dlm_assert_master *assert = (struct dlm_assert_master *)msg->buf;
1821         struct dlm_lock_resource *res = NULL;
1822         char *name;
1823         unsigned int namelen, hash;
1824         u32 flags;
1825         int master_request = 0, have_lockres_ref = 0;
1826         int ret = 0;
1827
1828         if (!dlm_grab(dlm))
1829                 return 0;
1830
1831         name = assert->name;
1832         namelen = assert->namelen;
1833         hash = dlm_lockid_hash(name, namelen);
1834         flags = be32_to_cpu(assert->flags);
1835
1836         if (namelen > DLM_LOCKID_NAME_MAX) {
1837                 mlog(ML_ERROR, "Invalid name length!\n");
1838                 goto done;
1839         }
1840
1841         spin_lock(&dlm->spinlock);
1842
1843         if (flags)
1844                 mlog(0, "assert_master with flags: %u\n", flags);
1845
1846         /* find the MLE */
1847         spin_lock(&dlm->master_lock);
1848         if (!dlm_find_mle(dlm, &mle, name, namelen)) {
1849                 /* not an error, could be master just re-asserting */
1850                 mlog(0, "just got an assert_master from %u, but no "
1851                      "MLE for it! (%.*s)\n", assert->node_idx,
1852                      namelen, name);
1853         } else {
1854                 int bit = find_next_bit (mle->maybe_map, O2NM_MAX_NODES, 0);
1855                 if (bit >= O2NM_MAX_NODES) {
1856                         /* not necessarily an error, though less likely.
1857                          * could be master just re-asserting. */
1858                         mlog(0, "no bits set in the maybe_map, but %u "
1859                              "is asserting! (%.*s)\n", assert->node_idx,
1860                              namelen, name);
1861                 } else if (bit != assert->node_idx) {
1862                         if (flags & DLM_ASSERT_MASTER_MLE_CLEANUP) {
1863                                 mlog(0, "master %u was found, %u should "
1864                                      "back off\n", assert->node_idx, bit);
1865                         } else {
1866                                 /* with the fix for bug 569, a higher node
1867                                  * number winning the mastery will respond
1868                                  * YES to mastery requests, but this node
1869                                  * had no way of knowing.  let it pass. */
1870                                 mlog(0, "%u is the lowest node, "
1871                                      "%u is asserting. (%.*s)  %u must "
1872                                      "have begun after %u won.\n", bit,
1873                                      assert->node_idx, namelen, name, bit,
1874                                      assert->node_idx);
1875                         }
1876                 }
1877                 if (mle->type == DLM_MLE_MIGRATION) {
1878                         if (flags & DLM_ASSERT_MASTER_MLE_CLEANUP) {
1879                                 mlog(0, "%s:%.*s: got cleanup assert"
1880                                      " from %u for migration\n",
1881                                      dlm->name, namelen, name,
1882                                      assert->node_idx);
1883                         } else if (!(flags & DLM_ASSERT_MASTER_FINISH_MIGRATION)) {
1884                                 mlog(0, "%s:%.*s: got unrelated assert"
1885                                      " from %u for migration, ignoring\n",
1886                                      dlm->name, namelen, name,
1887                                      assert->node_idx);
1888                                 __dlm_put_mle(mle);
1889                                 spin_unlock(&dlm->master_lock);
1890                                 spin_unlock(&dlm->spinlock);
1891                                 goto done;
1892                         }       
1893                 }
1894         }
1895         spin_unlock(&dlm->master_lock);
1896
1897         /* ok everything checks out with the MLE
1898          * now check to see if there is a lockres */
1899         res = __dlm_lookup_lockres(dlm, name, namelen, hash);
1900         if (res) {
1901                 spin_lock(&res->spinlock);
1902                 if (res->state & DLM_LOCK_RES_RECOVERING)  {
1903                         mlog(ML_ERROR, "%u asserting but %.*s is "
1904                              "RECOVERING!\n", assert->node_idx, namelen, name);
1905                         goto kill;
1906                 }
1907                 if (!mle) {
1908                         if (res->owner != DLM_LOCK_RES_OWNER_UNKNOWN &&
1909                             res->owner != assert->node_idx) {
1910                                 mlog(ML_ERROR, "assert_master from "
1911                                           "%u, but current owner is "
1912                                           "%u! (%.*s)\n",
1913                                        assert->node_idx, res->owner,
1914                                        namelen, name);
1915                                 goto kill;
1916                         }
1917                 } else if (mle->type != DLM_MLE_MIGRATION) {
1918                         if (res->owner != DLM_LOCK_RES_OWNER_UNKNOWN) {
1919                                 /* owner is just re-asserting */
1920                                 if (res->owner == assert->node_idx) {
1921                                         mlog(0, "owner %u re-asserting on "
1922                                              "lock %.*s\n", assert->node_idx,
1923                                              namelen, name);
1924                                         goto ok;
1925                                 }
1926                                 mlog(ML_ERROR, "got assert_master from "
1927                                      "node %u, but %u is the owner! "
1928                                      "(%.*s)\n", assert->node_idx,
1929                                      res->owner, namelen, name);
1930                                 goto kill;
1931                         }
1932                         if (!(res->state & DLM_LOCK_RES_IN_PROGRESS)) {
1933                                 mlog(ML_ERROR, "got assert from %u, but lock "
1934                                      "with no owner should be "
1935                                      "in-progress! (%.*s)\n",
1936                                      assert->node_idx,
1937                                      namelen, name);
1938                                 goto kill;
1939                         }
1940                 } else /* mle->type == DLM_MLE_MIGRATION */ {
1941                         /* should only be getting an assert from new master */
1942                         if (assert->node_idx != mle->new_master) {
1943                                 mlog(ML_ERROR, "got assert from %u, but "
1944                                      "new master is %u, and old master "
1945                                      "was %u (%.*s)\n",
1946                                      assert->node_idx, mle->new_master,
1947                                      mle->master, namelen, name);
1948                                 goto kill;
1949                         }
1950
1951                 }
1952 ok:
1953                 spin_unlock(&res->spinlock);
1954         }
1955         spin_unlock(&dlm->spinlock);
1956
1957         // mlog(0, "woo!  got an assert_master from node %u!\n",
1958         //           assert->node_idx);
1959         if (mle) {
1960                 int extra_ref = 0;
1961                 int nn = -1;
1962                 int rr, err = 0;
1963                 
1964                 spin_lock(&mle->spinlock);
1965                 if (mle->type == DLM_MLE_BLOCK || mle->type == DLM_MLE_MIGRATION)
1966                         extra_ref = 1;
1967                 else {
1968                         /* MASTER mle: if any bits set in the response map
1969                          * then the calling node needs to re-assert to clear
1970                          * up nodes that this node contacted */
1971                         while ((nn = find_next_bit (mle->response_map, O2NM_MAX_NODES, 
1972                                                     nn+1)) < O2NM_MAX_NODES) {
1973                                 if (nn != dlm->node_num && nn != assert->node_idx)
1974                                         master_request = 1;
1975                         }
1976                 }
1977                 mle->master = assert->node_idx;
1978                 atomic_set(&mle->woken, 1);
1979                 wake_up(&mle->wq);
1980                 spin_unlock(&mle->spinlock);
1981
1982                 if (res) {
1983                         int wake = 0;
1984                         spin_lock(&res->spinlock);
1985                         if (mle->type == DLM_MLE_MIGRATION) {
1986                                 mlog(0, "finishing off migration of lockres %.*s, "
1987                                         "from %u to %u\n",
1988                                         res->lockname.len, res->lockname.name,
1989                                         dlm->node_num, mle->new_master);
1990                                 res->state &= ~DLM_LOCK_RES_MIGRATING;
1991                                 wake = 1;
1992                                 dlm_change_lockres_owner(dlm, res, mle->new_master);
1993                                 BUG_ON(res->state & DLM_LOCK_RES_DIRTY);
1994                         } else {
1995                                 dlm_change_lockres_owner(dlm, res, mle->master);
1996                         }
1997                         spin_unlock(&res->spinlock);
1998                         have_lockres_ref = 1;
1999                         if (wake)
2000                                 wake_up(&res->wq);
2001                 }
2002
2003                 /* master is known, detach if not already detached.
2004                  * ensures that only one assert_master call will happen
2005                  * on this mle. */
2006                 spin_lock(&dlm->spinlock);
2007                 spin_lock(&dlm->master_lock);
2008
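                     /* sanity-check the refcount before dropping our refs:
                      * a BLOCK or MIGRATION mle still carries the extra ref
                      * taken by the original request message, and an in-use
                      * mle carries one more on top of the list reference.
                      * Anything lower means a ref was already dropped where
                      * it should not have been. */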
2009                 rr = atomic_read(&mle->mle_refs.refcount);
2010                 if (mle->inuse > 0) {
2011                         if (extra_ref && rr < 3)
2012                                 err = 1;
2013                         else if (!extra_ref && rr < 2)
2014                                 err = 1;
2015                 } else {
2016                         if (extra_ref && rr < 2)
2017                                 err = 1;
2018                         else if (!extra_ref && rr < 1)
2019                                 err = 1;
2020                 }
2021                 if (err) {
2022                         mlog(ML_ERROR, "%s:%.*s: got assert master from %u "
2023                              "that will mess up this node, refs=%d, extra=%d, "
2024                              "inuse=%d\n", dlm->name, namelen, name,
2025                              assert->node_idx, rr, extra_ref, mle->inuse);
2026                         dlm_print_one_mle(mle);
2027                 }
2028                 list_del_init(&mle->list);
2029                 __dlm_mle_detach_hb_events(dlm, mle);
2030                 __dlm_put_mle(mle);
2031                 if (extra_ref) {
2032                         /* the assert master message now balances the extra
2033                          * ref given by the master / migration request message.
2034                          * if this is the last put, it will be removed
2035                          * from the list. */
2036                         __dlm_put_mle(mle);
2037                 }
2038                 spin_unlock(&dlm->master_lock);
2039                 spin_unlock(&dlm->spinlock);
2040         } else if (res) {
2041                 if (res->owner != assert->node_idx) {
2042                         mlog(0, "assert_master from %u, but current "
2043                              "owner is %u (%.*s), no mle\n", assert->node_idx,
2044                              res->owner, namelen, name);
2045                 }
2046         }
2047
2048 done:
2049         ret = 0;
2050         if (res) {
2051                 spin_lock(&res->spinlock);
2052                 res->state |= DLM_LOCK_RES_SETREF_INPROG;
2053                 spin_unlock(&res->spinlock);
2054                 *ret_data = (void *)res;
2055         }
2056         dlm_put(dlm);
2057         if (master_request) {
2058                 mlog(0, "need to tell master to reassert\n");
2059                 /* positive. negative would shoot down the node. */
2060                 ret |= DLM_ASSERT_RESPONSE_REASSERT;
2061                 if (!have_lockres_ref) {
2062                         mlog(ML_ERROR, "strange, got assert from %u, MASTER "
2063                              "mle present here for %s:%.*s, but no lockres!\n",
2064                              assert->node_idx, dlm->name, namelen, name);
2065                 }
2066         }
2067         if (have_lockres_ref) {
2068                 /* let the master know we have a reference to the lockres */
2069                 ret |= DLM_ASSERT_RESPONSE_MASTERY_REF;
2070                 mlog(0, "%s:%.*s: got assert from %u, need a ref\n",
2071                      dlm->name, namelen, name, assert->node_idx);
2072         }
2073         return ret;
2074
2075 kill:
2076         /* kill the caller! */
2077         mlog(ML_ERROR, "Bad message received from another node.  Dumping state "
2078              "and killing the other node now!  This node is OK and can continue.\n");
2079         __dlm_print_one_lock_resource(res);
2080         spin_unlock(&res->spinlock);
2081         spin_unlock(&dlm->spinlock);
2082         *ret_data = (void *)res; 
2083         dlm_put(dlm);
2084         return -EINVAL;
2085 }
2086
2087 void dlm_assert_master_post_handler(int status, void *data, void *ret_data)
2088 {
2089         struct dlm_lock_resource *res = (struct dlm_lock_resource *)ret_data;
2090
2091         if (ret_data) {
2092                 spin_lock(&res->spinlock);
2093                 res->state &= ~DLM_LOCK_RES_SETREF_INPROG;
2094                 spin_unlock(&res->spinlock);
2095                 wake_up(&res->wq);
2096                 dlm_lockres_put(res);
2097         }
2098         return;
2099 }
2100
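     /* Queues dlm_assert_master_worker on dlm->dlm_worker for this lockres.
      * The caller's existing reference on res is handed to the work item, and
      * an extra dlm ref is taken for the lifetime of the work. */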
2101 int dlm_dispatch_assert_master(struct dlm_ctxt *dlm,
2102                                struct dlm_lock_resource *res,
2103                                int ignore_higher, u8 request_from, u32 flags)
2104 {
2105         struct dlm_work_item *item;
2106         item = kzalloc(sizeof(*item), GFP_NOFS);
2107         if (!item)
2108                 return -ENOMEM;
2109
2110
2111         /* queue up work for dlm_assert_master_worker */
2112         dlm_grab(dlm);  /* get an extra ref for the work item */
2113         dlm_init_work_item(dlm, item, dlm_assert_master_worker, NULL);
2114         item->u.am.lockres = res; /* already have a ref */
2115         /* can optionally ignore node numbers higher than this node */
2116         item->u.am.ignore_higher = ignore_higher;
2117         item->u.am.request_from = request_from;
2118         item->u.am.flags = flags;
2119
2120         if (ignore_higher) 
2121                 mlog(0, "IGNORE HIGHER: %.*s\n", res->lockname.len, 
2122                      res->lockname.name);
2123                 
2124         spin_lock(&dlm->work_lock);
2125         list_add_tail(&item->list, &dlm->work_list);
2126         spin_unlock(&dlm->work_lock);
2127
2128         queue_work(dlm->dlm_worker, &dlm->dispatched_work);
2129         return 0;
2130 }
2131
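     /* Workqueue side of dlm_dispatch_assert_master().  Builds the target
      * nodemap from the current domain_map, always excluding this node and,
      * when ignore_higher is set, also the requesting node and every node
      * number above this one.  Unless the lockres is already MIGRATING (in
      * which case the assert is skipped), an ast is reserved to hold off
      * migration, dlm_do_assert_master() is called, and the reservation is
      * then released. */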
2132 static void dlm_assert_master_worker(struct dlm_work_item *item, void *data)
2133 {
2134         struct dlm_ctxt *dlm = data;
2135         int ret = 0;
2136         struct dlm_lock_resource *res;
2137         unsigned long nodemap[BITS_TO_LONGS(O2NM_MAX_NODES)];
2138         int ignore_higher;
2139         int bit;
2140         u8 request_from;
2141         u32 flags;
2142
2143         dlm = item->dlm;
2144         res = item->u.am.lockres;
2145         ignore_higher = item->u.am.ignore_higher;
2146         request_from = item->u.am.request_from;
2147         flags = item->u.am.flags;
2148
2149         spin_lock(&dlm->spinlock);
2150         memcpy(nodemap, dlm->domain_map, sizeof(nodemap));
2151         spin_unlock(&dlm->spinlock);
2152
2153         clear_bit(dlm->node_num, nodemap);
2154         if (ignore_higher) {
2155                 /* if this is just to clear up mles for nodes below
2156                  * this node, do not send the message to the original
2157                  * caller or any node number higher than this */
2158                 clear_bit(request_from, nodemap);
2159                 bit = dlm->node_num;
2160                 while (1) {
2161                         bit = find_next_bit(nodemap, O2NM_MAX_NODES,
2162                                             bit+1);
2163                         if (bit >= O2NM_MAX_NODES)
2164                                 break;
2165                         clear_bit(bit, nodemap);
2166                 }
2167         }
2168
2169         /*
2170          * If we're migrating this lock to someone else, we are no
2171          * longer allowed to assert our own mastery.  OTOH, we need to
2172          * prevent migration from starting while we're still asserting
2173          * our dominance.  The reserved ast delays migration.
2174          */
2175         spin_lock(&res->spinlock);
2176         if (res->state & DLM_LOCK_RES_MIGRATING) {
2177                 mlog(0, "Someone asked us to assert mastery, but we're "
2178                      "in the middle of migration.  Skipping assert, "
2179                      "the new master will handle that.\n");
2180                 spin_unlock(&res->spinlock);
2181                 goto put;
2182         } else
2183                 __dlm_lockres_reserve_ast(res);
2184         spin_unlock(&res->spinlock);
2185
2186         /* this call now finishes out the nodemap
2187          * even if one or more nodes die */
2188         mlog(0, "worker about to master %.*s here, this=%u\n",
2189                      res->lockname.len, res->lockname.name, dlm->node_num);
2190         ret = dlm_do_assert_master(dlm, res, nodemap, flags);
2191         if (ret < 0) {
2192                 /* no need to restart, we are done */
2193                 if (!dlm_is_host_down(ret))
2194                         mlog_errno(ret);
2195         }
2196
2197         /* Ok, we've asserted ourselves.  Let's let migration start. */
2198         dlm_lockres_release_ast(dlm, res);
2199
2200 put:
2201         dlm_lockres_put(res);
2202
2203         mlog(0, "finished with dlm_assert_master_worker\n");
2204 }
2205
2206 /* SPECIAL CASE for the $RECOVERY lock used by the recovery thread.
2207  * We cannot wait for node recovery to complete to begin mastering this
2208  * lockres because this lockres is used to kick off recovery! ;-)
2209  * So, do a pre-check on all living nodes to see if any of those nodes
2210  * think that $RECOVERY is currently mastered by a dead node.  If so,
2211  * we wait a short time to allow that node to get notified by its own
2212  * heartbeat stack, then check again.  All $RECOVERY lock resources
2213  * mastered by dead nodes are purged when the heartbeat callback is
2214  * fired, so we can know for sure that it is safe to continue once
2215  * the node returns a live node or no node.  */
2216 static int dlm_pre_master_reco_lockres(struct dlm_ctxt *dlm,
2217                                        struct dlm_lock_resource *res)
2218 {
2219         struct dlm_node_iter iter;
2220         int nodenum;
2221         int ret = 0;
2222         u8 master = DLM_LOCK_RES_OWNER_UNKNOWN;
2223
2224         spin_lock(&dlm->spinlock);
2225         dlm_node_iter_init(dlm->domain_map, &iter);
2226         spin_unlock(&dlm->spinlock);
2227
2228         while ((nodenum = dlm_node_iter_next(&iter)) >= 0) {
2229                 /* do not send to self */
2230                 if (nodenum == dlm->node_num)
2231                         continue;
2232                 ret = dlm_do_master_requery(dlm, res, nodenum, &master);
2233                 if (ret < 0) {
2234                         mlog_errno(ret);
2235                         if (!dlm_is_host_down(ret))
2236                                 BUG();
2237                         /* host is down, so answer for that node would be
2238                          * DLM_LOCK_RES_OWNER_UNKNOWN.  continue. */
2239                         ret = 0;
2240                 }
2241
2242                 if (master != DLM_LOCK_RES_OWNER_UNKNOWN) {
2243                         /* check to see if this master is in the recovery map */
2244                         spin_lock(&dlm->spinlock);
2245                         if (test_bit(master, dlm->recovery_map)) {
2246                                 mlog(ML_NOTICE, "%s: node %u has not seen "
2247                                      "node %u go down yet, and thinks the "
2248                                      "dead node is mastering the recovery "
2249                                      "lock.  must wait.\n", dlm->name,
2250                                      nodenum, master);
2251                                 ret = -EAGAIN;
2252                         }
2253                         spin_unlock(&dlm->spinlock);
2254                         mlog(0, "%s: reco lock master is %u\n", dlm->name, 
2255                              master);
2256                         break;
2257                 }
2258         }
2259         return ret;
2260 }
2261
2262 /*
2263  * DLM_DEREF_LOCKRES_MSG
2264  */
2265
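     /* Tells the current owner of res that this node no longer holds a
      * reference to the lockres, so the owner can clear this node's bit in
      * its refmap.  A negative status from the owner means it had no record
      * of the reference, which is fatal here. */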
2266 int dlm_drop_lockres_ref(struct dlm_ctxt *dlm, struct dlm_lock_resource *res)
2267 {
2268         struct dlm_deref_lockres deref;
2269         int ret = 0, r;
2270         const char *lockname;
2271         unsigned int namelen;
2272
2273         lockname = res->lockname.name;
2274         namelen = res->lockname.len;
2275         BUG_ON(namelen > O2NM_MAX_NAME_LEN);
2276
2277         mlog(0, "%s:%.*s: sending deref to %d\n",
2278              dlm->name, namelen, lockname, res->owner);
2279         memset(&deref, 0, sizeof(deref));
2280         deref.node_idx = dlm->node_num;
2281         deref.namelen = namelen;
2282         memcpy(deref.name, lockname, namelen);
2283
2284         ret = o2net_send_message(DLM_DEREF_LOCKRES_MSG, dlm->key,
2285                                  &deref, sizeof(deref), res->owner, &r);
2286         if (ret < 0)
2287                 mlog_errno(ret);
2288         else if (r < 0) {
2289                 /* BAD.  other node says I did not have a ref. */
2290                 mlog(ML_ERROR,"while dropping ref on %s:%.*s "
2291                     "(master=%u) got %d.\n", dlm->name, namelen,
2292                     lockname, res->owner, r);
2293                 dlm_print_one_lock_resource(res);
2294                 BUG();
2295         }
2296         return ret;
2297 }
2298
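     /* If no set-ref is in progress, the sender's bit is cleared from the
      * refmap directly and the lockres usage is recalculated; otherwise the
      * clear is deferred to dlm_deref_lockres_worker so that it happens only
      * after DLM_LOCK_RES_SETREF_INPROG has been dropped. */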
2299 int dlm_deref_lockres_handler(struct o2net_msg *msg, u32 len, void *data,
2300                               void **ret_data)
2301 {
2302         struct dlm_ctxt *dlm = data;
2303         struct dlm_deref_lockres *deref = (struct dlm_deref_lockres *)msg->buf;
2304         struct dlm_lock_resource *res = NULL;
2305         char *name;
2306         unsigned int namelen;
2307         int ret = -EINVAL;
2308         u8 node;
2309         unsigned int hash;
2310         struct dlm_work_item *item;
2311         int cleared = 0;
2312         int dispatch = 0;
2313
2314         if (!dlm_grab(dlm))
2315                 return 0;
2316
2317         name = deref->name;
2318         namelen = deref->namelen;
2319         node = deref->node_idx;
2320
2321         if (namelen > DLM_LOCKID_NAME_MAX) {
2322                 mlog(ML_ERROR, "Invalid name length!\n");
2323                 goto done;
2324         }
2325         if (deref->node_idx >= O2NM_MAX_NODES) {
2326                 mlog(ML_ERROR, "Invalid node number: %u\n", node);
2327                 goto done;
2328         }
2329
2330         hash = dlm_lockid_hash(name, namelen);
2331
2332         spin_lock(&dlm->spinlock);
2333         res = __dlm_lookup_lockres_full(dlm, name, namelen, hash);
2334         if (!res) {
2335                 spin_unlock(&dlm->spinlock);
2336                 mlog(ML_ERROR, "%s:%.*s: bad lockres name\n",
2337                      dlm->name, namelen, name);
2338                 goto done;
2339         }
2340         spin_unlock(&dlm->spinlock);
2341
2342         spin_lock(&res->spinlock);
2343         if (res->state & DLM_LOCK_RES_SETREF_INPROG)
2344                 dispatch = 1;
2345         else {
2346                 BUG_ON(res->state & DLM_LOCK_RES_DROPPING_REF);
2347                 if (test_bit(node, res->refmap)) {
2348                         dlm_lockres_clear_refmap_bit(node, res);
2349                         cleared = 1;
2350                 }
2351         }
2352         spin_unlock(&res->spinlock);
2353
2354         if (!dispatch) {
2355                 if (cleared)
2356                         dlm_lockres_calc_usage(dlm, res);
2357                 else {
2358                         mlog(ML_ERROR, "%s:%.*s: node %u trying to drop ref "
2359                         "but it is already dropped!\n", dlm->name,
2360                         res->lockname.len, res->lockname.name, node);
2361                         __dlm_print_one_lock_resource(res);
2362                 }
2363                 ret = 0;
2364                 goto done;
2365         }
2366
2367         item = kzalloc(sizeof(*item), GFP_NOFS);
2368         if (!item) {
2369                 ret = -ENOMEM;
2370                 mlog_errno(ret);
2371                 goto done;
2372         }
2373
2374         dlm_init_work_item(dlm, item, dlm_deref_lockres_worker, NULL);
2375         item->u.dl.deref_res = res;
2376         item->u.dl.deref_node = node;
2377
2378         spin_lock(&dlm->work_lock);
2379         list_add_tail(&item->list, &dlm->work_list);
2380         spin_unlock(&dlm->work_lock);
2381
2382         queue_work(dlm->dlm_worker, &dlm->dispatched_work);
2383         return 0;
2384
2385 done:
2386         if (res)
2387                 dlm_lockres_put(res);
2388         dlm_put(dlm);
2389
2390         return ret;
2391 }
2392
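     /* Deferred half of the deref handler: waits for
      * DLM_LOCK_RES_SETREF_INPROG to clear, then drops the node's refmap bit
      * and recalculates the lockres usage. */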
2393 static void dlm_deref_lockres_worker(struct dlm_work_item *item, void *data)
2394 {
2395         struct dlm_ctxt *dlm;
2396         struct dlm_lock_resource *res;
2397         u8 node;
2398         u8 cleared = 0;
2399
2400         dlm = item->dlm;
2401         res = item->u.dl.deref_res;
2402         node = item->u.dl.deref_node;
2403
2404         spin_lock(&res->spinlock);
2405         BUG_ON(res->state & DLM_LOCK_RES_DROPPING_REF);
2406         if (test_bit(node, res->refmap)) {
2407                 __dlm_wait_on_lockres_flags(res, DLM_LOCK_RES_SETREF_INPROG);
2408                 dlm_lockres_clear_refmap_bit(node, res);
2409                 cleared = 1;
2410         }
2411         spin_unlock(&res->spinlock);
2412
2413         if (cleared) {
2414                 mlog(0, "%s:%.*s node %u ref dropped in dispatch\n",
2415                      dlm->name, res->lockname.len, res->lockname.name, node);
2416                 dlm_lockres_calc_usage(dlm, res);
2417         } else {
2418                 mlog(ML_ERROR, "%s:%.*s: node %u trying to drop ref "
2419                      "but it is already dropped!\n", dlm->name,
2420                      res->lockname.len, res->lockname.name, node);
2421                 __dlm_print_one_lock_resource(res);
2422         }
2423
2424         dlm_lockres_put(res);
2425 }
2426
2427
2428 /*
2429  * DLM_MIGRATE_LOCKRES
2430  */
2431
2432
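     /* Migrates mastery of res away from this node, either to the requested
      * target or to a node picked by dlm_pick_migration_target().  The
      * lockres must be locally mastered and must hold no locks owned by this
      * node.  The sequence is: preallocate the migratable lockres page and an
      * mle, add a MIGRATION mle, set DLM_LOCK_RES_MIGRATING, send the lock
      * state with DLM_MRES_MIGRATION, then wait for the target's
      * assert_master before handing over ownership.  Any failure after the
      * flag is set re-dirties the lockres and wakes waiters. */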
2433 static int dlm_migrate_lockres(struct dlm_ctxt *dlm,
2434                                struct dlm_lock_resource *res,
2435                                u8 target)
2436 {
2437         struct dlm_master_list_entry *mle = NULL;
2438         struct dlm_master_list_entry *oldmle = NULL;
2439         struct dlm_migratable_lockres *mres = NULL;
2440         int ret = -EINVAL;
2441         const char *name;
2442         unsigned int namelen;
2443         int mle_added = 0;
2444         struct list_head *queue, *iter;
2445         int i;
2446         struct dlm_lock *lock;
2447         int empty = 1, wake = 0;
2448
2449         if (!dlm_grab(dlm))
2450                 return -EINVAL;
2451
2452         name = res->lockname.name;
2453         namelen = res->lockname.len;
2454
2455         mlog(0, "migrating %.*s to %u\n", namelen, name, target);
2456
2457         /*
2458          * ensure this lockres is a proper candidate for migration
2459          */
2460         spin_lock(&res->spinlock);
2461         if (res->owner == DLM_LOCK_RES_OWNER_UNKNOWN) {
2462                 mlog(0, "cannot migrate lockres with unknown owner!\n");
2463                 spin_unlock(&res->spinlock);
2464                 goto leave;
2465         }
2466         if (res->owner != dlm->node_num) {
2467                 mlog(0, "cannot migrate lockres this node doesn't own!\n");
2468                 spin_unlock(&res->spinlock);
2469                 goto leave;
2470         }
2471         mlog(0, "checking queues...\n");
2472         queue = &res->granted;
2473         for (i=0; i<3; i++) {
2474                 list_for_each(iter, queue) {
2475                         lock = list_entry (iter, struct dlm_lock, list);
2476                         empty = 0;
2477                         if (lock->ml.node == dlm->node_num) {
2478                                 mlog(0, "found a lock owned by this node "
2479                                      "still on the %s queue!  will not "
2480                                      "migrate this lockres\n",
2481                                      i==0 ? "granted" :
2482                                      (i==1 ? "converting" : "blocked"));
2483                                 spin_unlock(&res->spinlock);
2484                                 ret = -ENOTEMPTY;
2485                                 goto leave;
2486                         }
2487                 }
2488                 queue++;
2489         }
2490         mlog(0, "all locks on this lockres are nonlocal.  continuing\n");
2491         spin_unlock(&res->spinlock);
2492
2493         /* no work to do */
2494         if (empty) {
2495                 mlog(0, "no locks were found on this lockres! done!\n");
2496                 ret = 0;
2497                 goto leave;
2498         }
2499
2500         /*
2501          * preallocate up front
2502          * if this fails, abort
2503          */
2504
2505         ret = -ENOMEM;
2506         mres = (struct dlm_migratable_lockres *) __get_free_page(GFP_NOFS);
2507         if (!mres) {
2508                 mlog_errno(ret);
2509                 goto leave;
2510         }
2511
2512         mle = (struct dlm_master_list_entry *) kmem_cache_alloc(dlm_mle_cache,
2513                                                                 GFP_NOFS);
2514         if (!mle) {
2515                 mlog_errno(ret);
2516                 goto leave;
2517         }
2518         ret = 0;
2519
2520         /*
2521          * find a node to migrate the lockres to
2522          */
2523
2524         mlog(0, "picking a migration node\n");
2525         spin_lock(&dlm->spinlock);
2526         /* pick a new node */
2527         if (!test_bit(target, dlm->domain_map) ||
2528             target >= O2NM_MAX_NODES) {
2529                 target = dlm_pick_migration_target(dlm, res);
2530         }
2531         mlog(0, "node %u chosen for migration\n", target);
2532
2533         if (target >= O2NM_MAX_NODES ||
2534             !test_bit(target, dlm->domain_map)) {
2535                 /* target chosen is not alive */
2536                 ret = -EINVAL;
2537         }
2538
2539         if (ret) {
2540                 spin_unlock(&dlm->spinlock);
2541                 goto fail;
2542         }
2543
2544         mlog(0, "continuing with target = %u\n", target);
2545
2546         /*
2547          * clear any existing master requests and
2548          * add the migration mle to the list
2549          */
2550         spin_lock(&dlm->master_lock);
2551         ret = dlm_add_migration_mle(dlm, res, mle, &oldmle, name,
2552                                     namelen, target, dlm->node_num);
2553         spin_unlock(&dlm->master_lock);
2554         spin_unlock(&dlm->spinlock);
2555
2556         if (ret == -EEXIST) {
2557                 mlog(0, "another process is already migrating it\n");
2558                 goto fail;
2559         }
2560         mle_added = 1;
2561
2562         /*
2563          * set the MIGRATING flag and flush asts
2564          * if we fail after this we need to re-dirty the lockres
2565          */
2566         if (dlm_mark_lockres_migrating(dlm, res, target) < 0) {
2567                 mlog(ML_ERROR, "tried to migrate %.*s to %u, but "
2568                      "the target went down.\n", res->lockname.len,
2569                      res->lockname.name, target);
2570                 spin_lock(&res->spinlock);
2571                 res->state &= ~DLM_LOCK_RES_MIGRATING;
2572                 wake = 1;
2573                 spin_unlock(&res->spinlock);
2574                 ret = -EINVAL;
2575         }
2576
2577 fail:
2578         if (oldmle) {
2579                 /* master is known, detach if not already detached */
2580                 dlm_mle_detach_hb_events(dlm, oldmle);
2581                 dlm_put_mle(oldmle);
2582         }
2583
2584         if (ret < 0) {
2585                 if (mle_added) {
2586                         dlm_mle_detach_hb_events(dlm, mle);
2587                         dlm_put_mle(mle);
2588                 } else if (mle) {
2589                         kmem_cache_free(dlm_mle_cache, mle);
2590                 }
2591                 goto leave;
2592         }
2593
2594         /*
2595          * at this point, we have a migration target, an mle
2596          * in the master list, and the MIGRATING flag set on
2597          * the lockres
2598          */
2599
2600         /* now that remote nodes are spinning on the MIGRATING flag,
2601          * ensure that all assert_master work is flushed. */
2602         flush_workqueue(dlm->dlm_worker);
2603
2604         /* get an extra reference on the mle.
2605          * otherwise the assert_master from the new
2606          * master will destroy this.
2607          * also, make sure that all callers of dlm_get_mle
2608          * take both dlm->spinlock and dlm->master_lock */
2609         spin_lock(&dlm->spinlock);
2610         spin_lock(&dlm->master_lock);
2611         dlm_get_mle_inuse(mle);
2612         spin_unlock(&dlm->master_lock);
2613         spin_unlock(&dlm->spinlock);
2614
2615         /* notify new node and send all lock state */
2616         /* call send_one_lockres with migration flag.
2617          * this serves as notice to the target node that a
2618          * migration is starting. */
2619         ret = dlm_send_one_lockres(dlm, res, mres, target,
2620                                    DLM_MRES_MIGRATION);
2621
2622         if (ret < 0) {
2623                 mlog(0, "migration to node %u failed with %d\n",
2624                      target, ret);
2625                 /* migration failed, detach and clean up mle */
2626                 dlm_mle_detach_hb_events(dlm, mle);
2627                 dlm_put_mle(mle);
2628                 dlm_put_mle_inuse(mle);
2629                 spin_lock(&res->spinlock);
2630                 res->state &= ~DLM_LOCK_RES_MIGRATING;
2631                 wake = 1;
2632                 spin_unlock(&res->spinlock);
2633                 goto leave;
2634         }
2635
2636         /* at this point, the target sends a message to all nodes,
2637          * (using dlm_do_migrate_request).  this node is skipped since
2638          * we had to put an mle in the list to begin the process.  this
2639          * node now waits for target to do an assert master.  this node
2640          * will be the last one notified, ensuring that the migration
2641          * is complete everywhere.  if the target dies while this is
2642          * going on, some nodes could potentially see the target as the
2643          * master, so it is important that my recovery finds the migration
2644          * mle and sets the master to UNKNOWN. */
2645
2646
2647         /* wait for new node to assert master */
2648         while (1) {
2649                 ret = wait_event_interruptible_timeout(mle->wq,
2650                                         (atomic_read(&mle->woken) == 1),
2651                                         msecs_to_jiffies(5000));
2652
2653                 if (ret >= 0) {
2654                         if (atomic_read(&mle->woken) == 1 ||
2655                             res->owner == target)
2656                                 break;
2657
2658                         mlog(0, "%s:%.*s: timed out during migration\n",
2659                              dlm->name, res->lockname.len, res->lockname.name);
2660                         /* avoid hang during shutdown when migrating lockres 
2661                          * to a node which also goes down */
2662                         if (dlm_is_node_dead(dlm, target)) {
2663                                 mlog(0, "%s:%.*s: expected migration "
2664                                      "target %u is no longer up, restarting\n",
2665                                      dlm->name, res->lockname.len,
2666                                      res->lockname.name, target);
2667                                 ret = -EINVAL;
2668                                 /* migration failed, detach and clean up mle */
2669                                 dlm_mle_detach_hb_events(dlm, mle);
2670                                 dlm_put_mle(mle);
2671                                 dlm_put_mle_inuse(mle);
2672                                 spin_lock(&res->spinlock);
2673                                 res->state &= ~DLM_LOCK_RES_MIGRATING;
2674                                 wake = 1;
2675                                 spin_unlock(&res->spinlock);
2676                                 goto leave;
2677                         }
2678                 } else
2679                         mlog(0, "%s:%.*s: caught signal during migration\n",
2680                              dlm->name, res->lockname.len, res->lockname.name);
2681         }
2682
2683         /* all done, set the owner, clear the flag */
2684         spin_lock(&res->spinlock);
2685         dlm_set_lockres_owner(dlm, res, target);
2686         res->state &= ~DLM_LOCK_RES_MIGRATING;
2687         dlm_remove_nonlocal_locks(dlm, res);
2688         spin_unlock(&res->spinlock);
2689         wake_up(&res->wq);
2690
2691         /* master is known, detach if not already detached */
2692         dlm_mle_detach_hb_events(dlm, mle);
2693         dlm_put_mle_inuse(mle);
2694         ret = 0;
2695
2696         dlm_lockres_calc_usage(dlm, res);
2697
2698 leave:
2699         /* re-dirty the lockres if we failed */
2700         if (ret < 0)
2701                 dlm_kick_thread(dlm, res);
2702
2703         /* wake up waiters if the MIGRATING flag got set
2704          * but migration failed */
2705         if (wake)
2706                 wake_up(&res->wq);
2707
2708         /* TODO: cleanup */
2709         if (mres)
2710                 free_page((unsigned long)mres);
2711
2712         dlm_put(dlm);
2713
2714         mlog(0, "returning %d\n", ret);
2715         return ret;
2716 }
2717
2718 #define DLM_MIGRATION_RETRY_MS  100
2719
2720 /* Should be called only after beginning the domain leave process.
2721  * There should not be any remaining locks on nonlocal lock resources,
2722  * and there should be no local locks left on locally mastered resources.
2723  *
2724  * Called with the dlm spinlock held, may drop it to do migration, but
2725  * will re-acquire before exit.
2726  *
2727  * Returns: 1 if dlm->spinlock was dropped/retaken, 0 if never dropped */
2728 int dlm_empty_lockres(struct dlm_ctxt *dlm, struct dlm_lock_resource *res)
2729 {
2730         int ret;
2731         int lock_dropped = 0;
2732
2733         if (res->owner != dlm->node_num) {
2734                 if (!__dlm_lockres_unused(res)) {
2735                         mlog(ML_ERROR, "%s:%.*s: this node is not master, "
2736                              "trying to free this but locks remain\n",
2737                              dlm->name, res->lockname.len, res->lockname.name);
2738                 }
2739                 goto leave;
2740         }
2741
2742         /* Wheee! Migrate lockres here! Will sleep so drop spinlock. */
2743         spin_unlock(&dlm->spinlock);
2744         lock_dropped = 1;
2745         while (1) {
2746                 ret = dlm_migrate_lockres(dlm, res, O2NM_MAX_NODES);
2747                 if (ret >= 0)
2748                         break;
2749                 if (ret == -ENOTEMPTY) {
2750                         mlog(ML_ERROR, "lockres %.*s still has local locks!\n",
2751                                 res->lockname.len, res->lockname.name);
2752                         BUG();
2753                 }
2754
2755                 mlog(0, "lockres %.*s: migrate failed, "
2756                      "retrying\n", res->lockname.len,
2757                      res->lockname.name);
2758                 msleep(DLM_MIGRATION_RETRY_MS);
2759         }
2760         spin_lock(&dlm->spinlock);
2761 leave:
2762         return lock_dropped;
2763 }
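/*
 * Illustrative caller sketch (not part of this file): dlm_empty_lockres()
 * returns 1 whenever it dropped dlm->spinlock, so a caller walking the
 * lockres hash must restart its walk in that case.  The helper
 * example_next_owned_lockres() is hypothetical and stands in for whatever
 * hash iteration the real caller uses.
 */
static struct dlm_lock_resource *example_next_owned_lockres(struct dlm_ctxt *dlm);

static void example_drain_owned_lockres(struct dlm_ctxt *dlm)
{
	struct dlm_lock_resource *res;

	spin_lock(&dlm->spinlock);
restart:
	while ((res = example_next_owned_lockres(dlm)) != NULL) {
		/* may drop and retake dlm->spinlock; if it did, the
		 * hash may have changed underneath us, so rescan */
		if (dlm_empty_lockres(dlm, res))
			goto restart;
	}
	spin_unlock(&dlm->spinlock);
}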
2764
2765 int dlm_lock_basts_flushed(struct dlm_ctxt *dlm, struct dlm_lock *lock)
2766 {
2767         int ret;
2768         spin_lock(&dlm->ast_lock);
2769         spin_lock(&lock->spinlock);
2770         ret = (list_empty(&lock->bast_list) && !lock->bast_pending);
2771         spin_unlock(&lock->spinlock);
2772         spin_unlock(&dlm->ast_lock);
2773         return ret;
2774 }
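/*
 * Illustrative usage sketch: a sleeping path that must not proceed until
 * a lock's pending basts have been delivered could block on dlm->ast_wq,
 * roughly like this.  Whether the call site may sleep is an assumption
 * of the sketch.
 */
static void example_wait_basts_flushed(struct dlm_ctxt *dlm,
				       struct dlm_lock *lock)
{
	wait_event(dlm->ast_wq, dlm_lock_basts_flushed(dlm, lock));
}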
2775
2776 static int dlm_migration_can_proceed(struct dlm_ctxt *dlm,
2777                                      struct dlm_lock_resource *res,
2778                                      u8 mig_target)
2779 {
2780         int can_proceed;
2781         spin_lock(&res->spinlock);
2782         can_proceed = !!(res->state & DLM_LOCK_RES_MIGRATING);
2783         spin_unlock(&res->spinlock);
2784
2785         /* target has died, so make the caller break out of the 
2786          * wait_event, but caller must recheck the domain_map */
2787         spin_lock(&dlm->spinlock);
2788         if (!test_bit(mig_target, dlm->domain_map))
2789                 can_proceed = 1;
2790         spin_unlock(&dlm->spinlock);
2791         return can_proceed;
2792 }
2793
2794 static int dlm_lockres_is_dirty(struct dlm_ctxt *dlm,
2795                                 struct dlm_lock_resource *res)
2796 {
2797         int ret;
2798         spin_lock(&res->spinlock);
2799         ret = !!(res->state & DLM_LOCK_RES_DIRTY);
2800         spin_unlock(&res->spinlock);
2801         return ret;
2802 }
2803
2804
2805 static int dlm_mark_lockres_migrating(struct dlm_ctxt *dlm,
2806                                        struct dlm_lock_resource *res,
2807                                        u8 target)
2808 {
2809         int ret = 0;
2810
2811         mlog(0, "dlm_mark_lockres_migrating: %.*s, from %u to %u\n",
2812                res->lockname.len, res->lockname.name, dlm->node_num,
2813                target);
2814         /* need to set MIGRATING flag on lockres.  this is done by
2815          * ensuring that all asts have been flushed for this lockres. */
2816         spin_lock(&res->spinlock);
2817         BUG_ON(res->migration_pending);
2818         res->migration_pending = 1;
2819         /* strategy is to reserve an extra ast then release
2820          * it below, letting the release do all of the work */
2821         __dlm_lockres_reserve_ast(res);
2822         spin_unlock(&res->spinlock);
2823
2824         /* now flush all the pending asts */
2825         dlm_kick_thread(dlm, res);
2826         /* before waiting on DIRTY, block processes which may
2827          * try to dirty the lockres before MIGRATING is set */
2828         spin_lock(&res->spinlock);
2829         BUG_ON(res->state & DLM_LOCK_RES_BLOCK_DIRTY);
2830         res->state |= DLM_LOCK_RES_BLOCK_DIRTY;
2831         spin_unlock(&res->spinlock);
2832         /* now wait on any pending asts and the DIRTY state */
2833         wait_event(dlm->ast_wq, !dlm_lockres_is_dirty(dlm, res));
2834         dlm_lockres_release_ast(dlm, res);
2835
2836         mlog(0, "about to wait on migration_wq, dirty=%s\n",
2837                res->state & DLM_LOCK_RES_DIRTY ? "yes" : "no");
2838         /* if the extra ref we just put was the final one, this
2839          * will pass thru immediately.  otherwise, we need to wait
2840          * for the last ast to finish. */
2841 again:
2842         ret = wait_event_interruptible_timeout(dlm->migration_wq,
2843                    dlm_migration_can_proceed(dlm, res, target),
2844                    msecs_to_jiffies(1000));
2845         if (ret < 0) {
2846                 mlog(0, "woken again: migrating? %s, dead? %s\n",
2847                        res->state & DLM_LOCK_RES_MIGRATING ? "yes":"no",
2848                        test_bit(target, dlm->domain_map) ? "no":"yes");
2849         } else {
2850                 mlog(0, "all is well: migrating? %s, dead? %s\n",
2851                        res->state & DLM_LOCK_RES_MIGRATING ? "yes":"no",
2852                        test_bit(target, dlm->domain_map) ? "no":"yes");
2853         }
2854         if (!dlm_migration_can_proceed(dlm, res, target)) {
2855                 mlog(0, "trying again...\n");
2856                 goto again;
2857         }
2858         /* now that we are sure the MIGRATING state is there, drop
2859          * the unneeded state which blocked threads trying to DIRTY */
2860         spin_lock(&res->spinlock);
2861         BUG_ON(!(res->state & DLM_LOCK_RES_BLOCK_DIRTY));
2862         BUG_ON(!(res->state & DLM_LOCK_RES_MIGRATING));
2863         res->state &= ~DLM_LOCK_RES_BLOCK_DIRTY;
2864         spin_unlock(&res->spinlock);
2865
2866         /* did the target go down or die? */
2867         spin_lock(&dlm->spinlock);
2868         if (!test_bit(target, dlm->domain_map)) {
2869                 mlog(ML_ERROR, "aha. migration target %u just went down\n",
2870                      target);
2871                 ret = -EHOSTDOWN;
2872         }
2873         spin_unlock(&dlm->spinlock);
2874
2875         /*
2876          * at this point:
2877          *
2878          *   o the DLM_LOCK_RES_MIGRATING flag is set
2879          *   o there are no pending asts on this lockres
2880          *   o all processes trying to reserve an ast on this
2881          *     lockres must wait for the MIGRATING flag to clear
2882          */
2883         return ret;
2884 }
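/*
 * Illustrative sketch: once dlm_mark_lockres_migrating() has set
 * DLM_LOCK_RES_MIGRATING, any path that wants to reserve an ast must
 * first wait for the flag to clear (the file's own comments point
 * callers at dlm_wait_on_lockres for this), since
 * __dlm_lockres_reserve_ast() BUGs if called while the flag is set.
 * A minimal version of that wait, for illustration only:
 */
static void example_wait_then_reserve_ast(struct dlm_lock_resource *res)
{
	spin_lock(&res->spinlock);
	while (res->state & DLM_LOCK_RES_MIGRATING) {
		spin_unlock(&res->spinlock);
		/* the flag is rechecked under the spinlock; the
		 * unlocked read here only decides when to wake up */
		wait_event(res->wq,
			   !(res->state & DLM_LOCK_RES_MIGRATING));
		spin_lock(&res->spinlock);
	}
	__dlm_lockres_reserve_ast(res);
	spin_unlock(&res->spinlock);
}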
2885
2886 /* last step in the migration process.
2887  * original master calls this to free all of the dlm_lock
2888  * structures that used to be for other nodes. */
2889 static void dlm_remove_nonlocal_locks(struct dlm_ctxt *dlm,
2890                                       struct dlm_lock_resource *res)
2891 {
2892         struct list_head *iter, *iter2;
2893         struct list_head *queue = &res->granted;
2894         int i, bit;
2895         struct dlm_lock *lock;
2896
2897         assert_spin_locked(&res->spinlock);
2898
2899         BUG_ON(res->owner == dlm->node_num);
2900
2901         for (i = 0; i < 3; i++) {
2902                 list_for_each_safe(iter, iter2, queue) {
2903                         lock = list_entry(iter, struct dlm_lock, list);
2904                         if (lock->ml.node != dlm->node_num) {
2905                                 mlog(0, "putting lock for node %u\n",
2906                                      lock->ml.node);
2907                                 /* be extra careful */
2908                                 BUG_ON(!list_empty(&lock->ast_list));
2909                                 BUG_ON(!list_empty(&lock->bast_list));
2910                                 BUG_ON(lock->ast_pending);
2911                                 BUG_ON(lock->bast_pending);
2912                                 dlm_lockres_clear_refmap_bit(lock->ml.node, res);
2913                                 list_del_init(&lock->list);
2914                                 dlm_lock_put(lock);
2915                         }
2916                 }
2917                 queue++;
2918         }
2919         bit = 0;
2920         while (1) {
2921                 bit = find_next_bit(res->refmap, O2NM_MAX_NODES, bit);
2922                 if (bit >= O2NM_MAX_NODES)
2923                         break;
2924                 /* do not clear the local node reference; if there is a
2925                  * process holding this, let it drop the ref itself */
2926                 if (bit != dlm->node_num) {
2927                         mlog(0, "%s:%.*s: node %u had a ref to this "
2928                              "migrating lockres, clearing\n", dlm->name,
2929                              res->lockname.len, res->lockname.name, bit);
2930                         dlm_lockres_clear_refmap_bit(bit, res);
2931                 }
2932                 bit++;
2933         }
2934 }
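/*
 * Note on the queue++ walk above (and the same walk in
 * dlm_pick_migration_target() below): it steps through res->granted,
 * res->converting and res->blocked by pointer arithmetic, which only
 * works because those three list_heads sit next to each other in
 * struct dlm_lock_resource.  An equivalent, more explicit form of the
 * walk, as a sketch only:
 */
static void example_walk_all_queues(struct dlm_lock_resource *res)
{
	struct list_head *queues[] = { &res->granted, &res->converting,
				       &res->blocked };
	struct list_head *iter, *tmp;
	struct dlm_lock *lock;
	int i;

	assert_spin_locked(&res->spinlock);
	for (i = 0; i < ARRAY_SIZE(queues); i++) {
		list_for_each_safe(iter, tmp, queues[i]) {
			lock = list_entry(iter, struct dlm_lock, list);
			/* act on each lock here */
			(void)lock;
		}
	}
}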
2935
2936 /* for now this is not too intelligent.  we will
2937  * need stats to make this do the right thing.
2938  * this just finds the first lock on one of the
2939  * queues and uses that node as the target. */
2940 static u8 dlm_pick_migration_target(struct dlm_ctxt *dlm,
2941                                     struct dlm_lock_resource *res)
2942 {
2943         int i;
2944         struct list_head *queue = &res->granted;
2945         struct list_head *iter;
2946         struct dlm_lock *lock;
2947         int nodenum;
2948
2949         assert_spin_locked(&dlm->spinlock);
2950
2951         spin_lock(&res->spinlock);
2952         for (i = 0; i < 3; i++) {
2953                 list_for_each(iter, queue) {
2954                         /* up to the caller to make sure this node
2955                          * is alive */
2956                         lock = list_entry(iter, struct dlm_lock, list);
2957                         if (lock->ml.node != dlm->node_num) {
2958                                 spin_unlock(&res->spinlock);
2959                                 return lock->ml.node;
2960                         }
2961                 }
2962                 queue++;
2963         }
2964         spin_unlock(&res->spinlock);
2965         mlog(0, "have not found a suitable target yet! checking domain map\n");
2966
2967         /* ok now we're getting desperate.  pick anyone alive. */
2968         nodenum = -1;
2969         while (1) {
2970                 nodenum = find_next_bit(dlm->domain_map,
2971                                         O2NM_MAX_NODES, nodenum+1);
2972                 mlog(0, "found %d in domain map\n", nodenum);
2973                 if (nodenum >= O2NM_MAX_NODES)
2974                         break;
2975                 if (nodenum != dlm->node_num) {
2976                         mlog(0, "picking %d\n", nodenum);
2977                         return nodenum;
2978                 }
2979         }
2980
2981         mlog(0, "giving up.  no master to migrate to\n");
2982         return DLM_LOCK_RES_OWNER_UNKNOWN;
2983 }
2984
2985
2986
2987 /* this is called by the new master once all lockres
2988  * data has been received */
2989 static int dlm_do_migrate_request(struct dlm_ctxt *dlm,
2990                                   struct dlm_lock_resource *res,
2991                                   u8 master, u8 new_master,
2992                                   struct dlm_node_iter *iter)
2993 {
2994         struct dlm_migrate_request migrate;
2995         int ret, status = 0;
2996         int nodenum;
2997
2998         memset(&migrate, 0, sizeof(migrate));
2999         migrate.namelen = res->lockname.len;
3000         memcpy(migrate.name, res->lockname.name, migrate.namelen);
3001         migrate.new_master = new_master;
3002         migrate.master = master;
3003
3004         ret = 0;
3005
3006         /* send message to all nodes, except the master and myself */
3007         while ((nodenum = dlm_node_iter_next(iter)) >= 0) {
3008                 if (nodenum == master ||
3009                     nodenum == new_master)
3010                         continue;
3011
3012                 ret = o2net_send_message(DLM_MIGRATE_REQUEST_MSG, dlm->key,
3013                                          &migrate, sizeof(migrate), nodenum,
3014                                          &status);
3015                 if (ret < 0)
3016                         mlog_errno(ret);
3017                 else if (status < 0) {
3018                         mlog(0, "migrate request (node %u) returned %d!\n",
3019                              nodenum, status);
3020                         ret = status;
3021                 } else if (status == DLM_MIGRATE_RESPONSE_MASTERY_REF) {
3022                         /* during the migration request we short-circuited
3023                          * the mastery of the lockres.  make sure we have
3024                          * a mastery ref for nodenum */
3025                         mlog(0, "%s:%.*s: need ref for node %u\n",
3026                              dlm->name, res->lockname.len, res->lockname.name,
3027                              nodenum);
3028                         spin_lock(&res->spinlock);
3029                         dlm_lockres_set_refmap_bit(nodenum, res);
3030                         spin_unlock(&res->spinlock);
3031                 }
3032         }
3033
3034         if (ret < 0)
3035                 mlog_errno(ret);
3036
3037         mlog(0, "returning ret=%d\n", ret);
3038         return ret;
3039 }
3040
3041
3042 /* if there is an existing mle for this lockres, we now know who the master
3043  * is (the one who sent us *this* message), so we can clear it up right away.
3044  * since the process that put the mle on the list still has a reference to it,
3045  * we can unhash it now, set the master and wake the process.  as a result,
3046  * we will have no mle in the list to start with.  now we can add an mle for
3047  * the migration and this should be the only one found for those scanning the
3048  * list.  */
3049 int dlm_migrate_request_handler(struct o2net_msg *msg, u32 len, void *data,
3050                                 void **ret_data)
3051 {
3052         struct dlm_ctxt *dlm = data;
3053         struct dlm_lock_resource *res = NULL;
3054         struct dlm_migrate_request *migrate = (struct dlm_migrate_request *) msg->buf;
3055         struct dlm_master_list_entry *mle = NULL, *oldmle = NULL;
3056         const char *name;
3057         unsigned int namelen, hash;
3058         int ret = 0;
3059
3060         if (!dlm_grab(dlm))
3061                 return -EINVAL;
3062
3063         name = migrate->name;
3064         namelen = migrate->namelen;
3065         hash = dlm_lockid_hash(name, namelen);
3066
3067         /* preallocate.. if this fails, abort */
3068         mle = kmem_cache_alloc(dlm_mle_cache,
3069                                GFP_NOFS);
3070
3071         if (!mle) {
3072                 ret = -ENOMEM;
3073                 goto leave;
3074         }
3075
3076         /* check for pre-existing lock */
3077         spin_lock(&dlm->spinlock);
3078         res = __dlm_lookup_lockres(dlm, name, namelen, hash);
3079         spin_lock(&dlm->master_lock);
3080
3081         if (res) {
3082                 spin_lock(&res->spinlock);
3083                 if (res->state & DLM_LOCK_RES_RECOVERING) {
3084                         /* if all is working ok, this can only mean that we got
3085                         * a migrate request from a node that we now see as
3086                         * dead.  what can we do here?  drop it to the floor? */
3087                         spin_unlock(&res->spinlock);
3088                         mlog(ML_ERROR, "Got a migrate request, but the "
3089                              "lockres is marked as recovering!");
3090                         kmem_cache_free(dlm_mle_cache, mle);
3091                         ret = -EINVAL; /* need a better solution */
3092                         goto unlock;
3093                 }
3094                 res->state |= DLM_LOCK_RES_MIGRATING;
3095                 spin_unlock(&res->spinlock);
3096         }
3097
3098         /* ignore status.  only nonzero status would BUG. */
3099         ret = dlm_add_migration_mle(dlm, res, mle, &oldmle,
3100                                     name, namelen,
3101                                     migrate->new_master,
3102                                     migrate->master);
3103
3104 unlock:
3105         spin_unlock(&dlm->master_lock);
3106         spin_unlock(&dlm->spinlock);
3107
3108         if (oldmle) {
3109                 /* master is known, detach if not already detached */
3110                 dlm_mle_detach_hb_events(dlm, oldmle);
3111                 dlm_put_mle(oldmle);
3112         }
3113
3114         if (res)
3115                 dlm_lockres_put(res);
3116 leave:
3117         dlm_put(dlm);
3118         return ret;
3119 }
3120
3121 /* must be holding dlm->spinlock and dlm->master_lock.
3122  * when adding a migration mle, we can clear any other mles
3123  * in the master list because we know with certainty that
3124  * the master is "master".  so we remove any old mle from
3125  * the list after setting its master field, and then add
3126  * the new migration mle.  this way we can hold with the rule
3127  * of having only one mle for a given lock name at all times. */
3128 static int dlm_add_migration_mle(struct dlm_ctxt *dlm,
3129                                  struct dlm_lock_resource *res,
3130                                  struct dlm_master_list_entry *mle,
3131                                  struct dlm_master_list_entry **oldmle,
3132                                  const char *name, unsigned int namelen,
3133                                  u8 new_master, u8 master)
3134 {
3135         int found;
3136         int ret = 0;
3137
3138         *oldmle = NULL;
3139
3140         mlog_entry_void();
3141
3142         assert_spin_locked(&dlm->spinlock);
3143         assert_spin_locked(&dlm->master_lock);
3144
3145         /* caller is responsible for any ref taken here on oldmle */
3146         found = dlm_find_mle(dlm, oldmle, (char *)name, namelen);
3147         if (found) {
3148                 struct dlm_master_list_entry *tmp = *oldmle;
3149                 spin_lock(&tmp->spinlock);
3150                 if (tmp->type == DLM_MLE_MIGRATION) {
3151                         if (master == dlm->node_num) {
3152                                 /* ah another process raced me to it */
3153                                 mlog(0, "tried to migrate %.*s, but some "
3154                                      "process beat me to it\n",
3155                                      namelen, name);
3156                                 ret = -EEXIST;
3157                         } else {
3158                                 /* bad.  2 NODES are trying to migrate! */
3159                                 mlog(ML_ERROR, "migration error  mle: "
3160                                      "master=%u new_master=%u // request: "
3161                                      "master=%u new_master=%u // "
3162                                      "lockres=%.*s\n",
3163                                      tmp->master, tmp->new_master,
3164                                      master, new_master,
3165                                      namelen, name);
3166                                 BUG();
3167                         }
3168                 } else {
3169                         /* this is essentially what assert_master does */
3170                         tmp->master = master;
3171                         atomic_set(&tmp->woken, 1);
3172                         wake_up(&tmp->wq);
3173                         /* remove it from the list so that only one
3174                          * mle will be found */
3175                         list_del_init(&tmp->list);
3176                         /* detach tmp (not the still-uninitialized mle) from hb events */
3177                         __dlm_mle_detach_hb_events(dlm, tmp);
3178                         ret = DLM_MIGRATE_RESPONSE_MASTERY_REF;
3179                         mlog(0, "%s:%.*s: master=%u, newmaster=%u, "
3180                             "telling master to get ref for cleared out mle "
3181                             "during migration\n", dlm->name, namelen, name,
3182                             master, new_master);
3183                 }
3184                 spin_unlock(&tmp->spinlock);
3185         }
3186
3187         /* now add a migration mle to the tail of the list */
3188         dlm_init_mle(mle, DLM_MLE_MIGRATION, dlm, res, name, namelen);
3189         mle->new_master = new_master;
3190         /* the new master will be sending an assert master for this.
3191          * at that point we will get the refmap reference */
3192         mle->master = master;
3193         /* do this for consistency with other mle types */
3194         set_bit(new_master, mle->maybe_map);
3195         list_add(&mle->list, &dlm->master_list);
3196
3197         return ret;
3198 }
3199
3200
3201 void dlm_clean_master_list(struct dlm_ctxt *dlm, u8 dead_node)
3202 {
3203         struct list_head *iter, *iter2;
3204         struct dlm_master_list_entry *mle;
3205         struct dlm_lock_resource *res;
3206         unsigned int hash;
3207
3208         mlog_entry("dlm=%s, dead node=%u\n", dlm->name, dead_node);
3209 top:
3210         assert_spin_locked(&dlm->spinlock);
3211
3212         /* clean the master list */
3213         spin_lock(&dlm->master_lock);
3214         list_for_each_safe(iter, iter2, &dlm->master_list) {
3215                 mle = list_entry(iter, struct dlm_master_list_entry, list);
3216
3217                 BUG_ON(mle->type != DLM_MLE_BLOCK &&
3218                        mle->type != DLM_MLE_MASTER &&
3219                        mle->type != DLM_MLE_MIGRATION);
3220
3221                 /* MASTER mles are initiated locally.  the waiting
3222                  * process will notice the node map change
3223                  * shortly.  let that happen as normal. */
3224                 if (mle->type == DLM_MLE_MASTER)
3225                         continue;
3226
3227
3228                 /* BLOCK mles are initiated by other nodes.
3229                  * need to clean up if the dead node would have
3230                  * been the master. */
3231                 if (mle->type == DLM_MLE_BLOCK) {
3232                         int bit;
3233
3234                         spin_lock(&mle->spinlock);
3235                         bit = find_next_bit(mle->maybe_map, O2NM_MAX_NODES, 0);
3236                         if (bit != dead_node) {
3237                                 mlog(0, "mle found, but dead node %u would "
3238                                      "not have been master\n", dead_node);
3239                                 spin_unlock(&mle->spinlock);
3240                         } else {
3241                                 /* must drop the refcount by one since the
3242                                  * assert_master will never arrive.  this
3243                                  * may result in the mle being unlinked and
3244                                  * freed, but there may still be a process
3245                                  * waiting in the dlmlock path which is fine. */
3246                                 mlog(0, "node %u was expected master\n",
3247                                      dead_node);
3248                                 atomic_set(&mle->woken, 1);
3249                                 spin_unlock(&mle->spinlock);
3250                                 wake_up(&mle->wq);
3251                                 /* do not need events any longer, so detach 
3252                                  * from heartbeat */
3253                                 __dlm_mle_detach_hb_events(dlm, mle);
3254                                 __dlm_put_mle(mle);
3255                         }
3256                         continue;
3257                 }
3258
3259                 /* everything else is a MIGRATION mle */
3260
3261                 /* the rule for MIGRATION mles is that the master
3262                  * becomes UNKNOWN if *either* the original or
3263                  * the new master dies.  all UNKNOWN lockreses
3264                  * are sent to whichever node becomes the recovery
3265                  * master.  the new master is responsible for
3266                  * determining if there is still a master for
3267                  * this lockres, or if it needs to take over
3268                  * mastery.  either way, this node should expect
3269                  * another message to resolve this. */
3270                 if (mle->master != dead_node &&
3271                     mle->new_master != dead_node)
3272                         continue;
3273
3274                 /* if we have reached this point, this mle needs to
3275                  * be removed from the list and freed. */
3276
3277                 /* remove from the list early.  NOTE: unlinking
3278                  * list_head while in list_for_each_safe */
3279                 __dlm_mle_detach_hb_events(dlm, mle);
3280                 spin_lock(&mle->spinlock);
3281                 list_del_init(&mle->list);
3282                 atomic_set(&mle->woken, 1);
3283                 spin_unlock(&mle->spinlock);
3284                 wake_up(&mle->wq);
3285
3286                 mlog(0, "%s: node %u died during migration from "
3287                      "%u to %u!\n", dlm->name, dead_node,
3288                      mle->master, mle->new_master);
3289                 /* if there is a lockres associated with this
3290                  * mle, find it and set its owner to UNKNOWN */
3291                 hash = dlm_lockid_hash(mle->u.name.name, mle->u.name.len);
3292                 res = __dlm_lookup_lockres(dlm, mle->u.name.name,
3293                                            mle->u.name.len, hash);
3294                 if (res) {
3295                         /* unfortunately if we hit this rare case, our
3296                          * lock ordering is messed.  we need to drop
3297                          * the master lock so that we can take the
3298                          * lockres lock, meaning that we will have to
3299                          * restart from the head of the list. */
3300                         spin_unlock(&dlm->master_lock);
3301
3302                         /* move lockres onto recovery list */
3303                         spin_lock(&res->spinlock);
3304                         dlm_set_lockres_owner(dlm, res,
3305                                         DLM_LOCK_RES_OWNER_UNKNOWN);
3306                         dlm_move_lockres_to_recovery_list(dlm, res);
3307                         spin_unlock(&res->spinlock);
3308                         dlm_lockres_put(res);
3309
3310                         /* about to get rid of mle, detach from heartbeat */
3311                         __dlm_mle_detach_hb_events(dlm, mle);
3312
3313                         /* dump the mle */
3314                         spin_lock(&dlm->master_lock);
3315                         __dlm_put_mle(mle);
3316                         spin_unlock(&dlm->master_lock);
3317
3318                         /* restart */
3319                         goto top;
3320                 }
3321
3322                 /* this may be the last reference */
3323                 __dlm_put_mle(mle);
3324         }
3325         spin_unlock(&dlm->master_lock);
3326 }
3327
3328
3329 int dlm_finish_migration(struct dlm_ctxt *dlm, struct dlm_lock_resource *res,
3330                          u8 old_master)
3331 {
3332         struct dlm_node_iter iter;
3333         int ret = 0;
3334
3335         spin_lock(&dlm->spinlock);
3336         dlm_node_iter_init(dlm->domain_map, &iter);
3337         clear_bit(old_master, iter.node_map);
3338         clear_bit(dlm->node_num, iter.node_map);
3339         spin_unlock(&dlm->spinlock);
3340
3341         /* ownership of the lockres is changing.  account for the
3342          * mastery reference here since old_master will briefly have
3343          * a reference after the migration completes */
3344         spin_lock(&res->spinlock);
3345         dlm_lockres_set_refmap_bit(old_master, res);
3346         spin_unlock(&res->spinlock);
3347
3348         mlog(0, "now time to do a migrate request to other nodes\n");
3349         ret = dlm_do_migrate_request(dlm, res, old_master,
3350                                      dlm->node_num, &iter);
3351         if (ret < 0) {
3352                 mlog_errno(ret);
3353                 goto leave;
3354         }
3355
3356         mlog(0, "doing assert master of %.*s to all except the original node\n",
3357              res->lockname.len, res->lockname.name);
3358         /* this call now finishes out the nodemap
3359          * even if one or more nodes die */
3360         ret = dlm_do_assert_master(dlm, res, iter.node_map,
3361                                    DLM_ASSERT_MASTER_FINISH_MIGRATION);
3362         if (ret < 0) {
3363                 /* no longer need to retry.  all living nodes contacted. */
3364                 mlog_errno(ret);
3365                 ret = 0;
3366         }
3367
3368         memset(iter.node_map, 0, sizeof(iter.node_map));
3369         set_bit(old_master, iter.node_map);
3370         mlog(0, "doing assert master of %.*s back to %u\n",
3371              res->lockname.len, res->lockname.name, old_master);
3372         ret = dlm_do_assert_master(dlm, res, iter.node_map,
3373                                    DLM_ASSERT_MASTER_FINISH_MIGRATION);
3374         if (ret < 0) {
3375                 mlog(0, "assert master to original master failed "
3376                      "with %d.\n", ret);
3377                 /* the only nonzero status here would be because of
3378                  * a dead original node.  we're done. */
3379                 ret = 0;
3380         }
3381
3382         /* all done, set the owner, clear the flag */
3383         spin_lock(&res->spinlock);
3384         dlm_set_lockres_owner(dlm, res, dlm->node_num);
3385         res->state &= ~DLM_LOCK_RES_MIGRATING;
3386         spin_unlock(&res->spinlock);
3387         /* re-dirty it on the new master */
3388         dlm_kick_thread(dlm, res);
3389         wake_up(&res->wq);
3390 leave:
3391         return ret;
3392 }
3393
3394 /*
3395  * LOCKRES AST REFCOUNT
3396  * this is integral to migration
3397  */
3398
3399 /* for future intent to call an ast, reserve one ahead of time.
3400  * this should be called only after waiting on the lockres
3401  * with dlm_wait_on_lockres, and while still holding the
3402  * spinlock after the call. */
3403 void __dlm_lockres_reserve_ast(struct dlm_lock_resource *res)
3404 {
3405         assert_spin_locked(&res->spinlock);
3406         if (res->state & DLM_LOCK_RES_MIGRATING) {
3407                 __dlm_print_one_lock_resource(res);
3408         }
3409         BUG_ON(res->state & DLM_LOCK_RES_MIGRATING);
3410
3411         atomic_inc(&res->asts_reserved);
3412 }
3413
3414 /*
3415  * used to drop the reserved ast, either because it went unused,
3416  * or because the ast/bast was actually called.
3417  *
3418  * also, if there is a pending migration on this lockres,
3419  * and this was the last pending ast on the lockres,
3420  * atomically set the MIGRATING flag before we drop the lock.
3421  * this is how we ensure that migration can proceed with no
3422  * asts in progress.  note that it is ok if the state of the
3423  * queues is such that a lock should be granted in the future
3424  * or that a bast should be fired, because the new master will
3425  * shuffle the lists on this lockres as soon as it is migrated.
3426  */
3427 void dlm_lockres_release_ast(struct dlm_ctxt *dlm,
3428                              struct dlm_lock_resource *res)
3429 {
3430         if (!atomic_dec_and_lock(&res->asts_reserved, &res->spinlock))
3431                 return;
3432
3433         if (!res->migration_pending) {
3434                 spin_unlock(&res->spinlock);
3435                 return;
3436         }
3437
3438         BUG_ON(res->state & DLM_LOCK_RES_MIGRATING);
3439         res->migration_pending = 0;
3440         res->state |= DLM_LOCK_RES_MIGRATING;
3441         spin_unlock(&res->spinlock);
3442         wake_up(&res->wq);
3443         wake_up(&dlm->migration_wq);
3444 }
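/*
 * Illustrative sketch of the reserve/release pairing described above
 * (not part of this file): a path that intends to fire an ast or bast
 * later reserves one while holding res->spinlock, and drops the
 * reservation once the ast has been handed off or the intent is
 * abandoned.  The hand-off step shown is hypothetical.
 */
static void example_reserve_and_release_ast(struct dlm_ctxt *dlm,
					    struct dlm_lock_resource *res)
{
	spin_lock(&res->spinlock);
	__dlm_lockres_reserve_ast(res);
	spin_unlock(&res->spinlock);

	/* hypothetical: hand the ast/bast to the dlm thread here */

	/* whether or not anything was queued, drop the reservation; if
	 * a migration is pending and this was the last reservation,
	 * dlm_lockres_release_ast() is what sets MIGRATING and wakes
	 * the migration waiters */
	dlm_lockres_release_ast(dlm, res);
}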