1 /* -*- mode: c; c-basic-offset: 8; -*-
2  * vim: noexpandtab sw=8 ts=8 sts=0:
3  *
4  * dlmmaster.c
5  *
6  * standalone DLM module
7  *
8  * Copyright (C) 2004 Oracle.  All rights reserved.
9  *
10  * This program is free software; you can redistribute it and/or
11  * modify it under the terms of the GNU General Public
12  * License as published by the Free Software Foundation; either
13  * version 2 of the License, or (at your option) any later version.
14  *
15  * This program is distributed in the hope that it will be useful,
16  * but WITHOUT ANY WARRANTY; without even the implied warranty of
17  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
18  * General Public License for more details.
19  *
20  * You should have received a copy of the GNU General Public
21  * License along with this program; if not, write to the
22  * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
23  * Boston, MA 02111-1307, USA.
24  *
25  */
26
27
28 #include <linux/module.h>
29 #include <linux/fs.h>
30 #include <linux/types.h>
31 #include <linux/slab.h>
32 #include <linux/highmem.h>
33 #include <linux/utsname.h>
34 #include <linux/init.h>
35 #include <linux/sysctl.h>
36 #include <linux/random.h>
37 #include <linux/blkdev.h>
38 #include <linux/socket.h>
39 #include <linux/inet.h>
40 #include <linux/spinlock.h>
41 #include <linux/delay.h>
42
43
44 #include "cluster/heartbeat.h"
45 #include "cluster/nodemanager.h"
46 #include "cluster/tcp.h"
47
48 #include "dlmapi.h"
49 #include "dlmcommon.h"
50 #include "dlmdebug.h"
51 #include "dlmdomain.h"
52
53 #define MLOG_MASK_PREFIX (ML_DLM|ML_DLM_MASTER)
54 #include "cluster/masklog.h"
55
56 enum dlm_mle_type {
57         DLM_MLE_BLOCK,
58         DLM_MLE_MASTER,
59         DLM_MLE_MIGRATION
60 };
61
62 struct dlm_lock_name
63 {
64         u8 len;
65         u8 name[DLM_LOCKID_NAME_MAX];
66 };
67
68 struct dlm_master_list_entry
69 {
70         struct list_head list;
71         struct list_head hb_events;
72         struct dlm_ctxt *dlm;
73         spinlock_t spinlock;
74         wait_queue_head_t wq;
75         atomic_t woken;
76         struct kref mle_refs;
77         unsigned long maybe_map[BITS_TO_LONGS(O2NM_MAX_NODES)];
78         unsigned long vote_map[BITS_TO_LONGS(O2NM_MAX_NODES)];
79         unsigned long response_map[BITS_TO_LONGS(O2NM_MAX_NODES)];
80         unsigned long node_map[BITS_TO_LONGS(O2NM_MAX_NODES)];
81         u8 master;
82         u8 new_master;
83         enum dlm_mle_type type;
84         struct o2hb_callback_func mle_hb_up;
85         struct o2hb_callback_func mle_hb_down;
86         union {
87                 struct dlm_lock_resource *res;
88                 struct dlm_lock_name name;
89         } u;
90 };
91
92 static void dlm_mle_node_down(struct dlm_ctxt *dlm,
93                               struct dlm_master_list_entry *mle,
94                               struct o2nm_node *node,
95                               int idx);
96 static void dlm_mle_node_up(struct dlm_ctxt *dlm,
97                             struct dlm_master_list_entry *mle,
98                             struct o2nm_node *node,
99                             int idx);
100
101 static void dlm_assert_master_worker(struct dlm_work_item *item, void *data);
102 static int dlm_do_assert_master(struct dlm_ctxt *dlm, const char *lockname,
103                                 unsigned int namelen, void *nodemap,
104                                 u32 flags);
105
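/* compare an mle against a (dlm, lockname) pair.  block and migration
 * mles carry the lock name inline; master mles reference the lockres. */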
106 static inline int dlm_mle_equal(struct dlm_ctxt *dlm,
107                                 struct dlm_master_list_entry *mle,
108                                 const char *name,
109                                 unsigned int namelen)
110 {
111         struct dlm_lock_resource *res;
112
113         if (dlm != mle->dlm)
114                 return 0;
115
116         if (mle->type == DLM_MLE_BLOCK ||
117             mle->type == DLM_MLE_MIGRATION) {
118                 if (namelen != mle->u.name.len ||
119                     memcmp(name, mle->u.name.name, namelen)!=0)
120                         return 0;
121         } else {
122                 res = mle->u.res;
123                 if (namelen != res->lockname.len ||
124                     memcmp(res->lockname.name, name, namelen) != 0)
125                         return 0;
126         }
127         return 1;
128 }
129
130 #if 0
131 /* Code here is compiled out (#if 0) but kept because it aids debugging */
132
133 void dlm_print_one_mle(struct dlm_master_list_entry *mle)
134 {
135         int i = 0, refs;
136         char *type;
137         char attached;
138         u8 master;
139         unsigned int namelen;
140         const char *name;
141         struct kref *k;
142
143         k = &mle->mle_refs;
144         if (mle->type == DLM_MLE_BLOCK)
145                 type = "BLK";
146         else if (mle->type == DLM_MLE_MASTER)
147                 type = "MAS";
148         else
149                 type = "MIG";
150         refs = atomic_read(&k->refcount);
151         master = mle->master;
152         attached = (list_empty(&mle->hb_events) ? 'N' : 'Y');
153
154         if (mle->type != DLM_MLE_MASTER) {
155                 namelen = mle->u.name.len;
156                 name = mle->u.name.name;
157         } else {
158                 namelen = mle->u.res->lockname.len;
159                 name = mle->u.res->lockname.name;
160         }
161
162         mlog(ML_NOTICE, "  #%3d: %3s  %3d  %3u   %3u %c    (%d)%.*s\n",
163                   i, type, refs, master, mle->new_master, attached,
164                   namelen, namelen, name);
165 }
166
167 static void dlm_dump_mles(struct dlm_ctxt *dlm)
168 {
169         struct dlm_master_list_entry *mle;
170         struct list_head *iter;
171         
172         mlog(ML_NOTICE, "dumping all mles for domain %s:\n", dlm->name);
173         mlog(ML_NOTICE, "  ####: type refs owner new events? lockname nodemap votemap respmap maybemap\n");
174         spin_lock(&dlm->master_lock);
175         list_for_each(iter, &dlm->master_list) {
176                 mle = list_entry(iter, struct dlm_master_list_entry, list);
177                 dlm_print_one_mle(mle);
178         }
179         spin_unlock(&dlm->master_lock);
180 }
181
182 int dlm_dump_all_mles(const char __user *data, unsigned int len)
183 {
184         struct list_head *iter;
185         struct dlm_ctxt *dlm;
186
187         spin_lock(&dlm_domain_lock);
188         list_for_each(iter, &dlm_domains) {
189                 dlm = list_entry(iter, struct dlm_ctxt, list);
190                 mlog(ML_NOTICE, "found dlm: %p, name=%s\n", dlm, dlm->name);
191                 dlm_dump_mles(dlm);
192         }
193         spin_unlock(&dlm_domain_lock);
194         return len;
195 }
196 EXPORT_SYMBOL_GPL(dlm_dump_all_mles);
197
198 #endif  /*  0  */
199
200
201 static kmem_cache_t *dlm_mle_cache = NULL;
202
203
204 static void dlm_mle_release(struct kref *kref);
205 static void dlm_init_mle(struct dlm_master_list_entry *mle,
206                         enum dlm_mle_type type,
207                         struct dlm_ctxt *dlm,
208                         struct dlm_lock_resource *res,
209                         const char *name,
210                         unsigned int namelen);
211 static void dlm_put_mle(struct dlm_master_list_entry *mle);
212 static void __dlm_put_mle(struct dlm_master_list_entry *mle);
213 static int dlm_find_mle(struct dlm_ctxt *dlm,
214                         struct dlm_master_list_entry **mle,
215                         char *name, unsigned int namelen);
216
217 static int dlm_do_master_request(struct dlm_master_list_entry *mle, int to);
218
219
220 static int dlm_wait_for_lock_mastery(struct dlm_ctxt *dlm,
221                                      struct dlm_lock_resource *res,
222                                      struct dlm_master_list_entry *mle,
223                                      int *blocked);
224 static int dlm_restart_lock_mastery(struct dlm_ctxt *dlm,
225                                     struct dlm_lock_resource *res,
226                                     struct dlm_master_list_entry *mle,
227                                     int blocked);
228 static int dlm_add_migration_mle(struct dlm_ctxt *dlm,
229                                  struct dlm_lock_resource *res,
230                                  struct dlm_master_list_entry *mle,
231                                  struct dlm_master_list_entry **oldmle,
232                                  const char *name, unsigned int namelen,
233                                  u8 new_master, u8 master);
234
235 static u8 dlm_pick_migration_target(struct dlm_ctxt *dlm,
236                                     struct dlm_lock_resource *res);
237 static void dlm_remove_nonlocal_locks(struct dlm_ctxt *dlm,
238                                       struct dlm_lock_resource *res);
239 static int dlm_mark_lockres_migrating(struct dlm_ctxt *dlm,
240                                        struct dlm_lock_resource *res,
241                                        u8 target);
242
243
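/* returns 1 if errno looks like the remote node died or the link to it
 * was lost, 0 for any other error. */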
244 int dlm_is_host_down(int errno)
245 {
246         switch (errno) {
247                 case -EBADF:
248                 case -ECONNREFUSED:
249                 case -ENOTCONN:
250                 case -ECONNRESET:
251                 case -EPIPE:
252                 case -EHOSTDOWN:
253                 case -EHOSTUNREACH:
254                 case -ETIMEDOUT:
255                 case -ECONNABORTED:
256                 case -ENETDOWN:
257                 case -ENETUNREACH:
258                 case -ENETRESET:
259                 case -ESHUTDOWN:
260                 case -ENOPROTOOPT:
261                 case -EINVAL:   /* if returned from our tcp code,
262                                    this means there is no socket */
263                         return 1;
264         }
265         return 0;
266 }
267
268
269 /*
270  * MASTER LIST FUNCTIONS
271  */
272
273
274 /*
275  * regarding master list entries and heartbeat callbacks:
276  *
277  * in order to avoid sleeping and allocation that occurs in
278  * heartbeat, master list entries are simply attached to the
279  * dlm's established heartbeat callbacks.  the mle is attached
280  * when it is created, and since the dlm->spinlock is held at
281  * that time, any heartbeat event will be properly discovered
282  * by the mle.  the mle needs to be detached from the
283  * dlm->mle_hb_events list as soon as heartbeat events are no
284  * longer useful to the mle, and before the mle is freed.
285  *
286  * as a general rule, heartbeat events are no longer needed by
287  * the mle once an "answer" regarding the lock master has been
288  * received.
289  */
290 static inline void __dlm_mle_attach_hb_events(struct dlm_ctxt *dlm,
291                                               struct dlm_master_list_entry *mle)
292 {
293         assert_spin_locked(&dlm->spinlock);
294
295         list_add_tail(&mle->hb_events, &dlm->mle_hb_events);
296 }
297
298
299 static inline void __dlm_mle_detach_hb_events(struct dlm_ctxt *dlm,
300                                               struct dlm_master_list_entry *mle)
301 {
302         if (!list_empty(&mle->hb_events))
303                 list_del_init(&mle->hb_events);
304 }
305
306
307 static inline void dlm_mle_detach_hb_events(struct dlm_ctxt *dlm,
308                                             struct dlm_master_list_entry *mle)
309 {
310         spin_lock(&dlm->spinlock);
311         __dlm_mle_detach_hb_events(dlm, mle);
312         spin_unlock(&dlm->spinlock);
313 }
314
315 /* drop a ref; the final put removes the mle from the master list and frees it via dlm_mle_release */
316 static void __dlm_put_mle(struct dlm_master_list_entry *mle)
317 {
318         struct dlm_ctxt *dlm;
319         dlm = mle->dlm;
320
321         assert_spin_locked(&dlm->spinlock);
322         assert_spin_locked(&dlm->master_lock);
323         BUG_ON(!atomic_read(&mle->mle_refs.refcount));
324
325         kref_put(&mle->mle_refs, dlm_mle_release);
326 }
327
328
329 /* must not have any spinlocks coming in */
330 static void dlm_put_mle(struct dlm_master_list_entry *mle)
331 {
332         struct dlm_ctxt *dlm;
333         dlm = mle->dlm;
334
335         spin_lock(&dlm->spinlock);
336         spin_lock(&dlm->master_lock);
337         __dlm_put_mle(mle);
338         spin_unlock(&dlm->master_lock);
339         spin_unlock(&dlm->spinlock);
340 }
341
342 static inline void dlm_get_mle(struct dlm_master_list_entry *mle)
343 {
344         kref_get(&mle->mle_refs);
345 }
346
347 static void dlm_init_mle(struct dlm_master_list_entry *mle,
348                         enum dlm_mle_type type,
349                         struct dlm_ctxt *dlm,
350                         struct dlm_lock_resource *res,
351                         const char *name,
352                         unsigned int namelen)
353 {
354         assert_spin_locked(&dlm->spinlock);
355
356         mle->dlm = dlm;
357         mle->type = type;
358         INIT_LIST_HEAD(&mle->list);
359         INIT_LIST_HEAD(&mle->hb_events);
360         memset(mle->maybe_map, 0, sizeof(mle->maybe_map));
361         spin_lock_init(&mle->spinlock);
362         init_waitqueue_head(&mle->wq);
363         atomic_set(&mle->woken, 0);
364         kref_init(&mle->mle_refs);
365         memset(mle->response_map, 0, sizeof(mle->response_map));
366         mle->master = O2NM_MAX_NODES;
367         mle->new_master = O2NM_MAX_NODES;
368
369         if (mle->type == DLM_MLE_MASTER) {
370                 BUG_ON(!res);
371                 mle->u.res = res;
372         } else if (mle->type == DLM_MLE_BLOCK) {
373                 BUG_ON(!name);
374                 memcpy(mle->u.name.name, name, namelen);
375                 mle->u.name.len = namelen;
376         } else /* DLM_MLE_MIGRATION */ {
377                 BUG_ON(!name);
378                 memcpy(mle->u.name.name, name, namelen);
379                 mle->u.name.len = namelen;
380         }
381
382         /* copy off the node_map and register hb callbacks on our copy */
383         memcpy(mle->node_map, dlm->domain_map, sizeof(mle->node_map));
384         memcpy(mle->vote_map, dlm->domain_map, sizeof(mle->vote_map));
385         clear_bit(dlm->node_num, mle->vote_map);
386         clear_bit(dlm->node_num, mle->node_map);
387
388         /* attach the mle to the domain node up/down events */
389         __dlm_mle_attach_hb_events(dlm, mle);
390 }
391
392
393 /* returns 1 if found, 0 if not */
394 static int dlm_find_mle(struct dlm_ctxt *dlm,
395                         struct dlm_master_list_entry **mle,
396                         char *name, unsigned int namelen)
397 {
398         struct dlm_master_list_entry *tmpmle;
399         struct list_head *iter;
400
401         assert_spin_locked(&dlm->master_lock);
402
403         list_for_each(iter, &dlm->master_list) {
404                 tmpmle = list_entry(iter, struct dlm_master_list_entry, list);
405                 if (!dlm_mle_equal(dlm, tmpmle, name, namelen))
406                         continue;
407                 dlm_get_mle(tmpmle);
408                 *mle = tmpmle;
409                 return 1;
410         }
411         return 0;
412 }
413
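/* walk every mle attached to the domain's heartbeat events and update
 * its node_map for the node that just came up or went down.
 * caller must hold dlm->spinlock (asserted below). */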
414 void dlm_hb_event_notify_attached(struct dlm_ctxt *dlm, int idx, int node_up)
415 {
416         struct dlm_master_list_entry *mle;
417         struct list_head *iter;
418
419         assert_spin_locked(&dlm->spinlock);
420         
421         list_for_each(iter, &dlm->mle_hb_events) {
422                 mle = list_entry(iter, struct dlm_master_list_entry, 
423                                  hb_events);
424                 if (node_up)
425                         dlm_mle_node_up(dlm, mle, NULL, idx);
426                 else
427                         dlm_mle_node_down(dlm, mle, NULL, idx);
428         }
429 }
430
431 static void dlm_mle_node_down(struct dlm_ctxt *dlm,
432                               struct dlm_master_list_entry *mle,
433                               struct o2nm_node *node, int idx)
434 {
435         spin_lock(&mle->spinlock);
436
437         if (!test_bit(idx, mle->node_map))
438                 mlog(0, "node %u already removed from nodemap!\n", idx);
439         else
440                 clear_bit(idx, mle->node_map);
441
442         spin_unlock(&mle->spinlock);
443 }
444
445 static void dlm_mle_node_up(struct dlm_ctxt *dlm,
446                             struct dlm_master_list_entry *mle,
447                             struct o2nm_node *node, int idx)
448 {
449         spin_lock(&mle->spinlock);
450
451         if (test_bit(idx, mle->node_map))
452                 mlog(0, "node %u already in node map!\n", idx);
453         else
454                 set_bit(idx, mle->node_map);
455
456         spin_unlock(&mle->spinlock);
457 }
458
459
460 int dlm_init_mle_cache(void)
461 {
462         dlm_mle_cache = kmem_cache_create("dlm_mle_cache",
463                                           sizeof(struct dlm_master_list_entry),
464                                           0, SLAB_HWCACHE_ALIGN,
465                                           NULL, NULL);
466         if (dlm_mle_cache == NULL)
467                 return -ENOMEM;
468         return 0;
469 }
470
471 void dlm_destroy_mle_cache(void)
472 {
473         if (dlm_mle_cache)
474                 kmem_cache_destroy(dlm_mle_cache);
475 }
476
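/* kref release callback: remove the mle from the master list, detach it
 * from heartbeat events and free it back to the mle cache.  runs with
 * dlm->spinlock and dlm->master_lock held (asserted below). */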
477 static void dlm_mle_release(struct kref *kref)
478 {
479         struct dlm_master_list_entry *mle;
480         struct dlm_ctxt *dlm;
481
482         mlog_entry_void();
483
484         mle = container_of(kref, struct dlm_master_list_entry, mle_refs);
485         dlm = mle->dlm;
486
487         if (mle->type != DLM_MLE_MASTER) {
488                 mlog(0, "calling mle_release for %.*s, type %d\n",
489                      mle->u.name.len, mle->u.name.name, mle->type);
490         } else {
491                 mlog(0, "calling mle_release for %.*s, type %d\n",
492                      mle->u.res->lockname.len,
493                      mle->u.res->lockname.name, mle->type);
494         }
495         assert_spin_locked(&dlm->spinlock);
496         assert_spin_locked(&dlm->master_lock);
497
498         /* remove from list if not already */
499         if (!list_empty(&mle->list))
500                 list_del_init(&mle->list);
501
502         /* detach the mle from the domain node up/down events */
503         __dlm_mle_detach_hb_events(dlm, mle);
504
505         /* NOTE: kfree under spinlock here.
506          * if this is bad, we can move this to a freelist. */
507         kmem_cache_free(dlm_mle_cache, mle);
508 }
509
510
511 /*
512  * LOCK RESOURCE FUNCTIONS
513  */
514
515 static void dlm_set_lockres_owner(struct dlm_ctxt *dlm,
516                                   struct dlm_lock_resource *res,
517                                   u8 owner)
518 {
519         assert_spin_locked(&res->spinlock);
520
521         mlog_entry("%.*s, %u\n", res->lockname.len, res->lockname.name, owner);
522
523         if (owner == dlm->node_num)
524                 atomic_inc(&dlm->local_resources);
525         else if (owner == DLM_LOCK_RES_OWNER_UNKNOWN)
526                 atomic_inc(&dlm->unknown_resources);
527         else
528                 atomic_inc(&dlm->remote_resources);
529
530         res->owner = owner;
531 }
532
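/* drop the stat counter for the old owner before dlm_set_lockres_owner
 * bumps the counter for (and records) the new one. */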
533 void dlm_change_lockres_owner(struct dlm_ctxt *dlm,
534                               struct dlm_lock_resource *res, u8 owner)
535 {
536         assert_spin_locked(&res->spinlock);
537
538         if (owner == res->owner)
539                 return;
540
541         if (res->owner == dlm->node_num)
542                 atomic_dec(&dlm->local_resources);
543         else if (res->owner == DLM_LOCK_RES_OWNER_UNKNOWN)
544                 atomic_dec(&dlm->unknown_resources);
545         else
546                 atomic_dec(&dlm->remote_resources);
547
548         dlm_set_lockres_owner(dlm, res, owner);
549 }
550
551
552 static void dlm_lockres_release(struct kref *kref)
553 {
554         struct dlm_lock_resource *res;
555
556         res = container_of(kref, struct dlm_lock_resource, refs);
557
558         /* This should not happen -- all lockres' have a name
559          * associated with them at init time. */
560         BUG_ON(!res->lockname.name);
561
562         mlog(0, "destroying lockres %.*s\n", res->lockname.len,
563              res->lockname.name);
564
565         /* By the time we're ready to blow this guy away, we shouldn't
566          * be on any lists. */
567         BUG_ON(!list_empty(&res->list));
568         BUG_ON(!list_empty(&res->granted));
569         BUG_ON(!list_empty(&res->converting));
570         BUG_ON(!list_empty(&res->blocked));
571         BUG_ON(!list_empty(&res->dirty));
572         BUG_ON(!list_empty(&res->recovering));
573         BUG_ON(!list_empty(&res->purge));
574
575         kfree(res->lockname.name);
576
577         kfree(res);
578 }
579
580 void dlm_lockres_get(struct dlm_lock_resource *res)
581 {
582         kref_get(&res->refs);
583 }
584
585 void dlm_lockres_put(struct dlm_lock_resource *res)
586 {
587         kref_put(&res->refs, dlm_lockres_release);
588 }
589
590 static void dlm_init_lockres(struct dlm_ctxt *dlm,
591                              struct dlm_lock_resource *res,
592                              const char *name, unsigned int namelen)
593 {
594         char *qname;
595
596         /* If we memset here, we lose our reference to the kmalloc'd
597          * res->lockname.name, so be sure to init every field
598          * correctly! */
599
600         qname = (char *) res->lockname.name;
601         memcpy(qname, name, namelen);
602
603         res->lockname.len = namelen;
604         res->lockname.hash = full_name_hash(name, namelen);
605
606         init_waitqueue_head(&res->wq);
607         spin_lock_init(&res->spinlock);
608         INIT_LIST_HEAD(&res->list);
609         INIT_LIST_HEAD(&res->granted);
610         INIT_LIST_HEAD(&res->converting);
611         INIT_LIST_HEAD(&res->blocked);
612         INIT_LIST_HEAD(&res->dirty);
613         INIT_LIST_HEAD(&res->recovering);
614         INIT_LIST_HEAD(&res->purge);
615         atomic_set(&res->asts_reserved, 0);
616         res->migration_pending = 0;
617
618         kref_init(&res->refs);
619
620         /* just for consistency */
621         spin_lock(&res->spinlock);
622         dlm_set_lockres_owner(dlm, res, DLM_LOCK_RES_OWNER_UNKNOWN);
623         spin_unlock(&res->spinlock);
624
625         res->state = DLM_LOCK_RES_IN_PROGRESS;
626
627         res->last_used = 0;
628
629         memset(res->lvb, 0, DLM_LVB_LEN);
630 }
631
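/* allocate and initialize a new lockres; the caller holds the initial
 * reference and drops it with dlm_lockres_put(). */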
632 struct dlm_lock_resource *dlm_new_lockres(struct dlm_ctxt *dlm,
633                                    const char *name,
634                                    unsigned int namelen)
635 {
636         struct dlm_lock_resource *res;
637
638         res = kmalloc(sizeof(struct dlm_lock_resource), GFP_KERNEL);
639         if (!res)
640                 return NULL;
641
642         res->lockname.name = kmalloc(namelen, GFP_KERNEL);
643         if (!res->lockname.name) {
644                 kfree(res);
645                 return NULL;
646         }
647
648         dlm_init_lockres(dlm, res, name, namelen);
649         return res;
650 }
651
652 /*
653  * lookup a lock resource by name.
654  * may already exist in the hashtable.
655  * lockid is null terminated
656  *
657  * if not, allocate enough for the lockres and for
658  * the temporary structure used in doing the mastering.
659  *
660  * also, do a lookup in the dlm->master_list to see
661  * if another node has begun mastering the same lock.
662  * if so, there should be a block entry in there
663  * for this name, and we should *not* attempt to master
664  * the lock here.  we need to wait around for that node
665  * to assert_master (or die).
666  *
667  */
668 struct dlm_lock_resource * dlm_get_lock_resource(struct dlm_ctxt *dlm,
669                                           const char *lockid,
670                                           int flags)
671 {
672         struct dlm_lock_resource *tmpres=NULL, *res=NULL;
673         struct dlm_master_list_entry *mle = NULL;
674         struct dlm_master_list_entry *alloc_mle = NULL;
675         int blocked = 0;
676         int ret, nodenum;
677         struct dlm_node_iter iter;
678         unsigned int namelen;
679         int tries = 0;
680
681         BUG_ON(!lockid);
682
683         namelen = strlen(lockid);
684
685         mlog(0, "get lockres %s (len %d)\n", lockid, namelen);
686
687 lookup:
688         spin_lock(&dlm->spinlock);
689         tmpres = __dlm_lookup_lockres(dlm, lockid, namelen);
690         if (tmpres) {
691                 spin_unlock(&dlm->spinlock);
692                 mlog(0, "found in hash!\n");
693                 if (res)
694                         dlm_lockres_put(res);
695                 res = tmpres;
696                 goto leave;
697         }
698
699         if (!res) {
700                 spin_unlock(&dlm->spinlock);
701                 mlog(0, "allocating a new resource\n");
702                 /* nothing found and we need to allocate one. */
703                 alloc_mle = (struct dlm_master_list_entry *)
704                         kmem_cache_alloc(dlm_mle_cache, GFP_KERNEL);
705                 if (!alloc_mle)
706                         goto leave;
707                 res = dlm_new_lockres(dlm, lockid, namelen);
708                 if (!res)
709                         goto leave;
710                 goto lookup;
711         }
712
713         mlog(0, "no lockres found, allocated our own: %p\n", res);
714
715         if (flags & LKM_LOCAL) {
716                 /* caller knows it's safe to assume it's not mastered elsewhere
717                  * DONE!  return right away */
718                 spin_lock(&res->spinlock);
719                 dlm_change_lockres_owner(dlm, res, dlm->node_num);
720                 __dlm_insert_lockres(dlm, res);
721                 spin_unlock(&res->spinlock);
722                 spin_unlock(&dlm->spinlock);
723                 /* lockres still marked IN_PROGRESS */
724                 goto wake_waiters;
725         }
726
727         /* check master list to see if another node has started mastering it */
728         spin_lock(&dlm->master_lock);
729
730         /* if we found a block, wait for lock to be mastered by another node */
731         blocked = dlm_find_mle(dlm, &mle, (char *)lockid, namelen);
732         if (blocked) {
733                 if (mle->type == DLM_MLE_MASTER) {
734                         mlog(ML_ERROR, "master entry for nonexistent lock!\n");
735                         BUG();
736                 } else if (mle->type == DLM_MLE_MIGRATION) {
737                         /* migration is in progress! */
738                         /* the good news is that we now know the
739                          * "current" master (mle->master). */
740
741                         spin_unlock(&dlm->master_lock);
742                         assert_spin_locked(&dlm->spinlock);
743
744                         /* set the lockres owner and hash it */
745                         spin_lock(&res->spinlock);
746                         dlm_set_lockres_owner(dlm, res, mle->master);
747                         __dlm_insert_lockres(dlm, res);
748                         spin_unlock(&res->spinlock);
749                         spin_unlock(&dlm->spinlock);
750
751                         /* master is known, detach */
752                         dlm_mle_detach_hb_events(dlm, mle);
753                         dlm_put_mle(mle);
754                         mle = NULL;
755                         goto wake_waiters;
756                 }
757         } else {
758                 /* go ahead and try to master lock on this node */
759                 mle = alloc_mle;
760                 /* make sure this does not get freed below */
761                 alloc_mle = NULL;
762                 dlm_init_mle(mle, DLM_MLE_MASTER, dlm, res, NULL, 0);
763                 set_bit(dlm->node_num, mle->maybe_map);
764                 list_add(&mle->list, &dlm->master_list);
765         }
766
767         /* at this point there is either a DLM_MLE_BLOCK or a
768          * DLM_MLE_MASTER on the master list, so it's safe to add the
769          * lockres to the hashtable.  anyone who finds the lock will
770          * still have to wait on the IN_PROGRESS. */
771
772         /* finally add the lockres to its hash bucket */
773         __dlm_insert_lockres(dlm, res);
774         /* get an extra ref on the mle in case this is a BLOCK
775          * if so, the creator of the BLOCK may try to put the last
776          * ref at this time in the assert master handler, so we
777          * need an extra one to keep from a bad ptr deref. */
778         dlm_get_mle(mle);
779         spin_unlock(&dlm->master_lock);
780         spin_unlock(&dlm->spinlock);
781
782         /* must wait for lock to be mastered elsewhere */
783         if (blocked)
784                 goto wait;
785
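        /* ask every node in the vote map who the master is; stop as soon
         * as one of them answers YES. */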
786 redo_request:
787         ret = -EINVAL;
788         dlm_node_iter_init(mle->vote_map, &iter);
789         while ((nodenum = dlm_node_iter_next(&iter)) >= 0) {
790                 ret = dlm_do_master_request(mle, nodenum);
791                 if (ret < 0)
792                         mlog_errno(ret);
793                 if (mle->master != O2NM_MAX_NODES) {
794                         /* found a master ! */
795                         break;
796                 }
797         }
798
799 wait:
800         /* keep going until the response map includes all nodes */
801         ret = dlm_wait_for_lock_mastery(dlm, res, mle, &blocked);
802         if (ret < 0) {
803                 mlog(0, "%s:%.*s: node map changed, redo the "
804                      "master request now, blocked=%d\n",
805                      dlm->name, res->lockname.len,
806                      res->lockname.name, blocked);
807                 if (++tries > 20) {
808                         mlog(ML_ERROR, "%s:%.*s: spinning on "
809                              "dlm_wait_for_lock_mastery, blocked=%d\n", 
810                              dlm->name, res->lockname.len, 
811                              res->lockname.name, blocked);
812                         dlm_print_one_lock_resource(res);
813                         /* dlm_print_one_mle(mle); */
814                         tries = 0;
815                 }
816                 goto redo_request;
817         }
818
819         mlog(0, "lockres mastered by %u\n", res->owner);
820         /* make sure we never continue without this */
821         BUG_ON(res->owner == O2NM_MAX_NODES);
822
823         /* master is known, detach if not already detached */
824         dlm_mle_detach_hb_events(dlm, mle);
825         dlm_put_mle(mle);
826         /* put the extra ref */
827         dlm_put_mle(mle);
828
829 wake_waiters:
830         spin_lock(&res->spinlock);
831         res->state &= ~DLM_LOCK_RES_IN_PROGRESS;
832         spin_unlock(&res->spinlock);
833         wake_up(&res->wq);
834
835 leave:
836         /* need to free the unused mle */
837         if (alloc_mle)
838                 kmem_cache_free(dlm_mle_cache, alloc_mle);
839
840         return res;
841 }
842
843
844 #define DLM_MASTERY_TIMEOUT_MS   5000
845
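/* loop until mastery of this lockres is resolved: restart if the node
 * map changed, assert master ourselves if voting finished and we hold
 * the lowest node number in the maybe_map, otherwise sleep for up to
 * DLM_MASTERY_TIMEOUT_MS and recheck. */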
846 static int dlm_wait_for_lock_mastery(struct dlm_ctxt *dlm,
847                                      struct dlm_lock_resource *res,
848                                      struct dlm_master_list_entry *mle,
849                                      int *blocked)
850 {
851         u8 m;
852         int ret, bit;
853         int map_changed, voting_done;
854         int assert, sleep;
855
856 recheck:
857         ret = 0;
858         assert = 0;
859
860         /* check if another node has already become the owner */
861         spin_lock(&res->spinlock);
862         if (res->owner != DLM_LOCK_RES_OWNER_UNKNOWN) {
863                 spin_unlock(&res->spinlock);
864                 goto leave;
865         }
866         spin_unlock(&res->spinlock);
867
868         spin_lock(&mle->spinlock);
869         m = mle->master;
870         map_changed = (memcmp(mle->vote_map, mle->node_map,
871                               sizeof(mle->vote_map)) != 0);
872         voting_done = (memcmp(mle->vote_map, mle->response_map,
873                              sizeof(mle->vote_map)) == 0);
874
875         /* restart if we hit any errors */
876         if (map_changed) {
877                 int b;
878                 mlog(0, "%s: %.*s: node map changed, restarting\n",
879                      dlm->name, res->lockname.len, res->lockname.name);
880                 ret = dlm_restart_lock_mastery(dlm, res, mle, *blocked);
881                 b = (mle->type == DLM_MLE_BLOCK);
882                 if ((*blocked && !b) || (!*blocked && b)) {
883                         mlog(0, "%s:%.*s: status change: old=%d new=%d\n", 
884                              dlm->name, res->lockname.len, res->lockname.name,
885                              *blocked, b);
886                         *blocked = b;
887                 }
888                 spin_unlock(&mle->spinlock);
889                 if (ret < 0) {
890                         mlog_errno(ret);
891                         goto leave;
892                 }
893                 mlog(0, "%s:%.*s: restart lock mastery succeeded, "
894                      "rechecking now\n", dlm->name, res->lockname.len,
895                      res->lockname.name);
896                 goto recheck;
897         }
898
899         if (m != O2NM_MAX_NODES) {
900                 /* another node has done an assert!
901                  * all done! */
902                 sleep = 0;
903         } else {
904                 sleep = 1;
905                 /* have all nodes responded? */
906                 if (voting_done && !*blocked) {
907                         bit = find_next_bit(mle->maybe_map, O2NM_MAX_NODES, 0);
908                         if (dlm->node_num <= bit) {
909                                 /* my node number is lowest.
910                                  * now tell other nodes that I am
911                                  * mastering this. */
912                                 mle->master = dlm->node_num;
913                                 assert = 1;
914                                 sleep = 0;
915                         }
916                         /* if voting is done, but we have not received
917                          * an assert master yet, we must sleep */
918                 }
919         }
920
921         spin_unlock(&mle->spinlock);
922
923         /* sleep if we haven't finished voting yet */
924         if (sleep) {
925                 unsigned long timeo = msecs_to_jiffies(DLM_MASTERY_TIMEOUT_MS);
926
927                 /*
928                 if (atomic_read(&mle->mle_refs.refcount) < 2)
929                         mlog(ML_ERROR, "mle (%p) refs=%d, name=%.*s\n", mle,
930                         atomic_read(&mle->mle_refs.refcount),
931                         res->lockname.len, res->lockname.name);
932                 */
933                 atomic_set(&mle->woken, 0);
934                 (void)wait_event_timeout(mle->wq,
935                                          (atomic_read(&mle->woken) == 1),
936                                          timeo);
937                 if (res->owner == O2NM_MAX_NODES) {
938                         mlog(0, "waiting again\n");
939                         goto recheck;
940                 }
941                 mlog(0, "done waiting, master is %u\n", res->owner);
942                 ret = 0;
943                 goto leave;
944         }
945
946         ret = 0;   /* done */
947         if (assert) {
948                 m = dlm->node_num;
949                 mlog(0, "about to master %.*s here, this=%u\n",
950                      res->lockname.len, res->lockname.name, m);
951                 ret = dlm_do_assert_master(dlm, res->lockname.name,
952                                            res->lockname.len, mle->vote_map, 0);
953                 if (ret) {
954                         /* This is a failure in the network path,
955                          * not in the response to the assert_master
956                          * (any nonzero response is a BUG on this node).
957                          * Most likely a socket just got disconnected
958                          * due to node death. */
959                         mlog_errno(ret);
960                 }
961                 /* no longer need to restart lock mastery.
962                  * all living nodes have been contacted. */
963                 ret = 0;
964         }
965
966         /* set the lockres owner */
967         spin_lock(&res->spinlock);
968         dlm_change_lockres_owner(dlm, res, m);
969         spin_unlock(&res->spinlock);
970
971 leave:
972         return ret;
973 }
974
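/* iterator over the bits that differ between two node bitmaps.  each
 * changed node is returned along with whether it came up or went down
 * relative to the original map. */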
975 struct dlm_bitmap_diff_iter
976 {
977         int curnode;
978         unsigned long *orig_bm;
979         unsigned long *cur_bm;
980         unsigned long diff_bm[BITS_TO_LONGS(O2NM_MAX_NODES)];
981 };
982
983 enum dlm_node_state_change
984 {
985         NODE_DOWN = -1,
986         NODE_NO_CHANGE = 0,
987         NODE_UP
988 };
989
990 static void dlm_bitmap_diff_iter_init(struct dlm_bitmap_diff_iter *iter,
991                                       unsigned long *orig_bm,
992                                       unsigned long *cur_bm)
993 {
994         unsigned long p1, p2;
995         int i;
996
997         iter->curnode = -1;
998         iter->orig_bm = orig_bm;
999         iter->cur_bm = cur_bm;
1000
1001         for (i = 0; i < BITS_TO_LONGS(O2NM_MAX_NODES); i++) {
1002                 p1 = *(iter->orig_bm + i);
1003                 p2 = *(iter->cur_bm + i);
1004                 iter->diff_bm[i] = (p1 & ~p2) | (p2 & ~p1);
1005         }
1006 }
1007
1008 static int dlm_bitmap_diff_iter_next(struct dlm_bitmap_diff_iter *iter,
1009                                      enum dlm_node_state_change *state)
1010 {
1011         int bit;
1012
1013         if (iter->curnode >= O2NM_MAX_NODES)
1014                 return -ENOENT;
1015
1016         bit = find_next_bit(iter->diff_bm, O2NM_MAX_NODES,
1017                             iter->curnode+1);
1018         if (bit >= O2NM_MAX_NODES) {
1019                 iter->curnode = O2NM_MAX_NODES;
1020                 return -ENOENT;
1021         }
1022
1023         /* if it was there in the original then this node died */
1024         if (test_bit(bit, iter->orig_bm))
1025                 *state = NODE_DOWN;
1026         else
1027                 *state = NODE_UP;
1028
1029         iter->curnode = bit;
1030         return bit;
1031 }
1032
1033
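/* called with mle->spinlock held when the node map changed during
 * mastery.  nodes that came up are re-polled unless we are blocked or
 * their node number is higher than ours; dead nodes are cleared out of
 * the vote/response/maybe maps.  returns -EAGAIN if the master request
 * round must be redone. */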
1034 static int dlm_restart_lock_mastery(struct dlm_ctxt *dlm,
1035                                     struct dlm_lock_resource *res,
1036                                     struct dlm_master_list_entry *mle,
1037                                     int blocked)
1038 {
1039         struct dlm_bitmap_diff_iter bdi;
1040         enum dlm_node_state_change sc;
1041         int node;
1042         int ret = 0;
1043
1044         mlog(0, "something happened such that the "
1045              "master process may need to be restarted!\n");
1046
1047         assert_spin_locked(&mle->spinlock);
1048
1049         dlm_bitmap_diff_iter_init(&bdi, mle->vote_map, mle->node_map);
1050         node = dlm_bitmap_diff_iter_next(&bdi, &sc);
1051         while (node >= 0) {
1052                 if (sc == NODE_UP) {
1053                         /* a node came up.  easy.  might not even need
1054                          * to talk to it if its node number is higher
1055                          * or if we are already blocked. */
1056                         mlog(0, "node up! %d\n", node);
1057                         if (blocked)
1058                                 goto next;
1059
1060                         if (node > dlm->node_num) {
1061                                 mlog(0, "node > this node. skipping.\n");
1062                                 goto next;
1063                         }
1064
1065                         /* redo the master request, but only for the new node */
1066                         mlog(0, "sending request to new node\n");
1067                         clear_bit(node, mle->response_map);
1068                         set_bit(node, mle->vote_map);
1069                 } else {
1070                         mlog(ML_ERROR, "node down! %d\n", node);
1071
1072                         /* if the node wasn't involved in mastery skip it,
1073                          * but clear it out from the maps so that it will
1074                          * not affect mastery of this lockres */
1075                         clear_bit(node, mle->response_map);
1076                         clear_bit(node, mle->vote_map);
1077                         if (!test_bit(node, mle->maybe_map))
1078                                 goto next;
1079
1080                         /* if we're already blocked on lock mastery, and the
1081                          * dead node wasn't the expected master, or there is
1082                          * another node in the maybe_map, keep waiting */
1083                         if (blocked) {
1084                                 int lowest = find_next_bit(mle->maybe_map,
1085                                                        O2NM_MAX_NODES, 0);
1086
1087                                 /* act like it was never there */
1088                                 clear_bit(node, mle->maybe_map);
1089
1090                                 if (node != lowest)
1091                                         goto next;
1092
1093                                 mlog(ML_ERROR, "expected master %u died while "
1094                                      "this node was blocked waiting on it!\n",
1095                                      node);
1096                                 lowest = find_next_bit(mle->maybe_map,
1097                                                        O2NM_MAX_NODES,
1098                                                        lowest+1);
1099                                 if (lowest < O2NM_MAX_NODES) {
1100                                         mlog(0, "still blocked. waiting "
1101                                              "on %u now\n", lowest);
1102                                         goto next;
1103                                 }
1104
1105                                 /* mle is an MLE_BLOCK, but there is now
1106                                  * nothing left to block on.  we need to return
1107                                  * all the way back out and try again with
1108                                  * an MLE_MASTER. dlm_do_local_recovery_cleanup
1109                                  * has already run, so the mle refcount is ok */
1110                                 mlog(0, "no longer blocking. we can "
1111                                      "try to master this here\n");
1112                                 mle->type = DLM_MLE_MASTER;
1113                                 memset(mle->maybe_map, 0,
1114                                        sizeof(mle->maybe_map));
1115                                 memset(mle->response_map, 0,
1116                                        sizeof(mle->response_map));
1117                                 memcpy(mle->vote_map, mle->node_map,
1118                                        sizeof(mle->node_map));
1119                                 mle->u.res = res;
1120                                 set_bit(dlm->node_num, mle->maybe_map);
1121
1122                                 ret = -EAGAIN;
1123                                 goto next;
1124                         }
1125
1126                         clear_bit(node, mle->maybe_map);
1127                         if (node > dlm->node_num)
1128                                 goto next;
1129
1130                         mlog(0, "dead node in map!\n");
1131                         /* yuck. go back and re-contact all nodes
1132                          * in the vote_map, removing this node. */
1133                         memset(mle->response_map, 0,
1134                                sizeof(mle->response_map));
1135                 }
1136                 ret = -EAGAIN;
1137 next:
1138                 node = dlm_bitmap_diff_iter_next(&bdi, &sc);
1139         }
1140         return ret;
1141 }
1142
1143
1144 /*
1145  * DLM_MASTER_REQUEST_MSG
1146  *
1147  * returns: 0 on success,
1148  *          -errno on a network error
1149  *
1150  * on error, the caller should assume the target node is "dead"
1151  *
1152  */
1153
1154 static int dlm_do_master_request(struct dlm_master_list_entry *mle, int to)
1155 {
1156         struct dlm_ctxt *dlm = mle->dlm;
1157         struct dlm_master_request request;
1158         int ret, response=0, resend;
1159
1160         memset(&request, 0, sizeof(request));
1161         request.node_idx = dlm->node_num;
1162
1163         BUG_ON(mle->type == DLM_MLE_MIGRATION);
1164
1165         if (mle->type != DLM_MLE_MASTER) {
1166                 request.namelen = mle->u.name.len;
1167                 memcpy(request.name, mle->u.name.name, request.namelen);
1168         } else {
1169                 request.namelen = mle->u.res->lockname.len;
1170                 memcpy(request.name, mle->u.res->lockname.name,
1171                         request.namelen);
1172         }
1173
1174 again:
1175         ret = o2net_send_message(DLM_MASTER_REQUEST_MSG, dlm->key, &request,
1176                                  sizeof(request), to, &response);
1177         if (ret < 0)  {
1178                 if (ret == -ESRCH) {
1179                         /* should never happen */
1180                         mlog(ML_ERROR, "TCP stack not ready!\n");
1181                         BUG();
1182                 } else if (ret == -EINVAL) {
1183                         mlog(ML_ERROR, "bad args passed to o2net!\n");
1184                         BUG();
1185                 } else if (ret == -ENOMEM) {
1186                         mlog(ML_ERROR, "out of memory while trying to send "
1187                              "network message!  retrying\n");
1188                         /* this is totally crude */
1189                         msleep(50);
1190                         goto again;
1191                 } else if (!dlm_is_host_down(ret)) {
1192                         /* not a network error. bad. */
1193                         mlog_errno(ret);
1194                         mlog(ML_ERROR, "unhandled error!\n");
1195                         BUG();
1196                 }
1197                 /* all other errors should be network errors,
1198                  * and likely indicate node death */
1199                 mlog(ML_ERROR, "link to %d went down!\n", to);
1200                 goto out;
1201         }
1202
1203         ret = 0;
1204         resend = 0;
1205         spin_lock(&mle->spinlock);
1206         switch (response) {
1207                 case DLM_MASTER_RESP_YES:
1208                         set_bit(to, mle->response_map);
1209                         mlog(0, "node %u is the master, response=YES\n", to);
1210                         mle->master = to;
1211                         break;
1212                 case DLM_MASTER_RESP_NO:
1213                         mlog(0, "node %u not master, response=NO\n", to);
1214                         set_bit(to, mle->response_map);
1215                         break;
1216                 case DLM_MASTER_RESP_MAYBE:
1217                         mlog(0, "node %u not master, response=MAYBE\n", to);
1218                         set_bit(to, mle->response_map);
1219                         set_bit(to, mle->maybe_map);
1220                         break;
1221                 case DLM_MASTER_RESP_ERROR:
1222                         mlog(0, "node %u hit an error, resending\n", to);
1223                         resend = 1;
1224                         response = 0;
1225                         break;
1226                 default:
1227                         mlog(ML_ERROR, "bad response! %u\n", response);
1228                         BUG();
1229         }
1230         spin_unlock(&mle->spinlock);
1231         if (resend) {
1232                 /* this is also totally crude */
1233                 msleep(50);
1234                 goto again;
1235         }
1236
1237 out:
1238         return ret;
1239 }
1240
1241 /*
1242  * locks that can be taken here:
1243  * dlm->spinlock
1244  * res->spinlock
1245  * mle->spinlock
1246  * dlm->master_list
1247  *
1248  * if possible, TRIM THIS DOWN!!!
1249  */
1250 int dlm_master_request_handler(struct o2net_msg *msg, u32 len, void *data)
1251 {
1252         u8 response = DLM_MASTER_RESP_MAYBE;
1253         struct dlm_ctxt *dlm = data;
1254         struct dlm_lock_resource *res;
1255         struct dlm_master_request *request = (struct dlm_master_request *) msg->buf;
1256         struct dlm_master_list_entry *mle = NULL, *tmpmle = NULL;
1257         char *name;
1258         unsigned int namelen;
1259         int found, ret;
1260         int set_maybe;
1261
1262         if (!dlm_grab(dlm))
1263                 return DLM_MASTER_RESP_NO;
1264
1265         if (!dlm_domain_fully_joined(dlm)) {
1266                 response = DLM_MASTER_RESP_NO;
1267                 goto send_response;
1268         }
1269
1270         name = request->name;
1271         namelen = request->namelen;
1272
1273         if (namelen > DLM_LOCKID_NAME_MAX) {
1274                 response = DLM_IVBUFLEN;
1275                 goto send_response;
1276         }
1277
1278 way_up_top:
1279         spin_lock(&dlm->spinlock);
1280         res = __dlm_lookup_lockres(dlm, name, namelen);
1281         if (res) {
1282                 spin_unlock(&dlm->spinlock);
1283
1284                 /* take care of the easy cases up front */
1285                 spin_lock(&res->spinlock);
1286                 if (res->state & DLM_LOCK_RES_RECOVERING) {
1287                         spin_unlock(&res->spinlock);
1288                         mlog(0, "returning DLM_MASTER_RESP_ERROR since res is "
1289                              "being recovered\n");
1290                         response = DLM_MASTER_RESP_ERROR;
1291                         if (mle)
1292                                 kmem_cache_free(dlm_mle_cache, mle);
1293                         goto send_response;
1294                 }
1295
1296                 if (res->owner == dlm->node_num) {
1297                         u32 flags = DLM_ASSERT_MASTER_MLE_CLEANUP;
1298                         spin_unlock(&res->spinlock);
1299                         // mlog(0, "this node is the master\n");
1300                         response = DLM_MASTER_RESP_YES;
1301                         if (mle)
1302                                 kmem_cache_free(dlm_mle_cache, mle);
1303
1304                         /* this node is the owner.
1305                          * there is some extra work that needs to
1306                          * happen now.  the requesting node has
1307                          * caused all nodes up to this one to
1308                          * create mles.  this node now needs to
1309                          * go back and clean those up. */
1310                         mlog(0, "%u is the owner of %.*s, cleaning everyone else\n",
1311                              dlm->node_num, res->lockname.len, res->lockname.name);
1312                         ret = dlm_dispatch_assert_master(dlm, res, 1,
1313                                                          request->node_idx,
1314                                                          flags);
1315                         if (ret < 0) {
1316                                 mlog(ML_ERROR, "failed to dispatch assert "
1317                                      "master work\n");
1318                                 response = DLM_MASTER_RESP_ERROR;
1319                         }
1320                         goto send_response;
1321                 } else if (res->owner != DLM_LOCK_RES_OWNER_UNKNOWN) {
1322                         spin_unlock(&res->spinlock);
1323                         // mlog(0, "node %u is the master\n", res->owner);
1324                         response = DLM_MASTER_RESP_NO;
1325                         if (mle)
1326                                 kmem_cache_free(dlm_mle_cache, mle);
1327                         goto send_response;
1328                 }
1329
1330                 /* ok, there is no owner.  either this node is
1331                  * being blocked, or it is actively trying to
1332                  * master this lock. */
1333                 if (!(res->state & DLM_LOCK_RES_IN_PROGRESS)) {
1334                         mlog(ML_ERROR, "lock with no owner should be "
1335                              "in-progress!\n");
1336                         BUG();
1337                 }
1338
1339                 // mlog(0, "lockres is in progress...\n");
1340                 spin_lock(&dlm->master_lock);
1341                 found = dlm_find_mle(dlm, &tmpmle, name, namelen);
1342                 if (!found) {
1343                         mlog(ML_ERROR, "no mle found for this lock!\n");
1344                         BUG();
1345                 }
1346                 set_maybe = 1;
1347                 spin_lock(&tmpmle->spinlock);
1348                 if (tmpmle->type == DLM_MLE_BLOCK) {
1349                         // mlog(0, "this node is waiting for "
1350                         // "lockres to be mastered\n");
1351                         response = DLM_MASTER_RESP_NO;
1352                 } else if (tmpmle->type == DLM_MLE_MIGRATION) {
1353                         mlog(0, "node %u is master, but trying to migrate to "
1354                              "node %u.\n", tmpmle->master, tmpmle->new_master);
1355                         if (tmpmle->master == dlm->node_num) {
1356                                 response = DLM_MASTER_RESP_YES;
1357                                 mlog(ML_ERROR, "no owner on lockres, but this "
1358                                      "node is trying to migrate it to %u?!\n",
1359                                      tmpmle->new_master);
1360                                 BUG();
1361                         } else {
1362                                 /* the real master can respond on its own */
1363                                 response = DLM_MASTER_RESP_NO;
1364                         }
1365                 } else if (tmpmle->master != DLM_LOCK_RES_OWNER_UNKNOWN) {
1366                         set_maybe = 0;
1367                         if (tmpmle->master == dlm->node_num)
1368                                 response = DLM_MASTER_RESP_YES;
1369                         else
1370                                 response = DLM_MASTER_RESP_NO;
1371                 } else {
1372                         // mlog(0, "this node is attempting to "
1373                         // "master lockres\n");
1374                         response = DLM_MASTER_RESP_MAYBE;
1375                 }
1376                 if (set_maybe)
1377                         set_bit(request->node_idx, tmpmle->maybe_map);
1378                 spin_unlock(&tmpmle->spinlock);
1379
1380                 spin_unlock(&dlm->master_lock);
1381                 spin_unlock(&res->spinlock);
1382
1383                 /* keep the mle attached to heartbeat events */
1384                 dlm_put_mle(tmpmle);
1385                 if (mle)
1386                         kmem_cache_free(dlm_mle_cache, mle);
1387                 goto send_response;
1388         }
1389
1390         /*
1391          * lockres doesn't exist on this node
1392          * if there is an MLE_BLOCK, return NO
1393          * if there is an MLE_MASTER, return MAYBE
1394          * otherwise, add an MLE_BLOCK, return NO
1395          */
1396         spin_lock(&dlm->master_lock);
1397         found = dlm_find_mle(dlm, &tmpmle, name, namelen);
1398         if (!found) {
1399                 /* this lockid has never been seen on this node yet */
1400                 // mlog(0, "no mle found\n");
1401                 if (!mle) {
1402                         spin_unlock(&dlm->master_lock);
1403                         spin_unlock(&dlm->spinlock);
1404
1405                         mle = (struct dlm_master_list_entry *)
1406                                 kmem_cache_alloc(dlm_mle_cache, GFP_KERNEL);
1407                         if (!mle) {
1408                                 // bad bad bad... this sucks.
1409                                 response = DLM_MASTER_RESP_ERROR;
1410                                 goto send_response;
1411                         }
1412                         spin_lock(&dlm->spinlock);
1413                         dlm_init_mle(mle, DLM_MLE_BLOCK, dlm, NULL,
1414                                          name, namelen);
1415                         spin_unlock(&dlm->spinlock);
1416                         goto way_up_top;
1417                 }
1418
1419                 // mlog(0, "this is second time thru, already allocated, "
1420                 // "add the block.\n");
1421                 set_bit(request->node_idx, mle->maybe_map);
1422                 list_add(&mle->list, &dlm->master_list);
1423                 response = DLM_MASTER_RESP_NO;
1424         } else {
1425                 // mlog(0, "mle was found\n");
1426                 set_maybe = 1;
1427                 spin_lock(&tmpmle->spinlock);
1428                 if (tmpmle->type == DLM_MLE_BLOCK)
1429                         response = DLM_MASTER_RESP_NO;
1430                 else if (tmpmle->type == DLM_MLE_MIGRATION) {
1431                         mlog(0, "migration mle was found (%u->%u)\n",
1432                              tmpmle->master, tmpmle->new_master);
1433                         if (tmpmle->master == dlm->node_num) {
1434                                 mlog(ML_ERROR, "no lockres, but migration mle "
1435                                      "says that this node is master!\n");
1436                                 BUG();
1437                         }
1438                         /* real master can respond on its own */
1439                         response = DLM_MASTER_RESP_NO;
1440                 } else {
1441                         if (tmpmle->master == dlm->node_num) {
1442                                 response = DLM_MASTER_RESP_YES;
1443                                 set_maybe = 0;
1444                         } else
1445                                 response = DLM_MASTER_RESP_MAYBE;
1446                 }
1447                 if (set_maybe)
1448                         set_bit(request->node_idx, tmpmle->maybe_map);
1449                 spin_unlock(&tmpmle->spinlock);
1450         }
1451         spin_unlock(&dlm->master_lock);
1452         spin_unlock(&dlm->spinlock);
1453
1454         if (found) {
1455                 /* keep the mle attached to heartbeat events */
1456                 dlm_put_mle(tmpmle);
1457         }
1458 send_response:
1459         dlm_put(dlm);
1460         return response;
1461 }
1462
1463 /*
1464  * DLM_ASSERT_MASTER_MSG
1465  */
1466
1467
1468 /*
1469  * NOTE: this can be used for debugging
1470  * can periodically run all locks owned by this node
1471  * and re-assert across the cluster...
1472  */
1473 static int dlm_do_assert_master(struct dlm_ctxt *dlm, const char *lockname,
1474                                 unsigned int namelen, void *nodemap,
1475                                 u32 flags)
1476 {
1477         struct dlm_assert_master assert;
1478         int to, tmpret;
1479         struct dlm_node_iter iter;
1480         int ret = 0;
1481
1482         BUG_ON(namelen > O2NM_MAX_NAME_LEN);
1483
1484         /* note that if this nodemap is empty, it returns 0 */
1485         dlm_node_iter_init(nodemap, &iter);
1486         while ((to = dlm_node_iter_next(&iter)) >= 0) {
1487                 int r = 0;
1488                 mlog(0, "sending assert master to %d (%.*s)\n", to,
1489                      namelen, lockname);
1490                 memset(&assert, 0, sizeof(assert));
1491                 assert.node_idx = dlm->node_num;
1492                 assert.namelen = namelen;
1493                 memcpy(assert.name, lockname, namelen);
1494                 assert.flags = cpu_to_be32(flags);
1495
1496                 tmpret = o2net_send_message(DLM_ASSERT_MASTER_MSG, dlm->key,
1497                                             &assert, sizeof(assert), to, &r);
1498                 if (tmpret < 0) {
1499                         mlog(ML_ERROR, "assert_master returned %d!\n", tmpret);
1500                         if (!dlm_is_host_down(tmpret)) {
1501                                 mlog(ML_ERROR, "unhandled error!\n");
1502                                 BUG();
1503                         }
1504                         /* a node died.  finish out the rest of the nodes. */
1505                         mlog(ML_ERROR, "link to %d went down!\n", to);
1506                         /* any nonzero status return will do */
1507                         ret = tmpret;
1508                 } else if (r < 0) {
1509                         /* ok, something is horribly messed up.  kill thyself. */
1510                         mlog(ML_ERROR,"during assert master of %.*s to %u, "
1511                              "got %d.\n", namelen, lockname, to, r);
1512                         dlm_dump_lock_resources(dlm);
1513                         BUG();
1514                 }
1515         }
1516
1517         return ret;
1518 }
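
     /*
      * A rough usage sketch for dlm_do_assert_master(), modelled on
      * dlm_assert_master_worker() and dlm_finish_migration() later in
      * this file (an illustration only, not an additional caller):
      * copy the domain map, drop this node from it, then assert to
      * everyone left.
      *
      *         unsigned long nodemap[BITS_TO_LONGS(O2NM_MAX_NODES)];
      *
      *         spin_lock(&dlm->spinlock);
      *         memcpy(nodemap, dlm->domain_map, sizeof(nodemap));
      *         spin_unlock(&dlm->spinlock);
      *         clear_bit(dlm->node_num, nodemap);
      *
      *         ret = dlm_do_assert_master(dlm, res->lockname.name,
      *                                    res->lockname.len, nodemap, 0);
      */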
1519
1520 /*
1521  * locks that can be taken here:
1522  * dlm->spinlock
1523  * res->spinlock
1524  * mle->spinlock
1525  * dlm->master_list
1526  *
1527  * if possible, TRIM THIS DOWN!!!
1528  */
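     /*
      * For reference, the nesting the handler below actually uses is
      * roughly this (a sketch of this one function, not a general
      * ordering rule):
      *
      *         spin_lock(&dlm->spinlock);
      *             spin_lock(&dlm->master_lock);    find and check the mle
      *             spin_unlock(&dlm->master_lock);
      *             spin_lock(&res->spinlock);       check the lockres owner/state
      *             spin_unlock(&res->spinlock);
      *         spin_unlock(&dlm->spinlock);
      *         spin_lock(&mle->spinlock);           set the master, wake waiters
      *         spin_unlock(&mle->spinlock);
      */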
1529 int dlm_assert_master_handler(struct o2net_msg *msg, u32 len, void *data)
1530 {
1531         struct dlm_ctxt *dlm = data;
1532         struct dlm_master_list_entry *mle = NULL;
1533         struct dlm_assert_master *assert = (struct dlm_assert_master *)msg->buf;
1534         struct dlm_lock_resource *res = NULL;
1535         char *name;
1536         unsigned int namelen;
1537         u32 flags;
1538
1539         if (!dlm_grab(dlm))
1540                 return 0;
1541
1542         name = assert->name;
1543         namelen = assert->namelen;
1544         flags = be32_to_cpu(assert->flags);
1545
1546         if (namelen > DLM_LOCKID_NAME_MAX) {
1547                 mlog(ML_ERROR, "Invalid name length!");
1548                 goto done;
1549         }
1550
1551         spin_lock(&dlm->spinlock);
1552
1553         if (flags)
1554                 mlog(0, "assert_master with flags: %u\n", flags);
1555
1556         /* find the MLE */
1557         spin_lock(&dlm->master_lock);
1558         if (!dlm_find_mle(dlm, &mle, name, namelen)) {
1559                 /* not an error, could be master just re-asserting */
1560                 mlog(0, "just got an assert_master from %u, but no "
1561                      "MLE for it! (%.*s)\n", assert->node_idx,
1562                      namelen, name);
1563         } else {
1564                 int bit = find_next_bit(mle->maybe_map, O2NM_MAX_NODES, 0);
1565                 if (bit >= O2NM_MAX_NODES) {
1566                         /* not necessarily an error, though less likely.
1567                          * could be master just re-asserting. */
1568                         mlog(ML_ERROR, "no bits set in the maybe_map, but %u "
1569                              "is asserting! (%.*s)\n", assert->node_idx,
1570                              namelen, name);
1571                 } else if (bit != assert->node_idx) {
1572                         if (flags & DLM_ASSERT_MASTER_MLE_CLEANUP) {
1573                                 mlog(0, "master %u was found, %u should "
1574                                      "back off\n", assert->node_idx, bit);
1575                         } else {
1576                                 /* with the fix for bug 569, a higher node
1577                                  * number winning the mastery will respond
1578                                  * YES to mastery requests, but this node
1579                                  * had no way of knowing.  let it pass. */
1580                                 mlog(ML_ERROR, "%u is the lowest node, "
1581                                      "%u is asserting. (%.*s)  %u must "
1582                                      "have begun after %u won.\n", bit,
1583                                      assert->node_idx, namelen, name, bit,
1584                                      assert->node_idx);
1585                         }
1586                 }
1587         }
1588         spin_unlock(&dlm->master_lock);
1589
1590         /* ok everything checks out with the MLE
1591          * now check to see if there is a lockres */
1592         res = __dlm_lookup_lockres(dlm, name, namelen);
1593         if (res) {
1594                 spin_lock(&res->spinlock);
1595                 if (res->state & DLM_LOCK_RES_RECOVERING)  {
1596                         mlog(ML_ERROR, "%u asserting but %.*s is "
1597                              "RECOVERING!\n", assert->node_idx, namelen, name);
1598                         goto kill;
1599                 }
1600                 if (!mle) {
1601                         if (res->owner != assert->node_idx) {
1602                                 mlog(ML_ERROR, "assert_master from "
1603                                           "%u, but current owner is "
1604                                           "%u! (%.*s)\n",
1605                                        assert->node_idx, res->owner,
1606                                        namelen, name);
1607                                 goto kill;
1608                         }
1609                 } else if (mle->type != DLM_MLE_MIGRATION) {
1610                         if (res->owner != DLM_LOCK_RES_OWNER_UNKNOWN) {
1611                                 /* owner is just re-asserting */
1612                                 if (res->owner == assert->node_idx) {
1613                                         mlog(0, "owner %u re-asserting on "
1614                                              "lock %.*s\n", assert->node_idx,
1615                                              namelen, name);
1616                                         goto ok;
1617                                 }
1618                                 mlog(ML_ERROR, "got assert_master from "
1619                                      "node %u, but %u is the owner! "
1620                                      "(%.*s)\n", assert->node_idx,
1621                                      res->owner, namelen, name);
1622                                 goto kill;
1623                         }
1624                         if (!(res->state & DLM_LOCK_RES_IN_PROGRESS)) {
1625                                 mlog(ML_ERROR, "got assert from %u, but lock "
1626                                      "with no owner should be "
1627                                      "in-progress! (%.*s)\n",
1628                                      assert->node_idx,
1629                                      namelen, name);
1630                                 goto kill;
1631                         }
1632                 } else /* mle->type == DLM_MLE_MIGRATION */ {
1633                         /* should only be getting an assert from new master */
1634                         if (assert->node_idx != mle->new_master) {
1635                                 mlog(ML_ERROR, "got assert from %u, but "
1636                                      "new master is %u, and old master "
1637                                      "was %u (%.*s)\n",
1638                                      assert->node_idx, mle->new_master,
1639                                      mle->master, namelen, name);
1640                                 goto kill;
1641                         }
1642
1643                 }
1644 ok:
1645                 spin_unlock(&res->spinlock);
1646         }
1647         spin_unlock(&dlm->spinlock);
1648
1649         // mlog(0, "woo!  got an assert_master from node %u!\n",
1650         //           assert->node_idx);
1651         if (mle) {
1652                 int extra_ref;
1653                 
1654                 spin_lock(&mle->spinlock);
1655                 extra_ref = !!(mle->type == DLM_MLE_BLOCK
1656                                || mle->type == DLM_MLE_MIGRATION);
1657                 mle->master = assert->node_idx;
1658                 atomic_set(&mle->woken, 1);
1659                 wake_up(&mle->wq);
1660                 spin_unlock(&mle->spinlock);
1661
1662                 if (mle->type == DLM_MLE_MIGRATION && res) {
1663                         mlog(0, "finishing off migration of lockres %.*s, "
1664                              "from %u to %u\n",
1665                                res->lockname.len, res->lockname.name,
1666                                dlm->node_num, mle->new_master);
1667                         spin_lock(&res->spinlock);
1668                         res->state &= ~DLM_LOCK_RES_MIGRATING;
1669                         dlm_change_lockres_owner(dlm, res, mle->new_master);
1670                         BUG_ON(res->state & DLM_LOCK_RES_DIRTY);
1671                         spin_unlock(&res->spinlock);
1672                 }
1673                 /* master is known, detach if not already detached */
1674                 dlm_mle_detach_hb_events(dlm, mle);
1675                 dlm_put_mle(mle);
1676                 
1677                 if (extra_ref) {
1678                         /* the assert master message now balances the extra
1679                          * ref given by the master / migration request message.
1680                          * if this is the last put, it will be removed
1681                          * from the list. */
1682                         dlm_put_mle(mle);
1683                 }
1684         }
1685
1686 done:
1687         if (res)
1688                 dlm_lockres_put(res);
1689         dlm_put(dlm);
1690         return 0;
1691
1692 kill:
1693         /* kill the caller! */
1694         spin_unlock(&res->spinlock);
1695         spin_unlock(&dlm->spinlock);
1696         dlm_lockres_put(res);
1697         mlog(ML_ERROR, "Bad message received from another node.  Dumping state "
1698              "and killing the other node now!  This node is OK and can continue.\n");
1699         dlm_dump_lock_resources(dlm);
1700         dlm_put(dlm);
1701         return -EINVAL;
1702 }
1703
1704 int dlm_dispatch_assert_master(struct dlm_ctxt *dlm,
1705                                struct dlm_lock_resource *res,
1706                                int ignore_higher, u8 request_from, u32 flags)
1707 {
1708         struct dlm_work_item *item;
1709         item = kcalloc(1, sizeof(*item), GFP_KERNEL);
1710         if (!item)
1711                 return -ENOMEM;
1712
1713
1714         /* queue up work for dlm_assert_master_worker */
1715         dlm_grab(dlm);  /* get an extra ref for the work item */
1716         dlm_init_work_item(dlm, item, dlm_assert_master_worker, NULL);
1717         item->u.am.lockres = res; /* already have a ref */
1718         /* can optionally ignore node numbers higher than this node */
1719         item->u.am.ignore_higher = ignore_higher;
1720         item->u.am.request_from = request_from;
1721         item->u.am.flags = flags;
1722
1723         spin_lock(&dlm->work_lock);
1724         list_add_tail(&item->list, &dlm->work_list);
1725         spin_unlock(&dlm->work_lock);
1726
1727         schedule_work(&dlm->dispatched_work);
1728         return 0;
1729 }
1730
1731 static void dlm_assert_master_worker(struct dlm_work_item *item, void *data)
1732 {
1733         struct dlm_ctxt *dlm = data;
1734         int ret = 0;
1735         struct dlm_lock_resource *res;
1736         unsigned long nodemap[BITS_TO_LONGS(O2NM_MAX_NODES)];
1737         int ignore_higher;
1738         int bit;
1739         u8 request_from;
1740         u32 flags;
1741
1742         dlm = item->dlm;
1743         res = item->u.am.lockres;
1744         ignore_higher = item->u.am.ignore_higher;
1745         request_from = item->u.am.request_from;
1746         flags = item->u.am.flags;
1747
1748         spin_lock(&dlm->spinlock);
1749         memcpy(nodemap, dlm->domain_map, sizeof(nodemap));
1750         spin_unlock(&dlm->spinlock);
1751
1752         clear_bit(dlm->node_num, nodemap);
1753         if (ignore_higher) {
1754                 /* if this is just to clear up mles for nodes below
1755                  * this node, do not send the message to the original
1756                  * caller or any node number higher than this */
1757                 clear_bit(request_from, nodemap);
1758                 bit = dlm->node_num;
1759                 while (1) {
1760                         bit = find_next_bit(nodemap, O2NM_MAX_NODES,
1761                                             bit+1);
1762                         if (bit >= O2NM_MAX_NODES)
1763                                 break;
1764                         clear_bit(bit, nodemap);
1765                 }
1766         }
1767
1768         /* this call now finishes out the nodemap
1769          * even if one or more nodes die */
1770         mlog(0, "worker about to master %.*s here, this=%u\n",
1771                      res->lockname.len, res->lockname.name, dlm->node_num);
1772         ret = dlm_do_assert_master(dlm, res->lockname.name,
1773                                    res->lockname.len,
1774                                    nodemap, flags);
1775         if (ret < 0) {
1776                 /* no need to restart, we are done */
1777                 mlog_errno(ret);
1778         }
1779
1780         dlm_lockres_put(res);
1781
1782         mlog(0, "finished with dlm_assert_master_worker\n");
1783 }
1784
1785
1786 /*
1787  * DLM_MIGRATE_LOCKRES
1788  */
1789
1790
1791 int dlm_migrate_lockres(struct dlm_ctxt *dlm, struct dlm_lock_resource *res,
1792                         u8 target)
1793 {
1794         struct dlm_master_list_entry *mle = NULL;
1795         struct dlm_master_list_entry *oldmle = NULL;
1796         struct dlm_migratable_lockres *mres = NULL;
1797         int ret = -EINVAL;
1798         const char *name;
1799         unsigned int namelen;
1800         int mle_added = 0;
1801         struct list_head *queue, *iter;
1802         int i;
1803         struct dlm_lock *lock;
1804         int empty = 1;
1805
1806         if (!dlm_grab(dlm))
1807                 return -EINVAL;
1808
1809         name = res->lockname.name;
1810         namelen = res->lockname.len;
1811
1812         mlog(0, "migrating %.*s to %u\n", namelen, name, target);
1813
1814         /*
1815          * ensure this lockres is a proper candidate for migration
1816          */
1817         spin_lock(&res->spinlock);
1818         if (res->owner == DLM_LOCK_RES_OWNER_UNKNOWN) {
1819                 mlog(0, "cannot migrate lockres with unknown owner!\n");
1820                 spin_unlock(&res->spinlock);
1821                 goto leave;
1822         }
1823         if (res->owner != dlm->node_num) {
1824                 mlog(0, "cannot migrate lockres this node doesn't own!\n");
1825                 spin_unlock(&res->spinlock);
1826                 goto leave;
1827         }
1828         mlog(0, "checking queues...\n");
1829         queue = &res->granted;
1830         for (i=0; i<3; i++) {
1831                 list_for_each(iter, queue) {
1832                         lock = list_entry(iter, struct dlm_lock, list);
1833                         empty = 0;
1834                         if (lock->ml.node == dlm->node_num) {
1835                                 mlog(0, "found a lock owned by this node "
1836                                      "still on the %s queue!  will not "
1837                                      "migrate this lockres\n",
1838                                      i==0 ? "granted" :
1839                                      (i==1 ? "converting" : "blocked"));
1840                                 spin_unlock(&res->spinlock);
1841                                 ret = -ENOTEMPTY;
1842                                 goto leave;
1843                         }
1844                 }
1845                 queue++;
1846         }
1847         mlog(0, "all locks on this lockres are nonlocal.  continuing\n");
1848         spin_unlock(&res->spinlock);
1849
1850         /* no work to do */
1851         if (empty) {
1852                 mlog(0, "no locks were found on this lockres! done!\n");
1853                 ret = 0;
1854                 goto leave;
1855         }
1856
1857         /*
1858          * preallocate up front
1859          * if this fails, abort
1860          */
1861
1862         ret = -ENOMEM;
1863         mres = (struct dlm_migratable_lockres *) __get_free_page(GFP_KERNEL);
1864         if (!mres) {
1865                 mlog_errno(ret);
1866                 goto leave;
1867         }
1868
1869         mle = (struct dlm_master_list_entry *) kmem_cache_alloc(dlm_mle_cache,
1870                                                                 GFP_KERNEL);
1871         if (!mle) {
1872                 mlog_errno(ret);
1873                 goto leave;
1874         }
1875         ret = 0;
1876
1877         /*
1878          * find a node to migrate the lockres to
1879          */
1880
1881         mlog(0, "picking a migration node\n");
1882         spin_lock(&dlm->spinlock);
1883         /* pick a new node */
1884         if (!test_bit(target, dlm->domain_map) ||
1885             target >= O2NM_MAX_NODES) {
1886                 target = dlm_pick_migration_target(dlm, res);
1887         }
1888         mlog(0, "node %u chosen for migration\n", target);
1889
1890         if (target >= O2NM_MAX_NODES ||
1891             !test_bit(target, dlm->domain_map)) {
1892                 /* target chosen is not alive */
1893                 ret = -EINVAL;
1894         }
1895
1896         if (ret) {
1897                 spin_unlock(&dlm->spinlock);
1898                 goto fail;
1899         }
1900
1901         mlog(0, "continuing with target = %u\n", target);
1902
1903         /*
1904          * clear any existing master requests and
1905          * add the migration mle to the list
1906          */
1907         spin_lock(&dlm->master_lock);
1908         ret = dlm_add_migration_mle(dlm, res, mle, &oldmle, name,
1909                                     namelen, target, dlm->node_num);
1910         spin_unlock(&dlm->master_lock);
1911         spin_unlock(&dlm->spinlock);
1912
1913         if (ret == -EEXIST) {
1914                 mlog(0, "another process is already migrating it\n");
1915                 goto fail;
1916         }
1917         mle_added = 1;
1918
1919         /*
1920          * set the MIGRATING flag and flush asts
1921          * if we fail after this we need to re-dirty the lockres
1922          */
1923         if (dlm_mark_lockres_migrating(dlm, res, target) < 0) {
1924                 mlog(ML_ERROR, "tried to migrate %.*s to %u, but "
1925                      "the target went down.\n", res->lockname.len,
1926                      res->lockname.name, target);
1927                 spin_lock(&res->spinlock);
1928                 res->state &= ~DLM_LOCK_RES_MIGRATING;
1929                 spin_unlock(&res->spinlock);
1930                 ret = -EINVAL;
1931         }
1932
1933 fail:
1934         if (oldmle) {
1935                 /* master is known, detach if not already detached */
1936                 dlm_mle_detach_hb_events(dlm, oldmle);
1937                 dlm_put_mle(oldmle);
1938         }
1939
1940         if (ret < 0) {
1941                 if (mle_added) {
1942                         dlm_mle_detach_hb_events(dlm, mle);
1943                         dlm_put_mle(mle);
1944                 } else if (mle) {
1945                         kmem_cache_free(dlm_mle_cache, mle);
1946                 }
1947                 goto leave;
1948         }
1949
1950         /*
1951          * at this point, we have a migration target, an mle
1952          * in the master list, and the MIGRATING flag set on
1953          * the lockres
1954          */
1955
1956
1957         /* get an extra reference on the mle.
1958          * otherwise the assert_master from the new
1959          * master will destroy this.
1960          * also, make sure that all callers of dlm_get_mle
1961          * take both dlm->spinlock and dlm->master_lock */
1962         spin_lock(&dlm->spinlock);
1963         spin_lock(&dlm->master_lock);
1964         dlm_get_mle(mle);
1965         spin_unlock(&dlm->master_lock);
1966         spin_unlock(&dlm->spinlock);
1967
1968         /* notify new node and send all lock state */
1969         /* call send_one_lockres with migration flag.
1970          * this serves as notice to the target node that a
1971          * migration is starting. */
1972         ret = dlm_send_one_lockres(dlm, res, mres, target,
1973                                    DLM_MRES_MIGRATION);
1974
1975         if (ret < 0) {
1976                 mlog(0, "migration to node %u failed with %d\n",
1977                      target, ret);
1978                 /* migration failed, detach and clean up mle */
1979                 dlm_mle_detach_hb_events(dlm, mle);
1980                 dlm_put_mle(mle);
1981                 dlm_put_mle(mle);
1982                 goto leave;
1983         }
1984
1985         /* at this point, the target sends a message to all nodes,
1986          * (using dlm_do_migrate_request).  this node is skipped since
1987          * we had to put an mle in the list to begin the process.  this
1988          * node now waits for target to do an assert master.  this node
1989          * will be the last one notified, ensuring that the migration
1990          * is complete everywhere.  if the target dies while this is
1991          * going on, some nodes could potentially see the target as the
1992          * master, so it is important that my recovery finds the migration
1993          * mle and sets the master to UNKNOWN. */
1994
1995
1996         /* wait for new node to assert master */
1997         while (1) {
1998                 ret = wait_event_interruptible_timeout(mle->wq,
1999                                         (atomic_read(&mle->woken) == 1),
2000                                         msecs_to_jiffies(5000));
2001
2002                 if (ret >= 0) {
2003                         if (atomic_read(&mle->woken) == 1 ||
2004                             res->owner == target)
2005                                 break;
2006
2007                         mlog(0, "timed out during migration\n");
2008                 }
2009                 if (ret == -ERESTARTSYS) {
2010                         /* migration failed, detach and clean up mle */
2011                         dlm_mle_detach_hb_events(dlm, mle);
2012                         dlm_put_mle(mle);
2013                         dlm_put_mle(mle);
2014                         goto leave;
2015                 }
2016                 /* TODO: if node died: stop, clean up, return error */
2017         }
2018
2019         /* all done, set the owner, clear the flag */
2020         spin_lock(&res->spinlock);
2021         dlm_set_lockres_owner(dlm, res, target);
2022         res->state &= ~DLM_LOCK_RES_MIGRATING;
2023         dlm_remove_nonlocal_locks(dlm, res);
2024         spin_unlock(&res->spinlock);
2025         wake_up(&res->wq);
2026
2027         /* master is known, detach if not already detached */
2028         dlm_mle_detach_hb_events(dlm, mle);
2029         dlm_put_mle(mle);
2030         ret = 0;
2031
2032         dlm_lockres_calc_usage(dlm, res);
2033
2034 leave:
2035         /* re-dirty the lockres if we failed */
2036         if (ret < 0)
2037                 dlm_kick_thread(dlm, res);
2038
2039         /* TODO: cleanup */
2040         if (mres)
2041                 free_page((unsigned long)mres);
2042
2043         dlm_put(dlm);
2044
2045         mlog(0, "returning %d\n", ret);
2046         return ret;
2047 }
2048 EXPORT_SYMBOL_GPL(dlm_migrate_lockres);
2049
2050 int dlm_lock_basts_flushed(struct dlm_ctxt *dlm, struct dlm_lock *lock)
2051 {
2052         int ret;
2053         spin_lock(&dlm->ast_lock);
2054         spin_lock(&lock->spinlock);
2055         ret = (list_empty(&lock->bast_list) && !lock->bast_pending);
2056         spin_unlock(&lock->spinlock);
2057         spin_unlock(&dlm->ast_lock);
2058         return ret;
2059 }
2060
2061 static int dlm_migration_can_proceed(struct dlm_ctxt *dlm,
2062                                      struct dlm_lock_resource *res,
2063                                      u8 mig_target)
2064 {
2065         int can_proceed;
2066         spin_lock(&res->spinlock);
2067         can_proceed = !!(res->state & DLM_LOCK_RES_MIGRATING);
2068         spin_unlock(&res->spinlock);
2069
2070         /* target has died, so make the caller break out of the 
2071          * wait_event, but caller must recheck the domain_map */
2072         spin_lock(&dlm->spinlock);
2073         if (!test_bit(mig_target, dlm->domain_map))
2074                 can_proceed = 1;
2075         spin_unlock(&dlm->spinlock);
2076         return can_proceed;
2077 }
2078
2079 int dlm_lockres_is_dirty(struct dlm_ctxt *dlm, struct dlm_lock_resource *res)
2080 {
2081         int ret;
2082         spin_lock(&res->spinlock);
2083         ret = !!(res->state & DLM_LOCK_RES_DIRTY);
2084         spin_unlock(&res->spinlock);
2085         return ret;
2086 }
2087
2088
2089 static int dlm_mark_lockres_migrating(struct dlm_ctxt *dlm,
2090                                        struct dlm_lock_resource *res,
2091                                        u8 target)
2092 {
2093         int ret = 0;
2094
2095         mlog(0, "dlm_mark_lockres_migrating: %.*s, from %u to %u\n",
2096                res->lockname.len, res->lockname.name, dlm->node_num,
2097                target);
2098         /* need to set MIGRATING flag on lockres.  this is done by
2099          * ensuring that all asts have been flushed for this lockres. */
2100         spin_lock(&res->spinlock);
2101         BUG_ON(res->migration_pending);
2102         res->migration_pending = 1;
2103         /* strategy is to reserve an extra ast then release
2104          * it below, letting the release do all of the work */
2105         __dlm_lockres_reserve_ast(res);
2106         spin_unlock(&res->spinlock);
2107
2108         /* now flush all the pending asts.. hang out for a bit */
2109         dlm_kick_thread(dlm, res);
2110         wait_event(dlm->ast_wq, !dlm_lockres_is_dirty(dlm, res));
2111         dlm_lockres_release_ast(dlm, res);
2112
2113         mlog(0, "about to wait on migration_wq, dirty=%s\n",
2114                res->state & DLM_LOCK_RES_DIRTY ? "yes" : "no");
2115         /* if the extra ref we just put was the final one, this
2116          * will pass thru immediately.  otherwise, we need to wait
2117          * for the last ast to finish. */
2118 again:
2119         ret = wait_event_interruptible_timeout(dlm->migration_wq,
2120                    dlm_migration_can_proceed(dlm, res, target),
2121                    msecs_to_jiffies(1000));
2122         if (ret < 0) {
2123                 mlog(0, "woken again: migrating? %s, dead? %s\n",
2124                        res->state & DLM_LOCK_RES_MIGRATING ? "yes":"no",
2125                        test_bit(target, dlm->domain_map) ? "no":"yes");
2126         } else {
2127                 mlog(0, "all is well: migrating? %s, dead? %s\n",
2128                        res->state & DLM_LOCK_RES_MIGRATING ? "yes":"no",
2129                        test_bit(target, dlm->domain_map) ? "no":"yes");
2130         }
2131         if (!dlm_migration_can_proceed(dlm, res, target)) {
2132                 mlog(0, "trying again...\n");
2133                 goto again;
2134         }
2135
2136         /* did the target go down or die? */
2137         spin_lock(&dlm->spinlock);
2138         if (!test_bit(target, dlm->domain_map)) {
2139                 mlog(ML_ERROR, "aha. migration target %u just went down\n",
2140                      target);
2141                 ret = -EHOSTDOWN;
2142         }
2143         spin_unlock(&dlm->spinlock);
2144
2145         /*
2146          * at this point:
2147          *
2148          *   o the DLM_LOCK_RES_MIGRATING flag is set
2149          *   o there are no pending asts on this lockres
2150          *   o all processes trying to reserve an ast on this
2151          *     lockres must wait for the MIGRATING flag to clear
2152          */
2153         return ret;
2154 }
2155
2156 /* last step in the migration process.
2157  * original master calls this to free all of the dlm_lock
2158  * structures that used to be for other nodes. */
2159 static void dlm_remove_nonlocal_locks(struct dlm_ctxt *dlm,
2160                                       struct dlm_lock_resource *res)
2161 {
2162         struct list_head *iter, *iter2;
2163         struct list_head *queue = &res->granted;
2164         int i;
2165         struct dlm_lock *lock;
2166
2167         assert_spin_locked(&res->spinlock);
2168
2169         BUG_ON(res->owner == dlm->node_num);
2170
2171         for (i=0; i<3; i++) {
2172                 list_for_each_safe(iter, iter2, queue) {
2173                         lock = list_entry(iter, struct dlm_lock, list);
2174                         if (lock->ml.node != dlm->node_num) {
2175                                 mlog(0, "putting lock for node %u\n",
2176                                      lock->ml.node);
2177                                 /* be extra careful */
2178                                 BUG_ON(!list_empty(&lock->ast_list));
2179                                 BUG_ON(!list_empty(&lock->bast_list));
2180                                 BUG_ON(lock->ast_pending);
2181                                 BUG_ON(lock->bast_pending);
2182                                 list_del_init(&lock->list);
2183                                 dlm_lock_put(lock);
2184                         }
2185                 }
2186                 queue++;
2187         }
2188 }
2189
2190 /* for now this is not too intelligent.  we will
2191  * need stats to make this do the right thing.
2192  * this just finds the first lock on one of the
2193  * queues and uses that node as the target. */
2194 static u8 dlm_pick_migration_target(struct dlm_ctxt *dlm,
2195                                     struct dlm_lock_resource *res)
2196 {
2197         int i;
2198         struct list_head *queue = &res->granted;
2199         struct list_head *iter;
2200         struct dlm_lock *lock;
2201         int nodenum;
2202
2203         assert_spin_locked(&dlm->spinlock);
2204
2205         spin_lock(&res->spinlock);
2206         for (i=0; i<3; i++) {
2207                 list_for_each(iter, queue) {
2208                         /* up to the caller to make sure this node
2209                          * is alive */
2210                         lock = list_entry(iter, struct dlm_lock, list);
2211                         if (lock->ml.node != dlm->node_num) {
2212                                 spin_unlock(&res->spinlock);
2213                                 return lock->ml.node;
2214                         }
2215                 }
2216                 queue++;
2217         }
2218         spin_unlock(&res->spinlock);
2219         mlog(0, "have not found a suitable target yet! checking domain map\n");
2220
2221         /* ok now we're getting desperate.  pick anyone alive. */
2222         nodenum = -1;
2223         while (1) {
2224                 nodenum = find_next_bit(dlm->domain_map,
2225                                         O2NM_MAX_NODES, nodenum+1);
2226                 mlog(0, "found %d in domain map\n", nodenum);
2227                 if (nodenum >= O2NM_MAX_NODES)
2228                         break;
2229                 if (nodenum != dlm->node_num) {
2230                         mlog(0, "picking %d\n", nodenum);
2231                         return nodenum;
2232                 }
2233         }
2234
2235         mlog(0, "giving up.  no master to migrate to\n");
2236         return DLM_LOCK_RES_OWNER_UNKNOWN;
2237 }
2238
2239
2240
2241 /* this is called by the new master once all lockres
2242  * data has been received */
2243 static int dlm_do_migrate_request(struct dlm_ctxt *dlm,
2244                                   struct dlm_lock_resource *res,
2245                                   u8 master, u8 new_master,
2246                                   struct dlm_node_iter *iter)
2247 {
2248         struct dlm_migrate_request migrate;
2249         int ret, status = 0;
2250         int nodenum;
2251
2252         memset(&migrate, 0, sizeof(migrate));
2253         migrate.namelen = res->lockname.len;
2254         memcpy(migrate.name, res->lockname.name, migrate.namelen);
2255         migrate.new_master = new_master;
2256         migrate.master = master;
2257
2258         ret = 0;
2259
2260         /* send message to all nodes, except the master and myself */
2261         while ((nodenum = dlm_node_iter_next(iter)) >= 0) {
2262                 if (nodenum == master ||
2263                     nodenum == new_master)
2264                         continue;
2265
2266                 ret = o2net_send_message(DLM_MIGRATE_REQUEST_MSG, dlm->key,
2267                                          &migrate, sizeof(migrate), nodenum,
2268                                          &status);
2269                 if (ret < 0)
2270                         mlog_errno(ret);
2271                 else if (status < 0) {
2272                         mlog(0, "migrate request (node %u) returned %d!\n",
2273                              nodenum, status);
2274                         ret = status;
2275                 }
2276         }
2277
2278         if (ret < 0)
2279                 mlog_errno(ret);
2280
2281         mlog(0, "returning ret=%d\n", ret);
2282         return ret;
2283 }
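
     /*
      * Sketch of how the node iterator is prepared before calling
      * dlm_do_migrate_request() (taken from dlm_finish_migration()
      * below): every live node except the old master and this node
      * gets the migrate request.
      *
      *         struct dlm_node_iter iter;
      *
      *         spin_lock(&dlm->spinlock);
      *         dlm_node_iter_init(dlm->domain_map, &iter);
      *         clear_bit(old_master, iter.node_map);
      *         clear_bit(dlm->node_num, iter.node_map);
      *         spin_unlock(&dlm->spinlock);
      *
      *         ret = dlm_do_migrate_request(dlm, res, old_master,
      *                                      dlm->node_num, &iter);
      */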
2284
2285
2286 /* if there is an existing mle for this lockres, we now know who the master is
2287  * (the one who sent us *this* message), so we can clear it up right away.
2288  * since the process that put the mle on the list still has a reference to it,
2289  * we can unhash it now, set the master and wake the process.  as a result,
2290  * we will have no mle in the list to start with.  now we can add an mle for
2291  * the migration and this should be the only one found for those scanning the
2292  * list.  */
2293 int dlm_migrate_request_handler(struct o2net_msg *msg, u32 len, void *data)
2294 {
2295         struct dlm_ctxt *dlm = data;
2296         struct dlm_lock_resource *res = NULL;
2297         struct dlm_migrate_request *migrate = (struct dlm_migrate_request *) msg->buf;
2298         struct dlm_master_list_entry *mle = NULL, *oldmle = NULL;
2299         const char *name;
2300         unsigned int namelen;
2301         int ret = 0;
2302
2303         if (!dlm_grab(dlm))
2304                 return -EINVAL;
2305
2306         name = migrate->name;
2307         namelen = migrate->namelen;
2308
2309         /* preallocate.. if this fails, abort */
2310         mle = (struct dlm_master_list_entry *) kmem_cache_alloc(dlm_mle_cache,
2311                                                          GFP_KERNEL);
2312
2313         if (!mle) {
2314                 ret = -ENOMEM;
2315                 goto leave;
2316         }
2317
2318         /* check for pre-existing lock */
2319         spin_lock(&dlm->spinlock);
2320         res = __dlm_lookup_lockres(dlm, name, namelen);
2321         spin_lock(&dlm->master_lock);
2322
2323         if (res) {
2324                 spin_lock(&res->spinlock);
2325                 if (res->state & DLM_LOCK_RES_RECOVERING) {
2326                         /* if all is working ok, this can only mean that we got
2327                          * a migrate request from a node that we now see as
2328                          * dead.  what can we do here?  drop it to the floor? */
2329                         spin_unlock(&res->spinlock);
2330                         mlog(ML_ERROR, "Got a migrate request, but the "
2331                              "lockres is marked as recovering!");
2332                         kmem_cache_free(dlm_mle_cache, mle);
2333                         ret = -EINVAL; /* need a better solution */
2334                         goto unlock;
2335                 }
2336                 res->state |= DLM_LOCK_RES_MIGRATING;
2337                 spin_unlock(&res->spinlock);
2338         }
2339
2340         /* ignore status.  only nonzero status would BUG. */
2341         ret = dlm_add_migration_mle(dlm, res, mle, &oldmle,
2342                                     name, namelen,
2343                                     migrate->new_master,
2344                                     migrate->master);
2345
2346 unlock:
2347         spin_unlock(&dlm->master_lock);
2348         spin_unlock(&dlm->spinlock);
2349
2350         if (oldmle) {
2351                 /* master is known, detach if not already detached */
2352                 dlm_mle_detach_hb_events(dlm, oldmle);
2353                 dlm_put_mle(oldmle);
2354         }
2355
2356         if (res)
2357                 dlm_lockres_put(res);
2358 leave:
2359         dlm_put(dlm);
2360         return ret;
2361 }
2362
2363 /* must be holding dlm->spinlock and dlm->master_lock
2364  * when adding a migration mle, we can clear any other mles
2365  * in the master list because we know with certainty that
2366  * the master is "master".  so we remove any old mle from
2367  * the list after setting its master field, and then add
2368  * the new migration mle.  this way we can hold to the rule
2369  * of having only one mle for a given lock name at all times. */
2370 static int dlm_add_migration_mle(struct dlm_ctxt *dlm,
2371                                  struct dlm_lock_resource *res,
2372                                  struct dlm_master_list_entry *mle,
2373                                  struct dlm_master_list_entry **oldmle,
2374                                  const char *name, unsigned int namelen,
2375                                  u8 new_master, u8 master)
2376 {
2377         int found;
2378         int ret = 0;
2379
2380         *oldmle = NULL;
2381
2382         mlog_entry_void();
2383
2384         assert_spin_locked(&dlm->spinlock);
2385         assert_spin_locked(&dlm->master_lock);
2386
2387         /* caller is responsible for any ref taken here on oldmle */
2388         found = dlm_find_mle(dlm, oldmle, (char *)name, namelen);
2389         if (found) {
2390                 struct dlm_master_list_entry *tmp = *oldmle;
2391                 spin_lock(&tmp->spinlock);
2392                 if (tmp->type == DLM_MLE_MIGRATION) {
2393                         if (master == dlm->node_num) {
2394                                 /* ah another process raced me to it */
2395                                 mlog(0, "tried to migrate %.*s, but some "
2396                                      "process beat me to it\n",
2397                                      namelen, name);
2398                                 ret = -EEXIST;
2399                         } else {
2400                                 /* bad.  2 NODES are trying to migrate! */
2401                                 mlog(ML_ERROR, "migration error  mle: "
2402                                      "master=%u new_master=%u // request: "
2403                                      "master=%u new_master=%u // "
2404                                      "lockres=%.*s\n",
2405                                      tmp->master, tmp->new_master,
2406                                      master, new_master,
2407                                      namelen, name);
2408                                 BUG();
2409                         }
2410                 } else {
2411                         /* this is essentially what assert_master does */
2412                         tmp->master = master;
2413                         atomic_set(&tmp->woken, 1);
2414                         wake_up(&tmp->wq);
2415                         /* remove it from the list so that only one
2416                          * mle will be found */
2417                         list_del_init(&tmp->list);
2418                 }
2419                 spin_unlock(&tmp->spinlock);
2420         }
2421
2422         /* now add a migration mle to the tail of the list */
2423         dlm_init_mle(mle, DLM_MLE_MIGRATION, dlm, res, name, namelen);
2424         mle->new_master = new_master;
2425         mle->master = master;
2426         /* do this for consistency with other mle types */
2427         set_bit(new_master, mle->maybe_map);
2428         list_add(&mle->list, &dlm->master_list);
2429
2430         return ret;
2431 }
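
     /*
      * A minimal caller sketch for dlm_add_migration_mle(), showing the
      * locking described above (see dlm_migrate_lockres() and
      * dlm_migrate_request_handler() earlier in this file for the real
      * callers):
      *
      *         spin_lock(&dlm->spinlock);
      *         spin_lock(&dlm->master_lock);
      *         ret = dlm_add_migration_mle(dlm, res, mle, &oldmle, name,
      *                                     namelen, target, dlm->node_num);
      *         spin_unlock(&dlm->master_lock);
      *         spin_unlock(&dlm->spinlock);
      *
      *         if (oldmle) {
      *                 dlm_mle_detach_hb_events(dlm, oldmle);
      *                 dlm_put_mle(oldmle);
      *         }
      */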
2432
2433
2434 void dlm_clean_master_list(struct dlm_ctxt *dlm, u8 dead_node)
2435 {
2436         struct list_head *iter, *iter2;
2437         struct dlm_master_list_entry *mle;
2438         struct dlm_lock_resource *res;
2439
2440         mlog_entry("dlm=%s, dead node=%u\n", dlm->name, dead_node);
2441 top:
2442         assert_spin_locked(&dlm->spinlock);
2443
2444         /* clean the master list */
2445         spin_lock(&dlm->master_lock);
2446         list_for_each_safe(iter, iter2, &dlm->master_list) {
2447                 mle = list_entry(iter, struct dlm_master_list_entry, list);
2448
2449                 BUG_ON(mle->type != DLM_MLE_BLOCK &&
2450                        mle->type != DLM_MLE_MASTER &&
2451                        mle->type != DLM_MLE_MIGRATION);
2452
2453                 /* MASTER mles are initiated locally.  the waiting
2454                  * process will notice the node map change
2455                  * shortly.  let that happen as normal. */
2456                 if (mle->type == DLM_MLE_MASTER)
2457                         continue;
2458
2459
2460                 /* BLOCK mles are initiated by other nodes.
2461                  * need to clean up if the dead node would have
2462                  * been the master. */
2463                 if (mle->type == DLM_MLE_BLOCK) {
2464                         int bit;
2465
2466                         spin_lock(&mle->spinlock);
2467                         bit = find_next_bit(mle->maybe_map, O2NM_MAX_NODES, 0);
2468                         if (bit != dead_node) {
2469                                 mlog(0, "mle found, but dead node %u would "
2470                                      "not have been master\n", dead_node);
2471                                 spin_unlock(&mle->spinlock);
2472                         } else {
2473                                 /* must drop the refcount by one since the
2474                                  * assert_master will never arrive.  this
2475                                  * may result in the mle being unlinked and
2476                                  * freed, but there may still be a process
2477                                  * waiting in the dlmlock path which is fine. */
2478                                 mlog(ML_ERROR, "node %u was expected master\n",
2479                                      dead_node);
2480                                 atomic_set(&mle->woken, 1);
2481                                 spin_unlock(&mle->spinlock);
2482                                 wake_up(&mle->wq);
2483                                 /* final put will take care of list removal */
2484                                 __dlm_put_mle(mle);
2485                         }
2486                         continue;
2487                 }
2488
2489                 /* everything else is a MIGRATION mle */
2490
2491                 /* the rule for MIGRATION mles is that the master
2492                  * becomes UNKNOWN if *either* the original or
2493                  * the new master dies.  all UNKNOWN lockreses
2494                  * are sent to whichever node becomes the recovery
2495                  * master.  the new master is responsible for
2496                  * determining if there is still a master for
2497                  * this lockres, or if it needs to take over
2498                  * mastery.  either way, this node should expect
2499                  * another message to resolve this. */
2500                 if (mle->master != dead_node &&
2501                     mle->new_master != dead_node)
2502                         continue;
2503
2504                 /* if we have reached this point, this mle needs to
2505                  * be removed from the list and freed. */
2506
2507                 /* remove from the list early.  NOTE: unlinking
2508                  * list_head while in list_for_each_safe */
2509                 spin_lock(&mle->spinlock);
2510                 list_del_init(&mle->list);
2511                 atomic_set(&mle->woken, 1);
2512                 spin_unlock(&mle->spinlock);
2513                 wake_up(&mle->wq);
2514
2515                 mlog(0, "node %u died during migration from "
2516                      "%u to %u!\n", dead_node,
2517                      mle->master, mle->new_master);
2518                 /* if there is a lockres associated with this
2519                  * mle, find it and set its owner to UNKNOWN */
2520                 res = __dlm_lookup_lockres(dlm, mle->u.name.name,
2521                                         mle->u.name.len);
2522                 if (res) {
2523                         /* unfortunately if we hit this rare case, our
2524                          * lock ordering is messed up.  we need to drop
2525                          * the master lock so that we can take the
2526                          * lockres lock, meaning that we will have to
2527                          * restart from the head of the list. */
2528                         spin_unlock(&dlm->master_lock);
2529
2530                         /* move lockres onto recovery list */
2531                         spin_lock(&res->spinlock);
2532                         dlm_set_lockres_owner(dlm, res,
2533                                         DLM_LOCK_RES_OWNER_UNKNOWN);
2534                         dlm_move_lockres_to_recovery_list(dlm, res);
2535                         spin_unlock(&res->spinlock);
2536                         dlm_lockres_put(res);
2537
2538                         /* dump the mle */
2539                         spin_lock(&dlm->master_lock);
2540                         __dlm_put_mle(mle);
2541                         spin_unlock(&dlm->master_lock);
2542
2543                         /* restart */
2544                         goto top;
2545                 }
2546
2547                 /* this may be the last reference */
2548                 __dlm_put_mle(mle);
2549         }
2550         spin_unlock(&dlm->master_lock);
2551 }
2552
2553
2554 int dlm_finish_migration(struct dlm_ctxt *dlm, struct dlm_lock_resource *res,
2555                          u8 old_master)
2556 {
2557         struct dlm_node_iter iter;
2558         int ret = 0;
2559
2560         spin_lock(&dlm->spinlock);
2561         dlm_node_iter_init(dlm->domain_map, &iter);
2562         clear_bit(old_master, iter.node_map);
2563         clear_bit(dlm->node_num, iter.node_map);
2564         spin_unlock(&dlm->spinlock);
2565
2566         mlog(0, "now time to do a migrate request to other nodes\n");
2567         ret = dlm_do_migrate_request(dlm, res, old_master,
2568                                      dlm->node_num, &iter);
2569         if (ret < 0) {
2570                 mlog_errno(ret);
2571                 goto leave;
2572         }
2573
2574         mlog(0, "doing assert master of %.*s to all except the original node\n",
2575              res->lockname.len, res->lockname.name);
2576         /* this call now finishes out the nodemap
2577          * even if one or more nodes die */
2578         ret = dlm_do_assert_master(dlm, res->lockname.name,
2579                                    res->lockname.len, iter.node_map,
2580                                    DLM_ASSERT_MASTER_FINISH_MIGRATION);
2581         if (ret < 0) {
2582                 /* no longer need to retry.  all living nodes contacted. */
2583                 mlog_errno(ret);
2584                 ret = 0;
2585         }
2586
2587         memset(iter.node_map, 0, sizeof(iter.node_map));
2588         set_bit(old_master, iter.node_map);
2589         mlog(0, "doing assert master of %.*s back to %u\n",
2590              res->lockname.len, res->lockname.name, old_master);
2591         ret = dlm_do_assert_master(dlm, res->lockname.name,
2592                                    res->lockname.len, iter.node_map,
2593                                    DLM_ASSERT_MASTER_FINISH_MIGRATION);
2594         if (ret < 0) {
2595                 mlog(0, "assert master to original master failed "
2596                      "with %d.\n", ret);
2597                 /* the only nonzero status here would be because of
2598                  * a dead original node.  we're done. */
2599                 ret = 0;
2600         }
2601
2602         /* all done, set the owner, clear the flag */
2603         spin_lock(&res->spinlock);
2604         dlm_set_lockres_owner(dlm, res, dlm->node_num);
2605         res->state &= ~DLM_LOCK_RES_MIGRATING;
2606         spin_unlock(&res->spinlock);
2607         /* re-dirty it on the new master */
2608         dlm_kick_thread(dlm, res);
2609         wake_up(&res->wq);
2610 leave:
2611         return ret;
2612 }
2613
2614 /*
2615  * LOCKRES AST REFCOUNT
2616  * this is integral to migration
2617  */
2618
2619 /* for future intent to call an ast, reserve one ahead of time.
2620  * this should be called only after waiting on the lockres
2621  * with dlm_wait_on_lockres, and while still holding the
2622  * spinlock after the call. */
2623 void __dlm_lockres_reserve_ast(struct dlm_lock_resource *res)
2624 {
2625         assert_spin_locked(&res->spinlock);
2626         if (res->state & DLM_LOCK_RES_MIGRATING) {
2627                 __dlm_print_one_lock_resource(res);
2628         }
2629         BUG_ON(res->state & DLM_LOCK_RES_MIGRATING);
2630
2631         atomic_inc(&res->asts_reserved);
2632 }
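
     /*
      * Typical reserve/release pairing, a sketch based on
      * dlm_mark_lockres_migrating() above: reserve under the lockres
      * spinlock, flush the pending asts, then drop the reservation so
      * that migration can proceed.
      *
      *         spin_lock(&res->spinlock);
      *         __dlm_lockres_reserve_ast(res);
      *         spin_unlock(&res->spinlock);
      *
      *         dlm_kick_thread(dlm, res);
      *         wait_event(dlm->ast_wq, !dlm_lockres_is_dirty(dlm, res));
      *         dlm_lockres_release_ast(dlm, res);
      */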
2633
2634 /*
2635  * used to drop the reserved ast, either because it went unused,
2636  * or because the ast/bast was actually called.
2637  *
2638  * also, if there is a pending migration on this lockres,
2639  * and this was the last pending ast on the lockres,
2640  * atomically set the MIGRATING flag before we drop the lock.
2641  * this is how we ensure that migration can proceed with no
2642  * asts in progress.  note that it is ok if the state of the
2643  * queues is such that a lock should be granted in the future
2644  * or that a bast should be fired, because the new master will
2645  * shuffle the lists on this lockres as soon as it is migrated.
2646  */
2647 void dlm_lockres_release_ast(struct dlm_ctxt *dlm,
2648                              struct dlm_lock_resource *res)
2649 {
2650         if (!atomic_dec_and_lock(&res->asts_reserved, &res->spinlock))
2651                 return;
2652
2653         if (!res->migration_pending) {
2654                 spin_unlock(&res->spinlock);
2655                 return;
2656         }
2657
2658         BUG_ON(res->state & DLM_LOCK_RES_MIGRATING);
2659         res->migration_pending = 0;
2660         res->state |= DLM_LOCK_RES_MIGRATING;
2661         spin_unlock(&res->spinlock);
2662         wake_up(&res->wq);
2663         wake_up(&dlm->migration_wq);
2664 }