ocfs2/dlm: Fix race in adding/removing lockres' to/from the tracking list
linux-2.6.git: fs/ocfs2/dlm/dlmmaster.c
1 /* -*- mode: c; c-basic-offset: 8; -*-
2  * vim: noexpandtab sw=8 ts=8 sts=0:
3  *
4  * dlmmaster.c
5  *
6  * standalone DLM module
7  *
8  * Copyright (C) 2004 Oracle.  All rights reserved.
9  *
10  * This program is free software; you can redistribute it and/or
11  * modify it under the terms of the GNU General Public
12  * License as published by the Free Software Foundation; either
13  * version 2 of the License, or (at your option) any later version.
14  *
15  * This program is distributed in the hope that it will be useful,
16  * but WITHOUT ANY WARRANTY; without even the implied warranty of
17  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
18  * General Public License for more details.
19  *
20  * You should have received a copy of the GNU General Public
21  * License along with this program; if not, write to the
22  * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
23  * Boston, MA 02111-1307, USA.
24  *
25  */
26
27
28 #include <linux/module.h>
29 #include <linux/fs.h>
30 #include <linux/types.h>
31 #include <linux/slab.h>
32 #include <linux/highmem.h>
33 #include <linux/utsname.h>
34 #include <linux/init.h>
35 #include <linux/sysctl.h>
36 #include <linux/random.h>
37 #include <linux/blkdev.h>
38 #include <linux/socket.h>
39 #include <linux/inet.h>
40 #include <linux/spinlock.h>
41 #include <linux/delay.h>
42
43
44 #include "cluster/heartbeat.h"
45 #include "cluster/nodemanager.h"
46 #include "cluster/tcp.h"
47
48 #include "dlmapi.h"
49 #include "dlmcommon.h"
50 #include "dlmdomain.h"
51 #include "dlmdebug.h"
52
53 #define MLOG_MASK_PREFIX (ML_DLM|ML_DLM_MASTER)
54 #include "cluster/masklog.h"
55
56 static void dlm_mle_node_down(struct dlm_ctxt *dlm,
57                               struct dlm_master_list_entry *mle,
58                               struct o2nm_node *node,
59                               int idx);
60 static void dlm_mle_node_up(struct dlm_ctxt *dlm,
61                             struct dlm_master_list_entry *mle,
62                             struct o2nm_node *node,
63                             int idx);
64
65 static void dlm_assert_master_worker(struct dlm_work_item *item, void *data);
66 static int dlm_do_assert_master(struct dlm_ctxt *dlm,
67                                 struct dlm_lock_resource *res,
68                                 void *nodemap, u32 flags);
69 static void dlm_deref_lockres_worker(struct dlm_work_item *item, void *data);
70
71 static inline int dlm_mle_equal(struct dlm_ctxt *dlm,
72                                 struct dlm_master_list_entry *mle,
73                                 const char *name,
74                                 unsigned int namelen)
75 {
76         struct dlm_lock_resource *res;
77
78         if (dlm != mle->dlm)
79                 return 0;
80
81         if (mle->type == DLM_MLE_BLOCK ||
82             mle->type == DLM_MLE_MIGRATION) {
83                 if (namelen != mle->u.name.len ||
84                     memcmp(name, mle->u.name.name, namelen)!=0)
85                         return 0;
86         } else {
87                 res = mle->u.res;
88                 if (namelen != res->lockname.len ||
89                     memcmp(res->lockname.name, name, namelen) != 0)
90                         return 0;
91         }
92         return 1;
93 }
94
95 static struct kmem_cache *dlm_lockres_cache = NULL;
96 static struct kmem_cache *dlm_lockname_cache = NULL;
97 static struct kmem_cache *dlm_mle_cache = NULL;
98
99 static void dlm_mle_release(struct kref *kref);
100 static void dlm_init_mle(struct dlm_master_list_entry *mle,
101                         enum dlm_mle_type type,
102                         struct dlm_ctxt *dlm,
103                         struct dlm_lock_resource *res,
104                         const char *name,
105                         unsigned int namelen);
106 static void dlm_put_mle(struct dlm_master_list_entry *mle);
107 static void __dlm_put_mle(struct dlm_master_list_entry *mle);
108 static int dlm_find_mle(struct dlm_ctxt *dlm,
109                         struct dlm_master_list_entry **mle,
110                         char *name, unsigned int namelen);
111
112 static int dlm_do_master_request(struct dlm_lock_resource *res,
113                                  struct dlm_master_list_entry *mle, int to);
114
115
116 static int dlm_wait_for_lock_mastery(struct dlm_ctxt *dlm,
117                                      struct dlm_lock_resource *res,
118                                      struct dlm_master_list_entry *mle,
119                                      int *blocked);
120 static int dlm_restart_lock_mastery(struct dlm_ctxt *dlm,
121                                     struct dlm_lock_resource *res,
122                                     struct dlm_master_list_entry *mle,
123                                     int blocked);
124 static int dlm_add_migration_mle(struct dlm_ctxt *dlm,
125                                  struct dlm_lock_resource *res,
126                                  struct dlm_master_list_entry *mle,
127                                  struct dlm_master_list_entry **oldmle,
128                                  const char *name, unsigned int namelen,
129                                  u8 new_master, u8 master);
130
131 static u8 dlm_pick_migration_target(struct dlm_ctxt *dlm,
132                                     struct dlm_lock_resource *res);
133 static void dlm_remove_nonlocal_locks(struct dlm_ctxt *dlm,
134                                       struct dlm_lock_resource *res);
135 static int dlm_mark_lockres_migrating(struct dlm_ctxt *dlm,
136                                        struct dlm_lock_resource *res,
137                                        u8 target);
138 static int dlm_pre_master_reco_lockres(struct dlm_ctxt *dlm,
139                                        struct dlm_lock_resource *res);
140
141
142 int dlm_is_host_down(int errno)
143 {
144         switch (errno) {
145                 case -EBADF:
146                 case -ECONNREFUSED:
147                 case -ENOTCONN:
148                 case -ECONNRESET:
149                 case -EPIPE:
150                 case -EHOSTDOWN:
151                 case -EHOSTUNREACH:
152                 case -ETIMEDOUT:
153                 case -ECONNABORTED:
154                 case -ENETDOWN:
155                 case -ENETUNREACH:
156                 case -ENETRESET:
157                 case -ESHUTDOWN:
158                 case -ENOPROTOOPT:
159                 case -EINVAL:   /* if returned from our tcp code,
160                                    this means there is no socket */
161                         return 1;
162         }
163         return 0;
164 }
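/*
 * Callers use dlm_is_host_down() to separate "the remote node died"
 * from local errors: dlm_do_master_request() below, for instance,
 * BUG()s on unexpected non-network errors but merely logs and bails
 * out when the errno indicates the link to the target went down.
 */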
165
166
167 /*
168  * MASTER LIST FUNCTIONS
169  */
170
171
172 /*
173  * regarding master list entries and heartbeat callbacks:
174  *
175  * in order to avoid the sleeping and allocation that occur in
176  * heartbeat, master list entries are simply attached to the
177  * dlm's established heartbeat callbacks.  the mle is attached
178  * when it is created, and since the dlm->spinlock is held at
179  * that time, any heartbeat event will be properly discovered
180  * by the mle.  the mle needs to be detached from the
181  * dlm->mle_hb_events list as soon as heartbeat events are no
182  * longer useful to the mle, and before the mle is freed.
183  *
184  * as a general rule, heartbeat events are no longer needed by
185  * the mle once an "answer" regarding the lock master has been
186  * received.
187  */
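/*
 * In practice (see dlm_get_lock_resource() below) the lifecycle is:
 *
 *   spin_lock(&dlm->spinlock);
 *   dlm_init_mle(mle, ...);              - attaches to dlm->mle_hb_events
 *   ...
 *   dlm_mle_detach_hb_events(dlm, mle);  - once the master is known
 *   dlm_put_mle(mle);                    - dlm_mle_release() detaches again,
 *                                          harmlessly, if the mle was never
 *                                          explicitly detached
 */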
188 static inline void __dlm_mle_attach_hb_events(struct dlm_ctxt *dlm,
189                                               struct dlm_master_list_entry *mle)
190 {
191         assert_spin_locked(&dlm->spinlock);
192
193         list_add_tail(&mle->hb_events, &dlm->mle_hb_events);
194 }
195
196
197 static inline void __dlm_mle_detach_hb_events(struct dlm_ctxt *dlm,
198                                               struct dlm_master_list_entry *mle)
199 {
200         if (!list_empty(&mle->hb_events))
201                 list_del_init(&mle->hb_events);
202 }
203
204
205 static inline void dlm_mle_detach_hb_events(struct dlm_ctxt *dlm,
206                                             struct dlm_master_list_entry *mle)
207 {
208         spin_lock(&dlm->spinlock);
209         __dlm_mle_detach_hb_events(dlm, mle);
210         spin_unlock(&dlm->spinlock);
211 }
212
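/*
 * Pin an mle for an extended operation: both the inuse count and a kref
 * are taken under dlm->spinlock and dlm->master_lock, and released
 * together by dlm_put_mle_inuse().  dlm_get_lock_resource() uses this to
 * keep the mle alive across the whole mastery process.
 */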
213 static void dlm_get_mle_inuse(struct dlm_master_list_entry *mle)
214 {
215         struct dlm_ctxt *dlm;
216         dlm = mle->dlm;
217
218         assert_spin_locked(&dlm->spinlock);
219         assert_spin_locked(&dlm->master_lock);
220         mle->inuse++;
221         kref_get(&mle->mle_refs);
222 }
223
224 static void dlm_put_mle_inuse(struct dlm_master_list_entry *mle)
225 {
226         struct dlm_ctxt *dlm;
227         dlm = mle->dlm;
228
229         spin_lock(&dlm->spinlock);
230         spin_lock(&dlm->master_lock);
231         mle->inuse--;
232         __dlm_put_mle(mle);
233         spin_unlock(&dlm->master_lock);
234         spin_unlock(&dlm->spinlock);
235
236 }
237
238 /* remove from list and free */
239 static void __dlm_put_mle(struct dlm_master_list_entry *mle)
240 {
241         struct dlm_ctxt *dlm;
242         dlm = mle->dlm;
243
244         assert_spin_locked(&dlm->spinlock);
245         assert_spin_locked(&dlm->master_lock);
246         if (!atomic_read(&mle->mle_refs.refcount)) {
247                 /* this may or may not crash, but who cares.
248                  * it's a BUG. */
249                 mlog(ML_ERROR, "bad mle: %p\n", mle);
250                 dlm_print_one_mle(mle);
251                 BUG();
252         } else
253                 kref_put(&mle->mle_refs, dlm_mle_release);
254 }
255
256
257 /* must not have any spinlocks coming in */
258 static void dlm_put_mle(struct dlm_master_list_entry *mle)
259 {
260         struct dlm_ctxt *dlm;
261         dlm = mle->dlm;
262
263         spin_lock(&dlm->spinlock);
264         spin_lock(&dlm->master_lock);
265         __dlm_put_mle(mle);
266         spin_unlock(&dlm->master_lock);
267         spin_unlock(&dlm->spinlock);
268 }
269
270 static inline void dlm_get_mle(struct dlm_master_list_entry *mle)
271 {
272         kref_get(&mle->mle_refs);
273 }
274
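/*
 * Note the union in struct dlm_master_list_entry: a DLM_MLE_MASTER entry
 * points at the lockres itself (mle->u.res), while BLOCK and MIGRATION
 * entries only carry the lock name (mle->u.name).  dlm_mle_equal() above
 * compares accordingly.
 */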
275 static void dlm_init_mle(struct dlm_master_list_entry *mle,
276                         enum dlm_mle_type type,
277                         struct dlm_ctxt *dlm,
278                         struct dlm_lock_resource *res,
279                         const char *name,
280                         unsigned int namelen)
281 {
282         assert_spin_locked(&dlm->spinlock);
283
284         mle->dlm = dlm;
285         mle->type = type;
286         INIT_LIST_HEAD(&mle->list);
287         INIT_LIST_HEAD(&mle->hb_events);
288         memset(mle->maybe_map, 0, sizeof(mle->maybe_map));
289         spin_lock_init(&mle->spinlock);
290         init_waitqueue_head(&mle->wq);
291         atomic_set(&mle->woken, 0);
292         kref_init(&mle->mle_refs);
293         memset(mle->response_map, 0, sizeof(mle->response_map));
294         mle->master = O2NM_MAX_NODES;
295         mle->new_master = O2NM_MAX_NODES;
296         mle->inuse = 0;
297
298         if (mle->type == DLM_MLE_MASTER) {
299                 BUG_ON(!res);
300                 mle->u.res = res;
301         } else if (mle->type == DLM_MLE_BLOCK) {
302                 BUG_ON(!name);
303                 memcpy(mle->u.name.name, name, namelen);
304                 mle->u.name.len = namelen;
305         } else /* DLM_MLE_MIGRATION */ {
306                 BUG_ON(!name);
307                 memcpy(mle->u.name.name, name, namelen);
308                 mle->u.name.len = namelen;
309         }
310
311         /* copy off the node_map and register hb callbacks on our copy */
312         memcpy(mle->node_map, dlm->domain_map, sizeof(mle->node_map));
313         memcpy(mle->vote_map, dlm->domain_map, sizeof(mle->vote_map));
314         clear_bit(dlm->node_num, mle->vote_map);
315         clear_bit(dlm->node_num, mle->node_map);
316
317         /* attach the mle to the domain node up/down events */
318         __dlm_mle_attach_hb_events(dlm, mle);
319 }
320
321
322 /* returns 1 if found, 0 if not */
323 static int dlm_find_mle(struct dlm_ctxt *dlm,
324                         struct dlm_master_list_entry **mle,
325                         char *name, unsigned int namelen)
326 {
327         struct dlm_master_list_entry *tmpmle;
328
329         assert_spin_locked(&dlm->master_lock);
330
331         list_for_each_entry(tmpmle, &dlm->master_list, list) {
332                 if (!dlm_mle_equal(dlm, tmpmle, name, namelen))
333                         continue;
334                 dlm_get_mle(tmpmle);
335                 *mle = tmpmle;
336                 return 1;
337         }
338         return 0;
339 }
340
341 void dlm_hb_event_notify_attached(struct dlm_ctxt *dlm, int idx, int node_up)
342 {
343         struct dlm_master_list_entry *mle;
344
345         assert_spin_locked(&dlm->spinlock);
346         
347         list_for_each_entry(mle, &dlm->mle_hb_events, hb_events) {
348                 if (node_up)
349                         dlm_mle_node_up(dlm, mle, NULL, idx);
350                 else
351                         dlm_mle_node_down(dlm, mle, NULL, idx);
352         }
353 }
354
355 static void dlm_mle_node_down(struct dlm_ctxt *dlm,
356                               struct dlm_master_list_entry *mle,
357                               struct o2nm_node *node, int idx)
358 {
359         spin_lock(&mle->spinlock);
360
361         if (!test_bit(idx, mle->node_map))
362                 mlog(0, "node %u already removed from nodemap!\n", idx);
363         else
364                 clear_bit(idx, mle->node_map);
365
366         spin_unlock(&mle->spinlock);
367 }
368
369 static void dlm_mle_node_up(struct dlm_ctxt *dlm,
370                             struct dlm_master_list_entry *mle,
371                             struct o2nm_node *node, int idx)
372 {
373         spin_lock(&mle->spinlock);
374
375         if (test_bit(idx, mle->node_map))
376                 mlog(0, "node %u already in node map!\n", idx);
377         else
378                 set_bit(idx, mle->node_map);
379
380         spin_unlock(&mle->spinlock);
381 }
382
383
384 int dlm_init_mle_cache(void)
385 {
386         dlm_mle_cache = kmem_cache_create("o2dlm_mle",
387                                           sizeof(struct dlm_master_list_entry),
388                                           0, SLAB_HWCACHE_ALIGN,
389                                           NULL);
390         if (dlm_mle_cache == NULL)
391                 return -ENOMEM;
392         return 0;
393 }
394
395 void dlm_destroy_mle_cache(void)
396 {
397         if (dlm_mle_cache)
398                 kmem_cache_destroy(dlm_mle_cache);
399 }
400
401 static void dlm_mle_release(struct kref *kref)
402 {
403         struct dlm_master_list_entry *mle;
404         struct dlm_ctxt *dlm;
405
406         mlog_entry_void();
407
408         mle = container_of(kref, struct dlm_master_list_entry, mle_refs);
409         dlm = mle->dlm;
410
411         if (mle->type != DLM_MLE_MASTER) {
412                 mlog(0, "calling mle_release for %.*s, type %d\n",
413                      mle->u.name.len, mle->u.name.name, mle->type);
414         } else {
415                 mlog(0, "calling mle_release for %.*s, type %d\n",
416                      mle->u.res->lockname.len,
417                      mle->u.res->lockname.name, mle->type);
418         }
419         assert_spin_locked(&dlm->spinlock);
420         assert_spin_locked(&dlm->master_lock);
421
422         /* remove from list if not already */
423         if (!list_empty(&mle->list))
424                 list_del_init(&mle->list);
425
426         /* detach the mle from the domain node up/down events */
427         __dlm_mle_detach_hb_events(dlm, mle);
428
429         /* NOTE: kfree under spinlock here.
430          * if this is bad, we can move this to a freelist. */
431         kmem_cache_free(dlm_mle_cache, mle);
432 }
433
434
435 /*
436  * LOCK RESOURCE FUNCTIONS
437  */
438
439 int dlm_init_master_caches(void)
440 {
441         dlm_lockres_cache = kmem_cache_create("o2dlm_lockres",
442                                               sizeof(struct dlm_lock_resource),
443                                               0, SLAB_HWCACHE_ALIGN, NULL);
444         if (!dlm_lockres_cache)
445                 goto bail;
446
447         dlm_lockname_cache = kmem_cache_create("o2dlm_lockname",
448                                                DLM_LOCKID_NAME_MAX, 0,
449                                                SLAB_HWCACHE_ALIGN, NULL);
450         if (!dlm_lockname_cache)
451                 goto bail;
452
453         return 0;
454 bail:
455         dlm_destroy_master_caches();
456         return -ENOMEM;
457 }
458
459 void dlm_destroy_master_caches(void)
460 {
461         if (dlm_lockname_cache)
462                 kmem_cache_destroy(dlm_lockname_cache);
463
464         if (dlm_lockres_cache)
465                 kmem_cache_destroy(dlm_lockres_cache);
466 }
467
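/*
 * Owner bookkeeping: dlm_set_lockres_owner() bumps one of the
 * local/unknown/remote resource counters and records the owner;
 * dlm_change_lockres_owner() first drops the counter for the old owner
 * so the counts stay balanced.  Both require res->spinlock to be held.
 */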
468 static void dlm_set_lockres_owner(struct dlm_ctxt *dlm,
469                                   struct dlm_lock_resource *res,
470                                   u8 owner)
471 {
472         assert_spin_locked(&res->spinlock);
473
474         mlog_entry("%.*s, %u\n", res->lockname.len, res->lockname.name, owner);
475
476         if (owner == dlm->node_num)
477                 atomic_inc(&dlm->local_resources);
478         else if (owner == DLM_LOCK_RES_OWNER_UNKNOWN)
479                 atomic_inc(&dlm->unknown_resources);
480         else
481                 atomic_inc(&dlm->remote_resources);
482
483         res->owner = owner;
484 }
485
486 void dlm_change_lockres_owner(struct dlm_ctxt *dlm,
487                               struct dlm_lock_resource *res, u8 owner)
488 {
489         assert_spin_locked(&res->spinlock);
490
491         if (owner == res->owner)
492                 return;
493
494         if (res->owner == dlm->node_num)
495                 atomic_dec(&dlm->local_resources);
496         else if (res->owner == DLM_LOCK_RES_OWNER_UNKNOWN)
497                 atomic_dec(&dlm->unknown_resources);
498         else
499                 atomic_dec(&dlm->remote_resources);
500
501         dlm_set_lockres_owner(dlm, res, owner);
502 }
503
504
505 static void dlm_lockres_release(struct kref *kref)
506 {
507         struct dlm_lock_resource *res;
508         struct dlm_ctxt *dlm;
509
510         res = container_of(kref, struct dlm_lock_resource, refs);
511         dlm = res->dlm;
512
513         /* This should not happen -- all lockres' have a name
514          * associated with them at init time. */
515         BUG_ON(!res->lockname.name);
516
517         mlog(0, "destroying lockres %.*s\n", res->lockname.len,
518              res->lockname.name);
519
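        /*
         * Take the lockres off the per-domain tracking list.  This is
         * guarded by dlm->track_lock rather than dlm->spinlock, and a
         * lockres that was somehow never tracked is reported loudly
         * instead of being silently ignored.
         */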
520         spin_lock(&dlm->track_lock);
521         if (!list_empty(&res->tracking))
522                 list_del_init(&res->tracking);
523         else {
524                 mlog(ML_ERROR, "Resource %.*s not on the Tracking list\n",
525                      res->lockname.len, res->lockname.name);
526                 dlm_print_one_lock_resource(res);
527         }
528         spin_unlock(&dlm->track_lock);
529
530         dlm_put(dlm);
531
532         if (!hlist_unhashed(&res->hash_node) ||
533             !list_empty(&res->granted) ||
534             !list_empty(&res->converting) ||
535             !list_empty(&res->blocked) ||
536             !list_empty(&res->dirty) ||
537             !list_empty(&res->recovering) ||
538             !list_empty(&res->purge)) {
539                 mlog(ML_ERROR,
540                      "Going to BUG for resource %.*s."
541                      "  We're on a list! [%c%c%c%c%c%c%c]\n",
542                      res->lockname.len, res->lockname.name,
543                      !hlist_unhashed(&res->hash_node) ? 'H' : ' ',
544                      !list_empty(&res->granted) ? 'G' : ' ',
545                      !list_empty(&res->converting) ? 'C' : ' ',
546                      !list_empty(&res->blocked) ? 'B' : ' ',
547                      !list_empty(&res->dirty) ? 'D' : ' ',
548                      !list_empty(&res->recovering) ? 'R' : ' ',
549                      !list_empty(&res->purge) ? 'P' : ' ');
550
551                 dlm_print_one_lock_resource(res);
552         }
553
554         /* By the time we're ready to blow this guy away, we shouldn't
555          * be on any lists. */
556         BUG_ON(!hlist_unhashed(&res->hash_node));
557         BUG_ON(!list_empty(&res->granted));
558         BUG_ON(!list_empty(&res->converting));
559         BUG_ON(!list_empty(&res->blocked));
560         BUG_ON(!list_empty(&res->dirty));
561         BUG_ON(!list_empty(&res->recovering));
562         BUG_ON(!list_empty(&res->purge));
563
564         kmem_cache_free(dlm_lockname_cache, (void *)res->lockname.name);
565
566         kmem_cache_free(dlm_lockres_cache, res);
567 }
568
569 void dlm_lockres_put(struct dlm_lock_resource *res)
570 {
571         kref_put(&res->refs, dlm_lockres_release);
572 }
573
574 static void dlm_init_lockres(struct dlm_ctxt *dlm,
575                              struct dlm_lock_resource *res,
576                              const char *name, unsigned int namelen)
577 {
578         char *qname;
579
580         /* If we memset here, we lose our reference to the kmalloc'd
581          * res->lockname.name, so be sure to init every field
582          * correctly! */
583
584         qname = (char *) res->lockname.name;
585         memcpy(qname, name, namelen);
586
587         res->lockname.len = namelen;
588         res->lockname.hash = dlm_lockid_hash(name, namelen);
589
590         init_waitqueue_head(&res->wq);
591         spin_lock_init(&res->spinlock);
592         INIT_HLIST_NODE(&res->hash_node);
593         INIT_LIST_HEAD(&res->granted);
594         INIT_LIST_HEAD(&res->converting);
595         INIT_LIST_HEAD(&res->blocked);
596         INIT_LIST_HEAD(&res->dirty);
597         INIT_LIST_HEAD(&res->recovering);
598         INIT_LIST_HEAD(&res->purge);
599         INIT_LIST_HEAD(&res->tracking);
600         atomic_set(&res->asts_reserved, 0);
601         res->migration_pending = 0;
602         res->inflight_locks = 0;
603
604         /* put in dlm_lockres_release */
605         dlm_grab(dlm);
606         res->dlm = dlm;
607
608         kref_init(&res->refs);
609
610         /* just for consistency */
611         spin_lock(&res->spinlock);
612         dlm_set_lockres_owner(dlm, res, DLM_LOCK_RES_OWNER_UNKNOWN);
613         spin_unlock(&res->spinlock);
614
615         res->state = DLM_LOCK_RES_IN_PROGRESS;
616
617         res->last_used = 0;
618
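        /* make the new lockres visible on the per-domain tracking list */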
619         spin_lock(&dlm->spinlock);
620         list_add_tail(&res->tracking, &dlm->tracking_list);
621         spin_unlock(&dlm->spinlock);
622
623         memset(res->lvb, 0, DLM_LVB_LEN);
624         memset(res->refmap, 0, sizeof(res->refmap));
625 }
626
627 struct dlm_lock_resource *dlm_new_lockres(struct dlm_ctxt *dlm,
628                                    const char *name,
629                                    unsigned int namelen)
630 {
631         struct dlm_lock_resource *res = NULL;
632
633         res = (struct dlm_lock_resource *)
634                                 kmem_cache_zalloc(dlm_lockres_cache, GFP_NOFS);
635         if (!res)
636                 goto error;
637
638         res->lockname.name = (char *)
639                                 kmem_cache_zalloc(dlm_lockname_cache, GFP_NOFS);
640         if (!res->lockname.name)
641                 goto error;
642
643         dlm_init_lockres(dlm, res, name, namelen);
644         return res;
645
646 error:
647         if (res && res->lockname.name)
648                 kmem_cache_free(dlm_lockname_cache, (void *)res->lockname.name);
649
650         if (res)
651                 kmem_cache_free(dlm_lockres_cache, res);
652         return NULL;
653 }
654
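/*
 * Inflight reference counting: the first inflight lock on a lockres sets
 * this node's bit in res->refmap, and the bit is cleared again once the
 * last inflight lock is dropped; res->wq is woken on every drop.
 */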
655 void __dlm_lockres_grab_inflight_ref(struct dlm_ctxt *dlm,
656                                    struct dlm_lock_resource *res,
657                                    int new_lockres,
658                                    const char *file,
659                                    int line)
660 {
661         if (!new_lockres)
662                 assert_spin_locked(&res->spinlock);
663
664         if (!test_bit(dlm->node_num, res->refmap)) {
665                 BUG_ON(res->inflight_locks != 0);
666                 dlm_lockres_set_refmap_bit(dlm->node_num, res);
667         }
668         res->inflight_locks++;
669         mlog(0, "%s:%.*s: inflight++: now %u\n",
670              dlm->name, res->lockname.len, res->lockname.name,
671              res->inflight_locks);
672 }
673
674 void __dlm_lockres_drop_inflight_ref(struct dlm_ctxt *dlm,
675                                    struct dlm_lock_resource *res,
676                                    const char *file,
677                                    int line)
678 {
679         assert_spin_locked(&res->spinlock);
680
681         BUG_ON(res->inflight_locks == 0);
682         res->inflight_locks--;
683         mlog(0, "%s:%.*s: inflight--: now %u\n",
684              dlm->name, res->lockname.len, res->lockname.name,
685              res->inflight_locks);
686         if (res->inflight_locks == 0)
687                 dlm_lockres_clear_refmap_bit(dlm->node_num, res);
688         wake_up(&res->wq);
689 }
690
691 /*
692  * lookup a lock resource by name.
693  * may already exist in the hashtable.
694  * lockid is null terminated
695  *
696  * if not, allocate enough for the lockres and for
697  * the temporary structure used in doing the mastering.
698  *
699  * also, do a lookup in the dlm->master_list to see
700  * if another node has begun mastering the same lock.
701  * if so, there should be a block entry in there
702  * for this name, and we should *not* attempt to master
703  * the lock here.   need to wait around for that node
704  * to assert_master (or die).
705  *
706  */
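/*
 * Rough flow of dlm_get_lock_resource():
 *
 *   1. look the name up in the hash.  if found, grab an inflight ref
 *      when we already own it, or wait out DLM_LOCK_RES_DROPPING_REF
 *      and retry, then return the existing lockres.
 *   2. otherwise allocate a lockres plus an mle and redo the lookup.
 *      if another node already has a BLOCK/MIGRATION mle for the name
 *      we wait on it (or restart); otherwise our own MASTER mle goes
 *      on dlm->master_list and the lockres is hashed.
 *   3. unless blocked, send a master request to every node in the
 *      vote map, then loop in dlm_wait_for_lock_mastery() until an
 *      owner is established.
 */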
707 struct dlm_lock_resource * dlm_get_lock_resource(struct dlm_ctxt *dlm,
708                                           const char *lockid,
709                                           int namelen,
710                                           int flags)
711 {
712         struct dlm_lock_resource *tmpres=NULL, *res=NULL;
713         struct dlm_master_list_entry *mle = NULL;
714         struct dlm_master_list_entry *alloc_mle = NULL;
715         int blocked = 0;
716         int ret, nodenum;
717         struct dlm_node_iter iter;
718         unsigned int hash;
719         int tries = 0;
720         int bit, wait_on_recovery = 0;
721         int drop_inflight_if_nonlocal = 0;
722
723         BUG_ON(!lockid);
724
725         hash = dlm_lockid_hash(lockid, namelen);
726
727         mlog(0, "get lockres %s (len %d)\n", lockid, namelen);
728
729 lookup:
730         spin_lock(&dlm->spinlock);
731         tmpres = __dlm_lookup_lockres_full(dlm, lockid, namelen, hash);
732         if (tmpres) {
733                 int dropping_ref = 0;
734
735                 spin_lock(&tmpres->spinlock);
736                 if (tmpres->owner == dlm->node_num) {
737                         BUG_ON(tmpres->state & DLM_LOCK_RES_DROPPING_REF);
738                         dlm_lockres_grab_inflight_ref(dlm, tmpres);
739                 } else if (tmpres->state & DLM_LOCK_RES_DROPPING_REF)
740                         dropping_ref = 1;
741                 spin_unlock(&tmpres->spinlock);
742                 spin_unlock(&dlm->spinlock);
743
744                 /* wait until done messaging the master, drop our ref to allow
745                  * the lockres to be purged, start over. */
746                 if (dropping_ref) {
747                         spin_lock(&tmpres->spinlock);
748                         __dlm_wait_on_lockres_flags(tmpres, DLM_LOCK_RES_DROPPING_REF);
749                         spin_unlock(&tmpres->spinlock);
750                         dlm_lockres_put(tmpres);
751                         tmpres = NULL;
752                         goto lookup;
753                 }
754
755                 mlog(0, "found in hash!\n");
756                 if (res)
757                         dlm_lockres_put(res);
758                 res = tmpres;
759                 goto leave;
760         }
761
762         if (!res) {
763                 spin_unlock(&dlm->spinlock);
764                 mlog(0, "allocating a new resource\n");
765                 /* nothing found and we need to allocate one. */
766                 alloc_mle = (struct dlm_master_list_entry *)
767                         kmem_cache_alloc(dlm_mle_cache, GFP_NOFS);
768                 if (!alloc_mle)
769                         goto leave;
770                 res = dlm_new_lockres(dlm, lockid, namelen);
771                 if (!res)
772                         goto leave;
773                 goto lookup;
774         }
775
776         mlog(0, "no lockres found, allocated our own: %p\n", res);
777
778         if (flags & LKM_LOCAL) {
779                 /* caller knows it's safe to assume it's not mastered elsewhere
780                  * DONE!  return right away */
781                 spin_lock(&res->spinlock);
782                 dlm_change_lockres_owner(dlm, res, dlm->node_num);
783                 __dlm_insert_lockres(dlm, res);
784                 dlm_lockres_grab_inflight_ref(dlm, res);
785                 spin_unlock(&res->spinlock);
786                 spin_unlock(&dlm->spinlock);
787                 /* lockres still marked IN_PROGRESS */
788                 goto wake_waiters;
789         }
790
791         /* check master list to see if another node has started mastering it */
792         spin_lock(&dlm->master_lock);
793
794         /* if we found a block, wait for lock to be mastered by another node */
795         blocked = dlm_find_mle(dlm, &mle, (char *)lockid, namelen);
796         if (blocked) {
797                 int mig;
798                 if (mle->type == DLM_MLE_MASTER) {
799                         mlog(ML_ERROR, "master entry for nonexistent lock!\n");
800                         BUG();
801                 }
802                 mig = (mle->type == DLM_MLE_MIGRATION);
803                 /* if there is a migration in progress, let the migration
804                  * finish before continuing.  we can wait for the absence
805                  * of the MIGRATION mle: either the migrate finished or
806                  * one of the nodes died and the mle was cleaned up.
807                  * if there is a BLOCK here, but it already has a master
808                  * set, we are too late.  the master does not have a ref
809                  * for us in the refmap.  detach the mle and drop it.
810                  * either way, go back to the top and start over. */
811                 if (mig || mle->master != O2NM_MAX_NODES) {
812                         BUG_ON(mig && mle->master == dlm->node_num);
813                         /* we arrived too late.  the master does not
814                          * have a ref for us. retry. */
815                         mlog(0, "%s:%.*s: late on %s\n",
816                              dlm->name, namelen, lockid,
817                              mig ?  "MIGRATION" : "BLOCK");
818                         spin_unlock(&dlm->master_lock);
819                         spin_unlock(&dlm->spinlock);
820
821                         /* master is known, detach */
822                         if (!mig)
823                                 dlm_mle_detach_hb_events(dlm, mle);
824                         dlm_put_mle(mle);
825                         mle = NULL;
826                         /* this is lame, but we can't wait on either
827                          * the mle or lockres waitqueue here */
828                         if (mig)
829                                 msleep(100);
830                         goto lookup;
831                 }
832         } else {
833                 /* go ahead and try to master lock on this node */
834                 mle = alloc_mle;
835                 /* make sure this does not get freed below */
836                 alloc_mle = NULL;
837                 dlm_init_mle(mle, DLM_MLE_MASTER, dlm, res, NULL, 0);
838                 set_bit(dlm->node_num, mle->maybe_map);
839                 list_add(&mle->list, &dlm->master_list);
840
841                 /* still holding the dlm spinlock, check the recovery map
842                  * to see if there are any nodes that still need to be 
843                  * considered.  these will not appear in the mle nodemap
844                  * but they might own this lockres.  wait on them. */
845                 bit = find_next_bit(dlm->recovery_map, O2NM_MAX_NODES, 0);
846                 if (bit < O2NM_MAX_NODES) {
847                         mlog(ML_NOTICE, "%s:%.*s: at least one node (%d) to "
848                              "recover before lock mastery can begin\n",
849                              dlm->name, namelen, (char *)lockid, bit);
850                         wait_on_recovery = 1;
851                 }
852         }
853
854         /* at this point there is either a DLM_MLE_BLOCK or a
855          * DLM_MLE_MASTER on the master list, so it's safe to add the
856          * lockres to the hashtable.  anyone who finds the lock will
857          * still have to wait on the IN_PROGRESS. */
858
859         /* finally add the lockres to its hash bucket */
860         __dlm_insert_lockres(dlm, res);
861         /* since this lockres is new it doesn't require the spinlock */
862         dlm_lockres_grab_inflight_ref_new(dlm, res);
863
864         /* if this node does not become the master make sure to drop
865          * this inflight reference below */
866         drop_inflight_if_nonlocal = 1;
867
868         /* get an extra ref on the mle in case this is a BLOCK.
869          * if so, the creator of the BLOCK may try to put the last
870          * ref at this time in the assert master handler, so we
871          * need an extra one to keep from a bad ptr deref. */
872         dlm_get_mle_inuse(mle);
873         spin_unlock(&dlm->master_lock);
874         spin_unlock(&dlm->spinlock);
875
876 redo_request:
877         while (wait_on_recovery) {
878                 /* any cluster changes that occurred after dropping the
879          * dlm spinlock would be detectable by a change on the mle,
880                  * so we only need to clear out the recovery map once. */
881                 if (dlm_is_recovery_lock(lockid, namelen)) {
882                         mlog(ML_NOTICE, "%s: recovery map is not empty, but "
883                              "must master $RECOVERY lock now\n", dlm->name);
884                         if (!dlm_pre_master_reco_lockres(dlm, res))
885                                 wait_on_recovery = 0;
886                         else {
887                                 mlog(0, "%s: waiting 500ms for heartbeat state "
888                                     "change\n", dlm->name);
889                                 msleep(500);
890                         }
891                         continue;
892                 } 
893
894                 dlm_kick_recovery_thread(dlm);
895                 msleep(1000);
896                 dlm_wait_for_recovery(dlm);
897
898                 spin_lock(&dlm->spinlock);
899                 bit = find_next_bit(dlm->recovery_map, O2NM_MAX_NODES, 0);
900                 if (bit < O2NM_MAX_NODES) {
901                         mlog(ML_NOTICE, "%s:%.*s: at least one node (%d) to "
902                              "recover before lock mastery can begin\n",
903                              dlm->name, namelen, (char *)lockid, bit);
904                         wait_on_recovery = 1;
905                 } else
906                         wait_on_recovery = 0;
907                 spin_unlock(&dlm->spinlock);
908
909                 if (wait_on_recovery)
910                         dlm_wait_for_node_recovery(dlm, bit, 10000);
911         }
912
913         /* must wait for lock to be mastered elsewhere */
914         if (blocked)
915                 goto wait;
916
917         ret = -EINVAL;
918         dlm_node_iter_init(mle->vote_map, &iter);
919         while ((nodenum = dlm_node_iter_next(&iter)) >= 0) {
920                 ret = dlm_do_master_request(res, mle, nodenum);
921                 if (ret < 0)
922                         mlog_errno(ret);
923                 if (mle->master != O2NM_MAX_NODES) {
924                         /* found a master ! */
925                         if (mle->master <= nodenum)
926                                 break;
927                         /* if our master request has not reached the master
928                          * yet, keep going until it does.  this is how the
929                          * master will know that asserts are needed back to
930                          * the lower nodes. */
931                         mlog(0, "%s:%.*s: requests only up to %u but master "
932                              "is %u, keep going\n", dlm->name, namelen,
933                              lockid, nodenum, mle->master);
934                 }
935         }
936
937 wait:
938         /* keep going until the response map includes all nodes */
939         ret = dlm_wait_for_lock_mastery(dlm, res, mle, &blocked);
940         if (ret < 0) {
941                 wait_on_recovery = 1;
942                 mlog(0, "%s:%.*s: node map changed, redo the "
943                      "master request now, blocked=%d\n",
944                      dlm->name, res->lockname.len,
945                      res->lockname.name, blocked);
946                 if (++tries > 20) {
947                         mlog(ML_ERROR, "%s:%.*s: spinning on "
948                              "dlm_wait_for_lock_mastery, blocked=%d\n", 
949                              dlm->name, res->lockname.len, 
950                              res->lockname.name, blocked);
951                         dlm_print_one_lock_resource(res);
952                         dlm_print_one_mle(mle);
953                         tries = 0;
954                 }
955                 goto redo_request;
956         }
957
958         mlog(0, "lockres mastered by %u\n", res->owner);
959         /* make sure we never continue without this */
960         BUG_ON(res->owner == O2NM_MAX_NODES);
961
962         /* master is known, detach if not already detached */
963         dlm_mle_detach_hb_events(dlm, mle);
964         dlm_put_mle(mle);
965         /* put the extra ref */
966         dlm_put_mle_inuse(mle);
967
968 wake_waiters:
969         spin_lock(&res->spinlock);
970         if (res->owner != dlm->node_num && drop_inflight_if_nonlocal)
971                 dlm_lockres_drop_inflight_ref(dlm, res);
972         res->state &= ~DLM_LOCK_RES_IN_PROGRESS;
973         spin_unlock(&res->spinlock);
974         wake_up(&res->wq);
975
976 leave:
977         /* need to free the unused mle */
978         if (alloc_mle)
979                 kmem_cache_free(dlm_mle_cache, alloc_mle);
980
981         return res;
982 }
983
984
985 #define DLM_MASTERY_TIMEOUT_MS   5000
986
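/*
 * Per-pass timeout for dlm_wait_for_lock_mastery(): the wait below is
 * bounded by this value, after which (or on a wakeup via mle->wq) the
 * function rechecks whether an owner has been established.
 */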
987 static int dlm_wait_for_lock_mastery(struct dlm_ctxt *dlm,
988                                      struct dlm_lock_resource *res,
989                                      struct dlm_master_list_entry *mle,
990                                      int *blocked)
991 {
992         u8 m;
993         int ret, bit;
994         int map_changed, voting_done;
995         int assert, sleep;
996
997 recheck:
998         ret = 0;
999         assert = 0;
1000
1001         /* check if another node has already become the owner */
1002         spin_lock(&res->spinlock);
1003         if (res->owner != DLM_LOCK_RES_OWNER_UNKNOWN) {
1004                 mlog(0, "%s:%.*s: owner is suddenly %u\n", dlm->name,
1005                      res->lockname.len, res->lockname.name, res->owner);
1006                 spin_unlock(&res->spinlock);
1007                 /* this will cause the master to re-assert across
1008                  * the whole cluster, freeing up mles */
1009                 if (res->owner != dlm->node_num) {
1010                         ret = dlm_do_master_request(res, mle, res->owner);
1011                         if (ret < 0) {
1012                                 /* give recovery a chance to run */
1013                                 mlog(ML_ERROR, "link to %u went down?: %d\n", res->owner, ret);
1014                                 msleep(500);
1015                                 goto recheck;
1016                         }
1017                 }
1018                 ret = 0;
1019                 goto leave;
1020         }
1021         spin_unlock(&res->spinlock);
1022
1023         spin_lock(&mle->spinlock);
1024         m = mle->master;
1025         map_changed = (memcmp(mle->vote_map, mle->node_map,
1026                               sizeof(mle->vote_map)) != 0);
1027         voting_done = (memcmp(mle->vote_map, mle->response_map,
1028                              sizeof(mle->vote_map)) == 0);
1029
1030         /* restart mastery if the node map changed */
1031         if (map_changed) {
1032                 int b;
1033                 mlog(0, "%s: %.*s: node map changed, restarting\n",
1034                      dlm->name, res->lockname.len, res->lockname.name);
1035                 ret = dlm_restart_lock_mastery(dlm, res, mle, *blocked);
1036                 b = (mle->type == DLM_MLE_BLOCK);
1037                 if ((*blocked && !b) || (!*blocked && b)) {
1038                         mlog(0, "%s:%.*s: status change: old=%d new=%d\n", 
1039                              dlm->name, res->lockname.len, res->lockname.name,
1040                              *blocked, b);
1041                         *blocked = b;
1042                 }
1043                 spin_unlock(&mle->spinlock);
1044                 if (ret < 0) {
1045                         mlog_errno(ret);
1046                         goto leave;
1047                 }
1048                 mlog(0, "%s:%.*s: restart lock mastery succeeded, "
1049                      "rechecking now\n", dlm->name, res->lockname.len,
1050                      res->lockname.name);
1051                 goto recheck;
1052         } else {
1053                 if (!voting_done) {
1054                         mlog(0, "map not changed and voting not done "
1055                              "for %s:%.*s\n", dlm->name, res->lockname.len,
1056                              res->lockname.name);
1057                 }
1058         }
1059
1060         if (m != O2NM_MAX_NODES) {
1061                 /* another node has done an assert!
1062                  * all done! */
1063                 sleep = 0;
1064         } else {
1065                 sleep = 1;
1066                 /* have all nodes responded? */
1067                 if (voting_done && !*blocked) {
1068                         bit = find_next_bit(mle->maybe_map, O2NM_MAX_NODES, 0);
1069                         if (dlm->node_num <= bit) {
1070                                 /* my node number is lowest.
1071                                  * now tell other nodes that I am
1072                                  * mastering this. */
1073                                 mle->master = dlm->node_num;
1074                                 /* ref was grabbed in get_lock_resource
1075                                  * will be dropped in dlmlock_master */
1076                                 assert = 1;
1077                                 sleep = 0;
1078                         }
1079                         /* if voting is done, but we have not received
1080                          * an assert master yet, we must sleep */
1081                 }
1082         }
1083
1084         spin_unlock(&mle->spinlock);
1085
1086         /* sleep if we haven't finished voting yet */
1087         if (sleep) {
1088                 unsigned long timeo = msecs_to_jiffies(DLM_MASTERY_TIMEOUT_MS);
1089
1090                 /*
1091                 if (atomic_read(&mle->mle_refs.refcount) < 2)
1092                         mlog(ML_ERROR, "mle (%p) refs=%d, name=%.*s\n", mle,
1093                         atomic_read(&mle->mle_refs.refcount),
1094                         res->lockname.len, res->lockname.name);
1095                 */
1096                 atomic_set(&mle->woken, 0);
1097                 (void)wait_event_timeout(mle->wq,
1098                                          (atomic_read(&mle->woken) == 1),
1099                                          timeo);
1100                 if (res->owner == O2NM_MAX_NODES) {
1101                         mlog(0, "%s:%.*s: waiting again\n", dlm->name,
1102                              res->lockname.len, res->lockname.name);
1103                         goto recheck;
1104                 }
1105                 mlog(0, "done waiting, master is %u\n", res->owner);
1106                 ret = 0;
1107                 goto leave;
1108         }
1109
1110         ret = 0;   /* done */
1111         if (assert) {
1112                 m = dlm->node_num;
1113                 mlog(0, "about to master %.*s here, this=%u\n",
1114                      res->lockname.len, res->lockname.name, m);
1115                 ret = dlm_do_assert_master(dlm, res, mle->vote_map, 0);
1116                 if (ret) {
1117                         /* This is a failure in the network path,
1118                          * not in the response to the assert_master
1119                          * (any nonzero response is a BUG on this node).
1120                          * Most likely a socket just got disconnected
1121                          * due to node death. */
1122                         mlog_errno(ret);
1123                 }
1124                 /* no longer need to restart lock mastery.
1125                  * all living nodes have been contacted. */
1126                 ret = 0;
1127         }
1128
1129         /* set the lockres owner */
1130         spin_lock(&res->spinlock);
1131         /* mastery reference obtained either during
1132          * assert_master_handler or in get_lock_resource */
1133         dlm_change_lockres_owner(dlm, res, m);
1134         spin_unlock(&res->spinlock);
1135
1136 leave:
1137         return ret;
1138 }
1139
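/*
 * Bitmap diff iterator: diff_bm is the XOR of the original and current
 * node bitmaps, so dlm_bitmap_diff_iter_next() walks only the nodes
 * whose state changed and reports NODE_DOWN if the bit was set in the
 * original map (the node has since died) or NODE_UP otherwise.
 */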
1140 struct dlm_bitmap_diff_iter
1141 {
1142         int curnode;
1143         unsigned long *orig_bm;
1144         unsigned long *cur_bm;
1145         unsigned long diff_bm[BITS_TO_LONGS(O2NM_MAX_NODES)];
1146 };
1147
1148 enum dlm_node_state_change
1149 {
1150         NODE_DOWN = -1,
1151         NODE_NO_CHANGE = 0,
1152         NODE_UP
1153 };
1154
1155 static void dlm_bitmap_diff_iter_init(struct dlm_bitmap_diff_iter *iter,
1156                                       unsigned long *orig_bm,
1157                                       unsigned long *cur_bm)
1158 {
1159         unsigned long p1, p2;
1160         int i;
1161
1162         iter->curnode = -1;
1163         iter->orig_bm = orig_bm;
1164         iter->cur_bm = cur_bm;
1165
1166         for (i = 0; i < BITS_TO_LONGS(O2NM_MAX_NODES); i++) {
1167                 p1 = *(iter->orig_bm + i);
1168                 p2 = *(iter->cur_bm + i);
1169                 iter->diff_bm[i] = (p1 & ~p2) | (p2 & ~p1);
1170         }
1171 }
1172
1173 static int dlm_bitmap_diff_iter_next(struct dlm_bitmap_diff_iter *iter,
1174                                      enum dlm_node_state_change *state)
1175 {
1176         int bit;
1177
1178         if (iter->curnode >= O2NM_MAX_NODES)
1179                 return -ENOENT;
1180
1181         bit = find_next_bit(iter->diff_bm, O2NM_MAX_NODES,
1182                             iter->curnode+1);
1183         if (bit >= O2NM_MAX_NODES) {
1184                 iter->curnode = O2NM_MAX_NODES;
1185                 return -ENOENT;
1186         }
1187
1188         /* if it was there in the original then this node died */
1189         if (test_bit(bit, iter->orig_bm))
1190                 *state = NODE_DOWN;
1191         else
1192                 *state = NODE_UP;
1193
1194         iter->curnode = bit;
1195         return bit;
1196 }
1197
1198
1199 static int dlm_restart_lock_mastery(struct dlm_ctxt *dlm,
1200                                     struct dlm_lock_resource *res,
1201                                     struct dlm_master_list_entry *mle,
1202                                     int blocked)
1203 {
1204         struct dlm_bitmap_diff_iter bdi;
1205         enum dlm_node_state_change sc;
1206         int node;
1207         int ret = 0;
1208
1209         mlog(0, "something happened such that the "
1210              "master process may need to be restarted!\n");
1211
1212         assert_spin_locked(&mle->spinlock);
1213
1214         dlm_bitmap_diff_iter_init(&bdi, mle->vote_map, mle->node_map);
1215         node = dlm_bitmap_diff_iter_next(&bdi, &sc);
1216         while (node >= 0) {
1217                 if (sc == NODE_UP) {
1218                         /* a node came up.  clear any old vote from
1219                          * the response map and set it in the vote map
1220                          * then restart the mastery. */
1221                         mlog(ML_NOTICE, "node %d up while restarting\n", node);
1222
1223                         /* redo the master request, but only for the new node */
1224                         mlog(0, "sending request to new node\n");
1225                         clear_bit(node, mle->response_map);
1226                         set_bit(node, mle->vote_map);
1227                 } else {
1228                         mlog(ML_ERROR, "node down! %d\n", node);
1229                         if (blocked) {
1230                                 int lowest = find_next_bit(mle->maybe_map,
1231                                                        O2NM_MAX_NODES, 0);
1232
1233                                 /* act like it was never there */
1234                                 clear_bit(node, mle->maybe_map);
1235
1236                                 if (node == lowest) {
1237                                         mlog(0, "expected master %u died"
1238                                             " while this node was blocked "
1239                                             "waiting on it!\n", node);
1240                                         lowest = find_next_bit(mle->maybe_map,
1241                                                         O2NM_MAX_NODES,
1242                                                         lowest+1);
1243                                         if (lowest < O2NM_MAX_NODES) {
1244                                                 mlog(0, "%s:%.*s:still "
1245                                                      "blocked. waiting on %u "
1246                                                      "now\n", dlm->name,
1247                                                      res->lockname.len,
1248                                                      res->lockname.name,
1249                                                      lowest);
1250                                         } else {
1251                                                 /* mle is an MLE_BLOCK, but
1252                                                  * there is now nothing left to
1253                                                  * block on.  we need to return
1254                                                  * all the way back out and try
1255                                                  * again with an MLE_MASTER.
1256                                                  * dlm_do_local_recovery_cleanup
1257                                                  * has already run, so the mle
1258                                                  * refcount is ok */
1259                                                 mlog(0, "%s:%.*s: no "
1260                                                      "longer blocking. try to "
1261                                                      "master this here\n",
1262                                                      dlm->name,
1263                                                      res->lockname.len,
1264                                                      res->lockname.name);
1265                                                 mle->type = DLM_MLE_MASTER;
1266                                                 mle->u.res = res;
1267                                         }
1268                                 }
1269                         }
1270
1271                         /* now blank out everything, as if we had never
1272                          * contacted anyone */
1273                         memset(mle->maybe_map, 0, sizeof(mle->maybe_map));
1274                         memset(mle->response_map, 0, sizeof(mle->response_map));
1275                         /* reset the vote_map to the current node_map */
1276                         memcpy(mle->vote_map, mle->node_map,
1277                                sizeof(mle->node_map));
1278                         /* put myself into the maybe map */
1279                         if (mle->type != DLM_MLE_BLOCK)
1280                                 set_bit(dlm->node_num, mle->maybe_map);
1281                 }
1282                 ret = -EAGAIN;
1283                 node = dlm_bitmap_diff_iter_next(&bdi, &sc);
1284         }
1285         return ret;
1286 }
1287
1288
1289 /*
1290  * DLM_MASTER_REQUEST_MSG
1291  *
1292  * returns: 0 on success,
1293  *          -errno on a network error
1294  *
1295  * on error, the caller should assume the target node is "dead"
1296  *
1297  */
1298
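/*
 * The response is folded into the mle under mle->spinlock: YES records
 * the sender as master, NO/MAYBE mark the node in the response map
 * (MAYBE also in the maybe map), and ERROR causes the request to be
 * resent after a short sleep.
 */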
1299 static int dlm_do_master_request(struct dlm_lock_resource *res,
1300                                  struct dlm_master_list_entry *mle, int to)
1301 {
1302         struct dlm_ctxt *dlm = mle->dlm;
1303         struct dlm_master_request request;
1304         int ret, response=0, resend;
1305
1306         memset(&request, 0, sizeof(request));
1307         request.node_idx = dlm->node_num;
1308
1309         BUG_ON(mle->type == DLM_MLE_MIGRATION);
1310
1311         if (mle->type != DLM_MLE_MASTER) {
1312                 request.namelen = mle->u.name.len;
1313                 memcpy(request.name, mle->u.name.name, request.namelen);
1314         } else {
1315                 request.namelen = mle->u.res->lockname.len;
1316                 memcpy(request.name, mle->u.res->lockname.name,
1317                         request.namelen);
1318         }
1319
1320 again:
1321         ret = o2net_send_message(DLM_MASTER_REQUEST_MSG, dlm->key, &request,
1322                                  sizeof(request), to, &response);
1323         if (ret < 0)  {
1324                 if (ret == -ESRCH) {
1325                         /* should never happen */
1326                         mlog(ML_ERROR, "TCP stack not ready!\n");
1327                         BUG();
1328                 } else if (ret == -EINVAL) {
1329                         mlog(ML_ERROR, "bad args passed to o2net!\n");
1330                         BUG();
1331                 } else if (ret == -ENOMEM) {
1332                         mlog(ML_ERROR, "out of memory while trying to send "
1333                              "network message!  retrying\n");
1334                         /* this is totally crude */
1335                         msleep(50);
1336                         goto again;
1337                 } else if (!dlm_is_host_down(ret)) {
1338                         /* not a network error. bad. */
1339                         mlog_errno(ret);
1340                         mlog(ML_ERROR, "unhandled error!");
1341                         BUG();
1342                 }
1343                 /* all other errors should be network errors,
1344                  * and likely indicate node death */
1345                 mlog(ML_ERROR, "link to %d went down!\n", to);
1346                 goto out;
1347         }
1348
1349         ret = 0;
1350         resend = 0;
1351         spin_lock(&mle->spinlock);
1352         switch (response) {
1353                 case DLM_MASTER_RESP_YES:
1354                         set_bit(to, mle->response_map);
1355                         mlog(0, "node %u is the master, response=YES\n", to);
1356                         mlog(0, "%s:%.*s: master node %u now knows I have a "
1357                              "reference\n", dlm->name, res->lockname.len,
1358                              res->lockname.name, to);
1359                         mle->master = to;
1360                         break;
1361                 case DLM_MASTER_RESP_NO:
1362                         mlog(0, "node %u not master, response=NO\n", to);
1363                         set_bit(to, mle->response_map);
1364                         break;
1365                 case DLM_MASTER_RESP_MAYBE:
1366                         mlog(0, "node %u not master, response=MAYBE\n", to);
1367                         set_bit(to, mle->response_map);
1368                         set_bit(to, mle->maybe_map);
1369                         break;
1370                 case DLM_MASTER_RESP_ERROR:
1371                         mlog(0, "node %u hit an error, resending\n", to);
1372                         resend = 1;
1373                         response = 0;
1374                         break;
1375                 default:
1376                         mlog(ML_ERROR, "bad response! %u\n", response);
1377                         BUG();
1378         }
1379         spin_unlock(&mle->spinlock);
1380         if (resend) {
1381                 /* this is also totally crude */
1382                 msleep(50);
1383                 goto again;
1384         }
1385
1386 out:
1387         return ret;
1388 }
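
/*
 * A minimal sketch of the caller side.  dlm_get_lock_resource() and the
 * mle->vote_map bitmap are not part of this excerpt and are assumed here;
 * the point is only that every node in the map gets a request, and that a
 * send failure is logged rather than handled, since a dead target is dealt
 * with by the heartbeat callbacks and the mastery restart logic:
 *
 *	dlm_node_iter_init(mle->vote_map, &iter);
 *	while ((to = dlm_node_iter_next(&iter)) >= 0) {
 *		ret = dlm_do_master_request(res, mle, to);
 *		if (ret < 0)
 *			mlog(ML_ERROR, "link to %d went down\n", to);
 *	}
 */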
1389
1390 /*
1391  * locks that can be taken here:
1392  * dlm->spinlock
1393  * res->spinlock
1394  * mle->spinlock
1395  * dlm->master_lock
1396  *
1397  * if possible, TRIM THIS DOWN!!!
1398  */
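/*
 * A rough sketch of the nesting order the handlers below follow whenever
 * more than one of those locks is held at once (inferred from this file,
 * not an authoritative ranking):
 *
 *	spin_lock(&dlm->spinlock);
 *	  spin_lock(&res->spinlock);
 *	    spin_lock(&dlm->master_lock);
 *	      spin_lock(&mle->spinlock);
 *	      spin_unlock(&mle->spinlock);
 *	    spin_unlock(&dlm->master_lock);
 *	  spin_unlock(&res->spinlock);
 *	spin_unlock(&dlm->spinlock);
 */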
1399 int dlm_master_request_handler(struct o2net_msg *msg, u32 len, void *data,
1400                                void **ret_data)
1401 {
1402         u8 response = DLM_MASTER_RESP_MAYBE;
1403         struct dlm_ctxt *dlm = data;
1404         struct dlm_lock_resource *res = NULL;
1405         struct dlm_master_request *request = (struct dlm_master_request *) msg->buf;
1406         struct dlm_master_list_entry *mle = NULL, *tmpmle = NULL;
1407         char *name;
1408         unsigned int namelen, hash;
1409         int found, ret;
1410         int set_maybe;
1411         int dispatch_assert = 0;
1412
1413         if (!dlm_grab(dlm))
1414                 return DLM_MASTER_RESP_NO;
1415
1416         if (!dlm_domain_fully_joined(dlm)) {
1417                 response = DLM_MASTER_RESP_NO;
1418                 goto send_response;
1419         }
1420
1421         name = request->name;
1422         namelen = request->namelen;
1423         hash = dlm_lockid_hash(name, namelen);
1424
1425         if (namelen > DLM_LOCKID_NAME_MAX) {
1426                 response = DLM_IVBUFLEN;
1427                 goto send_response;
1428         }
1429
1430 way_up_top:
1431         spin_lock(&dlm->spinlock);
1432         res = __dlm_lookup_lockres(dlm, name, namelen, hash);
1433         if (res) {
1434                 spin_unlock(&dlm->spinlock);
1435
1436                 /* take care of the easy cases up front */
1437                 spin_lock(&res->spinlock);
1438                 if (res->state & (DLM_LOCK_RES_RECOVERING|
1439                                   DLM_LOCK_RES_MIGRATING)) {
1440                         spin_unlock(&res->spinlock);
1441                         mlog(0, "returning DLM_MASTER_RESP_ERROR since res is "
1442                              "being recovered/migrated\n");
1443                         response = DLM_MASTER_RESP_ERROR;
1444                         if (mle)
1445                                 kmem_cache_free(dlm_mle_cache, mle);
1446                         goto send_response;
1447                 }
1448
1449                 if (res->owner == dlm->node_num) {
1450                         mlog(0, "%s:%.*s: setting bit %u in refmap\n",
1451                              dlm->name, namelen, name, request->node_idx);
1452                         dlm_lockres_set_refmap_bit(request->node_idx, res);
1453                         spin_unlock(&res->spinlock);
1454                         response = DLM_MASTER_RESP_YES;
1455                         if (mle)
1456                                 kmem_cache_free(dlm_mle_cache, mle);
1457
1458                         /* this node is the owner.
1459                          * there is some extra work that needs to
1460                          * happen now.  the requesting node has
1461                          * caused all nodes up to this one to
1462                          * create mles.  this node now needs to
1463                          * go back and clean those up. */
1464                         dispatch_assert = 1;
1465                         goto send_response;
1466                 } else if (res->owner != DLM_LOCK_RES_OWNER_UNKNOWN) {
1467                         spin_unlock(&res->spinlock);
1468                         // mlog(0, "node %u is the master\n", res->owner);
1469                         response = DLM_MASTER_RESP_NO;
1470                         if (mle)
1471                                 kmem_cache_free(dlm_mle_cache, mle);
1472                         goto send_response;
1473                 }
1474
1475                 /* ok, there is no owner.  either this node is
1476                  * being blocked, or it is actively trying to
1477                  * master this lock. */
1478                 if (!(res->state & DLM_LOCK_RES_IN_PROGRESS)) {
1479                         mlog(ML_ERROR, "lock with no owner should be "
1480                              "in-progress!\n");
1481                         BUG();
1482                 }
1483
1484                 // mlog(0, "lockres is in progress...\n");
1485                 spin_lock(&dlm->master_lock);
1486                 found = dlm_find_mle(dlm, &tmpmle, name, namelen);
1487                 if (!found) {
1488                         mlog(ML_ERROR, "no mle found for this lock!\n");
1489                         BUG();
1490                 }
1491                 set_maybe = 1;
1492                 spin_lock(&tmpmle->spinlock);
1493                 if (tmpmle->type == DLM_MLE_BLOCK) {
1494                         // mlog(0, "this node is waiting for "
1495                         // "lockres to be mastered\n");
1496                         response = DLM_MASTER_RESP_NO;
1497                 } else if (tmpmle->type == DLM_MLE_MIGRATION) {
1498                         mlog(0, "node %u is master, but trying to migrate to "
1499                              "node %u.\n", tmpmle->master, tmpmle->new_master);
1500                         if (tmpmle->master == dlm->node_num) {
1501                                 mlog(ML_ERROR, "no owner on lockres, but this "
1502                                      "node is trying to migrate it to %u?!\n",
1503                                      tmpmle->new_master);
1504                                 BUG();
1505                         } else {
1506                                 /* the real master can respond on its own */
1507                                 response = DLM_MASTER_RESP_NO;
1508                         }
1509                 } else if (tmpmle->master != DLM_LOCK_RES_OWNER_UNKNOWN) {
1510                         set_maybe = 0;
1511                         if (tmpmle->master == dlm->node_num) {
1512                                 response = DLM_MASTER_RESP_YES;
1513                                 /* this node will be the owner.
1514                                  * go back and clean the mles on any
1515                                  * other nodes */
1516                                 dispatch_assert = 1;
1517                                 dlm_lockres_set_refmap_bit(request->node_idx, res);
1518                                 mlog(0, "%s:%.*s: setting bit %u in refmap\n",
1519                                      dlm->name, namelen, name,
1520                                      request->node_idx);
1521                         } else
1522                                 response = DLM_MASTER_RESP_NO;
1523                 } else {
1524                         // mlog(0, "this node is attempting to "
1525                         // "master lockres\n");
1526                         response = DLM_MASTER_RESP_MAYBE;
1527                 }
1528                 if (set_maybe)
1529                         set_bit(request->node_idx, tmpmle->maybe_map);
1530                 spin_unlock(&tmpmle->spinlock);
1531
1532                 spin_unlock(&dlm->master_lock);
1533                 spin_unlock(&res->spinlock);
1534
1535                 /* keep the mle attached to heartbeat events */
1536                 dlm_put_mle(tmpmle);
1537                 if (mle)
1538                         kmem_cache_free(dlm_mle_cache, mle);
1539                 goto send_response;
1540         }
1541
1542         /*
1543          * lockres doesn't exist on this node
1544          * if there is an MLE_BLOCK, return NO
1545          * if there is an MLE_MASTER, return MAYBE
1546          * otherwise, add an MLE_BLOCK, return NO
1547          */
1548         spin_lock(&dlm->master_lock);
1549         found = dlm_find_mle(dlm, &tmpmle, name, namelen);
1550         if (!found) {
1551                 /* this lockid has never been seen on this node yet */
1552                 // mlog(0, "no mle found\n");
1553                 if (!mle) {
1554                         spin_unlock(&dlm->master_lock);
1555                         spin_unlock(&dlm->spinlock);
1556
1557                         mle = (struct dlm_master_list_entry *)
1558                                 kmem_cache_alloc(dlm_mle_cache, GFP_NOFS);
1559                         if (!mle) {
1560                                 response = DLM_MASTER_RESP_ERROR;
1561                                 mlog_errno(-ENOMEM);
1562                                 goto send_response;
1563                         }
1564                         goto way_up_top;
1565                 }
1566
1567                 // mlog(0, "this is second time thru, already allocated, "
1568                 // "add the block.\n");
1569                 dlm_init_mle(mle, DLM_MLE_BLOCK, dlm, NULL, name, namelen);
1570                 set_bit(request->node_idx, mle->maybe_map);
1571                 list_add(&mle->list, &dlm->master_list);
1572                 response = DLM_MASTER_RESP_NO;
1573         } else {
1574                 // mlog(0, "mle was found\n");
1575                 set_maybe = 1;
1576                 spin_lock(&tmpmle->spinlock);
1577                 if (tmpmle->master == dlm->node_num) {
1578                         mlog(ML_ERROR, "no lockres, but an mle with this node as master!\n");
1579                         BUG();
1580                 }
1581                 if (tmpmle->type == DLM_MLE_BLOCK)
1582                         response = DLM_MASTER_RESP_NO;
1583                 else if (tmpmle->type == DLM_MLE_MIGRATION) {
1584                         mlog(0, "migration mle was found (%u->%u)\n",
1585                              tmpmle->master, tmpmle->new_master);
1586                         /* real master can respond on its own */
1587                         response = DLM_MASTER_RESP_NO;
1588                 } else
1589                         response = DLM_MASTER_RESP_MAYBE;
1590                 if (set_maybe)
1591                         set_bit(request->node_idx, tmpmle->maybe_map);
1592                 spin_unlock(&tmpmle->spinlock);
1593         }
1594         spin_unlock(&dlm->master_lock);
1595         spin_unlock(&dlm->spinlock);
1596
1597         if (found) {
1598                 /* keep the mle attached to heartbeat events */
1599                 dlm_put_mle(tmpmle);
1600         }
1601 send_response:
1602         /*
1603          * __dlm_lookup_lockres() grabbed a reference to this lockres.
1604          * The reference is released by dlm_assert_master_worker() under
1605          * the call to dlm_dispatch_assert_master().  If
1606          * dlm_assert_master_worker() isn't called, we drop it here.
1607          */
1608         if (dispatch_assert) {
1609                 if (response != DLM_MASTER_RESP_YES)
1610                         mlog(ML_ERROR, "invalid response %d\n", response);
1611                 if (!res) {
1612                         mlog(ML_ERROR, "bad lockres while trying to assert!\n");
1613                         BUG();
1614                 }
1615                 mlog(0, "%u is the owner of %.*s, cleaning everyone else\n",
1616                              dlm->node_num, res->lockname.len, res->lockname.name);
1617                 ret = dlm_dispatch_assert_master(dlm, res, 0, request->node_idx, 
1618                                                  DLM_ASSERT_MASTER_MLE_CLEANUP);
1619                 if (ret < 0) {
1620                         mlog(ML_ERROR, "failed to dispatch assert master work\n");
1621                         response = DLM_MASTER_RESP_ERROR;
1622                         dlm_lockres_put(res);
1623                 }
1624         } else {
1625                 if (res)
1626                         dlm_lockres_put(res);
1627         }
1628
1629         dlm_put(dlm);
1630         return response;
1631 }
1632
1633 /*
1634  * DLM_ASSERT_MASTER_MSG
1635  */
1636
1637
1638 /*
1639  * NOTE: this can be used for debugging
1640  * can periodically run all locks owned by this node
1641  * and re-assert across the cluster...
1642  */
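/*
 * A sketch of that idea; dlm_reassert_one() is a hypothetical helper, not
 * part of this file, and a debug loop would have to call it for every
 * lockres this node currently masters:
 *
 *	static void dlm_reassert_one(struct dlm_ctxt *dlm,
 *				     struct dlm_lock_resource *res)
 *	{
 *		unsigned long nodemap[BITS_TO_LONGS(O2NM_MAX_NODES)];
 *
 *		spin_lock(&dlm->spinlock);
 *		memcpy(nodemap, dlm->domain_map, sizeof(nodemap));
 *		spin_unlock(&dlm->spinlock);
 *		clear_bit(dlm->node_num, nodemap);
 *
 *		if (res->owner == dlm->node_num)
 *			dlm_do_assert_master(dlm, res, nodemap, 0);
 *	}
 */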
1643 static int dlm_do_assert_master(struct dlm_ctxt *dlm,
1644                                 struct dlm_lock_resource *res,
1645                                 void *nodemap, u32 flags)
1646 {
1647         struct dlm_assert_master assert;
1648         int to, tmpret;
1649         struct dlm_node_iter iter;
1650         int ret = 0;
1651         int reassert;
1652         const char *lockname = res->lockname.name;
1653         unsigned int namelen = res->lockname.len;
1654
1655         BUG_ON(namelen > O2NM_MAX_NAME_LEN);
1656
1657         spin_lock(&res->spinlock);
1658         res->state |= DLM_LOCK_RES_SETREF_INPROG;
1659         spin_unlock(&res->spinlock);
1660
1661 again:
1662         reassert = 0;
1663
1664         /* note that if this nodemap is empty, it returns 0 */
1665         dlm_node_iter_init(nodemap, &iter);
1666         while ((to = dlm_node_iter_next(&iter)) >= 0) {
1667                 int r = 0;
1668                 struct dlm_master_list_entry *mle = NULL;
1669
1670                 mlog(0, "sending assert master to %d (%.*s)\n", to,
1671                      namelen, lockname);
1672                 memset(&assert, 0, sizeof(assert));
1673                 assert.node_idx = dlm->node_num;
1674                 assert.namelen = namelen;
1675                 memcpy(assert.name, lockname, namelen);
1676                 assert.flags = cpu_to_be32(flags);
1677
1678                 tmpret = o2net_send_message(DLM_ASSERT_MASTER_MSG, dlm->key,
1679                                             &assert, sizeof(assert), to, &r);
1680                 if (tmpret < 0) {
1681                         mlog(0, "assert_master returned %d!\n", tmpret);
1682                         if (!dlm_is_host_down(tmpret)) {
1683                                 mlog(ML_ERROR, "unhandled error=%d!\n", tmpret);
1684                                 BUG();
1685                         }
1686                         /* a node died.  finish out the rest of the nodes. */
1687                         mlog(0, "link to %d went down!\n", to);
1688                         /* any nonzero status return will do */
1689                         ret = tmpret;
1690                         r = 0;
1691                 } else if (r < 0) {
1692                         /* ok, something is horribly messed up.  kill thyself. */
1693                         mlog(ML_ERROR,"during assert master of %.*s to %u, "
1694                              "got %d.\n", namelen, lockname, to, r);
1695                         spin_lock(&dlm->spinlock);
1696                         spin_lock(&dlm->master_lock);
1697                         if (dlm_find_mle(dlm, &mle, (char *)lockname,
1698                                          namelen)) {
1699                                 dlm_print_one_mle(mle);
1700                                 __dlm_put_mle(mle);
1701                         }
1702                         spin_unlock(&dlm->master_lock);
1703                         spin_unlock(&dlm->spinlock);
1704                         BUG();
1705                 }
1706
1707                 if (r & DLM_ASSERT_RESPONSE_REASSERT &&
1708                     !(r & DLM_ASSERT_RESPONSE_MASTERY_REF)) {
1709                         mlog(ML_ERROR, "%.*s: very strange, "
1710                              "master MLE but no lockres on %u\n",
1711                              namelen, lockname, to);
1712                 }
1713
1714                 if (r & DLM_ASSERT_RESPONSE_REASSERT) {
1715                         mlog(0, "%.*s: node %u created mles on other "
1716                              "nodes and requests a re-assert\n",
1717                              namelen, lockname, to);
1718                         reassert = 1;
1719                 }
1720                 if (r & DLM_ASSERT_RESPONSE_MASTERY_REF) {
1721                         mlog(0, "%.*s: node %u has a reference to this "
1722                              "lockres, set the bit in the refmap\n",
1723                              namelen, lockname, to);
1724                         spin_lock(&res->spinlock);
1725                         dlm_lockres_set_refmap_bit(to, res);
1726                         spin_unlock(&res->spinlock);
1727                 }
1728         }
1729
1730         if (reassert)
1731                 goto again;
1732
1733         spin_lock(&res->spinlock);
1734         res->state &= ~DLM_LOCK_RES_SETREF_INPROG;
1735         spin_unlock(&res->spinlock);
1736         wake_up(&res->wq);
1737
1738         return ret;
1739 }
1740
1741 /*
1742  * locks that can be taken here:
1743  * dlm->spinlock
1744  * res->spinlock
1745  * mle->spinlock
1746  * dlm->master_lock
1747  *
1748  * if possible, TRIM THIS DOWN!!!
1749  */
1750 int dlm_assert_master_handler(struct o2net_msg *msg, u32 len, void *data,
1751                               void **ret_data)
1752 {
1753         struct dlm_ctxt *dlm = data;
1754         struct dlm_master_list_entry *mle = NULL;
1755         struct dlm_assert_master *assert = (struct dlm_assert_master *)msg->buf;
1756         struct dlm_lock_resource *res = NULL;
1757         char *name;
1758         unsigned int namelen, hash;
1759         u32 flags;
1760         int master_request = 0, have_lockres_ref = 0;
1761         int ret = 0;
1762
1763         if (!dlm_grab(dlm))
1764                 return 0;
1765
1766         name = assert->name;
1767         namelen = assert->namelen;
1768         hash = dlm_lockid_hash(name, namelen);
1769         flags = be32_to_cpu(assert->flags);
1770
1771         if (namelen > DLM_LOCKID_NAME_MAX) {
1772                 mlog(ML_ERROR, "Invalid name length!\n");
1773                 goto done;
1774         }
1775
1776         spin_lock(&dlm->spinlock);
1777
1778         if (flags)
1779                 mlog(0, "assert_master with flags: %u\n", flags);
1780
1781         /* find the MLE */
1782         spin_lock(&dlm->master_lock);
1783         if (!dlm_find_mle(dlm, &mle, name, namelen)) {
1784                 /* not an error, could be master just re-asserting */
1785                 mlog(0, "just got an assert_master from %u, but no "
1786                      "MLE for it! (%.*s)\n", assert->node_idx,
1787                      namelen, name);
1788         } else {
1789                 int bit = find_next_bit (mle->maybe_map, O2NM_MAX_NODES, 0);
1790                 if (bit >= O2NM_MAX_NODES) {
1791                         /* not necessarily an error, though less likely.
1792                          * could be master just re-asserting. */
1793                         mlog(0, "no bits set in the maybe_map, but %u "
1794                              "is asserting! (%.*s)\n", assert->node_idx,
1795                              namelen, name);
1796                 } else if (bit != assert->node_idx) {
1797                         if (flags & DLM_ASSERT_MASTER_MLE_CLEANUP) {
1798                                 mlog(0, "master %u was found, %u should "
1799                                      "back off\n", assert->node_idx, bit);
1800                         } else {
1801                                 /* with the fix for bug 569, a higher node
1802                                  * number winning the mastery will respond
1803                                  * YES to mastery requests, but this node
1804                                  * had no way of knowing.  let it pass. */
1805                                 mlog(0, "%u is the lowest node, "
1806                                      "%u is asserting. (%.*s)  %u must "
1807                                      "have begun after %u won.\n", bit,
1808                                      assert->node_idx, namelen, name, bit,
1809                                      assert->node_idx);
1810                         }
1811                 }
1812                 if (mle->type == DLM_MLE_MIGRATION) {
1813                         if (flags & DLM_ASSERT_MASTER_MLE_CLEANUP) {
1814                                 mlog(0, "%s:%.*s: got cleanup assert"
1815                                      " from %u for migration\n",
1816                                      dlm->name, namelen, name,
1817                                      assert->node_idx);
1818                         } else if (!(flags & DLM_ASSERT_MASTER_FINISH_MIGRATION)) {
1819                                 mlog(0, "%s:%.*s: got unrelated assert"
1820                                      " from %u for migration, ignoring\n",
1821                                      dlm->name, namelen, name,
1822                                      assert->node_idx);
1823                                 __dlm_put_mle(mle);
1824                                 spin_unlock(&dlm->master_lock);
1825                                 spin_unlock(&dlm->spinlock);
1826                                 goto done;
1827                         }       
1828                 }
1829         }
1830         spin_unlock(&dlm->master_lock);
1831
1832         /* ok everything checks out with the MLE
1833          * now check to see if there is a lockres */
1834         res = __dlm_lookup_lockres(dlm, name, namelen, hash);
1835         if (res) {
1836                 spin_lock(&res->spinlock);
1837                 if (res->state & DLM_LOCK_RES_RECOVERING)  {
1838                         mlog(ML_ERROR, "%u asserting but %.*s is "
1839                              "RECOVERING!\n", assert->node_idx, namelen, name);
1840                         goto kill;
1841                 }
1842                 if (!mle) {
1843                         if (res->owner != DLM_LOCK_RES_OWNER_UNKNOWN &&
1844                             res->owner != assert->node_idx) {
1845                                 mlog(ML_ERROR, "assert_master from "
1846                                           "%u, but current owner is "
1847                                           "%u! (%.*s)\n",
1848                                        assert->node_idx, res->owner,
1849                                        namelen, name);
1850                                 goto kill;
1851                         }
1852                 } else if (mle->type != DLM_MLE_MIGRATION) {
1853                         if (res->owner != DLM_LOCK_RES_OWNER_UNKNOWN) {
1854                                 /* owner is just re-asserting */
1855                                 if (res->owner == assert->node_idx) {
1856                                         mlog(0, "owner %u re-asserting on "
1857                                              "lock %.*s\n", assert->node_idx,
1858                                              namelen, name);
1859                                         goto ok;
1860                                 }
1861                                 mlog(ML_ERROR, "got assert_master from "
1862                                      "node %u, but %u is the owner! "
1863                                      "(%.*s)\n", assert->node_idx,
1864                                      res->owner, namelen, name);
1865                                 goto kill;
1866                         }
1867                         if (!(res->state & DLM_LOCK_RES_IN_PROGRESS)) {
1868                                 mlog(ML_ERROR, "got assert from %u, but lock "
1869                                      "with no owner should be "
1870                                      "in-progress! (%.*s)\n",
1871                                      assert->node_idx,
1872                                      namelen, name);
1873                                 goto kill;
1874                         }
1875                 } else /* mle->type == DLM_MLE_MIGRATION */ {
1876                         /* should only be getting an assert from new master */
1877                         if (assert->node_idx != mle->new_master) {
1878                                 mlog(ML_ERROR, "got assert from %u, but "
1879                                      "new master is %u, and old master "
1880                                      "was %u (%.*s)\n",
1881                                      assert->node_idx, mle->new_master,
1882                                      mle->master, namelen, name);
1883                                 goto kill;
1884                         }
1885
1886                 }
1887 ok:
1888                 spin_unlock(&res->spinlock);
1889         }
1890         spin_unlock(&dlm->spinlock);
1891
1892         // mlog(0, "woo!  got an assert_master from node %u!\n",
1893         //           assert->node_idx);
1894         if (mle) {
1895                 int extra_ref = 0;
1896                 int nn = -1;
1897                 int rr, err = 0;
1898                 
1899                 spin_lock(&mle->spinlock);
1900                 if (mle->type == DLM_MLE_BLOCK || mle->type == DLM_MLE_MIGRATION)
1901                         extra_ref = 1;
1902                 else {
1903                         /* MASTER mle: if any bits set in the response map
1904                          * then the calling node needs to re-assert to clear
1905                          * up nodes that this node contacted */
1906                         while ((nn = find_next_bit (mle->response_map, O2NM_MAX_NODES, 
1907                                                     nn+1)) < O2NM_MAX_NODES) {
1908                                 if (nn != dlm->node_num && nn != assert->node_idx)
1909                                         master_request = 1;
1910                         }
1911                 }
1912                 mle->master = assert->node_idx;
1913                 atomic_set(&mle->woken, 1);
1914                 wake_up(&mle->wq);
1915                 spin_unlock(&mle->spinlock);
1916
1917                 if (res) {
1918                         int wake = 0;
1919                         spin_lock(&res->spinlock);
1920                         if (mle->type == DLM_MLE_MIGRATION) {
1921                                 mlog(0, "finishing off migration of lockres %.*s, "
1922                                         "from %u to %u\n",
1923                                         res->lockname.len, res->lockname.name,
1924                                         dlm->node_num, mle->new_master);
1925                                 res->state &= ~DLM_LOCK_RES_MIGRATING;
1926                                 wake = 1;
1927                                 dlm_change_lockres_owner(dlm, res, mle->new_master);
1928                                 BUG_ON(res->state & DLM_LOCK_RES_DIRTY);
1929                         } else {
1930                                 dlm_change_lockres_owner(dlm, res, mle->master);
1931                         }
1932                         spin_unlock(&res->spinlock);
1933                         have_lockres_ref = 1;
1934                         if (wake)
1935                                 wake_up(&res->wq);
1936                 }
1937
1938                 /* master is known, detach if not already detached.
1939                  * ensures that only one assert_master call will happen
1940                  * on this mle. */
1941                 spin_lock(&dlm->spinlock);
1942                 spin_lock(&dlm->master_lock);
1943
1944                 rr = atomic_read(&mle->mle_refs.refcount);
1945                 if (mle->inuse > 0) {
1946                         if (extra_ref && rr < 3)
1947                                 err = 1;
1948                         else if (!extra_ref && rr < 2)
1949                                 err = 1;
1950                 } else {
1951                         if (extra_ref && rr < 2)
1952                                 err = 1;
1953                         else if (!extra_ref && rr < 1)
1954                                 err = 1;
1955                 }
1956                 if (err) {
1957                         mlog(ML_ERROR, "%s:%.*s: got assert master from %u "
1958                              "that will mess up this node, refs=%d, extra=%d, "
1959                              "inuse=%d\n", dlm->name, namelen, name,
1960                              assert->node_idx, rr, extra_ref, mle->inuse);
1961                         dlm_print_one_mle(mle);
1962                 }
1963                 list_del_init(&mle->list);
1964                 __dlm_mle_detach_hb_events(dlm, mle);
1965                 __dlm_put_mle(mle);
1966                 if (extra_ref) {
1967                         /* the assert master message now balances the extra
1968                          * ref given by the master / migration request message.
1969                          * if this is the last put, it will be removed
1970                          * from the list. */
1971                         __dlm_put_mle(mle);
1972                 }
1973                 spin_unlock(&dlm->master_lock);
1974                 spin_unlock(&dlm->spinlock);
1975         } else if (res) {
1976                 if (res->owner != assert->node_idx) {
1977                         mlog(0, "assert_master from %u, but current "
1978                              "owner is %u (%.*s), no mle\n", assert->node_idx,
1979                              res->owner, namelen, name);
1980                 }
1981         }
1982
1983 done:
1984         ret = 0;
1985         if (res) {
1986                 spin_lock(&res->spinlock);
1987                 res->state |= DLM_LOCK_RES_SETREF_INPROG;
1988                 spin_unlock(&res->spinlock);
1989                 *ret_data = (void *)res;
1990         }
1991         dlm_put(dlm);
1992         if (master_request) {
1993                 mlog(0, "need to tell master to reassert\n");
1994                 /* positive. negative would shoot down the node. */
1995                 ret |= DLM_ASSERT_RESPONSE_REASSERT;
1996                 if (!have_lockres_ref) {
1997                         mlog(ML_ERROR, "strange, got assert from %u, MASTER "
1998                              "mle present here for %s:%.*s, but no lockres!\n",
1999                              assert->node_idx, dlm->name, namelen, name);
2000                 }
2001         }
2002         if (have_lockres_ref) {
2003                 /* let the master know we have a reference to the lockres */
2004                 ret |= DLM_ASSERT_RESPONSE_MASTERY_REF;
2005                 mlog(0, "%s:%.*s: got assert from %u, need a ref\n",
2006                      dlm->name, namelen, name, assert->node_idx);
2007         }
2008         return ret;
2009
2010 kill:
2011         /* kill the caller! */
2012         mlog(ML_ERROR, "Bad message received from another node.  Dumping state "
2013              "and killing the other node now!  This node is OK and can continue.\n");
2014         __dlm_print_one_lock_resource(res);
2015         spin_unlock(&res->spinlock);
2016         spin_unlock(&dlm->spinlock);
2017         *ret_data = (void *)res; 
2018         dlm_put(dlm);
2019         return -EINVAL;
2020 }
2021
2022 void dlm_assert_master_post_handler(int status, void *data, void *ret_data)
2023 {
2024         struct dlm_lock_resource *res = (struct dlm_lock_resource *)ret_data;
2025
2026         if (ret_data) {
2027                 spin_lock(&res->spinlock);
2028                 res->state &= ~DLM_LOCK_RES_SETREF_INPROG;
2029                 spin_unlock(&res->spinlock);
2030                 wake_up(&res->wq);
2031                 dlm_lockres_put(res);
2032         }
2033         return;
2034 }
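
/*
 * The handler above stashes the lockres in *ret_data with SETREF_INPROG set,
 * and o2net runs this post handler only after the reply (carrying the
 * REASSERT/MASTERY_REF bits) has actually gone out, so the flag is cleared
 * and the reference dropped strictly after the master has seen our answer.
 * The pairing is made at domain init time in dlmdomain.c, roughly (from
 * memory, not verbatim):
 *
 *	o2net_register_handler(DLM_ASSERT_MASTER_MSG, dlm->key,
 *			       sizeof(struct dlm_assert_master),
 *			       dlm_assert_master_handler, dlm,
 *			       dlm_assert_master_post_handler,
 *			       &dlm->dlm_domain_handlers);
 */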
2035
2036 int dlm_dispatch_assert_master(struct dlm_ctxt *dlm,
2037                                struct dlm_lock_resource *res,
2038                                int ignore_higher, u8 request_from, u32 flags)
2039 {
2040         struct dlm_work_item *item;
2041         item = kzalloc(sizeof(*item), GFP_NOFS);
2042         if (!item)
2043                 return -ENOMEM;
2044
2045
2046         /* queue up work for dlm_assert_master_worker */
2047         dlm_grab(dlm);  /* get an extra ref for the work item */
2048         dlm_init_work_item(dlm, item, dlm_assert_master_worker, NULL);
2049         item->u.am.lockres = res; /* already have a ref */
2050         /* can optionally ignore node numbers higher than this node */
2051         item->u.am.ignore_higher = ignore_higher;
2052         item->u.am.request_from = request_from;
2053         item->u.am.flags = flags;
2054
2055         if (ignore_higher) 
2056                 mlog(0, "IGNORE HIGHER: %.*s\n", res->lockname.len, 
2057                      res->lockname.name);
2058                 
2059         spin_lock(&dlm->work_lock);
2060         list_add_tail(&item->list, &dlm->work_list);
2061         spin_unlock(&dlm->work_lock);
2062
2063         queue_work(dlm->dlm_worker, &dlm->dispatched_work);
2064         return 0;
2065 }
2066
2067 static void dlm_assert_master_worker(struct dlm_work_item *item, void *data)
2068 {
2069         struct dlm_ctxt *dlm = data;
2070         int ret = 0;
2071         struct dlm_lock_resource *res;
2072         unsigned long nodemap[BITS_TO_LONGS(O2NM_MAX_NODES)];
2073         int ignore_higher;
2074         int bit;
2075         u8 request_from;
2076         u32 flags;
2077
2078         dlm = item->dlm;
2079         res = item->u.am.lockres;
2080         ignore_higher = item->u.am.ignore_higher;
2081         request_from = item->u.am.request_from;
2082         flags = item->u.am.flags;
2083
2084         spin_lock(&dlm->spinlock);
2085         memcpy(nodemap, dlm->domain_map, sizeof(nodemap));
2086         spin_unlock(&dlm->spinlock);
2087
2088         clear_bit(dlm->node_num, nodemap);
2089         if (ignore_higher) {
2090                 /* if this is just to clear up mles for nodes below
2091                  * this node, do not send the message to the original
2092                  * caller or any node number higher than this */
2093                 clear_bit(request_from, nodemap);
2094                 bit = dlm->node_num;
2095                 while (1) {
2096                         bit = find_next_bit(nodemap, O2NM_MAX_NODES,
2097                                             bit+1);
2098                         if (bit >= O2NM_MAX_NODES)
2099                                 break;
2100                         clear_bit(bit, nodemap);
2101                 }
2102         }
2103
2104         /*
2105          * If we're migrating this lock to someone else, we are no
2106          * longer allowed to assert our own mastery.  OTOH, we need to
2107          * prevent migration from starting while we're still asserting
2108          * our dominance.  The reserved ast delays migration.
2109          */
2110         spin_lock(&res->spinlock);
2111         if (res->state & DLM_LOCK_RES_MIGRATING) {
2112                 mlog(0, "Someone asked us to assert mastery, but we're "
2113                      "in the middle of migration.  Skipping assert, "
2114                      "the new master will handle that.\n");
2115                 spin_unlock(&res->spinlock);
2116                 goto put;
2117         } else
2118                 __dlm_lockres_reserve_ast(res);
2119         spin_unlock(&res->spinlock);
2120
2121         /* this call now finishes out the nodemap
2122          * even if one or more nodes die */
2123         mlog(0, "worker about to master %.*s here, this=%u\n",
2124                      res->lockname.len, res->lockname.name, dlm->node_num);
2125         ret = dlm_do_assert_master(dlm, res, nodemap, flags);
2126         if (ret < 0) {
2127                 /* no need to restart, we are done */
2128                 if (!dlm_is_host_down(ret))
2129                         mlog_errno(ret);
2130         }
2131
2132         /* Ok, we've asserted ourselves.  Let's let migration start. */
2133         dlm_lockres_release_ast(dlm, res);
2134
2135 put:
2136         dlm_lockres_put(res);
2137
2138         mlog(0, "finished with dlm_assert_master_worker\n");
2139 }
2140
2141 /* SPECIAL CASE for the $RECOVERY lock used by the recovery thread.
2142  * We cannot wait for node recovery to complete to begin mastering this
2143  * lockres because this lockres is used to kick off recovery! ;-)
2144  * So, do a pre-check on all living nodes to see if any of those nodes
2145  * think that $RECOVERY is currently mastered by a dead node.  If so,
2146  * we wait a short time to allow that node to get notified by its own
2147  * heartbeat stack, then check again.  All $RECOVERY lock resources
2148  * mastered by dead nodes are purged when the heartbeat callback is
2149  * fired, so we can know for sure that it is safe to continue once
2150  * the queried node reports a live master or no master at all.  */
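/*
 * A sketch of how this is driven from dlm_get_lock_resource() (that caller
 * and dlm_is_recovery_lock() are outside this excerpt; paraphrased, not
 * verbatim): only the recovery lockid takes this path, and -EAGAIN just
 * means "sleep briefly and ask again".
 *
 *	if (dlm_is_recovery_lock(lockid, namelen)) {
 *		do {
 *			ret = dlm_pre_master_reco_lockres(dlm, res);
 *			if (ret == -EAGAIN)
 *				msleep(100);
 *		} while (ret == -EAGAIN);
 *	}
 */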
2151 static int dlm_pre_master_reco_lockres(struct dlm_ctxt *dlm,
2152                                        struct dlm_lock_resource *res)
2153 {
2154         struct dlm_node_iter iter;
2155         int nodenum;
2156         int ret = 0;
2157         u8 master = DLM_LOCK_RES_OWNER_UNKNOWN;
2158
2159         spin_lock(&dlm->spinlock);
2160         dlm_node_iter_init(dlm->domain_map, &iter);
2161         spin_unlock(&dlm->spinlock);
2162
2163         while ((nodenum = dlm_node_iter_next(&iter)) >= 0) {
2164                 /* do not send to self */
2165                 if (nodenum == dlm->node_num)
2166                         continue;
2167                 ret = dlm_do_master_requery(dlm, res, nodenum, &master);
2168                 if (ret < 0) {
2169                         mlog_errno(ret);
2170                         if (!dlm_is_host_down(ret))
2171                                 BUG();
2172                         /* host is down, so answer for that node would be
2173                          * DLM_LOCK_RES_OWNER_UNKNOWN.  continue. */
2174                         ret = 0;
2175                 }
2176
2177                 if (master != DLM_LOCK_RES_OWNER_UNKNOWN) {
2178                         /* check to see if this master is in the recovery map */
2179                         spin_lock(&dlm->spinlock);
2180                         if (test_bit(master, dlm->recovery_map)) {
2181                                 mlog(ML_NOTICE, "%s: node %u has not seen "
2182                                      "node %u go down yet, and thinks the "
2183                                      "dead node is mastering the recovery "
2184                                      "lock.  must wait.\n", dlm->name,
2185                                      nodenum, master);
2186                                 ret = -EAGAIN;
2187                         }
2188                         spin_unlock(&dlm->spinlock);
2189                         mlog(0, "%s: reco lock master is %u\n", dlm->name, 
2190                              master);
2191                         break;
2192                 }
2193         }
2194         return ret;
2195 }
2196
2197 /*
2198  * DLM_DEREF_LOCKRES_MSG
2199  */
2200
2201 int dlm_drop_lockres_ref(struct dlm_ctxt *dlm, struct dlm_lock_resource *res)
2202 {
2203         struct dlm_deref_lockres deref;
2204         int ret = 0, r;
2205         const char *lockname;
2206         unsigned int namelen;
2207
2208         lockname = res->lockname.name;
2209         namelen = res->lockname.len;
2210         BUG_ON(namelen > O2NM_MAX_NAME_LEN);
2211
2212         mlog(0, "%s:%.*s: sending deref to %d\n",
2213              dlm->name, namelen, lockname, res->owner);
2214         memset(&deref, 0, sizeof(deref));
2215         deref.node_idx = dlm->node_num;
2216         deref.namelen = namelen;
2217         memcpy(deref.name, lockname, namelen);
2218
2219         ret = o2net_send_message(DLM_DEREF_LOCKRES_MSG, dlm->key,
2220                                  &deref, sizeof(deref), res->owner, &r);
2221         if (ret < 0)
2222                 mlog_errno(ret);
2223         else if (r < 0) {
2224                 /* BAD.  other node says I did not have a ref. */
2225                 mlog(ML_ERROR,"while dropping ref on %s:%.*s "
2226                     "(master=%u) got %d.\n", dlm->name, namelen,
2227                     lockname, res->owner, r);
2228                 dlm_print_one_lock_resource(res);
2229                 BUG();
2230         }
2231         return ret;
2232 }
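
/*
 * A sketch of the expected caller, the lockres purge path in dlmthread.c
 * (paraphrased from memory, not verbatim): a non-owner node about to free
 * an unused lockres marks it DROPPING_REF and asks the owner to clear this
 * node's bit in its refmap before the local structure goes away.
 *
 *	spin_lock(&res->spinlock);
 *	res->state |= DLM_LOCK_RES_DROPPING_REF;
 *	spin_unlock(&res->spinlock);
 *
 *	ret = dlm_drop_lockres_ref(dlm, res);
 *	if (ret < 0)
 *		mlog_errno(ret);
 */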
2233
2234 int dlm_deref_lockres_handler(struct o2net_msg *msg, u32 len, void *data,
2235                               void **ret_data)
2236 {
2237         struct dlm_ctxt *dlm = data;
2238         struct dlm_deref_lockres *deref = (struct dlm_deref_lockres *)msg->buf;
2239         struct dlm_lock_resource *res = NULL;
2240         char *name;
2241         unsigned int namelen;
2242         int ret = -EINVAL;
2243         u8 node;
2244         unsigned int hash;
2245         struct dlm_work_item *item;
2246         int cleared = 0;
2247         int dispatch = 0;
2248
2249         if (!dlm_grab(dlm))
2250                 return 0;
2251
2252         name = deref->name;
2253         namelen = deref->namelen;
2254         node = deref->node_idx;
2255
2256         if (namelen > DLM_LOCKID_NAME_MAX) {
2257                 mlog(ML_ERROR, "Invalid name length!\n");
2258                 goto done;
2259         }
2260         if (deref->node_idx >= O2NM_MAX_NODES) {
2261                 mlog(ML_ERROR, "Invalid node number: %u\n", node);
2262                 goto done;
2263         }
2264
2265         hash = dlm_lockid_hash(name, namelen);
2266
2267         spin_lock(&dlm->spinlock);
2268         res = __dlm_lookup_lockres_full(dlm, name, namelen, hash);
2269         if (!res) {
2270                 spin_unlock(&dlm->spinlock);
2271                 mlog(ML_ERROR, "%s:%.*s: bad lockres name\n",
2272                      dlm->name, namelen, name);
2273                 goto done;
2274         }
2275         spin_unlock(&dlm->spinlock);
2276
2277         spin_lock(&res->spinlock);
2278         if (res->state & DLM_LOCK_RES_SETREF_INPROG)
2279                 dispatch = 1;
2280         else {
2281                 BUG_ON(res->state & DLM_LOCK_RES_DROPPING_REF);
2282                 if (test_bit(node, res->refmap)) {
2283                         dlm_lockres_clear_refmap_bit(node, res);
2284                         cleared = 1;
2285                 }
2286         }
2287         spin_unlock(&res->spinlock);
2288
2289         if (!dispatch) {
2290                 if (cleared)
2291                         dlm_lockres_calc_usage(dlm, res);
2292                 else {
2293                         mlog(ML_ERROR, "%s:%.*s: node %u trying to drop ref "
2294                              "but it is already dropped!\n", dlm->name,
2295                              res->lockname.len, res->lockname.name, node);
2296                         dlm_print_one_lock_resource(res);
2297                 }
2298                 ret = 0;
2299                 goto done;
2300         }
2301
2302         item = kzalloc(sizeof(*item), GFP_NOFS);
2303         if (!item) {
2304                 ret = -ENOMEM;
2305                 mlog_errno(ret);
2306                 goto done;
2307         }
2308
2309         dlm_init_work_item(dlm, item, dlm_deref_lockres_worker, NULL);
2310         item->u.dl.deref_res = res;
2311         item->u.dl.deref_node = node;
2312
2313         spin_lock(&dlm->work_lock);
2314         list_add_tail(&item->list, &dlm->work_list);
2315         spin_unlock(&dlm->work_lock);
2316
2317         queue_work(dlm->dlm_worker, &dlm->dispatched_work);
2318         return 0;
2319
2320 done:
2321         if (res)
2322                 dlm_lockres_put(res);
2323         dlm_put(dlm);
2324
2325         return ret;
2326 }
2327
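/*
 * Why the deref is sometimes deferred to a worker: while an assert_master
 * reply carrying the MASTERY_REF bit is still in flight, the lockres has
 * DLM_LOCK_RES_SETREF_INPROG set (see dlm_assert_master_handler() and
 * dlm_assert_master_post_handler() above).  A deref racing with that reply
 * must not clear the refmap bit early, so the handler queues it here, where
 * the flag can be waited on first:
 *
 *	__dlm_wait_on_lockres_flags(res, DLM_LOCK_RES_SETREF_INPROG);
 */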
2328 static void dlm_deref_lockres_worker(struct dlm_work_item *item, void *data)
2329 {
2330         struct dlm_ctxt *dlm;
2331         struct dlm_lock_resource *res;
2332         u8 node;
2333         u8 cleared = 0;
2334
2335         dlm = item->dlm;
2336         res = item->u.dl.deref_res;
2337         node = item->u.dl.deref_node;
2338
2339         spin_lock(&res->spinlock);
2340         BUG_ON(res->state & DLM_LOCK_RES_DROPPING_REF);
2341         if (test_bit(node, res->refmap)) {
2342                 __dlm_wait_on_lockres_flags(res, DLM_LOCK_RES_SETREF_INPROG);
2343                 dlm_lockres_clear_refmap_bit(node, res);
2344                 cleared = 1;
2345         }
2346         spin_unlock(&res->spinlock);
2347
2348         if (cleared) {
2349                 mlog(0, "%s:%.*s node %u ref dropped in dispatch\n",
2350                      dlm->name, res->lockname.len, res->lockname.name, node);
2351                 dlm_lockres_calc_usage(dlm, res);
2352         } else {
2353                 mlog(ML_ERROR, "%s:%.*s: node %u trying to drop ref "
2354                      "but it is already dropped!\n", dlm->name,
2355                      res->lockname.len, res->lockname.name, node);
2356                 dlm_print_one_lock_resource(res);
2357         }
2358
2359         dlm_lockres_put(res);
2360 }
2361
2362 /* Checks whether the lockres can be migrated. Returns 0 if yes, < 0
2363  * if not. If 0, numlocks is set to the number of locks in the lockres.
2364  */
2365 static int dlm_is_lockres_migrateable(struct dlm_ctxt *dlm,
2366                                       struct dlm_lock_resource *res,
2367                                       int *numlocks)
2368 {
2369         int ret;
2370         int i;
2371         int count = 0;
2372         struct list_head *queue;
2373         struct dlm_lock *lock;
2374
2375         assert_spin_locked(&res->spinlock);
2376
2377         ret = -EINVAL;
2378         if (res->owner == DLM_LOCK_RES_OWNER_UNKNOWN) {
2379                 mlog(0, "cannot migrate lockres with unknown owner!\n");
2380                 goto leave;
2381         }
2382
2383         if (res->owner != dlm->node_num) {
2384                 mlog(0, "cannot migrate lockres this node doesn't own!\n");
2385                 goto leave;
2386         }
2387
2388         ret = 0;
2389         queue = &res->granted;
2390         for (i = 0; i < 3; i++) {
2391                 list_for_each_entry(lock, queue, list) {
2392                         ++count;
2393                         if (lock->ml.node == dlm->node_num) {
2394                                 mlog(0, "found a lock owned by this node still "
2395                                      "on the %s queue!  will not migrate this "
2396                                      "lockres\n", (i == 0 ? "granted" :
2397                                                    (i == 1 ? "converting" :
2398                                                     "blocked")));
2399                                 ret = -ENOTEMPTY;
2400                                 goto leave;
2401                         }
2402                 }
2403                 queue++;
2404         }
2405
2406         *numlocks = count;
2407         mlog(0, "migrateable lockres having %d locks\n", *numlocks);
2408
2409 leave:
2410         return ret;
2411 }
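
/*
 * The queue++ walk above relies on the three lock queues sitting next to
 * each other in struct dlm_lock_resource (see dlmcommon.h); sketched, not
 * the verbatim definition:
 *
 *	struct dlm_lock_resource {
 *		...
 *		struct list_head granted;
 *		struct list_head converting;
 *		struct list_head blocked;
 *		...
 *	};
 */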
2412
2413 /*
2414  * DLM_MIGRATE_LOCKRES
2415  */
2416
2417
2418 static int dlm_migrate_lockres(struct dlm_ctxt *dlm,
2419                                struct dlm_lock_resource *res,
2420                                u8 target)
2421 {
2422         struct dlm_master_list_entry *mle = NULL;
2423         struct dlm_master_list_entry *oldmle = NULL;
2424         struct dlm_migratable_lockres *mres = NULL;
2425         int ret = 0;
2426         const char *name;
2427         unsigned int namelen;
2428         int mle_added = 0;
2429         int numlocks;
2430         int wake = 0;
2431
2432         if (!dlm_grab(dlm))
2433                 return -EINVAL;
2434
2435         name = res->lockname.name;
2436         namelen = res->lockname.len;
2437
2438         mlog(0, "migrating %.*s to %u\n", namelen, name, target);
2439
2440         /*
2441          * ensure this lockres is a proper candidate for migration
2442          */
2443         spin_lock(&res->spinlock);
2444         ret = dlm_is_lockres_migrateable(dlm, res, &numlocks);
2445         if (ret < 0) {
2446                 spin_unlock(&res->spinlock);
2447                 goto leave;
2448         }
2449         spin_unlock(&res->spinlock);
2450
2451         /* no work to do */
2452         if (numlocks == 0) {
2453                 mlog(0, "no locks were found on this lockres! done!\n");
2454                 goto leave;
2455         }
2456
2457         /*
2458          * preallocate up front
2459          * if this fails, abort
2460          */
2461
2462         ret = -ENOMEM;
2463         mres = (struct dlm_migratable_lockres *) __get_free_page(GFP_NOFS);
2464         if (!mres) {
2465                 mlog_errno(ret);
2466                 goto leave;
2467         }
2468
2469         mle = (struct dlm_master_list_entry *) kmem_cache_alloc(dlm_mle_cache,
2470                                                                 GFP_NOFS);
2471         if (!mle) {
2472                 mlog_errno(ret);
2473                 goto leave;
2474         }
2475         ret = 0;
2476
2477         /*
2478          * find a node to migrate the lockres to
2479          */
2480
2481         mlog(0, "picking a migration node\n");
2482         spin_lock(&dlm->spinlock);
2483         /* pick a new node */
2484         if (!test_bit(target, dlm->domain_map) ||
2485             target >= O2NM_MAX_NODES) {
2486                 target = dlm_pick_migration_target(dlm, res);
2487         }
2488         mlog(0, "node %u chosen for migration\n", target);
2489
2490         if (target >= O2NM_MAX_NODES ||
2491             !test_bit(target, dlm->domain_map)) {
2492                 /* target chosen is not alive */
2493                 ret = -EINVAL;
2494         }
2495
2496         if (ret) {
2497                 spin_unlock(&dlm->spinlock);
2498                 goto fail;
2499         }
2500
2501         mlog(0, "continuing with target = %u\n", target);
2502
2503         /*
2504          * clear any existing master requests and
2505          * add the migration mle to the list
2506          */
2507         spin_lock(&dlm->master_lock);
2508         ret = dlm_add_migration_mle(dlm, res, mle, &oldmle, name,
2509                                     namelen, target, dlm->node_num);
2510         spin_unlock(&dlm->master_lock);
2511         spin_unlock(&dlm->spinlock);
2512
2513         if (ret == -EEXIST) {
2514                 mlog(0, "another process is already migrating it\n");
2515                 goto fail;
2516         }
2517         mle_added = 1;
2518
2519         /*
2520          * set the MIGRATING flag and flush asts
2521          * if we fail after this we need to re-dirty the lockres
2522          */
2523         if (dlm_mark_lockres_migrating(dlm, res, target) < 0) {
2524                 mlog(ML_ERROR, "tried to migrate %.*s to %u, but "
2525                      "the target went down.\n", res->lockname.len,
2526                      res->lockname.name, target);
2527                 spin_lock(&res->spinlock);
2528                 res->state &= ~DLM_LOCK_RES_MIGRATING;
2529                 wake = 1;
2530                 spin_unlock(&res->spinlock);
2531                 ret = -EINVAL;
2532         }
2533
2534 fail:
2535         if (oldmle) {
2536                 /* master is known, detach if not already detached */
2537                 dlm_mle_detach_hb_events(dlm, oldmle);
2538                 dlm_put_mle(oldmle);
2539         }
2540
2541         if (ret < 0) {
2542                 if (mle_added) {
2543                         dlm_mle_detach_hb_events(dlm, mle);
2544                         dlm_put_mle(mle);
2545                 } else if (mle) {
2546                         kmem_cache_free(dlm_mle_cache, mle);
2547                 }
2548                 goto leave;
2549         }
2550
2551         /*
2552          * at this point, we have a migration target, an mle
2553          * in the master list, and the MIGRATING flag set on
2554          * the lockres
2555          */
2556
2557         /* now that remote nodes are spinning on the MIGRATING flag,
2558          * ensure that all assert_master work is flushed. */
2559         flush_workqueue(dlm->dlm_worker);
2560
2561         /* get an extra reference on the mle.
2562          * otherwise the assert_master from the new
2563          * master will destroy this.
2564          * also, make sure that all callers of dlm_get_mle
2565          * take both dlm->spinlock and dlm->master_lock */
2566         spin_lock(&dlm->spinlock);
2567         spin_lock(&dlm->master_lock);
2568         dlm_get_mle_inuse(mle);
2569         spin_unlock(&dlm->master_lock);
2570         spin_unlock(&dlm->spinlock);
2571
2572         /* notify new node and send all lock state */
2573         /* call send_one_lockres with migration flag.
2574          * this serves as notice to the target node that a
2575          * migration is starting. */
2576         ret = dlm_send_one_lockres(dlm, res, mres, target,
2577                                    DLM_MRES_MIGRATION);
2578
2579         if (ret < 0) {
2580                 mlog(0, "migration to node %u failed with %d\n",
2581                      target, ret);
2582                 /* migration failed, detach and clean up mle */
2583                 dlm_mle_detach_hb_events(dlm, mle);
2584                 dlm_put_mle(mle);
2585                 dlm_put_mle_inuse(mle);
2586                 spin_lock(&res->spinlock);
2587                 res->state &= ~DLM_LOCK_RES_MIGRATING;
2588                 wake = 1;
2589                 spin_unlock(&res->spinlock);
2590                 goto leave;
2591         }
2592
2593         /* at this point, the target sends a message to all nodes,
2594          * (using dlm_do_migrate_request).  this node is skipped since
2595          * we had to put an mle in the list to begin the process.  this
2596          * node now waits for target to do an assert master.  this node
2597          * will be the last one notified, ensuring that the migration
2598          * is complete everywhere.  if the target dies while this is
2599          * going on, some nodes could potentially see the target as the
2600          * master, so it is important that my recovery finds the migration
2601  * mle and sets the master to UNKNOWN. */
2602
2603
2604         /* wait for new node to assert master */
2605         while (1) {
2606                 ret = wait_event_interruptible_timeout(mle->wq,
2607                                         (atomic_read(&mle->woken) == 1),
2608                                         msecs_to_jiffies(5000));
2609
2610                 if (ret >= 0) {
2611                         if (atomic_read(&mle->woken) == 1 ||
2612                             res->owner == target)
2613                                 break;
2614
2615                         mlog(0, "%s:%.*s: timed out during migration\n",
2616                              dlm->name, res->lockname.len, res->lockname.name);
2617                         /* avoid hang during shutdown when migrating lockres 
2618                          * to a node which also goes down */
2619                         if (dlm_is_node_dead(dlm, target)) {
2620                                 mlog(0, "%s:%.*s: expected migration "
2621                                      "target %u is no longer up, restarting\n",
2622                                      dlm->name, res->lockname.len,
2623                                      res->lockname.name, target);
2624                                 ret = -EINVAL;
2625                                 /* migration failed, detach and clean up mle */
2626                                 dlm_mle_detach_hb_events(dlm, mle);
2627                                 dlm_put_mle(mle);
2628                                 dlm_put_mle_inuse(mle);
2629                                 spin_lock(&res->spinlock);
2630                                 res->state &= ~DLM_LOCK_RES_MIGRATING;
2631                                 wake = 1;
2632                                 spin_unlock(&res->spinlock);
2633                                 goto leave;
2634                         }
2635                 } else
2636                         mlog(0, "%s:%.*s: caught signal during migration\n",
2637                              dlm->name, res->lockname.len, res->lockname.name);
2638         }
2639
2640         /* all done, set the owner, clear the flag */
2641         spin_lock(&res->spinlock);
2642         dlm_set_lockres_owner(dlm, res, target);
2643         res->state &= ~DLM_LOCK_RES_MIGRATING;
2644         dlm_remove_nonlocal_locks(dlm, res);
2645         spin_unlock(&res->spinlock);
2646         wake_up(&res->wq);
2647
2648         /* master is known, detach if not already detached */
2649         dlm_mle_detach_hb_events(dlm, mle);
2650         dlm_put_mle_inuse(mle);
2651         ret = 0;
2652
2653         dlm_lockres_calc_usage(dlm, res);
2654
2655 leave:
2656         /* re-dirty the lockres if we failed */
2657         if (ret < 0)
2658                 dlm_kick_thread(dlm, res);
2659
2660         /* wake up waiters if the MIGRATING flag got set
2661          * but migration failed */
2662         if (wake)
2663                 wake_up(&res->wq);
2664
2665         /* TODO: cleanup */
2666         if (mres)
2667                 free_page((unsigned long)mres);
2668
2669         dlm_put(dlm);
2670
2671         mlog(0, "returning %d\n", ret);
2672         return ret;
2673 }
2674
2675 #define DLM_MIGRATION_RETRY_MS  100
2676
2677 /* Should be called only after beginning the domain leave process.
2678  * There should not be any remaining locks on nonlocal lock resources,
2679  * and there should be no local locks left on locally mastered resources.
2680  *
2681  * Called with the dlm spinlock held, may drop it to do migration, but
2682  * will re-acquire before exit.
2683  *
2684  * Returns: 1 if dlm->spinlock was dropped/retaken, 0 if never dropped */
2685 int dlm_empty_lockres(struct dlm_ctxt *dlm, struct dlm_lock_resource *res)
2686 {
2687         int ret;
2688         int lock_dropped = 0;
2689         int numlocks;
2690
2691         spin_lock(&res->spinlock);
2692         if (res->owner != dlm->node_num) {
2693                 if (!__dlm_lockres_unused(res)) {
2694                         mlog(ML_ERROR, "%s:%.*s: this node is not master, "
2695                              "trying to free this but locks remain\n",
2696                              dlm->name, res->lockname.len, res->lockname.name);
2697                 }
2698                 spin_unlock(&res->spinlock);
2699                 goto leave;
2700         }
2701
2702         /* No need to migrate a lockres having no locks */
2703         ret = dlm_is_lockres_migrateable(dlm, res, &numlocks);
2704         if (ret >= 0 && numlocks == 0) {
2705                 spin_unlock(&res->spinlock);
2706                 goto leave;
2707         }
2708         spin_unlock(&res->spinlock);
2709
2710         /* Wheee! Migrate lockres here! Will sleep so drop spinlock. */
2711         spin_unlock(&dlm->spinlock);
2712         lock_dropped = 1;
2713         while (1) {
2714                 ret = dlm_migrate_lockres(dlm, res, O2NM_MAX_NODES);
2715                 if (ret >= 0)
2716                         break;
2717                 if (ret == -ENOTEMPTY) {
2718                         mlog(ML_ERROR, "lockres %.*s still has local locks!\n",
2719                                 res->lockname.len, res->lockname.name);
2720                         BUG();
2721                 }
2722
2723                 mlog(0, "lockres %.*s: migrate failed, "
2724                      "retrying\n", res->lockname.len,
2725                      res->lockname.name);
2726                 msleep(DLM_MIGRATION_RETRY_MS);
2727         }
2728         spin_lock(&dlm->spinlock);
2729 leave:
2730         return lock_dropped;
2731 }
2732
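/* Returns nonzero once this lock has no queued basts and no bast in
 * flight; checked under dlm->ast_lock and the lock's spinlock. */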
2733 int dlm_lock_basts_flushed(struct dlm_ctxt *dlm, struct dlm_lock *lock)
2734 {
2735         int ret;
2736         spin_lock(&dlm->ast_lock);
2737         spin_lock(&lock->spinlock);
2738         ret = (list_empty(&lock->bast_list) && !lock->bast_pending);
2739         spin_unlock(&lock->spinlock);
2740         spin_unlock(&dlm->ast_lock);
2741         return ret;
2742 }
2743
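/* Migration may proceed once DLM_LOCK_RES_MIGRATING is set on the
 * lockres, or once the target has dropped out of the domain map (the
 * caller must then recheck the domain_map and handle the dead target). */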
2744 static int dlm_migration_can_proceed(struct dlm_ctxt *dlm,
2745                                      struct dlm_lock_resource *res,
2746                                      u8 mig_target)
2747 {
2748         int can_proceed;
2749         spin_lock(&res->spinlock);
2750         can_proceed = !!(res->state & DLM_LOCK_RES_MIGRATING);
2751         spin_unlock(&res->spinlock);
2752
2753         /* if the target has died, let the caller break out of the
2754          * wait_event; the caller must then recheck the domain_map */
2755         spin_lock(&dlm->spinlock);
2756         if (!test_bit(mig_target, dlm->domain_map))
2757                 can_proceed = 1;
2758         spin_unlock(&dlm->spinlock);
2759         return can_proceed;
2760 }
2761
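/* Returns nonzero while DLM_LOCK_RES_DIRTY is still set; used below as
 * the wait condition when flushing the lockres before migration. */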
2762 static int dlm_lockres_is_dirty(struct dlm_ctxt *dlm,
2763                                 struct dlm_lock_resource *res)
2764 {
2765         int ret;
2766         spin_lock(&res->spinlock);
2767         ret = !!(res->state & DLM_LOCK_RES_DIRTY);
2768         spin_unlock(&res->spinlock);
2769         return ret;
2770 }
2771
2772
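/* Drive the lockres into the MIGRATING state: flag the pending
 * migration, flush any queued asts, block new dirtying of the lockres,
 * then wait until the last reserved ast is dropped and MIGRATING is
 * actually set.  Returns -EHOSTDOWN if the target dies in the interim. */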
2773 static int dlm_mark_lockres_migrating(struct dlm_ctxt *dlm,
2774                                        struct dlm_lock_resource *res,
2775                                        u8 target)
2776 {
2777         int ret = 0;
2778
2779         mlog(0, "dlm_mark_lockres_migrating: %.*s, from %u to %u\n",
2780                res->lockname.len, res->lockname.name, dlm->node_num,
2781                target);
2782         /* need to set MIGRATING flag on lockres.  this is done by
2783          * ensuring that all asts have been flushed for this lockres. */
2784         spin_lock(&res->spinlock);
2785         BUG_ON(res->migration_pending);
2786         res->migration_pending = 1;
2787         /* strategy is to reserve an extra ast then release
2788          * it below, letting the release do all of the work */
2789         __dlm_lockres_reserve_ast(res);
2790         spin_unlock(&res->spinlock);
2791
2792         /* now flush all the pending asts */
2793         dlm_kick_thread(dlm, res);
2794         /* before waiting on DIRTY, block processes which may
2795          * try to dirty the lockres before MIGRATING is set */
2796         spin_lock(&res->spinlock);
2797         BUG_ON(res->state & DLM_LOCK_RES_BLOCK_DIRTY);
2798         res->state |= DLM_LOCK_RES_BLOCK_DIRTY;
2799         spin_unlock(&res->spinlock);
2800         /* now wait on any pending asts and the DIRTY state */
2801         wait_event(dlm->ast_wq, !dlm_lockres_is_dirty(dlm, res));
2802         dlm_lockres_release_ast(dlm, res);
2803
2804         mlog(0, "about to wait on migration_wq, dirty=%s\n",
2805                res->state & DLM_LOCK_RES_DIRTY ? "yes" : "no");
2806         /* if the extra ref we just put was the final one, this
2807          * will pass thru immediately.  otherwise, we need to wait
2808          * for the last ast to finish. */
2809 again:
2810         ret = wait_event_interruptible_timeout(dlm->migration_wq,
2811                    dlm_migration_can_proceed(dlm, res, target),
2812                    msecs_to_jiffies(1000));
2813         if (ret < 0) {
2814                 mlog(0, "woken again: migrating? %s, dead? %s\n",
2815                        res->state & DLM_LOCK_RES_MIGRATING ? "yes":"no",
2816                        test_bit(target, dlm->domain_map) ? "no":"yes");
2817         } else {
2818                 mlog(0, "all is well: migrating? %s, dead? %s\n",
2819                        res->state & DLM_LOCK_RES_MIGRATING ? "yes":"no",
2820                        test_bit(target, dlm->domain_map) ? "no":"yes");
2821         }
2822         if (!dlm_migration_can_proceed(dlm, res, target)) {
2823                 mlog(0, "trying again...\n");
2824                 goto again;
2825         }
2826         /* now that we are sure the MIGRATING state is there, drop
2827          * the unneeded state which blocked threads trying to DIRTY */
2828         spin_lock(&res->spinlock);
2829         BUG_ON(!(res->state & DLM_LOCK_RES_BLOCK_DIRTY));
2830         BUG_ON(!(res->state & DLM_LOCK_RES_MIGRATING));
2831         res->state &= ~DLM_LOCK_RES_BLOCK_DIRTY;
2832         spin_unlock(&res->spinlock);
2833
2834         /* did the target go down or die? */
2835         spin_lock(&dlm->spinlock);
2836         if (!test_bit(target, dlm->domain_map)) {
2837                 mlog(ML_ERROR, "aha. migration target %u just went down\n",
2838                      target);
2839                 ret = -EHOSTDOWN;
2840         }
2841         spin_unlock(&dlm->spinlock);
2842
2843         /*
2844          * at this point:
2845          *
2846          *   o the DLM_LOCK_RES_MIGRATING flag is set
2847          *   o there are no pending asts on this lockres
2848          *   o all processes trying to reserve an ast on this
2849          *     lockres must wait for the MIGRATING flag to clear
2850          */
2851         return ret;
2852 }
2853
2854 /* last step in the migration process.
2855  * original master calls this to free all of the dlm_lock
2856  * structures that used to be for other nodes. */
2857 static void dlm_remove_nonlocal_locks(struct dlm_ctxt *dlm,
2858                                       struct dlm_lock_resource *res)
2859 {
2860         struct list_head *queue = &res->granted;
2861         int i, bit;
2862         struct dlm_lock *lock, *next;
2863
2864         assert_spin_locked(&res->spinlock);
2865
2866         BUG_ON(res->owner == dlm->node_num);
2867
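        /* granted, converting and blocked are consecutive list_heads in
         * the lockres, so queue++ steps through all three queues */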
2868         for (i=0; i<3; i++) {
2869                 list_for_each_entry_safe(lock, next, queue, list) {
2870                         if (lock->ml.node != dlm->node_num) {
2871                                 mlog(0, "putting lock for node %u\n",
2872                                      lock->ml.node);
2873                                 /* be extra careful */
2874                                 BUG_ON(!list_empty(&lock->ast_list));
2875                                 BUG_ON(!list_empty(&lock->bast_list));
2876                                 BUG_ON(lock->ast_pending);
2877                                 BUG_ON(lock->bast_pending);
2878                                 dlm_lockres_clear_refmap_bit(lock->ml.node, res);
2879                                 list_del_init(&lock->list);
2880                                 dlm_lock_put(lock);
2881                                 /* In a normal unlock, we would have added a
2882                                  * DLM_UNLOCK_FREE_LOCK action. Force it. */
2883                                 dlm_lock_put(lock);
2884                         }
2885                 }
2886                 queue++;
2887         }
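        /* the lockres is moving to a new master, so drop every remote
         * node's bit from the refmap; only the local ref may remain */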
2888         bit = 0;
2889         while (1) {
2890                 bit = find_next_bit(res->refmap, O2NM_MAX_NODES, bit);
2891                 if (bit >= O2NM_MAX_NODES)
2892                         break;
2893                 /* do not clear the local node reference, if there is a
2894                  * process holding this, let it drop the ref itself */
2895                 if (bit != dlm->node_num) {
2896                         mlog(0, "%s:%.*s: node %u had a ref to this "
2897                              "migrating lockres, clearing\n", dlm->name,
2898                              res->lockname.len, res->lockname.name, bit);
2899                         dlm_lockres_clear_refmap_bit(bit, res);
2900                 }
2901                 bit++;
2902         }
2903 }
2904
2905 /* for now this is not too intelligent.  we will
2906  * need stats to make this do the right thing.
2907  * this just finds the first lock on one of the
2908  * queues and uses that node as the target. */
2909 static u8 dlm_pick_migration_target(struct dlm_ctxt *dlm,
2910                                     struct dlm_lock_resource *res)
2911 {
2912         int i;
2913         struct list_head *queue = &res->granted;
2914         struct dlm_lock *lock;
2915         int nodenum;
2916
2917         assert_spin_locked(&dlm->spinlock);
2918
2919         spin_lock(&res->spinlock);
2920         for (i=0; i<3; i++) {
2921                 list_for_each_entry(lock, queue, list) {
2922                         /* up to the caller to make sure this node
2923                          * is alive */
2924                         if (lock->ml.node != dlm->node_num) {
2925                                 spin_unlock(&res->spinlock);
2926                                 return lock->ml.node;
2927                         }
2928                 }
2929                 queue++;
2930         }
2931         spin_unlock(&res->spinlock);
2932         mlog(0, "have not found a suitable target yet! checking domain map\n");
2933
2934         /* ok now we're getting desperate.  pick anyone alive. */
2935         nodenum = -1;
2936         while (1) {
2937                 nodenum = find_next_bit(dlm->domain_map,
2938                                         O2NM_MAX_NODES, nodenum+1);
2939                 mlog(0, "found %d in domain map\n", nodenum);
2940                 if (nodenum >= O2NM_MAX_NODES)
2941                         break;
2942                 if (nodenum != dlm->node_num) {
2943                         mlog(0, "picking %d\n", nodenum);
2944                         return nodenum;
2945                 }
2946         }
2947
2948         mlog(0, "giving up.  no master to migrate to\n");
2949         return DLM_LOCK_RES_OWNER_UNKNOWN;
2950 }
2951
2952
2953
2954 /* this is called by the new master once all lockres
2955  * data has been received */
2956 static int dlm_do_migrate_request(struct dlm_ctxt *dlm,
2957                                   struct dlm_lock_resource *res,
2958                                   u8 master, u8 new_master,
2959                                   struct dlm_node_iter *iter)
2960 {
2961         struct dlm_migrate_request migrate;
2962         int ret, skip, status = 0;
2963         int nodenum;
2964
2965         memset(&migrate, 0, sizeof(migrate));
2966         migrate.namelen = res->lockname.len;
2967         memcpy(migrate.name, res->lockname.name, migrate.namelen);
2968         migrate.new_master = new_master;
2969         migrate.master = master;
2970
2971         ret = 0;
2972
2973         /* send message to all nodes, except the master and myself */
2974         while ((nodenum = dlm_node_iter_next(iter)) >= 0) {
2975                 if (nodenum == master ||
2976                     nodenum == new_master)
2977                         continue;
2978
2979                 /* We could race exit domain. If exited, skip. */
2980                 spin_lock(&dlm->spinlock);
2981                 skip = (!test_bit(nodenum, dlm->domain_map));
2982                 spin_unlock(&dlm->spinlock);
2983                 if (skip) {
2984                         clear_bit(nodenum, iter->node_map);
2985                         continue;
2986                 }
2987
2988                 ret = o2net_send_message(DLM_MIGRATE_REQUEST_MSG, dlm->key,
2989                                          &migrate, sizeof(migrate), nodenum,
2990                                          &status);
2991                 if (ret < 0) {
2992                         mlog(0, "migrate_request returned %d!\n", ret);
2993                         if (!dlm_is_host_down(ret)) {
2994                                 mlog(ML_ERROR, "unhandled error=%d!\n", ret);
2995                                 BUG();
2996                         }
2997                         clear_bit(nodenum, iter->node_map);
2998                         ret = 0;
2999                 } else if (status < 0) {
3000                         mlog(0, "migrate request (node %u) returned %d!\n",
3001                              nodenum, status);
3002                         ret = status;
3003                 } else if (status == DLM_MIGRATE_RESPONSE_MASTERY_REF) {
3004                         /* during the migration request we short-circuited
3005                          * the mastery of the lockres.  make sure we have
3006                          * a mastery ref for nodenum */
3007                         mlog(0, "%s:%.*s: need ref for node %u\n",
3008                              dlm->name, res->lockname.len, res->lockname.name,
3009                              nodenum);
3010                         spin_lock(&res->spinlock);
3011                         dlm_lockres_set_refmap_bit(nodenum, res);
3012                         spin_unlock(&res->spinlock);
3013                 }
3014         }
3015
3016         if (ret < 0)
3017                 mlog_errno(ret);
3018
3019         mlog(0, "returning ret=%d\n", ret);
3020         return ret;
3021 }
3022
3023
3024 /* if there is an existing mle for this lockres, we now know who the master is.
3025  * (the one who sent us *this* message) we can clear it up right away.
3026  * since the process that put the mle on the list still has a reference to it,
3027  * we can unhash it now, set the master and wake the process.  as a result,
3028  * we will have no mle in the list to start with.  now we can add an mle for
3029  * the migration and this should be the only one found for those scanning the
3030  * list.  */
3031 int dlm_migrate_request_handler(struct o2net_msg *msg, u32 len, void *data,
3032                                 void **ret_data)
3033 {
3034         struct dlm_ctxt *dlm = data;
3035         struct dlm_lock_resource *res = NULL;
3036         struct dlm_migrate_request *migrate = (struct dlm_migrate_request *) msg->buf;
3037         struct dlm_master_list_entry *mle = NULL, *oldmle = NULL;
3038         const char *name;
3039         unsigned int namelen, hash;
3040         int ret = 0;
3041
3042         if (!dlm_grab(dlm))
3043                 return -EINVAL;
3044
3045         name = migrate->name;
3046         namelen = migrate->namelen;
3047         hash = dlm_lockid_hash(name, namelen);
3048
3049         /* preallocate.. if this fails, abort */
3050         mle = (struct dlm_master_list_entry *) kmem_cache_alloc(dlm_mle_cache,
3051                                                          GFP_NOFS);
3052
3053         if (!mle) {
3054                 ret = -ENOMEM;
3055                 goto leave;
3056         }
3057
3058         /* check for pre-existing lock */
3059         spin_lock(&dlm->spinlock);
3060         res = __dlm_lookup_lockres(dlm, name, namelen, hash);
3061         spin_lock(&dlm->master_lock);
3062
3063         if (res) {
3064                 spin_lock(&res->spinlock);
3065                 if (res->state & DLM_LOCK_RES_RECOVERING) {
3066                         /* if all is working ok, this can only mean that we got
3067                          * a migrate request from a node that we now see as
3068                          * dead.  what can we do here?  drop it to the floor? */
3069                         spin_unlock(&res->spinlock);
3070                         mlog(ML_ERROR, "Got a migrate request, but the "
3071                              "lockres is marked as recovering!");
3072                         kmem_cache_free(dlm_mle_cache, mle);
3073                         ret = -EINVAL; /* need a better solution */
3074                         goto unlock;
3075                 }
3076                 res->state |= DLM_LOCK_RES_MIGRATING;
3077                 spin_unlock(&res->spinlock);
3078         }
3079
3080         /* ignore status.  only nonzero status would BUG. */
3081         ret = dlm_add_migration_mle(dlm, res, mle, &oldmle,
3082                                     name, namelen,
3083                                     migrate->new_master,
3084                                     migrate->master);
3085
3086 unlock:
3087         spin_unlock(&dlm->master_lock);
3088         spin_unlock(&dlm->spinlock);
3089
3090         if (oldmle) {
3091                 /* master is known, detach if not already detached */
3092                 dlm_mle_detach_hb_events(dlm, oldmle);
3093                 dlm_put_mle(oldmle);
3094         }
3095
3096         if (res)
3097                 dlm_lockres_put(res);
3098 leave:
3099         dlm_put(dlm);
3100         return ret;
3101 }
3102
3103 /* must be holding dlm->spinlock and dlm->master_lock
3104  * when adding a migration mle, we can clear any other mles
3105  * in the master list because we know with certainty that
3106  * the master is "master".  so we remove any old mle from
3107  * the list after setting its master field, and then add
3108  * the new migration mle.  this way we can hold with the rule
3109  * of having only one mle for a given lock name at all times. */
3110 static int dlm_add_migration_mle(struct dlm_ctxt *dlm,
3111                                  struct dlm_lock_resource *res,
3112                                  struct dlm_master_list_entry *mle,
3113                                  struct dlm_master_list_entry **oldmle,
3114                                  const char *name, unsigned int namelen,
3115                                  u8 new_master, u8 master)
3116 {
3117         int found;
3118         int ret = 0;
3119
3120         *oldmle = NULL;
3121
3122         mlog_entry_void();
3123
3124         assert_spin_locked(&dlm->spinlock);
3125         assert_spin_locked(&dlm->master_lock);
3126
3127         /* caller is responsible for any ref taken here on oldmle */
3128         found = dlm_find_mle(dlm, oldmle, (char *)name, namelen);
3129         if (found) {
3130                 struct dlm_master_list_entry *tmp = *oldmle;
3131                 spin_lock(&tmp->spinlock);
3132                 if (tmp->type == DLM_MLE_MIGRATION) {
3133                         if (master == dlm->node_num) {
3134                                 /* ah another process raced me to it */
3135                                 mlog(0, "tried to migrate %.*s, but some "
3136                                      "process beat me to it\n",
3137                                      namelen, name);
3138                                 ret = -EEXIST;
3139                         } else {
3140                                 /* bad.  2 NODES are trying to migrate! */
3141                                 mlog(ML_ERROR, "migration error  mle: "
3142                                      "master=%u new_master=%u // request: "
3143                                      "master=%u new_master=%u // "
3144                                      "lockres=%.*s\n",
3145                                      tmp->master, tmp->new_master,
3146                                      master, new_master,
3147                                      namelen, name);
3148                                 BUG();
3149                         }
3150                 } else {
3151                         /* this is essentially what assert_master does */
3152                         tmp->master = master;
3153                         atomic_set(&tmp->woken, 1);
3154                         wake_up(&tmp->wq);
3155                         /* remove it from the list so that only one
3156                          * mle will be found */
3157                         list_del_init(&tmp->list);
3158                         /* detach tmp here, not mle, which is still uninitialized */
3159                         __dlm_mle_detach_hb_events(dlm, tmp);
3160                         ret = DLM_MIGRATE_RESPONSE_MASTERY_REF;
3161                         mlog(0, "%s:%.*s: master=%u, newmaster=%u, "
3162                             "telling master to get ref for cleared out mle "
3163                             "during migration\n", dlm->name, namelen, name,
3164                             master, new_master);
3165                 }
3166                 spin_unlock(&tmp->spinlock);
3167         }
3168
3169         /* now add a migration mle to the tail of the list */
3170         dlm_init_mle(mle, DLM_MLE_MIGRATION, dlm, res, name, namelen);
3171         mle->new_master = new_master;
3172         /* the new master will be sending an assert master for this.
3173          * at that point we will get the refmap reference */
3174         mle->master = master;
3175         /* do this for consistency with other mle types */
3176         set_bit(new_master, mle->maybe_map);
3177         list_add(&mle->list, &dlm->master_list);
3178
3179         return ret;
3180 }
3181
3182
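/* Called with dlm->spinlock held when a node dies: walk the master list
 * and resolve every mle involving the dead node.  BLOCK mles that were
 * expecting the dead node to master are woken and released; MIGRATION
 * mles whose old or new master died are unlinked and any matching
 * lockres is marked owner-UNKNOWN and queued for recovery.  The scan may
 * drop dlm->master_lock and restart from the top. */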
3183 void dlm_clean_master_list(struct dlm_ctxt *dlm, u8 dead_node)
3184 {
3185         struct dlm_master_list_entry *mle, *next;
3186         struct dlm_lock_resource *res;
3187         unsigned int hash;
3188
3189         mlog_entry("dlm=%s, dead node=%u\n", dlm->name, dead_node);
3190 top:
3191         assert_spin_locked(&dlm->spinlock);
3192
3193         /* clean the master list */
3194         spin_lock(&dlm->master_lock);
3195         list_for_each_entry_safe(mle, next, &dlm->master_list, list) {
3196                 BUG_ON(mle->type != DLM_MLE_BLOCK &&
3197                        mle->type != DLM_MLE_MASTER &&
3198                        mle->type != DLM_MLE_MIGRATION);
3199
3200                 /* MASTER mles are initiated locally.  the waiting
3201                  * process will notice the node map change
3202                  * shortly.  let that happen as normal. */
3203                 if (mle->type == DLM_MLE_MASTER)
3204                         continue;
3205
3206
3207                 /* BLOCK mles are initiated by other nodes.
3208                  * need to clean up if the dead node would have
3209                  * been the master. */
3210                 if (mle->type == DLM_MLE_BLOCK) {
3211                         int bit;
3212
3213                         spin_lock(&mle->spinlock);
3214                         bit = find_next_bit(mle->maybe_map, O2NM_MAX_NODES, 0);
3215                         if (bit != dead_node) {
3216                                 mlog(0, "mle found, but dead node %u would "
3217                                      "not have been master\n", dead_node);
3218                                 spin_unlock(&mle->spinlock);
3219                         } else {
3220                                 /* must drop the refcount by one since the
3221                                  * assert_master will never arrive.  this
3222                                  * may result in the mle being unlinked and
3223                                  * freed, but there may still be a process
3224                                  * waiting in the dlmlock path which is fine. */
3225                                 mlog(0, "node %u was expected master\n",
3226                                      dead_node);
3227                                 atomic_set(&mle->woken, 1);
3228                                 spin_unlock(&mle->spinlock);
3229                                 wake_up(&mle->wq);
3230                                 /* do not need events any longer, so detach 
3231                                  * from heartbeat */
3232                                 __dlm_mle_detach_hb_events(dlm, mle);
3233                                 __dlm_put_mle(mle);
3234                         }
3235                         continue;
3236                 }
3237
3238                 /* everything else is a MIGRATION mle */
3239
3240                 /* the rule for MIGRATION mles is that the master
3241                  * becomes UNKNOWN if *either* the original or
3242                  * the new master dies.  all UNKNOWN lockreses
3243                  * are sent to whichever node becomes the recovery
3244                  * master.  the new master is responsible for
3245                  * determining if there is still a master for
3246                  * this lockres, or if he needs to take over
3247                  * mastery.  either way, this node should expect
3248                  * another message to resolve this. */
3249                 if (mle->master != dead_node &&
3250                     mle->new_master != dead_node)
3251                         continue;
3252
3253                 /* if we have reached this point, this mle needs to
3254                  * be removed from the list and freed. */
3255
3256                 /* remove from the list early.  NOTE: unlinking
3257                  * list_head while in list_for_each_safe */
3258                 __dlm_mle_detach_hb_events(dlm, mle);
3259                 spin_lock(&mle->spinlock);
3260                 list_del_init(&mle->list);
3261                 atomic_set(&mle->woken, 1);
3262                 spin_unlock(&mle->spinlock);
3263                 wake_up(&mle->wq);
3264
3265                 mlog(0, "%s: node %u died during migration from "
3266                      "%u to %u!\n", dlm->name, dead_node,
3267                      mle->master, mle->new_master);
3268                 /* if there is a lockres associated with this
3269                  * mle, find it and set its owner to UNKNOWN */
3270                 hash = dlm_lockid_hash(mle->u.name.name, mle->u.name.len);
3271                 res = __dlm_lookup_lockres(dlm, mle->u.name.name,
3272                                            mle->u.name.len, hash);
3273                 if (res) {
3274                         /* unfortunately if we hit this rare case, our
3275                          * lock ordering is messed.  we need to drop
3276                          * the master lock so that we can take the
3277                          * lockres lock, meaning that we will have to
3278                          * restart from the head of list. */
3279                         spin_unlock(&dlm->master_lock);
3280
3281                         /* move lockres onto recovery list */
3282                         spin_lock(&res->spinlock);
3283                         dlm_set_lockres_owner(dlm, res,
3284                                         DLM_LOCK_RES_OWNER_UNKNOWN);
3285                         dlm_move_lockres_to_recovery_list(dlm, res);
3286                         spin_unlock(&res->spinlock);
3287                         dlm_lockres_put(res);
3288
3289                         /* about to get rid of mle, detach from heartbeat */
3290                         __dlm_mle_detach_hb_events(dlm, mle);
3291
3292                         /* dump the mle */
3293                         spin_lock(&dlm->master_lock);
3294                         __dlm_put_mle(mle);
3295                         spin_unlock(&dlm->master_lock);
3296
3297                         /* restart */
3298                         goto top;
3299                 }
3300
3301                 /* this may be the last reference */
3302                 __dlm_put_mle(mle);
3303         }
3304         spin_unlock(&dlm->master_lock);
3305 }
3306
3307
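/* Final step of migration, run on the target once all lock state has
 * arrived: account a mastery ref for the old master, send the migrate
 * request to the remaining nodes, assert mastery to them and then to
 * the old master, and finally take ownership of the lockres locally. */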
3308 int dlm_finish_migration(struct dlm_ctxt *dlm, struct dlm_lock_resource *res,
3309                          u8 old_master)
3310 {
3311         struct dlm_node_iter iter;
3312         int ret = 0;
3313
3314         spin_lock(&dlm->spinlock);
3315         dlm_node_iter_init(dlm->domain_map, &iter);
3316         clear_bit(old_master, iter.node_map);
3317         clear_bit(dlm->node_num, iter.node_map);
3318         spin_unlock(&dlm->spinlock);
3319
3320         /* ownership of the lockres is changing.  account for the
3321          * mastery reference here since old_master will briefly have
3322          * a reference after the migration completes */
3323         spin_lock(&res->spinlock);
3324         dlm_lockres_set_refmap_bit(old_master, res);
3325         spin_unlock(&res->spinlock);
3326
3327         mlog(0, "now time to do a migrate request to other nodes\n");
3328         ret = dlm_do_migrate_request(dlm, res, old_master,
3329                                      dlm->node_num, &iter);
3330         if (ret < 0) {
3331                 mlog_errno(ret);
3332                 goto leave;
3333         }
3334
3335         mlog(0, "doing assert master of %.*s to all except the original node\n",
3336              res->lockname.len, res->lockname.name);
3337         /* this call now finishes out the nodemap
3338          * even if one or more nodes die */
3339         ret = dlm_do_assert_master(dlm, res, iter.node_map,
3340                                    DLM_ASSERT_MASTER_FINISH_MIGRATION);
3341         if (ret < 0) {
3342                 /* no longer need to retry.  all living nodes contacted. */
3343                 mlog_errno(ret);
3344                 ret = 0;
3345         }
3346
3347         memset(iter.node_map, 0, sizeof(iter.node_map));
3348         set_bit(old_master, iter.node_map);
3349         mlog(0, "doing assert master of %.*s back to %u\n",
3350              res->lockname.len, res->lockname.name, old_master);
3351         ret = dlm_do_assert_master(dlm, res, iter.node_map,
3352                                    DLM_ASSERT_MASTER_FINISH_MIGRATION);
3353         if (ret < 0) {
3354                 mlog(0, "assert master to original master failed "
3355                      "with %d.\n", ret);
3356                 /* the only nonzero status here would be because of
3357                  * a dead original node.  we're done. */
3358                 ret = 0;
3359         }
3360
3361         /* all done, set the owner, clear the flag */
3362         spin_lock(&res->spinlock);
3363         dlm_set_lockres_owner(dlm, res, dlm->node_num);
3364         res->state &= ~DLM_LOCK_RES_MIGRATING;
3365         spin_unlock(&res->spinlock);
3366         /* re-dirty it on the new master */
3367         dlm_kick_thread(dlm, res);
3368         wake_up(&res->wq);
3369 leave:
3370         return ret;
3371 }
3372
3373 /*
3374  * LOCKRES AST REFCOUNT
3375  * this is integral to migration
3376  */
3377
3378 /* for future intent to call an ast, reserve one ahead of time.
3379  * this should be called only after waiting on the lockres
3380  * with dlm_wait_on_lockres, and while still holding the
3381  * spinlock after the call. */
3382 void __dlm_lockres_reserve_ast(struct dlm_lock_resource *res)
3383 {
3384         assert_spin_locked(&res->spinlock);
3385         if (res->state & DLM_LOCK_RES_MIGRATING) {
3386                 __dlm_print_one_lock_resource(res);
3387         }
3388         BUG_ON(res->state & DLM_LOCK_RES_MIGRATING);
3389
3390         atomic_inc(&res->asts_reserved);
3391 }
3392
3393 /*
3394  * used to drop the reserved ast, either because it went unused,
3395  * or because the ast/bast was actually called.
3396  *
3397  * also, if there is a pending migration on this lockres,
3398  * and this was the last pending ast on the lockres,
3399  * atomically set the MIGRATING flag before we drop the lock.
3400  * this is how we ensure that migration can proceed with no
3401  * asts in progress.  note that it is ok if the state of the
3402  * queues is such that a lock should be granted in the future
3403  * or that a bast should be fired, because the new master will
3404  * shuffle the lists on this lockres as soon as it is migrated.
3405  */
3406 void dlm_lockres_release_ast(struct dlm_ctxt *dlm,
3407                              struct dlm_lock_resource *res)
3408 {
3409         if (!atomic_dec_and_lock(&res->asts_reserved, &res->spinlock))
3410                 return;
3411
3412         if (!res->migration_pending) {
3413                 spin_unlock(&res->spinlock);
3414                 return;
3415         }
3416
3417         BUG_ON(res->state & DLM_LOCK_RES_MIGRATING);
3418         res->migration_pending = 0;
3419         res->state |= DLM_LOCK_RES_MIGRATING;
3420         spin_unlock(&res->spinlock);
3421         wake_up(&res->wq);
3422         wake_up(&dlm->migration_wq);
3423 }