[GFS2] Fix a bug: scheduling under a spinlock
fs/gfs2/glock.c [linux-2.6.git]

/*
 * Copyright (C) Sistina Software, Inc.  1997-2003 All rights reserved.
 * Copyright (C) 2004-2005 Red Hat, Inc.  All rights reserved.
 *
 * This copyrighted material is made available to anyone wishing to use,
 * modify, copy, or redistribute it subject to the terms and conditions
 * of the GNU General Public License v.2.
 */

#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/completion.h>
#include <linux/buffer_head.h>
#include <linux/delay.h>
#include <linux/sort.h>
#include <linux/jhash.h>
#include <linux/kref.h>
#include <linux/kallsyms.h>
#include <linux/gfs2_ondisk.h>
#include <asm/semaphore.h>
#include <asm/uaccess.h>

#include "gfs2.h"
#include "lm_interface.h"
#include "incore.h"
#include "glock.h"
#include "glops.h"
#include "inode.h"
#include "lm.h"
#include "lops.h"
#include "meta_io.h"
#include "quota.h"
#include "super.h"
#include "util.h"

/*  Must be kept in sync with the beginning of struct gfs2_glock  */
struct glock_plug {
        struct list_head gl_list;
        unsigned long gl_flags;
};

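/* Bundles the holder used to queue the drop of greedy status with the
   delayed work that performs it; see gfs2_glock_be_greedy() and
   greedy_work() below. */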
struct greedy {
        struct gfs2_holder gr_gh;
        struct work_struct gr_work;
};

typedef void (*glock_examiner) (struct gfs2_glock *gl);

/**
 * relaxed_state_ok - is a requested lock compatible with the current lock mode?
 * @actual: the current state of the lock
 * @requested: the lock state that was requested by the caller
 * @flags: the modifier flags passed in by the caller
 *
 * Returns: 1 if the locks are compatible, 0 otherwise
 */

static inline int relaxed_state_ok(unsigned int actual, unsigned requested,
                                   int flags)
{
        if (actual == requested)
                return 1;

        if (flags & GL_EXACT)
                return 0;

        if (actual == LM_ST_EXCLUSIVE && requested == LM_ST_SHARED)
                return 1;

        if (actual != LM_ST_UNLOCKED && (flags & LM_FLAG_ANY))
                return 1;

        return 0;
}
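
/*
 * Example: a request for LM_ST_SHARED is satisfied by a glock already held
 * in LM_ST_EXCLUSIVE, and a request with LM_FLAG_ANY by any state other
 * than LM_ST_UNLOCKED -- unless GL_EXACT is set, which demands an exact
 * state match.
 */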

/**
 * gl_hash() - Turn glock number into hash bucket number
 * @name: The lock name
 *
 * Returns: The number of the corresponding hash bucket
 */

static unsigned int gl_hash(struct lm_lockname *name)
{
        unsigned int h;

        h = jhash(&name->ln_number, sizeof(uint64_t), 0);
        h = jhash(&name->ln_type, sizeof(unsigned int), h);
        h &= GFS2_GL_HASH_MASK;

        return h;
}

/**
 * glock_free() - Release a struct gfs2_glock
 * @gl: The glock to release
 *
 * Also calls lock module to release its internal structure for this glock.
 *
 */

static void glock_free(struct gfs2_glock *gl)
{
        struct gfs2_sbd *sdp = gl->gl_sbd;
        struct inode *aspace = gl->gl_aspace;

        gfs2_lm_put_lock(sdp, gl->gl_lock);

        if (aspace)
                gfs2_aspace_put(aspace);

        kmem_cache_free(gfs2_glock_cachep, gl);
}

/**
 * gfs2_glock_hold() - increment reference count on glock
 * @gl: The glock to hold
 *
 */

void gfs2_glock_hold(struct gfs2_glock *gl)
{
        kref_get(&gl->gl_ref);
}

/* All work is done after the return from kref_put() so we
   can release the write_lock before the free. */

static void kill_glock(struct kref *kref)
{
        struct gfs2_glock *gl = container_of(kref, struct gfs2_glock, gl_ref);
        struct gfs2_sbd *sdp = gl->gl_sbd;

        gfs2_assert(sdp, gl->gl_state == LM_ST_UNLOCKED);
        gfs2_assert(sdp, list_empty(&gl->gl_reclaim));
        gfs2_assert(sdp, list_empty(&gl->gl_holders));
        gfs2_assert(sdp, list_empty(&gl->gl_waiters1));
        gfs2_assert(sdp, list_empty(&gl->gl_waiters2));
        gfs2_assert(sdp, list_empty(&gl->gl_waiters3));
}

/**
 * gfs2_glock_put() - Decrement reference count on glock
 * @gl: The glock to put
 *
 * Returns: 1 if the glock was freed, 0 otherwise
 */

int gfs2_glock_put(struct gfs2_glock *gl)
{
        struct gfs2_sbd *sdp = gl->gl_sbd;
        struct gfs2_gl_hash_bucket *bucket = gl->gl_bucket;
        int rv = 0;

        mutex_lock(&sdp->sd_invalidate_inodes_mutex);

        write_lock(&bucket->hb_lock);
        if (kref_put(&gl->gl_ref, kill_glock)) {
                list_del_init(&gl->gl_list);
                write_unlock(&bucket->hb_lock);
                BUG_ON(spin_is_locked(&gl->gl_spin));
                glock_free(gl);
                rv = 1;
                goto out;
        }
        write_unlock(&bucket->hb_lock);
 out:
        mutex_unlock(&sdp->sd_invalidate_inodes_mutex);
        return rv;
}

/**
 * queue_empty - check to see if a glock's queue is empty
 * @gl: the glock
 * @head: the head of the queue to check
 *
 * This function protects the list in the event that a process already
 * has a holder on the list and is adding a second holder for itself.
 * The glmutex lock is what generally prevents processes from working
 * on the same glock at once, but the special case of adding a second
 * holder for yourself ("recursive" locking) doesn't involve locking
 * glmutex, making the spin lock necessary.
 *
 * Returns: 1 if the queue is empty
 */

static inline int queue_empty(struct gfs2_glock *gl, struct list_head *head)
{
        int empty;
        spin_lock(&gl->gl_spin);
        empty = list_empty(head);
        spin_unlock(&gl->gl_spin);
        return empty;
}

/**
 * search_bucket() - Find struct gfs2_glock by lock number
 * @bucket: the bucket to search
 * @name: The lock name
 *
 * Returns: NULL, or the struct gfs2_glock with the requested number
 */

static struct gfs2_glock *search_bucket(struct gfs2_gl_hash_bucket *bucket,
                                        struct lm_lockname *name)
{
        struct gfs2_glock *gl;

        list_for_each_entry(gl, &bucket->hb_list, gl_list) {
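                /* Skip struct glock_plug placeholders, apparently parked
                   in the list while other code walks the bucket. */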
                if (test_bit(GLF_PLUG, &gl->gl_flags))
                        continue;
                if (!lm_name_equal(&gl->gl_name, name))
                        continue;

                kref_get(&gl->gl_ref);

                return gl;
        }

        return NULL;
}

/**
 * gfs2_glock_find() - Find glock by lock number
 * @sdp: The GFS2 superblock
 * @name: The lock name
 *
 * Returns: NULL, or the struct gfs2_glock with the requested number
 */

struct gfs2_glock *gfs2_glock_find(struct gfs2_sbd *sdp,
                                   struct lm_lockname *name)
{
        struct gfs2_gl_hash_bucket *bucket = &sdp->sd_gl_hash[gl_hash(name)];
        struct gfs2_glock *gl;

        read_lock(&bucket->hb_lock);
        gl = search_bucket(bucket, name);
        read_unlock(&bucket->hb_lock);

        return gl;
}

/**
 * gfs2_glock_get() - Get a glock, or create one if one doesn't exist
 * @sdp: The GFS2 superblock
 * @number: the lock number
 * @glops: The glock_operations to use
 * @create: If 0, don't create the glock if it doesn't exist
 * @glp: the glock is returned here
 *
 * This does not lock a glock, just finds/creates structures for one.
 *
 * Returns: errno
 */

int gfs2_glock_get(struct gfs2_sbd *sdp, uint64_t number,
                   struct gfs2_glock_operations *glops, int create,
                   struct gfs2_glock **glp)
{
        struct lm_lockname name;
        struct gfs2_glock *gl, *tmp;
        struct gfs2_gl_hash_bucket *bucket;
        int error;

        name.ln_number = number;
        name.ln_type = glops->go_type;
        bucket = &sdp->sd_gl_hash[gl_hash(&name)];

        read_lock(&bucket->hb_lock);
        gl = search_bucket(bucket, &name);
        read_unlock(&bucket->hb_lock);

        if (gl || !create) {
                *glp = gl;
                return 0;
        }

        gl = kmem_cache_alloc(gfs2_glock_cachep, GFP_KERNEL);
        if (!gl)
                return -ENOMEM;

        memset(gl, 0, sizeof(struct gfs2_glock));

        INIT_LIST_HEAD(&gl->gl_list);
        gl->gl_name = name;
        kref_init(&gl->gl_ref);

        spin_lock_init(&gl->gl_spin);

        gl->gl_state = LM_ST_UNLOCKED;
        INIT_LIST_HEAD(&gl->gl_holders);
        INIT_LIST_HEAD(&gl->gl_waiters1);
        INIT_LIST_HEAD(&gl->gl_waiters2);
        INIT_LIST_HEAD(&gl->gl_waiters3);

        gl->gl_ops = glops;

        gl->gl_bucket = bucket;
        INIT_LIST_HEAD(&gl->gl_reclaim);

        gl->gl_sbd = sdp;

        lops_init_le(&gl->gl_le, &gfs2_glock_lops);
        INIT_LIST_HEAD(&gl->gl_ail_list);

        /* If this glock protects actual on-disk data or metadata blocks,
           create a VFS inode to manage the pages/buffers holding them. */
        if (glops == &gfs2_inode_glops ||
            glops == &gfs2_rgrp_glops ||
            glops == &gfs2_meta_glops) {
                gl->gl_aspace = gfs2_aspace_get(sdp);
                if (!gl->gl_aspace) {
                        error = -ENOMEM;
                        goto fail;
                }
        }

        error = gfs2_lm_get_lock(sdp, &name, &gl->gl_lock);
        if (error)
                goto fail_aspace;

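        /* Another thread may have created this glock while the bucket lock
           was dropped, so repeat the search under the write lock and
           discard our copy if we lost the race. */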
        write_lock(&bucket->hb_lock);
        tmp = search_bucket(bucket, &name);
        if (tmp) {
                write_unlock(&bucket->hb_lock);
                glock_free(gl);
                gl = tmp;
        } else {
                list_add_tail(&gl->gl_list, &bucket->hb_list);
                write_unlock(&bucket->hb_lock);
        }

        *glp = gl;

        return 0;

 fail_aspace:
        if (gl->gl_aspace)
                gfs2_aspace_put(gl->gl_aspace);

 fail:
        kmem_cache_free(gfs2_glock_cachep, gl);

        return error;
}

/**
 * gfs2_holder_init - initialize a struct gfs2_holder in the default way
 * @gl: the glock
 * @state: the state we're requesting
 * @flags: the modifier flags
 * @gh: the holder structure
 *
 */

void gfs2_holder_init(struct gfs2_glock *gl, unsigned int state, unsigned flags,
                      struct gfs2_holder *gh)
{
        flags |= GL_NEVER_RECURSE;
        INIT_LIST_HEAD(&gh->gh_list);
        gh->gh_gl = gl;
        gh->gh_ip = (unsigned long)__builtin_return_address(0);
        gh->gh_owner = current;
        gh->gh_state = state;
        gh->gh_flags = flags;
        gh->gh_error = 0;
        gh->gh_iflags = 0;
        init_completion(&gh->gh_wait);

        if (gh->gh_state == LM_ST_EXCLUSIVE)
                gh->gh_flags |= GL_LOCAL_EXCL;

        gfs2_glock_hold(gl);
}

/**
 * gfs2_holder_reinit - reinitialize a struct gfs2_holder so we can requeue it
 * @state: the state we're requesting
 * @flags: the modifier flags
 * @gh: the holder structure
 *
 * Don't mess with the glock.
 *
 */

void gfs2_holder_reinit(unsigned int state, unsigned flags, struct gfs2_holder *gh)
{
        gh->gh_state = state;
        gh->gh_flags = flags | GL_NEVER_RECURSE;
        if (gh->gh_state == LM_ST_EXCLUSIVE)
                gh->gh_flags |= GL_LOCAL_EXCL;

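        /* Clear every internal flag except HIF_ALLOCED, which (since the
           holder may have come from gfs2_holder_get()) records that it
           was kmalloc'd and must eventually be kfree'd. */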
        gh->gh_iflags &= 1 << HIF_ALLOCED;
        gh->gh_ip = (unsigned long)__builtin_return_address(0);
}

/**
 * gfs2_holder_uninit - uninitialize a holder structure (drop glock reference)
 * @gh: the holder structure
 *
 */

void gfs2_holder_uninit(struct gfs2_holder *gh)
{
        gfs2_glock_put(gh->gh_gl);
        gh->gh_gl = NULL;
        gh->gh_ip = 0;
}

/**
 * gfs2_holder_get - get a struct gfs2_holder structure
 * @gl: the glock
 * @state: the state we're requesting
 * @flags: the modifier flags
 * @gfp_flags: the allocation flags (e.g. GFP_KERNEL | __GFP_NOFAIL)
 *
 * Figure out how big an impact this function has.  Either:
 * 1) Replace it with a cache of structures hanging off the struct gfs2_sbd
 * 2) Leave it like it is
 *
 * Returns: the holder structure, NULL on ENOMEM
 */

struct gfs2_holder *gfs2_holder_get(struct gfs2_glock *gl, unsigned int state,
                                    int flags, gfp_t gfp_flags)
{
        struct gfs2_holder *gh;

        gh = kmalloc(sizeof(struct gfs2_holder), gfp_flags);
        if (!gh)
                return NULL;

        gfs2_holder_init(gl, state, flags, gh);
        set_bit(HIF_ALLOCED, &gh->gh_iflags);
        gh->gh_ip = (unsigned long)__builtin_return_address(0);
        return gh;
}

/**
 * gfs2_holder_put - get rid of a struct gfs2_holder structure
 * @gh: the holder structure
 *
 */

void gfs2_holder_put(struct gfs2_holder *gh)
{
        gfs2_holder_uninit(gh);
        kfree(gh);
}

/**
 * handle_recurse - put other holder structures (marked recursive)
 *                  into the holders list
 * @gh: the holder structure
 *
 */

static void handle_recurse(struct gfs2_holder *gh)
{
        struct gfs2_glock *gl = gh->gh_gl;
        struct gfs2_sbd *sdp = gl->gl_sbd;
        struct gfs2_holder *tmp_gh, *safe;
        int found = 0;

        BUG_ON(!spin_is_locked(&gl->gl_spin));

        printk(KERN_INFO "recursion %016llx, %u\n",
               (unsigned long long)gl->gl_name.ln_number, gl->gl_name.ln_type);

        if (gfs2_assert_warn(sdp, gh->gh_owner))
                return;

        list_for_each_entry_safe(tmp_gh, safe, &gl->gl_waiters3, gh_list) {
                if (tmp_gh->gh_owner != gh->gh_owner)
                        continue;

                gfs2_assert_warn(sdp,
                                 test_bit(HIF_RECURSE, &tmp_gh->gh_iflags));

                list_move_tail(&tmp_gh->gh_list, &gl->gl_holders);
                tmp_gh->gh_error = 0;
                set_bit(HIF_HOLDER, &tmp_gh->gh_iflags);

                complete(&tmp_gh->gh_wait);

                found = 1;
        }

        gfs2_assert_warn(sdp, found);
}

/**
 * do_unrecurse - a recursive holder was just dropped off the waiters3 list
 * @gh: the holder
 *
 * If there is only one other recursive holder, clear its HIF_RECURSE bit.
 * If there is more than one, leave them alone.
 *
 */

static void do_unrecurse(struct gfs2_holder *gh)
{
        struct gfs2_glock *gl = gh->gh_gl;
        struct gfs2_sbd *sdp = gl->gl_sbd;
        struct gfs2_holder *tmp_gh, *last_gh = NULL;
        int found = 0;

        BUG_ON(!spin_is_locked(&gl->gl_spin));

        if (gfs2_assert_warn(sdp, gh->gh_owner))
                return;

        list_for_each_entry(tmp_gh, &gl->gl_waiters3, gh_list) {
                if (tmp_gh->gh_owner != gh->gh_owner)
                        continue;

                gfs2_assert_warn(sdp,
                                 test_bit(HIF_RECURSE, &tmp_gh->gh_iflags));

                if (found)
                        return;

                found = 1;
                last_gh = tmp_gh;
        }

        if (!gfs2_assert_warn(sdp, found))
                clear_bit(HIF_RECURSE, &last_gh->gh_iflags);
}

/**
 * rq_mutex - process a mutex request in the queue
 * @gh: the glock holder
 *
 * Returns: 1 if the queue is blocked
 */

static int rq_mutex(struct gfs2_holder *gh)
{
        struct gfs2_glock *gl = gh->gh_gl;

        list_del_init(&gh->gh_list);
        /*  gh->gh_error never examined.  */
        set_bit(GLF_LOCK, &gl->gl_flags);
        complete(&gh->gh_wait);

        return 1;
}

/**
 * rq_promote - process a promote request in the queue
 * @gh: the glock holder
 *
 * Acquire a new inter-node lock, or change a lock state to more restrictive.
 *
 * Returns: 1 if the queue is blocked
 */

static int rq_promote(struct gfs2_holder *gh)
{
        struct gfs2_glock *gl = gh->gh_gl;
        struct gfs2_sbd *sdp = gl->gl_sbd;
        struct gfs2_glock_operations *glops = gl->gl_ops;
        int recurse;

        if (!relaxed_state_ok(gl->gl_state, gh->gh_state, gh->gh_flags)) {
                if (list_empty(&gl->gl_holders)) {
                        gl->gl_req_gh = gh;
                        set_bit(GLF_LOCK, &gl->gl_flags);
                        spin_unlock(&gl->gl_spin);

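                        /* The reclaim list is over its tuning limit; make a
                           dent in it (twice, presumably for headroom) before
                           asking the lock module for more state.  Priority
                           requests skip this throttling. */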
                        if (atomic_read(&sdp->sd_reclaim_count) >
                            gfs2_tune_get(sdp, gt_reclaim_limit) &&
                            !(gh->gh_flags & LM_FLAG_PRIORITY)) {
                                gfs2_reclaim_glock(sdp);
                                gfs2_reclaim_glock(sdp);
                        }

                        glops->go_xmote_th(gl, gh->gh_state,
                                           gh->gh_flags);

                        spin_lock(&gl->gl_spin);
                }
                return 1;
        }

        if (list_empty(&gl->gl_holders)) {
                set_bit(HIF_FIRST, &gh->gh_iflags);
                set_bit(GLF_LOCK, &gl->gl_flags);
                recurse = 0;
        } else {
                struct gfs2_holder *next_gh;
                if (gh->gh_flags & GL_LOCAL_EXCL)
                        return 1;
                next_gh = list_entry(gl->gl_holders.next, struct gfs2_holder,
                                     gh_list);
                if (next_gh->gh_flags & GL_LOCAL_EXCL)
                        return 1;
                recurse = test_bit(HIF_RECURSE, &gh->gh_iflags);
        }

        list_move_tail(&gh->gh_list, &gl->gl_holders);
        gh->gh_error = 0;
        set_bit(HIF_HOLDER, &gh->gh_iflags);

        if (recurse)
                handle_recurse(gh);

        complete(&gh->gh_wait);

        return 0;
}

/**
 * rq_demote - process a demote request in the queue
 * @gh: the glock holder
 *
 * Returns: 1 if the queue is blocked
 */

static int rq_demote(struct gfs2_holder *gh)
{
        struct gfs2_glock *gl = gh->gh_gl;
        struct gfs2_glock_operations *glops = gl->gl_ops;

        if (!list_empty(&gl->gl_holders))
                return 1;

        if (gl->gl_state == gh->gh_state || gl->gl_state == LM_ST_UNLOCKED) {
                list_del_init(&gh->gh_list);
                gh->gh_error = 0;
                spin_unlock(&gl->gl_spin);
                if (test_bit(HIF_DEALLOC, &gh->gh_iflags))
                        gfs2_holder_put(gh);
                else
                        complete(&gh->gh_wait);
                spin_lock(&gl->gl_spin);
        } else {
                gl->gl_req_gh = gh;
                set_bit(GLF_LOCK, &gl->gl_flags);
                spin_unlock(&gl->gl_spin);

                if (gh->gh_state == LM_ST_UNLOCKED ||
                    gl->gl_state != LM_ST_EXCLUSIVE)
                        glops->go_drop_th(gl);
                else
                        glops->go_xmote_th(gl, gh->gh_state, gh->gh_flags);

                spin_lock(&gl->gl_spin);
        }

        return 0;
}

/**
 * rq_greedy - process a queued request to drop greedy status
 * @gh: the glock holder
 *
 * Returns: 1 if the queue is blocked
 */

static int rq_greedy(struct gfs2_holder *gh)
{
        struct gfs2_glock *gl = gh->gh_gl;

        list_del_init(&gh->gh_list);
        /*  gh->gh_error never examined.  */
        clear_bit(GLF_GREEDY, &gl->gl_flags);
        spin_unlock(&gl->gl_spin);

        gfs2_holder_uninit(gh);
        kfree(container_of(gh, struct greedy, gr_gh));

        spin_lock(&gl->gl_spin);

        return 0;
}

/**
 * run_queue - process holder structures on a glock
 * @gl: the glock
 *
 */
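
/*
 * The waiter lists are serviced in strict priority order: gl_waiters1
 * (glmutex requests) first, then gl_waiters2 (demote and greedy requests,
 * unless GLF_SKIP_WAITERS2 is set), then gl_waiters3 (promote requests).
 */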
static void run_queue(struct gfs2_glock *gl)
{
        struct gfs2_holder *gh;
        int blocked = 1;

        for (;;) {
                if (test_bit(GLF_LOCK, &gl->gl_flags))
                        break;

                if (!list_empty(&gl->gl_waiters1)) {
                        gh = list_entry(gl->gl_waiters1.next,
                                        struct gfs2_holder, gh_list);

                        if (test_bit(HIF_MUTEX, &gh->gh_iflags))
                                blocked = rq_mutex(gh);
                        else
                                gfs2_assert_warn(gl->gl_sbd, 0);

                } else if (!list_empty(&gl->gl_waiters2) &&
                           !test_bit(GLF_SKIP_WAITERS2, &gl->gl_flags)) {
                        gh = list_entry(gl->gl_waiters2.next,
                                        struct gfs2_holder, gh_list);

                        if (test_bit(HIF_DEMOTE, &gh->gh_iflags))
                                blocked = rq_demote(gh);
                        else if (test_bit(HIF_GREEDY, &gh->gh_iflags))
                                blocked = rq_greedy(gh);
                        else
                                gfs2_assert_warn(gl->gl_sbd, 0);

                } else if (!list_empty(&gl->gl_waiters3)) {
                        gh = list_entry(gl->gl_waiters3.next,
                                        struct gfs2_holder, gh_list);

                        if (test_bit(HIF_PROMOTE, &gh->gh_iflags))
                                blocked = rq_promote(gh);
                        else
                                gfs2_assert_warn(gl->gl_sbd, 0);

                } else
                        break;

                if (blocked)
                        break;
        }
}

/**
 * gfs2_glmutex_lock - acquire a local lock on a glock
 * @gl: the glock
 *
 * Gives caller exclusive access to manipulate a glock structure.
 */

void gfs2_glmutex_lock(struct gfs2_glock *gl)
{
        struct gfs2_holder gh;

        gfs2_holder_init(gl, 0, 0, &gh);
        set_bit(HIF_MUTEX, &gh.gh_iflags);

        spin_lock(&gl->gl_spin);
        if (test_and_set_bit(GLF_LOCK, &gl->gl_flags))
                list_add_tail(&gh.gh_list, &gl->gl_waiters1);
        else
                complete(&gh.gh_wait);
        spin_unlock(&gl->gl_spin);

        wait_for_completion(&gh.gh_wait);
        gfs2_holder_uninit(&gh);
}

/**
 * gfs2_glmutex_trylock - try to acquire a local lock on a glock
 * @gl: the glock
 *
 * Returns: 1 if the glock is acquired
 */

int gfs2_glmutex_trylock(struct gfs2_glock *gl)
{
        int acquired = 1;

        spin_lock(&gl->gl_spin);
        if (test_and_set_bit(GLF_LOCK, &gl->gl_flags))
                acquired = 0;
        spin_unlock(&gl->gl_spin);

        return acquired;
}

/**
 * gfs2_glmutex_unlock - release a local lock on a glock
 * @gl: the glock
 *
 */

void gfs2_glmutex_unlock(struct gfs2_glock *gl)
{
        spin_lock(&gl->gl_spin);
        clear_bit(GLF_LOCK, &gl->gl_flags);
        run_queue(gl);
        BUG_ON(!spin_is_locked(&gl->gl_spin));
        spin_unlock(&gl->gl_spin);
}

/**
 * handle_callback - add a demote request to a lock's queue
 * @gl: the glock
 * @state: the state the caller wants us to change to
 *
 */

static void handle_callback(struct gfs2_glock *gl, unsigned int state)
{
        struct gfs2_holder *gh, *new_gh = NULL;

 restart:
        spin_lock(&gl->gl_spin);

        list_for_each_entry(gh, &gl->gl_waiters2, gh_list) {
                if (test_bit(HIF_DEMOTE, &gh->gh_iflags) &&
                    gl->gl_req_gh != gh) {
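                        /* A demote request is already queued; if this
                           callback wants a different state, the only state
                           that satisfies both is LM_ST_UNLOCKED. */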
                        if (gh->gh_state != state)
                                gh->gh_state = LM_ST_UNLOCKED;
                        goto out;
                }
        }

        if (new_gh) {
                list_add_tail(&new_gh->gh_list, &gl->gl_waiters2);
                new_gh = NULL;
        } else {
                spin_unlock(&gl->gl_spin);

                new_gh = gfs2_holder_get(gl, state,
                                         LM_FLAG_TRY | GL_NEVER_RECURSE,
                                         GFP_KERNEL | __GFP_NOFAIL);
                set_bit(HIF_DEMOTE, &new_gh->gh_iflags);
                set_bit(HIF_DEALLOC, &new_gh->gh_iflags);

                goto restart;
        }

 out:
        spin_unlock(&gl->gl_spin);

        if (new_gh)
                gfs2_holder_put(new_gh);
}

/**
 * state_change - record that the glock is now in a different state
 * @gl: the glock
 * @new_state: the new state
 *
 */

static void state_change(struct gfs2_glock *gl, unsigned int new_state)
{
        int held1, held2;

        held1 = (gl->gl_state != LM_ST_UNLOCKED);
        held2 = (new_state != LM_ST_UNLOCKED);

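        /* A glock held in any mode pins a reference on itself; take or
           drop that reference when crossing into or out of
           LM_ST_UNLOCKED. */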
        if (held1 != held2) {
                if (held2)
                        gfs2_glock_hold(gl);
                else
                        gfs2_glock_put(gl);
        }

        gl->gl_state = new_state;
}

/**
 * xmote_bh - Called after the lock module is done acquiring a lock
 * @gl: The glock in question
 * @ret: the int returned from the lock module
 *
 */

static void xmote_bh(struct gfs2_glock *gl, unsigned int ret)
{
        struct gfs2_sbd *sdp = gl->gl_sbd;
        struct gfs2_glock_operations *glops = gl->gl_ops;
        struct gfs2_holder *gh = gl->gl_req_gh;
        int prev_state = gl->gl_state;
        int op_done = 1;

        gfs2_assert_warn(sdp, test_bit(GLF_LOCK, &gl->gl_flags));
        gfs2_assert_warn(sdp, queue_empty(gl, &gl->gl_holders));
        gfs2_assert_warn(sdp, !(ret & LM_OUT_ASYNC));

        state_change(gl, ret & LM_OUT_ST_MASK);

        if (prev_state != LM_ST_UNLOCKED && !(ret & LM_OUT_CACHEABLE)) {
                if (glops->go_inval)
                        glops->go_inval(gl, DIO_METADATA | DIO_DATA);
        } else if (gl->gl_state == LM_ST_DEFERRED) {
                /* We might not want to do this here.
                   Look at moving to the inode glops. */
                if (glops->go_inval)
                        glops->go_inval(gl, DIO_DATA);
        }

        /*  Deal with each possible exit condition  */

        if (!gh)
                gl->gl_stamp = jiffies;

        else if (unlikely(test_bit(SDF_SHUTDOWN, &sdp->sd_flags))) {
                spin_lock(&gl->gl_spin);
                list_del_init(&gh->gh_list);
                gh->gh_error = -EIO;
                if (test_bit(HIF_RECURSE, &gh->gh_iflags))
                        do_unrecurse(gh);
                spin_unlock(&gl->gl_spin);

        } else if (test_bit(HIF_DEMOTE, &gh->gh_iflags)) {
                spin_lock(&gl->gl_spin);
                list_del_init(&gh->gh_list);
                if (gl->gl_state == gh->gh_state ||
                    gl->gl_state == LM_ST_UNLOCKED)
                        gh->gh_error = 0;
                else {
                        if (gfs2_assert_warn(sdp, gh->gh_flags &
                                        (LM_FLAG_TRY | LM_FLAG_TRY_1CB)) == -1)
                                fs_warn(sdp, "ret = 0x%.8X\n", ret);
                        gh->gh_error = GLR_TRYFAILED;
                }
                spin_unlock(&gl->gl_spin);

                if (ret & LM_OUT_CANCELED)
                        handle_callback(gl, LM_ST_UNLOCKED); /* Lame */

        } else if (ret & LM_OUT_CANCELED) {
                spin_lock(&gl->gl_spin);
                list_del_init(&gh->gh_list);
                gh->gh_error = GLR_CANCELED;
                if (test_bit(HIF_RECURSE, &gh->gh_iflags))
                        do_unrecurse(gh);
                spin_unlock(&gl->gl_spin);

        } else if (relaxed_state_ok(gl->gl_state, gh->gh_state, gh->gh_flags)) {
                spin_lock(&gl->gl_spin);
                list_move_tail(&gh->gh_list, &gl->gl_holders);
                gh->gh_error = 0;
                set_bit(HIF_HOLDER, &gh->gh_iflags);
                spin_unlock(&gl->gl_spin);

                set_bit(HIF_FIRST, &gh->gh_iflags);

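                /* Leave GLF_LOCK set and gl_req_gh in place: for a first
                   holder, glock_wait_internal() finishes the operation
                   (go_lock etc.) and clears them itself -- see the
                   HIF_FIRST handling there. */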
                op_done = 0;

        } else if (gh->gh_flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB)) {
                spin_lock(&gl->gl_spin);
                list_del_init(&gh->gh_list);
                gh->gh_error = GLR_TRYFAILED;
                if (test_bit(HIF_RECURSE, &gh->gh_iflags))
                        do_unrecurse(gh);
                spin_unlock(&gl->gl_spin);

        } else {
                if (gfs2_assert_withdraw(sdp, 0) == -1)
                        fs_err(sdp, "ret = 0x%.8X\n", ret);
        }

        if (glops->go_xmote_bh)
                glops->go_xmote_bh(gl);

        if (op_done) {
                spin_lock(&gl->gl_spin);
                gl->gl_req_gh = NULL;
                gl->gl_req_bh = NULL;
                clear_bit(GLF_LOCK, &gl->gl_flags);
                run_queue(gl);
                spin_unlock(&gl->gl_spin);
        }

        gfs2_glock_put(gl);

        if (gh) {
                if (test_bit(HIF_DEALLOC, &gh->gh_iflags))
                        gfs2_holder_put(gh);
                else
                        complete(&gh->gh_wait);
        }
}

/**
 * gfs2_glock_xmote_th - Call into the lock module to acquire or change a glock
 * @gl: The glock in question
 * @state: the requested state
 * @flags: modifier flags to the lock call
 *
 */

void gfs2_glock_xmote_th(struct gfs2_glock *gl, unsigned int state, int flags)
{
        struct gfs2_sbd *sdp = gl->gl_sbd;
        struct gfs2_glock_operations *glops = gl->gl_ops;
        int lck_flags = flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB |
                                 LM_FLAG_NOEXP | LM_FLAG_ANY |
                                 LM_FLAG_PRIORITY);
        unsigned int lck_ret;

        gfs2_assert_warn(sdp, test_bit(GLF_LOCK, &gl->gl_flags));
        gfs2_assert_warn(sdp, queue_empty(gl, &gl->gl_holders));
        gfs2_assert_warn(sdp, state != LM_ST_UNLOCKED);
        gfs2_assert_warn(sdp, state != gl->gl_state);

        if (gl->gl_state == LM_ST_EXCLUSIVE) {
                if (glops->go_sync)
                        glops->go_sync(gl,
                                       DIO_METADATA | DIO_DATA | DIO_RELEASE);
        }

        gfs2_glock_hold(gl);
        gl->gl_req_bh = xmote_bh;

        lck_ret = gfs2_lm_lock(sdp, gl->gl_lock, gl->gl_state, state,
                               lck_flags);

        if (gfs2_assert_withdraw(sdp, !(lck_ret & LM_OUT_ERROR)))
                return;

        if (lck_ret & LM_OUT_ASYNC)
                gfs2_assert_warn(sdp, lck_ret == LM_OUT_ASYNC);
        else
                xmote_bh(gl, lck_ret);
}

/**
 * drop_bh - Called after a lock module unlock completes
 * @gl: the glock
 * @ret: the return status
 *
 * Doesn't wake up the process waiting on the struct gfs2_holder (if any)
 * Doesn't drop the reference on the glock the top half took out
 *
 */

static void drop_bh(struct gfs2_glock *gl, unsigned int ret)
{
        struct gfs2_sbd *sdp = gl->gl_sbd;
        struct gfs2_glock_operations *glops = gl->gl_ops;
        struct gfs2_holder *gh = gl->gl_req_gh;

        clear_bit(GLF_PREFETCH, &gl->gl_flags);

        gfs2_assert_warn(sdp, test_bit(GLF_LOCK, &gl->gl_flags));
        gfs2_assert_warn(sdp, queue_empty(gl, &gl->gl_holders));
        gfs2_assert_warn(sdp, !ret);

        state_change(gl, LM_ST_UNLOCKED);

        if (glops->go_inval)
                glops->go_inval(gl, DIO_METADATA | DIO_DATA);

        if (gh) {
                spin_lock(&gl->gl_spin);
                list_del_init(&gh->gh_list);
                gh->gh_error = 0;
                spin_unlock(&gl->gl_spin);
        }

        if (glops->go_drop_bh)
                glops->go_drop_bh(gl);

        spin_lock(&gl->gl_spin);
        gl->gl_req_gh = NULL;
        gl->gl_req_bh = NULL;
        clear_bit(GLF_LOCK, &gl->gl_flags);
        run_queue(gl);
        spin_unlock(&gl->gl_spin);

        gfs2_glock_put(gl);

        if (gh) {
                if (test_bit(HIF_DEALLOC, &gh->gh_iflags))
                        gfs2_holder_put(gh);
                else
                        complete(&gh->gh_wait);
        }
}

/**
 * gfs2_glock_drop_th - call into the lock module to unlock a lock
 * @gl: the glock
 *
 */

void gfs2_glock_drop_th(struct gfs2_glock *gl)
{
        struct gfs2_sbd *sdp = gl->gl_sbd;
        struct gfs2_glock_operations *glops = gl->gl_ops;
        unsigned int ret;

        gfs2_assert_warn(sdp, test_bit(GLF_LOCK, &gl->gl_flags));
        gfs2_assert_warn(sdp, queue_empty(gl, &gl->gl_holders));
        gfs2_assert_warn(sdp, gl->gl_state != LM_ST_UNLOCKED);

        if (gl->gl_state == LM_ST_EXCLUSIVE) {
                if (glops->go_sync)
                        glops->go_sync(gl,
                                       DIO_METADATA | DIO_DATA | DIO_RELEASE);
        }

        gfs2_glock_hold(gl);
        gl->gl_req_bh = drop_bh;

        ret = gfs2_lm_unlock(sdp, gl->gl_lock, gl->gl_state);

        if (gfs2_assert_withdraw(sdp, !(ret & LM_OUT_ERROR)))
                return;

        if (!ret)
                drop_bh(gl, ret);
        else
                gfs2_assert_warn(sdp, ret == LM_OUT_ASYNC);
}

/**
 * do_cancels - cancel requests for locks stuck waiting on an expire flag
 * @gh: the LM_FLAG_PRIORITY holder waiting to acquire the lock
 *
 * Don't cancel GL_NOCANCEL requests.
 */

static void do_cancels(struct gfs2_holder *gh)
{
        struct gfs2_glock *gl = gh->gh_gl;

        spin_lock(&gl->gl_spin);

        while (gl->gl_req_gh != gh &&
               !test_bit(HIF_HOLDER, &gh->gh_iflags) &&
               !list_empty(&gh->gh_list)) {
                if (gl->gl_req_bh &&
                    !(gl->gl_req_gh &&
                      (gl->gl_req_gh->gh_flags & GL_NOCANCEL))) {
                        spin_unlock(&gl->gl_spin);
                        gfs2_lm_cancel(gl->gl_sbd, gl->gl_lock);
                        msleep(100);
                        spin_lock(&gl->gl_spin);
                } else {
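                        /* Nothing cancelable in flight; just back off.  The
                           spinlock must still be dropped first -- msleep()
                           schedules, and we must never schedule while
                           holding gl_spin (see the commit subject). */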
                        spin_unlock(&gl->gl_spin);
                        msleep(100);
                        spin_lock(&gl->gl_spin);
                }
        }

        spin_unlock(&gl->gl_spin);
}

/**
 * glock_wait_internal - wait on a glock acquisition
 * @gh: the glock holder
 *
 * Returns: 0 on success
 */

static int glock_wait_internal(struct gfs2_holder *gh)
{
        struct gfs2_glock *gl = gh->gh_gl;
        struct gfs2_sbd *sdp = gl->gl_sbd;
        struct gfs2_glock_operations *glops = gl->gl_ops;

        if (test_bit(HIF_ABORTED, &gh->gh_iflags))
                return -EIO;

        if (gh->gh_flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB)) {
                spin_lock(&gl->gl_spin);
                if (gl->gl_req_gh != gh &&
                    !test_bit(HIF_HOLDER, &gh->gh_iflags) &&
                    !list_empty(&gh->gh_list)) {
                        list_del_init(&gh->gh_list);
                        gh->gh_error = GLR_TRYFAILED;
                        if (test_bit(HIF_RECURSE, &gh->gh_iflags))
                                do_unrecurse(gh);
                        run_queue(gl);
                        spin_unlock(&gl->gl_spin);
                        return gh->gh_error;
                }
                spin_unlock(&gl->gl_spin);
        }

        if (gh->gh_flags & LM_FLAG_PRIORITY)
                do_cancels(gh);

        wait_for_completion(&gh->gh_wait);

        if (gh->gh_error)
                return gh->gh_error;

        gfs2_assert_withdraw(sdp, test_bit(HIF_HOLDER, &gh->gh_iflags));
        gfs2_assert_withdraw(sdp, relaxed_state_ok(gl->gl_state,
                                                   gh->gh_state,
                                                   gh->gh_flags));

        if (test_bit(HIF_FIRST, &gh->gh_iflags)) {
                gfs2_assert_warn(sdp, test_bit(GLF_LOCK, &gl->gl_flags));

                if (glops->go_lock) {
                        gh->gh_error = glops->go_lock(gh);
                        if (gh->gh_error) {
                                spin_lock(&gl->gl_spin);
                                list_del_init(&gh->gh_list);
                                if (test_and_clear_bit(HIF_RECURSE,
                                                       &gh->gh_iflags))
                                        do_unrecurse(gh);
                                spin_unlock(&gl->gl_spin);
                        }
                }

                spin_lock(&gl->gl_spin);
                gl->gl_req_gh = NULL;
                gl->gl_req_bh = NULL;
                clear_bit(GLF_LOCK, &gl->gl_flags);
                if (test_bit(HIF_RECURSE, &gh->gh_iflags))
                        handle_recurse(gh);
                run_queue(gl);
                spin_unlock(&gl->gl_spin);
        }

        return gh->gh_error;
}

static inline struct gfs2_holder *
find_holder_by_owner(struct list_head *head, struct task_struct *owner)
{
        struct gfs2_holder *gh;

        list_for_each_entry(gh, head, gh_list) {
                if (gh->gh_owner == owner)
                        return gh;
        }

        return NULL;
}

/**
 * recurse_check - make sure a recursive request is safe
 * @existing: the holder this process already has on the glock
 * @new: the holder being added
 * @state: the glock state to check the new request against
 *
 * Make sure the new holder is compatible with the pre-existing one.
 *
 * Returns: 0 if compatible, -EINVAL otherwise
 */

static int recurse_check(struct gfs2_holder *existing, struct gfs2_holder *new,
                         unsigned int state)
{
        struct gfs2_sbd *sdp = existing->gh_gl->gl_sbd;

        if (gfs2_assert_warn(sdp, (new->gh_flags & LM_FLAG_ANY) ||
                                  !(existing->gh_flags & LM_FLAG_ANY)))
                goto fail;

        if (gfs2_assert_warn(sdp, (existing->gh_flags & GL_LOCAL_EXCL) ||
                                  !(new->gh_flags & GL_LOCAL_EXCL)))
                goto fail;

        if (gfs2_assert_warn(sdp, relaxed_state_ok(state, new->gh_state,
                                                   new->gh_flags)))
                goto fail;

        return 0;

fail:
        print_symbol(KERN_WARNING "GFS2: Existing holder from %s\n",
                     existing->gh_ip);
        print_symbol(KERN_WARNING "GFS2: New holder from %s\n", new->gh_ip);
        set_bit(HIF_ABORTED, &new->gh_iflags);
        return -EINVAL;
}

/**
 * add_to_queue - Add a holder to the wait queue (but look for recursion)
 * @gh: the holder structure to add
 *
 */

static void add_to_queue(struct gfs2_holder *gh)
{
        struct gfs2_glock *gl = gh->gh_gl;
        struct gfs2_holder *existing;

        if (!gh->gh_owner)
                goto out;

        existing = find_holder_by_owner(&gl->gl_holders, gh->gh_owner);
        if (existing) {
                if (recurse_check(existing, gh, gl->gl_state))
                        return;

                list_add_tail(&gh->gh_list, &gl->gl_holders);
                set_bit(HIF_HOLDER, &gh->gh_iflags);

                gh->gh_error = 0;
                complete(&gh->gh_wait);

                return;
        }

        existing = find_holder_by_owner(&gl->gl_waiters3, gh->gh_owner);
        if (existing) {
                if (recurse_check(existing, gh, existing->gh_state))
                        return;

                set_bit(HIF_RECURSE, &gh->gh_iflags);
                set_bit(HIF_RECURSE, &existing->gh_iflags);

                list_add_tail(&gh->gh_list, &gl->gl_waiters3);

                return;
        }

 out:
        if (gh->gh_flags & LM_FLAG_PRIORITY)
                list_add(&gh->gh_list, &gl->gl_waiters3);
        else
                list_add_tail(&gh->gh_list, &gl->gl_waiters3);
}

/**
 * gfs2_glock_nq - enqueue a struct gfs2_holder onto a glock (acquire a glock)
 * @gh: the holder structure
 *
 * if (gh->gh_flags & GL_ASYNC), this never returns an error
 *
 * Returns: 0, GLR_TRYFAILED, or errno on failure
 */
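
/*
 * Typical calling pattern (a sketch only): callers usually go through
 * gfs2_glock_nq_init(), the holder-init-plus-enqueue wrapper from glock.h
 * that gfs2_glock_nq_num() below also uses:
 *
 *	struct gfs2_holder gh;
 *	int error = gfs2_glock_nq_init(gl, LM_ST_SHARED, 0, &gh);
 *	if (error)
 *		return error;
 *	... access the object the glock protects ...
 *	gfs2_glock_dq_uninit(&gh);
 */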

int gfs2_glock_nq(struct gfs2_holder *gh)
{
        struct gfs2_glock *gl = gh->gh_gl;
        struct gfs2_sbd *sdp = gl->gl_sbd;
        int error = 0;

 restart:
        if (unlikely(test_bit(SDF_SHUTDOWN, &sdp->sd_flags))) {
                set_bit(HIF_ABORTED, &gh->gh_iflags);
                return -EIO;
        }

        set_bit(HIF_PROMOTE, &gh->gh_iflags);

        spin_lock(&gl->gl_spin);
        add_to_queue(gh);
        run_queue(gl);
        spin_unlock(&gl->gl_spin);

        if (!(gh->gh_flags & GL_ASYNC)) {
                error = glock_wait_internal(gh);
                if (error == GLR_CANCELED) {
                        msleep(100);
                        goto restart;
                }
        }

        clear_bit(GLF_PREFETCH, &gl->gl_flags);

        return error;
}

/**
 * gfs2_glock_poll - poll to see if an async request has been completed
 * @gh: the holder
 *
 * Returns: 1 if the request is ready to be gfs2_glock_wait()ed on
 */

int gfs2_glock_poll(struct gfs2_holder *gh)
{
        struct gfs2_glock *gl = gh->gh_gl;
        int ready = 0;

        spin_lock(&gl->gl_spin);

        if (test_bit(HIF_HOLDER, &gh->gh_iflags))
                ready = 1;
        else if (list_empty(&gh->gh_list)) {
                if (gh->gh_error == GLR_CANCELED) {
                        spin_unlock(&gl->gl_spin);
                        msleep(100);
                        if (gfs2_glock_nq(gh))
                                return 1;
                        return 0;
                } else
                        ready = 1;
        }

        spin_unlock(&gl->gl_spin);

        return ready;
}

/**
 * gfs2_glock_wait - wait for a lock acquisition that was started with GL_ASYNC
 * @gh: the holder structure
 *
 * Returns: 0, GLR_TRYFAILED, or errno on failure
 */

int gfs2_glock_wait(struct gfs2_holder *gh)
{
        int error;

        error = glock_wait_internal(gh);
        if (error == GLR_CANCELED) {
                msleep(100);
                gh->gh_flags &= ~GL_ASYNC;
                error = gfs2_glock_nq(gh);
        }

        return error;
}

/**
 * gfs2_glock_dq - dequeue a struct gfs2_holder from a glock (release a glock)
 * @gh: the glock holder
 *
 */

void gfs2_glock_dq(struct gfs2_holder *gh)
{
        struct gfs2_glock *gl = gh->gh_gl;
        struct gfs2_glock_operations *glops = gl->gl_ops;

        if (gh->gh_flags & GL_SYNC)
                set_bit(GLF_SYNC, &gl->gl_flags);

        if (gh->gh_flags & GL_NOCACHE)
                handle_callback(gl, LM_ST_UNLOCKED);

        gfs2_glmutex_lock(gl);

        spin_lock(&gl->gl_spin);
        list_del_init(&gh->gh_list);

        if (list_empty(&gl->gl_holders)) {
                spin_unlock(&gl->gl_spin);

                if (glops->go_unlock)
                        glops->go_unlock(gh);

                if (test_bit(GLF_SYNC, &gl->gl_flags)) {
                        if (glops->go_sync)
                                glops->go_sync(gl, DIO_METADATA | DIO_DATA);
                }

                gl->gl_stamp = jiffies;

                spin_lock(&gl->gl_spin);
        }

        clear_bit(GLF_LOCK, &gl->gl_flags);
        run_queue(gl);
        spin_unlock(&gl->gl_spin);
}

/**
 * gfs2_glock_prefetch - Try to prefetch a glock
 * @gl: the glock
 * @state: the state to prefetch in
 * @flags: flags passed to go_xmote_th()
 *
 */

void gfs2_glock_prefetch(struct gfs2_glock *gl, unsigned int state, int flags)
{
        struct gfs2_glock_operations *glops = gl->gl_ops;

        spin_lock(&gl->gl_spin);

        if (test_bit(GLF_LOCK, &gl->gl_flags) ||
            !list_empty(&gl->gl_holders) ||
            !list_empty(&gl->gl_waiters1) ||
            !list_empty(&gl->gl_waiters2) ||
            !list_empty(&gl->gl_waiters3) ||
            relaxed_state_ok(gl->gl_state, state, flags)) {
                spin_unlock(&gl->gl_spin);
                return;
        }

        set_bit(GLF_PREFETCH, &gl->gl_flags);
        set_bit(GLF_LOCK, &gl->gl_flags);
        spin_unlock(&gl->gl_spin);

        glops->go_xmote_th(gl, state, flags);
}

/**
 * gfs2_glock_force_drop - Force a glock to be uncached
 * @gl: the glock
 *
 */

void gfs2_glock_force_drop(struct gfs2_glock *gl)
{
        struct gfs2_holder gh;

        gfs2_holder_init(gl, LM_ST_UNLOCKED, GL_NEVER_RECURSE, &gh);
        set_bit(HIF_DEMOTE, &gh.gh_iflags);

        spin_lock(&gl->gl_spin);
        list_add_tail(&gh.gh_list, &gl->gl_waiters2);
        run_queue(gl);
        spin_unlock(&gl->gl_spin);

        wait_for_completion(&gh.gh_wait);
        gfs2_holder_uninit(&gh);
}

static void greedy_work(void *data)
{
        struct greedy *gr = (struct greedy *)data;
        struct gfs2_holder *gh = &gr->gr_gh;
        struct gfs2_glock *gl = gh->gh_gl;
        struct gfs2_glock_operations *glops = gl->gl_ops;

        clear_bit(GLF_SKIP_WAITERS2, &gl->gl_flags);

        if (glops->go_greedy)
                glops->go_greedy(gl);

        spin_lock(&gl->gl_spin);

        if (list_empty(&gl->gl_waiters2)) {
                clear_bit(GLF_GREEDY, &gl->gl_flags);
                spin_unlock(&gl->gl_spin);
                gfs2_holder_uninit(gh);
                kfree(gr);
        } else {
                gfs2_glock_hold(gl);
                list_add_tail(&gh->gh_list, &gl->gl_waiters2);
                run_queue(gl);
                spin_unlock(&gl->gl_spin);
                gfs2_glock_put(gl);
        }
}

/**
 * gfs2_glock_be_greedy - hold on to a glock in greedy mode for a while
 * @gl: the glock
 * @time: how long to stay greedy, in jiffies
 *
 * Returns: 0 if go_greedy will be called, 1 otherwise
 */

int gfs2_glock_be_greedy(struct gfs2_glock *gl, unsigned int time)
{
        struct greedy *gr;
        struct gfs2_holder *gh;

        if (!time ||
            gl->gl_sbd->sd_args.ar_localcaching ||
            test_and_set_bit(GLF_GREEDY, &gl->gl_flags))
                return 1;

        gr = kmalloc(sizeof(struct greedy), GFP_KERNEL);
        if (!gr) {
                clear_bit(GLF_GREEDY, &gl->gl_flags);
                return 1;
        }
        gh = &gr->gr_gh;

        gfs2_holder_init(gl, 0, GL_NEVER_RECURSE, gh);
        set_bit(HIF_GREEDY, &gh->gh_iflags);
        INIT_WORK(&gr->gr_work, greedy_work, gr);

        set_bit(GLF_SKIP_WAITERS2, &gl->gl_flags);
        schedule_delayed_work(&gr->gr_work, time);

        return 0;
}

/**
 * gfs2_glock_dq_uninit - dequeue a holder from a glock and uninitialize it
 * @gh: the holder structure
 *
 */

void gfs2_glock_dq_uninit(struct gfs2_holder *gh)
{
        gfs2_glock_dq(gh);
        gfs2_holder_uninit(gh);
}

/**
 * gfs2_glock_nq_num - acquire a glock based on lock number
 * @sdp: the filesystem
 * @number: the lock number
 * @glops: the glock operations for the type of glock
 * @state: the state to acquire the glock in
 * @flags: modifier flags for the acquisition
 * @gh: the struct gfs2_holder
 *
 * Returns: errno
 */

int gfs2_glock_nq_num(struct gfs2_sbd *sdp, uint64_t number,
                      struct gfs2_glock_operations *glops, unsigned int state,
                      int flags, struct gfs2_holder *gh)
{
        struct gfs2_glock *gl;
        int error;

        error = gfs2_glock_get(sdp, number, glops, CREATE, &gl);
        if (!error) {
                error = gfs2_glock_nq_init(gl, state, flags, gh);
                gfs2_glock_put(gl);
        }

        return error;
}

/**
 * glock_compare - Compare two struct gfs2_glock structures for sorting
 * @arg_a: the first structure
 * @arg_b: the second structure
 *
 * Returns: 1 if @arg_a sorts after @arg_b, -1 if before, 0 if equal
 */

static int glock_compare(const void *arg_a, const void *arg_b)
{
        struct gfs2_holder *gh_a = *(struct gfs2_holder **)arg_a;
        struct gfs2_holder *gh_b = *(struct gfs2_holder **)arg_b;
        struct lm_lockname *a = &gh_a->gh_gl->gl_name;
        struct lm_lockname *b = &gh_b->gh_gl->gl_name;
        int ret = 0;

        if (a->ln_number > b->ln_number)
                ret = 1;
        else if (a->ln_number < b->ln_number)
                ret = -1;
        else {
                if (gh_a->gh_state == LM_ST_SHARED &&
                    gh_b->gh_state == LM_ST_EXCLUSIVE)
                        ret = 1;
                else if (!(gh_a->gh_flags & GL_LOCAL_EXCL) &&
                         (gh_b->gh_flags & GL_LOCAL_EXCL))
                        ret = 1;
        }

        return ret;
}

/**
 * nq_m_sync - synchronously acquire more than one glock in deadlock free order
 * @num_gh: the number of structures
 * @ghs: an array of struct gfs2_holder structures
 *
 * Returns: 0 on success (all glocks acquired),
 *          errno on failure (no glocks acquired)
 */

static int nq_m_sync(unsigned int num_gh, struct gfs2_holder *ghs,
                     struct gfs2_holder **p)
{
        unsigned int x;
        int error = 0;

        for (x = 0; x < num_gh; x++)
                p[x] = &ghs[x];

        sort(p, num_gh, sizeof(struct gfs2_holder *), glock_compare, NULL);

        for (x = 0; x < num_gh; x++) {
                p[x]->gh_flags &= ~(LM_FLAG_TRY | GL_ASYNC);

                error = gfs2_glock_nq(p[x]);
                if (error) {
                        while (x--)
                                gfs2_glock_dq(p[x]);
                        break;
                }
        }

        return error;
}

/**
 * gfs2_glock_nq_m - acquire multiple glocks
 * @num_gh: the number of structures
 * @ghs: an array of struct gfs2_holder structures
 *
 * Figure out how big an impact this function has.  Either:
 * 1) Replace this code with code that calls gfs2_glock_prefetch()
 * 2) Forget async stuff and just call nq_m_sync()
 * 3) Leave it like it is
 *
 * Returns: 0 on success (all glocks acquired),
 *          errno on failure (no glocks acquired)
 */

int gfs2_glock_nq_m(unsigned int num_gh, struct gfs2_holder *ghs)
{
        int *e;
        unsigned int x;
        int borked = 0, serious = 0;
        int error = 0;

        if (!num_gh)
                return 0;

        if (num_gh == 1) {
                ghs->gh_flags &= ~(LM_FLAG_TRY | GL_ASYNC);
                return gfs2_glock_nq(ghs);
        }

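        /* Scratch array with a dual life: the first pass stores int error
           codes in it, and nq_m_sync() reuses the same memory as an array
           of holder pointers -- hence the pointer-sized elements (which
           are at least int-sized on every Linux arch). */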
1698         e = kcalloc(num_gh, sizeof(struct gfs2_holder *), GFP_KERNEL);
1699         if (!e)
1700                 return -ENOMEM;
1701
1702         for (x = 0; x < num_gh; x++) {
1703                 ghs[x].gh_flags |= LM_FLAG_TRY | GL_ASYNC;
1704                 error = gfs2_glock_nq(&ghs[x]);
1705                 if (error) {
1706                         borked = 1;
1707                         serious = error;
1708                         num_gh = x;
1709                         break;
1710                 }
1711         }
1712
1713         for (x = 0; x < num_gh; x++) {
1714                 error = e[x] = glock_wait_internal(&ghs[x]);
1715                 if (error) {
1716                         borked = 1;
1717                         if (error != GLR_TRYFAILED && error != GLR_CANCELED)
1718                                 serious = error;
1719                 }
1720         }
1721
1722         if (!borked) {
1723                 kfree(e);
1724                 return 0;
1725         }
1726
1727         for (x = 0; x < num_gh; x++)
1728                 if (!e[x])
1729                         gfs2_glock_dq(&ghs[x]);
1730
1731         if (serious)
1732                 error = serious;
1733         else {
1734                 for (x = 0; x < num_gh; x++)
1735                         gfs2_holder_reinit(ghs[x].gh_state, ghs[x].gh_flags,
1736                                           &ghs[x]);
1737                 error = nq_m_sync(num_gh, ghs, (struct gfs2_holder **)e);
1738         }
1739
1740         kfree(e);
1741
1742         return error;
1743 }
1744
1745 /**
1746  * gfs2_glock_dq_m - release multiple glocks
1747  * @num_gh: the number of structures
1748  * @ghs: an array of struct gfs2_holder structures
1749  *
1750  */
1751
1752 void gfs2_glock_dq_m(unsigned int num_gh, struct gfs2_holder *ghs)
1753 {
1754         unsigned int x;
1755
1756         for (x = 0; x < num_gh; x++)
1757                 gfs2_glock_dq(&ghs[x]);
1758 }
1759
1760 /**
1761  * gfs2_glock_dq_uninit_m - release multiple glocks
1762  * @num_gh: the number of structures
1763  * @ghs: an array of struct gfs2_holder structures
1764  *
1765  */
1766
1767 void gfs2_glock_dq_uninit_m(unsigned int num_gh, struct gfs2_holder *ghs)
1768 {
1769         unsigned int x;
1770
1771         for (x = 0; x < num_gh; x++)
1772                 gfs2_glock_dq_uninit(&ghs[x]);
1773 }
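
/* An illustrative sketch (not part of the original file) of the intended
   calling pattern: fill an array of holders and let gfs2_glock_nq_m()
   worry about the ordering.  The two glocks and the use of the shared
   state are assumptions made up for the example. */
#if 0
static int example_nq_two(struct gfs2_glock *gl_a, struct gfs2_glock *gl_b)
{
        struct gfs2_holder ghs[2];
        int error;

        gfs2_holder_init(gl_a, LM_ST_SHARED, 0, &ghs[0]);
        gfs2_holder_init(gl_b, LM_ST_SHARED, 0, &ghs[1]);

        /* Tries all the locks asynchronously first, falling back to a
           sorted synchronous pass if a TRY fails for a benign reason */
        error = gfs2_glock_nq_m(2, ghs);
        if (!error) {
                /* ... both glocks held here ... */
                gfs2_glock_dq_m(2, ghs);
        }

        gfs2_holder_uninit(&ghs[0]);
        gfs2_holder_uninit(&ghs[1]);
        return error;
}
#endif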
1774
1775 /**
1776  * gfs2_glock_prefetch_num - prefetch a glock based on lock number
1777  * @sdp: the filesystem
1778  * @number: the lock number
1779  * @glops: the glock operations for the type of glock
1780  * @state: the state to acquire the glock in
1781  * @flags: modifier flags for the acquisition
1782  *
1783  * Best-effort: skipped once the reclaim list reaches gt_reclaim_limit; errors are ignored.
1784  */
1785
1786 void gfs2_glock_prefetch_num(struct gfs2_sbd *sdp, uint64_t number,
1787                              struct gfs2_glock_operations *glops,
1788                              unsigned int state, int flags)
1789 {
1790         struct gfs2_glock *gl;
1791         int error;
1792
1793         if (atomic_read(&sdp->sd_reclaim_count) <
1794             gfs2_tune_get(sdp, gt_reclaim_limit)) {
1795                 error = gfs2_glock_get(sdp, number, glops, CREATE, &gl);
1796                 if (!error) {
1797                         gfs2_glock_prefetch(gl, state, flags);
1798                         gfs2_glock_put(gl);
1799                 }
1800         }
1801 }
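
/* A hypothetical read-ahead helper (example only): hint that an inode's
   glock will be wanted shortly.  The helper name and the choice of
   LM_ST_SHARED/LM_FLAG_TRY are assumptions for illustration. */
#if 0
static void example_prefetch_inode(struct gfs2_sbd *sdp, uint64_t ino)
{
        /* Best-effort; any failure is swallowed inside */
        gfs2_glock_prefetch_num(sdp, ino, &gfs2_inode_glops,
                                LM_ST_SHARED, LM_FLAG_TRY);
}
#endif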
1802
1803 /**
1804  * gfs2_lvb_hold - attach an LVB to a glock
1805  * @gl: The glock in question
1806  *
1807  */
1808
1809 int gfs2_lvb_hold(struct gfs2_glock *gl)
1810 {
1811         int error;
1812
1813         gfs2_glmutex_lock(gl);
1814
1815         if (!atomic_read(&gl->gl_lvb_count)) {
1816                 error = gfs2_lm_hold_lvb(gl->gl_sbd, gl->gl_lock, &gl->gl_lvb);
1817                 if (error) {
1818                         gfs2_glmutex_unlock(gl);
1819                         return error;
1820                 }
1821                 gfs2_glock_hold(gl);
1822         }
1823         atomic_inc(&gl->gl_lvb_count);
1824
1825         gfs2_glmutex_unlock(gl);
1826
1827         return 0;
1828 }
1829
1830 /**
1831  * gfs2_lvb_unhold - detach an LVB from a glock
1832  * @gl: The glock in question
1833  *
1834  */
1835
1836 void gfs2_lvb_unhold(struct gfs2_glock *gl)
1837 {
1838         gfs2_glock_hold(gl);
1839         gfs2_glmutex_lock(gl);
1840
1841         gfs2_assert(gl->gl_sbd, atomic_read(&gl->gl_lvb_count) > 0);
1842         if (atomic_dec_and_test(&gl->gl_lvb_count)) {
1843                 gfs2_lm_unhold_lvb(gl->gl_sbd, gl->gl_lock, gl->gl_lvb);
1844                 gl->gl_lvb = NULL;
1845                 gfs2_glock_put(gl);
1846         }
1847
1848         gfs2_glmutex_unlock(gl);
1849         gfs2_glock_put(gl);
1850 }
1851
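/**
 * gfs2_lvb_sync - prod the lock module into pushing local LVB changes out
 * @gl: The glock in question; must be held in the exclusive state
 *
 */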
1852 void gfs2_lvb_sync(struct gfs2_glock *gl)
1853 {
1854         gfs2_glmutex_lock(gl);
1855
1856         gfs2_assert(gl->gl_sbd, atomic_read(&gl->gl_lvb_count));
1857         if (!gfs2_assert_warn(gl->gl_sbd, gfs2_glock_is_held_excl(gl)))
1858                 gfs2_lm_sync_lvb(gl->gl_sbd, gl->gl_lock, gl->gl_lvb);
1859
1860         gfs2_glmutex_unlock(gl);
1861 }
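
/* A sketch of the LVB life cycle (example only, not from the original
   file).  It assumes the caller already holds the glock in LM_ST_EXCLUSIVE,
   which gfs2_lvb_sync() asserts, and that GFS2_MIN_LVB_SIZE bounds the
   usable size of the value block. */
#if 0
static int example_lvb_cycle(struct gfs2_glock *gl)
{
        int error;

        error = gfs2_lvb_hold(gl);      /* attach; refcounted */
        if (error)
                return error;

        memset(gl->gl_lvb, 0, GFS2_MIN_LVB_SIZE);  /* update the value block */
        gfs2_lvb_sync(gl);              /* make it visible cluster-wide */

        gfs2_lvb_unhold(gl);            /* detach when done */
        return 0;
}
#endif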
1862
1863 static void blocking_cb(struct gfs2_sbd *sdp, struct lm_lockname *name,
1864                         unsigned int state)
1865 {
1866         struct gfs2_glock *gl;
1867
1868         gl = gfs2_glock_find(sdp, name);
1869         if (!gl)
1870                 return;
1871
1872         if (gl->gl_ops->go_callback)
1873                 gl->gl_ops->go_callback(gl, state);
1874         handle_callback(gl, state);
1875
1876         spin_lock(&gl->gl_spin);
1877         run_queue(gl);
1878         spin_unlock(&gl->gl_spin);
1879
1880         gfs2_glock_put(gl);
1881 }
1882
1883 /**
1884  * gfs2_glock_cb - Callback used by locking module
1885  * @fsdata: Pointer to the superblock
1886  * @type: Type of callback
1887  * @data: Type dependent data pointer
1888  *
1889  * Called by the locking module when it wants to tell us something.
1890  * Either we need to drop a lock, one of our ASYNC requests completed, or
1891  * a journal from another client needs to be recovered.
1892  */
1893
1894 void gfs2_glock_cb(lm_fsdata_t *fsdata, unsigned int type, void *data)
1895 {
1896         struct gfs2_sbd *sdp = (struct gfs2_sbd *)fsdata;
1897
1898         switch (type) {
1899         case LM_CB_NEED_E:
1900                 blocking_cb(sdp, (struct lm_lockname *)data, LM_ST_UNLOCKED);
1901                 return;
1902
1903         case LM_CB_NEED_D:
1904                 blocking_cb(sdp, (struct lm_lockname *)data, LM_ST_DEFERRED);
1905                 return;
1906
1907         case LM_CB_NEED_S:
1908                 blocking_cb(sdp, (struct lm_lockname *)data, LM_ST_SHARED);
1909                 return;
1910
1911         case LM_CB_ASYNC: {
1912                 struct lm_async_cb *async = (struct lm_async_cb *)data;
1913                 struct gfs2_glock *gl;
1914
1915                 gl = gfs2_glock_find(sdp, &async->lc_name);
1916                 if (gfs2_assert_warn(sdp, gl))
1917                         return;
1918                 if (!gfs2_assert_warn(sdp, gl->gl_req_bh))
1919                         gl->gl_req_bh(gl, async->lc_ret);
1920                 gfs2_glock_put(gl);
1921
1922                 return;
1923         }
1924
1925         case LM_CB_NEED_RECOVERY:
1926                 gfs2_jdesc_make_dirty(sdp, *(unsigned int *)data);
1927                 if (sdp->sd_recoverd_process)
1928                         wake_up_process(sdp->sd_recoverd_process);
1929                 return;
1930
1931         case LM_CB_DROPLOCKS:
1932                 gfs2_gl_hash_clear(sdp, NO_WAIT);
1933                 gfs2_quota_scan(sdp);
1934                 return;
1935
1936         default:
1937                 gfs2_assert_warn(sdp, 0);
1938                 return;
1939         }
1940 }
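
/* How a lock module might feed the two common callback types back to us
   (example only).  "name" and "ret" stand in for the module's own state;
   the lc_name/lc_ret fields match the LM_CB_ASYNC handler above. */
#if 0
static void example_deliver_cbs(lm_fsdata_t *fsdata,
                                struct lm_lockname *name, int ret)
{
        struct lm_async_cb async;

        /* Ask the filesystem to demote a lock to the shared state */
        gfs2_glock_cb(fsdata, LM_CB_NEED_S, name);

        /* Report completion of an earlier GL_ASYNC request */
        async.lc_name = *name;
        async.lc_ret = ret;
        gfs2_glock_cb(fsdata, LM_CB_ASYNC, &async);
}
#endif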
1941
1942 /**
1943  * gfs2_try_toss_inode - try to remove a particular inode struct from cache
1944  * @sdp: the filesystem
1945  * @inum: the inode number
1946  *
1947  */
1948
1949 void gfs2_try_toss_inode(struct gfs2_sbd *sdp, struct gfs2_inum *inum)
1950 {
1951         struct gfs2_glock *gl;
1952         struct gfs2_inode *ip;
1953         int error;
1954
1955         error = gfs2_glock_get(sdp, inum->no_addr, &gfs2_inode_glops,
1956                                NO_CREATE, &gl);
1957         if (error || !gl)
1958                 return;
1959
1960         if (!gfs2_glmutex_trylock(gl))
1961                 goto out;
1962
1963         ip = gl->gl_object;
1964         if (!ip)
1965                 goto out_unlock;
1966
1967         if (atomic_read(&ip->i_count))
1968                 goto out_unlock;
1969
1970         gfs2_inode_destroy(ip);
1971
1972  out_unlock:
1973         gfs2_glmutex_unlock(gl);
1974
1975  out:
1976         gfs2_glock_put(gl);
1977 }
1978
1979 /**
1980  * gfs2_iopen_go_callback - Try to kick the inode/vnode associated with an
1981  *                          iopen glock from memory
1982  * @io_gl: the iopen glock
1983  * @state: the state into which the glock should be put
1984  *
1985  */
1986
1987 void gfs2_iopen_go_callback(struct gfs2_glock *io_gl, unsigned int state)
1988 {
1989         struct gfs2_glock *i_gl;
1990
1991         if (state != LM_ST_UNLOCKED)
1992                 return;
1993
1994         spin_lock(&io_gl->gl_spin);
1995         i_gl = io_gl->gl_object;
1996         if (i_gl) {
1997                 gfs2_glock_hold(i_gl);
1998                 spin_unlock(&io_gl->gl_spin);
1999         } else {
2000                 spin_unlock(&io_gl->gl_spin);
2001                 return;
2002         }
2003
2004         if (gfs2_glmutex_trylock(i_gl)) {
2005                 struct gfs2_inode *ip = i_gl->gl_object;
2006                 if (ip) {
2007                         gfs2_try_toss_vnode(ip);
2008                         gfs2_glmutex_unlock(i_gl);
2009                         gfs2_glock_schedule_for_reclaim(i_gl);
2010                         goto out;
2011                 }
2012                 gfs2_glmutex_unlock(i_gl);
2013         }
2014
2015  out:
2016         gfs2_glock_put(i_gl);
2017 }
2018
2019 /**
2020  * demote_ok - Check to see if it's ok to unlock a glock
2021  * @gl: the glock
2022  *
2023  * Returns: 1 if it's ok
2024  */
2025
2026 static int demote_ok(struct gfs2_glock *gl)
2027 {
2028         struct gfs2_sbd *sdp = gl->gl_sbd;
2029         struct gfs2_glock_operations *glops = gl->gl_ops;
2030         int demote = 1;
2031
2032         if (test_bit(GLF_STICKY, &gl->gl_flags))
2033                 demote = 0;
2034         else if (test_bit(GLF_PREFETCH, &gl->gl_flags))
2035                 demote = time_after_eq(jiffies,
2036                                     gl->gl_stamp +
2037                                     gfs2_tune_get(sdp, gt_prefetch_secs) * HZ);
2038         else if (glops->go_demote_ok)
2039                 demote = glops->go_demote_ok(gl);
2040
2041         return demote;
2042 }
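
/* The shape of a per-type ->go_demote_ok() hook as consulted above
   (example only).  The five-second holdover is an invented figure. */
#if 0
static int example_go_demote_ok(struct gfs2_glock *gl)
{
        /* keep recently touched glocks around a little longer */
        return time_after_eq(jiffies, gl->gl_stamp + 5 * HZ);
}
#endif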
2043
2044 /**
2045  * gfs2_glock_schedule_for_reclaim - Add a glock to the reclaim list
2046  * @gl: the glock
2047  *
2048  */
2049
2050 void gfs2_glock_schedule_for_reclaim(struct gfs2_glock *gl)
2051 {
2052         struct gfs2_sbd *sdp = gl->gl_sbd;
2053
2054         spin_lock(&sdp->sd_reclaim_lock);
2055         if (list_empty(&gl->gl_reclaim)) {
2056                 gfs2_glock_hold(gl);
2057                 list_add(&gl->gl_reclaim, &sdp->sd_reclaim_list);
2058                 atomic_inc(&sdp->sd_reclaim_count);
2059         }
2060         spin_unlock(&sdp->sd_reclaim_lock);
2061
2062         wake_up(&sdp->sd_reclaim_wq);
2063 }
2064
2065 /**
2066  * gfs2_reclaim_glock - process the next glock on the filesystem's reclaim list
2067  * @sdp: the filesystem
2068  *
2069  * Called from the gfs2_glockd() glock reclaim daemon, or when promoting
2070  * a different glock and noticing that there are a lot of glocks on the
2071  * reclaim list.
2072  *
2073  */
2074
2075 void gfs2_reclaim_glock(struct gfs2_sbd *sdp)
2076 {
2077         struct gfs2_glock *gl;
2078
2079         spin_lock(&sdp->sd_reclaim_lock);
2080         if (list_empty(&sdp->sd_reclaim_list)) {
2081                 spin_unlock(&sdp->sd_reclaim_lock);
2082                 return;
2083         }
2084         gl = list_entry(sdp->sd_reclaim_list.next,
2085                         struct gfs2_glock, gl_reclaim);
2086         list_del_init(&gl->gl_reclaim);
2087         spin_unlock(&sdp->sd_reclaim_lock);
2088
2089         atomic_dec(&sdp->sd_reclaim_count);
2090         atomic_inc(&sdp->sd_reclaimed);
2091
2092         if (gfs2_glmutex_trylock(gl)) {
2093                 if (gl->gl_ops == &gfs2_inode_glops) {
2094                         struct gfs2_inode *ip = gl->gl_object;
2095                         if (ip && !atomic_read(&ip->i_count))
2096                                 gfs2_inode_destroy(ip);
2097                 }
2098                 if (queue_empty(gl, &gl->gl_holders) &&
2099                     gl->gl_state != LM_ST_UNLOCKED &&
2100                     demote_ok(gl))
2101                         handle_callback(gl, LM_ST_UNLOCKED);
2102                 gfs2_glmutex_unlock(gl);
2103         }
2104
2105         gfs2_glock_put(gl);
2106 }
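
/* The rough shape of the reclaim daemon loop that drives the function
   above (example only; the real gfs2_glockd() lives in daemon.c and its
   wakeup details may differ). */
#if 0
static int example_glockd(void *data)
{
        struct gfs2_sbd *sdp = data;

        while (!kthread_should_stop()) {
                while (atomic_read(&sdp->sd_reclaim_count))
                        gfs2_reclaim_glock(sdp);

                wait_event_interruptible(sdp->sd_reclaim_wq,
                                         atomic_read(&sdp->sd_reclaim_count) ||
                                         kthread_should_stop());
        }
        return 0;
}
#endif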
2107
2108 /**
2109  * examine_bucket - Call a function for each glock in a hash bucket
2110  * @examiner: the function
2111  * @sdp: the filesystem
2112  * @bucket: the bucket
2113  *
2114  * Returns: 1 if the bucket has entries
2115  */
2116
2117 static int examine_bucket(glock_examiner examiner, struct gfs2_sbd *sdp,
2118                           struct gfs2_gl_hash_bucket *bucket)
2119 {
2120         struct glock_plug plug;
2121         struct list_head *tmp;
2122         struct gfs2_glock *gl;
2123         int entries;
2124
2125         /* Add "plug" at the head of the bucket list, then walk it down the list */
2126         memset(&plug.gl_flags, 0, sizeof(unsigned long));
2127         set_bit(GLF_PLUG, &plug.gl_flags);
2128
2129         write_lock(&bucket->hb_lock);
2130         list_add(&plug.gl_list, &bucket->hb_list);
2131         write_unlock(&bucket->hb_lock);
2132
2133         for (;;) {
2134                 write_lock(&bucket->hb_lock);
2135
2136                 for (;;) {
2137                         tmp = plug.gl_list.next;
2138
2139                         if (tmp == &bucket->hb_list) {
2140                                 list_del(&plug.gl_list);
2141                                 entries = !list_empty(&bucket->hb_list);
2142                                 write_unlock(&bucket->hb_lock);
2143                                 return entries;
2144                         }
2145                         gl = list_entry(tmp, struct gfs2_glock, gl_list);
2146
2147                         /* Move plug up list */
2148                         list_move(&plug.gl_list, &gl->gl_list);
2149
2150                         if (test_bit(GLF_PLUG, &gl->gl_flags))
2151                                 continue;
2152
2153                         /* examiner() must glock_put() */
2154                         gfs2_glock_hold(gl);
2155
2156                         break;
2157                 }
2158
2159                 write_unlock(&bucket->hb_lock);
2160
2161                 examiner(gl);
2162         }
2163 }
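
/* scan_glock() and clear_glock() below are the two examiners passed to
   examine_bucket(); each inherits the reference taken above and drops
   it with gfs2_glock_put() when it is done. */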
2164
2165 /**
2166  * scan_glock - look at a glock and see if we can reclaim it
2167  * @gl: the glock to look at
2168  *
2169  */
2170
2171 static void scan_glock(struct gfs2_glock *gl)
2172 {
2173         if (gfs2_glmutex_trylock(gl)) {
2174                 if (gl->gl_ops == &gfs2_inode_glops) {
2175                         struct gfs2_inode *ip = gl->gl_object;
2176                         if (ip && !atomic_read(&ip->i_count))
2177                                 goto out_schedule;
2178                 }
2179                 if (queue_empty(gl, &gl->gl_holders) &&
2180                     gl->gl_state != LM_ST_UNLOCKED &&
2181                     demote_ok(gl))
2182                         goto out_schedule;
2183
2184                 gfs2_glmutex_unlock(gl);
2185         }
2186
2187         gfs2_glock_put(gl);
2188
2189         return;
2190
2191  out_schedule:
2192         gfs2_glmutex_unlock(gl);
2193         gfs2_glock_schedule_for_reclaim(gl);
2194         gfs2_glock_put(gl);
2195 }
2196
2197 /**
2198  * gfs2_scand_internal - Look for glocks and inodes to toss from memory
2199  * @sdp: the filesystem
2200  *
2201  */
2202
2203 void gfs2_scand_internal(struct gfs2_sbd *sdp)
2204 {
2205         unsigned int x;
2206
2207         for (x = 0; x < GFS2_GL_HASH_SIZE; x++) {
2208                 examine_bucket(scan_glock, sdp, &sdp->sd_gl_hash[x]);
2209                 cond_resched();
2210         }
2211 }
2212
2213 /**
2214  * clear_glock - look at a glock and see if we can free it from the glock cache
2215  * @gl: the glock to look at
2216  *
2217  */
2218
2219 static void clear_glock(struct gfs2_glock *gl)
2220 {
2221         struct gfs2_sbd *sdp = gl->gl_sbd;
2222         int released;
2223
2224         spin_lock(&sdp->sd_reclaim_lock);
2225         if (!list_empty(&gl->gl_reclaim)) {
2226                 list_del_init(&gl->gl_reclaim);
2227                 atomic_dec(&sdp->sd_reclaim_count);
2228                 spin_unlock(&sdp->sd_reclaim_lock);
2229                 released = gfs2_glock_put(gl);
2230                 gfs2_assert(sdp, !released);
2231         } else {
2232                 spin_unlock(&sdp->sd_reclaim_lock);
2233         }
2234
2235         if (gfs2_glmutex_trylock(gl)) {
2236                 if (gl->gl_ops == &gfs2_inode_glops) {
2237                         struct gfs2_inode *ip = gl->gl_object;
2238                         if (ip && !atomic_read(&ip->i_count))
2239                                 gfs2_inode_destroy(ip);
2240                 }
2241                 if (queue_empty(gl, &gl->gl_holders) &&
2242                     gl->gl_state != LM_ST_UNLOCKED)
2243                         handle_callback(gl, LM_ST_UNLOCKED);
2244
2245                 gfs2_glmutex_unlock(gl);
2246         }
2247
2248         gfs2_glock_put(gl);
2249 }
2250
2251 /**
2252  * gfs2_gl_hash_clear - Empty out the glock hash table
2253  * @sdp: the filesystem
2254  * @wait: wait until it's all gone
2255  *
2256  * Called when unmounting the filesystem, or when inter-node lock manager
2257  * requests DROPLOCKS because it is running out of capacity.
2258  */
2259
2260 void gfs2_gl_hash_clear(struct gfs2_sbd *sdp, int wait)
2261 {
2262         unsigned long t;
2263         unsigned int x;
2264         int cont;
2265
2266         t = jiffies;
2267
2268         for (;;) {
2269                 cont = 0;
2270
2271                 for (x = 0; x < GFS2_GL_HASH_SIZE; x++)
2272                         if (examine_bucket(clear_glock, sdp,
2273                                            &sdp->sd_gl_hash[x]))
2274                                 cont = 1;
2275
2276                 if (!wait || !cont)
2277                         break;
2278
2279                 if (time_after_eq(jiffies,
2280                                   t + gfs2_tune_get(sdp, gt_stall_secs) * HZ)) {
2281                         fs_warn(sdp, "Unmount seems to be stalled. "
2282                                      "Dumping lock state...\n");
2283                         gfs2_dump_lockstate(sdp);
2284                         t = jiffies;
2285                 }
2286
2287                 /* invalidate_inodes() requires that the sb inodes list
2288                    not change, but an async completion callback for an
2289                    unlock can occur which does glock_put() which
2290                    can call iput() which will change the sb inodes list.
2291                    invalidate_inodes_mutex prevents glock_put()'s during
2292                    an invalidate_inodes() */
2293
2294                 mutex_lock(&sdp->sd_invalidate_inodes_mutex);
2295                 invalidate_inodes(sdp->sd_vfs);
2296                 mutex_unlock(&sdp->sd_invalidate_inodes_mutex);
2297                 yield();
2298         }
2299 }
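
/* The two callers of gfs2_gl_hash_clear() (example only): unmount is
   assumed to pass WAIT, the presumed companion of the NO_WAIT used by
   the LM_CB_DROPLOCKS handler above. */
#if 0
static void example_hash_clear_callers(struct gfs2_sbd *sdp)
{
        gfs2_gl_hash_clear(sdp, WAIT);          /* unmount: wait for empty */
        gfs2_gl_hash_clear(sdp, NO_WAIT);       /* DROPLOCKS: single sweep */
}
#endif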
2300
2301 /*
2302  *  Diagnostic routines to help debug distributed deadlock
2303  */
2304
2305 /**
2306  * dump_holder - print information about a glock holder
2307  * @str: a string naming the type of holder
2308  * @gh: the glock holder
2309  *
2310  * Returns: 0 (everything is printed directly to the console)
2311  */
2312
2313 static int dump_holder(char *str, struct gfs2_holder *gh)
2314 {
2315         unsigned int x;
2316         int error = -ENOBUFS;
2317
2318         printk(KERN_INFO "  %s\n", str);
2319         printk(KERN_INFO "    owner = %ld\n",
2320                    (gh->gh_owner) ? (long)gh->gh_owner->pid : -1);
2321         printk(KERN_INFO "    gh_state = %u\n", gh->gh_state);
2322         printk(KERN_INFO "    gh_flags =");
2323         for (x = 0; x < 32; x++)
2324                 if (gh->gh_flags & (1 << x))
2325                         printk(" %u", x);
2326         printk(" \n");
2327         printk(KERN_INFO "    error = %d\n", gh->gh_error);
2328         printk(KERN_INFO "    gh_iflags =");
2329         for (x = 0; x < 32; x++)
2330                 if (test_bit(x, &gh->gh_iflags))
2331                         printk(" %u", x);
2332         printk(" \n");
2333         print_symbol(KERN_INFO "    initialized at: %s\n", gh->gh_ip);
2334
2335         error = 0;
2336
2337         return error;
2338 }
2339
2340 /**
2341  * dump_inode - print information about an inode
2342  * @ip: the inode
2343  *
2344  * Returns: 0 (everything is printed directly to the console)
2345  */
2346
2347 static int dump_inode(struct gfs2_inode *ip)
2348 {
2349         unsigned int x;
2350         int error = -ENOBUFS;
2351
2352         printk(KERN_INFO "  Inode:\n");
2353         printk(KERN_INFO "    num = %llu %llu\n",
2354                     ip->i_num.no_formal_ino, ip->i_num.no_addr);
2355         printk(KERN_INFO "    type = %u\n", IF2DT(ip->i_di.di_mode));
2356         printk(KERN_INFO "    i_count = %d\n", atomic_read(&ip->i_count));
2357         printk(KERN_INFO "    i_flags =");
2358         for (x = 0; x < 32; x++)
2359                 if (test_bit(x, &ip->i_flags))
2360                         printk(" %u", x);
2361         printk(" \n");
2362         printk(KERN_INFO "    vnode = %s\n", (ip->i_vnode) ? "yes" : "no");
2363
2364         error = 0;
2365
2366         return error;
2367 }
2368
2369 /**
2370  * dump_glock - print information about a glock
2371  * @gl: the glock
2373  *
2374  * Returns: 0 on success, -ENOBUFS if the glock's inode is busy
2375  */
2376
2377 static int dump_glock(struct gfs2_glock *gl)
2378 {
2379         struct gfs2_holder *gh;
2380         unsigned int x;
2381         int error = -ENOBUFS;
2382
2383         spin_lock(&gl->gl_spin);
2384
2385         printk(KERN_INFO "Glock (%u, %llu)\n",
2386                     gl->gl_name.ln_type,
2387                     gl->gl_name.ln_number);
2388         printk(KERN_INFO "  gl_flags =");
2389         for (x = 0; x < 32; x++)
2390                 if (test_bit(x, &gl->gl_flags))
2391                         printk(" %u", x);
2392         printk(" \n");
2393         printk(KERN_INFO "  gl_ref = %d\n", atomic_read(&gl->gl_ref.refcount));
2394         printk(KERN_INFO "  gl_state = %u\n", gl->gl_state);
2395         printk(KERN_INFO "  req_gh = %s\n", (gl->gl_req_gh) ? "yes" : "no");
2396         printk(KERN_INFO "  req_bh = %s\n", (gl->gl_req_bh) ? "yes" : "no");
2397         printk(KERN_INFO "  lvb_count = %d\n", atomic_read(&gl->gl_lvb_count));
2398         printk(KERN_INFO "  object = %s\n", (gl->gl_object) ? "yes" : "no");
2399         printk(KERN_INFO "  le = %s\n",
2400                    (list_empty(&gl->gl_le.le_list)) ? "no" : "yes");
2401         printk(KERN_INFO "  reclaim = %s\n",
2402                     (list_empty(&gl->gl_reclaim)) ? "no" : "yes");
2403         if (gl->gl_aspace)
2404                 printk(KERN_INFO "  aspace = %lu\n",
2405                             gl->gl_aspace->i_mapping->nrpages);
2406         else
2407                 printk(KERN_INFO "  aspace = no\n");
2408         printk(KERN_INFO "  ail = %d\n", atomic_read(&gl->gl_ail_count));
2409         if (gl->gl_req_gh) {
2410                 error = dump_holder("Request", gl->gl_req_gh);
2411                 if (error)
2412                         goto out;
2413         }
2414         list_for_each_entry(gh, &gl->gl_holders, gh_list) {
2415                 error = dump_holder("Holder", gh);
2416                 if (error)
2417                         goto out;
2418         }
2419         list_for_each_entry(gh, &gl->gl_waiters1, gh_list) {
2420                 error = dump_holder("Waiter1", gh);
2421                 if (error)
2422                         goto out;
2423         }
2424         list_for_each_entry(gh, &gl->gl_waiters2, gh_list) {
2425                 error = dump_holder("Waiter2", gh);
2426                 if (error)
2427                         goto out;
2428         }
2429         list_for_each_entry(gh, &gl->gl_waiters3, gh_list) {
2430                 error = dump_holder("Waiter3", gh);
2431                 if (error)
2432                         goto out;
2433         }
2434         if (gl->gl_ops == &gfs2_inode_glops && gl->gl_object) {
2435                 if (!test_bit(GLF_LOCK, &gl->gl_flags) &&
2436                     list_empty(&gl->gl_holders)) {
2437                         error = dump_inode(gl->gl_object);
2438                         if (error)
2439                                 goto out;
2440                 } else {
2441                         error = -ENOBUFS;
2442                         printk(KERN_INFO "  Inode: busy\n");
2443                 }
2444         }
2445
2446         error = 0;
2447
2448  out:
2449         spin_unlock(&gl->gl_spin);
2450
2451         return error;
2452 }
2453
2454 /**
2455  * gfs2_dump_lockstate - print out the current lockstate
2456  * @sdp: the filesystem
2457  *
2458  * Everything is dumped to the console via printk.
2459  * Returns: 0 on success, errno on failure
2460  *
2461  */
2462
2463 int gfs2_dump_lockstate(struct gfs2_sbd *sdp)
2464 {
2465         struct gfs2_gl_hash_bucket *bucket;
2466         struct gfs2_glock *gl;
2467         unsigned int x;
2468         int error = 0;
2469
2470         for (x = 0; x < GFS2_GL_HASH_SIZE; x++) {
2471                 bucket = &sdp->sd_gl_hash[x];
2472
2473                 read_lock(&bucket->hb_lock);
2474
2475                 list_for_each_entry(gl, &bucket->hb_list, gl_list) {
2476                         if (test_bit(GLF_PLUG, &gl->gl_flags))
2477                                 continue;
2478
2479                         error = dump_glock(gl);
2480                         if (error)
2481                                 break;
2482                 }
2483
2484                 read_unlock(&bucket->hb_lock);
2485
2486                 if (error)
2487                         break;
2488         }
2489
2491         return error;
2492 }
2493