Merge branch 'for-rmk/samsung6' of git://git.fluff.org/bjdooks/linux into devel-stable
[linux-2.6.git] / fs / gfs2 / lock_dlm.c
1 /*
2  * Copyright (C) Sistina Software, Inc.  1997-2003 All rights reserved.
3  * Copyright (C) 2004-2009 Red Hat, Inc.  All rights reserved.
4  *
5  * This copyrighted material is made available to anyone wishing to use,
6  * modify, copy, or redistribute it subject to the terms and conditions
7  * of the GNU General Public License version 2.
8  */
9
10 #include <linux/fs.h>
11 #include <linux/dlm.h>
12 #include <linux/types.h>
13 #include <linux/gfs2_ondisk.h>
14
15 #include "incore.h"
16 #include "glock.h"
17 #include "util.h"
18
19
/*
 * gdlm_ast - completion callback (AST) invoked by dlm when a lock
 * request submitted via dlm_lock() in gdlm_lock() finishes.
 *
 * @arg: the gfs2_glock that owns the dlm lock (passed as astarg)
 *
 * Translates the dlm status in gl_lksb.sb_status into an LM_OUT_* /
 * LM_ST_* result and hands it to gfs2_glock_complete().  On a
 * successful unlock (-DLM_EUNLOCK) the glock itself is freed here.
 */
static void gdlm_ast(void *arg)
{
        struct gfs2_glock *gl = arg;
        unsigned ret = gl->gl_state;
        struct gfs2_sbd *sdp = gl->gl_sbd;

        /* gfs2 never asks dlm to demote a lock, so DEMOTED must not appear */
        BUG_ON(gl->gl_lksb.sb_flags & DLM_SBF_DEMOTED);

        /* dlm says the lock value block is stale: clear our copy */
        if (gl->gl_lksb.sb_flags & DLM_SBF_VALNOTVALID)
                memset(gl->gl_lvb, 0, GDLM_LVB_SIZE);

        switch (gl->gl_lksb.sb_status) {
        case -DLM_EUNLOCK: /* Unlocked, so glock can be freed */
                kmem_cache_free(gfs2_glock_cachep, gl);
                /* last disposal wakes anyone waiting in gfs2 unmount path */
                if (atomic_dec_and_test(&sdp->sd_glock_disposal))
                        wake_up(&sdp->sd_glock_wait);
                return;
        case -DLM_ECANCEL: /* Cancel while getting lock */
                ret |= LM_OUT_CANCELED;
                goto out;
        case -EAGAIN: /* Try lock fails */
                goto out;
        case -EINVAL: /* Invalid */
        case -ENOMEM: /* Out of memory */
                ret |= LM_OUT_ERROR;
                goto out;
        case 0: /* Success */
                break;
        default: /* Something unexpected */
                BUG();
        }

        /* success: report the state that was requested ... */
        ret = gl->gl_req;
        if (gl->gl_lksb.sb_flags & DLM_SBF_ALTMODE) {
                /* ... unless dlm granted the alternate mode (LM_FLAG_ANY):
                   SHARED and DEFERRED are each other's alternates */
                if (gl->gl_req == LM_ST_SHARED)
                        ret = LM_ST_DEFERRED;
                else if (gl->gl_req == LM_ST_DEFERRED)
                        ret = LM_ST_SHARED;
                else
                        BUG();
        }

        /* lock now exists in dlm; lkid in the lksb is valid from here on */
        set_bit(GLF_INITIAL, &gl->gl_flags);
        gfs2_glock_complete(gl, ret);
        return;
out:
        /* the very first request failed: no dlm lock exists, drop the lkid */
        if (!test_bit(GLF_INITIAL, &gl->gl_flags))
                gl->gl_lksb.sb_lkid = 0;
        gfs2_glock_complete(gl, ret);
}
70
71 static void gdlm_bast(void *arg, int mode)
72 {
73         struct gfs2_glock *gl = arg;
74
75         switch (mode) {
76         case DLM_LOCK_EX:
77                 gfs2_glock_cb(gl, LM_ST_UNLOCKED);
78                 break;
79         case DLM_LOCK_CW:
80                 gfs2_glock_cb(gl, LM_ST_DEFERRED);
81                 break;
82         case DLM_LOCK_PR:
83                 gfs2_glock_cb(gl, LM_ST_SHARED);
84                 break;
85         default:
86                 printk(KERN_ERR "unknown bast mode %d", mode);
87                 BUG();
88         }
89 }
90
91 /* convert gfs lock-state to dlm lock-mode */
92
93 static int make_mode(const unsigned int lmstate)
94 {
95         switch (lmstate) {
96         case LM_ST_UNLOCKED:
97                 return DLM_LOCK_NL;
98         case LM_ST_EXCLUSIVE:
99                 return DLM_LOCK_EX;
100         case LM_ST_DEFERRED:
101                 return DLM_LOCK_CW;
102         case LM_ST_SHARED:
103                 return DLM_LOCK_PR;
104         }
105         printk(KERN_ERR "unknown LM state %d", lmstate);
106         BUG();
107         return -1;
108 }
109
110 static u32 make_flags(const u32 lkid, const unsigned int gfs_flags,
111                       const int req)
112 {
113         u32 lkf = 0;
114
115         if (gfs_flags & LM_FLAG_TRY)
116                 lkf |= DLM_LKF_NOQUEUE;
117
118         if (gfs_flags & LM_FLAG_TRY_1CB) {
119                 lkf |= DLM_LKF_NOQUEUE;
120                 lkf |= DLM_LKF_NOQUEUEBAST;
121         }
122
123         if (gfs_flags & LM_FLAG_PRIORITY) {
124                 lkf |= DLM_LKF_NOORDER;
125                 lkf |= DLM_LKF_HEADQUE;
126         }
127
128         if (gfs_flags & LM_FLAG_ANY) {
129                 if (req == DLM_LOCK_PR)
130                         lkf |= DLM_LKF_ALTCW;
131                 else if (req == DLM_LOCK_CW)
132                         lkf |= DLM_LKF_ALTPR;
133                 else
134                         BUG();
135         }
136
137         if (lkid != 0) 
138                 lkf |= DLM_LKF_CONVERT;
139
140         lkf |= DLM_LKF_VALBLK;
141
142         return lkf;
143 }
144
145 static unsigned int gdlm_lock(struct gfs2_glock *gl,
146                               unsigned int req_state, unsigned int flags)
147 {
148         struct lm_lockstruct *ls = &gl->gl_sbd->sd_lockstruct;
149         int error;
150         int req;
151         u32 lkf;
152
153         gl->gl_req = req_state;
154         req = make_mode(req_state);
155         lkf = make_flags(gl->gl_lksb.sb_lkid, flags, req);
156
157         /*
158          * Submit the actual lock request.
159          */
160
161         error = dlm_lock(ls->ls_dlm, req, &gl->gl_lksb, lkf, gl->gl_strname,
162                          GDLM_STRNAME_BYTES - 1, 0, gdlm_ast, gl, gdlm_bast);
163         if (error == -EAGAIN)
164                 return 0;
165         if (error)
166                 return LM_OUT_ERROR;
167         return LM_OUT_ASYNC;
168 }
169
170 static void gdlm_put_lock(struct kmem_cache *cachep, struct gfs2_glock *gl)
171 {
172         struct gfs2_sbd *sdp = gl->gl_sbd;
173         struct lm_lockstruct *ls = &sdp->sd_lockstruct;
174         int error;
175
176         if (gl->gl_lksb.sb_lkid == 0) {
177                 kmem_cache_free(cachep, gl);
178                 if (atomic_dec_and_test(&sdp->sd_glock_disposal))
179                         wake_up(&sdp->sd_glock_wait);
180                 return;
181         }
182
183         error = dlm_unlock(ls->ls_dlm, gl->gl_lksb.sb_lkid, DLM_LKF_VALBLK,
184                            NULL, gl);
185         if (error) {
186                 printk(KERN_ERR "gdlm_unlock %x,%llx err=%d\n",
187                        gl->gl_name.ln_type,
188                        (unsigned long long)gl->gl_name.ln_number, error);
189                 return;
190         }
191 }
192
193 static void gdlm_cancel(struct gfs2_glock *gl)
194 {
195         struct lm_lockstruct *ls = &gl->gl_sbd->sd_lockstruct;
196         dlm_unlock(ls->ls_dlm, gl->gl_lksb.sb_lkid, DLM_LKF_CANCEL, NULL, gl);
197 }
198
199 static int gdlm_mount(struct gfs2_sbd *sdp, const char *fsname)
200 {
201         struct lm_lockstruct *ls = &sdp->sd_lockstruct;
202         int error;
203
204         if (fsname == NULL) {
205                 fs_info(sdp, "no fsname found\n");
206                 return -EINVAL;
207         }
208
209         error = dlm_new_lockspace(fsname, strlen(fsname), &ls->ls_dlm,
210                                   DLM_LSFL_FS | DLM_LSFL_NEWEXCL |
211                                   (ls->ls_nodir ? DLM_LSFL_NODIR : 0),
212                                   GDLM_LVB_SIZE);
213         if (error)
214                 printk(KERN_ERR "dlm_new_lockspace error %d", error);
215
216         return error;
217 }
218
219 static void gdlm_unmount(struct gfs2_sbd *sdp)
220 {
221         struct lm_lockstruct *ls = &sdp->sd_lockstruct;
222
223         if (ls->ls_dlm) {
224                 dlm_release_lockspace(ls->ls_dlm, 2);
225                 ls->ls_dlm = NULL;
226         }
227 }
228
/* mount-option tokens parsed from the "lockproto=lock_dlm" hostdata;
   the Opt_* values are declared elsewhere in gfs2 */
static const match_table_t dlm_tokens = {
	{ Opt_jid, "jid=%d"},
	{ Opt_id, "id=%d"},
	{ Opt_first, "first=%d"},
	{ Opt_nodir, "nodir=%d"},
	{ Opt_err, NULL },
};
236
/* lock-module operations exported to the gfs2 core for the "lock_dlm"
   cluster locking protocol */
const struct lm_lockops gfs2_dlm_ops = {
	.lm_proto_name = "lock_dlm",
	.lm_mount = gdlm_mount,
	.lm_unmount = gdlm_unmount,
	.lm_put_lock = gdlm_put_lock,
	.lm_lock = gdlm_lock,
	.lm_cancel = gdlm_cancel,
	.lm_tokens = &dlm_tokens,
};
246