/* -*- mode: c; c-basic-offset: 8; -*-
 * vim: noexpandtab sw=8 ts=8 sts=0:
 *
 * Code which implements the kernel side of a minimal userspace
 * interface to our DLM.
 *
 * Many of the functions here are pared down versions of dlmglue.c
 * functions.
 *
 * Copyright (C) 2003, 2004 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 021110-1307, USA.
 */
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/module.h>
#include <linux/fs.h>
#include <linux/types.h>
#include <linux/crc32.h>

#include "ocfs2_lockingver.h"
#include "stackglue.h"
#include "userdlm.h"

#define MLOG_MASK_PREFIX ML_DLMFS
#include "cluster/masklog.h"
static inline struct user_lock_res *user_lksb_to_lock_res(struct ocfs2_dlm_lksb *lksb)
{
        return container_of(lksb, struct user_lock_res, l_lksb);
}
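/* The wait helpers below sample l_flags under the spinlock so that
 * wait_event() always tests a consistent snapshot of the flags. */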
static inline int user_check_wait_flag(struct user_lock_res *lockres,
                                       int flag)
{
        int ret;

        spin_lock(&lockres->l_lock);
        ret = lockres->l_flags & flag;
        spin_unlock(&lockres->l_lock);

        return ret;
}
static inline void user_wait_on_busy_lock(struct user_lock_res *lockres)
{
        wait_event(lockres->l_event,
                   !user_check_wait_flag(lockres, USER_LOCK_BUSY));
}
static inline void user_wait_on_blocked_lock(struct user_lock_res *lockres)
{
        wait_event(lockres->l_event,
                   !user_check_wait_flag(lockres, USER_LOCK_BLOCKED));
}
/* I heart container_of... */
static inline struct ocfs2_cluster_connection *
cluster_connection_from_user_lockres(struct user_lock_res *lockres)
{
        struct dlmfs_inode_private *ip;

        ip = container_of(lockres,
                          struct dlmfs_inode_private,
                          ip_lockres);
        return ip->ip_conn;
}
static struct inode *
user_dlm_inode_from_user_lockres(struct user_lock_res *lockres)
{
        struct dlmfs_inode_private *ip;

        ip = container_of(lockres,
                          struct dlmfs_inode_private,
                          ip_lockres);
        return &ip->ip_vfs_inode;
}
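/* Reset USER_LOCK_BUSY after a failed call into the dlm so the
 * lockres doesn't look permanently in-flight to later requests. */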
static inline void user_recover_from_dlm_error(struct user_lock_res *lockres)
{
        spin_lock(&lockres->l_lock);
        lockres->l_flags &= ~USER_LOCK_BUSY;
        spin_unlock(&lockres->l_lock);
}
#define user_log_dlm_error(_func, _stat, _lockres) do {         \
        mlog(ML_ERROR, "Dlm error %d while calling %s on "      \
                "resource %.*s\n", _stat, _func,                \
                _lockres->l_namelen, _lockres->l_name);         \
} while (0)
/* WARNING: This function lives in a world where the only three lock
 * levels are EX, PR, and NL. It *will* have to be adjusted when more
 * lock types are added. */
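/* For example: a node requesting EX forces us all the way down to NL,
 * while a node requesting PR lets us keep PR alongside it. */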
static inline int user_highest_compat_lock_level(int level)
{
        int new_level = DLM_LOCK_EX;

        if (level == DLM_LOCK_EX)
                new_level = DLM_LOCK_NL;
        else if (level == DLM_LOCK_PR)
                new_level = DLM_LOCK_PR;
        return new_level;
}
static void user_ast(struct ocfs2_dlm_lksb *lksb)
{
        struct user_lock_res *lockres = user_lksb_to_lock_res(lksb);
        int status;

        mlog(0, "AST fired for lockres %.*s\n", lockres->l_namelen,
             lockres->l_name);

        spin_lock(&lockres->l_lock);

        status = ocfs2_dlm_lock_status(&lockres->l_lksb);
        if (status) {
                mlog(ML_ERROR, "lksb status value of %u on lockres %.*s\n",
                     status, lockres->l_namelen, lockres->l_name);
                spin_unlock(&lockres->l_lock);
                return;
        }

        mlog_bug_on_msg(lockres->l_requested == DLM_LOCK_IV,
                        "Lockres %.*s, requested ivmode. flags 0x%x\n",
                        lockres->l_namelen, lockres->l_name, lockres->l_flags);

        /* we're downconverting. */
        if (lockres->l_requested < lockres->l_level) {
                if (lockres->l_requested <=
                    user_highest_compat_lock_level(lockres->l_blocking)) {
                        lockres->l_blocking = DLM_LOCK_NL;
                        lockres->l_flags &= ~USER_LOCK_BLOCKED;
                }
        }

        lockres->l_level = lockres->l_requested;
        lockres->l_requested = DLM_LOCK_IV;
        lockres->l_flags |= USER_LOCK_ATTACHED;
        lockres->l_flags &= ~USER_LOCK_BUSY;

        spin_unlock(&lockres->l_lock);

        wake_up(&lockres->l_event);
}
static inline void user_dlm_grab_inode_ref(struct user_lock_res *lockres)
{
        struct inode *inode;
        inode = user_dlm_inode_from_user_lockres(lockres);
        if (!igrab(inode))
                BUG();
}
static void user_dlm_unblock_lock(struct work_struct *work);
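/* Queue the lockres for the unblock worker. The inode reference taken
 * here pins the lockres until user_dlm_unblock_lock() runs and drops
 * it again. */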
static void __user_dlm_queue_lockres(struct user_lock_res *lockres)
{
        if (!(lockres->l_flags & USER_LOCK_QUEUED)) {
                user_dlm_grab_inode_ref(lockres);

                INIT_WORK(&lockres->l_work, user_dlm_unblock_lock);

                queue_work(user_dlm_worker, &lockres->l_work);
                lockres->l_flags |= USER_LOCK_QUEUED;
        }
}
static void __user_dlm_cond_queue_lockres(struct user_lock_res *lockres)
{
        int queue = 0;

        if (!(lockres->l_flags & USER_LOCK_BLOCKED))
                return;

        switch (lockres->l_blocking) {
        case DLM_LOCK_EX:
                if (!lockres->l_ex_holders && !lockres->l_ro_holders)
                        queue = 1;
                break;
        case DLM_LOCK_PR:
                if (!lockres->l_ex_holders)
                        queue = 1;
                break;
        default:
                BUG();
        }

        if (queue)
                __user_dlm_queue_lockres(lockres);
}
static void user_bast(struct ocfs2_dlm_lksb *lksb, int level)
{
        struct user_lock_res *lockres = user_lksb_to_lock_res(lksb);

        mlog(0, "Blocking AST fired for lockres %.*s. Blocking level %d\n",
             lockres->l_namelen, lockres->l_name, level);

        spin_lock(&lockres->l_lock);
        lockres->l_flags |= USER_LOCK_BLOCKED;
        if (level > lockres->l_blocking)
                lockres->l_blocking = level;

        __user_dlm_queue_lockres(lockres);
        spin_unlock(&lockres->l_lock);

        wake_up(&lockres->l_event);
}
static void user_unlock_ast(struct ocfs2_dlm_lksb *lksb, int status)
{
        struct user_lock_res *lockres = user_lksb_to_lock_res(lksb);

        mlog(0, "UNLOCK AST called on lock %.*s\n", lockres->l_namelen,
             lockres->l_name);

        if (status)
                mlog(ML_ERROR, "dlm returns status %d\n", status);

        spin_lock(&lockres->l_lock);
        /* The teardown flag gets set early during the unlock process,
         * so test the cancel flag to make sure that this ast isn't
         * for a concurrent cancel. */
        if (lockres->l_flags & USER_LOCK_IN_TEARDOWN
            && !(lockres->l_flags & USER_LOCK_IN_CANCEL)) {
                lockres->l_level = DLM_LOCK_IV;
        } else if (status == DLM_CANCELGRANT) {
                /* We tried to cancel a convert request, but it was
                 * already granted. Don't clear the busy flag - the
                 * ast should've done this already. */
                BUG_ON(!(lockres->l_flags & USER_LOCK_IN_CANCEL));
                lockres->l_flags &= ~USER_LOCK_IN_CANCEL;
                goto out_noclear;
        } else {
                BUG_ON(!(lockres->l_flags & USER_LOCK_IN_CANCEL));
                /* Cancel succeeded, we want to re-queue */
                lockres->l_requested = DLM_LOCK_IV; /* cancel an
                                                     * upconvert
                                                     * request. */
                lockres->l_flags &= ~USER_LOCK_IN_CANCEL;
                /* we want the unblock thread to look at it again
                 * now. */
                if (lockres->l_flags & USER_LOCK_BLOCKED)
                        __user_dlm_queue_lockres(lockres);
        }

        lockres->l_flags &= ~USER_LOCK_BUSY;
out_noclear:
        spin_unlock(&lockres->l_lock);

        wake_up(&lockres->l_event);
}
/*
 * This is the userdlmfs locking protocol version.
 *
 * See fs/ocfs2/dlmglue.c for more details on locking versions.
 */
static struct ocfs2_locking_protocol user_dlm_lproto = {
        .lp_max_version = {
                .pv_major = OCFS2_LOCKING_PROTOCOL_MAJOR,
                .pv_minor = OCFS2_LOCKING_PROTOCOL_MINOR,
        },
        .lp_lock_ast = user_ast,
        .lp_blocking_ast = user_bast,
        .lp_unlock_ast = user_unlock_ast,
};
static inline void user_dlm_drop_inode_ref(struct user_lock_res *lockres)
{
        struct inode *inode;
        inode = user_dlm_inode_from_user_lockres(lockres);
        iput(inode);
}
static void user_dlm_unblock_lock(struct work_struct *work)
{
        int new_level, status;
        struct user_lock_res *lockres =
                container_of(work, struct user_lock_res, l_work);
        struct ocfs2_cluster_connection *conn =
                cluster_connection_from_user_lockres(lockres);

        mlog(0, "processing lockres %.*s\n", lockres->l_namelen,
             lockres->l_name);

        spin_lock(&lockres->l_lock);

        mlog_bug_on_msg(!(lockres->l_flags & USER_LOCK_QUEUED),
                        "Lockres %.*s, flags 0x%x\n",
                        lockres->l_namelen, lockres->l_name, lockres->l_flags);

        /* notice that we don't clear USER_LOCK_BLOCKED here. If it's
         * set, we want user_ast to clear it. */
        lockres->l_flags &= ~USER_LOCK_QUEUED;

        /* It's valid to get here and no longer be blocked - if we get
         * several basts in a row, we might be queued by the first
         * one, the unblock thread might run and clear the queued
         * flag, and finally we might get another bast which re-queues
         * us before our ast for the downconvert is called. */
        if (!(lockres->l_flags & USER_LOCK_BLOCKED)) {
                spin_unlock(&lockres->l_lock);
                goto drop_ref;
        }

        if (lockres->l_flags & USER_LOCK_IN_TEARDOWN) {
                spin_unlock(&lockres->l_lock);
                goto drop_ref;
        }

        if (lockres->l_flags & USER_LOCK_BUSY) {
                if (lockres->l_flags & USER_LOCK_IN_CANCEL) {
                        spin_unlock(&lockres->l_lock);
                        goto drop_ref;
                }

                lockres->l_flags |= USER_LOCK_IN_CANCEL;
                spin_unlock(&lockres->l_lock);

                status = ocfs2_dlm_unlock(conn, &lockres->l_lksb,
                                          DLM_LKF_CANCEL);
                if (status)
                        user_log_dlm_error("ocfs2_dlm_unlock", status, lockres);
                goto drop_ref;
        }

        /* If there are still incompat holders, we can exit safely
         * without worrying about re-queueing this lock as that will
         * happen on the last call to user_dlm_cluster_unlock. */
        if ((lockres->l_blocking == DLM_LOCK_EX)
            && (lockres->l_ex_holders || lockres->l_ro_holders)) {
                spin_unlock(&lockres->l_lock);
                mlog(0, "can't downconvert for ex: ro = %u, ex = %u\n",
                     lockres->l_ro_holders, lockres->l_ex_holders);
                goto drop_ref;
        }

        if ((lockres->l_blocking == DLM_LOCK_PR)
            && lockres->l_ex_holders) {
                spin_unlock(&lockres->l_lock);
                mlog(0, "can't downconvert for pr: ex = %u\n",
                     lockres->l_ex_holders);
                goto drop_ref;
        }

        /* yay, we can downconvert now. */
        new_level = user_highest_compat_lock_level(lockres->l_blocking);
        lockres->l_requested = new_level;
        lockres->l_flags |= USER_LOCK_BUSY;
        mlog(0, "Downconvert lock from %d to %d\n",
             lockres->l_level, new_level);
        spin_unlock(&lockres->l_lock);

        /* need lock downconvert request now... */
        status = ocfs2_dlm_lock(conn, new_level, &lockres->l_lksb,
                                DLM_LKF_CONVERT|DLM_LKF_VALBLK,
                                lockres->l_name,
                                lockres->l_namelen);
        if (status) {
                user_log_dlm_error("ocfs2_dlm_lock", status, lockres);
                user_recover_from_dlm_error(lockres);
        }

drop_ref:
        user_dlm_drop_inode_ref(lockres);
}
static inline void user_dlm_inc_holders(struct user_lock_res *lockres,
                                        int level)
{
        switch (level) {
        case DLM_LOCK_EX:
                lockres->l_ex_holders++;
                break;
        case DLM_LOCK_PR:
                lockres->l_ro_holders++;
                break;
        default:
                BUG();
        }
}
/* predict what lock level we'll be dropping down to on behalf
 * of another node, and return true if the currently wanted
 * level will be compatible with it. */
static inline int
user_may_continue_on_blocked_lock(struct user_lock_res *lockres,
                                  int wanted)
{
        BUG_ON(!(lockres->l_flags & USER_LOCK_BLOCKED));

        return wanted <= user_highest_compat_lock_level(lockres->l_blocking);
}
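/*
 * Take a cluster lock at the given level (DLM_LOCK_EX or DLM_LOCK_PR)
 * on behalf of a userspace holder, upconverting the underlying dlm
 * lock if the currently granted level is too weak. May sleep; returns
 * -ERESTARTSYS if interrupted by a signal.
 */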
int user_dlm_cluster_lock(struct user_lock_res *lockres,
                          int level,
                          int lkm_flags)
{
        int status, local_flags;
        struct ocfs2_cluster_connection *conn =
                cluster_connection_from_user_lockres(lockres);

        if (level != DLM_LOCK_EX &&
            level != DLM_LOCK_PR) {
                mlog(ML_ERROR, "lockres %.*s: invalid request!\n",
                     lockres->l_namelen, lockres->l_name);
                status = -EINVAL;
                goto bail;
        }

        mlog(0, "lockres %.*s: asking for %s lock, passed flags = 0x%x\n",
             lockres->l_namelen, lockres->l_name,
             (level == DLM_LOCK_EX) ? "DLM_LOCK_EX" : "DLM_LOCK_PR",
             lkm_flags);

again:
        if (signal_pending(current)) {
                status = -ERESTARTSYS;
                goto bail;
        }

        spin_lock(&lockres->l_lock);

        /* We only compare against the currently granted level
         * here. If the lock is blocked waiting on a downconvert,
         * we'll get caught below. */
        if ((lockres->l_flags & USER_LOCK_BUSY) &&
            (level > lockres->l_level)) {
                /* is someone sitting in dlm_lock? If so, wait on
                 * them. */
                spin_unlock(&lockres->l_lock);

                user_wait_on_busy_lock(lockres);
                goto again;
        }

        if ((lockres->l_flags & USER_LOCK_BLOCKED) &&
            (!user_may_continue_on_blocked_lock(lockres, level))) {
                /* is the lock currently blocked on behalf of
                 * another node? */
                spin_unlock(&lockres->l_lock);

                user_wait_on_blocked_lock(lockres);
                goto again;
        }

        if (level > lockres->l_level) {
                local_flags = lkm_flags | DLM_LKF_VALBLK;
                if (lockres->l_level != DLM_LOCK_IV)
                        local_flags |= DLM_LKF_CONVERT;

                lockres->l_requested = level;
                lockres->l_flags |= USER_LOCK_BUSY;
                spin_unlock(&lockres->l_lock);

                BUG_ON(level == DLM_LOCK_IV);
                BUG_ON(level == DLM_LOCK_NL);

                /* call dlm_lock to upgrade lock now */
                status = ocfs2_dlm_lock(conn, level, &lockres->l_lksb,
                                        local_flags, lockres->l_name,
                                        lockres->l_namelen);
                if (status) {
                        if ((lkm_flags & DLM_LKF_NOQUEUE) &&
                            (status != -EAGAIN))
                                user_log_dlm_error("ocfs2_dlm_lock",
                                                   status, lockres);
                        user_recover_from_dlm_error(lockres);
                        goto bail;
                }

                user_wait_on_busy_lock(lockres);
                goto again;
        }

        user_dlm_inc_holders(lockres, level);
        spin_unlock(&lockres->l_lock);

        status = 0;
bail:
        return status;
}
static inline void user_dlm_dec_holders(struct user_lock_res *lockres,
                                        int level)
{
        switch (level) {
        case DLM_LOCK_EX:
                BUG_ON(!lockres->l_ex_holders);
                lockres->l_ex_holders--;
                break;
        case DLM_LOCK_PR:
                BUG_ON(!lockres->l_ro_holders);
                lockres->l_ro_holders--;
                break;
        default:
                BUG();
        }
}
void user_dlm_cluster_unlock(struct user_lock_res *lockres,
                             int level)
{
        if (level != DLM_LOCK_EX &&
            level != DLM_LOCK_PR) {
                mlog(ML_ERROR, "lockres %.*s: invalid request!\n",
                     lockres->l_namelen, lockres->l_name);
                return;
        }

        spin_lock(&lockres->l_lock);
        user_dlm_dec_holders(lockres, level);
        __user_dlm_cond_queue_lockres(lockres);
        spin_unlock(&lockres->l_lock);
}
void user_dlm_write_lvb(struct inode *inode,
                        const char *val,
                        unsigned int len)
{
        struct user_lock_res *lockres = &DLMFS_I(inode)->ip_lockres;
        char *lvb;

        BUG_ON(len > DLM_LVB_LEN);

        spin_lock(&lockres->l_lock);

        BUG_ON(lockres->l_level < DLM_LOCK_EX);
        lvb = ocfs2_dlm_lvb(&lockres->l_lksb);
        memcpy(lvb, val, len);

        spin_unlock(&lockres->l_lock);
}
ssize_t user_dlm_read_lvb(struct inode *inode,
                          char *val,
                          unsigned int len)
{
        struct user_lock_res *lockres = &DLMFS_I(inode)->ip_lockres;
        char *lvb;
        ssize_t ret = len;

        BUG_ON(len > DLM_LVB_LEN);

        spin_lock(&lockres->l_lock);

        BUG_ON(lockres->l_level < DLM_LOCK_PR);
        if (ocfs2_dlm_lvb_valid(&lockres->l_lksb)) {
                lvb = ocfs2_dlm_lvb(&lockres->l_lksb);
                memcpy(val, lvb, len);
        } else
                ret = 0;

        spin_unlock(&lockres->l_lock);
        return ret;
}
void user_dlm_lock_res_init(struct user_lock_res *lockres,
                            struct dentry *dentry)
{
        memset(lockres, 0, sizeof(*lockres));

        spin_lock_init(&lockres->l_lock);
        init_waitqueue_head(&lockres->l_event);
        lockres->l_level = DLM_LOCK_IV;
        lockres->l_requested = DLM_LOCK_IV;
        lockres->l_blocking = DLM_LOCK_IV;

        /* should have been checked before getting here. */
        BUG_ON(dentry->d_name.len >= USER_DLM_LOCK_ID_MAX_LEN);

        memcpy(lockres->l_name,
               dentry->d_name.name,
               dentry->d_name.len);
        lockres->l_namelen = dentry->d_name.len;
}
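/*
 * Tear down the dlm lock behind a lockres. Returns -EBUSY while local
 * holders remain, and 0 once the final unlock has completed (or if no
 * dlm lock was ever attached).
 */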
int user_dlm_destroy_lock(struct user_lock_res *lockres)
{
        int status = -EBUSY;
        struct ocfs2_cluster_connection *conn =
                cluster_connection_from_user_lockres(lockres);

        mlog(0, "asked to destroy %.*s\n", lockres->l_namelen, lockres->l_name);

        spin_lock(&lockres->l_lock);
        if (lockres->l_flags & USER_LOCK_IN_TEARDOWN) {
                spin_unlock(&lockres->l_lock);
                return 0;
        }

        lockres->l_flags |= USER_LOCK_IN_TEARDOWN;

        while (lockres->l_flags & USER_LOCK_BUSY) {
                spin_unlock(&lockres->l_lock);

                user_wait_on_busy_lock(lockres);

                spin_lock(&lockres->l_lock);
        }

        if (lockres->l_ro_holders || lockres->l_ex_holders) {
                spin_unlock(&lockres->l_lock);
                goto bail;
        }

        status = 0;
        if (!(lockres->l_flags & USER_LOCK_ATTACHED)) {
                spin_unlock(&lockres->l_lock);
                goto bail;
        }

        lockres->l_flags &= ~USER_LOCK_ATTACHED;
        lockres->l_flags |= USER_LOCK_BUSY;
        spin_unlock(&lockres->l_lock);

        status = ocfs2_dlm_unlock(conn, &lockres->l_lksb, DLM_LKF_VALBLK);
        if (status) {
                user_log_dlm_error("ocfs2_dlm_unlock", status, lockres);
                goto bail;
        }

        user_wait_on_busy_lock(lockres);

        status = 0;
bail:
        return status;
}
static void user_dlm_recovery_handler_noop(int node_num,
                                           void *recovery_data)
{
        /* We ignore recovery events */
        return;
}
void user_dlm_set_locking_protocol(void)
{
        ocfs2_stack_glue_set_max_proto_version(&user_dlm_lproto.lp_max_version);
}
struct ocfs2_cluster_connection *user_dlm_register(struct qstr *name)
{
        int rc;
        struct ocfs2_cluster_connection *conn;

        rc = ocfs2_cluster_connect_agnostic(name->name, name->len,
                                            &user_dlm_lproto,
                                            user_dlm_recovery_handler_noop,
                                            NULL, &conn);
        if (rc)
                mlog_errno(rc);

        return rc ? ERR_PTR(rc) : conn;
}
void user_dlm_unregister(struct ocfs2_cluster_connection *conn)
{
        ocfs2_cluster_disconnect(conn, 0);
}
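/*
 * A minimal sketch of the intended calling sequence, for reference.
 * The names domain_qstr, dentry, and lockres are hypothetical
 * stand-ins for state that dlmfs normally manages through its file
 * operations:
 *
 *	user_dlm_set_locking_protocol();
 *	conn = user_dlm_register(&domain_qstr);
 *	if (IS_ERR(conn))
 *		return PTR_ERR(conn);
 *
 *	user_dlm_lock_res_init(lockres, dentry);
 *	status = user_dlm_cluster_lock(lockres, DLM_LOCK_EX, 0);
 *	if (!status) {
 *		... exclusive section; the LVB may be updated via
 *		user_dlm_write_lvb() while EX is held ...
 *		user_dlm_cluster_unlock(lockres, DLM_LOCK_EX);
 *	}
 *
 *	user_dlm_destroy_lock(lockres);
 *	user_dlm_unregister(conn);
 */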