[DLM] add lock timeouts and warnings [2/6]
[linux-2.6.git] / fs / dlm / lockspace.c
1 /******************************************************************************
2 *******************************************************************************
3 **
4 **  Copyright (C) Sistina Software, Inc.  1997-2003  All rights reserved.
5 **  Copyright (C) 2004-2007 Red Hat, Inc.  All rights reserved.
6 **
7 **  This copyrighted material is made available to anyone wishing to use,
8 **  modify, copy, or redistribute it subject to the terms and conditions
9 **  of the GNU General Public License v.2.
10 **
11 *******************************************************************************
12 ******************************************************************************/
13
14 #include "dlm_internal.h"
15 #include "lockspace.h"
16 #include "member.h"
17 #include "recoverd.h"
18 #include "ast.h"
19 #include "dir.h"
20 #include "lowcomms.h"
21 #include "config.h"
22 #include "memory.h"
23 #include "lock.h"
24 #include "recover.h"
25 #include "requestqueue.h"
26
/* Debugfs hooks; compiled down to no-ops when CONFIG_DLM_DEBUG is off. */
#ifdef CONFIG_DLM_DEBUG
int dlm_create_debug_file(struct dlm_ls *ls);
void dlm_delete_debug_file(struct dlm_ls *ls);
#else
static inline int dlm_create_debug_file(struct dlm_ls *ls) { return 0; }
static inline void dlm_delete_debug_file(struct dlm_ls *ls) { }
#endif
34
/* Number of existing lockspaces; also gates starting/stopping the shared
   daemon threads.  Transitions are serialized by ls_lock. */
static int                      ls_count;
static struct mutex             ls_lock;
/* List of all lockspaces (linked via dlm_ls.ls_list), under lslist_lock. */
static struct list_head         lslist;
static spinlock_t               lslist_lock;
/* The dlm_scand kthread created by dlm_scand_start(). */
static struct task_struct *     scand_task;
40
41
42 static ssize_t dlm_control_store(struct dlm_ls *ls, const char *buf, size_t len)
43 {
44         ssize_t ret = len;
45         int n = simple_strtol(buf, NULL, 0);
46
47         ls = dlm_find_lockspace_local(ls->ls_local_handle);
48         if (!ls)
49                 return -EINVAL;
50
51         switch (n) {
52         case 0:
53                 dlm_ls_stop(ls);
54                 break;
55         case 1:
56                 dlm_ls_start(ls);
57                 break;
58         default:
59                 ret = -EINVAL;
60         }
61         dlm_put_lockspace(ls);
62         return ret;
63 }
64
65 static ssize_t dlm_event_store(struct dlm_ls *ls, const char *buf, size_t len)
66 {
67         ls->ls_uevent_result = simple_strtol(buf, NULL, 0);
68         set_bit(LSFL_UEVENT_WAIT, &ls->ls_flags);
69         wake_up(&ls->ls_uevent_wait);
70         return len;
71 }
72
73 static ssize_t dlm_id_show(struct dlm_ls *ls, char *buf)
74 {
75         return snprintf(buf, PAGE_SIZE, "%u\n", ls->ls_global_id);
76 }
77
78 static ssize_t dlm_id_store(struct dlm_ls *ls, const char *buf, size_t len)
79 {
80         ls->ls_global_id = simple_strtoul(buf, NULL, 0);
81         return len;
82 }
83
84 static ssize_t dlm_recover_status_show(struct dlm_ls *ls, char *buf)
85 {
86         uint32_t status = dlm_recover_status(ls);
87         return snprintf(buf, PAGE_SIZE, "%x\n", status);
88 }
89
90 static ssize_t dlm_recover_nodeid_show(struct dlm_ls *ls, char *buf)
91 {
92         return snprintf(buf, PAGE_SIZE, "%d\n", ls->ls_recover_nodeid);
93 }
94
/* A lockspace sysfs attribute with optional show/store callbacks. */
struct dlm_attr {
	struct attribute attr;
	ssize_t (*show)(struct dlm_ls *, char *);
	ssize_t (*store)(struct dlm_ls *, const char *, size_t);
};
100
/* "control": root-writable only; 0 stops, 1 starts the lockspace. */
static struct dlm_attr dlm_attr_control = {
	.attr  = {.name = "control", .mode = S_IWUSR},
	.store = dlm_control_store
};

/* "event_done": root-writable; userspace acks a uevent with a result. */
static struct dlm_attr dlm_attr_event = {
	.attr  = {.name = "event_done", .mode = S_IWUSR},
	.store = dlm_event_store
};

/* "id": world-readable, root-writable global lockspace id. */
static struct dlm_attr dlm_attr_id = {
	.attr  = {.name = "id", .mode = S_IRUGO | S_IWUSR},
	.show  = dlm_id_show,
	.store = dlm_id_store
};

/* "recover_status": read-only recovery status bitmask. */
static struct dlm_attr dlm_attr_recover_status = {
	.attr  = {.name = "recover_status", .mode = S_IRUGO},
	.show  = dlm_recover_status_show
};

/* "recover_nodeid": read-only nodeid currently being recovered. */
static struct dlm_attr dlm_attr_recover_nodeid = {
	.attr  = {.name = "recover_nodeid", .mode = S_IRUGO},
	.show  = dlm_recover_nodeid_show
};

/* NULL-terminated default attribute list for lockspace kobjects. */
static struct attribute *dlm_attrs[] = {
	&dlm_attr_control.attr,
	&dlm_attr_event.attr,
	&dlm_attr_id.attr,
	&dlm_attr_recover_status.attr,
	&dlm_attr_recover_nodeid.attr,
	NULL,
};
135
136 static ssize_t dlm_attr_show(struct kobject *kobj, struct attribute *attr,
137                              char *buf)
138 {
139         struct dlm_ls *ls  = container_of(kobj, struct dlm_ls, ls_kobj);
140         struct dlm_attr *a = container_of(attr, struct dlm_attr, attr);
141         return a->show ? a->show(ls, buf) : 0;
142 }
143
144 static ssize_t dlm_attr_store(struct kobject *kobj, struct attribute *attr,
145                               const char *buf, size_t len)
146 {
147         struct dlm_ls *ls  = container_of(kobj, struct dlm_ls, ls_kobj);
148         struct dlm_attr *a = container_of(attr, struct dlm_attr, attr);
149         return a->store ? a->store(ls, buf, len) : len;
150 }
151
152 static void lockspace_kobj_release(struct kobject *k)
153 {
154         struct dlm_ls *ls  = container_of(k, struct dlm_ls, ls_kobj);
155         kfree(ls);
156 }
157
/* Route sysfs reads/writes through the per-attribute callbacks. */
static struct sysfs_ops dlm_attr_ops = {
	.show  = dlm_attr_show,
	.store = dlm_attr_store,
};

/* ktype for lockspace kobjects; release frees the dlm_ls itself. */
static struct kobj_type dlm_ktype = {
	.default_attrs = dlm_attrs,
	.sysfs_ops     = &dlm_attr_ops,
	.release       = lockspace_kobj_release,
};

/* The "dlm" kset; lockspace kobjects are registered beneath it. */
static struct kset dlm_kset = {
	.kobj   = {.name = "dlm",},
	.ktype  = &dlm_ktype,
};
173
174 static int kobject_setup(struct dlm_ls *ls)
175 {
176         char lsname[DLM_LOCKSPACE_LEN];
177         int error;
178
179         memset(lsname, 0, DLM_LOCKSPACE_LEN);
180         snprintf(lsname, DLM_LOCKSPACE_LEN, "%s", ls->ls_name);
181
182         error = kobject_set_name(&ls->ls_kobj, "%s", lsname);
183         if (error)
184                 return error;
185
186         ls->ls_kobj.kset = &dlm_kset;
187         ls->ls_kobj.ktype = &dlm_ktype;
188         return 0;
189 }
190
191 static int do_uevent(struct dlm_ls *ls, int in)
192 {
193         int error;
194
195         if (in)
196                 kobject_uevent(&ls->ls_kobj, KOBJ_ONLINE);
197         else
198                 kobject_uevent(&ls->ls_kobj, KOBJ_OFFLINE);
199
200         error = wait_event_interruptible(ls->ls_uevent_wait,
201                         test_and_clear_bit(LSFL_UEVENT_WAIT, &ls->ls_flags));
202         if (error)
203                 goto out;
204
205         error = ls->ls_uevent_result;
206  out:
207         return error;
208 }
209
210
211 int dlm_lockspace_init(void)
212 {
213         int error;
214
215         ls_count = 0;
216         mutex_init(&ls_lock);
217         INIT_LIST_HEAD(&lslist);
218         spin_lock_init(&lslist_lock);
219
220         kobj_set_kset_s(&dlm_kset, kernel_subsys);
221         error = kset_register(&dlm_kset);
222         if (error)
223                 printk("dlm_lockspace_init: cannot register kset %d\n", error);
224         return error;
225 }
226
/* Module exit: unregister the "dlm" kset set up in dlm_lockspace_init(). */
void dlm_lockspace_exit(void)
{
	kset_unregister(&dlm_kset);
}
231
/*
 * Background scanner thread: every ci_scan_secs seconds, walk all
 * lockspaces and -- when a lockspace is not busy with recovery -- age out
 * unused rsbs and check lock timeouts.
 *
 * NOTE(review): lslist is traversed here without lslist_lock and without
 * taking an ls_count reference; this appears to depend on the thread
 * being stopped (threads_stop) before the last lockspace is removed --
 * confirm against release_lockspace()/remove_lockspace() ordering.
 */
static int dlm_scand(void *data)
{
	struct dlm_ls *ls;

	while (!kthread_should_stop()) {
		list_for_each_entry(ls, &lslist, ls_list) {
			/* skip lockspaces currently in recovery */
			if (dlm_lock_recovery_try(ls)) {
				dlm_scan_rsbs(ls);
				dlm_scan_timeout(ls);
				dlm_unlock_recovery(ls);
			}
		}
		/* interruptible so kthread_stop() wakes us promptly */
		schedule_timeout_interruptible(dlm_config.ci_scan_secs * HZ);
	}
	return 0;
}
248
249 static int dlm_scand_start(void)
250 {
251         struct task_struct *p;
252         int error = 0;
253
254         p = kthread_run(dlm_scand, NULL, "dlm_scand");
255         if (IS_ERR(p))
256                 error = PTR_ERR(p);
257         else
258                 scand_task = p;
259         return error;
260 }
261
/* Stop the scanner kthread started by dlm_scand_start(). */
static void dlm_scand_stop(void)
{
	kthread_stop(scand_task);
}
266
267 static struct dlm_ls *dlm_find_lockspace_name(char *name, int namelen)
268 {
269         struct dlm_ls *ls;
270
271         spin_lock(&lslist_lock);
272
273         list_for_each_entry(ls, &lslist, ls_list) {
274                 if (ls->ls_namelen == namelen &&
275                     memcmp(ls->ls_name, name, namelen) == 0)
276                         goto out;
277         }
278         ls = NULL;
279  out:
280         spin_unlock(&lslist_lock);
281         return ls;
282 }
283
284 struct dlm_ls *dlm_find_lockspace_global(uint32_t id)
285 {
286         struct dlm_ls *ls;
287
288         spin_lock(&lslist_lock);
289
290         list_for_each_entry(ls, &lslist, ls_list) {
291                 if (ls->ls_global_id == id) {
292                         ls->ls_count++;
293                         goto out;
294                 }
295         }
296         ls = NULL;
297  out:
298         spin_unlock(&lslist_lock);
299         return ls;
300 }
301
302 struct dlm_ls *dlm_find_lockspace_local(dlm_lockspace_t *lockspace)
303 {
304         struct dlm_ls *ls;
305
306         spin_lock(&lslist_lock);
307         list_for_each_entry(ls, &lslist, ls_list) {
308                 if (ls->ls_local_handle == lockspace) {
309                         ls->ls_count++;
310                         goto out;
311                 }
312         }
313         ls = NULL;
314  out:
315         spin_unlock(&lslist_lock);
316         return ls;
317 }
318
319 struct dlm_ls *dlm_find_lockspace_device(int minor)
320 {
321         struct dlm_ls *ls;
322
323         spin_lock(&lslist_lock);
324         list_for_each_entry(ls, &lslist, ls_list) {
325                 if (ls->ls_device.minor == minor) {
326                         ls->ls_count++;
327                         goto out;
328                 }
329         }
330         ls = NULL;
331  out:
332         spin_unlock(&lslist_lock);
333         return ls;
334 }
335
/* Drop a reference taken by one of the dlm_find_lockspace_*() lookups. */
void dlm_put_lockspace(struct dlm_ls *ls)
{
	spin_lock(&lslist_lock);
	ls->ls_count--;
	spin_unlock(&lslist_lock);
}
342
343 static void remove_lockspace(struct dlm_ls *ls)
344 {
345         for (;;) {
346                 spin_lock(&lslist_lock);
347                 if (ls->ls_count == 0) {
348                         list_del(&ls->ls_list);
349                         spin_unlock(&lslist_lock);
350                         return;
351                 }
352                 spin_unlock(&lslist_lock);
353                 ssleep(1);
354         }
355 }
356
/*
 * Start the daemon threads shared by all lockspaces: dlm_astd, dlm_scand
 * and lowcomms.  On failure, any threads already started are stopped in
 * reverse order via the goto-unwind chain.
 */
static int threads_start(void)
{
	int error;

	/* Thread which processes lock requests for all lockspaces */
	error = dlm_astd_start();
	if (error) {
		log_print("cannot start dlm_astd thread %d", error);
		goto fail;
	}

	error = dlm_scand_start();
	if (error) {
		log_print("cannot start dlm_scand thread %d", error);
		goto astd_fail;
	}

	/* Thread for sending/receiving messages for all lockspaces */
	error = dlm_lowcomms_start();
	if (error) {
		log_print("cannot start dlm lowcomms %d", error);
		goto scand_fail;
	}

	return 0;

 scand_fail:
	dlm_scand_stop();
 astd_fail:
	dlm_astd_stop();
 fail:
	return error;
}
390
/*
 * Stop the shared daemon threads when the last lockspace goes away.
 * Stopped in the order: scand, lowcomms, astd.
 */
static void threads_stop(void)
{
	dlm_scand_stop();
	dlm_lowcomms_stop();
	dlm_astd_stop();
}
397
/*
 * Create and register a new lockspace.  On success *lockspace holds the
 * handle (the dlm_ls pointer itself) and 0 is returned.  If a lockspace
 * of the same name already exists, *lockspace is set to it and -EEXIST
 * is returned.  lvblen must be a non-zero multiple of 8.
 *
 * Errors unwind through the goto chain below, freeing in strict reverse
 * order of allocation.
 */
static int new_lockspace(char *name, int namelen, void **lockspace,
			 uint32_t flags, int lvblen)
{
	struct dlm_ls *ls;
	int i, size, error = -ENOMEM;

	if (namelen > DLM_LOCKSPACE_LEN)
		return -EINVAL;

	if (!lvblen || (lvblen % 8))
		return -EINVAL;

	/* pin the module for the lifetime of the lockspace */
	if (!try_module_get(THIS_MODULE))
		return -EINVAL;

	ls = dlm_find_lockspace_name(name, namelen);
	if (ls) {
		*lockspace = ls;
		module_put(THIS_MODULE);
		return -EEXIST;
	}

	/* ls_name[] trails the struct; namelen extra bytes hold the name */
	ls = kzalloc(sizeof(struct dlm_ls) + namelen, GFP_KERNEL);
	if (!ls)
		goto out;
	memcpy(ls->ls_name, name, namelen);
	ls->ls_namelen = namelen;
	ls->ls_lvblen = lvblen;
	ls->ls_count = 0;
	ls->ls_flags = 0;

	/* ls_exflags are forced to match among nodes, and we don't
	   need to require all nodes to have TIMEWARN active */
	if (flags & DLM_LSFL_TIMEWARN)
		set_bit(LSFL_TIMEWARN, &ls->ls_flags);
	ls->ls_exflags = (flags & ~DLM_LSFL_TIMEWARN);

	/* resource (rsb) hash table */
	size = dlm_config.ci_rsbtbl_size;
	ls->ls_rsbtbl_size = size;

	ls->ls_rsbtbl = kmalloc(sizeof(struct dlm_rsbtable) * size, GFP_KERNEL);
	if (!ls->ls_rsbtbl)
		goto out_lsfree;
	for (i = 0; i < size; i++) {
		INIT_LIST_HEAD(&ls->ls_rsbtbl[i].list);
		INIT_LIST_HEAD(&ls->ls_rsbtbl[i].toss);
		rwlock_init(&ls->ls_rsbtbl[i].lock);
	}

	/* lock (lkb) id table */
	size = dlm_config.ci_lkbtbl_size;
	ls->ls_lkbtbl_size = size;

	ls->ls_lkbtbl = kmalloc(sizeof(struct dlm_lkbtable) * size, GFP_KERNEL);
	if (!ls->ls_lkbtbl)
		goto out_rsbfree;
	for (i = 0; i < size; i++) {
		INIT_LIST_HEAD(&ls->ls_lkbtbl[i].list);
		rwlock_init(&ls->ls_lkbtbl[i].lock);
		/* lkb id counters start at 1 so 0 is never a valid lkid */
		ls->ls_lkbtbl[i].counter = 1;
	}

	/* resource directory table */
	size = dlm_config.ci_dirtbl_size;
	ls->ls_dirtbl_size = size;

	ls->ls_dirtbl = kmalloc(sizeof(struct dlm_dirtable) * size, GFP_KERNEL);
	if (!ls->ls_dirtbl)
		goto out_lkbfree;
	for (i = 0; i < size; i++) {
		INIT_LIST_HEAD(&ls->ls_dirtbl[i].list);
		rwlock_init(&ls->ls_dirtbl[i].lock);
	}

	INIT_LIST_HEAD(&ls->ls_waiters);
	mutex_init(&ls->ls_waiters_mutex);
	INIT_LIST_HEAD(&ls->ls_orphans);
	mutex_init(&ls->ls_orphans_mutex);
	INIT_LIST_HEAD(&ls->ls_timeout);
	mutex_init(&ls->ls_timeout_mutex);

	INIT_LIST_HEAD(&ls->ls_nodes);
	INIT_LIST_HEAD(&ls->ls_nodes_gone);
	ls->ls_num_nodes = 0;
	ls->ls_low_nodeid = 0;
	ls->ls_total_weight = 0;
	ls->ls_node_array = NULL;

	memset(&ls->ls_stub_rsb, 0, sizeof(struct dlm_rsb));
	ls->ls_stub_rsb.res_ls = ls;

	ls->ls_debug_rsb_dentry = NULL;
	ls->ls_debug_waiters_dentry = NULL;

	init_waitqueue_head(&ls->ls_uevent_wait);
	ls->ls_uevent_result = 0;

	/* recovery state */
	ls->ls_recoverd_task = NULL;
	mutex_init(&ls->ls_recoverd_active);
	spin_lock_init(&ls->ls_recover_lock);
	spin_lock_init(&ls->ls_rcom_spin);
	get_random_bytes(&ls->ls_rcom_seq, sizeof(uint64_t));
	ls->ls_recover_status = 0;
	ls->ls_recover_seq = 0;
	ls->ls_recover_args = NULL;
	init_rwsem(&ls->ls_in_recovery);
	INIT_LIST_HEAD(&ls->ls_requestqueue);
	mutex_init(&ls->ls_requestqueue_mutex);
	mutex_init(&ls->ls_clear_proc_locks);

	ls->ls_recover_buf = kmalloc(dlm_config.ci_buffer_size, GFP_KERNEL);
	if (!ls->ls_recover_buf)
		goto out_dirfree;

	INIT_LIST_HEAD(&ls->ls_recover_list);
	spin_lock_init(&ls->ls_recover_list_lock);
	ls->ls_recover_list_count = 0;
	/* the handle given back to users is the ls pointer itself */
	ls->ls_local_handle = ls;
	init_waitqueue_head(&ls->ls_wait_general);
	INIT_LIST_HEAD(&ls->ls_root_list);
	init_rwsem(&ls->ls_root_sem);

	/* NOTE(review): taken here so the lockspace starts out "in
	   recovery"; released by the recovery code elsewhere -- confirm */
	down_write(&ls->ls_in_recovery);

	spin_lock(&lslist_lock);
	list_add(&ls->ls_list, &lslist);
	spin_unlock(&lslist_lock);

	/* needs to find ls in lslist */
	error = dlm_recoverd_start(ls);
	if (error) {
		log_error(ls, "can't start dlm_recoverd %d", error);
		goto out_rcomfree;
	}

	dlm_create_debug_file(ls);

	error = kobject_setup(ls);
	if (error)
		goto out_del;

	error = kobject_register(&ls->ls_kobj);
	if (error)
		goto out_del;

	/* tell userspace the lockspace is online and wait for its ack */
	error = do_uevent(ls, 1);
	if (error)
		goto out_unreg;

	*lockspace = ls;
	return 0;

 out_unreg:
	kobject_unregister(&ls->ls_kobj);
 out_del:
	dlm_delete_debug_file(ls);
	dlm_recoverd_stop(ls);
 out_rcomfree:
	spin_lock(&lslist_lock);
	list_del(&ls->ls_list);
	spin_unlock(&lslist_lock);
	kfree(ls->ls_recover_buf);
 out_dirfree:
	kfree(ls->ls_dirtbl);
 out_lkbfree:
	kfree(ls->ls_lkbtbl);
 out_rsbfree:
	kfree(ls->ls_rsbtbl);
 out_lsfree:
	kfree(ls);
 out:
	module_put(THIS_MODULE);
	return error;
}
570
571 int dlm_new_lockspace(char *name, int namelen, void **lockspace,
572                       uint32_t flags, int lvblen)
573 {
574         int error = 0;
575
576         mutex_lock(&ls_lock);
577         if (!ls_count)
578                 error = threads_start();
579         if (error)
580                 goto out;
581
582         error = new_lockspace(name, namelen, lockspace, flags, lvblen);
583         if (!error)
584                 ls_count++;
585  out:
586         mutex_unlock(&ls_lock);
587         return error;
588 }
589
/* Return 1 if the lockspace still has active remote locks,
 *        2 if the lockspace still has active local locks.
 */
static int lockspace_busy(struct dlm_ls *ls)
{
	int i, lkb_found = 0;
	struct dlm_lkb *lkb;

	/* NOTE: We check the lockidtbl here rather than the resource table.
	   This is because there may be LKBs queued as ASTs that have been
	   unlinked from their RSBs and are pending deletion once the AST has
	   been delivered */

	for (i = 0; i < ls->ls_lkbtbl_size; i++) {
		read_lock(&ls->ls_lkbtbl[i].lock);
		if (!list_empty(&ls->ls_lkbtbl[i].list)) {
			lkb_found = 1;
			list_for_each_entry(lkb, &ls->ls_lkbtbl[i].list,
					    lkb_idtbl_list) {
				/* lkb_nodeid == 0 -> mastered here, i.e. a
				   "local" lock: strongest busy answer */
				if (!lkb->lkb_nodeid) {
					read_unlock(&ls->ls_lkbtbl[i].lock);
					return 2;
				}
			}
		}
		read_unlock(&ls->ls_lkbtbl[i].lock);
	}
	return lkb_found;
}
619
/*
 * Tear down a lockspace.  Fails with -EBUSY when lockspace_busy()
 * reports locks the force level does not override (busy is 1 for remote
 * locks, 2 for local locks).  For force < 3, userspace is told via an
 * offline uevent before teardown.  Teardown order matters: recovery is
 * stopped and the lockspace unlinked before any structure is freed.
 */
static int release_lockspace(struct dlm_ls *ls, int force)
{
	struct dlm_lkb *lkb;
	struct dlm_rsb *rsb;
	struct list_head *head;
	int i;
	int busy = lockspace_busy(ls);

	if (busy > force)
		return -EBUSY;

	if (force < 3)
		do_uevent(ls, 0);

	dlm_recoverd_stop(ls);

	/* waits for ls_count to drop to zero, then unlinks from lslist */
	remove_lockspace(ls);

	dlm_delete_debug_file(ls);

	/* park the ast thread so lkbs can be freed safely below */
	dlm_astd_suspend();

	kfree(ls->ls_recover_buf);

	/*
	 * Free direntry structs.
	 */

	dlm_dir_clear(ls);
	kfree(ls->ls_dirtbl);

	/*
	 * Free all lkb's on lkbtbl[] lists.
	 */

	for (i = 0; i < ls->ls_lkbtbl_size; i++) {
		head = &ls->ls_lkbtbl[i].list;
		while (!list_empty(head)) {
			lkb = list_entry(head->next, struct dlm_lkb,
					 lkb_idtbl_list);

			list_del(&lkb->lkb_idtbl_list);

			dlm_del_ast(lkb);

			/* master-copy lkbs own their lvb allocation */
			if (lkb->lkb_lvbptr && lkb->lkb_flags & DLM_IFL_MSTCPY)
				free_lvb(lkb->lkb_lvbptr);

			free_lkb(lkb);
		}
	}
	dlm_astd_resume();

	kfree(ls->ls_lkbtbl);

	/*
	 * Free all rsb's on rsbtbl[] lists (both active and toss lists)
	 */

	for (i = 0; i < ls->ls_rsbtbl_size; i++) {
		head = &ls->ls_rsbtbl[i].list;
		while (!list_empty(head)) {
			rsb = list_entry(head->next, struct dlm_rsb,
					 res_hashchain);

			list_del(&rsb->res_hashchain);
			free_rsb(rsb);
		}

		head = &ls->ls_rsbtbl[i].toss;
		while (!list_empty(head)) {
			rsb = list_entry(head->next, struct dlm_rsb,
					 res_hashchain);
			list_del(&rsb->res_hashchain);
			free_rsb(rsb);
		}
	}

	kfree(ls->ls_rsbtbl);

	/*
	 * Free structures on any other lists
	 */

	dlm_purge_requestqueue(ls);
	kfree(ls->ls_recover_args);
	dlm_clear_free_entries(ls);
	dlm_clear_members(ls);
	dlm_clear_members_gone(ls);
	kfree(ls->ls_node_array);
	kobject_unregister(&ls->ls_kobj);
	/* The ls structure will be freed when the kobject is done with */

	/* last lockspace gone: stop the shared daemon threads */
	mutex_lock(&ls_lock);
	ls_count--;
	if (!ls_count)
		threads_stop();
	mutex_unlock(&ls_lock);

	module_put(THIS_MODULE);
	return 0;
}
722
723 /*
724  * Called when a system has released all its locks and is not going to use the
725  * lockspace any longer.  We free everything we're managing for this lockspace.
726  * Remaining nodes will go through the recovery process as if we'd died.  The
727  * lockspace must continue to function as usual, participating in recoveries,
728  * until this returns.
729  *
730  * Force has 4 possible values:
731  * 0 - don't destroy locksapce if it has any LKBs
732  * 1 - destroy lockspace if it has remote LKBs but not if it has local LKBs
733  * 2 - destroy lockspace regardless of LKBs
734  * 3 - destroy lockspace as part of a forced shutdown
735  */
736
737 int dlm_release_lockspace(void *lockspace, int force)
738 {
739         struct dlm_ls *ls;
740
741         ls = dlm_find_lockspace_local(lockspace);
742         if (!ls)
743                 return -EINVAL;
744         dlm_put_lockspace(ls);
745         return release_lockspace(ls, force);
746 }
747