[PATCH] Convert lockd to use the newer mutex instead of the older semaphore
Neil Brown [Wed, 4 Oct 2006 09:16:06 +0000 (02:16 -0700)]
Both the (recently introduced) nsm_sema and the older f_sema are converted
over.

Cc: Olaf Kirch <okir@suse.de>
Signed-off-by: Neil Brown <neilb@suse.de>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>

fs/lockd/host.c
fs/lockd/svclock.c
fs/lockd/svcsubs.c
include/linux/lockd/lockd.h

index a1423c6..0257a55 100644 (file)
@@ -436,7 +436,7 @@ nlm_gc_hosts(void)
  * Manage NSM handles
  */
 static LIST_HEAD(nsm_handles);
-static DECLARE_MUTEX(nsm_sema);
+static DEFINE_MUTEX(nsm_mutex);
 
 static struct nsm_handle *
 __nsm_find(const struct sockaddr_in *sin,
@@ -458,7 +458,7 @@ __nsm_find(const struct sockaddr_in *sin,
                return NULL;
        }
 
-       down(&nsm_sema);
+       mutex_lock(&nsm_mutex);
        list_for_each(pos, &nsm_handles) {
                nsm = list_entry(pos, struct nsm_handle, sm_link);
 
@@ -488,7 +488,8 @@ __nsm_find(const struct sockaddr_in *sin,
                list_add(&nsm->sm_link, &nsm_handles);
        }
 
-out:   up(&nsm_sema);
+out:
+       mutex_unlock(&nsm_mutex);
        return nsm;
 }
 
@@ -507,11 +508,11 @@ nsm_release(struct nsm_handle *nsm)
        if (!nsm)
                return;
        if (atomic_dec_and_test(&nsm->sm_count)) {
-               down(&nsm_sema);
+               mutex_lock(&nsm_mutex);
                if (atomic_read(&nsm->sm_count) == 0) {
                        list_del(&nsm->sm_link);
                        kfree(nsm);
                }
-               up(&nsm_sema);
+               mutex_unlock(&nsm_mutex);
        }
 }
index 3d2b8a8..814c606 100644 (file)
@@ -254,9 +254,9 @@ static void nlmsvc_free_block(struct kref *kref)
        dprintk("lockd: freeing block %p...\n", block);
 
        /* Remove block from file's list of blocks */
-       down(&file->f_sema);
+       mutex_lock(&file->f_mutex);
        list_del_init(&block->b_flist);
-       up(&file->f_sema);
+       mutex_unlock(&file->f_mutex);
 
        nlmsvc_freegrantargs(block->b_call);
        nlm_release_call(block->b_call);
@@ -281,7 +281,7 @@ void nlmsvc_traverse_blocks(struct nlm_host *host,
        struct nlm_block *block, *next;
 
 restart:
-       down(&file->f_sema);
+       mutex_lock(&file->f_mutex);
        list_for_each_entry_safe(block, next, &file->f_blocks, b_flist) {
                if (!match(block->b_host, host))
                        continue;
@@ -290,12 +290,12 @@ restart:
                if (list_empty(&block->b_list))
                        continue;
                kref_get(&block->b_count);
-               up(&file->f_sema);
+               mutex_unlock(&file->f_mutex);
                nlmsvc_unlink_block(block);
                nlmsvc_release_block(block);
                goto restart;
        }
-       up(&file->f_sema);
+       mutex_unlock(&file->f_mutex);
 }
 
 /*
@@ -354,7 +354,7 @@ nlmsvc_lock(struct svc_rqst *rqstp, struct nlm_file *file,
        lock->fl.fl_flags &= ~FL_SLEEP;
 again:
        /* Lock file against concurrent access */
-       down(&file->f_sema);
+       mutex_lock(&file->f_mutex);
        /* Get existing block (in case client is busy-waiting) */
        block = nlmsvc_lookup_block(file, lock);
        if (block == NULL) {
@@ -392,10 +392,10 @@ again:
 
        /* If we don't have a block, create and initialize it. Then
         * retry because we may have slept in kmalloc. */
-       /* We have to release f_sema as nlmsvc_create_block may try to
+       /* We have to release f_mutex as nlmsvc_create_block may try to
         * to claim it while doing host garbage collection */
        if (newblock == NULL) {
-               up(&file->f_sema);
+               mutex_unlock(&file->f_mutex);
                dprintk("lockd: blocking on this lock (allocating).\n");
                if (!(newblock = nlmsvc_create_block(rqstp, file, lock, cookie)))
                        return nlm_lck_denied_nolocks;
@@ -405,7 +405,7 @@ again:
        /* Append to list of blocked */
        nlmsvc_insert_block(newblock, NLM_NEVER);
 out:
-       up(&file->f_sema);
+       mutex_unlock(&file->f_mutex);
        nlmsvc_release_block(newblock);
        nlmsvc_release_block(block);
        dprintk("lockd: nlmsvc_lock returned %u\n", ret);
@@ -489,9 +489,9 @@ nlmsvc_cancel_blocked(struct nlm_file *file, struct nlm_lock *lock)
                                (long long)lock->fl.fl_start,
                                (long long)lock->fl.fl_end);
 
-       down(&file->f_sema);
+       mutex_lock(&file->f_mutex);
        block = nlmsvc_lookup_block(file, lock);
-       up(&file->f_sema);
+       mutex_unlock(&file->f_mutex);
        if (block != NULL) {
                status = nlmsvc_unlink_block(block);
                nlmsvc_release_block(block);
index a1c7c08..514f5f2 100644 (file)
@@ -106,7 +106,7 @@ nlm_lookup_file(struct svc_rqst *rqstp, struct nlm_file **result,
                goto out_unlock;
 
        memcpy(&file->f_handle, f, sizeof(struct nfs_fh));
-       init_MUTEX(&file->f_sema);
+       mutex_init(&file->f_mutex);
        INIT_HLIST_NODE(&file->f_list);
        INIT_LIST_HEAD(&file->f_blocks);
 
index 5920eca..2909619 100644 (file)
@@ -111,7 +111,7 @@ struct nlm_file {
        struct list_head        f_blocks;       /* blocked locks */
        unsigned int            f_locks;        /* guesstimate # of locks */
        unsigned int            f_count;        /* reference count */
-       struct semaphore        f_sema;         /* avoid concurrent access */
+       struct mutex            f_mutex;        /* avoid concurrent access */
 };
 
 /*