rwsem: shorter spinlocked section in rwsem_down_failed_common()
Michel Lespinasse [Tue, 7 May 2013 13:45:50 +0000 (06:45 -0700)]
This change reduces the size of the spinlocked and TASK_UNINTERRUPTIBLE
sections in rwsem_down_failed_common():

- We only need the sem->wait_lock to insert ourselves on the wait_list;
  the waiter node can be prepared outside of the wait_lock.

- The task state only needs to be set to TASK_UNINTERRUPTIBLE immediately
  before checking if we actually need to sleep; it doesn't need to cover
  the entire function.

Signed-off-by: Michel Lespinasse <walken@google.com>
Reviewed-by: Rik van Riel <riel@redhat.com>
Reviewed-by: Peter Hurley <peter@hurleysoftware.com>
Acked-by: Davidlohr Bueso <davidlohr.bueso@hp.com>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>

lib/rwsem.c

index 672eb33..4063645 100644 (file)
@@ -188,14 +188,12 @@ rwsem_down_failed_common(struct rw_semaphore *sem,
        struct task_struct *tsk = current;
        signed long count;
 
-       set_task_state(tsk, TASK_UNINTERRUPTIBLE);
-
        /* set up my own style of waitqueue */
-       raw_spin_lock_irq(&sem->wait_lock);
        waiter.task = tsk;
        waiter.type = type;
        get_task_struct(tsk);
 
+       raw_spin_lock_irq(&sem->wait_lock);
        if (list_empty(&sem->wait_list))
                adjustment += RWSEM_WAITING_BIAS;
        list_add_tail(&waiter.list, &sem->wait_list);
@@ -218,7 +216,8 @@ rwsem_down_failed_common(struct rw_semaphore *sem,
        raw_spin_unlock_irq(&sem->wait_lock);
 
        /* wait to be given the lock */
-       for (;;) {
+       while (true) {
+               set_task_state(tsk, TASK_UNINTERRUPTIBLE);
                if (!waiter.task)
                        break;
 
@@ -231,7 +230,6 @@ rwsem_down_failed_common(struct rw_semaphore *sem,
                        }
                raw_spin_unlock_irq(&sem->wait_lock);
                schedule();
-               set_task_state(tsk, TASK_UNINTERRUPTIBLE);
        }
 
        tsk->state = TASK_RUNNING;