diff --git a/kernel/futex.c b/kernel/futex.c
index 476603afd1478191fa3c1188dd92f94bcd1b666c..3717e7b306e08c0e8c2d3a66219c3cae13176d99 100644
--- a/kernel/futex.c
+++ b/kernel/futex.c
 #include <linux/pagemap.h>
 #include <linux/syscalls.h>
 #include <linux/signal.h>
-#include <linux/module.h>
+#include <linux/export.h>
 #include <linux/magic.h>
 #include <linux/pid.h>
 #include <linux/nsproxy.h>
+#include <linux/ptrace.h>
 
 #include <asm/futex.h>
 
@@ -68,6 +69,14 @@ int __read_mostly futex_cmpxchg_enabled;
 
 #define FUTEX_HASHBITS (CONFIG_BASE_SMALL ? 4 : 8)
 
+/*
+ * Futex flags used to encode options to functions and preserve them across
+ * restarts.
+ */
+#define FLAGS_SHARED           0x01
+#define FLAGS_CLOCKRT          0x02
+#define FLAGS_HAS_TIMEOUT      0x04
+
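These flags replace the separate fshared/clockrt/has-timeout parameters that were previously threaded through the call chain and the restart block. A sketch (an assumption; the conversion itself is outside this hunk) of how the syscall entry point would derive them from the uapi op word:

	unsigned int flags = 0;

	if (!(op & FUTEX_PRIVATE_FLAG))
		flags |= FLAGS_SHARED;	/* shared unless marked private */
	if (op & FUTEX_CLOCK_REALTIME)
		flags |= FLAGS_CLOCKRT;	/* time out against CLOCK_REALTIME */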
 /*
  * Priority Inheritance state:
  */
@@ -89,36 +98,46 @@ struct futex_pi_state {
        union futex_key key;
 };
 
-/*
- * We use this hashed waitqueue instead of a normal wait_queue_t, so
+/**
+ * struct futex_q - The hashed futex queue entry, one per waiting task
+ * @list:              priority-sorted list of tasks waiting on this futex
+ * @task:              the task waiting on the futex
+ * @lock_ptr:          the hash bucket lock
+ * @key:               the key the futex is hashed on
+ * @pi_state:          optional priority inheritance state
+ * @rt_waiter:         rt_waiter storage for use with requeue_pi
+ * @requeue_pi_key:    the requeue_pi target futex key
+ * @bitset:            bitset for the optional bitmasked wakeup
+ *
+ * We use this hashed waitqueue, instead of a normal wait_queue_t, so
  * we can wake only the relevant ones (hashed queues may be shared).
  *
  * A futex_q has a woken state, just like tasks have TASK_RUNNING.
  * It is considered woken when plist_node_empty(&q->list) || q->lock_ptr == 0.
- * The order of wakup is always to make the first condition true, then
- * wake up q->waiter, then make the second condition true.
+ * The order of wakeup is always to make the first condition true, then
+ * the second.
+ *
+ * PI futexes are typically woken before they are removed from the hash list via
+ * the rt_mutex code. See unqueue_me_pi().
  */
 struct futex_q {
        struct plist_node list;
-       /* Waiter reference */
-       struct task_struct *task;
 
-       /* Which hash list lock to use: */
+       struct task_struct *task;
        spinlock_t *lock_ptr;
-
-       /* Key which the futex is hashed on: */
        union futex_key key;
-
-       /* Optional priority inheritance state: */
        struct futex_pi_state *pi_state;
-
-       /* rt_waiter storage for requeue_pi: */
        struct rt_mutex_waiter *rt_waiter;
-
-       /* Bitset for the optional bitmasked wakeup */
+       union futex_key *requeue_pi_key;
        u32 bitset;
 };
 
+static const struct futex_q futex_q_init = {
+       /* list gets initialized in queue_me() */
+       .key = FUTEX_KEY_INIT,
+       .bitset = FUTEX_BITSET_MATCH_ANY
+};
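The woken-state test described above, written out as a hypothetical helper for clarity (not part of this patch; the two conditions are checked open-coded in futex_wait_queue_me() and unqueue_me() below):

	static inline bool futex_q_woken(struct futex_q *q)
	{
		/* Either condition means a waker has already claimed q. */
		return plist_node_empty(&q->list) || q->lock_ptr == NULL;
	}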
+
 /*
  * Hash buckets are shared by all the futex_keys that hash to the same
  * location.  Each key may have multiple futex_q structures, one for each task
@@ -147,7 +166,8 @@ static struct futex_hash_bucket *hash_futex(union futex_key *key)
  */
 static inline int match_futex(union futex_key *key1, union futex_key *key2)
 {
-       return (key1->both.word == key2->both.word
+       return (key1 && key2
+               && key1->both.word == key2->both.word
                && key1->both.ptr == key2->both.ptr
                && key1->both.offset == key2->both.offset);
 }
@@ -164,7 +184,7 @@ static void get_futex_key_refs(union futex_key *key)
 
        switch (key->both.offset & (FUT_OFF_INODE|FUT_OFF_MMSHARED)) {
        case FUT_OFF_INODE:
-               atomic_inc(&key->shared.inode->i_count);
+               ihold(key->shared.inode);
                break;
        case FUT_OFF_MMSHARED:
                atomic_inc(&key->private.mm->mm_count);
@@ -195,11 +215,12 @@ static void drop_futex_key_refs(union futex_key *key)
 }
 
 /**
- * get_futex_key - Get parameters which are the keys for a futex.
- * @uaddr: virtual address of the futex
- * @fshared: 0 for a PROCESS_PRIVATE futex, 1 for PROCESS_SHARED
- * @key: address where result is stored.
- * @rw: mapping needs to be read/write (values: VERIFY_READ, VERIFY_WRITE)
+ * get_futex_key() - Get parameters which are the keys for a futex
+ * @uaddr:     virtual address of the futex
+ * @fshared:   0 for a PROCESS_PRIVATE futex, 1 for PROCESS_SHARED
+ * @key:       address where result is stored.
+ * @rw:                mapping needs to be read/write (values: VERIFY_READ,
+ *              VERIFY_WRITE)
  *
  * Returns a negative error code or 0
  * The key words are stored in *key on success.
@@ -215,8 +236,8 @@ get_futex_key(u32 __user *uaddr, int fshared, union futex_key *key, int rw)
 {
        unsigned long address = (unsigned long)uaddr;
        struct mm_struct *mm = current->mm;
-       struct page *page;
-       int err;
+       struct page *page, *page_head;
+       int err, ro = 0;
 
        /*
         * The futex address must be "naturally" aligned.
@@ -234,7 +255,7 @@ get_futex_key(u32 __user *uaddr, int fshared, union futex_key *key, int rw)
         *        but access_ok() should be faster than find_vma()
         */
        if (!fshared) {
-               if (unlikely(!access_ok(rw, uaddr, sizeof(u32))))
+               if (unlikely(!access_ok(VERIFY_WRITE, uaddr, sizeof(u32))))
                        return -EFAULT;
                key->private.mm = mm;
                key->private.address = address;
@@ -243,15 +264,80 @@ get_futex_key(u32 __user *uaddr, int fshared, union futex_key *key, int rw)
        }
 
 again:
-       err = get_user_pages_fast(address, 1, rw == VERIFY_WRITE, &page);
+       err = get_user_pages_fast(address, 1, 1, &page);
+       /*
+        * If write access is not required (e.g. FUTEX_WAIT), try
+        * to get read-only access.
+        */
+       if (err == -EFAULT && rw == VERIFY_READ) {
+               err = get_user_pages_fast(address, 1, 0, &page);
+               ro = 1;
+       }
        if (err < 0)
                return err;
+       else
+               err = 0;
 
-       lock_page(page);
-       if (!page->mapping) {
-               unlock_page(page);
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+       page_head = page;
+       if (unlikely(PageTail(page))) {
+               put_page(page);
+               /* serialize against __split_huge_page_splitting() */
+               local_irq_disable();
+               if (likely(__get_user_pages_fast(address, 1, 1, &page) == 1)) {
+                       page_head = compound_head(page);
+                       /*
+                        * page_head is valid pointer but we must pin
+                        * it before taking the PG_lock and/or
+                        * PG_compound_lock. The moment we re-enable
+                        * irqs __split_huge_page_splitting() can
+                        * return and the head page can be freed from
+                        * under us. We can't take the PG_lock and/or
+                        * PG_compound_lock on a page that could be
+                        * freed from under us.
+                        */
+                       if (page != page_head) {
+                               get_page(page_head);
+                               put_page(page);
+                       }
+                       local_irq_enable();
+               } else {
+                       local_irq_enable();
+                       goto again;
+               }
+       }
+#else
+       page_head = compound_head(page);
+       if (page != page_head) {
+               get_page(page_head);
                put_page(page);
-               goto again;
+       }
+#endif
+
+       lock_page(page_head);
+
+       /*
+        * If page_head->mapping is NULL, then it cannot be a PageAnon
+        * page; but it might be the ZERO_PAGE or in the gate area or
+        * in a special mapping (all cases which we are happy to fail);
+        * or it may have been a good file page when get_user_pages_fast
+        * found it, but truncated or holepunched or subjected to
+        * invalidate_complete_page2 before we got the page lock (also
+        * cases which we are happy to fail).  And we hold a reference,
+        * so refcount care in invalidate_complete_page's remove_mapping
+        * prevents drop_caches from setting mapping to NULL beneath us.
+        *
+        * The case we do have to guard against is when memory pressure made
+        * shmem_writepage move it from filecache to swapcache beneath us:
+        * an unlikely race, but we do need to retry for page_head->mapping.
+        */
+       if (!page_head->mapping) {
+               int shmem_swizzled = PageSwapCache(page_head);
+               unlock_page(page_head);
+               put_page(page_head);
+               if (shmem_swizzled)
+                       goto again;
+               return -EFAULT;
        }
 
        /*
@@ -261,33 +347,67 @@ again:
         * it's a read-only handle, it's expected that futexes attach to
         * the object not the particular process.
         */
-       if (PageAnon(page)) {
+       if (PageAnon(page_head)) {
+               /*
+                * A RO anonymous page will never change and thus doesn't make
+                * sense for futex operations.
+                */
+               if (ro) {
+                       err = -EFAULT;
+                       goto out;
+               }
+
                key->both.offset |= FUT_OFF_MMSHARED; /* ref taken on mm */
                key->private.mm = mm;
                key->private.address = address;
        } else {
                key->both.offset |= FUT_OFF_INODE; /* inode-based key */
-               key->shared.inode = page->mapping->host;
-               key->shared.pgoff = page->index;
+               key->shared.inode = page_head->mapping->host;
+               key->shared.pgoff = page_head->index;
        }
 
        get_futex_key_refs(key);
 
-       unlock_page(page);
-       put_page(page);
-       return 0;
+out:
+       unlock_page(page_head);
+       put_page(page_head);
+       return err;
 }
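Seen from userspace, the private fast path above is selected with FUTEX_PRIVATE_FLAG. A minimal sketch of the two variants (illustrative, not from this patch):

	#include <linux/futex.h>
	#include <sys/syscall.h>
	#include <unistd.h>

	/* Block while *uaddr == val. The _PRIVATE variant lets the kernel
	 * key on current->mm and skip the page-pinning walk seen above. */
	static int futex_wait_simple(unsigned int *uaddr, unsigned int val,
				     int shared)
	{
		int op = shared ? FUTEX_WAIT : FUTEX_WAIT_PRIVATE;

		return syscall(SYS_futex, uaddr, op, val, NULL, NULL, 0);
	}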
 
-static inline
-void put_futex_key(int fshared, union futex_key *key)
+static inline void put_futex_key(union futex_key *key)
 {
        drop_futex_key_refs(key);
 }
 
+/**
+ * fault_in_user_writeable() - Fault in user address and verify RW access
+ * @uaddr:     pointer to faulting user space address
+ *
+ * Slow path to fixup the fault we just took in the atomic write
+ * access to @uaddr.
+ *
+ * We have no generic implementation of a non-destructive write to the
+ * user address. We know that we faulted in the atomic pagefault
+ * disabled section so we can as well avoid the #PF overhead by
+ * calling get_user_pages() right away.
+ */
+static int fault_in_user_writeable(u32 __user *uaddr)
+{
+       struct mm_struct *mm = current->mm;
+       int ret;
+
+       down_read(&mm->mmap_sem);
+       ret = fixup_user_fault(current, mm, (unsigned long)uaddr,
+                              FAULT_FLAG_WRITE);
+       up_read(&mm->mmap_sem);
+
+       return ret < 0 ? ret : 0;
+}
+
 /**
  * futex_top_waiter() - Return the highest priority waiter on a futex
- * @hb:     the hash bucket the futex_q's reside in
- * @key:    the futex key (to distinguish it from other futex futex_q's)
+ * @hb:                the hash bucket the futex_q's reside in
+ * @key:       the futex key (to distinguish it from other futexes' futex_q's)
  *
  * Must be called with the hb lock held.
  */
@@ -303,15 +423,16 @@ static struct futex_q *futex_top_waiter(struct futex_hash_bucket *hb,
        return NULL;
 }
 
-static u32 cmpxchg_futex_value_locked(u32 __user *uaddr, u32 uval, u32 newval)
+static int cmpxchg_futex_value_locked(u32 *curval, u32 __user *uaddr,
+                                     u32 uval, u32 newval)
 {
-       u32 curval;
+       int ret;
 
        pagefault_disable();
-       curval = futex_atomic_cmpxchg_inatomic(uaddr, uval, newval);
+       ret = futex_atomic_cmpxchg_inatomic(curval, uaddr, uval, newval);
        pagefault_enable();
 
-       return curval;
+       return ret;
 }
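The fault status is now separated from the compared value, so every converted call site below follows the same pattern, roughly:

	u32 curval;

	if (cmpxchg_futex_value_locked(&curval, uaddr, uval, newval))
		return -EFAULT;		/* faulted with pagefaults disabled */
	if (curval != uval)
		goto retry;		/* value changed under us */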
 
 static int get_futex_value_locked(u32 *dest, u32 __user *from)
@@ -372,9 +493,9 @@ static void free_pi_state(struct futex_pi_state *pi_state)
         * and has cleaned up the pi_state already
         */
        if (pi_state->owner) {
-               spin_lock_irq(&pi_state->owner->pi_lock);
+               raw_spin_lock_irq(&pi_state->owner->pi_lock);
                list_del_init(&pi_state->list);
-               spin_unlock_irq(&pi_state->owner->pi_lock);
+               raw_spin_unlock_irq(&pi_state->owner->pi_lock);
 
                rt_mutex_proxy_unlock(&pi_state->pi_mutex, pi_state->owner);
        }
@@ -400,20 +521,11 @@ static void free_pi_state(struct futex_pi_state *pi_state)
 static struct task_struct * futex_find_get_task(pid_t pid)
 {
        struct task_struct *p;
-       const struct cred *cred = current_cred(), *pcred;
 
        rcu_read_lock();
        p = find_task_by_vpid(pid);
-       if (!p) {
-               p = ERR_PTR(-ESRCH);
-       } else {
-               pcred = __task_cred(p);
-               if (cred->euid != pcred->euid &&
-                   cred->euid != pcred->uid)
-                       p = ERR_PTR(-ESRCH);
-               else
-                       get_task_struct(p);
-       }
+       if (p)
+               get_task_struct(p);
 
        rcu_read_unlock();
 
@@ -439,18 +551,18 @@ void exit_pi_state_list(struct task_struct *curr)
         * pi_state_list anymore, but we have to be careful
         * versus waiters unqueueing themselves:
         */
-       spin_lock_irq(&curr->pi_lock);
+       raw_spin_lock_irq(&curr->pi_lock);
        while (!list_empty(head)) {
 
                next = head->next;
                pi_state = list_entry(next, struct futex_pi_state, list);
                key = pi_state->key;
                hb = hash_futex(&key);
-               spin_unlock_irq(&curr->pi_lock);
+               raw_spin_unlock_irq(&curr->pi_lock);
 
                spin_lock(&hb->lock);
 
-               spin_lock_irq(&curr->pi_lock);
+               raw_spin_lock_irq(&curr->pi_lock);
                /*
                 * We dropped the pi-lock, so re-check whether this
                 * task still owns the PI-state:
@@ -464,15 +576,15 @@ void exit_pi_state_list(struct task_struct *curr)
                WARN_ON(list_empty(&pi_state->list));
                list_del_init(&pi_state->list);
                pi_state->owner = NULL;
-               spin_unlock_irq(&curr->pi_lock);
+               raw_spin_unlock_irq(&curr->pi_lock);
 
                rt_mutex_unlock(&pi_state->pi_mutex);
 
                spin_unlock(&hb->lock);
 
-               spin_lock_irq(&curr->pi_lock);
+               raw_spin_lock_irq(&curr->pi_lock);
        }
-       spin_unlock_irq(&curr->pi_lock);
+       raw_spin_unlock_irq(&curr->pi_lock);
 }
 
 static int
@@ -495,14 +607,31 @@ lookup_pi_state(u32 uval, struct futex_hash_bucket *hb,
                         */
                        pi_state = this->pi_state;
                        /*
-                        * Userspace might have messed up non PI and PI futexes
+                        * Userspace might have messed up non-PI and PI futexes
                         */
                        if (unlikely(!pi_state))
                                return -EINVAL;
 
                        WARN_ON(!atomic_read(&pi_state->refcount));
-                       WARN_ON(pid && pi_state->owner &&
-                               pi_state->owner->pid != pid);
+
+                       /*
+                        * When pi_state->owner is NULL then the owner died
+                        * and another waiter is on the fly. pi_state->owner
+                        * is fixed up by the task which acquires
+                        * pi_state->rt_mutex.
+                        *
+                        * We do not check for pid == 0 which can happen when
+                        * the owner died and robust_list_exit() cleared the
+                        * TID.
+                        */
+                       if (pid && pi_state->owner) {
+                               /*
+                                * Bail out if user space manipulated the
+                                * futex value.
+                                */
+                               if (pid != task_pid_vnr(pi_state->owner))
+                                       return -EINVAL;
+                       }
 
                        atomic_inc(&pi_state->refcount);
                        *ps = pi_state;
@@ -518,8 +647,8 @@ lookup_pi_state(u32 uval, struct futex_hash_bucket *hb,
        if (!pid)
                return -ESRCH;
        p = futex_find_get_task(pid);
-       if (IS_ERR(p))
-               return PTR_ERR(p);
+       if (!p)
+               return -ESRCH;
 
        /*
         * We need to look at the task state flags to figure out,
@@ -527,7 +656,7 @@ lookup_pi_state(u32 uval, struct futex_hash_bucket *hb,
         * change of the task flags, we do this protected by
         * p->pi_lock:
         */
-       spin_lock_irq(&p->pi_lock);
+       raw_spin_lock_irq(&p->pi_lock);
        if (unlikely(p->flags & PF_EXITING)) {
                /*
                 * The task is on the way out. When PF_EXITPIDONE is
@@ -536,7 +665,7 @@ lookup_pi_state(u32 uval, struct futex_hash_bucket *hb,
                 */
                int ret = (p->flags & PF_EXITPIDONE) ? -ESRCH : -EAGAIN;
 
-               spin_unlock_irq(&p->pi_lock);
+               raw_spin_unlock_irq(&p->pi_lock);
                put_task_struct(p);
                return ret;
        }
@@ -555,7 +684,7 @@ lookup_pi_state(u32 uval, struct futex_hash_bucket *hb,
        WARN_ON(!list_empty(&pi_state->list));
        list_add(&pi_state->list, &p->pi_state_list);
        pi_state->owner = p;
-       spin_unlock_irq(&p->pi_lock);
+       raw_spin_unlock_irq(&p->pi_lock);
 
        put_task_struct(p);
 
@@ -565,7 +694,7 @@ lookup_pi_state(u32 uval, struct futex_hash_bucket *hb,
 }
 
 /**
- * futex_lock_pi_atomic() - atomic work required to acquire a pi aware futex
+ * futex_lock_pi_atomic() - Atomic work required to acquire a pi aware futex
  * @uaddr:             the pi futex user address
  * @hb:                        the pi futex hash bucket
  * @key:               the futex key associated with uaddr and hb
@@ -588,7 +717,7 @@ static int futex_lock_pi_atomic(u32 __user *uaddr, struct futex_hash_bucket *hb,
                                struct task_struct *task, int set_waiters)
 {
        int lock_taken, ret, ownerdied = 0;
-       u32 uval, newval, curval;
+       u32 uval, newval, curval, vpid = task_pid_vnr(task);
 
 retry:
        ret = lock_taken = 0;
@@ -598,19 +727,17 @@ retry:
         * (by doing a 0 -> TID atomic cmpxchg), while holding all
         * the locks. It will most likely not succeed.
         */
-       newval = task_pid_vnr(task);
+       newval = vpid;
        if (set_waiters)
                newval |= FUTEX_WAITERS;
 
-       curval = cmpxchg_futex_value_locked(uaddr, 0, newval);
-
-       if (unlikely(curval == -EFAULT))
+       if (unlikely(cmpxchg_futex_value_locked(&curval, uaddr, 0, newval)))
                return -EFAULT;
 
        /*
         * Detect deadlocks.
         */
-       if ((unlikely((curval & FUTEX_TID_MASK) == task_pid_vnr(task))))
+       if ((unlikely((curval & FUTEX_TID_MASK) == vpid)))
                return -EDEADLK;
 
        /*
@@ -637,14 +764,12 @@ retry:
         */
        if (unlikely(ownerdied || !(curval & FUTEX_TID_MASK))) {
                /* Keep the OWNER_DIED bit */
-               newval = (curval & ~FUTEX_TID_MASK) | task_pid_vnr(task);
+               newval = (curval & ~FUTEX_TID_MASK) | vpid;
                ownerdied = 0;
                lock_taken = 1;
        }
 
-       curval = cmpxchg_futex_value_locked(uaddr, uval, newval);
-
-       if (unlikely(curval == -EFAULT))
+       if (unlikely(cmpxchg_futex_value_locked(&curval, uaddr, uval, newval)))
                return -EFAULT;
        if (unlikely(curval != uval))
                goto retry;
@@ -689,6 +814,24 @@ retry:
        return ret;
 }
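For context, the userspace half of the protocol that futex_lock_pi_atomic() completes: the uncontended fast path is the 0 -> TID cmpxchg done entirely in userspace, and only contention enters the kernel, which retries the same transition under the hash bucket lock. A sketch assuming C11 atomics:

	#include <linux/futex.h>
	#include <stdatomic.h>
	#include <sys/syscall.h>
	#include <unistd.h>

	static void pi_lock(atomic_uint *uaddr, unsigned int tid)
	{
		unsigned int expected = 0;

		/* Uncontended: 0 -> TID in userspace, no syscall. */
		if (atomic_compare_exchange_strong(uaddr, &expected, tid))
			return;

		/* Contended: the kernel sets FUTEX_WAITERS and blocks us. */
		syscall(SYS_futex, uaddr, FUTEX_LOCK_PI, 0, NULL, NULL, 0);
	}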
 
+/**
+ * __unqueue_futex() - Remove the futex_q from its futex_hash_bucket
+ * @q: The futex_q to unqueue
+ *
+ * The q->lock_ptr must not be NULL and must be held by the caller.
+ */
+static void __unqueue_futex(struct futex_q *q)
+{
+       struct futex_hash_bucket *hb;
+
+       if (WARN_ON_SMP(!q->lock_ptr || !spin_is_locked(q->lock_ptr))
+           || WARN_ON(plist_node_empty(&q->list)))
+               return;
+
+       hb = container_of(q->lock_ptr, struct futex_hash_bucket, lock);
+       plist_del(&q->list, &hb->chain);
+}
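__unqueue_futex() recovers the enclosing hash bucket from the member pointer q->lock_ptr via container_of(), which is essentially the classic offset trick (simplified here; the kernel macro adds type checking):

	#include <stddef.h>

	#define container_of(ptr, type, member) \
		((type *)((char *)(ptr) - offsetof(type, member)))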
+
 /*
  * The hash bucket lock must be held when this is called.
  * Afterwards, the futex_q must not be accessed.
@@ -699,14 +842,14 @@ static void wake_futex(struct futex_q *q)
 
        /*
         * We set q->lock_ptr = NULL _before_ we wake up the task. If
-        * a non futex wake up happens on another CPU then the task
-        * might exit and p would dereference a non existing task
+        * a non-futex wake up happens on another CPU then the task
+        * might exit and p would dereference a non-existing task
         * struct. Prevent this by holding a reference on p across the
         * wake up.
         */
        get_task_struct(p);
 
-       plist_del(&q->list, &q->list.plist);
+       __unqueue_futex(q);
        /*
         * The waiting task can free the futex_q as soon as
         * q->lock_ptr = NULL is written, without taking any locks. A
@@ -724,19 +867,25 @@ static int wake_futex_pi(u32 __user *uaddr, u32 uval, struct futex_q *this)
 {
        struct task_struct *new_owner;
        struct futex_pi_state *pi_state = this->pi_state;
-       u32 curval, newval;
+       u32 uninitialized_var(curval), newval;
 
        if (!pi_state)
                return -EINVAL;
 
-       spin_lock(&pi_state->pi_mutex.wait_lock);
+       /*
+        * If current does not own the pi_state then the futex is
+        * inconsistent and user space fiddled with the futex value.
+        */
+       if (pi_state->owner != current)
+               return -EINVAL;
+
+       raw_spin_lock(&pi_state->pi_mutex.wait_lock);
        new_owner = rt_mutex_next_owner(&pi_state->pi_mutex);
 
        /*
-        * This happens when we have stolen the lock and the original
-        * pending owner did not enqueue itself back on the rt_mutex.
-        * Thats not a tragedy. We know that way, that a lock waiter
-        * is on the fly. We make the futex_q waiter the pending owner.
+        * It is possible that the next waiter (the one that brought
+        * this owner to the kernel) timed out and is no longer
+        * waiting on the lock.
         */
        if (!new_owner)
                new_owner = this->task;
@@ -751,30 +900,28 @@ static int wake_futex_pi(u32 __user *uaddr, u32 uval, struct futex_q *this)
 
                newval = FUTEX_WAITERS | task_pid_vnr(new_owner);
 
-               curval = cmpxchg_futex_value_locked(uaddr, uval, newval);
-
-               if (curval == -EFAULT)
+               if (cmpxchg_futex_value_locked(&curval, uaddr, uval, newval))
                        ret = -EFAULT;
                else if (curval != uval)
                        ret = -EINVAL;
                if (ret) {
-                       spin_unlock(&pi_state->pi_mutex.wait_lock);
+                       raw_spin_unlock(&pi_state->pi_mutex.wait_lock);
                        return ret;
                }
        }
 
-       spin_lock_irq(&pi_state->owner->pi_lock);
+       raw_spin_lock_irq(&pi_state->owner->pi_lock);
        WARN_ON(list_empty(&pi_state->list));
        list_del_init(&pi_state->list);
-       spin_unlock_irq(&pi_state->owner->pi_lock);
+       raw_spin_unlock_irq(&pi_state->owner->pi_lock);
 
-       spin_lock_irq(&new_owner->pi_lock);
+       raw_spin_lock_irq(&new_owner->pi_lock);
        WARN_ON(!list_empty(&pi_state->list));
        list_add(&pi_state->list, &new_owner->pi_state_list);
        pi_state->owner = new_owner;
-       spin_unlock_irq(&new_owner->pi_lock);
+       raw_spin_unlock_irq(&new_owner->pi_lock);
 
-       spin_unlock(&pi_state->pi_mutex.wait_lock);
+       raw_spin_unlock(&pi_state->pi_mutex.wait_lock);
        rt_mutex_unlock(&pi_state->pi_mutex);
 
        return 0;
@@ -782,16 +929,14 @@ static int wake_futex_pi(u32 __user *uaddr, u32 uval, struct futex_q *this)
 
 static int unlock_futex_pi(u32 __user *uaddr, u32 uval)
 {
-       u32 oldval;
+       u32 uninitialized_var(oldval);
 
        /*
         * There is no waiter, so we unlock the futex. The owner died
         * bit does not need to be preserved here. We are the owner:
         */
-       oldval = cmpxchg_futex_value_locked(uaddr, uval, 0);
-
-       if (oldval == -EFAULT)
-               return oldval;
+       if (cmpxchg_futex_value_locked(&oldval, uaddr, uval, 0))
+               return -EFAULT;
        if (oldval != uval)
                return -EAGAIN;
 
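The matching userspace unlock is the mirror image: a TID -> 0 cmpxchg that only succeeds while no FUTEX_WAITERS bit is set, falling back to the kernel path above otherwise. A sketch, companion to pi_lock() earlier:

	static void pi_unlock(atomic_uint *uaddr, unsigned int tid)
	{
		unsigned int expected = tid;

		/* Fails if FUTEX_WAITERS or FUTEX_OWNER_DIED is set. */
		if (atomic_compare_exchange_strong(uaddr, &expected, 0))
			return;

		syscall(SYS_futex, uaddr, FUTEX_UNLOCK_PI, 0, NULL, NULL, 0);
	}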
@@ -825,7 +970,8 @@ double_unlock_hb(struct futex_hash_bucket *hb1, struct futex_hash_bucket *hb2)
 /*
  * Wake up waiters matching bitset queued on this futex (uaddr).
  */
-static int futex_wake(u32 __user *uaddr, int fshared, int nr_wake, u32 bitset)
+static int
+futex_wake(u32 __user *uaddr, unsigned int flags, int nr_wake, u32 bitset)
 {
        struct futex_hash_bucket *hb;
        struct futex_q *this, *next;
@@ -836,7 +982,7 @@ static int futex_wake(u32 __user *uaddr, int fshared, int nr_wake, u32 bitset)
        if (!bitset)
                return -EINVAL;
 
-       ret = get_futex_key(uaddr, fshared, &key, VERIFY_READ);
+       ret = get_futex_key(uaddr, flags & FLAGS_SHARED, &key, VERIFY_READ);
        if (unlikely(ret != 0))
                goto out;
 
@@ -862,7 +1008,7 @@ static int futex_wake(u32 __user *uaddr, int fshared, int nr_wake, u32 bitset)
        }
 
        spin_unlock(&hb->lock);
-       put_futex_key(fshared, &key);
+       put_futex_key(&key);
 out:
        return ret;
 }
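The waker side corresponding to futex_wait_simple() from earlier, for illustration:

	/* Wake up to nr waiters blocked on uaddr (INT_MAX wakes all). */
	static int futex_wake_simple(unsigned int *uaddr, int nr)
	{
		return syscall(SYS_futex, uaddr, FUTEX_WAKE_PRIVATE, nr,
			       NULL, NULL, 0);
	}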
@@ -872,7 +1018,7 @@ out:
  * to this virtual address:
  */
 static int
-futex_wake_op(u32 __user *uaddr1, int fshared, u32 __user *uaddr2,
+futex_wake_op(u32 __user *uaddr1, unsigned int flags, u32 __user *uaddr2,
              int nr_wake, int nr_wake2, int op)
 {
        union futex_key key1 = FUTEX_KEY_INIT, key2 = FUTEX_KEY_INIT;
@@ -882,21 +1028,20 @@ futex_wake_op(u32 __user *uaddr1, int fshared, u32 __user *uaddr2,
        int ret, op_ret;
 
 retry:
-       ret = get_futex_key(uaddr1, fshared, &key1, VERIFY_READ);
+       ret = get_futex_key(uaddr1, flags & FLAGS_SHARED, &key1, VERIFY_READ);
        if (unlikely(ret != 0))
                goto out;
-       ret = get_futex_key(uaddr2, fshared, &key2, VERIFY_WRITE);
+       ret = get_futex_key(uaddr2, flags & FLAGS_SHARED, &key2, VERIFY_WRITE);
        if (unlikely(ret != 0))
                goto out_put_key1;
 
        hb1 = hash_futex(&key1);
        hb2 = hash_futex(&key2);
 
-       double_lock_hb(hb1, hb2);
 retry_private:
+       double_lock_hb(hb1, hb2);
        op_ret = futex_atomic_op_inuser(op, uaddr2);
        if (unlikely(op_ret < 0)) {
-               u32 dummy;
 
                double_unlock_hb(hb1, hb2);
 
@@ -914,15 +1059,15 @@ retry_private:
                        goto out_put_keys;
                }
 
-               ret = get_user(dummy, uaddr2);
+               ret = fault_in_user_writeable(uaddr2);
                if (ret)
                        goto out_put_keys;
 
-               if (!fshared)
+               if (!(flags & FLAGS_SHARED))
                        goto retry_private;
 
-               put_futex_key(fshared, &key2);
-               put_futex_key(fshared, &key1);
+               put_futex_key(&key2);
+               put_futex_key(&key1);
                goto retry;
        }
 
@@ -952,9 +1097,9 @@ retry_private:
 
        double_unlock_hb(hb1, hb2);
 out_put_keys:
-       put_futex_key(fshared, &key2);
+       put_futex_key(&key2);
 out_put_key1:
-       put_futex_key(fshared, &key1);
+       put_futex_key(&key1);
 out:
        return ret;
 }
@@ -979,9 +1124,6 @@ void requeue_futex(struct futex_q *q, struct futex_hash_bucket *hb1,
                plist_del(&q->list, &hb1->chain);
                plist_add(&q->list, &hb2->chain);
                q->lock_ptr = &hb2->lock;
-#ifdef CONFIG_DEBUG_PI_LIST
-               q->list.plist.lock = &hb2->lock;
-#endif
        }
        get_futex_key_refs(key2);
        q->key = *key2;
@@ -989,28 +1131,32 @@ void requeue_futex(struct futex_q *q, struct futex_hash_bucket *hb1,
 
 /**
  * requeue_pi_wake_futex() - Wake a task that acquired the lock during requeue
- * q:  the futex_q
- * key:        the key of the requeue target futex
+ * @q:         the futex_q
+ * @key:       the key of the requeue target futex
+ * @hb:                the hash_bucket of the requeue target futex
  *
  * During futex_requeue, with requeue_pi=1, it is possible to acquire the
  * target futex if it is uncontended or via a lock steal.  Set the futex_q key
  * to the requeue target futex so the waiter can detect the wakeup on the right
  * futex, but remove it from the hb and NULL the rt_waiter so it can detect
- * atomic lock acquisition.  Must be called with the q->lock_ptr held.
+ * atomic lock acquisition.  Set the q->lock_ptr to the requeue target hb->lock
+ * to protect access to the pi_state to fixup the owner later.  Must be called
+ * with both q->lock_ptr and hb->lock held.
  */
 static inline
-void requeue_pi_wake_futex(struct futex_q *q, union futex_key *key)
+void requeue_pi_wake_futex(struct futex_q *q, union futex_key *key,
+                          struct futex_hash_bucket *hb)
 {
-       drop_futex_key_refs(&q->key);
        get_futex_key_refs(key);
        q->key = *key;
 
-       WARN_ON(plist_node_empty(&q->list));
-       plist_del(&q->list, &q->list.plist);
+       __unqueue_futex(q);
 
        WARN_ON(!q->rt_waiter);
        q->rt_waiter = NULL;
 
+       q->lock_ptr = &hb->lock;
+
        wake_up_state(q->task, TASK_NORMAL);
 }
 
@@ -1061,6 +1207,10 @@ static int futex_proxy_trylock_atomic(u32 __user *pifutex,
        if (!top_waiter)
                return 0;
 
+       /* Ensure we requeue to the expected futex. */
+       if (!match_futex(top_waiter->requeue_pi_key, key2))
+               return -EINVAL;
+
        /*
         * Try to take the lock for top_waiter.  Set the FUTEX_WAITERS bit in
         * the contended case or if set_waiters is 1.  The pi_state is returned
@@ -1069,19 +1219,21 @@ static int futex_proxy_trylock_atomic(u32 __user *pifutex,
        ret = futex_lock_pi_atomic(pifutex, hb2, key2, ps, top_waiter->task,
                                   set_waiters);
        if (ret == 1)
-               requeue_pi_wake_futex(top_waiter, key2);
+               requeue_pi_wake_futex(top_waiter, key2, hb2);
 
        return ret;
 }
 
 /**
  * futex_requeue() - Requeue waiters from uaddr1 to uaddr2
- * uaddr1:     source futex user address
- * uaddr2:     target futex user address
- * nr_wake:    number of waiters to wake (must be 1 for requeue_pi)
- * nr_requeue: number of waiters to requeue (0-INT_MAX)
- * requeue_pi: if we are attempting to requeue from a non-pi futex to a
- *             pi futex (pi to pi requeue is not supported)
+ * @uaddr1:    source futex user address
+ * @flags:     futex flags (FLAGS_SHARED, etc.)
+ * @uaddr2:    target futex user address
+ * @nr_wake:   number of waiters to wake (must be 1 for requeue_pi)
+ * @nr_requeue:        number of waiters to requeue (0-INT_MAX)
+ * @cmpval:    @uaddr1 expected value (or %NULL)
+ * @requeue_pi:        if we are attempting to requeue from a non-pi futex to a
+ *             pi futex (pi to pi requeue is not supported)
  *
  * Requeue waiters on uaddr1 to uaddr2. In the requeue_pi case, try to acquire
  * uaddr2 atomically on behalf of the top waiter.
@@ -1090,9 +1242,9 @@ static int futex_proxy_trylock_atomic(u32 __user *pifutex,
  * >=0 - on success, the number of tasks requeued or woken
  *  <0 - on error
  */
-static int futex_requeue(u32 __user *uaddr1, int fshared, u32 __user *uaddr2,
-                        int nr_wake, int nr_requeue, u32 *cmpval,
-                        int requeue_pi)
+static int futex_requeue(u32 __user *uaddr1, unsigned int flags,
+                        u32 __user *uaddr2, int nr_wake, int nr_requeue,
+                        u32 *cmpval, int requeue_pi)
 {
        union futex_key key1 = FUTEX_KEY_INIT, key2 = FUTEX_KEY_INIT;
        int drop_count = 0, task_count = 0, ret;
@@ -1133,10 +1285,10 @@ retry:
                pi_state = NULL;
        }
 
-       ret = get_futex_key(uaddr1, fshared, &key1, VERIFY_READ);
+       ret = get_futex_key(uaddr1, flags & FLAGS_SHARED, &key1, VERIFY_READ);
        if (unlikely(ret != 0))
                goto out;
-       ret = get_futex_key(uaddr2, fshared, &key2,
+       ret = get_futex_key(uaddr2, flags & FLAGS_SHARED, &key2,
                            requeue_pi ? VERIFY_WRITE : VERIFY_READ);
        if (unlikely(ret != 0))
                goto out_put_key1;
@@ -1159,11 +1311,11 @@ retry_private:
                        if (ret)
                                goto out_put_keys;
 
-                       if (!fshared)
+                       if (!(flags & FLAGS_SHARED))
                                goto retry_private;
 
-                       put_futex_key(fshared, &key2);
-                       put_futex_key(fshared, &key1);
+                       put_futex_key(&key2);
+                       put_futex_key(&key1);
                        goto retry;
                }
                if (curval != *cmpval) {
@@ -1190,6 +1342,7 @@ retry_private:
                 */
                if (ret == 1) {
                        WARN_ON(pi_state);
+                       drop_count++;
                        task_count++;
                        ret = get_futex_value_locked(&curval2, uaddr2);
                        if (!ret)
@@ -1202,17 +1355,17 @@ retry_private:
                        break;
                case -EFAULT:
                        double_unlock_hb(hb1, hb2);
-                       put_futex_key(fshared, &key2);
-                       put_futex_key(fshared, &key1);
-                       ret = get_user(curval2, uaddr2);
+                       put_futex_key(&key2);
+                       put_futex_key(&key1);
+                       ret = fault_in_user_writeable(uaddr2);
                        if (!ret)
                                goto retry;
                        goto out;
                case -EAGAIN:
                        /* The owner was exiting, try again. */
                        double_unlock_hb(hb1, hb2);
-                       put_futex_key(fshared, &key2);
-                       put_futex_key(fshared, &key1);
+                       put_futex_key(&key2);
+                       put_futex_key(&key1);
                        cond_resched();
                        goto retry;
                default:
@@ -1228,8 +1381,15 @@ retry_private:
                if (!match_futex(&this->key, &key1))
                        continue;
 
-               WARN_ON(!requeue_pi && this->rt_waiter);
-               WARN_ON(requeue_pi && !this->rt_waiter);
+               /*
+                * FUTEX_WAIT_REQEUE_PI and FUTEX_CMP_REQUEUE_PI should always
+                * be paired with each other and no other futex ops.
+                */
+               if ((requeue_pi && !this->rt_waiter) ||
+                   (!requeue_pi && this->rt_waiter)) {
+                       ret = -EINVAL;
+                       break;
+               }
 
                /*
                 * Wake nr_wake waiters.  For requeue_pi, if we acquired the
@@ -1241,6 +1401,12 @@ retry_private:
                        continue;
                }
 
+               /* Ensure we requeue to the expected futex for requeue_pi. */
+               if (requeue_pi && !match_futex(this->requeue_pi_key, &key2)) {
+                       ret = -EINVAL;
+                       break;
+               }
+
                /*
                 * Requeue nr_requeue waiters and possibly one more in the case
                 * of requeue_pi if we couldn't acquire the lock atomically.
@@ -1254,7 +1420,8 @@ retry_private:
                                                        this->task, 1);
                        if (ret == 1) {
                                /* We got the lock. */
-                               requeue_pi_wake_futex(this, &key2);
+                               requeue_pi_wake_futex(this, &key2, hb2);
+                               drop_count++;
                                continue;
                        } else if (ret) {
                                /* -EDEADLK */
@@ -1280,9 +1447,9 @@ out_unlock:
                drop_futex_key_refs(&key1);
 
 out_put_keys:
-       put_futex_key(fshared, &key2);
+       put_futex_key(&key2);
 out_put_key1:
-       put_futex_key(fshared, &key1);
+       put_futex_key(&key1);
 out:
        if (pi_state != NULL)
                free_pi_state(pi_state);
@@ -1291,10 +1458,10 @@ out:
 
 /* The key must be already stored in q->key. */
 static inline struct futex_hash_bucket *queue_lock(struct futex_q *q)
+       __acquires(&hb->lock)
 {
        struct futex_hash_bucket *hb;
 
-       get_futex_key_refs(&q->key);
        hb = hash_futex(&q->key);
        q->lock_ptr = &hb->lock;
 
@@ -1302,7 +1469,27 @@ static inline struct futex_hash_bucket *queue_lock(struct futex_q *q)
        return hb;
 }
 
+static inline void
+queue_unlock(struct futex_q *q, struct futex_hash_bucket *hb)
+       __releases(&hb->lock)
+{
+       spin_unlock(&hb->lock);
+}
+
+/**
+ * queue_me() - Enqueue the futex_q on the futex_hash_bucket
+ * @q: The futex_q to enqueue
+ * @hb:        The destination hash bucket
+ *
+ * The hb->lock must be held by the caller, and is released here. A call to
+ * queue_me() is typically paired with exactly one call to unqueue_me().  The
+ * exceptions involve the PI related operations, which may use unqueue_me_pi()
+ * or nothing if the unqueue is done as part of the wake process and the unqueue
+ * state is implicit in the state of woken task (see futex_wait_requeue_pi() for
+ * an example).
+ */
 static inline void queue_me(struct futex_q *q, struct futex_hash_bucket *hb)
+       __releases(&hb->lock)
 {
        int prio;
 
@@ -1317,27 +1504,22 @@ static inline void queue_me(struct futex_q *q, struct futex_hash_bucket *hb)
        prio = min(current->normal_prio, MAX_RT_PRIO);
 
        plist_node_init(&q->list, prio);
-#ifdef CONFIG_DEBUG_PI_LIST
-       q->list.plist.lock = &hb->lock;
-#endif
        plist_add(&q->list, &hb->chain);
        q->task = current;
        spin_unlock(&hb->lock);
 }
 
-static inline void
-queue_unlock(struct futex_q *q, struct futex_hash_bucket *hb)
-{
-       spin_unlock(&hb->lock);
-       drop_futex_key_refs(&q->key);
-}
-
-/*
- * queue_me and unqueue_me must be called as a pair, each
- * exactly once.  They are called with the hashed spinlock held.
+/**
+ * unqueue_me() - Remove the futex_q from its futex_hash_bucket
+ * @q: The futex_q to unqueue
+ *
+ * The q->lock_ptr must not be held by the caller. A call to unqueue_me() must
+ * be paired with exactly one earlier call to queue_me().
+ *
+ * Returns:
+ *   1 - if the futex_q was still queued (and we removed it)
+ *   0 - if the futex_q was already removed by the waking thread
  */
-
-/* Return 1 if we were still queued (ie. 0 means we were woken) */
 static int unqueue_me(struct futex_q *q)
 {
        spinlock_t *lock_ptr;
@@ -1366,8 +1548,7 @@ retry:
                        spin_unlock(lock_ptr);
                        goto retry;
                }
-               WARN_ON(plist_node_empty(&q->list));
-               plist_del(&q->list, &q->list.plist);
+               __unqueue_futex(q);
 
                BUG_ON(q->pi_state);
 
@@ -1385,17 +1566,15 @@ retry:
  * and dropped here.
  */
 static void unqueue_me_pi(struct futex_q *q)
+       __releases(q->lock_ptr)
 {
-       WARN_ON(plist_node_empty(&q->list));
-       plist_del(&q->list, &q->list.plist);
+       __unqueue_futex(q);
 
        BUG_ON(!q->pi_state);
        free_pi_state(q->pi_state);
        q->pi_state = NULL;
 
        spin_unlock(q->lock_ptr);
-
-       drop_futex_key_refs(&q->key);
 }
 
 /*
@@ -1405,12 +1584,12 @@ static void unqueue_me_pi(struct futex_q *q)
  * private futexes.
  */
 static int fixup_pi_state_owner(u32 __user *uaddr, struct futex_q *q,
-                               struct task_struct *newowner, int fshared)
+                               struct task_struct *newowner)
 {
        u32 newtid = task_pid_vnr(newowner) | FUTEX_WAITERS;
        struct futex_pi_state *pi_state = q->pi_state;
        struct task_struct *oldowner = pi_state->owner;
-       u32 uval, curval, newval;
+       u32 uval, uninitialized_var(curval), newval;
        int ret;
 
        /* Owner died? */
@@ -1419,10 +1598,10 @@ static int fixup_pi_state_owner(u32 __user *uaddr, struct futex_q *q,
 
        /*
         * We are here either because we stole the rtmutex from the
-        * pending owner or we are the pending owner which failed to
-        * get the rtmutex. We have to replace the pending owner TID
-        * in the user space variable. This must be atomic as we have
-        * to preserve the owner died bit here.
+        * previous highest priority waiter or we are the highest priority
+        * waiter but failed to get the rtmutex the first time.
+        * We have to replace the newowner TID in the user space variable.
+        * This must be atomic as we have to preserve the owner died bit here.
         *
         * Note: We write the user space value _before_ changing the pi_state
         * because we can fault here. Imagine swapped out pages or a fork
@@ -1441,9 +1620,7 @@ retry:
        while (1) {
                newval = (uval & FUTEX_OWNER_DIED) | newtid;
 
-               curval = cmpxchg_futex_value_locked(uaddr, uval, newval);
-
-               if (curval == -EFAULT)
+               if (cmpxchg_futex_value_locked(&curval, uaddr, uval, newval))
                        goto handle_fault;
                if (curval == uval)
                        break;
@@ -1455,24 +1632,24 @@ retry:
         * itself.
         */
        if (pi_state->owner != NULL) {
-               spin_lock_irq(&pi_state->owner->pi_lock);
+               raw_spin_lock_irq(&pi_state->owner->pi_lock);
                WARN_ON(list_empty(&pi_state->list));
                list_del_init(&pi_state->list);
-               spin_unlock_irq(&pi_state->owner->pi_lock);
+               raw_spin_unlock_irq(&pi_state->owner->pi_lock);
        }
 
        pi_state->owner = newowner;
 
-       spin_lock_irq(&newowner->pi_lock);
+       raw_spin_lock_irq(&newowner->pi_lock);
        WARN_ON(!list_empty(&pi_state->list));
        list_add(&pi_state->list, &newowner->pi_state_list);
-       spin_unlock_irq(&newowner->pi_lock);
+       raw_spin_unlock_irq(&newowner->pi_lock);
        return 0;
 
        /*
         * To handle the page fault we need to drop the hash bucket
-        * lock here. That gives the other task (either the pending
-        * owner itself or the task which stole the rtmutex) the
+        * lock here. That gives the other task (either the highest priority
+        * waiter itself or the task which stole the rtmutex) the
         * chance to try the fixup of the pi_state. So once we are
         * back from handling the fault we need to check the pi_state
         * after reacquiring the hash bucket lock and before trying to
@@ -1482,7 +1659,7 @@ retry:
 handle_fault:
        spin_unlock(q->lock_ptr);
 
-       ret = get_user(uval, uaddr);
+       ret = fault_in_user_writeable(uaddr);
 
        spin_lock(q->lock_ptr);
 
@@ -1498,21 +1675,11 @@ handle_fault:
        goto retry;
 }
 
-/*
- * In case we must use restart_block to restart a futex_wait,
- * we encode in the 'flags' shared capability
- */
-#define FLAGS_SHARED           0x01
-#define FLAGS_CLOCKRT          0x02
-#define FLAGS_HAS_TIMEOUT      0x04
-
 static long futex_wait_restart(struct restart_block *restart);
-static long futex_lock_pi_restart(struct restart_block *restart);
 
 /**
  * fixup_owner() - Post lock pi_state and corner case management
  * @uaddr:     user address of the futex
- * @fshared:   whether the futex is shared (1) or not (0)
  * @q:         futex_q (contains pi_state and access to the rt_mutex)
  * @locked:    if the attempt to take the rt_mutex succeeded (1) or not (0)
  *
@@ -1525,8 +1692,7 @@ static long futex_lock_pi_restart(struct restart_block *restart);
  *  0 - success, lock not taken
  * <0 - on error (-EFAULT)
  */
-static int fixup_owner(u32 __user *uaddr, int fshared, struct futex_q *q,
-                      int locked)
+static int fixup_owner(u32 __user *uaddr, struct futex_q *q, int locked)
 {
        struct task_struct *owner;
        int ret = 0;
@@ -1537,7 +1703,7 @@ static int fixup_owner(u32 __user *uaddr, int fshared, struct futex_q *q,
                 * did a lock-steal - fix up the PI-state in that case:
                 */
                if (q->pi_state->owner != current)
-                       ret = fixup_pi_state_owner(uaddr, q, current, fshared);
+                       ret = fixup_pi_state_owner(uaddr, q, current);
                goto out;
        }
 
@@ -1559,18 +1725,20 @@ static int fixup_owner(u32 __user *uaddr, int fshared, struct futex_q *q,
                /*
                 * pi_state is incorrect, some other task did a lock steal and
                 * we returned due to timeout or signal without taking the
-                * rt_mutex. Too late. We can access the rt_mutex_owner without
-                * locking, as the other task is now blocked on the hash bucket
-                * lock. Fix the state up.
+                * rt_mutex. Too late.
                 */
+               raw_spin_lock(&q->pi_state->pi_mutex.wait_lock);
                owner = rt_mutex_owner(&q->pi_state->pi_mutex);
-               ret = fixup_pi_state_owner(uaddr, q, owner, fshared);
+               if (!owner)
+                       owner = rt_mutex_next_owner(&q->pi_state->pi_mutex);
+               raw_spin_unlock(&q->pi_state->pi_mutex.wait_lock);
+               ret = fixup_pi_state_owner(uaddr, q, owner);
                goto out;
        }
 
        /*
         * Paranoia check. If we did not take the lock, then we should not be
-        * the owner, nor the pending owner, of the rt_mutex.
+        * the owner of the rt_mutex.
         */
        if (rt_mutex_owner(&q->pi_state->pi_mutex) == current)
                printk(KERN_ERR "fixup_owner: ret = %d pi-mutex: %p "
@@ -1591,17 +1759,14 @@ out:
 static void futex_wait_queue_me(struct futex_hash_bucket *hb, struct futex_q *q,
                                struct hrtimer_sleeper *timeout)
 {
-       queue_me(q, hb);
-
        /*
-        * There might have been scheduling since the queue_me(), as we
-        * cannot hold a spinlock across the get_user() in case it
-        * faults, and we cannot just set TASK_INTERRUPTIBLE state when
-        * queueing ourselves into the futex hash. This code thus has to
-        * rely on the futex_wake() code removing us from hash when it
-        * wakes us up.
+        * The task state is guaranteed to be set before another task can
+        * wake it. set_current_state() is implemented using set_mb() and
+        * queue_me() calls spin_unlock() upon completion, both serializing
+        * access to the hash list and forcing another memory barrier.
         */
        set_current_state(TASK_INTERRUPTIBLE);
+       queue_me(q, hb);
 
        /* Arm the timer */
        if (timeout) {
@@ -1611,8 +1776,8 @@ static void futex_wait_queue_me(struct futex_hash_bucket *hb, struct futex_q *q,
        }
 
        /*
-        * !plist_node_empty() is safe here without any lock.
-        * q.lock_ptr != 0 is not safe, because of ordering against wakeup.
+        * If we have been removed from the hash list, then another task
+        * has tried to wake us, and we can skip the call to schedule().
         */
        if (likely(!plist_node_empty(&q->list))) {
                /*
@@ -1630,7 +1795,7 @@ static void futex_wait_queue_me(struct futex_hash_bucket *hb, struct futex_q *q,
  * futex_wait_setup() - Prepare to wait on a futex
  * @uaddr:     the futex userspace address
  * @val:       the expected value
- * @fshared:   whether the futex is shared (1) or not (0)
+ * @flags:     futex flags (FLAGS_SHARED, etc.)
  * @q:         the associated futex_q
  * @hb:                storage for hash_bucket pointer to be returned to caller
  *
@@ -1641,9 +1806,9 @@ static void futex_wait_queue_me(struct futex_hash_bucket *hb, struct futex_q *q,
  *
  * Returns:
  *  0 - uaddr contains val and hb has been locked
- * <1 - -EFAULT or -EWOULDBLOCK (uaddr does not contain val) and hb is unlcoked
+ * <0 - -EFAULT or -EWOULDBLOCK (uaddr does not contain val) and hb is unlocked
  */
-static int futex_wait_setup(u32 __user *uaddr, u32 val, int fshared,
+static int futex_wait_setup(u32 __user *uaddr, u32 val, unsigned int flags,
                           struct futex_q *q, struct futex_hash_bucket **hb)
 {
        u32 uval;
@@ -1658,17 +1823,17 @@ static int futex_wait_setup(u32 __user *uaddr, u32 val, int fshared,
         *
         * The basic logical guarantee of a futex is that it blocks ONLY
         * if cond(var) is known to be true at the time of blocking, for
-        * any cond.  If we queued after testing *uaddr, that would open
-        * a race condition where we could block indefinitely with
+        * any cond.  If we locked the hash-bucket after testing *uaddr, that
+        * would open a race condition where we could block indefinitely with
         * cond(var) false, which would violate the guarantee.
         *
-        * A consequence is that futex_wait() can return zero and absorb
-        * a wakeup when *uaddr != val on entry to the syscall.  This is
-        * rare, but normal.
+        * On the other hand, we insert q and release the hash-bucket only
+        * after testing *uaddr.  This guarantees that futex_wait() will NOT
+        * absorb a wakeup if *uaddr does not match the desired value
+        * while the syscall executes.
         */
 retry:
-       q->key = FUTEX_KEY_INIT;
-       ret = get_futex_key(uaddr, fshared, &q->key, VERIFY_READ);
+       ret = get_futex_key(uaddr, flags & FLAGS_SHARED, &q->key, VERIFY_READ);
        if (unlikely(ret != 0))
                return ret;
 
@@ -1684,10 +1849,10 @@ retry_private:
                if (ret)
                        goto out;
 
-               if (!fshared)
+               if (!(flags & FLAGS_SHARED))
                        goto retry_private;
 
-               put_futex_key(fshared, &q->key);
+               put_futex_key(&q->key);
                goto retry;
        }
 
@@ -1698,38 +1863,40 @@ retry_private:
 
 out:
        if (ret)
-               put_futex_key(fshared, &q->key);
+               put_futex_key(&q->key);
        return ret;
 }
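This ordering is what makes the canonical userspace wait loop correct: whether a wakeup is absorbed or arrives spuriously, the caller re-reads the variable and retries. A sketch using futex_wait_simple() from earlier (futex_var and the 0 == unlocked convention are hypothetical):

	unsigned int val;

	while ((val = atomic_load(&futex_var)) != 0)
		futex_wait_simple((unsigned int *)&futex_var, val, 0);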
 
-static int futex_wait(u32 __user *uaddr, int fshared,
-                     u32 val, ktime_t *abs_time, u32 bitset, int clockrt)
+static int futex_wait(u32 __user *uaddr, unsigned int flags, u32 val,
+                     ktime_t *abs_time, u32 bitset)
 {
        struct hrtimer_sleeper timeout, *to = NULL;
        struct restart_block *restart;
        struct futex_hash_bucket *hb;
-       struct futex_q q;
+       struct futex_q q = futex_q_init;
        int ret;
 
        if (!bitset)
                return -EINVAL;
-
-       q.pi_state = NULL;
        q.bitset = bitset;
-       q.rt_waiter = NULL;
 
        if (abs_time) {
                to = &timeout;
 
-               hrtimer_init_on_stack(&to->timer, clockrt ? CLOCK_REALTIME :
-                                     CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
+               hrtimer_init_on_stack(&to->timer, (flags & FLAGS_CLOCKRT) ?
+                                     CLOCK_REALTIME : CLOCK_MONOTONIC,
+                                     HRTIMER_MODE_ABS);
                hrtimer_init_sleeper(to, current);
                hrtimer_set_expires_range_ns(&to->timer, *abs_time,
                                             current->timer_slack_ns);
        }
 
-       /* Prepare to wait on uaddr. */
-       ret = futex_wait_setup(uaddr, val, fshared, &q, &hb);
+retry:
+       /*
+        * Prepare to wait on uaddr. On success, holds hb lock and increments
+        * q.key refs.
+        */
+       ret = futex_wait_setup(uaddr, val, flags, &q, &hb);
        if (ret)
                goto out;
 
@@ -1738,37 +1905,34 @@ static int futex_wait(u32 __user *uaddr, int fshared,
 
        /* If we were woken (and unqueued), we succeeded, whatever. */
        ret = 0;
+       /* unqueue_me() drops q.key ref */
        if (!unqueue_me(&q))
-               goto out_put_key;
+               goto out;
        ret = -ETIMEDOUT;
        if (to && !to->task)
-               goto out_put_key;
+               goto out;
 
        /*
-        * We expect signal_pending(current), but another thread may
-        * have handled it for us already.
+        * We expect signal_pending(current), but we might be the
+        * victim of a spurious wakeup as well.
         */
+       if (!signal_pending(current))
+               goto retry;
+
        ret = -ERESTARTSYS;
        if (!abs_time)
-               goto out_put_key;
+               goto out;
 
        restart = &current_thread_info()->restart_block;
        restart->fn = futex_wait_restart;
-       restart->futex.uaddr = (u32 *)uaddr;
+       restart->futex.uaddr = uaddr;
        restart->futex.val = val;
        restart->futex.time = abs_time->tv64;
        restart->futex.bitset = bitset;
-       restart->futex.flags = FLAGS_HAS_TIMEOUT;
-
-       if (fshared)
-               restart->futex.flags |= FLAGS_SHARED;
-       if (clockrt)
-               restart->futex.flags |= FLAGS_CLOCKRT;
+       restart->futex.flags = flags | FLAGS_HAS_TIMEOUT;
 
        ret = -ERESTART_RESTARTBLOCK;
 
-out_put_key:
-       put_futex_key(fshared, &q.key);
 out:
        if (to) {
                hrtimer_cancel(&to->timer);
@@ -1780,8 +1944,7 @@ out:
 
 static long futex_wait_restart(struct restart_block *restart)
 {
-       u32 __user *uaddr = (u32 __user *)restart->futex.uaddr;
-       int fshared = 0;
+       u32 __user *uaddr = restart->futex.uaddr;
        ktime_t t, *tp = NULL;
 
        if (restart->futex.flags & FLAGS_HAS_TIMEOUT) {
@@ -1789,11 +1952,9 @@ static long futex_wait_restart(struct restart_block *restart)
                tp = &t;
        }
        restart->fn = do_no_restart_syscall;
-       if (restart->futex.flags & FLAGS_SHARED)
-               fshared = 1;
-       return (long)futex_wait(uaddr, fshared, restart->futex.val, tp,
-                               restart->futex.bitset,
-                               restart->futex.flags & FLAGS_CLOCKRT);
+
+       return (long)futex_wait(uaddr, restart->futex.flags,
+                               restart->futex.val, tp, restart->futex.bitset);
 }
 
 
@@ -1803,13 +1964,12 @@ static long futex_wait_restart(struct restart_block *restart)
  * if there are waiters then it will block, it does PI, etc. (Due to
  * races the kernel might see a 0 value of the futex too.)
  */
-static int futex_lock_pi(u32 __user *uaddr, int fshared,
-                        int detect, ktime_t *time, int trylock)
+static int futex_lock_pi(u32 __user *uaddr, unsigned int flags, int detect,
+                        ktime_t *time, int trylock)
 {
        struct hrtimer_sleeper timeout, *to = NULL;
        struct futex_hash_bucket *hb;
-       u32 uval;
-       struct futex_q q;
+       struct futex_q q = futex_q_init;
        int res, ret;
 
        if (refill_pi_state_cache())
@@ -1823,11 +1983,8 @@ static int futex_lock_pi(u32 __user *uaddr, int fshared,
                hrtimer_set_expires(&to->timer, *time);
        }
 
-       q.pi_state = NULL;
-       q.rt_waiter = NULL;
 retry:
-       q.key = FUTEX_KEY_INIT;
-       ret = get_futex_key(uaddr, fshared, &q.key, VERIFY_WRITE);
+       ret = get_futex_key(uaddr, flags & FLAGS_SHARED, &q.key, VERIFY_WRITE);
        if (unlikely(ret != 0))
                goto out;
 
@@ -1849,7 +2006,7 @@ retry_private:
                         * exit to complete.
                         */
                        queue_unlock(&q, hb);
-                       put_futex_key(fshared, &q.key);
+                       put_futex_key(&q.key);
                        cond_resched();
                        goto retry;
                default:
@@ -1879,7 +2036,7 @@ retry_private:
         * Fixup the pi_state owner and possibly acquire the lock if we
         * haven't already.
         */
-       res = fixup_owner(uaddr, fshared, &q, !ret);
+       res = fixup_owner(uaddr, &q, !ret);
        /*
         * If fixup_owner() returned an error, propagate that.  If it acquired
         * the lock, clear our -ETIMEDOUT or -EINTR.
@@ -1897,66 +2054,44 @@ retry_private:
        /* Unqueue and drop the lock */
        unqueue_me_pi(&q);
 
-       goto out;
+       goto out_put_key;
 
 out_unlock_put_key:
        queue_unlock(&q, hb);
 
 out_put_key:
-       put_futex_key(fshared, &q.key);
+       put_futex_key(&q.key);
 out:
        if (to)
                destroy_hrtimer_on_stack(&to->timer);
        return ret != -EINTR ? ret : -ERESTARTNOINTR;
 
 uaddr_faulted:
-       /*
-        * We have to r/w  *(int __user *)uaddr, and we have to modify it
-        * atomically.  Therefore, if we continue to fault after get_user()
-        * below, we need to handle the fault ourselves, while still holding
-        * the mmap_sem.  This can occur if the uaddr is under contention as
-        * we have to drop the mmap_sem in order to call get_user().
-        */
        queue_unlock(&q, hb);
 
-       ret = get_user(uval, uaddr);
+       ret = fault_in_user_writeable(uaddr);
        if (ret)
                goto out_put_key;
 
-       if (!fshared)
+       if (!(flags & FLAGS_SHARED))
                goto retry_private;
 
-       put_futex_key(fshared, &q.key);
+       put_futex_key(&q.key);
        goto retry;
 }
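
futex_lock_pi() above is only the contended slow path; the design assumes user space takes a free lock without entering the kernel at all. A hedged sketch of that fast path (hypothetical helper; the lock word protocol - 0 free, kernel TID held - is the one the code above relies on):

#include <linux/futex.h>
#include <stdatomic.h>
#include <sys/syscall.h>
#include <unistd.h>

/* Fast path: atomically install our kernel TID into a free (0) lock
 * word.  Only on contention do we enter the kernel, which queues us on
 * the hash bucket and applies priority inheritance. */
static void pi_lock(atomic_uint *lock_word)
{
	unsigned int expected = 0;
	unsigned int tid = (unsigned int)syscall(SYS_gettid);

	if (!atomic_compare_exchange_strong(lock_word, &expected, tid))
		syscall(SYS_futex, lock_word, FUTEX_LOCK_PI, 0, NULL,
			NULL, 0);
}
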
 
-static long futex_lock_pi_restart(struct restart_block *restart)
-{
-       u32 __user *uaddr = (u32 __user *)restart->futex.uaddr;
-       ktime_t t, *tp = NULL;
-       int fshared = restart->futex.flags & FLAGS_SHARED;
-
-       if (restart->futex.flags & FLAGS_HAS_TIMEOUT) {
-               t.tv64 = restart->futex.time;
-               tp = &t;
-       }
-       restart->fn = do_no_restart_syscall;
-
-       return (long)futex_lock_pi(uaddr, fshared, restart->futex.val, tp, 0);
-}
-
 /*
  * Userspace attempted a TID -> 0 atomic transition, and failed.
  * This is the in-kernel slowpath: we look up the PI state (if any),
  * and do the rt-mutex unlock.
  */
-static int futex_unlock_pi(u32 __user *uaddr, int fshared)
+static int futex_unlock_pi(u32 __user *uaddr, unsigned int flags)
 {
        struct futex_hash_bucket *hb;
        struct futex_q *this, *next;
-       u32 uval;
        struct plist_head *head;
        union futex_key key = FUTEX_KEY_INIT;
+       u32 uval, vpid = task_pid_vnr(current);
        int ret;
 
 retry:
@@ -1965,10 +2100,10 @@ retry:
        /*
         * We release only a lock we actually own:
         */
-       if ((uval & FUTEX_TID_MASK) != task_pid_vnr(current))
+       if ((uval & FUTEX_TID_MASK) != vpid)
                return -EPERM;
 
-       ret = get_futex_key(uaddr, fshared, &key, VERIFY_WRITE);
+       ret = get_futex_key(uaddr, flags & FLAGS_SHARED, &key, VERIFY_WRITE);
        if (unlikely(ret != 0))
                goto out;
 
@@ -1980,17 +2115,14 @@ retry:
         * again. If it succeeds then we can return without waking
         * anyone else up:
         */
-       if (!(uval & FUTEX_OWNER_DIED))
-               uval = cmpxchg_futex_value_locked(uaddr, task_pid_vnr(current), 0);
-
-
-       if (unlikely(uval == -EFAULT))
+       if (!(uval & FUTEX_OWNER_DIED) &&
+           cmpxchg_futex_value_locked(&uval, uaddr, vpid, 0))
                goto pi_faulted;
        /*
         * Rare case: we managed to release the lock atomically,
         * no need to wake anyone else up:
         */
-       if (unlikely(uval == task_pid_vnr(current)))
+       if (unlikely(uval == vpid))
                goto out_unlock;
 
        /*
@@ -2023,23 +2155,16 @@ retry:
 
 out_unlock:
        spin_unlock(&hb->lock);
-       put_futex_key(fshared, &key);
+       put_futex_key(&key);
 
 out:
        return ret;
 
 pi_faulted:
-       /*
-        * We have to r/w  *(int __user *)uaddr, and we have to modify it
-        * atomically.  Therefore, if we continue to fault after get_user()
-        * below, we need to handle the fault ourselves, while still holding
-        * the mmap_sem.  This can occur if the uaddr is under contention as
-        * we have to drop the mmap_sem in order to call get_user().
-        */
        spin_unlock(&hb->lock);
-       put_futex_key(fshared, &key);
+       put_futex_key(&key);
 
-       ret = get_user(uval, uaddr);
+       ret = fault_in_user_writeable(uaddr);
        if (!ret)
                goto retry;
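
This slow path is reached from the mirror-image fast path in user space: the holder attempts the TID -> 0 transition itself and enters the kernel only when waiter bits are set. A hedged sketch, matching the locking sketch above:

#include <linux/futex.h>
#include <stdatomic.h>
#include <sys/syscall.h>
#include <unistd.h>

/* Fast path: an uncontended release is a plain TID -> 0 cmpxchg.  If
 * FUTEX_WAITERS (or FUTEX_OWNER_DIED) is set, the kernel wakes the top
 * waiter and hands the rt_mutex over - exactly futex_unlock_pi() above. */
static void pi_unlock(atomic_uint *lock_word)
{
	unsigned int expected = (unsigned int)syscall(SYS_gettid);

	if (!atomic_compare_exchange_strong(lock_word, &expected, 0))
		syscall(SYS_futex, lock_word, FUTEX_UNLOCK_PI, 0, NULL,
			NULL, 0);
}
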
 
@@ -2060,7 +2185,7 @@ pi_faulted:
  *
  * Returns
  *  0 - no early wakeup detected
- * <0 - -ETIMEDOUT or -ERESTARTSYS (FIXME: or ERESTARTNOINTR?)
+ * <0 - -ETIMEDOUT, -ERESTARTNOINTR or -EWOULDBLOCK (spurious wakeup)
  */
 static inline
 int handle_early_requeue_pi_wakeup(struct futex_hash_bucket *hb,
@@ -2082,50 +2207,44 @@ int handle_early_requeue_pi_wakeup(struct futex_hash_bucket *hb,
                 * We were woken prior to requeue by a timeout or a signal.
                 * Unqueue the futex_q and determine which it was.
                 */
-               plist_del(&q->list, &q->list.plist);
-               drop_futex_key_refs(&q->key);
+               plist_del(&q->list, &hb->chain);
 
+               /* Handle spurious wakeups gracefully */
+               ret = -EWOULDBLOCK;
                if (timeout && !timeout->task)
                        ret = -ETIMEDOUT;
-               else {
-                       /*
-                        * We expect signal_pending(current), but another
-                        * thread may have handled it for us already.
-                        */
-                       /* FIXME: ERESTARTSYS or ERESTARTNOINTR?  Do we care if
-                        * the user specified SA_RESTART or not? */
-                       ret = -ERESTARTSYS;
-               }
+               else if (signal_pending(current))
+                       ret = -ERESTARTNOINTR;
        }
        return ret;
 }
 
 /**
  * futex_wait_requeue_pi() - Wait on uaddr and take uaddr2
- * @uaddr:     the futex we initialyl wait on (non-pi)
- * @fshared:   whether the futexes are shared (1) or not (0).  They must be
+ * @uaddr:     the futex we initially wait on (non-pi)
+ * @flags:     futex flags (FLAGS_SHARED, FLAGS_CLOCKRT, etc.), they must be
  *             the same type, no requeueing from private to shared, etc.
  * @val:       the expected value of uaddr
  * @abs_time:  absolute timeout
- * @bitset:    32 bit wakeup bitset set by userspace, defaults to all.
+ * @bitset:    32 bit wakeup bitset set by userspace, defaults to all
- * @clockrt:   whether to use CLOCK_REALTIME (1) or CLOCK_MONOTONIC (0)
  * @uaddr2:    the pi futex we will take prior to returning to user-space
  *
  * The caller will wait on uaddr and will be requeued by futex_requeue() to
- * uaddr2 which must be PI aware.  Normal wakeup will wake on uaddr2 and
- * complete the acquisition of the rt_mutex prior to returning to userspace.
- * This ensures the rt_mutex maintains an owner when it has waiters; without
- * one, the pi logic wouldn't know which task to boost/deboost, if there was a
- * need to.
+ * uaddr2 which must be PI aware and unique from uaddr.  Normal wakeup will wake
+ * on uaddr2 and complete the acquisition of the rt_mutex prior to returning to
+ * userspace.  This ensures the rt_mutex maintains an owner when it has waiters;
+ * without one, the pi logic would not know which task to boost/deboost, if
+ * there was a need to.
  *
  * We call schedule in futex_wait_queue_me() when we enqueue and return there
  * via the following:
  * 1) wakeup on uaddr2 after an atomic lock acquisition by futex_requeue()
- * 2) wakeup on uaddr2 after a requeue and subsequent unlock
- * 3) signal (before or after requeue)
- * 4) timeout (before or after requeue)
+ * 2) wakeup on uaddr2 after a requeue
+ * 3) signal
+ * 4) timeout
  *
- * If 3, we setup a restart_block with futex_wait_requeue_pi() as the function.
+ * If 3, cleanup and return -ERESTARTNOINTR.
  *
  * If 2, we may then block on trying to take the rt_mutex and return via:
  * 5) successful lock
@@ -2133,7 +2252,7 @@ int handle_early_requeue_pi_wakeup(struct futex_hash_bucket *hb,
  * 7) timeout
  * 8) other lock acquisition failure
  *
- * If 6, we setup a restart_block with futex_lock_pi() as the function.
+ * If 6, return -EWOULDBLOCK (restarting the syscall would do the same).
  *
  * If 4 or 7, we cleanup and return with -ETIMEDOUT.
  *
@@ -2141,27 +2260,29 @@ int handle_early_requeue_pi_wakeup(struct futex_hash_bucket *hb,
  *  0 - On success
  * <0 - On error
  */
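
The intended consumer of this call pair is a condvar-style construct: waiters sleep on an internal sequence futex and are requeued onto the PI mutex at signal time, so a woken thread returns already owning the lock. A rough user-space sketch under that assumption (hypothetical layout; a real condvar needs error handling and a relock loop):

#include <limits.h>
#include <linux/futex.h>
#include <stdatomic.h>
#include <sys/syscall.h>
#include <unistd.h>

struct cond     { atomic_uint seq; };        /* non-PI futex (uaddr)  */
struct pi_mutex { atomic_uint lock_word; };  /* PI futex (uaddr2)     */

static void cond_wait_sketch(struct cond *c, struct pi_mutex *m)
{
	unsigned int seq = atomic_load(&c->seq);

	/* Caller holds m; release it before sleeping.  Slow path only,
	 * for brevity - the kernel handles the uncontended case too. */
	syscall(SYS_futex, &m->lock_word, FUTEX_UNLOCK_PI, 0, NULL,
		NULL, 0);

	/* Sleep on c->seq; futex_requeue() moves us onto m->lock_word,
	 * and a normal wakeup returns with the PI mutex owned.  If seq
	 * changed in the window above, this fails with EAGAIN. */
	syscall(SYS_futex, &c->seq, FUTEX_WAIT_REQUEUE_PI, seq, NULL,
		&m->lock_word, 0);
}

static void cond_signal_sketch(struct cond *c, struct pi_mutex *m)
{
	atomic_fetch_add(&c->seq, 1);

	/* nr_wake must be 1 for CMP_REQUEUE_PI; the timeout slot carries
	 * nr_requeue (INT_MAX: requeue all remaining waiters), and val3
	 * is the expected value of c->seq. */
	syscall(SYS_futex, &c->seq, FUTEX_CMP_REQUEUE_PI, 1,
		(void *)(long)INT_MAX, &m->lock_word,
		atomic_load(&c->seq));
}
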
-static int futex_wait_requeue_pi(u32 __user *uaddr, int fshared,
+static int futex_wait_requeue_pi(u32 __user *uaddr, unsigned int flags,
                                 u32 val, ktime_t *abs_time, u32 bitset,
-                                int clockrt, u32 __user *uaddr2)
+                                u32 __user *uaddr2)
 {
        struct hrtimer_sleeper timeout, *to = NULL;
        struct rt_mutex_waiter rt_waiter;
        struct rt_mutex *pi_mutex = NULL;
-       struct restart_block *restart;
        struct futex_hash_bucket *hb;
-       union futex_key key2;
-       struct futex_q q;
+       union futex_key key2 = FUTEX_KEY_INIT;
+       struct futex_q q = futex_q_init;
        int res, ret;
-       u32 uval;
+
+       if (uaddr == uaddr2)
+               return -EINVAL;
 
        if (!bitset)
                return -EINVAL;
 
        if (abs_time) {
                to = &timeout;
-               hrtimer_init_on_stack(&to->timer, clockrt ? CLOCK_REALTIME :
-                                     CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
+               hrtimer_init_on_stack(&to->timer, (flags & FLAGS_CLOCKRT) ?
+                                     CLOCK_REALTIME : CLOCK_MONOTONIC,
+                                     HRTIMER_MODE_ABS);
                hrtimer_init_sleeper(to, current);
                hrtimer_set_expires_range_ns(&to->timer, *abs_time,
                                             current->timer_slack_ns);
@@ -2174,21 +2295,21 @@ static int futex_wait_requeue_pi(u32 __user *uaddr, int fshared,
        debug_rt_mutex_init_waiter(&rt_waiter);
        rt_waiter.task = NULL;
 
-       q.pi_state = NULL;
-       q.bitset = bitset;
-       q.rt_waiter = &rt_waiter;
-
-       key2 = FUTEX_KEY_INIT;
-       ret = get_futex_key(uaddr2, fshared, &key2, VERIFY_WRITE);
+       ret = get_futex_key(uaddr2, flags & FLAGS_SHARED, &key2, VERIFY_WRITE);
        if (unlikely(ret != 0))
                goto out;
 
-       /* Prepare to wait on uaddr. */
-       ret = futex_wait_setup(uaddr, val, fshared, &q, &hb);
-       if (ret) {
-               put_futex_key(fshared, &key2);
-               goto out;
-       }
+       q.bitset = bitset;
+       q.rt_waiter = &rt_waiter;
+       q.requeue_pi_key = &key2;
+
+       /*
+        * Prepare to wait on uaddr. On success, increments q.key (key1) ref
+        * count.
+        */
+       ret = futex_wait_setup(uaddr, val, flags, &q, &hb);
+       if (ret)
+               goto out_key2;
 
        /* Queue the futex_q, drop the hb lock, wait for wakeup. */
        futex_wait_queue_me(hb, &q, to);
@@ -2203,7 +2324,9 @@ static int futex_wait_requeue_pi(u32 __user *uaddr, int fshared,
         * In order for us to be here, we know our q.key == key2, and since
         * we took the hb->lock above, we also know that futex_requeue() has
         * completed and we no longer have to concern ourselves with a wakeup
-        * race with the atomic proxy lock acquition by the requeue code.
+        * race with the atomic proxy lock acquisition by the requeue code. The
+        * futex_requeue dropped our key1 reference and incremented our key2
+        * reference count.
         */
 
        /* Check if the requeue code acquired the second futex for us. */
@@ -2214,8 +2337,7 @@ static int futex_wait_requeue_pi(u32 __user *uaddr, int fshared,
                 */
                if (q.pi_state && (q.pi_state->owner != current)) {
                        spin_lock(q.lock_ptr);
-                       ret = fixup_pi_state_owner(uaddr2, &q, current,
-                                                  fshared);
+                       ret = fixup_pi_state_owner(uaddr2, &q, current);
                        spin_unlock(q.lock_ptr);
                }
        } else {
@@ -2224,7 +2346,7 @@ static int futex_wait_requeue_pi(u32 __user *uaddr, int fshared,
                 * signal.  futex_unlock_pi() will not destroy the lock_ptr nor
                 * the pi_state.
                 */
-               WARN_ON(!&q.pi_state);
+               WARN_ON(!q.pi_state);
                pi_mutex = &q.pi_state->pi_mutex;
                ret = rt_mutex_finish_proxy_lock(pi_mutex, to, &rt_waiter, 1);
                debug_rt_mutex_free_waiter(&rt_waiter);
@@ -2234,10 +2356,10 @@ static int futex_wait_requeue_pi(u32 __user *uaddr, int fshared,
                 * Fixup the pi_state owner and possibly acquire the lock if we
                 * haven't already.
                 */
-               res = fixup_owner(uaddr2, fshared, &q, !ret);
+               res = fixup_owner(uaddr2, &q, !ret);
                /*
                 * If fixup_owner() returned an error, propagate that.  If it
-                * acquired the lock, clear our -ETIMEDOUT or -EINTR.
+                * acquired the lock, clear -ETIMEDOUT or -EINTR.
                 */
                if (res)
                        ret = (res < 0) ? res : 0;
@@ -2251,38 +2373,23 @@ static int futex_wait_requeue_pi(u32 __user *uaddr, int fshared,
         * fault, unlock the rt_mutex and return the fault to userspace.
         */
        if (ret == -EFAULT) {
-               if (rt_mutex_owner(pi_mutex) == current)
+               if (pi_mutex && rt_mutex_owner(pi_mutex) == current)
                        rt_mutex_unlock(pi_mutex);
        } else if (ret == -EINTR) {
-               ret = -EFAULT;
-               if (get_user(uval, uaddr2))
-                       goto out_put_keys;
-
                /*
-                * We've already been requeued, so restart by calling
-                * futex_lock_pi() directly, rather then returning to this
-                * function.
+                * We've already been requeued, but cannot restart by calling
+                * futex_lock_pi() directly. We could restart this syscall, but
+                * it would detect that the user space "val" changed and return
+                * -EWOULDBLOCK.  Save the overhead of the restart and return
+                * -EWOULDBLOCK directly.
                 */
-               ret = -ERESTART_RESTARTBLOCK;
-               restart = &current_thread_info()->restart_block;
-               restart->fn = futex_lock_pi_restart;
-               restart->futex.uaddr = (u32 *)uaddr2;
-               restart->futex.val = uval;
-               restart->futex.flags = 0;
-               if (abs_time) {
-                       restart->futex.flags |= FLAGS_HAS_TIMEOUT;
-                       restart->futex.time = abs_time->tv64;
-               }
-
-               if (fshared)
-                       restart->futex.flags |= FLAGS_SHARED;
-               if (clockrt)
-                       restart->futex.flags |= FLAGS_CLOCKRT;
+               ret = -EWOULDBLOCK;
        }
 
 out_put_keys:
-       put_futex_key(fshared, &q.key);
-       put_futex_key(fshared, &key2);
+       put_futex_key(&q.key);
+out_key2:
+       put_futex_key(&key2);
 
 out:
        if (to) {
@@ -2308,9 +2415,9 @@ out:
  */
 
 /**
- * sys_set_robust_list - set the robust-futex list head of a task
- * @head: pointer to the list-head
- * @len: length of the list-head, as userspace expects
+ * sys_set_robust_list() - Set the robust-futex list head of a task
+ * @head:      pointer to the list-head
+ * @len:       length of the list-head, as userspace expects
  */
 SYSCALL_DEFINE2(set_robust_list, struct robust_list_head __user *, head,
                size_t, len)
@@ -2329,10 +2436,10 @@ SYSCALL_DEFINE2(set_robust_list, struct robust_list_head __user *, head,
 }
 
 /**
- * sys_get_robust_list - get the robust-futex list head of a task
- * @pid: pid of the process [zero for current task]
- * @head_ptr: pointer to a list-head pointer, the kernel fills it in
- * @len_ptr: pointer to a length field, the kernel fills in the header size
+ * sys_get_robust_list() - Get the robust-futex list head of a task
+ * @pid:       pid of the process [zero for current task]
+ * @head_ptr:  pointer to a list-head pointer, the kernel fills it in
+ * @len_ptr:   pointer to a length field, the kernel fills in the header size
  */
 SYSCALL_DEFINE3(get_robust_list, int, pid,
                struct robust_list_head __user * __user *, head_ptr,
@@ -2340,31 +2447,31 @@ SYSCALL_DEFINE3(get_robust_list, int, pid,
 {
        struct robust_list_head __user *head;
        unsigned long ret;
-       const struct cred *cred = current_cred(), *pcred;
+       struct task_struct *p;
 
        if (!futex_cmpxchg_enabled)
                return -ENOSYS;
 
+       WARN_ONCE(1, "deprecated: get_robust_list will be deleted in 2013.\n");
+
+       rcu_read_lock();
+
+       ret = -ESRCH;
        if (!pid)
-               head = current->robust_list;
+               p = current;
        else {
-               struct task_struct *p;
-
-               ret = -ESRCH;
-               rcu_read_lock();
                p = find_task_by_vpid(pid);
                if (!p)
                        goto err_unlock;
-               ret = -EPERM;
-               pcred = __task_cred(p);
-               if (cred->euid != pcred->euid &&
-                   cred->euid != pcred->uid &&
-                   !capable(CAP_SYS_PTRACE))
-                       goto err_unlock;
-               head = p->robust_list;
-               rcu_read_unlock();
        }
 
+       ret = -EPERM;
+       if (!ptrace_may_access(p, PTRACE_MODE_READ))
+               goto err_unlock;
+
+       head = p->robust_list;
+       rcu_read_unlock();
+
        if (put_user(sizeof(*head), len_ptr))
                return -EFAULT;
        return put_user(head, head_ptr);
@@ -2381,7 +2488,7 @@ err_unlock:
  */
 int handle_futex_death(u32 __user *uaddr, struct task_struct *curr, int pi)
 {
-       u32 uval, nval, mval;
+       u32 uval, uninitialized_var(nval), mval;
 
 retry:
        if (get_user(uval, uaddr))
@@ -2399,11 +2506,20 @@ retry:
                 * userspace.
                 */
                mval = (uval & FUTEX_WAITERS) | FUTEX_OWNER_DIED;
-               nval = futex_atomic_cmpxchg_inatomic(uaddr, uval, mval);
-
-               if (nval == -EFAULT)
-                       return -1;
-
+               /*
+                * We are not holding a lock here, but we want to have
+                * the pagefault_disable/enable() protection because
+                * we want to handle the fault gracefully. If the
+                * access fails we try to fault in the futex with R/W
+                * verification via get_user_pages. get_user() above
+                * does not guarantee R/W access. If that fails we
+                * give up and leave the futex locked.
+                */
+               if (cmpxchg_futex_value_locked(&nval, uaddr, uval, mval)) {
+                       if (fault_in_user_writeable(uaddr))
+                               return -1;
+                       goto retry;
+               }
                if (nval != uval)
                        goto retry;
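
handle_futex_death() only has work to do because user space registered a robust list earlier; the kernel walks that list at exit and tags each still-held lock word, as above. A hedged sketch of the registration side (struct layout from <linux/futex.h>; error handling elided):

#include <linux/futex.h>
#include <stddef.h>
#include <sys/syscall.h>
#include <unistd.h>

/* One head per thread; exit_robust_list() walks it when the thread dies
 * and marks every still-held lock word with FUTEX_OWNER_DIED. */
static __thread struct robust_list_head rlist;

static int register_robust_list(void)
{
	rlist.list.next = &rlist.list;	/* empty circular list        */
	rlist.futex_offset = 0;		/* lock word offset in a node */
	rlist.list_op_pending = NULL;
	return (int)syscall(SYS_set_robust_list, &rlist, sizeof(rlist));
}

/* The next acquirer sees FUTEX_OWNER_DIED in the lock word (glibc
 * surfaces this as EOWNERDEAD) and must repair the protected state
 * before clearing the bit. */
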
 
@@ -2422,7 +2538,7 @@ retry:
  */
 static inline int fetch_robust_entry(struct robust_list __user **entry,
                                     struct robust_list __user * __user *head,
-                                    int *pi)
+                                    unsigned int *pi)
 {
        unsigned long uentry;
 
@@ -2445,7 +2561,8 @@ void exit_robust_list(struct task_struct *curr)
 {
        struct robust_list_head __user *head = curr->robust_list;
        struct robust_list __user *entry, *next_entry, *pending;
-       unsigned int limit = ROBUST_LIST_LIMIT, pi, next_pi, pip;
+       unsigned int limit = ROBUST_LIST_LIMIT, pi, pip;
+       unsigned int uninitialized_var(next_pi);
        unsigned long futex_offset;
        int rc;
 
@@ -2506,63 +2623,57 @@ void exit_robust_list(struct task_struct *curr)
 long do_futex(u32 __user *uaddr, int op, u32 val, ktime_t *timeout,
                u32 __user *uaddr2, u32 val2, u32 val3)
 {
-       int clockrt, ret = -ENOSYS;
        int cmd = op & FUTEX_CMD_MASK;
-       int fshared = 0;
+       unsigned int flags = 0;
 
        if (!(op & FUTEX_PRIVATE_FLAG))
-               fshared = 1;
+               flags |= FLAGS_SHARED;
 
-       clockrt = op & FUTEX_CLOCK_REALTIME;
-       if (clockrt && cmd != FUTEX_WAIT_BITSET && cmd != FUTEX_WAIT_REQUEUE_PI)
-               return -ENOSYS;
+       if (op & FUTEX_CLOCK_REALTIME) {
+               flags |= FLAGS_CLOCKRT;
+               if (cmd != FUTEX_WAIT_BITSET && cmd != FUTEX_WAIT_REQUEUE_PI)
+                       return -ENOSYS;
+       }
+
+       switch (cmd) {
+       case FUTEX_LOCK_PI:
+       case FUTEX_UNLOCK_PI:
+       case FUTEX_TRYLOCK_PI:
+       case FUTEX_WAIT_REQUEUE_PI:
+       case FUTEX_CMP_REQUEUE_PI:
+               if (!futex_cmpxchg_enabled)
+                       return -ENOSYS;
+       }
 
        switch (cmd) {
        case FUTEX_WAIT:
                val3 = FUTEX_BITSET_MATCH_ANY;
        case FUTEX_WAIT_BITSET:
-               ret = futex_wait(uaddr, fshared, val, timeout, val3, clockrt);
-               break;
+               return futex_wait(uaddr, flags, val, timeout, val3);
        case FUTEX_WAKE:
                val3 = FUTEX_BITSET_MATCH_ANY;
        case FUTEX_WAKE_BITSET:
-               ret = futex_wake(uaddr, fshared, val, val3);
-               break;
+               return futex_wake(uaddr, flags, val, val3);
        case FUTEX_REQUEUE:
-               ret = futex_requeue(uaddr, fshared, uaddr2, val, val2, NULL, 0);
-               break;
+               return futex_requeue(uaddr, flags, uaddr2, val, val2, NULL, 0);
        case FUTEX_CMP_REQUEUE:
-               ret = futex_requeue(uaddr, fshared, uaddr2, val, val2, &val3,
-                                   0);
-               break;
+               return futex_requeue(uaddr, flags, uaddr2, val, val2, &val3, 0);
        case FUTEX_WAKE_OP:
-               ret = futex_wake_op(uaddr, fshared, uaddr2, val, val2, val3);
-               break;
+               return futex_wake_op(uaddr, flags, uaddr2, val, val2, val3);
        case FUTEX_LOCK_PI:
-               if (futex_cmpxchg_enabled)
-                       ret = futex_lock_pi(uaddr, fshared, val, timeout, 0);
-               break;
+               return futex_lock_pi(uaddr, flags, val, timeout, 0);
        case FUTEX_UNLOCK_PI:
-               if (futex_cmpxchg_enabled)
-                       ret = futex_unlock_pi(uaddr, fshared);
-               break;
+               return futex_unlock_pi(uaddr, flags);
        case FUTEX_TRYLOCK_PI:
-               if (futex_cmpxchg_enabled)
-                       ret = futex_lock_pi(uaddr, fshared, 0, timeout, 1);
-               break;
+               return futex_lock_pi(uaddr, flags, 0, timeout, 1);
        case FUTEX_WAIT_REQUEUE_PI:
                val3 = FUTEX_BITSET_MATCH_ANY;
-               ret = futex_wait_requeue_pi(uaddr, fshared, val, timeout, val3,
-                                           clockrt, uaddr2);
-               break;
+               return futex_wait_requeue_pi(uaddr, flags, val, timeout, val3,
+                                            uaddr2);
        case FUTEX_CMP_REQUEUE_PI:
-               ret = futex_requeue(uaddr, fshared, uaddr2, val, val2, &val3,
-                                   1);
-               break;
-       default:
-               ret = -ENOSYS;
+               return futex_requeue(uaddr, flags, uaddr2, val, val2, &val3, 1);
        }
-       return ret;
+       return -ENOSYS;
 }
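
The two modifier bits stripped off at the top of do_futex() arrive from user space as part of the op word; everything under FUTEX_CMD_MASK selects the operation. For illustration, a process-private wake composes as follows (hedged wrapper, not from this file):

#include <linux/futex.h>
#include <stdint.h>
#include <sys/syscall.h>
#include <unistd.h>

/* FUTEX_PRIVATE_FLAG keeps FLAGS_SHARED clear, so the futex key is
 * derived from the mm rather than from the backing inode. */
static long futex_wake_private(uint32_t *uaddr, int nr_wake)
{
	return syscall(SYS_futex, uaddr,
		       FUTEX_WAKE | FUTEX_PRIVATE_FLAG,
		       nr_wake, NULL, NULL, 0);
}
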
 
 
@@ -2611,15 +2722,14 @@ static int __init futex_init(void)
         * of the complex code paths. Also we want to prevent
         * registration of robust lists in that case. NULL is
         * guaranteed to fault and we get -EFAULT on functional
-        * implementation, the non functional ones will return
+        * implementation, the non-functional ones will return
         * -ENOSYS.
         */
-       curval = cmpxchg_futex_value_locked(NULL, 0, 0);
-       if (curval == -EFAULT)
+       if (cmpxchg_futex_value_locked(&curval, NULL, 0, 0) == -EFAULT)
                futex_cmpxchg_enabled = 1;
 
        for (i = 0; i < ARRAY_SIZE(futex_queues); i++) {
-               plist_head_init(&futex_queues[i].chain, &futex_queues[i].lock);
+               plist_head_init(&futex_queues[i].chain);
                spin_lock_init(&futex_queues[i].lock);
        }