diff --git a/kernel/futex.c b/kernel/futex.c
index 237f14b..fe28dc2 100644
@@ -772,6 +772,24 @@ retry:
        return ret;
 }
 
+/**
+ * __unqueue_futex() - Remove the futex_q from its futex_hash_bucket
+ * @q: The futex_q to unqueue
+ *
+ * The q->lock_ptr must not be NULL and must be held by the caller.
+ */
+static void __unqueue_futex(struct futex_q *q)
+{
+       struct futex_hash_bucket *hb;
+
+       if (WARN_ON_SMP(!q->lock_ptr || !spin_is_locked(q->lock_ptr))
+           || WARN_ON(plist_node_empty(&q->list)))
+               return;
+
+       hb = container_of(q->lock_ptr, struct futex_hash_bucket, lock);
+       plist_del(&q->list, &hb->chain);
+}
+
 /*
  * The hash bucket lock must be held when this is called.
  * Afterwards, the futex_q must not be accessed.
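
Rather than taking the hash bucket as an argument, __unqueue_futex() recovers it from q->lock_ptr with container_of(), since the lock it points at is embedded in the bucket. A minimal userspace sketch of that idiom (the structure names are illustrative stand-ins, not the kernel's futex definitions):

#include <stdio.h>
#include <stddef.h>

/* Userspace stand-in for the kernel's container_of() macro. */
#define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

/* Illustrative types: a bucket embedding a lock, and a waiter pointing at it. */
struct hash_bucket {
        int nr_waiters;
        int lock;                       /* stands in for spinlock_t */
};

struct waiter {
        int *lock_ptr;                  /* &bucket->lock, like futex_q::lock_ptr */
};

int main(void)
{
        struct hash_bucket hb = { .nr_waiters = 1 };
        struct waiter q = { .lock_ptr = &hb.lock };

        /* Recover the enclosing bucket from the lock pointer alone. */
        struct hash_bucket *found =
                container_of(q.lock_ptr, struct hash_bucket, lock);

        printf("recovered %p, expected %p\n", (void *)found, (void *)&hb);
        return 0;
}

This is what lets the call sites below drop their open-coded plist_del(&q->list, &q->list.plist) sequences.
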
@@ -789,7 +807,7 @@ static void wake_futex(struct futex_q *q)
         */
        get_task_struct(p);
 
-       plist_del(&q->list, &q->list.plist);
+       __unqueue_futex(q);
        /*
         * The waiting task can free the futex_q as soon as
         * q->lock_ptr = NULL is written, without taking any locks. A
@@ -1064,9 +1082,6 @@ void requeue_futex(struct futex_q *q, struct futex_hash_bucket *hb1,
                plist_del(&q->list, &hb1->chain);
                plist_add(&q->list, &hb2->chain);
                q->lock_ptr = &hb2->lock;
-#ifdef CONFIG_DEBUG_PI_LIST
-               q->list.plist.spinlock = &hb2->lock;
-#endif
        }
        get_futex_key_refs(key2);
        q->key = *key2;
@@ -1093,16 +1108,12 @@ void requeue_pi_wake_futex(struct futex_q *q, union futex_key *key,
        get_futex_key_refs(key);
        q->key = *key;
 
-       WARN_ON(plist_node_empty(&q->list));
-       plist_del(&q->list, &q->list.plist);
+       __unqueue_futex(q);
 
        WARN_ON(!q->rt_waiter);
        q->rt_waiter = NULL;
 
        q->lock_ptr = &hb->lock;
-#ifdef CONFIG_DEBUG_PI_LIST
-       q->list.plist.spinlock = &hb->lock;
-#endif
 
        wake_up_state(q->task, TASK_NORMAL);
 }
@@ -1450,9 +1461,6 @@ static inline void queue_me(struct futex_q *q, struct futex_hash_bucket *hb)
        prio = min(current->normal_prio, MAX_RT_PRIO);
 
        plist_node_init(&q->list, prio);
-#ifdef CONFIG_DEBUG_PI_LIST
-       q->list.plist.spinlock = &hb->lock;
-#endif
        plist_add(&q->list, &hb->chain);
        q->task = current;
        spin_unlock(&hb->lock);
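
queue_me() keys each waiter's plist node on min(current->normal_prio, MAX_RT_PRIO), so real-time tasks sort ahead of normal ones and waiters of equal priority stay FIFO. A minimal userspace sketch of such a priority-ordered insert, using a plain sorted list as a stand-in for the kernel's plist:

#include <stdio.h>

struct waiter {
        int prio;                       /* lower value = higher priority */
        struct waiter *next;
};

/* Insert in ascending prio order, after existing entries of equal prio (FIFO). */
static void enqueue(struct waiter **head, struct waiter *w)
{
        while (*head && (*head)->prio <= w->prio)
                head = &(*head)->next;
        w->next = *head;
        *head = w;
}

int main(void)
{
        struct waiter a = { .prio = 120 }, b = { .prio = 10 }, c = { .prio = 120 };
        struct waiter *head = NULL;

        enqueue(&head, &a);
        enqueue(&head, &b);
        enqueue(&head, &c);

        for (struct waiter *w = head; w; w = w->next)
                printf("prio %d (%c)\n", w->prio,
                       w == &a ? 'a' : w == &b ? 'b' : 'c');
        /* wakes b (RT, prio 10) first, then a before c (FIFO within prio 120) */
        return 0;
}
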
@@ -1497,8 +1505,7 @@ retry:
                        spin_unlock(lock_ptr);
                        goto retry;
                }
-               WARN_ON(plist_node_empty(&q->list));
-               plist_del(&q->list, &q->list.plist);
+               __unqueue_futex(q);
 
                BUG_ON(q->pi_state);
 
@@ -1518,8 +1525,7 @@ retry:
 static void unqueue_me_pi(struct futex_q *q)
        __releases(q->lock_ptr)
 {
-       WARN_ON(plist_node_empty(&q->list));
-       plist_del(&q->list, &q->list.plist);
+       __unqueue_futex(q);
 
        BUG_ON(!q->pi_state);
        free_pi_state(q->pi_state);
@@ -1549,10 +1555,10 @@ static int fixup_pi_state_owner(u32 __user *uaddr, struct futex_q *q,
 
        /*
         * We are here either because we stole the rtmutex from the
-        * pending owner or we are the pending owner which failed to
-        * get the rtmutex. We have to replace the pending owner TID
-        * in the user space variable. This must be atomic as we have
-        * to preserve the owner died bit here.
+        * previous highest priority waiter or we are the highest priority
+        * waiter but failed to get the rtmutex the first time.
+        * We have to replace the newowner TID in the user space variable.
+        * This must be atomic as we have to preserve the owner died bit here.
         *
         * Note: We write the user space value _before_ changing the pi_state
         * because we can fault here. Imagine swapped out pages or a fork
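
The update described in this comment keeps the owner-died bit while swapping in the new owner's TID, retrying if the word changes underneath. A minimal userspace sketch of that pattern, using GCC's __atomic builtins in place of cmpxchg_futex_value_locked() and a hypothetical set_new_owner() helper; the bit values follow the futex ABI in <linux/futex.h>:

#include <stdint.h>
#include <stdio.h>

/* Futex word bits, as defined by the futex ABI. */
#define FUTEX_WAITERS           0x80000000u
#define FUTEX_OWNER_DIED        0x40000000u
#define FUTEX_TID_MASK          0x3fffffffu

/*
 * Hypothetical helper: store a new owner TID in *uaddr while preserving
 * the OWNER_DIED bit, retrying if another task modifies the word first.
 */
static void set_new_owner(uint32_t *uaddr, uint32_t newtid)
{
        uint32_t uval = __atomic_load_n(uaddr, __ATOMIC_RELAXED);
        uint32_t newval;

        do {
                newval = (uval & FUTEX_OWNER_DIED) | FUTEX_WAITERS |
                         (newtid & FUTEX_TID_MASK);
        } while (!__atomic_compare_exchange_n(uaddr, &uval, newval, 0,
                                              __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST));
}

int main(void)
{
        /* Previous owner died holding the lock; its TID was 1234. */
        uint32_t futex_word = FUTEX_OWNER_DIED | 1234;

        set_new_owner(&futex_word, 5678);
        printf("0x%08x\n", (unsigned int)futex_word); /* owner-died bit survives */
        return 0;
}
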
@@ -1599,8 +1605,8 @@ retry:
 
        /*
         * To handle the page fault we need to drop the hash bucket
-        * lock here. That gives the other task (either the pending
-        * owner itself or the task which stole the rtmutex) the
+        * lock here. That gives the other task (either the highest priority
+        * waiter itself or the task which stole the rtmutex) the
         * chance to try the fixup of the pi_state. So once we are
         * back from handling the fault we need to check the pi_state
         * after reacquiring the hash bucket lock and before trying to
@@ -1676,18 +1682,20 @@ static int fixup_owner(u32 __user *uaddr, struct futex_q *q, int locked)
                /*
                 * pi_state is incorrect, some other task did a lock steal and
                 * we returned due to timeout or signal without taking the
-                * rt_mutex. Too late. We can access the rt_mutex_owner without
-                * locking, as the other task is now blocked on the hash bucket
-                * lock. Fix the state up.
+                * rt_mutex. Too late.
                 */
+               raw_spin_lock(&q->pi_state->pi_mutex.wait_lock);
                owner = rt_mutex_owner(&q->pi_state->pi_mutex);
+               if (!owner)
+                       owner = rt_mutex_next_owner(&q->pi_state->pi_mutex);
+               raw_spin_unlock(&q->pi_state->pi_mutex.wait_lock);
                ret = fixup_pi_state_owner(uaddr, q, owner);
                goto out;
        }
 
        /*
         * Paranoia check. If we did not take the lock, then we should not be
-        * the owner, nor the pending owner, of the rt_mutex.
+        * the owner of the rt_mutex.
         */
        if (rt_mutex_owner(&q->pi_state->pi_mutex) == current)
                printk(KERN_ERR "fixup_owner: ret = %d pi-mutex: %p "
@@ -1878,7 +1886,7 @@ retry:
        restart->futex.val = val;
        restart->futex.time = abs_time->tv64;
        restart->futex.bitset = bitset;
-       restart->futex.flags = flags;
+       restart->futex.flags = flags | FLAGS_HAS_TIMEOUT;
 
        ret = -ERESTART_RESTARTBLOCK;
 
@@ -2156,7 +2164,7 @@ int handle_early_requeue_pi_wakeup(struct futex_hash_bucket *hb,
                 * We were woken prior to requeue by a timeout or a signal.
                 * Unqueue the futex_q and determine which it was.
                 */
-               plist_del(&q->list, &q->list.plist);
+               plist_del(&q->list, &hb->chain);
 
                /* Handle spurious wakeups gracefully */
                ret = -EWOULDBLOCK;
@@ -2410,10 +2418,19 @@ SYSCALL_DEFINE3(get_robust_list, int, pid,
                        goto err_unlock;
                ret = -EPERM;
                pcred = __task_cred(p);
+               /* If victim is in different user_ns, then uids are not
+                  comparable, so we must have CAP_SYS_PTRACE */
+               if (cred->user->user_ns != pcred->user->user_ns) {
+                       if (!ns_capable(pcred->user->user_ns, CAP_SYS_PTRACE))
+                               goto err_unlock;
+                       goto ok;
+               }
+               /* If victim is in same user_ns, then uids are comparable */
                if (cred->euid != pcred->euid &&
                    cred->euid != pcred->uid &&
-                   !capable(CAP_SYS_PTRACE))
+                   !ns_capable(pcred->user->user_ns, CAP_SYS_PTRACE))
                        goto err_unlock;
+ok:
                head = p->robust_list;
                rcu_read_unlock();
        }
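
The added namespace check boils down to: UIDs are only comparable within one user namespace, otherwise CAP_SYS_PTRACE in the target's namespace is required. A minimal userspace sketch of that decision as a single predicate (illustrative types, not the kernel's struct cred):

#include <stdbool.h>
#include <stdio.h>

/* Illustrative credentials; the kernel uses struct cred and struct user_namespace. */
struct creds {
        unsigned int uid, euid;
        const void *user_ns;            /* identity of the owning user namespace */
        bool ptrace_cap;                /* CAP_SYS_PTRACE in the target's namespace */
};

/* Sketch of the get_robust_list() permission decision after this change. */
static bool may_read_robust_list(const struct creds *caller,
                                 const struct creds *target)
{
        if (caller->user_ns != target->user_ns)
                return caller->ptrace_cap;      /* UIDs are not comparable */

        return caller->euid == target->euid ||
               caller->euid == target->uid ||
               caller->ptrace_cap;
}

int main(void)
{
        int ns1, ns2;
        struct creds caller = { .uid = 0, .euid = 0, .user_ns = &ns1 };
        struct creds victim = { .uid = 0, .euid = 0, .user_ns = &ns2 };

        /* Same UIDs but different namespaces: denied without CAP_SYS_PTRACE. */
        printf("%s\n", may_read_robust_list(&caller, &victim) ? "allowed" : "denied");
        return 0;
}
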
@@ -2452,9 +2469,20 @@ retry:
                 * userspace.
                 */
                mval = (uval & FUTEX_WAITERS) | FUTEX_OWNER_DIED;
-               if (futex_atomic_cmpxchg_inatomic(&nval, uaddr, uval, mval))
-                       return -1;
-
+               /*
+                * We are not holding a lock here, but we want to have
+                * the pagefault_disable/enable() protection because
+                * we want to handle the fault gracefully. If the
+                * access fails we try to fault in the futex with R/W
+                * verification via get_user_pages. get_user() above
+                * does not guarantee R/W access. If that fails we
+                * give up and leave the futex locked.
+                */
+               if (cmpxchg_futex_value_locked(&nval, uaddr, uval, mval)) {
+                       if (fault_in_user_writeable(uaddr))
+                               return -1;
+                       goto retry;
+               }
                if (nval != uval)
                        goto retry;