futex: fix requeue_pi key imbalance
[linux-2.6.git] / kernel/futex.c
1 /*
2  *  Fast Userspace Mutexes (which I call "Futexes!").
3  *  (C) Rusty Russell, IBM 2002
4  *
5  *  Generalized futexes, futex requeueing, misc fixes by Ingo Molnar
6  *  (C) Copyright 2003 Red Hat Inc, All Rights Reserved
7  *
8  *  Removed page pinning, fix privately mapped COW pages and other cleanups
9  *  (C) Copyright 2003, 2004 Jamie Lokier
10  *
11  *  Robust futex support started by Ingo Molnar
12  *  (C) Copyright 2006 Red Hat Inc, All Rights Reserved
13  *  Thanks to Thomas Gleixner for suggestions, analysis and fixes.
14  *
15  *  PI-futex support started by Ingo Molnar and Thomas Gleixner
16  *  Copyright (C) 2006 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
17  *  Copyright (C) 2006 Timesys Corp., Thomas Gleixner <tglx@timesys.com>
18  *
19  *  PRIVATE futexes by Eric Dumazet
20  *  Copyright (C) 2007 Eric Dumazet <dada1@cosmosbay.com>
21  *
22  *  Requeue-PI support by Darren Hart <dvhltc@us.ibm.com>
23  *  Copyright (C) IBM Corporation, 2009
24  *  Thanks to Thomas Gleixner for conceptual design and careful reviews.
25  *
26  *  Thanks to Ben LaHaise for yelling "hashed waitqueues" loudly
27  *  enough at me, Linus for the original (flawed) idea, Matthew
28  *  Kirkwood for proof-of-concept implementation.
29  *
30  *  "The futexes are also cursed."
31  *  "But they come in a choice of three flavours!"
32  *
33  *  This program is free software; you can redistribute it and/or modify
34  *  it under the terms of the GNU General Public License as published by
35  *  the Free Software Foundation; either version 2 of the License, or
36  *  (at your option) any later version.
37  *
38  *  This program is distributed in the hope that it will be useful,
39  *  but WITHOUT ANY WARRANTY; without even the implied warranty of
40  *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
41  *  GNU General Public License for more details.
42  *
43  *  You should have received a copy of the GNU General Public License
44  *  along with this program; if not, write to the Free Software
45  *  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
46  */
47 #include <linux/slab.h>
48 #include <linux/poll.h>
49 #include <linux/fs.h>
50 #include <linux/file.h>
51 #include <linux/jhash.h>
52 #include <linux/init.h>
53 #include <linux/futex.h>
54 #include <linux/mount.h>
55 #include <linux/pagemap.h>
56 #include <linux/syscalls.h>
57 #include <linux/signal.h>
58 #include <linux/module.h>
59 #include <linux/magic.h>
60 #include <linux/pid.h>
61 #include <linux/nsproxy.h>
62
63 #include <asm/futex.h>
64
65 #include "rtmutex_common.h"
66
67 int __read_mostly futex_cmpxchg_enabled;
68
69 #define FUTEX_HASHBITS (CONFIG_BASE_SMALL ? 4 : 8)
70
71 /*
72  * Priority Inheritance state:
73  */
74 struct futex_pi_state {
75         /*
76          * list of 'owned' pi_state instances - these have to be
77          * cleaned up in do_exit() if the task exits prematurely:
78          */
79         struct list_head list;
80
81         /*
82          * The PI object:
83          */
84         struct rt_mutex pi_mutex;
85
86         struct task_struct *owner;
87         atomic_t refcount;
88
89         union futex_key key;
90 };
91
92 /**
93  * struct futex_q - The hashed futex queue entry, one per waiting task
94  * @task:               the task waiting on the futex
95  * @lock_ptr:           the hash bucket lock
96  * @key:                the key the futex is hashed on
97  * @pi_state:           optional priority inheritance state
98  * @rt_waiter:          rt_waiter storage for use with requeue_pi
99  * @requeue_pi_key:     the requeue_pi target futex key
100  * @bitset:             bitset for the optional bitmasked wakeup
101  *
102  * We use this hashed waitqueue, instead of a normal wait_queue_t, so
103  * we can wake only the relevant ones (hashed queues may be shared).
104  *
105  * A futex_q has a woken state, just like tasks have TASK_RUNNING.
106  * It is considered woken when plist_node_empty(&q->list) || q->lock_ptr == 0.
107  * The order of wakeup is always to make the first condition true, then
108  * the second.
109  *
110  * PI futexes are typically woken before they are removed from the hash list via
111  * the rt_mutex code. See unqueue_me_pi().
112  */
113 struct futex_q {
114         struct plist_node list;
115
116         struct task_struct *task;
117         spinlock_t *lock_ptr;
118         union futex_key key;
119         struct futex_pi_state *pi_state;
120         struct rt_mutex_waiter *rt_waiter;
121         union futex_key *requeue_pi_key;
122         u32 bitset;
123 };
124
125 /*
126  * Hash buckets are shared by all the futex_keys that hash to the same
127  * location.  Each key may have multiple futex_q structures, one for each task
128  * waiting on a futex.
129  */
130 struct futex_hash_bucket {
131         spinlock_t lock;
132         struct plist_head chain;
133 };
134
135 static struct futex_hash_bucket futex_queues[1<<FUTEX_HASHBITS];
136
137 /*
138  * We hash on the keys returned from get_futex_key (see below).
139  */
140 static struct futex_hash_bucket *hash_futex(union futex_key *key)
141 {
142         u32 hash = jhash2((u32*)&key->both.word,
143                           (sizeof(key->both.word)+sizeof(key->both.ptr))/4,
144                           key->both.offset);
145         return &futex_queues[hash & ((1 << FUTEX_HASHBITS)-1)];
146 }
147
148 /*
149  * Return 1 if two futex_keys are equal, 0 otherwise.
150  */
151 static inline int match_futex(union futex_key *key1, union futex_key *key2)
152 {
153         return (key1->both.word == key2->both.word
154                 && key1->both.ptr == key2->both.ptr
155                 && key1->both.offset == key2->both.offset);
156 }
157
158 /*
159  * Take a reference to the resource addressed by a key.
160  * Can be called while holding spinlocks.
161  *
162  */
163 static void get_futex_key_refs(union futex_key *key)
164 {
165         if (!key->both.ptr)
166                 return;
167
168         switch (key->both.offset & (FUT_OFF_INODE|FUT_OFF_MMSHARED)) {
169         case FUT_OFF_INODE:
170                 atomic_inc(&key->shared.inode->i_count);
171                 break;
172         case FUT_OFF_MMSHARED:
173                 atomic_inc(&key->private.mm->mm_count);
174                 break;
175         }
176 }
177
178 /*
179  * Drop a reference to the resource addressed by a key.
180  * The hash bucket spinlock must not be held.
181  */
182 static void drop_futex_key_refs(union futex_key *key)
183 {
184         if (!key->both.ptr) {
185                 /* If we're here then we tried to put a key we failed to get */
186                 WARN_ON_ONCE(1);
187                 return;
188         }
189
190         switch (key->both.offset & (FUT_OFF_INODE|FUT_OFF_MMSHARED)) {
191         case FUT_OFF_INODE:
192                 iput(key->shared.inode);
193                 break;
194         case FUT_OFF_MMSHARED:
195                 mmdrop(key->private.mm);
196                 break;
197         }
198 }
199
200 /**
201  * get_futex_key() - Get parameters which are the keys for a futex
202  * @uaddr:      virtual address of the futex
203  * @fshared:    0 for a PROCESS_PRIVATE futex, 1 for PROCESS_SHARED
204  * @key:        address where result is stored.
205  * @rw:         mapping needs to be read/write (values: VERIFY_READ,
206  *              VERIFY_WRITE)
207  *
208  * Returns a negative error code or 0.
209  * The key words are stored in *key on success.
210  *
211  * For shared mappings, it's (page->index, vma->vm_file->f_path.dentry->d_inode,
212  * offset_within_page).  For private mappings, it's (uaddr, current->mm).
213  * We can usually work out the index without swapping in the page.
214  *
215  * lock_page() might sleep, the caller should not hold a spinlock.
216  */
217 static int
218 get_futex_key(u32 __user *uaddr, int fshared, union futex_key *key, int rw)
219 {
220         unsigned long address = (unsigned long)uaddr;
221         struct mm_struct *mm = current->mm;
222         struct page *page;
223         int err;
224
225         /*
226          * The futex address must be "naturally" aligned.
227          */
228         key->both.offset = address % PAGE_SIZE;
229         if (unlikely((address % sizeof(u32)) != 0))
230                 return -EINVAL;
231         address -= key->both.offset;
232
233         /*
234          * PROCESS_PRIVATE futexes are fast.
235          * As the mm cannot disappear under us and the 'key' only needs
236          * virtual address, we don't even have to find the underlying vma.
237          * Note: We do have to check that 'uaddr' is a valid user address,
238          *       but access_ok() should be faster than find_vma().
239          */
240         if (!fshared) {
241                 if (unlikely(!access_ok(rw, uaddr, sizeof(u32))))
242                         return -EFAULT;
243                 key->private.mm = mm;
244                 key->private.address = address;
245                 get_futex_key_refs(key);
246                 return 0;
247         }
248
249 again:
250         err = get_user_pages_fast(address, 1, rw == VERIFY_WRITE, &page);
251         if (err < 0)
252                 return err;
253
254         page = compound_head(page);
255         lock_page(page);
256         if (!page->mapping) {
257                 unlock_page(page);
258                 put_page(page);
259                 goto again;
260         }
261
262         /*
263          * Private mappings are handled in a simple way.
264          *
265          * NOTE: When userspace waits on a MAP_SHARED mapping, even if
266          * it's a read-only handle, it's expected that futexes attach to
267          * the object not the particular process.
268          */
269         if (PageAnon(page)) {
270                 key->both.offset |= FUT_OFF_MMSHARED; /* ref taken on mm */
271                 key->private.mm = mm;
272                 key->private.address = address;
273         } else {
274                 key->both.offset |= FUT_OFF_INODE; /* inode-based key */
275                 key->shared.inode = page->mapping->host;
276                 key->shared.pgoff = page->index;
277         }
278
279         get_futex_key_refs(key);
280
281         unlock_page(page);
282         put_page(page);
283         return 0;
284 }
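/*
 * Illustrative userspace counterpart (a sketch, not part of this file;
 * assumes the usual mmap()/syscall(2) interfaces): the key type computed
 * above is driven by the mapping.  A futex word in anonymous memory gets
 * a private (mm, address) key, while one in a MAP_SHARED file mapping
 * gets an (inode, pgoff) key and is visible to other processes:
 *
 *	int fd = open("/dev/shm/lockfile", O_RDWR);
 *	uint32_t *shared_futex = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
 *				      MAP_SHARED, fd, 0);
 *	// Must NOT use FUTEX_PRIVATE_FLAG here - waiters may live in other
 *	// processes, so the inode-based key is required.
 *	syscall(SYS_futex, shared_futex, FUTEX_WAIT, 0, NULL, NULL, 0);
 */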
285
286 static inline
287 void put_futex_key(int fshared, union futex_key *key)
288 {
289         drop_futex_key_refs(key);
290 }
291
292 /**
293  * fault_in_user_writeable() - Fault in user address and verify RW access
294  * @uaddr:      pointer to faulting user space address
295  *
296  * Slow path to fixup the fault we just took in the atomic write
297  * access to @uaddr.
298  *
299  * We have no generic implementation of a non destructive write to the
300  * user address. We know that we faulted in the atomic pagefault
301          * disabled section so we might as well avoid the #PF overhead by
302  * calling get_user_pages() right away.
303  */
304 static int fault_in_user_writeable(u32 __user *uaddr)
305 {
306         int ret = get_user_pages(current, current->mm, (unsigned long)uaddr,
307                                  1, 1, 0, NULL, NULL);
308         return ret < 0 ? ret : 0;
309 }
310
311 /**
312  * futex_top_waiter() - Return the highest priority waiter on a futex
313  * @hb:         the hash bucket the futex_q's reside in
314  * @key:        the futex key (to distinguish it from other futexes' futex_q's)
315  *
316  * Must be called with the hb lock held.
317  */
318 static struct futex_q *futex_top_waiter(struct futex_hash_bucket *hb,
319                                         union futex_key *key)
320 {
321         struct futex_q *this;
322
323         plist_for_each_entry(this, &hb->chain, list) {
324                 if (match_futex(&this->key, key))
325                         return this;
326         }
327         return NULL;
328 }
329
330 static u32 cmpxchg_futex_value_locked(u32 __user *uaddr, u32 uval, u32 newval)
331 {
332         u32 curval;
333
334         pagefault_disable();
335         curval = futex_atomic_cmpxchg_inatomic(uaddr, uval, newval);
336         pagefault_enable();
337
338         return curval;
339 }
340
341 static int get_futex_value_locked(u32 *dest, u32 __user *from)
342 {
343         int ret;
344
345         pagefault_disable();
346         ret = __copy_from_user_inatomic(dest, from, sizeof(u32));
347         pagefault_enable();
348
349         return ret ? -EFAULT : 0;
350 }
351
352
353 /*
354  * PI code:
355  */
356 static int refill_pi_state_cache(void)
357 {
358         struct futex_pi_state *pi_state;
359
360         if (likely(current->pi_state_cache))
361                 return 0;
362
363         pi_state = kzalloc(sizeof(*pi_state), GFP_KERNEL);
364
365         if (!pi_state)
366                 return -ENOMEM;
367
368         INIT_LIST_HEAD(&pi_state->list);
369         /* pi_mutex gets initialized later */
370         pi_state->owner = NULL;
371         atomic_set(&pi_state->refcount, 1);
372         pi_state->key = FUTEX_KEY_INIT;
373
374         current->pi_state_cache = pi_state;
375
376         return 0;
377 }
378
379 static struct futex_pi_state * alloc_pi_state(void)
380 {
381         struct futex_pi_state *pi_state = current->pi_state_cache;
382
383         WARN_ON(!pi_state);
384         current->pi_state_cache = NULL;
385
386         return pi_state;
387 }
388
389 static void free_pi_state(struct futex_pi_state *pi_state)
390 {
391         if (!atomic_dec_and_test(&pi_state->refcount))
392                 return;
393
394         /*
395          * If pi_state->owner is NULL, the owner is most probably dying
396          * and has cleaned up the pi_state already
397          */
398         if (pi_state->owner) {
399                 spin_lock_irq(&pi_state->owner->pi_lock);
400                 list_del_init(&pi_state->list);
401                 spin_unlock_irq(&pi_state->owner->pi_lock);
402
403                 rt_mutex_proxy_unlock(&pi_state->pi_mutex, pi_state->owner);
404         }
405
406         if (current->pi_state_cache)
407                 kfree(pi_state);
408         else {
409                 /*
410                  * pi_state->list is already empty.
411                  * clear pi_state->owner.
412                  * refcount is at 0 - put it back to 1.
413                  */
414                 pi_state->owner = NULL;
415                 atomic_set(&pi_state->refcount, 1);
416                 current->pi_state_cache = pi_state;
417         }
418 }
419
420 /*
421  * Look up the task based on what TID userspace gave us.
422  * We don't trust it.
423  */
424 static struct task_struct * futex_find_get_task(pid_t pid)
425 {
426         struct task_struct *p;
427         const struct cred *cred = current_cred(), *pcred;
428
429         rcu_read_lock();
430         p = find_task_by_vpid(pid);
431         if (!p) {
432                 p = ERR_PTR(-ESRCH);
433         } else {
434                 pcred = __task_cred(p);
435                 if (cred->euid != pcred->euid &&
436                     cred->euid != pcred->uid)
437                         p = ERR_PTR(-ESRCH);
438                 else
439                         get_task_struct(p);
440         }
441
442         rcu_read_unlock();
443
444         return p;
445 }
446
447 /*
448  * This task is holding PI mutexes at exit time => bad.
449  * Kernel cleans up PI-state, but userspace is likely hosed.
450  * (Robust-futex cleanup is separate and might save the day for userspace.)
451  */
452 void exit_pi_state_list(struct task_struct *curr)
453 {
454         struct list_head *next, *head = &curr->pi_state_list;
455         struct futex_pi_state *pi_state;
456         struct futex_hash_bucket *hb;
457         union futex_key key = FUTEX_KEY_INIT;
458
459         if (!futex_cmpxchg_enabled)
460                 return;
461         /*
462          * We are a ZOMBIE and nobody can enqueue itself on
463          * pi_state_list anymore, but we have to be careful
464          * versus waiters unqueueing themselves:
465          */
466         spin_lock_irq(&curr->pi_lock);
467         while (!list_empty(head)) {
468
469                 next = head->next;
470                 pi_state = list_entry(next, struct futex_pi_state, list);
471                 key = pi_state->key;
472                 hb = hash_futex(&key);
473                 spin_unlock_irq(&curr->pi_lock);
474
475                 spin_lock(&hb->lock);
476
477                 spin_lock_irq(&curr->pi_lock);
478                 /*
479                  * We dropped the pi-lock, so re-check whether this
480                  * task still owns the PI-state:
481                  */
482                 if (head->next != next) {
483                         spin_unlock(&hb->lock);
484                         continue;
485                 }
486
487                 WARN_ON(pi_state->owner != curr);
488                 WARN_ON(list_empty(&pi_state->list));
489                 list_del_init(&pi_state->list);
490                 pi_state->owner = NULL;
491                 spin_unlock_irq(&curr->pi_lock);
492
493                 rt_mutex_unlock(&pi_state->pi_mutex);
494
495                 spin_unlock(&hb->lock);
496
497                 spin_lock_irq(&curr->pi_lock);
498         }
499         spin_unlock_irq(&curr->pi_lock);
500 }
501
502 static int
503 lookup_pi_state(u32 uval, struct futex_hash_bucket *hb,
504                 union futex_key *key, struct futex_pi_state **ps)
505 {
506         struct futex_pi_state *pi_state = NULL;
507         struct futex_q *this, *next;
508         struct plist_head *head;
509         struct task_struct *p;
510         pid_t pid = uval & FUTEX_TID_MASK;
511
512         head = &hb->chain;
513
514         plist_for_each_entry_safe(this, next, head, list) {
515                 if (match_futex(&this->key, key)) {
516                         /*
517                          * Another waiter already exists - bump up
518                          * the refcount and return its pi_state:
519                          */
520                         pi_state = this->pi_state;
521                         /*
522                          * Userspace might have messed up non PI and PI futexes
523                          */
524                         if (unlikely(!pi_state))
525                                 return -EINVAL;
526
527                         WARN_ON(!atomic_read(&pi_state->refcount));
528                         WARN_ON(pid && pi_state->owner &&
529                                 pi_state->owner->pid != pid);
530
531                         atomic_inc(&pi_state->refcount);
532                         *ps = pi_state;
533
534                         return 0;
535                 }
536         }
537
538         /*
539          * We are the first waiter - try to look up the real owner and attach
540          * the new pi_state to it, but bail out when TID = 0
541          */
542         if (!pid)
543                 return -ESRCH;
544         p = futex_find_get_task(pid);
545         if (IS_ERR(p))
546                 return PTR_ERR(p);
547
548         /*
549          * We need to look at the task state flags to figure out
550          * whether the task is exiting. To protect against the do_exit
551          * change of the task flags, we do this protected by
552          * p->pi_lock:
553          */
554         spin_lock_irq(&p->pi_lock);
555         if (unlikely(p->flags & PF_EXITING)) {
556                 /*
557                  * The task is on the way out. When PF_EXITPIDONE is
558                  * set, we know that the task has finished the
559                  * cleanup:
560                  */
561                 int ret = (p->flags & PF_EXITPIDONE) ? -ESRCH : -EAGAIN;
562
563                 spin_unlock_irq(&p->pi_lock);
564                 put_task_struct(p);
565                 return ret;
566         }
567
568         pi_state = alloc_pi_state();
569
570         /*
571          * Initialize the pi_mutex in locked state and make 'p'
572          * the owner of it:
573          */
574         rt_mutex_init_proxy_locked(&pi_state->pi_mutex, p);
575
576         /* Store the key for possible exit cleanups: */
577         pi_state->key = *key;
578
579         WARN_ON(!list_empty(&pi_state->list));
580         list_add(&pi_state->list, &p->pi_state_list);
581         pi_state->owner = p;
582         spin_unlock_irq(&p->pi_lock);
583
584         put_task_struct(p);
585
586         *ps = pi_state;
587
588         return 0;
589 }
590
591 /**
592  * futex_lock_pi_atomic() - Atomic work required to acquire a pi aware futex
593  * @uaddr:              the pi futex user address
594  * @hb:                 the pi futex hash bucket
595  * @key:                the futex key associated with uaddr and hb
596  * @ps:                 the pi_state pointer where we store the result of the
597  *                      lookup
598  * @task:               the task to perform the atomic lock work for.  This will
599  *                      be "current" except in the case of requeue pi.
600  * @set_waiters:        force setting the FUTEX_WAITERS bit (1) or not (0)
601  *
602  * Returns:
603  *  0 - ready to wait
604  *  1 - acquired the lock
605  * <0 - error
606  *
607  * The hb->lock and futex_key refs shall be held by the caller.
608  */
609 static int futex_lock_pi_atomic(u32 __user *uaddr, struct futex_hash_bucket *hb,
610                                 union futex_key *key,
611                                 struct futex_pi_state **ps,
612                                 struct task_struct *task, int set_waiters)
613 {
614         int lock_taken, ret, ownerdied = 0;
615         u32 uval, newval, curval;
616
617 retry:
618         ret = lock_taken = 0;
619
620         /*
621          * To avoid races, we attempt to take the lock here again
622          * (by doing a 0 -> TID atomic cmpxchg), while holding all
623          * the locks. It will most likely not succeed.
624          */
625         newval = task_pid_vnr(task);
626         if (set_waiters)
627                 newval |= FUTEX_WAITERS;
628
629         curval = cmpxchg_futex_value_locked(uaddr, 0, newval);
630
631         if (unlikely(curval == -EFAULT))
632                 return -EFAULT;
633
634         /*
635          * Detect deadlocks.
636          */
637         if ((unlikely((curval & FUTEX_TID_MASK) == task_pid_vnr(task))))
638                 return -EDEADLK;
639
640         /*
641          * Surprise - we got the lock. Just return to userspace:
642          */
643         if (unlikely(!curval))
644                 return 1;
645
646         uval = curval;
647
648         /*
649          * Set the FUTEX_WAITERS flag, so the owner will know it has someone
650          * to wake at the next unlock.
651          */
652         newval = curval | FUTEX_WAITERS;
653
654         /*
655          * There are two cases where we take over the futex: the owner
656          * TID is 0 (the futex has no owner), or 'ownerdied' is set
657          * because we detected that the previous owner died. In both
658          * cases the take over is unconditional.
659          *
660          * This is safe as we are protected by the hash bucket lock!
661          */
662         if (unlikely(ownerdied || !(curval & FUTEX_TID_MASK))) {
663                 /* Keep the OWNER_DIED bit */
664                 newval = (curval & ~FUTEX_TID_MASK) | task_pid_vnr(task);
665                 ownerdied = 0;
666                 lock_taken = 1;
667         }
668
669         curval = cmpxchg_futex_value_locked(uaddr, uval, newval);
670
671         if (unlikely(curval == -EFAULT))
672                 return -EFAULT;
673         if (unlikely(curval != uval))
674                 goto retry;
675
676         /*
677          * We took the lock due to an owner-died take over.
678          */
679         if (unlikely(lock_taken))
680                 return 1;
681
682         /*
683          * We don't have the lock. Look up the PI state (or create it if
684          * we are the first waiter):
685          */
686         ret = lookup_pi_state(uval, hb, key, ps);
687
688         if (unlikely(ret)) {
689                 switch (ret) {
690                 case -ESRCH:
691                         /*
692                          * No owner found for this futex. Check if the
693                          * OWNER_DIED bit is set to figure out whether
694                          * this is a robust futex or not.
695                          */
696                         if (get_futex_value_locked(&curval, uaddr))
697                                 return -EFAULT;
698
699                         /*
700                          * We simply start over in case of a robust
701                          * futex. The code above will take the futex
702                          * and return happy.
703                          */
704                         if (curval & FUTEX_OWNER_DIED) {
705                                 ownerdied = 1;
706                                 goto retry;
707                         }
708                 default:
709                         break;
710                 }
711         }
712
713         return ret;
714 }
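/*
 * For reference, a minimal userspace sketch of the PI-futex locking
 * protocol whose slow path lands in futex_lock_pi_atomic() (illustrative
 * only; assumes the raw syscall(2) interface and gcc atomic builtins):
 *
 *	pid_t tid = syscall(SYS_gettid);
 *	// fast path: uncontended 0 -> TID cmpxchg, no kernel entry
 *	if (!__sync_bool_compare_and_swap(futex, 0, tid))
 *		// slow path: queue in the kernel and boost the owner
 *		syscall(SYS_futex, futex, FUTEX_LOCK_PI, 0, NULL, NULL, 0);
 */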
715
716 /*
717  * The hash bucket lock must be held when this is called.
718  * Afterwards, the futex_q must not be accessed.
719  */
720 static void wake_futex(struct futex_q *q)
721 {
722         struct task_struct *p = q->task;
723
724         /*
725          * We set q->lock_ptr = NULL _before_ we wake up the task. If
726          * a non-futex wakeup happens on another CPU then the task
727          * might exit and p would dereference a non-existing task
728          * struct. Prevent this by holding a reference on p across the
729          * wake up.
730          */
731         get_task_struct(p);
732
733         plist_del(&q->list, &q->list.plist);
734         /*
735          * The waiting task can free the futex_q as soon as
736          * q->lock_ptr = NULL is written, without taking any locks. A
737          * memory barrier is required here to prevent the following
738          * store to lock_ptr from getting ahead of the plist_del.
739          */
740         smp_wmb();
741         q->lock_ptr = NULL;
742
743         wake_up_state(p, TASK_NORMAL);
744         put_task_struct(p);
745 }
746
747 static int wake_futex_pi(u32 __user *uaddr, u32 uval, struct futex_q *this)
748 {
749         struct task_struct *new_owner;
750         struct futex_pi_state *pi_state = this->pi_state;
751         u32 curval, newval;
752
753         if (!pi_state)
754                 return -EINVAL;
755
756         spin_lock(&pi_state->pi_mutex.wait_lock);
757         new_owner = rt_mutex_next_owner(&pi_state->pi_mutex);
758
759         /*
760          * This happens when we have stolen the lock and the original
761          * pending owner did not enqueue itself back on the rt_mutex.
762          * That's not a tragedy. That way we know that a lock waiter
763          * is in flight. We make the futex_q waiter the pending owner.
764          */
765         if (!new_owner)
766                 new_owner = this->task;
767
768         /*
769          * We pass it to the next owner. (The WAITERS bit is always
770          * kept enabled while there is PI state around. We must also
771          * preserve the owner died bit.)
772          */
773         if (!(uval & FUTEX_OWNER_DIED)) {
774                 int ret = 0;
775
776                 newval = FUTEX_WAITERS | task_pid_vnr(new_owner);
777
778                 curval = cmpxchg_futex_value_locked(uaddr, uval, newval);
779
780                 if (curval == -EFAULT)
781                         ret = -EFAULT;
782                 else if (curval != uval)
783                         ret = -EINVAL;
784                 if (ret) {
785                         spin_unlock(&pi_state->pi_mutex.wait_lock);
786                         return ret;
787                 }
788         }
789
790         spin_lock_irq(&pi_state->owner->pi_lock);
791         WARN_ON(list_empty(&pi_state->list));
792         list_del_init(&pi_state->list);
793         spin_unlock_irq(&pi_state->owner->pi_lock);
794
795         spin_lock_irq(&new_owner->pi_lock);
796         WARN_ON(!list_empty(&pi_state->list));
797         list_add(&pi_state->list, &new_owner->pi_state_list);
798         pi_state->owner = new_owner;
799         spin_unlock_irq(&new_owner->pi_lock);
800
801         spin_unlock(&pi_state->pi_mutex.wait_lock);
802         rt_mutex_unlock(&pi_state->pi_mutex);
803
804         return 0;
805 }
806
807 static int unlock_futex_pi(u32 __user *uaddr, u32 uval)
808 {
809         u32 oldval;
810
811         /*
812          * There is no waiter, so we unlock the futex. The owner-died
813          * bit does not need to be preserved here; we are the owner:
814          */
815         oldval = cmpxchg_futex_value_locked(uaddr, uval, 0);
816
817         if (oldval == -EFAULT)
818                 return oldval;
819         if (oldval != uval)
820                 return -EAGAIN;
821
822         return 0;
823 }
824
825 /*
826  * Express the locking dependencies for lockdep:
827  */
828 static inline void
829 double_lock_hb(struct futex_hash_bucket *hb1, struct futex_hash_bucket *hb2)
830 {
831         if (hb1 <= hb2) {
832                 spin_lock(&hb1->lock);
833                 if (hb1 < hb2)
834                         spin_lock_nested(&hb2->lock, SINGLE_DEPTH_NESTING);
835         } else { /* hb1 > hb2 */
836                 spin_lock(&hb2->lock);
837                 spin_lock_nested(&hb1->lock, SINGLE_DEPTH_NESTING);
838         }
839 }
840
841 static inline void
842 double_unlock_hb(struct futex_hash_bucket *hb1, struct futex_hash_bucket *hb2)
843 {
844         spin_unlock(&hb1->lock);
845         if (hb1 != hb2)
846                 spin_unlock(&hb2->lock);
847 }
848
849 /*
850  * Wake up waiters matching bitset queued on this futex (uaddr).
851  */
852 static int futex_wake(u32 __user *uaddr, int fshared, int nr_wake, u32 bitset)
853 {
854         struct futex_hash_bucket *hb;
855         struct futex_q *this, *next;
856         struct plist_head *head;
857         union futex_key key = FUTEX_KEY_INIT;
858         int ret;
859
860         if (!bitset)
861                 return -EINVAL;
862
863         ret = get_futex_key(uaddr, fshared, &key, VERIFY_READ);
864         if (unlikely(ret != 0))
865                 goto out;
866
867         hb = hash_futex(&key);
868         spin_lock(&hb->lock);
869         head = &hb->chain;
870
871         plist_for_each_entry_safe(this, next, head, list) {
872                 if (match_futex(&this->key, &key)) {
873                         if (this->pi_state || this->rt_waiter) {
874                                 ret = -EINVAL;
875                                 break;
876                         }
877
878                         /* Check if one of the bits is set in both bitsets */
879                         if (!(this->bitset & bitset))
880                                 continue;
881
882                         wake_futex(this);
883                         if (++ret >= nr_wake)
884                                 break;
885                 }
886         }
887
888         spin_unlock(&hb->lock);
889         put_futex_key(fshared, &key);
890 out:
891         return ret;
892 }
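/*
 * Illustrative userspace counterpart (a sketch, not part of this file):
 * a waiter blocks with FUTEX_WAIT only while the futex word still holds
 * the expected value, and the waker changes the word before waking:
 *
 *	// waiter: sleep until the flag becomes non-zero
 *	while (*(volatile uint32_t *)futex == 0)
 *		syscall(SYS_futex, futex, FUTEX_WAIT_PRIVATE, 0,
 *			NULL, NULL, 0);
 *
 *	// waker: publish the new value, then wake one waiter
 *	*(volatile uint32_t *)futex = 1;
 *	syscall(SYS_futex, futex, FUTEX_WAKE_PRIVATE, 1, NULL, NULL, 0);
 */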
893
894 /*
895  * Operate on uaddr2 atomically, wake up to nr_wake waiters on uaddr1 and,
896  * if the operation result satisfies the comparison, up to nr_wake2 on uaddr2:
897  */
898 static int
899 futex_wake_op(u32 __user *uaddr1, int fshared, u32 __user *uaddr2,
900               int nr_wake, int nr_wake2, int op)
901 {
902         union futex_key key1 = FUTEX_KEY_INIT, key2 = FUTEX_KEY_INIT;
903         struct futex_hash_bucket *hb1, *hb2;
904         struct plist_head *head;
905         struct futex_q *this, *next;
906         int ret, op_ret;
907
908 retry:
909         ret = get_futex_key(uaddr1, fshared, &key1, VERIFY_READ);
910         if (unlikely(ret != 0))
911                 goto out;
912         ret = get_futex_key(uaddr2, fshared, &key2, VERIFY_WRITE);
913         if (unlikely(ret != 0))
914                 goto out_put_key1;
915
916         hb1 = hash_futex(&key1);
917         hb2 = hash_futex(&key2);
918
919 retry_private:
920         double_lock_hb(hb1, hb2);
921         op_ret = futex_atomic_op_inuser(op, uaddr2);
922         if (unlikely(op_ret < 0)) {
923
924                 double_unlock_hb(hb1, hb2);
925
926 #ifndef CONFIG_MMU
927                 /*
928                  * we don't get EFAULT from MMU faults if we don't have an MMU,
929                  * but we might get them from range checking
930                  */
931                 ret = op_ret;
932                 goto out_put_keys;
933 #endif
934
935                 if (unlikely(op_ret != -EFAULT)) {
936                         ret = op_ret;
937                         goto out_put_keys;
938                 }
939
940                 ret = fault_in_user_writeable(uaddr2);
941                 if (ret)
942                         goto out_put_keys;
943
944                 if (!fshared)
945                         goto retry_private;
946
947                 put_futex_key(fshared, &key2);
948                 put_futex_key(fshared, &key1);
949                 goto retry;
950         }
951
952         head = &hb1->chain;
953
954         plist_for_each_entry_safe(this, next, head, list) {
955                 if (match_futex(&this->key, &key1)) {
956                         wake_futex(this);
957                         if (++ret >= nr_wake)
958                                 break;
959                 }
960         }
961
962         if (op_ret > 0) {
963                 head = &hb2->chain;
964
965                 op_ret = 0;
966                 plist_for_each_entry_safe(this, next, head, list) {
967                         if (match_futex(&this->key, &key2)) {
968                                 wake_futex(this);
969                                 if (++op_ret >= nr_wake2)
970                                         break;
971                         }
972                 }
973                 ret += op_ret;
974         }
975
976         double_unlock_hb(hb1, hb2);
977 out_put_keys:
978         put_futex_key(fshared, &key2);
979 out_put_key1:
980         put_futex_key(fshared, &key1);
981 out:
982         return ret;
983 }
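/*
 * Illustrative userspace call (a sketch of how glibc-style condvars have
 * used FUTEX_WAKE_OP; names are examples): atomically set *uaddr2 to 0,
 * wake one waiter on uaddr1, and additionally wake up to INT_MAX waiters
 * on uaddr2 if the old value of *uaddr2 was greater than 1:
 *
 *	syscall(SYS_futex, uaddr1, FUTEX_WAKE_OP, 1, INT_MAX, uaddr2,
 *		FUTEX_OP(FUTEX_OP_SET, 0, FUTEX_OP_CMP_GT, 1));
 */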
984
985 /**
986  * requeue_futex() - Requeue a futex_q from one hb to another
987  * @q:          the futex_q to requeue
988  * @hb1:        the source hash_bucket
989  * @hb2:        the target hash_bucket
990  * @key2:       the new key for the requeued futex_q
991  */
992 static inline
993 void requeue_futex(struct futex_q *q, struct futex_hash_bucket *hb1,
994                    struct futex_hash_bucket *hb2, union futex_key *key2)
995 {
996
997         /*
998          * If key1 and key2 hash to the same bucket, no need to
999          * requeue.
1000          */
1001         if (likely(&hb1->chain != &hb2->chain)) {
1002                 plist_del(&q->list, &hb1->chain);
1003                 plist_add(&q->list, &hb2->chain);
1004                 q->lock_ptr = &hb2->lock;
1005 #ifdef CONFIG_DEBUG_PI_LIST
1006                 q->list.plist.lock = &hb2->lock;
1007 #endif
1008         }
1009         get_futex_key_refs(key2);
1010         q->key = *key2;
1011 }
1012
1013 /**
1014  * requeue_pi_wake_futex() - Wake a task that acquired the lock during requeue
1015  * @q:          the futex_q
1016  * @key:        the key of the requeue target futex
1017  * @hb:         the hash_bucket of the requeue target futex
1018  *
1019  * During futex_requeue, with requeue_pi=1, it is possible to acquire the
1020  * target futex if it is uncontended or via a lock steal.  Set the futex_q key
1021  * to the requeue target futex so the waiter can detect the wakeup on the right
1022  * futex, but remove it from the hb and NULL the rt_waiter so it can detect
1023  * atomic lock acquisition.  Set the q->lock_ptr to the requeue target hb->lock
1024  * to protect access to the pi_state to fixup the owner later.  Must be called
1025  * with both q->lock_ptr and hb->lock held.
1026  */
1027 static inline
1028 void requeue_pi_wake_futex(struct futex_q *q, union futex_key *key,
1029                            struct futex_hash_bucket *hb)
1030 {
1031         drop_futex_key_refs(&q->key);
1032         get_futex_key_refs(key);
1033         q->key = *key;
1034
1035         WARN_ON(plist_node_empty(&q->list));
1036         plist_del(&q->list, &q->list.plist);
1037
1038         WARN_ON(!q->rt_waiter);
1039         q->rt_waiter = NULL;
1040
1041         q->lock_ptr = &hb->lock;
1042 #ifdef CONFIG_DEBUG_PI_LIST
1043         q->list.plist.lock = &hb->lock;
1044 #endif
1045
1046         wake_up_state(q->task, TASK_NORMAL);
1047 }
1048
1049 /**
1050  * futex_proxy_trylock_atomic() - Attempt an atomic lock for the top waiter
1051  * @pifutex:            the user address of the to futex
1052  * @hb1:                the from futex hash bucket, must be locked by the caller
1053  * @hb2:                the to futex hash bucket, must be locked by the caller
1054  * @key1:               the from futex key
1055  * @key2:               the to futex key
1056  * @ps:                 address to store the pi_state pointer
1057  * @set_waiters:        force setting the FUTEX_WAITERS bit (1) or not (0)
1058  *
1059  * Try and get the lock on behalf of the top waiter if we can do it atomically.
1060  * Wake the top waiter if we succeed.  If the caller specified set_waiters,
1061  * then direct futex_lock_pi_atomic() to force setting the FUTEX_WAITERS bit.
1062  * hb1 and hb2 must be held by the caller.
1063  *
1064  * Returns:
1065  *  0 - failed to acquire the lock atomically
1066  *  1 - acquired the lock
1067  * <0 - error
1068  */
1069 static int futex_proxy_trylock_atomic(u32 __user *pifutex,
1070                                  struct futex_hash_bucket *hb1,
1071                                  struct futex_hash_bucket *hb2,
1072                                  union futex_key *key1, union futex_key *key2,
1073                                  struct futex_pi_state **ps, int set_waiters)
1074 {
1075         struct futex_q *top_waiter = NULL;
1076         u32 curval;
1077         int ret;
1078
1079         if (get_futex_value_locked(&curval, pifutex))
1080                 return -EFAULT;
1081
1082         /*
1083          * Find the top_waiter and determine if there are additional waiters.
1084          * If the caller intends to requeue more than 1 waiter to pifutex,
1085          * force futex_lock_pi_atomic() to set the FUTEX_WAITERS bit now,
1086          * as we have means to handle the possible fault.  If not, don't set
1087  * the bit unnecessarily as it will force the subsequent unlock to enter
1088          * the kernel.
1089          */
1090         top_waiter = futex_top_waiter(hb1, key1);
1091
1092         /* There are no waiters, nothing for us to do. */
1093         if (!top_waiter)
1094                 return 0;
1095
1096         /* Ensure we requeue to the expected futex. */
1097         if (!match_futex(top_waiter->requeue_pi_key, key2))
1098                 return -EINVAL;
1099
1100         /*
1101          * Try to take the lock for top_waiter.  Set the FUTEX_WAITERS bit in
1102          * the contended case or if set_waiters is 1.  The pi_state is returned
1103          * in ps in contended cases.
1104          */
1105         ret = futex_lock_pi_atomic(pifutex, hb2, key2, ps, top_waiter->task,
1106                                    set_waiters);
1107         if (ret == 1)
1108                 requeue_pi_wake_futex(top_waiter, key2, hb2);
1109
1110         return ret;
1111 }
1112
1113 /**
1114  * futex_requeue() - Requeue waiters from uaddr1 to uaddr2
1115  * @uaddr1:     source futex user address
1116  * @uaddr2:     target futex user address
1117  * @nr_wake:    number of waiters to wake (must be 1 for requeue_pi)
1118  * @nr_requeue: number of waiters to requeue (0-INT_MAX)
1119  * @requeue_pi: if we are attempting to requeue from a non-pi futex to a
1120  *              pi futex (pi to pi requeue is not supported)
1121  *
1122  * Requeue waiters on uaddr1 to uaddr2. In the requeue_pi case, try to acquire
1123  * uaddr2 atomically on behalf of the top waiter.
1124  *
1125  * Returns:
1126  * >=0 - on success, the number of tasks requeued or woken
1127  *  <0 - on error
1128  */
1129 static int futex_requeue(u32 __user *uaddr1, int fshared, u32 __user *uaddr2,
1130                          int nr_wake, int nr_requeue, u32 *cmpval,
1131                          int requeue_pi)
1132 {
1133         union futex_key key1 = FUTEX_KEY_INIT, key2 = FUTEX_KEY_INIT;
1134         int drop_count = 0, task_count = 0, ret;
1135         struct futex_pi_state *pi_state = NULL;
1136         struct futex_hash_bucket *hb1, *hb2;
1137         struct plist_head *head1;
1138         struct futex_q *this, *next;
1139         u32 curval2;
1140
1141         if (requeue_pi) {
1142                 /*
1143                  * requeue_pi requires a pi_state, try to allocate it now
1144                  * without any locks in case it fails.
1145                  */
1146                 if (refill_pi_state_cache())
1147                         return -ENOMEM;
1148                 /*
1149                  * requeue_pi must wake as many tasks as it can, up to nr_wake
1150                  * + nr_requeue, since it acquires the rt_mutex prior to
1151                  * returning to userspace, so as to not leave the rt_mutex with
1152                  * waiters and no owner.  However, second and third wake-ups
1153                  * cannot be predicted as they involve race conditions with the
1154                  * first wake and a fault while looking up the pi_state.  Both
1155                  * pthread_cond_signal() and pthread_cond_broadcast() should
1156                  * use nr_wake=1.
1157                  */
1158                 if (nr_wake != 1)
1159                         return -EINVAL;
1160         }
1161
1162 retry:
1163         if (pi_state != NULL) {
1164                 /*
1165                  * We will have to lookup the pi_state again, so free this one
1166                  * to keep the accounting correct.
1167                  */
1168                 free_pi_state(pi_state);
1169                 pi_state = NULL;
1170         }
1171
1172         ret = get_futex_key(uaddr1, fshared, &key1, VERIFY_READ);
1173         if (unlikely(ret != 0))
1174                 goto out;
1175         ret = get_futex_key(uaddr2, fshared, &key2,
1176                             requeue_pi ? VERIFY_WRITE : VERIFY_READ);
1177         if (unlikely(ret != 0))
1178                 goto out_put_key1;
1179
1180         hb1 = hash_futex(&key1);
1181         hb2 = hash_futex(&key2);
1182
1183 retry_private:
1184         double_lock_hb(hb1, hb2);
1185
1186         if (likely(cmpval != NULL)) {
1187                 u32 curval;
1188
1189                 ret = get_futex_value_locked(&curval, uaddr1);
1190
1191                 if (unlikely(ret)) {
1192                         double_unlock_hb(hb1, hb2);
1193
1194                         ret = get_user(curval, uaddr1);
1195                         if (ret)
1196                                 goto out_put_keys;
1197
1198                         if (!fshared)
1199                                 goto retry_private;
1200
1201                         put_futex_key(fshared, &key2);
1202                         put_futex_key(fshared, &key1);
1203                         goto retry;
1204                 }
1205                 if (curval != *cmpval) {
1206                         ret = -EAGAIN;
1207                         goto out_unlock;
1208                 }
1209         }
1210
1211         if (requeue_pi && (task_count - nr_wake < nr_requeue)) {
1212                 /*
1213                  * Attempt to acquire uaddr2 and wake the top waiter. If we
1214                  * intend to requeue waiters, force setting the FUTEX_WAITERS
1215                  * bit.  We force this here where we are able to easily handle
1216          * faults rather than in the requeue loop below.
1217                  */
1218                 ret = futex_proxy_trylock_atomic(uaddr2, hb1, hb2, &key1,
1219                                                  &key2, &pi_state, nr_requeue);
1220
1221                 /*
1222                  * At this point the top_waiter has either taken uaddr2 or is
1223                  * waiting on it.  If the former, then the pi_state will not
1224                  * exist yet, look it up one more time to ensure we have a
1225                  * reference to it.
1226                  */
1227                 if (ret == 1) {
1228                         WARN_ON(pi_state);
1229                         task_count++;
1230                         ret = get_futex_value_locked(&curval2, uaddr2);
1231                         if (!ret)
1232                                 ret = lookup_pi_state(curval2, hb2, &key2,
1233                                                       &pi_state);
1234                 }
1235
1236                 switch (ret) {
1237                 case 0:
1238                         break;
1239                 case -EFAULT:
1240                         double_unlock_hb(hb1, hb2);
1241                         put_futex_key(fshared, &key2);
1242                         put_futex_key(fshared, &key1);
1243                         ret = fault_in_user_writeable(uaddr2);
1244                         if (!ret)
1245                                 goto retry;
1246                         goto out;
1247                 case -EAGAIN:
1248                         /* The owner was exiting, try again. */
1249                         double_unlock_hb(hb1, hb2);
1250                         put_futex_key(fshared, &key2);
1251                         put_futex_key(fshared, &key1);
1252                         cond_resched();
1253                         goto retry;
1254                 default:
1255                         goto out_unlock;
1256                 }
1257         }
1258
1259         head1 = &hb1->chain;
1260         plist_for_each_entry_safe(this, next, head1, list) {
1261                 if (task_count - nr_wake >= nr_requeue)
1262                         break;
1263
1264                 if (!match_futex(&this->key, &key1))
1265                         continue;
1266
1267                 /*
1268                  * FUTEX_WAIT_REQUEUE_PI and FUTEX_CMP_REQUEUE_PI should always
1269                  * be paired with each other and no other futex ops.
1270                  */
1271                 if ((requeue_pi && !this->rt_waiter) ||
1272                     (!requeue_pi && this->rt_waiter)) {
1273                         ret = -EINVAL;
1274                         break;
1275                 }
1276
1277                 /*
1278                  * Wake nr_wake waiters.  For requeue_pi, if we acquired the
1279                  * lock, we already woke the top_waiter.  If not, it will be
1280                  * woken by futex_unlock_pi().
1281                  */
1282                 if (++task_count <= nr_wake && !requeue_pi) {
1283                         wake_futex(this);
1284                         continue;
1285                 }
1286
1287                 /* Ensure we requeue to the expected futex for requeue_pi. */
1288                 if (requeue_pi && !match_futex(this->requeue_pi_key, &key2)) {
1289                         ret = -EINVAL;
1290                         break;
1291                 }
1292
1293                 /*
1294                  * Requeue nr_requeue waiters and possibly one more in the case
1295                  * of requeue_pi if we couldn't acquire the lock atomically.
1296                  */
1297                 if (requeue_pi) {
1298                         /* Prepare the waiter to take the rt_mutex. */
1299                         atomic_inc(&pi_state->refcount);
1300                         this->pi_state = pi_state;
1301                         ret = rt_mutex_start_proxy_lock(&pi_state->pi_mutex,
1302                                                         this->rt_waiter,
1303                                                         this->task, 1);
1304                         if (ret == 1) {
1305                                 /* We got the lock. */
1306                                 requeue_pi_wake_futex(this, &key2, hb2);
1307                                 continue;
1308                         } else if (ret) {
1309                                 /* -EDEADLK */
1310                                 this->pi_state = NULL;
1311                                 free_pi_state(pi_state);
1312                                 goto out_unlock;
1313                         }
1314                 }
1315                 requeue_futex(this, hb1, hb2, &key2);
1316                 drop_count++;
1317         }
1318
1319 out_unlock:
1320         double_unlock_hb(hb1, hb2);
1321
1322         /*
1323          * drop_futex_key_refs() must be called outside the spinlocks. During
1324          * the requeue we moved futex_q's from the hash bucket at key1 to the
1325          * one at key2 and updated their key pointer.  We no longer need to
1326          * hold the references to key1.
1327          */
1328         while (--drop_count >= 0)
1329                 drop_futex_key_refs(&key1);
1330
1331 out_put_keys:
1332         put_futex_key(fshared, &key2);
1333 out_put_key1:
1334         put_futex_key(fshared, &key1);
1335 out:
1336         if (pi_state != NULL)
1337                 free_pi_state(pi_state);
1338         return ret ? ret : task_count;
1339 }
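/*
 * Illustrative userspace call (a sketch, not a complete condvar): wake
 * one waiter on &cond and requeue the rest onto &mutex, provided the
 * condvar value has not changed since the caller sampled it as cond_val:
 *
 *	syscall(SYS_futex, &cond, FUTEX_CMP_REQUEUE, 1, INT_MAX,
 *		&mutex, cond_val);
 *
 * The requeue_pi case has the same shape with FUTEX_CMP_REQUEUE_PI, and
 * the waiters must have blocked with FUTEX_WAIT_REQUEUE_PI on &cond,
 * naming &mutex as the target.
 */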
1340
1341 /* The key must be already stored in q->key. */
1342 static inline struct futex_hash_bucket *queue_lock(struct futex_q *q)
1343 {
1344         struct futex_hash_bucket *hb;
1345
1346         get_futex_key_refs(&q->key);
1347         hb = hash_futex(&q->key);
1348         q->lock_ptr = &hb->lock;
1349
1350         spin_lock(&hb->lock);
1351         return hb;
1352 }
1353
1354 static inline void
1355 queue_unlock(struct futex_q *q, struct futex_hash_bucket *hb)
1356 {
1357         spin_unlock(&hb->lock);
1358         drop_futex_key_refs(&q->key);
1359 }
1360
1361 /**
1362  * queue_me() - Enqueue the futex_q on the futex_hash_bucket
1363  * @q:  The futex_q to enqueue
1364  * @hb: The destination hash bucket
1365  *
1366  * The hb->lock must be held by the caller, and is released here. A call to
1367  * queue_me() is typically paired with exactly one call to unqueue_me().  The
1368  * exceptions involve the PI related operations, which may use unqueue_me_pi()
1369  * or nothing if the unqueue is done as part of the wake process and the unqueue
1370  * state is implicit in the state of the woken task (see futex_wait_requeue_pi() for
1371  * an example).
1372  */
1373 static inline void queue_me(struct futex_q *q, struct futex_hash_bucket *hb)
1374 {
1375         int prio;
1376
1377         /*
1378          * The priority used to register this element is
1379          * - either the real thread-priority for the real-time threads
1380          * (i.e. threads with a priority lower than MAX_RT_PRIO)
1381          * - or MAX_RT_PRIO for non-RT threads.
1382          * Thus, all RT-threads are woken first in priority order, and
1383          * the others are woken last, in FIFO order.
1384          */
1385         prio = min(current->normal_prio, MAX_RT_PRIO);
1386
1387         plist_node_init(&q->list, prio);
1388 #ifdef CONFIG_DEBUG_PI_LIST
1389         q->list.plist.lock = &hb->lock;
1390 #endif
1391         plist_add(&q->list, &hb->chain);
1392         q->task = current;
1393         spin_unlock(&hb->lock);
1394 }
1395
1396 /**
1397  * unqueue_me() - Remove the futex_q from its futex_hash_bucket
1398  * @q:  The futex_q to unqueue
1399  *
1400  * The q->lock_ptr must not be held by the caller. A call to unqueue_me() must
1401  * be paired with exactly one earlier call to queue_me().
1402  *
1403  * Returns:
1404  *   1 - if the futex_q was still queued (and we removed it)
1405  *   0 - if the futex_q was already removed by the waking thread
1406  */
1407 static int unqueue_me(struct futex_q *q)
1408 {
1409         spinlock_t *lock_ptr;
1410         int ret = 0;
1411
1412         /* In the common case we don't take the spinlock, which is nice. */
1413 retry:
1414         lock_ptr = q->lock_ptr;
1415         barrier();
1416         if (lock_ptr != NULL) {
1417                 spin_lock(lock_ptr);
1418                 /*
1419                  * q->lock_ptr can change between reading it and
1420                  * spin_lock(), causing us to take the wrong lock.  This
1421                  * corrects the race condition.
1422                  *
1423                  * Reasoning goes like this: if we have the wrong lock,
1424                  * q->lock_ptr must have changed (maybe several times)
1425                  * between reading it and the spin_lock().  It can
1426                  * change again after the spin_lock() but only if it was
1427                  * already changed before the spin_lock().  It cannot,
1428                  * however, change back to the original value.  Therefore
1429                  * we can detect whether we acquired the correct lock.
1430                  */
1431                 if (unlikely(lock_ptr != q->lock_ptr)) {
1432                         spin_unlock(lock_ptr);
1433                         goto retry;
1434                 }
1435                 WARN_ON(plist_node_empty(&q->list));
1436                 plist_del(&q->list, &q->list.plist);
1437
1438                 BUG_ON(q->pi_state);
1439
1440                 spin_unlock(lock_ptr);
1441                 ret = 1;
1442         }
1443
1444         drop_futex_key_refs(&q->key);
1445         return ret;
1446 }
1447
1448 /*
1449  * PI futexes cannot be requeued and must remove themselves from the
1450  * hash bucket. The hash bucket lock (i.e. lock_ptr) is held on entry
1451  * and dropped here.
1452  */
1453 static void unqueue_me_pi(struct futex_q *q)
1454 {
1455         WARN_ON(plist_node_empty(&q->list));
1456         plist_del(&q->list, &q->list.plist);
1457
1458         BUG_ON(!q->pi_state);
1459         free_pi_state(q->pi_state);
1460         q->pi_state = NULL;
1461
1462         spin_unlock(q->lock_ptr);
1463
1464         drop_futex_key_refs(&q->key);
1465 }
1466
1467 /*
1468  * Fixup the pi_state owner with the new owner.
1469  *
1470  * Must be called with hash bucket lock held and mm->sem held for non
1471  * private futexes.
1472  */
1473 static int fixup_pi_state_owner(u32 __user *uaddr, struct futex_q *q,
1474                                 struct task_struct *newowner, int fshared)
1475 {
1476         u32 newtid = task_pid_vnr(newowner) | FUTEX_WAITERS;
1477         struct futex_pi_state *pi_state = q->pi_state;
1478         struct task_struct *oldowner = pi_state->owner;
1479         u32 uval, curval, newval;
1480         int ret;
1481
1482         /* Owner died? */
1483         if (!pi_state->owner)
1484                 newtid |= FUTEX_OWNER_DIED;
1485
1486         /*
1487          * We are here either because we stole the rtmutex from the
1488          * pending owner or we are the pending owner which failed to
1489          * get the rtmutex. We have to replace the pending owner TID
1490          * in the user space variable. This must be atomic as we have
1491          * to preserve the owner died bit here.
1492          *
1493          * Note: We write the user space value _before_ changing the pi_state
1494          * because we can fault here. Imagine swapped out pages or a fork
1495          * that marked all the anonymous memory readonly for cow.
1496          *
1497          * Modifying pi_state _before_ the user space value would
1498          * leave the pi_state in an inconsistent state when we fault
1499          * here, because we need to drop the hash bucket lock to
1500          * handle the fault. This might be observed in the PID check
1501          * in lookup_pi_state.
1502          */
1503 retry:
1504         if (get_futex_value_locked(&uval, uaddr))
1505                 goto handle_fault;
1506
1507         while (1) {
1508                 newval = (uval & FUTEX_OWNER_DIED) | newtid;
1509
1510                 curval = cmpxchg_futex_value_locked(uaddr, uval, newval);
1511
1512                 if (curval == -EFAULT)
1513                         goto handle_fault;
1514                 if (curval == uval)
1515                         break;
1516                 uval = curval;
1517         }
1518
1519         /*
1520          * We fixed up user space. Now we need to fix the pi_state
1521          * itself.
1522          */
1523         if (pi_state->owner != NULL) {
1524                 spin_lock_irq(&pi_state->owner->pi_lock);
1525                 WARN_ON(list_empty(&pi_state->list));
1526                 list_del_init(&pi_state->list);
1527                 spin_unlock_irq(&pi_state->owner->pi_lock);
1528         }
1529
1530         pi_state->owner = newowner;
1531
1532         spin_lock_irq(&newowner->pi_lock);
1533         WARN_ON(!list_empty(&pi_state->list));
1534         list_add(&pi_state->list, &newowner->pi_state_list);
1535         spin_unlock_irq(&newowner->pi_lock);
1536         return 0;
1537
1538         /*
1539          * To handle the page fault we need to drop the hash bucket
1540          * lock here. That gives the other task (either the pending
1541          * owner itself or the task which stole the rtmutex) the
1542          * chance to try the fixup of the pi_state. So once we are
1543          * back from handling the fault we need to check the pi_state
1544          * after reacquiring the hash bucket lock and before trying to
1545          * do another fixup. When the fixup has been done already we
1546          * simply return.
1547          */
1548 handle_fault:
1549         spin_unlock(q->lock_ptr);
1550
1551         ret = fault_in_user_writeable(uaddr);
1552
1553         spin_lock(q->lock_ptr);
1554
1555         /*
1556          * Check if someone else fixed it for us:
1557          */
1558         if (pi_state->owner != oldowner)
1559                 return 0;
1560
1561         if (ret)
1562                 return ret;
1563
1564         goto retry;
1565 }
1566
1567 /*
1568  * In case we must use restart_block to restart a futex_wait,
1569  * we encode the shared capability, clock type and timeout presence in 'flags':
1570  */
1571 #define FLAGS_SHARED            0x01
1572 #define FLAGS_CLOCKRT           0x02
1573 #define FLAGS_HAS_TIMEOUT       0x04
1574
1575 static long futex_wait_restart(struct restart_block *restart);
1576
1577 /**
1578  * fixup_owner() - Post-lock pi_state and corner case management
1579  * @uaddr:      user address of the futex
1580  * @fshared:    whether the futex is shared (1) or not (0)
1581  * @q:          futex_q (contains pi_state and access to the rt_mutex)
1582  * @locked:     if the attempt to take the rt_mutex succeeded (1) or not (0)
1583  *
1584  * After attempting to lock an rt_mutex, this function is called to cleanup
1585  * the pi_state owner as well as handle race conditions that may allow us to
1586  * acquire the lock. Must be called with the hb lock held.
1587  *
1588  * Returns:
1589  *  1 - success, lock taken
1590  *  0 - success, lock not taken
1591  * <0 - on error (-EFAULT)
1592  */
1593 static int fixup_owner(u32 __user *uaddr, int fshared, struct futex_q *q,
1594                        int locked)
1595 {
1596         struct task_struct *owner;
1597         int ret = 0;
1598
1599         if (locked) {
1600                 /*
1601                  * Got the lock. We might not be the anticipated owner if we
1602                  * did a lock-steal - fix up the PI-state in that case:
1603                  */
1604                 if (q->pi_state->owner != current)
1605                         ret = fixup_pi_state_owner(uaddr, q, current, fshared);
1606                 goto out;
1607         }
1608
1609         /*
1610          * Catch the rare case where the lock was released when we were on the
1611          * way back before we locked the hash bucket.
1612          */
1613         if (q->pi_state->owner == current) {
1614                 /*
1615                  * Try to get the rt_mutex now. This might fail as some other
1616                  * task acquired the rt_mutex after we removed ourselves from the
1617                  * rt_mutex waiters list.
1618                  */
1619                 if (rt_mutex_trylock(&q->pi_state->pi_mutex)) {
1620                         locked = 1;
1621                         goto out;
1622                 }
1623
1624                 /*
1625                  * pi_state is incorrect, some other task did a lock steal and
1626                  * we returned due to timeout or signal without taking the
1627                  * rt_mutex. Too late. We can access the rt_mutex_owner without
1628                  * locking, as the other task is now blocked on the hash bucket
1629                  * lock. Fix the state up.
1630                  */
1631                 owner = rt_mutex_owner(&q->pi_state->pi_mutex);
1632                 ret = fixup_pi_state_owner(uaddr, q, owner, fshared);
1633                 goto out;
1634         }
1635
1636         /*
1637          * Paranoia check. If we did not take the lock, then we should be
1638          * neither the owner nor the pending owner of the rt_mutex.
1639          */
1640         if (rt_mutex_owner(&q->pi_state->pi_mutex) == current)
1641                 printk(KERN_ERR "fixup_owner: ret = %d pi-mutex: %p "
1642                                 "pi-state %p\n", ret,
1643                                 q->pi_state->pi_mutex.owner,
1644                                 q->pi_state->owner);
1645
1646 out:
1647         return ret ? ret : locked;
1648 }
1649
1650 /**
1651  * futex_wait_queue_me() - queue_me() and wait for wakeup, timeout, or signal
1652  * @hb:         the futex hash bucket, must be locked by the caller
1653  * @q:          the futex_q to queue up on
1654  * @timeout:    the prepared hrtimer_sleeper, or null for no timeout
1655  */
1656 static void futex_wait_queue_me(struct futex_hash_bucket *hb, struct futex_q *q,
1657                                 struct hrtimer_sleeper *timeout)
1658 {
1659         set_current_state(TASK_INTERRUPTIBLE);
1660         queue_me(q, hb);
1661
1662         /* Arm the timer */
1663         if (timeout) {
1664                 hrtimer_start_expires(&timeout->timer, HRTIMER_MODE_ABS);
1665                 if (!hrtimer_active(&timeout->timer))
1666                         timeout->task = NULL;
1667         }
1668
1669         /*
1670          * If we have been removed from the hash list, then another task
1671          * has tried to wake us, and we can skip the call to schedule().
1672          */
1673         if (likely(!plist_node_empty(&q->list))) {
1674                 /*
1675                  * If the timer has already expired, current will already be
1676                  * flagged for rescheduling. Only call schedule if there
1677                  * is no timeout, or if it has yet to expire.
1678                  */
1679                 if (!timeout || timeout->task)
1680                         schedule();
1681         }
1682         __set_current_state(TASK_RUNNING);
1683 }
1684
1685 /**
1686  * futex_wait_setup() - Prepare to wait on a futex
1687  * @uaddr:      the futex userspace address
1688  * @val:        the expected value
1689  * @fshared:    whether the futex is shared (1) or not (0)
1690  * @q:          the associated futex_q
1691  * @hb:         storage for hash_bucket pointer to be returned to caller
1692  *
1693  * Set up the futex_q and locate the hash_bucket.  Get the futex value and
1694  * compare it with the expected value.  Handle atomic faults internally.
1695  * Return with the hb lock held and a q.key reference on success, and unlocked
1696  * with no q.key reference on failure.
1697  *
1698  * Returns:
1699  *  0 - uaddr contains val and hb has been locked
1700  * <0 - -EFAULT or -EWOULDBLOCK (uaddr does not contain val) and hb is unlocked
1701  */
1702 static int futex_wait_setup(u32 __user *uaddr, u32 val, int fshared,
1703                            struct futex_q *q, struct futex_hash_bucket **hb)
1704 {
1705         u32 uval;
1706         int ret;
1707
1708         /*
1709          * Access the page AFTER the hash-bucket is locked.
1710          * Order is important:
1711          *
1712          *   Userspace waiter: val = var; if (cond(val)) futex_wait(&var, val);
1713          *   Userspace waker:  if (cond(var)) { var = new; futex_wake(&var); }
1714          *
1715          * The basic logical guarantee of a futex is that it blocks ONLY
1716          * if cond(var) is known to be true at the time of blocking, for
1717          * any cond.  If we queued after testing *uaddr, that would open
1718          * a race condition where we could block indefinitely with
1719          * cond(var) false, which would violate the guarantee.
1720          *
1721          * A consequence is that futex_wait() can return zero and absorb
1722          * a wakeup when *uaddr != val on entry to the syscall.  This is
1723          * rare, but normal.
1724          */
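	/*
	 * Illustrative userspace sketch only (not kernel code) of the
	 * waiter/waker pairing described above, assuming a futex(2) wrapper
	 * around syscall(SYS_futex, ...) and GCC __atomic builtins:
	 *
	 *	// Waiter: block only while *addr still holds 'val'.
	 *	while (__atomic_load_n(addr, __ATOMIC_ACQUIRE) == val)
	 *		futex(addr, FUTEX_WAIT, val, NULL, NULL, 0);
	 *
	 *	// Waker: publish the new value, then wake one waiter.
	 *	__atomic_store_n(addr, newval, __ATOMIC_RELEASE);
	 *	futex(addr, FUTEX_WAKE, 1, NULL, NULL, 0);
	 *
	 * The hb-lock-before-read ordering below is what keeps the
	 * FUTEX_WAIT above from blocking once the waker's store is visible.
	 */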
1725 retry:
1726         q->key = FUTEX_KEY_INIT;
1727         ret = get_futex_key(uaddr, fshared, &q->key, VERIFY_READ);
1728         if (unlikely(ret != 0))
1729                 return ret;
1730
1731 retry_private:
1732         *hb = queue_lock(q);
1733
1734         ret = get_futex_value_locked(&uval, uaddr);
1735
1736         if (ret) {
1737                 queue_unlock(q, *hb);
1738
1739                 ret = get_user(uval, uaddr);
1740                 if (ret)
1741                         goto out;
1742
1743                 if (!fshared)
1744                         goto retry_private;
1745
1746                 put_futex_key(fshared, &q->key);
1747                 goto retry;
1748         }
1749
1750         if (uval != val) {
1751                 queue_unlock(q, *hb);
1752                 ret = -EWOULDBLOCK;
1753         }
1754
1755 out:
1756         if (ret)
1757                 put_futex_key(fshared, &q->key);
1758         return ret;
1759 }
1760
1761 static int futex_wait(u32 __user *uaddr, int fshared,
1762                       u32 val, ktime_t *abs_time, u32 bitset, int clockrt)
1763 {
1764         struct hrtimer_sleeper timeout, *to = NULL;
1765         struct restart_block *restart;
1766         struct futex_hash_bucket *hb;
1767         struct futex_q q;
1768         int ret;
1769
1770         if (!bitset)
1771                 return -EINVAL;
1772
1773         q.pi_state = NULL;
1774         q.bitset = bitset;
1775         q.rt_waiter = NULL;
1776         q.requeue_pi_key = NULL;
1777
1778         if (abs_time) {
1779                 to = &timeout;
1780
1781                 hrtimer_init_on_stack(&to->timer, clockrt ? CLOCK_REALTIME :
1782                                       CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
1783                 hrtimer_init_sleeper(to, current);
1784                 hrtimer_set_expires_range_ns(&to->timer, *abs_time,
1785                                              current->timer_slack_ns);
1786         }
1787
1788         /* Prepare to wait on uaddr. */
1789         ret = futex_wait_setup(uaddr, val, fshared, &q, &hb);
1790         if (ret)
1791                 goto out;
1792
1793         /* queue_me and wait for wakeup, timeout, or a signal. */
1794         futex_wait_queue_me(hb, &q, to);
1795
1796         /* If we were woken (and unqueued), we succeeded, whatever. */
1797         ret = 0;
1798         if (!unqueue_me(&q))
1799                 goto out_put_key;
1800         ret = -ETIMEDOUT;
1801         if (to && !to->task)
1802                 goto out_put_key;
1803
1804         /*
1805          * We expect signal_pending(current), but another thread may
1806          * have handled it for us already.
1807          */
1808         ret = -ERESTARTSYS;
1809         if (!abs_time)
1810                 goto out_put_key;
1811
1812         restart = &current_thread_info()->restart_block;
1813         restart->fn = futex_wait_restart;
1814         restart->futex.uaddr = (u32 *)uaddr;
1815         restart->futex.val = val;
1816         restart->futex.time = abs_time->tv64;
1817         restart->futex.bitset = bitset;
1818         restart->futex.flags = FLAGS_HAS_TIMEOUT;
1819
1820         if (fshared)
1821                 restart->futex.flags |= FLAGS_SHARED;
1822         if (clockrt)
1823                 restart->futex.flags |= FLAGS_CLOCKRT;
1824
1825         ret = -ERESTART_RESTARTBLOCK;
1826
1827 out_put_key:
1828         put_futex_key(fshared, &q.key);
1829 out:
1830         if (to) {
1831                 hrtimer_cancel(&to->timer);
1832                 destroy_hrtimer_on_stack(&to->timer);
1833         }
1834         return ret;
1835 }
1836
1837
1838 static long futex_wait_restart(struct restart_block *restart)
1839 {
1840         u32 __user *uaddr = (u32 __user *)restart->futex.uaddr;
1841         int fshared = 0;
1842         ktime_t t, *tp = NULL;
1843
1844         if (restart->futex.flags & FLAGS_HAS_TIMEOUT) {
1845                 t.tv64 = restart->futex.time;
1846                 tp = &t;
1847         }
1848         restart->fn = do_no_restart_syscall;
1849         if (restart->futex.flags & FLAGS_SHARED)
1850                 fshared = 1;
1851         return (long)futex_wait(uaddr, fshared, restart->futex.val, tp,
1852                                 restart->futex.bitset,
1853                                 restart->futex.flags & FLAGS_CLOCKRT);
1854 }
1855
1856
1857 /*
1858  * Userspace tried a 0 -> TID atomic transition of the futex value
1859  * and failed. The kernel side here does the whole locking operation:
1860  * if there are waiters then it will block, it does PI, etc. (Due to
1861  * races the kernel might see a 0 value of the futex too.)
1862  */
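/*
 * Illustrative only: a sketch of the userspace fastpath that precedes this
 * slowpath, assuming GCC __atomic builtins and a raw syscall(2) invocation
 * (pi_mutex_lock() is a hypothetical helper, not a real API):
 *
 *	int pi_mutex_lock(u32 *uaddr, pid_t tid)
 *	{
 *		u32 zero = 0;
 *
 *		// Fastpath: uncontended 0 -> TID transition, no syscall.
 *		if (__atomic_compare_exchange_n(uaddr, &zero, tid, 0,
 *						__ATOMIC_ACQUIRE, __ATOMIC_RELAXED))
 *			return 0;
 *		// Contended: let the kernel block us with PI boosting.
 *		return syscall(SYS_futex, uaddr, FUTEX_LOCK_PI, 0, NULL, NULL, 0);
 *	}
 */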
1863 static int futex_lock_pi(u32 __user *uaddr, int fshared,
1864                          int detect, ktime_t *time, int trylock)
1865 {
1866         struct hrtimer_sleeper timeout, *to = NULL;
1867         struct futex_hash_bucket *hb;
1868         struct futex_q q;
1869         int res, ret;
1870
1871         if (refill_pi_state_cache())
1872                 return -ENOMEM;
1873
1874         if (time) {
1875                 to = &timeout;
1876                 hrtimer_init_on_stack(&to->timer, CLOCK_REALTIME,
1877                                       HRTIMER_MODE_ABS);
1878                 hrtimer_init_sleeper(to, current);
1879                 hrtimer_set_expires(&to->timer, *time);
1880         }
1881
1882         q.pi_state = NULL;
1883         q.rt_waiter = NULL;
1884         q.requeue_pi_key = NULL;
1885 retry:
1886         q.key = FUTEX_KEY_INIT;
1887         ret = get_futex_key(uaddr, fshared, &q.key, VERIFY_WRITE);
1888         if (unlikely(ret != 0))
1889                 goto out;
1890
1891 retry_private:
1892         hb = queue_lock(&q);
1893
1894         ret = futex_lock_pi_atomic(uaddr, hb, &q.key, &q.pi_state, current, 0);
1895         if (unlikely(ret)) {
1896                 switch (ret) {
1897                 case 1:
1898                         /* We got the lock. */
1899                         ret = 0;
1900                         goto out_unlock_put_key;
1901                 case -EFAULT:
1902                         goto uaddr_faulted;
1903                 case -EAGAIN:
1904                         /*
1905                          * Task is exiting and we just wait for the
1906                          * exit to complete.
1907                          */
1908                         queue_unlock(&q, hb);
1909                         put_futex_key(fshared, &q.key);
1910                         cond_resched();
1911                         goto retry;
1912                 default:
1913                         goto out_unlock_put_key;
1914                 }
1915         }
1916
1917         /*
1918          * Only actually queue now that the atomic ops are done:
1919          */
1920         queue_me(&q, hb);
1921
1922         WARN_ON(!q.pi_state);
1923         /*
1924          * Block on the PI mutex:
1925          */
1926         if (!trylock)
1927                 ret = rt_mutex_timed_lock(&q.pi_state->pi_mutex, to, 1);
1928         else {
1929                 ret = rt_mutex_trylock(&q.pi_state->pi_mutex);
1930                 /* Fixup the trylock return value: */
1931                 ret = ret ? 0 : -EWOULDBLOCK;
1932         }
1933
1934         spin_lock(q.lock_ptr);
1935         /*
1936          * Fixup the pi_state owner and possibly acquire the lock if we
1937          * haven't already.
1938          */
1939         res = fixup_owner(uaddr, fshared, &q, !ret);
1940         /*
1941          * If fixup_owner() returned an error, propagate that.  If it acquired
1942          * the lock, clear our -ETIMEDOUT or -EINTR.
1943          */
1944         if (res)
1945                 ret = (res < 0) ? res : 0;
1946
1947         /*
1948          * If fixup_owner() faulted and was unable to handle the fault, unlock
1949          * it and return the fault to userspace.
1950          */
1951         if (ret && (rt_mutex_owner(&q.pi_state->pi_mutex) == current))
1952                 rt_mutex_unlock(&q.pi_state->pi_mutex);
1953
1954         /* Unqueue and drop the lock */
1955         unqueue_me_pi(&q);
1956
1957         goto out;
1958
1959 out_unlock_put_key:
1960         queue_unlock(&q, hb);
1961
1962 out_put_key:
1963         put_futex_key(fshared, &q.key);
1964 out:
1965         if (to)
1966                 destroy_hrtimer_on_stack(&to->timer);
1967         return ret != -EINTR ? ret : -ERESTARTNOINTR;
1968
1969 uaddr_faulted:
1970         queue_unlock(&q, hb);
1971
1972         ret = fault_in_user_writeable(uaddr);
1973         if (ret)
1974                 goto out_put_key;
1975
1976         if (!fshared)
1977                 goto retry_private;
1978
1979         put_futex_key(fshared, &q.key);
1980         goto retry;
1981 }
1982
1983 /*
1984  * Userspace attempted a TID -> 0 atomic transition, and failed.
1985  * This is the in-kernel slowpath: we look up the PI state (if any),
1986  * and do the rt-mutex unlock.
1987  */
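/*
 * Illustrative counterpart to the lock fastpath sketched above: userspace
 * first attempts the TID -> 0 transition itself and only enters this
 * slowpath when waiter/owner-died bits prevent it (again a hypothetical
 * sketch, not glibc's actual code):
 *
 *	void pi_mutex_unlock(u32 *uaddr, pid_t tid)
 *	{
 *		u32 expected = tid;
 *
 *		// Fastpath: no waiters, release entirely in userspace.
 *		if (__atomic_compare_exchange_n(uaddr, &expected, 0, 0,
 *						__ATOMIC_RELEASE, __ATOMIC_RELAXED))
 *			return;
 *		// FUTEX_WAITERS (and/or FUTEX_OWNER_DIED) set: kernel slowpath.
 *		syscall(SYS_futex, uaddr, FUTEX_UNLOCK_PI, 0, NULL, NULL, 0);
 *	}
 */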
1988 static int futex_unlock_pi(u32 __user *uaddr, int fshared)
1989 {
1990         struct futex_hash_bucket *hb;
1991         struct futex_q *this, *next;
1992         u32 uval;
1993         struct plist_head *head;
1994         union futex_key key = FUTEX_KEY_INIT;
1995         int ret;
1996
1997 retry:
1998         if (get_user(uval, uaddr))
1999                 return -EFAULT;
2000         /*
2001          * We release only a lock we actually own:
2002          */
2003         if ((uval & FUTEX_TID_MASK) != task_pid_vnr(current))
2004                 return -EPERM;
2005
2006         ret = get_futex_key(uaddr, fshared, &key, VERIFY_WRITE);
2007         if (unlikely(ret != 0))
2008                 goto out;
2009
2010         hb = hash_futex(&key);
2011         spin_lock(&hb->lock);
2012
2013         /*
2014          * To avoid races, try to do the TID -> 0 atomic transition
2015          * again. If it succeeds then we can return without waking
2016          * anyone else up:
2017          */
2018         if (!(uval & FUTEX_OWNER_DIED))
2019                 uval = cmpxchg_futex_value_locked(uaddr, task_pid_vnr(current), 0);
2020
2021
2022         if (unlikely(uval == -EFAULT))
2023                 goto pi_faulted;
2024         /*
2025          * Rare case: we managed to release the lock atomically,
2026          * no need to wake anyone else up:
2027          */
2028         if (unlikely(uval == task_pid_vnr(current)))
2029                 goto out_unlock;
2030
2031         /*
2032          * Ok, other tasks may need to be woken up - check waiters
2033          * and do the wakeup if necessary:
2034          */
2035         head = &hb->chain;
2036
2037         plist_for_each_entry_safe(this, next, head, list) {
2038                 if (!match_futex(&this->key, &key))
2039                         continue;
2040                 ret = wake_futex_pi(uaddr, uval, this);
2041                 /*
2042                  * The atomic access to the futex value
2043                  * generated a pagefault, so retry the
2044                  * user-access and the wakeup:
2045                  */
2046                 if (ret == -EFAULT)
2047                         goto pi_faulted;
2048                 goto out_unlock;
2049         }
2050         /*
2051          * No waiters - kernel unlocks the futex:
2052          */
2053         if (!(uval & FUTEX_OWNER_DIED)) {
2054                 ret = unlock_futex_pi(uaddr, uval);
2055                 if (ret == -EFAULT)
2056                         goto pi_faulted;
2057         }
2058
2059 out_unlock:
2060         spin_unlock(&hb->lock);
2061         put_futex_key(fshared, &key);
2062
2063 out:
2064         return ret;
2065
2066 pi_faulted:
2067         spin_unlock(&hb->lock);
2068         put_futex_key(fshared, &key);
2069
2070         ret = fault_in_user_writeable(uaddr);
2071         if (!ret)
2072                 goto retry;
2073
2074         return ret;
2075 }
2076
2077 /**
2078  * handle_early_requeue_pi_wakeup() - Detect early wakeup on the initial futex
2079  * @hb:         the hash_bucket futex_q was originally enqueued on
2080  * @q:          the futex_q woken while waiting to be requeued
2081  * @key2:       the futex_key of the requeue target futex
2082  * @timeout:    the timeout associated with the wait (NULL if none)
2083  *
2084  * Detect if the task was woken on the initial futex as opposed to the requeue
2085  * target futex.  If so, determine if it was a timeout or a signal that caused
2086  * the wakeup and return the appropriate error code to the caller.  Must be
2087  * called with the hb lock held.
2088  *
2089  * Returns:
2090  *  0 - no early wakeup detected
2091  * <0 - -ETIMEDOUT or -ERESTARTNOINTR
2092  */
2093 static inline
2094 int handle_early_requeue_pi_wakeup(struct futex_hash_bucket *hb,
2095                                    struct futex_q *q, union futex_key *key2,
2096                                    struct hrtimer_sleeper *timeout)
2097 {
2098         int ret = 0;
2099
2100         /*
2101          * With the hb lock held, we avoid races while we process the wakeup.
2102          * We only need to hold hb (and not hb2) to ensure atomicity as the
2103          * wakeup code can't change q.key from uaddr to uaddr2 if we hold hb.
2104          * It can't be requeued from uaddr2 to something else since we don't
2105          * support a PI aware source futex for requeue.
2106          */
2107         if (!match_futex(&q->key, key2)) {
2108                 WARN_ON(q->lock_ptr && (&hb->lock != q->lock_ptr));
2109                 /*
2110                  * We were woken prior to requeue by a timeout or a signal.
2111                  * Unqueue the futex_q and determine which it was.
2112                  */
2113                 plist_del(&q->list, &q->list.plist);
2114
2115                 if (timeout && !timeout->task)
2116                         ret = -ETIMEDOUT;
2117                 else
2118                         ret = -ERESTARTNOINTR;
2119         }
2120         return ret;
2121 }
2122
2123 /**
2124  * futex_wait_requeue_pi() - Wait on uaddr and take uaddr2
2125  * @uaddr:      the futex we initially wait on (non-pi)
2126  * @fshared:    whether the futexes are shared (1) or not (0).  They must be
2127  *              the same type, no requeueing from private to shared, etc.
2128  * @val:        the expected value of uaddr
2129  * @abs_time:   absolute timeout
2130  * @bitset:     32 bit wakeup bitset set by userspace, defaults to all
2131  * @clockrt:    whether to use CLOCK_REALTIME (1) or CLOCK_MONOTONIC (0)
2132  * @uaddr2:     the pi futex we will take prior to returning to user-space
2133  *
2134  * The caller will wait on uaddr and will be requeued by futex_requeue() to
2135  * uaddr2 which must be PI aware.  Normal wakeup will wake on uaddr2 and
2136  * complete the acquisition of the rt_mutex prior to returning to userspace.
2137  * This ensures the rt_mutex maintains an owner when it has waiters; without
2138  * one, the pi logic wouldn't know which task to boost/deboost, if there was a
2139  * need to.
2140  *
2141  * We call schedule in futex_wait_queue_me() when we enqueue and return there
2142  * via the following:
2143  * 1) wakeup on uaddr2 after an atomic lock acquisition by futex_requeue()
2144  * 2) wakeup on uaddr2 after a requeue
2145  * 3) signal
2146  * 4) timeout
2147  *
2148  * If 3, clean up and return -ERESTARTNOINTR.
2149  *
2150  * If 2, we may then block on trying to take the rt_mutex and return via:
2151  * 5) successful lock
2152  * 6) signal
2153  * 7) timeout
2154  * 8) other lock acquisition failure
2155  *
2156  * If 6, return -EWOULDBLOCK (restarting the syscall would do the same).
2157  *
2158  * If 4 or 7, we clean up and return with -ETIMEDOUT.
2159  *
2160  * Returns:
2161  *  0 - On success
2162  * <0 - On error
2163  */
2164 static int futex_wait_requeue_pi(u32 __user *uaddr, int fshared,
2165                                  u32 val, ktime_t *abs_time, u32 bitset,
2166                                  int clockrt, u32 __user *uaddr2)
2167 {
2168         struct hrtimer_sleeper timeout, *to = NULL;
2169         struct rt_mutex_waiter rt_waiter;
2170         struct rt_mutex *pi_mutex = NULL;
2171         struct futex_hash_bucket *hb;
2172         union futex_key key2;
2173         struct futex_q q;
2174         int res, ret;
2175
2176         if (!bitset)
2177                 return -EINVAL;
2178
2179         if (abs_time) {
2180                 to = &timeout;
2181                 hrtimer_init_on_stack(&to->timer, clockrt ? CLOCK_REALTIME :
2182                                       CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
2183                 hrtimer_init_sleeper(to, current);
2184                 hrtimer_set_expires_range_ns(&to->timer, *abs_time,
2185                                              current->timer_slack_ns);
2186         }
2187
2188         /*
2189          * The waiter is allocated on our stack, manipulated by the requeue
2190          * code while we sleep on uaddr.
2191          */
2192         debug_rt_mutex_init_waiter(&rt_waiter);
2193         rt_waiter.task = NULL;
2194
2195         key2 = FUTEX_KEY_INIT;
2196         ret = get_futex_key(uaddr2, fshared, &key2, VERIFY_WRITE);
2197         if (unlikely(ret != 0))
2198                 goto out;
2199
2200         q.pi_state = NULL;
2201         q.bitset = bitset;
2202         q.rt_waiter = &rt_waiter;
2203         q.requeue_pi_key = &key2;
2204
2205         /* Prepare to wait on uaddr. */
2206         ret = futex_wait_setup(uaddr, val, fshared, &q, &hb);
2207         if (ret)
2208                 goto out_key2;
2209
2210         /* Queue the futex_q, drop the hb lock, wait for wakeup. */
2211         futex_wait_queue_me(hb, &q, to);
2212
2213         spin_lock(&hb->lock);
2214         ret = handle_early_requeue_pi_wakeup(hb, &q, &key2, to);
2215         spin_unlock(&hb->lock);
2216         if (ret)
2217                 goto out_put_keys;
2218
2219         /*
2220          * In order for us to be here, we know our q.key == key2, and since
2221          * we took the hb->lock above, we also know that futex_requeue() has
2222          * completed and we no longer have to concern ourselves with a wakeup
2223          * race with the atomic proxy lock acquisition by the requeue code.
2224          */
2225
2226         /* Check if the requeue code acquired the second futex for us. */
2227         if (!q.rt_waiter) {
2228                 /*
2229                  * Got the lock. We might not be the anticipated owner if we
2230                  * did a lock-steal - fix up the PI-state in that case.
2231                  */
2232                 if (q.pi_state && (q.pi_state->owner != current)) {
2233                         spin_lock(q.lock_ptr);
2234                         ret = fixup_pi_state_owner(uaddr2, &q, current,
2235                                                    fshared);
2236                         spin_unlock(q.lock_ptr);
2237                 }
2238         } else {
2239                 /*
2240                  * We have been woken up by futex_unlock_pi(), a timeout, or a
2241                  * signal.  futex_unlock_pi() will not destroy the lock_ptr nor
2242                  * the pi_state.
2243                  */
2244                 WARN_ON(!q.pi_state);
2245                 pi_mutex = &q.pi_state->pi_mutex;
2246                 ret = rt_mutex_finish_proxy_lock(pi_mutex, to, &rt_waiter, 1);
2247                 debug_rt_mutex_free_waiter(&rt_waiter);
2248
2249                 spin_lock(q.lock_ptr);
2250                 /*
2251                  * Fixup the pi_state owner and possibly acquire the lock if we
2252                  * haven't already.
2253                  */
2254                 res = fixup_owner(uaddr2, fshared, &q, !ret);
2255                 /*
2256                  * If fixup_owner() returned an error, propagate that.  If it
2257                  * acquired the lock, clear -ETIMEDOUT or -EINTR.
2258                  */
2259                 if (res)
2260                         ret = (res < 0) ? res : 0;
2261
2262                 /* Unqueue and drop the lock. */
2263                 unqueue_me_pi(&q);
2264         }
2265
2266         /*
2267          * If fixup_pi_state_owner() faulted and was unable to handle the
2268          * fault, unlock the rt_mutex and return the fault to userspace.
2269          */
2270         if (ret == -EFAULT) {
2271                 if (rt_mutex_owner(pi_mutex) == current)
2272                         rt_mutex_unlock(pi_mutex);
2273         } else if (ret == -EINTR) {
2274                 /*
2275                  * We've already been requeued, but cannot restart by calling
2276                  * futex_lock_pi() directly. We could restart this syscall, but
2277                  * it would detect that the user space "val" changed and return
2278                  * -EWOULDBLOCK.  Save the overhead of the restart and return
2279                  * -EWOULDBLOCK directly.
2280                  */
2281                 ret = -EWOULDBLOCK;
2282         }
2283
2284 out_put_keys:
2285         put_futex_key(fshared, &q.key);
2286 out_key2:
2287         put_futex_key(fshared, &key2);
2288
2289 out:
2290         if (to) {
2291                 hrtimer_cancel(&to->timer);
2292                 destroy_hrtimer_on_stack(&to->timer);
2293         }
2294         return ret;
2295 }
2296
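/*
 * Illustrative only: the intended userspace pairing for the requeue-PI
 * operations above is a PI-aware condition variable. A rough, hypothetical
 * sketch ('cond', 'mutex' and pi_mutex_unlock() are made up; error
 * handling omitted; not glibc's actual implementation):
 *
 *	// Waiter (pthread_cond_wait flavour): drop the PI mutex, then wait
 *	// on the condvar word; the kernel hands us the mutex on wakeup.
 *	u32 seq = cond->seq;
 *	pi_mutex_unlock(&mutex->val, tid);
 *	syscall(SYS_futex, &cond->seq, FUTEX_WAIT_REQUEUE_PI, seq,
 *		NULL, &mutex->val, 0);
 *	// On success the kernel has already acquired mutex->val for us.
 *
 *	// Broadcaster (pthread_cond_broadcast flavour): wake one waiter and
 *	// requeue the rest directly onto the PI mutex.
 *	cond->seq++;
 *	syscall(SYS_futex, &cond->seq, FUTEX_CMP_REQUEUE_PI, 1,
 *		(void *)INT_MAX, &mutex->val, cond->seq);
 */
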
2297 /*
2298  * Support for robust futexes: the kernel cleans up held futexes at
2299  * thread exit time.
2300  *
2301  * Implementation: user-space maintains a per-thread list of locks it
2302  * is holding. Upon do_exit(), the kernel carefully walks this list,
2303  * and marks all locks that are owned by this thread with the
2304  * FUTEX_OWNER_DIED bit, and wakes up a waiter (if any). The list is
2305  * always manipulated with the lock held, so the list is private and
2306  * per-thread. Userspace also maintains a per-thread 'list_op_pending'
2307  * field, to allow the kernel to clean up if the thread dies after
2308  * acquiring the lock, but just before it could have added itself to
2309  * the list. There can only be one such pending lock.
2310  */
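/*
 * Illustrative only: a minimal userspace registration of such a list might
 * look like this (a hypothetical sketch; 'struct my_mutex' and its
 * 'futex_val' member are made up for the example):
 *
 *	static __thread struct robust_list_head head;
 *
 *	head.list.next	     = &head.list;	// empty list points to itself
 *	head.futex_offset    = offsetof(struct my_mutex, futex_val);
 *	head.list_op_pending = NULL;
 *	syscall(SYS_set_robust_list, &head, sizeof(head));  // once per thread
 */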
2311
2312 /**
2313  * sys_set_robust_list() - Set the robust-futex list head of a task
2314  * @head:       pointer to the list-head
2315  * @len:        length of the list-head, as userspace expects
2316  */
2317 SYSCALL_DEFINE2(set_robust_list, struct robust_list_head __user *, head,
2318                 size_t, len)
2319 {
2320         if (!futex_cmpxchg_enabled)
2321                 return -ENOSYS;
2322         /*
2323          * The kernel knows only one size for now:
2324          */
2325         if (unlikely(len != sizeof(*head)))
2326                 return -EINVAL;
2327
2328         current->robust_list = head;
2329
2330         return 0;
2331 }
2332
2333 /**
2334  * sys_get_robust_list() - Get the robust-futex list head of a task
2335  * @pid:        pid of the process [zero for current task]
2336  * @head_ptr:   pointer to a list-head pointer, the kernel fills it in
2337  * @len_ptr:    pointer to a length field, the kernel fills in the header size
2338  */
2339 SYSCALL_DEFINE3(get_robust_list, int, pid,
2340                 struct robust_list_head __user * __user *, head_ptr,
2341                 size_t __user *, len_ptr)
2342 {
2343         struct robust_list_head __user *head;
2344         unsigned long ret;
2345         const struct cred *cred = current_cred(), *pcred;
2346
2347         if (!futex_cmpxchg_enabled)
2348                 return -ENOSYS;
2349
2350         if (!pid)
2351                 head = current->robust_list;
2352         else {
2353                 struct task_struct *p;
2354
2355                 ret = -ESRCH;
2356                 rcu_read_lock();
2357                 p = find_task_by_vpid(pid);
2358                 if (!p)
2359                         goto err_unlock;
2360                 ret = -EPERM;
2361                 pcred = __task_cred(p);
2362                 if (cred->euid != pcred->euid &&
2363                     cred->euid != pcred->uid &&
2364                     !capable(CAP_SYS_PTRACE))
2365                         goto err_unlock;
2366                 head = p->robust_list;
2367                 rcu_read_unlock();
2368         }
2369
2370         if (put_user(sizeof(*head), len_ptr))
2371                 return -EFAULT;
2372         return put_user(head, head_ptr);
2373
2374 err_unlock:
2375         rcu_read_unlock();
2376
2377         return ret;
2378 }
2379
2380 /*
2381  * Process a futex-list entry, check whether it's owned by the
2382  * dying task, and do notification if so:
2383  */
2384 int handle_futex_death(u32 __user *uaddr, struct task_struct *curr, int pi)
2385 {
2386         u32 uval, nval, mval;
2387
2388 retry:
2389         if (get_user(uval, uaddr))
2390                 return -1;
2391
2392         if ((uval & FUTEX_TID_MASK) == task_pid_vnr(curr)) {
2393                 /*
2394                  * Ok, this dying thread is truly holding a futex
2395                  * of interest. Set the OWNER_DIED bit atomically
2396                  * via cmpxchg, and if the value had FUTEX_WAITERS
2397                  * set, wake up a waiter (if any). (We have to do a
2398                  * futex_wake() even if OWNER_DIED is already set -
2399                  * to handle the rare but possible case of recursive
2400                  * thread-death.) The rest of the cleanup is done in
2401                  * userspace.
2402                  */
2403                 mval = (uval & FUTEX_WAITERS) | FUTEX_OWNER_DIED;
2404                 nval = futex_atomic_cmpxchg_inatomic(uaddr, uval, mval);
2405
2406                 if (nval == -EFAULT)
2407                         return -1;
2408
2409                 if (nval != uval)
2410                         goto retry;
2411
2412                 /*
2413                  * Wake robust non-PI futexes here. The wakeup of
2414                  * PI futexes happens in exit_pi_state_list():
2415                  */
2416                 if (!pi && (uval & FUTEX_WAITERS))
2417                         futex_wake(uaddr, 1, 1, FUTEX_BITSET_MATCH_ANY);
2418         }
2419         return 0;
2420 }
2421
2422 /*
2423  * Fetch a robust-list pointer. Bit 0 signals PI futexes:
2424  */
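/*
 * E.g. (illustrative userspace side, 'lock' being a hypothetical mutex
 * object): a PI lock would be linked into the list as
 *
 *	entry->next = (struct robust_list *)((unsigned long)&lock->list | 1);
 *
 * so the tag bit rides along inside the otherwise-aligned pointer and is
 * masked off again below.
 */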
2425 static inline int fetch_robust_entry(struct robust_list __user **entry,
2426                                      struct robust_list __user * __user *head,
2427                                      int *pi)
2428 {
2429         unsigned long uentry;
2430
2431         if (get_user(uentry, (unsigned long __user *)head))
2432                 return -EFAULT;
2433
2434         *entry = (void __user *)(uentry & ~1UL);
2435         *pi = uentry & 1;
2436
2437         return 0;
2438 }
2439
2440 /*
2441  * Walk curr->robust_list (very carefully, it's a userspace list!)
2442  * and mark any locks found there dead, and notify any waiters.
2443  *
2444  * We silently return on any sign of a list-walking problem.
2445  */
2446 void exit_robust_list(struct task_struct *curr)
2447 {
2448         struct robust_list_head __user *head = curr->robust_list;
2449         struct robust_list __user *entry, *next_entry, *pending;
2450         unsigned int limit = ROBUST_LIST_LIMIT, pi, next_pi, pip;
2451         unsigned long futex_offset;
2452         int rc;
2453
2454         if (!futex_cmpxchg_enabled)
2455                 return;
2456
2457         /*
2458          * Fetch the list head (which was registered earlier, via
2459          * sys_set_robust_list()):
2460          */
2461         if (fetch_robust_entry(&entry, &head->list.next, &pi))
2462                 return;
2463         /*
2464          * Fetch the relative futex offset:
2465          */
2466         if (get_user(futex_offset, &head->futex_offset))
2467                 return;
2468         /*
2469          * Fetch any possibly pending lock-add first, and handle it
2470          * if it exists:
2471          */
2472         if (fetch_robust_entry(&pending, &head->list_op_pending, &pip))
2473                 return;
2474
2475         next_entry = NULL;      /* avoid warning with gcc */
2476         while (entry != &head->list) {
2477                 /*
2478                  * Fetch the next entry in the list before calling
2479                  * handle_futex_death:
2480                  */
2481                 rc = fetch_robust_entry(&next_entry, &entry->next, &next_pi);
2482                 /*
2483                  * A pending lock might already be on the list, so
2484                  * don't process it twice:
2485                  */
2486                 if (entry != pending)
2487                         if (handle_futex_death((void __user *)entry + futex_offset,
2488                                                 curr, pi))
2489                                 return;
2490                 if (rc)
2491                         return;
2492                 entry = next_entry;
2493                 pi = next_pi;
2494                 /*
2495                  * Avoid excessively long or circular lists:
2496                  */
2497                 if (!--limit)
2498                         break;
2499
2500                 cond_resched();
2501         }
2502
2503         if (pending)
2504                 handle_futex_death((void __user *)pending + futex_offset,
2505                                    curr, pip);
2506 }
2507
2508 long do_futex(u32 __user *uaddr, int op, u32 val, ktime_t *timeout,
2509                 u32 __user *uaddr2, u32 val2, u32 val3)
2510 {
2511         int clockrt, ret = -ENOSYS;
2512         int cmd = op & FUTEX_CMD_MASK;
2513         int fshared = 0;
2514
2515         if (!(op & FUTEX_PRIVATE_FLAG))
2516                 fshared = 1;
2517
2518         clockrt = op & FUTEX_CLOCK_REALTIME;
2519         if (clockrt && cmd != FUTEX_WAIT_BITSET && cmd != FUTEX_WAIT_REQUEUE_PI)
2520                 return -ENOSYS;
2521
2522         switch (cmd) {
2523         case FUTEX_WAIT:
2524                 val3 = FUTEX_BITSET_MATCH_ANY;
2525         case FUTEX_WAIT_BITSET:
2526                 ret = futex_wait(uaddr, fshared, val, timeout, val3, clockrt);
2527                 break;
2528         case FUTEX_WAKE:
2529                 val3 = FUTEX_BITSET_MATCH_ANY;
2530         case FUTEX_WAKE_BITSET:
2531                 ret = futex_wake(uaddr, fshared, val, val3);
2532                 break;
2533         case FUTEX_REQUEUE:
2534                 ret = futex_requeue(uaddr, fshared, uaddr2, val, val2, NULL, 0);
2535                 break;
2536         case FUTEX_CMP_REQUEUE:
2537                 ret = futex_requeue(uaddr, fshared, uaddr2, val, val2, &val3,
2538                                     0);
2539                 break;
2540         case FUTEX_WAKE_OP:
2541                 ret = futex_wake_op(uaddr, fshared, uaddr2, val, val2, val3);
2542                 break;
2543         case FUTEX_LOCK_PI:
2544                 if (futex_cmpxchg_enabled)
2545                         ret = futex_lock_pi(uaddr, fshared, val, timeout, 0);
2546                 break;
2547         case FUTEX_UNLOCK_PI:
2548                 if (futex_cmpxchg_enabled)
2549                         ret = futex_unlock_pi(uaddr, fshared);
2550                 break;
2551         case FUTEX_TRYLOCK_PI:
2552                 if (futex_cmpxchg_enabled)
2553                         ret = futex_lock_pi(uaddr, fshared, 0, timeout, 1);
2554                 break;
2555         case FUTEX_WAIT_REQUEUE_PI:
2556                 val3 = FUTEX_BITSET_MATCH_ANY;
2557                 ret = futex_wait_requeue_pi(uaddr, fshared, val, timeout, val3,
2558                                             clockrt, uaddr2);
2559                 break;
2560         case FUTEX_CMP_REQUEUE_PI:
2561                 ret = futex_requeue(uaddr, fshared, uaddr2, val, val2, &val3,
2562                                     1);
2563                 break;
2564         default:
2565                 ret = -ENOSYS;
2566         }
2567         return ret;
2568 }
2569
2570
2571 SYSCALL_DEFINE6(futex, u32 __user *, uaddr, int, op, u32, val,
2572                 struct timespec __user *, utime, u32 __user *, uaddr2,
2573                 u32, val3)
2574 {
2575         struct timespec ts;
2576         ktime_t t, *tp = NULL;
2577         u32 val2 = 0;
2578         int cmd = op & FUTEX_CMD_MASK;
2579
2580         if (utime && (cmd == FUTEX_WAIT || cmd == FUTEX_LOCK_PI ||
2581                       cmd == FUTEX_WAIT_BITSET ||
2582                       cmd == FUTEX_WAIT_REQUEUE_PI)) {
2583                 if (copy_from_user(&ts, utime, sizeof(ts)) != 0)
2584                         return -EFAULT;
2585                 if (!timespec_valid(&ts))
2586                         return -EINVAL;
2587
2588                 t = timespec_to_ktime(ts);
2589                 if (cmd == FUTEX_WAIT)
2590                         t = ktime_add_safe(ktime_get(), t);
2591                 tp = &t;
2592         }
2593         /*
2594          * The requeue parameter is passed in 'utime' if cmd == FUTEX_*_REQUEUE_*;
2595          * the number of waiters to wake is passed in 'utime' if cmd == FUTEX_WAKE_OP.
2596          */
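	/*
	 * E.g. an illustrative raw FUTEX_CMP_REQUEUE call from userspace
	 * smuggles nr_requeue through the timeout argument slot:
	 *
	 *	syscall(SYS_futex, uaddr, FUTEX_CMP_REQUEUE, nr_wake,
	 *		(void *)(unsigned long)nr_requeue, uaddr2, val3);
	 */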
2597         if (cmd == FUTEX_REQUEUE || cmd == FUTEX_CMP_REQUEUE ||
2598             cmd == FUTEX_CMP_REQUEUE_PI || cmd == FUTEX_WAKE_OP)
2599                 val2 = (u32) (unsigned long) utime;
2600
2601         return do_futex(uaddr, op, val, tp, uaddr2, val2, val3);
2602 }
2603
2604 static int __init futex_init(void)
2605 {
2606         u32 curval;
2607         int i;
2608
2609         /*
2610          * This call will fail, and that is what we want. Some arch
2611          * implementations do runtime detection of the
2612          * futex_atomic_cmpxchg_inatomic() functionality. We want to
2613          * know that before we call into any of the complex code
2614          * paths, and we also want to prevent registration of robust
2615          * lists in that case. NULL is guaranteed to fault and we get
2616          * -EFAULT on a functional implementation; the non-functional
2617          * ones will return -ENOSYS.
2618          */
2619         curval = cmpxchg_futex_value_locked(NULL, 0, 0);
2620         if (curval == -EFAULT)
2621                 futex_cmpxchg_enabled = 1;
2622
2623         for (i = 0; i < ARRAY_SIZE(futex_queues); i++) {
2624                 plist_head_init(&futex_queues[i].chain, &futex_queues[i].lock);
2625                 spin_lock_init(&futex_queues[i].lock);
2626         }
2627
2628         return 0;
2629 }
2630 __initcall(futex_init);