/*
 *  Fast Userspace Mutexes (which I call "Futexes!").
 *  (C) Rusty Russell, IBM 2002
 *
 *  Generalized futexes, futex requeueing, misc fixes by Ingo Molnar
 *  (C) Copyright 2003 Red Hat Inc, All Rights Reserved
 *
 *  Removed page pinning, fix privately mapped COW pages and other cleanups
 *  (C) Copyright 2003, 2004 Jamie Lokier
 *
 *  Robust futex support started by Ingo Molnar
 *  (C) Copyright 2006 Red Hat Inc, All Rights Reserved
 *  Thanks to Thomas Gleixner for suggestions, analysis and fixes.
 *
 *  PI-futex support started by Ingo Molnar and Thomas Gleixner
 *  Copyright (C) 2006 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
 *  Copyright (C) 2006 Timesys Corp., Thomas Gleixner <tglx@timesys.com>
 *
 *  PRIVATE futexes by Eric Dumazet
 *  Copyright (C) 2007 Eric Dumazet <dada1@cosmosbay.com>
 *
 *  Thanks to Ben LaHaise for yelling "hashed waitqueues" loudly
 *  enough at me, Linus for the original (flawed) idea, Matthew
 *  Kirkwood for proof-of-concept implementation.
 *
 *  "The futexes are also cursed."
 *  "But they come in a choice of three flavours!"
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2 of the License, or
 *  (at your option) any later version.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License
 *  along with this program; if not, write to the Free Software
 *  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */
#include <linux/slab.h>
#include <linux/poll.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/jhash.h>
#include <linux/init.h>
#include <linux/futex.h>
#include <linux/mount.h>
#include <linux/pagemap.h>
#include <linux/syscalls.h>
#include <linux/signal.h>
#include <linux/module.h>
#include <linux/magic.h>
#include <linux/pid.h>
#include <linux/nsproxy.h>

#include <asm/futex.h>

#include "rtmutex_common.h"

int __read_mostly futex_cmpxchg_enabled;

#define FUTEX_HASHBITS (CONFIG_BASE_SMALL ? 4 : 8)

/*
 * Priority Inheritance state:
 */
struct futex_pi_state {
	/*
	 * list of 'owned' pi_state instances - these have to be
	 * cleaned up in do_exit() if the task exits prematurely:
	 */
	struct list_head list;

	/*
	 * The PI object:
	 */
	struct rt_mutex pi_mutex;

	struct task_struct *owner;
	atomic_t refcount;

	union futex_key key;
};

/*
 * We use this hashed waitqueue instead of a normal wait_queue_t, so
 * we can wake only the relevant ones (hashed queues may be shared).
 *
 * A futex_q has a woken state, just like tasks have TASK_RUNNING.
 * It is considered woken when plist_node_empty(&q->list) || q->lock_ptr == 0.
 * The order of wakeup is always to make the first condition true, then
 * wake up q->waiter, then make the second condition true.
 */
struct futex_q {
	struct plist_node list;
	/* There can only be a single waiter */
	wait_queue_head_t waiter;

	/* Which hash list lock to use: */
	spinlock_t *lock_ptr;

	/* Key which the futex is hashed on: */
	union futex_key key;

	/* Optional priority inheritance state: */
	struct futex_pi_state *pi_state;
	struct task_struct *task;

	/* Bitset for the optional bitmasked wakeup */
	u32 bitset;
};

/*
 * Hash buckets are shared by all the futex_keys that hash to the same
 * location.  Each key may have multiple futex_q structures, one for each task
 * waiting on a futex.
 */
struct futex_hash_bucket {
	spinlock_t lock;
	struct plist_head chain;
};

static struct futex_hash_bucket futex_queues[1<<FUTEX_HASHBITS];

/*
 * We hash on the keys returned from get_futex_key (see below).
 */
static struct futex_hash_bucket *hash_futex(union futex_key *key)
{
	u32 hash = jhash2((u32*)&key->both.word,
			  (sizeof(key->both.word)+sizeof(key->both.ptr))/4,
			  key->both.offset);
	return &futex_queues[hash & ((1 << FUTEX_HASHBITS)-1)];
}

/*
 * Return 1 if two futex_keys are equal, 0 otherwise.
 */
static inline int match_futex(union futex_key *key1, union futex_key *key2)
{
	return (key1->both.word == key2->both.word
		&& key1->both.ptr == key2->both.ptr
		&& key1->both.offset == key2->both.offset);
}
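
/*
 * Editor's sketch (not part of the original source): two threads of the
 * same process waiting on one PROCESS_PRIVATE futex word produce equal
 * keys (same mm, same page-aligned address, same offset), so they hash
 * to the same bucket and queue on the same plist chain:
 *
 *	union futex_key k1 = FUTEX_KEY_INIT, k2 = FUTEX_KEY_INIT;
 *	get_futex_key(uaddr, 0, &k1);	// thread A
 *	get_futex_key(uaddr, 0, &k2);	// thread B, same mm
 *	// match_futex(&k1, &k2) == 1
 *	// hash_futex(&k1) == hash_futex(&k2)
 */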

/*
 * Take a reference to the resource addressed by a key.
 * Can be called while holding spinlocks.
 *
 */
static void get_futex_key_refs(union futex_key *key)
{
	if (!key->both.ptr)
		return;

	switch (key->both.offset & (FUT_OFF_INODE|FUT_OFF_MMSHARED)) {
	case FUT_OFF_INODE:
		atomic_inc(&key->shared.inode->i_count);
		break;
	case FUT_OFF_MMSHARED:
		atomic_inc(&key->private.mm->mm_count);
		break;
	}
}

/*
 * Drop a reference to the resource addressed by a key.
 * The hash bucket spinlock must not be held.
 */
static void drop_futex_key_refs(union futex_key *key)
{
	if (!key->both.ptr) {
		/* If we're here then we tried to put a key we failed to get */
		WARN_ON_ONCE(1);
		return;
	}

	switch (key->both.offset & (FUT_OFF_INODE|FUT_OFF_MMSHARED)) {
	case FUT_OFF_INODE:
		iput(key->shared.inode);
		break;
	case FUT_OFF_MMSHARED:
		mmdrop(key->private.mm);
		break;
	}
}

/**
 * get_futex_key - Get parameters which are the keys for a futex.
 * @uaddr: virtual address of the futex
 * @fshared: 0 for a PROCESS_PRIVATE futex, 1 for PROCESS_SHARED
 * @key: address where result is stored.
 *
 * Returns a negative error code or 0
 * The key words are stored in *key on success.
 *
 * For shared mappings, it's (page->index, vma->vm_file->f_path.dentry->d_inode,
 * offset_within_page).  For private mappings, it's (uaddr, current->mm).
 * We can usually work out the index without swapping in the page.
 *
 * lock_page() might sleep, the caller should not hold a spinlock.
 */
static int get_futex_key(u32 __user *uaddr, int fshared, union futex_key *key)
{
	unsigned long address = (unsigned long)uaddr;
	struct mm_struct *mm = current->mm;
	struct page *page;
	int err;

	/*
	 * The futex address must be "naturally" aligned.
	 */
	key->both.offset = address % PAGE_SIZE;
	if (unlikely((address % sizeof(u32)) != 0))
		return -EINVAL;
	address -= key->both.offset;

	/*
	 * PROCESS_PRIVATE futexes are fast.
	 * As the mm cannot disappear under us and the 'key' only needs
	 * the virtual address, we don't even have to find the underlying vma.
	 * Note: We do have to check 'uaddr' is a valid user address,
	 *       but access_ok() should be faster than find_vma().
	 */
	if (!fshared) {
		if (unlikely(!access_ok(VERIFY_WRITE, uaddr, sizeof(u32))))
			return -EFAULT;
		key->private.mm = mm;
		key->private.address = address;
		get_futex_key_refs(key);
		return 0;
	}

again:
	err = get_user_pages_fast(address, 1, 0, &page);
	if (err < 0)
		return err;

	lock_page(page);
	if (!page->mapping) {
		unlock_page(page);
		put_page(page);
		goto again;
	}

	/*
	 * Private mappings are handled in a simple way.
	 *
	 * NOTE: When userspace waits on a MAP_SHARED mapping, even if
	 * it's a read-only handle, it's expected that futexes attach to
	 * the object not the particular process.
	 */
	if (PageAnon(page)) {
		key->both.offset |= FUT_OFF_MMSHARED; /* ref taken on mm */
		key->private.mm = mm;
		key->private.address = address;
	} else {
		key->both.offset |= FUT_OFF_INODE; /* inode-based key */
		key->shared.inode = page->mapping->host;
		key->shared.pgoff = page->index;
	}

	get_futex_key_refs(key);

	unlock_page(page);
	put_page(page);
	return 0;
}
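
/*
 * Editor's example of the shared case above: two processes mapping the
 * same file MAP_SHARED at different virtual addresses still converge on
 * one inode-based key, so a wake in one process finds the other's waiter:
 *
 *	// process A: addr_a = mmap(NULL, len, prot, MAP_SHARED, fd, 0);
 *	// process B: addr_b = mmap(NULL, len, prot, MAP_SHARED, fd, 0);
 *	// get_futex_key(addr_a, 1, &ka) and get_futex_key(addr_b, 1, &kb)
 *	// both yield (inode, pgoff, offset) keys: match_futex(&ka, &kb) == 1.
 */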

static inline
void put_futex_key(int fshared, union futex_key *key)
{
	drop_futex_key_refs(key);
}

/**
 * futex_top_waiter() - Return the highest priority waiter on a futex
 * @hb:     the hash bucket the futex_q's reside in
 * @key:    the futex key (to distinguish it from other futexes' futex_q's)
 *
 * Must be called with the hb lock held.
 */
static struct futex_q *futex_top_waiter(struct futex_hash_bucket *hb,
					union futex_key *key)
{
	struct futex_q *this;

	plist_for_each_entry(this, &hb->chain, list) {
		if (match_futex(&this->key, key))
			return this;
	}
	return NULL;
}

static u32 cmpxchg_futex_value_locked(u32 __user *uaddr, u32 uval, u32 newval)
{
	u32 curval;

	pagefault_disable();
	curval = futex_atomic_cmpxchg_inatomic(uaddr, uval, newval);
	pagefault_enable();

	return curval;
}

static int get_futex_value_locked(u32 *dest, u32 __user *from)
{
	int ret;

	pagefault_disable();
	ret = __copy_from_user_inatomic(dest, from, sizeof(u32));
	pagefault_enable();

	return ret ? -EFAULT : 0;
}
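
/*
 * Editor's note: the two helpers above run under the hash bucket lock,
 * where sleeping on a page fault is forbidden; pagefault_disable() makes
 * a fault fail fast with an error instead. Callers then use a common
 * drop-locks / fault-in / retry pattern, sketched here (compare
 * futex_requeue() and futex_wait() below):
 *
 *	ret = get_futex_value_locked(&curval, uaddr);
 *	if (unlikely(ret)) {
 *		spin_unlock(&hb->lock);		// drop the bucket lock
 *		ret = get_user(curval, uaddr);	// may sleep, faults page in
 *		if (!ret)
 *			goto retry;		// redo the whole operation
 *	}
 */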


/*
 * PI code:
 */
static int refill_pi_state_cache(void)
{
	struct futex_pi_state *pi_state;

	if (likely(current->pi_state_cache))
		return 0;

	pi_state = kzalloc(sizeof(*pi_state), GFP_KERNEL);

	if (!pi_state)
		return -ENOMEM;

	INIT_LIST_HEAD(&pi_state->list);
	/* pi_mutex gets initialized later */
	pi_state->owner = NULL;
	atomic_set(&pi_state->refcount, 1);
	pi_state->key = FUTEX_KEY_INIT;

	current->pi_state_cache = pi_state;

	return 0;
}

static struct futex_pi_state *alloc_pi_state(void)
{
	struct futex_pi_state *pi_state = current->pi_state_cache;

	WARN_ON(!pi_state);
	current->pi_state_cache = NULL;

	return pi_state;
}

static void free_pi_state(struct futex_pi_state *pi_state)
{
	if (!atomic_dec_and_test(&pi_state->refcount))
		return;

	/*
	 * If pi_state->owner is NULL, the owner is most probably dying
	 * and has cleaned up the pi_state already
	 */
	if (pi_state->owner) {
		spin_lock_irq(&pi_state->owner->pi_lock);
		list_del_init(&pi_state->list);
		spin_unlock_irq(&pi_state->owner->pi_lock);

		rt_mutex_proxy_unlock(&pi_state->pi_mutex, pi_state->owner);
	}

	if (current->pi_state_cache)
		kfree(pi_state);
	else {
		/*
		 * pi_state->list is already empty.
		 * clear pi_state->owner.
		 * refcount is at 0 - put it back to 1.
		 */
		pi_state->owner = NULL;
		atomic_set(&pi_state->refcount, 1);
		current->pi_state_cache = pi_state;
	}
}

/*
 * Look up the task based on what TID userspace gave us.
 * We don't trust it.
 */
static struct task_struct *futex_find_get_task(pid_t pid)
{
	struct task_struct *p;
	const struct cred *cred = current_cred(), *pcred;

	rcu_read_lock();
	p = find_task_by_vpid(pid);
	if (!p) {
		p = ERR_PTR(-ESRCH);
	} else {
		pcred = __task_cred(p);
		if (cred->euid != pcred->euid &&
		    cred->euid != pcred->uid)
			p = ERR_PTR(-ESRCH);
		else
			get_task_struct(p);
	}

	rcu_read_unlock();

	return p;
}

/*
 * This task is holding PI mutexes at exit time => bad.
 * Kernel cleans up PI-state, but userspace is likely hosed.
 * (Robust-futex cleanup is separate and might save the day for userspace.)
 */
void exit_pi_state_list(struct task_struct *curr)
{
	struct list_head *next, *head = &curr->pi_state_list;
	struct futex_pi_state *pi_state;
	struct futex_hash_bucket *hb;
	union futex_key key = FUTEX_KEY_INIT;

	if (!futex_cmpxchg_enabled)
		return;
	/*
	 * We are a ZOMBIE and nobody can enqueue itself on
	 * pi_state_list anymore, but we have to be careful
	 * versus waiters unqueueing themselves:
	 */
	spin_lock_irq(&curr->pi_lock);
	while (!list_empty(head)) {

		next = head->next;
		pi_state = list_entry(next, struct futex_pi_state, list);
		key = pi_state->key;
		hb = hash_futex(&key);
		spin_unlock_irq(&curr->pi_lock);

		spin_lock(&hb->lock);

		spin_lock_irq(&curr->pi_lock);
		/*
		 * We dropped the pi-lock, so re-check whether this
		 * task still owns the PI-state:
		 */
		if (head->next != next) {
			spin_unlock(&hb->lock);
			continue;
		}

		WARN_ON(pi_state->owner != curr);
		WARN_ON(list_empty(&pi_state->list));
		list_del_init(&pi_state->list);
		pi_state->owner = NULL;
		spin_unlock_irq(&curr->pi_lock);

		rt_mutex_unlock(&pi_state->pi_mutex);

		spin_unlock(&hb->lock);

		spin_lock_irq(&curr->pi_lock);
	}
	spin_unlock_irq(&curr->pi_lock);
}

static int
lookup_pi_state(u32 uval, struct futex_hash_bucket *hb,
		union futex_key *key, struct futex_pi_state **ps)
{
	struct futex_pi_state *pi_state = NULL;
	struct futex_q *this, *next;
	struct plist_head *head;
	struct task_struct *p;
	pid_t pid = uval & FUTEX_TID_MASK;

	head = &hb->chain;

	plist_for_each_entry_safe(this, next, head, list) {
		if (match_futex(&this->key, key)) {
			/*
			 * Another waiter already exists - bump up
			 * the refcount and return its pi_state:
			 */
			pi_state = this->pi_state;
			/*
			 * Userspace might have messed up non-PI and PI futexes
			 */
			if (unlikely(!pi_state))
				return -EINVAL;

			WARN_ON(!atomic_read(&pi_state->refcount));
			WARN_ON(pid && pi_state->owner &&
				pi_state->owner->pid != pid);

			atomic_inc(&pi_state->refcount);
			*ps = pi_state;

			return 0;
		}
	}

	/*
	 * We are the first waiter - try to look up the real owner and attach
	 * the new pi_state to it, but bail out when TID = 0
	 */
	if (!pid)
		return -ESRCH;
	p = futex_find_get_task(pid);
	if (IS_ERR(p))
		return PTR_ERR(p);

	/*
	 * We need to look at the task state flags to figure out
	 * whether the task is exiting. To protect against the
	 * do_exit change of the task flags, we do this protected by
	 * p->pi_lock:
	 */
	spin_lock_irq(&p->pi_lock);
	if (unlikely(p->flags & PF_EXITING)) {
		/*
		 * The task is on the way out. When PF_EXITPIDONE is
		 * set, we know that the task has finished the
		 * cleanup:
		 */
		int ret = (p->flags & PF_EXITPIDONE) ? -ESRCH : -EAGAIN;

		spin_unlock_irq(&p->pi_lock);
		put_task_struct(p);
		return ret;
	}

	pi_state = alloc_pi_state();

	/*
	 * Initialize the pi_mutex in locked state and make 'p'
	 * the owner of it:
	 */
	rt_mutex_init_proxy_locked(&pi_state->pi_mutex, p);

	/* Store the key for possible exit cleanups: */
	pi_state->key = *key;

	WARN_ON(!list_empty(&pi_state->list));
	list_add(&pi_state->list, &p->pi_state_list);
	pi_state->owner = p;
	spin_unlock_irq(&p->pi_lock);

	put_task_struct(p);

	*ps = pi_state;

	return 0;
}

/**
 * futex_lock_pi_atomic() - atomic work required to acquire a pi aware futex
 * @uaddr:	the pi futex user address
 * @hb:		the pi futex hash bucket
 * @key:	the futex key associated with uaddr and hb
 * @ps:		the pi_state pointer where we store the result of the lookup
 * @task:	the task to perform the atomic lock work for.  This will be
 *		"current" except in the case of requeue pi.
 *
 * Returns:
 *  0 - ready to wait
 *  1 - acquired the lock
 * <0 - error
 *
 * The hb->lock and futex_key refs shall be held by the caller.
 */
static int futex_lock_pi_atomic(u32 __user *uaddr, struct futex_hash_bucket *hb,
				union futex_key *key,
				struct futex_pi_state **ps,
				struct task_struct *task)
{
	int lock_taken, ret, ownerdied = 0;
	u32 uval, newval, curval;

retry:
	ret = lock_taken = 0;

	/*
	 * To avoid races, we attempt to take the lock here again
	 * (by doing a 0 -> TID atomic cmpxchg), while holding all
	 * the locks. It will most likely not succeed.
	 */
	newval = task_pid_vnr(task);

	curval = cmpxchg_futex_value_locked(uaddr, 0, newval);

	if (unlikely(curval == -EFAULT))
		return -EFAULT;

	/*
	 * Detect deadlocks.
	 */
	if ((unlikely((curval & FUTEX_TID_MASK) == task_pid_vnr(task))))
		return -EDEADLK;

	/*
	 * Surprise - we got the lock. Just return to userspace:
	 */
	if (unlikely(!curval))
		return 1;

	uval = curval;

	/*
	 * Set the FUTEX_WAITERS flag, so the owner will know it has someone
	 * to wake at the next unlock.
	 */
	newval = curval | FUTEX_WAITERS;

	/*
	 * A futex can end up with no apparent owner in two cases: the
	 * owner TID is 0, or the previous owner died (ownerdied was set
	 * on the -ESRCH retry below). In both cases we take the futex
	 * over unconditionally, keeping the OWNER_DIED bit.
	 *
	 * This is safe as we are protected by the hash bucket lock !
	 */
	if (unlikely(ownerdied || !(curval & FUTEX_TID_MASK))) {
		/* Keep the OWNER_DIED bit */
		newval = (curval & ~FUTEX_TID_MASK) | task_pid_vnr(task);
		ownerdied = 0;
		lock_taken = 1;
	}

	curval = cmpxchg_futex_value_locked(uaddr, uval, newval);

	if (unlikely(curval == -EFAULT))
		return -EFAULT;
	if (unlikely(curval != uval))
		goto retry;

	/*
	 * We took the lock via the owner-died takeover above.
	 */
	if (unlikely(lock_taken))
		return 1;

	/*
	 * We don't have the lock. Look up the PI state (or create it if
	 * we are the first waiter):
	 */
	ret = lookup_pi_state(uval, hb, key, ps);

	if (unlikely(ret)) {
		switch (ret) {
		case -ESRCH:
			/*
			 * No owner found for this futex. Check if the
			 * OWNER_DIED bit is set to figure out whether
			 * this is a robust futex or not.
			 */
			if (get_futex_value_locked(&curval, uaddr))
				return -EFAULT;

			/*
			 * We simply start over in case of a robust
			 * futex. The code above will take the futex
			 * and return happy.
			 */
			if (curval & FUTEX_OWNER_DIED) {
				ownerdied = 1;
				goto retry;
			}
		default:
			break;
		}
	}

	return ret;
}
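
/*
 * Editor's background sketch: per the PI-futex protocol, userspace tries
 * the uncontended 0 -> TID transition itself and only enters the kernel
 * (and thus futex_lock_pi_atomic() above) on contention, e.g.:
 *
 *	// userspace fast path (illustrative, not from this file):
 *	if (cmpxchg(lock_word, 0, gettid()) != 0)
 *		syscall(SYS_futex, lock_word, FUTEX_LOCK_PI, 0, timeout);
 */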

/*
 * The hash bucket lock must be held when this is called.
 * Afterwards, the futex_q must not be accessed.
 */
static void wake_futex(struct futex_q *q)
{
	plist_del(&q->list, &q->list.plist);
	/*
	 * The lock in wake_up() is a crucial memory barrier after the
	 * plist_del() and also before assigning to q->lock_ptr.
	 */
	wake_up(&q->waiter);
	/*
	 * The waiting task can free the futex_q as soon as this is written,
	 * without taking any locks.  This must come last.
	 *
	 * A memory barrier is required here to prevent the following store to
	 * lock_ptr from getting ahead of the wakeup. Clearing the lock at the
	 * end of wake_up() does not prevent this store from moving.
	 */
	smp_wmb();
	q->lock_ptr = NULL;
}

static int wake_futex_pi(u32 __user *uaddr, u32 uval, struct futex_q *this)
{
	struct task_struct *new_owner;
	struct futex_pi_state *pi_state = this->pi_state;
	u32 curval, newval;

	if (!pi_state)
		return -EINVAL;

	spin_lock(&pi_state->pi_mutex.wait_lock);
	new_owner = rt_mutex_next_owner(&pi_state->pi_mutex);

	/*
	 * This happens when we have stolen the lock and the original
	 * pending owner did not enqueue itself back on the rt_mutex.
	 * That's not a tragedy. This way we know that a lock waiter
	 * is in flight. We make the futex_q waiter the pending owner.
	 */
	if (!new_owner)
		new_owner = this->task;

	/*
	 * We pass it to the next owner. (The WAITERS bit is always
	 * kept enabled while there is PI state around. We must also
	 * preserve the owner died bit.)
	 */
	if (!(uval & FUTEX_OWNER_DIED)) {
		int ret = 0;

		newval = FUTEX_WAITERS | task_pid_vnr(new_owner);

		curval = cmpxchg_futex_value_locked(uaddr, uval, newval);

		if (curval == -EFAULT)
			ret = -EFAULT;
		else if (curval != uval)
			ret = -EINVAL;
		if (ret) {
			spin_unlock(&pi_state->pi_mutex.wait_lock);
			return ret;
		}
	}

	spin_lock_irq(&pi_state->owner->pi_lock);
	WARN_ON(list_empty(&pi_state->list));
	list_del_init(&pi_state->list);
	spin_unlock_irq(&pi_state->owner->pi_lock);

	spin_lock_irq(&new_owner->pi_lock);
	WARN_ON(!list_empty(&pi_state->list));
	list_add(&pi_state->list, &new_owner->pi_state_list);
	pi_state->owner = new_owner;
	spin_unlock_irq(&new_owner->pi_lock);

	spin_unlock(&pi_state->pi_mutex.wait_lock);
	rt_mutex_unlock(&pi_state->pi_mutex);

	return 0;
}

static int unlock_futex_pi(u32 __user *uaddr, u32 uval)
{
	u32 oldval;

	/*
	 * There is no waiter, so we unlock the futex. The owner-died
	 * bit does not need to be preserved here. We are the owner:
	 */
	oldval = cmpxchg_futex_value_locked(uaddr, uval, 0);

	if (oldval == -EFAULT)
		return oldval;
	if (oldval != uval)
		return -EAGAIN;

	return 0;
}

/*
 * Express the locking dependencies for lockdep:
 */
static inline void
double_lock_hb(struct futex_hash_bucket *hb1, struct futex_hash_bucket *hb2)
{
	if (hb1 <= hb2) {
		spin_lock(&hb1->lock);
		if (hb1 < hb2)
			spin_lock_nested(&hb2->lock, SINGLE_DEPTH_NESTING);
	} else { /* hb1 > hb2 */
		spin_lock(&hb2->lock);
		spin_lock_nested(&hb1->lock, SINGLE_DEPTH_NESTING);
	}
}
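
/*
 * Editor's note: taking the bucket locks in address order avoids an ABBA
 * deadlock between concurrent callers. Without it, one CPU running
 * double_lock_hb(a, b) and another running double_lock_hb(b, a) could
 * each hold one lock and spin forever on the other. When hb1 == hb2 the
 * lock is taken only once.
 */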

static inline void
double_unlock_hb(struct futex_hash_bucket *hb1, struct futex_hash_bucket *hb2)
{
	spin_unlock(&hb1->lock);
	if (hb1 != hb2)
		spin_unlock(&hb2->lock);
}

/*
 * Wake up waiters matching bitset queued on this futex (uaddr).
 */
static int futex_wake(u32 __user *uaddr, int fshared, int nr_wake, u32 bitset)
{
	struct futex_hash_bucket *hb;
	struct futex_q *this, *next;
	struct plist_head *head;
	union futex_key key = FUTEX_KEY_INIT;
	int ret;

	if (!bitset)
		return -EINVAL;

	ret = get_futex_key(uaddr, fshared, &key);
	if (unlikely(ret != 0))
		goto out;

	hb = hash_futex(&key);
	spin_lock(&hb->lock);
	head = &hb->chain;

	plist_for_each_entry_safe(this, next, head, list) {
		if (match_futex(&this->key, &key)) {
			if (this->pi_state) {
				ret = -EINVAL;
				break;
			}

			/* Check if one of the bits is set in both bitsets */
			if (!(this->bitset & bitset))
				continue;

			wake_futex(this);
			if (++ret >= nr_wake)
				break;
		}
	}

	spin_unlock(&hb->lock);
	put_futex_key(fshared, &key);
out:
	return ret;
}
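
/*
 * Editor's usage sketch for the bitset: a waker can target a subset of
 * the waiters on a single futex word by tagging them at wait time
 * (hypothetical "class 0x1" shown; plain FUTEX_WAIT/FUTEX_WAKE behave
 * like the bitset ops with a bitset of FUTEX_BITSET_MATCH_ANY):
 *
 *	// waiter: syscall(SYS_futex, f, FUTEX_WAIT_BITSET, val,
 *	//                 NULL, NULL, 0x1);
 *	// waker:  syscall(SYS_futex, f, FUTEX_WAKE_BITSET, INT_MAX,
 *	//                 NULL, NULL, 0x1);	// wakes only the 0x1 class
 */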

/*
 * Wake up waiters on uaddr1, atomically perform an operation on *uaddr2
 * and, depending on its outcome, wake waiters on uaddr2 as well:
 */
static int
futex_wake_op(u32 __user *uaddr1, int fshared, u32 __user *uaddr2,
	      int nr_wake, int nr_wake2, int op)
{
	union futex_key key1 = FUTEX_KEY_INIT, key2 = FUTEX_KEY_INIT;
	struct futex_hash_bucket *hb1, *hb2;
	struct plist_head *head;
	struct futex_q *this, *next;
	int ret, op_ret;

retry:
	ret = get_futex_key(uaddr1, fshared, &key1);
	if (unlikely(ret != 0))
		goto out;
	ret = get_futex_key(uaddr2, fshared, &key2);
	if (unlikely(ret != 0))
		goto out_put_key1;

	hb1 = hash_futex(&key1);
	hb2 = hash_futex(&key2);

	double_lock_hb(hb1, hb2);
retry_private:
	op_ret = futex_atomic_op_inuser(op, uaddr2);
	if (unlikely(op_ret < 0)) {
		u32 dummy;

		double_unlock_hb(hb1, hb2);

#ifndef CONFIG_MMU
		/*
		 * we don't get EFAULT from MMU faults if we don't have an MMU,
		 * but we might get them from range checking
		 */
		ret = op_ret;
		goto out_put_keys;
#endif

		if (unlikely(op_ret != -EFAULT)) {
			ret = op_ret;
			goto out_put_keys;
		}

		ret = get_user(dummy, uaddr2);
		if (ret)
			goto out_put_keys;

		if (!fshared)
			goto retry_private;

		put_futex_key(fshared, &key2);
		put_futex_key(fshared, &key1);
		goto retry;
	}

	head = &hb1->chain;

	plist_for_each_entry_safe(this, next, head, list) {
		if (match_futex(&this->key, &key1)) {
			wake_futex(this);
			if (++ret >= nr_wake)
				break;
		}
	}

	if (op_ret > 0) {
		head = &hb2->chain;

		op_ret = 0;
		plist_for_each_entry_safe(this, next, head, list) {
			if (match_futex(&this->key, &key2)) {
				wake_futex(this);
				if (++op_ret >= nr_wake2)
					break;
			}
		}
		ret += op_ret;
	}

	double_unlock_hb(hb1, hb2);
out_put_keys:
	put_futex_key(fshared, &key2);
out_put_key1:
	put_futex_key(fshared, &key1);
out:
	return ret;
}
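
/*
 * Editor's note: FUTEX_WAKE_OP exists so userspace (glibc's condvar
 * signalling, for instance) can modify one futex word and wake on two
 * addresses in a single syscall instead of a write plus two futex calls.
 * Illustrative invocation (argument encoding per FUTEX_OP() in
 * <linux/futex.h>; nr_wake2 travels in the timeout slot):
 *
 *	// syscall(SYS_futex, uaddr1, FUTEX_WAKE_OP, nr_wake,
 *	//         (void *)(long)nr_wake2, uaddr2,
 *	//         FUTEX_OP(FUTEX_OP_SET, 0, FUTEX_OP_CMP_GT, 1));
 */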

/*
 * Requeue all waiters hashed on one physical page to another
 * physical page.
 */
static int futex_requeue(u32 __user *uaddr1, int fshared, u32 __user *uaddr2,
			 int nr_wake, int nr_requeue, u32 *cmpval)
{
	union futex_key key1 = FUTEX_KEY_INIT, key2 = FUTEX_KEY_INIT;
	struct futex_hash_bucket *hb1, *hb2;
	struct plist_head *head1;
	struct futex_q *this, *next;
	int ret, drop_count = 0;

retry:
	ret = get_futex_key(uaddr1, fshared, &key1);
	if (unlikely(ret != 0))
		goto out;
	ret = get_futex_key(uaddr2, fshared, &key2);
	if (unlikely(ret != 0))
		goto out_put_key1;

	hb1 = hash_futex(&key1);
	hb2 = hash_futex(&key2);

retry_private:
	double_lock_hb(hb1, hb2);

	if (likely(cmpval != NULL)) {
		u32 curval;

		ret = get_futex_value_locked(&curval, uaddr1);

		if (unlikely(ret)) {
			double_unlock_hb(hb1, hb2);

			ret = get_user(curval, uaddr1);
			if (ret)
				goto out_put_keys;

			if (!fshared)
				goto retry_private;

			put_futex_key(fshared, &key2);
			put_futex_key(fshared, &key1);
			goto retry;
		}
		if (curval != *cmpval) {
			ret = -EAGAIN;
			goto out_unlock;
		}
	}

	head1 = &hb1->chain;
	plist_for_each_entry_safe(this, next, head1, list) {
		if (!match_futex(&this->key, &key1))
			continue;
		if (++ret <= nr_wake) {
			wake_futex(this);
		} else {
			/*
			 * If key1 and key2 hash to the same bucket, no need to
			 * requeue.
			 */
			if (likely(head1 != &hb2->chain)) {
				plist_del(&this->list, &hb1->chain);
				plist_add(&this->list, &hb2->chain);
				this->lock_ptr = &hb2->lock;
#ifdef CONFIG_DEBUG_PI_LIST
				this->list.plist.lock = &hb2->lock;
#endif
			}
			this->key = key2;
			get_futex_key_refs(&key2);
			drop_count++;

			if (ret - nr_wake >= nr_requeue)
				break;
		}
	}

out_unlock:
	double_unlock_hb(hb1, hb2);

	/* drop_futex_key_refs() must be called outside the spinlocks. */
	while (--drop_count >= 0)
		drop_futex_key_refs(&key1);

out_put_keys:
	put_futex_key(fshared, &key2);
out_put_key1:
	put_futex_key(fshared, &key1);
out:
	return ret;
}
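
/*
 * Editor's note: the classic requeue user is condition-variable
 * broadcast. Waking every waiter would just make them stampede on the
 * mutex; instead one waiter is woken and the rest are moved onto the
 * mutex futex, to be released one by one by subsequent unlocks:
 *
 *	// syscall(SYS_futex, cond_word, FUTEX_CMP_REQUEUE, 1,
 *	//         (void *)(long)INT_MAX, mutex_word, cond_val);
 *	// wakes 1 waiter, requeues up to INT_MAX onto mutex_word,
 *	// and fails with -EAGAIN if *cond_word != cond_val.
 */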

/* The key must be already stored in q->key. */
static inline struct futex_hash_bucket *queue_lock(struct futex_q *q)
{
	struct futex_hash_bucket *hb;

	init_waitqueue_head(&q->waiter);

	get_futex_key_refs(&q->key);
	hb = hash_futex(&q->key);
	q->lock_ptr = &hb->lock;

	spin_lock(&hb->lock);
	return hb;
}

static inline void queue_me(struct futex_q *q, struct futex_hash_bucket *hb)
{
	int prio;

	/*
	 * The priority used to register this element is
	 * - either the real thread-priority for the real-time threads
	 * (i.e. threads with a priority lower than MAX_RT_PRIO)
	 * - or MAX_RT_PRIO for non-RT threads.
	 * Thus, all RT-threads are woken first in priority order, and
	 * the others are woken last, in FIFO order.
	 */
	prio = min(current->normal_prio, MAX_RT_PRIO);

	plist_node_init(&q->list, prio);
#ifdef CONFIG_DEBUG_PI_LIST
	q->list.plist.lock = &hb->lock;
#endif
	plist_add(&q->list, &hb->chain);
	q->task = current;
	spin_unlock(&hb->lock);
}

static inline void
queue_unlock(struct futex_q *q, struct futex_hash_bucket *hb)
{
	spin_unlock(&hb->lock);
	drop_futex_key_refs(&q->key);
}

/*
 * queue_me and unqueue_me must be called as a pair, each
 * exactly once.  They are called with the hashed spinlock held.
 */

/* Return 1 if we were still queued (ie. 0 means we were woken) */
static int unqueue_me(struct futex_q *q)
{
	spinlock_t *lock_ptr;
	int ret = 0;

	/* In the common case we don't take the spinlock, which is nice. */
retry:
	lock_ptr = q->lock_ptr;
	barrier();
	if (lock_ptr != NULL) {
		spin_lock(lock_ptr);
		/*
		 * q->lock_ptr can change between reading it and
		 * spin_lock(), causing us to take the wrong lock.  This
		 * corrects the race condition.
		 *
		 * Reasoning goes like this: if we have the wrong lock,
		 * q->lock_ptr must have changed (maybe several times)
		 * between reading it and the spin_lock().  It can
		 * change again after the spin_lock() but only if it was
		 * already changed before the spin_lock().  It cannot,
		 * however, change back to the original value.  Therefore
		 * we can detect whether we acquired the correct lock.
		 */
		if (unlikely(lock_ptr != q->lock_ptr)) {
			spin_unlock(lock_ptr);
			goto retry;
		}
		WARN_ON(plist_node_empty(&q->list));
		plist_del(&q->list, &q->list.plist);

		BUG_ON(q->pi_state);

		spin_unlock(lock_ptr);
		ret = 1;
	}

	drop_futex_key_refs(&q->key);
	return ret;
}

/*
 * PI futexes can not be requeued and must remove themselves from the
 * hash bucket. The hash bucket lock (i.e. lock_ptr) is held on entry
 * and dropped here.
 */
static void unqueue_me_pi(struct futex_q *q)
{
	WARN_ON(plist_node_empty(&q->list));
	plist_del(&q->list, &q->list.plist);

	BUG_ON(!q->pi_state);
	free_pi_state(q->pi_state);
	q->pi_state = NULL;

	spin_unlock(q->lock_ptr);

	drop_futex_key_refs(&q->key);
}

/*
 * Fixup the pi_state owner with the new owner.
 *
 * Must be called with hash bucket lock held and mm->sem held for non
 * private futexes.
 */
static int fixup_pi_state_owner(u32 __user *uaddr, struct futex_q *q,
				struct task_struct *newowner, int fshared)
{
	u32 newtid = task_pid_vnr(newowner) | FUTEX_WAITERS;
	struct futex_pi_state *pi_state = q->pi_state;
	struct task_struct *oldowner = pi_state->owner;
	u32 uval, curval, newval;
	int ret;

	/* Owner died? */
	if (!pi_state->owner)
		newtid |= FUTEX_OWNER_DIED;

	/*
	 * We are here either because we stole the rtmutex from the
	 * pending owner or we are the pending owner which failed to
	 * get the rtmutex. We have to replace the pending owner TID
	 * in the user space variable. This must be atomic as we have
	 * to preserve the owner died bit here.
	 *
	 * Note: We write the user space value _before_ changing the pi_state
	 * because we can fault here. Imagine swapped out pages or a fork
	 * that marked all the anonymous memory read-only for COW.
	 *
	 * Modifying pi_state _before_ the user space value would
	 * leave the pi_state in an inconsistent state when we fault
	 * here, because we need to drop the hash bucket lock to
	 * handle the fault. This might be observed in the PID check
	 * in lookup_pi_state.
	 */
retry:
	if (get_futex_value_locked(&uval, uaddr))
		goto handle_fault;

	while (1) {
		newval = (uval & FUTEX_OWNER_DIED) | newtid;

		curval = cmpxchg_futex_value_locked(uaddr, uval, newval);

		if (curval == -EFAULT)
			goto handle_fault;
		if (curval == uval)
			break;
		uval = curval;
	}

	/*
	 * We fixed up user space. Now we need to fix the pi_state
	 * itself.
	 */
	if (pi_state->owner != NULL) {
		spin_lock_irq(&pi_state->owner->pi_lock);
		WARN_ON(list_empty(&pi_state->list));
		list_del_init(&pi_state->list);
		spin_unlock_irq(&pi_state->owner->pi_lock);
	}

	pi_state->owner = newowner;

	spin_lock_irq(&newowner->pi_lock);
	WARN_ON(!list_empty(&pi_state->list));
	list_add(&pi_state->list, &newowner->pi_state_list);
	spin_unlock_irq(&newowner->pi_lock);
	return 0;

	/*
	 * To handle the page fault we need to drop the hash bucket
	 * lock here. That gives the other task (either the pending
	 * owner itself or the task which stole the rtmutex) the
	 * chance to try the fixup of the pi_state. So once we are
	 * back from handling the fault we need to check the pi_state
	 * after reacquiring the hash bucket lock and before trying to
	 * do another fixup. When the fixup has been done already we
	 * simply return.
	 */
handle_fault:
	spin_unlock(q->lock_ptr);

	ret = get_user(uval, uaddr);

	spin_lock(q->lock_ptr);

	/*
	 * Check if someone else fixed it for us:
	 */
	if (pi_state->owner != oldowner)
		return 0;

	if (ret)
		return ret;

	goto retry;
}

/*
 * In case we must use restart_block to restart a futex_wait,
 * we encode in the 'flags' shared capability
 */
#define FLAGS_SHARED		0x01
#define FLAGS_CLOCKRT		0x02
#define FLAGS_HAS_TIMEOUT	0x04

static long futex_wait_restart(struct restart_block *restart);

/**
 * fixup_owner() - Post lock pi_state and corner case management
 * @uaddr:	user address of the futex
 * @fshared:	whether the futex is shared (1) or not (0)
 * @q:		futex_q (contains pi_state and access to the rt_mutex)
 * @locked:	if the attempt to take the rt_mutex succeeded (1) or not (0)
 *
 * After attempting to lock an rt_mutex, this function is called to cleanup
 * the pi_state owner as well as handle race conditions that may allow us to
 * acquire the lock. Must be called with the hb lock held.
 *
 * Returns:
 *  1 - success, lock taken
 *  0 - success, lock not taken
 * <0 - on error (-EFAULT)
 */
static int fixup_owner(u32 __user *uaddr, int fshared, struct futex_q *q,
		       int locked)
{
	struct task_struct *owner;
	int ret = 0;

	if (locked) {
		/*
		 * Got the lock. We might not be the anticipated owner if we
		 * did a lock-steal - fix up the PI-state in that case:
		 */
		if (q->pi_state->owner != current)
			ret = fixup_pi_state_owner(uaddr, q, current, fshared);
		goto out;
	}

	/*
	 * Catch the rare case, where the lock was released when we were on the
	 * way back before we locked the hash bucket.
	 */
	if (q->pi_state->owner == current) {
		/*
		 * Try to get the rt_mutex now. This might fail as some other
		 * task acquired the rt_mutex after we removed ourselves from
		 * the rt_mutex waiters list.
		 */
		if (rt_mutex_trylock(&q->pi_state->pi_mutex)) {
			locked = 1;
			goto out;
		}

		/*
		 * pi_state is incorrect, some other task did a lock steal and
		 * we returned due to timeout or signal without taking the
		 * rt_mutex. Too late. We can access the rt_mutex_owner without
		 * locking, as the other task is now blocked on the hash bucket
		 * lock. Fix the state up.
		 */
		owner = rt_mutex_owner(&q->pi_state->pi_mutex);
		ret = fixup_pi_state_owner(uaddr, q, owner, fshared);
		goto out;
	}

	/*
	 * Paranoia check. If we did not take the lock, then we should not be
	 * the owner, nor the pending owner, of the rt_mutex.
	 */
	if (rt_mutex_owner(&q->pi_state->pi_mutex) == current)
		printk(KERN_ERR "fixup_owner: ret = %d pi-mutex: %p "
				"pi-state %p\n", ret,
				q->pi_state->pi_mutex.owner,
				q->pi_state->owner);

out:
	return ret ? ret : locked;
}

/**
 * futex_wait_queue_me() - queue_me() and wait for wakeup, timeout, or signal
 * @hb:		the futex hash bucket, must be locked by the caller
 * @q:		the futex_q to queue up on
 * @timeout:	the prepared hrtimer_sleeper, or null for no timeout
 * @wait:	the wait_queue to add to the futex_q after queueing in the hb
 */
static void futex_wait_queue_me(struct futex_hash_bucket *hb, struct futex_q *q,
				struct hrtimer_sleeper *timeout,
				wait_queue_t *wait)
{
	queue_me(q, hb);

	/*
	 * There might have been scheduling since the queue_me(), as we
	 * cannot hold a spinlock across the get_user() in case it
	 * faults, and we cannot just set TASK_INTERRUPTIBLE state when
	 * queueing ourselves into the futex hash.  This code thus has to
	 * rely on the futex_wake() code removing us from hash when it
	 * wakes us up.
	 */

	/* add_wait_queue is the barrier after __set_current_state. */
	__set_current_state(TASK_INTERRUPTIBLE);

	/*
	 * Add current as the futex_q waiter.  We don't remove ourselves from
	 * the wait_queue because we are the only user of it.
	 */
	add_wait_queue(&q->waiter, wait);

	/* Arm the timer */
	if (timeout) {
		hrtimer_start_expires(&timeout->timer, HRTIMER_MODE_ABS);
		if (!hrtimer_active(&timeout->timer))
			timeout->task = NULL;
	}

	/*
	 * !plist_node_empty() is safe here without any lock.
	 * q.lock_ptr != 0 is not safe, because of ordering against wakeup.
	 */
	if (likely(!plist_node_empty(&q->list))) {
		/*
		 * If the timer has already expired, current will already be
		 * flagged for rescheduling. Only call schedule if there
		 * is no timeout, or if it has yet to expire.
		 */
		if (!timeout || timeout->task)
			schedule();
	}
	__set_current_state(TASK_RUNNING);
}

static int futex_wait(u32 __user *uaddr, int fshared,
		      u32 val, ktime_t *abs_time, u32 bitset, int clockrt)
{
	struct hrtimer_sleeper timeout, *to = NULL;
	DECLARE_WAITQUEUE(wait, current);
	struct restart_block *restart;
	struct futex_hash_bucket *hb;
	struct futex_q q;
	u32 uval;
	int ret;

	if (!bitset)
		return -EINVAL;

	q.pi_state = NULL;
	q.bitset = bitset;

	if (abs_time) {
		to = &timeout;

		hrtimer_init_on_stack(&to->timer, clockrt ? CLOCK_REALTIME :
				      CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
		hrtimer_init_sleeper(to, current);
		hrtimer_set_expires_range_ns(&to->timer, *abs_time,
					     current->timer_slack_ns);
	}

retry:
	q.key = FUTEX_KEY_INIT;
	ret = get_futex_key(uaddr, fshared, &q.key);
	if (unlikely(ret != 0))
		goto out;

retry_private:
	hb = queue_lock(&q);

	/*
	 * Access the page AFTER the hash-bucket is locked.
	 * Order is important:
	 *
	 *   Userspace waiter: val = var; if (cond(val)) futex_wait(&var, val);
	 *   Userspace waker:  if (cond(var)) { var = new; futex_wake(&var); }
	 *
	 * The basic logical guarantee of a futex is that it blocks ONLY
	 * if cond(var) is known to be true at the time of blocking, for
	 * any cond.  If we queued after testing *uaddr, that would open
	 * a race condition where we could block indefinitely with
	 * cond(var) false, which would violate the guarantee.
	 *
	 * A consequence is that futex_wait() can return zero and absorb
	 * a wakeup when *uaddr != val on entry to the syscall.  This is
	 * rare, but normal.
	 *
	 * For shared futexes, we hold the mmap semaphore, so the mapping
	 * cannot have changed since we looked it up in get_futex_key.
	 */
	ret = get_futex_value_locked(&uval, uaddr);

	if (unlikely(ret)) {
		queue_unlock(&q, hb);

		ret = get_user(uval, uaddr);
		if (ret)
			goto out_put_key;

		if (!fshared)
			goto retry_private;

		put_futex_key(fshared, &q.key);
		goto retry;
	}
	ret = -EWOULDBLOCK;

	/* Only actually queue if *uaddr contained val.  */
	if (unlikely(uval != val)) {
		queue_unlock(&q, hb);
		goto out_put_key;
	}

	/* queue_me and wait for wakeup, timeout, or a signal. */
	futex_wait_queue_me(hb, &q, to, &wait);

	/* If we were woken (and unqueued), we succeeded, whatever. */
	ret = 0;
	if (!unqueue_me(&q))
		goto out_put_key;
	ret = -ETIMEDOUT;
	if (to && !to->task)
		goto out_put_key;

	/*
	 * We expect signal_pending(current), but another thread may
	 * have handled it for us already.
	 */
	ret = -ERESTARTSYS;
	if (!abs_time)
		goto out_put_key;

	restart = &current_thread_info()->restart_block;
	restart->fn = futex_wait_restart;
	restart->futex.uaddr = (u32 *)uaddr;
	restart->futex.val = val;
	restart->futex.time = abs_time->tv64;
	restart->futex.bitset = bitset;
	restart->futex.flags = FLAGS_HAS_TIMEOUT;

	if (fshared)
		restart->futex.flags |= FLAGS_SHARED;
	if (clockrt)
		restart->futex.flags |= FLAGS_CLOCKRT;

	ret = -ERESTART_RESTARTBLOCK;

out_put_key:
	put_futex_key(fshared, &q.key);
out:
	if (to) {
		hrtimer_cancel(&to->timer);
		destroy_hrtimer_on_stack(&to->timer);
	}
	return ret;
}
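
/*
 * Editor's sketch of the userspace side of the ordering contract
 * documented in futex_wait() above: read the futex word first, pass the
 * observed value to FUTEX_WAIT, and loop, since both EWOULDBLOCK and a
 * spurious 0 return are normal:
 *
 *	// while ((v = atomic_load(&futex)) == BUSY) {
 *	//	if (syscall(SYS_futex, &futex, FUTEX_WAIT, v, NULL) == -1 &&
 *	//	    errno != EWOULDBLOCK && errno != EINTR)
 *	//		break;	// real error
 *	// }
 */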


static long futex_wait_restart(struct restart_block *restart)
{
	u32 __user *uaddr = (u32 __user *)restart->futex.uaddr;
	int fshared = 0;
	ktime_t t, *tp = NULL;

	if (restart->futex.flags & FLAGS_HAS_TIMEOUT) {
		t.tv64 = restart->futex.time;
		tp = &t;
	}
	restart->fn = do_no_restart_syscall;
	if (restart->futex.flags & FLAGS_SHARED)
		fshared = 1;
	return (long)futex_wait(uaddr, fshared, restart->futex.val, tp,
				restart->futex.bitset,
				restart->futex.flags & FLAGS_CLOCKRT);
}


/*
 * Userspace tried a 0 -> TID atomic transition of the futex value
 * and failed. The kernel side here does the whole locking operation:
 * if there are waiters then it will block, it does PI, etc. (Due to
 * races the kernel might see a 0 value of the futex too.)
 */
static int futex_lock_pi(u32 __user *uaddr, int fshared,
			 int detect, ktime_t *time, int trylock)
{
	struct hrtimer_sleeper timeout, *to = NULL;
	struct futex_hash_bucket *hb;
	u32 uval;
	struct futex_q q;
	int res, ret;

	if (refill_pi_state_cache())
		return -ENOMEM;

	if (time) {
		to = &timeout;
		hrtimer_init_on_stack(&to->timer, CLOCK_REALTIME,
				      HRTIMER_MODE_ABS);
		hrtimer_init_sleeper(to, current);
		hrtimer_set_expires(&to->timer, *time);
	}

	q.pi_state = NULL;
retry:
	q.key = FUTEX_KEY_INIT;
	ret = get_futex_key(uaddr, fshared, &q.key);
	if (unlikely(ret != 0))
		goto out;

retry_private:
	hb = queue_lock(&q);

	ret = futex_lock_pi_atomic(uaddr, hb, &q.key, &q.pi_state, current);
	if (unlikely(ret)) {
		switch (ret) {
		case 1:
			/* We got the lock. */
			ret = 0;
			goto out_unlock_put_key;
		case -EFAULT:
			goto uaddr_faulted;
		case -EAGAIN:
			/*
			 * Task is exiting and we just wait for the
			 * exit to complete.
			 */
			queue_unlock(&q, hb);
			put_futex_key(fshared, &q.key);
			cond_resched();
			goto retry;
		default:
			goto out_unlock_put_key;
		}
	}

	/*
	 * Only actually queue now that the atomic ops are done:
	 */
	queue_me(&q, hb);

	WARN_ON(!q.pi_state);
	/*
	 * Block on the PI mutex:
	 */
	if (!trylock)
		ret = rt_mutex_timed_lock(&q.pi_state->pi_mutex, to, 1);
	else {
		ret = rt_mutex_trylock(&q.pi_state->pi_mutex);
		/* Fixup the trylock return value: */
		ret = ret ? 0 : -EWOULDBLOCK;
	}
1604
1605         spin_lock(q.lock_ptr);
1606         /*
1607          * Fixup the pi_state owner and possibly acquire the lock if we
1608          * haven't already.
1609          */
1610         res = fixup_owner(uaddr, fshared, &q, !ret);
1611         /*
1612          * If fixup_owner() returned an error, propagate that.  If it acquired
1613          * the lock, clear our -ETIMEDOUT or -EINTR.
1614          */
1615         if (res)
1616                 ret = (res < 0) ? res : 0;
1617
1618         /*
1619          * If fixup_owner() faulted and was unable to handle the fault, unlock
1620          * it and return the fault to userspace.
1621          */
1622         if (ret && (rt_mutex_owner(&q.pi_state->pi_mutex) == current))
1623                 rt_mutex_unlock(&q.pi_state->pi_mutex);
1624
1625         /* Unqueue and drop the lock */
1626         unqueue_me_pi(&q);
1627
1628         goto out;
1629
1630 out_unlock_put_key:
1631         queue_unlock(&q, hb);
1632
1633 out_put_key:
1634         put_futex_key(fshared, &q.key);
1635 out:
1636         if (to)
1637                 destroy_hrtimer_on_stack(&to->timer);
1638         return ret != -EINTR ? ret : -ERESTARTNOINTR;
1639
1640 uaddr_faulted:
1641         /*
1642          * We have to r/w *(int __user *)uaddr, and we have to modify it
1643          * atomically.  The atomic access faulted because it ran with the
1644          * hash-bucket lock held and pagefaults disabled, so drop the
1645          * lock, fault the page in via get_user() below, and retry the
1646          * whole operation.
1647          */
1648         queue_unlock(&q, hb);
1649
1650         ret = get_user(uval, uaddr);
1651         if (ret)
1652                 goto out_put_key;
1653
1654         if (!fshared)
1655                 goto retry_private;
1656
1657         put_futex_key(fshared, &q.key);
1658         goto retry;
1659 }
1660
1661
1662 /*
1663  * Userspace attempted a TID -> 0 atomic transition, and failed.
1664  * This is the in-kernel slowpath: we look up the PI state (if any),
1665  * and do the rt-mutex unlock.
1666  */
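/*
 * The matching userspace fast path, sketched under the same assumptions
 * as the futex_lock_pi() example above:
 *
 *	u32 tid = gettid();
 *	if (!atomic_compare_exchange_strong(&futex_word, &tid, 0))
 *		syscall(SYS_futex, &futex_word, FUTEX_UNLOCK_PI, 0, NULL, NULL, 0);
 *
 * The cmpxchg fails, and this slowpath runs, when a waiter has set
 * FUTEX_WAITERS in the futex word.
 */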
1667 static int futex_unlock_pi(u32 __user *uaddr, int fshared)
1668 {
1669         struct futex_hash_bucket *hb;
1670         struct futex_q *this, *next;
1671         u32 uval;
1672         struct plist_head *head;
1673         union futex_key key = FUTEX_KEY_INIT;
1674         int ret;
1675
1676 retry:
1677         if (get_user(uval, uaddr))
1678                 return -EFAULT;
1679         /*
1680          * We release only a lock we actually own:
1681          */
1682         if ((uval & FUTEX_TID_MASK) != task_pid_vnr(current))
1683                 return -EPERM;
1684
1685         ret = get_futex_key(uaddr, fshared, &key);
1686         if (unlikely(ret != 0))
1687                 goto out;
1688
1689         hb = hash_futex(&key);
1690         spin_lock(&hb->lock);
1691
1692         /*
1693          * To avoid races, try to do the TID -> 0 atomic transition
1694          * again. If it succeeds then we can return without waking
1695          * anyone else up:
1696          */
1697         if (!(uval & FUTEX_OWNER_DIED))
1698                 uval = cmpxchg_futex_value_locked(uaddr, task_pid_vnr(current), 0);
1699
1701         if (unlikely(uval == -EFAULT))
1702                 goto pi_faulted;
1703         /*
1704          * Rare case: we managed to release the lock atomically,
1705          * no need to wake anyone else up:
1706          */
1707         if (unlikely(uval == task_pid_vnr(current)))
1708                 goto out_unlock;
1709
1710         /*
1711          * Ok, other tasks may need to be woken up - check waiters
1712          * and do the wakeup if necessary:
1713          */
1714         head = &hb->chain;
1715
1716         plist_for_each_entry_safe(this, next, head, list) {
1717                 if (!match_futex(&this->key, &key))
1718                         continue;
1719                 ret = wake_futex_pi(uaddr, uval, this);
1720                 /*
1721                  * The atomic access to the futex value
1722                  * generated a pagefault, so retry the
1723                  * user-access and the wakeup:
1724                  */
1725                 if (ret == -EFAULT)
1726                         goto pi_faulted;
1727                 goto out_unlock;
1728         }
1729         /*
1730          * No waiters - kernel unlocks the futex:
1731          */
1732         if (!(uval & FUTEX_OWNER_DIED)) {
1733                 ret = unlock_futex_pi(uaddr, uval);
1734                 if (ret == -EFAULT)
1735                         goto pi_faulted;
1736         }
1737
1738 out_unlock:
1739         spin_unlock(&hb->lock);
1740         put_futex_key(fshared, &key);
1741
1742 out:
1743         return ret;
1744
1745 pi_faulted:
1746         /*
1747          * We have to r/w *(int __user *)uaddr, and we have to modify it
1748          * atomically.  The atomic access faulted because it ran with the
1749          * hash-bucket lock held and pagefaults disabled, so drop the
1750          * lock, fault the page in via get_user() below, and retry the
1751          * whole operation.
1752          */
1753         spin_unlock(&hb->lock);
1754         put_futex_key(fshared, &key);
1755
1756         ret = get_user(uval, uaddr);
1757         if (!ret)
1758                 goto retry;
1759
1760         return ret;
1761 }
1762
1763 /*
1764  * Support for robust futexes: the kernel cleans up held futexes at
1765  * thread exit time.
1766  *
1767  * Implementation: user-space maintains a per-thread list of locks it
1768  * is holding. Upon do_exit(), the kernel carefully walks this list,
1769  * and marks all locks that are owned by this thread with the
1770  * FUTEX_OWNER_DIED bit, and wakes up a waiter (if any). The list is
1771  * always manipulated with the lock held, so the list is private and
1772  * per-thread. Userspace also maintains a per-thread 'list_op_pending'
1773  * field, to allow the kernel to clean up if the thread dies after
1774  * acquiring the lock, but just before it could have added itself to
1775  * the list. There can only be one such pending lock.
1776  */
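/*
 * A hedged sketch of the userspace side (glibc does the equivalent
 * internally; 'struct my_mutex' is illustrative):
 *
 *	struct my_mutex { struct robust_list list; u32 futex; };
 *
 *	static struct robust_list_head head = {
 *		.list		 = { .next = &head.list },
 *		.futex_offset	 = offsetof(struct my_mutex, futex),
 *		.list_op_pending = NULL,
 *	};
 *	syscall(SYS_set_robust_list, &head, sizeof(head));
 *
 * With 'list' as the first member, futex_offset also equals the offset
 * from the list node to the futex word, which is what the walk in
 * exit_robust_list() adds to each entry.
 */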
1777
1778 /**
1779  * sys_set_robust_list - set the robust-futex list head of a task
1780  * @head: pointer to the list-head
1781  * @len: length of the list-head, as userspace expects
1782  */
1783 SYSCALL_DEFINE2(set_robust_list, struct robust_list_head __user *, head,
1784                 size_t, len)
1785 {
1786         if (!futex_cmpxchg_enabled)
1787                 return -ENOSYS;
1788         /*
1789          * The kernel knows only one size for now:
1790          */
1791         if (unlikely(len != sizeof(*head)))
1792                 return -EINVAL;
1793
1794         current->robust_list = head;
1795
1796         return 0;
1797 }
1798
1799 /**
1800  * sys_get_robust_list - get the robust-futex list head of a task
1801  * @pid: pid of the process [zero for current task]
1802  * @head_ptr: pointer to a list-head pointer, the kernel fills it in
1803  * @len_ptr: pointer to a length field, the kernel fills in the header size
1804  */
1805 SYSCALL_DEFINE3(get_robust_list, int, pid,
1806                 struct robust_list_head __user * __user *, head_ptr,
1807                 size_t __user *, len_ptr)
1808 {
1809         struct robust_list_head __user *head;
1810         unsigned long ret;
1811         const struct cred *cred = current_cred(), *pcred;
1812
1813         if (!futex_cmpxchg_enabled)
1814                 return -ENOSYS;
1815
1816         if (!pid)
1817                 head = current->robust_list;
1818         else {
1819                 struct task_struct *p;
1820
1821                 ret = -ESRCH;
1822                 rcu_read_lock();
1823                 p = find_task_by_vpid(pid);
1824                 if (!p)
1825                         goto err_unlock;
1826                 ret = -EPERM;
1827                 pcred = __task_cred(p);
1828                 if (cred->euid != pcred->euid &&
1829                     cred->euid != pcred->uid &&
1830                     !capable(CAP_SYS_PTRACE))
1831                         goto err_unlock;
1832                 head = p->robust_list;
1833                 rcu_read_unlock();
1834         }
1835
1836         if (put_user(sizeof(*head), len_ptr))
1837                 return -EFAULT;
1838         return put_user(head, head_ptr);
1839
1840 err_unlock:
1841         rcu_read_unlock();
1842
1843         return ret;
1844 }
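/*
 * Illustrative call, e.g. from a debugger or checkpoint tool (the
 * variables are hypothetical); pid 0 queries the calling task:
 *
 *	struct robust_list_head *head;
 *	size_t len;
 *	syscall(SYS_get_robust_list, 0, &head, &len);
 */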
1845
1846 /*
1847  * Process a futex-list entry, check whether it's owned by the
1848  * dying task, and do notification if so:
1849  */
1850 int handle_futex_death(u32 __user *uaddr, struct task_struct *curr, int pi)
1851 {
1852         u32 uval, nval, mval;
1853
1854 retry:
1855         if (get_user(uval, uaddr))
1856                 return -1;
1857
1858         if ((uval & FUTEX_TID_MASK) == task_pid_vnr(curr)) {
1859                 /*
1860                  * Ok, this dying thread is truly holding a futex
1861                  * of interest. Set the OWNER_DIED bit atomically
1862                  * via cmpxchg, and if the value had FUTEX_WAITERS
1863                  * set, wake up a waiter (if any). (We have to do a
1864                  * futex_wake() even if OWNER_DIED is already set -
1865                  * to handle the rare but possible case of recursive
1866                  * thread-death.) The rest of the cleanup is done in
1867                  * userspace.
1868                  */
1869                 mval = (uval & FUTEX_WAITERS) | FUTEX_OWNER_DIED;
1870                 nval = futex_atomic_cmpxchg_inatomic(uaddr, uval, mval);
1871
1872                 if (nval == -EFAULT)
1873                         return -1;
1874
1875                 if (nval != uval)
1876                         goto retry;
1877
1878                 /*
1879                  * Wake robust non-PI futexes here. The wakeup of
1880                  * PI futexes happens in exit_pi_state():
1881                  */
1882                 if (!pi && (uval & FUTEX_WAITERS))
1883                         futex_wake(uaddr, 1, 1, FUTEX_BITSET_MATCH_ANY);
1884         }
1885         return 0;
1886 }
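/*
 * Worked example for the transition above, with illustrative values:
 * if the dying task's TID is 0x1234 and a waiter is queued, uval is
 * FUTEX_WAITERS | 0x1234 == 0x80001234.  The cmpxchg installs
 * mval == FUTEX_WAITERS | FUTEX_OWNER_DIED == 0xc0000000, and the
 * waiters bit then triggers the futex_wake() so one waiter can observe
 * OWNER_DIED and recover the lock.
 */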
1887
1888 /*
1889  * Fetch a robust-list pointer. Bit 0 signals PI futexes:
1890  */
1891 static inline int fetch_robust_entry(struct robust_list __user **entry,
1892                                      struct robust_list __user * __user *head,
1893                                      int *pi)
1894 {
1895         unsigned long uentry;
1896
1897         if (get_user(uentry, (unsigned long __user *)head))
1898                 return -EFAULT;
1899
1900         *entry = (void __user *)(uentry & ~1UL);
1901         *pi = uentry & 1;
1902
1903         return 0;
1904 }
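/*
 * Example: a raw entry word of 0x0804b001 decodes to the list entry at
 * 0x0804b000 with *pi == 1 (a PI futex); 0x0804b000 itself would yield
 * the same entry with *pi == 0.
 */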
1905
1906 /*
1907  * Walk curr->robust_list (very carefully, it's a userspace list!)
1908  * and mark any locks found there dead, and notify any waiters.
1909  *
1910  * We silently return on any sign of list-walking problem.
1911  */
1912 void exit_robust_list(struct task_struct *curr)
1913 {
1914         struct robust_list_head __user *head = curr->robust_list;
1915         struct robust_list __user *entry, *next_entry, *pending;
1916         unsigned int limit = ROBUST_LIST_LIMIT, pi, next_pi, pip;
1917         unsigned long futex_offset;
1918         int rc;
1919
1920         if (!futex_cmpxchg_enabled)
1921                 return;
1922
1923         /*
1924          * Fetch the list head (which was registered earlier, via
1925          * sys_set_robust_list()):
1926          */
1927         if (fetch_robust_entry(&entry, &head->list.next, &pi))
1928                 return;
1929         /*
1930          * Fetch the relative futex offset:
1931          */
1932         if (get_user(futex_offset, &head->futex_offset))
1933                 return;
1934         /*
1935          * Fetch any possibly pending lock-add first, and handle it
1936          * if it exists:
1937          */
1938         if (fetch_robust_entry(&pending, &head->list_op_pending, &pip))
1939                 return;
1940
1941         next_entry = NULL;      /* avoid warning with gcc */
1942         while (entry != &head->list) {
1943                 /*
1944                  * Fetch the next entry in the list before calling
1945                  * handle_futex_death:
1946                  */
1947                 rc = fetch_robust_entry(&next_entry, &entry->next, &next_pi);
1948                 /*
1949                  * A pending lock might already be on the list, so
1950                  * don't process it twice:
1951                  */
1952                 if (entry != pending)
1953                         if (handle_futex_death((void __user *)entry + futex_offset,
1954                                                 curr, pi))
1955                                 return;
1956                 if (rc)
1957                         return;
1958                 entry = next_entry;
1959                 pi = next_pi;
1960                 /*
1961                  * Avoid excessively long or circular lists:
1962                  */
1963                 if (!--limit)
1964                         break;
1965
1966                 cond_resched();
1967         }
1968
1969         if (pending)
1970                 handle_futex_death((void __user *)pending + futex_offset,
1971                                    curr, pip);
1972 }
1973
1974 long do_futex(u32 __user *uaddr, int op, u32 val, ktime_t *timeout,
1975                 u32 __user *uaddr2, u32 val2, u32 val3)
1976 {
1977         int clockrt, ret = -ENOSYS;
1978         int cmd = op & FUTEX_CMD_MASK;
1979         int fshared = 0;
1980
1981         if (!(op & FUTEX_PRIVATE_FLAG))
1982                 fshared = 1;
1983
1984         clockrt = op & FUTEX_CLOCK_REALTIME;
1985         if (clockrt && cmd != FUTEX_WAIT_BITSET)
1986                 return -ENOSYS;
1987
1988         switch (cmd) {
1989         case FUTEX_WAIT:
1990                 val3 = FUTEX_BITSET_MATCH_ANY;
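                /* fall through: FUTEX_WAIT is FUTEX_WAIT_BITSET with an all-ones bitset */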
1991         case FUTEX_WAIT_BITSET:
1992                 ret = futex_wait(uaddr, fshared, val, timeout, val3, clockrt);
1993                 break;
1994         case FUTEX_WAKE:
1995                 val3 = FUTEX_BITSET_MATCH_ANY;
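                /* fall through: FUTEX_WAKE is FUTEX_WAKE_BITSET with an all-ones bitset */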
1996         case FUTEX_WAKE_BITSET:
1997                 ret = futex_wake(uaddr, fshared, val, val3);
1998                 break;
1999         case FUTEX_REQUEUE:
2000                 ret = futex_requeue(uaddr, fshared, uaddr2, val, val2, NULL);
2001                 break;
2002         case FUTEX_CMP_REQUEUE:
2003                 ret = futex_requeue(uaddr, fshared, uaddr2, val, val2, &val3);
2004                 break;
2005         case FUTEX_WAKE_OP:
2006                 ret = futex_wake_op(uaddr, fshared, uaddr2, val, val2, val3);
2007                 break;
2008         case FUTEX_LOCK_PI:
2009                 if (futex_cmpxchg_enabled)
2010                         ret = futex_lock_pi(uaddr, fshared, val, timeout, 0);
2011                 break;
2012         case FUTEX_UNLOCK_PI:
2013                 if (futex_cmpxchg_enabled)
2014                         ret = futex_unlock_pi(uaddr, fshared);
2015                 break;
2016         case FUTEX_TRYLOCK_PI:
2017                 if (futex_cmpxchg_enabled)
2018                         ret = futex_lock_pi(uaddr, fshared, 0, timeout, 1);
2019                 break;
2020         default:
2021                 ret = -ENOSYS;
2022         }
2023         return ret;
2024 }
2025
2026
2027 SYSCALL_DEFINE6(futex, u32 __user *, uaddr, int, op, u32, val,
2028                 struct timespec __user *, utime, u32 __user *, uaddr2,
2029                 u32, val3)
2030 {
2031         struct timespec ts;
2032         ktime_t t, *tp = NULL;
2033         u32 val2 = 0;
2034         int cmd = op & FUTEX_CMD_MASK;
2035
2036         if (utime && (cmd == FUTEX_WAIT || cmd == FUTEX_LOCK_PI ||
2037                       cmd == FUTEX_WAIT_BITSET)) {
2038                 if (copy_from_user(&ts, utime, sizeof(ts)) != 0)
2039                         return -EFAULT;
2040                 if (!timespec_valid(&ts))
2041                         return -EINVAL;
2042
2043                 t = timespec_to_ktime(ts);
2044                 if (cmd == FUTEX_WAIT)
2045                         t = ktime_add_safe(ktime_get(), t);
2046                 tp = &t;
2047         }
2048         /*
2049          * 'utime' carries the number of waiters to requeue for FUTEX_REQUEUE and
2050          * FUTEX_CMP_REQUEUE, and the number of waiters to wake on uaddr2 for FUTEX_WAKE_OP.
2051          */
2052         if (cmd == FUTEX_REQUEUE || cmd == FUTEX_CMP_REQUEUE ||
2053             cmd == FUTEX_WAKE_OP)
2054                 val2 = (u32) (unsigned long) utime;
2055
2056         return do_futex(uaddr, op, val, tp, uaddr2, val2, val3);
2057 }
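/*
 * A hedged userspace sketch of the timeout convention handled above
 * ('futex_word' and 'expected' are illustrative).  FUTEX_WAIT takes a
 * relative timeout, which is converted into an absolute expiry with
 * ktime_add_safe(); FUTEX_WAIT_BITSET already passes an absolute time.
 * To wait for at most two seconds:
 *
 *	struct timespec rel = { .tv_sec = 2 };
 *	syscall(SYS_futex, &futex_word, FUTEX_WAIT, expected, &rel, NULL, 0);
 */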
2058
2059 static int __init futex_init(void)
2060 {
2061         u32 curval;
2062         int i;
2063
2064         /*
2065          * This call is expected to fail, and we want it to.  Some arch
2066          * implementations only detect at runtime whether
2067          * futex_atomic_cmpxchg_inatomic() is functional.  We want to
2068          * know that before we enter any of the complex code paths,
2069          * and we also want to prevent registration of robust lists
2070          * in that case.  NULL is guaranteed to fault: a functional
2071          * implementation returns -EFAULT, while a non-functional
2072          * one returns -ENOSYS.
2073          */
2074         curval = cmpxchg_futex_value_locked(NULL, 0, 0);
2075         if (curval == -EFAULT)
2076                 futex_cmpxchg_enabled = 1;
2077
2078         for (i = 0; i < ARRAY_SIZE(futex_queues); i++) {
2079                 plist_head_init(&futex_queues[i].chain, &futex_queues[i].lock);
2080                 spin_lock_init(&futex_queues[i].lock);
2081         }
2082
2083         return 0;
2084 }
2085 __initcall(futex_init);