[PATCH] pi-futex: futex_wake() lockup fix
[linux-2.6.git] / kernel / futex.c
1 /*
2  *  Fast Userspace Mutexes (which I call "Futexes!").
3  *  (C) Rusty Russell, IBM 2002
4  *
5  *  Generalized futexes, futex requeueing, misc fixes by Ingo Molnar
6  *  (C) Copyright 2003 Red Hat Inc, All Rights Reserved
7  *
8  *  Removed page pinning, fix privately mapped COW pages and other cleanups
9  *  (C) Copyright 2003, 2004 Jamie Lokier
10  *
11  *  Robust futex support started by Ingo Molnar
12  *  (C) Copyright 2006 Red Hat Inc, All Rights Reserved
13  *  Thanks to Thomas Gleixner for suggestions, analysis and fixes.
14  *
15  *  PI-futex support started by Ingo Molnar and Thomas Gleixner
16  *  Copyright (C) 2006 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
17  *  Copyright (C) 2006 Timesys Corp., Thomas Gleixner <tglx@timesys.com>
18  *
19  *  Thanks to Ben LaHaise for yelling "hashed waitqueues" loudly
20  *  enough at me, Linus for the original (flawed) idea, Matthew
21  *  Kirkwood for proof-of-concept implementation.
22  *
23  *  "The futexes are also cursed."
24  *  "But they come in a choice of three flavours!"
25  *
26  *  This program is free software; you can redistribute it and/or modify
27  *  it under the terms of the GNU General Public License as published by
28  *  the Free Software Foundation; either version 2 of the License, or
29  *  (at your option) any later version.
30  *
31  *  This program is distributed in the hope that it will be useful,
32  *  but WITHOUT ANY WARRANTY; without even the implied warranty of
33  *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
34  *  GNU General Public License for more details.
35  *
36  *  You should have received a copy of the GNU General Public License
37  *  along with this program; if not, write to the Free Software
38  *  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
39  */
40 #include <linux/slab.h>
41 #include <linux/poll.h>
42 #include <linux/fs.h>
43 #include <linux/file.h>
44 #include <linux/jhash.h>
45 #include <linux/init.h>
46 #include <linux/futex.h>
47 #include <linux/mount.h>
48 #include <linux/pagemap.h>
49 #include <linux/syscalls.h>
50 #include <linux/signal.h>
51 #include <asm/futex.h>
52
53 #include "rtmutex_common.h"
54
55 #define FUTEX_HASHBITS (CONFIG_BASE_SMALL ? 4 : 8)
56
57 /*
58  * Futexes are matched on equal values of this key.
59  * The key type depends on whether it's a shared or private mapping.
60  * Don't rearrange members without looking at hash_futex().
61  *
62  * offset is aligned to a multiple of sizeof(u32) (== 4) by definition.
63  * We set bit 0 to indicate if it's an inode-based key.
64  */
65 union futex_key {
66         struct {
67                 unsigned long pgoff;
68                 struct inode *inode;
69                 int offset;
70         } shared;
71         struct {
72                 unsigned long address;
73                 struct mm_struct *mm;
74                 int offset;
75         } private;
76         struct {
77                 unsigned long word;
78                 void *ptr;
79                 int offset;
80         } both;
81 };
82
83 /*
84  * Priority Inheritance state:
85  */
86 struct futex_pi_state {
87         /*
88          * list of 'owned' pi_state instances - these have to be
89          * cleaned up in do_exit() if the task exits prematurely:
90          */
91         struct list_head list;
92
93         /*
94          * The PI object:
95          */
96         struct rt_mutex pi_mutex;
97
98         struct task_struct *owner;
99         atomic_t refcount;
100
101         union futex_key key;
102 };
103
104 /*
105  * We use this hashed waitqueue instead of a normal wait_queue_t, so
106  * we can wake only the relevant ones (hashed queues may be shared).
107  *
108  * A futex_q has a woken state, just like tasks have TASK_RUNNING.
109  * It is considered woken when list_empty(&q->list) || q->lock_ptr == 0.
110  * The order of wakeup is always to make the first condition true, then
111  * wake up q->waiters, then make the second condition true.
112  */
113 struct futex_q {
114         struct list_head list;
115         wait_queue_head_t waiters;
116
117         /* Which hash list lock to use: */
118         spinlock_t *lock_ptr;
119
120         /* Key which the futex is hashed on: */
121         union futex_key key;
122
123         /* For fd, sigio sent using these: */
124         int fd;
125         struct file *filp;
126
127         /* Optional priority inheritance state: */
128         struct futex_pi_state *pi_state;
129         struct task_struct *task;
130 };
131
132 /*
133  * Split the global futex_lock into every hash list lock.
134  */
135 struct futex_hash_bucket {
136         spinlock_t              lock;
137         struct list_head        chain;
138 };
139
140 static struct futex_hash_bucket futex_queues[1<<FUTEX_HASHBITS];
141
142 /* Futex-fs vfsmount entry: */
143 static struct vfsmount *futex_mnt;
144
145 /*
146  * We hash on the keys returned from get_futex_key (see below).
147  */
148 static struct futex_hash_bucket *hash_futex(union futex_key *key)
149 {
150         u32 hash = jhash2((u32*)&key->both.word,
151                           (sizeof(key->both.word)+sizeof(key->both.ptr))/4,
152                           key->both.offset);
153         return &futex_queues[hash & ((1 << FUTEX_HASHBITS)-1)];
154 }
155
156 /*
157  * Return 1 if two futex_keys are equal, 0 otherwise.
158  */
159 static inline int match_futex(union futex_key *key1, union futex_key *key2)
160 {
161         return (key1->both.word == key2->both.word
162                 && key1->both.ptr == key2->both.ptr
163                 && key1->both.offset == key2->both.offset);
164 }
165
166 /*
167  * Get parameters which are the keys for a futex.
168  *
169  * For shared mappings, it's (page->index, vma->vm_file->f_dentry->d_inode,
170  * offset_within_page).  For private mappings, it's (uaddr, current->mm).
171  * We can usually work out the index without swapping in the page.
172  *
173  * Returns: 0, or negative error code.
174  * The key words are stored in *key on success.
175  *
176  * Should be called with &current->mm->mmap_sem but NOT any spinlocks.
177  */
178 static int get_futex_key(u32 __user *uaddr, union futex_key *key)
179 {
180         unsigned long address = (unsigned long)uaddr;
181         struct mm_struct *mm = current->mm;
182         struct vm_area_struct *vma;
183         struct page *page;
184         int err;
185
186         /*
187          * The futex address must be "naturally" aligned.
188          */
189         key->both.offset = address % PAGE_SIZE;
190         if (unlikely((key->both.offset % sizeof(u32)) != 0))
191                 return -EINVAL;
192         address -= key->both.offset;
193
194         /*
195          * The futex is hashed differently depending on whether
196          * it's in a shared or private mapping.  So check vma first.
197          */
198         vma = find_extend_vma(mm, address);
199         if (unlikely(!vma))
200                 return -EFAULT;
201
202         /*
203          * Permissions.
204          */
205         if (unlikely((vma->vm_flags & (VM_IO|VM_READ)) != VM_READ))
206                 return (vma->vm_flags & VM_IO) ? -EPERM : -EACCES;
207
208         /*
209          * Private mappings are handled in a simple way.
210          *
211          * NOTE: When userspace waits on a MAP_SHARED mapping, even if
212          * it's a read-only handle, it's expected that futexes attach to
213          * the object not the particular process.  Therefore we use
214          * VM_MAYSHARE here, not VM_SHARED which is restricted to shared
215          * mappings of _writable_ handles.
216          */
217         if (likely(!(vma->vm_flags & VM_MAYSHARE))) {
218                 key->private.mm = mm;
219                 key->private.address = address;
220                 return 0;
221         }
222
223         /*
224          * Linear file mappings are also simple.
225          */
226         key->shared.inode = vma->vm_file->f_dentry->d_inode;
227         key->both.offset++; /* Bit 0 of offset indicates inode-based key. */
228         if (likely(!(vma->vm_flags & VM_NONLINEAR))) {
229                 key->shared.pgoff = (((address - vma->vm_start) >> PAGE_SHIFT)
230                                      + vma->vm_pgoff);
231                 return 0;
232         }
233
234         /*
235          * We could walk the page table to read the non-linear
236          * pte, and get the page index without fetching the page
237          * from swap.  But that's a lot of code to duplicate here
238          * for a rare case, so we simply fetch the page.
239          */
240         err = get_user_pages(current, mm, address, 1, 0, 0, &page, NULL);
241         if (err >= 0) {
242                 key->shared.pgoff =
243                         page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT);
244                 put_page(page);
245                 return 0;
246         }
247         return err;
248 }
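/*
 * Editor's sketch (not part of the patch): the shared/private split
 * above is what lets two processes synchronize through one futex word
 * in a MAP_SHARED mapping -- both ends resolve to the same
 * (inode, pgoff, offset) key. A minimal, hedged userspace
 * illustration; error handling is omitted and the sleep() is only a
 * crude ordering aid for the example:
 */
#include <stdint.h>
#include <sys/mman.h>
#include <sys/syscall.h>
#include <sys/wait.h>
#include <unistd.h>
#include <linux/futex.h>

int main(void)
{
        uint32_t *f = mmap(NULL, sizeof(*f), PROT_READ | PROT_WRITE,
                           MAP_SHARED | MAP_ANONYMOUS, -1, 0);

        *f = 0;
        if (fork() == 0) {
                /* Child: sleep as long as *f still holds 0. */
                syscall(SYS_futex, f, FUTEX_WAIT, 0, NULL, NULL, 0);
                _exit(0);
        }
        sleep(1);
        *f = 1;                                         /* change the value first ... */
        syscall(SYS_futex, f, FUTEX_WAKE, 1, NULL, NULL, 0); /* ... then wake */
        wait(NULL);
        return 0;
}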
249
250 /*
251  * Take a reference to the resource addressed by a key.
252  * Can be called while holding spinlocks.
253  *
254  * NOTE: mmap_sem MUST be held between get_futex_key() and calling this
255  * function, if it is called at all.  mmap_sem keeps key->shared.inode valid.
256  */
257 static inline void get_key_refs(union futex_key *key)
258 {
259         if (key->both.ptr != 0) {
260                 if (key->both.offset & 1)
261                         atomic_inc(&key->shared.inode->i_count);
262                 else
263                         atomic_inc(&key->private.mm->mm_count);
264         }
265 }
266
267 /*
268  * Drop a reference to the resource addressed by a key.
269  * The hash bucket spinlock must not be held.
270  */
271 static void drop_key_refs(union futex_key *key)
272 {
273         if (key->both.ptr != 0) {
274                 if (key->both.offset & 1)
275                         iput(key->shared.inode);
276                 else
277                         mmdrop(key->private.mm);
278         }
279 }
280
281 static inline int get_futex_value_locked(u32 *dest, u32 __user *from)
282 {
283         int ret;
284
285         inc_preempt_count();
286         ret = __copy_from_user_inatomic(dest, from, sizeof(u32));
287         dec_preempt_count();
288
289         return ret ? -EFAULT : 0;
290 }
291
292 /*
293  * Fault handling. Called with current->mm->mmap_sem held.
294  */
295 static int futex_handle_fault(unsigned long address, int attempt)
296 {
297         struct vm_area_struct * vma;
298         struct mm_struct *mm = current->mm;
299
300         if (attempt >= 2 || !(vma = find_vma(mm, address)) ||
301             vma->vm_start > address || !(vma->vm_flags & VM_WRITE))
302                 return -EFAULT;
303
304         switch (handle_mm_fault(mm, vma, address, 1)) {
305         case VM_FAULT_MINOR:
306                 current->min_flt++;
307                 break;
308         case VM_FAULT_MAJOR:
309                 current->maj_flt++;
310                 break;
311         default:
312                 return -EFAULT;
313         }
314         return 0;
315 }
316
317 /*
318  * PI code:
319  */
320 static int refill_pi_state_cache(void)
321 {
322         struct futex_pi_state *pi_state;
323
324         if (likely(current->pi_state_cache))
325                 return 0;
326
327         pi_state = kmalloc(sizeof(*pi_state), GFP_KERNEL);
328
329         if (!pi_state)
330                 return -ENOMEM;
331
332         memset(pi_state, 0, sizeof(*pi_state));
333         INIT_LIST_HEAD(&pi_state->list);
334         /* pi_mutex gets initialized later */
335         pi_state->owner = NULL;
336         atomic_set(&pi_state->refcount, 1);
337
338         current->pi_state_cache = pi_state;
339
340         return 0;
341 }
342
343 static struct futex_pi_state * alloc_pi_state(void)
344 {
345         struct futex_pi_state *pi_state = current->pi_state_cache;
346
347         WARN_ON(!pi_state);
348         current->pi_state_cache = NULL;
349
350         return pi_state;
351 }
352
353 static void free_pi_state(struct futex_pi_state *pi_state)
354 {
355         if (!atomic_dec_and_test(&pi_state->refcount))
356                 return;
357
358         /*
359          * If pi_state->owner is NULL, the owner is most probably dying
360          * and has cleaned up the pi_state already
361          */
362         if (pi_state->owner) {
363                 spin_lock_irq(&pi_state->owner->pi_lock);
364                 list_del_init(&pi_state->list);
365                 spin_unlock_irq(&pi_state->owner->pi_lock);
366
367                 rt_mutex_proxy_unlock(&pi_state->pi_mutex, pi_state->owner);
368         }
369
370         if (current->pi_state_cache)
371                 kfree(pi_state);
372         else {
373                 /*
374                  * pi_state->list is already empty.
375                  * clear pi_state->owner.
376                  * refcount is at 0 - put it back to 1.
377                  */
378                 pi_state->owner = NULL;
379                 atomic_set(&pi_state->refcount, 1);
380                 current->pi_state_cache = pi_state;
381         }
382 }
383
384 /*
385  * Look up the task based on what TID userspace gave us.
386  * We don't trust it.
387  */
388 static struct task_struct * futex_find_get_task(pid_t pid)
389 {
390         struct task_struct *p;
391
392         read_lock(&tasklist_lock);
393         p = find_task_by_pid(pid);
394         if (!p)
395                 goto out_unlock;
396         if ((current->euid != p->euid) && (current->euid != p->uid)) {
397                 p = NULL;
398                 goto out_unlock;
399         }
400         if (p->state == EXIT_ZOMBIE || p->exit_state == EXIT_ZOMBIE) {
401                 p = NULL;
402                 goto out_unlock;
403         }
404         get_task_struct(p);
405 out_unlock:
406         read_unlock(&tasklist_lock);
407
408         return p;
409 }
410
411 /*
412  * This task is holding PI mutexes at exit time => bad.
413  * Kernel cleans up PI-state, but userspace is likely hosed.
414  * (Robust-futex cleanup is separate and might save the day for userspace.)
415  */
416 void exit_pi_state_list(struct task_struct *curr)
417 {
418         struct futex_hash_bucket *hb;
419         struct list_head *next, *head = &curr->pi_state_list;
420         struct futex_pi_state *pi_state;
421         union futex_key key;
422
423         /*
424          * We are a ZOMBIE and nobody can enqueue itself on
425          * pi_state_list anymore, but we have to be careful
426          * about waiters unqueueing themselves.
427          */
428         spin_lock_irq(&curr->pi_lock);
429         while (!list_empty(head)) {
430
431                 next = head->next;
432                 pi_state = list_entry(next, struct futex_pi_state, list);
433                 key = pi_state->key;
434                 spin_unlock_irq(&curr->pi_lock);
435
436                 hb = hash_futex(&key);
437                 spin_lock(&hb->lock);
438
439                 spin_lock_irq(&curr->pi_lock);
440                 if (head->next != next) {
441                         spin_unlock(&hb->lock);
442                         continue;
443                 }
444
445                 list_del_init(&pi_state->list);
446
447                 WARN_ON(pi_state->owner != curr);
448
449                 pi_state->owner = NULL;
450                 spin_unlock_irq(&curr->pi_lock);
451
452                 rt_mutex_unlock(&pi_state->pi_mutex);
453
454                 spin_unlock(&hb->lock);
455
456                 spin_lock_irq(&curr->pi_lock);
457         }
458         spin_unlock_irq(&curr->pi_lock);
459 }
460
461 static int
462 lookup_pi_state(u32 uval, struct futex_hash_bucket *hb, struct futex_q *me)
463 {
464         struct futex_pi_state *pi_state = NULL;
465         struct futex_q *this, *next;
466         struct list_head *head;
467         struct task_struct *p;
468         pid_t pid;
469
470         head = &hb->chain;
471
472         list_for_each_entry_safe(this, next, head, list) {
473                 if (match_futex (&this->key, &me->key)) {
474                         /*
475                          * Another waiter already exists - bump up
476                          * the refcount and return its pi_state:
477                          */
478                         pi_state = this->pi_state;
479                         atomic_inc(&pi_state->refcount);
480                         me->pi_state = pi_state;
481
482                         return 0;
483                 }
484         }
485
486         /*
487          * We are the first waiter - try to look up the real owner and
488          * attach the new pi_state to it:
489          */
490         pid = uval & FUTEX_TID_MASK;
491         p = futex_find_get_task(pid);
492         if (!p)
493                 return -ESRCH;
494
495         pi_state = alloc_pi_state();
496
497         /*
498          * Initialize the pi_mutex in locked state and make 'p'
499          * the owner of it:
500          */
501         rt_mutex_init_proxy_locked(&pi_state->pi_mutex, p);
502
503         /* Store the key for possible exit cleanups: */
504         pi_state->key = me->key;
505
506         spin_lock_irq(&p->pi_lock);
507         list_add(&pi_state->list, &p->pi_state_list);
508         pi_state->owner = p;
509         spin_unlock_irq(&p->pi_lock);
510
511         put_task_struct(p);
512
513         me->pi_state = pi_state;
514
515         return 0;
516 }
517
518 /*
519  * The hash bucket lock must be held when this is called.
520  * Afterwards, the futex_q must not be accessed.
521  */
522 static void wake_futex(struct futex_q *q)
523 {
524         list_del_init(&q->list);
525         if (q->filp)
526                 send_sigio(&q->filp->f_owner, q->fd, POLL_IN);
527         /*
528          * The lock in wake_up_all() is a crucial memory barrier after the
529          * list_del_init() and also before assigning to q->lock_ptr.
530          */
531         wake_up_all(&q->waiters);
532         /*
533          * The waiting task can free the futex_q as soon as this is written,
534          * without taking any locks.  This must come last.
535          *
536          * A memory barrier is required here to prevent the following store
537          * to lock_ptr from getting ahead of the wakeup. Clearing the lock
538          * at the end of wake_up_all() does not prevent this store from
539          * moving.
540          */
541         wmb();
542         q->lock_ptr = NULL;
543 }
544
545 static int wake_futex_pi(u32 __user *uaddr, u32 uval, struct futex_q *this)
546 {
547         struct task_struct *new_owner;
548         struct futex_pi_state *pi_state = this->pi_state;
549         u32 curval, newval;
550
551         if (!pi_state)
552                 return -EINVAL;
553
554         new_owner = rt_mutex_next_owner(&pi_state->pi_mutex);
555
556         /*
557          * This happens when we have stolen the lock and the original
558          * pending owner did not enqueue itself back on the rt_mutex.
559          * That's not a tragedy: it just tells us that a lock waiter
560          * is in flight. We make the futex_q waiter the pending owner.
561          */
562         if (!new_owner)
563                 new_owner = this->task;
564
565         /*
566          * We pass it to the next owner. (The WAITERS bit is always
567          * kept enabled while there is PI state around. We must also
568          * preserve the owner died bit.)
569          */
570         newval = (uval & FUTEX_OWNER_DIED) | FUTEX_WAITERS | new_owner->pid;
571
572         inc_preempt_count();
573         curval = futex_atomic_cmpxchg_inatomic(uaddr, uval, newval);
574         dec_preempt_count();
575
576         if (curval == -EFAULT)
577                 return -EFAULT;
578         if (curval != uval)
579                 return -EINVAL;
580
581         list_del_init(&pi_state->list);
582         list_add(&pi_state->list, &new_owner->pi_state_list);
583         pi_state->owner = new_owner;
584         rt_mutex_unlock(&pi_state->pi_mutex);
585
586         return 0;
587 }
588
589 static int unlock_futex_pi(u32 __user *uaddr, u32 uval)
590 {
591         u32 oldval;
592
593         /*
594          * There is no waiter, so we unlock the futex. The owner-died
595          * bit need not be preserved here. We are the owner:
596          */
597         inc_preempt_count();
598         oldval = futex_atomic_cmpxchg_inatomic(uaddr, uval, 0);
599         dec_preempt_count();
600
601         if (oldval == -EFAULT)
602                 return oldval;
603         if (oldval != uval)
604                 return -EAGAIN;
605
606         return 0;
607 }
608
609 /*
610  * Wake up all waiters hashed on the physical page that is mapped
611  * to this virtual address:
612  */
613 static int futex_wake(u32 __user *uaddr, int nr_wake)
614 {
615         struct futex_hash_bucket *hb;
616         struct futex_q *this, *next;
617         struct list_head *head;
618         union futex_key key;
619         int ret;
620
621         down_read(&current->mm->mmap_sem);
622
623         ret = get_futex_key(uaddr, &key);
624         if (unlikely(ret != 0))
625                 goto out;
626
627         hb = hash_futex(&key);
628         spin_lock(&hb->lock);
629         head = &hb->chain;
630
631         list_for_each_entry_safe(this, next, head, list) {
632                 if (match_futex (&this->key, &key)) {
633                         if (this->pi_state) {
634                                 ret = -EINVAL;
635                                 break;
636                         }
637                         wake_futex(this);
638                         if (++ret >= nr_wake)
639                                 break;
640                 }
641         }
642
643         spin_unlock(&hb->lock);
644 out:
645         up_read(&current->mm->mmap_sem);
646         return ret;
647 }
648
649 /*
650  * Wake up waiters on uaddr1, atomically operate on *uaddr2 and,
651  * depending on the outcome of that operation, wake waiters on
652  * uaddr2 as well:
652  */
653 static int
654 futex_wake_op(u32 __user *uaddr1, u32 __user *uaddr2,
655               int nr_wake, int nr_wake2, int op)
656 {
657         union futex_key key1, key2;
658         struct futex_hash_bucket *hb1, *hb2;
659         struct list_head *head;
660         struct futex_q *this, *next;
661         int ret, op_ret, attempt = 0;
662
663 retryfull:
664         down_read(&current->mm->mmap_sem);
665
666         ret = get_futex_key(uaddr1, &key1);
667         if (unlikely(ret != 0))
668                 goto out;
669         ret = get_futex_key(uaddr2, &key2);
670         if (unlikely(ret != 0))
671                 goto out;
672
673         hb1 = hash_futex(&key1);
674         hb2 = hash_futex(&key2);
675
676 retry:
677         if (hb1 < hb2)
678                 spin_lock(&hb1->lock);
679         spin_lock(&hb2->lock);
680         if (hb1 > hb2)
681                 spin_lock(&hb1->lock);
682
683         op_ret = futex_atomic_op_inuser(op, uaddr2);
684         if (unlikely(op_ret < 0)) {
685                 u32 dummy;
686
687                 spin_unlock(&hb1->lock);
688                 if (hb1 != hb2)
689                         spin_unlock(&hb2->lock);
690
691 #ifndef CONFIG_MMU
692                 /*
693                  * we don't get EFAULT from MMU faults if we don't have an MMU,
694                  * but we might get them from range checking
695                  */
696                 ret = op_ret;
697                 goto out;
698 #endif
699
700                 if (unlikely(op_ret != -EFAULT)) {
701                         ret = op_ret;
702                         goto out;
703                 }
704
705                 /*
706                  * futex_atomic_op_inuser needs to both read and write
707                  * *(int __user *)uaddr2, but we can't modify it
708                  * non-atomically.  Therefore, if get_user below is not
709                  * enough, we need to handle the fault ourselves, while
710                  * still holding the mmap_sem.
711                  */
712                 if (attempt++) {
713                         if (futex_handle_fault((unsigned long)uaddr2,
714                                                attempt))
715                                 goto out;
716                         goto retry;
717                 }
718
719                 /*
720                  * If we would have faulted, release mmap_sem,
721                  * fault it in and start all over again.
722                  */
723                 up_read(&current->mm->mmap_sem);
724
725                 ret = get_user(dummy, uaddr2);
726                 if (ret)
727                         return ret;
728
729                 goto retryfull;
730         }
731
732         head = &hb1->chain;
733
734         list_for_each_entry_safe(this, next, head, list) {
735                 if (match_futex (&this->key, &key1)) {
736                         wake_futex(this);
737                         if (++ret >= nr_wake)
738                                 break;
739                 }
740         }
741
742         if (op_ret > 0) {
743                 head = &hb2->chain;
744
745                 op_ret = 0;
746                 list_for_each_entry_safe(this, next, head, list) {
747                         if (match_futex (&this->key, &key2)) {
748                                 wake_futex(this);
749                                 if (++op_ret >= nr_wake2)
750                                         break;
751                         }
752                 }
753                 ret += op_ret;
754         }
755
756         spin_unlock(&hb1->lock);
757         if (hb1 != hb2)
758                 spin_unlock(&hb2->lock);
759 out:
760         up_read(&current->mm->mmap_sem);
761         return ret;
762 }
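/*
 * Editor's sketch (not part of the patch): how userspace drives the
 * path above. The encoded op is applied to *b under the hash-bucket
 * locks; waiters on b are woken only if the comparison on the old
 * value succeeds. FUTEX_OP() and its constants come from
 * <linux/futex.h>; nr_wake2 travels in the (otherwise unused)
 * timeout argument slot:
 */
#include <stdint.h>
#include <sys/syscall.h>
#include <unistd.h>
#include <linux/futex.h>

/* Wake one waiter on a; atomically do *b += 1 and, iff *b was 0
 * beforehand, also wake one waiter on b. */
static long wake_op_example(uint32_t *a, uint32_t *b)
{
        return syscall(SYS_futex, a, FUTEX_WAKE_OP, 1,
                       (void *)(long)1,         /* nr_wake2 */
                       b, FUTEX_OP(FUTEX_OP_ADD, 1, FUTEX_OP_CMP_EQ, 0));
}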
763
764 /*
765  * Requeue all waiters hashed on one physical page to another
766  * physical page.
767  */
768 static int futex_requeue(u32 __user *uaddr1, u32 __user *uaddr2,
769                          int nr_wake, int nr_requeue, u32 *cmpval)
770 {
771         union futex_key key1, key2;
772         struct futex_hash_bucket *hb1, *hb2;
773         struct list_head *head1;
774         struct futex_q *this, *next;
775         int ret, drop_count = 0;
776
777  retry:
778         down_read(&current->mm->mmap_sem);
779
780         ret = get_futex_key(uaddr1, &key1);
781         if (unlikely(ret != 0))
782                 goto out;
783         ret = get_futex_key(uaddr2, &key2);
784         if (unlikely(ret != 0))
785                 goto out;
786
787         hb1 = hash_futex(&key1);
788         hb2 = hash_futex(&key2);
789
790         if (hb1 < hb2)
791                 spin_lock(&hb1->lock);
792         spin_lock(&hb2->lock);
793         if (hb1 > hb2)
794                 spin_lock(&hb1->lock);
795
796         if (likely(cmpval != NULL)) {
797                 u32 curval;
798
799                 ret = get_futex_value_locked(&curval, uaddr1);
800
801                 if (unlikely(ret)) {
802                         spin_unlock(&hb1->lock);
803                         if (hb1 != hb2)
804                                 spin_unlock(&hb2->lock);
805
806                         /*
807                          * If we would have faulted, release mmap_sem, fault
808                          * it in and start all over again.
809                          */
810                         up_read(&current->mm->mmap_sem);
811
812                         ret = get_user(curval, uaddr1);
813
814                         if (!ret)
815                                 goto retry;
816
817                         return ret;
818                 }
819                 if (curval != *cmpval) {
820                         ret = -EAGAIN;
821                         goto out_unlock;
822                 }
823         }
824
825         head1 = &hb1->chain;
826         list_for_each_entry_safe(this, next, head1, list) {
827                 if (!match_futex (&this->key, &key1))
828                         continue;
829                 if (++ret <= nr_wake) {
830                         wake_futex(this);
831                 } else {
832                         /*
833                          * If key1 and key2 hash to the same bucket, no need to
834                          * requeue.
835                          */
836                         if (likely(head1 != &hb2->chain)) {
837                                 list_move_tail(&this->list, &hb2->chain);
838                                 this->lock_ptr = &hb2->lock;
839                         }
840                         this->key = key2;
841                         get_key_refs(&key2);
842                         drop_count++;
843
844                         if (ret - nr_wake >= nr_requeue)
845                                 break;
846                 }
847         }
848
849 out_unlock:
850         spin_unlock(&hb1->lock);
851         if (hb1 != hb2)
852                 spin_unlock(&hb2->lock);
853
854         /* drop_key_refs() must be called outside the spinlocks. */
855         while (--drop_count >= 0)
856                 drop_key_refs(&key1);
857
858 out:
859         up_read(&current->mm->mmap_sem);
860         return ret;
861 }
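/*
 * Editor's sketch (not part of the patch): the classic user of the
 * requeue path is a condvar "broadcast" -- wake one waiter and move
 * the rest onto the mutex word so they don't all stampede. The
 * cmpval guard makes the kernel return -EAGAIN (see above) if *cond
 * changed under the caller. nr_requeue rides in the timeout slot:
 */
#include <limits.h>
#include <stdint.h>
#include <sys/syscall.h>
#include <unistd.h>
#include <linux/futex.h>

static long broadcast(uint32_t *cond, uint32_t *mutex, uint32_t cmpval)
{
        return syscall(SYS_futex, cond, FUTEX_CMP_REQUEUE, 1,
                       (void *)(long)INT_MAX,   /* requeue all the rest */
                       mutex, cmpval);
}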
862
863 /* The key must be already stored in q->key. */
864 static inline struct futex_hash_bucket *
865 queue_lock(struct futex_q *q, int fd, struct file *filp)
866 {
867         struct futex_hash_bucket *hb;
868
869         q->fd = fd;
870         q->filp = filp;
871
872         init_waitqueue_head(&q->waiters);
873
874         get_key_refs(&q->key);
875         hb = hash_futex(&q->key);
876         q->lock_ptr = &hb->lock;
877
878         spin_lock(&hb->lock);
879         return hb;
880 }
881
882 static inline void __queue_me(struct futex_q *q, struct futex_hash_bucket *hb)
883 {
884         list_add_tail(&q->list, &hb->chain);
885         q->task = current;
886         spin_unlock(&hb->lock);
887 }
888
889 static inline void
890 queue_unlock(struct futex_q *q, struct futex_hash_bucket *hb)
891 {
892         spin_unlock(&hb->lock);
893         drop_key_refs(&q->key);
894 }
895
896 /*
897  * queue_me and unqueue_me must be called as a pair, each
898  * exactly once.  They are called with the hashed spinlock held.
899  */
900
901 /* The key must be already stored in q->key. */
902 static void queue_me(struct futex_q *q, int fd, struct file *filp)
903 {
904         struct futex_hash_bucket *hb;
905
906         hb = queue_lock(q, fd, filp);
907         __queue_me(q, hb);
908 }
909
910 /* Return 1 if we were still queued (ie. 0 means we were woken) */
911 static int unqueue_me(struct futex_q *q)
912 {
913         spinlock_t *lock_ptr;
914         int ret = 0;
915
916         /* In the common case we don't take the spinlock, which is nice. */
917  retry:
918         lock_ptr = q->lock_ptr;
919         if (lock_ptr != 0) {
920                 spin_lock(lock_ptr);
921                 /*
922                  * q->lock_ptr can change between reading it and
923                  * spin_lock(), causing us to take the wrong lock.  This
924                  * corrects the race condition.
925                  *
926                  * Reasoning goes like this: if we have the wrong lock,
927                  * q->lock_ptr must have changed (maybe several times)
928                  * between reading it and the spin_lock().  It can
929                  * change again after the spin_lock() but only if it was
930                  * already changed before the spin_lock().  It cannot,
931                  * however, change back to the original value.  Therefore
932                  * we can detect whether we acquired the correct lock.
933                  */
934                 if (unlikely(lock_ptr != q->lock_ptr)) {
935                         spin_unlock(lock_ptr);
936                         goto retry;
937                 }
938                 WARN_ON(list_empty(&q->list));
939                 list_del(&q->list);
940
941                 BUG_ON(q->pi_state);
942
943                 spin_unlock(lock_ptr);
944                 ret = 1;
945         }
946
947         drop_key_refs(&q->key);
948         return ret;
949 }
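/*
 * Editor's note (not part of the patch) -- the interleaving the retry
 * loop above guards against, spelled out against futex_requeue():
 *
 *      waiter                          waker
 *      ------                          -----
 *      lock_ptr = q->lock_ptr
 *                                      spin_lock(&hb1->lock)
 *                                      q->lock_ptr = &hb2->lock
 *                                      spin_unlock(&hb1->lock)
 *      spin_lock(lock_ptr)             (stale: hb1, not hb2!)
 *      lock_ptr != q->lock_ptr
 *        -> unlock and retry
 *
 * A futex_q only moves while its current bucket lock is held, so once
 * the waiter holds the lock named by q->lock_ptr, the pointer can no
 * longer change and the re-check terminates.
 */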
950
951 /*
952  * PI futexes cannot be requeued and must remove themselves from the
953  * hash bucket. The hash bucket lock is held on entry and dropped here.
954  */
955 static void unqueue_me_pi(struct futex_q *q, struct futex_hash_bucket *hb)
956 {
957         WARN_ON(list_empty(&q->list));
958         list_del(&q->list);
959
960         BUG_ON(!q->pi_state);
961         free_pi_state(q->pi_state);
962         q->pi_state = NULL;
963
964         spin_unlock(&hb->lock);
965
966         drop_key_refs(&q->key);
967 }
968
969 static int futex_wait(u32 __user *uaddr, u32 val, unsigned long time)
970 {
971         struct task_struct *curr = current;
972         DECLARE_WAITQUEUE(wait, curr);
973         struct futex_hash_bucket *hb;
974         struct futex_q q;
975         u32 uval;
976         int ret;
977
978         q.pi_state = NULL;
979  retry:
980         down_read(&curr->mm->mmap_sem);
981
982         ret = get_futex_key(uaddr, &q.key);
983         if (unlikely(ret != 0))
984                 goto out_release_sem;
985
986         hb = queue_lock(&q, -1, NULL);
987
988         /*
989          * Access the page AFTER the futex is queued.
990          * Order is important:
991          *
992          *   Userspace waiter: val = var; if (cond(val)) futex_wait(&var, val);
993          *   Userspace waker:  if (cond(var)) { var = new; futex_wake(&var); }
994          *
995          * The basic logical guarantee of a futex is that it blocks ONLY
996          * if cond(var) is known to be true at the time of blocking, for
997          * any cond.  If we queued after testing *uaddr, that would open
998          * a race condition where we could block indefinitely with
999          * cond(var) false, which would violate the guarantee.
1000          *
1001          * A consequence is that futex_wait() can return zero and absorb
1002          * a wakeup when *uaddr != val on entry to the syscall.  This is
1003          * rare, but normal.
1004          *
1005          * We hold the mmap semaphore, so the mapping cannot have changed
1006          * since we looked it up in get_futex_key.
1007          */
1008         ret = get_futex_value_locked(&uval, uaddr);
1009
1010         if (unlikely(ret)) {
1011                 queue_unlock(&q, hb);
1012
1013                 /*
1014                  * If we would have faulted, release mmap_sem, fault it in and
1015                  * start all over again.
1016                  */
1017                 up_read(&curr->mm->mmap_sem);
1018
1019                 ret = get_user(uval, uaddr);
1020
1021                 if (!ret)
1022                         goto retry;
1023                 return ret;
1024         }
1025         ret = -EWOULDBLOCK;
1026         if (uval != val)
1027                 goto out_unlock_release_sem;
1028
1029         /* Only actually queue if *uaddr contained val.  */
1030         __queue_me(&q, hb);
1031
1032         /*
1033          * Now the futex is queued and we have checked the data, we
1034          * don't want to hold mmap_sem while we sleep.
1035          */
1036         up_read(&curr->mm->mmap_sem);
1037
1038         /*
1039          * There might have been scheduling since the queue_me(), as we
1040          * cannot hold a spinlock across the get_user() in case it
1041          * faults, and we cannot just set TASK_INTERRUPTIBLE state when
1042          * queueing ourselves into the futex hash.  This code thus has to
1043          * rely on the futex_wake() code removing us from hash when it
1044          * wakes us up.
1045          */
1046
1047         /* add_wait_queue is the barrier after __set_current_state. */
1048         __set_current_state(TASK_INTERRUPTIBLE);
1049         add_wait_queue(&q.waiters, &wait);
1050         /*
1051          * !list_empty() is safe here without any lock.
1052          * q.lock_ptr != 0 is not safe, because of ordering against wakeup.
1053          */
1054         if (likely(!list_empty(&q.list)))
1055                 time = schedule_timeout(time);
1056         __set_current_state(TASK_RUNNING);
1057
1058         /*
1059          * NOTE: we don't remove ourselves from the waitqueue because
1060          * we are the only user of it.
1061          */
1062
1063         /* If we were woken (and unqueued), we succeeded, whatever. */
1064         if (!unqueue_me(&q))
1065                 return 0;
1066         if (time == 0)
1067                 return -ETIMEDOUT;
1068         /*
1069          * We expect signal_pending(current), but another thread may
1070          * have handled it for us already.
1071          */
1072         return -EINTR;
1073
1074  out_unlock_release_sem:
1075         queue_unlock(&q, hb);
1076
1077  out_release_sem:
1078         up_read(&curr->mm->mmap_sem);
1079         return ret;
1080 }
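/*
 * Editor's sketch (not part of the patch): the userspace half of the
 * wait protocol described above. Because futex_wait() may return 0
 * after absorbing a wakeup, and -EWOULDBLOCK/-EINTR are routine, the
 * caller must loop and re-check the condition. (A production version
 * would read *addr with an atomic/volatile access.)
 */
#include <errno.h>
#include <stdint.h>
#include <sys/syscall.h>
#include <unistd.h>
#include <linux/futex.h>

/* Block until *addr no longer holds 'expected'. */
static void wait_while(uint32_t *addr, uint32_t expected)
{
        while (*addr == expected) {
                long err = syscall(SYS_futex, addr, FUTEX_WAIT,
                                   expected, NULL, NULL, 0);

                if (err == -1 && errno != EAGAIN && errno != EINTR)
                        break;          /* unexpected error: give up */
        }
}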
1081
1082 /*
1083  * Userspace tried a 0 -> TID atomic transition of the futex value
1084  * and failed. The kernel side here does the whole locking operation:
1085  * if there are waiters then it will block, it does PI, etc. (Due to
1086  * races the kernel might see a 0 value of the futex too.)
1087  */
1088 static int do_futex_lock_pi(u32 __user *uaddr, int detect, int trylock,
1089                             struct hrtimer_sleeper *to)
1090 {
1091         struct task_struct *curr = current;
1092         struct futex_hash_bucket *hb;
1093         u32 uval, newval, curval;
1094         struct futex_q q;
1095         int ret, attempt = 0;
1096
1097         if (refill_pi_state_cache())
1098                 return -ENOMEM;
1099
1100         q.pi_state = NULL;
1101  retry:
1102         down_read(&curr->mm->mmap_sem);
1103
1104         ret = get_futex_key(uaddr, &q.key);
1105         if (unlikely(ret != 0))
1106                 goto out_release_sem;
1107
1108         hb = queue_lock(&q, -1, NULL);
1109
1110  retry_locked:
1111         /*
1112          * To avoid races, we attempt to take the lock here again
1113          * (by doing a 0 -> TID atomic cmpxchg), while holding all
1114          * the locks. It will most likely not succeed.
1115          */
1116         newval = current->pid;
1117
1118         inc_preempt_count();
1119         curval = futex_atomic_cmpxchg_inatomic(uaddr, 0, newval);
1120         dec_preempt_count();
1121
1122         if (unlikely(curval == -EFAULT))
1123                 goto uaddr_faulted;
1124
1125         /* We own the lock already */
1126         if (unlikely((curval & FUTEX_TID_MASK) == current->pid)) {
1127                 if (!detect && 0)
1128                         force_sig(SIGKILL, current);
1129                 ret = -EDEADLK;
1130                 goto out_unlock_release_sem;
1131         }
1132
1133         /*
1134          * Surprise - we got the lock. Just return
1135          * to userspace:
1136          */
1137         if (unlikely(!curval))
1138                 goto out_unlock_release_sem;
1139
1140         uval = curval;
1141         newval = uval | FUTEX_WAITERS;
1142
1143         inc_preempt_count();
1144         curval = futex_atomic_cmpxchg_inatomic(uaddr, uval, newval);
1145         dec_preempt_count();
1146
1147         if (unlikely(curval == -EFAULT))
1148                 goto uaddr_faulted;
1149         if (unlikely(curval != uval))
1150                 goto retry_locked;
1151
1152         /*
1153          * We don't have the lock. Look up the PI state (or create it if
1154          * we are the first waiter):
1155          */
1156         ret = lookup_pi_state(uval, hb, &q);
1157
1158         if (unlikely(ret)) {
1159                 /*
1160                  * There were no waiters and the owner task lookup
1161                  * failed. When the OWNER_DIED bit is set, then we
1162                  * know that this is a robust futex and we actually
1163                  * take the lock. This is safe as we are protected by
1164                  * the hash bucket lock. We also set the waiters bit
1165                  * unconditionally here, to simplify glibc handling of
1166                  * multiple tasks racing to acquire the lock and
1167                  * cleanup the problems which were left by the dead
1168                  * owner.
1169                  */
1170                 if (curval & FUTEX_OWNER_DIED) {
1171                         uval = newval;
1172                         newval = current->pid |
1173                                 FUTEX_OWNER_DIED | FUTEX_WAITERS;
1174
1175                         inc_preempt_count();
1176                         curval = futex_atomic_cmpxchg_inatomic(uaddr,
1177                                                                uval, newval);
1178                         dec_preempt_count();
1179
1180                         if (unlikely(curval == -EFAULT))
1181                                 goto uaddr_faulted;
1182                         if (unlikely(curval != uval))
1183                                 goto retry_locked;
1184                         ret = 0;
1185                 }
1186                 goto out_unlock_release_sem;
1187         }
1188
1189         /*
1190          * Only actually queue now that the atomic ops are done:
1191          */
1192         __queue_me(&q, hb);
1193
1194         /*
1195          * Now the futex is queued and we have checked the data, we
1196          * don't want to hold mmap_sem while we sleep.
1197          */
1198         up_read(&curr->mm->mmap_sem);
1199
1200         WARN_ON(!q.pi_state);
1201         /*
1202          * Block on the PI mutex:
1203          */
1204         if (!trylock)
1205                 ret = rt_mutex_timed_lock(&q.pi_state->pi_mutex, to, 1);
1206         else {
1207                 ret = rt_mutex_trylock(&q.pi_state->pi_mutex);
1208                 /* Fixup the trylock return value: */
1209                 ret = ret ? 0 : -EWOULDBLOCK;
1210         }
1211
1212         down_read(&curr->mm->mmap_sem);
1213         spin_lock(q.lock_ptr);
1214
1215         /*
1216          * Got the lock. We might not be the anticipated owner if we
1217          * did a lock-steal - fix up the PI-state in that case.
1218          */
1219         if (!ret && q.pi_state->owner != curr) {
1220                 u32 newtid = current->pid | FUTEX_WAITERS;
1221
1222                 /* Owner died? */
1223                 if (q.pi_state->owner != NULL) {
1224                         spin_lock_irq(&q.pi_state->owner->pi_lock);
1225                         list_del_init(&q.pi_state->list);
1226                         spin_unlock_irq(&q.pi_state->owner->pi_lock);
1227                 } else
1228                         newtid |= FUTEX_OWNER_DIED;
1229
1230                 q.pi_state->owner = current;
1231
1232                 spin_lock_irq(&current->pi_lock);
1233                 list_add(&q.pi_state->list, &current->pi_state_list);
1234                 spin_unlock_irq(&current->pi_lock);
1235
1236                 /* Unqueue and drop the lock */
1237                 unqueue_me_pi(&q, hb);
1238                 up_read(&curr->mm->mmap_sem);
1239                 /*
1240                  * We own it, so we have to replace the pending owner
1241                  * TID. This must be atomic, as we have to preserve the
1242                  * owner-died bit here.
1243                  */
1244                 ret = get_user(uval, uaddr);
1245                 while (!ret) {
1246                         newval = (uval & FUTEX_OWNER_DIED) | newtid;
1247                         curval = futex_atomic_cmpxchg_inatomic(uaddr,
1248                                                                uval, newval);
1249                         if (curval == -EFAULT)
1250                                 ret = -EFAULT;
1251                         if (curval == uval)
1252                                 break;
1253                         uval = curval;
1254                 }
1255         } else {
1256                 /*
1257                  * Catch the rare case, where the lock was released
1258                  * when we were on the way back before we locked
1259                  * the hash bucket.
1260                  */
1261                 if (ret && q.pi_state->owner == curr) {
1262                         if (rt_mutex_trylock(&q.pi_state->pi_mutex))
1263                                 ret = 0;
1264                 }
1265                 /* Unqueue and drop the lock */
1266                 unqueue_me_pi(&q, hb);
1267                 up_read(&curr->mm->mmap_sem);
1268         }
1269
1270         if (!detect && ret == -EDEADLK && 0)
1271                 force_sig(SIGKILL, current);
1272
1273         return ret;
1274
1275  out_unlock_release_sem:
1276         queue_unlock(&q, hb);
1277
1278  out_release_sem:
1279         up_read(&curr->mm->mmap_sem);
1280         return ret;
1281
1282  uaddr_faulted:
1283         /*
1284          * We have to r/w  *(int __user *)uaddr, but we can't modify it
1285          * non-atomically.  Therefore, if get_user below is not
1286          * enough, we need to handle the fault ourselves, while
1287          * still holding the mmap_sem.
1288          */
1289         if (attempt++) {
1290                 if (futex_handle_fault((unsigned long)uaddr, attempt))
1291                         goto out_unlock_release_sem;
1292
1293                 goto retry_locked;
1294         }
1295
1296         queue_unlock(&q, hb);
1297         up_read(&curr->mm->mmap_sem);
1298
1299         ret = get_user(uval, uaddr);
1300         if (!ret && (uval != -EFAULT))
1301                 goto retry;
1302
1303         return ret;
1304 }
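/*
 * Editor's sketch (not part of the patch): the userspace fast path
 * whose failure lands in do_futex_lock_pi() above. A hedged sketch
 * using the GCC __sync builtins; a real implementation (glibc) also
 * handles FUTEX_WAITERS, FUTEX_OWNER_DIED and timeouts:
 */
#include <stdint.h>
#include <sys/syscall.h>
#include <unistd.h>
#include <linux/futex.h>

static void pi_lock(uint32_t *lock)
{
        uint32_t tid = syscall(SYS_gettid);

        /* 0 -> TID: uncontended acquisition, no kernel entry. */
        if (__sync_bool_compare_and_swap(lock, 0, tid))
                return;
        /* Contended: the kernel queues us and applies PI boosting. */
        syscall(SYS_futex, lock, FUTEX_LOCK_PI, 0, NULL, NULL, 0);
}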
1305
1306 /*
1307  * Restart handler
1308  */
1309 static long futex_lock_pi_restart(struct restart_block *restart)
1310 {
1311         struct hrtimer_sleeper timeout, *to = NULL;
1312         int ret;
1313
1314         restart->fn = do_no_restart_syscall;
1315
1316         if (restart->arg2 || restart->arg3) {
1317                 to = &timeout;
1318                 hrtimer_init(&to->timer, CLOCK_REALTIME, HRTIMER_ABS);
1319                 hrtimer_init_sleeper(to, current);
1320                 to->timer.expires.tv64 = ((u64)restart->arg1 << 32) |
1321                         (u64) restart->arg0;
1322         }
1323
1324         pr_debug("lock_pi restart: %p, %d (%d)\n",
1325                  (u32 __user *)restart->arg0, current->pid);
1326
1327         ret = do_futex_lock_pi((u32 __user *)restart->arg0, restart->arg1,
1328                                0, to);
1329
1330         if (ret != -EINTR)
1331                 return ret;
1332
1333         restart->fn = futex_lock_pi_restart;
1334
1335         /* The other values are filled in */
1336         return -ERESTART_RESTARTBLOCK;
1337 }
1338
1339 /*
1340  * Called from the syscall entry below.
1341  */
1342 static int futex_lock_pi(u32 __user *uaddr, int detect, unsigned long sec,
1343                          long nsec, int trylock)
1344 {
1345         struct hrtimer_sleeper timeout, *to = NULL;
1346         struct restart_block *restart;
1347         int ret;
1348
1349         if (sec != MAX_SCHEDULE_TIMEOUT) {
1350                 to = &timeout;
1351                 hrtimer_init(&to->timer, CLOCK_REALTIME, HRTIMER_ABS);
1352                 hrtimer_init_sleeper(to, current);
1353                 to->timer.expires = ktime_set(sec, nsec);
1354         }
1355
1356         ret = do_futex_lock_pi(uaddr, detect, trylock, to);
1357
1358         if (ret != -EINTR)
1359                 return ret;
1360
1361         pr_debug("lock_pi interrupted: %p, %d (%d)\n", uaddr, current->pid);
1362
1363         restart = &current_thread_info()->restart_block;
1364         restart->fn = futex_lock_pi_restart;
1365         restart->arg0 = (unsigned long) uaddr;
1366         restart->arg1 = detect;
1367         if (to) {
1368                 restart->arg2 = to->timer.expires.tv64 & 0xFFFFFFFF;
1369                 restart->arg3 = to->timer.expires.tv64 >> 32;
1370         } else
1371                 restart->arg2 = restart->arg3 = 0;
1372
1373         return -ERESTART_RESTARTBLOCK;
1374 }
1375
1376 /*
1377  * Userspace attempted a TID -> 0 atomic transition, and failed.
1378  * This is the in-kernel slowpath: we look up the PI state (if any),
1379  * and do the rt-mutex unlock.
1380  */
1381 static int futex_unlock_pi(u32 __user *uaddr)
1382 {
1383         struct futex_hash_bucket *hb;
1384         struct futex_q *this, *next;
1385         u32 uval;
1386         struct list_head *head;
1387         union futex_key key;
1388         int ret, attempt = 0;
1389
1390 retry:
1391         if (get_user(uval, uaddr))
1392                 return -EFAULT;
1393         /*
1394          * We release only a lock we actually own:
1395          */
1396         if ((uval & FUTEX_TID_MASK) != current->pid)
1397                 return -EPERM;
1398         /*
1399          * First take all the futex related locks:
1400          */
1401         down_read(&current->mm->mmap_sem);
1402
1403         ret = get_futex_key(uaddr, &key);
1404         if (unlikely(ret != 0))
1405                 goto out;
1406
1407         hb = hash_futex(&key);
1408         spin_lock(&hb->lock);
1409
1410 retry_locked:
1411         /*
1412          * To avoid races, try to do the TID -> 0 atomic transition
1413          * again. If it succeeds then we can return without waking
1414          * anyone else up:
1415          */
1416         inc_preempt_count();
1417         uval = futex_atomic_cmpxchg_inatomic(uaddr, current->pid, 0);
1418         dec_preempt_count();
1419
1420         if (unlikely(uval == -EFAULT))
1421                 goto pi_faulted;
1422         /*
1423          * Rare case: we managed to release the lock atomically,
1424          * no need to wake anyone else up:
1425          */
1426         if (unlikely(uval == current->pid))
1427                 goto out_unlock;
1428
1429         /*
1430          * Ok, other tasks may need to be woken up - check waiters
1431          * and do the wakeup if necessary:
1432          */
1433         head = &hb->chain;
1434
1435         list_for_each_entry_safe(this, next, head, list) {
1436                 if (!match_futex (&this->key, &key))
1437                         continue;
1438                 ret = wake_futex_pi(uaddr, uval, this);
1439                 /*
1440                  * The atomic access to the futex value
1441                  * generated a pagefault, so retry the
1442                  * user-access and the wakeup:
1443                  */
1444                 if (ret == -EFAULT)
1445                         goto pi_faulted;
1446                 goto out_unlock;
1447         }
1448         /*
1449          * No waiters - kernel unlocks the futex:
1450          */
1451         ret = unlock_futex_pi(uaddr, uval);
1452         if (ret == -EFAULT)
1453                 goto pi_faulted;
1454
1455 out_unlock:
1456         spin_unlock(&hb->lock);
1457 out:
1458         up_read(&current->mm->mmap_sem);
1459
1460         return ret;
1461
1462 pi_faulted:
1463         /*
1464          * We have to r/w  *(int __user *)uaddr, but we can't modify it
1465          * non-atomically.  Therefore, if get_user below is not
1466          * enough, we need to handle the fault ourselves, while
1467          * still holding the mmap_sem.
1468          */
1469         if (attempt++) {
1470                 if (futex_handle_fault((unsigned long)uaddr, attempt))
1471                         goto out_unlock;
1472
1473                 goto retry_locked;
1474         }
1475
1476         spin_unlock(&hb->lock);
1477         up_read(&current->mm->mmap_sem);
1478
1479         ret = get_user(uval, uaddr);
1480         if (!ret && (uval != -EFAULT))
1481                 goto retry;
1482
1483         return ret;
1484 }
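/*
 * Editor's sketch (not part of the patch): the matching userspace
 * unlock. The TID -> 0 transition only succeeds while no waiter bits
 * are set; otherwise we must let futex_unlock_pi() above hand the
 * lock to the top waiter:
 */
#include <stdint.h>
#include <sys/syscall.h>
#include <unistd.h>
#include <linux/futex.h>

static void pi_unlock(uint32_t *lock)
{
        uint32_t tid = syscall(SYS_gettid);

        if (__sync_bool_compare_and_swap(lock, tid, 0))
                return;         /* no waiters: done in userspace */
        syscall(SYS_futex, lock, FUTEX_UNLOCK_PI, 0, NULL, NULL, 0);
}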
1485
1486 static int futex_close(struct inode *inode, struct file *filp)
1487 {
1488         struct futex_q *q = filp->private_data;
1489
1490         unqueue_me(q);
1491         kfree(q);
1492
1493         return 0;
1494 }
1495
1496 /* This is one-shot: once it's gone off you need a new fd */
1497 static unsigned int futex_poll(struct file *filp,
1498                                struct poll_table_struct *wait)
1499 {
1500         struct futex_q *q = filp->private_data;
1501         int ret = 0;
1502
1503         poll_wait(filp, &q->waiters, wait);
1504
1505         /*
1506          * list_empty() is safe here without any lock.
1507          * q->lock_ptr != 0 is not safe, because of ordering against wakeup.
1508          */
1509         if (list_empty(&q->list))
1510                 ret = POLLIN | POLLRDNORM;
1511
1512         return ret;
1513 }
1514
1515 static struct file_operations futex_fops = {
1516         .release        = futex_close,
1517         .poll           = futex_poll,
1518 };
1519
1520 /*
1521  * Signal allows caller to avoid the race which would occur if they
1522  * set the sigio stuff up afterwards.
1523  */
1524 static int futex_fd(u32 __user *uaddr, int signal)
1525 {
1526         struct futex_q *q;
1527         struct file *filp;
1528         int ret, err;
1529
1530         ret = -EINVAL;
1531         if (!valid_signal(signal))
1532                 goto out;
1533
1534         ret = get_unused_fd();
1535         if (ret < 0)
1536                 goto out;
1537         filp = get_empty_filp();
1538         if (!filp) {
1539                 put_unused_fd(ret);
1540                 ret = -ENFILE;
1541                 goto out;
1542         }
1543         filp->f_op = &futex_fops;
1544         filp->f_vfsmnt = mntget(futex_mnt);
1545         filp->f_dentry = dget(futex_mnt->mnt_root);
1546         filp->f_mapping = filp->f_dentry->d_inode->i_mapping;
1547
1548         if (signal) {
1549                 err = f_setown(filp, current->pid, 1);
1550                 if (err < 0) {
1551                         goto error;
1552                 }
1553                 filp->f_owner.signum = signal;
1554         }
1555
1556         q = kmalloc(sizeof(*q), GFP_KERNEL);
1557         if (!q) {
1558                 err = -ENOMEM;
1559                 goto error;
1560         }
1561         q->pi_state = NULL;
1562
1563         down_read(&current->mm->mmap_sem);
1564         err = get_futex_key(uaddr, &q->key);
1565
1566         if (unlikely(err != 0)) {
1567                 up_read(&current->mm->mmap_sem);
1568                 kfree(q);
1569                 goto error;
1570         }
1571
1572         /*
1573          * queue_me() must be called before releasing mmap_sem, because
1574          * key->shared.inode needs to be referenced while holding it.
1575          */
1576         filp->private_data = q;
1577
1578         queue_me(q, ret, filp);
1579         up_read(&current->mm->mmap_sem);
1580
1581         /* Now we map fd to filp, so userspace can access it */
1582         fd_install(ret, filp);
1583 out:
1584         return ret;
1585 error:
1586         put_unused_fd(ret);
1587         put_filp(filp);
1588         ret = err;
1589         goto out;
1590 }
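/*
 * Editor's sketch (not part of the patch): FUTEX_FD turns a futex
 * into a pollable, one-shot file descriptor backed by futex_poll()
 * above -- handy for mixing futexes into a select()/poll() loop:
 */
#include <poll.h>
#include <stdint.h>
#include <sys/syscall.h>
#include <unistd.h>
#include <linux/futex.h>

static int wait_pollable(uint32_t *addr)
{
        struct pollfd pfd;
        int fd = syscall(SYS_futex, addr, FUTEX_FD, 0 /* no SIGIO */,
                         NULL, NULL, 0);

        if (fd < 0)
                return -1;
        pfd.fd = fd;
        pfd.events = POLLIN;
        poll(&pfd, 1, -1);      /* POLLIN once a waker dequeues us */
        close(fd);              /* one-shot: get a fresh fd next time */
        return 0;
}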
1591
1592 /*
1593  * Support for robust futexes: the kernel cleans up held futexes at
1594  * thread exit time.
1595  *
1596  * Implementation: user-space maintains a per-thread list of locks it
1597  * is holding. Upon do_exit(), the kernel carefully walks this list,
1598  * and marks all locks that are owned by this thread with the
1599  * FUTEX_OWNER_DIED bit, and wakes up a waiter (if any). The list is
1600  * always manipulated with the lock held, so the list is private and
1601  * per-thread. Userspace also maintains a per-thread 'list_op_pending'
1602  * field, to allow the kernel to clean up if the thread dies after
1603  * acquiring the lock, but just before it could have added itself to
1604  * the list. There can only be one such pending lock.
1605  */
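/*
 * Editor's sketch (not part of the patch): how a thread registers its
 * robust list, assuming the installed <linux/futex.h> exposes
 * struct robust_list_head and the syscall number. The head would live
 * in thread-local storage in real code (one list per thread); an
 * empty list is the head pointing at itself:
 */
#include <stddef.h>
#include <stdint.h>
#include <sys/syscall.h>
#include <unistd.h>
#include <linux/futex.h>

struct my_robust_mutex {
        struct robust_list list;        /* links held locks together */
        uint32_t futex;                 /* the futex word itself */
};

static struct robust_list_head head = {
        .list            = { .next = &head.list },      /* empty, circular */
        .futex_offset    = offsetof(struct my_robust_mutex, futex)
                           - offsetof(struct my_robust_mutex, list),
        .list_op_pending = NULL,
};

static void register_robust_list(void)
{
        syscall(SYS_set_robust_list, &head, sizeof(head));
}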
1606
1607 /**
1608  * sys_set_robust_list - set the robust-futex list head of a task
1609  * @head: pointer to the list-head
1610  * @len: length of the list-head, as userspace expects
1611  */
1612 asmlinkage long
1613 sys_set_robust_list(struct robust_list_head __user *head,
1614                     size_t len)
1615 {
1616         /*
1617          * The kernel knows only one size for now:
1618          */
1619         if (unlikely(len != sizeof(*head)))
1620                 return -EINVAL;
1621
1622         current->robust_list = head;
1623
1624         return 0;
1625 }
1626
/**
 * sys_get_robust_list - get the robust-futex list head of a task
 * @pid: pid of the process [zero for current task]
 * @head_ptr: pointer to a list-head pointer, the kernel fills it in
 * @len_ptr: pointer to a length field, the kernel fills in the header size
 */
asmlinkage long
sys_get_robust_list(int pid, struct robust_list_head __user **head_ptr,
                    size_t __user *len_ptr)
{
        struct robust_list_head __user *head;
        unsigned long ret;

        if (!pid)
                head = current->robust_list;
        else {
                struct task_struct *p;

                ret = -ESRCH;
                read_lock(&tasklist_lock);
                p = find_task_by_pid(pid);
                if (!p)
                        goto err_unlock;
                ret = -EPERM;
                if ((current->euid != p->euid) && (current->euid != p->uid) &&
                                !capable(CAP_SYS_PTRACE))
                        goto err_unlock;
                head = p->robust_list;
                read_unlock(&tasklist_lock);
        }

        if (put_user(sizeof(*head), len_ptr))
                return -EFAULT;
        return put_user(head, head_ptr);

err_unlock:
        read_unlock(&tasklist_lock);

        return ret;
}

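/*
 * Example query from userspace (a sketch - mainly useful to debuggers
 * and checkpoint/restore tools inspecting another task):
 *
 *	struct robust_list_head *head;
 *	size_t len;
 *
 *	syscall(SYS_get_robust_list, pid, &head, &len);
 */
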
/*
 * Process a futex-list entry, check whether it's owned by the
 * dying task, and do notification if so:
 */
int handle_futex_death(u32 __user *uaddr, struct task_struct *curr)
{
        u32 uval, nval;

retry:
        if (get_user(uval, uaddr))
                return -1;

        if ((uval & FUTEX_TID_MASK) == curr->pid) {
                /*
                 * Ok, this dying thread is truly holding a futex
                 * of interest. Set the OWNER_DIED bit atomically
                 * via cmpxchg, and if the value had FUTEX_WAITERS
                 * set, wake up a waiter (if any). (We have to do a
                 * futex_wake() even if OWNER_DIED is already set -
                 * to handle the rare but possible case of recursive
                 * thread-death.) The rest of the cleanup is done in
                 * userspace.
                 */
                nval = futex_atomic_cmpxchg_inatomic(uaddr, uval,
                                                     uval | FUTEX_OWNER_DIED);
                if (nval == -EFAULT)
                        return -1;

                if (nval != uval)
                        goto retry;

                if (uval & FUTEX_WAITERS)
                        futex_wake(uaddr, 1);
        }
        return 0;
}

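/*
 * For orientation, the futex word manipulated above packs the owner
 * TID and two status bits (values from <linux/futex.h>):
 *
 *	FUTEX_WAITERS		0x80000000
 *	FUTEX_OWNER_DIED	0x40000000
 *	FUTEX_TID_MASK		0x3fffffff
 *
 * e.g. TID 1234 (0x4d2) holding the lock with waiters queued gives
 * the word 0x800004d2; the cmpxchg above turns it into 0xc00004d2,
 * and one waiter is woken to observe FUTEX_OWNER_DIED.
 */
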
/*
 * Walk curr->robust_list (very carefully, it's a userspace list!)
 * and mark any locks found there dead, and notify any waiters.
 *
 * We silently return on any sign of list-walking problem.
 */
void exit_robust_list(struct task_struct *curr)
{
        struct robust_list_head __user *head = curr->robust_list;
        struct robust_list __user *entry, *pending;
        unsigned int limit = ROBUST_LIST_LIMIT;
        unsigned long futex_offset;

        /*
         * Fetch the list head (which was registered earlier, via
         * sys_set_robust_list()):
         */
        if (get_user(entry, &head->list.next))
                return;
        /*
         * Fetch the relative futex offset:
         */
        if (get_user(futex_offset, &head->futex_offset))
                return;
        /*
         * Fetch any possibly pending lock-add first, and handle it
         * if it exists:
         */
        if (get_user(pending, &head->list_op_pending))
                return;
        if (pending)
                handle_futex_death((void __user *)pending + futex_offset, curr);

        while (entry != &head->list) {
                /*
                 * A pending lock might already be on the list, so
                 * don't process it twice:
                 */
                if (entry != pending)
                        if (handle_futex_death((void __user *)entry + futex_offset,
                                               curr))
                                return;
                /*
                 * Fetch the next entry in the list:
                 */
                if (get_user(entry, &entry->next))
                        return;
                /*
                 * Avoid excessively long or circular lists:
                 */
                if (!--limit)
                        break;

                cond_resched();
        }
}

long do_futex(u32 __user *uaddr, int op, u32 val, unsigned long timeout,
                u32 __user *uaddr2, u32 val2, u32 val3)
{
        int ret;

        switch (op) {
        case FUTEX_WAIT:
                ret = futex_wait(uaddr, val, timeout);
                break;
        case FUTEX_WAKE:
                ret = futex_wake(uaddr, val);
                break;
        case FUTEX_FD:
                /* non-zero val means F_SETOWN(getpid()) & F_SETSIG(val) */
                ret = futex_fd(uaddr, val);
                break;
        case FUTEX_REQUEUE:
                ret = futex_requeue(uaddr, uaddr2, val, val2, NULL);
                break;
        case FUTEX_CMP_REQUEUE:
                ret = futex_requeue(uaddr, uaddr2, val, val2, &val3);
                break;
        case FUTEX_WAKE_OP:
                ret = futex_wake_op(uaddr, uaddr2, val, val2, val3);
                break;
        case FUTEX_LOCK_PI:
                ret = futex_lock_pi(uaddr, val, timeout, val2, 0);
                break;
        case FUTEX_UNLOCK_PI:
                ret = futex_unlock_pi(uaddr);
                break;
        case FUTEX_TRYLOCK_PI:
                ret = futex_lock_pi(uaddr, 0, timeout, val2, 1);
                break;
        default:
                ret = -ENOSYS;
        }
        return ret;
}

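/*
 * Illustrative sketch: FUTEX_CMP_REQUEUE wakes up to 'val' waiters on
 * uaddr and moves up to 'val2' more onto uaddr2, but only if *uaddr
 * still equals val3 - closing the race that plain FUTEX_REQUEUE has
 * against a concurrent locker. A condvar-broadcast style call might
 * look like (cond/mutex/cond_val are hypothetical userspace names):
 *
 *	syscall(SYS_futex, &cond, FUTEX_CMP_REQUEUE, 1,
 *		(void *)(long)INT_MAX, &mutex, cond_val);
 */
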
asmlinkage long sys_futex(u32 __user *uaddr, int op, u32 val,
                          struct timespec __user *utime, u32 __user *uaddr2,
                          u32 val3)
{
        struct timespec t;
        unsigned long timeout = MAX_SCHEDULE_TIMEOUT;
        u32 val2 = 0;

        if (utime && (op == FUTEX_WAIT || op == FUTEX_LOCK_PI)) {
                if (copy_from_user(&t, utime, sizeof(t)) != 0)
                        return -EFAULT;
                if (!timespec_valid(&t))
                        return -EINVAL;
                if (op == FUTEX_WAIT)
                        timeout = timespec_to_jiffies(&t) + 1;
                else {
                        timeout = t.tv_sec;
                        val2 = t.tv_nsec;
                }
        }
        /*
         * The requeue ops take no timeout, so they pass their
         * 'nr_requeue' parameter in the 'utime' argument instead:
         */
        if (op == FUTEX_REQUEUE || op == FUTEX_CMP_REQUEUE)
                val2 = (u32) (unsigned long) utime;

        return do_futex(uaddr, op, val, timeout, uaddr2, val2, val3);
}

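/*
 * Basic usage from userspace (a sketch): FUTEX_WAIT blocks only while
 * *uaddr still contains 'val', which is what makes the unlocked fast
 * path race-free:
 *
 *	int uval = 0;	// the futex word, shared between threads
 *
 *	// waiter: sleep as long as uval is still 0
 *	syscall(SYS_futex, &uval, FUTEX_WAIT, 0, NULL, NULL, 0);
 *
 *	// waker: flip the word, then wake one waiter
 *	uval = 1;
 *	syscall(SYS_futex, &uval, FUTEX_WAKE, 1, NULL, NULL, 0);
 */
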
static int futexfs_get_sb(struct file_system_type *fs_type,
                          int flags, const char *dev_name, void *data,
                          struct vfsmount *mnt)
{
        return get_sb_pseudo(fs_type, "futex", NULL, 0xBAD1DEA, mnt);
}

static struct file_system_type futex_fs_type = {
        .name           = "futexfs",
        .get_sb         = futexfs_get_sb,
        .kill_sb        = kill_anon_super,
};

static int __init futex_init(void)
{
        unsigned int i;

        register_filesystem(&futex_fs_type);
        futex_mnt = kern_mount(&futex_fs_type);

        for (i = 0; i < ARRAY_SIZE(futex_queues); i++) {
                INIT_LIST_HEAD(&futex_queues[i].chain);
                spin_lock_init(&futex_queues[i].lock);
        }
        return 0;
}
__initcall(futex_init);