i386/x86_64: move headers to include/asm-x86
[linux-3.10.git] / include / asm-x86 / mutex_64.h
1 /*
2  * Assembly implementation of the mutex fastpath, based on atomic
3  * decrement/increment.
4  *
5  * started by Ingo Molnar:
6  *
7  *  Copyright (C) 2004, 2005, 2006 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
8  */
9 #ifndef _ASM_MUTEX_H
10 #define _ASM_MUTEX_H
11
12 /**
13  * __mutex_fastpath_lock - decrement and call function if negative
14  * @v: pointer of type atomic_t
15  * @fail_fn: function to call if the result is negative
16  *
17  * Atomically decrements @v and calls <fail_fn> if the result is negative.
18  */
19 #define __mutex_fastpath_lock(v, fail_fn)                               \
20 do {                                                                    \
21         unsigned long dummy;                                            \
22                                                                         \
23         typecheck(atomic_t *, v);                                       \
24         typecheck_fn(void (*)(atomic_t *), fail_fn);                    \
25                                                                         \
26         __asm__ __volatile__(                                           \
27                 LOCK_PREFIX "   decl (%%rdi)    \n"                     \
28                         "   jns 1f              \n"                     \
29                         "   call "#fail_fn"     \n"                     \
30                         "1:"                                            \
31                                                                         \
32                 :"=D" (dummy)                                           \
33                 : "D" (v)                                               \
34                 : "rax", "rsi", "rdx", "rcx",                           \
35                   "r8", "r9", "r10", "r11", "memory");                  \
36 } while (0)
37
38 /**
39  *  __mutex_fastpath_lock_retval - try to take the lock by moving the count
40  *                                 from 1 to a 0 value
41  *  @count: pointer of type atomic_t
42  *  @fail_fn: function to call if the original value was not 1
43  *
44  * Change the count from 1 to a value lower than 1, and call <fail_fn> if
45  * it wasn't 1 originally. This function returns 0 if the fastpath succeeds,
46  * or anything the slow path function returns
47  */
48 static inline int
49 __mutex_fastpath_lock_retval(atomic_t *count,
50                              int (*fail_fn)(atomic_t *))
51 {
52         if (unlikely(atomic_dec_return(count) < 0))
53                 return fail_fn(count);
54         else
55                 return 0;
56 }
57
/**
 * __mutex_fastpath_unlock - increment and call function if nonpositive
 * @v: pointer of type atomic_t
 * @fail_fn: function to call if the result is nonpositive
 *
 * Atomically increments @v and calls <fail_fn> if the result is nonpositive.
 *
 * Implementation notes: mirror image of __mutex_fastpath_lock.  @v is
 * pinned in %rdi so the slowpath receives it as its first argument;
 * "jg 1f" skips the call when the LOCK'ed incl produced a strictly
 * positive value.  The explicit clobber list covers all caller-saved
 * registers because <fail_fn> is called directly from the asm; the
 * "dummy" output marks %rdi as potentially modified.
 */
#define __mutex_fastpath_unlock(v, fail_fn)                             \
do {                                                                    \
        unsigned long dummy;                                            \
                                                                        \
        typecheck(atomic_t *, v);                                       \
        typecheck_fn(void (*)(atomic_t *), fail_fn);                    \
                                                                        \
        __asm__ __volatile__(                                           \
                LOCK_PREFIX "   incl (%%rdi)    \n"                     \
                        "   jg 1f               \n"                     \
                        "   call "#fail_fn"     \n"                     \
                        "1:                       "                     \
                                                                        \
                :"=D" (dummy)                                           \
                : "D" (v)                                               \
                : "rax", "rsi", "rdx", "rcx",                           \
                  "r8", "r9", "r10", "r11", "memory");                  \
} while (0)
83
84 #define __mutex_slowpath_needs_to_unlock()      1
85
86 /**
87  * __mutex_fastpath_trylock - try to acquire the mutex, without waiting
88  *
89  *  @count: pointer of type atomic_t
90  *  @fail_fn: fallback function
91  *
92  * Change the count from 1 to 0 and return 1 (success), or return 0 (failure)
93  * if it wasn't 1 originally. [the fallback function is never used on
94  * x86_64, because all x86_64 CPUs have a CMPXCHG instruction.]
95  */
96 static inline int
97 __mutex_fastpath_trylock(atomic_t *count, int (*fail_fn)(atomic_t *))
98 {
99         if (likely(atomic_cmpxchg(count, 1, 0) == 1))
100                 return 1;
101         else
102                 return 0;
103 }
104
105 #endif