/*
 * linux-3.10: arch/ia64/include/asm/intrinsics.h
 * (snapshot taken at merge "Merge branch 'akpm' (Andrew's patch-bomb)")
 */
1 #ifndef _ASM_IA64_INTRINSICS_H
2 #define _ASM_IA64_INTRINSICS_H
3
4 /*
5  * Compiler-dependent intrinsics.
6  *
7  * Copyright (C) 2002-2003 Hewlett-Packard Co
8  *      David Mosberger-Tang <davidm@hpl.hp.com>
9  */
10
11 #ifndef __ASSEMBLY__
12
13 #include <linux/types.h>
14 /* include compiler specific intrinsics */
15 #include <asm/ia64regs.h>
16 #ifdef __INTEL_COMPILER
17 # include <asm/intel_intrin.h>
18 #else
19 # include <asm/gcc_intrin.h>
20 #endif
21 #include <asm/cmpxchg.h>
22
/* Non-paravirtualized read of PSR.i: mask the interrupt-enable bit out of the
 * Processor Status Register.  Non-zero iff interrupts are currently enabled. */
#define ia64_native_get_psr_i() (ia64_native_getreg(_IA64_REG_PSR) & IA64_PSR_I)
24
/*
 * Load region registers rr0..rr4 in one go.  The first argument of each
 * ia64_native_set_rr() selects the region register via the top bits of the
 * given virtual address: successive bases differ by 0x2000000000000000UL
 * (1UL << 61), i.e. one 2^61-byte region per register.  rr5-rr7 are
 * deliberately left untouched (kernel-owned regions; see callers).
 * Multi-statement macro, hence the do { } while (0) wrapper.
 */
#define ia64_native_set_rr0_to_rr4(val0, val1, val2, val3, val4)        \
do {                                                                    \
        ia64_native_set_rr(0x0000000000000000UL, (val0));               \
        ia64_native_set_rr(0x2000000000000000UL, (val1));               \
        ia64_native_set_rr(0x4000000000000000UL, (val2));               \
        ia64_native_set_rr(0x6000000000000000UL, (val3));               \
        ia64_native_set_rr(0x8000000000000000UL, (val4));               \
} while (0)
33
/*
 * Force an unresolved reference if someone tries to use
 * ia64_fetch_and_add() with a bad value.
 *
 * These functions are declared but (intentionally) never defined anywhere:
 * for valid, compile-time-constant sizes/increments the calls below are
 * dead code and get eliminated; an invalid use survives optimization and
 * turns into a link-time "undefined reference" error.
 */
extern unsigned long __bad_size_for_ia64_fetch_and_add (void);
extern unsigned long __bad_increment_for_ia64_fetch_and_add (void);
40
/*
 * Dispatch to the 4- or 8-byte fetchadd intrinsic according to sz and store
 * the pre-addition value of *v into tmp.  sem is token-pasted onto the
 * intrinsic name and selects the memory-ordering variant (e.g. acq/rel,
 * matching the fetchadd instruction's completers).  sz is expected to be a
 * compile-time constant (sizeof), so the dead branches — including the
 * __bad_size_for_ia64_fetch_and_add() link-error trap — are optimized away.
 */
#define IA64_FETCHADD(tmp,v,n,sz,sem)                                           \
({                                                                              \
        switch (sz) {                                                           \
              case 4:                                                           \
                tmp = ia64_fetchadd4_##sem((unsigned int *) v, n);              \
                break;                                                          \
                                                                                \
              case 8:                                                           \
                tmp = ia64_fetchadd8_##sem((unsigned long *) v, n);             \
                break;                                                          \
                                                                                \
              default:                                                          \
                __bad_size_for_ia64_fetch_and_add();                            \
        }                                                                       \
})
56
/*
 * Atomically add i to *v with the given ordering semantics (sem) and return
 * the OLD value of *v, cast back to the pointee type.
 *
 * Only the increments +/-1, +/-4, +/-8, +/-16 are accepted — the set of
 * immediates the ia64 fetchadd instruction supports.  Any other
 * (compile-time-constant) increment leaves only the
 * __bad_increment_for_ia64_fetch_and_add() branch alive and fails at link
 * time.  The if/else-if cascade feeds each legal increment to IA64_FETCHADD
 * as a literal constant so it can be encoded as the instruction immediate.
 */
#define ia64_fetchadd(i,v,sem)                                                          \
({                                                                                      \
        __u64 _tmp;                                                                     \
        volatile __typeof__(*(v)) *_v = (v);                                            \
        /* Can't use a switch () here: gcc isn't always smart enough for that... */     \
        if ((i) == -16)                                                                 \
                IA64_FETCHADD(_tmp, _v, -16, sizeof(*(v)), sem);                        \
        else if ((i) == -8)                                                             \
                IA64_FETCHADD(_tmp, _v, -8, sizeof(*(v)), sem);                         \
        else if ((i) == -4)                                                             \
                IA64_FETCHADD(_tmp, _v, -4, sizeof(*(v)), sem);                         \
        else if ((i) == -1)                                                             \
                IA64_FETCHADD(_tmp, _v, -1, sizeof(*(v)), sem);                         \
        else if ((i) == 1)                                                              \
                IA64_FETCHADD(_tmp, _v, 1, sizeof(*(v)), sem);                          \
        else if ((i) == 4)                                                              \
                IA64_FETCHADD(_tmp, _v, 4, sizeof(*(v)), sem);                          \
        else if ((i) == 8)                                                              \
                IA64_FETCHADD(_tmp, _v, 8, sizeof(*(v)), sem);                          \
        else if ((i) == 16)                                                             \
                IA64_FETCHADD(_tmp, _v, 16, sizeof(*(v)), sem);                         \
        else                                                                            \
                _tmp = __bad_increment_for_ia64_fetch_and_add();                        \
        (__typeof__(*(v))) (_tmp);      /* return old value */                          \
})
82
/* Like ia64_fetchadd() (release semantics) but yields the NEW value,
 * i.e. old + i.  The addition of (i) happens locally, after the atomic op. */
#define ia64_fetch_and_add(i,v) (ia64_fetchadd(i, v, rel) + (i)) /* return new value */
84
85 #endif
86
87 #ifdef __KERNEL__
88 #include <asm/paravirt_privop.h>
89 #endif
90
91 #ifndef __ASSEMBLY__
92
/*
 * Indirection layer for privilege-sensitive intrinsics: by default both
 * macros bind ia64_xxx to the native ia64_native_xxx implementation.
 * CONFIG_PARAVIRT kernels rebind them so a hypervisor can intercept:
 *  - IA64_INTRINSIC_API: paravirt_xxx when the toolchain supports the
 *    needed asm (ASM_SUPPORTED), otherwise an indirect call through
 *    pv_cpu_ops.xxx;
 *  - IA64_INTRINSIC_MACRO: always paravirt_xxx (no pv_cpu_ops fallback —
 *    presumably these must stay macro-expandable; confirm against
 *    asm/paravirt_privop.h).
 */
#define IA64_INTRINSIC_API(name)        ia64_native_ ## name
#define IA64_INTRINSIC_MACRO(name)      ia64_native_ ## name

#if defined(__KERNEL__)
#if defined(CONFIG_PARAVIRT)
# undef IA64_INTRINSIC_API
# undef IA64_INTRINSIC_MACRO
# ifdef ASM_SUPPORTED
#  define IA64_INTRINSIC_API(name)      paravirt_ ## name
# else
#  define IA64_INTRINSIC_API(name)      pv_cpu_ops.name
# endif
#define IA64_INTRINSIC_MACRO(name)      paravirt_ ## name
#endif
#endif
108
/************************************************/
/* Instructions paravirtualized for correctness */
/************************************************/
/* fc, thash, get_cpuid, get_pmd, get_eflags, set_eflags */
/* Note that "ttag" and "cover" are also privilege-sensitive; "ttag"
 * is not currently used (though it may be in a long-format VHPT system!)
 */
#define ia64_fc                         IA64_INTRINSIC_API(fc)
#define ia64_thash                      IA64_INTRINSIC_API(thash)
#define ia64_get_cpuid                  IA64_INTRINSIC_API(get_cpuid)
#define ia64_get_pmd                    IA64_INTRINSIC_API(get_pmd)


/************************************************/
/* Instructions paravirtualized for performance */
/************************************************/
/* ssm/rsm/getreg go through IA64_INTRINSIC_MACRO (never a pv_cpu_ops
 * indirect call — see the selection logic above); the rest use
 * IA64_INTRINSIC_API and may become indirect calls under CONFIG_PARAVIRT
 * without ASM_SUPPORTED. */
#define ia64_ssm                        IA64_INTRINSIC_MACRO(ssm)
#define ia64_rsm                        IA64_INTRINSIC_MACRO(rsm)
#define ia64_getreg                     IA64_INTRINSIC_MACRO(getreg)
#define ia64_setreg                     IA64_INTRINSIC_API(setreg)
#define ia64_set_rr                     IA64_INTRINSIC_API(set_rr)
#define ia64_get_rr                     IA64_INTRINSIC_API(get_rr)
#define ia64_ptcga                      IA64_INTRINSIC_API(ptcga)
#define ia64_get_psr_i                  IA64_INTRINSIC_API(get_psr_i)
#define ia64_intrin_local_irq_restore   \
        IA64_INTRINSIC_API(intrin_local_irq_restore)
#define ia64_set_rr0_to_rr4             IA64_INTRINSIC_API(set_rr0_to_rr4)
136
137 #endif /* !__ASSEMBLY__ */
138
139 #endif /* _ASM_IA64_INTRINSICS_H */