/* include/asm-x86_64/system.h */
#ifndef __ASM_SYSTEM_H
#define __ASM_SYSTEM_H

#include <linux/kernel.h>
#include <asm/segment.h>
#include <asm/alternative.h>

#ifdef __KERNEL__

#define __STR(x) #x
#define STR(x) __STR(x)

#define __SAVE(reg,offset) "movq %%" #reg ",(14-" #offset ")*8(%%rsp)\n\t"
#define __RESTORE(reg,offset) "movq (14-" #offset ")*8(%%rsp),%%" #reg "\n\t"

/* frame pointer must be last for get_wchan */
#define SAVE_CONTEXT    "pushf ; pushq %%rbp ; movq %%rsi,%%rbp\n\t"
#define RESTORE_CONTEXT "movq %%rbp,%%rsi ; popq %%rbp ; popf\t"

#define __EXTRA_CLOBBER  \
        ,"rcx","rbx","rdx","r8","r9","r10","r11","r12","r13","r14","r15"

/* Save and restore flags across the switch to avoid leaking the NT flag */
#define switch_to(prev,next,last) \
        asm volatile(SAVE_CONTEXT                                                   \
                     "movq %%rsp,%P[threadrsp](%[prev])\n\t" /* save RSP */       \
                     "movq %P[threadrsp](%[next]),%%rsp\n\t" /* restore RSP */    \
                     "call __switch_to\n\t"                                       \
                     ".globl thread_return\n"                                     \
                     "thread_return:\n\t"                                         \
                     "movq %%gs:%P[pda_pcurrent],%%rsi\n\t"                       \
                     "movq %P[thread_info](%%rsi),%%r8\n\t"                       \
                     LOCK_PREFIX "btr  %[tif_fork],%P[ti_flags](%%r8)\n\t"        \
                     "movq %%rax,%%rdi\n\t"                                       \
                     "jc   ret_from_fork\n\t"                                     \
                     RESTORE_CONTEXT                                              \
                     : "=a" (last)                                                \
                     : [next] "S" (next), [prev] "D" (prev),                      \
                       [threadrsp] "i" (offsetof(struct task_struct, thread.rsp)), \
                       [ti_flags] "i" (offsetof(struct thread_info, flags)),      \
                       [tif_fork] "i" (TIF_FORK),                                 \
                       [thread_info] "i" (offsetof(struct task_struct, thread_info)), \
                       [pda_pcurrent] "i" (offsetof(struct x8664_pda, pcurrent))  \
                     : "memory", "cc" __EXTRA_CLOBBER)

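/*
 * Typical use (illustrative, from the scheduler's context-switch path):
 *
 *	switch_to(prev, next, prev);
 *
 * so that, once this task runs again, 'prev' names the task we actually
 * switched away from.
 */
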
extern void load_gs_index(unsigned);

/*
 * Load a segment. Fall back on loading the zero
 * segment if something goes wrong..
 */
#define loadsegment(seg,value)  \
        asm volatile("\n"                       \
                "1:\t"                          \
                "movl %k0,%%" #seg "\n"         \
                "2:\n"                          \
                ".section .fixup,\"ax\"\n"      \
                "3:\t"                          \
                "movl %1,%%" #seg "\n\t"        \
                "jmp 2b\n"                      \
                ".previous\n"                   \
                ".section __ex_table,\"a\"\n\t" \
                ".align 8\n\t"                  \
                ".quad 1b,3b\n"                 \
                ".previous"                     \
                : :"r" (value), "r" (0))
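
/*
 * Illustrative use ('sel' is a hypothetical selector value):
 *
 *	loadsegment(fs, sel);
 *
 * If 'sel' turns out to be invalid, the fixup path loads the null
 * selector instead of faulting.
 */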

/*
 * clts()/stts(): clear and set the CR0.TS bit respectively
 */
#define clts() __asm__ __volatile__ ("clts")

static inline unsigned long read_cr0(void)
{
        unsigned long cr0;
        asm volatile("movq %%cr0,%0" : "=r" (cr0));
        return cr0;
}

static inline void write_cr0(unsigned long val)
{
        asm volatile("movq %0,%%cr0" :: "r" (val));
}

static inline unsigned long read_cr3(void)
{
        unsigned long cr3;
        asm("movq %%cr3,%0" : "=r" (cr3));
        return cr3;
}

static inline void write_cr3(unsigned long val)
{
        asm volatile("movq %0,%%cr3" :: "r" (val) : "memory");
}

static inline unsigned long read_cr4(void)
{
        unsigned long cr4;
        asm("movq %%cr4,%0" : "=r" (cr4));
        return cr4;
}

static inline void write_cr4(unsigned long val)
{
        asm volatile("movq %0,%%cr4" :: "r" (val) : "memory");
}

#define stts() write_cr0(8 | read_cr0())
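/* The 8 above is CR0.TS (bit 3); with TS set, the next FPU/SSE use traps (#NM). */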

#define wbinvd() \
        __asm__ __volatile__ ("wbinvd": : :"memory");

/*
 * On SMP systems, when the scheduler does migration-cost autodetection,
 * it needs a way to flush as much of the CPU's caches as possible.
 */
static inline void sched_cacheflush(void)
{
        wbinvd();
}

#endif  /* __KERNEL__ */

#define nop() __asm__ __volatile__ ("nop")

#define xchg(ptr,v) ((__typeof__(*(ptr)))__xchg((unsigned long)(v),(ptr),sizeof(*(ptr))))

#define tas(ptr) (xchg((ptr),1))

#define __xg(x) ((volatile long *)(x))

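/*
 * On x86-64 an aligned 64-bit store is atomic in itself, so a plain
 * assignment is sufficient here (no cmpxchg8b trick as on 32-bit).
 */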
static inline void set_64bit(volatile unsigned long *ptr, unsigned long val)
{
        *ptr = val;
}

#define _set_64bit set_64bit

/*
 * Note: no "lock" prefix even on SMP: xchg always implies lock anyway
 * Note 2: xchg has side effect, so that attribute volatile is necessary,
 *        but generally the primitive is invalid, *ptr is output argument. --ANK
 */
static inline unsigned long __xchg(unsigned long x, volatile void * ptr, int size)
{
        switch (size) {
                case 1:
                        __asm__ __volatile__("xchgb %b0,%1"
                                :"=q" (x)
                                :"m" (*__xg(ptr)), "0" (x)
                                :"memory");
                        break;
                case 2:
                        __asm__ __volatile__("xchgw %w0,%1"
                                :"=r" (x)
                                :"m" (*__xg(ptr)), "0" (x)
                                :"memory");
                        break;
                case 4:
                        __asm__ __volatile__("xchgl %k0,%1"
                                :"=r" (x)
                                :"m" (*__xg(ptr)), "0" (x)
                                :"memory");
                        break;
                case 8:
                        __asm__ __volatile__("xchgq %0,%1"
                                :"=r" (x)
                                :"m" (*__xg(ptr)), "0" (x)
                                :"memory");
                        break;
        }
        return x;
}

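/*
 * Illustrative use ('flag' is a hypothetical unsigned long):
 *
 *	old = xchg(&flag, 1);	atomically store 1 and return the old value
 *
 * tas(ptr) is simply xchg(ptr, 1).
 */
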
/*
 * Atomic compare and exchange.  Compare OLD with MEM, if identical,
 * store NEW in MEM.  Return the initial value in MEM.  Success is
 * indicated by comparing RETURN with OLD.
 */

#define __HAVE_ARCH_CMPXCHG 1

static inline unsigned long __cmpxchg(volatile void *ptr, unsigned long old,
                                      unsigned long new, int size)
{
        unsigned long prev;
        switch (size) {
        case 1:
                __asm__ __volatile__(LOCK_PREFIX "cmpxchgb %b1,%2"
                                     : "=a"(prev)
                                     : "q"(new), "m"(*__xg(ptr)), "0"(old)
                                     : "memory");
                return prev;
        case 2:
                __asm__ __volatile__(LOCK_PREFIX "cmpxchgw %w1,%2"
                                     : "=a"(prev)
                                     : "r"(new), "m"(*__xg(ptr)), "0"(old)
                                     : "memory");
                return prev;
        case 4:
                __asm__ __volatile__(LOCK_PREFIX "cmpxchgl %k1,%2"
                                     : "=a"(prev)
                                     : "r"(new), "m"(*__xg(ptr)), "0"(old)
                                     : "memory");
                return prev;
        case 8:
                __asm__ __volatile__(LOCK_PREFIX "cmpxchgq %1,%2"
                                     : "=a"(prev)
                                     : "r"(new), "m"(*__xg(ptr)), "0"(old)
                                     : "memory");
                return prev;
        }
        return old;
}

static inline unsigned long __cmpxchg_local(volatile void *ptr,
                        unsigned long old, unsigned long new, int size)
{
        unsigned long prev;
        switch (size) {
        case 1:
                __asm__ __volatile__("cmpxchgb %b1,%2"
                                     : "=a"(prev)
                                     : "q"(new), "m"(*__xg(ptr)), "0"(old)
                                     : "memory");
                return prev;
        case 2:
                __asm__ __volatile__("cmpxchgw %w1,%2"
                                     : "=a"(prev)
                                     : "r"(new), "m"(*__xg(ptr)), "0"(old)
                                     : "memory");
                return prev;
        case 4:
                __asm__ __volatile__("cmpxchgl %k1,%2"
                                     : "=a"(prev)
                                     : "r"(new), "m"(*__xg(ptr)), "0"(old)
                                     : "memory");
                return prev;
        case 8:
                __asm__ __volatile__("cmpxchgq %1,%2"
                                     : "=a"(prev)
                                     : "r"(new), "m"(*__xg(ptr)), "0"(old)
                                     : "memory");
                return prev;
        }
        return old;
}

#define cmpxchg(ptr,o,n)\
        ((__typeof__(*(ptr)))__cmpxchg((ptr),(unsigned long)(o),\
                                        (unsigned long)(n),sizeof(*(ptr))))
#define cmpxchg_local(ptr,o,n)\
        ((__typeof__(*(ptr)))__cmpxchg_local((ptr),(unsigned long)(o),\
                                        (unsigned long)(n),sizeof(*(ptr))))

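/*
 * Illustrative use ('v' is a hypothetical unsigned long): success is
 * detected by comparing the return value with the expected old value:
 *
 *	if (cmpxchg(&v, old, new) == old)
 *		the update took effect atomically
 */
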
#ifdef CONFIG_SMP
#define smp_mb()        mb()
#define smp_rmb()       rmb()
#define smp_wmb()       wmb()
#define smp_read_barrier_depends()      do {} while(0)
#else
#define smp_mb()        barrier()
#define smp_rmb()       barrier()
#define smp_wmb()       barrier()
#define smp_read_barrier_depends()      do {} while(0)
#endif

/*
 * Force strict CPU ordering.
 * And yes, this is required on UP too when we're talking
 * to devices.
 */
#define mb()    asm volatile("mfence":::"memory")
#define rmb()   asm volatile("lfence":::"memory")

#ifdef CONFIG_UNORDERED_IO
#define wmb()   asm volatile("sfence" ::: "memory")
#else
#define wmb()   asm volatile("" ::: "memory")
#endif
#define read_barrier_depends()  do {} while(0)
#define set_mb(var, value) do { (void) xchg(&var, value); } while (0)
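
/*
 * Illustrative pairing ('data' and 'ready' are hypothetical):
 *
 *	CPU 0:	data = 1;  smp_wmb();  ready = 1;
 *	CPU 1:	if (ready) { smp_rmb();  use data; }
 *
 * smp_wmb() orders the two stores and smp_rmb() orders the two loads,
 * so CPU 1 never sees 'ready' set while still reading a stale 'data'.
 */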

#define warn_if_not_ulong(x) do { unsigned long foo; (void) (&(x) == &foo); } while (0)

#include <linux/irqflags.h>

void cpu_idle_wait(void);

extern unsigned long arch_align_stack(unsigned long sp);
extern void free_init_pages(char *what, unsigned long begin, unsigned long end);

#endif