include/asm-x86/system_32.h
#ifndef __ASM_SYSTEM_H
#define __ASM_SYSTEM_H

#include <linux/kernel.h>
#include <asm/segment.h>
#include <asm/cpufeature.h>
#include <asm/cmpxchg.h>

#ifdef __KERNEL__
#define AT_VECTOR_SIZE_ARCH 2 /* entries in ARCH_DLINFO */

struct task_struct;     /* one of the stranger aspects of C forward declarations.. */
extern struct task_struct * FASTCALL(__switch_to(struct task_struct *prev, struct task_struct *next));

/*
 * Saving eflags is important.  Not only does it switch IOPL between tasks,
 * it also protects other tasks from NT leaking through sysenter etc.
 */
#define switch_to(prev,next,last) do {                                  \
        unsigned long esi,edi;                                          \
        asm volatile("pushfl\n\t"               /* Save flags */        \
                     "pushl %%ebp\n\t"                                  \
                     "movl %%esp,%0\n\t"        /* save ESP */          \
                     "movl %5,%%esp\n\t"        /* restore ESP */       \
                     "movl $1f,%1\n\t"          /* save EIP */          \
                     "pushl %6\n\t"             /* restore EIP */       \
                     "jmp __switch_to\n"                                \
                     "1:\t"                                             \
                     "popl %%ebp\n\t"                                   \
                     "popfl"                                            \
                     :"=m" (prev->thread.sp),"=m" (prev->thread.ip),    \
                      "=a" (last),"=S" (esi),"=D" (edi)                 \
                     :"m" (next->thread.sp),"m" (next->thread.ip),      \
                      "2" (prev), "d" (next));                          \
} while (0)

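/*
 * Illustrative sketch, not part of this header: switch_to() is meant to be
 * invoked from the scheduler's context-switch path, roughly as below.  The
 * third argument receives the task we most recently switched away from,
 * once "prev" is eventually run again on this CPU:
 *
 *	struct task_struct *prev, *next;	(picked by the scheduler)
 *
 *	switch_to(prev, next, prev);
 *	... execution resumes here only when "prev" is scheduled again,
 *	    and "prev" then names the task that ran just before us ...
 */
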
/*
 * Load a segment. Fall back on loading the zero
 * segment if something goes wrong..
 */
#define loadsegment(seg,value)                  \
        asm volatile("\n"                       \
                "1:\t"                          \
                "mov %0,%%" #seg "\n"           \
                "2:\n"                          \
                ".section .fixup,\"ax\"\n"      \
                "3:\t"                          \
                "pushl $0\n\t"                  \
                "popl %%" #seg "\n\t"           \
                "jmp 2b\n"                      \
                ".previous\n"                   \
                ".section __ex_table,\"a\"\n\t" \
                ".align 4\n\t"                  \
                ".long 1b,3b\n"                 \
                ".previous"                     \
                : :"rm" (value))

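/*
 * Usage sketch (hypothetical selector value): loadsegment() is for
 * selectors that may be stale or invalid, e.g. values restored on behalf
 * of user space; if the "mov" faults, the fixup quietly loads the null
 * selector instead of killing the kernel:
 *
 *	unsigned int sel = saved_user_gs;	(hypothetical saved value)
 *	loadsegment(gs, sel);			(%gs <- sel, or 0 on fault)
 */
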
static inline void native_clts(void)
{
        asm volatile ("clts");
}

static inline unsigned long native_read_cr0(void)
{
        unsigned long val;
        asm volatile("movl %%cr0,%0\n\t" :"=r" (val));
        return val;
}

static inline void native_write_cr0(unsigned long val)
{
        asm volatile("movl %0,%%cr0": :"r" (val));
}

static inline unsigned long native_read_cr2(void)
{
        unsigned long val;
        asm volatile("movl %%cr2,%0\n\t" :"=r" (val));
        return val;
}

static inline void native_write_cr2(unsigned long val)
{
        asm volatile("movl %0,%%cr2": :"r" (val));
}

static inline unsigned long native_read_cr3(void)
{
        unsigned long val;
        asm volatile("movl %%cr3,%0\n\t" :"=r" (val));
        return val;
}

static inline void native_write_cr3(unsigned long val)
{
        asm volatile("movl %0,%%cr3": :"r" (val));
}

static inline unsigned long native_read_cr4(void)
{
        unsigned long val;
        asm volatile("movl %%cr4,%0\n\t" :"=r" (val));
        return val;
}

static inline unsigned long native_read_cr4_safe(void)
{
        unsigned long val;
        /*
         * This could fault if %cr4 does not exist; the __ex_table entry
         * makes the fault resume at label 2, leaving the preloaded 0 in
         * val.
         */
        asm volatile("1: movl %%cr4, %0         \n"
                "2:                             \n"
                ".section __ex_table,\"a\"      \n"
                ".long 1b,2b                    \n"
                ".previous                      \n"
                : "=r" (val): "0" (0));
        return val;
}

static inline void native_write_cr4(unsigned long val)
{
        asm volatile("movl %0,%%cr4": :"r" (val));
}

static inline void native_wbinvd(void)
{
        asm volatile("wbinvd": : :"memory");
}

#ifdef CONFIG_PARAVIRT
#include <asm/paravirt.h>
#else
#define read_cr0()      (native_read_cr0())
#define write_cr0(x)    (native_write_cr0(x))
#define read_cr2()      (native_read_cr2())
#define write_cr2(x)    (native_write_cr2(x))
#define read_cr3()      (native_read_cr3())
#define write_cr3(x)    (native_write_cr3(x))
#define read_cr4()      (native_read_cr4())
#define read_cr4_safe() (native_read_cr4_safe())
#define write_cr4(x)    (native_write_cr4(x))
#define wbinvd()        (native_wbinvd())

/* Clear the 'TS' bit */
#define clts()          (native_clts())

#endif /* CONFIG_PARAVIRT */

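/*
 * For illustration (a well-known x86 idiom, not something defined here):
 * reloading CR3 with its own value flushes all non-global TLB entries, so
 * a minimal TLB flush built on these accessors is simply:
 *
 *	write_cr3(read_cr3());
 */
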
/* Set the 'TS' bit (bit 3 of CR0, Task Switched) */
#define stts() write_cr0(8 | read_cr0())

#endif  /* __KERNEL__ */

/*
 * Force strict CPU ordering.
 * And yes, this is required on UP too when we're talking
 * to devices.
 *
 * All Intel CPUs follow what Intel calls a *Processor Order*, in which
 * all writes are seen in program order even outside the CPU, so on those
 * parts "wmb()" has little ordering work left to do.
 *
 * I expect future Intel CPUs to have a weaker ordering,
 * but I'd also expect them to finally get their act together
 * and add some real memory barriers if so.
 *
 * Some non-Intel clones support out-of-order stores; for those, wmb()
 * must be a real barrier rather than a no-op.
 */

#define mb() alternative("lock; addl $0,0(%%esp)", "mfence", X86_FEATURE_XMM2)
#define rmb() alternative("lock; addl $0,0(%%esp)", "lfence", X86_FEATURE_XMM2)
#define wmb() alternative("lock; addl $0,0(%%esp)", "sfence", X86_FEATURE_XMM)

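/*
 * Illustrative sketch (hypothetical "desc" structure): a writer must make
 * its payload visible before the flag that publishes it, and the reader
 * must order its two accesses the other way round:
 *
 *	writer:
 *		desc->data = val;
 *		wmb();			payload visible before the flag ...
 *		desc->ready = 1;
 *
 *	reader:
 *		if (desc->ready) {
 *			rmb();		... and the flag seen before the data
 *			val = desc->data;
 *		}
 *
 * For CPU-to-CPU ordering with no device involved, the cheaper smp_wmb()
 * and smp_rmb() variants below are sufficient.
 */
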
/**
 * read_barrier_depends - Flush all pending reads that subsequent reads
 * depend on.
 *
 * No data-dependent reads from memory-like regions are ever reordered
 * over this barrier.  All reads preceding this primitive are guaranteed
 * to access memory (but not necessarily other CPUs' caches) before any
 * reads following this primitive that depend on the data returned by
 * any of the preceding reads.  This primitive is much lighter weight than
 * rmb() on most CPUs, and is never heavier weight than rmb().
 *
 * These ordering constraints are respected by both the local CPU
 * and the compiler.
 *
 * Ordering is not guaranteed by anything other than these primitives,
 * not even by data dependencies.  See the documentation for
 * memory_barrier() for examples and URLs to more information.
 *
 * For example, the following code would force ordering (the initial
 * value of "a" is zero, "b" is one, and "p" is "&a"):
 *
 * <programlisting>
 *      CPU 0                           CPU 1
 *
 *      b = 2;
 *      memory_barrier();
 *      p = &b;                         q = p;
 *                                      read_barrier_depends();
 *                                      d = *q;
 * </programlisting>
 *
 * because the read of "*q" depends on the read of "p" and these
 * two reads are separated by a read_barrier_depends().  However,
 * the following code, with the same initial values for "a" and "b":
 *
 * <programlisting>
 *      CPU 0                           CPU 1
 *
 *      a = 2;
 *      memory_barrier();
 *      b = 3;                          y = b;
 *                                      read_barrier_depends();
 *                                      x = a;
 * </programlisting>
 *
 * does not enforce ordering, since there is no data dependency between
 * the read of "a" and the read of "b".  Therefore, on some CPUs, such
 * as Alpha, "y" could be set to 3 and "x" to 0.  Use rmb()
 * in cases like this where there are no data dependencies.
 **/

#define read_barrier_depends()  do { } while(0)

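/*
 * A kernel-flavoured version of the first example above (hypothetical
 * variables): the store side pairs with smp_wmb(), while the dependent
 * load side only needs the much cheaper dependency barrier:
 *
 *	CPU 0				CPU 1
 *
 *	b = 2;				q = p;
 *	smp_wmb();			smp_read_barrier_depends();
 *	p = &b;				d = *q;		(never a stale *q)
 */
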
#ifdef CONFIG_SMP
#define smp_mb()        mb()
#ifdef CONFIG_X86_PPRO_FENCE
# define smp_rmb()      rmb()
#else
# define smp_rmb()      barrier()
#endif
#ifdef CONFIG_X86_OOSTORE
# define smp_wmb()      wmb()
#else
# define smp_wmb()      barrier()
#endif
#define smp_read_barrier_depends()      read_barrier_depends()
#define set_mb(var, value) do { (void) xchg(&var, value); } while (0)
#else
#define smp_mb()        barrier()
#define smp_rmb()       barrier()
#define smp_wmb()       barrier()
#define smp_read_barrier_depends()      do { } while(0)
#define set_mb(var, value) do { var = value; barrier(); } while (0)
#endif
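
/*
 * Usage sketch (simplified; "condition" is hypothetical): set_mb() stores
 * the new value and orders that store against the following test, which is
 * the classic way to avoid losing a wakeup between marking ourselves
 * sleeping and re-checking whether we still need to sleep:
 *
 *	for (;;) {
 *		set_mb(current->state, TASK_INTERRUPTIBLE);
 *		if (condition)
 *			break;
 *		schedule();
 *	}
 *	current->state = TASK_RUNNING;
 */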

#include <linux/irqflags.h>

/*
 * Disable hlt during certain critical I/O operations.
 */
#define HAVE_DISABLE_HLT

#endif /* __ASM_SYSTEM_H */