[PATCH] kill include/linux/platform.h, default_idle() cleanup
[linux-2.6.git] / include / asm-i386 / system.h
#ifndef __ASM_SYSTEM_H
#define __ASM_SYSTEM_H

#include <linux/config.h>
#include <linux/kernel.h>
#include <asm/segment.h>
#include <asm/cpufeature.h>
#include <linux/bitops.h> /* for LOCK_PREFIX */

#ifdef __KERNEL__

struct task_struct;     /* one of the stranger aspects of C forward declarations.. */
extern struct task_struct * FASTCALL(__switch_to(struct task_struct *prev, struct task_struct *next));

#define switch_to(prev,next,last) do {                                  \
        unsigned long esi,edi;                                          \
        asm volatile("pushl %%ebp\n\t"                                  \
                     "movl %%esp,%0\n\t"        /* save ESP */          \
                     "movl %5,%%esp\n\t"        /* restore ESP */       \
                     "movl $1f,%1\n\t"          /* save EIP */          \
                     "pushl %6\n\t"             /* restore EIP */       \
                     "jmp __switch_to\n"                                \
                     "1:\t"                                             \
                     "popl %%ebp\n\t"                                   \
                     :"=m" (prev->thread.esp),"=m" (prev->thread.eip),  \
                      "=a" (last),"=S" (esi),"=D" (edi)                 \
                     :"m" (next->thread.esp),"m" (next->thread.eip),    \
                      "2" (prev), "d" (next));                          \
} while (0)
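
/*
 * Illustrative sketch only, not part of the original header (kept under
 * #if 0 so it is never compiled): roughly how the core scheduler uses
 * switch_to() to hand this CPU from 'prev' to 'next'.  The wrapper
 * function and its name are assumptions; the real caller lives in
 * kernel/sched.c.
 */
#if 0
static inline struct task_struct *example_context_switch(struct task_struct *prev,
                                                          struct task_struct *next)
{
        struct task_struct *last;

        switch_to(prev, next, last);    /* stack and %eip swap over here */
        /*
         * When 'prev' is eventually scheduled again, execution resumes at
         * this point; 'last' then names the task that ran just before it.
         */
        return last;
}
#endif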

#define _set_base(addr,base) do { unsigned long __pr; \
__asm__ __volatile__ ("movw %%dx,%1\n\t" \
        "rorl $16,%%edx\n\t" \
        "movb %%dl,%2\n\t" \
        "movb %%dh,%3" \
        :"=&d" (__pr) \
        :"m" (*((addr)+2)), \
         "m" (*((addr)+4)), \
         "m" (*((addr)+7)), \
         "0" (base) \
        ); } while(0)

#define _set_limit(addr,limit) do { unsigned long __lr; \
__asm__ __volatile__ ("movw %%dx,%1\n\t" \
        "rorl $16,%%edx\n\t" \
        "movb %2,%%dh\n\t" \
        "andb $0xf0,%%dh\n\t" \
        "orb %%dh,%%dl\n\t" \
        "movb %%dl,%2" \
        :"=&d" (__lr) \
        :"m" (*(addr)), \
         "m" (*((addr)+6)), \
         "0" (limit) \
        ); } while(0)

#define set_base(ldt,base) _set_base( ((char *)&(ldt)) , (base) )
#define set_limit(ldt,limit) _set_limit( ((char *)&(ldt)) , ((limit)-1) )

/*
 * Load a segment. Fall back on loading the zero
 * segment if something goes wrong..
 */
#define loadsegment(seg,value)                  \
        asm volatile("\n"                       \
                "1:\t"                          \
                "mov %0,%%" #seg "\n"           \
                "2:\n"                          \
                ".section .fixup,\"ax\"\n"      \
                "3:\t"                          \
                "pushl $0\n\t"                  \
                "popl %%" #seg "\n\t"           \
                "jmp 2b\n"                      \
                ".previous\n"                   \
                ".section __ex_table,\"a\"\n\t" \
                ".align 4\n\t"                  \
                ".long 1b,3b\n"                 \
                ".previous"                     \
                : :"rm" (value))

/*
 * Save a segment register away
 */
#define savesegment(seg, value) \
        asm volatile("mov %%" #seg ",%0":"=rm" (value))

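/*
 * Illustrative sketch only, not part of the original header (kept under
 * #if 0 so it is never compiled): swapping a data segment register with
 * savesegment()/loadsegment().  The choice of %gs and the helper name
 * are assumptions for the example.
 */
#if 0
static inline unsigned long example_replace_gs(unsigned long new_sel)
{
        unsigned long old_sel;

        savesegment(gs, old_sel);       /* remember the current %gs selector */
        loadsegment(gs, new_sel);       /* a faulting load falls back to the null selector */
        return old_sel;                 /* caller restores it with loadsegment() again */
}
#endif
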
/*
 * Clear and set 'TS' bit respectively
 */
#define clts() __asm__ __volatile__ ("clts")
#define read_cr0() ({ \
        unsigned int __dummy; \
        __asm__ __volatile__( \
                "movl %%cr0,%0\n\t" \
                :"=r" (__dummy)); \
        __dummy; \
})
#define write_cr0(x) \
        __asm__ __volatile__("movl %0,%%cr0": :"r" (x));

#define read_cr2() ({ \
        unsigned int __dummy; \
        __asm__ __volatile__( \
                "movl %%cr2,%0\n\t" \
                :"=r" (__dummy)); \
        __dummy; \
})
#define write_cr2(x) \
        __asm__ __volatile__("movl %0,%%cr2": :"r" (x));

#define read_cr3() ({ \
        unsigned int __dummy; \
        __asm__ ( \
                "movl %%cr3,%0\n\t" \
                :"=r" (__dummy)); \
        __dummy; \
})
#define write_cr3(x) \
        __asm__ __volatile__("movl %0,%%cr3": :"r" (x));

#define read_cr4() ({ \
        unsigned int __dummy; \
        __asm__( \
                "movl %%cr4,%0\n\t" \
                :"=r" (__dummy)); \
        __dummy; \
})

#define read_cr4_safe() ({                            \
        unsigned int __dummy;                         \
        /* This could fault if %cr4 does not exist */ \
        __asm__("1: movl %%cr4, %0              \n"   \
                "2:                             \n"   \
                ".section __ex_table,\"a\"      \n"   \
                ".long 1b,2b                    \n"   \
                ".previous                      \n"   \
                : "=r" (__dummy): "0" (0));           \
        __dummy;                                      \
})

#define write_cr4(x) \
        __asm__ __volatile__("movl %0,%%cr4": :"r" (x));
#define stts() write_cr0(8 | read_cr0())

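/*
 * Illustrative sketch only, not part of the original header (kept under
 * #if 0 so it is never compiled): the lazy-FPU pattern built on
 * stts()/clts().  The surrounding bookkeeping (saving the old owner's
 * FPU state) is assumed and elided.
 */
#if 0
static inline void example_lazy_fpu_switch(void)
{
        /* at context switch: set CR0.TS so the next FPU insn traps (#NM) */
        stts();

        /* in the device-not-available handler, after swapping FPU state: */
        clts();         /* clear CR0.TS so the current task may use the FPU */
}
#endif
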
#endif  /* __KERNEL__ */

#define wbinvd() \
        __asm__ __volatile__ ("wbinvd": : :"memory");

static inline unsigned long get_limit(unsigned long segment)
{
        unsigned long __limit;
        __asm__("lsll %1,%0"
                :"=r" (__limit):"r" (segment));
        return __limit+1;
}

#define nop() __asm__ __volatile__ ("nop")

#define xchg(ptr,v) ((__typeof__(*(ptr)))__xchg((unsigned long)(v),(ptr),sizeof(*(ptr))))

#define tas(ptr) (xchg((ptr),1))

struct __xchg_dummy { unsigned long a[100]; };
#define __xg(x) ((struct __xchg_dummy *)(x))


#ifdef CONFIG_X86_CMPXCHG64

/*
 * The semantics of CMPXCHG8B are a bit strange, this is why
 * there is a loop and the loading of %%eax and %%edx has to
 * be inside. This inlines well in most cases, the cached
 * cost is around ~38 cycles. (in the future we might want
 * to do a SIMD/3DNOW!/MMX/FPU 64-bit store here, but that
 * might have an implicit FPU-save as a cost, so it's not
 * clear which path to go.)
 *
 * cmpxchg8b must be used with the lock prefix here to allow
 * the instruction to be executed atomically, see page 3-102
 * of the instruction set reference 24319102.pdf. We need
 * the reader side to see the coherent 64bit value.
 */
static inline void __set_64bit (unsigned long long * ptr,
                unsigned int low, unsigned int high)
{
        __asm__ __volatile__ (
                "\n1:\t"
                "movl (%0), %%eax\n\t"
                "movl 4(%0), %%edx\n\t"
                "lock cmpxchg8b (%0)\n\t"
                "jnz 1b"
                : /* no outputs */
                :       "D"(ptr),
                        "b"(low),
                        "c"(high)
                :       "ax","dx","memory");
}

static inline void __set_64bit_constant (unsigned long long *ptr,
                                                 unsigned long long value)
{
        __set_64bit(ptr,(unsigned int)(value), (unsigned int)((value)>>32ULL));
}
#define ll_low(x)       *(((unsigned int*)&(x))+0)
#define ll_high(x)      *(((unsigned int*)&(x))+1)

static inline void __set_64bit_var (unsigned long long *ptr,
                         unsigned long long value)
{
        __set_64bit(ptr,ll_low(value), ll_high(value));
}

#define set_64bit(ptr,value) \
(__builtin_constant_p(value) ? \
 __set_64bit_constant(ptr, value) : \
 __set_64bit_var(ptr, value) )

#define _set_64bit(ptr,value) \
(__builtin_constant_p(value) ? \
 __set_64bit(ptr, (unsigned int)(value), (unsigned int)((value)>>32ULL) ) : \
 __set_64bit(ptr, ll_low(value), ll_high(value)) )

#endif
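
/*
 * Illustrative sketch only, not part of the original header (kept under
 * #if 0 so it is never compiled): set_64bit() stores a 64-bit value so
 * that a concurrent cmpxchg8b reader never sees a torn high/low half,
 * e.g. when updating a PAE pte.  The names below are assumptions.
 */
#if 0
static inline void example_store_u64(unsigned long long *slot,
                                     unsigned long long val)
{
        set_64bit(slot, val);   /* atomic with respect to 64-bit readers */
}
#endif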

/*
 * Note: no "lock" prefix even on SMP: xchg always implies lock anyway
 * Note 2: xchg has a side effect, so the volatile attribute is necessary;
 *        strictly the prototype isn't quite right either, since *ptr is
 *        also an output argument. --ANK
 */
static inline unsigned long __xchg(unsigned long x, volatile void * ptr, int size)
{
        switch (size) {
                case 1:
                        __asm__ __volatile__("xchgb %b0,%1"
                                :"=q" (x)
                                :"m" (*__xg(ptr)), "0" (x)
                                :"memory");
                        break;
                case 2:
                        __asm__ __volatile__("xchgw %w0,%1"
                                :"=r" (x)
                                :"m" (*__xg(ptr)), "0" (x)
                                :"memory");
                        break;
                case 4:
                        __asm__ __volatile__("xchgl %0,%1"
                                :"=r" (x)
                                :"m" (*__xg(ptr)), "0" (x)
                                :"memory");
                        break;
        }
        return x;
}
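
/*
 * Illustrative sketch only, not part of the original header (kept under
 * #if 0 so it is never compiled): xchg() as an atomic "grab the lock
 * word" primitive; tas() above is the same idea.  The lock word and the
 * helper name are assumptions.
 */
#if 0
static inline void example_busy_lock(volatile int *lock_word)
{
        while (xchg(lock_word, 1) != 0) /* atomically write 1, fetch old value */
                ;                       /* spin until the previous owner stored 0 */
}
#endif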

/*
 * Atomic compare and exchange.  Compare OLD with MEM, if identical,
 * store NEW in MEM.  Return the initial value in MEM.  Success is
 * indicated by comparing RETURN with OLD.
 */

#ifdef CONFIG_X86_CMPXCHG
#define __HAVE_ARCH_CMPXCHG 1
#define cmpxchg(ptr,o,n)\
        ((__typeof__(*(ptr)))__cmpxchg((ptr),(unsigned long)(o),\
                                        (unsigned long)(n),sizeof(*(ptr))))
#endif

static inline unsigned long __cmpxchg(volatile void *ptr, unsigned long old,
                                      unsigned long new, int size)
{
        unsigned long prev;
        switch (size) {
        case 1:
                __asm__ __volatile__(LOCK_PREFIX "cmpxchgb %b1,%2"
                                     : "=a"(prev)
                                     : "q"(new), "m"(*__xg(ptr)), "0"(old)
                                     : "memory");
                return prev;
        case 2:
                __asm__ __volatile__(LOCK_PREFIX "cmpxchgw %w1,%2"
                                     : "=a"(prev)
                                     : "r"(new), "m"(*__xg(ptr)), "0"(old)
                                     : "memory");
                return prev;
        case 4:
                __asm__ __volatile__(LOCK_PREFIX "cmpxchgl %1,%2"
                                     : "=a"(prev)
                                     : "r"(new), "m"(*__xg(ptr)), "0"(old)
                                     : "memory");
                return prev;
        }
        return old;
}

#ifndef CONFIG_X86_CMPXCHG
/*
 * Building a kernel capable of running on an 80386.  It may be necessary
 * to simulate the cmpxchg on the 80386 CPU.  For that purpose we define
 * a function for each of the sizes we support.
 */

extern unsigned long cmpxchg_386_u8(volatile void *, u8, u8);
extern unsigned long cmpxchg_386_u16(volatile void *, u16, u16);
extern unsigned long cmpxchg_386_u32(volatile void *, u32, u32);

static inline unsigned long cmpxchg_386(volatile void *ptr, unsigned long old,
                                      unsigned long new, int size)
{
        switch (size) {
        case 1:
                return cmpxchg_386_u8(ptr, old, new);
        case 2:
                return cmpxchg_386_u16(ptr, old, new);
        case 4:
                return cmpxchg_386_u32(ptr, old, new);
        }
        return old;
}

#define cmpxchg(ptr,o,n)                                                \
({                                                                      \
        __typeof__(*(ptr)) __ret;                                       \
        if (likely(boot_cpu_data.x86 > 3))                              \
                __ret = __cmpxchg((ptr), (unsigned long)(o),            \
                                        (unsigned long)(n), sizeof(*(ptr))); \
        else                                                            \
                __ret = cmpxchg_386((ptr), (unsigned long)(o),          \
                                        (unsigned long)(n), sizeof(*(ptr))); \
        __ret;                                                          \
})
#endif
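
/*
 * Illustrative sketch only, not part of the original header (kept under
 * #if 0 so it is never compiled): the usual cmpxchg() retry loop, here
 * implementing an atomic add-and-return on a plain int.  The names are
 * assumptions.
 */
#if 0
static inline int example_atomic_add_return(volatile int *v, int inc)
{
        int old, new;

        do {
                old = *v;
                new = old + inc;
        } while (cmpxchg(v, old, new) != old);  /* retry if another CPU raced us */

        return new;
}
#endif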

#ifdef CONFIG_X86_CMPXCHG64

static inline unsigned long long __cmpxchg64(volatile void *ptr, unsigned long long old,
                                      unsigned long long new)
{
        unsigned long long prev;
        __asm__ __volatile__(LOCK_PREFIX "cmpxchg8b %3"
                             : "=A"(prev)
                             : "b"((unsigned long)new),
                               "c"((unsigned long)(new >> 32)),
                               "m"(*__xg(ptr)),
                               "0"(old)
                             : "memory");
        return prev;
}

#define cmpxchg64(ptr,o,n)\
        ((__typeof__(*(ptr)))__cmpxchg64((ptr),(unsigned long long)(o),\
                                        (unsigned long long)(n)))

#endif

/*
 * Force strict CPU ordering.
 * And yes, this is required on UP too when we're talking
 * to devices.
 *
 * For now, "wmb()" doesn't actually do anything, as all
 * Intel CPUs follow what Intel calls a *Processor Order*,
 * in which all writes are seen in program order even
 * outside the CPU.
 *
 * I expect future Intel CPUs to have a weaker ordering,
 * but I'd also expect them to finally get their act together
 * and add some real memory barriers if so.
 *
 * Some non-Intel clones support out-of-order stores; wmb()
 * ceases to be a nop for these.
 */


/*
 * Actually only lfence would be needed for mb() because all stores done
 * by the kernel should already be ordered.  But keep a full barrier for now.
 */

#define mb() alternative("lock; addl $0,0(%%esp)", "mfence", X86_FEATURE_XMM2)
#define rmb() alternative("lock; addl $0,0(%%esp)", "lfence", X86_FEATURE_XMM2)

/**
 * read_barrier_depends - Flush all pending reads that subsequent reads
 * depend on.
 *
 * No data-dependent reads from memory-like regions are ever reordered
 * over this barrier.  All reads preceding this primitive are guaranteed
 * to access memory (but not necessarily other CPUs' caches) before any
 * reads following this primitive that depend on the data returned by
 * any of the preceding reads.  This primitive is much lighter weight than
 * rmb() on most CPUs, and is never heavier weight than rmb().
 *
 * These ordering constraints are respected by both the local CPU
 * and the compiler.
 *
 * Ordering is not guaranteed by anything other than these primitives,
 * not even by data dependencies.  See the documentation for
 * memory_barrier() for examples and URLs to more information.
 *
 * For example, the following code would force ordering (the initial
 * value of "a" is zero, "b" is one, and "p" is "&a"):
 *
 * <programlisting>
 *      CPU 0                           CPU 1
 *
 *      b = 2;
 *      memory_barrier();
 *      p = &b;                         q = p;
 *                                      read_barrier_depends();
 *                                      d = *q;
 * </programlisting>
 *
 * because the read of "*q" depends on the read of "p" and these
 * two reads are separated by a read_barrier_depends().  However,
 * the following code, with the same initial values for "a" and "b":
 *
 * <programlisting>
 *      CPU 0                           CPU 1
 *
 *      a = 2;
 *      memory_barrier();
 *      b = 3;                          y = b;
 *                                      read_barrier_depends();
 *                                      x = a;
 * </programlisting>
 *
 * does not enforce ordering, since there is no data dependency between
 * the read of "a" and the read of "b".  Therefore, on some CPUs, such
 * as Alpha, "y" could be set to 3 and "x" to 0.  Use rmb()
 * in cases like this where there are no data dependencies.
 **/

#define read_barrier_depends()  do { } while(0)

#ifdef CONFIG_X86_OOSTORE
/* Actually there are no OOO-store-capable CPUs for now that do SSE,
   but allow for the possibility already. */
#define wmb() alternative("lock; addl $0,0(%%esp)", "sfence", X86_FEATURE_XMM)
#else
#define wmb()   __asm__ __volatile__ ("": : :"memory")
#endif

#ifdef CONFIG_SMP
#define smp_mb()        mb()
#define smp_rmb()       rmb()
#define smp_wmb()       wmb()
#define smp_read_barrier_depends()      read_barrier_depends()
#define set_mb(var, value) do { (void) xchg(&var, value); } while (0)
#else
#define smp_mb()        barrier()
#define smp_rmb()       barrier()
#define smp_wmb()       barrier()
#define smp_read_barrier_depends()      do { } while(0)
#define set_mb(var, value) do { var = value; barrier(); } while (0)
#endif

#define set_wmb(var, value) do { var = value; wmb(); } while (0)

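/*
 * Illustrative sketch only, not part of the original header (kept under
 * #if 0 so it is never compiled): the standard publish/consume pairing
 * of smp_wmb()/smp_rmb().  The data/flag variables and helper names are
 * assumptions.
 */
#if 0
static int example_data;
static int example_ready;

static inline void example_publish(int v)
{
        example_data = v;
        smp_wmb();              /* order the data store before the flag store */
        example_ready = 1;
}

static inline int example_consume(int *out)
{
        if (!example_ready)
                return 0;
        smp_rmb();              /* order the flag load before the data load */
        *out = example_data;
        return 1;
}
#endif
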
/* interrupt control.. */
#define local_save_flags(x)     do { typecheck(unsigned long,x); __asm__ __volatile__("pushfl ; popl %0":"=g" (x): /* no input */); } while (0)
#define local_irq_restore(x)    do { typecheck(unsigned long,x); __asm__ __volatile__("pushl %0 ; popfl": /* no output */ :"g" (x):"memory", "cc"); } while (0)
#define local_irq_disable()     __asm__ __volatile__("cli": : :"memory")
#define local_irq_enable()      __asm__ __volatile__("sti": : :"memory")
/* used in the idle loop; sti takes one instruction cycle to complete */
#define safe_halt()             __asm__ __volatile__("sti; hlt": : :"memory")
/* used when interrupts are already enabled or to shutdown the processor */
#define halt()                  __asm__ __volatile__("hlt": : :"memory")

#define irqs_disabled()                 \
({                                      \
        unsigned long flags;            \
        local_save_flags(flags);        \
        !(flags & (1<<9));              \
})

/* For spinlocks etc */
#define local_irq_save(x)       __asm__ __volatile__("pushfl ; popl %0 ; cli":"=g" (x): /* no input */ :"memory")

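/*
 * Illustrative sketch only, not part of the original header (kept under
 * #if 0 so it is never compiled): bracketing a short critical section
 * with local_irq_save()/local_irq_restore().  The protected counter and
 * the helper name are assumptions.
 */
#if 0
static unsigned long example_event_count;

static inline void example_count_event(void)
{
        unsigned long flags;

        local_irq_save(flags);          /* disable interrupts, remember prior state */
        example_event_count++;          /* cannot be interrupted on this CPU */
        local_irq_restore(flags);       /* re-enables only if they were enabled before */
}
#endif
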
/*
 * disable hlt during certain critical i/o operations
 */
#define HAVE_DISABLE_HLT
void disable_hlt(void);
void enable_hlt(void);

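/*
 * Illustrative sketch only, not part of the original header (kept under
 * #if 0 so it is never compiled): disable_hlt()/enable_hlt() bracket I/O
 * that must not overlap the idle loop's hlt (historically the floppy
 * driver).  example_do_timing_sensitive_io() is a made-up placeholder.
 */
#if 0
static inline void example_hlt_sensitive_io(void)
{
        disable_hlt();                  /* keep the idle loop from executing hlt */
        example_do_timing_sensitive_io();
        enable_hlt();
}
#endif
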
extern int es7000_plat;
void cpu_idle_wait(void);

/*
 * On SMP systems, when the scheduler does migration-cost autodetection,
 * it needs a way to flush as much of the CPU's caches as possible:
 */
static inline void sched_cacheflush(void)
{
        wbinvd();
}

extern unsigned long arch_align_stack(unsigned long sp);
extern void free_init_pages(char *what, unsigned long begin, unsigned long end);

void default_idle(void);

#endif