diff --git a/include/asm-i386/system.h b/include/asm-i386/system.h
index 6f74d4c..36a92ed 100644
--- a/include/asm-i386/system.h
+++ b/include/asm-i386/system.h
@@ -14,8 +14,7 @@ extern struct task_struct * FASTCALL(__switch_to(struct task_struct *prev, struc
 
 #define switch_to(prev,next,last) do {                                 \
        unsigned long esi,edi;                                          \
-       asm volatile("pushfl\n\t"                                       \
-                    "pushl %%ebp\n\t"                                  \
+       asm volatile("pushl %%ebp\n\t"                                  \
                     "movl %%esp,%0\n\t"        /* save ESP */          \
                     "movl %5,%%esp\n\t"        /* restore ESP */       \
                     "movl $1f,%1\n\t"          /* save EIP */          \
@@ -23,7 +22,6 @@ extern struct task_struct * FASTCALL(__switch_to(struct task_struct *prev, struc
                     "jmp __switch_to\n"                                \
                     "1:\t"                                             \
                     "popl %%ebp\n\t"                                   \
-                    "popfl"                                            \
                     :"=m" (prev->thread.esp),"=m" (prev->thread.eip),  \
                      "=a" (last),"=S" (esi),"=D" (edi)                 \
                     :"m" (next->thread.esp),"m" (next->thread.eip),    \
@@ -56,23 +54,7 @@ __asm__ __volatile__ ("movw %%dx,%1\n\t" \
         ); } while(0)
 
 #define set_base(ldt,base) _set_base( ((char *)&(ldt)) , (base) )
-#define set_limit(ldt,limit) _set_limit( ((char *)&(ldt)) , ((limit)-1)>>12 )
-
-static inline unsigned long _get_base(char * addr)
-{
-       unsigned long __base;
-       __asm__("movb %3,%%dh\n\t"
-               "movb %2,%%dl\n\t"
-               "shll $16,%%edx\n\t"
-               "movw %1,%%dx"
-               :"=&d" (__base)
-               :"m" (*((addr)+2)),
-                "m" (*((addr)+4)),
-                "m" (*((addr)+7)));
-       return __base;
-}
-
-#define get_base(ldt) _get_base( ((char *)&(ldt)) )
+#define set_limit(ldt,limit) _set_limit( ((char *)&(ldt)) , ((limit)-1) )
 
 /*
  * Load a segment. Fall back on loading the zero
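
The hunk above deletes the unused _get_base()/get_base() helpers and stops pre-shifting the limit at the set_limit() call site, so _set_limit() now receives the byte limit minus one directly. For reference, a sketch of the page-granular encoding the old call site computed inline, assuming the descriptor's 4 KiB granularity (G) bit is set:

    /* A segment of `bytes` bytes is encoded in the descriptor limit
     * field as (bytes - 1) >> 12 when the G bit selects 4 KiB units. */
    static unsigned int limit_field(unsigned int bytes)
    {
            return (bytes - 1) >> 12;
    }
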
@@ -81,7 +63,7 @@ static inline unsigned long _get_base(char * addr)
 #define loadsegment(seg,value)                 \
        asm volatile("\n"                       \
                "1:\t"                          \
-               "movl %0,%%" #seg "\n"          \
+               "mov %0,%%" #seg "\n"           \
                "2:\n"                          \
                ".section .fixup,\"ax\"\n"      \
                "3:\t"                          \
@@ -93,13 +75,13 @@ static inline unsigned long _get_base(char * addr)
                ".align 4\n\t"                  \
                ".long 1b,3b\n"                 \
                ".previous"                     \
-               : :"m" (*(unsigned int *)&(value)))
+               : :"rm" (value))
 
 /*
  * Save a segment register away
  */
 #define savesegment(seg, value) \
-       asm volatile("movl %%" #seg ",%0":"=m" (*(int *)&(value)))
+       asm volatile("mov %%" #seg ",%0":"=rm" (value))
 
 /*
  * Clear and set 'TS' bit respectively
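
The loadsegment() and savesegment() changes above stop punning the argument through an `unsigned int *`, which was undefined behaviour for a 16-bit selector variable and always forced a memory operand; the `"rm"` constraint lets the compiler choose a register or memory, and the bare `mov` leaves the operand size to the assembler. A user-space sketch of the new savesegment() shape, assuming a 16-bit selector:

    unsigned short sel;
    /* the compiler is free to keep `sel` in a register or in memory */
    asm volatile("mov %%fs,%0" : "=rm" (sel));
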
@@ -107,13 +89,33 @@ static inline unsigned long _get_base(char * addr)
 #define clts() __asm__ __volatile__ ("clts")
 #define read_cr0() ({ \
        unsigned int __dummy; \
-       __asm__( \
+       __asm__ __volatile__( \
                "movl %%cr0,%0\n\t" \
                :"=r" (__dummy)); \
        __dummy; \
 })
 #define write_cr0(x) \
-       __asm__("movl %0,%%cr0": :"r" (x));
+       __asm__ __volatile__("movl %0,%%cr0": :"r" (x));
+
+#define read_cr2() ({ \
+       unsigned int __dummy; \
+       __asm__ __volatile__( \
+               "movl %%cr2,%0\n\t" \
+               :"=r" (__dummy)); \
+       __dummy; \
+})
+#define write_cr2(x) \
+       __asm__ __volatile__("movl %0,%%cr2": :"r" (x));
+
+#define read_cr3() ({ \
+       unsigned int __dummy; \
+       __asm__ ( \
+               "movl %%cr3,%0\n\t" \
+               :"=r" (__dummy)); \
+       __dummy; \
+})
+#define write_cr3(x) \
+       __asm__ __volatile__("movl %0,%%cr3": :"r" (x));
 
 #define read_cr4() ({ \
        unsigned int __dummy; \
@@ -122,8 +124,21 @@ static inline unsigned long _get_base(char * addr)
                :"=r" (__dummy)); \
        __dummy; \
 })
+
+#define read_cr4_safe() ({                           \
+       unsigned int __dummy;                         \
+       /* This could fault if %cr4 does not exist */ \
+       __asm__("1: movl %%cr4, %0              \n"   \
+               "2:                             \n"   \
+               ".section __ex_table,\"a\"      \n"   \
+               ".long 1b,2b                    \n"   \
+               ".previous                      \n"   \
+               : "=r" (__dummy): "0" (0));           \
+       __dummy;                                      \
+})
+
 #define write_cr4(x) \
-       __asm__("movl %0,%%cr4": :"r" (x));
+       __asm__ __volatile__("movl %0,%%cr4": :"r" (x));
 #define stts() write_cr0(8 | read_cr0())
 
 #endif /* __KERNEL__ */
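
read_cr4_safe() leans on the kernel's exception-table machinery: CR4 does not exist on the 386 and the earliest 486s, so the `.long 1b,2b` entry directs the fixup code to resume at label 2 if the read at label 1 faults, and the `"0" (0)` input pre-loads the result register with 0 so a faulting probe reads back as zero. A hedged usage sketch (X86_CR4_PAE is the bit 5 mask; the printk is purely illustrative):

    unsigned int cr4 = read_cr4_safe();     /* 0 on CPUs without CR4 */
    if (cr4 & 0x20 /* X86_CR4_PAE */)
            printk("PAE paging is active\n");
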
@@ -149,6 +164,8 @@ struct __xchg_dummy { unsigned long a[100]; };
 #define __xg(x) ((struct __xchg_dummy *)(x))
 
 
+#ifdef CONFIG_X86_CMPXCHG64
+
 /*
  * The semantics of CMPXCHG8B are a bit strange, this is why
  * there is a loop and the loading of %%eax and %%edx has to
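
The new CONFIG_X86_CMPXCHG64 guard makes the cmpxchg8b-based set_64bit() helpers conditional on hardware that actually has the instruction. A user-space analogue of the same idea, publishing a 64-bit value through a compare-and-swap loop (GCC builtins instead of inline asm; a sketch, not the kernel's implementation):

    #include <stdint.h>

    /* retry until the 8-byte store lands atomically; the builtin
     * refreshes `old` with the current value on each failure */
    static void set64(volatile uint64_t *p, uint64_t v)
    {
            uint64_t old = *p;
            while (!__atomic_compare_exchange_n(p, &old, v, 0,
                                                __ATOMIC_SEQ_CST,
                                                __ATOMIC_SEQ_CST))
                    ;
    }
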
@@ -203,6 +220,8 @@ static inline void __set_64bit_var (unsigned long long *ptr,
  __set_64bit(ptr, (unsigned int)(value), (unsigned int)((value)>>32ULL) ) : \
  __set_64bit(ptr, ll_low(value), ll_high(value)) )
 
+#endif
+
 /*
  * Note: no "lock" prefix even on SMP: xchg always implies lock anyway
  * Note 2: xchg has side effect, so that attribute volatile is necessary,
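
The note's point is that xchg with a memory operand asserts the bus lock implicitly, which makes it a complete test-and-set primitive even without a lock prefix. A small sketch (compile with -m32 for the i386 flavour):

    /* returns the previous value; 0 means the lock was free and the
     * caller now owns it, since the exchange is implicitly locked */
    static inline unsigned int tas(volatile unsigned int *p)
    {
            unsigned int v = 1;
            asm volatile("xchgl %0,%1" : "+r" (v), "+m" (*p) : : "memory");
            return v;
    }
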
@@ -241,6 +260,9 @@ static inline unsigned long __xchg(unsigned long x, volatile void * ptr, int siz
 
 #ifdef CONFIG_X86_CMPXCHG
 #define __HAVE_ARCH_CMPXCHG 1
+#define cmpxchg(ptr,o,n)\
+       ((__typeof__(*(ptr)))__cmpxchg((ptr),(unsigned long)(o),\
+                                       (unsigned long)(n),sizeof(*(ptr))))
 #endif
 
 static inline unsigned long __cmpxchg(volatile void *ptr, unsigned long old,
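
Moving the cmpxchg() wrapper up under CONFIG_X86_CMPXCHG is the point of this region: a kernel configured to boot on a plain 386 must not expand cmpxchg() straight to the instruction, and the run-time fallback appears further down. Callers follow the usual retry-loop shape; a hedged sketch, where `ctr` is a hypothetical unsigned long:

    unsigned long old, new;
    do {
            old = ctr;
            new = old + 1;
    } while (cmpxchg(&ctr, old, new) != old);
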
@@ -257,22 +279,78 @@ static inline unsigned long __cmpxchg(volatile void *ptr, unsigned long old,
        case 2:
                __asm__ __volatile__(LOCK_PREFIX "cmpxchgw %w1,%2"
                                     : "=a"(prev)
-                                    : "q"(new), "m"(*__xg(ptr)), "0"(old)
+                                    : "r"(new), "m"(*__xg(ptr)), "0"(old)
                                     : "memory");
                return prev;
        case 4:
                __asm__ __volatile__(LOCK_PREFIX "cmpxchgl %1,%2"
                                     : "=a"(prev)
-                                    : "q"(new), "m"(*__xg(ptr)), "0"(old)
+                                    : "r"(new), "m"(*__xg(ptr)), "0"(old)
                                     : "memory");
                return prev;
        }
        return old;
 }
 
-#define cmpxchg(ptr,o,n)\
-       ((__typeof__(*(ptr)))__cmpxchg((ptr),(unsigned long)(o),\
-                                       (unsigned long)(n),sizeof(*(ptr))))
+#ifndef CONFIG_X86_CMPXCHG
+/*
+ * When building a kernel capable of running on an 80386, it may be
+ * necessary to emulate cmpxchg, since the 80386 lacks the instruction.
+ * For that purpose we define a function for each of the sizes we support.
+ */
+
+extern unsigned long cmpxchg_386_u8(volatile void *, u8, u8);
+extern unsigned long cmpxchg_386_u16(volatile void *, u16, u16);
+extern unsigned long cmpxchg_386_u32(volatile void *, u32, u32);
+
+static inline unsigned long cmpxchg_386(volatile void *ptr, unsigned long old,
+                                     unsigned long new, int size)
+{
+       switch (size) {
+       case 1:
+               return cmpxchg_386_u8(ptr, old, new);
+       case 2:
+               return cmpxchg_386_u16(ptr, old, new);
+       case 4:
+               return cmpxchg_386_u32(ptr, old, new);
+       }
+       return old;
+}
+
+#define cmpxchg(ptr,o,n)                                               \
+({                                                                     \
+       __typeof__(*(ptr)) __ret;                                       \
+       if (likely(boot_cpu_data.x86 > 3))                              \
+               __ret = __cmpxchg((ptr), (unsigned long)(o),            \
+                                       (unsigned long)(n), sizeof(*(ptr))); \
+       else                                                            \
+               __ret = cmpxchg_386((ptr), (unsigned long)(o),          \
+                                       (unsigned long)(n), sizeof(*(ptr))); \
+       __ret;                                                          \
+})
+#endif
+
+#ifdef CONFIG_X86_CMPXCHG64
+
+static inline unsigned long long __cmpxchg64(volatile void *ptr, unsigned long long old,
+                                     unsigned long long new)
+{
+       unsigned long long prev;
+       __asm__ __volatile__(LOCK_PREFIX "cmpxchg8b %3"
+                            : "=A"(prev)
+                            : "b"((unsigned long)new),
+                              "c"((unsigned long)(new >> 32)),
+                              "m"(*__xg(ptr)),
+                              "0"(old)
+                            : "memory");
+       return prev;
+}
+
+#define cmpxchg64(ptr,o,n)\
+       ((__typeof__(*(ptr)))__cmpxchg64((ptr),(unsigned long long)(o),\
+                                       (unsigned long long)(n)))
+
+#endif
     
 #ifdef __KERNEL__
 struct alt_instr { 
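
The 386 fallback above dispatches at run time on boot_cpu_data.x86, so a CONFIG_X86_CMPXCHG=n kernel still uses the hardware instruction when it happens to boot on a 486 or later. The out-of-line helpers it calls live elsewhere; a sketch of what such an emulation has to look like, assuming atomicity comes from masking local interrupts (sufficient on a uniprocessor 80386):

    /* mirrors the extern cmpxchg_386_u32() declared above; a sketch,
     * not the actual out-of-line implementation */
    unsigned long cmpxchg_386_u32(volatile void *ptr, u32 old, u32 new)
    {
            u32 prev;
            unsigned long flags;

            local_irq_save(flags);          /* UP-only atomicity */
            prev = *(volatile u32 *)ptr;
            if (prev == old)
                    *(volatile u32 *)ptr = new;
            local_irq_restore(flags);
            return prev;
    }
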
@@ -447,6 +525,8 @@ struct alt_instr {
 #define local_irq_enable()     __asm__ __volatile__("sti": : :"memory")
 /* used in the idle loop; sti delays interrupt delivery by one instruction, so sti;hlt is race-free */
 #define safe_halt()            __asm__ __volatile__("sti; hlt": : :"memory")
+/* used when interrupts are already enabled or to shut down the processor */
+#define halt()                 __asm__ __volatile__("hlt": : :"memory")
 
 #define irqs_disabled()                        \
 ({                                     \
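
safe_halt() fuses sti and hlt into one asm statement because sti holds off interrupt delivery until after the following instruction, so no wakeup can slip into the gap; the new halt() is for paths where interrupts are already enabled, or for parking a processor. A hedged sketch of the shutdown-style use:

    /* with interrupts masked, hlt only resumes for NMI or INIT,
     * so spin the processor down permanently */
    local_irq_disable();
    for (;;)
            halt();
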
@@ -468,6 +548,15 @@ void enable_hlt(void);
 extern int es7000_plat;
 void cpu_idle_wait(void);
 
+/*
+ * On SMP systems, when the scheduler does migration-cost autodetection,
+ * it needs a way to flush as much of the CPU's caches as possible:
+ */
+static inline void sched_cacheflush(void)
+{
+       wbinvd();
+}
+
 extern unsigned long arch_align_stack(unsigned long sp);
 
 #endif
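
sched_cacheflush() maps to wbinvd, which writes back and invalidates every cache level; it is the heaviest flush the architecture offers and a very slow, privileged instruction, which is acceptable for a one-off migration-cost measurement. A hedged sketch of how such a probe might use it; touch_buffer() and now_ns() are hypothetical helpers:

    unsigned long long t0, cost;

    sched_cacheflush();             /* start the timed run cache-cold */
    t0 = now_ns();
    touch_buffer();                 /* the workload being measured */
    cost = now_ns() - t0;
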