x86: sanitize paths arch/x86/kernel/cpu/Makefile
[linux-3.10.git] / include / asm-x86_64 / pda.h
1 #ifndef X86_64_PDA_H
2 #define X86_64_PDA_H
3
4 #ifndef __ASSEMBLY__
5 #include <linux/stddef.h>
6 #include <linux/types.h>
7 #include <linux/cache.h>
8 #include <asm/page.h>
9
10 /* Per processor datastructure. %gs points to it while the kernel runs */ 
11 struct x8664_pda {
12         struct task_struct *pcurrent;   /* 0  Current process */
13         unsigned long data_offset;      /* 8 Per cpu data offset from linker
14                                            address */
15         unsigned long kernelstack;  /* 16 top of kernel stack for current */
16         unsigned long oldrsp;       /* 24 user rsp for system call */
17         int irqcount;               /* 32 Irq nesting counter. Starts with -1 */
18         int cpunumber;              /* 36 Logical CPU number */
19 #ifdef CONFIG_CC_STACKPROTECTOR
20         unsigned long stack_canary;     /* 40 stack canary value */
21                                         /* gcc-ABI: this canary MUST be at
22                                            offset 40!!! */
23 #endif
24         char *irqstackptr;
25         int nodenumber;             /* number of current node */
26         unsigned int __softirq_pending;
27         unsigned int __nmi_count;       /* number of NMI on this CPUs */
28         short mmu_state;
29         short isidle;
30         struct mm_struct *active_mm;
31         unsigned apic_timer_irqs;
32 } ____cacheline_aligned_in_smp;
33
/*
 * Per-CPU PDA pointer table and the statically allocated boot-time PDAs;
 * both are defined in arch code.
 */
extern struct x8664_pda *_cpu_pda[];
extern struct x8664_pda boot_cpu_pda[];

/* Look up the PDA of logical CPU 'i'. */
#define cpu_pda(i) (_cpu_pda[i])

/* 
 * There is no fast way to get the base address of the PDA, all the accesses
 * have to mention %fs/%gs.  So it needs to be done this Torvaldian way.
 */ 
/* NOTE(review): presumably never defined anywhere, so that an unsupported
   field size in the accessor macros below turns into a link-time error --
   confirm. */
extern void __bad_pda_field(void) __attribute__((noreturn));

/*
 * proxy_pda doesn't actually exist, but tell gcc it is accessed for
 * all PDA accesses so it gets read/write dependencies right.
 */
extern struct x8664_pda _proxy_pda;

/* Byte offset of 'field' within struct x8664_pda (used as a %c asm operand). */
#define pda_offset(field) offsetof(struct x8664_pda, field)
52
/*
 * pda_to_op(op, field, val) - apply "op" (mov/add/sub/or) to a PDA field
 * through the %gs segment base, with the operand size (w/l/q) chosen from
 * the field's size.
 *
 * The dead "if (0)" assignment forces a compile-time type check of 'val'
 * against the field's type.  The "+m" (_proxy_pda.field) operand gives gcc
 * a read/write dependency on the (fictitious) proxy object so PDA accesses
 * are not reordered against each other.
 *
 * The 8-byte case uses "re" rather than "ri": 64-bit ALU instructions can
 * only encode a 32-bit sign-extended immediate, and plain "i" would let
 * gcc pick a full 64-bit constant that cannot be assembled.
 */
#define pda_to_op(op,field,val) do {            \
        typedef typeof(_proxy_pda.field) T__;   \
        if (0) { T__ tmp__; tmp__ = (val); }    /* type checking */ \
        switch (sizeof(_proxy_pda.field)) {     \
        case 2:                                 \
                asm(op "w %1,%%gs:%c2" :        \
                    "+m" (_proxy_pda.field) :   \
                    "ri" ((T__)val),            \
                    "i" (pda_offset(field)));   \
                break;                          \
        case 4:                                 \
                asm(op "l %1,%%gs:%c2" :        \
                    "+m" (_proxy_pda.field) :   \
                    "ri" ((T__)val),            \
                    "i" (pda_offset(field)));   \
                break;                          \
        case 8:                                 \
                asm(op "q %1,%%gs:%c2" :        \
                    "+m" (_proxy_pda.field) :   \
                    "re" ((T__)val),            \
                    "i" (pda_offset(field)));   \
                break;                          \
        default:                                \
                __bad_pda_field();              \
        }                                       \
} while (0)
79
/*
 * pda_from_op(op, field) - read a PDA field through %gs, with the operand
 * size (w/l/q) chosen from the field's size; evaluates to the value read.
 *
 * The "m" (_proxy_pda.field) input makes gcc see a dependency on the
 * (fictitious) proxy object, ordering these reads against pda_to_op()
 * writes to the same field.  NOTE(review): the asm is not "volatile",
 * presumably so gcc may CSE repeated reads of the same field -- confirm.
 */
#define pda_from_op(op,field) ({                \
        typeof(_proxy_pda.field) ret__;         \
        switch (sizeof(_proxy_pda.field)) {     \
        case 2:                                 \
                asm(op "w %%gs:%c1,%0" :        \
                    "=r" (ret__) :              \
                    "i" (pda_offset(field)),    \
                    "m" (_proxy_pda.field));    \
                 break;                         \
        case 4:                                 \
                asm(op "l %%gs:%c1,%0":         \
                    "=r" (ret__):               \
                    "i" (pda_offset(field)),    \
                    "m" (_proxy_pda.field));    \
                 break;                         \
       case 8:                                  \
                asm(op "q %%gs:%c1,%0":         \
                    "=r" (ret__) :              \
                    "i" (pda_offset(field)),    \
                    "m" (_proxy_pda.field));    \
                 break;                         \
       default:                                 \
                __bad_pda_field();              \
       }                                        \
       ret__; })
105
/*
 * Convenience wrappers: read, write, and read-modify-write a single PDA
 * field of the current CPU through the sized %gs accessors above.
 */
#define read_pda(field)		pda_from_op("mov", field)
#define write_pda(field, val)	pda_to_op("mov", field, val)
#define add_pda(field, val)	pda_to_op("add", field, val)
#define sub_pda(field, val)	pda_to_op("sub", field, val)
#define or_pda(field, val)	pda_to_op("or", field, val)
111
/* This is not atomic against other CPUs -- CPU preemption needs to be off */
/*
 * Clear bit 'bit' in a PDA bitmask field and return nonzero (-1) iff it
 * was previously set: btr copies the selected bit into CF and clears it
 * in memory, then "sbbl %0,%0" computes 0 - 0 - CF, i.e. 0 or -1.
 * No lock prefix, hence the preemption requirement above.
 */
#define test_and_clear_bit_pda(bit,field) ({            \
        int old__;                                              \
        asm volatile("btr %2,%%gs:%c3\n\tsbbl %0,%0"            \
            : "=r" (old__), "+m" (_proxy_pda.field)             \
            : "dIr" (bit), "i" (pda_offset(field)) : "memory"); \
        old__;                                                  \
})
120
121 #endif
122
/*
 * Deliberately outside the !__ASSEMBLY__ section so entry code can use it.
 * NOTE(review): 5*8 = 40 bytes reserved at the top of the kernel stack --
 * presumably for the 5-word hardware exception/iret frame; confirm against
 * the entry.S users of this constant.
 */
#define PDA_STACKOFFSET (5*8)
124
125 #endif