#ifndef X86_64_PDA_H
#define X86_64_PDA_H

#ifndef __ASSEMBLY__
#include <linux/stddef.h>
#include <linux/types.h>
#include <linux/cache.h>
#include <asm/page.h>

/* Per-processor data structure. %gs points to it while the kernel runs */
struct x8664_pda {
	struct task_struct *pcurrent;	/* 0  Current process */
	unsigned long data_offset;	/* 8  Per cpu data offset from linker
					      address */
	unsigned long kernelstack;	/* 16 top of kernel stack for current */
	unsigned long oldrsp;		/* 24 user rsp for system call */
	int irqcount;			/* 32 Irq nesting counter. Starts with -1 */
	int cpunumber;			/* 36 Logical CPU number */
#ifdef CONFIG_CC_STACKPROTECTOR
	unsigned long stack_canary;	/* 40 stack canary value */
					/* gcc-ABI: this canary MUST be at
					   offset 40!!! */
#endif
	char *irqstackptr;
	int nodenumber;			/* number of the current node */
	unsigned int __softirq_pending;
	unsigned int __nmi_count;	/* number of NMIs on this CPU */
	short mmu_state;
	short isidle;
	struct mm_struct *active_mm;
	unsigned apic_timer_irqs;
	unsigned irq0_irqs;
} ____cacheline_aligned_in_smp;

extern struct x8664_pda *_cpu_pda[];
extern struct x8664_pda boot_cpu_pda[];

#define cpu_pda(i) (_cpu_pda[i])

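/*
 * Illustrative note (not part of the original header): the %gs-relative
 * accessors below only reach the PDA of the CPU currently executing.
 * Another CPU's PDA has to go through the pointer array instead, along
 * the lines of this hypothetical caller:
 *
 *	struct x8664_pda *pda = cpu_pda(cpu);
 *	pda->nodenumber = node;
 */
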
/*
 * There is no fast way to get the base address of the PDA, all the accesses
 * have to mention %fs/%gs.  So it needs to be done this Torvaldian way.
 */
extern void __bad_pda_field(void) __attribute__((noreturn));

/*
 * proxy_pda doesn't actually exist, but tell gcc it is accessed for
 * all PDA accesses so it gets read/write dependencies right.
 */
extern struct x8664_pda _proxy_pda;

#define pda_offset(field) offsetof(struct x8664_pda, field)

#define pda_to_op(op,field,val) do {		\
	typedef typeof(_proxy_pda.field) T__;	\
	if (0) { T__ tmp__; tmp__ = (val); }	/* type checking */ \
	switch (sizeof(_proxy_pda.field)) {	\
	case 2:					\
		asm(op "w %1,%%gs:%c2" :	\
		    "+m" (_proxy_pda.field) :	\
		    "ri" ((T__)val),		\
		    "i" (pda_offset(field)));	\
		break;				\
	case 4:					\
		asm(op "l %1,%%gs:%c2" :	\
		    "+m" (_proxy_pda.field) :	\
		    "ri" ((T__)val),		\
		    "i" (pda_offset(field)));	\
		break;				\
	case 8:					\
		asm(op "q %1,%%gs:%c2" :	\
		    "+m" (_proxy_pda.field) :	\
		    "ri" ((T__)val),		\
		    "i" (pda_offset(field)));	\
		break;				\
	default:				\
		__bad_pda_field();		\
	}					\
} while (0)

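/*
 * Rough sketch of an expansion (assumption, for illustration only):
 * pda_to_op("add", irqcount, 1) selects the 4-byte case because irqcount
 * is an int, and emits approximately
 *
 *	addl $1,%gs:32		# 32 == offsetof(struct x8664_pda, irqcount)
 *
 * while the dummy "+m" (_proxy_pda.field) operand only tells gcc that some
 * PDA memory is read and written.
 */
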
#define pda_from_op(op,field) ({		\
	typeof(_proxy_pda.field) ret__;		\
	switch (sizeof(_proxy_pda.field)) {	\
	case 2:					\
		asm(op "w %%gs:%c1,%0" :	\
		    "=r" (ret__) :		\
		    "i" (pda_offset(field)),	\
		    "m" (_proxy_pda.field));	\
		break;				\
	case 4:					\
		asm(op "l %%gs:%c1,%0" :	\
		    "=r" (ret__) :		\
		    "i" (pda_offset(field)),	\
		    "m" (_proxy_pda.field));	\
		break;				\
	case 8:					\
		asm(op "q %%gs:%c1,%0" :	\
		    "=r" (ret__) :		\
		    "i" (pda_offset(field)),	\
		    "m" (_proxy_pda.field));	\
		break;				\
	default:				\
		__bad_pda_field();		\
	}					\
	ret__; })

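/*
 * Similarly, a sketch (assuming the field offsets documented above):
 * pda_from_op("mov", cpunumber) picks the 4-byte case and comes out as
 * roughly
 *
 *	movl %gs:36,%eax	# 36 == offsetof(struct x8664_pda, cpunumber)
 *
 * with the chosen register returned as ret__.
 */
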
#define read_pda(field) pda_from_op("mov",field)
#define write_pda(field,val) pda_to_op("mov",field,val)
#define add_pda(field,val) pda_to_op("add",field,val)
#define sub_pda(field,val) pda_to_op("sub",field,val)
#define or_pda(field,val) pda_to_op("or",field,val)

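/*
 * Typical usage, sketched here for illustration (the callers are
 * assumptions, not taken from this file); all of these act on the
 * current CPU's PDA through %gs:
 *
 *	int cpu = read_pda(cpunumber);
 *	add_pda(apic_timer_irqs, 1);
 *	write_pda(isidle, 1);
 */
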
/* This is not atomic against other CPUs -- CPU preemption needs to be off */
#define test_and_clear_bit_pda(bit,field) ({		\
	int old__;					\
	asm volatile("btr %2,%%gs:%c3\n\tsbbl %0,%0"	\
		     : "=r" (old__), "+m" (_proxy_pda.field) \
		     : "dIr" (bit), "i" (pda_offset(field)) : "memory"); \
	old__;						\
})

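/*
 * Hypothetical example (not from this file): clear bit 0 of the isidle
 * flag and learn whether it was set, with preemption already disabled
 * by the caller:
 *
 *	if (test_and_clear_bit_pda(0, isidle))
 *		...;
 */
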
#endif

#define PDA_STACKOFFSET (5*8)

#endif