/* arch/x86/include/asm/paravirt.h */
#ifndef _ASM_X86_PARAVIRT_H
#define _ASM_X86_PARAVIRT_H
/* Various instructions on x86 need to be replaced for
 * para-virtualization: those hooks are defined here. */

#ifdef CONFIG_PARAVIRT
#include <asm/page.h>
#include <asm/asm.h>

/* Bitmask of what can be clobbered: usually at least eax. */
#define CLBR_NONE 0
#define CLBR_EAX  (1 << 0)
#define CLBR_ECX  (1 << 1)
#define CLBR_EDX  (1 << 2)
#define CLBR_EDI  (1 << 3)

#ifdef CONFIG_X86_32
/* CLBR_ANY should match all the regs the platform has.  For i386, that's all of them. */
#define CLBR_ANY  ((1 << 4) - 1)

#define CLBR_ARG_REGS   (CLBR_EAX | CLBR_EDX | CLBR_ECX)
#define CLBR_RET_REG    (CLBR_EAX | CLBR_EDX)
#define CLBR_SCRATCH    (0)
#else
#define CLBR_RAX  CLBR_EAX
#define CLBR_RCX  CLBR_ECX
#define CLBR_RDX  CLBR_EDX
#define CLBR_RDI  CLBR_EDI
#define CLBR_RSI  (1 << 4)
#define CLBR_R8   (1 << 5)
#define CLBR_R9   (1 << 6)
#define CLBR_R10  (1 << 7)
#define CLBR_R11  (1 << 8)

#define CLBR_ANY  ((1 << 9) - 1)

#define CLBR_ARG_REGS   (CLBR_RDI | CLBR_RSI | CLBR_RDX | \
                         CLBR_RCX | CLBR_R8 | CLBR_R9)
#define CLBR_RET_REG    (CLBR_RAX)
#define CLBR_SCRATCH    (CLBR_R10 | CLBR_R11)

#include <asm/desc_defs.h>
#endif /* X86_64 */

#define CLBR_CALLEE_SAVE ((CLBR_ARG_REGS | CLBR_SCRATCH) & ~CLBR_RET_REG)

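/*
 * Worked example: on 64-bit,
 *   CLBR_CALLEE_SAVE = (CLBR_ARG_REGS | CLBR_SCRATCH) & ~CLBR_RET_REG
 *                    = RDI|RSI|RDX|RCX|R8|R9|R10|R11 (everything but RAX),
 * i.e. a callee-save call may clobber the argument and scratch
 * registers but must preserve the return register.
 */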
#ifndef __ASSEMBLY__
#include <linux/types.h>
#include <linux/cpumask.h>
#include <asm/kmap_types.h>
#include <asm/desc_defs.h>

struct page;
struct thread_struct;
struct desc_ptr;
struct tss_struct;
struct mm_struct;
struct desc_struct;

/*
 * Wrapper type for pointers to code which uses the non-standard
 * calling convention.  See PV_CALLEE_SAVE_REGS_THUNK below.
 */
struct paravirt_callee_save {
        void *func;
};

/* general info */
struct pv_info {
        unsigned int kernel_rpl;
        int shared_kernel_pmd;
        int paravirt_enabled;
        const char *name;
};

struct pv_init_ops {
        /*
         * Patch may replace one of the defined code sequences with
         * arbitrary code, subject to the same register constraints.
         * This generally means the code is not free to clobber any
         * registers other than EAX.  The patch function should return
         * the number of bytes of code generated, as we nop pad the
         * rest in generic code.
         */
        unsigned (*patch)(u8 type, u16 clobber, void *insnbuf,
                          unsigned long addr, unsigned len);

        /* Basic arch-specific setup */
        void (*arch_setup)(void);
        char *(*memory_setup)(void);
        void (*post_allocator_init)(void);

        /* Print a banner to identify the environment */
        void (*banner)(void);
};

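/*
 * A back end that wants no inline patching at all can implement
 * ->patch as a thin wrapper around the generic patcher declared
 * further down in this header (a sketch; my_patch() is a hypothetical
 * name):
 *
 *      static unsigned my_patch(u8 type, u16 clobbers, void *insnbuf,
 *                               unsigned long addr, unsigned len)
 *      {
 *              return paravirt_patch_default(type, clobbers, insnbuf,
 *                                            addr, len);
 *      }
 */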

struct pv_lazy_ops {
        /* Set deferred update mode, used for batching operations. */
        void (*enter)(void);
        void (*leave)(void);
};

struct pv_time_ops {
        void (*time_init)(void);

        /* Get and set time of day */
        unsigned long (*get_wallclock)(void);
        int (*set_wallclock)(unsigned long);

        unsigned long long (*sched_clock)(void);
        unsigned long (*get_tsc_khz)(void);
};

struct pv_cpu_ops {
        /* hooks for various privileged instructions */
        unsigned long (*get_debugreg)(int regno);
        void (*set_debugreg)(int regno, unsigned long value);

        void (*clts)(void);

        unsigned long (*read_cr0)(void);
        void (*write_cr0)(unsigned long);

        unsigned long (*read_cr4_safe)(void);
        unsigned long (*read_cr4)(void);
        void (*write_cr4)(unsigned long);

#ifdef CONFIG_X86_64
        unsigned long (*read_cr8)(void);
        void (*write_cr8)(unsigned long);
#endif

        /* Segment descriptor handling */
        void (*load_tr_desc)(void);
        void (*load_gdt)(const struct desc_ptr *);
        void (*load_idt)(const struct desc_ptr *);
        void (*store_gdt)(struct desc_ptr *);
        void (*store_idt)(struct desc_ptr *);
        void (*set_ldt)(const void *desc, unsigned entries);
        unsigned long (*store_tr)(void);
        void (*load_tls)(struct thread_struct *t, unsigned int cpu);
#ifdef CONFIG_X86_64
        void (*load_gs_index)(unsigned int idx);
#endif
        void (*write_ldt_entry)(struct desc_struct *ldt, int entrynum,
                                const void *desc);
        void (*write_gdt_entry)(struct desc_struct *,
                                int entrynum, const void *desc, int size);
        void (*write_idt_entry)(gate_desc *,
                                int entrynum, const gate_desc *gate);
        void (*alloc_ldt)(struct desc_struct *ldt, unsigned entries);
        void (*free_ldt)(struct desc_struct *ldt, unsigned entries);

        void (*load_sp0)(struct tss_struct *tss, struct thread_struct *t);

        void (*set_iopl_mask)(unsigned mask);

        void (*wbinvd)(void);
        void (*io_delay)(void);

        /* cpuid emulation, mostly so that caps bits can be disabled */
        void (*cpuid)(unsigned int *eax, unsigned int *ebx,
                      unsigned int *ecx, unsigned int *edx);

        /* MSR, PMC and TSC operations.
           err = 0/-EFAULT.  wrmsr returns 0/-EFAULT. */
        u64 (*read_msr_amd)(unsigned int msr, int *err);
        u64 (*read_msr)(unsigned int msr, int *err);
        int (*write_msr)(unsigned int msr, unsigned low, unsigned high);

        u64 (*read_tsc)(void);
        u64 (*read_pmc)(int counter);
        unsigned long long (*read_tscp)(unsigned int *aux);

        /*
         * Atomically enable interrupts and return to userspace.  This
         * is only ever used to return to 32-bit processes; in a
         * 64-bit kernel, it's used for 32-on-64 compat processes, but
         * never native 64-bit processes.  (Jump, not call.)
         */
        void (*irq_enable_sysexit)(void);

        /*
         * Switch to usermode gs and return to 64-bit usermode using
         * sysret.  Only used in 64-bit kernels to return to 64-bit
         * processes.  Usermode register state, including %rsp, must
         * already be restored.
         */
        void (*usergs_sysret64)(void);

        /*
         * Switch to usermode gs and return to 32-bit usermode using
         * sysret.  Used to return to 32-on-64 compat processes.
         * Other usermode register state, including %esp, must already
         * be restored.
         */
        void (*usergs_sysret32)(void);

        /* Normal iret.  Jump to this with the standard iret stack
           frame set up. */
        void (*iret)(void);

        void (*swapgs)(void);

        struct pv_lazy_ops lazy_mode;
};

struct pv_irq_ops {
        void (*init_IRQ)(void);

        /*
         * Get/set interrupt state.  save_fl and restore_fl are only
         * expected to use X86_EFLAGS_IF; all other bits
         * returned from save_fl are undefined, and may be ignored by
         * restore_fl.
         *
         * NOTE: Callers of these functions expect the callee to
         * preserve more registers than the standard C calling
         * convention requires.
         */
        struct paravirt_callee_save save_fl;
        struct paravirt_callee_save restore_fl;
        struct paravirt_callee_save irq_disable;
        struct paravirt_callee_save irq_enable;

        void (*safe_halt)(void);
        void (*halt)(void);

#ifdef CONFIG_X86_64
        void (*adjust_exception_frame)(void);
#endif
};

struct pv_apic_ops {
#ifdef CONFIG_X86_LOCAL_APIC
        void (*setup_boot_clock)(void);
        void (*setup_secondary_clock)(void);

        void (*startup_ipi_hook)(int phys_apicid,
                                 unsigned long start_eip,
                                 unsigned long start_esp);
#endif
};

struct pv_mmu_ops {
        /*
         * Called before/after init_mm pagetable setup. setup_start
         * may reset %cr3, and may pre-install parts of the pagetable;
         * pagetable setup is expected to preserve any existing
         * mapping.
         */
        void (*pagetable_setup_start)(pgd_t *pgd_base);
        void (*pagetable_setup_done)(pgd_t *pgd_base);

        unsigned long (*read_cr2)(void);
        void (*write_cr2)(unsigned long);

        unsigned long (*read_cr3)(void);
        void (*write_cr3)(unsigned long);

        /*
         * Hooks for intercepting the creation/use/destruction of an
         * mm_struct.
         */
        void (*activate_mm)(struct mm_struct *prev,
                            struct mm_struct *next);
        void (*dup_mmap)(struct mm_struct *oldmm,
                         struct mm_struct *mm);
        void (*exit_mmap)(struct mm_struct *mm);


        /* TLB operations */
        void (*flush_tlb_user)(void);
        void (*flush_tlb_kernel)(void);
        void (*flush_tlb_single)(unsigned long addr);
        void (*flush_tlb_others)(const struct cpumask *cpus,
                                 struct mm_struct *mm,
                                 unsigned long va);

        /* Hooks for allocating and freeing a pagetable top-level */
        int  (*pgd_alloc)(struct mm_struct *mm);
        void (*pgd_free)(struct mm_struct *mm, pgd_t *pgd);

        /*
         * Hooks for allocating/releasing pagetable pages when they're
         * attached to a pagetable
         */
        void (*alloc_pte)(struct mm_struct *mm, unsigned long pfn);
        void (*alloc_pmd)(struct mm_struct *mm, unsigned long pfn);
        void (*alloc_pmd_clone)(unsigned long pfn, unsigned long clonepfn,
                                unsigned long start, unsigned long count);
        void (*alloc_pud)(struct mm_struct *mm, unsigned long pfn);
        void (*release_pte)(unsigned long pfn);
        void (*release_pmd)(unsigned long pfn);
        void (*release_pud)(unsigned long pfn);

        /* Pagetable manipulation functions */
        void (*set_pte)(pte_t *ptep, pte_t pteval);
        void (*set_pte_at)(struct mm_struct *mm, unsigned long addr,
                           pte_t *ptep, pte_t pteval);
        void (*set_pmd)(pmd_t *pmdp, pmd_t pmdval);
        void (*pte_update)(struct mm_struct *mm, unsigned long addr,
                           pte_t *ptep);
        void (*pte_update_defer)(struct mm_struct *mm,
                                 unsigned long addr, pte_t *ptep);

        pte_t (*ptep_modify_prot_start)(struct mm_struct *mm, unsigned long addr,
                                        pte_t *ptep);
        void (*ptep_modify_prot_commit)(struct mm_struct *mm, unsigned long addr,
                                        pte_t *ptep, pte_t pte);

        struct paravirt_callee_save pte_val;
        struct paravirt_callee_save make_pte;

        struct paravirt_callee_save pgd_val;
        struct paravirt_callee_save make_pgd;

#if PAGETABLE_LEVELS >= 3
#ifdef CONFIG_X86_PAE
        void (*set_pte_atomic)(pte_t *ptep, pte_t pteval);
        void (*set_pte_present)(struct mm_struct *mm, unsigned long addr,
                                pte_t *ptep, pte_t pte);
        void (*pte_clear)(struct mm_struct *mm, unsigned long addr,
                          pte_t *ptep);
        void (*pmd_clear)(pmd_t *pmdp);

#endif  /* CONFIG_X86_PAE */

        void (*set_pud)(pud_t *pudp, pud_t pudval);

        struct paravirt_callee_save pmd_val;
        struct paravirt_callee_save make_pmd;

#if PAGETABLE_LEVELS == 4
        struct paravirt_callee_save pud_val;
        struct paravirt_callee_save make_pud;

        void (*set_pgd)(pgd_t *pgdp, pgd_t pgdval);
#endif  /* PAGETABLE_LEVELS == 4 */
#endif  /* PAGETABLE_LEVELS >= 3 */

#ifdef CONFIG_HIGHPTE
        void *(*kmap_atomic_pte)(struct page *page, enum km_type type);
#endif

        struct pv_lazy_ops lazy_mode;

        /* dom0 ops */

        /* Sometimes the physical address is a pfn, and sometimes it's
           an mfn.  We can tell which is which from the index. */
        void (*set_fixmap)(unsigned /* enum fixed_addresses */ idx,
                           unsigned long phys, pgprot_t flags);
};

struct raw_spinlock;
struct pv_lock_ops {
        int (*spin_is_locked)(struct raw_spinlock *lock);
        int (*spin_is_contended)(struct raw_spinlock *lock);
        void (*spin_lock)(struct raw_spinlock *lock);
        void (*spin_lock_flags)(struct raw_spinlock *lock, unsigned long flags);
        int (*spin_trylock)(struct raw_spinlock *lock);
        void (*spin_unlock)(struct raw_spinlock *lock);
};

/* This contains all the paravirt structures: we get a convenient
 * number for each function using the offset which we use to indicate
 * what to patch. */
struct paravirt_patch_template {
        struct pv_init_ops pv_init_ops;
        struct pv_time_ops pv_time_ops;
        struct pv_cpu_ops pv_cpu_ops;
        struct pv_irq_ops pv_irq_ops;
        struct pv_apic_ops pv_apic_ops;
        struct pv_mmu_ops pv_mmu_ops;
        struct pv_lock_ops pv_lock_ops;
};

extern struct pv_info pv_info;
extern struct pv_init_ops pv_init_ops;
extern struct pv_time_ops pv_time_ops;
extern struct pv_cpu_ops pv_cpu_ops;
extern struct pv_irq_ops pv_irq_ops;
extern struct pv_apic_ops pv_apic_ops;
extern struct pv_mmu_ops pv_mmu_ops;
extern struct pv_lock_ops pv_lock_ops;

#define PARAVIRT_PATCH(x)                                       \
        (offsetof(struct paravirt_patch_template, x) / sizeof(void *))

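/*
 * For example, PARAVIRT_PATCH(pv_irq_ops.irq_disable) evaluates to the
 * pointer-sized-word index of the irq_disable slot within struct
 * paravirt_patch_template; that index is the "type" number recorded at
 * each patch site, and can be converted back into a structure offset
 * by the patcher.
 */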
#define paravirt_type(op)                               \
        [paravirt_typenum] "i" (PARAVIRT_PATCH(op)),    \
        [paravirt_opptr] "m" (op)
#define paravirt_clobber(clobber)               \
        [paravirt_clobber] "i" (clobber)

/*
 * Generate some code, and mark it as patchable by the
 * apply_paravirt() alternate instruction patcher.
 */
#define _paravirt_alt(insn_string, type, clobber)       \
        "771:\n\t" insn_string "\n" "772:\n"            \
        ".pushsection .parainstructions,\"a\"\n"        \
        _ASM_ALIGN "\n"                                 \
        _ASM_PTR " 771b\n"                              \
        "  .byte " type "\n"                            \
        "  .byte 772b-771b\n"                           \
        "  .short " clobber "\n"                        \
        ".popsection\n"

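/*
 * Each use of _paravirt_alt() therefore emits the instruction sequence
 * bracketed by the local labels 771/772, plus one record in the
 * .parainstructions section: the site's address, its type number, its
 * byte length (772b-771b) and its clobber mask, which is exactly the
 * layout of struct paravirt_patch_site defined near the end of this
 * header.
 */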
/* Generate patchable code, with the default asm parameters. */
#define paravirt_alt(insn_string)                                       \
        _paravirt_alt(insn_string, "%c[paravirt_typenum]", "%c[paravirt_clobber]")

/* Simple instruction patching code. */
#define DEF_NATIVE(ops, name, code)                                     \
        extern const char start_##ops##_##name[], end_##ops##_##name[]; \
        asm("start_" #ops "_" #name ": " code "; end_" #ops "_" #name ":")

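/*
 * Typical use (as in arch/x86/kernel/paravirt_patch_32.c) defines the
 * native instruction sequence that may be patched over a call site:
 *
 *      DEF_NATIVE(pv_irq_ops, irq_disable, "cli");
 *      DEF_NATIVE(pv_irq_ops, irq_enable, "sti");
 *
 * giving start_pv_irq_ops_irq_disable[]/end_pv_irq_ops_irq_disable[]
 * markers that paravirt_patch_insns() can copy from.
 */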
unsigned paravirt_patch_nop(void);
unsigned paravirt_patch_ident_32(void *insnbuf, unsigned len);
unsigned paravirt_patch_ident_64(void *insnbuf, unsigned len);
unsigned paravirt_patch_ignore(unsigned len);
unsigned paravirt_patch_call(void *insnbuf,
                             const void *target, u16 tgt_clobbers,
                             unsigned long addr, u16 site_clobbers,
                             unsigned len);
unsigned paravirt_patch_jmp(void *insnbuf, const void *target,
                            unsigned long addr, unsigned len);
unsigned paravirt_patch_default(u8 type, u16 clobbers, void *insnbuf,
                                unsigned long addr, unsigned len);

unsigned paravirt_patch_insns(void *insnbuf, unsigned len,
                              const char *start, const char *end);

unsigned native_patch(u8 type, u16 clobbers, void *ibuf,
                      unsigned long addr, unsigned len);

int paravirt_disable_iospace(void);

/*
 * This generates an indirect call based on the operation type number.
 * The type number, computed in PARAVIRT_PATCH, is derived from the
 * offset into the paravirt_patch_template structure, and can therefore be
 * freely converted back into a structure offset.
 */
#define PARAVIRT_CALL   "call *%[paravirt_opptr];"

/*
 * These macros are intended to wrap calls through one of the paravirt
 * ops structs, so that they can be later identified and patched at
 * runtime.
 *
 * Normally, a call to a pv_op function is a simple indirect call:
 * (pv_op_struct.operations)(args...).
 *
 * Unfortunately, this is a relatively slow operation for modern CPUs,
 * because it cannot necessarily determine what the destination
 * address is.  In this case, the address is a runtime constant, so at
 * the very least we can patch the call to be a simple direct call, or
 * ideally, patch an inline implementation into the callsite.  (Direct
 * calls are essentially free, because the call and return addresses
 * are completely predictable.)
 *
 * For i386, these macros rely on the standard gcc "regparm(3)" calling
 * convention, in which the first three arguments are placed in %eax,
 * %edx, %ecx (in that order), and the remaining arguments are placed
 * on the stack.  All caller-save registers (eax,edx,ecx) are expected
 * to be modified (either clobbered or used for return values).
 * X86_64, on the other hand, already specifies a register-based
 * calling convention, returning in %rax, with parameters going in
 * %rdi, %rsi, %rdx, and %rcx.  Note that for this reason, x86_64 does
 * not need any special handling for dealing with 4 arguments, unlike
 * i386.  However, x86_64 also has to clobber all caller-saved
 * registers, which, unfortunately, are quite a few (r8-r11).
 *
 * The call instruction itself is marked by placing its start address
 * and size into the .parainstructions section, so that
 * apply_paravirt() in arch/x86/kernel/alternative.c can do the
 * appropriate patching under the control of the backend pv_init_ops
 * implementation.
 *
 * Unfortunately there's no way to get gcc to generate the args setup
 * for the call, and then allow the call itself to be generated by an
 * inline asm.  Because of this, we must do the complete arg setup and
 * return value handling from within these macros.  This is fairly
 * cumbersome.
 *
 * There are 5 sets of PVOP_* macros for dealing with 0-4 arguments.
 * It could be extended to more arguments, but there would be little
 * to be gained from that.  For each number of arguments, there are
 * the two VCALL and CALL variants for void and non-void functions.
 *
 * When there is a return value, the invoker of the macro must specify
 * the return type.  The macro then uses sizeof() on that type to
 * determine whether it's a 32 or 64 bit value, and places the return
 * in the right register(s) (just %eax for 32-bit, and %edx:%eax for
 * 64-bit).  On x86_64 machines, the return is simply in %rax
 * regardless of the return value size.
 *
 * 64-bit arguments are passed as a pair of adjacent 32-bit arguments;
 * i386 passes the pair in low,high order.
 *
 * Small structures are passed and returned in registers.  The macro
 * calling convention can't directly deal with this, so the wrapper
 * functions must do this.
 *
 * These PVOP_* macros are only defined within this header.  This
 * means that all uses must be wrapped in inline functions.  This also
 * makes sure the incoming and outgoing types are always correct.
 */
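/*
 * Sketch of what this machinery produces (illustrative, not a literal
 * expansion): a wrapper such as
 *
 *      static inline unsigned long read_cr2(void)
 *      {
 *              return PVOP_CALL0(unsigned long, pv_mmu_ops.read_cr2);
 *      }
 *
 * compiles to a single "call *pv_mmu_ops.read_cr2" with the result
 * left in the return register, plus a .parainstructions record; at
 * boot, apply_paravirt() may rewrite the indirect call into a direct
 * call, or inline the native "movq %cr2, %rax" at the site.
 */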
#ifdef CONFIG_X86_32
/* The self-assignments below quiet gcc's "may be used uninitialized"
 * warnings; these are output-only dummies that the asm writes. */
#define PVOP_VCALL_ARGS                         \
        unsigned long __eax = __eax, __edx = __edx, __ecx = __ecx
#define PVOP_CALL_ARGS                  PVOP_VCALL_ARGS

#define PVOP_CALL_ARG1(x)               "a" ((unsigned long)(x))
#define PVOP_CALL_ARG2(x)               "d" ((unsigned long)(x))
#define PVOP_CALL_ARG3(x)               "c" ((unsigned long)(x))

#define PVOP_VCALL_CLOBBERS             "=a" (__eax), "=d" (__edx),     \
                                        "=c" (__ecx)
#define PVOP_CALL_CLOBBERS              PVOP_VCALL_CLOBBERS

#define PVOP_VCALLEE_CLOBBERS           "=a" (__eax), "=d" (__edx)
#define PVOP_CALLEE_CLOBBERS            PVOP_VCALLEE_CLOBBERS

#define EXTRA_CLOBBERS
#define VEXTRA_CLOBBERS
#else  /* CONFIG_X86_64 */
#define PVOP_VCALL_ARGS                                 \
        unsigned long __edi = __edi, __esi = __esi,     \
                __edx = __edx, __ecx = __ecx
#define PVOP_CALL_ARGS          PVOP_VCALL_ARGS, __eax

#define PVOP_CALL_ARG1(x)               "D" ((unsigned long)(x))
#define PVOP_CALL_ARG2(x)               "S" ((unsigned long)(x))
#define PVOP_CALL_ARG3(x)               "d" ((unsigned long)(x))
#define PVOP_CALL_ARG4(x)               "c" ((unsigned long)(x))

#define PVOP_VCALL_CLOBBERS     "=D" (__edi),                           \
                                "=S" (__esi), "=d" (__edx),             \
                                "=c" (__ecx)
#define PVOP_CALL_CLOBBERS      PVOP_VCALL_CLOBBERS, "=a" (__eax)

#define PVOP_VCALLEE_CLOBBERS   "=a" (__eax)
#define PVOP_CALLEE_CLOBBERS    PVOP_VCALLEE_CLOBBERS

#define EXTRA_CLOBBERS   , "r8", "r9", "r10", "r11"
#define VEXTRA_CLOBBERS  , "rax", "r8", "r9", "r10", "r11"
#endif  /* CONFIG_X86_32 */

#ifdef CONFIG_PARAVIRT_DEBUG
#define PVOP_TEST_NULL(op)      BUG_ON(op == NULL)
#else
#define PVOP_TEST_NULL(op)      ((void)op)
#endif

#define ____PVOP_CALL(rettype, op, clbr, call_clbr, extra_clbr,         \
                      pre, post, ...)                                   \
        ({                                                              \
                rettype __ret;                                          \
                PVOP_CALL_ARGS;                                         \
                PVOP_TEST_NULL(op);                                     \
                /* This is 32-bit specific, but is okay in 64-bit */    \
                /* since this condition will never hold */              \
                if (sizeof(rettype) > sizeof(unsigned long)) {          \
                        asm volatile(pre                                \
                                     paravirt_alt(PARAVIRT_CALL)        \
                                     post                               \
                                     : call_clbr                        \
                                     : paravirt_type(op),               \
                                       paravirt_clobber(clbr),          \
                                       ##__VA_ARGS__                    \
                                     : "memory", "cc" extra_clbr);      \
                        __ret = (rettype)((((u64)__edx) << 32) | __eax); \
                } else {                                                \
                        asm volatile(pre                                \
                                     paravirt_alt(PARAVIRT_CALL)        \
                                     post                               \
                                     : call_clbr                        \
                                     : paravirt_type(op),               \
                                       paravirt_clobber(clbr),          \
                                       ##__VA_ARGS__                    \
                                     : "memory", "cc" extra_clbr);      \
                        __ret = (rettype)__eax;                         \
                }                                                       \
                __ret;                                                  \
        })

#define __PVOP_CALL(rettype, op, pre, post, ...)                        \
        ____PVOP_CALL(rettype, op, CLBR_ANY, PVOP_CALL_CLOBBERS,        \
                      EXTRA_CLOBBERS, pre, post, ##__VA_ARGS__)

#define __PVOP_CALLEESAVE(rettype, op, pre, post, ...)                  \
        ____PVOP_CALL(rettype, op.func, CLBR_RET_REG,                   \
                      PVOP_CALLEE_CLOBBERS, ,                           \
                      pre, post, ##__VA_ARGS__)


#define ____PVOP_VCALL(op, clbr, call_clbr, extra_clbr, pre, post, ...) \
        ({                                                              \
                PVOP_VCALL_ARGS;                                        \
                PVOP_TEST_NULL(op);                                     \
                asm volatile(pre                                        \
                             paravirt_alt(PARAVIRT_CALL)                \
                             post                                       \
                             : call_clbr                                \
                             : paravirt_type(op),                       \
                               paravirt_clobber(clbr),                  \
                               ##__VA_ARGS__                            \
                             : "memory", "cc" extra_clbr);              \
        })

#define __PVOP_VCALL(op, pre, post, ...)                                \
        ____PVOP_VCALL(op, CLBR_ANY, PVOP_VCALL_CLOBBERS,               \
                       VEXTRA_CLOBBERS,                                 \
                       pre, post, ##__VA_ARGS__)

#define __PVOP_VCALLEESAVE(op, pre, post, ...)                          \
        ____PVOP_VCALL(op.func, CLBR_RET_REG,                           \
                       PVOP_VCALLEE_CLOBBERS, ,                         \
                       pre, post, ##__VA_ARGS__)


#define PVOP_CALL0(rettype, op)                                         \
        __PVOP_CALL(rettype, op, "", "")
#define PVOP_VCALL0(op)                                                 \
        __PVOP_VCALL(op, "", "")

#define PVOP_CALLEE0(rettype, op)                                       \
        __PVOP_CALLEESAVE(rettype, op, "", "")
#define PVOP_VCALLEE0(op)                                               \
        __PVOP_VCALLEESAVE(op, "", "")


#define PVOP_CALL1(rettype, op, arg1)                                   \
        __PVOP_CALL(rettype, op, "", "", PVOP_CALL_ARG1(arg1))
#define PVOP_VCALL1(op, arg1)                                           \
        __PVOP_VCALL(op, "", "", PVOP_CALL_ARG1(arg1))

#define PVOP_CALLEE1(rettype, op, arg1)                                 \
        __PVOP_CALLEESAVE(rettype, op, "", "", PVOP_CALL_ARG1(arg1))
#define PVOP_VCALLEE1(op, arg1)                                         \
        __PVOP_VCALLEESAVE(op, "", "", PVOP_CALL_ARG1(arg1))


#define PVOP_CALL2(rettype, op, arg1, arg2)                             \
        __PVOP_CALL(rettype, op, "", "", PVOP_CALL_ARG1(arg1),          \
                    PVOP_CALL_ARG2(arg2))
#define PVOP_VCALL2(op, arg1, arg2)                                     \
        __PVOP_VCALL(op, "", "", PVOP_CALL_ARG1(arg1),                  \
                     PVOP_CALL_ARG2(arg2))

#define PVOP_CALLEE2(rettype, op, arg1, arg2)                           \
        __PVOP_CALLEESAVE(rettype, op, "", "", PVOP_CALL_ARG1(arg1),    \
                          PVOP_CALL_ARG2(arg2))
#define PVOP_VCALLEE2(op, arg1, arg2)                                   \
        __PVOP_VCALLEESAVE(op, "", "", PVOP_CALL_ARG1(arg1),            \
                           PVOP_CALL_ARG2(arg2))


#define PVOP_CALL3(rettype, op, arg1, arg2, arg3)                       \
        __PVOP_CALL(rettype, op, "", "", PVOP_CALL_ARG1(arg1),          \
                    PVOP_CALL_ARG2(arg2), PVOP_CALL_ARG3(arg3))
#define PVOP_VCALL3(op, arg1, arg2, arg3)                               \
        __PVOP_VCALL(op, "", "", PVOP_CALL_ARG1(arg1),                  \
                     PVOP_CALL_ARG2(arg2), PVOP_CALL_ARG3(arg3))

/* Four-argument calls are the only place where i386 and x86_64
 * differ; on x86_64 the fourth argument just goes in a register. */
#ifdef CONFIG_X86_32
#define PVOP_CALL4(rettype, op, arg1, arg2, arg3, arg4)                 \
        __PVOP_CALL(rettype, op,                                        \
                    "push %[_arg4];", "lea 4(%%esp),%%esp;",            \
                    PVOP_CALL_ARG1(arg1), PVOP_CALL_ARG2(arg2),         \
                    PVOP_CALL_ARG3(arg3), [_arg4] "mr" ((u32)(arg4)))
#define PVOP_VCALL4(op, arg1, arg2, arg3, arg4)                         \
        __PVOP_VCALL(op,                                                \
                    "push %[_arg4];", "lea 4(%%esp),%%esp;",            \
                    "0" ((u32)(arg1)), "1" ((u32)(arg2)),               \
                    "2" ((u32)(arg3)), [_arg4] "mr" ((u32)(arg4)))
#else
#define PVOP_CALL4(rettype, op, arg1, arg2, arg3, arg4)                 \
        __PVOP_CALL(rettype, op, "", "",                                \
                    PVOP_CALL_ARG1(arg1), PVOP_CALL_ARG2(arg2),         \
                    PVOP_CALL_ARG3(arg3), PVOP_CALL_ARG4(arg4))
#define PVOP_VCALL4(op, arg1, arg2, arg3, arg4)                         \
        __PVOP_VCALL(op, "", "",                                        \
                     PVOP_CALL_ARG1(arg1), PVOP_CALL_ARG2(arg2),        \
                     PVOP_CALL_ARG3(arg3), PVOP_CALL_ARG4(arg4))
#endif

static inline int paravirt_enabled(void)
{
        return pv_info.paravirt_enabled;
}

static inline void load_sp0(struct tss_struct *tss,
                             struct thread_struct *thread)
{
        PVOP_VCALL2(pv_cpu_ops.load_sp0, tss, thread);
}

#define ARCH_SETUP                      pv_init_ops.arch_setup();
static inline unsigned long get_wallclock(void)
{
        return PVOP_CALL0(unsigned long, pv_time_ops.get_wallclock);
}

static inline int set_wallclock(unsigned long nowtime)
{
        return PVOP_CALL1(int, pv_time_ops.set_wallclock, nowtime);
}

static inline void (*choose_time_init(void))(void)
{
        return pv_time_ops.time_init;
}

/* The paravirtualized CPUID instruction. */
static inline void __cpuid(unsigned int *eax, unsigned int *ebx,
                           unsigned int *ecx, unsigned int *edx)
{
        PVOP_VCALL4(pv_cpu_ops.cpuid, eax, ebx, ecx, edx);
}

/*
 * These special macros can be used to get or set a debugging register
 */
static inline unsigned long paravirt_get_debugreg(int reg)
{
        return PVOP_CALL1(unsigned long, pv_cpu_ops.get_debugreg, reg);
}
#define get_debugreg(var, reg) var = paravirt_get_debugreg(reg)
static inline void set_debugreg(unsigned long val, int reg)
{
        PVOP_VCALL2(pv_cpu_ops.set_debugreg, reg, val);
}

static inline void clts(void)
{
        PVOP_VCALL0(pv_cpu_ops.clts);
}

static inline unsigned long read_cr0(void)
{
        return PVOP_CALL0(unsigned long, pv_cpu_ops.read_cr0);
}

static inline void write_cr0(unsigned long x)
{
        PVOP_VCALL1(pv_cpu_ops.write_cr0, x);
}

static inline unsigned long read_cr2(void)
{
        return PVOP_CALL0(unsigned long, pv_mmu_ops.read_cr2);
}

static inline void write_cr2(unsigned long x)
{
        PVOP_VCALL1(pv_mmu_ops.write_cr2, x);
}

static inline unsigned long read_cr3(void)
{
        return PVOP_CALL0(unsigned long, pv_mmu_ops.read_cr3);
}

static inline void write_cr3(unsigned long x)
{
        PVOP_VCALL1(pv_mmu_ops.write_cr3, x);
}

static inline unsigned long read_cr4(void)
{
        return PVOP_CALL0(unsigned long, pv_cpu_ops.read_cr4);
}
static inline unsigned long read_cr4_safe(void)
{
        return PVOP_CALL0(unsigned long, pv_cpu_ops.read_cr4_safe);
}

static inline void write_cr4(unsigned long x)
{
        PVOP_VCALL1(pv_cpu_ops.write_cr4, x);
}

#ifdef CONFIG_X86_64
static inline unsigned long read_cr8(void)
{
        return PVOP_CALL0(unsigned long, pv_cpu_ops.read_cr8);
}

static inline void write_cr8(unsigned long x)
{
        PVOP_VCALL1(pv_cpu_ops.write_cr8, x);
}
#endif

static inline void raw_safe_halt(void)
{
        PVOP_VCALL0(pv_irq_ops.safe_halt);
}

static inline void halt(void)
{
        PVOP_VCALL0(pv_irq_ops.halt);
}

static inline void wbinvd(void)
{
        PVOP_VCALL0(pv_cpu_ops.wbinvd);
}

#define get_kernel_rpl()  (pv_info.kernel_rpl)

static inline u64 paravirt_read_msr(unsigned msr, int *err)
{
        return PVOP_CALL2(u64, pv_cpu_ops.read_msr, msr, err);
}
static inline u64 paravirt_read_msr_amd(unsigned msr, int *err)
{
        return PVOP_CALL2(u64, pv_cpu_ops.read_msr_amd, msr, err);
}
static inline int paravirt_write_msr(unsigned msr, unsigned low, unsigned high)
{
        return PVOP_CALL3(int, pv_cpu_ops.write_msr, msr, low, high);
}

/* These should all do BUG_ON(_err), but our headers are too tangled. */
#define rdmsr(msr, val1, val2)                  \
do {                                            \
        int _err;                               \
        u64 _l = paravirt_read_msr(msr, &_err); \
        val1 = (u32)_l;                         \
        val2 = _l >> 32;                        \
} while (0)

#define wrmsr(msr, val1, val2)                  \
do {                                            \
        paravirt_write_msr(msr, val1, val2);    \
} while (0)

#define rdmsrl(msr, val)                        \
do {                                            \
        int _err;                               \
        val = paravirt_read_msr(msr, &_err);    \
} while (0)

#define wrmsrl(msr, val)        wrmsr(msr, (u32)((u64)(val)), ((u64)(val))>>32)
#define wrmsr_safe(msr, a, b)   paravirt_write_msr(msr, a, b)

/* rdmsr with exception handling */
#define rdmsr_safe(msr, a, b)                   \
({                                              \
        int _err;                               \
        u64 _l = paravirt_read_msr(msr, &_err); \
        (*a) = (u32)_l;                         \
        (*b) = _l >> 32;                        \
        _err;                                   \
})

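/*
 * Example use of the safe variants (a sketch; MSR_IA32_MISC_ENABLE is
 * defined in <asm/msr-index.h>):
 *
 *      u32 lo, hi;
 *      if (rdmsr_safe(MSR_IA32_MISC_ENABLE, &lo, &hi) == 0)
 *              wrmsr_safe(MSR_IA32_MISC_ENABLE, lo, hi);
 *
 * Both expand to PVOP calls through pv_cpu_ops, so a hypervisor back
 * end can trap-and-emulate the access instead of executing the raw
 * rdmsr/wrmsr instructions.
 */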
static inline int rdmsrl_safe(unsigned msr, unsigned long long *p)
{
        int err;

        *p = paravirt_read_msr(msr, &err);
        return err;
}
static inline int rdmsrl_amd_safe(unsigned msr, unsigned long long *p)
{
        int err;

        *p = paravirt_read_msr_amd(msr, &err);
        return err;
}

static inline u64 paravirt_read_tsc(void)
{
        return PVOP_CALL0(u64, pv_cpu_ops.read_tsc);
}

#define rdtscl(low)                             \
do {                                            \
        u64 _l = paravirt_read_tsc();           \
        low = (int)_l;                          \
} while (0)

#define rdtscll(val) (val = paravirt_read_tsc())

static inline unsigned long long paravirt_sched_clock(void)
{
        return PVOP_CALL0(unsigned long long, pv_time_ops.sched_clock);
}
#define calibrate_tsc() (pv_time_ops.get_tsc_khz())

static inline unsigned long long paravirt_read_pmc(int counter)
{
        return PVOP_CALL1(u64, pv_cpu_ops.read_pmc, counter);
}

#define rdpmc(counter, low, high)               \
do {                                            \
        u64 _l = paravirt_read_pmc(counter);    \
        low = (u32)_l;                          \
        high = _l >> 32;                        \
} while (0)

static inline unsigned long long paravirt_rdtscp(unsigned int *aux)
{
        return PVOP_CALL1(u64, pv_cpu_ops.read_tscp, aux);
}

#define rdtscp(low, high, aux)                          \
do {                                                    \
        unsigned int __aux;                             \
        unsigned long long __val = paravirt_rdtscp(&__aux); \
        (low) = (u32)__val;                             \
        (high) = (u32)(__val >> 32);                    \
        (aux) = __aux;                                  \
} while (0)

#define rdtscpll(val, aux)                              \
do {                                                    \
        unsigned int __aux;                             \
        val = paravirt_rdtscp(&__aux);                  \
        (aux) = __aux;                                  \
} while (0)

static inline void paravirt_alloc_ldt(struct desc_struct *ldt, unsigned entries)
{
        PVOP_VCALL2(pv_cpu_ops.alloc_ldt, ldt, entries);
}

static inline void paravirt_free_ldt(struct desc_struct *ldt, unsigned entries)
{
        PVOP_VCALL2(pv_cpu_ops.free_ldt, ldt, entries);
}

static inline void load_TR_desc(void)
{
        PVOP_VCALL0(pv_cpu_ops.load_tr_desc);
}
static inline void load_gdt(const struct desc_ptr *dtr)
{
        PVOP_VCALL1(pv_cpu_ops.load_gdt, dtr);
}
static inline void load_idt(const struct desc_ptr *dtr)
{
        PVOP_VCALL1(pv_cpu_ops.load_idt, dtr);
}
static inline void set_ldt(const void *addr, unsigned entries)
{
        PVOP_VCALL2(pv_cpu_ops.set_ldt, addr, entries);
}
static inline void store_gdt(struct desc_ptr *dtr)
{
        PVOP_VCALL1(pv_cpu_ops.store_gdt, dtr);
}
static inline void store_idt(struct desc_ptr *dtr)
{
        PVOP_VCALL1(pv_cpu_ops.store_idt, dtr);
}
static inline unsigned long paravirt_store_tr(void)
{
        return PVOP_CALL0(unsigned long, pv_cpu_ops.store_tr);
}
#define store_tr(tr)    ((tr) = paravirt_store_tr())
static inline void load_TLS(struct thread_struct *t, unsigned cpu)
{
        PVOP_VCALL2(pv_cpu_ops.load_tls, t, cpu);
}

#ifdef CONFIG_X86_64
static inline void load_gs_index(unsigned int gs)
{
        PVOP_VCALL1(pv_cpu_ops.load_gs_index, gs);
}
#endif

static inline void write_ldt_entry(struct desc_struct *dt, int entry,
                                   const void *desc)
{
        PVOP_VCALL3(pv_cpu_ops.write_ldt_entry, dt, entry, desc);
}

static inline void write_gdt_entry(struct desc_struct *dt, int entry,
                                   void *desc, int type)
{
        PVOP_VCALL4(pv_cpu_ops.write_gdt_entry, dt, entry, desc, type);
}

static inline void write_idt_entry(gate_desc *dt, int entry, const gate_desc *g)
{
        PVOP_VCALL3(pv_cpu_ops.write_idt_entry, dt, entry, g);
}
static inline void set_iopl_mask(unsigned mask)
{
        PVOP_VCALL1(pv_cpu_ops.set_iopl_mask, mask);
}

/* The paravirtualized I/O functions */
static inline void slow_down_io(void)
{
        pv_cpu_ops.io_delay();
#ifdef REALLY_SLOW_IO
        pv_cpu_ops.io_delay();
        pv_cpu_ops.io_delay();
        pv_cpu_ops.io_delay();
#endif
}

#ifdef CONFIG_X86_LOCAL_APIC
static inline void setup_boot_clock(void)
{
        PVOP_VCALL0(pv_apic_ops.setup_boot_clock);
}

static inline void setup_secondary_clock(void)
{
        PVOP_VCALL0(pv_apic_ops.setup_secondary_clock);
}
#endif

static inline void paravirt_post_allocator_init(void)
{
        if (pv_init_ops.post_allocator_init)
                (*pv_init_ops.post_allocator_init)();
}

static inline void paravirt_pagetable_setup_start(pgd_t *base)
{
        (*pv_mmu_ops.pagetable_setup_start)(base);
}

static inline void paravirt_pagetable_setup_done(pgd_t *base)
{
        (*pv_mmu_ops.pagetable_setup_done)(base);
}

#ifdef CONFIG_SMP
static inline void startup_ipi_hook(int phys_apicid, unsigned long start_eip,
                                    unsigned long start_esp)
{
        PVOP_VCALL3(pv_apic_ops.startup_ipi_hook,
                    phys_apicid, start_eip, start_esp);
}
#endif

static inline void paravirt_activate_mm(struct mm_struct *prev,
                                        struct mm_struct *next)
{
        PVOP_VCALL2(pv_mmu_ops.activate_mm, prev, next);
}

static inline void arch_dup_mmap(struct mm_struct *oldmm,
                                 struct mm_struct *mm)
{
        PVOP_VCALL2(pv_mmu_ops.dup_mmap, oldmm, mm);
}

static inline void arch_exit_mmap(struct mm_struct *mm)
{
        PVOP_VCALL1(pv_mmu_ops.exit_mmap, mm);
}

static inline void __flush_tlb(void)
{
        PVOP_VCALL0(pv_mmu_ops.flush_tlb_user);
}
static inline void __flush_tlb_global(void)
{
        PVOP_VCALL0(pv_mmu_ops.flush_tlb_kernel);
}
static inline void __flush_tlb_single(unsigned long addr)
{
        PVOP_VCALL1(pv_mmu_ops.flush_tlb_single, addr);
}

static inline void flush_tlb_others(const struct cpumask *cpumask,
                                    struct mm_struct *mm,
                                    unsigned long va)
{
        PVOP_VCALL3(pv_mmu_ops.flush_tlb_others, cpumask, mm, va);
}

static inline int paravirt_pgd_alloc(struct mm_struct *mm)
{
        return PVOP_CALL1(int, pv_mmu_ops.pgd_alloc, mm);
}

static inline void paravirt_pgd_free(struct mm_struct *mm, pgd_t *pgd)
{
        PVOP_VCALL2(pv_mmu_ops.pgd_free, mm, pgd);
}

static inline void paravirt_alloc_pte(struct mm_struct *mm, unsigned long pfn)
{
        PVOP_VCALL2(pv_mmu_ops.alloc_pte, mm, pfn);
}
static inline void paravirt_release_pte(unsigned long pfn)
{
        PVOP_VCALL1(pv_mmu_ops.release_pte, pfn);
}

static inline void paravirt_alloc_pmd(struct mm_struct *mm, unsigned long pfn)
{
        PVOP_VCALL2(pv_mmu_ops.alloc_pmd, mm, pfn);
}

static inline void paravirt_alloc_pmd_clone(unsigned long pfn, unsigned long clonepfn,
                                            unsigned long start, unsigned long count)
{
        PVOP_VCALL4(pv_mmu_ops.alloc_pmd_clone, pfn, clonepfn, start, count);
}
static inline void paravirt_release_pmd(unsigned long pfn)
{
        PVOP_VCALL1(pv_mmu_ops.release_pmd, pfn);
}

static inline void paravirt_alloc_pud(struct mm_struct *mm, unsigned long pfn)
{
        PVOP_VCALL2(pv_mmu_ops.alloc_pud, mm, pfn);
}
static inline void paravirt_release_pud(unsigned long pfn)
{
        PVOP_VCALL1(pv_mmu_ops.release_pud, pfn);
}

#ifdef CONFIG_HIGHPTE
static inline void *kmap_atomic_pte(struct page *page, enum km_type type)
{
        unsigned long ret;
        ret = PVOP_CALL2(unsigned long, pv_mmu_ops.kmap_atomic_pte, page, type);
        return (void *)ret;
}
#endif

static inline void pte_update(struct mm_struct *mm, unsigned long addr,
                              pte_t *ptep)
{
        PVOP_VCALL3(pv_mmu_ops.pte_update, mm, addr, ptep);
}

static inline void pte_update_defer(struct mm_struct *mm, unsigned long addr,
                                    pte_t *ptep)
{
        PVOP_VCALL3(pv_mmu_ops.pte_update_defer, mm, addr, ptep);
}

static inline pte_t __pte(pteval_t val)
{
        pteval_t ret;

        if (sizeof(pteval_t) > sizeof(long))
                ret = PVOP_CALLEE2(pteval_t,
                                   pv_mmu_ops.make_pte,
                                   val, (u64)val >> 32);
        else
                ret = PVOP_CALLEE1(pteval_t,
                                   pv_mmu_ops.make_pte,
                                   val);

        return (pte_t) { .pte = ret };
}

static inline pteval_t pte_val(pte_t pte)
{
        pteval_t ret;

        if (sizeof(pteval_t) > sizeof(long))
                ret = PVOP_CALLEE2(pteval_t, pv_mmu_ops.pte_val,
                                   pte.pte, (u64)pte.pte >> 32);
        else
                ret = PVOP_CALLEE1(pteval_t, pv_mmu_ops.pte_val,
                                   pte.pte);

        return ret;
}

static inline pgd_t __pgd(pgdval_t val)
{
        pgdval_t ret;

        if (sizeof(pgdval_t) > sizeof(long))
                ret = PVOP_CALLEE2(pgdval_t, pv_mmu_ops.make_pgd,
                                   val, (u64)val >> 32);
        else
                ret = PVOP_CALLEE1(pgdval_t, pv_mmu_ops.make_pgd,
                                   val);

        return (pgd_t) { ret };
}

static inline pgdval_t pgd_val(pgd_t pgd)
{
        pgdval_t ret;

        if (sizeof(pgdval_t) > sizeof(long))
                ret =  PVOP_CALLEE2(pgdval_t, pv_mmu_ops.pgd_val,
                                    pgd.pgd, (u64)pgd.pgd >> 32);
        else
                ret =  PVOP_CALLEE1(pgdval_t, pv_mmu_ops.pgd_val,
                                    pgd.pgd);

        return ret;
}

#define  __HAVE_ARCH_PTEP_MODIFY_PROT_TRANSACTION
static inline pte_t ptep_modify_prot_start(struct mm_struct *mm, unsigned long addr,
                                           pte_t *ptep)
{
        pteval_t ret;

        ret = PVOP_CALL3(pteval_t, pv_mmu_ops.ptep_modify_prot_start,
                         mm, addr, ptep);

        return (pte_t) { .pte = ret };
}

static inline void ptep_modify_prot_commit(struct mm_struct *mm, unsigned long addr,
                                           pte_t *ptep, pte_t pte)
{
        if (sizeof(pteval_t) > sizeof(long))
                /* 5 arg words */
                pv_mmu_ops.ptep_modify_prot_commit(mm, addr, ptep, pte);
        else
                PVOP_VCALL4(pv_mmu_ops.ptep_modify_prot_commit,
                            mm, addr, ptep, pte.pte);
}

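/*
 * Typical use of the start/commit pair (a sketch): read the pte,
 * modify it, and write it back inside the transaction so a hypervisor
 * back end can make the read-modify-write atomic or batched:
 *
 *      pte_t pte = ptep_modify_prot_start(mm, addr, ptep);
 *      pte = pte_wrprotect(pte);
 *      ptep_modify_prot_commit(mm, addr, ptep, pte);
 */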
static inline void set_pte(pte_t *ptep, pte_t pte)
{
        if (sizeof(pteval_t) > sizeof(long))
                PVOP_VCALL3(pv_mmu_ops.set_pte, ptep,
                            pte.pte, (u64)pte.pte >> 32);
        else
                PVOP_VCALL2(pv_mmu_ops.set_pte, ptep,
                            pte.pte);
}

static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
                              pte_t *ptep, pte_t pte)
{
        if (sizeof(pteval_t) > sizeof(long))
                /* 5 arg words */
                pv_mmu_ops.set_pte_at(mm, addr, ptep, pte);
        else
                PVOP_VCALL4(pv_mmu_ops.set_pte_at, mm, addr, ptep, pte.pte);
}

static inline void set_pmd(pmd_t *pmdp, pmd_t pmd)
{
        pmdval_t val = native_pmd_val(pmd);

        if (sizeof(pmdval_t) > sizeof(long))
                PVOP_VCALL3(pv_mmu_ops.set_pmd, pmdp, val, (u64)val >> 32);
        else
                PVOP_VCALL2(pv_mmu_ops.set_pmd, pmdp, val);
}

#if PAGETABLE_LEVELS >= 3
static inline pmd_t __pmd(pmdval_t val)
{
        pmdval_t ret;

        if (sizeof(pmdval_t) > sizeof(long))
                ret = PVOP_CALLEE2(pmdval_t, pv_mmu_ops.make_pmd,
                                   val, (u64)val >> 32);
        else
                ret = PVOP_CALLEE1(pmdval_t, pv_mmu_ops.make_pmd,
                                   val);

        return (pmd_t) { ret };
}

static inline pmdval_t pmd_val(pmd_t pmd)
{
        pmdval_t ret;

        if (sizeof(pmdval_t) > sizeof(long))
                ret =  PVOP_CALLEE2(pmdval_t, pv_mmu_ops.pmd_val,
                                    pmd.pmd, (u64)pmd.pmd >> 32);
        else
                ret =  PVOP_CALLEE1(pmdval_t, pv_mmu_ops.pmd_val,
                                    pmd.pmd);

        return ret;
}

static inline void set_pud(pud_t *pudp, pud_t pud)
{
        pudval_t val = native_pud_val(pud);

        if (sizeof(pudval_t) > sizeof(long))
                PVOP_VCALL3(pv_mmu_ops.set_pud, pudp,
                            val, (u64)val >> 32);
        else
                PVOP_VCALL2(pv_mmu_ops.set_pud, pudp,
                            val);
}
#if PAGETABLE_LEVELS == 4
static inline pud_t __pud(pudval_t val)
{
        pudval_t ret;

        if (sizeof(pudval_t) > sizeof(long))
                ret = PVOP_CALLEE2(pudval_t, pv_mmu_ops.make_pud,
                                   val, (u64)val >> 32);
        else
                ret = PVOP_CALLEE1(pudval_t, pv_mmu_ops.make_pud,
                                   val);

        return (pud_t) { ret };
}

static inline pudval_t pud_val(pud_t pud)
{
        pudval_t ret;

        if (sizeof(pudval_t) > sizeof(long))
                ret =  PVOP_CALLEE2(pudval_t, pv_mmu_ops.pud_val,
                                    pud.pud, (u64)pud.pud >> 32);
        else
                ret =  PVOP_CALLEE1(pudval_t, pv_mmu_ops.pud_val,
                                    pud.pud);

        return ret;
}

static inline void set_pgd(pgd_t *pgdp, pgd_t pgd)
{
        pgdval_t val = native_pgd_val(pgd);

        if (sizeof(pgdval_t) > sizeof(long))
                PVOP_VCALL3(pv_mmu_ops.set_pgd, pgdp,
                            val, (u64)val >> 32);
        else
                PVOP_VCALL2(pv_mmu_ops.set_pgd, pgdp,
                            val);
}

static inline void pgd_clear(pgd_t *pgdp)
{
        set_pgd(pgdp, __pgd(0));
}

static inline void pud_clear(pud_t *pudp)
{
        set_pud(pudp, __pud(0));
}

#endif  /* PAGETABLE_LEVELS == 4 */

#endif  /* PAGETABLE_LEVELS >= 3 */

#ifdef CONFIG_X86_PAE
/* Special-case pte-setting operations for PAE, which can't update a
   64-bit pte atomically */
static inline void set_pte_atomic(pte_t *ptep, pte_t pte)
{
        PVOP_VCALL3(pv_mmu_ops.set_pte_atomic, ptep,
                    pte.pte, pte.pte >> 32);
}

static inline void set_pte_present(struct mm_struct *mm, unsigned long addr,
                                   pte_t *ptep, pte_t pte)
{
        /* 5 arg words */
        pv_mmu_ops.set_pte_present(mm, addr, ptep, pte);
}

static inline void pte_clear(struct mm_struct *mm, unsigned long addr,
                             pte_t *ptep)
{
        PVOP_VCALL3(pv_mmu_ops.pte_clear, mm, addr, ptep);
}

static inline void pmd_clear(pmd_t *pmdp)
{
        PVOP_VCALL1(pv_mmu_ops.pmd_clear, pmdp);
}
#else  /* !CONFIG_X86_PAE */
static inline void set_pte_atomic(pte_t *ptep, pte_t pte)
{
        set_pte(ptep, pte);
}

static inline void set_pte_present(struct mm_struct *mm, unsigned long addr,
                                   pte_t *ptep, pte_t pte)
{
        set_pte(ptep, pte);
}

static inline void pte_clear(struct mm_struct *mm, unsigned long addr,
                             pte_t *ptep)
{
        set_pte_at(mm, addr, ptep, __pte(0));
}

static inline void pmd_clear(pmd_t *pmdp)
{
        set_pmd(pmdp, __pmd(0));
}
#endif  /* CONFIG_X86_PAE */

/* Lazy mode for batching updates / context switch */
enum paravirt_lazy_mode {
        PARAVIRT_LAZY_NONE,
        PARAVIRT_LAZY_MMU,
        PARAVIRT_LAZY_CPU,
};

enum paravirt_lazy_mode paravirt_get_lazy_mode(void);
void paravirt_enter_lazy_cpu(void);
void paravirt_leave_lazy_cpu(void);
void paravirt_enter_lazy_mmu(void);
void paravirt_leave_lazy_mmu(void);
void paravirt_leave_lazy(enum paravirt_lazy_mode mode);

#define  __HAVE_ARCH_ENTER_LAZY_CPU_MODE
static inline void arch_enter_lazy_cpu_mode(void)
{
        PVOP_VCALL0(pv_cpu_ops.lazy_mode.enter);
}

static inline void arch_leave_lazy_cpu_mode(void)
{
        PVOP_VCALL0(pv_cpu_ops.lazy_mode.leave);
}

static inline void arch_flush_lazy_cpu_mode(void)
{
        if (unlikely(paravirt_get_lazy_mode() == PARAVIRT_LAZY_CPU)) {
                arch_leave_lazy_cpu_mode();
                arch_enter_lazy_cpu_mode();
        }
}


#define  __HAVE_ARCH_ENTER_LAZY_MMU_MODE
static inline void arch_enter_lazy_mmu_mode(void)
{
        PVOP_VCALL0(pv_mmu_ops.lazy_mode.enter);
}

static inline void arch_leave_lazy_mmu_mode(void)
{
        PVOP_VCALL0(pv_mmu_ops.lazy_mode.leave);
}

static inline void arch_flush_lazy_mmu_mode(void)
{
        if (unlikely(paravirt_get_lazy_mode() == PARAVIRT_LAZY_MMU)) {
                arch_leave_lazy_mmu_mode();
                arch_enter_lazy_mmu_mode();
        }
}

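/*
 * Usage sketch: bracket a run of pagetable updates in lazy MMU mode so
 * a hypervisor back end can queue the individual set_pte_at() calls
 * and flush them as one batch (e.g. a single hypercall) on leave:
 *
 *      arch_enter_lazy_mmu_mode();
 *      for (addr = start; addr < end; addr += PAGE_SIZE, ptep++)
 *              set_pte_at(mm, addr, ptep, pte);
 *      arch_leave_lazy_mmu_mode();
 */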
1462 static inline void __set_fixmap(unsigned /* enum fixed_addresses */ idx,
1463                                 unsigned long phys, pgprot_t flags)
1464 {
1465         pv_mmu_ops.set_fixmap(idx, phys, flags);
1466 }
1467
1468 void _paravirt_nop(void);
1469 u32 _paravirt_ident_32(u32);
1470 u64 _paravirt_ident_64(u64);
1471
1472 #define paravirt_nop    ((void *)_paravirt_nop)
1473
#ifdef CONFIG_SMP

static inline int __raw_spin_is_locked(struct raw_spinlock *lock)
{
        return PVOP_CALL1(int, pv_lock_ops.spin_is_locked, lock);
}

static inline int __raw_spin_is_contended(struct raw_spinlock *lock)
{
        return PVOP_CALL1(int, pv_lock_ops.spin_is_contended, lock);
}

static __always_inline void __raw_spin_lock(struct raw_spinlock *lock)
{
        PVOP_VCALL1(pv_lock_ops.spin_lock, lock);
}

static __always_inline void __raw_spin_lock_flags(struct raw_spinlock *lock,
                                                  unsigned long flags)
{
        PVOP_VCALL2(pv_lock_ops.spin_lock_flags, lock, flags);
}

static __always_inline int __raw_spin_trylock(struct raw_spinlock *lock)
{
        return PVOP_CALL1(int, pv_lock_ops.spin_trylock, lock);
}

static __always_inline void __raw_spin_unlock(struct raw_spinlock *lock)
{
        PVOP_VCALL1(pv_lock_ops.spin_unlock, lock);
}

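/*
 * A pv backend can point these at hypervisor-aware implementations.
 * Illustrative wiring (a sketch; Xen's actual setup differs in
 * detail):
 *
 *      pv_lock_ops.spin_lock   = xen_spin_lock;
 *      pv_lock_ops.spin_unlock = xen_spin_unlock;
 *
 * letting a contended vcpu block in the hypervisor rather than spin.
 */
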
#endif  /* CONFIG_SMP */

/* These all sit in the .parainstructions section to tell us what to patch. */
struct paravirt_patch_site {
        u8 *instr;              /* original instructions */
        u8 instrtype;           /* type of this instruction */
        u8 len;                 /* length of original instruction */
        u16 clobbers;           /* what registers you may clobber */
};

extern struct paravirt_patch_site __parainstructions[],
        __parainstructions_end[];
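
/*
 * Roughly how the boot-time patcher (apply_paravirt()) consumes these
 * records; a sketch, not the exact code:
 *
 *      for (p = __parainstructions; p < __parainstructions_end; p++) {
 *              used = pv_init_ops.patch(p->instrtype, p->clobbers,
 *                                       insnbuf, (unsigned long)p->instr,
 *                                       p->len);
 *              ... nop-pad the remaining p->len - used bytes ...
 *      }
 */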

#ifdef CONFIG_X86_32
#define PV_SAVE_REGS "pushl %ecx; pushl %edx;"
#define PV_RESTORE_REGS "popl %edx; popl %ecx;"

/* save and restore all caller-save registers except the return value
 * (%eax, and %edx for 64-bit returns), so only %ecx needs saving */
#define PV_SAVE_ALL_CALLER_REGS         "pushl %ecx;"
#define PV_RESTORE_ALL_CALLER_REGS      "popl  %ecx;"

#define PV_FLAGS_ARG "0"
#define PV_EXTRA_CLOBBERS
#define PV_VEXTRA_CLOBBERS
#else
/* save and restore all caller-save registers, except return value */
#define PV_SAVE_ALL_CALLER_REGS                                         \
        "push %rcx;"                                                    \
        "push %rdx;"                                                    \
        "push %rsi;"                                                    \
        "push %rdi;"                                                    \
        "push %r8;"                                                     \
        "push %r9;"                                                     \
        "push %r10;"                                                    \
        "push %r11;"
#define PV_RESTORE_ALL_CALLER_REGS                                      \
        "pop %r11;"                                                     \
        "pop %r10;"                                                     \
        "pop %r9;"                                                      \
        "pop %r8;"                                                      \
        "pop %rdi;"                                                     \
        "pop %rsi;"                                                     \
        "pop %rdx;"                                                     \
        "pop %rcx;"

/* We save some registers, but not all of them; that would be too
 * much.  We clobber all caller-saved registers except the argument
 * register. */
#define PV_SAVE_REGS "pushq %%rdi;"
#define PV_RESTORE_REGS "popq %%rdi;"
#define PV_EXTRA_CLOBBERS EXTRA_CLOBBERS, "rcx", "rdx", "rsi"
#define PV_VEXTRA_CLOBBERS EXTRA_CLOBBERS, "rdi", "rcx", "rdx", "rsi"
#define PV_FLAGS_ARG "D"
#endif  /* CONFIG_X86_32 */

/*
 * Generate a thunk around a function which saves all caller-save
 * registers except for the return value.  This allows C functions to
 * be called from assembler code in which fewer registers than usual
 * are available.  It may also help code generation around calls from
 * C code if the common case doesn't use many registers.
 *
 * When a callee is wrapped in a thunk, the caller can assume that all
 * arg regs and all scratch registers are preserved across the
 * call. The return value in rax/eax will not be saved, even for void
 * functions.
 */
#define PV_CALLEE_SAVE_REGS_THUNK(func)                                 \
        extern typeof(func) __raw_callee_save_##func;                   \
        static void *__##func##__ __used = func;                        \
                                                                        \
        asm(".pushsection .text;"                                       \
            "__raw_callee_save_" #func ": "                             \
            PV_SAVE_ALL_CALLER_REGS                                     \
            "call " #func ";"                                           \
            PV_RESTORE_ALL_CALLER_REGS                                  \
            "ret;"                                                      \
            ".popsection")

/* Get a reference to a callee-save function */
#define PV_CALLEE_SAVE(func)                                            \
        ((struct paravirt_callee_save) { __raw_callee_save_##func })

/* Promise that "func" already uses the right calling convention */
#define __PV_IS_CALLEE_SAVE(func)                       \
        ((struct paravirt_callee_save) { func })

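/*
 * Example (a sketch; "my_save_fl" is hypothetical): wrapping an
 * ordinary C function so it can live in a paravirt_callee_save slot:
 *
 *      static unsigned long my_save_fl(void) { ... }
 *      PV_CALLEE_SAVE_REGS_THUNK(my_save_fl);
 *      ...
 *      .save_fl = PV_CALLEE_SAVE(my_save_fl),
 *
 * A function already coded to the restricted convention (typically
 * hand-written asm) is wired up with __PV_IS_CALLEE_SAVE() instead,
 * avoiding the thunk.
 */
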
/*
 * The irq-flags ops below are open-coded rather than built with the
 * PVOP_CALL macros so that they declare only a minimal clobber set,
 * keeping register pressure low at the call sites.
 */
static inline unsigned long __raw_local_save_flags(void)
{
        unsigned long f;

        asm volatile(paravirt_alt(PARAVIRT_CALL)
                     : "=a"(f)
                     : paravirt_type(pv_irq_ops.save_fl),
                       paravirt_clobber(CLBR_EAX)
                     : "memory", "cc");
        return f;
}

static inline void raw_local_irq_restore(unsigned long f)
{
        /* the "=a"(f) output is a dummy: it only marks %eax as clobbered */
        asm volatile(paravirt_alt(PARAVIRT_CALL)
                     : "=a"(f)
                     : PV_FLAGS_ARG(f),
                       paravirt_type(pv_irq_ops.restore_fl),
                       paravirt_clobber(CLBR_EAX)
                     : "memory", "cc");
}

static inline void raw_local_irq_disable(void)
{
        asm volatile(paravirt_alt(PARAVIRT_CALL)
                     :
                     : paravirt_type(pv_irq_ops.irq_disable),
                       paravirt_clobber(CLBR_EAX)
                     : "memory", "eax", "cc");
}

static inline void raw_local_irq_enable(void)
{
        asm volatile(paravirt_alt(PARAVIRT_CALL)
                     :
                     : paravirt_type(pv_irq_ops.irq_enable),
                       paravirt_clobber(CLBR_EAX)
                     : "memory", "eax", "cc");
}

static inline unsigned long __raw_local_irq_save(void)
{
        unsigned long f;

        f = __raw_local_save_flags();
        raw_local_irq_disable();
        return f;
}
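
/*
 * Typical pattern built from the pieces above (sketch):
 *
 *      unsigned long flags = __raw_local_irq_save();
 *      ... critical section runs with interrupts off ...
 *      raw_local_irq_restore(flags);
 */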

/* Make sure as little as possible of this mess escapes. */
#undef PARAVIRT_CALL
#undef __PVOP_CALL
#undef __PVOP_VCALL
#undef PVOP_VCALL0
#undef PVOP_CALL0
#undef PVOP_VCALL1
#undef PVOP_CALL1
#undef PVOP_VCALL2
#undef PVOP_CALL2
#undef PVOP_VCALL3
#undef PVOP_CALL3
#undef PVOP_VCALL4
#undef PVOP_CALL4

#else  /* __ASSEMBLY__ */

#define _PVSITE(ptype, clobbers, ops, word, algn)       \
771:;                                           \
        ops;                                    \
772:;                                           \
        .pushsection .parainstructions,"a";     \
         .align algn;                           \
         word 771b;                             \
         .byte ptype;                           \
         .byte 772b-771b;                       \
         .short clobbers;                       \
        .popsection
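
/*
 * The record emitted above mirrors struct paravirt_patch_site on the
 * C side: "word 771b" becomes ->instr, ".byte ptype" ->instrtype,
 * ".byte 772b-771b" ->len and ".short clobbers" ->clobbers.
 */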
#define COND_PUSH(set, mask, reg)                       \
        .if ((~(set)) & mask); push %reg; .endif
#define COND_POP(set, mask, reg)                        \
        .if ((~(set)) & mask); pop %reg; .endif

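/*
 * Note the inverted test: a register is pushed only when its bit is
 * NOT in `set', i.e. when the call site needs it preserved.  So, for
 * example, PV_SAVE_REGS(CLBR_ANY) saves nothing, while
 * PV_SAVE_REGS(CLBR_NONE) saves every register listed below.
 */
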
#ifdef CONFIG_X86_64

#define PV_SAVE_REGS(set)                       \
        COND_PUSH(set, CLBR_RAX, rax);          \
        COND_PUSH(set, CLBR_RCX, rcx);          \
        COND_PUSH(set, CLBR_RDX, rdx);          \
        COND_PUSH(set, CLBR_RSI, rsi);          \
        COND_PUSH(set, CLBR_RDI, rdi);          \
        COND_PUSH(set, CLBR_R8, r8);            \
        COND_PUSH(set, CLBR_R9, r9);            \
        COND_PUSH(set, CLBR_R10, r10);          \
        COND_PUSH(set, CLBR_R11, r11)
#define PV_RESTORE_REGS(set)                    \
        COND_POP(set, CLBR_R11, r11);           \
        COND_POP(set, CLBR_R10, r10);           \
        COND_POP(set, CLBR_R9, r9);             \
        COND_POP(set, CLBR_R8, r8);             \
        COND_POP(set, CLBR_RDI, rdi);           \
        COND_POP(set, CLBR_RSI, rsi);           \
        COND_POP(set, CLBR_RDX, rdx);           \
        COND_POP(set, CLBR_RCX, rcx);           \
        COND_POP(set, CLBR_RAX, rax)

#define PARA_PATCH(struct, off)        ((PARAVIRT_PATCH_##struct + (off)) / 8)
#define PARA_SITE(ptype, clobbers, ops) _PVSITE(ptype, clobbers, ops, .quad, 8)
#define PARA_INDIRECT(addr)     *addr(%rip)
#else   /* !CONFIG_X86_64 */
#define PV_SAVE_REGS(set)                       \
        COND_PUSH(set, CLBR_EAX, eax);          \
        COND_PUSH(set, CLBR_EDI, edi);          \
        COND_PUSH(set, CLBR_ECX, ecx);          \
        COND_PUSH(set, CLBR_EDX, edx)
#define PV_RESTORE_REGS(set)                    \
        COND_POP(set, CLBR_EDX, edx);           \
        COND_POP(set, CLBR_ECX, ecx);           \
        COND_POP(set, CLBR_EDI, edi);           \
        COND_POP(set, CLBR_EAX, eax)

#define PARA_PATCH(struct, off)        ((PARAVIRT_PATCH_##struct + (off)) / 4)
#define PARA_SITE(ptype, clobbers, ops) _PVSITE(ptype, clobbers, ops, .long, 4)
#define PARA_INDIRECT(addr)     *%cs:addr
#endif  /* CONFIG_X86_64 */
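
/*
 * PARA_PATCH turns a struct offset into the op's pointer-sized slot
 * index; e.g. PARA_PATCH(pv_cpu_ops, PV_CPU_iret) yields the index of
 * the iret op within paravirt_patch_template, which is the "type" the
 * generic patcher keys on.
 */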

#define INTERRUPT_RETURN                                                \
        PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_iret), CLBR_NONE,       \
                  jmp PARA_INDIRECT(pv_cpu_ops+PV_CPU_iret))

#define DISABLE_INTERRUPTS(clobbers)                                    \
        PARA_SITE(PARA_PATCH(pv_irq_ops, PV_IRQ_irq_disable), clobbers, \
                  PV_SAVE_REGS(clobbers | CLBR_CALLEE_SAVE);            \
                  call PARA_INDIRECT(pv_irq_ops+PV_IRQ_irq_disable);    \
                  PV_RESTORE_REGS(clobbers | CLBR_CALLEE_SAVE);)

#define ENABLE_INTERRUPTS(clobbers)                                     \
        PARA_SITE(PARA_PATCH(pv_irq_ops, PV_IRQ_irq_enable), clobbers,  \
                  PV_SAVE_REGS(clobbers | CLBR_CALLEE_SAVE);            \
                  call PARA_INDIRECT(pv_irq_ops+PV_IRQ_irq_enable);     \
                  PV_RESTORE_REGS(clobbers | CLBR_CALLEE_SAVE);)
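
/*
 * Used from entry assembly, e.g. (sketch):
 *
 *      DISABLE_INTERRUPTS(CLBR_ANY)
 *
 * With CLBR_ANY the site permits every register to be clobbered, so
 * PV_SAVE_REGS/PV_RESTORE_REGS expand to nothing and a patched-in
 * native "cli" costs no more than the bare instruction.
 */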

#define USERGS_SYSRET32                                                 \
        PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_usergs_sysret32),       \
                  CLBR_NONE,                                            \
                  jmp PARA_INDIRECT(pv_cpu_ops+PV_CPU_usergs_sysret32))

#ifdef CONFIG_X86_32
#define GET_CR0_INTO_EAX                                \
        push %ecx; push %edx;                           \
        call PARA_INDIRECT(pv_cpu_ops+PV_CPU_read_cr0); \
        pop %edx; pop %ecx

#define ENABLE_INTERRUPTS_SYSEXIT                                       \
        PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_irq_enable_sysexit),    \
                  CLBR_NONE,                                            \
                  jmp PARA_INDIRECT(pv_cpu_ops+PV_CPU_irq_enable_sysexit))

#else   /* !CONFIG_X86_32 */

/*
 * If swapgs is used while the userspace stack is still current,
 * there's no way to call a pvop.  The PV replacement *must* be
 * inlined, or the swapgs instruction must be trapped and emulated.
 */
#define SWAPGS_UNSAFE_STACK                                             \
        PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_swapgs), CLBR_NONE,     \
                  swapgs)

/*
 * Note: swapgs is special; in practice it is either implemented as a
 * single "swapgs" instruction or handled specially by the hypervisor.
 * Either way, we don't need to save any registers for it.
 */
#define SWAPGS                                                          \
        PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_swapgs), CLBR_NONE,     \
                  call PARA_INDIRECT(pv_cpu_ops+PV_CPU_swapgs)          \
                 )

#define GET_CR2_INTO_RCX                                \
        call PARA_INDIRECT(pv_mmu_ops+PV_MMU_read_cr2); \
        movq %rax, %rcx;                                \
        xorq %rax, %rax;

#define PARAVIRT_ADJUST_EXCEPTION_FRAME                                 \
        PARA_SITE(PARA_PATCH(pv_irq_ops, PV_IRQ_adjust_exception_frame), \
                  CLBR_NONE,                                            \
                  call PARA_INDIRECT(pv_irq_ops+PV_IRQ_adjust_exception_frame))

#define USERGS_SYSRET64                                                 \
        PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_usergs_sysret64),       \
                  CLBR_NONE,                                            \
                  jmp PARA_INDIRECT(pv_cpu_ops+PV_CPU_usergs_sysret64))

#define ENABLE_INTERRUPTS_SYSEXIT32                                     \
        PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_irq_enable_sysexit),    \
                  CLBR_NONE,                                            \
                  jmp PARA_INDIRECT(pv_cpu_ops+PV_CPU_irq_enable_sysexit))
#endif  /* CONFIG_X86_32 */

#endif /* __ASSEMBLY__ */
#endif /* CONFIG_PARAVIRT */
#endif /* _ASM_X86_PARAVIRT_H */