x86/pvops: remove pte_flags pvop
[linux-2.6.git] / arch/x86/include/asm/paravirt.h
1 #ifndef _ASM_X86_PARAVIRT_H
2 #define _ASM_X86_PARAVIRT_H
3 /* Various instructions on x86 need to be replaced for
4  * para-virtualization: those hooks are defined here. */
5
6 #ifdef CONFIG_PARAVIRT
7 #include <asm/page.h>
8 #include <asm/asm.h>
9
10 /* Bitmask of what can be clobbered: usually at least eax. */
11 #define CLBR_NONE 0
12 #define CLBR_EAX  (1 << 0)
13 #define CLBR_ECX  (1 << 1)
14 #define CLBR_EDX  (1 << 2)
15
16 #ifdef CONFIG_X86_64
17 #define CLBR_RSI  (1 << 3)
18 #define CLBR_RDI  (1 << 4)
19 #define CLBR_R8   (1 << 5)
20 #define CLBR_R9   (1 << 6)
21 #define CLBR_R10  (1 << 7)
22 #define CLBR_R11  (1 << 8)
23 #define CLBR_ANY  ((1 << 9) - 1)
24 #include <asm/desc_defs.h>
25 #else
26 /* CLBR_ANY should match all the regs the platform has; for i386 that's just the three above */
27 #define CLBR_ANY  ((1 << 3) - 1)
28 #endif /* X86_64 */
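/*
 * Worked example: on i386 the three masks above cover every register the
 * macros may clobber, so CLBR_ANY == ((1 << 3) - 1) == 0x7 ==
 * CLBR_EAX | CLBR_ECX | CLBR_EDX.  On x86-64, CLBR_ANY == ((1 << 9) - 1)
 * == 0x1ff covers all nine masks defined above.
 */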
29
30 #ifndef __ASSEMBLY__
31 #include <linux/types.h>
32 #include <linux/cpumask.h>
33 #include <asm/kmap_types.h>
34 #include <asm/desc_defs.h>
35
36 struct page;
37 struct thread_struct;
38 struct desc_ptr;
39 struct tss_struct;
40 struct mm_struct;
41 struct desc_struct;
42
43 /* general info */
44 struct pv_info {
45         unsigned int kernel_rpl;
46         int shared_kernel_pmd;
47         int paravirt_enabled;
48         const char *name;
49 };
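/*
 * Example: a hypervisor backend fills in pv_info at boot.  A sketch only;
 * the field values below are hypothetical and not taken from any
 * particular backend:
 */
#if 0 /* example only */
static const struct pv_info example_info = {
	.paravirt_enabled = 1,
	.shared_kernel_pmd = 0,	/* per-process kernel pmds */
	.kernel_rpl	   = 1,	/* kernel demoted to ring 1 by the hypervisor */
	.name		   = "example hypervisor",
};
#endif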
50
51 struct pv_init_ops {
52         /*
53          * Patch may replace one of the defined code sequences with
54          * arbitrary code, subject to the same register constraints.
55          * This generally means the code is not free to clobber any
56          * registers other than EAX.  The patch function should return
57          * the number of bytes of code generated, as we nop pad the
58          * rest in generic code.
59          */
60         unsigned (*patch)(u8 type, u16 clobber, void *insnbuf,
61                           unsigned long addr, unsigned len);
62
63         /* Basic arch-specific setup */
64         void (*arch_setup)(void);
65         char *(*memory_setup)(void);
66         void (*post_allocator_init)(void);
67
68         /* Print a banner to identify the environment */
69         void (*banner)(void);
70 };
71
72
73 struct pv_lazy_ops {
74         /* Set deferred update mode, used for batching operations. */
75         void (*enter)(void);
76         void (*leave)(void);
77 };
78
79 struct pv_time_ops {
80         void (*time_init)(void);
81
82         /* Get and set the time of day */
83         unsigned long (*get_wallclock)(void);
84         int (*set_wallclock)(unsigned long);
85
86         unsigned long long (*sched_clock)(void);
87         unsigned long (*get_tsc_khz)(void);
88 };
89
90 struct pv_cpu_ops {
91         /* hooks for various privileged instructions */
92         unsigned long (*get_debugreg)(int regno);
93         void (*set_debugreg)(int regno, unsigned long value);
94
95         void (*clts)(void);
96
97         unsigned long (*read_cr0)(void);
98         void (*write_cr0)(unsigned long);
99
100         unsigned long (*read_cr4_safe)(void);
101         unsigned long (*read_cr4)(void);
102         void (*write_cr4)(unsigned long);
103
104 #ifdef CONFIG_X86_64
105         unsigned long (*read_cr8)(void);
106         void (*write_cr8)(unsigned long);
107 #endif
108
109         /* Segment descriptor handling */
110         void (*load_tr_desc)(void);
111         void (*load_gdt)(const struct desc_ptr *);
112         void (*load_idt)(const struct desc_ptr *);
113         void (*store_gdt)(struct desc_ptr *);
114         void (*store_idt)(struct desc_ptr *);
115         void (*set_ldt)(const void *desc, unsigned entries);
116         unsigned long (*store_tr)(void);
117         void (*load_tls)(struct thread_struct *t, unsigned int cpu);
118 #ifdef CONFIG_X86_64
119         void (*load_gs_index)(unsigned int idx);
120 #endif
121         void (*write_ldt_entry)(struct desc_struct *ldt, int entrynum,
122                                 const void *desc);
123         void (*write_gdt_entry)(struct desc_struct *,
124                                 int entrynum, const void *desc, int size);
125         void (*write_idt_entry)(gate_desc *,
126                                 int entrynum, const gate_desc *gate);
127         void (*alloc_ldt)(struct desc_struct *ldt, unsigned entries);
128         void (*free_ldt)(struct desc_struct *ldt, unsigned entries);
129
130         void (*load_sp0)(struct tss_struct *tss, struct thread_struct *t);
131
132         void (*set_iopl_mask)(unsigned mask);
133
134         void (*wbinvd)(void);
135         void (*io_delay)(void);
136
137         /* cpuid emulation, mostly so that caps bits can be disabled */
138         void (*cpuid)(unsigned int *eax, unsigned int *ebx,
139                       unsigned int *ecx, unsigned int *edx);
140
141         /* MSR, PMC and TSC operations.
142            err = 0/-EFAULT.  wrmsr returns 0/-EFAULT. */
143         u64 (*read_msr_amd)(unsigned int msr, int *err);
144         u64 (*read_msr)(unsigned int msr, int *err);
145         int (*write_msr)(unsigned int msr, unsigned low, unsigned high);
146
147         u64 (*read_tsc)(void);
148         u64 (*read_pmc)(int counter);
149         unsigned long long (*read_tscp)(unsigned int *aux);
150
151         /*
152          * Atomically enable interrupts and return to userspace.  This
153          * is only ever used to return to 32-bit processes; in a
154          * 64-bit kernel, it's used for 32-on-64 compat processes, but
155          * never native 64-bit processes.  (Jump, not call.)
156          */
157         void (*irq_enable_sysexit)(void);
158
159         /*
160          * Switch to usermode gs and return to 64-bit usermode using
161          * sysret.  Only used in 64-bit kernels to return to 64-bit
162          * processes.  Usermode register state, including %rsp, must
163          * already be restored.
164          */
165         void (*usergs_sysret64)(void);
166
167         /*
168          * Switch to usermode gs and return to 32-bit usermode using
169          * sysret.  Used to return to 32-on-64 compat processes.
170          * Other usermode register state, including %esp, must already
171          * be restored.
172          */
173         void (*usergs_sysret32)(void);
174
175         /* Normal iret.  Jump to this with the standard iret stack
176            frame set up. */
177         void (*iret)(void);
178
179         void (*swapgs)(void);
180
181         struct pv_lazy_ops lazy_mode;
182 };
183
184 struct pv_irq_ops {
185         void (*init_IRQ)(void);
186
187         /*
188          * Get/set interrupt state.  save_fl and restore_fl are only
189          * expected to use X86_EFLAGS_IF; all other bits
190          * returned from save_fl are undefined, and may be ignored by
191          * restore_fl.
192          */
193         unsigned long (*save_fl)(void);
194         void (*restore_fl)(unsigned long);
195         void (*irq_disable)(void);
196         void (*irq_enable)(void);
197         void (*safe_halt)(void);
198         void (*halt)(void);
199
200 #ifdef CONFIG_X86_64
201         void (*adjust_exception_frame)(void);
202 #endif
203 };
204
205 struct pv_apic_ops {
206 #ifdef CONFIG_X86_LOCAL_APIC
207         void (*setup_boot_clock)(void);
208         void (*setup_secondary_clock)(void);
209
210         void (*startup_ipi_hook)(int phys_apicid,
211                                  unsigned long start_eip,
212                                  unsigned long start_esp);
213 #endif
214 };
215
216 struct pv_mmu_ops {
217         /*
218          * Called before/after init_mm pagetable setup. setup_start
219          * may reset %cr3, and may pre-install parts of the pagetable;
220          * pagetable setup is expected to preserve any existing
221          * mapping.
222          */
223         void (*pagetable_setup_start)(pgd_t *pgd_base);
224         void (*pagetable_setup_done)(pgd_t *pgd_base);
225
226         unsigned long (*read_cr2)(void);
227         void (*write_cr2)(unsigned long);
228
229         unsigned long (*read_cr3)(void);
230         void (*write_cr3)(unsigned long);
231
232         /*
233          * Hooks for intercepting the creation/use/destruction of an
234          * mm_struct.
235          */
236         void (*activate_mm)(struct mm_struct *prev,
237                             struct mm_struct *next);
238         void (*dup_mmap)(struct mm_struct *oldmm,
239                          struct mm_struct *mm);
240         void (*exit_mmap)(struct mm_struct *mm);
241
242
243         /* TLB operations */
244         void (*flush_tlb_user)(void);
245         void (*flush_tlb_kernel)(void);
246         void (*flush_tlb_single)(unsigned long addr);
247         void (*flush_tlb_others)(const cpumask_t *cpus, struct mm_struct *mm,
248                                  unsigned long va);
249
250         /* Hooks for allocating and freeing a pagetable top-level */
251         int  (*pgd_alloc)(struct mm_struct *mm);
252         void (*pgd_free)(struct mm_struct *mm, pgd_t *pgd);
253
254         /*
255          * Hooks for allocating/releasing pagetable pages when they're
256          * attached to a pagetable
257          */
258         void (*alloc_pte)(struct mm_struct *mm, unsigned long pfn);
259         void (*alloc_pmd)(struct mm_struct *mm, unsigned long pfn);
260         void (*alloc_pmd_clone)(unsigned long pfn, unsigned long clonepfn, unsigned long start, unsigned long count);
261         void (*alloc_pud)(struct mm_struct *mm, unsigned long pfn);
262         void (*release_pte)(unsigned long pfn);
263         void (*release_pmd)(unsigned long pfn);
264         void (*release_pud)(unsigned long pfn);
265
266         /* Pagetable manipulation functions */
267         void (*set_pte)(pte_t *ptep, pte_t pteval);
268         void (*set_pte_at)(struct mm_struct *mm, unsigned long addr,
269                            pte_t *ptep, pte_t pteval);
270         void (*set_pmd)(pmd_t *pmdp, pmd_t pmdval);
271         void (*pte_update)(struct mm_struct *mm, unsigned long addr,
272                            pte_t *ptep);
273         void (*pte_update_defer)(struct mm_struct *mm,
274                                  unsigned long addr, pte_t *ptep);
275
276         pte_t (*ptep_modify_prot_start)(struct mm_struct *mm, unsigned long addr,
277                                         pte_t *ptep);
278         void (*ptep_modify_prot_commit)(struct mm_struct *mm, unsigned long addr,
279                                         pte_t *ptep, pte_t pte);
280
281         pteval_t (*pte_val)(pte_t);
282         pte_t (*make_pte)(pteval_t pte);
283
284         pgdval_t (*pgd_val)(pgd_t);
285         pgd_t (*make_pgd)(pgdval_t pgd);
286
287 #if PAGETABLE_LEVELS >= 3
288 #ifdef CONFIG_X86_PAE
289         void (*set_pte_atomic)(pte_t *ptep, pte_t pteval);
290         void (*set_pte_present)(struct mm_struct *mm, unsigned long addr,
291                                 pte_t *ptep, pte_t pte);
292         void (*pte_clear)(struct mm_struct *mm, unsigned long addr,
293                           pte_t *ptep);
294         void (*pmd_clear)(pmd_t *pmdp);
295
296 #endif  /* CONFIG_X86_PAE */
297
298         void (*set_pud)(pud_t *pudp, pud_t pudval);
299
300         pmdval_t (*pmd_val)(pmd_t);
301         pmd_t (*make_pmd)(pmdval_t pmd);
302
303 #if PAGETABLE_LEVELS == 4
304         pudval_t (*pud_val)(pud_t);
305         pud_t (*make_pud)(pudval_t pud);
306
307         void (*set_pgd)(pgd_t *pgdp, pgd_t pgdval);
308 #endif  /* PAGETABLE_LEVELS == 4 */
309 #endif  /* PAGETABLE_LEVELS >= 3 */
310
311 #ifdef CONFIG_HIGHPTE
312         void *(*kmap_atomic_pte)(struct page *page, enum km_type type);
313 #endif
314
315         struct pv_lazy_ops lazy_mode;
316
317         /* dom0 ops */
318
319         /* Sometimes the physical address is a pfn, and sometimes it's
320            an mfn.  We can tell which is which from the index. */
321         void (*set_fixmap)(unsigned /* enum fixed_addresses */ idx,
322                            unsigned long phys, pgprot_t flags);
323 };
324
325 struct raw_spinlock;
326 struct pv_lock_ops {
327         int (*spin_is_locked)(struct raw_spinlock *lock);
328         int (*spin_is_contended)(struct raw_spinlock *lock);
329         void (*spin_lock)(struct raw_spinlock *lock);
330         void (*spin_lock_flags)(struct raw_spinlock *lock, unsigned long flags);
331         int (*spin_trylock)(struct raw_spinlock *lock);
332         void (*spin_unlock)(struct raw_spinlock *lock);
333 };
334
335 /* This contains all the paravirt structures: each function gets a
336  * convenient number from its offset within the structure, which we use
337  * to indicate what to patch. */
338 struct paravirt_patch_template {
339         struct pv_init_ops pv_init_ops;
340         struct pv_time_ops pv_time_ops;
341         struct pv_cpu_ops pv_cpu_ops;
342         struct pv_irq_ops pv_irq_ops;
343         struct pv_apic_ops pv_apic_ops;
344         struct pv_mmu_ops pv_mmu_ops;
345         struct pv_lock_ops pv_lock_ops;
346 };
347
348 extern struct pv_info pv_info;
349 extern struct pv_init_ops pv_init_ops;
350 extern struct pv_time_ops pv_time_ops;
351 extern struct pv_cpu_ops pv_cpu_ops;
352 extern struct pv_irq_ops pv_irq_ops;
353 extern struct pv_apic_ops pv_apic_ops;
354 extern struct pv_mmu_ops pv_mmu_ops;
355 extern struct pv_lock_ops pv_lock_ops;
356
357 #define PARAVIRT_PATCH(x)                                       \
358         (offsetof(struct paravirt_patch_template, x) / sizeof(void *))
359
360 #define paravirt_type(op)                               \
361         [paravirt_typenum] "i" (PARAVIRT_PATCH(op)),    \
362         [paravirt_opptr] "m" (op)
363 #define paravirt_clobber(clobber)               \
364         [paravirt_clobber] "i" (clobber)
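/*
 * Worked example: PARAVIRT_PATCH(pv_irq_ops.irq_disable) is the offset of
 * that function pointer within struct paravirt_patch_template divided by
 * sizeof(void *), so every op gets a stable small integer.  paravirt_type()
 * feeds that number to the asm below as the patch-site type byte.
 */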
365
366 /*
367  * Generate some code, and mark it as patchable by the
368  * apply_paravirt() alternate instruction patcher.
369  */
370 #define _paravirt_alt(insn_string, type, clobber)       \
371         "771:\n\t" insn_string "\n" "772:\n"            \
372         ".pushsection .parainstructions,\"a\"\n"        \
373         _ASM_ALIGN "\n"                                 \
374         _ASM_PTR " 771b\n"                              \
375         "  .byte " type "\n"                            \
376         "  .byte 772b-771b\n"                           \
377         "  .short " clobber "\n"                        \
378         ".popsection\n"
379
380 /* Generate patchable code, with the default asm parameters. */
381 #define paravirt_alt(insn_string)                                       \
382         _paravirt_alt(insn_string, "%c[paravirt_typenum]", "%c[paravirt_clobber]")
383
384 /* Simple instruction patching code. */
385 #define DEF_NATIVE(ops, name, code)                                     \
386         extern const char start_##ops##_##name[], end_##ops##_##name[]; \
387         asm("start_" #ops "_" #name ": " code "; end_" #ops "_" #name ":")
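/*
 * Example: the native backends provide raw instruction sequences this way,
 * following the pattern used by the 32-bit native patching code (a sketch):
 */
#if 0 /* example only */
DEF_NATIVE(pv_irq_ops, irq_disable, "cli");
DEF_NATIVE(pv_irq_ops, irq_enable, "sti");
DEF_NATIVE(pv_cpu_ops, iret, "iret");
#endif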
388
389 unsigned paravirt_patch_nop(void);
390 unsigned paravirt_patch_ignore(unsigned len);
391 unsigned paravirt_patch_call(void *insnbuf,
392                              const void *target, u16 tgt_clobbers,
393                              unsigned long addr, u16 site_clobbers,
394                              unsigned len);
395 unsigned paravirt_patch_jmp(void *insnbuf, const void *target,
396                             unsigned long addr, unsigned len);
397 unsigned paravirt_patch_default(u8 type, u16 clobbers, void *insnbuf,
398                                 unsigned long addr, unsigned len);
399
400 unsigned paravirt_patch_insns(void *insnbuf, unsigned len,
401                               const char *start, const char *end);
402
403 unsigned native_patch(u8 type, u16 clobbers, void *ibuf,
404                       unsigned long addr, unsigned len);
405
406 int paravirt_disable_iospace(void);
407
408 /*
409  * This generates an indirect call based on the operation type number.
410  * The type number, computed in PARAVIRT_PATCH, is derived from the
411  * offset into the paravirt_patch_template structure, and can therefore be
412  * freely converted back into a structure offset.
413  */
414 #define PARAVIRT_CALL   "call *%[paravirt_opptr];"
415
416 /*
417  * These macros are intended to wrap calls through one of the paravirt
418  * ops structs, so that they can be later identified and patched at
419  * runtime.
420  *
421  * Normally, a call to a pv_op function is a simple indirect call:
422  * (pv_op_struct.operations)(args...).
423  *
424  * Unfortunately, this is a relatively slow operation for modern CPUs,
425  * because it cannot necessarily determine what the destination
426  * address is.  In this case, the address is a runtime constant, so at
427  * the very least we can patch the call to be a simple direct call, or
428  * ideally, patch an inline implementation into the callsite.  (Direct
429  * calls are essentially free, because the call and return addresses
430  * are completely predictable.)
431  *
432  * For i386, these macros rely on the standard gcc "regparm(3)" calling
433  * convention, in which the first three arguments are placed in %eax,
434  * %edx, %ecx (in that order), and the remaining arguments are placed
435  * on the stack.  All caller-save registers (eax,edx,ecx) are expected
436  * to be modified (either clobbered or used for return values).
437  * x86_64, on the other hand, already specifies a register-based calling
438  * convention, returning in %rax, with parameters passed in %rdi, %rsi,
439  * %rdx, and %rcx.  For this reason, x86_64 does not need any special
440  * handling for dealing with 4 arguments, unlike i386.
441  * However, x86_64 also has to clobber all caller-saved registers, of
442  * which there are unfortunately quite a few (r8 - r11).
443  *
444  * The call instruction itself is marked by placing its start address
445  * and size into the .parainstructions section, so that
446  * apply_paravirt() in arch/i386/kernel/alternative.c can do the
447  * appropriate patching under the control of the backend pv_init_ops
448  * implementation.
449  *
450  * Unfortunately there's no way to get gcc to generate the args setup
451  * for the call, and then allow the call itself to be generated by an
452  * inline asm.  Because of this, we must do the complete arg setup and
453  * return value handling from within these macros.  This is fairly
454  * cumbersome.
455  *
456  * There are 5 sets of PVOP_* macros for dealing with 0-4 arguments.
457  * It could be extended to more arguments, but there would be little
458  * to be gained from that.  For each number of arguments, there are
459  * the two VCALL and CALL variants for void and non-void functions.
460  *
461  * When there is a return value, the invoker of the macro must specify
462  * the return type.  The macro then uses sizeof() on that type to
463  * determine whether it's a 32- or 64-bit value, and places the return
464  * in the right register(s) (just %eax for 32-bit, and %edx:%eax for
465  * 64-bit).  On x86_64 machines, the return is always in %rax regardless
466  * of the return value size.
467  *
468  * i386 passes 64-bit arguments as a pair of adjacent 32-bit
469  * arguments, with the low word first and the high word second
470  * (low,high order).
471  *
472  * Small structures are passed and returned in registers.  The macro
473  * calling convention can't directly deal with this, so the wrapper
474  * functions must do this.
475  *
476  * These PVOP_* macros are only defined within this header.  This
477  * means that all uses must be wrapped in inline functions.  This also
478  * makes sure the incoming and outgoing types are always correct.
479  */
480 #ifdef CONFIG_X86_32
481 #define PVOP_VCALL_ARGS                 unsigned long __eax, __edx, __ecx
482 #define PVOP_CALL_ARGS                  PVOP_VCALL_ARGS
483 #define PVOP_VCALL_CLOBBERS             "=a" (__eax), "=d" (__edx),     \
484                                         "=c" (__ecx)
485 #define PVOP_CALL_CLOBBERS              PVOP_VCALL_CLOBBERS
486 #define EXTRA_CLOBBERS
487 #define VEXTRA_CLOBBERS
488 #else
489 #define PVOP_VCALL_ARGS         unsigned long __edi, __esi, __edx, __ecx
490 #define PVOP_CALL_ARGS          PVOP_VCALL_ARGS, __eax
491 #define PVOP_VCALL_CLOBBERS     "=D" (__edi),                           \
492                                 "=S" (__esi), "=d" (__edx),             \
493                                 "=c" (__ecx)
494
495 #define PVOP_CALL_CLOBBERS      PVOP_VCALL_CLOBBERS, "=a" (__eax)
496
497 #define EXTRA_CLOBBERS   , "r8", "r9", "r10", "r11"
498 #define VEXTRA_CLOBBERS  , "rax", "r8", "r9", "r10", "r11"
499 #endif
500
501 #ifdef CONFIG_PARAVIRT_DEBUG
502 #define PVOP_TEST_NULL(op)      BUG_ON(op == NULL)
503 #else
504 #define PVOP_TEST_NULL(op)      ((void)op)
505 #endif
506
507 #define __PVOP_CALL(rettype, op, pre, post, ...)                        \
508         ({                                                              \
509                 rettype __ret;                                          \
510                 PVOP_CALL_ARGS;                                 \
511                 PVOP_TEST_NULL(op);                                     \
512                 /* This is 32-bit specific, but is okay in 64-bit */    \
513                 /* since this condition will never hold */              \
514                 if (sizeof(rettype) > sizeof(unsigned long)) {          \
515                         asm volatile(pre                                \
516                                      paravirt_alt(PARAVIRT_CALL)        \
517                                      post                               \
518                                      : PVOP_CALL_CLOBBERS               \
519                                      : paravirt_type(op),               \
520                                        paravirt_clobber(CLBR_ANY),      \
521                                        ##__VA_ARGS__                    \
522                                      : "memory", "cc" EXTRA_CLOBBERS);  \
523                         __ret = (rettype)((((u64)__edx) << 32) | __eax); \
524                 } else {                                                \
525                         asm volatile(pre                                \
526                                      paravirt_alt(PARAVIRT_CALL)        \
527                                      post                               \
528                                      : PVOP_CALL_CLOBBERS               \
529                                      : paravirt_type(op),               \
530                                        paravirt_clobber(CLBR_ANY),      \
531                                        ##__VA_ARGS__                    \
532                                      : "memory", "cc" EXTRA_CLOBBERS);  \
533                         __ret = (rettype)__eax;                         \
534                 }                                                       \
535                 __ret;                                                  \
536         })
537 #define __PVOP_VCALL(op, pre, post, ...)                                \
538         ({                                                              \
539                 PVOP_VCALL_ARGS;                                        \
540                 PVOP_TEST_NULL(op);                                     \
541                 asm volatile(pre                                        \
542                              paravirt_alt(PARAVIRT_CALL)                \
543                              post                                       \
544                              : PVOP_VCALL_CLOBBERS                      \
545                              : paravirt_type(op),                       \
546                                paravirt_clobber(CLBR_ANY),              \
547                                ##__VA_ARGS__                            \
548                              : "memory", "cc" VEXTRA_CLOBBERS);         \
549         })
550
551 #define PVOP_CALL0(rettype, op)                                         \
552         __PVOP_CALL(rettype, op, "", "")
553 #define PVOP_VCALL0(op)                                                 \
554         __PVOP_VCALL(op, "", "")
555
556 #define PVOP_CALL1(rettype, op, arg1)                                   \
557         __PVOP_CALL(rettype, op, "", "", "0" ((unsigned long)(arg1)))
558 #define PVOP_VCALL1(op, arg1)                                           \
559         __PVOP_VCALL(op, "", "", "0" ((unsigned long)(arg1)))
560
561 #define PVOP_CALL2(rettype, op, arg1, arg2)                             \
562         __PVOP_CALL(rettype, op, "", "", "0" ((unsigned long)(arg1)),   \
563         "1" ((unsigned long)(arg2)))
564 #define PVOP_VCALL2(op, arg1, arg2)                                     \
565         __PVOP_VCALL(op, "", "", "0" ((unsigned long)(arg1)),           \
566         "1" ((unsigned long)(arg2)))
567
568 #define PVOP_CALL3(rettype, op, arg1, arg2, arg3)                       \
569         __PVOP_CALL(rettype, op, "", "", "0" ((unsigned long)(arg1)),   \
570         "1"((unsigned long)(arg2)), "2"((unsigned long)(arg3)))
571 #define PVOP_VCALL3(op, arg1, arg2, arg3)                               \
572         __PVOP_VCALL(op, "", "", "0" ((unsigned long)(arg1)),           \
573         "1"((unsigned long)(arg2)), "2"((unsigned long)(arg3)))
574
575 /* The 4-argument case is the only difference on x86_64, where it is much simpler */
576 #ifdef CONFIG_X86_32
577 #define PVOP_CALL4(rettype, op, arg1, arg2, arg3, arg4)                 \
578         __PVOP_CALL(rettype, op,                                        \
579                     "push %[_arg4];", "lea 4(%%esp),%%esp;",            \
580                     "0" ((u32)(arg1)), "1" ((u32)(arg2)),               \
581                     "2" ((u32)(arg3)), [_arg4] "mr" ((u32)(arg4)))
582 #define PVOP_VCALL4(op, arg1, arg2, arg3, arg4)                         \
583         __PVOP_VCALL(op,                                                \
584                     "push %[_arg4];", "lea 4(%%esp),%%esp;",            \
585                     "0" ((u32)(arg1)), "1" ((u32)(arg2)),               \
586                     "2" ((u32)(arg3)), [_arg4] "mr" ((u32)(arg4)))
587 #else
588 #define PVOP_CALL4(rettype, op, arg1, arg2, arg3, arg4)                 \
589         __PVOP_CALL(rettype, op, "", "", "0" ((unsigned long)(arg1)),   \
590         "1"((unsigned long)(arg2)), "2"((unsigned long)(arg3)),         \
591         "3"((unsigned long)(arg4)))
592 #define PVOP_VCALL4(op, arg1, arg2, arg3, arg4)                         \
593         __PVOP_VCALL(op, "", "", "0" ((unsigned long)(arg1)),           \
594         "1"((unsigned long)(arg2)), "2"((unsigned long)(arg3)),         \
595         "3"((unsigned long)(arg4)))
596 #endif
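/*
 * Worked example: PVOP_CALL0(unsigned long, pv_cpu_ops.read_cr0) emits an
 * asm volatile wrapping "call *%[paravirt_opptr]" bound to
 * pv_cpu_ops.read_cr0, records the site in .parainstructions via
 * paravirt_alt(), and returns the value left in %eax (%rax on x86_64).
 */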
597
598 static inline int paravirt_enabled(void)
599 {
600         return pv_info.paravirt_enabled;
601 }
602
603 static inline void load_sp0(struct tss_struct *tss,
604                              struct thread_struct *thread)
605 {
606         PVOP_VCALL2(pv_cpu_ops.load_sp0, tss, thread);
607 }
608
609 #define ARCH_SETUP                      pv_init_ops.arch_setup();
610 static inline unsigned long get_wallclock(void)
611 {
612         return PVOP_CALL0(unsigned long, pv_time_ops.get_wallclock);
613 }
614
615 static inline int set_wallclock(unsigned long nowtime)
616 {
617         return PVOP_CALL1(int, pv_time_ops.set_wallclock, nowtime);
618 }
619
620 static inline void (*choose_time_init(void))(void)
621 {
622         return pv_time_ops.time_init;
623 }
624
625 /* The paravirtualized CPUID instruction. */
626 static inline void __cpuid(unsigned int *eax, unsigned int *ebx,
627                            unsigned int *ecx, unsigned int *edx)
628 {
629         PVOP_VCALL4(pv_cpu_ops.cpuid, eax, ebx, ecx, edx);
630 }
631
632 /*
633  * These special macros can be used to get or set a debugging register
634  */
635 static inline unsigned long paravirt_get_debugreg(int reg)
636 {
637         return PVOP_CALL1(unsigned long, pv_cpu_ops.get_debugreg, reg);
638 }
639 #define get_debugreg(var, reg) var = paravirt_get_debugreg(reg)
640 static inline void set_debugreg(unsigned long val, int reg)
641 {
642         PVOP_VCALL2(pv_cpu_ops.set_debugreg, reg, val);
643 }
644
645 static inline void clts(void)
646 {
647         PVOP_VCALL0(pv_cpu_ops.clts);
648 }
649
650 static inline unsigned long read_cr0(void)
651 {
652         return PVOP_CALL0(unsigned long, pv_cpu_ops.read_cr0);
653 }
654
655 static inline void write_cr0(unsigned long x)
656 {
657         PVOP_VCALL1(pv_cpu_ops.write_cr0, x);
658 }
659
660 static inline unsigned long read_cr2(void)
661 {
662         return PVOP_CALL0(unsigned long, pv_mmu_ops.read_cr2);
663 }
664
665 static inline void write_cr2(unsigned long x)
666 {
667         PVOP_VCALL1(pv_mmu_ops.write_cr2, x);
668 }
669
670 static inline unsigned long read_cr3(void)
671 {
672         return PVOP_CALL0(unsigned long, pv_mmu_ops.read_cr3);
673 }
674
675 static inline void write_cr3(unsigned long x)
676 {
677         PVOP_VCALL1(pv_mmu_ops.write_cr3, x);
678 }
679
680 static inline unsigned long read_cr4(void)
681 {
682         return PVOP_CALL0(unsigned long, pv_cpu_ops.read_cr4);
683 }
684 static inline unsigned long read_cr4_safe(void)
685 {
686         return PVOP_CALL0(unsigned long, pv_cpu_ops.read_cr4_safe);
687 }
688
689 static inline void write_cr4(unsigned long x)
690 {
691         PVOP_VCALL1(pv_cpu_ops.write_cr4, x);
692 }
693
694 #ifdef CONFIG_X86_64
695 static inline unsigned long read_cr8(void)
696 {
697         return PVOP_CALL0(unsigned long, pv_cpu_ops.read_cr8);
698 }
699
700 static inline void write_cr8(unsigned long x)
701 {
702         PVOP_VCALL1(pv_cpu_ops.write_cr8, x);
703 }
704 #endif
705
706 static inline void raw_safe_halt(void)
707 {
708         PVOP_VCALL0(pv_irq_ops.safe_halt);
709 }
710
711 static inline void halt(void)
712 {
713         PVOP_VCALL0(pv_irq_ops.halt);
714 }
715
716 static inline void wbinvd(void)
717 {
718         PVOP_VCALL0(pv_cpu_ops.wbinvd);
719 }
720
721 #define get_kernel_rpl()  (pv_info.kernel_rpl)
722
723 static inline u64 paravirt_read_msr(unsigned msr, int *err)
724 {
725         return PVOP_CALL2(u64, pv_cpu_ops.read_msr, msr, err);
726 }
727 static inline u64 paravirt_read_msr_amd(unsigned msr, int *err)
728 {
729         return PVOP_CALL2(u64, pv_cpu_ops.read_msr_amd, msr, err);
730 }
731 static inline int paravirt_write_msr(unsigned msr, unsigned low, unsigned high)
732 {
733         return PVOP_CALL3(int, pv_cpu_ops.write_msr, msr, low, high);
734 }
735
736 /* These should all do BUG_ON(_err), but our headers are too tangled. */
737 #define rdmsr(msr, val1, val2)                  \
738 do {                                            \
739         int _err;                               \
740         u64 _l = paravirt_read_msr(msr, &_err); \
741         val1 = (u32)_l;                         \
742         val2 = _l >> 32;                        \
743 } while (0)
744
745 #define wrmsr(msr, val1, val2)                  \
746 do {                                            \
747         paravirt_write_msr(msr, val1, val2);    \
748 } while (0)
749
750 #define rdmsrl(msr, val)                        \
751 do {                                            \
752         int _err;                               \
753         val = paravirt_read_msr(msr, &_err);    \
754 } while (0)
755
756 #define wrmsrl(msr, val)        wrmsr(msr, (u32)((u64)(val)), ((u64)(val))>>32)
757 #define wrmsr_safe(msr, a, b)   paravirt_write_msr(msr, a, b)
758
759 /* rdmsr with exception handling */
760 #define rdmsr_safe(msr, a, b)                   \
761 ({                                              \
762         int _err;                               \
763         u64 _l = paravirt_read_msr(msr, &_err); \
764         (*a) = (u32)_l;                         \
765         (*b) = _l >> 32;                        \
766         _err;                                   \
767 })
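/*
 * Example use of the exception-safe variant (a sketch; read_efer_checked
 * is a hypothetical helper, MSR_EFER comes from <asm/msr-index.h>):
 */
#if 0 /* example only */
static inline int read_efer_checked(u64 *efer)
{
	u32 lo, hi;
	int err = rdmsr_safe(MSR_EFER, &lo, &hi);

	if (!err)
		*efer = lo | ((u64)hi << 32);
	return err;
}
#endif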
768
769 static inline int rdmsrl_safe(unsigned msr, unsigned long long *p)
770 {
771         int err;
772
773         *p = paravirt_read_msr(msr, &err);
774         return err;
775 }
776 static inline int rdmsrl_amd_safe(unsigned msr, unsigned long long *p)
777 {
778         int err;
779
780         *p = paravirt_read_msr_amd(msr, &err);
781         return err;
782 }
783
784 static inline u64 paravirt_read_tsc(void)
785 {
786         return PVOP_CALL0(u64, pv_cpu_ops.read_tsc);
787 }
788
789 #define rdtscl(low)                             \
790 do {                                            \
791         u64 _l = paravirt_read_tsc();           \
792         low = (int)_l;                          \
793 } while (0)
794
795 #define rdtscll(val) (val = paravirt_read_tsc())
796
797 static inline unsigned long long paravirt_sched_clock(void)
798 {
799         return PVOP_CALL0(unsigned long long, pv_time_ops.sched_clock);
800 }
801 #define calibrate_tsc() (pv_time_ops.get_tsc_khz())
802
803 static inline unsigned long long paravirt_read_pmc(int counter)
804 {
805         return PVOP_CALL1(u64, pv_cpu_ops.read_pmc, counter);
806 }
807
808 #define rdpmc(counter, low, high)               \
809 do {                                            \
810         u64 _l = paravirt_read_pmc(counter);    \
811         low = (u32)_l;                          \
812         high = _l >> 32;                        \
813 } while (0)
814
815 static inline unsigned long long paravirt_rdtscp(unsigned int *aux)
816 {
817         return PVOP_CALL1(u64, pv_cpu_ops.read_tscp, aux);
818 }
819
820 #define rdtscp(low, high, aux)                          \
821 do {                                                    \
822         unsigned int __aux;                             \
823         unsigned long long __val = paravirt_rdtscp(&__aux); \
824         (low) = (u32)__val;                             \
825         (high) = (u32)(__val >> 32);                    \
826         (aux) = __aux;                                  \
827 } while (0)
828
829 #define rdtscpll(val, aux)                              \
830 do {                                                    \
831         unsigned int __aux;                             \
832         val = paravirt_rdtscp(&__aux);                  \
833         (aux) = __aux;                                  \
834 } while (0)
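/*
 * Example (sketch): rdtscpll reads the TSC and the per-cpu signature from
 * IA32_TSC_AUX in one go; tsc_with_cpu is a hypothetical helper.
 */
#if 0 /* example only */
static inline u64 tsc_with_cpu(unsigned int *cpu_sig)
{
	u64 t;

	rdtscpll(t, *cpu_sig);
	return t;
}
#endif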
835
836 static inline void paravirt_alloc_ldt(struct desc_struct *ldt, unsigned entries)
837 {
838         PVOP_VCALL2(pv_cpu_ops.alloc_ldt, ldt, entries);
839 }
840
841 static inline void paravirt_free_ldt(struct desc_struct *ldt, unsigned entries)
842 {
843         PVOP_VCALL2(pv_cpu_ops.free_ldt, ldt, entries);
844 }
845
846 static inline void load_TR_desc(void)
847 {
848         PVOP_VCALL0(pv_cpu_ops.load_tr_desc);
849 }
850 static inline void load_gdt(const struct desc_ptr *dtr)
851 {
852         PVOP_VCALL1(pv_cpu_ops.load_gdt, dtr);
853 }
854 static inline void load_idt(const struct desc_ptr *dtr)
855 {
856         PVOP_VCALL1(pv_cpu_ops.load_idt, dtr);
857 }
858 static inline void set_ldt(const void *addr, unsigned entries)
859 {
860         PVOP_VCALL2(pv_cpu_ops.set_ldt, addr, entries);
861 }
862 static inline void store_gdt(struct desc_ptr *dtr)
863 {
864         PVOP_VCALL1(pv_cpu_ops.store_gdt, dtr);
865 }
866 static inline void store_idt(struct desc_ptr *dtr)
867 {
868         PVOP_VCALL1(pv_cpu_ops.store_idt, dtr);
869 }
870 static inline unsigned long paravirt_store_tr(void)
871 {
872         return PVOP_CALL0(unsigned long, pv_cpu_ops.store_tr);
873 }
874 #define store_tr(tr)    ((tr) = paravirt_store_tr())
875 static inline void load_TLS(struct thread_struct *t, unsigned cpu)
876 {
877         PVOP_VCALL2(pv_cpu_ops.load_tls, t, cpu);
878 }
879
880 #ifdef CONFIG_X86_64
881 static inline void load_gs_index(unsigned int gs)
882 {
883         PVOP_VCALL1(pv_cpu_ops.load_gs_index, gs);
884 }
885 #endif
886
887 static inline void write_ldt_entry(struct desc_struct *dt, int entry,
888                                    const void *desc)
889 {
890         PVOP_VCALL3(pv_cpu_ops.write_ldt_entry, dt, entry, desc);
891 }
892
893 static inline void write_gdt_entry(struct desc_struct *dt, int entry,
894                                    void *desc, int type)
895 {
896         PVOP_VCALL4(pv_cpu_ops.write_gdt_entry, dt, entry, desc, type);
897 }
898
899 static inline void write_idt_entry(gate_desc *dt, int entry, const gate_desc *g)
900 {
901         PVOP_VCALL3(pv_cpu_ops.write_idt_entry, dt, entry, g);
902 }
903 static inline void set_iopl_mask(unsigned mask)
904 {
905         PVOP_VCALL1(pv_cpu_ops.set_iopl_mask, mask);
906 }
907
908 /* The paravirtualized I/O functions */
909 static inline void slow_down_io(void)
910 {
911         pv_cpu_ops.io_delay();
912 #ifdef REALLY_SLOW_IO
913         pv_cpu_ops.io_delay();
914         pv_cpu_ops.io_delay();
915         pv_cpu_ops.io_delay();
916 #endif
917 }
918
919 #ifdef CONFIG_X86_LOCAL_APIC
920 static inline void setup_boot_clock(void)
921 {
922         PVOP_VCALL0(pv_apic_ops.setup_boot_clock);
923 }
924
925 static inline void setup_secondary_clock(void)
926 {
927         PVOP_VCALL0(pv_apic_ops.setup_secondary_clock);
928 }
929 #endif
930
931 static inline void paravirt_post_allocator_init(void)
932 {
933         if (pv_init_ops.post_allocator_init)
934                 (*pv_init_ops.post_allocator_init)();
935 }
936
937 static inline void paravirt_pagetable_setup_start(pgd_t *base)
938 {
939         (*pv_mmu_ops.pagetable_setup_start)(base);
940 }
941
942 static inline void paravirt_pagetable_setup_done(pgd_t *base)
943 {
944         (*pv_mmu_ops.pagetable_setup_done)(base);
945 }
946
947 #ifdef CONFIG_SMP
948 static inline void startup_ipi_hook(int phys_apicid, unsigned long start_eip,
949                                     unsigned long start_esp)
950 {
951         PVOP_VCALL3(pv_apic_ops.startup_ipi_hook,
952                     phys_apicid, start_eip, start_esp);
953 }
954 #endif
955
956 static inline void paravirt_activate_mm(struct mm_struct *prev,
957                                         struct mm_struct *next)
958 {
959         PVOP_VCALL2(pv_mmu_ops.activate_mm, prev, next);
960 }
961
962 static inline void arch_dup_mmap(struct mm_struct *oldmm,
963                                  struct mm_struct *mm)
964 {
965         PVOP_VCALL2(pv_mmu_ops.dup_mmap, oldmm, mm);
966 }
967
968 static inline void arch_exit_mmap(struct mm_struct *mm)
969 {
970         PVOP_VCALL1(pv_mmu_ops.exit_mmap, mm);
971 }
972
973 static inline void __flush_tlb(void)
974 {
975         PVOP_VCALL0(pv_mmu_ops.flush_tlb_user);
976 }
977 static inline void __flush_tlb_global(void)
978 {
979         PVOP_VCALL0(pv_mmu_ops.flush_tlb_kernel);
980 }
981 static inline void __flush_tlb_single(unsigned long addr)
982 {
983         PVOP_VCALL1(pv_mmu_ops.flush_tlb_single, addr);
984 }
985
986 static inline void flush_tlb_others(cpumask_t cpumask, struct mm_struct *mm,
987                                     unsigned long va)
988 {
989         PVOP_VCALL3(pv_mmu_ops.flush_tlb_others, &cpumask, mm, va);
990 }
991
992 static inline int paravirt_pgd_alloc(struct mm_struct *mm)
993 {
994         return PVOP_CALL1(int, pv_mmu_ops.pgd_alloc, mm);
995 }
996
997 static inline void paravirt_pgd_free(struct mm_struct *mm, pgd_t *pgd)
998 {
999         PVOP_VCALL2(pv_mmu_ops.pgd_free, mm, pgd);
1000 }
1001
1002 static inline void paravirt_alloc_pte(struct mm_struct *mm, unsigned long pfn)
1003 {
1004         PVOP_VCALL2(pv_mmu_ops.alloc_pte, mm, pfn);
1005 }
1006 static inline void paravirt_release_pte(unsigned long pfn)
1007 {
1008         PVOP_VCALL1(pv_mmu_ops.release_pte, pfn);
1009 }
1010
1011 static inline void paravirt_alloc_pmd(struct mm_struct *mm, unsigned long pfn)
1012 {
1013         PVOP_VCALL2(pv_mmu_ops.alloc_pmd, mm, pfn);
1014 }
1015
1016 static inline void paravirt_alloc_pmd_clone(unsigned long pfn, unsigned long clonepfn,
1017                                             unsigned long start, unsigned long count)
1018 {
1019         PVOP_VCALL4(pv_mmu_ops.alloc_pmd_clone, pfn, clonepfn, start, count);
1020 }
1021 static inline void paravirt_release_pmd(unsigned long pfn)
1022 {
1023         PVOP_VCALL1(pv_mmu_ops.release_pmd, pfn);
1024 }
1025
1026 static inline void paravirt_alloc_pud(struct mm_struct *mm, unsigned long pfn)
1027 {
1028         PVOP_VCALL2(pv_mmu_ops.alloc_pud, mm, pfn);
1029 }
1030 static inline void paravirt_release_pud(unsigned long pfn)
1031 {
1032         PVOP_VCALL1(pv_mmu_ops.release_pud, pfn);
1033 }
1034
1035 #ifdef CONFIG_HIGHPTE
1036 static inline void *kmap_atomic_pte(struct page *page, enum km_type type)
1037 {
1038         unsigned long ret;
1039         ret = PVOP_CALL2(unsigned long, pv_mmu_ops.kmap_atomic_pte, page, type);
1040         return (void *)ret;
1041 }
1042 #endif
1043
1044 static inline void pte_update(struct mm_struct *mm, unsigned long addr,
1045                               pte_t *ptep)
1046 {
1047         PVOP_VCALL3(pv_mmu_ops.pte_update, mm, addr, ptep);
1048 }
1049
1050 static inline void pte_update_defer(struct mm_struct *mm, unsigned long addr,
1051                                     pte_t *ptep)
1052 {
1053         PVOP_VCALL3(pv_mmu_ops.pte_update_defer, mm, addr, ptep);
1054 }
1055
1056 static inline pte_t __pte(pteval_t val)
1057 {
1058         pteval_t ret;
1059
1060         if (sizeof(pteval_t) > sizeof(long))
1061                 ret = PVOP_CALL2(pteval_t,
1062                                  pv_mmu_ops.make_pte,
1063                                  val, (u64)val >> 32);
1064         else
1065                 ret = PVOP_CALL1(pteval_t,
1066                                  pv_mmu_ops.make_pte,
1067                                  val);
1068
1069         return (pte_t) { .pte = ret };
1070 }
1071
1072 static inline pteval_t pte_val(pte_t pte)
1073 {
1074         pteval_t ret;
1075
1076         if (sizeof(pteval_t) > sizeof(long))
1077                 ret = PVOP_CALL2(pteval_t, pv_mmu_ops.pte_val,
1078                                  pte.pte, (u64)pte.pte >> 32);
1079         else
1080                 ret = PVOP_CALL1(pteval_t, pv_mmu_ops.pte_val,
1081                                  pte.pte);
1082
1083         return ret;
1084 }
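/*
 * Note: for the native backend, make_pte and pte_val are identity
 * operations, so pte_val(__pte(x)) == x.  A hypervisor backend may use
 * these hooks to translate between pseudo-physical and machine frame
 * numbers inside the pte.
 */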
1085
1086 static inline pgd_t __pgd(pgdval_t val)
1087 {
1088         pgdval_t ret;
1089
1090         if (sizeof(pgdval_t) > sizeof(long))
1091                 ret = PVOP_CALL2(pgdval_t, pv_mmu_ops.make_pgd,
1092                                  val, (u64)val >> 32);
1093         else
1094                 ret = PVOP_CALL1(pgdval_t, pv_mmu_ops.make_pgd,
1095                                  val);
1096
1097         return (pgd_t) { ret };
1098 }
1099
1100 static inline pgdval_t pgd_val(pgd_t pgd)
1101 {
1102         pgdval_t ret;
1103
1104         if (sizeof(pgdval_t) > sizeof(long))
1105                 ret =  PVOP_CALL2(pgdval_t, pv_mmu_ops.pgd_val,
1106                                   pgd.pgd, (u64)pgd.pgd >> 32);
1107         else
1108                 ret =  PVOP_CALL1(pgdval_t, pv_mmu_ops.pgd_val,
1109                                   pgd.pgd);
1110
1111         return ret;
1112 }
1113
1114 #define  __HAVE_ARCH_PTEP_MODIFY_PROT_TRANSACTION
1115 static inline pte_t ptep_modify_prot_start(struct mm_struct *mm, unsigned long addr,
1116                                            pte_t *ptep)
1117 {
1118         pteval_t ret;
1119
1120         ret = PVOP_CALL3(pteval_t, pv_mmu_ops.ptep_modify_prot_start,
1121                          mm, addr, ptep);
1122
1123         return (pte_t) { .pte = ret };
1124 }
1125
1126 static inline void ptep_modify_prot_commit(struct mm_struct *mm, unsigned long addr,
1127                                            pte_t *ptep, pte_t pte)
1128 {
1129         if (sizeof(pteval_t) > sizeof(long))
1130                 /* 5 arg words */
1131                 pv_mmu_ops.ptep_modify_prot_commit(mm, addr, ptep, pte);
1132         else
1133                 PVOP_VCALL4(pv_mmu_ops.ptep_modify_prot_commit,
1134                             mm, addr, ptep, pte.pte);
1135 }
1136
1137 static inline void set_pte(pte_t *ptep, pte_t pte)
1138 {
1139         if (sizeof(pteval_t) > sizeof(long))
1140                 PVOP_VCALL3(pv_mmu_ops.set_pte, ptep,
1141                             pte.pte, (u64)pte.pte >> 32);
1142         else
1143                 PVOP_VCALL2(pv_mmu_ops.set_pte, ptep,
1144                             pte.pte);
1145 }
1146
1147 static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
1148                               pte_t *ptep, pte_t pte)
1149 {
1150         if (sizeof(pteval_t) > sizeof(long))
1151                 /* 5 arg words */
1152                 pv_mmu_ops.set_pte_at(mm, addr, ptep, pte);
1153         else
1154                 PVOP_VCALL4(pv_mmu_ops.set_pte_at, mm, addr, ptep, pte.pte);
1155 }
1156
1157 static inline void set_pmd(pmd_t *pmdp, pmd_t pmd)
1158 {
1159         pmdval_t val = native_pmd_val(pmd);
1160
1161         if (sizeof(pmdval_t) > sizeof(long))
1162                 PVOP_VCALL3(pv_mmu_ops.set_pmd, pmdp, val, (u64)val >> 32);
1163         else
1164                 PVOP_VCALL2(pv_mmu_ops.set_pmd, pmdp, val);
1165 }
1166
1167 #if PAGETABLE_LEVELS >= 3
1168 static inline pmd_t __pmd(pmdval_t val)
1169 {
1170         pmdval_t ret;
1171
1172         if (sizeof(pmdval_t) > sizeof(long))
1173                 ret = PVOP_CALL2(pmdval_t, pv_mmu_ops.make_pmd,
1174                                  val, (u64)val >> 32);
1175         else
1176                 ret = PVOP_CALL1(pmdval_t, pv_mmu_ops.make_pmd,
1177                                  val);
1178
1179         return (pmd_t) { ret };
1180 }
1181
1182 static inline pmdval_t pmd_val(pmd_t pmd)
1183 {
1184         pmdval_t ret;
1185
1186         if (sizeof(pmdval_t) > sizeof(long))
1187                 ret =  PVOP_CALL2(pmdval_t, pv_mmu_ops.pmd_val,
1188                                   pmd.pmd, (u64)pmd.pmd >> 32);
1189         else
1190                 ret =  PVOP_CALL1(pmdval_t, pv_mmu_ops.pmd_val,
1191                                   pmd.pmd);
1192
1193         return ret;
1194 }
1195
1196 static inline void set_pud(pud_t *pudp, pud_t pud)
1197 {
1198         pudval_t val = native_pud_val(pud);
1199
1200         if (sizeof(pudval_t) > sizeof(long))
1201                 PVOP_VCALL3(pv_mmu_ops.set_pud, pudp,
1202                             val, (u64)val >> 32);
1203         else
1204                 PVOP_VCALL2(pv_mmu_ops.set_pud, pudp,
1205                             val);
1206 }
1207 #if PAGETABLE_LEVELS == 4
1208 static inline pud_t __pud(pudval_t val)
1209 {
1210         pudval_t ret;
1211
1212         if (sizeof(pudval_t) > sizeof(long))
1213                 ret = PVOP_CALL2(pudval_t, pv_mmu_ops.make_pud,
1214                                  val, (u64)val >> 32);
1215         else
1216                 ret = PVOP_CALL1(pudval_t, pv_mmu_ops.make_pud,
1217                                  val);
1218
1219         return (pud_t) { ret };
1220 }
1221
1222 static inline pudval_t pud_val(pud_t pud)
1223 {
1224         pudval_t ret;
1225
1226         if (sizeof(pudval_t) > sizeof(long))
1227                 ret =  PVOP_CALL2(pudval_t, pv_mmu_ops.pud_val,
1228                                   pud.pud, (u64)pud.pud >> 32);
1229         else
1230                 ret =  PVOP_CALL1(pudval_t, pv_mmu_ops.pud_val,
1231                                   pud.pud);
1232
1233         return ret;
1234 }
1235
1236 static inline void set_pgd(pgd_t *pgdp, pgd_t pgd)
1237 {
1238         pgdval_t val = native_pgd_val(pgd);
1239
1240         if (sizeof(pgdval_t) > sizeof(long))
1241                 PVOP_VCALL3(pv_mmu_ops.set_pgd, pgdp,
1242                             val, (u64)val >> 32);
1243         else
1244                 PVOP_VCALL2(pv_mmu_ops.set_pgd, pgdp,
1245                             val);
1246 }
1247
1248 static inline void pgd_clear(pgd_t *pgdp)
1249 {
1250         set_pgd(pgdp, __pgd(0));
1251 }
1252
1253 static inline void pud_clear(pud_t *pudp)
1254 {
1255         set_pud(pudp, __pud(0));
1256 }
1257
1258 #endif  /* PAGETABLE_LEVELS == 4 */
1259
1260 #endif  /* PAGETABLE_LEVELS >= 3 */
1261
1262 #ifdef CONFIG_X86_PAE
1263 /* Special-case pte-setting operations for PAE, which can't update a
1264    64-bit pte atomically */
1265 static inline void set_pte_atomic(pte_t *ptep, pte_t pte)
1266 {
1267         PVOP_VCALL3(pv_mmu_ops.set_pte_atomic, ptep,
1268                     pte.pte, pte.pte >> 32);
1269 }
1270
1271 static inline void set_pte_present(struct mm_struct *mm, unsigned long addr,
1272                                    pte_t *ptep, pte_t pte)
1273 {
1274         /* 5 arg words */
1275         pv_mmu_ops.set_pte_present(mm, addr, ptep, pte);
1276 }
1277
1278 static inline void pte_clear(struct mm_struct *mm, unsigned long addr,
1279                              pte_t *ptep)
1280 {
1281         PVOP_VCALL3(pv_mmu_ops.pte_clear, mm, addr, ptep);
1282 }
1283
1284 static inline void pmd_clear(pmd_t *pmdp)
1285 {
1286         PVOP_VCALL1(pv_mmu_ops.pmd_clear, pmdp);
1287 }
1288 #else  /* !CONFIG_X86_PAE */
1289 static inline void set_pte_atomic(pte_t *ptep, pte_t pte)
1290 {
1291         set_pte(ptep, pte);
1292 }
1293
1294 static inline void set_pte_present(struct mm_struct *mm, unsigned long addr,
1295                                    pte_t *ptep, pte_t pte)
1296 {
1297         set_pte(ptep, pte);
1298 }
1299
1300 static inline void pte_clear(struct mm_struct *mm, unsigned long addr,
1301                              pte_t *ptep)
1302 {
1303         set_pte_at(mm, addr, ptep, __pte(0));
1304 }
1305
1306 static inline void pmd_clear(pmd_t *pmdp)
1307 {
1308         set_pmd(pmdp, __pmd(0));
1309 }
1310 #endif  /* CONFIG_X86_PAE */
1311
1312 /* Lazy mode for batching updates / context switch */
1313 enum paravirt_lazy_mode {
1314         PARAVIRT_LAZY_NONE,
1315         PARAVIRT_LAZY_MMU,
1316         PARAVIRT_LAZY_CPU,
1317 };
1318
1319 enum paravirt_lazy_mode paravirt_get_lazy_mode(void);
1320 void paravirt_enter_lazy_cpu(void);
1321 void paravirt_leave_lazy_cpu(void);
1322 void paravirt_enter_lazy_mmu(void);
1323 void paravirt_leave_lazy_mmu(void);
1324 void paravirt_leave_lazy(enum paravirt_lazy_mode mode);
1325
1326 #define  __HAVE_ARCH_ENTER_LAZY_CPU_MODE
1327 static inline void arch_enter_lazy_cpu_mode(void)
1328 {
1329         PVOP_VCALL0(pv_cpu_ops.lazy_mode.enter);
1330 }
1331
1332 static inline void arch_leave_lazy_cpu_mode(void)
1333 {
1334         PVOP_VCALL0(pv_cpu_ops.lazy_mode.leave);
1335 }
1336
1337 static inline void arch_flush_lazy_cpu_mode(void)
1338 {
1339         if (unlikely(paravirt_get_lazy_mode() == PARAVIRT_LAZY_CPU)) {
1340                 arch_leave_lazy_cpu_mode();
1341                 arch_enter_lazy_cpu_mode();
1342         }
1343 }
1344
1345
1346 #define  __HAVE_ARCH_ENTER_LAZY_MMU_MODE
1347 static inline void arch_enter_lazy_mmu_mode(void)
1348 {
1349         PVOP_VCALL0(pv_mmu_ops.lazy_mode.enter);
1350 }
1351
1352 static inline void arch_leave_lazy_mmu_mode(void)
1353 {
1354         PVOP_VCALL0(pv_mmu_ops.lazy_mode.leave);
1355 }
1356
1357 static inline void arch_flush_lazy_mmu_mode(void)
1358 {
1359         if (unlikely(paravirt_get_lazy_mode() == PARAVIRT_LAZY_MMU)) {
1360                 arch_leave_lazy_mmu_mode();
1361                 arch_enter_lazy_mmu_mode();
1362         }
1363 }
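/*
 * Example (sketch): batching a run of pagetable updates so a hypervisor
 * backend can issue them as one hypercall.  example_remap is a
 * hypothetical caller; the helpers are the ones defined in this header.
 */
#if 0 /* example only */
static void example_remap(struct mm_struct *mm, unsigned long addr,
			  pte_t *ptep, pte_t pte, int nr)
{
	int i;

	arch_enter_lazy_mmu_mode();
	for (i = 0; i < nr; i++)
		set_pte_at(mm, addr + i * PAGE_SIZE, ptep + i, pte);
	arch_leave_lazy_mmu_mode();
}
#endif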
1364
1365 static inline void __set_fixmap(unsigned /* enum fixed_addresses */ idx,
1366                                 unsigned long phys, pgprot_t flags)
1367 {
1368         pv_mmu_ops.set_fixmap(idx, phys, flags);
1369 }
1370
1371 void _paravirt_nop(void);
1372 #define paravirt_nop    ((void *)_paravirt_nop)
1373
1374 void paravirt_use_bytelocks(void);
1375
1376 #ifdef CONFIG_SMP
1377
1378 static inline int __raw_spin_is_locked(struct raw_spinlock *lock)
1379 {
1380         return PVOP_CALL1(int, pv_lock_ops.spin_is_locked, lock);
1381 }
1382
1383 static inline int __raw_spin_is_contended(struct raw_spinlock *lock)
1384 {
1385         return PVOP_CALL1(int, pv_lock_ops.spin_is_contended, lock);
1386 }
1387
1388 static __always_inline void __raw_spin_lock(struct raw_spinlock *lock)
1389 {
1390         PVOP_VCALL1(pv_lock_ops.spin_lock, lock);
1391 }
1392
1393 static __always_inline void __raw_spin_lock_flags(struct raw_spinlock *lock,
1394                                                   unsigned long flags)
1395 {
1396         PVOP_VCALL2(pv_lock_ops.spin_lock_flags, lock, flags);
1397 }
1398
1399 static __always_inline int __raw_spin_trylock(struct raw_spinlock *lock)
1400 {
1401         return PVOP_CALL1(int, pv_lock_ops.spin_trylock, lock);
1402 }
1403
1404 static __always_inline void __raw_spin_unlock(struct raw_spinlock *lock)
1405 {
1406         PVOP_VCALL1(pv_lock_ops.spin_unlock, lock);
1407 }
1408
1409 #endif
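/*
 * Example (sketch): a backend can switch the kernel to byte locks with
 * paravirt_use_bytelocks() (declared above), or install its own handlers.
 * The example_* functions here are hypothetical.
 */
#if 0 /* example only */
static void example_init_locks(void)
{
	pv_lock_ops.spin_lock = example_spin_lock;	/* hypothetical */
	pv_lock_ops.spin_unlock = example_spin_unlock;	/* hypothetical */
}
#endif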
1410
1411 /* These all sit in the .parainstructions section to tell us what to patch. */
1412 struct paravirt_patch_site {
1413         u8 *instr;              /* original instructions */
1414         u8 instrtype;           /* type of this instruction */
1415         u8 len;                 /* length of original instruction */
1416         u16 clobbers;           /* what registers you may clobber */
1417 };
1418
1419 extern struct paravirt_patch_site __parainstructions[],
1420         __parainstructions_end[];
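/*
 * apply_paravirt() in arch/x86/kernel/alternative.c walks these records at
 * boot.  A minimal sketch of that walk (illustrative, not the exact
 * upstream body):
 */
#if 0 /* example only */
void apply_paravirt(struct paravirt_patch_site *start,
		    struct paravirt_patch_site *end)
{
	struct paravirt_patch_site *p;
	char insnbuf[16];

	for (p = start; p < end; p++) {
		unsigned used;

		used = pv_init_ops.patch(p->instrtype, p->clobbers, insnbuf,
					 (unsigned long)p->instr, p->len);
		/* nop-pad the remaining p->len - used bytes, then copy
		 * insnbuf back over p->instr */
	}
}
#endif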
1421
1422 #ifdef CONFIG_X86_32
1423 #define PV_SAVE_REGS "pushl %%ecx; pushl %%edx;"
1424 #define PV_RESTORE_REGS "popl %%edx; popl %%ecx"
1425 #define PV_FLAGS_ARG "0"
1426 #define PV_EXTRA_CLOBBERS
1427 #define PV_VEXTRA_CLOBBERS
1428 #else
1429 /* We save some registers, but not all of them; that would be too much.  We
1430  * clobber all caller-saved registers except the argument register */
1431 #define PV_SAVE_REGS "pushq %%rdi;"
1432 #define PV_RESTORE_REGS "popq %%rdi;"
1433 #define PV_EXTRA_CLOBBERS EXTRA_CLOBBERS, "rcx" , "rdx", "rsi"
1434 #define PV_VEXTRA_CLOBBERS EXTRA_CLOBBERS, "rdi", "rcx" , "rdx", "rsi"
1435 #define PV_FLAGS_ARG "D"
1436 #endif
1437
1438 static inline unsigned long __raw_local_save_flags(void)
1439 {
1440         unsigned long f;
1441
1442         asm volatile(paravirt_alt(PV_SAVE_REGS
1443                                   PARAVIRT_CALL
1444                                   PV_RESTORE_REGS)
1445                      : "=a"(f)
1446                      : paravirt_type(pv_irq_ops.save_fl),
1447                        paravirt_clobber(CLBR_EAX)
1448                      : "memory", "cc" PV_VEXTRA_CLOBBERS);
1449         return f;
1450 }
1451
1452 static inline void raw_local_irq_restore(unsigned long f)
1453 {
1454         asm volatile(paravirt_alt(PV_SAVE_REGS
1455                                   PARAVIRT_CALL
1456                                   PV_RESTORE_REGS)
1457                      : "=a"(f)
1458                      : PV_FLAGS_ARG(f),
1459                        paravirt_type(pv_irq_ops.restore_fl),
1460                        paravirt_clobber(CLBR_EAX)
1461                      : "memory", "cc" PV_EXTRA_CLOBBERS);
1462 }
1463
1464 static inline void raw_local_irq_disable(void)
1465 {
1466         asm volatile(paravirt_alt(PV_SAVE_REGS
1467                                   PARAVIRT_CALL
1468                                   PV_RESTORE_REGS)
1469                      :
1470                      : paravirt_type(pv_irq_ops.irq_disable),
1471                        paravirt_clobber(CLBR_EAX)
1472                      : "memory", "eax", "cc" PV_EXTRA_CLOBBERS);
1473 }
1474
1475 static inline void raw_local_irq_enable(void)
1476 {
1477         asm volatile(paravirt_alt(PV_SAVE_REGS
1478                                   PARAVIRT_CALL
1479                                   PV_RESTORE_REGS)
1480                      :
1481                      : paravirt_type(pv_irq_ops.irq_enable),
1482                        paravirt_clobber(CLBR_EAX)
1483                      : "memory", "eax", "cc" PV_EXTRA_CLOBBERS);
1484 }
1485
1486 static inline unsigned long __raw_local_irq_save(void)
1487 {
1488         unsigned long f;
1489
1490         f = __raw_local_save_flags();
1491         raw_local_irq_disable();
1492         return f;
1493 }
1494
1495
1496 /* Make sure as little as possible of this mess escapes. */
1497 #undef PARAVIRT_CALL
1498 #undef __PVOP_CALL
1499 #undef __PVOP_VCALL
1500 #undef PVOP_VCALL0
1501 #undef PVOP_CALL0
1502 #undef PVOP_VCALL1
1503 #undef PVOP_CALL1
1504 #undef PVOP_VCALL2
1505 #undef PVOP_CALL2
1506 #undef PVOP_VCALL3
1507 #undef PVOP_CALL3
1508 #undef PVOP_VCALL4
1509 #undef PVOP_CALL4
1510
1511 #else  /* __ASSEMBLY__ */
1512
1513 #define _PVSITE(ptype, clobbers, ops, word, algn)       \
1514 771:;                                           \
1515         ops;                                    \
1516 772:;                                           \
1517         .pushsection .parainstructions,"a";     \
1518          .align algn;                           \
1519          word 771b;                             \
1520          .byte ptype;                           \
1521          .byte 772b-771b;                       \
1522          .short clobbers;                       \
1523         .popsection
1524
1525
1526 #ifdef CONFIG_X86_64
1527 #define PV_SAVE_REGS                            \
1528         push %rax;                              \
1529         push %rcx;                              \
1530         push %rdx;                              \
1531         push %rsi;                              \
1532         push %rdi;                              \
1533         push %r8;                               \
1534         push %r9;                               \
1535         push %r10;                              \
1536         push %r11
1537 #define PV_RESTORE_REGS                         \
1538         pop %r11;                               \
1539         pop %r10;                               \
1540         pop %r9;                                \
1541         pop %r8;                                \
1542         pop %rdi;                               \
1543         pop %rsi;                               \
1544         pop %rdx;                               \
1545         pop %rcx;                               \
1546         pop %rax
1547 #define PARA_PATCH(struct, off)        ((PARAVIRT_PATCH_##struct + (off)) / 8)
1548 #define PARA_SITE(ptype, clobbers, ops) _PVSITE(ptype, clobbers, ops, .quad, 8)
1549 #define PARA_INDIRECT(addr)     *addr(%rip)
1550 #else
1551 #define PV_SAVE_REGS   pushl %eax; pushl %edi; pushl %ecx; pushl %edx
1552 #define PV_RESTORE_REGS popl %edx; popl %ecx; popl %edi; popl %eax
1553 #define PARA_PATCH(struct, off)        ((PARAVIRT_PATCH_##struct + (off)) / 4)
1554 #define PARA_SITE(ptype, clobbers, ops) _PVSITE(ptype, clobbers, ops, .long, 4)
1555 #define PARA_INDIRECT(addr)     *%cs:addr
1556 #endif
1557
1558 #define INTERRUPT_RETURN                                                \
1559         PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_iret), CLBR_NONE,       \
1560                   jmp PARA_INDIRECT(pv_cpu_ops+PV_CPU_iret))
1561
1562 #define DISABLE_INTERRUPTS(clobbers)                                    \
1563         PARA_SITE(PARA_PATCH(pv_irq_ops, PV_IRQ_irq_disable), clobbers, \
1564                   PV_SAVE_REGS;                                         \
1565                   call PARA_INDIRECT(pv_irq_ops+PV_IRQ_irq_disable);    \
1566                   PV_RESTORE_REGS;)
1567
1568 #define ENABLE_INTERRUPTS(clobbers)                                     \
1569         PARA_SITE(PARA_PATCH(pv_irq_ops, PV_IRQ_irq_enable), clobbers,  \
1570                   PV_SAVE_REGS;                                         \
1571                   call PARA_INDIRECT(pv_irq_ops+PV_IRQ_irq_enable);     \
1572                   PV_RESTORE_REGS;)
1573
1574 #define USERGS_SYSRET32                                                 \
1575         PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_usergs_sysret32),       \
1576                   CLBR_NONE,                                            \
1577                   jmp PARA_INDIRECT(pv_cpu_ops+PV_CPU_usergs_sysret32))
1578
1579 #ifdef CONFIG_X86_32
1580 #define GET_CR0_INTO_EAX                                \
1581         push %ecx; push %edx;                           \
1582         call PARA_INDIRECT(pv_cpu_ops+PV_CPU_read_cr0); \
1583         pop %edx; pop %ecx
1584
1585 #define ENABLE_INTERRUPTS_SYSEXIT                                       \
1586         PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_irq_enable_sysexit),    \
1587                   CLBR_NONE,                                            \
1588                   jmp PARA_INDIRECT(pv_cpu_ops+PV_CPU_irq_enable_sysexit))
1589
1590
1591 #else   /* !CONFIG_X86_32 */
1592
1593 /*
1594  * If swapgs is used while the userspace stack is still current,
1595  * there's no way to call a pvop.  The PV replacement *must* be
1596  * inlined, or the swapgs instruction must be trapped and emulated.
1597  */
1598 #define SWAPGS_UNSAFE_STACK                                             \
1599         PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_swapgs), CLBR_NONE,     \
1600                   swapgs)
1601
1602 #define SWAPGS                                                          \
1603         PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_swapgs), CLBR_NONE,     \
1604                   PV_SAVE_REGS;                                         \
1605                   call PARA_INDIRECT(pv_cpu_ops+PV_CPU_swapgs);         \
1606                   PV_RESTORE_REGS                                       \
1607                  )
1608
1609 #define GET_CR2_INTO_RCX                                \
1610         call PARA_INDIRECT(pv_mmu_ops+PV_MMU_read_cr2); \
1611         movq %rax, %rcx;                                \
1612         xorq %rax, %rax;
1613
1614 #define PARAVIRT_ADJUST_EXCEPTION_FRAME                                 \
1615         PARA_SITE(PARA_PATCH(pv_irq_ops, PV_IRQ_adjust_exception_frame), \
1616                   CLBR_NONE,                                            \
1617                   call PARA_INDIRECT(pv_irq_ops+PV_IRQ_adjust_exception_frame))
1618
1619 #define USERGS_SYSRET64                                                 \
1620         PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_usergs_sysret64),       \
1621                   CLBR_NONE,                                            \
1622                   jmp PARA_INDIRECT(pv_cpu_ops+PV_CPU_usergs_sysret64))
1623
1624 #define ENABLE_INTERRUPTS_SYSEXIT32                                     \
1625         PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_irq_enable_sysexit),    \
1626                   CLBR_NONE,                                            \
1627                   jmp PARA_INDIRECT(pv_cpu_ops+PV_CPU_irq_enable_sysexit))
1628 #endif  /* CONFIG_X86_32 */
1629
1630 #endif /* __ASSEMBLY__ */
1631 #endif /* CONFIG_PARAVIRT */
1632 #endif /* _ASM_X86_PARAVIRT_H */