/*
 * Kernel-based Virtual Machine driver for Linux
 *
 * derived from drivers/kvm/kvm_main.c
 *
 * Copyright (C) 2006 Qumranet, Inc.
 * Copyright (C) 2008 Qumranet, Inc.
 * Copyright IBM Corporation, 2008
 * Copyright 2010 Red Hat, Inc. and/or its affiliates.
 *
 * Authors:
 *   Avi Kivity   <avi@qumranet.com>
 *   Yaniv Kamay  <yaniv@qumranet.com>
 *   Amit Shah    <amit.shah@qumranet.com>
 *   Ben-Ami Yassour <benami@il.ibm.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 */

#include <linux/kvm_host.h>
#include "irq.h"
#include "mmu.h"
#include "i8254.h"
#include "tss.h"
#include "kvm_cache_regs.h"
#include "x86.h"

#include <linux/clocksource.h>
#include <linux/interrupt.h>
#include <linux/kvm.h>
#include <linux/fs.h>
#include <linux/vmalloc.h>
#include <linux/module.h>
#include <linux/mman.h>
#include <linux/highmem.h>
#include <linux/iommu.h>
#include <linux/intel-iommu.h>
#include <linux/cpufreq.h>
#include <linux/user-return-notifier.h>
#include <linux/srcu.h>
#include <linux/slab.h>
#include <linux/perf_event.h>
#include <linux/uaccess.h>
#include <trace/events/kvm.h>

#define CREATE_TRACE_POINTS
#include "trace.h"

#include <asm/debugreg.h>
#include <asm/msr.h>
#include <asm/desc.h>
#include <asm/mtrr.h>
#include <asm/mce.h>
#include <asm/i387.h>
#include <asm/xcr.h>

#define MAX_IO_MSRS 256
#define CR0_RESERVED_BITS                                               \
        (~(unsigned long)(X86_CR0_PE | X86_CR0_MP | X86_CR0_EM | X86_CR0_TS \
                          | X86_CR0_ET | X86_CR0_NE | X86_CR0_WP | X86_CR0_AM \
                          | X86_CR0_NW | X86_CR0_CD | X86_CR0_PG))
#define CR4_RESERVED_BITS                                               \
        (~(unsigned long)(X86_CR4_VME | X86_CR4_PVI | X86_CR4_TSD | X86_CR4_DE\
                          | X86_CR4_PSE | X86_CR4_PAE | X86_CR4_MCE     \
                          | X86_CR4_PGE | X86_CR4_PCE | X86_CR4_OSFXSR  \
                          | X86_CR4_OSXSAVE \
                          | X86_CR4_OSXMMEXCPT | X86_CR4_VMXE))

#define CR8_RESERVED_BITS (~(unsigned long)X86_CR8_TPR)

#define KVM_MAX_MCE_BANKS 32
#define KVM_MCE_CAP_SUPPORTED MCG_CTL_P

/* EFER defaults:
 * - enable syscall by default because it is emulated by KVM
 * - enable LME and LMA by default on 64-bit KVM
 */
#ifdef CONFIG_X86_64
static u64 __read_mostly efer_reserved_bits = 0xfffffffffffffafeULL;
#else
static u64 __read_mostly efer_reserved_bits = 0xfffffffffffffffeULL;
#endif

#define VM_STAT(x) offsetof(struct kvm, stat.x), KVM_STAT_VM
#define VCPU_STAT(x) offsetof(struct kvm_vcpu, stat.x), KVM_STAT_VCPU

static void update_cr8_intercept(struct kvm_vcpu *vcpu);
static int kvm_dev_ioctl_get_supported_cpuid(struct kvm_cpuid2 *cpuid,
                                    struct kvm_cpuid_entry2 __user *entries);

struct kvm_x86_ops *kvm_x86_ops;
EXPORT_SYMBOL_GPL(kvm_x86_ops);

static bool ignore_msrs = 0;
module_param_named(ignore_msrs, ignore_msrs, bool, S_IRUGO | S_IWUSR);

#define KVM_NR_SHARED_MSRS 16

struct kvm_shared_msrs_global {
        int nr;
        u32 msrs[KVM_NR_SHARED_MSRS];
};

struct kvm_shared_msrs {
        struct user_return_notifier urn;
        bool registered;
        struct kvm_shared_msr_values {
                u64 host;
                u64 curr;
        } values[KVM_NR_SHARED_MSRS];
};

static struct kvm_shared_msrs_global __read_mostly shared_msrs_global;
static DEFINE_PER_CPU(struct kvm_shared_msrs, shared_msrs);

struct kvm_stats_debugfs_item debugfs_entries[] = {
        { "pf_fixed", VCPU_STAT(pf_fixed) },
        { "pf_guest", VCPU_STAT(pf_guest) },
        { "tlb_flush", VCPU_STAT(tlb_flush) },
        { "invlpg", VCPU_STAT(invlpg) },
        { "exits", VCPU_STAT(exits) },
        { "io_exits", VCPU_STAT(io_exits) },
        { "mmio_exits", VCPU_STAT(mmio_exits) },
        { "signal_exits", VCPU_STAT(signal_exits) },
        { "irq_window", VCPU_STAT(irq_window_exits) },
        { "nmi_window", VCPU_STAT(nmi_window_exits) },
        { "halt_exits", VCPU_STAT(halt_exits) },
        { "halt_wakeup", VCPU_STAT(halt_wakeup) },
        { "hypercalls", VCPU_STAT(hypercalls) },
        { "request_irq", VCPU_STAT(request_irq_exits) },
        { "irq_exits", VCPU_STAT(irq_exits) },
        { "host_state_reload", VCPU_STAT(host_state_reload) },
        { "efer_reload", VCPU_STAT(efer_reload) },
        { "fpu_reload", VCPU_STAT(fpu_reload) },
        { "insn_emulation", VCPU_STAT(insn_emulation) },
        { "insn_emulation_fail", VCPU_STAT(insn_emulation_fail) },
        { "irq_injections", VCPU_STAT(irq_injections) },
        { "nmi_injections", VCPU_STAT(nmi_injections) },
        { "mmu_shadow_zapped", VM_STAT(mmu_shadow_zapped) },
        { "mmu_pte_write", VM_STAT(mmu_pte_write) },
        { "mmu_pte_updated", VM_STAT(mmu_pte_updated) },
        { "mmu_pde_zapped", VM_STAT(mmu_pde_zapped) },
        { "mmu_flooded", VM_STAT(mmu_flooded) },
        { "mmu_recycled", VM_STAT(mmu_recycled) },
        { "mmu_cache_miss", VM_STAT(mmu_cache_miss) },
        { "mmu_unsync", VM_STAT(mmu_unsync) },
        { "remote_tlb_flush", VM_STAT(remote_tlb_flush) },
        { "largepages", VM_STAT(lpages) },
        { NULL }
};

u64 __read_mostly host_xcr0;

static inline u32 bit(int bitno)
{
        return 1 << (bitno & 31);
}

static void kvm_on_user_return(struct user_return_notifier *urn)
{
        unsigned slot;
        struct kvm_shared_msrs *locals
                = container_of(urn, struct kvm_shared_msrs, urn);
        struct kvm_shared_msr_values *values;

        for (slot = 0; slot < shared_msrs_global.nr; ++slot) {
                values = &locals->values[slot];
                if (values->host != values->curr) {
                        wrmsrl(shared_msrs_global.msrs[slot], values->host);
                        values->curr = values->host;
                }
        }
        locals->registered = false;
        user_return_notifier_unregister(urn);
}

static void shared_msr_update(unsigned slot, u32 msr)
{
        struct kvm_shared_msrs *smsr;
        u64 value;

        smsr = &__get_cpu_var(shared_msrs);
        /* shared_msrs_global is only read here, and nobody should be
         * modifying it at this time, so no lock is needed */
        if (slot >= shared_msrs_global.nr) {
                printk(KERN_ERR "kvm: invalid MSR slot!");
                return;
        }
        rdmsrl_safe(msr, &value);
        smsr->values[slot].host = value;
        smsr->values[slot].curr = value;
}

void kvm_define_shared_msr(unsigned slot, u32 msr)
{
        if (slot >= shared_msrs_global.nr)
                shared_msrs_global.nr = slot + 1;
        shared_msrs_global.msrs[slot] = msr;
        /* make sure the update to shared_msrs_global is published before
         * any CPU reads it */
        smp_wmb();
}
EXPORT_SYMBOL_GPL(kvm_define_shared_msr);

static void kvm_shared_msr_cpu_online(void)
{
        unsigned i;

        for (i = 0; i < shared_msrs_global.nr; ++i)
                shared_msr_update(i, shared_msrs_global.msrs[i]);
}

void kvm_set_shared_msr(unsigned slot, u64 value, u64 mask)
{
        struct kvm_shared_msrs *smsr = &__get_cpu_var(shared_msrs);

        if (((value ^ smsr->values[slot].curr) & mask) == 0)
                return;
        smsr->values[slot].curr = value;
        wrmsrl(shared_msrs_global.msrs[slot], value);
        if (!smsr->registered) {
                smsr->urn.on_user_return = kvm_on_user_return;
                user_return_notifier_register(&smsr->urn);
                smsr->registered = true;
        }
}
EXPORT_SYMBOL_GPL(kvm_set_shared_msr);
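
/*
 * Typical use of the shared-MSR machinery (an illustrative sketch, not a
 * required call sequence): vendor code claims a slot once at hardware
 * setup time and then rewrites it cheaply around guest entry, e.g.:
 *
 *	kvm_define_shared_msr(0, MSR_SYSCALL_MASK);	(module init)
 *	...
 *	kvm_set_shared_msr(0, guest_val, -1ull);	(vcpu load/entry)
 *
 * The user-return notifier then restores the host value lazily, only when
 * the CPU actually returns to userspace, rather than on every exit.
 */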

static void drop_user_return_notifiers(void *ignore)
{
        struct kvm_shared_msrs *smsr = &__get_cpu_var(shared_msrs);

        if (smsr->registered)
                kvm_on_user_return(&smsr->urn);
}

u64 kvm_get_apic_base(struct kvm_vcpu *vcpu)
{
        /* The field is read the same way whether the irqchip is in the
         * kernel or in userspace, so no irqchip_in_kernel() check is
         * needed here. */
        return vcpu->arch.apic_base;
}
EXPORT_SYMBOL_GPL(kvm_get_apic_base);

void kvm_set_apic_base(struct kvm_vcpu *vcpu, u64 data)
{
        /* TODO: reserve bits check */
        if (irqchip_in_kernel(vcpu->kvm))
                kvm_lapic_set_base(vcpu, data);
        else
                vcpu->arch.apic_base = data;
}
EXPORT_SYMBOL_GPL(kvm_set_apic_base);

#define EXCPT_BENIGN            0
#define EXCPT_CONTRIBUTORY      1
#define EXCPT_PF                2

static int exception_class(int vector)
{
        switch (vector) {
        case PF_VECTOR:
                return EXCPT_PF;
        case DE_VECTOR:
        case TS_VECTOR:
        case NP_VECTOR:
        case SS_VECTOR:
        case GP_VECTOR:
                return EXCPT_CONTRIBUTORY;
        default:
                break;
        }
        return EXCPT_BENIGN;
}

static void kvm_multiple_exception(struct kvm_vcpu *vcpu,
                unsigned nr, bool has_error, u32 error_code,
                bool reinject)
{
        u32 prev_nr;
        int class1, class2;

        if (!vcpu->arch.exception.pending) {
        queue:
                vcpu->arch.exception.pending = true;
                vcpu->arch.exception.has_error_code = has_error;
                vcpu->arch.exception.nr = nr;
                vcpu->arch.exception.error_code = error_code;
                vcpu->arch.exception.reinject = reinject;
                return;
        }

        /* a previous exception is still pending; work out how it combines
         * with the new one */
        prev_nr = vcpu->arch.exception.nr;
        if (prev_nr == DF_VECTOR) {
                /* triple fault -> shutdown */
                set_bit(KVM_REQ_TRIPLE_FAULT, &vcpu->requests);
                return;
        }
        class1 = exception_class(prev_nr);
        class2 = exception_class(nr);
        if ((class1 == EXCPT_CONTRIBUTORY && class2 == EXCPT_CONTRIBUTORY)
                || (class1 == EXCPT_PF && class2 != EXCPT_BENIGN)) {
                /* generate double fault per SDM Table 5-5 */
                vcpu->arch.exception.pending = true;
                vcpu->arch.exception.has_error_code = true;
                vcpu->arch.exception.nr = DF_VECTOR;
                vcpu->arch.exception.error_code = 0;
        } else
                /* replace the previous exception with the new one, in the
                   hope that instruction re-execution will regenerate the
                   lost exception */
                goto queue;
}
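
/*
 * Example of the combination rules above, per SDM Table 5-5: a #GP
 * (contributory) raised while a #NP (contributory) is still pending
 * escalates to #DF with error code 0, and any further fault while #DF is
 * pending becomes a triple fault that shuts the guest down.  A benign
 * exception such as #DB never escalates; it simply replaces the pending
 * one via the queue: path above.
 */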

void kvm_queue_exception(struct kvm_vcpu *vcpu, unsigned nr)
{
        kvm_multiple_exception(vcpu, nr, false, 0, false);
}
EXPORT_SYMBOL_GPL(kvm_queue_exception);

void kvm_requeue_exception(struct kvm_vcpu *vcpu, unsigned nr)
{
        kvm_multiple_exception(vcpu, nr, false, 0, true);
}
EXPORT_SYMBOL_GPL(kvm_requeue_exception);

void kvm_inject_page_fault(struct kvm_vcpu *vcpu, unsigned long addr,
                           u32 error_code)
{
        ++vcpu->stat.pf_guest;
        vcpu->arch.cr2 = addr;
        kvm_queue_exception_e(vcpu, PF_VECTOR, error_code);
}

void kvm_inject_nmi(struct kvm_vcpu *vcpu)
{
        vcpu->arch.nmi_pending = 1;
}
EXPORT_SYMBOL_GPL(kvm_inject_nmi);

void kvm_queue_exception_e(struct kvm_vcpu *vcpu, unsigned nr, u32 error_code)
{
        kvm_multiple_exception(vcpu, nr, true, error_code, false);
}
EXPORT_SYMBOL_GPL(kvm_queue_exception_e);

void kvm_requeue_exception_e(struct kvm_vcpu *vcpu, unsigned nr, u32 error_code)
{
        kvm_multiple_exception(vcpu, nr, true, error_code, true);
}
EXPORT_SYMBOL_GPL(kvm_requeue_exception_e);

/*
 * Check whether cpl <= required_cpl; if so, return true.  Otherwise queue
 * a #GP and return false.
 */
bool kvm_require_cpl(struct kvm_vcpu *vcpu, int required_cpl)
{
        if (kvm_x86_ops->get_cpl(vcpu) <= required_cpl)
                return true;
        kvm_queue_exception_e(vcpu, GP_VECTOR, 0);
        return false;
}
EXPORT_SYMBOL_GPL(kvm_require_cpl);

/*
 * Load the pae pdptrs.  Return true if they are all valid.
 */
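/*
 * For illustration: in PAE mode, CR3 bits 31:5 point at a 32-byte-aligned
 * table of four 8-byte PDPTEs, so e.g. cr3 = 0x12345020 makes the code
 * below read gfn 0x12345 at byte offset ((0x020 >> 5) << 2) * 8 = 0x20,
 * i.e. the second 32-byte slot within that page.
 */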
int load_pdptrs(struct kvm_vcpu *vcpu, unsigned long cr3)
{
        gfn_t pdpt_gfn = cr3 >> PAGE_SHIFT;
        unsigned offset = ((cr3 & (PAGE_SIZE-1)) >> 5) << 2;
        int i;
        int ret;
        u64 pdpte[ARRAY_SIZE(vcpu->arch.pdptrs)];

        ret = kvm_read_guest_page(vcpu->kvm, pdpt_gfn, pdpte,
                                  offset * sizeof(u64), sizeof(pdpte));
        if (ret < 0) {
                ret = 0;
                goto out;
        }
        for (i = 0; i < ARRAY_SIZE(pdpte); ++i) {
                if (is_present_gpte(pdpte[i]) &&
                    (pdpte[i] & vcpu->arch.mmu.rsvd_bits_mask[0][2])) {
                        ret = 0;
                        goto out;
                }
        }
        ret = 1;

        memcpy(vcpu->arch.pdptrs, pdpte, sizeof(vcpu->arch.pdptrs));
        __set_bit(VCPU_EXREG_PDPTR,
                  (unsigned long *)&vcpu->arch.regs_avail);
        __set_bit(VCPU_EXREG_PDPTR,
                  (unsigned long *)&vcpu->arch.regs_dirty);
out:
        return ret;
}
EXPORT_SYMBOL_GPL(load_pdptrs);

static bool pdptrs_changed(struct kvm_vcpu *vcpu)
{
        u64 pdpte[ARRAY_SIZE(vcpu->arch.pdptrs)];
        bool changed = true;
        int r;

        if (is_long_mode(vcpu) || !is_pae(vcpu))
                return false;

        if (!test_bit(VCPU_EXREG_PDPTR,
                      (unsigned long *)&vcpu->arch.regs_avail))
                return true;

        r = kvm_read_guest(vcpu->kvm, vcpu->arch.cr3 & ~31u, pdpte, sizeof(pdpte));
        if (r < 0)
                goto out;
        changed = memcmp(pdpte, vcpu->arch.pdptrs, sizeof(pdpte)) != 0;
out:
        return changed;
}

int kvm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
{
        unsigned long old_cr0 = kvm_read_cr0(vcpu);
        unsigned long update_bits = X86_CR0_PG | X86_CR0_WP |
                                    X86_CR0_CD | X86_CR0_NW;

        cr0 |= X86_CR0_ET;

#ifdef CONFIG_X86_64
        if (cr0 & 0xffffffff00000000UL)
                return 1;
#endif

        cr0 &= ~CR0_RESERVED_BITS;

        if ((cr0 & X86_CR0_NW) && !(cr0 & X86_CR0_CD))
                return 1;

        if ((cr0 & X86_CR0_PG) && !(cr0 & X86_CR0_PE))
                return 1;

        if (!is_paging(vcpu) && (cr0 & X86_CR0_PG)) {
#ifdef CONFIG_X86_64
                if ((vcpu->arch.efer & EFER_LME)) {
                        int cs_db, cs_l;

                        if (!is_pae(vcpu))
                                return 1;
                        kvm_x86_ops->get_cs_db_l_bits(vcpu, &cs_db, &cs_l);
                        if (cs_l)
                                return 1;
                } else
#endif
                if (is_pae(vcpu) && !load_pdptrs(vcpu, vcpu->arch.cr3))
                        return 1;
        }

        kvm_x86_ops->set_cr0(vcpu, cr0);

        if ((cr0 ^ old_cr0) & update_bits)
                kvm_mmu_reset_context(vcpu);
        return 0;
}
EXPORT_SYMBOL_GPL(kvm_set_cr0);

void kvm_lmsw(struct kvm_vcpu *vcpu, unsigned long msw)
{
        (void)kvm_set_cr0(vcpu, kvm_read_cr0_bits(vcpu, ~0x0eul) | (msw & 0x0f));
}
EXPORT_SYMBOL_GPL(kvm_lmsw);

int __kvm_set_xcr(struct kvm_vcpu *vcpu, u32 index, u64 xcr)
{
        u64 xcr0;

        /* Only support XCR_XFEATURE_ENABLED_MASK(xcr0) now */
        if (index != XCR_XFEATURE_ENABLED_MASK)
                return 1;
        xcr0 = xcr;
        if (kvm_x86_ops->get_cpl(vcpu) != 0)
                return 1;
        if (!(xcr0 & XSTATE_FP))
                return 1;
        if ((xcr0 & XSTATE_YMM) && !(xcr0 & XSTATE_SSE))
                return 1;
        if (xcr0 & ~host_xcr0)
                return 1;
        vcpu->arch.xcr0 = xcr0;
        vcpu->guest_xcr0_loaded = 0;
        return 0;
}
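
/*
 * For example (assuming the host's xcr0 allows the bits): xcr = 0x1
 * enables x87 only, 0x3 adds SSE, and 0x7 adds AVX (YMM) on top; 0x5 is
 * rejected above because YMM requires SSE, and 0x6 because XSTATE_FP
 * must always be set.
 */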

int kvm_set_xcr(struct kvm_vcpu *vcpu, u32 index, u64 xcr)
{
        if (__kvm_set_xcr(vcpu, index, xcr)) {
                kvm_inject_gp(vcpu, 0);
                return 1;
        }
        return 0;
}
EXPORT_SYMBOL_GPL(kvm_set_xcr);

static bool guest_cpuid_has_xsave(struct kvm_vcpu *vcpu)
{
        struct kvm_cpuid_entry2 *best;

        best = kvm_find_cpuid_entry(vcpu, 1, 0);
        return best && (best->ecx & bit(X86_FEATURE_XSAVE));
}

static void update_cpuid(struct kvm_vcpu *vcpu)
{
        struct kvm_cpuid_entry2 *best;

        best = kvm_find_cpuid_entry(vcpu, 1, 0);
        if (!best)
                return;

        /* Update OSXSAVE bit */
        if (cpu_has_xsave && best->function == 0x1) {
                best->ecx &= ~(bit(X86_FEATURE_OSXSAVE));
                if (kvm_read_cr4_bits(vcpu, X86_CR4_OSXSAVE))
                        best->ecx |= bit(X86_FEATURE_OSXSAVE);
        }
}

int kvm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
{
        unsigned long old_cr4 = kvm_read_cr4(vcpu);
        unsigned long pdptr_bits = X86_CR4_PGE | X86_CR4_PSE | X86_CR4_PAE;

        if (cr4 & CR4_RESERVED_BITS)
                return 1;

        if (!guest_cpuid_has_xsave(vcpu) && (cr4 & X86_CR4_OSXSAVE))
                return 1;

        if (is_long_mode(vcpu)) {
                if (!(cr4 & X86_CR4_PAE))
                        return 1;
        } else if (is_paging(vcpu) && (cr4 & X86_CR4_PAE)
                   && ((cr4 ^ old_cr4) & pdptr_bits)
                   && !load_pdptrs(vcpu, vcpu->arch.cr3))
                return 1;

        if (cr4 & X86_CR4_VMXE)
                return 1;

        kvm_x86_ops->set_cr4(vcpu, cr4);

        if ((cr4 ^ old_cr4) & pdptr_bits)
                kvm_mmu_reset_context(vcpu);

        if ((cr4 ^ old_cr4) & X86_CR4_OSXSAVE)
                update_cpuid(vcpu);

        return 0;
}
EXPORT_SYMBOL_GPL(kvm_set_cr4);

int kvm_set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3)
{
        if (cr3 == vcpu->arch.cr3 && !pdptrs_changed(vcpu)) {
                kvm_mmu_sync_roots(vcpu);
                kvm_mmu_flush_tlb(vcpu);
                return 0;
        }

        if (is_long_mode(vcpu)) {
                if (cr3 & CR3_L_MODE_RESERVED_BITS)
                        return 1;
        } else {
                if (is_pae(vcpu)) {
                        if (cr3 & CR3_PAE_RESERVED_BITS)
                                return 1;
                        if (is_paging(vcpu) && !load_pdptrs(vcpu, cr3))
                                return 1;
                }
                /*
                 * We don't check reserved bits in nonpae mode, because
                 * this isn't enforced, and VMware depends on this.
                 */
        }

        /*
         * Does the new cr3 value map to physical memory? (Note, we
         * catch an invalid cr3 even in real-mode, because it would
         * cause trouble later on when we turn on paging anyway.)
         *
         * A real CPU would silently accept an invalid cr3 and would
         * attempt to use it - with largely undefined (and often hard
         * to debug) behavior on the guest side.
         */
        if (unlikely(!gfn_to_memslot(vcpu->kvm, cr3 >> PAGE_SHIFT)))
                return 1;
        vcpu->arch.cr3 = cr3;
        vcpu->arch.mmu.new_cr3(vcpu);
        return 0;
}
EXPORT_SYMBOL_GPL(kvm_set_cr3);

int __kvm_set_cr8(struct kvm_vcpu *vcpu, unsigned long cr8)
{
        if (cr8 & CR8_RESERVED_BITS)
                return 1;
        if (irqchip_in_kernel(vcpu->kvm))
                kvm_lapic_set_tpr(vcpu, cr8);
        else
                vcpu->arch.cr8 = cr8;
        return 0;
}

void kvm_set_cr8(struct kvm_vcpu *vcpu, unsigned long cr8)
{
        if (__kvm_set_cr8(vcpu, cr8))
                kvm_inject_gp(vcpu, 0);
}
EXPORT_SYMBOL_GPL(kvm_set_cr8);

unsigned long kvm_get_cr8(struct kvm_vcpu *vcpu)
{
        if (irqchip_in_kernel(vcpu->kvm))
                return kvm_lapic_get_cr8(vcpu);
        else
                return vcpu->arch.cr8;
}
EXPORT_SYMBOL_GPL(kvm_get_cr8);

static int __kvm_set_dr(struct kvm_vcpu *vcpu, int dr, unsigned long val)
{
        switch (dr) {
        case 0 ... 3:
                vcpu->arch.db[dr] = val;
                if (!(vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP))
                        vcpu->arch.eff_db[dr] = val;
                break;
        case 4:
                if (kvm_read_cr4_bits(vcpu, X86_CR4_DE))
                        return 1; /* #UD */
                /* fall through */
        case 6:
                if (val & 0xffffffff00000000ULL)
                        return -1; /* #GP */
                vcpu->arch.dr6 = (val & DR6_VOLATILE) | DR6_FIXED_1;
                break;
        case 5:
                if (kvm_read_cr4_bits(vcpu, X86_CR4_DE))
                        return 1; /* #UD */
                /* fall through */
        default: /* 7 */
                if (val & 0xffffffff00000000ULL)
                        return -1; /* #GP */
                vcpu->arch.dr7 = (val & DR7_VOLATILE) | DR7_FIXED_1;
                if (!(vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP)) {
                        kvm_x86_ops->set_dr7(vcpu, vcpu->arch.dr7);
                        vcpu->arch.switch_db_regs = (val & DR7_BP_EN_MASK);
                }
                break;
        }

        return 0;
}
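
/*
 * Return convention used by __kvm_set_dr above: 0 on success, 1 when the
 * access should raise #UD (DR4/DR5 with CR4.DE set) and -1 when it should
 * raise #GP (reserved high bits set); kvm_set_dr() below translates these
 * into the corresponding injected exceptions.
 */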

int kvm_set_dr(struct kvm_vcpu *vcpu, int dr, unsigned long val)
{
        int res;

        res = __kvm_set_dr(vcpu, dr, val);
        if (res > 0)
                kvm_queue_exception(vcpu, UD_VECTOR);
        else if (res < 0)
                kvm_inject_gp(vcpu, 0);

        return res;
}
EXPORT_SYMBOL_GPL(kvm_set_dr);

static int _kvm_get_dr(struct kvm_vcpu *vcpu, int dr, unsigned long *val)
{
        switch (dr) {
        case 0 ... 3:
                *val = vcpu->arch.db[dr];
                break;
        case 4:
                if (kvm_read_cr4_bits(vcpu, X86_CR4_DE))
                        return 1;
                /* fall through */
        case 6:
                *val = vcpu->arch.dr6;
                break;
        case 5:
                if (kvm_read_cr4_bits(vcpu, X86_CR4_DE))
                        return 1;
                /* fall through */
        default: /* 7 */
                *val = vcpu->arch.dr7;
                break;
        }

        return 0;
}

int kvm_get_dr(struct kvm_vcpu *vcpu, int dr, unsigned long *val)
{
        if (_kvm_get_dr(vcpu, dr, val)) {
                kvm_queue_exception(vcpu, UD_VECTOR);
                return 1;
        }
        return 0;
}
EXPORT_SYMBOL_GPL(kvm_get_dr);

/*
 * List of msr numbers which we expose to userspace through KVM_GET_MSRS
 * and KVM_SET_MSRS, and KVM_GET_MSR_INDEX_LIST.
 *
 * This list is modified at module load time to reflect the
 * capabilities of the host cpu.  The capability test skips MSRs that are
 * kvm-specific; those are placed at the beginning of the list.
 */

#define KVM_SAVE_MSRS_BEGIN     7
static u32 msrs_to_save[] = {
        MSR_KVM_SYSTEM_TIME, MSR_KVM_WALL_CLOCK,
        MSR_KVM_SYSTEM_TIME_NEW, MSR_KVM_WALL_CLOCK_NEW,
        HV_X64_MSR_GUEST_OS_ID, HV_X64_MSR_HYPERCALL,
        HV_X64_MSR_APIC_ASSIST_PAGE,
        MSR_IA32_SYSENTER_CS, MSR_IA32_SYSENTER_ESP, MSR_IA32_SYSENTER_EIP,
        MSR_K6_STAR,
#ifdef CONFIG_X86_64
        MSR_CSTAR, MSR_KERNEL_GS_BASE, MSR_SYSCALL_MASK, MSR_LSTAR,
#endif
        MSR_IA32_TSC, MSR_IA32_PERF_STATUS, MSR_IA32_CR_PAT, MSR_VM_HSAVE_PA
};

static unsigned num_msrs_to_save;

static u32 emulated_msrs[] = {
        MSR_IA32_MISC_ENABLE,
};

static int set_efer(struct kvm_vcpu *vcpu, u64 efer)
{
        u64 old_efer = vcpu->arch.efer;

        if (efer & efer_reserved_bits)
                return 1;

        if (is_paging(vcpu)
            && (vcpu->arch.efer & EFER_LME) != (efer & EFER_LME))
                return 1;

        if (efer & EFER_FFXSR) {
                struct kvm_cpuid_entry2 *feat;

                feat = kvm_find_cpuid_entry(vcpu, 0x80000001, 0);
                if (!feat || !(feat->edx & bit(X86_FEATURE_FXSR_OPT)))
                        return 1;
        }

        if (efer & EFER_SVME) {
                struct kvm_cpuid_entry2 *feat;

                feat = kvm_find_cpuid_entry(vcpu, 0x80000001, 0);
                if (!feat || !(feat->ecx & bit(X86_FEATURE_SVM)))
                        return 1;
        }

        efer &= ~EFER_LMA;
        efer |= vcpu->arch.efer & EFER_LMA;

        kvm_x86_ops->set_efer(vcpu, efer);

        vcpu->arch.mmu.base_role.nxe = (efer & EFER_NX) && !tdp_enabled;

        /* Update reserved bits; NX is the only EFER bit the mmu context
         * depends on, so a single conditional reset suffices */
        if ((efer ^ old_efer) & EFER_NX)
                kvm_mmu_reset_context(vcpu);

        return 0;
}

void kvm_enable_efer_bits(u64 mask)
{
        efer_reserved_bits &= ~mask;
}
EXPORT_SYMBOL_GPL(kvm_enable_efer_bits);

/*
 * Writes msr value into the appropriate "register".
 * Returns 0 on success, non-0 otherwise.
 * Assumes vcpu_load() was already called.
 */
int kvm_set_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 data)
{
        return kvm_x86_ops->set_msr(vcpu, msr_index, data);
}

/*
 * Adapt set_msr() to msr_io()'s calling convention
 */
static int do_set_msr(struct kvm_vcpu *vcpu, unsigned index, u64 *data)
{
        return kvm_set_msr(vcpu, index, *data);
}

static void kvm_write_wall_clock(struct kvm *kvm, gpa_t wall_clock)
{
        int version;
        int r;
        struct pvclock_wall_clock wc;
        struct timespec boot;

        if (!wall_clock)
                return;

        r = kvm_read_guest(kvm, wall_clock, &version, sizeof(version));
        if (r)
                return;

        if (version & 1)
                ++version;  /* first time write, random junk */

        ++version;

        kvm_write_guest(kvm, wall_clock, &version, sizeof(version));

        /*
         * The guest calculates current wall clock time by adding
         * system time (updated by kvm_write_guest_time below) to the
         * wall clock specified here.  guest system time equals host
         * system time for us, thus we must fill in host boot time here.
         */
        getboottime(&boot);

        wc.sec = boot.tv_sec;
        wc.nsec = boot.tv_nsec;
        wc.version = version;

        kvm_write_guest(kvm, wall_clock, &wc, sizeof(wc));

        version++;
        kvm_write_guest(kvm, wall_clock, &version, sizeof(version));
}
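
/*
 * The version field above implements a tiny seqlock against the guest.
 * A guest reading the wall clock is expected to do, roughly:
 *
 *	do {
 *		ver = wc->version;
 *		rmb();
 *		sec = wc->sec;
 *		nsec = wc->nsec;
 *		rmb();
 *	} while ((ver & 1) || wc->version != ver);
 *
 * which is why the writer bumps version to an odd value before updating
 * the payload and back to an even value afterwards.
 */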

static uint32_t div_frac(uint32_t dividend, uint32_t divisor)
{
        uint32_t quotient, remainder;

        /* Don't try to replace with do_div(), this one calculates
         * "(dividend << 32) / divisor" */
        __asm__ ( "divl %4"
                  : "=a" (quotient), "=d" (remainder)
                  : "0" (0), "1" (dividend), "r" (divisor) );
        return quotient;
}

static void kvm_set_time_scale(uint32_t tsc_khz, struct pvclock_vcpu_time_info *hv_clock)
{
        uint64_t nsecs = 1000000000LL;
        int32_t  shift = 0;
        uint64_t tps64;
        uint32_t tps32;

        tps64 = tsc_khz * 1000LL;
        while (tps64 > nsecs*2) {
                tps64 >>= 1;
                shift--;
        }

        tps32 = (uint32_t)tps64;
        while (tps32 <= (uint32_t)nsecs) {
                tps32 <<= 1;
                shift++;
        }

        hv_clock->tsc_shift = shift;
        hv_clock->tsc_to_system_mul = div_frac(nsecs, tps32);

        pr_debug("%s: tsc_khz %u, tsc_shift %d, tsc_mul %u\n",
                 __func__, tsc_khz, hv_clock->tsc_shift,
                 hv_clock->tsc_to_system_mul);
}
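
/*
 * Worked example for the scaling above: with tsc_khz = 2000000 (a 2 GHz
 * TSC) neither loop runs, so tsc_shift = 0 and tsc_to_system_mul =
 * (10^9 << 32) / (2 * 10^9) = 0x80000000; the guest's
 * ns = (cycles * mul) >> 32 then yields cycles / 2, i.e. 0.5 ns per
 * cycle, as expected.
 */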

static DEFINE_PER_CPU(unsigned long, cpu_tsc_khz);

static void kvm_write_guest_time(struct kvm_vcpu *v)
{
        struct timespec ts;
        unsigned long flags;
        struct kvm_vcpu_arch *vcpu = &v->arch;
        void *shared_kaddr;
        unsigned long this_tsc_khz;

        if (!vcpu->time_page)
                return;

        this_tsc_khz = get_cpu_var(cpu_tsc_khz);
        if (unlikely(vcpu->hv_clock_tsc_khz != this_tsc_khz)) {
                kvm_set_time_scale(this_tsc_khz, &vcpu->hv_clock);
                vcpu->hv_clock_tsc_khz = this_tsc_khz;
        }
        put_cpu_var(cpu_tsc_khz);

        /* Keep irq disabled to prevent changes to the clock */
        local_irq_save(flags);
        kvm_get_msr(v, MSR_IA32_TSC, &vcpu->hv_clock.tsc_timestamp);
        ktime_get_ts(&ts);
        monotonic_to_bootbased(&ts);
        local_irq_restore(flags);

        /* With all the info we got, fill in the values */

        vcpu->hv_clock.system_time = ts.tv_nsec +
                                     (NSEC_PER_SEC * (u64)ts.tv_sec) + v->kvm->arch.kvmclock_offset;

        vcpu->hv_clock.flags = 0;

        /*
         * The interface expects us to write an even number signaling that the
         * update is finished. Since the guest won't see the intermediate
         * state, we just increase by 2 at the end.
         */
        vcpu->hv_clock.version += 2;

        shared_kaddr = kmap_atomic(vcpu->time_page, KM_USER0);

        memcpy(shared_kaddr + vcpu->time_offset, &vcpu->hv_clock,
               sizeof(vcpu->hv_clock));

        kunmap_atomic(shared_kaddr, KM_USER0);

        mark_page_dirty(v->kvm, vcpu->time >> PAGE_SHIFT);
}
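
/*
 * With the fields written above, the guest computes the current time per
 * the pvclock ABI, roughly:
 *
 *	delta = rdtsc() - tsc_timestamp;	(shifted by tsc_shift)
 *	now_ns = system_time + ((delta * tsc_to_system_mul) >> 32);
 *
 * so a host-side change of TSC frequency only requires refreshing the
 * shift/mul pair, as done at the top of kvm_write_guest_time().
 */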

static int kvm_request_guest_time_update(struct kvm_vcpu *v)
{
        struct kvm_vcpu_arch *vcpu = &v->arch;

        if (!vcpu->time_page)
                return 0;
        set_bit(KVM_REQ_KVMCLOCK_UPDATE, &v->requests);
        return 1;
}

static bool msr_mtrr_valid(unsigned msr)
{
        switch (msr) {
        case 0x200 ... 0x200 + 2 * KVM_NR_VAR_MTRR - 1:
        case MSR_MTRRfix64K_00000:
        case MSR_MTRRfix16K_80000:
        case MSR_MTRRfix16K_A0000:
        case MSR_MTRRfix4K_C0000:
        case MSR_MTRRfix4K_C8000:
        case MSR_MTRRfix4K_D0000:
        case MSR_MTRRfix4K_D8000:
        case MSR_MTRRfix4K_E0000:
        case MSR_MTRRfix4K_E8000:
        case MSR_MTRRfix4K_F0000:
        case MSR_MTRRfix4K_F8000:
        case MSR_MTRRdefType:
        case MSR_IA32_CR_PAT:
                return true;
        case 0x2f8:
                return true;
        }
        return false;
}

static bool valid_pat_type(unsigned t)
{
        return t < 8 && (1 << t) & 0xf3; /* 0, 1, 4, 5, 6, 7 */
}

static bool valid_mtrr_type(unsigned t)
{
        return t < 8 && (1 << t) & 0x73; /* 0, 1, 4, 5, 6 */
}

static bool mtrr_valid(struct kvm_vcpu *vcpu, u32 msr, u64 data)
{
        int i;

        if (!msr_mtrr_valid(msr))
                return false;

        if (msr == MSR_IA32_CR_PAT) {
                for (i = 0; i < 8; i++)
                        if (!valid_pat_type((data >> (i * 8)) & 0xff))
                                return false;
                return true;
        } else if (msr == MSR_MTRRdefType) {
                if (data & ~0xcff)
                        return false;
                return valid_mtrr_type(data & 0xff);
        } else if (msr >= MSR_MTRRfix64K_00000 && msr <= MSR_MTRRfix4K_F8000) {
                for (i = 0; i < 8; i++)
                        if (!valid_mtrr_type((data >> (i * 8)) & 0xff))
                                return false;
                return true;
        }

        /* variable MTRRs */
        return valid_mtrr_type(data & 0xff);
}

static int set_msr_mtrr(struct kvm_vcpu *vcpu, u32 msr, u64 data)
{
        u64 *p = (u64 *)&vcpu->arch.mtrr_state.fixed_ranges;

        if (!mtrr_valid(vcpu, msr, data))
                return 1;

        if (msr == MSR_MTRRdefType) {
                vcpu->arch.mtrr_state.def_type = data;
                vcpu->arch.mtrr_state.enabled = (data & 0xc00) >> 10;
        } else if (msr == MSR_MTRRfix64K_00000)
                p[0] = data;
        else if (msr == MSR_MTRRfix16K_80000 || msr == MSR_MTRRfix16K_A0000)
                p[1 + msr - MSR_MTRRfix16K_80000] = data;
        else if (msr >= MSR_MTRRfix4K_C0000 && msr <= MSR_MTRRfix4K_F8000)
                p[3 + msr - MSR_MTRRfix4K_C0000] = data;
        else if (msr == MSR_IA32_CR_PAT)
                vcpu->arch.pat = data;
        else {  /* Variable MTRRs */
                int idx, is_mtrr_mask;
                u64 *pt;

                idx = (msr - 0x200) / 2;
                is_mtrr_mask = msr - 0x200 - 2 * idx;
                if (!is_mtrr_mask)
                        pt = (u64 *)&vcpu->arch.mtrr_state.var_ranges[idx].base_lo;
                else
                        pt = (u64 *)&vcpu->arch.mtrr_state.var_ranges[idx].mask_lo;
                *pt = data;
        }

        kvm_mmu_reset_context(vcpu);
        return 0;
}
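
/*
 * Layout assumed by the idx/is_mtrr_mask arithmetic above: the variable
 * MTRRs live at 0x200 + 2*n (MTRRphysBasen) and 0x200 + 2*n + 1
 * (MTRRphysMaskn), so e.g. msr 0x203 decodes to idx = 1 with
 * is_mtrr_mask = 1, i.e. MTRRphysMask1.
 */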

static int set_msr_mce(struct kvm_vcpu *vcpu, u32 msr, u64 data)
{
        u64 mcg_cap = vcpu->arch.mcg_cap;
        unsigned bank_num = mcg_cap & 0xff;

        switch (msr) {
        case MSR_IA32_MCG_STATUS:
                vcpu->arch.mcg_status = data;
                break;
        case MSR_IA32_MCG_CTL:
                if (!(mcg_cap & MCG_CTL_P))
                        return 1;
                if (data != 0 && data != ~(u64)0)
                        return -1;
                vcpu->arch.mcg_ctl = data;
                break;
        default:
                if (msr >= MSR_IA32_MC0_CTL &&
                    msr < MSR_IA32_MC0_CTL + 4 * bank_num) {
                        u32 offset = msr - MSR_IA32_MC0_CTL;
                        /* Only 0 or all 1s can be written to IA32_MCi_CTL.
                         * Some Linux kernels, though, clear bit 10 in bank 4
                         * to work around a BIOS/GART TBL issue on AMD K8s;
                         * ignore this so as to avoid an uncaught #GP in
                         * the guest.
                         */
                        if ((offset & 0x3) == 0 &&
                            data != 0 && (data | (1 << 10)) != ~(u64)0)
                                return -1;
                        vcpu->arch.mce_banks[offset] = data;
                        break;
                }
                return 1;
        }
        return 0;
}

static int xen_hvm_config(struct kvm_vcpu *vcpu, u64 data)
{
        struct kvm *kvm = vcpu->kvm;
        int lm = is_long_mode(vcpu);
        u8 *blob_addr = lm ? (u8 *)(long)kvm->arch.xen_hvm_config.blob_addr_64
                : (u8 *)(long)kvm->arch.xen_hvm_config.blob_addr_32;
        u8 blob_size = lm ? kvm->arch.xen_hvm_config.blob_size_64
                : kvm->arch.xen_hvm_config.blob_size_32;
        u32 page_num = data & ~PAGE_MASK;
        u64 page_addr = data & PAGE_MASK;
        u8 *page;
        int r;

        r = -E2BIG;
        if (page_num >= blob_size)
                goto out;
        r = -ENOMEM;
        page = kzalloc(PAGE_SIZE, GFP_KERNEL);
        if (!page)
                goto out;
        r = -EFAULT;
        if (copy_from_user(page, blob_addr + (page_num * PAGE_SIZE), PAGE_SIZE))
                goto out_free;
        if (kvm_write_guest(kvm, page_addr, page, PAGE_SIZE))
                goto out_free;
        r = 0;
out_free:
        kfree(page);
out:
        return r;
}
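
/*
 * Encoding handled above: the guest packs a blob page number into the
 * low bits and a destination address into the page-aligned bits of a
 * single MSR write, so e.g. data = 0xfee01002 asks for blob page 2 to
 * be copied to guest physical address 0xfee01000.
 */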

static bool kvm_hv_hypercall_enabled(struct kvm *kvm)
{
        return kvm->arch.hv_hypercall & HV_X64_MSR_HYPERCALL_ENABLE;
}

static bool kvm_hv_msr_partition_wide(u32 msr)
{
        bool r = false;

        switch (msr) {
        case HV_X64_MSR_GUEST_OS_ID:
        case HV_X64_MSR_HYPERCALL:
                r = true;
                break;
        }

        return r;
}

static int set_msr_hyperv_pw(struct kvm_vcpu *vcpu, u32 msr, u64 data)
{
        struct kvm *kvm = vcpu->kvm;

        switch (msr) {
        case HV_X64_MSR_GUEST_OS_ID:
                kvm->arch.hv_guest_os_id = data;
                /* setting guest os id to zero disables hypercall page */
                if (!kvm->arch.hv_guest_os_id)
                        kvm->arch.hv_hypercall &= ~HV_X64_MSR_HYPERCALL_ENABLE;
                break;
        case HV_X64_MSR_HYPERCALL: {
                u64 gfn;
                unsigned long addr;
                u8 instructions[4];

                /* if guest os id is not set hypercall should remain disabled */
                if (!kvm->arch.hv_guest_os_id)
                        break;
                if (!(data & HV_X64_MSR_HYPERCALL_ENABLE)) {
                        kvm->arch.hv_hypercall = data;
                        break;
                }
                gfn = data >> HV_X64_MSR_HYPERCALL_PAGE_ADDRESS_SHIFT;
                addr = gfn_to_hva(kvm, gfn);
                if (kvm_is_error_hva(addr))
                        return 1;
                kvm_x86_ops->patch_hypercall(vcpu, instructions);
                ((unsigned char *)instructions)[3] = 0xc3; /* ret */
                if (copy_to_user((void __user *)addr, instructions, 4))
                        return 1;
                kvm->arch.hv_hypercall = data;
                break;
        }
        default:
                pr_unimpl(vcpu, "HYPER-V unimplemented wrmsr: 0x%x "
                          "data 0x%llx\n", msr, data);
                return 1;
        }
        return 0;
}

static int set_msr_hyperv(struct kvm_vcpu *vcpu, u32 msr, u64 data)
{
        switch (msr) {
        case HV_X64_MSR_APIC_ASSIST_PAGE: {
                unsigned long addr;

                if (!(data & HV_X64_MSR_APIC_ASSIST_PAGE_ENABLE)) {
                        vcpu->arch.hv_vapic = data;
                        break;
                }
                addr = gfn_to_hva(vcpu->kvm, data >>
                                  HV_X64_MSR_APIC_ASSIST_PAGE_ADDRESS_SHIFT);
                if (kvm_is_error_hva(addr))
                        return 1;
                if (clear_user((void __user *)addr, PAGE_SIZE))
                        return 1;
                vcpu->arch.hv_vapic = data;
                break;
        }
        case HV_X64_MSR_EOI:
                return kvm_hv_vapic_msr_write(vcpu, APIC_EOI, data);
        case HV_X64_MSR_ICR:
                return kvm_hv_vapic_msr_write(vcpu, APIC_ICR, data);
        case HV_X64_MSR_TPR:
                return kvm_hv_vapic_msr_write(vcpu, APIC_TASKPRI, data);
        default:
                pr_unimpl(vcpu, "HYPER-V unimplemented wrmsr: 0x%x "
                          "data 0x%llx\n", msr, data);
                return 1;
        }

        return 0;
}

int kvm_set_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 data)
{
        switch (msr) {
        case MSR_EFER:
                return set_efer(vcpu, data);
        case MSR_K7_HWCR:
                data &= ~(u64)0x40;     /* ignore flush filter disable */
                data &= ~(u64)0x100;    /* ignore ignne emulation enable */
                if (data != 0) {
                        pr_unimpl(vcpu, "unimplemented HWCR wrmsr: 0x%llx\n",
                                data);
                        return 1;
                }
                break;
        case MSR_FAM10H_MMIO_CONF_BASE:
                if (data != 0) {
                        pr_unimpl(vcpu, "unimplemented MMIO_CONF_BASE wrmsr: "
                                "0x%llx\n", data);
                        return 1;
                }
                break;
        case MSR_AMD64_NB_CFG:
                break;
        case MSR_IA32_DEBUGCTLMSR:
                if (!data) {
                        /* We support the non-activated case already */
                        break;
                } else if (data & ~(DEBUGCTLMSR_LBR | DEBUGCTLMSR_BTF)) {
                        /* Values other than LBR and BTF are vendor-specific,
                           thus reserved and should throw a #GP */
                        return 1;
                }
                pr_unimpl(vcpu, "%s: MSR_IA32_DEBUGCTLMSR 0x%llx, nop\n",
                        __func__, data);
                break;
        case MSR_IA32_UCODE_REV:
        case MSR_IA32_UCODE_WRITE:
        case MSR_VM_HSAVE_PA:
        case MSR_AMD64_PATCH_LOADER:
                break;
        case 0x200 ... 0x2ff:
                return set_msr_mtrr(vcpu, msr, data);
        case MSR_IA32_APICBASE:
                kvm_set_apic_base(vcpu, data);
                break;
        case APIC_BASE_MSR ... APIC_BASE_MSR + 0x3ff:
                return kvm_x2apic_msr_write(vcpu, msr, data);
        case MSR_IA32_MISC_ENABLE:
                vcpu->arch.ia32_misc_enable_msr = data;
                break;
        case MSR_KVM_WALL_CLOCK_NEW:
        case MSR_KVM_WALL_CLOCK:
                vcpu->kvm->arch.wall_clock = data;
                kvm_write_wall_clock(vcpu->kvm, data);
                break;
        case MSR_KVM_SYSTEM_TIME_NEW:
        case MSR_KVM_SYSTEM_TIME: {
                if (vcpu->arch.time_page) {
                        kvm_release_page_dirty(vcpu->arch.time_page);
                        vcpu->arch.time_page = NULL;
                }

                vcpu->arch.time = data;

                /* we verify if the enable bit is set... */
                if (!(data & 1))
                        break;

                /* ...but clean it before doing the actual write */
                vcpu->arch.time_offset = data & ~(PAGE_MASK | 1);

                vcpu->arch.time_page =
                                gfn_to_page(vcpu->kvm, data >> PAGE_SHIFT);

                if (is_error_page(vcpu->arch.time_page)) {
                        kvm_release_page_clean(vcpu->arch.time_page);
                        vcpu->arch.time_page = NULL;
                }

                kvm_request_guest_time_update(vcpu);
                break;
        }
        case MSR_IA32_MCG_CTL:
        case MSR_IA32_MCG_STATUS:
        case MSR_IA32_MC0_CTL ... MSR_IA32_MC0_CTL + 4 * KVM_MAX_MCE_BANKS - 1:
                return set_msr_mce(vcpu, msr, data);

        /* Performance counters are not protected by a CPUID bit,
         * so we should check all of them in the generic path for the sake of
         * cross vendor migration.
         * Writing a zero into the event select MSRs disables them,
         * which we perfectly emulate ;-).  Any other value should be at least
         * reported; some guests depend on them.
         */
        case MSR_P6_EVNTSEL0:
        case MSR_P6_EVNTSEL1:
        case MSR_K7_EVNTSEL0:
        case MSR_K7_EVNTSEL1:
        case MSR_K7_EVNTSEL2:
        case MSR_K7_EVNTSEL3:
                if (data != 0)
                        pr_unimpl(vcpu, "unimplemented perfctr wrmsr: "
                                "0x%x data 0x%llx\n", msr, data);
                break;
        /* at least RHEL 4 unconditionally writes to the perfctr registers,
         * so we ignore writes to make it happy.
         */
        case MSR_P6_PERFCTR0:
        case MSR_P6_PERFCTR1:
        case MSR_K7_PERFCTR0:
        case MSR_K7_PERFCTR1:
        case MSR_K7_PERFCTR2:
        case MSR_K7_PERFCTR3:
                pr_unimpl(vcpu, "unimplemented perfctr wrmsr: "
                        "0x%x data 0x%llx\n", msr, data);
                break;
        case HV_X64_MSR_GUEST_OS_ID ... HV_X64_MSR_SINT15:
                if (kvm_hv_msr_partition_wide(msr)) {
                        int r;
                        mutex_lock(&vcpu->kvm->lock);
                        r = set_msr_hyperv_pw(vcpu, msr, data);
                        mutex_unlock(&vcpu->kvm->lock);
                        return r;
                } else
                        return set_msr_hyperv(vcpu, msr, data);
        default:
                if (msr && (msr == vcpu->kvm->arch.xen_hvm_config.msr))
                        return xen_hvm_config(vcpu, data);
                if (!ignore_msrs) {
                        pr_unimpl(vcpu, "unhandled wrmsr: 0x%x data %llx\n",
                                msr, data);
                        return 1;
                } else {
                        pr_unimpl(vcpu, "ignored wrmsr: 0x%x data %llx\n",
                                msr, data);
                        break;
                }
        }
        return 0;
}
EXPORT_SYMBOL_GPL(kvm_set_msr_common);

/*
 * Reads an msr value (of 'msr_index') into 'pdata'.
 * Returns 0 on success, non-0 otherwise.
 * Assumes vcpu_load() was already called.
 */
int kvm_get_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 *pdata)
{
        return kvm_x86_ops->get_msr(vcpu, msr_index, pdata);
}

static int get_msr_mtrr(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata)
{
        u64 *p = (u64 *)&vcpu->arch.mtrr_state.fixed_ranges;

        if (!msr_mtrr_valid(msr))
                return 1;

        if (msr == MSR_MTRRdefType)
                *pdata = vcpu->arch.mtrr_state.def_type +
                         (vcpu->arch.mtrr_state.enabled << 10);
        else if (msr == MSR_MTRRfix64K_00000)
                *pdata = p[0];
        else if (msr == MSR_MTRRfix16K_80000 || msr == MSR_MTRRfix16K_A0000)
                *pdata = p[1 + msr - MSR_MTRRfix16K_80000];
        else if (msr >= MSR_MTRRfix4K_C0000 && msr <= MSR_MTRRfix4K_F8000)
                *pdata = p[3 + msr - MSR_MTRRfix4K_C0000];
        else if (msr == MSR_IA32_CR_PAT)
                *pdata = vcpu->arch.pat;
        else {  /* Variable MTRRs */
                int idx, is_mtrr_mask;
                u64 *pt;

                idx = (msr - 0x200) / 2;
                is_mtrr_mask = msr - 0x200 - 2 * idx;
                if (!is_mtrr_mask)
                        pt = (u64 *)&vcpu->arch.mtrr_state.var_ranges[idx].base_lo;
                else
                        pt = (u64 *)&vcpu->arch.mtrr_state.var_ranges[idx].mask_lo;
                *pdata = *pt;
        }

        return 0;
}

static int get_msr_mce(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata)
{
        u64 data;
        u64 mcg_cap = vcpu->arch.mcg_cap;
        unsigned bank_num = mcg_cap & 0xff;

        switch (msr) {
        case MSR_IA32_P5_MC_ADDR:
        case MSR_IA32_P5_MC_TYPE:
                data = 0;
                break;
        case MSR_IA32_MCG_CAP:
                data = vcpu->arch.mcg_cap;
                break;
        case MSR_IA32_MCG_CTL:
                if (!(mcg_cap & MCG_CTL_P))
                        return 1;
                data = vcpu->arch.mcg_ctl;
                break;
        case MSR_IA32_MCG_STATUS:
                data = vcpu->arch.mcg_status;
                break;
        default:
                if (msr >= MSR_IA32_MC0_CTL &&
                    msr < MSR_IA32_MC0_CTL + 4 * bank_num) {
                        u32 offset = msr - MSR_IA32_MC0_CTL;
                        data = vcpu->arch.mce_banks[offset];
                        break;
                }
                return 1;
        }
        *pdata = data;
        return 0;
}

static int get_msr_hyperv_pw(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata)
{
        u64 data = 0;
        struct kvm *kvm = vcpu->kvm;

        switch (msr) {
        case HV_X64_MSR_GUEST_OS_ID:
                data = kvm->arch.hv_guest_os_id;
                break;
        case HV_X64_MSR_HYPERCALL:
                data = kvm->arch.hv_hypercall;
                break;
        default:
                pr_unimpl(vcpu, "Hyper-V unhandled rdmsr: 0x%x\n", msr);
                return 1;
        }

        *pdata = data;
        return 0;
}

static int get_msr_hyperv(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata)
{
        u64 data = 0;

        switch (msr) {
        case HV_X64_MSR_VP_INDEX: {
                int r;
                struct kvm_vcpu *v;
                kvm_for_each_vcpu(r, v, vcpu->kvm)
                        if (v == vcpu)
                                data = r;
                break;
        }
        case HV_X64_MSR_EOI:
                return kvm_hv_vapic_msr_read(vcpu, APIC_EOI, pdata);
        case HV_X64_MSR_ICR:
                return kvm_hv_vapic_msr_read(vcpu, APIC_ICR, pdata);
        case HV_X64_MSR_TPR:
                return kvm_hv_vapic_msr_read(vcpu, APIC_TASKPRI, pdata);
        default:
                pr_unimpl(vcpu, "Hyper-V unhandled rdmsr: 0x%x\n", msr);
                return 1;
        }
        *pdata = data;
        return 0;
}

int kvm_get_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata)
{
        u64 data;

        switch (msr) {
        case MSR_IA32_PLATFORM_ID:
        case MSR_IA32_UCODE_REV:
        case MSR_IA32_EBL_CR_POWERON:
        case MSR_IA32_DEBUGCTLMSR:
        case MSR_IA32_LASTBRANCHFROMIP:
        case MSR_IA32_LASTBRANCHTOIP:
        case MSR_IA32_LASTINTFROMIP:
        case MSR_IA32_LASTINTTOIP:
        case MSR_K8_SYSCFG:
        case MSR_K7_HWCR:
        case MSR_VM_HSAVE_PA:
        case MSR_P6_PERFCTR0:
        case MSR_P6_PERFCTR1:
        case MSR_P6_EVNTSEL0:
        case MSR_P6_EVNTSEL1:
        case MSR_K7_EVNTSEL0:
        case MSR_K7_PERFCTR0:
        case MSR_K8_INT_PENDING_MSG:
        case MSR_AMD64_NB_CFG:
        case MSR_FAM10H_MMIO_CONF_BASE:
                data = 0;
                break;
        case MSR_MTRRcap:
                data = 0x500 | KVM_NR_VAR_MTRR;
                break;
        case 0x200 ... 0x2ff:
                return get_msr_mtrr(vcpu, msr, pdata);
        case 0xcd: /* fsb frequency */
                data = 3;
                break;
        case MSR_IA32_APICBASE:
                data = kvm_get_apic_base(vcpu);
                break;
        case APIC_BASE_MSR ... APIC_BASE_MSR + 0x3ff:
                return kvm_x2apic_msr_read(vcpu, msr, pdata);
        case MSR_IA32_MISC_ENABLE:
                data = vcpu->arch.ia32_misc_enable_msr;
                break;
        case MSR_IA32_PERF_STATUS:
                /* TSC increment by tick */
                data = 1000ULL;
                /* CPU multiplier */
                data |= (((uint64_t)4ULL) << 40);
                break;
        case MSR_EFER:
                data = vcpu->arch.efer;
                break;
        case MSR_KVM_WALL_CLOCK:
        case MSR_KVM_WALL_CLOCK_NEW:
                data = vcpu->kvm->arch.wall_clock;
                break;
        case MSR_KVM_SYSTEM_TIME:
        case MSR_KVM_SYSTEM_TIME_NEW:
                data = vcpu->arch.time;
                break;
        case MSR_IA32_P5_MC_ADDR:
        case MSR_IA32_P5_MC_TYPE:
        case MSR_IA32_MCG_CAP:
        case MSR_IA32_MCG_CTL:
        case MSR_IA32_MCG_STATUS:
        case MSR_IA32_MC0_CTL ... MSR_IA32_MC0_CTL + 4 * KVM_MAX_MCE_BANKS - 1:
                return get_msr_mce(vcpu, msr, pdata);
        case HV_X64_MSR_GUEST_OS_ID ... HV_X64_MSR_SINT15:
                if (kvm_hv_msr_partition_wide(msr)) {
                        int r;
                        mutex_lock(&vcpu->kvm->lock);
                        r = get_msr_hyperv_pw(vcpu, msr, pdata);
                        mutex_unlock(&vcpu->kvm->lock);
                        return r;
                } else
                        return get_msr_hyperv(vcpu, msr, pdata);
        default:
                if (!ignore_msrs) {
                        pr_unimpl(vcpu, "unhandled rdmsr: 0x%x\n", msr);
                        return 1;
                } else {
                        pr_unimpl(vcpu, "ignored rdmsr: 0x%x\n", msr);
                        data = 0;
                }
                break;
        }
        *pdata = data;
        return 0;
}
EXPORT_SYMBOL_GPL(kvm_get_msr_common);
1580
1581 /*
1582  * Read or write a batch of MSRs.  All parameters are kernel addresses.
1583  *
1584  * @return number of MSRs processed successfully.
1585  */
1586 static int __msr_io(struct kvm_vcpu *vcpu, struct kvm_msrs *msrs,
1587                     struct kvm_msr_entry *entries,
1588                     int (*do_msr)(struct kvm_vcpu *vcpu,
1589                                   unsigned index, u64 *data))
1590 {
1591         int i, idx;
1592
1593         idx = srcu_read_lock(&vcpu->kvm->srcu);
1594         for (i = 0; i < msrs->nmsrs; ++i)
1595                 if (do_msr(vcpu, entries[i].index, &entries[i].data))
1596                         break;
1597         srcu_read_unlock(&vcpu->kvm->srcu, idx);
1598
1599         return i;
1600 }
1601
1602 /*
1603  * Read or write a batch of MSRs.  Parameters are user addresses.
1604  *
1605  * @return number of MSRs processed successfully, or negative errno.
1606  */
1607 static int msr_io(struct kvm_vcpu *vcpu, struct kvm_msrs __user *user_msrs,
1608                   int (*do_msr)(struct kvm_vcpu *vcpu,
1609                                 unsigned index, u64 *data),
1610                   int writeback)
1611 {
1612         struct kvm_msrs msrs;
1613         struct kvm_msr_entry *entries;
1614         int r, n;
1615         unsigned size;
1616
1617         r = -EFAULT;
1618         if (copy_from_user(&msrs, user_msrs, sizeof msrs))
1619                 goto out;
1620
1621         r = -E2BIG;
1622         if (msrs.nmsrs >= MAX_IO_MSRS)
1623                 goto out;
1624
1625         r = -ENOMEM;
1626         size = sizeof(struct kvm_msr_entry) * msrs.nmsrs;
1627         entries = kmalloc(size, GFP_KERNEL);
1628         if (!entries)
1629                 goto out;
1630
1631         r = -EFAULT;
1632         if (copy_from_user(entries, user_msrs->entries, size))
1633                 goto out_free;
1634
1635         r = n = __msr_io(vcpu, &msrs, entries, do_msr);
1636         if (r < 0)
1637                 goto out_free;
1638
1639         r = -EFAULT;
1640         if (writeback && copy_to_user(user_msrs->entries, entries, size))
1641                 goto out_free;
1642
1643         r = n;
1644
1645 out_free:
1646         kfree(entries);
1647 out:
1648         return r;
1649 }
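
/*
 * Illustrative userspace sketch (an example under assumed usage, not part
 * of this file): KVM_GET_MSRS/KVM_SET_MSRS take a struct kvm_msrs header
 * with nmsrs entries appended; the ioctl returns how many were processed:
 *
 *     struct { struct kvm_msrs hdr; struct kvm_msr_entry e[1]; } buf = {
 *             .hdr.nmsrs = 1,
 *             .e = { { .index = 0xc0000080 } },       // MSR_EFER
 *     };
 *     int n = ioctl(vcpu_fd, KVM_GET_MSRS, &buf);     // n == 1 on success
 *     // buf.e[0].data now holds the guest's EFER value
 *
 * Error handling is omitted; vcpu_fd is a vcpu file descriptor obtained
 * via KVM_CREATE_VCPU.
 */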
1650
1651 int kvm_dev_ioctl_check_extension(long ext)
1652 {
1653         int r;
1654
1655         switch (ext) {
1656         case KVM_CAP_IRQCHIP:
1657         case KVM_CAP_HLT:
1658         case KVM_CAP_MMU_SHADOW_CACHE_CONTROL:
1659         case KVM_CAP_SET_TSS_ADDR:
1660         case KVM_CAP_EXT_CPUID:
1661         case KVM_CAP_CLOCKSOURCE:
1662         case KVM_CAP_PIT:
1663         case KVM_CAP_NOP_IO_DELAY:
1664         case KVM_CAP_MP_STATE:
1665         case KVM_CAP_SYNC_MMU:
1666         case KVM_CAP_REINJECT_CONTROL:
1667         case KVM_CAP_IRQ_INJECT_STATUS:
1668         case KVM_CAP_ASSIGN_DEV_IRQ:
1669         case KVM_CAP_IRQFD:
1670         case KVM_CAP_IOEVENTFD:
1671         case KVM_CAP_PIT2:
1672         case KVM_CAP_PIT_STATE2:
1673         case KVM_CAP_SET_IDENTITY_MAP_ADDR:
1674         case KVM_CAP_XEN_HVM:
1675         case KVM_CAP_ADJUST_CLOCK:
1676         case KVM_CAP_VCPU_EVENTS:
1677         case KVM_CAP_HYPERV:
1678         case KVM_CAP_HYPERV_VAPIC:
1679         case KVM_CAP_HYPERV_SPIN:
1680         case KVM_CAP_PCI_SEGMENT:
1681         case KVM_CAP_DEBUGREGS:
1682         case KVM_CAP_X86_ROBUST_SINGLESTEP:
1683         case KVM_CAP_XSAVE:
1684                 r = 1;
1685                 break;
1686         case KVM_CAP_COALESCED_MMIO:
1687                 r = KVM_COALESCED_MMIO_PAGE_OFFSET;
1688                 break;
1689         case KVM_CAP_VAPIC:
1690                 r = !kvm_x86_ops->cpu_has_accelerated_tpr();
1691                 break;
1692         case KVM_CAP_NR_VCPUS:
1693                 r = KVM_MAX_VCPUS;
1694                 break;
1695         case KVM_CAP_NR_MEMSLOTS:
1696                 r = KVM_MEMORY_SLOTS;
1697                 break;
1698         case KVM_CAP_PV_MMU:    /* obsolete */
1699                 r = 0;
1700                 break;
1701         case KVM_CAP_IOMMU:
1702                 r = iommu_found();
1703                 break;
1704         case KVM_CAP_MCE:
1705                 r = KVM_MAX_MCE_BANKS;
1706                 break;
1707         case KVM_CAP_XCRS:
1708                 r = cpu_has_xsave;
1709                 break;
1710         default:
1711                 r = 0;
1712                 break;
1713         }
1714         return r;
1716 }
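
/*
 * Illustrative userspace sketch (not part of this file): capabilities are
 * probed with KVM_CHECK_EXTENSION on the /dev/kvm fd:
 *
 *     int kvm_fd = open("/dev/kvm", O_RDWR);
 *     if (ioctl(kvm_fd, KVM_CHECK_EXTENSION, KVM_CAP_IRQCHIP) > 0)
 *             ;       // in-kernel irqchip available
 *
 * Zero means the capability is absent; some capabilities answer with a
 * count rather than a boolean (e.g. KVM_CAP_NR_VCPUS above).
 */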
1717
1718 long kvm_arch_dev_ioctl(struct file *filp,
1719                         unsigned int ioctl, unsigned long arg)
1720 {
1721         void __user *argp = (void __user *)arg;
1722         long r;
1723
1724         switch (ioctl) {
1725         case KVM_GET_MSR_INDEX_LIST: {
1726                 struct kvm_msr_list __user *user_msr_list = argp;
1727                 struct kvm_msr_list msr_list;
1728                 unsigned n;
1729
1730                 r = -EFAULT;
1731                 if (copy_from_user(&msr_list, user_msr_list, sizeof msr_list))
1732                         goto out;
1733                 n = msr_list.nmsrs;
1734                 msr_list.nmsrs = num_msrs_to_save + ARRAY_SIZE(emulated_msrs);
1735                 if (copy_to_user(user_msr_list, &msr_list, sizeof msr_list))
1736                         goto out;
1737                 r = -E2BIG;
1738                 if (n < msr_list.nmsrs)
1739                         goto out;
1740                 r = -EFAULT;
1741                 if (copy_to_user(user_msr_list->indices, &msrs_to_save,
1742                                  num_msrs_to_save * sizeof(u32)))
1743                         goto out;
1744                 if (copy_to_user(user_msr_list->indices + num_msrs_to_save,
1745                                  &emulated_msrs,
1746                                  ARRAY_SIZE(emulated_msrs) * sizeof(u32)))
1747                         goto out;
1748                 r = 0;
1749                 break;
1750         }
1751         case KVM_GET_SUPPORTED_CPUID: {
1752                 struct kvm_cpuid2 __user *cpuid_arg = argp;
1753                 struct kvm_cpuid2 cpuid;
1754
1755                 r = -EFAULT;
1756                 if (copy_from_user(&cpuid, cpuid_arg, sizeof cpuid))
1757                         goto out;
1758                 r = kvm_dev_ioctl_get_supported_cpuid(&cpuid,
1759                                                       cpuid_arg->entries);
1760                 if (r)
1761                         goto out;
1762
1763                 r = -EFAULT;
1764                 if (copy_to_user(cpuid_arg, &cpuid, sizeof cpuid))
1765                         goto out;
1766                 r = 0;
1767                 break;
1768         }
1769         case KVM_X86_GET_MCE_CAP_SUPPORTED: {
1770                 u64 mce_cap;
1771
1772                 mce_cap = KVM_MCE_CAP_SUPPORTED;
1773                 r = -EFAULT;
1774                 if (copy_to_user(argp, &mce_cap, sizeof mce_cap))
1775                         goto out;
1776                 r = 0;
1777                 break;
1778         }
1779         default:
1780                 r = -EINVAL;
1781         }
1782 out:
1783         return r;
1784 }
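
/*
 * Illustrative userspace sketch (not part of this file): the usual
 * KVM_GET_MSR_INDEX_LIST pattern is two calls -- the first, with nmsrs
 * too small, fails with E2BIG but writes back the required count:
 *
 *     struct kvm_msr_list probe = { .nmsrs = 0 };
 *     ioctl(kvm_fd, KVM_GET_MSR_INDEX_LIST, &probe);  // fails, sets nmsrs
 *     struct kvm_msr_list *list = malloc(sizeof(*list) +
 *                                        probe.nmsrs * sizeof(__u32));
 *     list->nmsrs = probe.nmsrs;
 *     ioctl(kvm_fd, KVM_GET_MSR_INDEX_LIST, list);    // fills indices[]
 *
 * Error handling omitted.
 */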
1785
1786 void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
1787 {
1788         kvm_x86_ops->vcpu_load(vcpu, cpu);
1789         if (unlikely(per_cpu(cpu_tsc_khz, cpu) == 0)) {
1790                 unsigned long khz = cpufreq_quick_get(cpu);
1791                 if (!khz)
1792                         khz = tsc_khz;
1793                 per_cpu(cpu_tsc_khz, cpu) = khz;
1794         }
1795         kvm_request_guest_time_update(vcpu);
1796 }
1797
1798 void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
1799 {
1800         kvm_x86_ops->vcpu_put(vcpu);
1801         kvm_put_guest_fpu(vcpu);
1802 }
1803
1804 static int is_efer_nx(void)
1805 {
1806         unsigned long long efer = 0;
1807
1808         rdmsrl_safe(MSR_EFER, &efer);
1809         return efer & EFER_NX;
1810 }
1811
1812 static void cpuid_fix_nx_cap(struct kvm_vcpu *vcpu)
1813 {
1814         int i;
1815         struct kvm_cpuid_entry2 *e, *entry;
1816
1817         entry = NULL;
1818         for (i = 0; i < vcpu->arch.cpuid_nent; ++i) {
1819                 e = &vcpu->arch.cpuid_entries[i];
1820                 if (e->function == 0x80000001) {
1821                         entry = e;
1822                         break;
1823                 }
1824         }
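        /* bit 20 of CPUID 0x80000001.EDX is the NX (no-execute) flag */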
1825         if (entry && (entry->edx & (1 << 20)) && !is_efer_nx()) {
1826                 entry->edx &= ~(1 << 20);
1827                 printk(KERN_INFO "kvm: guest NX capability removed\n");
1828         }
1829 }
1830
1831 /* legacy KVM_SET_CPUID: an old userspace fills cpuid entries for a new kernel module */
1832 static int kvm_vcpu_ioctl_set_cpuid(struct kvm_vcpu *vcpu,
1833                                     struct kvm_cpuid *cpuid,
1834                                     struct kvm_cpuid_entry __user *entries)
1835 {
1836         int r, i;
1837         struct kvm_cpuid_entry *cpuid_entries;
1838
1839         r = -E2BIG;
1840         if (cpuid->nent > KVM_MAX_CPUID_ENTRIES)
1841                 goto out;
1842         r = -ENOMEM;
1843         cpuid_entries = vmalloc(sizeof(struct kvm_cpuid_entry) * cpuid->nent);
1844         if (!cpuid_entries)
1845                 goto out;
1846         r = -EFAULT;
1847         if (copy_from_user(cpuid_entries, entries,
1848                            cpuid->nent * sizeof(struct kvm_cpuid_entry)))
1849                 goto out_free;
1850         for (i = 0; i < cpuid->nent; i++) {
1851                 vcpu->arch.cpuid_entries[i].function = cpuid_entries[i].function;
1852                 vcpu->arch.cpuid_entries[i].eax = cpuid_entries[i].eax;
1853                 vcpu->arch.cpuid_entries[i].ebx = cpuid_entries[i].ebx;
1854                 vcpu->arch.cpuid_entries[i].ecx = cpuid_entries[i].ecx;
1855                 vcpu->arch.cpuid_entries[i].edx = cpuid_entries[i].edx;
1856                 vcpu->arch.cpuid_entries[i].index = 0;
1857                 vcpu->arch.cpuid_entries[i].flags = 0;
1858                 vcpu->arch.cpuid_entries[i].padding[0] = 0;
1859                 vcpu->arch.cpuid_entries[i].padding[1] = 0;
1860                 vcpu->arch.cpuid_entries[i].padding[2] = 0;
1861         }
1862         vcpu->arch.cpuid_nent = cpuid->nent;
1863         cpuid_fix_nx_cap(vcpu);
1864         r = 0;
1865         kvm_apic_set_version(vcpu);
1866         kvm_x86_ops->cpuid_update(vcpu);
1867         update_cpuid(vcpu);
1868
1869 out_free:
1870         vfree(cpuid_entries);
1871 out:
1872         return r;
1873 }
1874
1875 static int kvm_vcpu_ioctl_set_cpuid2(struct kvm_vcpu *vcpu,
1876                                      struct kvm_cpuid2 *cpuid,
1877                                      struct kvm_cpuid_entry2 __user *entries)
1878 {
1879         int r;
1880
1881         r = -E2BIG;
1882         if (cpuid->nent > KVM_MAX_CPUID_ENTRIES)
1883                 goto out;
1884         r = -EFAULT;
1885         if (copy_from_user(&vcpu->arch.cpuid_entries, entries,
1886                            cpuid->nent * sizeof(struct kvm_cpuid_entry2)))
1887                 goto out;
1888         vcpu->arch.cpuid_nent = cpuid->nent;
1889         kvm_apic_set_version(vcpu);
1890         kvm_x86_ops->cpuid_update(vcpu);
1891         update_cpuid(vcpu);
1892         return 0;
1893
1894 out:
1895         return r;
1896 }
1897
1898 static int kvm_vcpu_ioctl_get_cpuid2(struct kvm_vcpu *vcpu,
1899                                      struct kvm_cpuid2 *cpuid,
1900                                      struct kvm_cpuid_entry2 __user *entries)
1901 {
1902         int r;
1903
1904         r = -E2BIG;
1905         if (cpuid->nent < vcpu->arch.cpuid_nent)
1906                 goto out;
1907         r = -EFAULT;
1908         if (copy_to_user(entries, &vcpu->arch.cpuid_entries,
1909                          vcpu->arch.cpuid_nent * sizeof(struct kvm_cpuid_entry2)))
1910                 goto out;
1911         return 0;
1912
1913 out:
1914         cpuid->nent = vcpu->arch.cpuid_nent;
1915         return r;
1916 }
1917
1918 static void do_cpuid_1_ent(struct kvm_cpuid_entry2 *entry, u32 function,
1919                            u32 index)
1920 {
1921         entry->function = function;
1922         entry->index = index;
1923         cpuid_count(entry->function, entry->index,
1924                     &entry->eax, &entry->ebx, &entry->ecx, &entry->edx);
1925         entry->flags = 0;
1926 }
1927
1928 #define F(x) bit(X86_FEATURE_##x)
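/* e.g. F(NX) expands to bit(X86_FEATURE_NX), a single feature-bit mask */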
1929
1930 static void do_cpuid_ent(struct kvm_cpuid_entry2 *entry, u32 function,
1931                          u32 index, int *nent, int maxnent)
1932 {
1933         unsigned f_nx = is_efer_nx() ? F(NX) : 0;
1934 #ifdef CONFIG_X86_64
1935         unsigned f_gbpages = (kvm_x86_ops->get_lpage_level() == PT_PDPE_LEVEL)
1936                                 ? F(GBPAGES) : 0;
1937         unsigned f_lm = F(LM);
1938 #else
1939         unsigned f_gbpages = 0;
1940         unsigned f_lm = 0;
1941 #endif
1942         unsigned f_rdtscp = kvm_x86_ops->rdtscp_supported() ? F(RDTSCP) : 0;
1943
1944         /* cpuid 1.edx */
1945         const u32 kvm_supported_word0_x86_features =
1946                 F(FPU) | F(VME) | F(DE) | F(PSE) |
1947                 F(TSC) | F(MSR) | F(PAE) | F(MCE) |
1948                 F(CX8) | F(APIC) | 0 /* Reserved */ | F(SEP) |
1949                 F(MTRR) | F(PGE) | F(MCA) | F(CMOV) |
1950                 F(PAT) | F(PSE36) | 0 /* PSN */ | F(CLFLSH) |
1951                 0 /* Reserved, DS, ACPI */ | F(MMX) |
1952                 F(FXSR) | F(XMM) | F(XMM2) | F(SELFSNOOP) |
1953                 0 /* HTT, TM, Reserved, PBE */;
1954         /* cpuid 0x80000001.edx */
1955         const u32 kvm_supported_word1_x86_features =
1956                 F(FPU) | F(VME) | F(DE) | F(PSE) |
1957                 F(TSC) | F(MSR) | F(PAE) | F(MCE) |
1958                 F(CX8) | F(APIC) | 0 /* Reserved */ | F(SYSCALL) |
1959                 F(MTRR) | F(PGE) | F(MCA) | F(CMOV) |
1960                 F(PAT) | F(PSE36) | 0 /* Reserved */ |
1961                 f_nx | 0 /* Reserved */ | F(MMXEXT) | F(MMX) |
1962                 F(FXSR) | F(FXSR_OPT) | f_gbpages | f_rdtscp |
1963                 0 /* Reserved */ | f_lm | F(3DNOWEXT) | F(3DNOW);
1964         /* cpuid 1.ecx */
1965         const u32 kvm_supported_word4_x86_features =
1966                 F(XMM3) | 0 /* Reserved, DTES64, MONITOR */ |
1967                 0 /* DS-CPL, VMX, SMX, EST */ |
1968                 0 /* TM2 */ | F(SSSE3) | 0 /* CNXT-ID */ | 0 /* Reserved */ |
1969                 0 /* Reserved */ | F(CX16) | 0 /* xTPR Update, PDCM */ |
1970                 0 /* Reserved, DCA */ | F(XMM4_1) |
1971                 F(XMM4_2) | F(X2APIC) | F(MOVBE) | F(POPCNT) |
1972                 0 /* Reserved, AES */ | F(XSAVE) | 0 /* OSXSAVE */;
1973         /* cpuid 0x80000001.ecx */
1974         const u32 kvm_supported_word6_x86_features =
1975                 F(LAHF_LM) | F(CMP_LEGACY) | F(SVM) | 0 /* ExtApicSpace */ |
1976                 F(CR8_LEGACY) | F(ABM) | F(SSE4A) | F(MISALIGNSSE) |
1977                 F(3DNOWPREFETCH) | 0 /* OSVW */ | 0 /* IBS */ | F(SSE5) |
1978                 0 /* SKINIT */ | 0 /* WDT */;
1979
1980         /* all calls to cpuid_count() should be made on the same cpu */
1981         get_cpu();
1982         do_cpuid_1_ent(entry, function, index);
1983         ++*nent;
1984
1985         switch (function) {
1986         case 0:
1987                 entry->eax = min(entry->eax, (u32)0xd);
1988                 break;
1989         case 1:
1990                 entry->edx &= kvm_supported_word0_x86_features;
1991                 entry->ecx &= kvm_supported_word4_x86_features;
1992                 /* we can always expose x2apic to the guest, even when the
1993                  * host lacks it, since KVM emulates x2apic in software */
1994                 entry->ecx |= F(X2APIC);
1995                 break;
1996         /* function 2 entries are STATEFUL. That is, repeated cpuid commands
1997          * may return different values. This forces us to get_cpu() before
1998          * issuing the first command, and also to emulate this annoying behavior
1999          * in kvm_emulate_cpuid() using KVM_CPUID_FLAG_STATE_READ_NEXT */
2000         case 2: {
2001                 int t, times = entry->eax & 0xff;
2002
2003                 entry->flags |= KVM_CPUID_FLAG_STATEFUL_FUNC;
2004                 entry->flags |= KVM_CPUID_FLAG_STATE_READ_NEXT;
2005                 for (t = 1; t < times && *nent < maxnent; ++t) {
2006                         do_cpuid_1_ent(&entry[t], function, 0);
2007                         entry[t].flags |= KVM_CPUID_FLAG_STATEFUL_FUNC;
2008                         ++*nent;
2009                 }
2010                 break;
2011         }
2012         /* functions 4 and 0xb take an additional index. */
2013         case 4: {
2014                 int i, cache_type;
2015
2016                 entry->flags |= KVM_CPUID_FLAG_SIGNIFCANT_INDEX;
2017                 /* read more entries until cache_type is zero */
2018                 for (i = 1; *nent < maxnent; ++i) {
2019                         cache_type = entry[i - 1].eax & 0x1f;
2020                         if (!cache_type)
2021                                 break;
2022                         do_cpuid_1_ent(&entry[i], function, i);
2023                         entry[i].flags |=
2024                                KVM_CPUID_FLAG_SIGNIFCANT_INDEX;
2025                         ++*nent;
2026                 }
2027                 break;
2028         }
2029         case 0xb: {
2030                 int i, level_type;
2031
2032                 entry->flags |= KVM_CPUID_FLAG_SIGNIFCANT_INDEX;
2033                 /* read more entries until level_type is zero */
2034                 for (i = 1; *nent < maxnent; ++i) {
2035                         level_type = entry[i - 1].ecx & 0xff00;
2036                         if (!level_type)
2037                                 break;
2038                         do_cpuid_1_ent(&entry[i], function, i);
2039                         entry[i].flags |=
2040                                KVM_CPUID_FLAG_SIGNIFCANT_INDEX;
2041                         ++*nent;
2042                 }
2043                 break;
2044         }
2045         case 0xd: {
2046                 int i;
2047
2048                 entry->flags |= KVM_CPUID_FLAG_SIGNIFCANT_INDEX;
2049                 for (i = 1; *nent < maxnent; ++i) {
2050                         if (entry[i - 1].eax == 0 && i != 2)
2051                                 break;
2052                         do_cpuid_1_ent(&entry[i], function, i);
2053                         entry[i].flags |=
2054                                KVM_CPUID_FLAG_SIGNIFCANT_INDEX;
2055                         ++*nent;
2056                 }
2057                 break;
2058         }
2059         case KVM_CPUID_SIGNATURE: {
2060                 char signature[12] = "KVMKVMKVM\0\0";
2061                 u32 *sigptr = (u32 *)signature;
2062                 entry->eax = 0;
2063                 entry->ebx = sigptr[0];
2064                 entry->ecx = sigptr[1];
2065                 entry->edx = sigptr[2];
2066                 break;
2067         }
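        /*
         * Guests detect KVM by issuing CPUID at the hypervisor leaf
         * (KVM_CPUID_SIGNATURE, 0x40000000) and matching ebx:ecx:edx
         * against the "KVMKVMKVM\0\0\0" signature filled in above.
         */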
2068         case KVM_CPUID_FEATURES:
2069                 entry->eax = (1 << KVM_FEATURE_CLOCKSOURCE) |
2070                              (1 << KVM_FEATURE_NOP_IO_DELAY) |
2071                              (1 << KVM_FEATURE_CLOCKSOURCE2) |
2072                              (1 << KVM_FEATURE_CLOCKSOURCE_STABLE_BIT);
2073                 entry->ebx = 0;
2074                 entry->ecx = 0;
2075                 entry->edx = 0;
2076                 break;
2077         case 0x80000000:
2078                 entry->eax = min(entry->eax, 0x8000001a);
2079                 break;
2080         case 0x80000001:
2081                 entry->edx &= kvm_supported_word1_x86_features;
2082                 entry->ecx &= kvm_supported_word6_x86_features;
2083                 break;
2084         }
2085
2086         kvm_x86_ops->set_supported_cpuid(function, entry);
2087
2088         put_cpu();
2089 }
2090
2091 #undef F
2092
2093 static int kvm_dev_ioctl_get_supported_cpuid(struct kvm_cpuid2 *cpuid,
2094                                      struct kvm_cpuid_entry2 __user *entries)
2095 {
2096         struct kvm_cpuid_entry2 *cpuid_entries;
2097         int limit, nent = 0, r = -E2BIG;
2098         u32 func;
2099
2100         if (cpuid->nent < 1)
2101                 goto out;
2102         if (cpuid->nent > KVM_MAX_CPUID_ENTRIES)
2103                 cpuid->nent = KVM_MAX_CPUID_ENTRIES;
2104         r = -ENOMEM;
2105         cpuid_entries = vmalloc(sizeof(struct kvm_cpuid_entry2) * cpuid->nent);
2106         if (!cpuid_entries)
2107                 goto out;
2108
2109         do_cpuid_ent(&cpuid_entries[0], 0, 0, &nent, cpuid->nent);
2110         limit = cpuid_entries[0].eax;
2111         for (func = 1; func <= limit && nent < cpuid->nent; ++func)
2112                 do_cpuid_ent(&cpuid_entries[nent], func, 0,
2113                              &nent, cpuid->nent);
2114         r = -E2BIG;
2115         if (nent >= cpuid->nent)
2116                 goto out_free;
2117
2118         do_cpuid_ent(&cpuid_entries[nent], 0x80000000, 0, &nent, cpuid->nent);
2119         limit = cpuid_entries[nent - 1].eax;
2120         for (func = 0x80000001; func <= limit && nent < cpuid->nent; ++func)
2121                 do_cpuid_ent(&cpuid_entries[nent], func, 0,
2122                              &nent, cpuid->nent);
2123
2126         r = -E2BIG;
2127         if (nent >= cpuid->nent)
2128                 goto out_free;
2129
2130         do_cpuid_ent(&cpuid_entries[nent], KVM_CPUID_SIGNATURE, 0, &nent,
2131                      cpuid->nent);
2132
2133         r = -E2BIG;
2134         if (nent >= cpuid->nent)
2135                 goto out_free;
2136
2137         do_cpuid_ent(&cpuid_entries[nent], KVM_CPUID_FEATURES, 0, &nent,
2138                      cpuid->nent);
2139
2140         r = -E2BIG;
2141         if (nent >= cpuid->nent)
2142                 goto out_free;
2143
2144         r = -EFAULT;
2145         if (copy_to_user(entries, cpuid_entries,
2146                          nent * sizeof(struct kvm_cpuid_entry2)))
2147                 goto out_free;
2148         cpuid->nent = nent;
2149         r = 0;
2150
2151 out_free:
2152         vfree(cpuid_entries);
2153 out:
2154         return r;
2155 }
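
/*
 * Illustrative userspace sketch (not part of this file): the caller sizes
 * the buffer up front and KVM_GET_SUPPORTED_CPUID trims nent on return:
 *
 *     struct { struct kvm_cpuid2 hdr; struct kvm_cpuid_entry2 e[64]; } c = {
 *             .hdr.nent = 64,
 *     };
 *     ioctl(kvm_fd, KVM_GET_SUPPORTED_CPUID, &c);
 *     // the first c.hdr.nent elements of c.e[] are now valid
 *
 * The result is typically filtered by the VMM and fed back through
 * KVM_SET_CPUID2 on each vcpu.
 */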
2156
2157 static int kvm_vcpu_ioctl_get_lapic(struct kvm_vcpu *vcpu,
2158                                     struct kvm_lapic_state *s)
2159 {
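        /*
         * struct kvm_lapic_state consists solely of the regs[] array, so
         * sizeof(*s) below is exactly the APIC register page being copied.
         */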
2160         memcpy(s->regs, vcpu->arch.apic->regs, sizeof *s);
2161
2162         return 0;
2163 }
2164
2165 static int kvm_vcpu_ioctl_set_lapic(struct kvm_vcpu *vcpu,
2166                                     struct kvm_lapic_state *s)
2167 {
2168         memcpy(vcpu->arch.apic->regs, s->regs, sizeof *s);
2169         kvm_apic_post_state_restore(vcpu);
2170         update_cr8_intercept(vcpu);
2171
2172         return 0;
2173 }
2174
2175 static int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu,
2176                                     struct kvm_interrupt *irq)
2177 {
2178         if (irq->irq < 0 || irq->irq >= 256)
2179                 return -EINVAL;
2180         if (irqchip_in_kernel(vcpu->kvm))
2181                 return -ENXIO;
2182
2183         kvm_queue_interrupt(vcpu, irq->irq, false);
2184
2185         return 0;
2186 }
2187
2188 static int kvm_vcpu_ioctl_nmi(struct kvm_vcpu *vcpu)
2189 {
2190         kvm_inject_nmi(vcpu);
2191
2192         return 0;
2193 }
2194
2195 static int vcpu_ioctl_tpr_access_reporting(struct kvm_vcpu *vcpu,
2196                                            struct kvm_tpr_access_ctl *tac)
2197 {
2198         if (tac->flags)
2199                 return -EINVAL;
2200         vcpu->arch.tpr_access_reporting = !!tac->enabled;
2201         return 0;
2202 }
2203
2204 static int kvm_vcpu_ioctl_x86_setup_mce(struct kvm_vcpu *vcpu,
2205                                         u64 mcg_cap)
2206 {
2207         int r;
2208         unsigned bank_num = mcg_cap & 0xff, bank;
2209
2210         r = -EINVAL;
2211         if (!bank_num || bank_num >= KVM_MAX_MCE_BANKS)
2212                 goto out;
2213         if (mcg_cap & ~(KVM_MCE_CAP_SUPPORTED | 0xff | 0xff0000))
2214                 goto out;
2215         r = 0;
2216         vcpu->arch.mcg_cap = mcg_cap;
2217         /* Init IA32_MCG_CTL to all 1s */
2218         if (mcg_cap & MCG_CTL_P)
2219                 vcpu->arch.mcg_ctl = ~(u64)0;
2220         /* Init IA32_MCi_CTL to all 1s */
2221         for (bank = 0; bank < bank_num; bank++)
2222                 vcpu->arch.mce_banks[bank*4] = ~(u64)0;
2223 out:
2224         return r;
2225 }
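
/*
 * Example mcg_cap layout for KVM_X86_SETUP_MCE: the low byte is the bank
 * count, so mcg_cap = MCG_CTL_P | 8 advertises eight MC banks with an
 * IA32_MCG_CTL register present.
 */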
2226
2227 static int kvm_vcpu_ioctl_x86_set_mce(struct kvm_vcpu *vcpu,
2228                                       struct kvm_x86_mce *mce)
2229 {
2230         u64 mcg_cap = vcpu->arch.mcg_cap;
2231         unsigned bank_num = mcg_cap & 0xff;
2232         u64 *banks = vcpu->arch.mce_banks;
2233
2234         if (mce->bank >= bank_num || !(mce->status & MCI_STATUS_VAL))
2235                 return -EINVAL;
2236         /*
2237          * if IA32_MCG_CTL is not all 1s, uncorrected error reporting
2238          * is disabled for the whole machine
2239          */
2240         if ((mce->status & MCI_STATUS_UC) && (mcg_cap & MCG_CTL_P) &&
2241             vcpu->arch.mcg_ctl != ~(u64)0)
2242                 return 0;
2243         banks += 4 * mce->bank;
2244         /*
2245          * if IA32_MCi_CTL is not all 1s, uncorrected error reporting
2246          * is disabled for this bank
2247          */
2248         if ((mce->status & MCI_STATUS_UC) && banks[0] != ~(u64)0)
2249                 return 0;
2250         if (mce->status & MCI_STATUS_UC) {
2251                 if ((vcpu->arch.mcg_status & MCG_STATUS_MCIP) ||
2252                     !kvm_read_cr4_bits(vcpu, X86_CR4_MCE)) {
2253                         printk(KERN_DEBUG "kvm: set_mce: "
2254                                "injecting MCE while a previous "
2255                                "one is still in progress!\n");
2256                         set_bit(KVM_REQ_TRIPLE_FAULT, &vcpu->requests);
2257                         return 0;
2258                 }
2259                 if (banks[1] & MCI_STATUS_VAL)
2260                         mce->status |= MCI_STATUS_OVER;
2261                 banks[2] = mce->addr;
2262                 banks[3] = mce->misc;
2263                 vcpu->arch.mcg_status = mce->mcg_status;
2264                 banks[1] = mce->status;
2265                 kvm_queue_exception(vcpu, MC_VECTOR);
2266         } else if (!(banks[1] & MCI_STATUS_VAL)
2267                    || !(banks[1] & MCI_STATUS_UC)) {
2268                 if (banks[1] & MCI_STATUS_VAL)
2269                         mce->status |= MCI_STATUS_OVER;
2270                 banks[2] = mce->addr;
2271                 banks[3] = mce->misc;
2272                 banks[1] = mce->status;
2273         } else
2274                 banks[1] |= MCI_STATUS_OVER;
2275         return 0;
2276 }
2277
2278 static void kvm_vcpu_ioctl_x86_get_vcpu_events(struct kvm_vcpu *vcpu,
2279                                                struct kvm_vcpu_events *events)
2280 {
2281         events->exception.injected =
2282                 vcpu->arch.exception.pending &&
2283                 !kvm_exception_is_soft(vcpu->arch.exception.nr);
2284         events->exception.nr = vcpu->arch.exception.nr;
2285         events->exception.has_error_code = vcpu->arch.exception.has_error_code;
2286         events->exception.error_code = vcpu->arch.exception.error_code;
2287
2288         events->interrupt.injected =
2289                 vcpu->arch.interrupt.pending && !vcpu->arch.interrupt.soft;
2290         events->interrupt.nr = vcpu->arch.interrupt.nr;
2291         events->interrupt.soft = 0;
2292         events->interrupt.shadow =
2293                 kvm_x86_ops->get_interrupt_shadow(vcpu,
2294                         KVM_X86_SHADOW_INT_MOV_SS | KVM_X86_SHADOW_INT_STI);
2295
2296         events->nmi.injected = vcpu->arch.nmi_injected;
2297         events->nmi.pending = vcpu->arch.nmi_pending;
2298         events->nmi.masked = kvm_x86_ops->get_nmi_mask(vcpu);
2299
2300         events->sipi_vector = vcpu->arch.sipi_vector;
2301
2302         events->flags = (KVM_VCPUEVENT_VALID_NMI_PENDING
2303                          | KVM_VCPUEVENT_VALID_SIPI_VECTOR
2304                          | KVM_VCPUEVENT_VALID_SHADOW);
2305 }
2306
2307 static int kvm_vcpu_ioctl_x86_set_vcpu_events(struct kvm_vcpu *vcpu,
2308                                               struct kvm_vcpu_events *events)
2309 {
2310         if (events->flags & ~(KVM_VCPUEVENT_VALID_NMI_PENDING
2311                               | KVM_VCPUEVENT_VALID_SIPI_VECTOR
2312                               | KVM_VCPUEVENT_VALID_SHADOW))
2313                 return -EINVAL;
2314
2315         vcpu->arch.exception.pending = events->exception.injected;
2316         vcpu->arch.exception.nr = events->exception.nr;
2317         vcpu->arch.exception.has_error_code = events->exception.has_error_code;
2318         vcpu->arch.exception.error_code = events->exception.error_code;
2319
2320         vcpu->arch.interrupt.pending = events->interrupt.injected;
2321         vcpu->arch.interrupt.nr = events->interrupt.nr;
2322         vcpu->arch.interrupt.soft = events->interrupt.soft;
2323         if (vcpu->arch.interrupt.pending && irqchip_in_kernel(vcpu->kvm))
2324                 kvm_pic_clear_isr_ack(vcpu->kvm);
2325         if (events->flags & KVM_VCPUEVENT_VALID_SHADOW)
2326                 kvm_x86_ops->set_interrupt_shadow(vcpu,
2327                                                   events->interrupt.shadow);
2328
2329         vcpu->arch.nmi_injected = events->nmi.injected;
2330         if (events->flags & KVM_VCPUEVENT_VALID_NMI_PENDING)
2331                 vcpu->arch.nmi_pending = events->nmi.pending;
2332         kvm_x86_ops->set_nmi_mask(vcpu, events->nmi.masked);
2333
2334         if (events->flags & KVM_VCPUEVENT_VALID_SIPI_VECTOR)
2335                 vcpu->arch.sipi_vector = events->sipi_vector;
2336
2337         return 0;
2338 }
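
/*
 * Note that exception and interrupt state above are always consumed,
 * while nmi.pending, the interrupt shadow and sipi_vector are honoured
 * only when the matching KVM_VCPUEVENT_VALID_* flag is set.
 */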
2339
2340 static void kvm_vcpu_ioctl_x86_get_debugregs(struct kvm_vcpu *vcpu,
2341                                              struct kvm_debugregs *dbgregs)
2342 {
2343         memcpy(dbgregs->db, vcpu->arch.db, sizeof(vcpu->arch.db));
2344         dbgregs->dr6 = vcpu->arch.dr6;
2345         dbgregs->dr7 = vcpu->arch.dr7;
2346         dbgregs->flags = 0;
2347 }
2348
2349 static int kvm_vcpu_ioctl_x86_set_debugregs(struct kvm_vcpu *vcpu,
2350                                             struct kvm_debugregs *dbgregs)
2351 {
2352         if (dbgregs->flags)
2353                 return -EINVAL;
2354
2355         memcpy(vcpu->arch.db, dbgregs->db, sizeof(vcpu->arch.db));
2356         vcpu->arch.dr6 = dbgregs->dr6;
2357         vcpu->arch.dr7 = dbgregs->dr7;
2358
2359         return 0;
2360 }
2361
2362 static void kvm_vcpu_ioctl_x86_get_xsave(struct kvm_vcpu *vcpu,
2363                                          struct kvm_xsave *guest_xsave)
2364 {
2365         if (cpu_has_xsave)
2366                 memcpy(guest_xsave->region,
2367                         &vcpu->arch.guest_fpu.state->xsave,
2368                         sizeof(struct xsave_struct));
2369         else {
2370                 memcpy(guest_xsave->region,
2371                         &vcpu->arch.guest_fpu.state->fxsave,
2372                         sizeof(struct i387_fxsave_struct));
2373                 *(u64 *)&guest_xsave->region[XSAVE_HDR_OFFSET / sizeof(u32)] =
2374                         XSTATE_FPSSE;
2375         }
2376 }
2377
2378 static int kvm_vcpu_ioctl_x86_set_xsave(struct kvm_vcpu *vcpu,
2379                                         struct kvm_xsave *guest_xsave)
2380 {
2381         u64 xstate_bv =
2382                 *(u64 *)&guest_xsave->region[XSAVE_HDR_OFFSET / sizeof(u32)];
2383
2384         if (cpu_has_xsave)
2385                 memcpy(&vcpu->arch.guest_fpu.state->xsave,
2386                         guest_xsave->region, sizeof(struct xsave_struct));
2387         else {
2388                 if (xstate_bv & ~XSTATE_FPSSE)
2389                         return -EINVAL;
2390                 memcpy(&vcpu->arch.guest_fpu.state->fxsave,
2391                         guest_xsave->region, sizeof(struct i387_fxsave_struct));
2392         }
2393         return 0;
2394 }
2395
2396 static void kvm_vcpu_ioctl_x86_get_xcrs(struct kvm_vcpu *vcpu,
2397                                         struct kvm_xcrs *guest_xcrs)
2398 {
2399         if (!cpu_has_xsave) {
2400                 guest_xcrs->nr_xcrs = 0;
2401                 return;
2402         }
2403
2404         guest_xcrs->nr_xcrs = 1;
2405         guest_xcrs->flags = 0;
2406         guest_xcrs->xcrs[0].xcr = XCR_XFEATURE_ENABLED_MASK;
2407         guest_xcrs->xcrs[0].value = vcpu->arch.xcr0;
2408 }
2409
2410 static int kvm_vcpu_ioctl_x86_set_xcrs(struct kvm_vcpu *vcpu,
2411                                        struct kvm_xcrs *guest_xcrs)
2412 {
2413         int i, r = 0;
2414
2415         if (!cpu_has_xsave)
2416                 return -EINVAL;
2417
2418         if (guest_xcrs->nr_xcrs > KVM_MAX_XCRS || guest_xcrs->flags)
2419                 return -EINVAL;
2420
2421         for (i = 0; i < guest_xcrs->nr_xcrs; i++)
2422                 /* Only support XCR0 currently */
2423                 if (guest_xcrs->xcrs[i].xcr == XCR_XFEATURE_ENABLED_MASK) {
2424                         r = __kvm_set_xcr(vcpu, XCR_XFEATURE_ENABLED_MASK,
2425                                 guest_xcrs->xcrs[i].value);
2426                         break;
2427                 }
2428         if (r)
2429                 r = -EINVAL;
2430         return r;
2431 }
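
/*
 * Example (mask names assumed from asm/xsave.h): setting XCR0 to
 * XSTATE_FP | XSTATE_SSE | XSTATE_YMM (0x7) via KVM_SET_XCRS enables
 * AVX state for the guest, subject to the checks in __kvm_set_xcr().
 */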
2432
2433 long kvm_arch_vcpu_ioctl(struct file *filp,
2434                          unsigned int ioctl, unsigned long arg)
2435 {
2436         struct kvm_vcpu *vcpu = filp->private_data;
2437         void __user *argp = (void __user *)arg;
2438         int r;
2439         union {
2440                 struct kvm_lapic_state *lapic;
2441                 struct kvm_xsave *xsave;
2442                 struct kvm_xcrs *xcrs;
2443                 void *buffer;
2444         } u;
2445
2446         u.buffer = NULL;
2447         switch (ioctl) {
2448         case KVM_GET_LAPIC: {
2449                 r = -EINVAL;
2450                 if (!vcpu->arch.apic)
2451                         goto out;
2452                 u.lapic = kzalloc(sizeof(struct kvm_lapic_state), GFP_KERNEL);
2453
2454                 r = -ENOMEM;
2455                 if (!u.lapic)
2456                         goto out;
2457                 r = kvm_vcpu_ioctl_get_lapic(vcpu, u.lapic);
2458                 if (r)
2459                         goto out;
2460                 r = -EFAULT;
2461                 if (copy_to_user(argp, u.lapic, sizeof(struct kvm_lapic_state)))
2462                         goto out;
2463                 r = 0;
2464                 break;
2465         }
2466         case KVM_SET_LAPIC: {
2467                 r = -EINVAL;
2468                 if (!vcpu->arch.apic)
2469                         goto out;
2470                 u.lapic = kmalloc(sizeof(struct kvm_lapic_state), GFP_KERNEL);
2471                 r = -ENOMEM;
2472                 if (!u.lapic)
2473                         goto out;
2474                 r = -EFAULT;
2475                 if (copy_from_user(u.lapic, argp, sizeof(struct kvm_lapic_state)))
2476                         goto out;
2477                 r = kvm_vcpu_ioctl_set_lapic(vcpu, u.lapic);
2478                 if (r)
2479                         goto out;
2480                 r = 0;
2481                 break;
2482         }
2483         case KVM_INTERRUPT: {
2484                 struct kvm_interrupt irq;
2485
2486                 r = -EFAULT;
2487                 if (copy_from_user(&irq, argp, sizeof irq))
2488                         goto out;
2489                 r = kvm_vcpu_ioctl_interrupt(vcpu, &irq);
2490                 if (r)
2491                         goto out;
2492                 r = 0;
2493                 break;
2494         }
2495         case KVM_NMI: {
2496                 r = kvm_vcpu_ioctl_nmi(vcpu);
2497                 if (r)
2498                         goto out;
2499                 r = 0;
2500                 break;
2501         }
2502         case KVM_SET_CPUID: {
2503                 struct kvm_cpuid __user *cpuid_arg = argp;
2504                 struct kvm_cpuid cpuid;
2505
2506                 r = -EFAULT;
2507                 if (copy_from_user(&cpuid, cpuid_arg, sizeof cpuid))
2508                         goto out;
2509                 r = kvm_vcpu_ioctl_set_cpuid(vcpu, &cpuid, cpuid_arg->entries);
2510                 if (r)
2511                         goto out;
2512                 break;
2513         }
2514         case KVM_SET_CPUID2: {
2515                 struct kvm_cpuid2 __user *cpuid_arg = argp;
2516                 struct kvm_cpuid2 cpuid;
2517
2518                 r = -EFAULT;
2519                 if (copy_from_user(&cpuid, cpuid_arg, sizeof cpuid))
2520                         goto out;
2521                 r = kvm_vcpu_ioctl_set_cpuid2(vcpu, &cpuid,
2522                                               cpuid_arg->entries);
2523                 if (r)
2524                         goto out;
2525                 break;
2526         }
2527         case KVM_GET_CPUID2: {
2528                 struct kvm_cpuid2 __user *cpuid_arg = argp;
2529                 struct kvm_cpuid2 cpuid;
2530
2531                 r = -EFAULT;
2532                 if (copy_from_user(&cpuid, cpuid_arg, sizeof cpuid))
2533                         goto out;
2534                 r = kvm_vcpu_ioctl_get_cpuid2(vcpu, &cpuid,
2535                                               cpuid_arg->entries);
2536                 if (r)
2537                         goto out;
2538                 r = -EFAULT;
2539                 if (copy_to_user(cpuid_arg, &cpuid, sizeof cpuid))
2540                         goto out;
2541                 r = 0;
2542                 break;
2543         }
2544         case KVM_GET_MSRS:
2545                 r = msr_io(vcpu, argp, kvm_get_msr, 1);
2546                 break;
2547         case KVM_SET_MSRS:
2548                 r = msr_io(vcpu, argp, do_set_msr, 0);
2549                 break;
2550         case KVM_TPR_ACCESS_REPORTING: {
2551                 struct kvm_tpr_access_ctl tac;
2552
2553                 r = -EFAULT;
2554                 if (copy_from_user(&tac, argp, sizeof tac))
2555                         goto out;
2556                 r = vcpu_ioctl_tpr_access_reporting(vcpu, &tac);
2557                 if (r)
2558                         goto out;
2559                 r = -EFAULT;
2560                 if (copy_to_user(argp, &tac, sizeof tac))
2561                         goto out;
2562                 r = 0;
2563                 break;
2564         }
2565         case KVM_SET_VAPIC_ADDR: {
2566                 struct kvm_vapic_addr va;
2567
2568                 r = -EINVAL;
2569                 if (!irqchip_in_kernel(vcpu->kvm))
2570                         goto out;
2571                 r = -EFAULT;
2572                 if (copy_from_user(&va, argp, sizeof va))
2573                         goto out;
2574                 r = 0;
2575                 kvm_lapic_set_vapic_addr(vcpu, va.vapic_addr);
2576                 break;
2577         }
2578         case KVM_X86_SETUP_MCE: {
2579                 u64 mcg_cap;
2580
2581                 r = -EFAULT;
2582                 if (copy_from_user(&mcg_cap, argp, sizeof mcg_cap))
2583                         goto out;
2584                 r = kvm_vcpu_ioctl_x86_setup_mce(vcpu, mcg_cap);
2585                 break;
2586         }
2587         case KVM_X86_SET_MCE: {
2588                 struct kvm_x86_mce mce;
2589
2590                 r = -EFAULT;
2591                 if (copy_from_user(&mce, argp, sizeof mce))
2592                         goto out;
2593                 r = kvm_vcpu_ioctl_x86_set_mce(vcpu, &mce);
2594                 break;
2595         }
2596         case KVM_GET_VCPU_EVENTS: {
2597                 struct kvm_vcpu_events events;
2598
2599                 kvm_vcpu_ioctl_x86_get_vcpu_events(vcpu, &events);
2600
2601                 r = -EFAULT;
2602                 if (copy_to_user(argp, &events, sizeof(struct kvm_vcpu_events)))
2603                         break;
2604                 r = 0;
2605                 break;
2606         }
2607         case KVM_SET_VCPU_EVENTS: {
2608                 struct kvm_vcpu_events events;
2609
2610                 r = -EFAULT;
2611                 if (copy_from_user(&events, argp, sizeof(struct kvm_vcpu_events)))
2612                         break;
2613
2614                 r = kvm_vcpu_ioctl_x86_set_vcpu_events(vcpu, &events);
2615                 break;
2616         }
2617         case KVM_GET_DEBUGREGS: {
2618                 struct kvm_debugregs dbgregs;
2619
2620                 kvm_vcpu_ioctl_x86_get_debugregs(vcpu, &dbgregs);
2621
2622                 r = -EFAULT;
2623                 if (copy_to_user(argp, &dbgregs,
2624                                  sizeof(struct kvm_debugregs)))
2625                         break;
2626                 r = 0;
2627                 break;
2628         }
2629         case KVM_SET_DEBUGREGS: {
2630                 struct kvm_debugregs dbgregs;
2631
2632                 r = -EFAULT;
2633                 if (copy_from_user(&dbgregs, argp,
2634                                    sizeof(struct kvm_debugregs)))
2635                         break;
2636
2637                 r = kvm_vcpu_ioctl_x86_set_debugregs(vcpu, &dbgregs);
2638                 break;
2639         }
2640         case KVM_GET_XSAVE: {
2641                 u.xsave = kzalloc(sizeof(struct kvm_xsave), GFP_KERNEL);
2642                 r = -ENOMEM;
2643                 if (!u.xsave)
2644                         break;
2645
2646                 kvm_vcpu_ioctl_x86_get_xsave(vcpu, u.xsave);
2647
2648                 r = -EFAULT;
2649                 if (copy_to_user(argp, u.xsave, sizeof(struct kvm_xsave)))
2650                         break;
2651                 r = 0;
2652                 break;
2653         }
2654         case KVM_SET_XSAVE: {
2655                 u.xsave = kzalloc(sizeof(struct kvm_xsave), GFP_KERNEL);
2656                 r = -ENOMEM;
2657                 if (!u.xsave)
2658                         break;
2659
2660                 r = -EFAULT;
2661                 if (copy_from_user(u.xsave, argp, sizeof(struct kvm_xsave)))
2662                         break;
2663
2664                 r = kvm_vcpu_ioctl_x86_set_xsave(vcpu, u.xsave);
2665                 break;
2666         }
2667         case KVM_GET_XCRS: {
2668                 u.xcrs = kzalloc(sizeof(struct kvm_xcrs), GFP_KERNEL);
2669                 r = -ENOMEM;
2670                 if (!u.xcrs)
2671                         break;
2672
2673                 kvm_vcpu_ioctl_x86_get_xcrs(vcpu, u.xcrs);
2674
2675                 r = -EFAULT;
2676                 if (copy_to_user(argp, u.xcrs,
2677                                  sizeof(struct kvm_xcrs)))
2678                         break;
2679                 r = 0;
2680                 break;
2681         }
2682         case KVM_SET_XCRS: {
2683                 u.xcrs = kzalloc(sizeof(struct kvm_xcrs), GFP_KERNEL);
2684                 r = -ENOMEM;
2685                 if (!u.xcrs)
2686                         break;
2687
2688                 r = -EFAULT;
2689                 if (copy_from_user(u.xcrs, argp,
2690                                    sizeof(struct kvm_xcrs)))
2691                         break;
2692
2693                 r = kvm_vcpu_ioctl_x86_set_xcrs(vcpu, u.xcrs);
2694                 break;
2695         }
2696         default:
2697                 r = -EINVAL;
2698         }
2699 out:
2700         kfree(u.buffer);
2701         return r;
2702 }
2703
2704 static int kvm_vm_ioctl_set_tss_addr(struct kvm *kvm, unsigned long addr)
2705 {
2706         int ret;
2707
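        /*
         * The private TSS spans three pages; the bound below keeps the
         * whole region from wrapping the 32-bit address space.
         */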
2708         if (addr > (unsigned int)(-3 * PAGE_SIZE))
2709                 return -EINVAL;
2710         ret = kvm_x86_ops->set_tss_addr(kvm, addr);
2711         return ret;
2712 }
2713
2714 static int kvm_vm_ioctl_set_identity_map_addr(struct kvm *kvm,
2715                                               u64 ident_addr)
2716 {
2717         kvm->arch.ept_identity_map_addr = ident_addr;
2718         return 0;
2719 }
2720
2721 static int kvm_vm_ioctl_set_nr_mmu_pages(struct kvm *kvm,
2722                                           u32 kvm_nr_mmu_pages)
2723 {
2724         if (kvm_nr_mmu_pages < KVM_MIN_ALLOC_MMU_PAGES)
2725                 return -EINVAL;
2726
2727         mutex_lock(&kvm->slots_lock);
2728         spin_lock(&kvm->mmu_lock);
2729
2730         kvm_mmu_change_mmu_pages(kvm, kvm_nr_mmu_pages);
2731         kvm->arch.n_requested_mmu_pages = kvm_nr_mmu_pages;
2732
2733         spin_unlock(&kvm->mmu_lock);
2734         mutex_unlock(&kvm->slots_lock);
2735         return 0;
2736 }
2737
2738 static int kvm_vm_ioctl_get_nr_mmu_pages(struct kvm *kvm)
2739 {
2740         return kvm->arch.n_alloc_mmu_pages;
2741 }
2742
2743 gfn_t unalias_gfn_instantiation(struct kvm *kvm, gfn_t gfn)
2744 {
2745         int i;
2746         struct kvm_mem_alias *alias;
2747         struct kvm_mem_aliases *aliases;
2748
2749         aliases = kvm_aliases(kvm);
2750
2751         for (i = 0; i < aliases->naliases; ++i) {
2752                 alias = &aliases->aliases[i];
2753                 if (alias->flags & KVM_ALIAS_INVALID)
2754                         continue;
2755                 if (gfn >= alias->base_gfn
2756                     && gfn < alias->base_gfn + alias->npages)
2757                         return alias->target_gfn + gfn - alias->base_gfn;
2758         }
2759         return gfn;
2760 }
2761
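/*
 * Like unalias_gfn_instantiation() above, but aliases flagged
 * KVM_ALIAS_INVALID are still followed; invalidation only prevents
 * new instantiations while an alias is being torn down.
 */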
2762 gfn_t unalias_gfn(struct kvm *kvm, gfn_t gfn)
2763 {
2764         int i;
2765         struct kvm_mem_alias *alias;
2766         struct kvm_mem_aliases *aliases;
2767
2768         aliases = kvm_aliases(kvm);
2769
2770         for (i = 0; i < aliases->naliases; ++i) {
2771                 alias = &aliases->aliases[i];
2772                 if (gfn >= alias->base_gfn
2773                     && gfn < alias->base_gfn + alias->npages)
2774                         return alias->target_gfn + gfn - alias->base_gfn;
2775         }
2776         return gfn;
2777 }
2778
2779 /*
2780  * Set a new alias region.  Aliases map a portion of physical memory into
2781  * another portion.  This is useful for memory windows, for example the PC
2782  * VGA region.
2783  */
2784 static int kvm_vm_ioctl_set_memory_alias(struct kvm *kvm,
2785                                          struct kvm_memory_alias *alias)
2786 {
2787         int r, n;
2788         struct kvm_mem_alias *p;
2789         struct kvm_mem_aliases *aliases, *old_aliases;
2790
2791         r = -EINVAL;
2792         /* General sanity checks */
2793         if (alias->memory_size & (PAGE_SIZE - 1))
2794                 goto out;
2795         if (alias->guest_phys_addr & (PAGE_SIZE - 1))
2796                 goto out;
2797         if (alias->slot >= KVM_ALIAS_SLOTS)
2798                 goto out;
2799         if (alias->guest_phys_addr + alias->memory_size
2800             < alias->guest_phys_addr)
2801                 goto out;
2802         if (alias->target_phys_addr + alias->memory_size
2803             < alias->target_phys_addr)
2804                 goto out;
2805
2806         r = -ENOMEM;
2807         aliases = kzalloc(sizeof(struct kvm_mem_aliases), GFP_KERNEL);
2808         if (!aliases)
2809                 goto out;
2810
2811         mutex_lock(&kvm->slots_lock);
2812
2813         /* invalidate any gfn reference in case of deletion/shrinking */
2814         memcpy(aliases, kvm->arch.aliases, sizeof(struct kvm_mem_aliases));
2815         aliases->aliases[alias->slot].flags |= KVM_ALIAS_INVALID;
2816         old_aliases = kvm->arch.aliases;
2817         rcu_assign_pointer(kvm->arch.aliases, aliases);
2818         synchronize_srcu_expedited(&kvm->srcu);
2819         kvm_mmu_zap_all(kvm);
2820         kfree(old_aliases);
2821
2822         r = -ENOMEM;
2823         aliases = kzalloc(sizeof(struct kvm_mem_aliases), GFP_KERNEL);
2824         if (!aliases)
2825                 goto out_unlock;
2826
2827         memcpy(aliases, kvm->arch.aliases, sizeof(struct kvm_mem_aliases));
2828
2829         p = &aliases->aliases[alias->slot];
2830         p->base_gfn = alias->guest_phys_addr >> PAGE_SHIFT;
2831         p->npages = alias->memory_size >> PAGE_SHIFT;
2832         p->target_gfn = alias->target_phys_addr >> PAGE_SHIFT;
2833         p->flags &= ~(KVM_ALIAS_INVALID);
2834
2835         for (n = KVM_ALIAS_SLOTS; n > 0; --n)
2836                 if (aliases->aliases[n - 1].npages)
2837                         break;
2838         aliases->naliases = n;
2839
2840         old_aliases = kvm->arch.aliases;
2841         rcu_assign_pointer(kvm->arch.aliases, aliases);
2842         synchronize_srcu_expedited(&kvm->srcu);
2843         kfree(old_aliases);
2844         r = 0;
2845
2846 out_unlock:
2847         mutex_unlock(&kvm->slots_lock);
2848 out:
2849         return r;
2850 }
2851
2852 static int kvm_vm_ioctl_get_irqchip(struct kvm *kvm, struct kvm_irqchip *chip)
2853 {
2854         int r;
2855
2856         r = 0;
2857         switch (chip->chip_id) {
2858         case KVM_IRQCHIP_PIC_MASTER:
2859                 memcpy(&chip->chip.pic,
2860                         &pic_irqchip(kvm)->pics[0],
2861                         sizeof(struct kvm_pic_state));
2862                 break;
2863         case KVM_IRQCHIP_PIC_SLAVE:
2864                 memcpy(&chip->chip.pic,
2865                         &pic_irqchip(kvm)->pics[1],
2866                         sizeof(struct kvm_pic_state));
2867                 break;
2868         case KVM_IRQCHIP_IOAPIC:
2869                 r = kvm_get_ioapic(kvm, &chip->chip.ioapic);
2870                 break;
2871         default:
2872                 r = -EINVAL;
2873                 break;
2874         }
2875         return r;
2876 }
2877
2878 static int kvm_vm_ioctl_set_irqchip(struct kvm *kvm, struct kvm_irqchip *chip)
2879 {
2880         int r;
2881
2882         r = 0;
2883         switch (chip->chip_id) {
2884         case KVM_IRQCHIP_PIC_MASTER:
2885                 raw_spin_lock(&pic_irqchip(kvm)->lock);
2886                 memcpy(&pic_irqchip(kvm)->pics[0],
2887                         &chip->chip.pic,
2888                         sizeof(struct kvm_pic_state));
2889                 raw_spin_unlock(&pic_irqchip(kvm)->lock);
2890                 break;
2891         case KVM_IRQCHIP_PIC_SLAVE:
2892                 raw_spin_lock(&pic_irqchip(kvm)->lock);
2893                 memcpy(&pic_irqchip(kvm)->pics[1],
2894                         &chip->chip.pic,
2895                         sizeof(struct kvm_pic_state));
2896                 raw_spin_unlock(&pic_irqchip(kvm)->lock);
2897                 break;
2898         case KVM_IRQCHIP_IOAPIC:
2899                 r = kvm_set_ioapic(kvm, &chip->chip.ioapic);
2900                 break;
2901         default:
2902                 r = -EINVAL;
2903                 break;
2904         }
2905         kvm_pic_update_irq(pic_irqchip(kvm));
2906         return r;
2907 }
2908
2909 static int kvm_vm_ioctl_get_pit(struct kvm *kvm, struct kvm_pit_state *ps)
2910 {
2911         int r = 0;
2912
2913         mutex_lock(&kvm->arch.vpit->pit_state.lock);
2914         memcpy(ps, &kvm->arch.vpit->pit_state, sizeof(struct kvm_pit_state));
2915         mutex_unlock(&kvm->arch.vpit->pit_state.lock);
2916         return r;
2917 }
2918
2919 static int kvm_vm_ioctl_set_pit(struct kvm *kvm, struct kvm_pit_state *ps)
2920 {
2921         int r = 0;
2922
2923         mutex_lock(&kvm->arch.vpit->pit_state.lock);
2924         memcpy(&kvm->arch.vpit->pit_state, ps, sizeof(struct kvm_pit_state));
2925         kvm_pit_load_count(kvm, 0, ps->channels[0].count, 0);
2926         mutex_unlock(&kvm->arch.vpit->pit_state.lock);
2927         return r;
2928 }
2929
2930 static int kvm_vm_ioctl_get_pit2(struct kvm *kvm, struct kvm_pit_state2 *ps)
2931 {
2932         int r = 0;
2933
2934         mutex_lock(&kvm->arch.vpit->pit_state.lock);
2935         memcpy(ps->channels, &kvm->arch.vpit->pit_state.channels,
2936                 sizeof(ps->channels));
2937         ps->flags = kvm->arch.vpit->pit_state.flags;
2938         mutex_unlock(&kvm->arch.vpit->pit_state.lock);
2939         return r;
2940 }
2941
2942 static int kvm_vm_ioctl_set_pit2(struct kvm *kvm, struct kvm_pit_state2 *ps)
2943 {
2944         int r = 0, start = 0;
2945         u32 prev_legacy, cur_legacy;
2946         mutex_lock(&kvm->arch.vpit->pit_state.lock);
2947         prev_legacy = kvm->arch.vpit->pit_state.flags & KVM_PIT_FLAGS_HPET_LEGACY;
2948         cur_legacy = ps->flags & KVM_PIT_FLAGS_HPET_LEGACY;
2949         if (!prev_legacy && cur_legacy)
2950                 start = 1;
2951         memcpy(&kvm->arch.vpit->pit_state.channels, &ps->channels,
2952                sizeof(kvm->arch.vpit->pit_state.channels));
2953         kvm->arch.vpit->pit_state.flags = ps->flags;
2954         kvm_pit_load_count(kvm, 0, kvm->arch.vpit->pit_state.channels[0].count, start);
2955         mutex_unlock(&kvm->arch.vpit->pit_state.lock);
2956         return r;
2957 }
2958
2959 static int kvm_vm_ioctl_reinject(struct kvm *kvm,
2960                                  struct kvm_reinject_control *control)
2961 {
2962         if (!kvm->arch.vpit)
2963                 return -ENXIO;
2964         mutex_lock(&kvm->arch.vpit->pit_state.lock);
2965         kvm->arch.vpit->pit_state.pit_timer.reinject = control->pit_reinject;
2966         mutex_unlock(&kvm->arch.vpit->pit_state.lock);
2967         return 0;
2968 }
2969
2970 /*
2971  * Get (and clear) the dirty memory log for a memory slot.
2972  */
2973 int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
2974                                       struct kvm_dirty_log *log)
2975 {
2976         int r, i;
2977         struct kvm_memory_slot *memslot;
2978         unsigned long n;
2979         unsigned long is_dirty = 0;
2980
2981         mutex_lock(&kvm->slots_lock);
2982
2983         r = -EINVAL;
2984         if (log->slot >= KVM_MEMORY_SLOTS)
2985                 goto out;
2986
2987         memslot = &kvm->memslots->memslots[log->slot];
2988         r = -ENOENT;
2989         if (!memslot->dirty_bitmap)
2990                 goto out;
2991
2992         n = kvm_dirty_bitmap_bytes(memslot);
2993
2994         for (i = 0; !is_dirty && i < n/sizeof(long); i++)
2995                 is_dirty = memslot->dirty_bitmap[i];
2996
2997         /* If nothing is dirty, don't bother messing with page tables. */
2998         if (is_dirty) {
2999                 struct kvm_memslots *slots, *old_slots;
3000                 unsigned long *dirty_bitmap;
3001
3002                 spin_lock(&kvm->mmu_lock);
3003                 kvm_mmu_slot_remove_write_access(kvm, log->slot);
3004                 spin_unlock(&kvm->mmu_lock);
3005
3006                 r = -ENOMEM;
3007                 dirty_bitmap = vmalloc(n);
3008                 if (!dirty_bitmap)
3009                         goto out;
3010                 memset(dirty_bitmap, 0, n);
3011
3012                 r = -ENOMEM;
3013                 slots = kzalloc(sizeof(struct kvm_memslots), GFP_KERNEL);
3014                 if (!slots) {
3015                         vfree(dirty_bitmap);
3016                         goto out;
3017                 }
3018                 memcpy(slots, kvm->memslots, sizeof(struct kvm_memslots));
3019                 slots->memslots[log->slot].dirty_bitmap = dirty_bitmap;
3020
3021                 old_slots = kvm->memslots;
3022                 rcu_assign_pointer(kvm->memslots, slots);
3023                 synchronize_srcu_expedited(&kvm->srcu);
3024                 dirty_bitmap = old_slots->memslots[log->slot].dirty_bitmap;
3025                 kfree(old_slots);
3026
3027                 r = -EFAULT;
3028                 if (copy_to_user(log->dirty_bitmap, dirty_bitmap, n)) {
3029                         vfree(dirty_bitmap);
3030                         goto out;
3031                 }
3032                 vfree(dirty_bitmap);
3033         } else {
3034                 r = -EFAULT;
3035                 if (clear_user(log->dirty_bitmap, n))
3036                         goto out;
3037         }
3038
3039         r = 0;
3040 out:
3041         mutex_unlock(&kvm->slots_lock);
3042         return r;
3043 }
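
/*
 * A sketch of the userspace side of the ioctl above, for illustration
 * (vm_fd and npages are hypothetical names): the caller passes a slot
 * number and a buffer with one bit per page in the slot, and the kernel
 * fills it with the dirty bits accumulated since the previous call.
 *
 *	unsigned long bitmap[(npages + 63) / 64];	/\* 64-bit longs assumed *\/
 *	struct kvm_dirty_log log = {
 *		.slot = 0,
 *		.dirty_bitmap = bitmap,
 *	};
 *	if (ioctl(vm_fd, KVM_GET_DIRTY_LOG, &log) < 0)
 *		perror("KVM_GET_DIRTY_LOG");
 */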
3044
3045 long kvm_arch_vm_ioctl(struct file *filp,
3046                        unsigned int ioctl, unsigned long arg)
3047 {
3048         struct kvm *kvm = filp->private_data;
3049         void __user *argp = (void __user *)arg;
3050         int r = -ENOTTY;
3051         /*
3052          * This union makes it completely explicit to gcc-3.x
3053          * that these two variables' stack usage should be
3054          * combined, not added together.
3055          */
3056         union {
3057                 struct kvm_pit_state ps;
3058                 struct kvm_pit_state2 ps2;
3059                 struct kvm_memory_alias alias;
3060                 struct kvm_pit_config pit_config;
3061         } u;
3062
3063         switch (ioctl) {
3064         case KVM_SET_TSS_ADDR:
3065                 r = kvm_vm_ioctl_set_tss_addr(kvm, arg);
3066                 if (r < 0)
3067                         goto out;
3068                 break;
3069         case KVM_SET_IDENTITY_MAP_ADDR: {
3070                 u64 ident_addr;
3071
3072                 r = -EFAULT;
3073                 if (copy_from_user(&ident_addr, argp, sizeof ident_addr))
3074                         goto out;
3075                 r = kvm_vm_ioctl_set_identity_map_addr(kvm, ident_addr);
3076                 if (r < 0)
3077                         goto out;
3078                 break;
3079         }
3080         case KVM_SET_MEMORY_REGION: {
3081                 struct kvm_memory_region kvm_mem;
3082                 struct kvm_userspace_memory_region kvm_userspace_mem;
3083
3084                 r = -EFAULT;
3085                 if (copy_from_user(&kvm_mem, argp, sizeof kvm_mem))
3086                         goto out;
3087                 kvm_userspace_mem.slot = kvm_mem.slot;
3088                 kvm_userspace_mem.flags = kvm_mem.flags;
3089                 kvm_userspace_mem.guest_phys_addr = kvm_mem.guest_phys_addr;
3090                 kvm_userspace_mem.memory_size = kvm_mem.memory_size;
3091                 r = kvm_vm_ioctl_set_memory_region(kvm, &kvm_userspace_mem, 0);
3092                 if (r)
3093                         goto out;
3094                 break;
3095         }
3096         case KVM_SET_NR_MMU_PAGES:
3097                 r = kvm_vm_ioctl_set_nr_mmu_pages(kvm, arg);
3098                 if (r)
3099                         goto out;
3100                 break;
3101         case KVM_GET_NR_MMU_PAGES:
3102                 r = kvm_vm_ioctl_get_nr_mmu_pages(kvm);
3103                 break;
3104         case KVM_SET_MEMORY_ALIAS:
3105                 r = -EFAULT;
3106                 if (copy_from_user(&u.alias, argp, sizeof(struct kvm_memory_alias)))
3107                         goto out;
3108                 r = kvm_vm_ioctl_set_memory_alias(kvm, &u.alias);
3109                 if (r)
3110                         goto out;
3111                 break;
3112         case KVM_CREATE_IRQCHIP: {
3113                 struct kvm_pic *vpic;
3114
3115                 mutex_lock(&kvm->lock);
3116                 r = -EEXIST;
3117                 if (kvm->arch.vpic)
3118                         goto create_irqchip_unlock;
3119                 r = -ENOMEM;
3120                 vpic = kvm_create_pic(kvm);
3121                 if (vpic) {
3122                         r = kvm_ioapic_init(kvm);
3123                         if (r) {
3124                                 kvm_io_bus_unregister_dev(kvm, KVM_PIO_BUS,
3125                                                           &vpic->dev);
3126                                 kfree(vpic);
3127                                 goto create_irqchip_unlock;
3128                         }
3129                 } else
3130                         goto create_irqchip_unlock;
3131                 smp_wmb();
3132                 kvm->arch.vpic = vpic;
3133                 smp_wmb();
3134                 r = kvm_setup_default_irq_routing(kvm);
3135                 if (r) {
3136                         mutex_lock(&kvm->irq_lock);
3137                         kvm_ioapic_destroy(kvm);
3138                         kvm_destroy_pic(kvm);
3139                         mutex_unlock(&kvm->irq_lock);
3140                 }
3141         create_irqchip_unlock:
3142                 mutex_unlock(&kvm->lock);
3143                 break;
3144         }
3145         case KVM_CREATE_PIT:
3146                 u.pit_config.flags = KVM_PIT_SPEAKER_DUMMY;
3147                 goto create_pit;
3148         case KVM_CREATE_PIT2:
3149                 r = -EFAULT;
3150                 if (copy_from_user(&u.pit_config, argp,
3151                                    sizeof(struct kvm_pit_config)))
3152                         goto out;
3153         create_pit:
3154                 mutex_lock(&kvm->slots_lock);
3155                 r = -EEXIST;
3156                 if (kvm->arch.vpit)
3157                         goto create_pit_unlock;
3158                 r = -ENOMEM;
3159                 kvm->arch.vpit = kvm_create_pit(kvm, u.pit_config.flags);
3160                 if (kvm->arch.vpit)
3161                         r = 0;
3162         create_pit_unlock:
3163                 mutex_unlock(&kvm->slots_lock);
3164                 break;
3165         case KVM_IRQ_LINE_STATUS:
3166         case KVM_IRQ_LINE: {
3167                 struct kvm_irq_level irq_event;
3168
3169                 r = -EFAULT;
3170                 if (copy_from_user(&irq_event, argp, sizeof irq_event))
3171                         goto out;
3172                 r = -ENXIO;
3173                 if (irqchip_in_kernel(kvm)) {
3174                         __s32 status;
3175                         status = kvm_set_irq(kvm, KVM_USERSPACE_IRQ_SOURCE_ID,
3176                                         irq_event.irq, irq_event.level);
3177                         if (ioctl == KVM_IRQ_LINE_STATUS) {
3178                                 r = -EFAULT;
3179                                 irq_event.status = status;
3180                                 if (copy_to_user(argp, &irq_event,
3181                                                         sizeof irq_event))
3182                                         goto out;
3183                         }
3184                         r = 0;
3185                 }
3186                 break;
3187         }
3188         case KVM_GET_IRQCHIP: {
3189                 /* 0: PIC master, 1: PIC slave, 2: IOAPIC */
3190                 struct kvm_irqchip *chip = kmalloc(sizeof(*chip), GFP_KERNEL);
3191
3192                 r = -ENOMEM;
3193                 if (!chip)
3194                         goto out;
3195                 r = -EFAULT;
3196                 if (copy_from_user(chip, argp, sizeof *chip))
3197                         goto get_irqchip_out;
3198                 r = -ENXIO;
3199                 if (!irqchip_in_kernel(kvm))
3200                         goto get_irqchip_out;
3201                 r = kvm_vm_ioctl_get_irqchip(kvm, chip);
3202                 if (r)
3203                         goto get_irqchip_out;
3204                 r = -EFAULT;
3205                 if (copy_to_user(argp, chip, sizeof *chip))
3206                         goto get_irqchip_out;
3207                 r = 0;
3208         get_irqchip_out:
3209                 kfree(chip);
3210                 if (r)
3211                         goto out;
3212                 break;
3213         }
3214         case KVM_SET_IRQCHIP: {
3215                 /* 0: PIC master, 1: PIC slave, 2: IOAPIC */
3216                 struct kvm_irqchip *chip = kmalloc(sizeof(*chip), GFP_KERNEL);
3217
3218                 r = -ENOMEM;
3219                 if (!chip)
3220                         goto out;
3221                 r = -EFAULT;
3222                 if (copy_from_user(chip, argp, sizeof *chip))
3223                         goto set_irqchip_out;
3224                 r = -ENXIO;
3225                 if (!irqchip_in_kernel(kvm))
3226                         goto set_irqchip_out;
3227                 r = kvm_vm_ioctl_set_irqchip(kvm, chip);
3228                 if (r)
3229                         goto set_irqchip_out;
3230                 r = 0;
3231         set_irqchip_out:
3232                 kfree(chip);
3233                 if (r)
3234                         goto out;
3235                 break;
3236         }
3237         case KVM_GET_PIT: {
3238                 r = -EFAULT;
3239                 if (copy_from_user(&u.ps, argp, sizeof(struct kvm_pit_state)))
3240                         goto out;
3241                 r = -ENXIO;
3242                 if (!kvm->arch.vpit)
3243                         goto out;
3244                 r = kvm_vm_ioctl_get_pit(kvm, &u.ps);
3245                 if (r)
3246                         goto out;
3247                 r = -EFAULT;
3248                 if (copy_to_user(argp, &u.ps, sizeof(struct kvm_pit_state)))
3249                         goto out;
3250                 r = 0;
3251                 break;
3252         }
3253         case KVM_SET_PIT: {
3254                 r = -EFAULT;
3255                 if (copy_from_user(&u.ps, argp, sizeof u.ps))
3256                         goto out;
3257                 r = -ENXIO;
3258                 if (!kvm->arch.vpit)
3259                         goto out;
3260                 r = kvm_vm_ioctl_set_pit(kvm, &u.ps);
3261                 if (r)
3262                         goto out;
3263                 r = 0;
3264                 break;
3265         }
3266         case KVM_GET_PIT2: {
3267                 r = -ENXIO;
3268                 if (!kvm->arch.vpit)
3269                         goto out;
3270                 r = kvm_vm_ioctl_get_pit2(kvm, &u.ps2);
3271                 if (r)
3272                         goto out;
3273                 r = -EFAULT;
3274                 if (copy_to_user(argp, &u.ps2, sizeof(u.ps2)))
3275                         goto out;
3276                 r = 0;
3277                 break;
3278         }
3279         case KVM_SET_PIT2: {
3280                 r = -EFAULT;
3281                 if (copy_from_user(&u.ps2, argp, sizeof(u.ps2)))
3282                         goto out;
3283                 r = -ENXIO;
3284                 if (!kvm->arch.vpit)
3285                         goto out;
3286                 r = kvm_vm_ioctl_set_pit2(kvm, &u.ps2);
3287                 if (r)
3288                         goto out;
3289                 r = 0;
3290                 break;
3291         }
3292         case KVM_REINJECT_CONTROL: {
3293                 struct kvm_reinject_control control;
3294                 r = -EFAULT;
3295                 if (copy_from_user(&control, argp, sizeof(control)))
3296                         goto out;
3297                 r = kvm_vm_ioctl_reinject(kvm, &control);
3298                 if (r)
3299                         goto out;
3300                 r = 0;
3301                 break;
3302         }
3303         case KVM_XEN_HVM_CONFIG: {
3304                 r = -EFAULT;
3305                 if (copy_from_user(&kvm->arch.xen_hvm_config, argp,
3306                                    sizeof(struct kvm_xen_hvm_config)))
3307                         goto out;
3308                 r = -EINVAL;
3309                 if (kvm->arch.xen_hvm_config.flags)
3310                         goto out;
3311                 r = 0;
3312                 break;
3313         }
3314         case KVM_SET_CLOCK: {
3315                 struct timespec now;
3316                 struct kvm_clock_data user_ns;
3317                 u64 now_ns;
3318                 s64 delta;
3319
3320                 r = -EFAULT;
3321                 if (copy_from_user(&user_ns, argp, sizeof(user_ns)))
3322                         goto out;
3323
3324                 r = -EINVAL;
3325                 if (user_ns.flags)
3326                         goto out;
3327
3328                 r = 0;
3329                 ktime_get_ts(&now);
3330                 now_ns = timespec_to_ns(&now);
3331                 delta = user_ns.clock - now_ns;
3332                 kvm->arch.kvmclock_offset = delta;
3333                 break;
3334         }
3335         case KVM_GET_CLOCK: {
3336                 struct timespec now;
3337                 struct kvm_clock_data user_ns;
3338                 u64 now_ns;
3339
3340                 ktime_get_ts(&now);
3341                 now_ns = timespec_to_ns(&now);
3342                 user_ns.clock = kvm->arch.kvmclock_offset + now_ns;
3343                 user_ns.flags = 0;
3344
3345                 r = -EFAULT;
3346                 if (copy_to_user(argp, &user_ns, sizeof(user_ns)))
3347                         goto out;
3348                 r = 0;
3349                 break;
3350         }
3351
3352         default:
3353                 ;
3354         }
3355 out:
3356         return r;
3357 }
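
/*
 * Note on the KVM_SET_CLOCK/KVM_GET_CLOCK cases above: only an offset is
 * stored.  SET computes kvmclock_offset = user_ns.clock - now_ns, and GET
 * returns now_ns + kvmclock_offset.  Worked example: with a host time of
 * 100s and a requested guest clock of 40s, the stored offset is -60s; a
 * GET one second later (host time 101s) yields 41s, so the guest clock
 * advances at host rate from the value userspace set.
 */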
3358
3359 static void kvm_init_msr_list(void)
3360 {
3361         u32 dummy[2];
3362         unsigned i, j;
3363
3364         /* skip the first MSRs in the list; they are KVM-specific */
3365         for (i = j = KVM_SAVE_MSRS_BEGIN; i < ARRAY_SIZE(msrs_to_save); i++) {
3366                 if (rdmsr_safe(msrs_to_save[i], &dummy[0], &dummy[1]) < 0)
3367                         continue;
3368                 if (j < i)
3369                         msrs_to_save[j] = msrs_to_save[i];
3370                 j++;
3371         }
3372         num_msrs_to_save = j;
3373 }
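
/*
 * Worked example for the compaction above, assuming KVM_SAVE_MSRS_BEGIN
 * is 1 and msrs_to_save starts as { KVMCLOCK, A, B, C } (placeholder
 * names): if rdmsr_safe() faults on B, C is copied down into B's slot
 * and num_msrs_to_save ends up 3, so only { KVMCLOCK, A, C } are ever
 * reported to userspace.
 */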
3374
3375 static int vcpu_mmio_write(struct kvm_vcpu *vcpu, gpa_t addr, int len,
3376                            const void *v)
3377 {
3378         if (vcpu->arch.apic &&
3379             !kvm_iodevice_write(&vcpu->arch.apic->dev, addr, len, v))
3380                 return 0;
3381
3382         return kvm_io_bus_write(vcpu->kvm, KVM_MMIO_BUS, addr, len, v);
3383 }
3384
3385 static int vcpu_mmio_read(struct kvm_vcpu *vcpu, gpa_t addr, int len, void *v)
3386 {
3387         if (vcpu->arch.apic &&
3388             !kvm_iodevice_read(&vcpu->arch.apic->dev, addr, len, v))
3389                 return 0;
3390
3391         return kvm_io_bus_read(vcpu->kvm, KVM_MMIO_BUS, addr, len, v);
3392 }
3393
3394 static void kvm_set_segment(struct kvm_vcpu *vcpu,
3395                         struct kvm_segment *var, int seg)
3396 {
3397         kvm_x86_ops->set_segment(vcpu, var, seg);
3398 }
3399
3400 void kvm_get_segment(struct kvm_vcpu *vcpu,
3401                      struct kvm_segment *var, int seg)
3402 {
3403         kvm_x86_ops->get_segment(vcpu, var, seg);
3404 }
3405
3406 gpa_t kvm_mmu_gva_to_gpa_read(struct kvm_vcpu *vcpu, gva_t gva, u32 *error)
3407 {
3408         u32 access = (kvm_x86_ops->get_cpl(vcpu) == 3) ? PFERR_USER_MASK : 0;
3409         return vcpu->arch.mmu.gva_to_gpa(vcpu, gva, access, error);
3410 }
3411
3412 gpa_t kvm_mmu_gva_to_gpa_fetch(struct kvm_vcpu *vcpu, gva_t gva, u32 *error)
3413 {
3414         u32 access = (kvm_x86_ops->get_cpl(vcpu) == 3) ? PFERR_USER_MASK : 0;
3415         access |= PFERR_FETCH_MASK;
3416         return vcpu->arch.mmu.gva_to_gpa(vcpu, gva, access, error);
3417 }
3418
3419 gpa_t kvm_mmu_gva_to_gpa_write(struct kvm_vcpu *vcpu, gva_t gva, u32 *error)
3420 {
3421         u32 access = (kvm_x86_ops->get_cpl(vcpu) == 3) ? PFERR_USER_MASK : 0;
3422         access |= PFERR_WRITE_MASK;
3423         return vcpu->arch.mmu.gva_to_gpa(vcpu, gva, access, error);
3424 }
3425
3426 /* used to access any guest's mapped memory without checking CPL */
3427 gpa_t kvm_mmu_gva_to_gpa_system(struct kvm_vcpu *vcpu, gva_t gva, u32 *error)
3428 {
3429         return vcpu->arch.mmu.gva_to_gpa(vcpu, gva, 0, error);
3430 }
3431
3432 static int kvm_read_guest_virt_helper(gva_t addr, void *val, unsigned int bytes,
3433                                       struct kvm_vcpu *vcpu, u32 access,
3434                                       u32 *error)
3435 {
3436         void *data = val;
3437         int r = X86EMUL_CONTINUE;
3438
3439         while (bytes) {
3440                 gpa_t gpa = vcpu->arch.mmu.gva_to_gpa(vcpu, addr, access, error);
3441                 unsigned offset = addr & (PAGE_SIZE-1);
3442                 unsigned toread = min(bytes, (unsigned)PAGE_SIZE - offset);
3443                 int ret;
3444
3445                 if (gpa == UNMAPPED_GVA) {
3446                         r = X86EMUL_PROPAGATE_FAULT;
3447                         goto out;
3448                 }
3449                 ret = kvm_read_guest(vcpu->kvm, gpa, data, toread);
3450                 if (ret < 0) {
3451                         r = X86EMUL_IO_NEEDED;
3452                         goto out;
3453                 }
3454
3455                 bytes -= toread;
3456                 data += toread;
3457                 addr += toread;
3458         }
3459 out:
3460         return r;
3461 }
3462
3463 /* used for instruction fetching */
3464 static int kvm_fetch_guest_virt(gva_t addr, void *val, unsigned int bytes,
3465                                 struct kvm_vcpu *vcpu, u32 *error)
3466 {
3467         u32 access = (kvm_x86_ops->get_cpl(vcpu) == 3) ? PFERR_USER_MASK : 0;
3468         return kvm_read_guest_virt_helper(addr, val, bytes, vcpu,
3469                                           access | PFERR_FETCH_MASK, error);
3470 }
3471
3472 static int kvm_read_guest_virt(gva_t addr, void *val, unsigned int bytes,
3473                                struct kvm_vcpu *vcpu, u32 *error)
3474 {
3475         u32 access = (kvm_x86_ops->get_cpl(vcpu) == 3) ? PFERR_USER_MASK : 0;
3476         return kvm_read_guest_virt_helper(addr, val, bytes, vcpu, access,
3477                                           error);
3478 }
3479
3480 static int kvm_read_guest_virt_system(gva_t addr, void *val, unsigned int bytes,
3481                                struct kvm_vcpu *vcpu, u32 *error)
3482 {
3483         return kvm_read_guest_virt_helper(addr, val, bytes, vcpu, 0, error);
3484 }
3485
3486 static int kvm_write_guest_virt_system(gva_t addr, void *val,
3487                                        unsigned int bytes,
3488                                        struct kvm_vcpu *vcpu,
3489                                        u32 *error)
3490 {
3491         void *data = val;
3492         int r = X86EMUL_CONTINUE;
3493
3494         while (bytes) {
3495                 gpa_t gpa = vcpu->arch.mmu.gva_to_gpa(vcpu, addr,
3496                                                        PFERR_WRITE_MASK, error);
3497                 unsigned offset = addr & (PAGE_SIZE-1);
3498                 unsigned towrite = min(bytes, (unsigned)PAGE_SIZE - offset);
3499                 int ret;
3500
3501                 if (gpa == UNMAPPED_GVA) {
3502                         r = X86EMUL_PROPAGATE_FAULT;
3503                         goto out;
3504                 }
3505                 ret = kvm_write_guest(vcpu->kvm, gpa, data, towrite);
3506                 if (ret < 0) {
3507                         r = X86EMUL_IO_NEEDED;
3508                         goto out;
3509                 }
3510
3511                 bytes -= towrite;
3512                 data += towrite;
3513                 addr += towrite;
3514         }
3515 out:
3516         return r;
3517 }
3518
3519 static int emulator_read_emulated(unsigned long addr,
3520                                   void *val,
3521                                   unsigned int bytes,
3522                                   unsigned int *error_code,
3523                                   struct kvm_vcpu *vcpu)
3524 {
3525         gpa_t                 gpa;
3526
3527         if (vcpu->mmio_read_completed) {
3528                 memcpy(val, vcpu->mmio_data, bytes);
3529                 trace_kvm_mmio(KVM_TRACE_MMIO_READ, bytes,
3530                                vcpu->mmio_phys_addr, *(u64 *)val);
3531                 vcpu->mmio_read_completed = 0;
3532                 return X86EMUL_CONTINUE;
3533         }
3534
3535         gpa = kvm_mmu_gva_to_gpa_read(vcpu, addr, error_code);
3536
3537         if (gpa == UNMAPPED_GVA)
3538                 return X86EMUL_PROPAGATE_FAULT;
3539
3540         /* For APIC access vmexit */
3541         if ((gpa & PAGE_MASK) == APIC_DEFAULT_PHYS_BASE)
3542                 goto mmio;
3543
3544         if (kvm_read_guest_virt(addr, val, bytes, vcpu, NULL)
3545                                 == X86EMUL_CONTINUE)
3546                 return X86EMUL_CONTINUE;
3547
3548 mmio:
3549         /*
3550          * Is this MMIO handled locally?
3551          */
3552         if (!vcpu_mmio_read(vcpu, gpa, bytes, val)) {
3553                 trace_kvm_mmio(KVM_TRACE_MMIO_READ, bytes, gpa, *(u64 *)val);
3554                 return X86EMUL_CONTINUE;
3555         }
3556
3557         trace_kvm_mmio(KVM_TRACE_MMIO_READ_UNSATISFIED, bytes, gpa, 0);
3558
3559         vcpu->mmio_needed = 1;
3560         vcpu->run->exit_reason = KVM_EXIT_MMIO;
3561         vcpu->run->mmio.phys_addr = vcpu->mmio_phys_addr = gpa;
3562         vcpu->run->mmio.len = vcpu->mmio_size = bytes;
3563         vcpu->run->mmio.is_write = vcpu->mmio_is_write = 0;
3564
3565         return X86EMUL_IO_NEEDED;
3566 }
3567
3568 int emulator_write_phys(struct kvm_vcpu *vcpu, gpa_t gpa,
3569                           const void *val, int bytes)
3570 {
3571         int ret;
3572
3573         ret = kvm_write_guest(vcpu->kvm, gpa, val, bytes);
3574         if (ret < 0)
3575                 return 0;
3576         kvm_mmu_pte_write(vcpu, gpa, val, bytes, 1);
3577         return 1;
3578 }
3579
3580 static int emulator_write_emulated_onepage(unsigned long addr,
3581                                            const void *val,
3582                                            unsigned int bytes,
3583                                            unsigned int *error_code,
3584                                            struct kvm_vcpu *vcpu)
3585 {
3586         gpa_t                 gpa;
3587
3588         gpa = kvm_mmu_gva_to_gpa_write(vcpu, addr, error_code);
3589
3590         if (gpa == UNMAPPED_GVA)
3591                 return X86EMUL_PROPAGATE_FAULT;
3592
3593         /* For APIC access vmexit */
3594         if ((gpa & PAGE_MASK) == APIC_DEFAULT_PHYS_BASE)
3595                 goto mmio;
3596
3597         if (emulator_write_phys(vcpu, gpa, val, bytes))
3598                 return X86EMUL_CONTINUE;
3599
3600 mmio:
3601         trace_kvm_mmio(KVM_TRACE_MMIO_WRITE, bytes, gpa, *(u64 *)val);
3602         /*
3603          * Is this MMIO handled locally?
3604          */
3605         if (!vcpu_mmio_write(vcpu, gpa, bytes, val))
3606                 return X86EMUL_CONTINUE;
3607
3608         vcpu->mmio_needed = 1;
3609         vcpu->run->exit_reason = KVM_EXIT_MMIO;
3610         vcpu->run->mmio.phys_addr = vcpu->mmio_phys_addr = gpa;
3611         vcpu->run->mmio.len = vcpu->mmio_size = bytes;
3612         vcpu->run->mmio.is_write = vcpu->mmio_is_write = 1;
3613         memcpy(vcpu->run->mmio.data, val, bytes);
3614
3615         return X86EMUL_CONTINUE;
3616 }
3617
3618 int emulator_write_emulated(unsigned long addr,
3619                             const void *val,
3620                             unsigned int bytes,
3621                             unsigned int *error_code,
3622                             struct kvm_vcpu *vcpu)
3623 {
3624         /* Crossing a page boundary? */
3625         if (((addr + bytes - 1) ^ addr) & PAGE_MASK) {
3626                 int rc, now;
3627
3628                 now = -addr & ~PAGE_MASK;
3629                 rc = emulator_write_emulated_onepage(addr, val, now, error_code,
3630                                                      vcpu);
3631                 if (rc != X86EMUL_CONTINUE)
3632                         return rc;
3633                 addr += now;
3634                 val += now;
3635                 bytes -= now;
3636         }
3637         return emulator_write_emulated_onepage(addr, val, bytes, error_code,
3638                                                vcpu);
3639 }
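
/*
 * The split arithmetic above: -addr & ~PAGE_MASK is the number of bytes
 * from addr up to the next page boundary.  With 4K pages, addr = 0x1ffd
 * and bytes = 8 gives now = -0x1ffd & 0xfff = 3, so the first call
 * writes the 3 bytes ending at 0x1fff and the second call writes the
 * remaining 5 bytes starting at 0x2000.
 */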
3640
3641 #define CMPXCHG_TYPE(t, ptr, old, new) \
3642         (cmpxchg((t *)(ptr), *(t *)(old), *(t *)(new)) == *(t *)(old))
3643
3644 #ifdef CONFIG_X86_64
3645 #  define CMPXCHG64(ptr, old, new) CMPXCHG_TYPE(u64, ptr, old, new)
3646 #else
3647 #  define CMPXCHG64(ptr, old, new) \
3648         (cmpxchg64((u64 *)(ptr), *(u64 *)(old), *(u64 *)(new)) == *(u64 *)(old))
3649 #endif
3650
3651 static int emulator_cmpxchg_emulated(unsigned long addr,
3652                                      const void *old,
3653                                      const void *new,
3654                                      unsigned int bytes,
3655                                      unsigned int *error_code,
3656                                      struct kvm_vcpu *vcpu)
3657 {
3658         gpa_t gpa;
3659         struct page *page;
3660         char *kaddr;
3661         bool exchanged;
3662
3663         /* a guest's cmpxchg8b has to be emulated atomically */
3664         if (bytes > 8 || (bytes & (bytes - 1)))
3665                 goto emul_write;
3666
3667         gpa = kvm_mmu_gva_to_gpa_write(vcpu, addr, NULL);
3668
3669         if (gpa == UNMAPPED_GVA ||
3670             (gpa & PAGE_MASK) == APIC_DEFAULT_PHYS_BASE)
3671                 goto emul_write;
3672
3673         if (((gpa + bytes - 1) & PAGE_MASK) != (gpa & PAGE_MASK))
3674                 goto emul_write;
3675
3676         page = gfn_to_page(vcpu->kvm, gpa >> PAGE_SHIFT);
3677
3678         kaddr = kmap_atomic(page, KM_USER0);
3679         kaddr += offset_in_page(gpa);
3680         switch (bytes) {
3681         case 1:
3682                 exchanged = CMPXCHG_TYPE(u8, kaddr, old, new);
3683                 break;
3684         case 2:
3685                 exchanged = CMPXCHG_TYPE(u16, kaddr, old, new);
3686                 break;
3687         case 4:
3688                 exchanged = CMPXCHG_TYPE(u32, kaddr, old, new);
3689                 break;
3690         case 8:
3691                 exchanged = CMPXCHG64(kaddr, old, new);
3692                 break;
3693         default:
3694                 BUG();
3695         }
3696         kunmap_atomic(kaddr, KM_USER0);
3697         kvm_release_page_dirty(page);
3698
3699         if (!exchanged)
3700                 return X86EMUL_CMPXCHG_FAILED;
3701
3702         kvm_mmu_pte_write(vcpu, gpa, new, bytes, 1);
3703
3704         return X86EMUL_CONTINUE;
3705
3706 emul_write:
3707         printk_once(KERN_WARNING "kvm: emulating exchange as write\n");
3708
3709         return emulator_write_emulated(addr, new, bytes, error_code, vcpu);
3710 }
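
/*
 * Note on the emul_write fallback above: when the compare-and-exchange
 * cannot be done atomically on a kernel mapping (size not a power of two
 * up to 8 bytes, an unmapped or APIC address, or a cross-page access),
 * it degrades to a plain emulated write.  That write is not atomic with
 * respect to other VCPUs, hence the one-time warning.
 */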
3711
3712 static int kernel_pio(struct kvm_vcpu *vcpu, void *pd)
3713 {
3714         /* TODO: string I/O for in-kernel devices */
3715         int r;
3716
3717         if (vcpu->arch.pio.in)
3718                 r = kvm_io_bus_read(vcpu->kvm, KVM_PIO_BUS, vcpu->arch.pio.port,
3719                                     vcpu->arch.pio.size, pd);
3720         else
3721                 r = kvm_io_bus_write(vcpu->kvm, KVM_PIO_BUS,
3722                                      vcpu->arch.pio.port, vcpu->arch.pio.size,
3723                                      pd);
3724         return r;
3725 }
3726
3727
3728 static int emulator_pio_in_emulated(int size, unsigned short port, void *val,
3729                              unsigned int count, struct kvm_vcpu *vcpu)
3730 {
3731         if (vcpu->arch.pio.count)
3732                 goto data_avail;
3733
3734         trace_kvm_pio(1, port, size, 1);
3735
3736         vcpu->arch.pio.port = port;
3737         vcpu->arch.pio.in = 1;
3738         vcpu->arch.pio.count  = count;
3739         vcpu->arch.pio.size = size;
3740
3741         if (!kernel_pio(vcpu, vcpu->arch.pio_data)) {
3742         data_avail:
3743                 memcpy(val, vcpu->arch.pio_data, size * count);
3744                 vcpu->arch.pio.count = 0;
3745                 return 1;
3746         }
3747
3748         vcpu->run->exit_reason = KVM_EXIT_IO;
3749         vcpu->run->io.direction = KVM_EXIT_IO_IN;
3750         vcpu->run->io.size = size;
3751         vcpu->run->io.data_offset = KVM_PIO_PAGE_OFFSET * PAGE_SIZE;
3752         vcpu->run->io.count = count;
3753         vcpu->run->io.port = port;
3754
3755         return 0;
3756 }
3757
3758 static int emulator_pio_out_emulated(int size, unsigned short port,
3759                               const void *val, unsigned int count,
3760                               struct kvm_vcpu *vcpu)
3761 {
3762         trace_kvm_pio(0, port, size, 1);
3763
3764         vcpu->arch.pio.port = port;
3765         vcpu->arch.pio.in = 0;
3766         vcpu->arch.pio.count = count;
3767         vcpu->arch.pio.size = size;
3768
3769         memcpy(vcpu->arch.pio_data, val, size * count);
3770
3771         if (!kernel_pio(vcpu, vcpu->arch.pio_data)) {
3772                 vcpu->arch.pio.count = 0;
3773                 return 1;
3774         }
3775
3776         vcpu->run->exit_reason = KVM_EXIT_IO;
3777         vcpu->run->io.direction = KVM_EXIT_IO_OUT;
3778         vcpu->run->io.size = size;
3779         vcpu->run->io.data_offset = KVM_PIO_PAGE_OFFSET * PAGE_SIZE;
3780         vcpu->run->io.count = count;
3781         vcpu->run->io.port = port;
3782
3783         return 0;
3784 }
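
/*
 * When kernel_pio() cannot satisfy a request, the two helpers above
 * forward it to userspace through vcpu->run.  A hypothetical VMM loop
 * would handle it roughly as follows, with run being the mmap'ed
 * struct kvm_run of the vcpu fd:
 *
 *	if (run->exit_reason == KVM_EXIT_IO) {
 *		char *data = (char *)run + run->io.data_offset;
 *		if (run->io.direction == KVM_EXIT_IO_IN)
 *			read_from_device(data, run->io.size, run->io.count);
 *		else
 *			write_to_device(data, run->io.size, run->io.count);
 *	}
 *
 * read_from_device()/write_to_device() stand in for device emulation.
 */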
3785
3786 static unsigned long get_segment_base(struct kvm_vcpu *vcpu, int seg)
3787 {
3788         return kvm_x86_ops->get_segment_base(vcpu, seg);
3789 }
3790
3791 int emulate_invlpg(struct kvm_vcpu *vcpu, gva_t address)
3792 {
3793         kvm_mmu_invlpg(vcpu, address);
3794         return X86EMUL_CONTINUE;
3795 }
3796
3797 int emulate_clts(struct kvm_vcpu *vcpu)
3798 {
3799         kvm_x86_ops->set_cr0(vcpu, kvm_read_cr0_bits(vcpu, ~X86_CR0_TS));
3800         kvm_x86_ops->fpu_activate(vcpu);
3801         return X86EMUL_CONTINUE;
3802 }
3803
3804 int emulator_get_dr(int dr, unsigned long *dest, struct kvm_vcpu *vcpu)
3805 {
3806         return _kvm_get_dr(vcpu, dr, dest);
3807 }
3808
3809 int emulator_set_dr(int dr, unsigned long value, struct kvm_vcpu *vcpu)
3810 {
3811
3812         return __kvm_set_dr(vcpu, dr, value);
3813 }
3814
3815 static u64 mk_cr_64(u64 curr_cr, u32 new_val)
3816 {
3817         return (curr_cr & ~((1ULL << 32) - 1)) | new_val;
3818 }
3819
3820 static unsigned long emulator_get_cr(int cr, struct kvm_vcpu *vcpu)
3821 {
3822         unsigned long value;
3823
3824         switch (cr) {
3825         case 0:
3826                 value = kvm_read_cr0(vcpu);
3827                 break;
3828         case 2:
3829                 value = vcpu->arch.cr2;
3830                 break;
3831         case 3:
3832                 value = vcpu->arch.cr3;
3833                 break;
3834         case 4:
3835                 value = kvm_read_cr4(vcpu);
3836                 break;
3837         case 8:
3838                 value = kvm_get_cr8(vcpu);
3839                 break;
3840         default:
3841                 vcpu_printf(vcpu, "%s: unexpected cr %u\n", __func__, cr);
3842                 return 0;
3843         }
3844
3845         return value;
3846 }
3847
3848 static int emulator_set_cr(int cr, unsigned long val, struct kvm_vcpu *vcpu)
3849 {
3850         int res = 0;
3851
3852         switch (cr) {
3853         case 0:
3854                 res = kvm_set_cr0(vcpu, mk_cr_64(kvm_read_cr0(vcpu), val));
3855                 break;
3856         case 2:
3857                 vcpu->arch.cr2 = val;
3858                 break;
3859         case 3:
3860                 res = kvm_set_cr3(vcpu, val);
3861                 break;
3862         case 4:
3863                 res = kvm_set_cr4(vcpu, mk_cr_64(kvm_read_cr4(vcpu), val));
3864                 break;
3865         case 8:
3866                 res = __kvm_set_cr8(vcpu, val & 0xfUL);
3867                 break;
3868         default:
3869                 vcpu_printf(vcpu, "%s: unexpected cr %u\n", __func__, cr);
3870                 res = -1;
3871         }
3872
3873         return res;
3874 }
3875
3876 static int emulator_get_cpl(struct kvm_vcpu *vcpu)
3877 {
3878         return kvm_x86_ops->get_cpl(vcpu);
3879 }
3880
3881 static void emulator_get_gdt(struct desc_ptr *dt, struct kvm_vcpu *vcpu)
3882 {
3883         kvm_x86_ops->get_gdt(vcpu, dt);
3884 }
3885
3886 static unsigned long emulator_get_cached_segment_base(int seg,
3887                                                       struct kvm_vcpu *vcpu)
3888 {
3889         return get_segment_base(vcpu, seg);
3890 }
3891
3892 static bool emulator_get_cached_descriptor(struct desc_struct *desc, int seg,
3893                                            struct kvm_vcpu *vcpu)
3894 {
3895         struct kvm_segment var;
3896
3897         kvm_get_segment(vcpu, &var, seg);
3898
3899         if (var.unusable)
3900                 return false;
3901
3902         if (var.g)
3903                 var.limit >>= 12;
3904         set_desc_limit(desc, var.limit);
3905         set_desc_base(desc, (unsigned long)var.base);
3906         desc->type = var.type;
3907         desc->s = var.s;
3908         desc->dpl = var.dpl;
3909         desc->p = var.present;
3910         desc->avl = var.avl;
3911         desc->l = var.l;
3912         desc->d = var.db;
3913         desc->g = var.g;
3914
3915         return true;
3916 }
3917
3918 static void emulator_set_cached_descriptor(struct desc_struct *desc, int seg,
3919                                            struct kvm_vcpu *vcpu)
3920 {
3921         struct kvm_segment var;
3922
3923         /* needed to preserve selector */
3924         kvm_get_segment(vcpu, &var, seg);
3925
3926         var.base = get_desc_base(desc);
3927         var.limit = get_desc_limit(desc);
3928         if (desc->g)
3929                 var.limit = (var.limit << 12) | 0xfff;
3930         var.type = desc->type;
3931         var.present = desc->p;
3932         var.dpl = desc->dpl;
3933         var.db = desc->d;
3934         var.s = desc->s;
3935         var.l = desc->l;
3936         var.g = desc->g;
3937         var.avl = desc->avl;
3939         var.unusable = !var.present;
3940         var.padding = 0;
3941
3942         kvm_set_segment(vcpu, &var, seg);
3943         return;
3944 }
3945
3946 static u16 emulator_get_segment_selector(int seg, struct kvm_vcpu *vcpu)
3947 {
3948         struct kvm_segment kvm_seg;
3949
3950         kvm_get_segment(vcpu, &kvm_seg, seg);
3951         return kvm_seg.selector;
3952 }
3953
3954 static void emulator_set_segment_selector(u16 sel, int seg,
3955                                           struct kvm_vcpu *vcpu)
3956 {
3957         struct kvm_segment kvm_seg;
3958
3959         kvm_get_segment(vcpu, &kvm_seg, seg);
3960         kvm_seg.selector = sel;
3961         kvm_set_segment(vcpu, &kvm_seg, seg);
3962 }
3963
3964 static struct x86_emulate_ops emulate_ops = {
3965         .read_std            = kvm_read_guest_virt_system,
3966         .write_std           = kvm_write_guest_virt_system,
3967         .fetch               = kvm_fetch_guest_virt,
3968         .read_emulated       = emulator_read_emulated,
3969         .write_emulated      = emulator_write_emulated,
3970         .cmpxchg_emulated    = emulator_cmpxchg_emulated,
3971         .pio_in_emulated     = emulator_pio_in_emulated,
3972         .pio_out_emulated    = emulator_pio_out_emulated,
3973         .get_cached_descriptor = emulator_get_cached_descriptor,
3974         .set_cached_descriptor = emulator_set_cached_descriptor,
3975         .get_segment_selector = emulator_get_segment_selector,
3976         .set_segment_selector = emulator_set_segment_selector,
3977         .get_cached_segment_base = emulator_get_cached_segment_base,
3978         .get_gdt             = emulator_get_gdt,
3979         .get_cr              = emulator_get_cr,
3980         .set_cr              = emulator_set_cr,
3981         .cpl                 = emulator_get_cpl,
3982         .get_dr              = emulator_get_dr,
3983         .set_dr              = emulator_set_dr,
3984         .set_msr             = kvm_set_msr,
3985         .get_msr             = kvm_get_msr,
3986 };
3987
3988 static void cache_all_regs(struct kvm_vcpu *vcpu)
3989 {
3990         kvm_register_read(vcpu, VCPU_REGS_RAX);
3991         kvm_register_read(vcpu, VCPU_REGS_RSP);
3992         kvm_register_read(vcpu, VCPU_REGS_RIP);
3993         vcpu->arch.regs_dirty = ~0;
3994 }
3995
3996 static void toggle_interruptibility(struct kvm_vcpu *vcpu, u32 mask)
3997 {
3998         u32 int_shadow = kvm_x86_ops->get_interrupt_shadow(vcpu, mask);
3999         /*
4000          * An sti; sti sequence only disables interrupts for the first
4001          * instruction. So, if the last instruction, be it emulated or
4002          * not, left the system with the INT_STI flag enabled, it
4003          * means that the last instruction was an sti, and we should
4004          * not leave the flag on in this case. The same goes for mov ss.
4005          */
4006         if (!(int_shadow & mask))
4007                 kvm_x86_ops->set_interrupt_shadow(vcpu, mask);
4008 }
4009
4010 static void inject_emulated_exception(struct kvm_vcpu *vcpu)
4011 {
4012         struct x86_emulate_ctxt *ctxt = &vcpu->arch.emulate_ctxt;
4013         if (ctxt->exception == PF_VECTOR)
4014                 kvm_inject_page_fault(vcpu, ctxt->cr2, ctxt->error_code);
4015         else if (ctxt->error_code_valid)
4016                 kvm_queue_exception_e(vcpu, ctxt->exception, ctxt->error_code);
4017         else
4018                 kvm_queue_exception(vcpu, ctxt->exception);
4019 }
4020
4021 static int handle_emulation_failure(struct kvm_vcpu *vcpu)
4022 {
4023         ++vcpu->stat.insn_emulation_fail;
4024         trace_kvm_emulate_insn_failed(vcpu);
4025         vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
4026         vcpu->run->internal.suberror = KVM_INTERNAL_ERROR_EMULATION;
4027         vcpu->run->internal.ndata = 0;
4028         kvm_queue_exception(vcpu, UD_VECTOR);
4029         return EMULATE_FAIL;
4030 }
4031
4032 int emulate_instruction(struct kvm_vcpu *vcpu,
4033                         unsigned long cr2,
4034                         u16 error_code,
4035                         int emulation_type)
4036 {
4037         int r;
4038         struct decode_cache *c = &vcpu->arch.emulate_ctxt.decode;
4039
4040         kvm_clear_exception_queue(vcpu);
4041         vcpu->arch.mmio_fault_cr2 = cr2;
4042         /*
4043          * TODO: fix emulate.c to use guest_read/write_register
4044          * instead of direct ->regs accesses; that can save hundreds of
4045          * cycles on Intel for instructions that don't read/change RSP,
4046          * for example.
4047          */
4048         cache_all_regs(vcpu);
4049
4050         if (!(emulation_type & EMULTYPE_NO_DECODE)) {
4051                 int cs_db, cs_l;
4052                 kvm_x86_ops->get_cs_db_l_bits(vcpu, &cs_db, &cs_l);
4053
4054                 vcpu->arch.emulate_ctxt.vcpu = vcpu;
4055                 vcpu->arch.emulate_ctxt.eflags = kvm_x86_ops->get_rflags(vcpu);
4056                 vcpu->arch.emulate_ctxt.eip = kvm_rip_read(vcpu);
4057                 vcpu->arch.emulate_ctxt.mode =
4058                         (!is_protmode(vcpu)) ? X86EMUL_MODE_REAL :
4059                         (vcpu->arch.emulate_ctxt.eflags & X86_EFLAGS_VM)
4060                         ? X86EMUL_MODE_VM86 : cs_l
4061                         ? X86EMUL_MODE_PROT64 : cs_db
4062                         ? X86EMUL_MODE_PROT32 : X86EMUL_MODE_PROT16;
4063                 memset(c, 0, sizeof(struct decode_cache));
4064                 memcpy(c->regs, vcpu->arch.regs, sizeof c->regs);
4065                 vcpu->arch.emulate_ctxt.interruptibility = 0;
4066                 vcpu->arch.emulate_ctxt.exception = -1;
4067
4068                 r = x86_decode_insn(&vcpu->arch.emulate_ctxt, &emulate_ops);
4069                 trace_kvm_emulate_insn_start(vcpu);
4070
4071                 /* Only allow emulation of specific instructions on #UD
4072                  * (namely VMMCALL, sysenter, sysexit, syscall) */
4073                 if (emulation_type & EMULTYPE_TRAP_UD) {
4074                         if (!c->twobyte)
4075                                 return EMULATE_FAIL;
4076                         switch (c->b) {
4077                         case 0x01: /* VMMCALL */
4078                                 if (c->modrm_mod != 3 || c->modrm_rm != 1)
4079                                         return EMULATE_FAIL;
4080                                 break;
4081                         case 0x34: /* sysenter */
4082                         case 0x35: /* sysexit */
4083                                 if (c->modrm_mod != 0 || c->modrm_rm != 0)
4084                                         return EMULATE_FAIL;
4085                                 break;
4086                         case 0x05: /* syscall */
4087                                 if (c->modrm_mod != 0 || c->modrm_rm != 0)
4088                                         return EMULATE_FAIL;
4089                                 break;
4090                         default:
4091                                 return EMULATE_FAIL;
4092                         }
4093
4094                         if (!(c->modrm_reg == 0 || c->modrm_reg == 3))
4095                                 return EMULATE_FAIL;
4096                 }
4097
4098                 ++vcpu->stat.insn_emulation;
4099                 if (r)  {
4100                         if (kvm_mmu_unprotect_page_virt(vcpu, cr2))
4101                                 return EMULATE_DONE;
4102                         if (emulation_type & EMULTYPE_SKIP)
4103                                 return EMULATE_FAIL;
4104                         return handle_emulation_failure(vcpu);
4105                 }
4106         }
4107
4108         if (emulation_type & EMULTYPE_SKIP) {
4109                 kvm_rip_write(vcpu, vcpu->arch.emulate_ctxt.decode.eip);
4110                 return EMULATE_DONE;
4111         }
4112
4113         /* this is needed for the vmware backdoor interface to work since
4114            it changes register values during I/O operations */
4115         memcpy(c->regs, vcpu->arch.regs, sizeof c->regs);
4116
4117 restart:
4118         r = x86_emulate_insn(&vcpu->arch.emulate_ctxt, &emulate_ops);
4119
4120         if (r) { /* emulation failed */
4121                 /*
4122                  * If emulation failed due to an access to a shadowed
4123                  * page table, try to unshadow the page and re-enter the
4124                  * guest to let the CPU execute the instruction.
4125                  */
4126                 if (kvm_mmu_unprotect_page_virt(vcpu, cr2))
4127                         return EMULATE_DONE;
4128
4129                 return handle_emulation_failure(vcpu);
4130         }
4131
4132         toggle_interruptibility(vcpu, vcpu->arch.emulate_ctxt.interruptibility);
4133         kvm_x86_ops->set_rflags(vcpu, vcpu->arch.emulate_ctxt.eflags);
4134         memcpy(vcpu->arch.regs, c->regs, sizeof c->regs);
4135         kvm_rip_write(vcpu, vcpu->arch.emulate_ctxt.eip);
4136
4137         if (vcpu->arch.emulate_ctxt.exception >= 0) {
4138                 inject_emulated_exception(vcpu);
4139                 return EMULATE_DONE;
4140         }
4141
4142         if (vcpu->arch.pio.count) {
4143                 if (!vcpu->arch.pio.in)
4144                         vcpu->arch.pio.count = 0;
4145                 return EMULATE_DO_MMIO;
4146         }
4147
4148         if (vcpu->mmio_needed) {
4149                 if (vcpu->mmio_is_write)
4150                         vcpu->mmio_needed = 0;
4151                 return EMULATE_DO_MMIO;
4152         }
4153
4154         if (vcpu->arch.emulate_ctxt.restart)
4155                 goto restart;
4156
4157         return EMULATE_DONE;
4158 }
4159 EXPORT_SYMBOL_GPL(emulate_instruction);
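
/*
 * Return-value protocol of emulate_instruction(), as implemented above:
 * EMULATE_DONE means the instruction was handled and the guest may
 * resume; EMULATE_DO_MMIO means an MMIO or PIO access still has to be
 * completed in userspace; EMULATE_FAIL means the instruction could not
 * be decoded or executed (on the execution path,
 * handle_emulation_failure() prepares an internal-error exit and queues
 * #UD before this is returned).
 */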
4160
4161 int kvm_fast_pio_out(struct kvm_vcpu *vcpu, int size, unsigned short port)
4162 {
4163         unsigned long val = kvm_register_read(vcpu, VCPU_REGS_RAX);
4164         int ret = emulator_pio_out_emulated(size, port, &val, 1, vcpu);
4165         /* do not return to emulator after return from userspace */
4166         vcpu->arch.pio.count = 0;
4167         return ret;
4168 }
4169 EXPORT_SYMBOL_GPL(kvm_fast_pio_out);
4170
4171 static void bounce_off(void *info)
4172 {
4173         /* nothing */
4174 }
4175
4176 static int kvmclock_cpufreq_notifier(struct notifier_block *nb, unsigned long val,
4177                                      void *data)
4178 {
4179         struct cpufreq_freqs *freq = data;
4180         struct kvm *kvm;
4181         struct kvm_vcpu *vcpu;
4182         int i, send_ipi = 0;
4183
4184         if (val == CPUFREQ_PRECHANGE && freq->old > freq->new)
4185                 return 0;
4186         if (val == CPUFREQ_POSTCHANGE && freq->old < freq->new)
4187                 return 0;
4188         per_cpu(cpu_tsc_khz, freq->cpu) = freq->new;
4189
4190         spin_lock(&kvm_lock);
4191         list_for_each_entry(kvm, &vm_list, vm_list) {
4192                 kvm_for_each_vcpu(i, vcpu, kvm) {
4193                         if (vcpu->cpu != freq->cpu)
4194                                 continue;
4195                         if (!kvm_request_guest_time_update(vcpu))
4196                                 continue;
4197                         if (vcpu->cpu != smp_processor_id())
4198                                 send_ipi++;
4199                 }
4200         }
4201         spin_unlock(&kvm_lock);
4202
4203         if (freq->old < freq->new && send_ipi) {
4204                 /*
4205                  * We upscale the frequency.  We must make sure the guest
4206                  * doesn't see old kvmclock values while running with
4207                  * the new frequency; otherwise we risk the guest seeing
4208                  * time go backwards.
4209                  *
4210                  * If we update the frequency for another CPU (which might
4211                  * be in guest context), send an interrupt to kick it out
4212                  * of guest context.  The next time guest context is
4213                  * entered, kvmclock will be updated, so the guest will
4214                  * not see stale values.
4215                  */
4216                 smp_call_function_single(freq->cpu, bounce_off, NULL, 1);
4217         }
4218         return 0;
4219 }
4220
4221 static struct notifier_block kvmclock_cpufreq_notifier_block = {
4222         .notifier_call  = kvmclock_cpufreq_notifier
4223 };
4224
4225 static void kvm_timer_init(void)
4226 {
4227         int cpu;
4228
4229         if (!boot_cpu_has(X86_FEATURE_CONSTANT_TSC)) {
4230                 cpufreq_register_notifier(&kvmclock_cpufreq_notifier_block,
4231                                           CPUFREQ_TRANSITION_NOTIFIER);
4232                 for_each_online_cpu(cpu) {
4233                         unsigned long khz = cpufreq_get(cpu);
4234                         if (!khz)
4235                                 khz = tsc_khz;
4236                         per_cpu(cpu_tsc_khz, cpu) = khz;
4237                 }
4238         } else {
4239                 for_each_possible_cpu(cpu)
4240                         per_cpu(cpu_tsc_khz, cpu) = tsc_khz;
4241         }
4242 }
4243
4244 static DEFINE_PER_CPU(struct kvm_vcpu *, current_vcpu);
4245
4246 static int kvm_is_in_guest(void)
4247 {
4248         return percpu_read(current_vcpu) != NULL;
4249 }
4250
4251 static int kvm_is_user_mode(void)
4252 {
4253         int user_mode = 3;
4254
4255         if (percpu_read(current_vcpu))
4256                 user_mode = kvm_x86_ops->get_cpl(percpu_read(current_vcpu));
4257
4258         return user_mode != 0;
4259 }
4260
4261 static unsigned long kvm_get_guest_ip(void)
4262 {
4263         unsigned long ip = 0;
4264
4265         if (percpu_read(current_vcpu))
4266                 ip = kvm_rip_read(percpu_read(current_vcpu));
4267
4268         return ip;
4269 }
4270
4271 static struct perf_guest_info_callbacks kvm_guest_cbs = {
4272         .is_in_guest            = kvm_is_in_guest,
4273         .is_user_mode           = kvm_is_user_mode,
4274         .get_guest_ip           = kvm_get_guest_ip,
4275 };
4276
4277 void kvm_before_handle_nmi(struct kvm_vcpu *vcpu)
4278 {
4279         percpu_write(current_vcpu, vcpu);
4280 }
4281 EXPORT_SYMBOL_GPL(kvm_before_handle_nmi);
4282
4283 void kvm_after_handle_nmi(struct kvm_vcpu *vcpu)
4284 {
4285         percpu_write(current_vcpu, NULL);
4286 }
4287 EXPORT_SYMBOL_GPL(kvm_after_handle_nmi);
4288
4289 int kvm_arch_init(void *opaque)
4290 {
4291         int r;
4292         struct kvm_x86_ops *ops = (struct kvm_x86_ops *)opaque;
4293
4294         if (kvm_x86_ops) {
4295                 printk(KERN_ERR "kvm: already loaded the other module\n");
4296                 r = -EEXIST;
4297                 goto out;
4298         }
4299
4300         if (!ops->cpu_has_kvm_support()) {
4301                 printk(KERN_ERR "kvm: no hardware support\n");
4302                 r = -EOPNOTSUPP;
4303                 goto out;
4304         }
4305         if (ops->disabled_by_bios()) {
4306                 printk(KERN_ERR "kvm: disabled by bios\n");
4307                 r = -EOPNOTSUPP;
4308                 goto out;
4309         }
4310
4311         r = kvm_mmu_module_init();
4312         if (r)
4313                 goto out;
4314
4315         kvm_init_msr_list();
4316
4317         kvm_x86_ops = ops;
4318         kvm_mmu_set_nonpresent_ptes(0ull, 0ull);
4319         kvm_mmu_set_base_ptes(PT_PRESENT_MASK);
4320         kvm_mmu_set_mask_ptes(PT_USER_MASK, PT_ACCESSED_MASK,
4321                         PT_DIRTY_MASK, PT64_NX_MASK, 0);
4322
4323         kvm_timer_init();
4324
4325         perf_register_guest_info_callbacks(&kvm_guest_cbs);
4326
4327         if (cpu_has_xsave)
4328                 host_xcr0 = xgetbv(XCR_XFEATURE_ENABLED_MASK);
4329
4330         return 0;
4331
4332 out:
4333         return r;
4334 }
4335
4336 void kvm_arch_exit(void)
4337 {
4338         perf_unregister_guest_info_callbacks(&kvm_guest_cbs);
4339
4340         if (!boot_cpu_has(X86_FEATURE_CONSTANT_TSC))
4341                 cpufreq_unregister_notifier(&kvmclock_cpufreq_notifier_block,
4342                                             CPUFREQ_TRANSITION_NOTIFIER);
4343         kvm_x86_ops = NULL;
4344         kvm_mmu_module_exit();
4345 }
4346
4347 int kvm_emulate_halt(struct kvm_vcpu *vcpu)
4348 {
4349         ++vcpu->stat.halt_exits;
4350         if (irqchip_in_kernel(vcpu->kvm)) {
4351                 vcpu->arch.mp_state = KVM_MP_STATE_HALTED;
4352                 return 1;
4353         } else {
4354                 vcpu->run->exit_reason = KVM_EXIT_HLT;
4355                 return 0;
4356         }
4357 }
4358 EXPORT_SYMBOL_GPL(kvm_emulate_halt);
4359
4360 static inline gpa_t hc_gpa(struct kvm_vcpu *vcpu, unsigned long a0,
4361                            unsigned long a1)
4362 {
4363         if (is_long_mode(vcpu))
4364                 return a0;
4365         else
4366                 return a0 | ((gpa_t)a1 << 32);
4367 }
4368
4369 int kvm_hv_hypercall(struct kvm_vcpu *vcpu)
4370 {
4371         u64 param, ingpa, outgpa, ret;
4372         uint16_t code, rep_idx, rep_cnt, res = HV_STATUS_SUCCESS, rep_done = 0;
4373         bool fast, longmode;
4374         int cs_db, cs_l;
4375
4376         /*
4377          * a hypercall generates #UD from non-zero CPL or real mode,
4378          * per the Hyper-V spec
4379          */
4380         if (kvm_x86_ops->get_cpl(vcpu) != 0 || !is_protmode(vcpu)) {
4381                 kvm_queue_exception(vcpu, UD_VECTOR);
4382                 return 0;
4383         }
4384
4385         kvm_x86_ops->get_cs_db_l_bits(vcpu, &cs_db, &cs_l);
4386         longmode = is_long_mode(vcpu) && cs_l == 1;
4387
4388         if (!longmode) {
4389                 param = ((u64)kvm_register_read(vcpu, VCPU_REGS_RDX) << 32) |
4390                         (kvm_register_read(vcpu, VCPU_REGS_RAX) & 0xffffffff);
4391                 ingpa = ((u64)kvm_register_read(vcpu, VCPU_REGS_RBX) << 32) |
4392                         (kvm_register_read(vcpu, VCPU_REGS_RCX) & 0xffffffff);
4393                 outgpa = ((u64)kvm_register_read(vcpu, VCPU_REGS_RDI) << 32) |
4394                         (kvm_register_read(vcpu, VCPU_REGS_RSI) & 0xffffffff);
4395         }
4396 #ifdef CONFIG_X86_64
4397         else {
4398                 param = kvm_register_read(vcpu, VCPU_REGS_RCX);
4399                 ingpa = kvm_register_read(vcpu, VCPU_REGS_RDX);
4400                 outgpa = kvm_register_read(vcpu, VCPU_REGS_R8);
4401         }
4402 #endif
4403
4404         code = param & 0xffff;
4405         fast = (param >> 16) & 0x1;
4406         rep_cnt = (param >> 32) & 0xfff;
4407         rep_idx = (param >> 48) & 0xfff;
4408
4409         trace_kvm_hv_hypercall(code, fast, rep_cnt, rep_idx, ingpa, outgpa);
4410
4411         switch (code) {
4412         case HV_X64_HV_NOTIFY_LONG_SPIN_WAIT:
4413                 kvm_vcpu_on_spin(vcpu);
4414                 break;
4415         default:
4416                 res = HV_STATUS_INVALID_HYPERCALL_CODE;
4417                 break;
4418         }
4419
4420         ret = res | (((u64)rep_done & 0xfff) << 32);
4421         if (longmode) {
4422                 kvm_register_write(vcpu, VCPU_REGS_RAX, ret);
4423         } else {
4424                 kvm_register_write(vcpu, VCPU_REGS_RDX, ret >> 32);
4425                 kvm_register_write(vcpu, VCPU_REGS_RAX, ret & 0xffffffff);
4426         }
4427
4428         return 1;
4429 }
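
/*
 * Layout of the hypercall input value decoded above: bits 15:0 hold the
 * call code, bit 16 the fast-call flag, bits 43:32 the rep count and
 * bits 59:48 the rep start index.  For example, param =
 * 0x0005000100010008 decodes to code = 8, fast = 1, rep_cnt = 1 and
 * rep_idx = 5.
 */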
4430
4431 int kvm_emulate_hypercall(struct kvm_vcpu *vcpu)
4432 {
4433         unsigned long nr, a0, a1, a2, a3, ret;
4434         int r = 1;
4435
4436         if (kvm_hv_hypercall_enabled(vcpu->kvm))
4437                 return kvm_hv_hypercall(vcpu);
4438
4439         nr = kvm_register_read(vcpu, VCPU_REGS_RAX);
4440         a0 = kvm_register_read(vcpu, VCPU_REGS_RBX);
4441         a1 = kvm_register_read(vcpu, VCPU_REGS_RCX);
4442         a2 = kvm_register_read(vcpu, VCPU_REGS_RDX);
4443         a3 = kvm_register_read(vcpu, VCPU_REGS_RSI);
4444
4445         trace_kvm_hypercall(nr, a0, a1, a2, a3);
4446
4447         if (!is_long_mode(vcpu)) {
4448                 nr &= 0xFFFFFFFF;
4449                 a0 &= 0xFFFFFFFF;
4450                 a1 &= 0xFFFFFFFF;
4451                 a2 &= 0xFFFFFFFF;
4452                 a3 &= 0xFFFFFFFF;
4453         }
4454
4455         if (kvm_x86_ops->get_cpl(vcpu) != 0) {
4456                 ret = -KVM_EPERM;
4457                 goto out;
4458         }
4459
4460         switch (nr) {
4461         case KVM_HC_VAPIC_POLL_IRQ:
4462                 ret = 0;
4463                 break;
4464         case KVM_HC_MMU_OP:
4465                 r = kvm_pv_mmu_op(vcpu, a0, hc_gpa(vcpu, a1, a2), &ret);
4466                 break;
4467         default:
4468                 ret = -KVM_ENOSYS;
4469                 break;
4470         }
4471 out:
4472         kvm_register_write(vcpu, VCPU_REGS_RAX, ret);
4473         ++vcpu->stat.hypercalls;
4474         return r;
4475 }
4476 EXPORT_SYMBOL_GPL(kvm_emulate_hypercall);
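
/*
 * Guest-side sketch of the ABI consumed above, for illustration: the
 * call number goes in RAX, up to four arguments in RBX, RCX, RDX and
 * RSI, and the return value comes back in RAX.  A guest on Intel could
 * issue KVM_HC_VAPIC_POLL_IRQ like this (AMD uses vmmcall instead):
 *
 *	long ret;
 *	asm volatile("vmcall"
 *		     : "=a"(ret)
 *		     : "a"(KVM_HC_VAPIC_POLL_IRQ)
 *		     : "memory");
 */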
4477
4478 int kvm_fix_hypercall(struct kvm_vcpu *vcpu)
4479 {
4480         char instruction[3];
4481         unsigned long rip = kvm_rip_read(vcpu);
4482
4483         /*
4484          * Blow out the MMU so that no other VCPU keeps an active mapping;
4485          * this ensures that the updated hypercall appears atomically
4486          * across all VCPUs.
4487          */
4488         kvm_mmu_zap_all(vcpu->kvm);
4489
4490         kvm_x86_ops->patch_hypercall(vcpu, instruction);
4491
4492         return emulator_write_emulated(rip, instruction, 3, NULL, vcpu);
4493 }
4494
4495 void realmode_lgdt(struct kvm_vcpu *vcpu, u16 limit, unsigned long base)
4496 {
4497         struct desc_ptr dt = { limit, base };
4498
4499         kvm_x86_ops->set_gdt(vcpu, &dt);
4500 }
4501
4502 void realmode_lidt(struct kvm_vcpu *vcpu, u16 limit, unsigned long base)
4503 {
4504         struct desc_ptr dt = { limit, base };
4505
4506         kvm_x86_ops->set_idt(vcpu, &dt);
4507 }
4508
4509 static int move_to_next_stateful_cpuid_entry(struct kvm_vcpu *vcpu, int i)
4510 {
4511         struct kvm_cpuid_entry2 *e = &vcpu->arch.cpuid_entries[i];
4512         int j, nent = vcpu->arch.cpuid_nent;
4513
4514         e->flags &= ~KVM_CPUID_FLAG_STATE_READ_NEXT;
4515         /* when no next entry is found, the current entry[i] is reselected */
4516         for (j = i + 1; ; j = (j + 1) % nent) {
4517                 struct kvm_cpuid_entry2 *ej = &vcpu->arch.cpuid_entries[j];
4518                 if (ej->function == e->function) {
4519                         ej->flags |= KVM_CPUID_FLAG_STATE_READ_NEXT;
4520                         return j;
4521                 }
4522         }
4523         return 0; /* silence gcc, even though control never reaches here */
4524 }
4525
4526 /* Find an entry with a matching function, a matching index (if needed), and
4527  * that should be read next if stateful ("SIGNIFCANT" spelling is per the ABI header) */
4528 static int is_matching_cpuid_entry(struct kvm_cpuid_entry2 *e,
4529         u32 function, u32 index)
4530 {
4531         if (e->function != function)
4532                 return 0;
4533         if ((e->flags & KVM_CPUID_FLAG_SIGNIFCANT_INDEX) && e->index != index)
4534                 return 0;
4535         if ((e->flags & KVM_CPUID_FLAG_STATEFUL_FUNC) &&
4536             !(e->flags & KVM_CPUID_FLAG_STATE_READ_NEXT))
4537                 return 0;
4538         return 1;
4539 }
4540
4541 struct kvm_cpuid_entry2 *kvm_find_cpuid_entry(struct kvm_vcpu *vcpu,
4542                                               u32 function, u32 index)
4543 {
4544         int i;
4545         struct kvm_cpuid_entry2 *best = NULL;
4546
4547         for (i = 0; i < vcpu->arch.cpuid_nent; ++i) {
4548                 struct kvm_cpuid_entry2 *e;
4549
4550                 e = &vcpu->arch.cpuid_entries[i];
4551                 if (is_matching_cpuid_entry(e, function, index)) {
4552                         if (e->flags & KVM_CPUID_FLAG_STATEFUL_FUNC)
4553                                 move_to_next_stateful_cpuid_entry(vcpu, i);
4554                         best = e;
4555                         break;
4556                 }
4557                 /*
4558                  * Both basic or both extended?  If so, keep the highest
4559                  * same-range leaf as a fallback result.
4560                  */
4560                 if (((e->function ^ function) & 0x80000000) == 0)
4561                         if (!best || e->function > best->function)
4562                                 best = e;
4563         }
4564         return best;
4565 }
4566 EXPORT_SYMBOL_GPL(kvm_find_cpuid_entry);
4567
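/*
 * Report the guest's maximum physical address width: CPUID 0x80000008
 * EAX[7:0] when that leaf exists, otherwise the architectural default of
 * 36 bits.
 */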
4568 int cpuid_maxphyaddr(struct kvm_vcpu *vcpu)
4569 {
4570         struct kvm_cpuid_entry2 *best;
4571
4572         best = kvm_find_cpuid_entry(vcpu, 0x80000000, 0);
4573         if (!best || best->eax < 0x80000008)
4574                 goto not_found;
4575         best = kvm_find_cpuid_entry(vcpu, 0x80000008, 0);
4576         if (best)
4577                 return best->eax & 0xff;
4578 not_found:
4579         return 36;
4580 }
4581
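/*
 * Emulate the CPUID instruction: look up the guest's cpuid table with
 * RAX/RCX as function/index, defaulting all four output registers to zero
 * when no entry matches, then skip past the emulated instruction.
 */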
4582 void kvm_emulate_cpuid(struct kvm_vcpu *vcpu)
4583 {
4584         u32 function, index;
4585         struct kvm_cpuid_entry2 *best;
4586
4587         function = kvm_register_read(vcpu, VCPU_REGS_RAX);
4588         index = kvm_register_read(vcpu, VCPU_REGS_RCX);
4589         kvm_register_write(vcpu, VCPU_REGS_RAX, 0);
4590         kvm_register_write(vcpu, VCPU_REGS_RBX, 0);
4591         kvm_register_write(vcpu, VCPU_REGS_RCX, 0);
4592         kvm_register_write(vcpu, VCPU_REGS_RDX, 0);
4593         best = kvm_find_cpuid_entry(vcpu, function, index);
4594         if (best) {
4595                 kvm_register_write(vcpu, VCPU_REGS_RAX, best->eax);
4596                 kvm_register_write(vcpu, VCPU_REGS_RBX, best->ebx);
4597                 kvm_register_write(vcpu, VCPU_REGS_RCX, best->ecx);
4598                 kvm_register_write(vcpu, VCPU_REGS_RDX, best->edx);
4599         }
4600         kvm_x86_ops->skip_emulated_instruction(vcpu);
4601         trace_kvm_cpuid(function,
4602                         kvm_register_read(vcpu, VCPU_REGS_RAX),
4603                         kvm_register_read(vcpu, VCPU_REGS_RBX),
4604                         kvm_register_read(vcpu, VCPU_REGS_RCX),
4605                         kvm_register_read(vcpu, VCPU_REGS_RDX));
4606 }
4607 EXPORT_SYMBOL_GPL(kvm_emulate_cpuid);
4608
4609 /*
4610  * Check whether userspace requested an interrupt window and whether
4611  * the interrupt window is currently open.
4612  *
4613  * No need to exit to userspace if we already have an interrupt queued.
4614  */
4615 static int dm_request_for_irq_injection(struct kvm_vcpu *vcpu)
4616 {
4617         return (!irqchip_in_kernel(vcpu->kvm) && !kvm_cpu_has_interrupt(vcpu) &&
4618                 vcpu->run->request_interrupt_window &&
4619                 kvm_arch_interrupt_allowed(vcpu));
4620 }
4621
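/*
 * Mirror interrupt-related vcpu state into the shared kvm_run area so
 * userspace can tell whether it may inject an interrupt on the next
 * KVM_RUN invocation.
 */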
4622 static void post_kvm_run_save(struct kvm_vcpu *vcpu)
4623 {
4624         struct kvm_run *kvm_run = vcpu->run;
4625
4626         kvm_run->if_flag = (kvm_get_rflags(vcpu) & X86_EFLAGS_IF) != 0;
4627         kvm_run->cr8 = kvm_get_cr8(vcpu);
4628         kvm_run->apic_base = kvm_get_apic_base(vcpu);
4629         if (irqchip_in_kernel(vcpu->kvm))
4630                 kvm_run->ready_for_interrupt_injection = 1;
4631         else
4632                 kvm_run->ready_for_interrupt_injection =
4633                         kvm_arch_interrupt_allowed(vcpu) &&
4634                         !kvm_cpu_has_interrupt(vcpu) &&
4635                         !kvm_event_needs_reinjection(vcpu);
4636 }
4637
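/* Pin the guest page backing the vapic so it can be synced on exits. */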
4638 static void vapic_enter(struct kvm_vcpu *vcpu)
4639 {
4640         struct kvm_lapic *apic = vcpu->arch.apic;
4641         struct page *page;
4642
4643         if (!apic || !apic->vapic_addr)
4644                 return;
4645
4646         page = gfn_to_page(vcpu->kvm, apic->vapic_addr >> PAGE_SHIFT);
4647
4648         vcpu->arch.apic->vapic_page = page;
4649 }
4650
4651 static void vapic_exit(struct kvm_vcpu *vcpu)
4652 {
4653         struct kvm_lapic *apic = vcpu->arch.apic;
4654         int idx;
4655
4656         if (!apic || !apic->vapic_addr)
4657                 return;
4658
4659         idx = srcu_read_lock(&vcpu->kvm->srcu);
4660         kvm_release_page_dirty(apic->vapic_page);
4661         mark_page_dirty(vcpu->kvm, apic->vapic_addr >> PAGE_SHIFT);
4662         srcu_read_unlock(&vcpu->kvm->srcu, idx);
4663 }
4664
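/*
 * Recompute how the backend should intercept guest CR8 (TPR) accesses,
 * passing down the current TPR and the highest pending interrupt
 * priority.  When a vapic page is in use the TPR is synced through that
 * page instead, so max_irr is left at -1.
 */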
4665 static void update_cr8_intercept(struct kvm_vcpu *vcpu)
4666 {
4667         int max_irr, tpr;
4668
4669         if (!kvm_x86_ops->update_cr8_intercept)
4670                 return;
4671
4672         if (!vcpu->arch.apic)
4673                 return;
4674
4675         if (!vcpu->arch.apic->vapic_addr)
4676                 max_irr = kvm_lapic_find_highest_irr(vcpu);
4677         else
4678                 max_irr = -1;
4679
4680         if (max_irr != -1)
4681                 max_irr >>= 4;
4682
4683         tpr = kvm_lapic_get_cr8(vcpu);
4684
4685         kvm_x86_ops->update_cr8_intercept(vcpu, tpr, max_irr);
4686 }
4687
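/*
 * Injection priority: re-deliver a pending exception first, then re-inject
 * a previously delivered NMI or interrupt, and only then inject a new NMI
 * or external interrupt, each gated on the corresponding window being open.
 */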
4688 static void inject_pending_event(struct kvm_vcpu *vcpu)
4689 {
4690         /* try to reinject previous events if any */
4691         if (vcpu->arch.exception.pending) {
4692                 trace_kvm_inj_exception(vcpu->arch.exception.nr,
4693                                         vcpu->arch.exception.has_error_code,
4694                                         vcpu->arch.exception.error_code);
4695                 kvm_x86_ops->queue_exception(vcpu, vcpu->arch.exception.nr,
4696                                           vcpu->arch.exception.has_error_code,
4697                                           vcpu->arch.exception.error_code,
4698                                           vcpu->arch.exception.reinject);
4699                 return;
4700         }
4701
4702         if (vcpu->arch.nmi_injected) {
4703                 kvm_x86_ops->set_nmi(vcpu);
4704                 return;
4705         }
4706
4707         if (vcpu->arch.interrupt.pending) {
4708                 kvm_x86_ops->set_irq(vcpu);
4709                 return;
4710         }
4711
4712         /* try to inject new event if pending */
4713         if (vcpu->arch.nmi_pending) {
4714                 if (kvm_x86_ops->nmi_allowed(vcpu)) {
4715                         vcpu->arch.nmi_pending = false;
4716                         vcpu->arch.nmi_injected = true;
4717                         kvm_x86_ops->set_nmi(vcpu);
4718                 }
4719         } else if (kvm_cpu_has_interrupt(vcpu)) {
4720                 if (kvm_x86_ops->interrupt_allowed(vcpu)) {
4721                         kvm_queue_interrupt(vcpu, kvm_cpu_get_interrupt(vcpu),
4722                                             false);
4723                         kvm_x86_ops->set_irq(vcpu);
4724                 }
4725         }
4726 }
4727
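/*
 * Swap XCR0 between host and guest values around guest execution.  The
 * guest value is loaded only when the guest has OSXSAVE enabled, and the
 * host value is restored lazily, and only when the two actually differ.
 */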
4728 static void kvm_load_guest_xcr0(struct kvm_vcpu *vcpu)
4729 {
4730         if (kvm_read_cr4_bits(vcpu, X86_CR4_OSXSAVE) &&
4731                         !vcpu->guest_xcr0_loaded) {
4732                 /* kvm_set_xcr() also depends on this */
4733                 xsetbv(XCR_XFEATURE_ENABLED_MASK, vcpu->arch.xcr0);
4734                 vcpu->guest_xcr0_loaded = 1;
4735         }
4736 }
4737
4738 static void kvm_put_guest_xcr0(struct kvm_vcpu *vcpu)
4739 {
4740         if (vcpu->guest_xcr0_loaded) {
4741                 if (vcpu->arch.xcr0 != host_xcr0)
4742                         xsetbv(XCR_XFEATURE_ENABLED_MASK, host_xcr0);
4743                 vcpu->guest_xcr0_loaded = 0;
4744         }
4745 }
4746
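/*
 * One guest entry/exit cycle: service pending vcpu->requests, disable
 * preemption and interrupts, inject events, run the guest, then restore
 * host state and hand the exit off to the backend's handle_exit().
 * Returns > 0 to continue the run loop and <= 0 to return to userspace.
 */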
4747 static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
4748 {
4749         int r;
4750         bool req_int_win = !irqchip_in_kernel(vcpu->kvm) &&
4751                 vcpu->run->request_interrupt_window;
4752
4753         if (vcpu->requests)
4754                 if (test_and_clear_bit(KVM_REQ_MMU_RELOAD, &vcpu->requests))
4755                         kvm_mmu_unload(vcpu);
4756
4757         r = kvm_mmu_reload(vcpu);
4758         if (unlikely(r))
4759                 goto out;
4760
4761         if (vcpu->requests) {
4762                 if (test_and_clear_bit(KVM_REQ_MIGRATE_TIMER, &vcpu->requests))
4763                         __kvm_migrate_timers(vcpu);
4764                 if (test_and_clear_bit(KVM_REQ_KVMCLOCK_UPDATE, &vcpu->requests))
4765                         kvm_write_guest_time(vcpu);
4766                 if (test_and_clear_bit(KVM_REQ_MMU_SYNC, &vcpu->requests))
4767                         kvm_mmu_sync_roots(vcpu);
4768                 if (test_and_clear_bit(KVM_REQ_TLB_FLUSH, &vcpu->requests))
4769                         kvm_x86_ops->tlb_flush(vcpu);
4770                 if (test_and_clear_bit(KVM_REQ_REPORT_TPR_ACCESS,
4771                                        &vcpu->requests)) {
4772                         vcpu->run->exit_reason = KVM_EXIT_TPR_ACCESS;
4773                         r = 0;
4774                         goto out;
4775                 }
4776                 if (test_and_clear_bit(KVM_REQ_TRIPLE_FAULT, &vcpu->requests)) {
4777                         vcpu->run->exit_reason = KVM_EXIT_SHUTDOWN;
4778                         r = 0;
4779                         goto out;
4780                 }
4781                 if (test_and_clear_bit(KVM_REQ_DEACTIVATE_FPU, &vcpu->requests)) {
4782                         vcpu->fpu_active = 0;
4783                         kvm_x86_ops->fpu_deactivate(vcpu);
4784                 }
4785         }
4786
4787         preempt_disable();
4788
4789         kvm_x86_ops->prepare_guest_switch(vcpu);
4790         if (vcpu->fpu_active)
4791                 kvm_load_guest_fpu(vcpu);
4792         kvm_load_guest_xcr0(vcpu);
4793
4794         atomic_set(&vcpu->guest_mode, 1);
4795         smp_wmb();
4796
4797         local_irq_disable();
4798
4799         if (!atomic_read(&vcpu->guest_mode) || vcpu->requests
4800             || need_resched() || signal_pending(current)) {
4801                 atomic_set(&vcpu->guest_mode, 0);
4802                 smp_wmb();
4803                 local_irq_enable();
4804                 preempt_enable();
4805                 r = 1;
4806                 goto out;
4807         }
4808
4809         inject_pending_event(vcpu);
4810
4811         /* enable NMI/IRQ window open exits if needed */
4812         if (vcpu->arch.nmi_pending)
4813                 kvm_x86_ops->enable_nmi_window(vcpu);
4814         else if (kvm_cpu_has_interrupt(vcpu) || req_int_win)
4815                 kvm_x86_ops->enable_irq_window(vcpu);
4816
4817         if (kvm_lapic_enabled(vcpu)) {
4818                 update_cr8_intercept(vcpu);
4819                 kvm_lapic_sync_to_vapic(vcpu);
4820         }
4821
4822         srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
4823
4824         kvm_guest_enter();
4825
4826         if (unlikely(vcpu->arch.switch_db_regs)) {
4827                 set_debugreg(0, 7);
4828                 set_debugreg(vcpu->arch.eff_db[0], 0);
4829                 set_debugreg(vcpu->arch.eff_db[1], 1);
4830                 set_debugreg(vcpu->arch.eff_db[2], 2);
4831                 set_debugreg(vcpu->arch.eff_db[3], 3);
4832         }
4833
4834         trace_kvm_entry(vcpu->vcpu_id);
4835         kvm_x86_ops->run(vcpu);
4836
4837         /*
4838          * If the guest has used debug registers, at least dr7
4839          * will be disabled while returning to the host.
4840          * If we don't have active breakpoints in the host, we don't
4841          * care about the messed up debug address registers. But if
4842          * we have some of them active, restore the old state.
4843          */
4844         if (hw_breakpoint_active())
4845                 hw_breakpoint_restore();
4846
4847         atomic_set(&vcpu->guest_mode, 0);
4848         smp_wmb();
4849         local_irq_enable();
4850
4851         ++vcpu->stat.exits;
4852
4853         /*
4854          * We must have an instruction between local_irq_enable() and
4855          * kvm_guest_exit(), so the timer interrupt isn't delayed by
4856          * the interrupt shadow.  The stat.exits increment will do nicely.
4857          * But we need to prevent reordering, hence this barrier():
4858          */
4859         barrier();
4860
4861         kvm_guest_exit();
4862
4863         preempt_enable();
4864
4865         vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
4866
4867         /*
4868          * Profile KVM exit RIPs:
4869          */
4870         if (unlikely(prof_on == KVM_PROFILING)) {
4871                 unsigned long rip = kvm_rip_read(vcpu);
4872                 profile_hit(KVM_PROFILING, (void *)rip);
4873         }
4874 
4876         kvm_lapic_sync_from_vapic(vcpu);
4877
4878         r = kvm_x86_ops->handle_exit(vcpu);
4879 out:
4880         return r;
4881 }
4882
4883
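/*
 * The vcpu run loop: enter the guest while RUNNABLE, block otherwise, and
 * drop back to userspace on a pending signal, a requested interrupt
 * window, or any exit reason set by vcpu_enter_guest().
 */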
4884 static int __vcpu_run(struct kvm_vcpu *vcpu)
4885 {
4886         int r;
4887         struct kvm *kvm = vcpu->kvm;
4888
4889         if (unlikely(vcpu->arch.mp_state == KVM_MP_STATE_SIPI_RECEIVED)) {
4890                 pr_debug("vcpu %d received sipi with vector # %x\n",
4891                          vcpu->vcpu_id, vcpu->arch.sipi_vector);
4892                 kvm_lapic_reset(vcpu);
4893                 r = kvm_arch_vcpu_reset(vcpu);
4894                 if (r)
4895                         return r;
4896                 vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE;
4897         }
4898
4899         vcpu->srcu_idx = srcu_read_lock(&kvm->srcu);
4900         vapic_enter(vcpu);
4901
4902         r = 1;
4903         while (r > 0) {
4904                 if (vcpu->arch.mp_state == KVM_MP_STATE_RUNNABLE)
4905                         r = vcpu_enter_guest(vcpu);
4906                 else {
4907                         srcu_read_unlock(&kvm->srcu, vcpu->srcu_idx);
4908                         kvm_vcpu_block(vcpu);
4909                         vcpu->srcu_idx = srcu_read_lock(&kvm->srcu);
4910                         if (test_and_clear_bit(KVM_REQ_UNHALT, &vcpu->requests)) {
4911                                 switch (vcpu->arch.mp_state) {
4912                                 case KVM_MP_STATE_HALTED:
4913                                         vcpu->arch.mp_state =
4914                                                 KVM_MP_STATE_RUNNABLE;
4915                                         /* fall through */
4916                                 case KVM_MP_STATE_RUNNABLE:
4917                                         break;
4918                                 case KVM_MP_STATE_SIPI_RECEIVED:
4919                                 default:
4920                                         r = -EINTR;
4921                                         break;
4922                                 }
4923                         }
4924                 }
4925
4926                 if (r <= 0)
4927                         break;
4928
4929                 clear_bit(KVM_REQ_PENDING_TIMER, &vcpu->requests);
4930                 if (kvm_cpu_has_pending_timer(vcpu))
4931                         kvm_inject_pending_timer_irqs(vcpu);
4932
4933                 if (dm_request_for_irq_injection(vcpu)) {
4934                         r = -EINTR;
4935                         vcpu->run->exit_reason = KVM_EXIT_INTR;
4936                         ++vcpu->stat.request_irq_exits;
4937                 }
4938                 if (signal_pending(current)) {
4939                         r = -EINTR;
4940                         vcpu->run->exit_reason = KVM_EXIT_INTR;
4941                         ++vcpu->stat.signal_exits;
4942                 }
4943                 if (need_resched()) {
4944                         srcu_read_unlock(&kvm->srcu, vcpu->srcu_idx);
4945                         kvm_resched(vcpu);
4946                         vcpu->srcu_idx = srcu_read_lock(&kvm->srcu);
4947                 }
4948         }
4949
4950         srcu_read_unlock(&kvm->srcu, vcpu->srcu_idx);
4951
4952         vapic_exit(vcpu);
4953
4954         return r;
4955 }
4956
4957 int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
4958 {
4959         int r;
4960         sigset_t sigsaved;
4961
4962         if (vcpu->sigset_active)
4963                 sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved);
4964
4965         if (unlikely(vcpu->arch.mp_state == KVM_MP_STATE_UNINITIALIZED)) {
4966                 kvm_vcpu_block(vcpu);
4967                 clear_bit(KVM_REQ_UNHALT, &vcpu->requests);
4968                 r = -EAGAIN;
4969                 goto out;
4970         }
4971
4972         /* re-sync apic's tpr */
4973         if (!irqchip_in_kernel(vcpu->kvm))
4974                 kvm_set_cr8(vcpu, kvm_run->cr8);
4975
4976         if (vcpu->arch.pio.count || vcpu->mmio_needed ||
4977             vcpu->arch.emulate_ctxt.restart) {
4978                 if (vcpu->mmio_needed) {
4979                         memcpy(vcpu->mmio_data, kvm_run->mmio.data, 8);
4980                         vcpu->mmio_read_completed = 1;
4981                         vcpu->mmio_needed = 0;
4982                 }
4983                 vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
4984                 r = emulate_instruction(vcpu, 0, 0, EMULTYPE_NO_DECODE);
4985                 srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
4986                 if (r != EMULATE_DONE) {
4987                         r = 0;
4988                         goto out;
4989                 }
4990         }
4991         if (kvm_run->exit_reason == KVM_EXIT_HYPERCALL)
4992                 kvm_register_write(vcpu, VCPU_REGS_RAX,
4993                                      kvm_run->hypercall.ret);
4994
4995         r = __vcpu_run(vcpu);
4996
4997 out:
4998         post_kvm_run_save(vcpu);
4999         if (vcpu->sigset_active)
5000                 sigprocmask(SIG_SETMASK, &sigsaved, NULL);
5001
5002         return r;
5003 }
5004
5005 int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
5006 {
5007         regs->rax = kvm_register_read(vcpu, VCPU_REGS_RAX);
5008         regs->rbx = kvm_register_read(vcpu, VCPU_REGS_RBX);
5009         regs->rcx = kvm_register_read(vcpu, VCPU_REGS_RCX);
5010         regs->rdx = kvm_register_read(vcpu, VCPU_REGS_RDX);
5011         regs->rsi = kvm_register_read(vcpu, VCPU_REGS_RSI);
5012         regs->rdi = kvm_register_read(vcpu, VCPU_REGS_RDI);
5013         regs->rsp = kvm_register_read(vcpu, VCPU_REGS_RSP);
5014         regs->rbp = kvm_register_read(vcpu, VCPU_REGS_RBP);
5015 #ifdef CONFIG_X86_64
5016         regs->r8 = kvm_register_read(vcpu, VCPU_REGS_R8);
5017         regs->r9 = kvm_register_read(vcpu, VCPU_REGS_R9);
5018         regs->r10 = kvm_register_read(vcpu, VCPU_REGS_R10);
5019         regs->r11 = kvm_register_read(vcpu, VCPU_REGS_R11);
5020         regs->r12 = kvm_register_read(vcpu, VCPU_REGS_R12);
5021         regs->r13 = kvm_register_read(vcpu, VCPU_REGS_R13);
5022         regs->r14 = kvm_register_read(vcpu, VCPU_REGS_R14);
5023         regs->r15 = kvm_register_read(vcpu, VCPU_REGS_R15);
5024 #endif
5025
5026         regs->rip = kvm_rip_read(vcpu);
5027         regs->rflags = kvm_get_rflags(vcpu);
5028
5029         return 0;
5030 }
5031
5032 int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
5033 {
5034         kvm_register_write(vcpu, VCPU_REGS_RAX, regs->rax);
5035         kvm_register_write(vcpu, VCPU_REGS_RBX, regs->rbx);
5036         kvm_register_write(vcpu, VCPU_REGS_RCX, regs->rcx);
5037         kvm_register_write(vcpu, VCPU_REGS_RDX, regs->rdx);
5038         kvm_register_write(vcpu, VCPU_REGS_RSI, regs->rsi);
5039         kvm_register_write(vcpu, VCPU_REGS_RDI, regs->rdi);
5040         kvm_register_write(vcpu, VCPU_REGS_RSP, regs->rsp);
5041         kvm_register_write(vcpu, VCPU_REGS_RBP, regs->rbp);
5042 #ifdef CONFIG_X86_64
5043         kvm_register_write(vcpu, VCPU_REGS_R8, regs->r8);
5044         kvm_register_write(vcpu, VCPU_REGS_R9, regs->r9);
5045         kvm_register_write(vcpu, VCPU_REGS_R10, regs->r10);
5046         kvm_register_write(vcpu, VCPU_REGS_R11, regs->r11);
5047         kvm_register_write(vcpu, VCPU_REGS_R12, regs->r12);
5048         kvm_register_write(vcpu, VCPU_REGS_R13, regs->r13);
5049         kvm_register_write(vcpu, VCPU_REGS_R14, regs->r14);
5050         kvm_register_write(vcpu, VCPU_REGS_R15, regs->r15);
5051 #endif
5052
5053         kvm_rip_write(vcpu, regs->rip);
5054         kvm_set_rflags(vcpu, regs->rflags);
5055
5056         vcpu->arch.exception.pending = false;
5057
5058         return 0;
5059 }
5060
5061 void kvm_get_cs_db_l_bits(struct kvm_vcpu *vcpu, int *db, int *l)
5062 {
5063         struct kvm_segment cs;
5064
5065         kvm_get_segment(vcpu, &cs, VCPU_SREG_CS);
5066         *db = cs.db;
5067         *l = cs.l;
5068 }
5069 EXPORT_SYMBOL_GPL(kvm_get_cs_db_l_bits);
5070
5071 int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
5072                                   struct kvm_sregs *sregs)
5073 {
5074         struct desc_ptr dt;
5075
5076         kvm_get_segment(vcpu, &sregs->cs, VCPU_SREG_CS);
5077         kvm_get_segment(vcpu, &sregs->ds, VCPU_SREG_DS);
5078         kvm_get_segment(vcpu, &sregs->es, VCPU_SREG_ES);
5079         kvm_get_segment(vcpu, &sregs->fs, VCPU_SREG_FS);
5080         kvm_get_segment(vcpu, &sregs->gs, VCPU_SREG_GS);
5081         kvm_get_segment(vcpu, &sregs->ss, VCPU_SREG_SS);
5082
5083         kvm_get_segment(vcpu, &sregs->tr, VCPU_SREG_TR);
5084         kvm_get_segment(vcpu, &sregs->ldt, VCPU_SREG_LDTR);
5085
5086         kvm_x86_ops->get_idt(vcpu, &dt);
5087         sregs->idt.limit = dt.size;
5088         sregs->idt.base = dt.address;
5089         kvm_x86_ops->get_gdt(vcpu, &dt);
5090         sregs->gdt.limit = dt.size;
5091         sregs->gdt.base = dt.address;
5092
5093         sregs->cr0 = kvm_read_cr0(vcpu);
5094         sregs->cr2 = vcpu->arch.cr2;
5095         sregs->cr3 = vcpu->arch.cr3;
5096         sregs->cr4 = kvm_read_cr4(vcpu);
5097         sregs->cr8 = kvm_get_cr8(vcpu);
5098         sregs->efer = vcpu->arch.efer;
5099         sregs->apic_base = kvm_get_apic_base(vcpu);
5100
5101         memset(sregs->interrupt_bitmap, 0, sizeof sregs->interrupt_bitmap);
5102
5103         if (vcpu->arch.interrupt.pending && !vcpu->arch.interrupt.soft)
5104                 set_bit(vcpu->arch.interrupt.nr,
5105                         (unsigned long *)sregs->interrupt_bitmap);
5106
5107         return 0;
5108 }
5109
5110 int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
5111                                     struct kvm_mp_state *mp_state)
5112 {
5113         mp_state->mp_state = vcpu->arch.mp_state;
5114         return 0;
5115 }
5116
5117 int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
5118                                     struct kvm_mp_state *mp_state)
5119 {
5120         vcpu->arch.mp_state = mp_state->mp_state;
5121         return 0;
5122 }
5123
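/*
 * Emulate a task switch for the backend: snapshot the register file into
 * the emulator context, let emulator_task_switch() perform the switch,
 * then write the resulting registers, RIP and RFLAGS back to the vcpu.
 */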
5124 int kvm_task_switch(struct kvm_vcpu *vcpu, u16 tss_selector, int reason,
5125                     bool has_error_code, u32 error_code)
5126 {
5127         struct decode_cache *c = &vcpu->arch.emulate_ctxt.decode;
5128         int cs_db, cs_l, ret;
5129         cache_all_regs(vcpu);
5130
5131         kvm_x86_ops->get_cs_db_l_bits(vcpu, &cs_db, &cs_l);
5132
5133         vcpu->arch.emulate_ctxt.vcpu = vcpu;
5134         vcpu->arch.emulate_ctxt.eflags = kvm_x86_ops->get_rflags(vcpu);
5135         vcpu->arch.emulate_ctxt.eip = kvm_rip_read(vcpu);
5136         vcpu->arch.emulate_ctxt.mode =
5137                 (!is_protmode(vcpu)) ? X86EMUL_MODE_REAL :
5138                 (vcpu->arch.emulate_ctxt.eflags & X86_EFLAGS_VM)
5139                 ? X86EMUL_MODE_VM86 : cs_l
5140                 ? X86EMUL_MODE_PROT64 : cs_db
5141                 ? X86EMUL_MODE_PROT32 : X86EMUL_MODE_PROT16;
5142         memset(c, 0, sizeof(struct decode_cache));
5143         memcpy(c->regs, vcpu->arch.regs, sizeof c->regs);
5144
5145         ret = emulator_task_switch(&vcpu->arch.emulate_ctxt, &emulate_ops,
5146                                    tss_selector, reason, has_error_code,
5147                                    error_code);
5148
5149         if (ret)
5150                 return EMULATE_FAIL;
5151
5152         memcpy(vcpu->arch.regs, c->regs, sizeof c->regs);
5153         kvm_rip_write(vcpu, vcpu->arch.emulate_ctxt.eip);
5154         kvm_x86_ops->set_rflags(vcpu, vcpu->arch.emulate_ctxt.eflags);
5155         return EMULATE_DONE;
5156 }
5157 EXPORT_SYMBOL_GPL(kvm_task_switch);
5158
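/*
 * Load the full segment and control-register state supplied by userspace.
 * Changes to CR0, CR3, CR4 or EFER that can affect paging force a
 * kvm_mmu_reset_context() so the MMU is rebuilt for the new mode.
 */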
5159 int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
5160                                   struct kvm_sregs *sregs)
5161 {
5162         int mmu_reset_needed = 0;
5163         int pending_vec, max_bits;
5164         struct desc_ptr dt;
5165
5166         dt.size = sregs->idt.limit;
5167         dt.address = sregs->idt.base;
5168         kvm_x86_ops->set_idt(vcpu, &dt);
5169         dt.size = sregs->gdt.limit;
5170         dt.address = sregs->gdt.base;
5171         kvm_x86_ops->set_gdt(vcpu, &dt);
5172
5173         vcpu->arch.cr2 = sregs->cr2;
5174         mmu_reset_needed |= vcpu->arch.cr3 != sregs->cr3;
5175         vcpu->arch.cr3 = sregs->cr3;
5176
5177         kvm_set_cr8(vcpu, sregs->cr8);
5178
5179         mmu_reset_needed |= vcpu->arch.efer != sregs->efer;
5180         kvm_x86_ops->set_efer(vcpu, sregs->efer);
5181         kvm_set_apic_base(vcpu, sregs->apic_base);
5182
5183         mmu_reset_needed |= kvm_read_cr0(vcpu) != sregs->cr0;
5184         kvm_x86_ops->set_cr0(vcpu, sregs->cr0);
5185         vcpu->arch.cr0 = sregs->cr0;
5186
5187         mmu_reset_needed |= kvm_read_cr4(vcpu) != sregs->cr4;
5188         kvm_x86_ops->set_cr4(vcpu, sregs->cr4);
5189         if (!is_long_mode(vcpu) && is_pae(vcpu)) {
5190                 load_pdptrs(vcpu, vcpu->arch.cr3);
5191                 mmu_reset_needed = 1;
5192         }
5193
5194         if (mmu_reset_needed)
5195                 kvm_mmu_reset_context(vcpu);
5196
5197         max_bits = (sizeof sregs->interrupt_bitmap) << 3;
5198         pending_vec = find_first_bit(
5199                 (const unsigned long *)sregs->interrupt_bitmap, max_bits);
5200         if (pending_vec < max_bits) {
5201                 kvm_queue_interrupt(vcpu, pending_vec, false);
5202                 pr_debug("Set back pending irq %d\n", pending_vec);
5203                 if (irqchip_in_kernel(vcpu->kvm))
5204                         kvm_pic_clear_isr_ack(vcpu->kvm);
5205         }
5206
5207         kvm_set_segment(vcpu, &sregs->cs, VCPU_SREG_CS);
5208         kvm_set_segment(vcpu, &sregs->ds, VCPU_SREG_DS);
5209         kvm_set_segment(vcpu, &sregs->es, VCPU_SREG_ES);
5210         kvm_set_segment(vcpu, &sregs->fs, VCPU_SREG_FS);
5211         kvm_set_segment(vcpu, &sregs->gs, VCPU_SREG_GS);
5212         kvm_set_segment(vcpu, &sregs->ss, VCPU_SREG_SS);
5213
5214         kvm_set_segment(vcpu, &sregs->tr, VCPU_SREG_TR);
5215         kvm_set_segment(vcpu, &sregs->ldt, VCPU_SREG_LDTR);
5216
5217         update_cr8_intercept(vcpu);
5218
5219         /* Older userspace won't unhalt the vcpu on reset. */
5220         if (kvm_vcpu_is_bsp(vcpu) && kvm_rip_read(vcpu) == 0xfff0 &&
5221             sregs->cs.selector == 0xf000 && sregs->cs.base == 0xffff0000 &&
5222             !is_protmode(vcpu))
5223                 vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE;
5224
5225         return 0;
5226 }
5227
5228 int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
5229                                         struct kvm_guest_debug *dbg)
5230 {
5231         unsigned long rflags;
5232         int i, r;
5233
5234         if (dbg->control & (KVM_GUESTDBG_INJECT_DB | KVM_GUESTDBG_INJECT_BP)) {
5235                 r = -EBUSY;
5236                 if (vcpu->arch.exception.pending)
5237                         goto out;
5238                 if (dbg->control & KVM_GUESTDBG_INJECT_DB)
5239                         kvm_queue_exception(vcpu, DB_VECTOR);
5240                 else
5241                         kvm_queue_exception(vcpu, BP_VECTOR);
5242         }
5243
5244         /*
5245          * Read rflags as long as potentially injected trace flags are still
5246          * filtered out.
5247          */
5248         rflags = kvm_get_rflags(vcpu);
5249
5250         vcpu->guest_debug = dbg->control;
5251         if (!(vcpu->guest_debug & KVM_GUESTDBG_ENABLE))
5252                 vcpu->guest_debug = 0;
5253
5254         if (vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP) {
5255                 for (i = 0; i < KVM_NR_DB_REGS; ++i)
5256                         vcpu->arch.eff_db[i] = dbg->arch.debugreg[i];
5257                 vcpu->arch.switch_db_regs =
5258                         (dbg->arch.debugreg[7] & DR7_BP_EN_MASK);
5259         } else {
5260                 for (i = 0; i < KVM_NR_DB_REGS; i++)
5261                         vcpu->arch.eff_db[i] = vcpu->arch.db[i];
5262                 vcpu->arch.switch_db_regs = (vcpu->arch.dr7 & DR7_BP_EN_MASK);
5263         }
5264
5265         if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP)
5266                 vcpu->arch.singlestep_rip = kvm_rip_read(vcpu) +
5267                         get_segment_base(vcpu, VCPU_SREG_CS);
5268
5269         /*
5270          * Trigger an rflags update that will inject or remove the trace
5271          * flags.
5272          */
5273         kvm_set_rflags(vcpu, rflags);
5274
5275         kvm_x86_ops->set_guest_debug(vcpu, dbg);
5276
5277         r = 0;
5278
5279 out:
5280
5281         return r;
5282 }
5283
5284 /*
5285  * Translate a guest virtual address to a guest physical address.
5286  */
5287 int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
5288                                     struct kvm_translation *tr)
5289 {
5290         unsigned long vaddr = tr->linear_address;
5291         gpa_t gpa;
5292         int idx;
5293
5294         idx = srcu_read_lock(&vcpu->kvm->srcu);
5295         gpa = kvm_mmu_gva_to_gpa_system(vcpu, vaddr, NULL);
5296         srcu_read_unlock(&vcpu->kvm->srcu, idx);
5297         tr->physical_address = gpa;
5298         tr->valid = gpa != UNMAPPED_GVA;
5299         tr->writeable = 1;
5300         tr->usermode = 0;
5301
5302         return 0;
5303 }
5304
5305 int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
5306 {
5307         struct i387_fxsave_struct *fxsave =
5308                         &vcpu->arch.guest_fpu.state->fxsave;
5309
5310         memcpy(fpu->fpr, fxsave->st_space, 128);
5311         fpu->fcw = fxsave->cwd;
5312         fpu->fsw = fxsave->swd;
5313         fpu->ftwx = fxsave->twd;
5314         fpu->last_opcode = fxsave->fop;
5315         fpu->last_ip = fxsave->rip;
5316         fpu->last_dp = fxsave->rdp;
5317         memcpy(fpu->xmm, fxsave->xmm_space, sizeof fxsave->xmm_space);
5318
5319         return 0;
5320 }
5321
5322 int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
5323 {
5324         struct i387_fxsave_struct *fxsave =
5325                         &vcpu->arch.guest_fpu.state->fxsave;
5326
5327         memcpy(fxsave->st_space, fpu->fpr, 128);
5328         fxsave->cwd = fpu->fcw;
5329         fxsave->swd = fpu->fsw;
5330         fxsave->twd = fpu->ftwx;
5331         fxsave->fop = fpu->last_opcode;
5332         fxsave->rip = fpu->last_ip;
5333         fxsave->rdp = fpu->last_dp;
5334         memcpy(fxsave->xmm_space, fpu->xmm, sizeof fxsave->xmm_space);
5335
5336         return 0;
5337 }
5338
5339 int fx_init(struct kvm_vcpu *vcpu)
5340 {
5341         int err;
5342
5343         err = fpu_alloc(&vcpu->arch.guest_fpu);
5344         if (err)
5345                 return err;
5346
5347         fpu_finit(&vcpu->arch.guest_fpu);
5348
5349         /*
5350          * Ensure guest xcr0 is valid for loading
5351          */
5352         vcpu->arch.xcr0 = XSTATE_FP;
5353
5354         vcpu->arch.cr0 |= X86_CR0_ET;
5355
5356         return 0;
5357 }
5358 EXPORT_SYMBOL_GPL(fx_init);
5359
5360 static void fx_free(struct kvm_vcpu *vcpu)
5361 {
5362         fpu_free(&vcpu->arch.guest_fpu);
5363 }
5364
5365 void kvm_load_guest_fpu(struct kvm_vcpu *vcpu)
5366 {
5367         if (vcpu->guest_fpu_loaded)
5368                 return;
5369
5370         /*
5371          * Restore all possible states in the guest,
5372          * and assume host would use all available bits.
5373          * Guest xcr0 would be loaded later.
5374          */
5375         kvm_put_guest_xcr0(vcpu);
5376         vcpu->guest_fpu_loaded = 1;
5377         unlazy_fpu(current);
5378         fpu_restore_checking(&vcpu->arch.guest_fpu);
5379         trace_kvm_fpu(1);
5380 }
5381
5382 void kvm_put_guest_fpu(struct kvm_vcpu *vcpu)
5383 {
5384         kvm_put_guest_xcr0(vcpu);
5385
5386         if (!vcpu->guest_fpu_loaded)
5387                 return;
5388
5389         vcpu->guest_fpu_loaded = 0;
5390         fpu_save_init(&vcpu->arch.guest_fpu);
5391         ++vcpu->stat.fpu_reload;
5392         set_bit(KVM_REQ_DEACTIVATE_FPU, &vcpu->requests);
5393         trace_kvm_fpu(0);
5394 }
5395
5396 void kvm_arch_vcpu_free(struct kvm_vcpu *vcpu)
5397 {
5398         if (vcpu->arch.time_page) {
5399                 kvm_release_page_dirty(vcpu->arch.time_page);
5400                 vcpu->arch.time_page = NULL;
5401         }
5402
5403         fx_free(vcpu);
5404         kvm_x86_ops->vcpu_free(vcpu);
5405 }
5406
5407 struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm,
5408                                                 unsigned int id)
5409 {
5410         return kvm_x86_ops->vcpu_create(kvm, id);
5411 }
5412
5413 int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
5414 {
5415         int r;
5416
5417         vcpu->arch.mtrr_state.have_fixed = 1;
5418         vcpu_load(vcpu);
5419         r = kvm_arch_vcpu_reset(vcpu);
5420         if (r == 0)
5421                 r = kvm_mmu_setup(vcpu);
5422         vcpu_put(vcpu);
5423         if (r < 0)
5424                 goto free_vcpu;
5425
5426         return 0;
5427 free_vcpu:
5428         kvm_x86_ops->vcpu_free(vcpu);
5429         return r;
5430 }
5431
5432 void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
5433 {
5434         vcpu_load(vcpu);
5435         kvm_mmu_unload(vcpu);
5436         vcpu_put(vcpu);
5437
5438         fx_free(vcpu);
5439         kvm_x86_ops->vcpu_free(vcpu);
5440 }
5441
5442 int kvm_arch_vcpu_reset(struct kvm_vcpu *vcpu)
5443 {
5444         vcpu->arch.nmi_pending = false;
5445         vcpu->arch.nmi_injected = false;
5446
5447         vcpu->arch.switch_db_regs = 0;
5448         memset(vcpu->arch.db, 0, sizeof(vcpu->arch.db));
5449         vcpu->arch.dr6 = DR6_FIXED_1;
5450         vcpu->arch.dr7 = DR7_FIXED_1;
5451
5452         return kvm_x86_ops->vcpu_reset(vcpu);
5453 }
5454
5455 int kvm_arch_hardware_enable(void *garbage)
5456 {
5457         /*
5458          * Since this may be called from a hotplug notification,
5459          * we can't get the CPU frequency directly.
5460          */
5461         if (!boot_cpu_has(X86_FEATURE_CONSTANT_TSC)) {
5462                 int cpu = raw_smp_processor_id();
5463                 per_cpu(cpu_tsc_khz, cpu) = 0;
5464         }
5465
5466         kvm_shared_msr_cpu_online();
5467
5468         return kvm_x86_ops->hardware_enable(garbage);
5469 }
5470
5471 void kvm_arch_hardware_disable(void *garbage)
5472 {
5473         kvm_x86_ops->hardware_disable(garbage);
5474         drop_user_return_notifiers(garbage);
5475 }
5476
5477 int kvm_arch_hardware_setup(void)
5478 {
5479         return kvm_x86_ops->hardware_setup();
5480 }
5481
5482 void kvm_arch_hardware_unsetup(void)
5483 {
5484         kvm_x86_ops->hardware_unsetup();
5485 }
5486
5487 void kvm_arch_check_processor_compat(void *rtn)
5488 {
5489         kvm_x86_ops->check_processor_compatibility(rtn);
5490 }
5491
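/*
 * First-time vcpu construction.  The BSP (or every vcpu when the irqchip
 * lives in userspace) starts out RUNNABLE; the rest wait UNINITIALIZED
 * for a SIPI.  Error paths unwind allocations in strict reverse order.
 */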
5492 int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
5493 {
5494         struct page *page;
5495         struct kvm *kvm;
5496         int r;
5497
5498         BUG_ON(vcpu->kvm == NULL);
5499         kvm = vcpu->kvm;
5500
5501         vcpu->arch.mmu.root_hpa = INVALID_PAGE;
5502         if (!irqchip_in_kernel(kvm) || kvm_vcpu_is_bsp(vcpu))
5503                 vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE;
5504         else
5505                 vcpu->arch.mp_state = KVM_MP_STATE_UNINITIALIZED;
5506
5507         page = alloc_page(GFP_KERNEL | __GFP_ZERO);
5508         if (!page) {
5509                 r = -ENOMEM;
5510                 goto fail;
5511         }
5512         vcpu->arch.pio_data = page_address(page);
5513
5514         r = kvm_mmu_create(vcpu);
5515         if (r < 0)
5516                 goto fail_free_pio_data;
5517
5518         if (irqchip_in_kernel(kvm)) {
5519                 r = kvm_create_lapic(vcpu);
5520                 if (r < 0)
5521                         goto fail_mmu_destroy;
5522         }
5523
5524         vcpu->arch.mce_banks = kzalloc(KVM_MAX_MCE_BANKS * sizeof(u64) * 4,
5525                                        GFP_KERNEL);
5526         if (!vcpu->arch.mce_banks) {
5527                 r = -ENOMEM;
5528                 goto fail_free_lapic;
5529         }
5530         vcpu->arch.mcg_cap = KVM_MAX_MCE_BANKS;
5531
5532         return 0;
5533 fail_free_lapic:
5534         kvm_free_lapic(vcpu);
5535 fail_mmu_destroy:
5536         kvm_mmu_destroy(vcpu);
5537 fail_free_pio_data:
5538         free_page((unsigned long)vcpu->arch.pio_data);
5539 fail:
5540         return r;
5541 }
5542
5543 void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu)
5544 {
5545         int idx;
5546
5547         kfree(vcpu->arch.mce_banks);
5548         kvm_free_lapic(vcpu);
5549         idx = srcu_read_lock(&vcpu->kvm->srcu);
5550         kvm_mmu_destroy(vcpu);
5551         srcu_read_unlock(&vcpu->kvm->srcu, idx);
5552         free_page((unsigned long)vcpu->arch.pio_data);
5553 }
5554
5555 struct kvm *kvm_arch_create_vm(void)
5556 {
5557         struct kvm *kvm = kzalloc(sizeof(struct kvm), GFP_KERNEL);
5558
5559         if (!kvm)
5560                 return ERR_PTR(-ENOMEM);
5561
5562         kvm->arch.aliases = kzalloc(sizeof(struct kvm_mem_aliases), GFP_KERNEL);
5563         if (!kvm->arch.aliases) {
5564                 kfree(kvm);
5565                 return ERR_PTR(-ENOMEM);
5566         }
5567
5568         INIT_LIST_HEAD(&kvm->arch.active_mmu_pages);
5569         INIT_LIST_HEAD(&kvm->arch.assigned_dev_head);
5570
5571         /* Reserve bit 0 of irq_sources_bitmap for userspace irq source */
5572         set_bit(KVM_USERSPACE_IRQ_SOURCE_ID, &kvm->arch.irq_sources_bitmap);
5573
5574         rdtscll(kvm->arch.vm_init_tsc);
5575
5576         return kvm;
5577 }
5578
5579 static void kvm_unload_vcpu_mmu(struct kvm_vcpu *vcpu)
5580 {
5581         vcpu_load(vcpu);
5582         kvm_mmu_unload(vcpu);
5583         vcpu_put(vcpu);
5584 }
5585
5586 static void kvm_free_vcpus(struct kvm *kvm)
5587 {
5588         unsigned int i;
5589         struct kvm_vcpu *vcpu;
5590
5591         /*
5592          * Unpin any mmu pages first.
5593          */
5594         kvm_for_each_vcpu(i, vcpu, kvm)
5595                 kvm_unload_vcpu_mmu(vcpu);
5596         kvm_for_each_vcpu(i, vcpu, kvm)
5597                 kvm_arch_vcpu_free(vcpu);
5598
5599         mutex_lock(&kvm->lock);
5600         for (i = 0; i < atomic_read(&kvm->online_vcpus); i++)
5601                 kvm->vcpus[i] = NULL;
5602
5603         atomic_set(&kvm->online_vcpus, 0);
5604         mutex_unlock(&kvm->lock);
5605 }
5606
5607 void kvm_arch_sync_events(struct kvm *kvm)
5608 {
5609         kvm_free_all_assigned_devices(kvm);
5610 }
5611
5612 void kvm_arch_destroy_vm(struct kvm *kvm)
5613 {
5614         kvm_iommu_unmap_guest(kvm);
5615         kvm_free_pit(kvm);
5616         kfree(kvm->arch.vpic);
5617         kfree(kvm->arch.vioapic);
5618         kvm_free_vcpus(kvm);
5619         kvm_free_physmem(kvm);
5620         if (kvm->arch.apic_access_page)
5621                 put_page(kvm->arch.apic_access_page);
5622         if (kvm->arch.ept_identity_pagetable)
5623                 put_page(kvm->arch.ept_identity_pagetable);
5624         cleanup_srcu_struct(&kvm->srcu);
5625         kfree(kvm->arch.aliases);
5626         kfree(kvm);
5627 }
5628
5629 int kvm_arch_prepare_memory_region(struct kvm *kvm,
5630                                 struct kvm_memory_slot *memslot,
5631                                 struct kvm_memory_slot old,
5632                                 struct kvm_userspace_memory_region *mem,
5633                                 int user_alloc)
5634 {
5635         int npages = memslot->npages;
5636
5637         /* To keep backward compatibility with older userspace,
5638          * x86 needs to handle the !user_alloc case.
5639          */
5640         if (!user_alloc) {
5641                 if (npages && !old.rmap) {
5642                         unsigned long userspace_addr;
5643
5644                         down_write(&current->mm->mmap_sem);
5645                         userspace_addr = do_mmap(NULL, 0,
5646                                                  npages * PAGE_SIZE,
5647                                                  PROT_READ | PROT_WRITE,
5648                                                  MAP_PRIVATE | MAP_ANONYMOUS,
5649                                                  0);
5650                         up_write(&current->mm->mmap_sem);
5651
5652                         if (IS_ERR((void *)userspace_addr))
5653                                 return PTR_ERR((void *)userspace_addr);
5654
5655                         memslot->userspace_addr = userspace_addr;
5656                 }
5657         }
5658 
5660         return 0;
5661 }
5662
5663 void kvm_arch_commit_memory_region(struct kvm *kvm,
5664                                 struct kvm_userspace_memory_region *mem,
5665                                 struct kvm_memory_slot old,
5666                                 int user_alloc)
5667 {
5669         int npages = mem->memory_size >> PAGE_SHIFT;
5670
5671         if (!user_alloc && !old.user_alloc && old.rmap && !npages) {
5672                 int ret;
5673
5674                 down_write(&current->mm->mmap_sem);
5675                 ret = do_munmap(current->mm, old.userspace_addr,
5676                                 old.npages * PAGE_SIZE);
5677                 up_write(&current->mm->mmap_sem);
5678                 if (ret < 0)
5679                         printk(KERN_WARNING
5680                                "kvm_vm_ioctl_set_memory_region: "
5681                                "failed to munmap memory\n");
5682         }
5683
5684         spin_lock(&kvm->mmu_lock);
5685         if (!kvm->arch.n_requested_mmu_pages) {
5686                 unsigned int nr_mmu_pages = kvm_mmu_calculate_mmu_pages(kvm);
5687                 kvm_mmu_change_mmu_pages(kvm, nr_mmu_pages);
5688         }
5689
5690         kvm_mmu_slot_remove_write_access(kvm, mem->slot);
5691         spin_unlock(&kvm->mmu_lock);
5692 }
5693
5694 void kvm_arch_flush_shadow(struct kvm *kvm)
5695 {
5696         kvm_mmu_zap_all(kvm);
5697         kvm_reload_remote_mmus(kvm);
5698 }
5699
5700 int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
5701 {
5702         return vcpu->arch.mp_state == KVM_MP_STATE_RUNNABLE
5703                 || vcpu->arch.mp_state == KVM_MP_STATE_SIPI_RECEIVED
5704                 || vcpu->arch.nmi_pending ||
5705                 (kvm_arch_interrupt_allowed(vcpu) &&
5706                  kvm_cpu_has_interrupt(vcpu));
5707 }
5708
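/*
 * Kick a vcpu: wake it if it is blocked in kvm_vcpu_block(), and if it is
 * executing guest code on another CPU, send a reschedule IPI to force a
 * vmexit.  Exchanging guest_mode to 0 first avoids IPIing a vcpu that is
 * already on its way out of the guest.
 */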
5709 void kvm_vcpu_kick(struct kvm_vcpu *vcpu)
5710 {
5711         int me;
5712         int cpu = vcpu->cpu;
5713
5714         if (waitqueue_active(&vcpu->wq)) {
5715                 wake_up_interruptible(&vcpu->wq);
5716                 ++vcpu->stat.halt_wakeup;
5717         }
5718
5719         me = get_cpu();
5720         if (cpu != me && (unsigned)cpu < nr_cpu_ids && cpu_online(cpu))
5721                 if (atomic_xchg(&vcpu->guest_mode, 0))
5722                         smp_send_reschedule(cpu);
5723         put_cpu();
5724 }
5725
5726 int kvm_arch_interrupt_allowed(struct kvm_vcpu *vcpu)
5727 {
5728         return kvm_x86_ops->interrupt_allowed(vcpu);
5729 }
5730
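/*
 * Compare a previously recorded linear RIP (RIP plus CS base, as saved by
 * the single-step logic) against the vcpu's current linear RIP.
 */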
5731 bool kvm_is_linear_rip(struct kvm_vcpu *vcpu, unsigned long linear_rip)
5732 {
5733         unsigned long current_rip = kvm_rip_read(vcpu) +
5734                 get_segment_base(vcpu, VCPU_SREG_CS);
5735
5736         return current_rip == linear_rip;
5737 }
5738 EXPORT_SYMBOL_GPL(kvm_is_linear_rip);
5739
5740 unsigned long kvm_get_rflags(struct kvm_vcpu *vcpu)
5741 {
5742         unsigned long rflags;
5743
5744         rflags = kvm_x86_ops->get_rflags(vcpu);
5745         if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP)
5746                 rflags &= ~X86_EFLAGS_TF;
5747         return rflags;
5748 }
5749 EXPORT_SYMBOL_GPL(kvm_get_rflags);
5750
5751 void kvm_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags)
5752 {
5753         if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP &&
5754             kvm_is_linear_rip(vcpu, vcpu->arch.singlestep_rip))
5755                 rflags |= X86_EFLAGS_TF;
5756         kvm_x86_ops->set_rflags(vcpu, rflags);
5757 }
5758 EXPORT_SYMBOL_GPL(kvm_set_rflags);
5759
5760 EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_exit);
5761 EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_inj_virq);
5762 EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_page_fault);
5763 EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_msr);
5764 EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_cr);
5765 EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_nested_vmrun);
5766 EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_nested_vmexit);
5767 EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_nested_vmexit_inject);
5768 EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_nested_intr_vmexit);
5769 EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_invlpga);
5770 EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_skinit);
5771 EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_nested_intercepts);