1 /*
2  * Kernel-based Virtual Machine driver for Linux
3  *
4  * This module enables machines with Intel VT-x extensions to run virtual
5  * machines without emulation or binary translation.
6  *
7  * Copyright (C) 2006 Qumranet, Inc.
8  * Copyright 2010 Red Hat, Inc. and/or its affiliates.
9  *
10  * Authors:
11  *   Avi Kivity   <avi@qumranet.com>
12  *   Yaniv Kamay  <yaniv@qumranet.com>
13  *
14  * This work is licensed under the terms of the GNU GPL, version 2.  See
15  * the COPYING file in the top-level directory.
16  *
17  */
18
19 #include "irq.h"
20 #include "mmu.h"
21 #include "cpuid.h"
22
23 #include <linux/kvm_host.h>
24 #include <linux/module.h>
25 #include <linux/kernel.h>
26 #include <linux/mm.h>
27 #include <linux/highmem.h>
28 #include <linux/sched.h>
29 #include <linux/moduleparam.h>
30 #include <linux/mod_devicetable.h>
31 #include <linux/ftrace_event.h>
32 #include <linux/slab.h>
33 #include <linux/tboot.h>
34 #include "kvm_cache_regs.h"
35 #include "x86.h"
36
37 #include <asm/io.h>
38 #include <asm/desc.h>
39 #include <asm/vmx.h>
40 #include <asm/virtext.h>
41 #include <asm/mce.h>
42 #include <asm/i387.h>
43 #include <asm/xcr.h>
44 #include <asm/perf_event.h>
45 #include <asm/kexec.h>
46
47 #include "trace.h"
48
49 #define __ex(x) __kvm_handle_fault_on_reboot(x)
50 #define __ex_clear(x, reg) \
51         ____kvm_handle_fault_on_reboot(x, "xor " reg " , " reg)
52
53 MODULE_AUTHOR("Qumranet");
54 MODULE_LICENSE("GPL");
55
56 static const struct x86_cpu_id vmx_cpu_id[] = {
57         X86_FEATURE_MATCH(X86_FEATURE_VMX),
58         {}
59 };
60 MODULE_DEVICE_TABLE(x86cpu, vmx_cpu_id);
61
62 static bool __read_mostly enable_vpid = 1;
63 module_param_named(vpid, enable_vpid, bool, 0444);
64
65 static bool __read_mostly flexpriority_enabled = 1;
66 module_param_named(flexpriority, flexpriority_enabled, bool, S_IRUGO);
67
68 static bool __read_mostly enable_ept = 1;
69 module_param_named(ept, enable_ept, bool, S_IRUGO);
70
71 static bool __read_mostly enable_unrestricted_guest = 1;
72 module_param_named(unrestricted_guest,
73                         enable_unrestricted_guest, bool, S_IRUGO);
74
75 static bool __read_mostly enable_ept_ad_bits = 1;
76 module_param_named(eptad, enable_ept_ad_bits, bool, S_IRUGO);
77
78 static bool __read_mostly emulate_invalid_guest_state = true;
79 module_param(emulate_invalid_guest_state, bool, S_IRUGO);
80
81 static bool __read_mostly vmm_exclusive = 1;
82 module_param(vmm_exclusive, bool, S_IRUGO);
83
84 static bool __read_mostly fasteoi = 1;
85 module_param(fasteoi, bool, S_IRUGO);
86
87 static bool __read_mostly enable_apicv_reg = 1;
88 module_param(enable_apicv_reg, bool, S_IRUGO);
89
90 /*
91  * If nested=1, nested virtualization is supported, i.e., guests may use
92  * VMX and act as hypervisors for their own guests. If nested=0, guests may not
93  * use VMX instructions.
94  */
95 static bool __read_mostly nested = 0;
96 module_param(nested, bool, S_IRUGO);
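/*
 * Illustrative usage: nested VMX is off by default and is enabled at
 * module load time, for example with
 *
 *        modprobe kvm_intel nested=1
 *
 * or, when kvm-intel is built in, with kvm_intel.nested=1 on the kernel
 * command line.
 */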
97
98 #define KVM_GUEST_CR0_MASK_UNRESTRICTED_GUEST                           \
99         (X86_CR0_WP | X86_CR0_NE | X86_CR0_NW | X86_CR0_CD)
100 #define KVM_GUEST_CR0_MASK                                              \
101         (KVM_GUEST_CR0_MASK_UNRESTRICTED_GUEST | X86_CR0_PG | X86_CR0_PE)
102 #define KVM_VM_CR0_ALWAYS_ON_UNRESTRICTED_GUEST                         \
103         (X86_CR0_WP | X86_CR0_NE)
104 #define KVM_VM_CR0_ALWAYS_ON                                            \
105         (KVM_VM_CR0_ALWAYS_ON_UNRESTRICTED_GUEST | X86_CR0_PG | X86_CR0_PE)
106 #define KVM_CR4_GUEST_OWNED_BITS                                      \
107         (X86_CR4_PVI | X86_CR4_DE | X86_CR4_PCE | X86_CR4_OSFXSR      \
108          | X86_CR4_OSXMMEXCPT)
109
110 #define KVM_PMODE_VM_CR4_ALWAYS_ON (X86_CR4_PAE | X86_CR4_VMXE)
111 #define KVM_RMODE_VM_CR4_ALWAYS_ON (X86_CR4_VME | X86_CR4_PAE | X86_CR4_VMXE)
112
113 #define RMODE_GUEST_OWNED_EFLAGS_BITS (~(X86_EFLAGS_IOPL | X86_EFLAGS_VM))
114
115 /*
116  * These two parameters configure the controls for Pause-Loop Exiting:
117  * ple_gap:    upper bound on the amount of time between two successive
118  *             executions of PAUSE in a loop; a non-zero value also means
119  *             PLE is enabled. In testing, this time is usually less than 128 cycles.
120  * ple_window: upper bound on the amount of time a guest is allowed to execute
121  *             in a PAUSE loop. Tests indicate that most spinlocks are held for
122  *             less than 2^12 cycles.
123  * Time is measured on a counter that runs at the same rate as the TSC; see
124  * SDM volume 3B, sections 21.6.13 and 22.1.3.
125  */
126 #define KVM_VMX_DEFAULT_PLE_GAP    128
127 #define KVM_VMX_DEFAULT_PLE_WINDOW 4096
128 static int ple_gap = KVM_VMX_DEFAULT_PLE_GAP;
129 module_param(ple_gap, int, S_IRUGO);
130
131 static int ple_window = KVM_VMX_DEFAULT_PLE_WINDOW;
132 module_param(ple_window, int, S_IRUGO);
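/*
 * A minimal sketch of how the two parameters above are consumed during
 * per-vcpu VMCS setup (further down in this file), assuming the VMCS
 * field encodings PLE_GAP and PLE_WINDOW from <asm/vmx.h>:
 *
 *        if (ple_gap) {
 *                vmcs_write32(PLE_GAP, ple_gap);
 *                vmcs_write32(PLE_WINDOW, ple_window);
 *        }
 *
 * A ple_gap of 0 therefore disables Pause-Loop Exiting altogether.
 */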
133
134 extern const ulong vmx_return;
135
136 #define NR_AUTOLOAD_MSRS 8
137 #define VMCS02_POOL_SIZE 1
138
139 struct vmcs {
140         u32 revision_id;
141         u32 abort;
142         char data[0];
143 };
144
145 /*
146  * Track a VMCS that may be loaded on a certain CPU. If it is (cpu!=-1), also
147  * remember whether it was VMLAUNCHed, and maintain a linked list of all VMCSs
148  * loaded on this CPU (so we can clear them if the CPU goes down).
149  */
150 struct loaded_vmcs {
151         struct vmcs *vmcs;
152         int cpu;
153         int launched;
154         struct list_head loaded_vmcss_on_cpu_link;
155 };
156
157 struct shared_msr_entry {
158         unsigned index;
159         u64 data;
160         u64 mask;
161 };
162
163 /*
164  * struct vmcs12 describes the state that our guest hypervisor (L1) keeps for a
165  * single nested guest (L2), hence the name vmcs12. Any VMX implementation has
166  * a VMCS structure, and vmcs12 is our emulated VMX's VMCS. This structure is
167  * stored in guest memory specified by VMPTRLD, but is opaque to the guest,
168  * which must access it using VMREAD/VMWRITE/VMCLEAR instructions.
169  * More than one of these structures may exist, if L1 runs multiple L2 guests.
170  * nested_vmx_run() will use the data here to build a vmcs02: a VMCS for the
171  * underlying hardware which will be used to run L2.
172  * This structure is packed to ensure that its layout is identical across
173  * machines (necessary for live migration).
174  * If there are changes in this struct, VMCS12_REVISION must be changed.
175  */
176 typedef u64 natural_width;
177 struct __packed vmcs12 {
178         /* According to the Intel spec, a VMCS region must start with the
179          * following two fields. Then follow implementation-specific data.
180          */
181         u32 revision_id;
182         u32 abort;
183
184         u32 launch_state; /* set to 0 by VMCLEAR, to 1 by VMLAUNCH */
185         u32 padding[7]; /* room for future expansion */
186
187         u64 io_bitmap_a;
188         u64 io_bitmap_b;
189         u64 msr_bitmap;
190         u64 vm_exit_msr_store_addr;
191         u64 vm_exit_msr_load_addr;
192         u64 vm_entry_msr_load_addr;
193         u64 tsc_offset;
194         u64 virtual_apic_page_addr;
195         u64 apic_access_addr;
196         u64 ept_pointer;
197         u64 guest_physical_address;
198         u64 vmcs_link_pointer;
199         u64 guest_ia32_debugctl;
200         u64 guest_ia32_pat;
201         u64 guest_ia32_efer;
202         u64 guest_ia32_perf_global_ctrl;
203         u64 guest_pdptr0;
204         u64 guest_pdptr1;
205         u64 guest_pdptr2;
206         u64 guest_pdptr3;
207         u64 host_ia32_pat;
208         u64 host_ia32_efer;
209         u64 host_ia32_perf_global_ctrl;
210         u64 padding64[8]; /* room for future expansion */
211         /*
212          * To allow migration of L1 (complete with its L2 guests) between
213          * machines of different natural widths (32 or 64 bit), we cannot have
214          * unsigned long fields with no explicit size. We use u64 (aliased
215          * natural_width) instead. Luckily, x86 is little-endian.
216          */
217         natural_width cr0_guest_host_mask;
218         natural_width cr4_guest_host_mask;
219         natural_width cr0_read_shadow;
220         natural_width cr4_read_shadow;
221         natural_width cr3_target_value0;
222         natural_width cr3_target_value1;
223         natural_width cr3_target_value2;
224         natural_width cr3_target_value3;
225         natural_width exit_qualification;
226         natural_width guest_linear_address;
227         natural_width guest_cr0;
228         natural_width guest_cr3;
229         natural_width guest_cr4;
230         natural_width guest_es_base;
231         natural_width guest_cs_base;
232         natural_width guest_ss_base;
233         natural_width guest_ds_base;
234         natural_width guest_fs_base;
235         natural_width guest_gs_base;
236         natural_width guest_ldtr_base;
237         natural_width guest_tr_base;
238         natural_width guest_gdtr_base;
239         natural_width guest_idtr_base;
240         natural_width guest_dr7;
241         natural_width guest_rsp;
242         natural_width guest_rip;
243         natural_width guest_rflags;
244         natural_width guest_pending_dbg_exceptions;
245         natural_width guest_sysenter_esp;
246         natural_width guest_sysenter_eip;
247         natural_width host_cr0;
248         natural_width host_cr3;
249         natural_width host_cr4;
250         natural_width host_fs_base;
251         natural_width host_gs_base;
252         natural_width host_tr_base;
253         natural_width host_gdtr_base;
254         natural_width host_idtr_base;
255         natural_width host_ia32_sysenter_esp;
256         natural_width host_ia32_sysenter_eip;
257         natural_width host_rsp;
258         natural_width host_rip;
259         natural_width paddingl[8]; /* room for future expansion */
260         u32 pin_based_vm_exec_control;
261         u32 cpu_based_vm_exec_control;
262         u32 exception_bitmap;
263         u32 page_fault_error_code_mask;
264         u32 page_fault_error_code_match;
265         u32 cr3_target_count;
266         u32 vm_exit_controls;
267         u32 vm_exit_msr_store_count;
268         u32 vm_exit_msr_load_count;
269         u32 vm_entry_controls;
270         u32 vm_entry_msr_load_count;
271         u32 vm_entry_intr_info_field;
272         u32 vm_entry_exception_error_code;
273         u32 vm_entry_instruction_len;
274         u32 tpr_threshold;
275         u32 secondary_vm_exec_control;
276         u32 vm_instruction_error;
277         u32 vm_exit_reason;
278         u32 vm_exit_intr_info;
279         u32 vm_exit_intr_error_code;
280         u32 idt_vectoring_info_field;
281         u32 idt_vectoring_error_code;
282         u32 vm_exit_instruction_len;
283         u32 vmx_instruction_info;
284         u32 guest_es_limit;
285         u32 guest_cs_limit;
286         u32 guest_ss_limit;
287         u32 guest_ds_limit;
288         u32 guest_fs_limit;
289         u32 guest_gs_limit;
290         u32 guest_ldtr_limit;
291         u32 guest_tr_limit;
292         u32 guest_gdtr_limit;
293         u32 guest_idtr_limit;
294         u32 guest_es_ar_bytes;
295         u32 guest_cs_ar_bytes;
296         u32 guest_ss_ar_bytes;
297         u32 guest_ds_ar_bytes;
298         u32 guest_fs_ar_bytes;
299         u32 guest_gs_ar_bytes;
300         u32 guest_ldtr_ar_bytes;
301         u32 guest_tr_ar_bytes;
302         u32 guest_interruptibility_info;
303         u32 guest_activity_state;
304         u32 guest_sysenter_cs;
305         u32 host_ia32_sysenter_cs;
306         u32 padding32[8]; /* room for future expansion */
307         u16 virtual_processor_id;
308         u16 guest_es_selector;
309         u16 guest_cs_selector;
310         u16 guest_ss_selector;
311         u16 guest_ds_selector;
312         u16 guest_fs_selector;
313         u16 guest_gs_selector;
314         u16 guest_ldtr_selector;
315         u16 guest_tr_selector;
316         u16 host_es_selector;
317         u16 host_cs_selector;
318         u16 host_ss_selector;
319         u16 host_ds_selector;
320         u16 host_fs_selector;
321         u16 host_gs_selector;
322         u16 host_tr_selector;
323 };
324
325 /*
326  * VMCS12_REVISION is an arbitrary id that should be changed if the content or
327  * layout of struct vmcs12 is changed. MSR_IA32_VMX_BASIC returns this id, and
328  * VMPTRLD verifies that the VMCS region that L1 is loading contains this id.
329  */
330 #define VMCS12_REVISION 0x11e57ed0
331
332 /*
333  * VMCS12_SIZE is the number of bytes L1 should allocate for the VMXON region
334  * and any VMCS region. Although only sizeof(struct vmcs12) bytes are used by
335  * the current implementation, 4K is reserved to avoid future complications.
336  */
337 #define VMCS12_SIZE 0x1000
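/*
 * A sanity check along these lines (a sketch; BUILD_BUG_ON() must sit
 * inside a function, e.g. in the setup path) keeps struct vmcs12 and
 * VMCS12_SIZE from drifting apart:
 *
 *        BUILD_BUG_ON(sizeof(struct vmcs12) > VMCS12_SIZE);
 */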
338
339 /* Caches the vmcs02 last used to run each of a few recently used vmcs12s */
340 struct vmcs02_list {
341         struct list_head list;
342         gpa_t vmptr;
343         struct loaded_vmcs vmcs02;
344 };
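/*
 * A minimal sketch of the intended lookup (illustrative only; the real
 * pool management lives further down in this file): given the vmptr of
 * L1's current vmcs12, reuse a cached vmcs02 if one matches, otherwise
 * recycle or allocate an entry.
 *
 *        list_for_each_entry(item, &vmx->nested.vmcs02_pool, list)
 *                if (item->vmptr == vmx->nested.current_vmptr)
 *                        return &item->vmcs02;
 */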
345
346 /*
347  * The nested_vmx structure is part of vcpu_vmx, and holds information we need
348  * for correct emulation of VMX (i.e., nested VMX) on this vcpu.
349  */
350 struct nested_vmx {
351         /* Has the level-1 (L1) guest done VMXON? */
352         bool vmxon;
353
354         /* The guest-physical address of the current VMCS L1 keeps for L2 */
355         gpa_t current_vmptr;
356         /* The host-usable pointer to the above */
357         struct page *current_vmcs12_page;
358         struct vmcs12 *current_vmcs12;
359
360         /* vmcs02_list cache of VMCSs recently used to run L2 guests */
361         struct list_head vmcs02_pool;
362         int vmcs02_num;
363         u64 vmcs01_tsc_offset;
364         /* L2 must run next, and mustn't decide to exit to L1. */
365         bool nested_run_pending;
366         /*
367          * Guest pages referred to in vmcs02 with host-physical pointers, so
368          * we must keep them pinned while L2 runs.
369          */
370         struct page *apic_access_page;
371 };
372
373 struct vcpu_vmx {
374         struct kvm_vcpu       vcpu;
375         unsigned long         host_rsp;
376         u8                    fail;
377         u8                    cpl;
378         bool                  nmi_known_unmasked;
379         u32                   exit_intr_info;
380         u32                   idt_vectoring_info;
381         ulong                 rflags;
382         struct shared_msr_entry *guest_msrs;
383         int                   nmsrs;
384         int                   save_nmsrs;
385 #ifdef CONFIG_X86_64
386         u64                   msr_host_kernel_gs_base;
387         u64                   msr_guest_kernel_gs_base;
388 #endif
389         /*
390          * loaded_vmcs points to the VMCS currently used in this vcpu. For a
391          * non-nested (L1) guest, it always points to vmcs01. For a nested
392          * guest (L2), it points to a different VMCS.
393          */
394         struct loaded_vmcs    vmcs01;
395         struct loaded_vmcs   *loaded_vmcs;
396         bool                  __launched; /* temporary, used in vmx_vcpu_run */
397         struct msr_autoload {
398                 unsigned nr;
399                 struct vmx_msr_entry guest[NR_AUTOLOAD_MSRS];
400                 struct vmx_msr_entry host[NR_AUTOLOAD_MSRS];
401         } msr_autoload;
402         struct {
403                 int           loaded;
404                 u16           fs_sel, gs_sel, ldt_sel;
405 #ifdef CONFIG_X86_64
406                 u16           ds_sel, es_sel;
407 #endif
408                 int           gs_ldt_reload_needed;
409                 int           fs_reload_needed;
410         } host_state;
411         struct {
412                 int vm86_active;
413                 ulong save_rflags;
414                 struct kvm_segment segs[8];
415         } rmode;
416         struct {
417                 u32 bitmask; /* 4 bits per segment (1 bit per field) */
418                 struct kvm_save_segment {
419                         u16 selector;
420                         unsigned long base;
421                         u32 limit;
422                         u32 ar;
423                 } seg[8];
424         } segment_cache;
425         int vpid;
426         bool emulation_required;
427
428         /* Support for vnmi-less CPUs */
429         int soft_vnmi_blocked;
430         ktime_t entry_time;
431         s64 vnmi_blocked_time;
432         u32 exit_reason;
433
434         bool rdtscp_enabled;
435
436         /* Support for a guest hypervisor (nested VMX) */
437         struct nested_vmx nested;
438 };
439
440 enum segment_cache_field {
441         SEG_FIELD_SEL = 0,
442         SEG_FIELD_BASE = 1,
443         SEG_FIELD_LIMIT = 2,
444         SEG_FIELD_AR = 3,
445
446         SEG_FIELD_NR = 4
447 };
448
449 static inline struct vcpu_vmx *to_vmx(struct kvm_vcpu *vcpu)
450 {
451         return container_of(vcpu, struct vcpu_vmx, vcpu);
452 }
453
454 #define VMCS12_OFFSET(x) offsetof(struct vmcs12, x)
455 #define FIELD(number, name)     [number] = VMCS12_OFFSET(name)
456 #define FIELD64(number, name)   [number] = VMCS12_OFFSET(name), \
457                                 [number##_HIGH] = VMCS12_OFFSET(name)+4
458
459 static const unsigned short vmcs_field_to_offset_table[] = {
460         FIELD(VIRTUAL_PROCESSOR_ID, virtual_processor_id),
461         FIELD(GUEST_ES_SELECTOR, guest_es_selector),
462         FIELD(GUEST_CS_SELECTOR, guest_cs_selector),
463         FIELD(GUEST_SS_SELECTOR, guest_ss_selector),
464         FIELD(GUEST_DS_SELECTOR, guest_ds_selector),
465         FIELD(GUEST_FS_SELECTOR, guest_fs_selector),
466         FIELD(GUEST_GS_SELECTOR, guest_gs_selector),
467         FIELD(GUEST_LDTR_SELECTOR, guest_ldtr_selector),
468         FIELD(GUEST_TR_SELECTOR, guest_tr_selector),
469         FIELD(HOST_ES_SELECTOR, host_es_selector),
470         FIELD(HOST_CS_SELECTOR, host_cs_selector),
471         FIELD(HOST_SS_SELECTOR, host_ss_selector),
472         FIELD(HOST_DS_SELECTOR, host_ds_selector),
473         FIELD(HOST_FS_SELECTOR, host_fs_selector),
474         FIELD(HOST_GS_SELECTOR, host_gs_selector),
475         FIELD(HOST_TR_SELECTOR, host_tr_selector),
476         FIELD64(IO_BITMAP_A, io_bitmap_a),
477         FIELD64(IO_BITMAP_B, io_bitmap_b),
478         FIELD64(MSR_BITMAP, msr_bitmap),
479         FIELD64(VM_EXIT_MSR_STORE_ADDR, vm_exit_msr_store_addr),
480         FIELD64(VM_EXIT_MSR_LOAD_ADDR, vm_exit_msr_load_addr),
481         FIELD64(VM_ENTRY_MSR_LOAD_ADDR, vm_entry_msr_load_addr),
482         FIELD64(TSC_OFFSET, tsc_offset),
483         FIELD64(VIRTUAL_APIC_PAGE_ADDR, virtual_apic_page_addr),
484         FIELD64(APIC_ACCESS_ADDR, apic_access_addr),
485         FIELD64(EPT_POINTER, ept_pointer),
486         FIELD64(GUEST_PHYSICAL_ADDRESS, guest_physical_address),
487         FIELD64(VMCS_LINK_POINTER, vmcs_link_pointer),
488         FIELD64(GUEST_IA32_DEBUGCTL, guest_ia32_debugctl),
489         FIELD64(GUEST_IA32_PAT, guest_ia32_pat),
490         FIELD64(GUEST_IA32_EFER, guest_ia32_efer),
491         FIELD64(GUEST_IA32_PERF_GLOBAL_CTRL, guest_ia32_perf_global_ctrl),
492         FIELD64(GUEST_PDPTR0, guest_pdptr0),
493         FIELD64(GUEST_PDPTR1, guest_pdptr1),
494         FIELD64(GUEST_PDPTR2, guest_pdptr2),
495         FIELD64(GUEST_PDPTR3, guest_pdptr3),
496         FIELD64(HOST_IA32_PAT, host_ia32_pat),
497         FIELD64(HOST_IA32_EFER, host_ia32_efer),
498         FIELD64(HOST_IA32_PERF_GLOBAL_CTRL, host_ia32_perf_global_ctrl),
499         FIELD(PIN_BASED_VM_EXEC_CONTROL, pin_based_vm_exec_control),
500         FIELD(CPU_BASED_VM_EXEC_CONTROL, cpu_based_vm_exec_control),
501         FIELD(EXCEPTION_BITMAP, exception_bitmap),
502         FIELD(PAGE_FAULT_ERROR_CODE_MASK, page_fault_error_code_mask),
503         FIELD(PAGE_FAULT_ERROR_CODE_MATCH, page_fault_error_code_match),
504         FIELD(CR3_TARGET_COUNT, cr3_target_count),
505         FIELD(VM_EXIT_CONTROLS, vm_exit_controls),
506         FIELD(VM_EXIT_MSR_STORE_COUNT, vm_exit_msr_store_count),
507         FIELD(VM_EXIT_MSR_LOAD_COUNT, vm_exit_msr_load_count),
508         FIELD(VM_ENTRY_CONTROLS, vm_entry_controls),
509         FIELD(VM_ENTRY_MSR_LOAD_COUNT, vm_entry_msr_load_count),
510         FIELD(VM_ENTRY_INTR_INFO_FIELD, vm_entry_intr_info_field),
511         FIELD(VM_ENTRY_EXCEPTION_ERROR_CODE, vm_entry_exception_error_code),
512         FIELD(VM_ENTRY_INSTRUCTION_LEN, vm_entry_instruction_len),
513         FIELD(TPR_THRESHOLD, tpr_threshold),
514         FIELD(SECONDARY_VM_EXEC_CONTROL, secondary_vm_exec_control),
515         FIELD(VM_INSTRUCTION_ERROR, vm_instruction_error),
516         FIELD(VM_EXIT_REASON, vm_exit_reason),
517         FIELD(VM_EXIT_INTR_INFO, vm_exit_intr_info),
518         FIELD(VM_EXIT_INTR_ERROR_CODE, vm_exit_intr_error_code),
519         FIELD(IDT_VECTORING_INFO_FIELD, idt_vectoring_info_field),
520         FIELD(IDT_VECTORING_ERROR_CODE, idt_vectoring_error_code),
521         FIELD(VM_EXIT_INSTRUCTION_LEN, vm_exit_instruction_len),
522         FIELD(VMX_INSTRUCTION_INFO, vmx_instruction_info),
523         FIELD(GUEST_ES_LIMIT, guest_es_limit),
524         FIELD(GUEST_CS_LIMIT, guest_cs_limit),
525         FIELD(GUEST_SS_LIMIT, guest_ss_limit),
526         FIELD(GUEST_DS_LIMIT, guest_ds_limit),
527         FIELD(GUEST_FS_LIMIT, guest_fs_limit),
528         FIELD(GUEST_GS_LIMIT, guest_gs_limit),
529         FIELD(GUEST_LDTR_LIMIT, guest_ldtr_limit),
530         FIELD(GUEST_TR_LIMIT, guest_tr_limit),
531         FIELD(GUEST_GDTR_LIMIT, guest_gdtr_limit),
532         FIELD(GUEST_IDTR_LIMIT, guest_idtr_limit),
533         FIELD(GUEST_ES_AR_BYTES, guest_es_ar_bytes),
534         FIELD(GUEST_CS_AR_BYTES, guest_cs_ar_bytes),
535         FIELD(GUEST_SS_AR_BYTES, guest_ss_ar_bytes),
536         FIELD(GUEST_DS_AR_BYTES, guest_ds_ar_bytes),
537         FIELD(GUEST_FS_AR_BYTES, guest_fs_ar_bytes),
538         FIELD(GUEST_GS_AR_BYTES, guest_gs_ar_bytes),
539         FIELD(GUEST_LDTR_AR_BYTES, guest_ldtr_ar_bytes),
540         FIELD(GUEST_TR_AR_BYTES, guest_tr_ar_bytes),
541         FIELD(GUEST_INTERRUPTIBILITY_INFO, guest_interruptibility_info),
542         FIELD(GUEST_ACTIVITY_STATE, guest_activity_state),
543         FIELD(GUEST_SYSENTER_CS, guest_sysenter_cs),
544         FIELD(HOST_IA32_SYSENTER_CS, host_ia32_sysenter_cs),
545         FIELD(CR0_GUEST_HOST_MASK, cr0_guest_host_mask),
546         FIELD(CR4_GUEST_HOST_MASK, cr4_guest_host_mask),
547         FIELD(CR0_READ_SHADOW, cr0_read_shadow),
548         FIELD(CR4_READ_SHADOW, cr4_read_shadow),
549         FIELD(CR3_TARGET_VALUE0, cr3_target_value0),
550         FIELD(CR3_TARGET_VALUE1, cr3_target_value1),
551         FIELD(CR3_TARGET_VALUE2, cr3_target_value2),
552         FIELD(CR3_TARGET_VALUE3, cr3_target_value3),
553         FIELD(EXIT_QUALIFICATION, exit_qualification),
554         FIELD(GUEST_LINEAR_ADDRESS, guest_linear_address),
555         FIELD(GUEST_CR0, guest_cr0),
556         FIELD(GUEST_CR3, guest_cr3),
557         FIELD(GUEST_CR4, guest_cr4),
558         FIELD(GUEST_ES_BASE, guest_es_base),
559         FIELD(GUEST_CS_BASE, guest_cs_base),
560         FIELD(GUEST_SS_BASE, guest_ss_base),
561         FIELD(GUEST_DS_BASE, guest_ds_base),
562         FIELD(GUEST_FS_BASE, guest_fs_base),
563         FIELD(GUEST_GS_BASE, guest_gs_base),
564         FIELD(GUEST_LDTR_BASE, guest_ldtr_base),
565         FIELD(GUEST_TR_BASE, guest_tr_base),
566         FIELD(GUEST_GDTR_BASE, guest_gdtr_base),
567         FIELD(GUEST_IDTR_BASE, guest_idtr_base),
568         FIELD(GUEST_DR7, guest_dr7),
569         FIELD(GUEST_RSP, guest_rsp),
570         FIELD(GUEST_RIP, guest_rip),
571         FIELD(GUEST_RFLAGS, guest_rflags),
572         FIELD(GUEST_PENDING_DBG_EXCEPTIONS, guest_pending_dbg_exceptions),
573         FIELD(GUEST_SYSENTER_ESP, guest_sysenter_esp),
574         FIELD(GUEST_SYSENTER_EIP, guest_sysenter_eip),
575         FIELD(HOST_CR0, host_cr0),
576         FIELD(HOST_CR3, host_cr3),
577         FIELD(HOST_CR4, host_cr4),
578         FIELD(HOST_FS_BASE, host_fs_base),
579         FIELD(HOST_GS_BASE, host_gs_base),
580         FIELD(HOST_TR_BASE, host_tr_base),
581         FIELD(HOST_GDTR_BASE, host_gdtr_base),
582         FIELD(HOST_IDTR_BASE, host_idtr_base),
583         FIELD(HOST_IA32_SYSENTER_ESP, host_ia32_sysenter_esp),
584         FIELD(HOST_IA32_SYSENTER_EIP, host_ia32_sysenter_eip),
585         FIELD(HOST_RSP, host_rsp),
586         FIELD(HOST_RIP, host_rip),
587 };
588 static const int max_vmcs_field = ARRAY_SIZE(vmcs_field_to_offset_table);
589
590 static inline short vmcs_field_to_offset(unsigned long field)
591 {
592         if (field >= max_vmcs_field || vmcs_field_to_offset_table[field] == 0)
593                 return -1;
594         return vmcs_field_to_offset_table[field];
595 }
596
597 static inline struct vmcs12 *get_vmcs12(struct kvm_vcpu *vcpu)
598 {
599         return to_vmx(vcpu)->nested.current_vmcs12;
600 }
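/*
 * A minimal illustrative helper: this is how the offset table above is
 * meant to be consumed when emulating VMREAD for L1, assuming the caller
 * has already decoded the field encoding's width as 64 bits.  The real
 * emulation, further down in this file, also dispatches on the width bits
 * of the encoding.
 */
static inline int example_vmcs12_read64(struct kvm_vcpu *vcpu,
                                        unsigned long field, u64 *value)
{
        short offset = vmcs_field_to_offset(field);

        if (offset < 0)
                return -EINVAL; /* unknown field: reflect a VMfail to L1 */
        *value = *(u64 *)((char *)get_vmcs12(vcpu) + offset);
        return 0;
}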
601
602 static struct page *nested_get_page(struct kvm_vcpu *vcpu, gpa_t addr)
603 {
604         struct page *page = gfn_to_page(vcpu->kvm, addr >> PAGE_SHIFT);
605         if (is_error_page(page))
606                 return NULL;
607
608         return page;
609 }
610
611 static void nested_release_page(struct page *page)
612 {
613         kvm_release_page_dirty(page);
614 }
615
616 static void nested_release_page_clean(struct page *page)
617 {
618         kvm_release_page_clean(page);
619 }
620
621 static u64 construct_eptp(unsigned long root_hpa);
622 static void kvm_cpu_vmxon(u64 addr);
623 static void kvm_cpu_vmxoff(void);
624 static void vmx_set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3);
625 static int vmx_set_tss_addr(struct kvm *kvm, unsigned int addr);
626 static void vmx_set_segment(struct kvm_vcpu *vcpu,
627                             struct kvm_segment *var, int seg);
628 static void vmx_get_segment(struct kvm_vcpu *vcpu,
629                             struct kvm_segment *var, int seg);
630 static bool guest_state_valid(struct kvm_vcpu *vcpu);
631 static u32 vmx_segment_access_rights(struct kvm_segment *var);
632
633 static DEFINE_PER_CPU(struct vmcs *, vmxarea);
634 static DEFINE_PER_CPU(struct vmcs *, current_vmcs);
635 /*
636  * We maintain a per-CPU linked list of the VMCSs loaded on that CPU. This is
637  * needed to VMCLEAR all VMCSs loaded on a CPU when that CPU is brought down.
638  */
639 static DEFINE_PER_CPU(struct list_head, loaded_vmcss_on_cpu);
640 static DEFINE_PER_CPU(struct desc_ptr, host_gdt);
641
642 static unsigned long *vmx_io_bitmap_a;
643 static unsigned long *vmx_io_bitmap_b;
644 static unsigned long *vmx_msr_bitmap_legacy;
645 static unsigned long *vmx_msr_bitmap_longmode;
646 static unsigned long *vmx_msr_bitmap_legacy_x2apic;
647 static unsigned long *vmx_msr_bitmap_longmode_x2apic;
648
649 static bool cpu_has_load_ia32_efer;
650 static bool cpu_has_load_perf_global_ctrl;
651
652 static DECLARE_BITMAP(vmx_vpid_bitmap, VMX_NR_VPIDS);
653 static DEFINE_SPINLOCK(vmx_vpid_lock);
654
655 static struct vmcs_config {
656         int size;
657         int order;
658         u32 revision_id;
659         u32 pin_based_exec_ctrl;
660         u32 cpu_based_exec_ctrl;
661         u32 cpu_based_2nd_exec_ctrl;
662         u32 vmexit_ctrl;
663         u32 vmentry_ctrl;
664 } vmcs_config;
665
666 static struct vmx_capability {
667         u32 ept;
668         u32 vpid;
669 } vmx_capability;
670
671 #define VMX_SEGMENT_FIELD(seg)                                  \
672         [VCPU_SREG_##seg] = {                                   \
673                 .selector = GUEST_##seg##_SELECTOR,             \
674                 .base = GUEST_##seg##_BASE,                     \
675                 .limit = GUEST_##seg##_LIMIT,                   \
676                 .ar_bytes = GUEST_##seg##_AR_BYTES,             \
677         }
678
679 static const struct kvm_vmx_segment_field {
680         unsigned selector;
681         unsigned base;
682         unsigned limit;
683         unsigned ar_bytes;
684 } kvm_vmx_segment_fields[] = {
685         VMX_SEGMENT_FIELD(CS),
686         VMX_SEGMENT_FIELD(DS),
687         VMX_SEGMENT_FIELD(ES),
688         VMX_SEGMENT_FIELD(FS),
689         VMX_SEGMENT_FIELD(GS),
690         VMX_SEGMENT_FIELD(SS),
691         VMX_SEGMENT_FIELD(TR),
692         VMX_SEGMENT_FIELD(LDTR),
693 };
694
695 static u64 host_efer;
696
697 static void ept_save_pdptrs(struct kvm_vcpu *vcpu);
698
699 /*
700  * Keep MSR_STAR at the end, as setup_msrs() will try to optimize it
701  * away by decrementing the array size.
702  */
703 static const u32 vmx_msr_index[] = {
704 #ifdef CONFIG_X86_64
705         MSR_SYSCALL_MASK, MSR_LSTAR, MSR_CSTAR,
706 #endif
707         MSR_EFER, MSR_TSC_AUX, MSR_STAR,
708 };
709 #define NR_VMX_MSR ARRAY_SIZE(vmx_msr_index)
710
711 static inline bool is_page_fault(u32 intr_info)
712 {
713         return (intr_info & (INTR_INFO_INTR_TYPE_MASK | INTR_INFO_VECTOR_MASK |
714                              INTR_INFO_VALID_MASK)) ==
715                 (INTR_TYPE_HARD_EXCEPTION | PF_VECTOR | INTR_INFO_VALID_MASK);
716 }
717
718 static inline bool is_no_device(u32 intr_info)
719 {
720         return (intr_info & (INTR_INFO_INTR_TYPE_MASK | INTR_INFO_VECTOR_MASK |
721                              INTR_INFO_VALID_MASK)) ==
722                 (INTR_TYPE_HARD_EXCEPTION | NM_VECTOR | INTR_INFO_VALID_MASK);
723 }
724
725 static inline bool is_invalid_opcode(u32 intr_info)
726 {
727         return (intr_info & (INTR_INFO_INTR_TYPE_MASK | INTR_INFO_VECTOR_MASK |
728                              INTR_INFO_VALID_MASK)) ==
729                 (INTR_TYPE_HARD_EXCEPTION | UD_VECTOR | INTR_INFO_VALID_MASK);
730 }
731
732 static inline bool is_external_interrupt(u32 intr_info)
733 {
734         return (intr_info & (INTR_INFO_INTR_TYPE_MASK | INTR_INFO_VALID_MASK))
735                 == (INTR_TYPE_EXT_INTR | INTR_INFO_VALID_MASK);
736 }
737
738 static inline bool is_machine_check(u32 intr_info)
739 {
740         return (intr_info & (INTR_INFO_INTR_TYPE_MASK | INTR_INFO_VECTOR_MASK |
741                              INTR_INFO_VALID_MASK)) ==
742                 (INTR_TYPE_HARD_EXCEPTION | MC_VECTOR | INTR_INFO_VALID_MASK);
743 }
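/*
 * Worked example for the predicates above: a guest page fault that pushes
 * an error code arrives as
 *
 *        intr_info = 0x80000b0e
 *                  = INTR_INFO_VALID_MASK        (bit 31)
 *                  | INTR_INFO_DELIVER_CODE_MASK (bit 11)
 *                  | INTR_TYPE_HARD_EXCEPTION    (bits 10:8 = 3)
 *                  | PF_VECTOR                   (bits 7:0 = 14)
 *
 * is_page_fault() masks out everything except the valid bit, the type and
 * the vector, and compares the result against 0x8000030e.
 */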
744
745 static inline bool cpu_has_vmx_msr_bitmap(void)
746 {
747         return vmcs_config.cpu_based_exec_ctrl & CPU_BASED_USE_MSR_BITMAPS;
748 }
749
750 static inline bool cpu_has_vmx_tpr_shadow(void)
751 {
752         return vmcs_config.cpu_based_exec_ctrl & CPU_BASED_TPR_SHADOW;
753 }
754
755 static inline bool vm_need_tpr_shadow(struct kvm *kvm)
756 {
757         return (cpu_has_vmx_tpr_shadow()) && (irqchip_in_kernel(kvm));
758 }
759
760 static inline bool cpu_has_secondary_exec_ctrls(void)
761 {
762         return vmcs_config.cpu_based_exec_ctrl &
763                 CPU_BASED_ACTIVATE_SECONDARY_CONTROLS;
764 }
765
766 static inline bool cpu_has_vmx_virtualize_apic_accesses(void)
767 {
768         return vmcs_config.cpu_based_2nd_exec_ctrl &
769                 SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES;
770 }
771
772 static inline bool cpu_has_vmx_virtualize_x2apic_mode(void)
773 {
774         return vmcs_config.cpu_based_2nd_exec_ctrl &
775                 SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE;
776 }
777
778 static inline bool cpu_has_vmx_apic_register_virt(void)
779 {
780         return vmcs_config.cpu_based_2nd_exec_ctrl &
781                 SECONDARY_EXEC_APIC_REGISTER_VIRT;
782 }
783
784 static inline bool cpu_has_vmx_flexpriority(void)
785 {
786         return cpu_has_vmx_tpr_shadow() &&
787                 cpu_has_vmx_virtualize_apic_accesses();
788 }
789
790 static inline bool cpu_has_vmx_ept_execute_only(void)
791 {
792         return vmx_capability.ept & VMX_EPT_EXECUTE_ONLY_BIT;
793 }
794
795 static inline bool cpu_has_vmx_eptp_uncacheable(void)
796 {
797         return vmx_capability.ept & VMX_EPTP_UC_BIT;
798 }
799
800 static inline bool cpu_has_vmx_eptp_writeback(void)
801 {
802         return vmx_capability.ept & VMX_EPTP_WB_BIT;
803 }
804
805 static inline bool cpu_has_vmx_ept_2m_page(void)
806 {
807         return vmx_capability.ept & VMX_EPT_2MB_PAGE_BIT;
808 }
809
810 static inline bool cpu_has_vmx_ept_1g_page(void)
811 {
812         return vmx_capability.ept & VMX_EPT_1GB_PAGE_BIT;
813 }
814
815 static inline bool cpu_has_vmx_ept_4levels(void)
816 {
817         return vmx_capability.ept & VMX_EPT_PAGE_WALK_4_BIT;
818 }
819
820 static inline bool cpu_has_vmx_ept_ad_bits(void)
821 {
822         return vmx_capability.ept & VMX_EPT_AD_BIT;
823 }
824
825 static inline bool cpu_has_vmx_invept_context(void)
826 {
827         return vmx_capability.ept & VMX_EPT_EXTENT_CONTEXT_BIT;
828 }
829
830 static inline bool cpu_has_vmx_invept_global(void)
831 {
832         return vmx_capability.ept & VMX_EPT_EXTENT_GLOBAL_BIT;
833 }
834
835 static inline bool cpu_has_vmx_invvpid_single(void)
836 {
837         return vmx_capability.vpid & VMX_VPID_EXTENT_SINGLE_CONTEXT_BIT;
838 }
839
840 static inline bool cpu_has_vmx_invvpid_global(void)
841 {
842         return vmx_capability.vpid & VMX_VPID_EXTENT_GLOBAL_CONTEXT_BIT;
843 }
844
845 static inline bool cpu_has_vmx_ept(void)
846 {
847         return vmcs_config.cpu_based_2nd_exec_ctrl &
848                 SECONDARY_EXEC_ENABLE_EPT;
849 }
850
851 static inline bool cpu_has_vmx_unrestricted_guest(void)
852 {
853         return vmcs_config.cpu_based_2nd_exec_ctrl &
854                 SECONDARY_EXEC_UNRESTRICTED_GUEST;
855 }
856
857 static inline bool cpu_has_vmx_ple(void)
858 {
859         return vmcs_config.cpu_based_2nd_exec_ctrl &
860                 SECONDARY_EXEC_PAUSE_LOOP_EXITING;
861 }
862
863 static inline bool vm_need_virtualize_apic_accesses(struct kvm *kvm)
864 {
865         return flexpriority_enabled && irqchip_in_kernel(kvm);
866 }
867
868 static inline bool cpu_has_vmx_vpid(void)
869 {
870         return vmcs_config.cpu_based_2nd_exec_ctrl &
871                 SECONDARY_EXEC_ENABLE_VPID;
872 }
873
874 static inline bool cpu_has_vmx_rdtscp(void)
875 {
876         return vmcs_config.cpu_based_2nd_exec_ctrl &
877                 SECONDARY_EXEC_RDTSCP;
878 }
879
880 static inline bool cpu_has_vmx_invpcid(void)
881 {
882         return vmcs_config.cpu_based_2nd_exec_ctrl &
883                 SECONDARY_EXEC_ENABLE_INVPCID;
884 }
885
886 static inline bool cpu_has_virtual_nmis(void)
887 {
888         return vmcs_config.pin_based_exec_ctrl & PIN_BASED_VIRTUAL_NMIS;
889 }
890
891 static inline bool cpu_has_vmx_wbinvd_exit(void)
892 {
893         return vmcs_config.cpu_based_2nd_exec_ctrl &
894                 SECONDARY_EXEC_WBINVD_EXITING;
895 }
896
897 static inline bool report_flexpriority(void)
898 {
899         return flexpriority_enabled;
900 }
901
902 static inline bool nested_cpu_has(struct vmcs12 *vmcs12, u32 bit)
903 {
904         return vmcs12->cpu_based_vm_exec_control & bit;
905 }
906
907 static inline bool nested_cpu_has2(struct vmcs12 *vmcs12, u32 bit)
908 {
909         return (vmcs12->cpu_based_vm_exec_control &
910                         CPU_BASED_ACTIVATE_SECONDARY_CONTROLS) &&
911                 (vmcs12->secondary_vm_exec_control & bit);
912 }
913
914 static inline bool nested_cpu_has_virtual_nmis(struct vmcs12 *vmcs12,
915         struct kvm_vcpu *vcpu)
916 {
917         return vmcs12->pin_based_vm_exec_control & PIN_BASED_VIRTUAL_NMIS;
918 }
919
920 static inline bool is_exception(u32 intr_info)
921 {
922         return (intr_info & (INTR_INFO_INTR_TYPE_MASK | INTR_INFO_VALID_MASK))
923                 == (INTR_TYPE_HARD_EXCEPTION | INTR_INFO_VALID_MASK);
924 }
925
926 static void nested_vmx_vmexit(struct kvm_vcpu *vcpu);
927 static void nested_vmx_entry_failure(struct kvm_vcpu *vcpu,
928                         struct vmcs12 *vmcs12,
929                         u32 reason, unsigned long qualification);
930
931 static int __find_msr_index(struct vcpu_vmx *vmx, u32 msr)
932 {
933         int i;
934
935         for (i = 0; i < vmx->nmsrs; ++i)
936                 if (vmx_msr_index[vmx->guest_msrs[i].index] == msr)
937                         return i;
938         return -1;
939 }
940
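/*
 * The operand below mirrors the 128-bit INVVPID descriptor: the VPID in
 * bits 15:0, reserved bits that must be zero, and the linear address in
 * the upper 64 bits.  Which fields are consulted depends on the
 * invalidation type passed in @ext.
 */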
941 static inline void __invvpid(int ext, u16 vpid, gva_t gva)
942 {
943         struct {
944                 u64 vpid : 16;
945                 u64 rsvd : 48;
946                 u64 gva;
947         } operand = { vpid, 0, gva };
948
949         asm volatile (__ex(ASM_VMX_INVVPID)
950                         /* CF==1 or ZF==1 --> rc = -1 */
951                         "; ja 1f ; ud2 ; 1:"
952                         : : "a"(&operand), "c"(ext) : "cc", "memory");
953 }
954
955 static inline void __invept(int ext, u64 eptp, gpa_t gpa)
956 {
957         struct {
958                 u64 eptp, gpa;
959         } operand = {eptp, gpa};
960
961         asm volatile (__ex(ASM_VMX_INVEPT)
962                         /* CF==1 or ZF==1 --> rc = -1 */
963                         "; ja 1f ; ud2 ; 1:\n"
964                         : : "a" (&operand), "c" (ext) : "cc", "memory");
965 }
966
967 static struct shared_msr_entry *find_msr_entry(struct vcpu_vmx *vmx, u32 msr)
968 {
969         int i;
970
971         i = __find_msr_index(vmx, msr);
972         if (i >= 0)
973                 return &vmx->guest_msrs[i];
974         return NULL;
975 }
976
977 static void vmcs_clear(struct vmcs *vmcs)
978 {
979         u64 phys_addr = __pa(vmcs);
980         u8 error;
981
982         asm volatile (__ex(ASM_VMX_VMCLEAR_RAX) "; setna %0"
983                       : "=qm"(error) : "a"(&phys_addr), "m"(phys_addr)
984                       : "cc", "memory");
985         if (error)
986                 printk(KERN_ERR "kvm: vmclear fail: %p/%llx\n",
987                        vmcs, phys_addr);
988 }
989
990 static inline void loaded_vmcs_init(struct loaded_vmcs *loaded_vmcs)
991 {
992         vmcs_clear(loaded_vmcs->vmcs);
993         loaded_vmcs->cpu = -1;
994         loaded_vmcs->launched = 0;
995 }
996
997 static void vmcs_load(struct vmcs *vmcs)
998 {
999         u64 phys_addr = __pa(vmcs);
1000         u8 error;
1001
1002         asm volatile (__ex(ASM_VMX_VMPTRLD_RAX) "; setna %0"
1003                         : "=qm"(error) : "a"(&phys_addr), "m"(phys_addr)
1004                         : "cc", "memory");
1005         if (error)
1006                 printk(KERN_ERR "kvm: vmptrld %p/%llx failed\n",
1007                        vmcs, phys_addr);
1008 }
1009
1010 #ifdef CONFIG_KEXEC
1011 /*
1012  * This bitmap indicates, per cpu, whether the vmclear
1013  * operation is enabled in the crash path. It is disabled
1014  * on all cpus by default.
1015  */
1016 static cpumask_t crash_vmclear_enabled_bitmap = CPU_MASK_NONE;
1017
1018 static inline void crash_enable_local_vmclear(int cpu)
1019 {
1020         cpumask_set_cpu(cpu, &crash_vmclear_enabled_bitmap);
1021 }
1022
1023 static inline void crash_disable_local_vmclear(int cpu)
1024 {
1025         cpumask_clear_cpu(cpu, &crash_vmclear_enabled_bitmap);
1026 }
1027
1028 static inline int crash_local_vmclear_enabled(int cpu)
1029 {
1030         return cpumask_test_cpu(cpu, &crash_vmclear_enabled_bitmap);
1031 }
1032
1033 static void crash_vmclear_local_loaded_vmcss(void)
1034 {
1035         int cpu = raw_smp_processor_id();
1036         struct loaded_vmcs *v;
1037
1038         if (!crash_local_vmclear_enabled(cpu))
1039                 return;
1040
1041         list_for_each_entry(v, &per_cpu(loaded_vmcss_on_cpu, cpu),
1042                             loaded_vmcss_on_cpu_link)
1043                 vmcs_clear(v->vmcs);
1044 }
1045 #else
1046 static inline void crash_enable_local_vmclear(int cpu) { }
1047 static inline void crash_disable_local_vmclear(int cpu) { }
1048 #endif /* CONFIG_KEXEC */
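/*
 * A sketch of how the handler above is expected to be hooked up (the
 * registration itself happens in the module init path, further down in
 * this file): the kexec crash code calls through a pointer so that every
 * loaded VMCS is flushed with VMCLEAR before the crash kernel boots,
 * roughly
 *
 *        rcu_assign_pointer(crash_vmclear_loaded_vmcss,
 *                           crash_vmclear_local_loaded_vmcss);
 *
 * with crash_vmclear_loaded_vmcss assumed to be the hook declared for
 * this purpose in <asm/kexec.h>.
 */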
1049
1050 static void __loaded_vmcs_clear(void *arg)
1051 {
1052         struct loaded_vmcs *loaded_vmcs = arg;
1053         int cpu = raw_smp_processor_id();
1054
1055         if (loaded_vmcs->cpu != cpu)
1056                 return; /* vcpu migration can race with cpu offline */
1057         if (per_cpu(current_vmcs, cpu) == loaded_vmcs->vmcs)
1058                 per_cpu(current_vmcs, cpu) = NULL;
1059         crash_disable_local_vmclear(cpu);
1060         list_del(&loaded_vmcs->loaded_vmcss_on_cpu_link);
1061
1062         /*
1063          * We should ensure that updating loaded_vmcs->loaded_vmcss_on_cpu_link
1064          * happens before setting loaded_vmcs->cpu to -1, which is done in
1065          * loaded_vmcs_init. Otherwise, another cpu could see cpu == -1 first
1066          * and then add the vmcs to the percpu list before it is deleted.
1067          */
1068         smp_wmb();
1069
1070         loaded_vmcs_init(loaded_vmcs);
1071         crash_enable_local_vmclear(cpu);
1072 }
1073
1074 static void loaded_vmcs_clear(struct loaded_vmcs *loaded_vmcs)
1075 {
1076         int cpu = loaded_vmcs->cpu;
1077
1078         if (cpu != -1)
1079                 smp_call_function_single(cpu,
1080                          __loaded_vmcs_clear, loaded_vmcs, 1);
1081 }
1082
1083 static inline void vpid_sync_vcpu_single(struct vcpu_vmx *vmx)
1084 {
1085         if (vmx->vpid == 0)
1086                 return;
1087
1088         if (cpu_has_vmx_invvpid_single())
1089                 __invvpid(VMX_VPID_EXTENT_SINGLE_CONTEXT, vmx->vpid, 0);
1090 }
1091
1092 static inline void vpid_sync_vcpu_global(void)
1093 {
1094         if (cpu_has_vmx_invvpid_global())
1095                 __invvpid(VMX_VPID_EXTENT_ALL_CONTEXT, 0, 0);
1096 }
1097
1098 static inline void vpid_sync_context(struct vcpu_vmx *vmx)
1099 {
1100         if (cpu_has_vmx_invvpid_single())
1101                 vpid_sync_vcpu_single(vmx);
1102         else
1103                 vpid_sync_vcpu_global();
1104 }
1105
1106 static inline void ept_sync_global(void)
1107 {
1108         if (cpu_has_vmx_invept_global())
1109                 __invept(VMX_EPT_EXTENT_GLOBAL, 0, 0);
1110 }
1111
1112 static inline void ept_sync_context(u64 eptp)
1113 {
1114         if (enable_ept) {
1115                 if (cpu_has_vmx_invept_context())
1116                         __invept(VMX_EPT_EXTENT_CONTEXT, eptp, 0);
1117                 else
1118                         ept_sync_global();
1119         }
1120 }
1121
1122 static __always_inline unsigned long vmcs_readl(unsigned long field)
1123 {
1124         unsigned long value;
1125
1126         asm volatile (__ex_clear(ASM_VMX_VMREAD_RDX_RAX, "%0")
1127                       : "=a"(value) : "d"(field) : "cc");
1128         return value;
1129 }
1130
1131 static __always_inline u16 vmcs_read16(unsigned long field)
1132 {
1133         return vmcs_readl(field);
1134 }
1135
1136 static __always_inline u32 vmcs_read32(unsigned long field)
1137 {
1138         return vmcs_readl(field);
1139 }
1140
1141 static __always_inline u64 vmcs_read64(unsigned long field)
1142 {
1143 #ifdef CONFIG_X86_64
1144         return vmcs_readl(field);
1145 #else
1146         return vmcs_readl(field) | ((u64)vmcs_readl(field+1) << 32);
1147 #endif
1148 }
1149
1150 static noinline void vmwrite_error(unsigned long field, unsigned long value)
1151 {
1152         printk(KERN_ERR "vmwrite error: reg %lx value %lx (err %d)\n",
1153                field, value, vmcs_read32(VM_INSTRUCTION_ERROR));
1154         dump_stack();
1155 }
1156
1157 static void vmcs_writel(unsigned long field, unsigned long value)
1158 {
1159         u8 error;
1160
1161         asm volatile (__ex(ASM_VMX_VMWRITE_RAX_RDX) "; setna %0"
1162                        : "=q"(error) : "a"(value), "d"(field) : "cc");
1163         if (unlikely(error))
1164                 vmwrite_error(field, value);
1165 }
1166
1167 static void vmcs_write16(unsigned long field, u16 value)
1168 {
1169         vmcs_writel(field, value);
1170 }
1171
1172 static void vmcs_write32(unsigned long field, u32 value)
1173 {
1174         vmcs_writel(field, value);
1175 }
1176
1177 static void vmcs_write64(unsigned long field, u64 value)
1178 {
1179         vmcs_writel(field, value);
1180 #ifndef CONFIG_X86_64
1181         asm volatile ("");
1182         vmcs_writel(field+1, value >> 32);
1183 #endif
1184 }
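/*
 * Note on the field+1 accesses above: VMCS field encodings use bit 0 as
 * the access type, so encoding+1 names the high 32 bits of a 64-bit
 * field.  This is the same convention the FIELD64()/_HIGH entries in the
 * vmcs12 offset table rely on.
 */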
1185
1186 static void vmcs_clear_bits(unsigned long field, u32 mask)
1187 {
1188         vmcs_writel(field, vmcs_readl(field) & ~mask);
1189 }
1190
1191 static void vmcs_set_bits(unsigned long field, u32 mask)
1192 {
1193         vmcs_writel(field, vmcs_readl(field) | mask);
1194 }
1195
1196 static void vmx_segment_cache_clear(struct vcpu_vmx *vmx)
1197 {
1198         vmx->segment_cache.bitmask = 0;
1199 }
1200
1201 static bool vmx_segment_cache_test_set(struct vcpu_vmx *vmx, unsigned seg,
1202                                        unsigned field)
1203 {
1204         bool ret;
1205         u32 mask = 1 << (seg * SEG_FIELD_NR + field);
1206
1207         if (!(vmx->vcpu.arch.regs_avail & (1 << VCPU_EXREG_SEGMENTS))) {
1208                 vmx->vcpu.arch.regs_avail |= (1 << VCPU_EXREG_SEGMENTS);
1209                 vmx->segment_cache.bitmask = 0;
1210         }
1211         ret = vmx->segment_cache.bitmask & mask;
1212         vmx->segment_cache.bitmask |= mask;
1213         return ret;
1214 }
1215
1216 static u16 vmx_read_guest_seg_selector(struct vcpu_vmx *vmx, unsigned seg)
1217 {
1218         u16 *p = &vmx->segment_cache.seg[seg].selector;
1219
1220         if (!vmx_segment_cache_test_set(vmx, seg, SEG_FIELD_SEL))
1221                 *p = vmcs_read16(kvm_vmx_segment_fields[seg].selector);
1222         return *p;
1223 }
1224
1225 static ulong vmx_read_guest_seg_base(struct vcpu_vmx *vmx, unsigned seg)
1226 {
1227         ulong *p = &vmx->segment_cache.seg[seg].base;
1228
1229         if (!vmx_segment_cache_test_set(vmx, seg, SEG_FIELD_BASE))
1230                 *p = vmcs_readl(kvm_vmx_segment_fields[seg].base);
1231         return *p;
1232 }
1233
1234 static u32 vmx_read_guest_seg_limit(struct vcpu_vmx *vmx, unsigned seg)
1235 {
1236         u32 *p = &vmx->segment_cache.seg[seg].limit;
1237
1238         if (!vmx_segment_cache_test_set(vmx, seg, SEG_FIELD_LIMIT))
1239                 *p = vmcs_read32(kvm_vmx_segment_fields[seg].limit);
1240         return *p;
1241 }
1242
1243 static u32 vmx_read_guest_seg_ar(struct vcpu_vmx *vmx, unsigned seg)
1244 {
1245         u32 *p = &vmx->segment_cache.seg[seg].ar;
1246
1247         if (!vmx_segment_cache_test_set(vmx, seg, SEG_FIELD_AR))
1248                 *p = vmcs_read32(kvm_vmx_segment_fields[seg].ar_bytes);
1249         return *p;
1250 }
1251
1252 static void update_exception_bitmap(struct kvm_vcpu *vcpu)
1253 {
1254         u32 eb;
1255
1256         eb = (1u << PF_VECTOR) | (1u << UD_VECTOR) | (1u << MC_VECTOR) |
1257              (1u << NM_VECTOR) | (1u << DB_VECTOR);
1258         if ((vcpu->guest_debug &
1259              (KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_USE_SW_BP)) ==
1260             (KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_USE_SW_BP))
1261                 eb |= 1u << BP_VECTOR;
1262         if (to_vmx(vcpu)->rmode.vm86_active)
1263                 eb = ~0;
1264         if (enable_ept)
1265                 eb &= ~(1u << PF_VECTOR); /* bypass_guest_pf = 0 */
1266         if (vcpu->fpu_active)
1267                 eb &= ~(1u << NM_VECTOR);
1268
1269         /* When we are running a nested L2 guest and L1 specified an exception
1270          * bitmap for it, we must trap the same exceptions and pass them to L1.
1271          * When running L2, we only handle the exceptions specified above if L1
1272          * did not want them.
1273          */
1274         if (is_guest_mode(vcpu))
1275                 eb |= get_vmcs12(vcpu)->exception_bitmap;
1276
1277         vmcs_write32(EXCEPTION_BITMAP, eb);
1278 }
1279
1280 static void clear_atomic_switch_msr_special(unsigned long entry,
1281                 unsigned long exit)
1282 {
1283         vmcs_clear_bits(VM_ENTRY_CONTROLS, entry);
1284         vmcs_clear_bits(VM_EXIT_CONTROLS, exit);
1285 }
1286
1287 static void clear_atomic_switch_msr(struct vcpu_vmx *vmx, unsigned msr)
1288 {
1289         unsigned i;
1290         struct msr_autoload *m = &vmx->msr_autoload;
1291
1292         switch (msr) {
1293         case MSR_EFER:
1294                 if (cpu_has_load_ia32_efer) {
1295                         clear_atomic_switch_msr_special(VM_ENTRY_LOAD_IA32_EFER,
1296                                         VM_EXIT_LOAD_IA32_EFER);
1297                         return;
1298                 }
1299                 break;
1300         case MSR_CORE_PERF_GLOBAL_CTRL:
1301                 if (cpu_has_load_perf_global_ctrl) {
1302                         clear_atomic_switch_msr_special(
1303                                         VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL,
1304                                         VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL);
1305                         return;
1306                 }
1307                 break;
1308         }
1309
1310         for (i = 0; i < m->nr; ++i)
1311                 if (m->guest[i].index == msr)
1312                         break;
1313
1314         if (i == m->nr)
1315                 return;
1316         --m->nr;
1317         m->guest[i] = m->guest[m->nr];
1318         m->host[i] = m->host[m->nr];
1319         vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, m->nr);
1320         vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, m->nr);
1321 }
1322
1323 static void add_atomic_switch_msr_special(unsigned long entry,
1324                 unsigned long exit, unsigned long guest_val_vmcs,
1325                 unsigned long host_val_vmcs, u64 guest_val, u64 host_val)
1326 {
1327         vmcs_write64(guest_val_vmcs, guest_val);
1328         vmcs_write64(host_val_vmcs, host_val);
1329         vmcs_set_bits(VM_ENTRY_CONTROLS, entry);
1330         vmcs_set_bits(VM_EXIT_CONTROLS, exit);
1331 }
1332
1333 static void add_atomic_switch_msr(struct vcpu_vmx *vmx, unsigned msr,
1334                                   u64 guest_val, u64 host_val)
1335 {
1336         unsigned i;
1337         struct msr_autoload *m = &vmx->msr_autoload;
1338
1339         switch (msr) {
1340         case MSR_EFER:
1341                 if (cpu_has_load_ia32_efer) {
1342                         add_atomic_switch_msr_special(VM_ENTRY_LOAD_IA32_EFER,
1343                                         VM_EXIT_LOAD_IA32_EFER,
1344                                         GUEST_IA32_EFER,
1345                                         HOST_IA32_EFER,
1346                                         guest_val, host_val);
1347                         return;
1348                 }
1349                 break;
1350         case MSR_CORE_PERF_GLOBAL_CTRL:
1351                 if (cpu_has_load_perf_global_ctrl) {
1352                         add_atomic_switch_msr_special(
1353                                         VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL,
1354                                         VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL,
1355                                         GUEST_IA32_PERF_GLOBAL_CTRL,
1356                                         HOST_IA32_PERF_GLOBAL_CTRL,
1357                                         guest_val, host_val);
1358                         return;
1359                 }
1360                 break;
1361         }
1362
1363         for (i = 0; i < m->nr; ++i)
1364                 if (m->guest[i].index == msr)
1365                         break;
1366
1367         if (i == NR_AUTOLOAD_MSRS) {
1368                 printk_once(KERN_WARNING"Not enough msr switch entries. "
1369                                 "Can't add msr %x\n", msr);
1370                 return;
1371         } else if (i == m->nr) {
1372                 ++m->nr;
1373                 vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, m->nr);
1374                 vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, m->nr);
1375         }
1376
1377         m->guest[i].index = msr;
1378         m->guest[i].value = guest_val;
1379         m->host[i].index = msr;
1380         m->host[i].value = host_val;
1381 }
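/*
 * The guest[]/host[] arrays managed above are the VM-entry and VM-exit
 * MSR-load areas; their physical addresses are written into the VMCS
 * (VM_ENTRY_MSR_LOAD_ADDR and friends) during per-vcpu VMCS setup, further
 * down in this file.  Typical usage is what update_transition_efer() does
 * below:
 *
 *        add_atomic_switch_msr(vmx, MSR_EFER, guest_efer, host_efer);
 *
 * which makes the CPU itself swap EFER on every VM entry and VM exit.
 */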
1382
1383 static void reload_tss(void)
1384 {
1385         /*
1386          * VT restores TR but not its size.  Useless.
1387          */
1388         struct desc_ptr *gdt = &__get_cpu_var(host_gdt);
1389         struct desc_struct *descs;
1390
1391         descs = (void *)gdt->address;
1392         descs[GDT_ENTRY_TSS].type = 9; /* available TSS */
1393         load_TR_desc();
1394 }
1395
1396 static bool update_transition_efer(struct vcpu_vmx *vmx, int efer_offset)
1397 {
1398         u64 guest_efer;
1399         u64 ignore_bits;
1400
1401         guest_efer = vmx->vcpu.arch.efer;
1402
1403         /*
1404          * NX is emulated; LMA and LME handled by hardware; SCE meaningless
1405          * outside long mode
1406          */
1407         ignore_bits = EFER_NX | EFER_SCE;
1408 #ifdef CONFIG_X86_64
1409         ignore_bits |= EFER_LMA | EFER_LME;
1410         /* SCE is meaningful only in long mode on Intel */
1411         if (guest_efer & EFER_LMA)
1412                 ignore_bits &= ~(u64)EFER_SCE;
1413 #endif
1414         guest_efer &= ~ignore_bits;
1415         guest_efer |= host_efer & ignore_bits;
1416         vmx->guest_msrs[efer_offset].data = guest_efer;
1417         vmx->guest_msrs[efer_offset].mask = ~ignore_bits;
1418
1419         clear_atomic_switch_msr(vmx, MSR_EFER);
1420         /* With EPT, we can't emulate NX and must switch it atomically */
1421         if (enable_ept && ((vmx->vcpu.arch.efer ^ host_efer) & EFER_NX)) {
1422                 guest_efer = vmx->vcpu.arch.efer;
1423                 if (!(guest_efer & EFER_LMA))
1424                         guest_efer &= ~EFER_LME;
1425                 add_atomic_switch_msr(vmx, MSR_EFER, guest_efer, host_efer);
1426                 return false;
1427         }
1428
1429         return true;
1430 }
1431
1432 static unsigned long segment_base(u16 selector)
1433 {
1434         struct desc_ptr *gdt = &__get_cpu_var(host_gdt);
1435         struct desc_struct *d;
1436         unsigned long table_base;
1437         unsigned long v;
1438
1439         if (!(selector & ~3))
1440                 return 0;
1441
1442         table_base = gdt->address;
1443
1444         if (selector & 4) {           /* from ldt */
1445                 u16 ldt_selector = kvm_read_ldt();
1446
1447                 if (!(ldt_selector & ~3))
1448                         return 0;
1449
1450                 table_base = segment_base(ldt_selector);
1451         }
1452         d = (struct desc_struct *)(table_base + (selector & ~7));
1453         v = get_desc_base(d);
1454 #ifdef CONFIG_X86_64
1455         if (d->s == 0 && (d->type == 2 || d->type == 9 || d->type == 11))
1456                 v |= ((unsigned long)((struct ldttss_desc64 *)d)->base3) << 32;
1457 #endif
1458         return v;
1459 }
1460
1461 static inline unsigned long kvm_read_tr_base(void)
1462 {
1463         u16 tr;
1464         asm("str %0" : "=g"(tr));
1465         return segment_base(tr);
1466 }
1467
1468 static void vmx_save_host_state(struct kvm_vcpu *vcpu)
1469 {
1470         struct vcpu_vmx *vmx = to_vmx(vcpu);
1471         int i;
1472
1473         if (vmx->host_state.loaded)
1474                 return;
1475
1476         vmx->host_state.loaded = 1;
1477         /*
1478          * Set host fs and gs selectors.  Unfortunately, SDM section 22.2.3 does
1479          * not allow segment selectors with cpl > 0 or ti == 1.
1480          */
1481         vmx->host_state.ldt_sel = kvm_read_ldt();
1482         vmx->host_state.gs_ldt_reload_needed = vmx->host_state.ldt_sel;
1483         savesegment(fs, vmx->host_state.fs_sel);
1484         if (!(vmx->host_state.fs_sel & 7)) {
1485                 vmcs_write16(HOST_FS_SELECTOR, vmx->host_state.fs_sel);
1486                 vmx->host_state.fs_reload_needed = 0;
1487         } else {
1488                 vmcs_write16(HOST_FS_SELECTOR, 0);
1489                 vmx->host_state.fs_reload_needed = 1;
1490         }
1491         savesegment(gs, vmx->host_state.gs_sel);
1492         if (!(vmx->host_state.gs_sel & 7))
1493                 vmcs_write16(HOST_GS_SELECTOR, vmx->host_state.gs_sel);
1494         else {
1495                 vmcs_write16(HOST_GS_SELECTOR, 0);
1496                 vmx->host_state.gs_ldt_reload_needed = 1;
1497         }
1498
1499 #ifdef CONFIG_X86_64
1500         savesegment(ds, vmx->host_state.ds_sel);
1501         savesegment(es, vmx->host_state.es_sel);
1502 #endif
1503
1504 #ifdef CONFIG_X86_64
1505         vmcs_writel(HOST_FS_BASE, read_msr(MSR_FS_BASE));
1506         vmcs_writel(HOST_GS_BASE, read_msr(MSR_GS_BASE));
1507 #else
1508         vmcs_writel(HOST_FS_BASE, segment_base(vmx->host_state.fs_sel));
1509         vmcs_writel(HOST_GS_BASE, segment_base(vmx->host_state.gs_sel));
1510 #endif
1511
1512 #ifdef CONFIG_X86_64
1513         rdmsrl(MSR_KERNEL_GS_BASE, vmx->msr_host_kernel_gs_base);
1514         if (is_long_mode(&vmx->vcpu))
1515                 wrmsrl(MSR_KERNEL_GS_BASE, vmx->msr_guest_kernel_gs_base);
1516 #endif
1517         for (i = 0; i < vmx->save_nmsrs; ++i)
1518                 kvm_set_shared_msr(vmx->guest_msrs[i].index,
1519                                    vmx->guest_msrs[i].data,
1520                                    vmx->guest_msrs[i].mask);
1521 }
1522
1523 static void __vmx_load_host_state(struct vcpu_vmx *vmx)
1524 {
1525         if (!vmx->host_state.loaded)
1526                 return;
1527
1528         ++vmx->vcpu.stat.host_state_reload;
1529         vmx->host_state.loaded = 0;
1530 #ifdef CONFIG_X86_64
1531         if (is_long_mode(&vmx->vcpu))
1532                 rdmsrl(MSR_KERNEL_GS_BASE, vmx->msr_guest_kernel_gs_base);
1533 #endif
1534         if (vmx->host_state.gs_ldt_reload_needed) {
1535                 kvm_load_ldt(vmx->host_state.ldt_sel);
1536 #ifdef CONFIG_X86_64
1537                 load_gs_index(vmx->host_state.gs_sel);
1538 #else
1539                 loadsegment(gs, vmx->host_state.gs_sel);
1540 #endif
1541         }
1542         if (vmx->host_state.fs_reload_needed)
1543                 loadsegment(fs, vmx->host_state.fs_sel);
1544 #ifdef CONFIG_X86_64
1545         if (unlikely(vmx->host_state.ds_sel | vmx->host_state.es_sel)) {
1546                 loadsegment(ds, vmx->host_state.ds_sel);
1547                 loadsegment(es, vmx->host_state.es_sel);
1548         }
1549 #endif
1550         reload_tss();
1551 #ifdef CONFIG_X86_64
1552         wrmsrl(MSR_KERNEL_GS_BASE, vmx->msr_host_kernel_gs_base);
1553 #endif
1554         /*
1555          * If the FPU is not active (through the host task or
1556          * the guest vcpu), then restore the cr0.TS bit.
1557          */
1558         if (!user_has_fpu() && !vmx->vcpu.guest_fpu_loaded)
1559                 stts();
1560         load_gdt(&__get_cpu_var(host_gdt));
1561 }
1562
1563 static void vmx_load_host_state(struct vcpu_vmx *vmx)
1564 {
1565         preempt_disable();
1566         __vmx_load_host_state(vmx);
1567         preempt_enable();
1568 }
1569
1570 /*
1571  * Switches to the specified vcpu, until a matching vcpu_put(), but assumes
1572  * the vcpu mutex is already taken.
1573  */
1574 static void vmx_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
1575 {
1576         struct vcpu_vmx *vmx = to_vmx(vcpu);
1577         u64 phys_addr = __pa(per_cpu(vmxarea, cpu));
1578
1579         if (!vmm_exclusive)
1580                 kvm_cpu_vmxon(phys_addr);
1581         else if (vmx->loaded_vmcs->cpu != cpu)
1582                 loaded_vmcs_clear(vmx->loaded_vmcs);
1583
1584         if (per_cpu(current_vmcs, cpu) != vmx->loaded_vmcs->vmcs) {
1585                 per_cpu(current_vmcs, cpu) = vmx->loaded_vmcs->vmcs;
1586                 vmcs_load(vmx->loaded_vmcs->vmcs);
1587         }
1588
1589         if (vmx->loaded_vmcs->cpu != cpu) {
1590                 struct desc_ptr *gdt = &__get_cpu_var(host_gdt);
1591                 unsigned long sysenter_esp;
1592
1593                 kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
1594                 local_irq_disable();
1595                 crash_disable_local_vmclear(cpu);
1596
1597                 /*
1598                  * Reading loaded_vmcs->cpu must happen before fetching
1599                  * loaded_vmcs->loaded_vmcss_on_cpu_link.
1600                  * See the comments in __loaded_vmcs_clear().
1601                  */
1602                 smp_rmb();
1603
1604                 list_add(&vmx->loaded_vmcs->loaded_vmcss_on_cpu_link,
1605                          &per_cpu(loaded_vmcss_on_cpu, cpu));
1606                 crash_enable_local_vmclear(cpu);
1607                 local_irq_enable();
1608
1609                 /*
1610                  * Linux uses per-cpu TSS and GDT, so set these when switching
1611                  * processors.
1612                  */
1613                 vmcs_writel(HOST_TR_BASE, kvm_read_tr_base()); /* 22.2.4 */
1614                 vmcs_writel(HOST_GDTR_BASE, gdt->address);   /* 22.2.4 */
1615
1616                 rdmsrl(MSR_IA32_SYSENTER_ESP, sysenter_esp);
1617                 vmcs_writel(HOST_IA32_SYSENTER_ESP, sysenter_esp); /* 22.2.3 */
1618                 vmx->loaded_vmcs->cpu = cpu;
1619         }
1620 }
1621
1622 static void vmx_vcpu_put(struct kvm_vcpu *vcpu)
1623 {
1624         __vmx_load_host_state(to_vmx(vcpu));
1625         if (!vmm_exclusive) {
1626                 __loaded_vmcs_clear(to_vmx(vcpu)->loaded_vmcs);
1627                 vcpu->cpu = -1;
1628                 kvm_cpu_vmxoff();
1629         }
1630 }
1631
1632 static void vmx_fpu_activate(struct kvm_vcpu *vcpu)
1633 {
1634         ulong cr0;
1635
1636         if (vcpu->fpu_active)
1637                 return;
1638         vcpu->fpu_active = 1;
1639         cr0 = vmcs_readl(GUEST_CR0);
1640         cr0 &= ~(X86_CR0_TS | X86_CR0_MP);
1641         cr0 |= kvm_read_cr0_bits(vcpu, X86_CR0_TS | X86_CR0_MP);
1642         vmcs_writel(GUEST_CR0, cr0);
1643         update_exception_bitmap(vcpu);
1644         vcpu->arch.cr0_guest_owned_bits = X86_CR0_TS;
1645         if (is_guest_mode(vcpu))
1646                 vcpu->arch.cr0_guest_owned_bits &=
1647                         ~get_vmcs12(vcpu)->cr0_guest_host_mask;
1648         vmcs_writel(CR0_GUEST_HOST_MASK, ~vcpu->arch.cr0_guest_owned_bits);
1649 }
1650
1651 static void vmx_decache_cr0_guest_bits(struct kvm_vcpu *vcpu);
1652
1653 /*
1654  * Return the cr0 value that a nested guest would read. This is a combination
1655  * of the real cr0 used to run the guest (guest_cr0), and the bits shadowed by
1656  * its hypervisor (cr0_read_shadow).
1657  */
1658 static inline unsigned long nested_read_cr0(struct vmcs12 *fields)
1659 {
1660         return (fields->guest_cr0 & ~fields->cr0_guest_host_mask) |
1661                 (fields->cr0_read_shadow & fields->cr0_guest_host_mask);
1662 }
1663 static inline unsigned long nested_read_cr4(struct vmcs12 *fields)
1664 {
1665         return (fields->guest_cr4 & ~fields->cr4_guest_host_mask) |
1666                 (fields->cr4_read_shadow & fields->cr4_guest_host_mask);
1667 }
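
/*
 * A minimal sketch of the combine rule used by nested_read_cr0/cr4 above:
 * bits set in the guest/host mask are owned by L1 and so come from the
 * read shadow, while all other bits come from the value the guest really
 * runs with.  The helper is purely illustrative.
 */
static inline unsigned long example_masked_combine(unsigned long guest_val,
                                                   unsigned long shadow,
                                                   unsigned long mask)
{
        return (guest_val & ~mask) | (shadow & mask);
}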
1668
1669 static void vmx_fpu_deactivate(struct kvm_vcpu *vcpu)
1670 {
1671         /* Note that there is no vcpu->fpu_active = 0 here. The caller must
1672          * set this *before* calling this function.
1673          */
1674         vmx_decache_cr0_guest_bits(vcpu);
1675         vmcs_set_bits(GUEST_CR0, X86_CR0_TS | X86_CR0_MP);
1676         update_exception_bitmap(vcpu);
1677         vcpu->arch.cr0_guest_owned_bits = 0;
1678         vmcs_writel(CR0_GUEST_HOST_MASK, ~vcpu->arch.cr0_guest_owned_bits);
1679         if (is_guest_mode(vcpu)) {
1680                 /*
1681                  * L1's specified read shadow might not contain the TS bit,
1682                  * so now that we turned on shadowing of this bit, we need to
1683                  * set this bit of the shadow. Like in nested_vmx_run we need
1684                  * nested_read_cr0(vmcs12), but vmcs12->guest_cr0 is not yet
1685                  * up-to-date here because we just decached cr0.TS (and we'll
1686                  * only update vmcs12->guest_cr0 on nested exit).
1687                  */
1688                 struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
1689                 vmcs12->guest_cr0 = (vmcs12->guest_cr0 & ~X86_CR0_TS) |
1690                         (vcpu->arch.cr0 & X86_CR0_TS);
1691                 vmcs_writel(CR0_READ_SHADOW, nested_read_cr0(vmcs12));
1692         } else
1693                 vmcs_writel(CR0_READ_SHADOW, vcpu->arch.cr0);
1694 }
1695
1696 static unsigned long vmx_get_rflags(struct kvm_vcpu *vcpu)
1697 {
1698         unsigned long rflags, save_rflags;
1699
1700         if (!test_bit(VCPU_EXREG_RFLAGS, (ulong *)&vcpu->arch.regs_avail)) {
1701                 __set_bit(VCPU_EXREG_RFLAGS, (ulong *)&vcpu->arch.regs_avail);
1702                 rflags = vmcs_readl(GUEST_RFLAGS);
1703                 if (to_vmx(vcpu)->rmode.vm86_active) {
1704                         rflags &= RMODE_GUEST_OWNED_EFLAGS_BITS;
1705                         save_rflags = to_vmx(vcpu)->rmode.save_rflags;
1706                         rflags |= save_rflags & ~RMODE_GUEST_OWNED_EFLAGS_BITS;
1707                 }
1708                 to_vmx(vcpu)->rflags = rflags;
1709         }
1710         return to_vmx(vcpu)->rflags;
1711 }
1712
1713 static void vmx_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags)
1714 {
1715         __set_bit(VCPU_EXREG_RFLAGS, (ulong *)&vcpu->arch.regs_avail);
1716         to_vmx(vcpu)->rflags = rflags;
1717         if (to_vmx(vcpu)->rmode.vm86_active) {
1718                 to_vmx(vcpu)->rmode.save_rflags = rflags;
1719                 rflags |= X86_EFLAGS_IOPL | X86_EFLAGS_VM;
1720         }
1721         vmcs_writel(GUEST_RFLAGS, rflags);
1722 }
1723
1724 static u32 vmx_get_interrupt_shadow(struct kvm_vcpu *vcpu, int mask)
1725 {
1726         u32 interruptibility = vmcs_read32(GUEST_INTERRUPTIBILITY_INFO);
1727         int ret = 0;
1728
1729         if (interruptibility & GUEST_INTR_STATE_STI)
1730                 ret |= KVM_X86_SHADOW_INT_STI;
1731         if (interruptibility & GUEST_INTR_STATE_MOV_SS)
1732                 ret |= KVM_X86_SHADOW_INT_MOV_SS;
1733
1734         return ret & mask;
1735 }
1736
1737 static void vmx_set_interrupt_shadow(struct kvm_vcpu *vcpu, int mask)
1738 {
1739         u32 interruptibility_old = vmcs_read32(GUEST_INTERRUPTIBILITY_INFO);
1740         u32 interruptibility = interruptibility_old;
1741
1742         interruptibility &= ~(GUEST_INTR_STATE_STI | GUEST_INTR_STATE_MOV_SS);
1743
1744         if (mask & KVM_X86_SHADOW_INT_MOV_SS)
1745                 interruptibility |= GUEST_INTR_STATE_MOV_SS;
1746         else if (mask & KVM_X86_SHADOW_INT_STI)
1747                 interruptibility |= GUEST_INTR_STATE_STI;
1748
1749         if (interruptibility != interruptibility_old)
1750                 vmcs_write32(GUEST_INTERRUPTIBILITY_INFO, interruptibility);
1751 }
1752
1753 static void skip_emulated_instruction(struct kvm_vcpu *vcpu)
1754 {
1755         unsigned long rip;
1756
1757         rip = kvm_rip_read(vcpu);
1758         rip += vmcs_read32(VM_EXIT_INSTRUCTION_LEN);
1759         kvm_rip_write(vcpu, rip);
1760
1761         /* skipping an emulated instruction also counts */
1762         vmx_set_interrupt_shadow(vcpu, 0);
1763 }
1764
1765 /*
1766  * KVM wants to inject page-faults which it got to the guest. This function
1767  * checks whether in a nested guest, we need to inject them to L1 or L2.
1768  * This function assumes it is called with the exit reason in vmcs02 being
1769  * a #PF exception (this is the only case in which KVM injects a #PF when L2
1770  * is running).
1771  */
1772 static int nested_pf_handled(struct kvm_vcpu *vcpu)
1773 {
1774         struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
1775
1776         /* TODO: also check PFEC_MATCH/MASK, not just EB.PF. */
1777         if (!(vmcs12->exception_bitmap & (1u << PF_VECTOR)))
1778                 return 0;
1779
1780         nested_vmx_vmexit(vcpu);
1781         return 1;
1782 }
1783
1784 static void vmx_queue_exception(struct kvm_vcpu *vcpu, unsigned nr,
1785                                 bool has_error_code, u32 error_code,
1786                                 bool reinject)
1787 {
1788         struct vcpu_vmx *vmx = to_vmx(vcpu);
1789         u32 intr_info = nr | INTR_INFO_VALID_MASK;
1790
1791         if (nr == PF_VECTOR && is_guest_mode(vcpu) &&
1792                 nested_pf_handled(vcpu))
1793                 return;
1794
1795         if (has_error_code) {
1796                 vmcs_write32(VM_ENTRY_EXCEPTION_ERROR_CODE, error_code);
1797                 intr_info |= INTR_INFO_DELIVER_CODE_MASK;
1798         }
1799
1800         if (vmx->rmode.vm86_active) {
1801                 int inc_eip = 0;
1802                 if (kvm_exception_is_soft(nr))
1803                         inc_eip = vcpu->arch.event_exit_inst_len;
1804                 if (kvm_inject_realmode_interrupt(vcpu, nr, inc_eip) != EMULATE_DONE)
1805                         kvm_make_request(KVM_REQ_TRIPLE_FAULT, vcpu);
1806                 return;
1807         }
1808
1809         if (kvm_exception_is_soft(nr)) {
1810                 vmcs_write32(VM_ENTRY_INSTRUCTION_LEN,
1811                              vmx->vcpu.arch.event_exit_inst_len);
1812                 intr_info |= INTR_TYPE_SOFT_EXCEPTION;
1813         } else
1814                 intr_info |= INTR_TYPE_HARD_EXCEPTION;
1815
1816         vmcs_write32(VM_ENTRY_INTR_INFO_FIELD, intr_info);
1817 }
1818
1819 static bool vmx_rdtscp_supported(void)
1820 {
1821         return cpu_has_vmx_rdtscp();
1822 }
1823
1824 static bool vmx_invpcid_supported(void)
1825 {
1826         return cpu_has_vmx_invpcid() && enable_ept;
1827 }
1828
1829 /*
1830  * Swap MSR entry in host/guest MSR entry array.
1831  */
1832 static void move_msr_up(struct vcpu_vmx *vmx, int from, int to)
1833 {
1834         struct shared_msr_entry tmp;
1835
1836         tmp = vmx->guest_msrs[to];
1837         vmx->guest_msrs[to] = vmx->guest_msrs[from];
1838         vmx->guest_msrs[from] = tmp;
1839 }
1840
1841 static void vmx_set_msr_bitmap(struct kvm_vcpu *vcpu)
1842 {
1843         unsigned long *msr_bitmap;
1844
1845         if (irqchip_in_kernel(vcpu->kvm) && apic_x2apic_mode(vcpu->arch.apic)) {
1846                 if (is_long_mode(vcpu))
1847                         msr_bitmap = vmx_msr_bitmap_longmode_x2apic;
1848                 else
1849                         msr_bitmap = vmx_msr_bitmap_legacy_x2apic;
1850         } else {
1851                 if (is_long_mode(vcpu))
1852                         msr_bitmap = vmx_msr_bitmap_longmode;
1853                 else
1854                         msr_bitmap = vmx_msr_bitmap_legacy;
1855         }
1856
1857         vmcs_write64(MSR_BITMAP, __pa(msr_bitmap));
1858 }
1859
1860 /*
1861  * Set up the vmcs to automatically save and restore system
1862  * msrs.  Don't touch the 64-bit msrs if the guest is in legacy
1863  * mode, as fiddling with msrs is very expensive.
1864  */
1865 static void setup_msrs(struct vcpu_vmx *vmx)
1866 {
1867         int save_nmsrs, index;
1868
1869         save_nmsrs = 0;
1870 #ifdef CONFIG_X86_64
1871         if (is_long_mode(&vmx->vcpu)) {
1872                 index = __find_msr_index(vmx, MSR_SYSCALL_MASK);
1873                 if (index >= 0)
1874                         move_msr_up(vmx, index, save_nmsrs++);
1875                 index = __find_msr_index(vmx, MSR_LSTAR);
1876                 if (index >= 0)
1877                         move_msr_up(vmx, index, save_nmsrs++);
1878                 index = __find_msr_index(vmx, MSR_CSTAR);
1879                 if (index >= 0)
1880                         move_msr_up(vmx, index, save_nmsrs++);
1881                 index = __find_msr_index(vmx, MSR_TSC_AUX);
1882                 if (index >= 0 && vmx->rdtscp_enabled)
1883                         move_msr_up(vmx, index, save_nmsrs++);
1884                 /*
1885                  * MSR_STAR is only needed on long mode guests, and only
1886                  * if efer.sce is enabled.
1887                  */
1888                 index = __find_msr_index(vmx, MSR_STAR);
1889                 if ((index >= 0) && (vmx->vcpu.arch.efer & EFER_SCE))
1890                         move_msr_up(vmx, index, save_nmsrs++);
1891         }
1892 #endif
1893         index = __find_msr_index(vmx, MSR_EFER);
1894         if (index >= 0 && update_transition_efer(vmx, index))
1895                 move_msr_up(vmx, index, save_nmsrs++);
1896
1897         vmx->save_nmsrs = save_nmsrs;
1898
1899         if (cpu_has_vmx_msr_bitmap())
1900                 vmx_set_msr_bitmap(&vmx->vcpu);
1901 }
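
/*
 * A minimal sketch of the compaction pattern setup_msrs() uses above:
 * entries that should be auto-switched are swapped to the front of the
 * array (via move_msr_up()) and the count of such entries is kept.  The
 * integer array and predicate below are hypothetical stand-ins for
 * vmx->guest_msrs and the per-MSR checks.
 */
static inline int example_compact_front(int *vals, int n, bool (*wanted)(int))
{
        int i, count = 0;

        for (i = 0; i < n; i++) {
                if (wanted(vals[i])) {
                        int tmp = vals[count];

                        vals[count] = vals[i];
                        vals[i] = tmp;
                        count++;
                }
        }
        return count;   /* how many entries now sit at the front */
}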
1902
1903 /*
1904  * reads and returns guest's timestamp counter "register"
1905  * guest_tsc = host_tsc + tsc_offset    -- 21.3
1906  */
1907 static u64 guest_read_tsc(void)
1908 {
1909         u64 host_tsc, tsc_offset;
1910
1911         rdtscll(host_tsc);
1912         tsc_offset = vmcs_read64(TSC_OFFSET);
1913         return host_tsc + tsc_offset;
1914 }
1915
1916 /*
1917  * Like guest_read_tsc, but always returns L1's notion of the timestamp
1918  * counter, even if a nested guest (L2) is currently running.
1919  */
1920 u64 vmx_read_l1_tsc(struct kvm_vcpu *vcpu, u64 host_tsc)
1921 {
1922         u64 tsc_offset;
1923
1924         tsc_offset = is_guest_mode(vcpu) ?
1925                 to_vmx(vcpu)->nested.vmcs01_tsc_offset :
1926                 vmcs_read64(TSC_OFFSET);
1927         return host_tsc + tsc_offset;
1928 }
1929
1930 /*
1931  * Engage any workarounds for mis-matched TSC rates.  Currently limited to
1932  * software catchup for faster rates on slower CPUs.
1933  */
1934 static void vmx_set_tsc_khz(struct kvm_vcpu *vcpu, u32 user_tsc_khz, bool scale)
1935 {
1936         if (!scale)
1937                 return;
1938
1939         if (user_tsc_khz > tsc_khz) {
1940                 vcpu->arch.tsc_catchup = 1;
1941                 vcpu->arch.tsc_always_catchup = 1;
1942         } else
1943                 WARN(1, "user requested TSC rate below hardware speed\n");
1944 }
1945
1946 static u64 vmx_read_tsc_offset(struct kvm_vcpu *vcpu)
1947 {
1948         return vmcs_read64(TSC_OFFSET);
1949 }
1950
1951 /*
1952  * writes 'offset' into guest's timestamp counter offset register
1953  */
1954 static void vmx_write_tsc_offset(struct kvm_vcpu *vcpu, u64 offset)
1955 {
1956         if (is_guest_mode(vcpu)) {
1957                 /*
1958                  * We're here if L1 chose not to trap WRMSR to TSC. According
1959                  * to the spec, this should set L1's TSC; the offset that L1
1960                  * set for L2 remains unchanged, and still needs to be added
1961                  * to the newly set TSC to get L2's TSC.
1962                  */
1963                 struct vmcs12 *vmcs12;
1964                 to_vmx(vcpu)->nested.vmcs01_tsc_offset = offset;
1965                 /* recalculate vmcs02.TSC_OFFSET: */
1966                 vmcs12 = get_vmcs12(vcpu);
1967                 vmcs_write64(TSC_OFFSET, offset +
1968                         (nested_cpu_has(vmcs12, CPU_BASED_USE_TSC_OFFSETING) ?
1969                          vmcs12->tsc_offset : 0));
1970         } else {
1971                 vmcs_write64(TSC_OFFSET, offset);
1972         }
1973 }
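
/*
 * A minimal sketch of the offset stacking vmx_write_tsc_offset() performs
 * above when L2 is running: L1 sees host_tsc plus the vmcs01 offset, and
 * L2 sees that value plus whatever offset L1 programmed into vmcs12 (when
 * it uses TSC offsetting).  The helper is purely illustrative.
 */
static inline u64 example_l2_tsc(u64 host_tsc, u64 vmcs01_offset,
                                 u64 vmcs12_offset)
{
        return host_tsc + vmcs01_offset + vmcs12_offset;
}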
1974
1975 static void vmx_adjust_tsc_offset(struct kvm_vcpu *vcpu, s64 adjustment, bool host)
1976 {
1977         u64 offset = vmcs_read64(TSC_OFFSET);
1978         vmcs_write64(TSC_OFFSET, offset + adjustment);
1979         if (is_guest_mode(vcpu)) {
1980                 /* Even when running L2, the adjustment needs to apply to L1 */
1981                 to_vmx(vcpu)->nested.vmcs01_tsc_offset += adjustment;
1982         }
1983 }
1984
1985 static u64 vmx_compute_tsc_offset(struct kvm_vcpu *vcpu, u64 target_tsc)
1986 {
1987         return target_tsc - native_read_tsc();
1988 }
1989
1990 static bool guest_cpuid_has_vmx(struct kvm_vcpu *vcpu)
1991 {
1992         struct kvm_cpuid_entry2 *best = kvm_find_cpuid_entry(vcpu, 1, 0);
1993         return best && (best->ecx & (1 << (X86_FEATURE_VMX & 31)));
1994 }
1995
1996 /*
1997  * nested_vmx_allowed() checks whether a guest should be allowed to use VMX
1998  * instructions and MSRs (i.e., nested VMX). Nested VMX is disabled for
1999  * all guests if the "nested" module option is off, and can also be disabled
2000  * for a single guest by disabling its VMX cpuid bit.
2001  */
2002 static inline bool nested_vmx_allowed(struct kvm_vcpu *vcpu)
2003 {
2004         return nested && guest_cpuid_has_vmx(vcpu);
2005 }
2006
2007 /*
2008  * nested_vmx_setup_ctls_msrs() sets up variables containing the values to be
2009  * returned for the various VMX controls MSRs when nested VMX is enabled.
2010  * The same values should also be used to verify that vmcs12 control fields are
2011  * valid during nested entry from L1 to L2.
2012  * Each of these control msrs has a low and high 32-bit half: A low bit is on
2013  * if the corresponding bit in the (32-bit) control field *must* be on, and a
2014  * bit in the high half is on if the corresponding bit in the control field
2015  * may be on. See also vmx_control_verify().
2016  * TODO: allow these variables to be modified (downgraded) by module options
2017  * or other means.
2018  */
2019 static u32 nested_vmx_procbased_ctls_low, nested_vmx_procbased_ctls_high;
2020 static u32 nested_vmx_secondary_ctls_low, nested_vmx_secondary_ctls_high;
2021 static u32 nested_vmx_pinbased_ctls_low, nested_vmx_pinbased_ctls_high;
2022 static u32 nested_vmx_exit_ctls_low, nested_vmx_exit_ctls_high;
2023 static u32 nested_vmx_entry_ctls_low, nested_vmx_entry_ctls_high;
2024 static __init void nested_vmx_setup_ctls_msrs(void)
2025 {
2026         /*
2027          * Note that as a general rule, the high half of the MSRs (bits in
2028          * the control fields which may be 1) should be initialized by the
2029          * intersection of the underlying hardware's MSR (i.e., features which
2030          * can be supported) and the list of features we want to expose -
2031          * because they are known to be properly supported in our code.
2032          * Also, usually, the low half of the MSRs (bits which must be 1) can
2033          * be set to 0, meaning that L1 may turn off any of these bits. The
2034          * reason is that if one of these bits is necessary, it will appear
2035          * in vmcs01 and prepare_vmcs02, when it bitwise-or's the control
2036          * fields of vmcs01 and vmcs02, will turn these bits off - and
2037          * nested_vmx_exit_handled() will not pass related exits to L1.
2038          * These rules have exceptions below.
2039          */
2040
2041         /* pin-based controls */
2042         /*
2043          * According to the Intel spec, if bit 55 of VMX_BASIC is off (as it is
2044          * in our case), bits 1, 2 and 4 (i.e., 0x16) must be 1 in this MSR.
2045          */
2046         nested_vmx_pinbased_ctls_low = 0x16;
2047         nested_vmx_pinbased_ctls_high = 0x16 |
2048                 PIN_BASED_EXT_INTR_MASK | PIN_BASED_NMI_EXITING |
2049                 PIN_BASED_VIRTUAL_NMIS;
2050
2051         /* exit controls */
2052         nested_vmx_exit_ctls_low = 0;
2053         /* Note that guest use of VM_EXIT_ACK_INTR_ON_EXIT is not supported. */
2054 #ifdef CONFIG_X86_64
2055         nested_vmx_exit_ctls_high = VM_EXIT_HOST_ADDR_SPACE_SIZE;
2056 #else
2057         nested_vmx_exit_ctls_high = 0;
2058 #endif
2059
2060         /* entry controls */
2061         rdmsr(MSR_IA32_VMX_ENTRY_CTLS,
2062                 nested_vmx_entry_ctls_low, nested_vmx_entry_ctls_high);
2063         nested_vmx_entry_ctls_low = 0;
2064         nested_vmx_entry_ctls_high &=
2065                 VM_ENTRY_LOAD_IA32_PAT | VM_ENTRY_IA32E_MODE;
2066
2067         /* cpu-based controls */
2068         rdmsr(MSR_IA32_VMX_PROCBASED_CTLS,
2069                 nested_vmx_procbased_ctls_low, nested_vmx_procbased_ctls_high);
2070         nested_vmx_procbased_ctls_low = 0;
2071         nested_vmx_procbased_ctls_high &=
2072                 CPU_BASED_VIRTUAL_INTR_PENDING | CPU_BASED_USE_TSC_OFFSETING |
2073                 CPU_BASED_HLT_EXITING | CPU_BASED_INVLPG_EXITING |
2074                 CPU_BASED_MWAIT_EXITING | CPU_BASED_CR3_LOAD_EXITING |
2075                 CPU_BASED_CR3_STORE_EXITING |
2076 #ifdef CONFIG_X86_64
2077                 CPU_BASED_CR8_LOAD_EXITING | CPU_BASED_CR8_STORE_EXITING |
2078 #endif
2079                 CPU_BASED_MOV_DR_EXITING | CPU_BASED_UNCOND_IO_EXITING |
2080                 CPU_BASED_USE_IO_BITMAPS | CPU_BASED_MONITOR_EXITING |
2081                 CPU_BASED_RDPMC_EXITING | CPU_BASED_RDTSC_EXITING |
2082                 CPU_BASED_ACTIVATE_SECONDARY_CONTROLS;
2083         /*
2084          * We can allow some features even when not supported by the
2085          * hardware. For example, L1 can specify an MSR bitmap - and we
2086          * can use it to avoid exits to L1 - even when L0 runs L2
2087          * without MSR bitmaps.
2088          */
2089         nested_vmx_procbased_ctls_high |= CPU_BASED_USE_MSR_BITMAPS;
2090
2091         /* secondary cpu-based controls */
2092         rdmsr(MSR_IA32_VMX_PROCBASED_CTLS2,
2093                 nested_vmx_secondary_ctls_low, nested_vmx_secondary_ctls_high);
2094         nested_vmx_secondary_ctls_low = 0;
2095         nested_vmx_secondary_ctls_high &=
2096                 SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES;
2097 }
2098
2099 static inline bool vmx_control_verify(u32 control, u32 low, u32 high)
2100 {
2101         /*
2102          * Bits clear in 'high' must be clear in 'control'; bits set in 'low' must be set.
2103          */
2104         return ((control & high) | low) == control;
2105 }
2106
2107 static inline u64 vmx_control_msr(u32 low, u32 high)
2108 {
2109         return low | ((u64)high << 32);
2110 }
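
/*
 * A minimal worked example of using the two helpers above together.  The
 * low/high halves are hypothetical: low says bit 1 must be 1, high says
 * only bits 1 and 5 may be 1, so a control value of 0x22 passes the check.
 */
static inline bool example_control_check(void)
{
        u32 low = 0x02, high = 0x22;
        u64 msr = vmx_control_msr(low, high);   /* low | ((u64)high << 32) */

        return msr && vmx_control_verify(0x22, low, high);
}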
2111
2112 /*
2113  * If we allow our guest to use VMX instructions (i.e., nested VMX), we should
2114  * also let it use VMX-specific MSRs.
2115  * vmx_get_vmx_msr() and vmx_set_vmx_msr() return 1 when we handled a
2116  * VMX-specific MSR, or 0 when we haven't (and the caller should handle it
2117  * like all other MSRs).
2118  */
2119 static int vmx_get_vmx_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 *pdata)
2120 {
2121         if (!nested_vmx_allowed(vcpu) && msr_index >= MSR_IA32_VMX_BASIC &&
2122                      msr_index <= MSR_IA32_VMX_TRUE_ENTRY_CTLS) {
2123                 /*
2124                  * According to the spec, processors which do not support VMX
2125                  * should throw a #GP(0) when VMX capability MSRs are read.
2126                  */
2127                 kvm_queue_exception_e(vcpu, GP_VECTOR, 0);
2128                 return 1;
2129         }
2130
2131         switch (msr_index) {
2132         case MSR_IA32_FEATURE_CONTROL:
2133                 *pdata = 0;
2134                 break;
2135         case MSR_IA32_VMX_BASIC:
2136                 /*
2137                  * This MSR reports some information about VMX support. We
2138                  * should return information about the VMX we emulate for the
2139                  * guest, and the VMCS structure we give it - not about the
2140                  * VMX support of the underlying hardware.
2141                  */
2142                 *pdata = VMCS12_REVISION |
2143                            ((u64)VMCS12_SIZE << VMX_BASIC_VMCS_SIZE_SHIFT) |
2144                            (VMX_BASIC_MEM_TYPE_WB << VMX_BASIC_MEM_TYPE_SHIFT);
2145                 break;
2146         case MSR_IA32_VMX_TRUE_PINBASED_CTLS:
2147         case MSR_IA32_VMX_PINBASED_CTLS:
2148                 *pdata = vmx_control_msr(nested_vmx_pinbased_ctls_low,
2149                                         nested_vmx_pinbased_ctls_high);
2150                 break;
2151         case MSR_IA32_VMX_TRUE_PROCBASED_CTLS:
2152         case MSR_IA32_VMX_PROCBASED_CTLS:
2153                 *pdata = vmx_control_msr(nested_vmx_procbased_ctls_low,
2154                                         nested_vmx_procbased_ctls_high);
2155                 break;
2156         case MSR_IA32_VMX_TRUE_EXIT_CTLS:
2157         case MSR_IA32_VMX_EXIT_CTLS:
2158                 *pdata = vmx_control_msr(nested_vmx_exit_ctls_low,
2159                                         nested_vmx_exit_ctls_high);
2160                 break;
2161         case MSR_IA32_VMX_TRUE_ENTRY_CTLS:
2162         case MSR_IA32_VMX_ENTRY_CTLS:
2163                 *pdata = vmx_control_msr(nested_vmx_entry_ctls_low,
2164                                         nested_vmx_entry_ctls_high);
2165                 break;
2166         case MSR_IA32_VMX_MISC:
2167                 *pdata = 0;
2168                 break;
2169         /*
2170          * These MSRs specify bits which the guest must keep fixed (on or off)
2171          * while L1 is in VMXON mode (in L1's root mode, or running an L2).
2172          * We picked the standard core2 setting.
2173          */
2174 #define VMXON_CR0_ALWAYSON      (X86_CR0_PE | X86_CR0_PG | X86_CR0_NE)
2175 #define VMXON_CR4_ALWAYSON      X86_CR4_VMXE
2176         case MSR_IA32_VMX_CR0_FIXED0:
2177                 *pdata = VMXON_CR0_ALWAYSON;
2178                 break;
2179         case MSR_IA32_VMX_CR0_FIXED1:
2180                 *pdata = -1ULL;
2181                 break;
2182         case MSR_IA32_VMX_CR4_FIXED0:
2183                 *pdata = VMXON_CR4_ALWAYSON;
2184                 break;
2185         case MSR_IA32_VMX_CR4_FIXED1:
2186                 *pdata = -1ULL;
2187                 break;
2188         case MSR_IA32_VMX_VMCS_ENUM:
2189                 *pdata = 0x1f;
2190                 break;
2191         case MSR_IA32_VMX_PROCBASED_CTLS2:
2192                 *pdata = vmx_control_msr(nested_vmx_secondary_ctls_low,
2193                                         nested_vmx_secondary_ctls_high);
2194                 break;
2195         case MSR_IA32_VMX_EPT_VPID_CAP:
2196                 /* Currently, no nested ept or nested vpid */
2197                 *pdata = 0;
2198                 break;
2199         default:
2200                 return 0;
2201         }
2202
2203         return 1;
2204 }
2205
2206 static int vmx_set_vmx_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 data)
2207 {
2208         if (!nested_vmx_allowed(vcpu))
2209                 return 0;
2210
2211         if (msr_index == MSR_IA32_FEATURE_CONTROL)
2212                 /* TODO: the right thing. */
2213                 return 1;
2214         /*
2215          * No need to treat VMX capability MSRs specially: If we don't handle
2216          * them, handle_wrmsr will #GP(0), which is correct (they are readonly)
2217          */
2218         return 0;
2219 }
2220
2221 /*
2222  * Reads an msr value (of 'msr_index') into 'pdata'.
2223  * Returns 0 on success, non-0 otherwise.
2224  * Assumes vcpu_load() was already called.
2225  */
2226 static int vmx_get_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 *pdata)
2227 {
2228         u64 data;
2229         struct shared_msr_entry *msr;
2230
2231         if (!pdata) {
2232                 printk(KERN_ERR "BUG: get_msr called with NULL pdata\n");
2233                 return -EINVAL;
2234         }
2235
2236         switch (msr_index) {
2237 #ifdef CONFIG_X86_64
2238         case MSR_FS_BASE:
2239                 data = vmcs_readl(GUEST_FS_BASE);
2240                 break;
2241         case MSR_GS_BASE:
2242                 data = vmcs_readl(GUEST_GS_BASE);
2243                 break;
2244         case MSR_KERNEL_GS_BASE:
2245                 vmx_load_host_state(to_vmx(vcpu));
2246                 data = to_vmx(vcpu)->msr_guest_kernel_gs_base;
2247                 break;
2248 #endif
2249         case MSR_EFER:
2250                 return kvm_get_msr_common(vcpu, msr_index, pdata);
2251         case MSR_IA32_TSC:
2252                 data = guest_read_tsc();
2253                 break;
2254         case MSR_IA32_SYSENTER_CS:
2255                 data = vmcs_read32(GUEST_SYSENTER_CS);
2256                 break;
2257         case MSR_IA32_SYSENTER_EIP:
2258                 data = vmcs_readl(GUEST_SYSENTER_EIP);
2259                 break;
2260         case MSR_IA32_SYSENTER_ESP:
2261                 data = vmcs_readl(GUEST_SYSENTER_ESP);
2262                 break;
2263         case MSR_TSC_AUX:
2264                 if (!to_vmx(vcpu)->rdtscp_enabled)
2265                         return 1;
2266                 /* Otherwise falls through */
2267         default:
2268                 if (vmx_get_vmx_msr(vcpu, msr_index, pdata))
2269                         return 0;
2270                 msr = find_msr_entry(to_vmx(vcpu), msr_index);
2271                 if (msr) {
2272                         data = msr->data;
2273                         break;
2274                 }
2275                 return kvm_get_msr_common(vcpu, msr_index, pdata);
2276         }
2277
2278         *pdata = data;
2279         return 0;
2280 }
2281
2282 /*
2283  * Writes msr value into the appropriate "register".
2284  * Returns 0 on success, non-0 otherwise.
2285  * Assumes vcpu_load() was already called.
2286  */
2287 static int vmx_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
2288 {
2289         struct vcpu_vmx *vmx = to_vmx(vcpu);
2290         struct shared_msr_entry *msr;
2291         int ret = 0;
2292         u32 msr_index = msr_info->index;
2293         u64 data = msr_info->data;
2294
2295         switch (msr_index) {
2296         case MSR_EFER:
2297                 ret = kvm_set_msr_common(vcpu, msr_info);
2298                 break;
2299 #ifdef CONFIG_X86_64
2300         case MSR_FS_BASE:
2301                 vmx_segment_cache_clear(vmx);
2302                 vmcs_writel(GUEST_FS_BASE, data);
2303                 break;
2304         case MSR_GS_BASE:
2305                 vmx_segment_cache_clear(vmx);
2306                 vmcs_writel(GUEST_GS_BASE, data);
2307                 break;
2308         case MSR_KERNEL_GS_BASE:
2309                 vmx_load_host_state(vmx);
2310                 vmx->msr_guest_kernel_gs_base = data;
2311                 break;
2312 #endif
2313         case MSR_IA32_SYSENTER_CS:
2314                 vmcs_write32(GUEST_SYSENTER_CS, data);
2315                 break;
2316         case MSR_IA32_SYSENTER_EIP:
2317                 vmcs_writel(GUEST_SYSENTER_EIP, data);
2318                 break;
2319         case MSR_IA32_SYSENTER_ESP:
2320                 vmcs_writel(GUEST_SYSENTER_ESP, data);
2321                 break;
2322         case MSR_IA32_TSC:
2323                 kvm_write_tsc(vcpu, msr_info);
2324                 break;
2325         case MSR_IA32_CR_PAT:
2326                 if (vmcs_config.vmentry_ctrl & VM_ENTRY_LOAD_IA32_PAT) {
2327                         vmcs_write64(GUEST_IA32_PAT, data);
2328                         vcpu->arch.pat = data;
2329                         break;
2330                 }
2331                 ret = kvm_set_msr_common(vcpu, msr_info);
2332                 break;
2333         case MSR_IA32_TSC_ADJUST:
2334                 ret = kvm_set_msr_common(vcpu, msr_info);
2335                 break;
2336         case MSR_TSC_AUX:
2337                 if (!vmx->rdtscp_enabled)
2338                         return 1;
2339                 /* Check reserved bit, higher 32 bits should be zero */
2340                 if ((data >> 32) != 0)
2341                         return 1;
2342                 /* Otherwise falls through */
2343         default:
2344                 if (vmx_set_vmx_msr(vcpu, msr_index, data))
2345                         break;
2346                 msr = find_msr_entry(vmx, msr_index);
2347                 if (msr) {
2348                         msr->data = data;
2349                         if (msr - vmx->guest_msrs < vmx->save_nmsrs) {
2350                                 preempt_disable();
2351                                 kvm_set_shared_msr(msr->index, msr->data,
2352                                                    msr->mask);
2353                                 preempt_enable();
2354                         }
2355                         break;
2356                 }
2357                 ret = kvm_set_msr_common(vcpu, msr_info);
2358         }
2359
2360         return ret;
2361 }
2362
2363 static void vmx_cache_reg(struct kvm_vcpu *vcpu, enum kvm_reg reg)
2364 {
2365         __set_bit(reg, (unsigned long *)&vcpu->arch.regs_avail);
2366         switch (reg) {
2367         case VCPU_REGS_RSP:
2368                 vcpu->arch.regs[VCPU_REGS_RSP] = vmcs_readl(GUEST_RSP);
2369                 break;
2370         case VCPU_REGS_RIP:
2371                 vcpu->arch.regs[VCPU_REGS_RIP] = vmcs_readl(GUEST_RIP);
2372                 break;
2373         case VCPU_EXREG_PDPTR:
2374                 if (enable_ept)
2375                         ept_save_pdptrs(vcpu);
2376                 break;
2377         default:
2378                 break;
2379         }
2380 }
2381
2382 static __init int cpu_has_kvm_support(void)
2383 {
2384         return cpu_has_vmx();
2385 }
2386
2387 static __init int vmx_disabled_by_bios(void)
2388 {
2389         u64 msr;
2390
2391         rdmsrl(MSR_IA32_FEATURE_CONTROL, msr);
2392         if (msr & FEATURE_CONTROL_LOCKED) {
2393                 /* launched w/ TXT and VMX disabled */
2394                 if (!(msr & FEATURE_CONTROL_VMXON_ENABLED_INSIDE_SMX)
2395                         && tboot_enabled())
2396                         return 1;
2397                 /* launched w/o TXT and VMX only enabled w/ TXT */
2398                 if (!(msr & FEATURE_CONTROL_VMXON_ENABLED_OUTSIDE_SMX)
2399                         && (msr & FEATURE_CONTROL_VMXON_ENABLED_INSIDE_SMX)
2400                         && !tboot_enabled()) {
2401                         printk(KERN_WARNING "kvm: disable TXT in the BIOS or "
2402                                 "activate TXT before enabling KVM\n");
2403                         return 1;
2404                 }
2405                 /* launched w/o TXT and VMX disabled */
2406                 if (!(msr & FEATURE_CONTROL_VMXON_ENABLED_OUTSIDE_SMX)
2407                         && !tboot_enabled())
2408                         return 1;
2409         }
2410
2411         return 0;
2412 }
2413
2414 static void kvm_cpu_vmxon(u64 addr)
2415 {
2416         asm volatile (ASM_VMX_VMXON_RAX
2417                         : : "a"(&addr), "m"(addr)
2418                         : "memory", "cc");
2419 }
2420
2421 static int hardware_enable(void *garbage)
2422 {
2423         int cpu = raw_smp_processor_id();
2424         u64 phys_addr = __pa(per_cpu(vmxarea, cpu));
2425         u64 old, test_bits;
2426
2427         if (read_cr4() & X86_CR4_VMXE)
2428                 return -EBUSY;
2429
2430         INIT_LIST_HEAD(&per_cpu(loaded_vmcss_on_cpu, cpu));
2431
2432         /*
2433          * Now we can enable the vmclear operation in kdump
2434          * since the loaded_vmcss_on_cpu list on this cpu
2435          * has been initialized.
2436          *
2437          * Even though the cpu is not in VMX operation now, there
2438          * is no problem in enabling the vmclear operation here,
2439          * because the loaded_vmcss_on_cpu list is empty.
2440          */
2441         crash_enable_local_vmclear(cpu);
2442
2443         rdmsrl(MSR_IA32_FEATURE_CONTROL, old);
2444
2445         test_bits = FEATURE_CONTROL_LOCKED;
2446         test_bits |= FEATURE_CONTROL_VMXON_ENABLED_OUTSIDE_SMX;
2447         if (tboot_enabled())
2448                 test_bits |= FEATURE_CONTROL_VMXON_ENABLED_INSIDE_SMX;
2449
2450         if ((old & test_bits) != test_bits) {
2451                 /* enable and lock */
2452                 wrmsrl(MSR_IA32_FEATURE_CONTROL, old | test_bits);
2453         }
2454         write_cr4(read_cr4() | X86_CR4_VMXE); /* FIXME: not cpu hotplug safe */
2455
2456         if (vmm_exclusive) {
2457                 kvm_cpu_vmxon(phys_addr);
2458                 ept_sync_global();
2459         }
2460
2461         store_gdt(&__get_cpu_var(host_gdt));
2462
2463         return 0;
2464 }
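
/*
 * A minimal sketch of the IA32_FEATURE_CONTROL test done in
 * hardware_enable() above: VMXON outside SMX is permitted once the lock
 * bit and the corresponding enable bit are both set (otherwise the code
 * sets and locks them itself while the MSR is still unlocked).  The helper
 * is purely illustrative.
 */
static inline bool example_vmxon_outside_smx_ok(u64 feature_control)
{
        u64 needed = FEATURE_CONTROL_LOCKED |
                     FEATURE_CONTROL_VMXON_ENABLED_OUTSIDE_SMX;

        return (feature_control & needed) == needed;
}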
2465
2466 static void vmclear_local_loaded_vmcss(void)
2467 {
2468         int cpu = raw_smp_processor_id();
2469         struct loaded_vmcs *v, *n;
2470
2471         list_for_each_entry_safe(v, n, &per_cpu(loaded_vmcss_on_cpu, cpu),
2472                                  loaded_vmcss_on_cpu_link)
2473                 __loaded_vmcs_clear(v);
2474 }
2475
2476
2477 /* Just like cpu_vmxoff(), but with the __kvm_handle_fault_on_reboot()
2478  * tricks.
2479  */
2480 static void kvm_cpu_vmxoff(void)
2481 {
2482         asm volatile (__ex(ASM_VMX_VMXOFF) : : : "cc");
2483 }
2484
2485 static void hardware_disable(void *garbage)
2486 {
2487         if (vmm_exclusive) {
2488                 vmclear_local_loaded_vmcss();
2489                 kvm_cpu_vmxoff();
2490         }
2491         write_cr4(read_cr4() & ~X86_CR4_VMXE);
2492 }
2493
2494 static __init int adjust_vmx_controls(u32 ctl_min, u32 ctl_opt,
2495                                       u32 msr, u32 *result)
2496 {
2497         u32 vmx_msr_low, vmx_msr_high;
2498         u32 ctl = ctl_min | ctl_opt;
2499
2500         rdmsr(msr, vmx_msr_low, vmx_msr_high);
2501
2502         ctl &= vmx_msr_high; /* bit == 0 in high word ==> must be zero */
2503         ctl |= vmx_msr_low;  /* bit == 1 in low word  ==> must be one  */
2504
2505         /* Ensure minimum (required) set of control bits are supported. */
2506         if (ctl_min & ~ctl)
2507                 return -EIO;
2508
2509         *result = ctl;
2510         return 0;
2511 }
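
/*
 * A minimal worked example of the adjustment rule in adjust_vmx_controls()
 * above, with hypothetical MSR halves: bits clear in the high word are
 * forced off, bits set in the low word are forced on, and the caller then
 * checks that every ctl_min bit survived.
 */
static inline u32 example_adjust_controls(u32 requested)
{
        u32 msr_low = 0x00000016;       /* must-be-one bits (example) */
        u32 msr_high = 0x000000ff;      /* may-be-one bits (example) */

        return (requested & msr_high) | msr_low;
}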
2512
2513 static __init bool allow_1_setting(u32 msr, u32 ctl)
2514 {
2515         u32 vmx_msr_low, vmx_msr_high;
2516
2517         rdmsr(msr, vmx_msr_low, vmx_msr_high);
2518         return vmx_msr_high & ctl;
2519 }
2520
2521 static __init int setup_vmcs_config(struct vmcs_config *vmcs_conf)
2522 {
2523         u32 vmx_msr_low, vmx_msr_high;
2524         u32 min, opt, min2, opt2;
2525         u32 _pin_based_exec_control = 0;
2526         u32 _cpu_based_exec_control = 0;
2527         u32 _cpu_based_2nd_exec_control = 0;
2528         u32 _vmexit_control = 0;
2529         u32 _vmentry_control = 0;
2530
2531         min = PIN_BASED_EXT_INTR_MASK | PIN_BASED_NMI_EXITING;
2532         opt = PIN_BASED_VIRTUAL_NMIS;
2533         if (adjust_vmx_controls(min, opt, MSR_IA32_VMX_PINBASED_CTLS,
2534                                 &_pin_based_exec_control) < 0)
2535                 return -EIO;
2536
2537         min = CPU_BASED_HLT_EXITING |
2538 #ifdef CONFIG_X86_64
2539               CPU_BASED_CR8_LOAD_EXITING |
2540               CPU_BASED_CR8_STORE_EXITING |
2541 #endif
2542               CPU_BASED_CR3_LOAD_EXITING |
2543               CPU_BASED_CR3_STORE_EXITING |
2544               CPU_BASED_USE_IO_BITMAPS |
2545               CPU_BASED_MOV_DR_EXITING |
2546               CPU_BASED_USE_TSC_OFFSETING |
2547               CPU_BASED_MWAIT_EXITING |
2548               CPU_BASED_MONITOR_EXITING |
2549               CPU_BASED_INVLPG_EXITING |
2550               CPU_BASED_RDPMC_EXITING;
2551
2552         opt = CPU_BASED_TPR_SHADOW |
2553               CPU_BASED_USE_MSR_BITMAPS |
2554               CPU_BASED_ACTIVATE_SECONDARY_CONTROLS;
2555         if (adjust_vmx_controls(min, opt, MSR_IA32_VMX_PROCBASED_CTLS,
2556                                 &_cpu_based_exec_control) < 0)
2557                 return -EIO;
2558 #ifdef CONFIG_X86_64
2559         if ((_cpu_based_exec_control & CPU_BASED_TPR_SHADOW))
2560                 _cpu_based_exec_control &= ~CPU_BASED_CR8_LOAD_EXITING &
2561                                            ~CPU_BASED_CR8_STORE_EXITING;
2562 #endif
2563         if (_cpu_based_exec_control & CPU_BASED_ACTIVATE_SECONDARY_CONTROLS) {
2564                 min2 = 0;
2565                 opt2 = SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES |
2566                         SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE |
2567                         SECONDARY_EXEC_WBINVD_EXITING |
2568                         SECONDARY_EXEC_ENABLE_VPID |
2569                         SECONDARY_EXEC_ENABLE_EPT |
2570                         SECONDARY_EXEC_UNRESTRICTED_GUEST |
2571                         SECONDARY_EXEC_PAUSE_LOOP_EXITING |
2572                         SECONDARY_EXEC_RDTSCP |
2573                         SECONDARY_EXEC_ENABLE_INVPCID |
2574                         SECONDARY_EXEC_APIC_REGISTER_VIRT;
2575                 if (adjust_vmx_controls(min2, opt2,
2576                                         MSR_IA32_VMX_PROCBASED_CTLS2,
2577                                         &_cpu_based_2nd_exec_control) < 0)
2578                         return -EIO;
2579         }
2580 #ifndef CONFIG_X86_64
2581         if (!(_cpu_based_2nd_exec_control &
2582                                 SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES))
2583                 _cpu_based_exec_control &= ~CPU_BASED_TPR_SHADOW;
2584 #endif
2585
2586         if (!(_cpu_based_exec_control & CPU_BASED_TPR_SHADOW))
2587                 _cpu_based_2nd_exec_control &= ~(
2588                                 SECONDARY_EXEC_APIC_REGISTER_VIRT |
2589                                 SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE);
2590
2591         if (_cpu_based_2nd_exec_control & SECONDARY_EXEC_ENABLE_EPT) {
2592                 /* CR3 accesses and invlpg don't need to cause VM Exits when EPT
2593                    is enabled */
2594                 _cpu_based_exec_control &= ~(CPU_BASED_CR3_LOAD_EXITING |
2595                                              CPU_BASED_CR3_STORE_EXITING |
2596                                              CPU_BASED_INVLPG_EXITING);
2597                 rdmsr(MSR_IA32_VMX_EPT_VPID_CAP,
2598                       vmx_capability.ept, vmx_capability.vpid);
2599         }
2600
2601         min = 0;
2602 #ifdef CONFIG_X86_64
2603         min |= VM_EXIT_HOST_ADDR_SPACE_SIZE;
2604 #endif
2605         opt = VM_EXIT_SAVE_IA32_PAT | VM_EXIT_LOAD_IA32_PAT;
2606         if (adjust_vmx_controls(min, opt, MSR_IA32_VMX_EXIT_CTLS,
2607                                 &_vmexit_control) < 0)
2608                 return -EIO;
2609
2610         min = 0;
2611         opt = VM_ENTRY_LOAD_IA32_PAT;
2612         if (adjust_vmx_controls(min, opt, MSR_IA32_VMX_ENTRY_CTLS,
2613                                 &_vmentry_control) < 0)
2614                 return -EIO;
2615
2616         rdmsr(MSR_IA32_VMX_BASIC, vmx_msr_low, vmx_msr_high);
2617
2618         /* IA-32 SDM Vol 3B: VMCS size is never greater than 4kB. */
2619         if ((vmx_msr_high & 0x1fff) > PAGE_SIZE)
2620                 return -EIO;
2621
2622 #ifdef CONFIG_X86_64
2623         /* IA-32 SDM Vol 3B: 64-bit CPUs always have VMX_BASIC_MSR[48]==0. */
2624         if (vmx_msr_high & (1u<<16))
2625                 return -EIO;
2626 #endif
2627
2628         /* Require Write-Back (WB) memory type for VMCS accesses. */
2629         if (((vmx_msr_high >> 18) & 15) != 6)
2630                 return -EIO;
2631
2632         vmcs_conf->size = vmx_msr_high & 0x1fff;
2633         vmcs_conf->order = get_order(vmcs_conf->size);
2634         vmcs_conf->revision_id = vmx_msr_low;
2635
2636         vmcs_conf->pin_based_exec_ctrl = _pin_based_exec_control;
2637         vmcs_conf->cpu_based_exec_ctrl = _cpu_based_exec_control;
2638         vmcs_conf->cpu_based_2nd_exec_ctrl = _cpu_based_2nd_exec_control;
2639         vmcs_conf->vmexit_ctrl         = _vmexit_control;
2640         vmcs_conf->vmentry_ctrl        = _vmentry_control;
2641
2642         cpu_has_load_ia32_efer =
2643                 allow_1_setting(MSR_IA32_VMX_ENTRY_CTLS,
2644                                 VM_ENTRY_LOAD_IA32_EFER)
2645                 && allow_1_setting(MSR_IA32_VMX_EXIT_CTLS,
2646                                    VM_EXIT_LOAD_IA32_EFER);
2647
2648         cpu_has_load_perf_global_ctrl =
2649                 allow_1_setting(MSR_IA32_VMX_ENTRY_CTLS,
2650                                 VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL)
2651                 && allow_1_setting(MSR_IA32_VMX_EXIT_CTLS,
2652                                    VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL);
2653
2654         /*
2655          * Some cpus support VM_ENTRY_(LOAD|SAVE)_IA32_PERF_GLOBAL_CTRL
2656          * but due to the errata below it can't be used. The workaround is
2657          * to use the MSR load mechanism to switch IA32_PERF_GLOBAL_CTRL.
2658          *
2659          * VM Exit May Incorrectly Clear IA32_PERF_GLOBAL_CTRL [34:32]
2660          *
2661          * AAK155             (model 26)
2662          * AAP115             (model 30)
2663          * AAT100             (model 37)
2664          * BC86,AAY89,BD102   (model 44)
2665          * BA97               (model 46)
2666          *
2667          */
2668         if (cpu_has_load_perf_global_ctrl && boot_cpu_data.x86 == 0x6) {
2669                 switch (boot_cpu_data.x86_model) {
2670                 case 26:
2671                 case 30:
2672                 case 37:
2673                 case 44:
2674                 case 46:
2675                         cpu_has_load_perf_global_ctrl = false;
2676                         printk_once(KERN_WARNING "kvm: VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL "
2677                                         "does not work properly. Using workaround\n");
2678                         break;
2679                 default:
2680                         break;
2681                 }
2682         }
2683
2684         return 0;
2685 }
2686
2687 static struct vmcs *alloc_vmcs_cpu(int cpu)
2688 {
2689         int node = cpu_to_node(cpu);
2690         struct page *pages;
2691         struct vmcs *vmcs;
2692
2693         pages = alloc_pages_exact_node(node, GFP_KERNEL, vmcs_config.order);
2694         if (!pages)
2695                 return NULL;
2696         vmcs = page_address(pages);
2697         memset(vmcs, 0, vmcs_config.size);
2698         vmcs->revision_id = vmcs_config.revision_id; /* vmcs revision id */
2699         return vmcs;
2700 }
2701
2702 static struct vmcs *alloc_vmcs(void)
2703 {
2704         return alloc_vmcs_cpu(raw_smp_processor_id());
2705 }
2706
2707 static void free_vmcs(struct vmcs *vmcs)
2708 {
2709         free_pages((unsigned long)vmcs, vmcs_config.order);
2710 }
2711
2712 /*
2713  * Free a VMCS, but before that VMCLEAR it on the CPU where it was last loaded
2714  */
2715 static void free_loaded_vmcs(struct loaded_vmcs *loaded_vmcs)
2716 {
2717         if (!loaded_vmcs->vmcs)
2718                 return;
2719         loaded_vmcs_clear(loaded_vmcs);
2720         free_vmcs(loaded_vmcs->vmcs);
2721         loaded_vmcs->vmcs = NULL;
2722 }
2723
2724 static void free_kvm_area(void)
2725 {
2726         int cpu;
2727
2728         for_each_possible_cpu(cpu) {
2729                 free_vmcs(per_cpu(vmxarea, cpu));
2730                 per_cpu(vmxarea, cpu) = NULL;
2731         }
2732 }
2733
2734 static __init int alloc_kvm_area(void)
2735 {
2736         int cpu;
2737
2738         for_each_possible_cpu(cpu) {
2739                 struct vmcs *vmcs;
2740
2741                 vmcs = alloc_vmcs_cpu(cpu);
2742                 if (!vmcs) {
2743                         free_kvm_area();
2744                         return -ENOMEM;
2745                 }
2746
2747                 per_cpu(vmxarea, cpu) = vmcs;
2748         }
2749         return 0;
2750 }
2751
2752 static __init int hardware_setup(void)
2753 {
2754         if (setup_vmcs_config(&vmcs_config) < 0)
2755                 return -EIO;
2756
2757         if (boot_cpu_has(X86_FEATURE_NX))
2758                 kvm_enable_efer_bits(EFER_NX);
2759
2760         if (!cpu_has_vmx_vpid())
2761                 enable_vpid = 0;
2762
2763         if (!cpu_has_vmx_ept() ||
2764             !cpu_has_vmx_ept_4levels()) {
2765                 enable_ept = 0;
2766                 enable_unrestricted_guest = 0;
2767                 enable_ept_ad_bits = 0;
2768         }
2769
2770         if (!cpu_has_vmx_ept_ad_bits())
2771                 enable_ept_ad_bits = 0;
2772
2773         if (!cpu_has_vmx_unrestricted_guest())
2774                 enable_unrestricted_guest = 0;
2775
2776         if (!cpu_has_vmx_flexpriority())
2777                 flexpriority_enabled = 0;
2778
2779         if (!cpu_has_vmx_tpr_shadow())
2780                 kvm_x86_ops->update_cr8_intercept = NULL;
2781
2782         if (enable_ept && !cpu_has_vmx_ept_2m_page())
2783                 kvm_disable_largepages();
2784
2785         if (!cpu_has_vmx_ple())
2786                 ple_gap = 0;
2787
2788         if (!cpu_has_vmx_apic_register_virt())
2789                 enable_apicv_reg = 0;
2790
2791         if (nested)
2792                 nested_vmx_setup_ctls_msrs();
2793
2794         return alloc_kvm_area();
2795 }
2796
2797 static __exit void hardware_unsetup(void)
2798 {
2799         free_kvm_area();
2800 }
2801
2802 static bool emulation_required(struct kvm_vcpu *vcpu)
2803 {
2804         return emulate_invalid_guest_state && !guest_state_valid(vcpu);
2805 }
2806
2807 static void fix_pmode_seg(struct kvm_vcpu *vcpu, int seg,
2808                 struct kvm_segment *save)
2809 {
2810         if (!emulate_invalid_guest_state) {
2811                 /*
2812                  * CS and SS RPL should be equal during guest entry according
2813                  * to VMX spec, but in reality it is not always so. Since vcpu
2814                  * is in the middle of the transition from real mode to
2815                  * protected mode it is safe to assume that RPL 0 is a good
2816                  * default value.
2817                  */
2818                 if (seg == VCPU_SREG_CS || seg == VCPU_SREG_SS)
2819                         save->selector &= ~SELECTOR_RPL_MASK;
2820                 save->dpl = save->selector & SELECTOR_RPL_MASK;
2821                 save->s = 1;
2822         }
2823         vmx_set_segment(vcpu, save, seg);
2824 }
2825
2826 static void enter_pmode(struct kvm_vcpu *vcpu)
2827 {
2828         unsigned long flags;
2829         struct vcpu_vmx *vmx = to_vmx(vcpu);
2830
2831         /*
2832          * Update real mode segment cache. It may not be up to date if a
2833          * segment register was written while the vcpu was in guest mode.
2834          */
2835         vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_ES], VCPU_SREG_ES);
2836         vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_DS], VCPU_SREG_DS);
2837         vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_FS], VCPU_SREG_FS);
2838         vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_GS], VCPU_SREG_GS);
2839         vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_SS], VCPU_SREG_SS);
2840         vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_CS], VCPU_SREG_CS);
2841
2842         vmx->rmode.vm86_active = 0;
2843
2844         vmx_segment_cache_clear(vmx);
2845
2846         vmx_set_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_TR], VCPU_SREG_TR);
2847
2848         flags = vmcs_readl(GUEST_RFLAGS);
2849         flags &= RMODE_GUEST_OWNED_EFLAGS_BITS;
2850         flags |= vmx->rmode.save_rflags & ~RMODE_GUEST_OWNED_EFLAGS_BITS;
2851         vmcs_writel(GUEST_RFLAGS, flags);
2852
2853         vmcs_writel(GUEST_CR4, (vmcs_readl(GUEST_CR4) & ~X86_CR4_VME) |
2854                         (vmcs_readl(CR4_READ_SHADOW) & X86_CR4_VME));
2855
2856         update_exception_bitmap(vcpu);
2857
2858         fix_pmode_seg(vcpu, VCPU_SREG_CS, &vmx->rmode.segs[VCPU_SREG_CS]);
2859         fix_pmode_seg(vcpu, VCPU_SREG_SS, &vmx->rmode.segs[VCPU_SREG_SS]);
2860         fix_pmode_seg(vcpu, VCPU_SREG_ES, &vmx->rmode.segs[VCPU_SREG_ES]);
2861         fix_pmode_seg(vcpu, VCPU_SREG_DS, &vmx->rmode.segs[VCPU_SREG_DS]);
2862         fix_pmode_seg(vcpu, VCPU_SREG_FS, &vmx->rmode.segs[VCPU_SREG_FS]);
2863         fix_pmode_seg(vcpu, VCPU_SREG_GS, &vmx->rmode.segs[VCPU_SREG_GS]);
2864
2865         /* CPL is always 0 when CPU enters protected mode */
2866         __set_bit(VCPU_EXREG_CPL, (ulong *)&vcpu->arch.regs_avail);
2867         vmx->cpl = 0;
2868 }
2869
2870 static gva_t rmode_tss_base(struct kvm *kvm)
2871 {
2872         if (!kvm->arch.tss_addr) {
2873                 struct kvm_memslots *slots;
2874                 struct kvm_memory_slot *slot;
2875                 gfn_t base_gfn;
2876
2877                 slots = kvm_memslots(kvm);
2878                 slot = id_to_memslot(slots, 0);
2879                 base_gfn = slot->base_gfn + slot->npages - 3;
2880
2881                 return base_gfn << PAGE_SHIFT;
2882         }
2883         return kvm->arch.tss_addr;
2884 }
2885
2886 static void fix_rmode_seg(int seg, struct kvm_segment *save)
2887 {
2888         const struct kvm_vmx_segment_field *sf = &kvm_vmx_segment_fields[seg];
2889         struct kvm_segment var = *save;
2890
2891         var.dpl = 0x3;
2892         if (seg == VCPU_SREG_CS)
2893                 var.type = 0x3;
2894
2895         if (!emulate_invalid_guest_state) {
2896                 var.selector = var.base >> 4;
2897                 var.base = var.base & 0xffff0;
2898                 var.limit = 0xffff;
2899                 var.g = 0;
2900                 var.db = 0;
2901                 var.present = 1;
2902                 var.s = 1;
2903                 var.l = 0;
2904                 var.unusable = 0;
2905                 var.type = 0x3;
2906                 var.avl = 0;
2907                 if (save->base & 0xf)
2908                         printk_once(KERN_WARNING "kvm: segment base is not "
2909                                         "paragraph aligned when entering "
2910                                         "protected mode (seg=%d)", seg);
2911         }
2912
2913         vmcs_write16(sf->selector, var.selector);
2914         vmcs_write32(sf->base, var.base);
2915         vmcs_write32(sf->limit, var.limit);
2916         vmcs_write32(sf->ar_bytes, vmx_segment_access_rights(&var));
2917 }
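
/*
 * A minimal sketch of the real-mode segment arithmetic fix_rmode_seg()
 * relies on: a linear address is selector * 16 + offset, so only a
 * paragraph-aligned base maps cleanly back to selector = base >> 4.
 * The helper and its example values are purely illustrative.
 */
static inline u32 example_rmode_linear(u16 selector, u16 offset)
{
        return ((u32)selector << 4) + offset;   /* e.g. 0xf000:0xfff0 -> 0xffff0 */
}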
2918
2919 static void enter_rmode(struct kvm_vcpu *vcpu)
2920 {
2921         unsigned long flags;
2922         struct vcpu_vmx *vmx = to_vmx(vcpu);
2923
2924         vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_TR], VCPU_SREG_TR);
2925         vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_ES], VCPU_SREG_ES);
2926         vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_DS], VCPU_SREG_DS);
2927         vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_FS], VCPU_SREG_FS);
2928         vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_GS], VCPU_SREG_GS);
2929         vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_SS], VCPU_SREG_SS);
2930         vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_CS], VCPU_SREG_CS);
2931
2932         vmx->rmode.vm86_active = 1;
2933
2934         /*
2935          * Very old userspace does not call KVM_SET_TSS_ADDR before entering
2936          * the vcpu.  Call it here with a physical address roughly 16MB below 4GB.
2937          */
2938         if (!vcpu->kvm->arch.tss_addr) {
2939                 printk_once(KERN_WARNING "kvm: KVM_SET_TSS_ADDR needs to be "
2940                              "called before entering vcpu\n");
2941                 srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
2942                 vmx_set_tss_addr(vcpu->kvm, 0xfeffd000);
2943                 vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
2944         }
2945
2946         vmx_segment_cache_clear(vmx);
2947
2948         vmcs_writel(GUEST_TR_BASE, rmode_tss_base(vcpu->kvm));
2949         vmcs_write32(GUEST_TR_LIMIT, RMODE_TSS_SIZE - 1);
2950         vmcs_write32(GUEST_TR_AR_BYTES, 0x008b);
2951
2952         flags = vmcs_readl(GUEST_RFLAGS);
2953         vmx->rmode.save_rflags = flags;
2954
2955         flags |= X86_EFLAGS_IOPL | X86_EFLAGS_VM;
2956
2957         vmcs_writel(GUEST_RFLAGS, flags);
2958         vmcs_writel(GUEST_CR4, vmcs_readl(GUEST_CR4) | X86_CR4_VME);
2959         update_exception_bitmap(vcpu);
2960
2961         fix_rmode_seg(VCPU_SREG_SS, &vmx->rmode.segs[VCPU_SREG_SS]);
2962         fix_rmode_seg(VCPU_SREG_CS, &vmx->rmode.segs[VCPU_SREG_CS]);
2963         fix_rmode_seg(VCPU_SREG_ES, &vmx->rmode.segs[VCPU_SREG_ES]);
2964         fix_rmode_seg(VCPU_SREG_DS, &vmx->rmode.segs[VCPU_SREG_DS]);
2965         fix_rmode_seg(VCPU_SREG_GS, &vmx->rmode.segs[VCPU_SREG_GS]);
2966         fix_rmode_seg(VCPU_SREG_FS, &vmx->rmode.segs[VCPU_SREG_FS]);
2967
2968         kvm_mmu_reset_context(vcpu);
2969 }
2970
2971 static void vmx_set_efer(struct kvm_vcpu *vcpu, u64 efer)
2972 {
2973         struct vcpu_vmx *vmx = to_vmx(vcpu);
2974         struct shared_msr_entry *msr = find_msr_entry(vmx, MSR_EFER);
2975
2976         if (!msr)
2977                 return;
2978
2979         /*
2980          * Force kernel_gs_base reloading before EFER changes, as control
2981          * of this MSR depends on is_long_mode().
2982          */
2983         vmx_load_host_state(to_vmx(vcpu));
2984         vcpu->arch.efer = efer;
2985         if (efer & EFER_LMA) {
2986                 vmcs_write32(VM_ENTRY_CONTROLS,
2987                              vmcs_read32(VM_ENTRY_CONTROLS) |
2988                              VM_ENTRY_IA32E_MODE);
2989                 msr->data = efer;
2990         } else {
2991                 vmcs_write32(VM_ENTRY_CONTROLS,
2992                              vmcs_read32(VM_ENTRY_CONTROLS) &
2993                              ~VM_ENTRY_IA32E_MODE);
2994
2995                 msr->data = efer & ~EFER_LME;
2996         }
2997         setup_msrs(vmx);
2998 }
2999
3000 #ifdef CONFIG_X86_64
3001
3002 static void enter_lmode(struct kvm_vcpu *vcpu)
3003 {
3004         u32 guest_tr_ar;
3005
3006         vmx_segment_cache_clear(to_vmx(vcpu));
3007
3008         guest_tr_ar = vmcs_read32(GUEST_TR_AR_BYTES);
3009         if ((guest_tr_ar & AR_TYPE_MASK) != AR_TYPE_BUSY_64_TSS) {
3010                 pr_debug_ratelimited("%s: tss fixup for long mode.\n",
3011                                      __func__);
3012                 vmcs_write32(GUEST_TR_AR_BYTES,
3013                              (guest_tr_ar & ~AR_TYPE_MASK)
3014                              | AR_TYPE_BUSY_64_TSS);
3015         }
3016         vmx_set_efer(vcpu, vcpu->arch.efer | EFER_LMA);
3017 }
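
/*
 * The fixup in enter_lmode() rewrites the TR type to AR_TYPE_BUSY_64_TSS
 * (11), the TSS type that the VM-entry guest-state checks require once the
 * guest runs in IA-32e mode.
 */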
3018
3019 static void exit_lmode(struct kvm_vcpu *vcpu)
3020 {
3021         vmcs_write32(VM_ENTRY_CONTROLS,
3022                      vmcs_read32(VM_ENTRY_CONTROLS)
3023                      & ~VM_ENTRY_IA32E_MODE);
3024         vmx_set_efer(vcpu, vcpu->arch.efer & ~EFER_LMA);
3025 }
3026
3027 #endif
3028
3029 static void vmx_flush_tlb(struct kvm_vcpu *vcpu)
3030 {
3031         vpid_sync_context(to_vmx(vcpu));
3032         if (enable_ept) {
3033                 if (!VALID_PAGE(vcpu->arch.mmu.root_hpa))
3034                         return;
3035                 ept_sync_context(construct_eptp(vcpu->arch.mmu.root_hpa));
3036         }
3037 }
3038
3039 static void vmx_decache_cr0_guest_bits(struct kvm_vcpu *vcpu)
3040 {
3041         ulong cr0_guest_owned_bits = vcpu->arch.cr0_guest_owned_bits;
3042
3043         vcpu->arch.cr0 &= ~cr0_guest_owned_bits;
3044         vcpu->arch.cr0 |= vmcs_readl(GUEST_CR0) & cr0_guest_owned_bits;
3045 }
3046
3047 static void vmx_decache_cr3(struct kvm_vcpu *vcpu)
3048 {
3049         if (enable_ept && is_paging(vcpu))
3050                 vcpu->arch.cr3 = vmcs_readl(GUEST_CR3);
3051         __set_bit(VCPU_EXREG_CR3, (ulong *)&vcpu->arch.regs_avail);
3052 }
3053
3054 static void vmx_decache_cr4_guest_bits(struct kvm_vcpu *vcpu)
3055 {
3056         ulong cr4_guest_owned_bits = vcpu->arch.cr4_guest_owned_bits;
3057
3058         vcpu->arch.cr4 &= ~cr4_guest_owned_bits;
3059         vcpu->arch.cr4 |= vmcs_readl(GUEST_CR4) & cr4_guest_owned_bits;
3060 }
3061
3062 static void ept_load_pdptrs(struct kvm_vcpu *vcpu)
3063 {
3064         if (!test_bit(VCPU_EXREG_PDPTR,
3065                       (unsigned long *)&vcpu->arch.regs_dirty))
3066                 return;
3067
3068         if (is_paging(vcpu) && is_pae(vcpu) && !is_long_mode(vcpu)) {
3069                 vmcs_write64(GUEST_PDPTR0, vcpu->arch.mmu.pdptrs[0]);
3070                 vmcs_write64(GUEST_PDPTR1, vcpu->arch.mmu.pdptrs[1]);
3071                 vmcs_write64(GUEST_PDPTR2, vcpu->arch.mmu.pdptrs[2]);
3072                 vmcs_write64(GUEST_PDPTR3, vcpu->arch.mmu.pdptrs[3]);
3073         }
3074 }
3075
3076 static void ept_save_pdptrs(struct kvm_vcpu *vcpu)
3077 {
3078         if (is_paging(vcpu) && is_pae(vcpu) && !is_long_mode(vcpu)) {
3079                 vcpu->arch.mmu.pdptrs[0] = vmcs_read64(GUEST_PDPTR0);
3080                 vcpu->arch.mmu.pdptrs[1] = vmcs_read64(GUEST_PDPTR1);
3081                 vcpu->arch.mmu.pdptrs[2] = vmcs_read64(GUEST_PDPTR2);
3082                 vcpu->arch.mmu.pdptrs[3] = vmcs_read64(GUEST_PDPTR3);
3083         }
3084
3085         __set_bit(VCPU_EXREG_PDPTR,
3086                   (unsigned long *)&vcpu->arch.regs_avail);
3087         __set_bit(VCPU_EXREG_PDPTR,
3088                   (unsigned long *)&vcpu->arch.regs_dirty);
3089 }
3090
3091 static int vmx_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4);
3092
3093 static void ept_update_paging_mode_cr0(unsigned long *hw_cr0,
3094                                         unsigned long cr0,
3095                                         struct kvm_vcpu *vcpu)
3096 {
3097         if (!test_bit(VCPU_EXREG_CR3, (ulong *)&vcpu->arch.regs_avail))
3098                 vmx_decache_cr3(vcpu);
3099         if (!(cr0 & X86_CR0_PG)) {
3100                 /* From paging/starting to nonpaging */
3101                 vmcs_write32(CPU_BASED_VM_EXEC_CONTROL,
3102                              vmcs_read32(CPU_BASED_VM_EXEC_CONTROL) |
3103                              (CPU_BASED_CR3_LOAD_EXITING |
3104                               CPU_BASED_CR3_STORE_EXITING));
3105                 vcpu->arch.cr0 = cr0;
3106                 vmx_set_cr4(vcpu, kvm_read_cr4(vcpu));
3107         } else if (!is_paging(vcpu)) {
3108                 /* From nonpaging to paging */
3109                 vmcs_write32(CPU_BASED_VM_EXEC_CONTROL,
3110                              vmcs_read32(CPU_BASED_VM_EXEC_CONTROL) &
3111                              ~(CPU_BASED_CR3_LOAD_EXITING |
3112                                CPU_BASED_CR3_STORE_EXITING));
3113                 vcpu->arch.cr0 = cr0;
3114                 vmx_set_cr4(vcpu, kvm_read_cr4(vcpu));
3115         }
3116
3117         if (!(cr0 & X86_CR0_WP))
3118                 *hw_cr0 &= ~X86_CR0_WP;
3119 }
3120
3121 static void vmx_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
3122 {
3123         struct vcpu_vmx *vmx = to_vmx(vcpu);
3124         unsigned long hw_cr0;
3125
3126         if (enable_unrestricted_guest)
3127                 hw_cr0 = (cr0 & ~KVM_GUEST_CR0_MASK_UNRESTRICTED_GUEST)
3128                         | KVM_VM_CR0_ALWAYS_ON_UNRESTRICTED_GUEST;
3129         else {
3130                 hw_cr0 = (cr0 & ~KVM_GUEST_CR0_MASK) | KVM_VM_CR0_ALWAYS_ON;
3131
3132                 if (vmx->rmode.vm86_active && (cr0 & X86_CR0_PE))
3133                         enter_pmode(vcpu);
3134
3135                 if (!vmx->rmode.vm86_active && !(cr0 & X86_CR0_PE))
3136                         enter_rmode(vcpu);
3137         }
3138
3139 #ifdef CONFIG_X86_64
3140         if (vcpu->arch.efer & EFER_LME) {
3141                 if (!is_paging(vcpu) && (cr0 & X86_CR0_PG))
3142                         enter_lmode(vcpu);
3143                 if (is_paging(vcpu) && !(cr0 & X86_CR0_PG))
3144                         exit_lmode(vcpu);
3145         }
3146 #endif
3147
3148         if (enable_ept)
3149                 ept_update_paging_mode_cr0(&hw_cr0, cr0, vcpu);
3150
3151         if (!vcpu->fpu_active)
3152                 hw_cr0 |= X86_CR0_TS | X86_CR0_MP;
3153
3154         vmcs_writel(CR0_READ_SHADOW, cr0);
3155         vmcs_writel(GUEST_CR0, hw_cr0);
3156         vcpu->arch.cr0 = cr0;
3157
3158         /* depends on vcpu->arch.cr0 to be set to a new value */
3159         vmx->emulation_required = emulation_required(vcpu);
3160 }
3161
3162 static u64 construct_eptp(unsigned long root_hpa)
3163 {
3164         u64 eptp;
3165
3166         /* TODO: write the value read from the MSR */
3167         eptp = VMX_EPT_DEFAULT_MT |
3168                 VMX_EPT_DEFAULT_GAW << VMX_EPT_GAW_EPTP_SHIFT;
3169         if (enable_ept_ad_bits)
3170                 eptp |= VMX_EPT_AD_ENABLE_BIT;
3171         eptp |= (root_hpa & PAGE_MASK);
3172
3173         return eptp;
3174 }
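
/*
 * Illustrative value, assuming the usual constants from asm/vmx.h
 * (VMX_EPT_DEFAULT_MT == 6 for write-back, VMX_EPT_DEFAULT_GAW == 3 for a
 * four-level walk, A/D enable in bit 6): a root_hpa of 0x12345000 would
 * yield an EPTP of 0x1234505e with EPT A/D bits enabled, or 0x1234501e
 * without them.
 */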
3175
3176 static void vmx_set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3)
3177 {
3178         unsigned long guest_cr3;
3179         u64 eptp;
3180
3181         guest_cr3 = cr3;
3182         if (enable_ept) {
3183                 eptp = construct_eptp(cr3);
3184                 vmcs_write64(EPT_POINTER, eptp);
3185                 guest_cr3 = is_paging(vcpu) ? kvm_read_cr3(vcpu) :
3186                         vcpu->kvm->arch.ept_identity_map_addr;
3187                 ept_load_pdptrs(vcpu);
3188         }
3189
3190         vmx_flush_tlb(vcpu);
3191         vmcs_writel(GUEST_CR3, guest_cr3);
3192 }
3193
3194 static int vmx_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
3195 {
3196         unsigned long hw_cr4 = cr4 | (to_vmx(vcpu)->rmode.vm86_active ?
3197                     KVM_RMODE_VM_CR4_ALWAYS_ON : KVM_PMODE_VM_CR4_ALWAYS_ON);
3198
3199         if (cr4 & X86_CR4_VMXE) {
3200                 /*
3201                  * To use VMXON (and later other VMX instructions), a guest
3202                  * must first be able to turn on cr4.VMXE (see handle_vmon()).
3203                  * So basically the check on whether to allow nested VMX
3204                  * is here.
3205                  */
3206                 if (!nested_vmx_allowed(vcpu))
3207                         return 1;
3208         } else if (to_vmx(vcpu)->nested.vmxon)
3209                 return 1;
3210
3211         vcpu->arch.cr4 = cr4;
3212         if (enable_ept) {
3213                 if (!is_paging(vcpu)) {
3214                         hw_cr4 &= ~X86_CR4_PAE;
3215                         hw_cr4 |= X86_CR4_PSE;
3216                 } else if (!(cr4 & X86_CR4_PAE)) {
3217                         hw_cr4 &= ~X86_CR4_PAE;
3218                 }
3219         }
3220
3221         vmcs_writel(CR4_READ_SHADOW, cr4);
3222         vmcs_writel(GUEST_CR4, hw_cr4);
3223         return 0;
3224 }
3225
3226 static void vmx_get_segment(struct kvm_vcpu *vcpu,
3227                             struct kvm_segment *var, int seg)
3228 {
3229         struct vcpu_vmx *vmx = to_vmx(vcpu);
3230         u32 ar;
3231
3232         if (vmx->rmode.vm86_active && seg != VCPU_SREG_LDTR) {
3233                 *var = vmx->rmode.segs[seg];
3234                 if (seg == VCPU_SREG_TR
3235                     || var->selector == vmx_read_guest_seg_selector(vmx, seg))
3236                         return;
3237                 var->base = vmx_read_guest_seg_base(vmx, seg);
3238                 var->selector = vmx_read_guest_seg_selector(vmx, seg);
3239                 return;
3240         }
3241         var->base = vmx_read_guest_seg_base(vmx, seg);
3242         var->limit = vmx_read_guest_seg_limit(vmx, seg);
3243         var->selector = vmx_read_guest_seg_selector(vmx, seg);
3244         ar = vmx_read_guest_seg_ar(vmx, seg);
3245         var->type = ar & 15;
3246         var->s = (ar >> 4) & 1;
3247         var->dpl = (ar >> 5) & 3;
3248         var->present = (ar >> 7) & 1;
3249         var->avl = (ar >> 12) & 1;
3250         var->l = (ar >> 13) & 1;
3251         var->db = (ar >> 14) & 1;
3252         var->g = (ar >> 15) & 1;
3253         var->unusable = (ar >> 16) & 1;
3254 }
3255
3256 static u64 vmx_get_segment_base(struct kvm_vcpu *vcpu, int seg)
3257 {
3258         struct kvm_segment s;
3259
3260         if (to_vmx(vcpu)->rmode.vm86_active) {
3261                 vmx_get_segment(vcpu, &s, seg);
3262                 return s.base;
3263         }
3264         return vmx_read_guest_seg_base(to_vmx(vcpu), seg);
3265 }
3266
3267 static int vmx_get_cpl(struct kvm_vcpu *vcpu)
3268 {
3269         struct vcpu_vmx *vmx = to_vmx(vcpu);
3270
3271         if (!is_protmode(vcpu))
3272                 return 0;
3273
3274         if (!is_long_mode(vcpu)
3275             && (kvm_get_rflags(vcpu) & X86_EFLAGS_VM)) /* if virtual 8086 */
3276                 return 3;
3277
3278         if (!test_bit(VCPU_EXREG_CPL, (ulong *)&vcpu->arch.regs_avail)) {
3279                 __set_bit(VCPU_EXREG_CPL, (ulong *)&vcpu->arch.regs_avail);
3280                 vmx->cpl = vmx_read_guest_seg_selector(vmx, VCPU_SREG_CS) & 3;
3281         }
3282
3283         return vmx->cpl;
3284 }
3285
3286
3287 static u32 vmx_segment_access_rights(struct kvm_segment *var)
3288 {
3289         u32 ar;
3290
3291         if (var->unusable || !var->present)
3292                 ar = 1 << 16;
3293         else {
3294                 ar = var->type & 15;
3295                 ar |= (var->s & 1) << 4;
3296                 ar |= (var->dpl & 3) << 5;
3297                 ar |= (var->present & 1) << 7;
3298                 ar |= (var->avl & 1) << 12;
3299                 ar |= (var->l & 1) << 13;
3300                 ar |= (var->db & 1) << 14;
3301                 ar |= (var->g & 1) << 15;
3302         }
3303
3304         return ar;
3305 }
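
/*
 * The packing above follows the VMCS access-rights layout: bits 3:0 type,
 * bit 4 S, bits 6:5 DPL, bit 7 P, bit 12 AVL, bit 13 L, bit 14 D/B,
 * bit 15 G, bit 16 unusable.  A flat 32-bit read/write data segment
 * (type 3, s, dpl 0, present, db, g) would pack to 0xc093, while the
 * 16-bit segments written by fix_rmode_seg() pack to 0xf3.
 */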
3306
3307 static void vmx_set_segment(struct kvm_vcpu *vcpu,
3308                             struct kvm_segment *var, int seg)
3309 {
3310         struct vcpu_vmx *vmx = to_vmx(vcpu);
3311         const struct kvm_vmx_segment_field *sf = &kvm_vmx_segment_fields[seg];
3312
3313         vmx_segment_cache_clear(vmx);
3314         if (seg == VCPU_SREG_CS)
3315                 __clear_bit(VCPU_EXREG_CPL, (ulong *)&vcpu->arch.regs_avail);
3316
3317         if (vmx->rmode.vm86_active && seg != VCPU_SREG_LDTR) {
3318                 vmx->rmode.segs[seg] = *var;
3319                 if (seg == VCPU_SREG_TR)
3320                         vmcs_write16(sf->selector, var->selector);
3321                 else if (var->s)
3322                         fix_rmode_seg(seg, &vmx->rmode.segs[seg]);
3323                 goto out;
3324         }
3325
3326         vmcs_writel(sf->base, var->base);
3327         vmcs_write32(sf->limit, var->limit);
3328         vmcs_write16(sf->selector, var->selector);
3329
3330         /*
3331          *   Fix the "Accessed" bit in the AR field of segment registers
3332          * for older qemu binaries.
3333          *   The IA-32 architecture specifies that at processor reset the
3334          * "Accessed" bit in the AR field of segment registers is 1, but
3335          * qemu sets it to 0 in its userland reset code.  That causes an
3336          * invalid-guest-state vmexit when "unrestricted guest" mode is
3337          * turned on.
3338          *   A fix for this cpu_reset issue is being pushed into the qemu
3339          * tree; newer qemu binaries with that fix will not need this kvm hack.
3340          */
3341         if (enable_unrestricted_guest && (seg != VCPU_SREG_LDTR))
3342                 var->type |= 0x1; /* Accessed */
3343
3344         vmcs_write32(sf->ar_bytes, vmx_segment_access_rights(var));
3345
3346 out:
3347         vmx->emulation_required |= emulation_required(vcpu);
3348 }
3349
3350 static void vmx_get_cs_db_l_bits(struct kvm_vcpu *vcpu, int *db, int *l)
3351 {
3352         u32 ar = vmx_read_guest_seg_ar(to_vmx(vcpu), VCPU_SREG_CS);
3353
3354         *db = (ar >> 14) & 1;
3355         *l = (ar >> 13) & 1;
3356 }
3357
3358 static void vmx_get_idt(struct kvm_vcpu *vcpu, struct desc_ptr *dt)
3359 {
3360         dt->size = vmcs_read32(GUEST_IDTR_LIMIT);
3361         dt->address = vmcs_readl(GUEST_IDTR_BASE);
3362 }
3363
3364 static void vmx_set_idt(struct kvm_vcpu *vcpu, struct desc_ptr *dt)
3365 {
3366         vmcs_write32(GUEST_IDTR_LIMIT, dt->size);
3367         vmcs_writel(GUEST_IDTR_BASE, dt->address);
3368 }
3369
3370 static void vmx_get_gdt(struct kvm_vcpu *vcpu, struct desc_ptr *dt)
3371 {
3372         dt->size = vmcs_read32(GUEST_GDTR_LIMIT);
3373         dt->address = vmcs_readl(GUEST_GDTR_BASE);
3374 }
3375
3376 static void vmx_set_gdt(struct kvm_vcpu *vcpu, struct desc_ptr *dt)
3377 {
3378         vmcs_write32(GUEST_GDTR_LIMIT, dt->size);
3379         vmcs_writel(GUEST_GDTR_BASE, dt->address);
3380 }
3381
3382 static bool rmode_segment_valid(struct kvm_vcpu *vcpu, int seg)
3383 {
3384         struct kvm_segment var;
3385         u32 ar;
3386
3387         vmx_get_segment(vcpu, &var, seg);
3388         var.dpl = 0x3;
3389         if (seg == VCPU_SREG_CS)
3390                 var.type = 0x3;
3391         ar = vmx_segment_access_rights(&var);
3392
3393         if (var.base != (var.selector << 4))
3394                 return false;
3395         if (var.limit != 0xffff)
3396                 return false;
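        /* 0xf3: present, S=1, DPL=3, type 3 (read/write, accessed) */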
3397         if (ar != 0xf3)
3398                 return false;
3399
3400         return true;
3401 }
3402
3403 static bool code_segment_valid(struct kvm_vcpu *vcpu)
3404 {
3405         struct kvm_segment cs;
3406         unsigned int cs_rpl;
3407
3408         vmx_get_segment(vcpu, &cs, VCPU_SREG_CS);
3409         cs_rpl = cs.selector & SELECTOR_RPL_MASK;
3410
3411         if (cs.unusable)
3412                 return false;
3413         if (~cs.type & (AR_TYPE_CODE_MASK|AR_TYPE_ACCESSES_MASK))
3414                 return false;
3415         if (!cs.s)
3416                 return false;
3417         if (cs.type & AR_TYPE_WRITEABLE_MASK) {
3418                 if (cs.dpl > cs_rpl)
3419                         return false;
3420         } else {
3421                 if (cs.dpl != cs_rpl)
3422                         return false;
3423         }
3424         if (!cs.present)
3425                 return false;
3426
3427         /* TODO: add a Reserved field check; this will require a new member in the kvm_segment_field structure */
3428         return true;
3429 }
3430
3431 static bool stack_segment_valid(struct kvm_vcpu *vcpu)
3432 {
3433         struct kvm_segment ss;
3434         unsigned int ss_rpl;
3435
3436         vmx_get_segment(vcpu, &ss, VCPU_SREG_SS);
3437         ss_rpl = ss.selector & SELECTOR_RPL_MASK;
3438
3439         if (ss.unusable)
3440                 return true;
3441         if (ss.type != 3 && ss.type != 7)
3442                 return false;
3443         if (!ss.s)
3444                 return false;
3445         if (ss.dpl != ss_rpl) /* DPL != RPL */
3446                 return false;
3447         if (!ss.present)
3448                 return false;
3449
3450         return true;
3451 }
3452
3453 static bool data_segment_valid(struct kvm_vcpu *vcpu, int seg)
3454 {
3455         struct kvm_segment var;
3456         unsigned int rpl;
3457
3458         vmx_get_segment(vcpu, &var, seg);
3459         rpl = var.selector & SELECTOR_RPL_MASK;
3460
3461         if (var.unusable)
3462                 return true;
3463         if (!var.s)
3464                 return false;
3465         if (!var.present)
3466                 return false;
3467         if (~var.type & (AR_TYPE_CODE_MASK|AR_TYPE_WRITEABLE_MASK)) {
3468                 if (var.dpl < rpl) /* DPL < RPL */
3469                         return false;
3470         }
3471
3472         /* TODO: add other members to kvm_segment_field to allow checking
3473          * for other access-rights flags.
3474          */
3475         return true;
3476 }
3477
3478 static bool tr_valid(struct kvm_vcpu *vcpu)
3479 {
3480         struct kvm_segment tr;
3481
3482         vmx_get_segment(vcpu, &tr, VCPU_SREG_TR);
3483
3484         if (tr.unusable)
3485                 return false;
3486         if (tr.selector & SELECTOR_TI_MASK)     /* TI = 1 */
3487                 return false;
3488         if (tr.type != 3 && tr.type != 11) /* TODO: Check if guest is in IA32e mode */
3489                 return false;
3490         if (!tr.present)
3491                 return false;
3492
3493         return true;
3494 }
3495
3496 static bool ldtr_valid(struct kvm_vcpu *vcpu)
3497 {
3498         struct kvm_segment ldtr;
3499
3500         vmx_get_segment(vcpu, &ldtr, VCPU_SREG_LDTR);
3501
3502         if (ldtr.unusable)
3503                 return true;
3504         if (ldtr.selector & SELECTOR_TI_MASK)   /* TI = 1 */
3505                 return false;
3506         if (ldtr.type != 2)
3507                 return false;
3508         if (!ldtr.present)
3509                 return false;
3510
3511         return true;
3512 }
3513
3514 static bool cs_ss_rpl_check(struct kvm_vcpu *vcpu)
3515 {
3516         struct kvm_segment cs, ss;
3517
3518         vmx_get_segment(vcpu, &cs, VCPU_SREG_CS);
3519         vmx_get_segment(vcpu, &ss, VCPU_SREG_SS);
3520
3521         return ((cs.selector & SELECTOR_RPL_MASK) ==
3522                  (ss.selector & SELECTOR_RPL_MASK));
3523 }
3524
3525 /*
3526  * Check if the guest state is valid.  Returns true if valid, false if
3527  * not.
3528  * We assume that registers are always usable.
3529  */
3530 static bool guest_state_valid(struct kvm_vcpu *vcpu)
3531 {
3532         if (enable_unrestricted_guest)
3533                 return true;
3534
3535         /* real mode guest state checks */
3536         if (!is_protmode(vcpu)) {
3537                 if (!rmode_segment_valid(vcpu, VCPU_SREG_CS))
3538                         return false;
3539                 if (!rmode_segment_valid(vcpu, VCPU_SREG_SS))
3540                         return false;
3541                 if (!rmode_segment_valid(vcpu, VCPU_SREG_DS))
3542                         return false;
3543                 if (!rmode_segment_valid(vcpu, VCPU_SREG_ES))
3544                         return false;
3545                 if (!rmode_segment_valid(vcpu, VCPU_SREG_FS))
3546                         return false;
3547                 if (!rmode_segment_valid(vcpu, VCPU_SREG_GS))
3548                         return false;
3549         } else {
3550         /* protected mode guest state checks */
3551                 if (!cs_ss_rpl_check(vcpu))
3552                         return false;
3553                 if (!code_segment_valid(vcpu))
3554                         return false;
3555                 if (!stack_segment_valid(vcpu))
3556                         return false;
3557                 if (!data_segment_valid(vcpu, VCPU_SREG_DS))
3558                         return false;
3559                 if (!data_segment_valid(vcpu, VCPU_SREG_ES))
3560                         return false;
3561                 if (!data_segment_valid(vcpu, VCPU_SREG_FS))
3562                         return false;
3563                 if (!data_segment_valid(vcpu, VCPU_SREG_GS))
3564                         return false;
3565                 if (!tr_valid(vcpu))
3566                         return false;
3567                 if (!ldtr_valid(vcpu))
3568                         return false;
3569         }
3570         /* TODO:
3571          * - Add checks on RIP
3572          * - Add checks on RFLAGS
3573          */
3574
3575         return true;
3576 }
3577
3578 static int init_rmode_tss(struct kvm *kvm)
3579 {
3580         gfn_t fn;
3581         u16 data = 0;
3582         int r, idx, ret = 0;
3583
3584         idx = srcu_read_lock(&kvm->srcu);
3585         fn = rmode_tss_base(kvm) >> PAGE_SHIFT;
3586         r = kvm_clear_guest_page(kvm, fn, 0, PAGE_SIZE);
3587         if (r < 0)
3588                 goto out;
3589         data = TSS_BASE_SIZE + TSS_REDIRECTION_SIZE;
3590         r = kvm_write_guest_page(kvm, fn++, &data,
3591                         TSS_IOPB_BASE_OFFSET, sizeof(u16));
3592         if (r < 0)
3593                 goto out;
3594         r = kvm_clear_guest_page(kvm, fn++, 0, PAGE_SIZE);
3595         if (r < 0)
3596                 goto out;
3597         r = kvm_clear_guest_page(kvm, fn, 0, PAGE_SIZE);
3598         if (r < 0)
3599                 goto out;
3600         data = ~0;
3601         r = kvm_write_guest_page(kvm, fn, &data,
3602                                  RMODE_TSS_SIZE - 2 * PAGE_SIZE - 1,
3603                                  sizeof(u8));
3604         if (r < 0)
3605                 goto out;
3606
3607         ret = 1;
3608 out:
3609         srcu_read_unlock(&kvm->srcu, idx);
3610         return ret;
3611 }
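
/*
 * Layout note: the word written at TSS_IOPB_BASE_OFFSET points the I/O
 * permission bitmap past the TSS proper and the interrupt-redirection map,
 * and the final 0xff byte terminates the bitmap as the architecture
 * expects, so the dummy TSS spans the three guest pages cleared above.
 */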
3612
3613 static int init_rmode_identity_map(struct kvm *kvm)
3614 {
3615         int i, idx, r, ret;
3616         pfn_t identity_map_pfn;
3617         u32 tmp;
3618
3619         if (!enable_ept)
3620                 return 1;
3621         if (unlikely(!kvm->arch.ept_identity_pagetable)) {
3622                 printk(KERN_ERR "EPT: identity-mapping pagetable "
3623                         "has not been allocated!\n");
3624                 return 0;
3625         }
3626         if (likely(kvm->arch.ept_identity_pagetable_done))
3627                 return 1;
3628         ret = 0;
3629         identity_map_pfn = kvm->arch.ept_identity_map_addr >> PAGE_SHIFT;
3630         idx = srcu_read_lock(&kvm->srcu);
3631         r = kvm_clear_guest_page(kvm, identity_map_pfn, 0, PAGE_SIZE);
3632         if (r < 0)
3633                 goto out;
3634         /* Set up identity-mapping pagetable for EPT in real mode */
3635         for (i = 0; i < PT32_ENT_PER_PAGE; i++) {
3636                 tmp = (i << 22) + (_PAGE_PRESENT | _PAGE_RW | _PAGE_USER |
3637                         _PAGE_ACCESSED | _PAGE_DIRTY | _PAGE_PSE);
3638                 r = kvm_write_guest_page(kvm, identity_map_pfn,
3639                                 &tmp, i * sizeof(tmp), sizeof(tmp));
3640                 if (r < 0)
3641                         goto out;
3642         }
3643         kvm->arch.ept_identity_pagetable_done = true;
3644         ret = 1;
3645 out:
3646         srcu_read_unlock(&kvm->srcu, idx);
3647         return ret;
3648 }
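
/*
 * Each of the 1024 entries written above is a 4MB PSE page-directory
 * entry mapping guest physical i << 22 onto itself; with the flag bits
 * used here the first two entries would read 0x000000e7 and 0x004000e7,
 * so the single page covers the low 4GB for the real-mode EPT case.
 */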
3649
3650 static void seg_setup(int seg)
3651 {
3652         const struct kvm_vmx_segment_field *sf = &kvm_vmx_segment_fields[seg];
3653         unsigned int ar;
3654
3655         vmcs_write16(sf->selector, 0);
3656         vmcs_writel(sf->base, 0);
3657         vmcs_write32(sf->limit, 0xffff);
3658         ar = 0x93;
3659         if (seg == VCPU_SREG_CS)
3660                 ar |= 0x08; /* code segment */
3661
3662         vmcs_write32(sf->ar_bytes, ar);
3663 }
3664
3665 static int alloc_apic_access_page(struct kvm *kvm)
3666 {
3667         struct page *page;
3668         struct kvm_userspace_memory_region kvm_userspace_mem;
3669         int r = 0;
3670
3671         mutex_lock(&kvm->slots_lock);
3672         if (kvm->arch.apic_access_page)
3673                 goto out;
3674         kvm_userspace_mem.slot = APIC_ACCESS_PAGE_PRIVATE_MEMSLOT;
3675         kvm_userspace_mem.flags = 0;
3676         kvm_userspace_mem.guest_phys_addr = 0xfee00000ULL;
3677         kvm_userspace_mem.memory_size = PAGE_SIZE;
3678         r = __kvm_set_memory_region(kvm, &kvm_userspace_mem, false);
3679         if (r)
3680                 goto out;
3681
3682         page = gfn_to_page(kvm, 0xfee00);
3683         if (is_error_page(page)) {
3684                 r = -EFAULT;
3685                 goto out;
3686         }
3687
3688         kvm->arch.apic_access_page = page;
3689 out:
3690         mutex_unlock(&kvm->slots_lock);
3691         return r;
3692 }
3693
3694 static int alloc_identity_pagetable(struct kvm *kvm)
3695 {
3696         struct page *page;
3697         struct kvm_userspace_memory_region kvm_userspace_mem;
3698         int r = 0;
3699
3700         mutex_lock(&kvm->slots_lock);
3701         if (kvm->arch.ept_identity_pagetable)
3702                 goto out;
3703         kvm_userspace_mem.slot = IDENTITY_PAGETABLE_PRIVATE_MEMSLOT;
3704         kvm_userspace_mem.flags = 0;
3705         kvm_userspace_mem.guest_phys_addr =
3706                 kvm->arch.ept_identity_map_addr;
3707         kvm_userspace_mem.memory_size = PAGE_SIZE;
3708         r = __kvm_set_memory_region(kvm, &kvm_userspace_mem, false);
3709         if (r)
3710                 goto out;
3711
3712         page = gfn_to_page(kvm, kvm->arch.ept_identity_map_addr >> PAGE_SHIFT);
3713         if (is_error_page(page)) {
3714                 r = -EFAULT;
3715                 goto out;
3716         }
3717
3718         kvm->arch.ept_identity_pagetable = page;
3719 out:
3720         mutex_unlock(&kvm->slots_lock);
3721         return r;
3722 }
3723
3724 static void allocate_vpid(struct vcpu_vmx *vmx)
3725 {
3726         int vpid;
3727
3728         vmx->vpid = 0;
3729         if (!enable_vpid)
3730                 return;
3731         spin_lock(&vmx_vpid_lock);
3732         vpid = find_first_zero_bit(vmx_vpid_bitmap, VMX_NR_VPIDS);
3733         if (vpid < VMX_NR_VPIDS) {
3734                 vmx->vpid = vpid;
3735                 __set_bit(vpid, vmx_vpid_bitmap);
3736         }
3737         spin_unlock(&vmx_vpid_lock);
3738 }
3739
3740 static void free_vpid(struct vcpu_vmx *vmx)
3741 {
3742         if (!enable_vpid)
3743                 return;
3744         spin_lock(&vmx_vpid_lock);
3745         if (vmx->vpid != 0)
3746                 __clear_bit(vmx->vpid, vmx_vpid_bitmap);
3747         spin_unlock(&vmx_vpid_lock);
3748 }
3749
3750 #define MSR_TYPE_R      1
3751 #define MSR_TYPE_W      2
3752 static void __vmx_disable_intercept_for_msr(unsigned long *msr_bitmap,
3753                                                 u32 msr, int type)
3754 {
3755         int f = sizeof(unsigned long);
3756
3757         if (!cpu_has_vmx_msr_bitmap())
3758                 return;
3759
3760         /*
3761          * See Intel PRM Vol. 3, 20.6.9 (MSR-Bitmap Address). Early manuals
3762          * have the write-low and read-high bitmap offsets the wrong way round.
3763          * We can control MSRs 0x00000000-0x00001fff and 0xc0000000-0xc0001fff.
3764          */
3765         if (msr <= 0x1fff) {
3766                 if (type & MSR_TYPE_R)
3767                         /* read-low */
3768                         __clear_bit(msr, msr_bitmap + 0x000 / f);
3769
3770                 if (type & MSR_TYPE_W)
3771                         /* write-low */
3772                         __clear_bit(msr, msr_bitmap + 0x800 / f);
3773
3774         } else if ((msr >= 0xc0000000) && (msr <= 0xc0001fff)) {
3775                 msr &= 0x1fff;
3776                 if (type & MSR_TYPE_R)
3777                         /* read-high */
3778                         __clear_bit(msr, msr_bitmap + 0x400 / f);
3779
3780                 if (type & MSR_TYPE_W)
3781                         /* write-high */
3782                         __clear_bit(msr, msr_bitmap + 0xc00 / f);
3783
3784         }
3785 }
3786
3787 static void __vmx_enable_intercept_for_msr(unsigned long *msr_bitmap,
3788                                                 u32 msr, int type)
3789 {
3790         int f = sizeof(unsigned long);
3791
3792         if (!cpu_has_vmx_msr_bitmap())
3793                 return;
3794
3795         /*
3796          * See Intel PRM Vol. 3, 20.6.9 (MSR-Bitmap Address). Early manuals
3797          * have the write-low and read-high bitmap offsets the wrong way round.
3798          * We can control MSRs 0x00000000-0x00001fff and 0xc0000000-0xc0001fff.
3799          */
3800         if (msr <= 0x1fff) {
3801                 if (type & MSR_TYPE_R)
3802                         /* read-low */
3803                         __set_bit(msr, msr_bitmap + 0x000 / f);
3804
3805                 if (type & MSR_TYPE_W)
3806                         /* write-low */
3807                         __set_bit(msr, msr_bitmap + 0x800 / f);
3808
3809         } else if ((msr >= 0xc0000000) && (msr <= 0xc0001fff)) {
3810                 msr &= 0x1fff;
3811                 if (type & MSR_TYPE_R)
3812                         /* read-high */
3813                         __set_bit(msr, msr_bitmap + 0x400 / f);
3814
3815                 if (type & MSR_TYPE_W)
3816                         /* write-high */
3817                         __set_bit(msr, msr_bitmap + 0xc00 / f);
3818
3819         }
3820 }
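
/*
 * Bitmap geometry used by the two helpers above: read-low at 0x000 and
 * write-low at 0x800 cover MSRs 0x00000000-0x00001fff, read-high at
 * 0x400 and write-high at 0xc00 cover 0xc0000000-0xc0001fff, each
 * indexed by the low 13 bits of the MSR.  Passing MSR_EFER (0xc0000080),
 * for instance, would touch bit 0x80 of the high regions.
 */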
3821
3822 static void vmx_disable_intercept_for_msr(u32 msr, bool longmode_only)
3823 {
3824         if (!longmode_only)
3825                 __vmx_disable_intercept_for_msr(vmx_msr_bitmap_legacy,
3826                                                 msr, MSR_TYPE_R | MSR_TYPE_W);
3827         __vmx_disable_intercept_for_msr(vmx_msr_bitmap_longmode,
3828                                                 msr, MSR_TYPE_R | MSR_TYPE_W);
3829 }
3830
3831 static void vmx_enable_intercept_msr_read_x2apic(u32 msr)
3832 {
3833         __vmx_enable_intercept_for_msr(vmx_msr_bitmap_legacy_x2apic,
3834                         msr, MSR_TYPE_R);
3835         __vmx_enable_intercept_for_msr(vmx_msr_bitmap_longmode_x2apic,
3836                         msr, MSR_TYPE_R);
3837 }
3838
3839 static void vmx_disable_intercept_msr_read_x2apic(u32 msr)
3840 {
3841         __vmx_disable_intercept_for_msr(vmx_msr_bitmap_legacy_x2apic,
3842                         msr, MSR_TYPE_R);
3843         __vmx_disable_intercept_for_msr(vmx_msr_bitmap_longmode_x2apic,
3844                         msr, MSR_TYPE_R);
3845 }
3846
3847 static void vmx_disable_intercept_msr_write_x2apic(u32 msr)
3848 {
3849         __vmx_disable_intercept_for_msr(vmx_msr_bitmap_legacy_x2apic,
3850                         msr, MSR_TYPE_W);
3851         __vmx_disable_intercept_for_msr(vmx_msr_bitmap_longmode_x2apic,
3852                         msr, MSR_TYPE_W);
3853 }
3854
3855 /*
3856  * Set up the vmcs's constant host-state fields, i.e., host-state fields that
3857  * will not change in the lifetime of the guest.
3858  * Note that host-state that does change is set elsewhere. E.g., host-state
3859  * that is set differently for each CPU is set in vmx_vcpu_load(), not here.
3860  */
3861 static void vmx_set_constant_host_state(void)
3862 {
3863         u32 low32, high32;
3864         unsigned long tmpl;
3865         struct desc_ptr dt;
3866
3867         vmcs_writel(HOST_CR0, read_cr0() & ~X86_CR0_TS);  /* 22.2.3 */
3868         vmcs_writel(HOST_CR4, read_cr4());  /* 22.2.3, 22.2.5 */
3869         vmcs_writel(HOST_CR3, read_cr3());  /* 22.2.3  FIXME: shadow tables */
3870
3871         vmcs_write16(HOST_CS_SELECTOR, __KERNEL_CS);  /* 22.2.4 */
3872 #ifdef CONFIG_X86_64
3873         /*
3874          * Load null selectors, so we can avoid reloading them in
3875          * __vmx_load_host_state(), in case userspace uses the null selectors
3876          * too (the expected case).
3877          */
3878         vmcs_write16(HOST_DS_SELECTOR, 0);
3879         vmcs_write16(HOST_ES_SELECTOR, 0);
3880 #else
3881         vmcs_write16(HOST_DS_SELECTOR, __KERNEL_DS);  /* 22.2.4 */
3882         vmcs_write16(HOST_ES_SELECTOR, __KERNEL_DS);  /* 22.2.4 */
3883 #endif
3884         vmcs_write16(HOST_SS_SELECTOR, __KERNEL_DS);  /* 22.2.4 */
3885         vmcs_write16(HOST_TR_SELECTOR, GDT_ENTRY_TSS*8);  /* 22.2.4 */
3886
3887         native_store_idt(&dt);
3888         vmcs_writel(HOST_IDTR_BASE, dt.address);   /* 22.2.4 */
3889
3890         vmcs_writel(HOST_RIP, vmx_return); /* 22.2.5 */
3891
3892         rdmsr(MSR_IA32_SYSENTER_CS, low32, high32);
3893         vmcs_write32(HOST_IA32_SYSENTER_CS, low32);
3894         rdmsrl(MSR_IA32_SYSENTER_EIP, tmpl);
3895         vmcs_writel(HOST_IA32_SYSENTER_EIP, tmpl);   /* 22.2.3 */
3896
3897         if (vmcs_config.vmexit_ctrl & VM_EXIT_LOAD_IA32_PAT) {
3898                 rdmsr(MSR_IA32_CR_PAT, low32, high32);
3899                 vmcs_write64(HOST_IA32_PAT, low32 | ((u64) high32 << 32));
3900         }
3901 }
3902
3903 static void set_cr4_guest_host_mask(struct vcpu_vmx *vmx)
3904 {
3905         vmx->vcpu.arch.cr4_guest_owned_bits = KVM_CR4_GUEST_OWNED_BITS;
3906         if (enable_ept)
3907                 vmx->vcpu.arch.cr4_guest_owned_bits |= X86_CR4_PGE;
3908         if (is_guest_mode(&vmx->vcpu))
3909                 vmx->vcpu.arch.cr4_guest_owned_bits &=
3910                         ~get_vmcs12(&vmx->vcpu)->cr4_guest_host_mask;
3911         vmcs_writel(CR4_GUEST_HOST_MASK, ~vmx->vcpu.arch.cr4_guest_owned_bits);
3912 }
3913
3914 static u32 vmx_exec_control(struct vcpu_vmx *vmx)
3915 {
3916         u32 exec_control = vmcs_config.cpu_based_exec_ctrl;
3917         if (!vm_need_tpr_shadow(vmx->vcpu.kvm)) {
3918                 exec_control &= ~CPU_BASED_TPR_SHADOW;
3919 #ifdef CONFIG_X86_64
3920                 exec_control |= CPU_BASED_CR8_STORE_EXITING |
3921                                 CPU_BASED_CR8_LOAD_EXITING;
3922 #endif
3923         }
3924         if (!enable_ept)
3925                 exec_control |= CPU_BASED_CR3_STORE_EXITING |
3926                                 CPU_BASED_CR3_LOAD_EXITING  |
3927                                 CPU_BASED_INVLPG_EXITING;
3928         return exec_control;
3929 }
3930
3931 static u32 vmx_secondary_exec_control(struct vcpu_vmx *vmx)
3932 {
3933         u32 exec_control = vmcs_config.cpu_based_2nd_exec_ctrl;
3934         if (!vm_need_virtualize_apic_accesses(vmx->vcpu.kvm))
3935                 exec_control &= ~SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES;
3936         if (vmx->vpid == 0)
3937                 exec_control &= ~SECONDARY_EXEC_ENABLE_VPID;
3938         if (!enable_ept) {
3939                 exec_control &= ~SECONDARY_EXEC_ENABLE_EPT;
3940                 enable_unrestricted_guest = 0;
3941                 /* Enabling INVPCID for non-EPT guests may cause a performance regression. */
3942                 exec_control &= ~SECONDARY_EXEC_ENABLE_INVPCID;
3943         }
3944         if (!enable_unrestricted_guest)
3945                 exec_control &= ~SECONDARY_EXEC_UNRESTRICTED_GUEST;
3946         if (!ple_gap)
3947                 exec_control &= ~SECONDARY_EXEC_PAUSE_LOOP_EXITING;
3948         if (!enable_apicv_reg || !irqchip_in_kernel(vmx->vcpu.kvm))
3949                 exec_control &= ~SECONDARY_EXEC_APIC_REGISTER_VIRT;
3950         exec_control &= ~SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE;
3951         return exec_control;
3952 }
3953
3954 static void ept_set_mmio_spte_mask(void)
3955 {
3956         /*
3957          * EPT Misconfigurations can be generated if the value of bits 2:0
3958          * of an EPT paging-structure entry is 110b (write/execute).
3959          * Also, the magic bits (0xffull << 49) are set so that an MMIO
3960          * spte can be identified quickly.
3961          */
3962         kvm_mmu_set_mmio_spte_mask(0xffull << 49 | 0x6ull);
3963 }
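
/*
 * The mask set above works out to 0x01fe000000000006: MMIO sptes carry
 * bits 56:49 plus the write/execute (110b) combination, which EPT reports
 * as a misconfiguration rather than a violation.
 */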
3964
3965 /*
3966  * Sets up the vmcs for emulated real mode.
3967  */
3968 static int vmx_vcpu_setup(struct vcpu_vmx *vmx)
3969 {
3970 #ifdef CONFIG_X86_64
3971         unsigned long a;
3972 #endif
3973         int i;
3974
3975         /* I/O */
3976         vmcs_write64(IO_BITMAP_A, __pa(vmx_io_bitmap_a));
3977         vmcs_write64(IO_BITMAP_B, __pa(vmx_io_bitmap_b));
3978
3979         if (cpu_has_vmx_msr_bitmap())
3980                 vmcs_write64(MSR_BITMAP, __pa(vmx_msr_bitmap_legacy));
3981
3982         vmcs_write64(VMCS_LINK_POINTER, -1ull); /* 22.3.1.5 */
3983
3984         /* Control */
3985         vmcs_write32(PIN_BASED_VM_EXEC_CONTROL,
3986                 vmcs_config.pin_based_exec_ctrl);
3987
3988         vmcs_write32(CPU_BASED_VM_EXEC_CONTROL, vmx_exec_control(vmx));
3989
3990         if (cpu_has_secondary_exec_ctrls()) {
3991                 vmcs_write32(SECONDARY_VM_EXEC_CONTROL,
3992                                 vmx_secondary_exec_control(vmx));
3993         }
3994
3995         if (ple_gap) {
3996                 vmcs_write32(PLE_GAP, ple_gap);
3997                 vmcs_write32(PLE_WINDOW, ple_window);
3998         }
3999
4000         vmcs_write32(PAGE_FAULT_ERROR_CODE_MASK, 0);
4001         vmcs_write32(PAGE_FAULT_ERROR_CODE_MATCH, 0);
4002         vmcs_write32(CR3_TARGET_COUNT, 0);           /* 22.2.1 */
4003
4004         vmcs_write16(HOST_FS_SELECTOR, 0);            /* 22.2.4 */
4005         vmcs_write16(HOST_GS_SELECTOR, 0);            /* 22.2.4 */
4006         vmx_set_constant_host_state();
4007 #ifdef CONFIG_X86_64
4008         rdmsrl(MSR_FS_BASE, a);
4009         vmcs_writel(HOST_FS_BASE, a); /* 22.2.4 */
4010         rdmsrl(MSR_GS_BASE, a);
4011         vmcs_writel(HOST_GS_BASE, a); /* 22.2.4 */
4012 #else
4013         vmcs_writel(HOST_FS_BASE, 0); /* 22.2.4 */
4014         vmcs_writel(HOST_GS_BASE, 0); /* 22.2.4 */
4015 #endif
4016
4017         vmcs_write32(VM_EXIT_MSR_STORE_COUNT, 0);
4018         vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, 0);
4019         vmcs_write64(VM_EXIT_MSR_LOAD_ADDR, __pa(vmx->msr_autoload.host));
4020         vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, 0);
4021         vmcs_write64(VM_ENTRY_MSR_LOAD_ADDR, __pa(vmx->msr_autoload.guest));
4022
4023         if (vmcs_config.vmentry_ctrl & VM_ENTRY_LOAD_IA32_PAT) {
4024                 u32 msr_low, msr_high;
4025                 u64 host_pat;
4026                 rdmsr(MSR_IA32_CR_PAT, msr_low, msr_high);
4027                 host_pat = msr_low | ((u64) msr_high << 32);
4028                 /* Write the default value, following the host PAT */
4029                 vmcs_write64(GUEST_IA32_PAT, host_pat);
4030                 /* Keep arch.pat in sync with GUEST_IA32_PAT */
4031                 vmx->vcpu.arch.pat = host_pat;
4032         }
4033
4034         for (i = 0; i < NR_VMX_MSR; ++i) {
4035                 u32 index = vmx_msr_index[i];
4036                 u32 data_low, data_high;
4037                 int j = vmx->nmsrs;
4038
4039                 if (rdmsr_safe(index, &data_low, &data_high) < 0)
4040                         continue;
4041                 if (wrmsr_safe(index, data_low, data_high) < 0)
4042                         continue;
4043                 vmx->guest_msrs[j].index = i;
4044                 vmx->guest_msrs[j].data = 0;
4045                 vmx->guest_msrs[j].mask = -1ull;
4046                 ++vmx->nmsrs;
4047         }
4048
4049         vmcs_write32(VM_EXIT_CONTROLS, vmcs_config.vmexit_ctrl);
4050
4051         /* 22.2.1, 20.8.1 */
4052         vmcs_write32(VM_ENTRY_CONTROLS, vmcs_config.vmentry_ctrl);
4053
4054         vmcs_writel(CR0_GUEST_HOST_MASK, ~0UL);
4055         set_cr4_guest_host_mask(vmx);
4056
4057         return 0;
4058 }
4059
4060 static int vmx_vcpu_reset(struct kvm_vcpu *vcpu)
4061 {
4062         struct vcpu_vmx *vmx = to_vmx(vcpu);
4063         u64 msr;
4064         int ret;
4065
4066         vmx->rmode.vm86_active = 0;
4067
4068         vmx->soft_vnmi_blocked = 0;
4069
4070         vmx->vcpu.arch.regs[VCPU_REGS_RDX] = get_rdx_init_val();
4071         kvm_set_cr8(&vmx->vcpu, 0);
4072         msr = 0xfee00000 | MSR_IA32_APICBASE_ENABLE;
4073         if (kvm_vcpu_is_bsp(&vmx->vcpu))
4074                 msr |= MSR_IA32_APICBASE_BSP;
4075         kvm_set_apic_base(&vmx->vcpu, msr);
4076
4077         vmx_segment_cache_clear(vmx);
4078
4079         seg_setup(VCPU_SREG_CS);
4080         if (kvm_vcpu_is_bsp(&vmx->vcpu))
4081                 vmcs_write16(GUEST_CS_SELECTOR, 0xf000);
4082         else {
4083                 vmcs_write16(GUEST_CS_SELECTOR, vmx->vcpu.arch.sipi_vector << 8);
4084                 vmcs_writel(GUEST_CS_BASE, vmx->vcpu.arch.sipi_vector << 12);
4085         }
4086
4087         seg_setup(VCPU_SREG_DS);
4088         seg_setup(VCPU_SREG_ES);
4089         seg_setup(VCPU_SREG_FS);
4090         seg_setup(VCPU_SREG_GS);
4091         seg_setup(VCPU_SREG_SS);
4092
4093         vmcs_write16(GUEST_TR_SELECTOR, 0);
4094         vmcs_writel(GUEST_TR_BASE, 0);
4095         vmcs_write32(GUEST_TR_LIMIT, 0xffff);
4096         vmcs_write32(GUEST_TR_AR_BYTES, 0x008b);
4097
4098         vmcs_write16(GUEST_LDTR_SELECTOR, 0);
4099         vmcs_writel(GUEST_LDTR_BASE, 0);
4100         vmcs_write32(GUEST_LDTR_LIMIT, 0xffff);
4101         vmcs_write32(GUEST_LDTR_AR_BYTES, 0x00082);
4102
4103         vmcs_write32(GUEST_SYSENTER_CS, 0);
4104         vmcs_writel(GUEST_SYSENTER_ESP, 0);
4105         vmcs_writel(GUEST_SYSENTER_EIP, 0);
4106
4107         vmcs_writel(GUEST_RFLAGS, 0x02);
4108         if (kvm_vcpu_is_bsp(&vmx->vcpu))
4109                 kvm_rip_write(vcpu, 0xfff0);
4110         else
4111                 kvm_rip_write(vcpu, 0);
4112
4113         vmcs_writel(GUEST_GDTR_BASE, 0);
4114         vmcs_write32(GUEST_GDTR_LIMIT, 0xffff);
4115
4116         vmcs_writel(GUEST_IDTR_BASE, 0);
4117         vmcs_write32(GUEST_IDTR_LIMIT, 0xffff);
4118
4119         vmcs_write32(GUEST_ACTIVITY_STATE, GUEST_ACTIVITY_ACTIVE);
4120         vmcs_write32(GUEST_INTERRUPTIBILITY_INFO, 0);
4121         vmcs_write32(GUEST_PENDING_DBG_EXCEPTIONS, 0);
4122
4123         /* Special registers */
4124         vmcs_write64(GUEST_IA32_DEBUGCTL, 0);
4125
4126         setup_msrs(vmx);
4127
4128         vmcs_write32(VM_ENTRY_INTR_INFO_FIELD, 0);  /* 22.2.1 */
4129
4130         if (cpu_has_vmx_tpr_shadow()) {
4131                 vmcs_write64(VIRTUAL_APIC_PAGE_ADDR, 0);
4132                 if (vm_need_tpr_shadow(vmx->vcpu.kvm))
4133                         vmcs_write64(VIRTUAL_APIC_PAGE_ADDR,
4134                                      __pa(vmx->vcpu.arch.apic->regs));
4135                 vmcs_write32(TPR_THRESHOLD, 0);
4136         }
4137
4138         if (vm_need_virtualize_apic_accesses(vmx->vcpu.kvm))
4139                 vmcs_write64(APIC_ACCESS_ADDR,
4140                              page_to_phys(vmx->vcpu.kvm->arch.apic_access_page));
4141
4142         if (vmx->vpid != 0)
4143                 vmcs_write16(VIRTUAL_PROCESSOR_ID, vmx->vpid);
4144
4145         vmx->vcpu.arch.cr0 = X86_CR0_NW | X86_CR0_CD | X86_CR0_ET;
4146         vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
4147         vmx_set_cr0(&vmx->vcpu, kvm_read_cr0(vcpu)); /* enter rmode */
4148         srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
4149         vmx_set_cr4(&vmx->vcpu, 0);
4150         vmx_set_efer(&vmx->vcpu, 0);
4151         vmx_fpu_activate(&vmx->vcpu);
4152         update_exception_bitmap(&vmx->vcpu);
4153
4154         vpid_sync_context(vmx);
4155
4156         ret = 0;
4157
4158         return ret;
4159 }
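
/*
 * For the BSP the reset state programmed above (CS selector 0xf000,
 * RIP 0xfff0, RFLAGS 0x02) corresponds to the traditional real-mode
 * reset vector f000:fff0; APs instead start at the segment derived from
 * the SIPI vector (selector sipi_vector << 8, RIP 0).
 */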
4160
4161 /*
4162  * In nested virtualization, check if L1 asked to exit on external interrupts.
4163  * For most existing hypervisors, this will always return true.
4164  */
4165 static bool nested_exit_on_intr(struct kvm_vcpu *vcpu)
4166 {
4167         return get_vmcs12(vcpu)->pin_based_vm_exec_control &
4168                 PIN_BASED_EXT_INTR_MASK;
4169 }
4170
4171 static void enable_irq_window(struct kvm_vcpu *vcpu)
4172 {
4173         u32 cpu_based_vm_exec_control;
4174         if (is_guest_mode(vcpu) && nested_exit_on_intr(vcpu)) {
4175                 /*
4176                  * We get here if vmx_interrupt_allowed() said we can't
4177                  * inject to L1 now because L2 must run. Ask L2 to exit
4178                  * right after entry, so we can inject to L1 more promptly.
4179                  */
4180                 kvm_make_request(KVM_REQ_IMMEDIATE_EXIT, vcpu);
4181                 return;
4182         }
4183
4184         cpu_based_vm_exec_control = vmcs_read32(CPU_BASED_VM_EXEC_CONTROL);
4185         cpu_based_vm_exec_control |= CPU_BASED_VIRTUAL_INTR_PENDING;
4186         vmcs_write32(CPU_BASED_VM_EXEC_CONTROL, cpu_based_vm_exec_control);
4187 }
4188
4189 static void enable_nmi_window(struct kvm_vcpu *vcpu)
4190 {
4191         u32 cpu_based_vm_exec_control;
4192
4193         if (!cpu_has_virtual_nmis()) {
4194                 enable_irq_window(vcpu);
4195                 return;
4196         }
4197
4198         if (vmcs_read32(GUEST_INTERRUPTIBILITY_INFO) & GUEST_INTR_STATE_STI) {
4199                 enable_irq_window(vcpu);
4200                 return;
4201         }
4202         cpu_based_vm_exec_control = vmcs_read32(CPU_BASED_VM_EXEC_CONTROL);
4203         cpu_based_vm_exec_control |= CPU_BASED_VIRTUAL_NMI_PENDING;
4204         vmcs_write32(CPU_BASED_VM_EXEC_CONTROL, cpu_based_vm_exec_control);
4205 }
4206
4207 static void vmx_inject_irq(struct kvm_vcpu *vcpu)
4208 {
4209         struct vcpu_vmx *vmx = to_vmx(vcpu);
4210         uint32_t intr;
4211         int irq = vcpu->arch.interrupt.nr;
4212
4213         trace_kvm_inj_virq(irq);
4214
4215         ++vcpu->stat.irq_injections;
4216         if (vmx->rmode.vm86_active) {
4217                 int inc_eip = 0;
4218                 if (vcpu->arch.interrupt.soft)
4219                         inc_eip = vcpu->arch.event_exit_inst_len;
4220                 if (kvm_inject_realmode_interrupt(vcpu, irq, inc_eip) != EMULATE_DONE)
4221                         kvm_make_request(KVM_REQ_TRIPLE_FAULT, vcpu);
4222                 return;
4223         }
4224         intr = irq | INTR_INFO_VALID_MASK;
4225         if (vcpu->arch.interrupt.soft) {
4226                 intr |= INTR_TYPE_SOFT_INTR;
4227                 vmcs_write32(VM_ENTRY_INSTRUCTION_LEN,
4228                              vmx->vcpu.arch.event_exit_inst_len);
4229         } else
4230                 intr |= INTR_TYPE_EXT_INTR;
4231         vmcs_write32(VM_ENTRY_INTR_INFO_FIELD, intr);
4232 }
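
/*
 * The intr word built above follows the VM-entry interruption-information
 * format: bits 7:0 vector, bits 10:8 type, bit 31 valid.  Injecting
 * external vector 0x20, for example, would write 0x80000020, while a soft
 * interrupt additionally carries INTR_TYPE_SOFT_INTR and the instruction
 * length in VM_ENTRY_INSTRUCTION_LEN.
 */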
4233
4234 static void vmx_inject_nmi(struct kvm_vcpu *vcpu)
4235 {
4236         struct vcpu_vmx *vmx = to_vmx(vcpu);
4237
4238         if (is_guest_mode(vcpu))
4239                 return;
4240
4241         if (!cpu_has_virtual_nmis()) {
4242                 /*
4243                  * Tracking the NMI-blocked state in software is built upon
4244                  * finding the next open IRQ window. This, in turn, depends on
4245                  * well-behaving guests: They have to keep IRQs disabled at
4246                  * least as long as the NMI handler runs. Otherwise we may
4247                  * cause NMI nesting, maybe breaking the guest. But as this is
4248                  * highly unlikely, we can live with the residual risk.
4249                  */
4250                 vmx->soft_vnmi_blocked = 1;
4251                 vmx->vnmi_blocked_time = 0;
4252         }
4253
4254         ++vcpu->stat.nmi_injections;
4255         vmx->nmi_known_unmasked = false;
4256         if (vmx->rmode.vm86_active) {
4257                 if (kvm_inject_realmode_interrupt(vcpu, NMI_VECTOR, 0) != EMULATE_DONE)
4258                         kvm_make_request(KVM_REQ_TRIPLE_FAULT, vcpu);
4259                 return;
4260         }
4261         vmcs_write32(VM_ENTRY_INTR_INFO_FIELD,
4262                         INTR_TYPE_NMI_INTR | INTR_INFO_VALID_MASK | NMI_VECTOR);
4263 }
4264
4265 static int vmx_nmi_allowed(struct kvm_vcpu *vcpu)
4266 {
4267         if (!cpu_has_virtual_nmis() && to_vmx(vcpu)->soft_vnmi_blocked)
4268                 return 0;
4269
4270         return  !(vmcs_read32(GUEST_INTERRUPTIBILITY_INFO) &
4271                   (GUEST_INTR_STATE_MOV_SS | GUEST_INTR_STATE_STI
4272                    | GUEST_INTR_STATE_NMI));
4273 }
4274
4275 static bool vmx_get_nmi_mask(struct kvm_vcpu *vcpu)
4276 {
4277         if (!cpu_has_virtual_nmis())
4278                 return to_vmx(vcpu)->soft_vnmi_blocked;
4279         if (to_vmx(vcpu)->nmi_known_unmasked)
4280                 return false;
4281         return vmcs_read32(GUEST_INTERRUPTIBILITY_INFO) & GUEST_INTR_STATE_NMI;
4282 }
4283
4284 static void vmx_set_nmi_mask(struct kvm_vcpu *vcpu, bool masked)
4285 {
4286         struct vcpu_vmx *vmx = to_vmx(vcpu);
4287
4288         if (!cpu_has_virtual_nmis()) {
4289                 if (vmx->soft_vnmi_blocked != masked) {
4290                         vmx->soft_vnmi_blocked = masked;
4291                         vmx->vnmi_blocked_time = 0;
4292                 }
4293         } else {
4294                 vmx->nmi_known_unmasked = !masked;
4295                 if (masked)
4296                         vmcs_set_bits(GUEST_INTERRUPTIBILITY_INFO,
4297                                       GUEST_INTR_STATE_NMI);
4298                 else
4299                         vmcs_clear_bits(GUEST_INTERRUPTIBILITY_INFO,
4300                                         GUEST_INTR_STATE_NMI);
4301         }
4302 }
4303
4304 static int vmx_interrupt_allowed(struct kvm_vcpu *vcpu)
4305 {
4306         if (is_guest_mode(vcpu) && nested_exit_on_intr(vcpu)) {
4307                 struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
4308                 if (to_vmx(vcpu)->nested.nested_run_pending ||
4309                     (vmcs12->idt_vectoring_info_field &
4310                      VECTORING_INFO_VALID_MASK))
4311                         return 0;
4312                 nested_vmx_vmexit(vcpu);
4313                 vmcs12->vm_exit_reason = EXIT_REASON_EXTERNAL_INTERRUPT;
4314                 vmcs12->vm_exit_intr_info = 0;
4315                 /* fall through to normal code, but now in L1, not L2 */
4316         }
4317
4318         return (vmcs_readl(GUEST_RFLAGS) & X86_EFLAGS_IF) &&
4319                 !(vmcs_read32(GUEST_INTERRUPTIBILITY_INFO) &
4320                         (GUEST_INTR_STATE_STI | GUEST_INTR_STATE_MOV_SS));
4321 }
4322
4323 static int vmx_set_tss_addr(struct kvm *kvm, unsigned int addr)
4324 {
4325         int ret;
4326         struct kvm_userspace_memory_region tss_mem = {
4327                 .slot = TSS_PRIVATE_MEMSLOT,
4328                 .guest_phys_addr = addr,
4329                 .memory_size = PAGE_SIZE * 3,
4330                 .flags = 0,
4331         };
4332
4333         ret = kvm_set_memory_region(kvm, &tss_mem, false);
4334         if (ret)
4335                 return ret;
4336         kvm->arch.tss_addr = addr;
4337         if (!init_rmode_tss(kvm))
4338                 return  -ENOMEM;
4339
4340         return 0;
4341 }
4342
4343 static bool rmode_exception(struct kvm_vcpu *vcpu, int vec)
4344 {
4345         switch (vec) {
4346         case BP_VECTOR:
4347                 /*
4348                  * Update instruction length as we may reinject the exception
4349                  * from user space while in guest debugging mode.
4350                  */
4351                 to_vmx(vcpu)->vcpu.arch.event_exit_inst_len =
4352                         vmcs_read32(VM_EXIT_INSTRUCTION_LEN);
4353                 if (vcpu->guest_debug & KVM_GUESTDBG_USE_SW_BP)
4354                         return false;
4355                 /* fall through */
4356         case DB_VECTOR:
4357                 if (vcpu->guest_debug &
4358                         (KVM_GUESTDBG_SINGLESTEP | KVM_GUESTDBG_USE_HW_BP))
4359                         return false;
4360                 /* fall through */
4361         case DE_VECTOR:
4362         case OF_VECTOR:
4363         case BR_VECTOR:
4364         case UD_VECTOR:
4365         case DF_VECTOR:
4366         case SS_VECTOR:
4367         case GP_VECTOR:
4368         case MF_VECTOR:
4369                 return true;
4370         break;
4371         }
4372         return false;
4373 }
4374
4375 static int handle_rmode_exception(struct kvm_vcpu *vcpu,
4376                                   int vec, u32 err_code)
4377 {
4378         /*
4379          * Instructions with the address-size override prefix (opcode 0x67)
4380          * cause a #SS fault with error code 0 in VM86 mode.
4381          */
4382         if (((vec == GP_VECTOR) || (vec == SS_VECTOR)) && err_code == 0) {
4383                 if (emulate_instruction(vcpu, 0) == EMULATE_DONE) {
4384                         if (vcpu->arch.halt_request) {
4385                                 vcpu->arch.halt_request = 0;
4386                                 return kvm_emulate_halt(vcpu);
4387                         }
4388                         return 1;
4389                 }
4390                 return 0;
4391         }
4392
4393         /*
4394          * Forward all other exceptions that are valid in real mode.
4395          * FIXME: Breaks guest debugging in real mode, needs to be fixed with
4396          *        the required debugging infrastructure rework.
4397          */
4398         kvm_queue_exception(vcpu, vec);
4399         return 1;
4400 }
4401
4402 /*
4403  * Trigger machine check on the host. We assume all the MSRs are already set up
4404  * by the CPU and that we still run on the same CPU as the MCE occurred on.
4405  * We pass a fake environment to the machine check handler because we want
4406  * the guest to be always treated like user space, no matter what context
4407  * it used internally.
4408  */
4409 static void kvm_machine_check(void)
4410 {
4411 #if defined(CONFIG_X86_MCE) && defined(CONFIG_X86_64)
4412         struct pt_regs regs = {
4413                 .cs = 3, /* Fake ring 3 no matter what the guest ran on */
4414                 .flags = X86_EFLAGS_IF,
4415         };
4416
4417         do_machine_check(&regs, 0);
4418 #endif
4419 }
4420
4421 static int handle_machine_check(struct kvm_vcpu *vcpu)
4422 {
4423         /* already handled by vcpu_run */
4424         return 1;
4425 }
4426
4427 static int handle_exception(struct kvm_vcpu *vcpu)
4428 {
4429         struct vcpu_vmx *vmx = to_vmx(vcpu);
4430         struct kvm_run *kvm_run = vcpu->run;
4431         u32 intr_info, ex_no, error_code;
4432         unsigned long cr2, rip, dr6;
4433         u32 vect_info;
4434         enum emulation_result er;
4435
4436         vect_info = vmx->idt_vectoring_info;
4437         intr_info = vmx->exit_intr_info;
4438
4439         if (is_machine_check(intr_info))
4440                 return handle_machine_check(vcpu);
4441
4442         if ((intr_info & INTR_INFO_INTR_TYPE_MASK) == INTR_TYPE_NMI_INTR)
4443                 return 1;  /* already handled by vmx_vcpu_run() */
4444
4445         if (is_no_device(intr_info)) {
4446                 vmx_fpu_activate(vcpu);
4447                 return 1;
4448         }
4449
4450         if (is_invalid_opcode(intr_info)) {
4451                 er = emulate_instruction(vcpu, EMULTYPE_TRAP_UD);
4452                 if (er != EMULATE_DONE)
4453                         kvm_queue_exception(vcpu, UD_VECTOR);
4454                 return 1;
4455         }
4456
4457         error_code = 0;
4458         if (intr_info & INTR_INFO_DELIVER_CODE_MASK)
4459                 error_code = vmcs_read32(VM_EXIT_INTR_ERROR_CODE);
4460
4461         /*
4462          * A #PF with PFEC.RSVD = 1 indicates the guest is accessing
4463          * MMIO; in that case it is better to report an internal error.
4464          * See the comments in vmx_handle_exit.
4465          */
4466         if ((vect_info & VECTORING_INFO_VALID_MASK) &&
4467             !(is_page_fault(intr_info) && !(error_code & PFERR_RSVD_MASK))) {
4468                 vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
4469                 vcpu->run->internal.suberror = KVM_INTERNAL_ERROR_SIMUL_EX;
4470                 vcpu->run->internal.ndata = 2;
4471                 vcpu->run->internal.data[0] = vect_info;
4472                 vcpu->run->internal.data[1] = intr_info;
4473                 return 0;
4474         }
4475
4476         if (is_page_fault(intr_info)) {
4477                 /* EPT won't cause page fault directly */
4478                 BUG_ON(enable_ept);
4479                 cr2 = vmcs_readl(EXIT_QUALIFICATION);
4480                 trace_kvm_page_fault(cr2, error_code);
4481
4482                 if (kvm_event_needs_reinjection(vcpu))
4483                         kvm_mmu_unprotect_page_virt(vcpu, cr2);
4484                 return kvm_mmu_page_fault(vcpu, cr2, error_code, NULL, 0);
4485         }
4486
4487         ex_no = intr_info & INTR_INFO_VECTOR_MASK;
4488
4489         if (vmx->rmode.vm86_active && rmode_exception(vcpu, ex_no))
4490                 return handle_rmode_exception(vcpu, ex_no, error_code);
4491
4492         switch (ex_no) {
4493         case DB_VECTOR:
4494                 dr6 = vmcs_readl(EXIT_QUALIFICATION);
4495                 if (!(vcpu->guest_debug &
4496                       (KVM_GUESTDBG_SINGLESTEP | KVM_GUESTDBG_USE_HW_BP))) {
4497                         vcpu->arch.dr6 = dr6 | DR6_FIXED_1;
4498                         kvm_queue_exception(vcpu, DB_VECTOR);
4499                         return 1;
4500                 }
4501                 kvm_run->debug.arch.dr6 = dr6 | DR6_FIXED_1;
4502                 kvm_run->debug.arch.dr7 = vmcs_readl(GUEST_DR7);
4503                 /* fall through */
4504         case BP_VECTOR:
4505                 /*
4506                  * Update instruction length as we may reinject #BP from
4507                  * user space while in guest debugging mode. Reading it for
4508                  * #DB as well causes no harm, it is not used in that case.
4509                  */
4510                 vmx->vcpu.arch.event_exit_inst_len =
4511                         vmcs_read32(VM_EXIT_INSTRUCTION_LEN);
4512                 kvm_run->exit_reason = KVM_EXIT_DEBUG;
4513                 rip = kvm_rip_read(vcpu);
4514                 kvm_run->debug.arch.pc = vmcs_readl(GUEST_CS_BASE) + rip;
4515                 kvm_run->debug.arch.exception = ex_no;
4516                 break;
4517         default:
4518                 kvm_run->exit_reason = KVM_EXIT_EXCEPTION;
4519                 kvm_run->ex.exception = ex_no;
4520                 kvm_run->ex.error_code = error_code;
4521                 break;
4522         }
4523         return 0;
4524 }
4525
4526 static int handle_external_interrupt(struct kvm_vcpu *vcpu)
4527 {
4528         ++vcpu->stat.irq_exits;
4529         return 1;
4530 }
4531
4532 static int handle_triple_fault(struct kvm_vcpu *vcpu)
4533 {
4534         vcpu->run->exit_reason = KVM_EXIT_SHUTDOWN;
4535         return 0;
4536 }
4537
4538 static int handle_io(struct kvm_vcpu *vcpu)
4539 {
4540         unsigned long exit_qualification;
4541         int size, in, string;
4542         unsigned port;
4543
4544         exit_qualification = vmcs_readl(EXIT_QUALIFICATION);
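             /*
              * I/O exit qualification, as decoded below: bits 2:0 hold the
              * access size minus one, bit 3 the direction (1 = IN), bit 4 the
              * string-instruction flag, and bits 31:16 the port number.
              */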
4545         string = (exit_qualification & 16) != 0;
4546         in = (exit_qualification & 8) != 0;
4547
4548         ++vcpu->stat.io_exits;
4549
4550         if (string || in)
4551                 return emulate_instruction(vcpu, 0) == EMULATE_DONE;
4552
4553         port = exit_qualification >> 16;
4554         size = (exit_qualification & 7) + 1;
4555         skip_emulated_instruction(vcpu);
4556
4557         return kvm_fast_pio_out(vcpu, size, port);
4558 }
4559
4560 static void
4561 vmx_patch_hypercall(struct kvm_vcpu *vcpu, unsigned char *hypercall)
4562 {
4563         /*
4564          * Patch in the VMCALL instruction:
4565          */
4566         hypercall[0] = 0x0f;
4567         hypercall[1] = 0x01;
4568         hypercall[2] = 0xc1;
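             /* 0f 01 c1 is the opcode encoding of the VMCALL instruction. */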
4569 }
4570
4571 /* called to set cr0 as appropriate for a mov-to-cr0 exit. */
4572 static int handle_set_cr0(struct kvm_vcpu *vcpu, unsigned long val)
4573 {
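             /*
              * While the guest is in VMX operation (nested.vmxon), it must not
              * clear the CR0 bits that VMXON requires; returning a non-zero
              * value here makes handle_cr's kvm_complete_insn_gp() inject #GP.
              */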
4574         if (to_vmx(vcpu)->nested.vmxon &&
4575             ((val & VMXON_CR0_ALWAYSON) != VMXON_CR0_ALWAYSON))
4576                 return 1;
4577
4578         if (is_guest_mode(vcpu)) {
4579                 /*
4580                  * We get here when L2 changed cr0 in a way that did not change
4581                  * any of L1's shadowed bits (see nested_vmx_exit_handled_cr),
4582                  * but did change L0 shadowed bits. This can currently happen
4583                  * with the TS bit: L0 may want to leave TS on (for lazy fpu
4584                  * loading) while pretending to allow the guest to change it.
4585                  */
4586                 if (kvm_set_cr0(vcpu, (val & vcpu->arch.cr0_guest_owned_bits) |
4587                          (vcpu->arch.cr0 & ~vcpu->arch.cr0_guest_owned_bits)))
4588                         return 1;
4589                 vmcs_writel(CR0_READ_SHADOW, val);
4590                 return 0;
4591         } else
4592                 return kvm_set_cr0(vcpu, val);
4593 }
4594
4595 static int handle_set_cr4(struct kvm_vcpu *vcpu, unsigned long val)
4596 {
4597         if (is_guest_mode(vcpu)) {
4598                 if (kvm_set_cr4(vcpu, (val & vcpu->arch.cr4_guest_owned_bits) |
4599                          (vcpu->arch.cr4 & ~vcpu->arch.cr4_guest_owned_bits)))
4600                         return 1;
4601                 vmcs_writel(CR4_READ_SHADOW, val);
4602                 return 0;
4603         } else
4604                 return kvm_set_cr4(vcpu, val);
4605 }
4606
4607 /* called to set cr0 as appropriate for a clts instruction exit. */
4608 static void handle_clts(struct kvm_vcpu *vcpu)
4609 {
4610         if (is_guest_mode(vcpu)) {
4611                 /*
4612                  * We get here when L2 did CLTS, and L1 didn't shadow CR0.TS
4613                  * but we did (!fpu_active). We need to keep GUEST_CR0.TS set,
4614                  * but pretend it is clear (also in arch.cr0, for fpu_activate).
4615                  */
4616                 vmcs_writel(CR0_READ_SHADOW,
4617                         vmcs_readl(CR0_READ_SHADOW) & ~X86_CR0_TS);
4618                 vcpu->arch.cr0 &= ~X86_CR0_TS;
4619         } else
4620                 vmx_set_cr0(vcpu, kvm_read_cr0_bits(vcpu, ~X86_CR0_TS));
4621 }
4622
4623 static int handle_cr(struct kvm_vcpu *vcpu)
4624 {
4625         unsigned long exit_qualification, val;
4626         int cr;
4627         int reg;
4628         int err;
4629
4630         exit_qualification = vmcs_readl(EXIT_QUALIFICATION);
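             /*
              * CR-access exit qualification, as decoded below: bits 3:0 give
              * the control register number, bits 5:4 the access type (0 = MOV
              * to CR, 1 = MOV from CR, 2 = CLTS, 3 = LMSW), bits 11:8 the GPR
              * operand, and the LMSW source data starts at
              * LMSW_SOURCE_DATA_SHIFT.
              */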
4631         cr = exit_qualification & 15;
4632         reg = (exit_qualification >> 8) & 15;
4633         switch ((exit_qualification >> 4) & 3) {
4634         case 0: /* mov to cr */
4635                 val = kvm_register_read(vcpu, reg);
4636                 trace_kvm_cr_write(cr, val);
4637                 switch (cr) {
4638                 case 0:
4639                         err = handle_set_cr0(vcpu, val);
4640                         kvm_complete_insn_gp(vcpu, err);
4641                         return 1;
4642                 case 3:
4643                         err = kvm_set_cr3(vcpu, val);
4644                         kvm_complete_insn_gp(vcpu, err);
4645                         return 1;
4646                 case 4:
4647                         err = handle_set_cr4(vcpu, val);
4648                         kvm_complete_insn_gp(vcpu, err);
4649                         return 1;
4650                 case 8: {
4651                                 u8 cr8_prev = kvm_get_cr8(vcpu);
4652                                 u8 cr8 = kvm_register_read(vcpu, reg);
4653                                 err = kvm_set_cr8(vcpu, cr8);
4654                                 kvm_complete_insn_gp(vcpu, err);
4655                                 if (irqchip_in_kernel(vcpu->kvm))
4656                                         return 1;
4657                                 if (cr8_prev <= cr8)
4658                                         return 1;
4659                                 vcpu->run->exit_reason = KVM_EXIT_SET_TPR;
4660                                 return 0;
4661                         }
4662                 }
4663                 break;
4664         case 2: /* clts */
4665                 handle_clts(vcpu);
4666                 trace_kvm_cr_write(0, kvm_read_cr0(vcpu));
4667                 skip_emulated_instruction(vcpu);
4668                 vmx_fpu_activate(vcpu);
4669                 return 1;
4670         case 1: /* mov from cr */
4671                 switch (cr) {
4672                 case 3:
4673                         val = kvm_read_cr3(vcpu);
4674                         kvm_register_write(vcpu, reg, val);
4675                         trace_kvm_cr_read(cr, val);
4676                         skip_emulated_instruction(vcpu);
4677                         return 1;
4678                 case 8:
4679                         val = kvm_get_cr8(vcpu);
4680                         kvm_register_write(vcpu, reg, val);
4681                         trace_kvm_cr_read(cr, val);
4682                         skip_emulated_instruction(vcpu);
4683                         return 1;
4684                 }
4685                 break;
4686         case 3: /* lmsw */
4687                 val = (exit_qualification >> LMSW_SOURCE_DATA_SHIFT) & 0x0f;
4688                 trace_kvm_cr_write(0, (kvm_read_cr0(vcpu) & ~0xful) | val);
4689                 kvm_lmsw(vcpu, val);
4690
4691                 skip_emulated_instruction(vcpu);
4692                 return 1;
4693         default:
4694                 break;
4695         }
4696         vcpu->run->exit_reason = 0;
4697         vcpu_unimpl(vcpu, "unhandled control register: op %d cr %d\n",
4698                (int)(exit_qualification >> 4) & 3, cr);
4699         return 0;
4700 }
4701
4702 static int handle_dr(struct kvm_vcpu *vcpu)
4703 {
4704         unsigned long exit_qualification;
4705         int dr, reg;
4706
4707         /* Do not handle if CPL > 0; a #GP will be triggered on re-entry */
4708         if (!kvm_require_cpl(vcpu, 0))
4709                 return 1;
4710         dr = vmcs_readl(GUEST_DR7);
4711         if (dr & DR7_GD) {
4712                 /*
4713                  * As the vm-exit takes precedence over the debug trap, we
4714                  * need to emulate the latter, either for the host or the
4715                  * guest debugging itself.
4716                  */
4717                 if (vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP) {
4718                         vcpu->run->debug.arch.dr6 = vcpu->arch.dr6;
4719                         vcpu->run->debug.arch.dr7 = dr;
4720                         vcpu->run->debug.arch.pc =
4721                                 vmcs_readl(GUEST_CS_BASE) +
4722                                 vmcs_readl(GUEST_RIP);
4723                         vcpu->run->debug.arch.exception = DB_VECTOR;
4724                         vcpu->run->exit_reason = KVM_EXIT_DEBUG;
4725                         return 0;
4726                 } else {
4727                         vcpu->arch.dr7 &= ~DR7_GD;
4728                         vcpu->arch.dr6 |= DR6_BD;
4729                         vmcs_writel(GUEST_DR7, vcpu->arch.dr7);
4730                         kvm_queue_exception(vcpu, DB_VECTOR);
4731                         return 1;
4732                 }
4733         }
4734
4735         exit_qualification = vmcs_readl(EXIT_QUALIFICATION);
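             /*
              * Decode the DR-access exit qualification: DEBUG_REG_ACCESS_NUM
              * extracts the debug register number, TYPE_MOV_FROM_DR the
              * direction, and DEBUG_REG_ACCESS_REG the general register used.
              */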
4736         dr = exit_qualification & DEBUG_REG_ACCESS_NUM;
4737         reg = DEBUG_REG_ACCESS_REG(exit_qualification);
4738         if (exit_qualification & TYPE_MOV_FROM_DR) {
4739                 unsigned long val;
4740                 if (!kvm_get_dr(vcpu, dr, &val))
4741                         kvm_register_write(vcpu, reg, val);
4742         } else
4743                 kvm_set_dr(vcpu, dr, vcpu->arch.regs[reg]);
4744         skip_emulated_instruction(vcpu);
4745         return 1;
4746 }
4747
4748 static void vmx_set_dr7(struct kvm_vcpu *vcpu, unsigned long val)
4749 {
4750         vmcs_writel(GUEST_DR7, val);
4751 }
4752
4753 static int handle_cpuid(struct kvm_vcpu *vcpu)
4754 {
4755         kvm_emulate_cpuid(vcpu);
4756         return 1;
4757 }
4758
4759 static int handle_rdmsr(struct kvm_vcpu *vcpu)
4760 {
4761         u32 ecx = vcpu->arch.regs[VCPU_REGS_RCX];
4762         u64 data;
4763
4764         if (vmx_get_msr(vcpu, ecx, &data)) {
4765                 trace_kvm_msr_read_ex(ecx);
4766                 kvm_inject_gp(vcpu, 0);
4767                 return 1;
4768         }
4769
4770         trace_kvm_msr_read(ecx, data);
4771
4772         /* FIXME: handling of bits 32:63 of rax, rdx */
4773         vcpu->arch.regs[VCPU_REGS_RAX] = data & -1u;
4774         vcpu->arch.regs[VCPU_REGS_RDX] = (data >> 32) & -1u;
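             /*
              * Only the low 32 bits of RAX and RDX are written here; the FIXME
              * above refers to RDMSR's architectural behaviour in 64-bit mode,
              * which zero-extends the result into RAX and RDX.
              */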
4775         skip_emulated_instruction(vcpu);
4776         return 1;
4777 }
4778
4779 static int handle_wrmsr(struct kvm_vcpu *vcpu)
4780 {
4781         struct msr_data msr;
4782         u32 ecx = vcpu->arch.regs[VCPU_REGS_RCX];
4783         u64 data = (vcpu->arch.regs[VCPU_REGS_RAX] & -1u)
4784                 | ((u64)(vcpu->arch.regs[VCPU_REGS_RDX] & -1u) << 32);
4785
4786         msr.data = data;
4787         msr.index = ecx;
4788         msr.host_initiated = false;
4789         if (vmx_set_msr(vcpu, &msr) != 0) {
4790                 trace_kvm_msr_write_ex(ecx, data);
4791                 kvm_inject_gp(vcpu, 0);
4792                 return 1;
4793         }
4794
4795         trace_kvm_msr_write(ecx, data);
4796         skip_emulated_instruction(vcpu);
4797         return 1;
4798 }
4799
4800 static int handle_tpr_below_threshold(struct kvm_vcpu *vcpu)
4801 {
4802         kvm_make_request(KVM_REQ_EVENT, vcpu);
4803         return 1;
4804 }
4805
4806 static int handle_interrupt_window(struct kvm_vcpu *vcpu)
4807 {
4808         u32 cpu_based_vm_exec_control;
4809
4810         /* clear pending irq */
4811         cpu_based_vm_exec_control = vmcs_read32(CPU_BASED_VM_EXEC_CONTROL);
4812         cpu_based_vm_exec_control &= ~CPU_BASED_VIRTUAL_INTR_PENDING;
4813         vmcs_write32(CPU_BASED_VM_EXEC_CONTROL, cpu_based_vm_exec_control);
4814
4815         kvm_make_request(KVM_REQ_EVENT, vcpu);
4816
4817         ++vcpu->stat.irq_window_exits;
4818
4819         /*
4820          * If user space is waiting to inject interrupts, exit as soon
4821          * as possible.
4822          */
4823         if (!irqchip_in_kernel(vcpu->kvm) &&
4824             vcpu->run->request_interrupt_window &&
4825             !kvm_cpu_has_interrupt(vcpu)) {
4826                 vcpu->run->exit_reason = KVM_EXIT_IRQ_WINDOW_OPEN;
4827                 return 0;
4828         }
4829         return 1;
4830 }
4831
4832 static int handle_halt(struct kvm_vcpu *vcpu)
4833 {
4834         skip_emulated_instruction(vcpu);
4835         return kvm_emulate_halt(vcpu);
4836 }
4837
4838 static int handle_vmcall(struct kvm_vcpu *vcpu)
4839 {
4840         skip_emulated_instruction(vcpu);
4841         kvm_emulate_hypercall(vcpu);
4842         return 1;
4843 }
4844
4845 static int handle_invd(struct kvm_vcpu *vcpu)
4846 {
4847         return emulate_instruction(vcpu, 0) == EMULATE_DONE;
4848 }
4849
4850 static int handle_invlpg(struct kvm_vcpu *vcpu)
4851 {
4852         unsigned long exit_qualification = vmcs_readl(EXIT_QUALIFICATION);
4853
4854         kvm_mmu_invlpg(vcpu, exit_qualification);
4855         skip_emulated_instruction(vcpu);
4856         return 1;
4857 }
4858
4859 static int handle_rdpmc(struct kvm_vcpu *vcpu)
4860 {
4861         int err;
4862
4863         err = kvm_rdpmc(vcpu);
4864         kvm_complete_insn_gp(vcpu, err);
4865
4866         return 1;
4867 }
4868
4869 static int handle_wbinvd(struct kvm_vcpu *vcpu)
4870 {
4871         skip_emulated_instruction(vcpu);
4872         kvm_emulate_wbinvd(vcpu);
4873         return 1;
4874 }
4875
4876 static int handle_xsetbv(struct kvm_vcpu *vcpu)
4877 {
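             /* XSETBV takes the XCR index in ECX and the new value in EDX:EAX. */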
4878         u64 new_bv = kvm_read_edx_eax(vcpu);
4879         u32 index = kvm_register_read(vcpu, VCPU_REGS_RCX);
4880
4881         if (kvm_set_xcr(vcpu, index, new_bv) == 0)
4882                 skip_emulated_instruction(vcpu);
4883         return 1;
4884 }
4885
4886 static int handle_apic_access(struct kvm_vcpu *vcpu)
4887 {
4888         if (likely(fasteoi)) {
4889                 unsigned long exit_qualification = vmcs_readl(EXIT_QUALIFICATION);
4890                 int access_type, offset;
4891
4892                 access_type = exit_qualification & APIC_ACCESS_TYPE;
4893                 offset = exit_qualification & APIC_ACCESS_OFFSET;
4894                 /*
4895                  * A sane guest uses MOV to write the EOI register, and the
4896                  * written value does not matter. Short-circuit that case here
4897                  * to avoid heavy instruction emulation.
4898                  */
4899                 if ((access_type == TYPE_LINEAR_APIC_INST_WRITE) &&
4900                     (offset == APIC_EOI)) {
4901                         kvm_lapic_set_eoi(vcpu);
4902                         skip_emulated_instruction(vcpu);
4903                         return 1;
4904                 }
4905         }
4906         return emulate_instruction(vcpu, 0) == EMULATE_DONE;
4907 }
4908
4909 static int handle_apic_write(struct kvm_vcpu *vcpu)
4910 {
4911         unsigned long exit_qualification = vmcs_readl(EXIT_QUALIFICATION);
4912         u32 offset = exit_qualification & 0xfff;
4913
4914         /* APIC-write VM exit is trap-like and thus no need to adjust IP */
4915         kvm_apic_write_nodecode(vcpu, offset);
4916         return 1;
4917 }
4918
4919 static int handle_task_switch(struct kvm_vcpu *vcpu)
4920 {
4921         struct vcpu_vmx *vmx = to_vmx(vcpu);
4922         unsigned long exit_qualification;
4923         bool has_error_code = false;
4924         u32 error_code = 0;
4925         u16 tss_selector;
4926         int reason, type, idt_v, idt_index;
4927
4928         idt_v = (vmx->idt_vectoring_info & VECTORING_INFO_VALID_MASK);
4929         idt_index = (vmx->idt_vectoring_info & VECTORING_INFO_VECTOR_MASK);
4930         type = (vmx->idt_vectoring_info & VECTORING_INFO_TYPE_MASK);
4931
4932         exit_qualification = vmcs_readl(EXIT_QUALIFICATION);
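             /*
              * For task-switch exits, bits 15:0 of the exit qualification hold
              * the new TSS selector and bits 31:30 the source of the switch
              * (CALL/IRET/JMP or, as checked below, a task gate in the IDT).
              */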
4933
4934         reason = (u32)exit_qualification >> 30;
4935         if (reason == TASK_SWITCH_GATE && idt_v) {
4936                 switch (type) {
4937                 case INTR_TYPE_NMI_INTR:
4938                         vcpu->arch.nmi_injected = false;
4939                         vmx_set_nmi_mask(vcpu, true);
4940                         break;
4941                 case INTR_TYPE_EXT_INTR:
4942                 case INTR_TYPE_SOFT_INTR:
4943                         kvm_clear_interrupt_queue(vcpu);
4944                         break;
4945                 case INTR_TYPE_HARD_EXCEPTION:
4946                         if (vmx->idt_vectoring_info &
4947                             VECTORING_INFO_DELIVER_CODE_MASK) {
4948                                 has_error_code = true;
4949                                 error_code =
4950                                         vmcs_read32(IDT_VECTORING_ERROR_CODE);
4951                         }
4952                         /* fall through */
4953                 case INTR_TYPE_SOFT_EXCEPTION:
4954                         kvm_clear_exception_queue(vcpu);
4955                         break;
4956                 default:
4957                         break;
4958                 }
4959         }
4960         tss_selector = exit_qualification;
4961
4962         if (!idt_v || (type != INTR_TYPE_HARD_EXCEPTION &&
4963                        type != INTR_TYPE_EXT_INTR &&
4964                        type != INTR_TYPE_NMI_INTR))
4965                 skip_emulated_instruction(vcpu);
4966
4967         if (kvm_task_switch(vcpu, tss_selector,
4968                             type == INTR_TYPE_SOFT_INTR ? idt_index : -1, reason,
4969                             has_error_code, error_code) == EMULATE_FAIL) {
4970                 vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
4971                 vcpu->run->internal.suberror = KVM_INTERNAL_ERROR_EMULATION;
4972                 vcpu->run->internal.ndata = 0;
4973                 return 0;
4974         }
4975
4976         /* clear all local breakpoint enable flags */
4977         vmcs_writel(GUEST_DR7, vmcs_readl(GUEST_DR7) & ~55);
4978
4979         /*
4980          * TODO: What about debug traps on tss switch?
4981          *       Are we supposed to inject them and update dr6?
4982          */
4983
4984         return 1;
4985 }
4986
4987 static int handle_ept_violation(struct kvm_vcpu *vcpu)
4988 {
4989         unsigned long exit_qualification;
4990         gpa_t gpa;
4991         u32 error_code;
4992         int gla_validity;
4993
4994         exit_qualification = vmcs_readl(EXIT_QUALIFICATION);
4995
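             /*
              * Bits 8:7 of the exit qualification describe the reported guest
              * linear address; the value 0x2 (bit 8 set while bit 7, "GLA
              * valid", is clear) is not expected here and is reported to user
              * space as an unknown exit below.
              */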
4996         gla_validity = (exit_qualification >> 7) & 0x3;
4997         if (gla_validity != 0x3 && gla_validity != 0x1 && gla_validity != 0) {
4998                 printk(KERN_ERR "EPT: Handling EPT violation failed!\n");
4999                 printk(KERN_ERR "EPT: GPA: 0x%lx, GVA: 0x%lx\n",
5000                         (long unsigned int)vmcs_read64(GUEST_PHYSICAL_ADDRESS),
5001                         vmcs_readl(GUEST_LINEAR_ADDRESS));
5002                 printk(KERN_ERR "EPT: Exit qualification is 0x%lx\n",
5003                         (long unsigned int)exit_qualification);
5004                 vcpu->run->exit_reason = KVM_EXIT_UNKNOWN;
5005                 vcpu->run->hw.hardware_exit_reason = EXIT_REASON_EPT_VIOLATION;
5006                 return 0;
5007         }
5008
5009         gpa = vmcs_read64(GUEST_PHYSICAL_ADDRESS);
5010         trace_kvm_page_fault(gpa, exit_qualification);
5011
5012         /* Is it a write fault? */
5013         error_code = exit_qualification & (1U << 1);
5014         /* Is the EPT page-table entry present? */
5015         error_code |= (exit_qualification >> 3) & 0x1;
5016
5017         return kvm_mmu_page_fault(vcpu, gpa, error_code, NULL, 0);
5018 }
5019
5020 static u64 ept_rsvd_mask(u64 spte, int level)
5021 {
5022         int i;
5023         u64 mask = 0;
5024
5025         for (i = 51; i > boot_cpu_data.x86_phys_bits; i--)
5026                 mask |= (1ULL << i);
5027
5028         if (level > 2)
5029                 /* bits 7:3 reserved */
5030                 mask |= 0xf8;
5031         else if (level == 2) {
5032                 if (spte & (1ULL << 7))
5033                         /* 2MB page, bits 20:12 reserved */
5034                         mask |= 0x1ff000;
5035                 else
5036                         /* bits 6:3 reserved */
5037                         mask |= 0x78;
5038         }
5039
5040         return mask;
5041 }
5042
5043 static void ept_misconfig_inspect_spte(struct kvm_vcpu *vcpu, u64 spte,
5044                                        int level)
5045 {
5046         printk(KERN_ERR "%s: spte 0x%llx level %d\n", __func__, spte, level);
5047
5048         /* 010b (write-only) */
5049         WARN_ON((spte & 0x7) == 0x2);
5050
5051         /* 110b (write/execute) */
5052         WARN_ON((spte & 0x7) == 0x6);
5053
5054         /* 100b (execute-only) and value not supported by logical processor */
5055         if (!cpu_has_vmx_ept_execute_only())
5056                 WARN_ON((spte & 0x7) == 0x4);
5057
5058         /* not 000b */
5059         if ((spte & 0x7)) {
5060                 u64 rsvd_bits = spte & ept_rsvd_mask(spte, level);
5061
5062                 if (rsvd_bits != 0) {
5063                         printk(KERN_ERR "%s: rsvd_bits = 0x%llx\n",
5064                                          __func__, rsvd_bits);
5065                         WARN_ON(1);
5066                 }
5067
5068                 if (level == 1 || (level == 2 && (spte & (1ULL << 7)))) {
5069                         u64 ept_mem_type = (spte & 0x38) >> 3;
5070
5071                         if (ept_mem_type == 2 || ept_mem_type == 3 ||
5072                             ept_mem_type == 7) {
5073                                 printk(KERN_ERR "%s: ept_mem_type=0x%llx\n",
5074                                                 __func__, ept_mem_type);
5075                                 WARN_ON(1);
5076                         }
5077                 }
5078         }
5079 }
5080
5081 static int handle_ept_misconfig(struct kvm_vcpu *vcpu)
5082 {
5083         u64 sptes[4];
5084         int nr_sptes, i, ret;
5085         gpa_t gpa;
5086
5087         gpa = vmcs_read64(GUEST_PHYSICAL_ADDRESS);
5088
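             /*
              * handle_mmio_page_fault_common() distinguishes the cases handled
              * below: 1 means the access hit an MMIO region and the instruction
              * is emulated, 0 means there is nothing to do and the guest is
              * simply re-entered, and any other value is treated as a genuine
              * EPT misconfiguration and dumped.
              */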
5089         ret = handle_mmio_page_fault_common(vcpu, gpa, true);
5090         if (likely(ret == 1))
5091                 return x86_emulate_instruction(vcpu, gpa, 0, NULL, 0) ==
5092                                               EMULATE_DONE;
5093         if (unlikely(!ret))
5094                 return 1;
5095
5096         /* It is a real EPT misconfiguration */
5097         printk(KERN_ERR "EPT: Misconfiguration.\n");
5098         printk(KERN_ERR "EPT: GPA: 0x%llx\n", gpa);
5099
5100         nr_sptes = kvm_mmu_get_spte_hierarchy(vcpu, gpa, sptes);
5101
5102         for (i = PT64_ROOT_LEVEL; i > PT64_ROOT_LEVEL - nr_sptes; --i)
5103                 ept_misconfig_inspect_spte(vcpu, sptes[i-1], i);
5104
5105         vcpu->run->exit_reason = KVM_EXIT_UNKNOWN;
5106         vcpu->run->hw.hardware_exit_reason = EXIT_REASON_EPT_MISCONFIG;
5107
5108         return 0;
5109 }
5110
5111 static int handle_nmi_window(struct kvm_vcpu *vcpu)
5112 {
5113         u32 cpu_based_vm_exec_control;
5114
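             /*
              * NMI-window exiting was enabled because an NMI was pending while
              * the guest could not take it; now that the window is open, stop
              * requesting these exits and let the NMI be injected via
              * KVM_REQ_EVENT on the next entry.
              */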
5115         /* clear pending NMI */
5116         cpu_based_vm_exec_control = vmcs_read32(CPU_BASED_VM_EXEC_CONTROL);
5117         cpu_based_vm_exec_control &= ~CPU_BASED_VIRTUAL_NMI_PENDING;
5118         vmcs_write32(CPU_BASED_VM_EXEC_CONTROL, cpu_based_vm_exec_control);
5119         ++vcpu->stat.nmi_window_exits;
5120         kvm_make_request(KVM_REQ_EVENT, vcpu);
5121
5122         return 1;
5123 }
5124
5125 static int handle_invalid_guest_state(struct kvm_vcpu *vcpu)
5126 {
5127         struct vcpu_vmx *vmx = to_vmx(vcpu);
5128         enum emulation_result err = EMULATE_DONE;
5129         int ret = 1;
5130         u32 cpu_exec_ctrl;
5131         bool intr_window_requested;
5132         unsigned count = 130;
5133
5134         cpu_exec_ctrl = vmcs_read32(CPU_BASED_VM_EXEC_CONTROL);
5135         intr_window_requested = cpu_exec_ctrl & CPU_BASED_VIRTUAL_INTR_PENDING;
5136
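             /*
              * Emulate one instruction at a time until the guest state becomes
              * VMX-friendly again, bailing out early for interrupt windows,
              * pending events, MMIO that must go to user space, emulation
              * failures, pending signals, or once the iteration bound expires.
              */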
5137         while (!guest_state_valid(vcpu) && count-- != 0) {
5138                 if (intr_window_requested && vmx_interrupt_allowed(vcpu))
5139                         return handle_interrupt_window(&vmx->vcpu);
5140
5141                 if (test_bit(KVM_REQ_EVENT, &vcpu->requests))
5142                         return 1;
5143
5144                 err = emulate_instruction(vcpu, 0);
5145
5146                 if (err == EMULATE_DO_MMIO) {
5147                         ret = 0;
5148                         goto out;
5149                 }
5150
5151                 if (err != EMULATE_DONE) {
5152                         vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
5153                         vcpu->run->internal.suberror = KVM_INTERNAL_ERROR_EMULATION;
5154                         vcpu->run->internal.ndata = 0;
5155                         return 0;
5156                 }
5157
5158                 if (signal_pending(current))
5159                         goto out;
5160                 if (need_resched())
5161                         schedule();
5162         }
5163
5164         vmx->emulation_required = emulation_required(vcpu);
5165 out:
5166         return ret;
5167 }
5168
5169 /*
5170  * Indicates a vcpu busy-waiting on a spinlock. We do not enable PAUSE
5171  * exiting, so we only get here on CPUs with PAUSE-Loop-Exiting.
5172  */
5173 static int handle_pause(struct kvm_vcpu *vcpu)
5174 {
5175         skip_emulated_instruction(vcpu);
5176         kvm_vcpu_on_spin(vcpu);
5177
5178         return 1;
5179 }
5180
5181 static int handle_invalid_op(struct kvm_vcpu *vcpu)
5182 {
5183         kvm_queue_exception(vcpu, UD_VECTOR);
5184         return 1;
5185 }
5186
5187 /*
5188  * To run an L2 guest, we need a vmcs02 based on the L1-specified vmcs12.
5189  * We could reuse a single VMCS for all the L2 guests, but we also want the
5190  * option to allocate a separate vmcs02 for each separate loaded vmcs12 - this
5191  * allows keeping them loaded on the processor, and in the future will allow
5192  * optimizations where prepare_vmcs02 doesn't need to set all the fields on
5193  * every entry if they never change.
5194  * So we keep, in vmx->nested.vmcs02_pool, a cache of size VMCS02_POOL_SIZE
5195  * (>=0) with a vmcs02 for each recently loaded vmcs12, most recent first.
5196  *
5197  * The following functions allocate and free a vmcs02 in this pool.
5198  */
5199
5200 /* Get a VMCS from the pool to use as vmcs02 for the current vmcs12. */
5201 static struct loaded_vmcs *nested_get_current_vmcs02(struct vcpu_vmx *vmx)
5202 {
5203         struct vmcs02_list *item;
5204         list_for_each_entry(item, &vmx->nested.vmcs02_pool, list)
5205                 if (item->vmptr == vmx->nested.current_vmptr) {
5206                         list_move(&item->list, &vmx->nested.vmcs02_pool);
5207                         return &item->vmcs02;
5208                 }
5209
5210         if (vmx->nested.vmcs02_num >= max(VMCS02_POOL_SIZE, 1)) {
5211                 /* Recycle the least recently used VMCS. */
5212                 item = list_entry(vmx->nested.vmcs02_pool.prev,
5213                         struct vmcs02_list, list);
5214                 item->vmptr = vmx->nested.current_vmptr;
5215                 list_move(&item->list, &vmx->nested.vmcs02_pool);
5216                 return &item->vmcs02;
5217         }
5218
5219         /* Create a new VMCS */
5220         item = (struct vmcs02_list *)
5221                 kmalloc(sizeof(struct vmcs02_list), GFP_KERNEL);
5222         if (!item)
5223                 return NULL;
5224         item->vmcs02.vmcs = alloc_vmcs();
5225         if (!item->vmcs02.vmcs) {
5226                 kfree(item);
5227                 return NULL;
5228         }
5229         loaded_vmcs_init(&item->vmcs02);
5230         item->vmptr = vmx->nested.current_vmptr;
5231         list_add(&(item->list), &(vmx->nested.vmcs02_pool));
5232         vmx->nested.vmcs02_num++;
5233         return &item->vmcs02;
5234 }
5235
5236 /* Free and remove from pool a vmcs02 saved for a vmcs12 (if there is one) */
5237 static void nested_free_vmcs02(struct vcpu_vmx *vmx, gpa_t vmptr)
5238 {
5239         struct vmcs02_list *item;
5240         list_for_each_entry(item, &vmx->nested.vmcs02_pool, list)
5241                 if (item->vmptr == vmptr) {
5242                         free_loaded_vmcs(&item->vmcs02);
5243                         list_del(&item->list);
5244                         kfree(item);
5245                         vmx->nested.vmcs02_num--;
5246                         return;
5247                 }
5248 }
5249
5250 /*
5251  * Free all VMCSs saved for this vcpu, except the one pointed by
5252  * vmx->loaded_vmcs. These include the VMCSs in vmcs02_pool (except the one
5253  * currently used, if running L2), and vmcs01 when running L2.
5254  */
5255 static void nested_free_all_saved_vmcss(struct vcpu_vmx *vmx)
5256 {
5257         struct vmcs02_list *item, *n;
5258         list_for_each_entry_safe(item, n, &vmx->nested.vmcs02_pool, list) {
5259                 if (vmx->loaded_vmcs != &item->vmcs02)
5260                         free_loaded_vmcs(&item->vmcs02);
5261                 list_del(&item->list);
5262                 kfree(item);
5263         }
5264         vmx->nested.vmcs02_num = 0;
5265
5266         if (vmx->loaded_vmcs != &vmx->vmcs01)
5267                 free_loaded_vmcs(&vmx->vmcs01);
5268 }
5269
5270 /*
5271  * Emulate the VMXON instruction.
5272  * Currently, we just remember that VMX is active, and do not save or even
5273  * inspect the argument to VMXON (the so-called "VMXON pointer") because we
5274  * do not currently need to store anything in that guest-allocated memory
5275  * region. Consequently, VMCLEAR and VMPTRLD also do not verify that their
5276  * argument is different from the VMXON pointer (which the spec says they do).
5277  */
5278 static int handle_vmon(struct kvm_vcpu *vcpu)
5279 {
5280         struct kvm_segment cs;
5281         struct vcpu_vmx *vmx = to_vmx(vcpu);
5282
5283         /* The Intel VMX Instruction Reference lists a bunch of bits that
5284          * are prerequisite to running VMXON, most notably cr4.VMXE must be
5285          * set to 1 (see vmx_set_cr4() for when we allow the guest to set this).
5286          * Otherwise, we should fail with #UD. We test these now:
5287          */
5288         if (!kvm_read_cr4_bits(vcpu, X86_CR4_VMXE) ||
5289             !kvm_read_cr0_bits(vcpu, X86_CR0_PE) ||
5290             (vmx_get_rflags(vcpu) & X86_EFLAGS_VM)) {
5291                 kvm_queue_exception(vcpu, UD_VECTOR);
5292                 return 1;
5293         }
5294
5295         vmx_get_segment(vcpu, &cs, VCPU_SREG_CS);
5296         if (is_long_mode(vcpu) && !cs.l) {
5297                 kvm_queue_exception(vcpu, UD_VECTOR);
5298                 return 1;
5299         }
5300
5301         if (vmx_get_cpl(vcpu)) {
5302                 kvm_inject_gp(vcpu, 0);
5303                 return 1;
5304         }
5305
5306         INIT_LIST_HEAD(&(vmx->nested.vmcs02_pool));
5307         vmx->nested.vmcs02_num = 0;
5308
5309         vmx->nested.vmxon = true;
5310
5311         skip_emulated_instruction(vcpu);
5312         return 1;
5313 }
5314
5315 /*
5316  * Intel's VMX Instruction Reference specifies a common set of prerequisites
5317  * for running VMX instructions (except VMXON, whose prerequisites are
5318  * slightly different). It also specifies what exception to inject otherwise.
5319  */
5320 static int nested_vmx_check_permission(struct kvm_vcpu *vcpu)
5321 {
5322         struct kvm_segment cs;
5323         struct vcpu_vmx *vmx = to_vmx(vcpu);
5324
5325         if (!vmx->nested.vmxon) {
5326                 kvm_queue_exception(vcpu, UD_VECTOR);
5327                 return 0;
5328         }
5329
5330         vmx_get_segment(vcpu, &cs, VCPU_SREG_CS);
5331         if ((vmx_get_rflags(vcpu) & X86_EFLAGS_VM) ||
5332             (is_long_mode(vcpu) && !cs.l)) {
5333                 kvm_queue_exception(vcpu, UD_VECTOR);
5334                 return 0;
5335         }
5336
5337         if (vmx_get_cpl(vcpu)) {
5338                 kvm_inject_gp(vcpu, 0);
5339                 return 0;
5340         }
5341
5342         return 1;
5343 }
5344
5345 /*
5346  * Free whatever needs to be freed from vmx->nested when L1 goes down, or
5347  * just stops using VMX.
5348  */
5349 static void free_nested(struct vcpu_vmx *vmx)
5350 {
5351         if (!vmx->nested.vmxon)
5352                 return;
5353         vmx->nested.vmxon = false;
5354         if (vmx->nested.current_vmptr != -1ull) {
5355                 kunmap(vmx->nested.current_vmcs12_page);
5356                 nested_release_page(vmx->nested.current_vmcs12_page);
5357                 vmx->nested.current_vmptr = -1ull;
5358                 vmx->nested.current_vmcs12 = NULL;
5359         }
5360         /* Unpin physical memory we referred to in current vmcs02 */
5361         if (vmx->nested.apic_access_page) {
5362                 nested_release_page(vmx->nested.apic_access_page);
5363                 vmx->nested.apic_access_page = 0;
5364         }
5365
5366         nested_free_all_saved_vmcss(vmx);
5367 }
5368
5369 /* Emulate the VMXOFF instruction */
5370 static int handle_vmoff(struct kvm_vcpu *vcpu)
5371 {
5372         if (!nested_vmx_check_permission(vcpu))
5373                 return 1;
5374         free_nested(to_vmx(vcpu));
5375         skip_emulated_instruction(vcpu);
5376         return 1;
5377 }
5378
5379 /*
5380  * Decode the memory-address operand of a vmx instruction, as recorded on an
5381  * exit caused by such an instruction (run by a guest hypervisor).
5382  * On success, returns 0. When the operand is invalid, returns 1 and throws
5383  * #UD or #GP.
5384  */
5385 static int get_vmx_mem_address(struct kvm_vcpu *vcpu,
5386                                  unsigned long exit_qualification,
5387                                  u32 vmx_instruction_info, gva_t *ret)
5388 {
5389         /*
5390          * According to Vol. 3B, "Information for VM Exits Due to Instruction
5391          * Execution", on an exit, vmx_instruction_info holds most of the
5392          * addressing components of the operand. Only the displacement part
5393          * is put in exit_qualification (see 3B, "Basic VM-Exit Information").
5394          * For how an actual address is calculated from all these components,
5395          * refer to Vol. 1, "Operand Addressing".
5396          */
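             /*
              * Bit layout of vmx_instruction_info as used below: 1:0 scaling,
              * 9:7 address size, 10 set for a register operand, 17:15 segment
              * register, 21:18 index register, 22 index invalid, 26:23 base
              * register, 27 base invalid.
              */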
5397         int  scaling = vmx_instruction_info & 3;
5398         int  addr_size = (vmx_instruction_info >> 7) & 7;
5399         bool is_reg = vmx_instruction_info & (1u << 10);
5400         int  seg_reg = (vmx_instruction_info >> 15) & 7;
5401         int  index_reg = (vmx_instruction_info >> 18) & 0xf;
5402         bool index_is_valid = !(vmx_instruction_info & (1u << 22));
5403         int  base_reg       = (vmx_instruction_info >> 23) & 0xf;
5404         bool base_is_valid  = !(vmx_instruction_info & (1u << 27));
5405
5406         if (is_reg) {
5407                 kvm_queue_exception(vcpu, UD_VECTOR);
5408                 return 1;
5409         }
5410
5411         /* Addr = segment_base + offset */
5412         /* offset = base + [index * scale] + displacement */
5413         *ret = vmx_get_segment_base(vcpu, seg_reg);
5414         if (base_is_valid)
5415                 *ret += kvm_register_read(vcpu, base_reg);
5416         if (index_is_valid)
5417                 *ret += kvm_register_read(vcpu, index_reg) << scaling;
5418         *ret += exit_qualification; /* holds the displacement */
5419
5420         if (addr_size == 1) /* 32 bit */
5421                 *ret &= 0xffffffff;
5422
5423         /*
5424          * TODO: throw #GP (and return 1) in various cases that the VM*
5425          * instructions require it - e.g., offset beyond segment limit,
5426          * unusable or unreadable/unwritable segment, non-canonical 64-bit
5427          * address, and so on. Currently these are not checked.
5428          */
5429         return 0;
5430 }
5431
5432 /*
5433  * The following 3 functions, nested_vmx_succeed()/failValid()/failInvalid(),
5434  * set the success or error code of an emulated VMX instruction, as specified
5435  * by Vol 2B, VMX Instruction Reference, "Conventions".
5436  */
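     /*
      * The RFLAGS conventions implemented below: VMsucceed clears CF, PF, AF,
      * ZF, SF and OF; VMfailInvalid sets CF alone; VMfailValid sets ZF alone
      * and stores the error number in the current vmcs12's VM-instruction
      * error field.
      */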
5437 static void nested_vmx_succeed(struct kvm_vcpu *vcpu)
5438 {
5439         vmx_set_rflags(vcpu, vmx_get_rflags(vcpu)
5440                         & ~(X86_EFLAGS_CF | X86_EFLAGS_PF | X86_EFLAGS_AF |
5441                             X86_EFLAGS_ZF | X86_EFLAGS_SF | X86_EFLAGS_OF));
5442 }
5443
5444 static void nested_vmx_failInvalid(struct kvm_vcpu *vcpu)
5445 {
5446         vmx_set_rflags(vcpu, (vmx_get_rflags(vcpu)
5447                         & ~(X86_EFLAGS_PF | X86_EFLAGS_AF | X86_EFLAGS_ZF |
5448                             X86_EFLAGS_SF | X86_EFLAGS_OF))
5449                         | X86_EFLAGS_CF);
5450 }
5451
5452 static void nested_vmx_failValid(struct kvm_vcpu *vcpu,
5453                                         u32 vm_instruction_error)
5454 {
5455         if (to_vmx(vcpu)->nested.current_vmptr == -1ull) {
5456                 /*
5457                  * failValid writes the error number to the current VMCS, which
5458                  * can't be done if there isn't a current VMCS.
5459                  */
5460                 nested_vmx_failInvalid(vcpu);
5461                 return;
5462         }
5463         vmx_set_rflags(vcpu, (vmx_get_rflags(vcpu)
5464                         & ~(X86_EFLAGS_CF | X86_EFLAGS_PF | X86_EFLAGS_AF |
5465                             X86_EFLAGS_SF | X86_EFLAGS_OF))
5466                         | X86_EFLAGS_ZF);
5467         get_vmcs12(vcpu)->vm_instruction_error = vm_instruction_error;
5468 }
5469
5470 /* Emulate the VMCLEAR instruction */
5471 static int handle_vmclear(struct kvm_vcpu *vcpu)
5472 {
5473         struct vcpu_vmx *vmx = to_vmx(vcpu);
5474         gva_t gva;
5475         gpa_t vmptr;
5476         struct vmcs12 *vmcs12;
5477         struct page *page;
5478         struct x86_exception e;
5479
5480         if (!nested_vmx_check_permission(vcpu))
5481                 return 1;
5482
5483         if (get_vmx_mem_address(vcpu, vmcs_readl(EXIT_QUALIFICATION),
5484                         vmcs_read32(VMX_INSTRUCTION_INFO), &gva))
5485                 return 1;
5486
5487         if (kvm_read_guest_virt(&vcpu->arch.emulate_ctxt, gva, &vmptr,
5488                                 sizeof(vmptr), &e)) {
5489                 kvm_inject_page_fault(vcpu, &e);
5490                 return 1;
5491         }
5492
5493         if (!IS_ALIGNED(vmptr, PAGE_SIZE)) {
5494                 nested_vmx_failValid(vcpu, VMXERR_VMCLEAR_INVALID_ADDRESS);
5495                 skip_emulated_instruction(vcpu);
5496                 return 1;
5497         }
5498
5499         if (vmptr == vmx->nested.current_vmptr) {
5500                 kunmap(vmx->nested.current_vmcs12_page);
5501                 nested_release_page(vmx->nested.current_vmcs12_page);
5502                 vmx->nested.current_vmptr = -1ull;
5503                 vmx->nested.current_vmcs12 = NULL;
5504         }
5505
5506         page = nested_get_page(vcpu, vmptr);
5507         if (page == NULL) {
5508                 /*
5509                  * For accurate processor emulation, VMCLEAR beyond available
5510                  * physical memory should do nothing at all. However, it is
5511                  * possible that a nested vmx bug, not a guest hypervisor bug,
5512                  * resulted in this case, so let's shut down before doing any
5513                  * more damage:
5514                  */
5515                 kvm_make_request(KVM_REQ_TRIPLE_FAULT, vcpu);
5516                 return 1;
5517         }
5518         vmcs12 = kmap(page);
5519         vmcs12->launch_state = 0;
5520         kunmap(page);
5521         nested_release_page(page);
5522
5523         nested_free_vmcs02(vmx, vmptr);
5524
5525         skip_emulated_instruction(vcpu);
5526         nested_vmx_succeed(vcpu);
5527         return 1;
5528 }
5529
5530 static int nested_vmx_run(struct kvm_vcpu *vcpu, bool launch);
5531
5532 /* Emulate the VMLAUNCH instruction */
5533 static int handle_vmlaunch(struct kvm_vcpu *vcpu)
5534 {
5535         return nested_vmx_run(vcpu, true);
5536 }
5537
5538 /* Emulate the VMRESUME instruction */
5539 static int handle_vmresume(struct kvm_vcpu *vcpu)
5540 {
5541
5542         return nested_vmx_run(vcpu, false);
5543 }
5544
5545 enum vmcs_field_type {
5546         VMCS_FIELD_TYPE_U16 = 0,
5547         VMCS_FIELD_TYPE_U64 = 1,
5548         VMCS_FIELD_TYPE_U32 = 2,
5549         VMCS_FIELD_TYPE_NATURAL_WIDTH = 3
5550 };
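     /*
      * A VMCS field encoding carries its own metadata: bits 14:13 give the
      * width (decoded by vmcs_field_type() below), bits 11:10 the field type,
      * where type 1 marks the read-only VM-exit information fields, and bit 0
      * selects the high half of a 64-bit field.
      */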
5551
5552 static inline int vmcs_field_type(unsigned long field)
5553 {
5554         if (0x1 & field)        /* the *_HIGH fields are all 32 bit */
5555                 return VMCS_FIELD_TYPE_U32;
5556         return (field >> 13) & 0x3;
5557 }
5558
5559 static inline int vmcs_field_readonly(unsigned long field)
5560 {
5561         return (((field >> 10) & 0x3) == 1);
5562 }
5563
5564 /*
5565  * Read a vmcs12 field. Since these can have varying lengths and we return
5566  * one type, we chose the biggest type (u64) and zero-extend the return value
5567  * to that size. Note that the caller, handle_vmread, might need to use only
5568  * some of the bits we return here (e.g., on 32-bit guests, only 32 bits of
5569  * 64-bit fields are to be returned).
5570  */
5571 static inline bool vmcs12_read_any(struct kvm_vcpu *vcpu,
5572                                         unsigned long field, u64 *ret)
5573 {
5574         short offset = vmcs_field_to_offset(field);
5575         char *p;
5576
5577         if (offset < 0)
5578                 return 0;
5579
5580         p = ((char *)(get_vmcs12(vcpu))) + offset;
5581
5582         switch (vmcs_field_type(field)) {
5583         case VMCS_FIELD_TYPE_NATURAL_WIDTH:
5584                 *ret = *((natural_width *)p);
5585                 return 1;
5586         case VMCS_FIELD_TYPE_U16:
5587                 *ret = *((u16 *)p);
5588                 return 1;
5589         case VMCS_FIELD_TYPE_U32:
5590                 *ret = *((u32 *)p);
5591                 return 1;
5592         case VMCS_FIELD_TYPE_U64:
5593                 *ret = *((u64 *)p);
5594                 return 1;
5595         default:
5596                 return 0; /* can never happen. */
5597         }
5598 }
5599
5600 /*
5601  * VMX instructions which assume a current vmcs12 (i.e., that VMPTRLD was
5602  * used before) all generate the same failure when it is missing.
5603  */
5604 static int nested_vmx_check_vmcs12(struct kvm_vcpu *vcpu)
5605 {
5606         struct vcpu_vmx *vmx = to_vmx(vcpu);
5607         if (vmx->nested.current_vmptr == -1ull) {
5608                 nested_vmx_failInvalid(vcpu);
5609                 skip_emulated_instruction(vcpu);
5610                 return 0;
5611         }
5612         return 1;
5613 }
5614
5615 static int handle_vmread(struct kvm_vcpu *vcpu)
5616 {
5617         unsigned long field;
5618         u64 field_value;
5619         unsigned long exit_qualification = vmcs_readl(EXIT_QUALIFICATION);
5620         u32 vmx_instruction_info = vmcs_read32(VMX_INSTRUCTION_INFO);
5621         gva_t gva = 0;
5622
5623         if (!nested_vmx_check_permission(vcpu) ||
5624             !nested_vmx_check_vmcs12(vcpu))
5625                 return 1;
5626
5627         /* Decode instruction info and find the field to read */
5628         field = kvm_register_read(vcpu, (((vmx_instruction_info) >> 28) & 0xf));
5629         /* Read the field, zero-extended to a u64 field_value */
5630         if (!vmcs12_read_any(vcpu, field, &field_value)) {
5631                 nested_vmx_failValid(vcpu, VMXERR_UNSUPPORTED_VMCS_COMPONENT);
5632                 skip_emulated_instruction(vcpu);
5633                 return 1;
5634         }
5635         /*
5636          * Now copy part of this value to register or memory, as requested.
5637          * Note that the number of bits actually copied is 32 or 64 depending
5638          * on the guest's mode (32 or 64 bit), not on the given field's length.
5639          */
5640         if (vmx_instruction_info & (1u << 10)) {
5641                 kvm_register_write(vcpu, (((vmx_instruction_info) >> 3) & 0xf),
5642                         field_value);
5643         } else {
5644                 if (get_vmx_mem_address(vcpu, exit_qualification,
5645                                 vmx_instruction_info, &gva))
5646                         return 1;
5647                 /* _system ok, as nested_vmx_check_permission verified cpl=0 */
5648                 kvm_write_guest_virt_system(&vcpu->arch.emulate_ctxt, gva,
5649                              &field_value, (is_long_mode(vcpu) ? 8 : 4), NULL);
5650         }
5651
5652         nested_vmx_succeed(vcpu);
5653         skip_emulated_instruction(vcpu);
5654         return 1;
5655 }
5656
5657
5658 static int handle_vmwrite(struct kvm_vcpu *vcpu)
5659 {
5660         unsigned long field;
5661         gva_t gva;
5662         unsigned long exit_qualification = vmcs_readl(EXIT_QUALIFICATION);
5663         u32 vmx_instruction_info = vmcs_read32(VMX_INSTRUCTION_INFO);
5664         char *p;
5665         short offset;
5666         /* The value to write might be 32 or 64 bits, depending on L1's long
5667          * mode, and eventually we need to write that into a field of several
5668          * possible lengths. The code below first zero-extends the value to 64
5669          * bit (field_value), and then copies only the appropriate number of
5670          * bits into the vmcs12 field.
5671          */
5672         u64 field_value = 0;
5673         struct x86_exception e;
5674
5675         if (!nested_vmx_check_permission(vcpu) ||
5676             !nested_vmx_check_vmcs12(vcpu))
5677                 return 1;
5678
5679         if (vmx_instruction_info & (1u << 10))
5680                 field_value = kvm_register_read(vcpu,
5681                         (((vmx_instruction_info) >> 3) & 0xf));
5682         else {
5683                 if (get_vmx_mem_address(vcpu, exit_qualification,
5684                                 vmx_instruction_info, &gva))
5685                         return 1;
5686                 if (kvm_read_guest_virt(&vcpu->arch.emulate_ctxt, gva,
5687                            &field_value, (is_long_mode(vcpu) ? 8 : 4), &e)) {
5688                         kvm_inject_page_fault(vcpu, &e);
5689                         return 1;
5690                 }
5691         }
5692
5693
5694         field = kvm_register_read(vcpu, (((vmx_instruction_info) >> 28) & 0xf));
5695         if (vmcs_field_readonly(field)) {
5696                 nested_vmx_failValid(vcpu,
5697                         VMXERR_VMWRITE_READ_ONLY_VMCS_COMPONENT);
5698                 skip_emulated_instruction(vcpu);
5699                 return 1;
5700         }
5701
5702         offset = vmcs_field_to_offset(field);
5703         if (offset < 0) {
5704                 nested_vmx_failValid(vcpu, VMXERR_UNSUPPORTED_VMCS_COMPONENT);
5705                 skip_emulated_instruction(vcpu);
5706                 return 1;
5707         }
5708         p = ((char *) get_vmcs12(vcpu)) + offset;
5709
5710         switch (vmcs_field_type(field)) {
5711         case VMCS_FIELD_TYPE_U16:
5712                 *(u16 *)p = field_value;
5713                 break;
5714         case VMCS_FIELD_TYPE_U32:
5715                 *(u32 *)p = field_value;
5716                 break;
5717         case VMCS_FIELD_TYPE_U64:
5718                 *(u64 *)p = field_value;
5719                 break;
5720         case VMCS_FIELD_TYPE_NATURAL_WIDTH:
5721                 *(natural_width *)p = field_value;
5722                 break;
5723         default:
5724                 nested_vmx_failValid(vcpu, VMXERR_UNSUPPORTED_VMCS_COMPONENT);
5725                 skip_emulated_instruction(vcpu);
5726                 return 1;
5727         }
5728
5729         nested_vmx_succeed(vcpu);
5730         skip_emulated_instruction(vcpu);
5731         return 1;
5732 }
5733
5734 /* Emulate the VMPTRLD instruction */
5735 static int handle_vmptrld(struct kvm_vcpu *vcpu)
5736 {
5737         struct vcpu_vmx *vmx = to_vmx(vcpu);
5738         gva_t gva;
5739         gpa_t vmptr;
5740         struct x86_exception e;
5741
5742         if (!nested_vmx_check_permission(vcpu))
5743                 return 1;
5744
5745         if (get_vmx_mem_address(vcpu, vmcs_readl(EXIT_QUALIFICATION),
5746                         vmcs_read32(VMX_INSTRUCTION_INFO), &gva))
5747                 return 1;
5748
5749         if (kvm_read_guest_virt(&vcpu->arch.emulate_ctxt, gva, &vmptr,
5750                                 sizeof(vmptr), &e)) {
5751                 kvm_inject_page_fault(vcpu, &e);
5752                 return 1;
5753         }
5754
5755         if (!IS_ALIGNED(vmptr, PAGE_SIZE)) {
5756                 nested_vmx_failValid(vcpu, VMXERR_VMPTRLD_INVALID_ADDRESS);
5757                 skip_emulated_instruction(vcpu);
5758                 return 1;
5759         }
5760
5761         if (vmx->nested.current_vmptr != vmptr) {
5762                 struct vmcs12 *new_vmcs12;
5763                 struct page *page;
5764                 page = nested_get_page(vcpu, vmptr);
5765                 if (page == NULL) {
5766                         nested_vmx_failInvalid(vcpu);
5767                         skip_emulated_instruction(vcpu);
5768                         return 1;
5769                 }
5770                 new_vmcs12 = kmap(page);
5771                 if (new_vmcs12->revision_id != VMCS12_REVISION) {
5772                         kunmap(page);
5773                         nested_release_page_clean(page);
5774                         nested_vmx_failValid(vcpu,
5775                                 VMXERR_VMPTRLD_INCORRECT_VMCS_REVISION_ID);
5776                         skip_emulated_instruction(vcpu);
5777                         return 1;
5778                 }
5779                 if (vmx->nested.current_vmptr != -1ull) {
5780                         kunmap(vmx->nested.current_vmcs12_page);
5781                         nested_release_page(vmx->nested.current_vmcs12_page);
5782                 }
5783
5784                 vmx->nested.current_vmptr = vmptr;
5785                 vmx->nested.current_vmcs12 = new_vmcs12;
5786                 vmx->nested.current_vmcs12_page = page;
5787         }
5788
5789         nested_vmx_succeed(vcpu);
5790         skip_emulated_instruction(vcpu);
5791         return 1;
5792 }
5793
5794 /* Emulate the VMPTRST instruction */
5795 static int handle_vmptrst(struct kvm_vcpu *vcpu)
5796 {
5797         unsigned long exit_qualification = vmcs_readl(EXIT_QUALIFICATION);
5798         u32 vmx_instruction_info = vmcs_read32(VMX_INSTRUCTION_INFO);
5799         gva_t vmcs_gva;
5800         struct x86_exception e;
5801
5802         if (!nested_vmx_check_permission(vcpu))
5803                 return 1;
5804
5805         if (get_vmx_mem_address(vcpu, exit_qualification,
5806                         vmx_instruction_info, &vmcs_gva))
5807                 return 1;
5808         /* ok to use *_system, as nested_vmx_check_permission verified cpl=0 */
5809         if (kvm_write_guest_virt_system(&vcpu->arch.emulate_ctxt, vmcs_gva,
5810                                  (void *)&to_vmx(vcpu)->nested.current_vmptr,
5811                                  sizeof(u64), &e)) {
5812                 kvm_inject_page_fault(vcpu, &e);
5813                 return 1;
5814         }
5815         nested_vmx_succeed(vcpu);
5816         skip_emulated_instruction(vcpu);
5817         return 1;
5818 }
5819
5820 /*
5821  * The exit handlers return 1 if the exit was handled fully and guest execution
5822  * may resume.  Otherwise they set the kvm_run parameter to indicate what needs
5823  * to be done to userspace and return 0.
5824  */
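/*
 * For illustration, a hypothetical handler obeying that contract looks
 * roughly like:
 *
 *	static int handle_foo(struct kvm_vcpu *vcpu)
 *	{
 *		if (emulate_foo(vcpu))
 *			return 1;	// fully handled, resume the guest
 *		vcpu->run->exit_reason = KVM_EXIT_UNKNOWN; // or a more specific reason
 *		return 0;		// let userspace deal with it
 *	}
 *
 * handle_foo() and emulate_foo() are made-up names; the real handlers below
 * fill in kvm_run with exit reasons such as KVM_EXIT_FAIL_ENTRY or
 * KVM_EXIT_INTERNAL_ERROR before returning 0.
 */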
5825 static int (*const kvm_vmx_exit_handlers[])(struct kvm_vcpu *vcpu) = {
5826         [EXIT_REASON_EXCEPTION_NMI]           = handle_exception,
5827         [EXIT_REASON_EXTERNAL_INTERRUPT]      = handle_external_interrupt,
5828         [EXIT_REASON_TRIPLE_FAULT]            = handle_triple_fault,
5829         [EXIT_REASON_NMI_WINDOW]              = handle_nmi_window,
5830         [EXIT_REASON_IO_INSTRUCTION]          = handle_io,
5831         [EXIT_REASON_CR_ACCESS]               = handle_cr,
5832         [EXIT_REASON_DR_ACCESS]               = handle_dr,
5833         [EXIT_REASON_CPUID]                   = handle_cpuid,
5834         [EXIT_REASON_MSR_READ]                = handle_rdmsr,
5835         [EXIT_REASON_MSR_WRITE]               = handle_wrmsr,
5836         [EXIT_REASON_PENDING_INTERRUPT]       = handle_interrupt_window,
5837         [EXIT_REASON_HLT]                     = handle_halt,
5838         [EXIT_REASON_INVD]                    = handle_invd,
5839         [EXIT_REASON_INVLPG]                  = handle_invlpg,
5840         [EXIT_REASON_RDPMC]                   = handle_rdpmc,
5841         [EXIT_REASON_VMCALL]                  = handle_vmcall,
5842         [EXIT_REASON_VMCLEAR]                 = handle_vmclear,
5843         [EXIT_REASON_VMLAUNCH]                = handle_vmlaunch,
5844         [EXIT_REASON_VMPTRLD]                 = handle_vmptrld,
5845         [EXIT_REASON_VMPTRST]                 = handle_vmptrst,
5846         [EXIT_REASON_VMREAD]                  = handle_vmread,
5847         [EXIT_REASON_VMRESUME]                = handle_vmresume,
5848         [EXIT_REASON_VMWRITE]                 = handle_vmwrite,
5849         [EXIT_REASON_VMOFF]                   = handle_vmoff,
5850         [EXIT_REASON_VMON]                    = handle_vmon,
5851         [EXIT_REASON_TPR_BELOW_THRESHOLD]     = handle_tpr_below_threshold,
5852         [EXIT_REASON_APIC_ACCESS]             = handle_apic_access,
5853         [EXIT_REASON_APIC_WRITE]              = handle_apic_write,
5854         [EXIT_REASON_WBINVD]                  = handle_wbinvd,
5855         [EXIT_REASON_XSETBV]                  = handle_xsetbv,
5856         [EXIT_REASON_TASK_SWITCH]             = handle_task_switch,
5857         [EXIT_REASON_MCE_DURING_VMENTRY]      = handle_machine_check,
5858         [EXIT_REASON_EPT_VIOLATION]           = handle_ept_violation,
5859         [EXIT_REASON_EPT_MISCONFIG]           = handle_ept_misconfig,
5860         [EXIT_REASON_PAUSE_INSTRUCTION]       = handle_pause,
5861         [EXIT_REASON_MWAIT_INSTRUCTION]       = handle_invalid_op,
5862         [EXIT_REASON_MONITOR_INSTRUCTION]     = handle_invalid_op,
5863 };
5864
5865 static const int kvm_vmx_max_exit_handlers =
5866         ARRAY_SIZE(kvm_vmx_exit_handlers);
5867
5868 /*
5869  * Return 1 if we should exit from L2 to L1 to handle an MSR access,
5870  * rather than handle it ourselves in L0. I.e., check whether L1 wants to
5871  * trap the current event (a read or write of a specific MSR) according to
5872  * its MSR bitmap. This may be the case even when L0 doesn't use MSR bitmaps.
5873  */
5874 static bool nested_vmx_exit_handled_msr(struct kvm_vcpu *vcpu,
5875         struct vmcs12 *vmcs12, u32 exit_reason)
5876 {
5877         u32 msr_index = vcpu->arch.regs[VCPU_REGS_RCX];
5878         gpa_t bitmap;
5879
5880         if (!nested_cpu_has(get_vmcs12(vcpu), CPU_BASED_USE_MSR_BITMAPS))
5881                 return 1;
5882
5883         /*
5884          * The MSR_BITMAP page is divided into four 1024-byte bitmaps,
5885          * for the four combinations of read/write and low/high MSR numbers.
5886          * First we need to figure out which of the four to use:
5887          */
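	/*
	 * Worked example: a WRMSR of MSR 0xc0000080 (EFER) selects the
	 * write-high quadrant, i.e. an offset of 2048 (write) + 1024 (high)
	 * = 3072 into the page; the bit index is 0xc0000080 - 0xc0000000
	 * = 0x80, so the code below tests bit (0x80 & 7) = 0 of byte
	 * 3072 + 0x80/8 = 3088.
	 */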
5888         bitmap = vmcs12->msr_bitmap;
5889         if (exit_reason == EXIT_REASON_MSR_WRITE)
5890                 bitmap += 2048;
5891         if (msr_index >= 0xc0000000) {
5892                 msr_index -= 0xc0000000;
5893                 bitmap += 1024;
5894         }
5895
5896         /* Then read the msr_index'th bit from this bitmap: */
5897         if (msr_index < 1024*8) {
5898                 unsigned char b;
5899                 kvm_read_guest(vcpu->kvm, bitmap + msr_index/8, &b, 1);
5900                 return 1 & (b >> (msr_index & 7));
5901         } else
5902                 return 1; /* let L1 handle the out-of-range MSR index */
5903 }
5904
5905 /*
5906  * Return 1 if we should exit from L2 to L1 to handle a CR access exit,
5907  * rather than handle it ourselves in L0. I.e., check if L1 wanted to
5908  * intercept (via guest_host_mask etc.) the current event.
5909  */
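/*
 * Worked example: suppose L1 set the TS bit in cr0_guest_host_mask and left
 * TS clear in cr0_read_shadow. An L2 "mov to cr0" whose source value has TS
 * set then differs from the read shadow in a bit L1 claimed for itself, so
 * the CR0 case below returns 1 and the exit is reflected to L1; if the new
 * value agrees with the shadow in every masked bit, L0 handles the access
 * itself and returns 0.
 */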
5910 static bool nested_vmx_exit_handled_cr(struct kvm_vcpu *vcpu,
5911         struct vmcs12 *vmcs12)
5912 {
5913         unsigned long exit_qualification = vmcs_readl(EXIT_QUALIFICATION);
5914         int cr = exit_qualification & 15;
5915         int reg = (exit_qualification >> 8) & 15;
5916         unsigned long val = kvm_register_read(vcpu, reg);
5917
5918         switch ((exit_qualification >> 4) & 3) {
5919         case 0: /* mov to cr */
5920                 switch (cr) {
5921                 case 0:
5922                         if (vmcs12->cr0_guest_host_mask &
5923                             (val ^ vmcs12->cr0_read_shadow))
5924                                 return 1;
5925                         break;
5926                 case 3:
5927                         if ((vmcs12->cr3_target_count >= 1 &&
5928                                         vmcs12->cr3_target_value0 == val) ||
5929                                 (vmcs12->cr3_target_count >= 2 &&
5930                                         vmcs12->cr3_target_value1 == val) ||
5931                                 (vmcs12->cr3_target_count >= 3 &&
5932                                         vmcs12->cr3_target_value2 == val) ||
5933                                 (vmcs12->cr3_target_count >= 4 &&
5934                                         vmcs12->cr3_target_value3 == val))
5935                                 return 0;
5936                         if (nested_cpu_has(vmcs12, CPU_BASED_CR3_LOAD_EXITING))
5937                                 return 1;
5938                         break;
5939                 case 4:
5940                         if (vmcs12->cr4_guest_host_mask &
5941                             (vmcs12->cr4_read_shadow ^ val))
5942                                 return 1;
5943                         break;
5944                 case 8:
5945                         if (nested_cpu_has(vmcs12, CPU_BASED_CR8_LOAD_EXITING))
5946                                 return 1;
5947                         break;
5948                 }
5949                 break;
5950         case 2: /* clts */
5951                 if ((vmcs12->cr0_guest_host_mask & X86_CR0_TS) &&
5952                     (vmcs12->cr0_read_shadow & X86_CR0_TS))
5953                         return 1;
5954                 break;
5955         case 1: /* mov from cr */
5956                 switch (cr) {
5957                 case 3:
5958                         if (vmcs12->cpu_based_vm_exec_control &
5959                             CPU_BASED_CR3_STORE_EXITING)
5960                                 return 1;
5961                         break;
5962                 case 8:
5963                         if (vmcs12->cpu_based_vm_exec_control &
5964                             CPU_BASED_CR8_STORE_EXITING)
5965                                 return 1;
5966                         break;
5967                 }
5968                 break;
5969         case 3: /* lmsw */
5970                 /*
5971                  * lmsw can change bits 1..3 of cr0, and only set bit 0 of
5972                  * cr0. Other attempted changes are ignored, with no exit.
5973                  */
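		/*
		 * Example: with PE (bit 0) in cr0_guest_host_mask and
		 * cr0_read_shadow.PE clear, an L2 "lmsw $1" tries to set a
		 * bit L1 owns, so the second test below returns 1; "lmsw $0"
		 * leaves PE untouched (lmsw cannot clear it anyway), so no
		 * exit is reflected to L1.
		 */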
5974                 if (vmcs12->cr0_guest_host_mask & 0xe &
5975                     (val ^ vmcs12->cr0_read_shadow))
5976                         return 1;
5977                 if ((vmcs12->cr0_guest_host_mask & 0x1) &&
5978                     !(vmcs12->cr0_read_shadow & 0x1) &&
5979                     (val & 0x1))
5980                         return 1;
5981                 break;
5982         }
5983         return 0;
5984 }
5985
5986 /*
5987  * Return 1 if we should exit from L2 to L1 to handle an exit, or 0 if we
5988  * should handle it ourselves in L0 (and then continue L2). Only call this
5989  * when in is_guest_mode (L2).
5990  */
5991 static bool nested_vmx_exit_handled(struct kvm_vcpu *vcpu)
5992 {
5993         u32 exit_reason = vmcs_read32(VM_EXIT_REASON);
5994         u32 intr_info = vmcs_read32(VM_EXIT_INTR_INFO);
5995         struct vcpu_vmx *vmx = to_vmx(vcpu);
5996         struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
5997
5998         if (vmx->nested.nested_run_pending)
5999                 return 0;
6000
6001         if (unlikely(vmx->fail)) {
6002                 pr_info_ratelimited("%s failed vm entry %x\n", __func__,
6003                                     vmcs_read32(VM_INSTRUCTION_ERROR));
6004                 return 1;
6005         }
6006
6007         switch (exit_reason) {
6008         case EXIT_REASON_EXCEPTION_NMI:
6009                 if (!is_exception(intr_info))
6010                         return 0;
6011                 else if (is_page_fault(intr_info))
6012                         return enable_ept;
6013                 return vmcs12->exception_bitmap &
6014                                 (1u << (intr_info & INTR_INFO_VECTOR_MASK));
6015         case EXIT_REASON_EXTERNAL_INTERRUPT:
6016                 return 0;
6017         case EXIT_REASON_TRIPLE_FAULT:
6018                 return 1;
6019         case EXIT_REASON_PENDING_INTERRUPT:
6020         case EXIT_REASON_NMI_WINDOW:
6021                 /*
6022                  * prepare_vmcs02() set the CPU_BASED_VIRTUAL_INTR_PENDING bit
6023                  * (aka Interrupt Window Exiting) only when L1 turned it on,
6024                  * so if we got a PENDING_INTERRUPT exit, this must be for L1.
6025                  * Same for NMI Window Exiting.
6026                  */
6027                 return 1;
6028         case EXIT_REASON_TASK_SWITCH:
6029                 return 1;
6030         case EXIT_REASON_CPUID:
6031                 return 1;
6032         case EXIT_REASON_HLT:
6033                 return nested_cpu_has(vmcs12, CPU_BASED_HLT_EXITING);
6034         case EXIT_REASON_INVD:
6035                 return 1;
6036         case EXIT_REASON_INVLPG:
6037                 return nested_cpu_has(vmcs12, CPU_BASED_INVLPG_EXITING);
6038         case EXIT_REASON_RDPMC:
6039                 return nested_cpu_has(vmcs12, CPU_BASED_RDPMC_EXITING);
6040         case EXIT_REASON_RDTSC:
6041                 return nested_cpu_has(vmcs12, CPU_BASED_RDTSC_EXITING);
6042         case EXIT_REASON_VMCALL: case EXIT_REASON_VMCLEAR:
6043         case EXIT_REASON_VMLAUNCH: case EXIT_REASON_VMPTRLD:
6044         case EXIT_REASON_VMPTRST: case EXIT_REASON_VMREAD:
6045         case EXIT_REASON_VMRESUME: case EXIT_REASON_VMWRITE:
6046         case EXIT_REASON_VMOFF: case EXIT_REASON_VMON:
6047                 /*
6048                  * VMX instructions trap unconditionally. This allows L1 to
6049                  * emulate them for its L2 guest, i.e., allows 3-level nesting!
6050                  */
6051                 return 1;
6052         case EXIT_REASON_CR_ACCESS:
6053                 return nested_vmx_exit_handled_cr(vcpu, vmcs12);
6054         case EXIT_REASON_DR_ACCESS:
6055                 return nested_cpu_has(vmcs12, CPU_BASED_MOV_DR_EXITING);
6056         case EXIT_REASON_IO_INSTRUCTION:
6057                 /* TODO: support IO bitmaps */
6058                 return 1;
6059         case EXIT_REASON_MSR_READ:
6060         case EXIT_REASON_MSR_WRITE:
6061                 return nested_vmx_exit_handled_msr(vcpu, vmcs12, exit_reason);
6062         case EXIT_REASON_INVALID_STATE:
6063                 return 1;
6064         case EXIT_REASON_MWAIT_INSTRUCTION:
6065                 return nested_cpu_has(vmcs12, CPU_BASED_MWAIT_EXITING);
6066         case EXIT_REASON_MONITOR_INSTRUCTION:
6067                 return nested_cpu_has(vmcs12, CPU_BASED_MONITOR_EXITING);
6068         case EXIT_REASON_PAUSE_INSTRUCTION:
6069                 return nested_cpu_has(vmcs12, CPU_BASED_PAUSE_EXITING) ||
6070                         nested_cpu_has2(vmcs12,
6071                                 SECONDARY_EXEC_PAUSE_LOOP_EXITING);
6072         case EXIT_REASON_MCE_DURING_VMENTRY:
6073                 return 0;
6074         case EXIT_REASON_TPR_BELOW_THRESHOLD:
6075                 return 1;
6076         case EXIT_REASON_APIC_ACCESS:
6077                 return nested_cpu_has2(vmcs12,
6078                         SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES);
6079         case EXIT_REASON_EPT_VIOLATION:
6080         case EXIT_REASON_EPT_MISCONFIG:
6081                 return 0;
6082         case EXIT_REASON_WBINVD:
6083                 return nested_cpu_has2(vmcs12, SECONDARY_EXEC_WBINVD_EXITING);
6084         case EXIT_REASON_XSETBV:
6085                 return 1;
6086         default:
6087                 return 1;
6088         }
6089 }
6090
6091 static void vmx_get_exit_info(struct kvm_vcpu *vcpu, u64 *info1, u64 *info2)
6092 {
6093         *info1 = vmcs_readl(EXIT_QUALIFICATION);
6094         *info2 = vmcs_read32(VM_EXIT_INTR_INFO);
6095 }
6096
6097 /*
6098  * The guest has exited.  See if we can fix it or if we need userspace
6099  * assistance.
6100  */
6101 static int vmx_handle_exit(struct kvm_vcpu *vcpu)
6102 {
6103         struct vcpu_vmx *vmx = to_vmx(vcpu);
6104         u32 exit_reason = vmx->exit_reason;
6105         u32 vectoring_info = vmx->idt_vectoring_info;
6106
6107         /* If guest state is invalid, start emulating */
6108         if (vmx->emulation_required)
6109                 return handle_invalid_guest_state(vcpu);
6110
6111         /*
6112          * the KVM_REQ_EVENT optimization bit is only on for one entry, and if
6113          * we did not inject a still-pending event to L1 now because of
6114          * nested_run_pending, we need to re-enable this bit.
6115          */
6116         if (vmx->nested.nested_run_pending)
6117                 kvm_make_request(KVM_REQ_EVENT, vcpu);
6118
6119         if (!is_guest_mode(vcpu) && (exit_reason == EXIT_REASON_VMLAUNCH ||
6120             exit_reason == EXIT_REASON_VMRESUME))
6121                 vmx->nested.nested_run_pending = 1;
6122         else
6123                 vmx->nested.nested_run_pending = 0;
6124
6125         if (is_guest_mode(vcpu) && nested_vmx_exit_handled(vcpu)) {
6126                 nested_vmx_vmexit(vcpu);
6127                 return 1;
6128         }
6129
6130         if (exit_reason & VMX_EXIT_REASONS_FAILED_VMENTRY) {
6131                 vcpu->run->exit_reason = KVM_EXIT_FAIL_ENTRY;
6132                 vcpu->run->fail_entry.hardware_entry_failure_reason
6133                         = exit_reason;
6134                 return 0;
6135         }
6136
6137         if (unlikely(vmx->fail)) {
6138                 vcpu->run->exit_reason = KVM_EXIT_FAIL_ENTRY;
6139                 vcpu->run->fail_entry.hardware_entry_failure_reason
6140                         = vmcs_read32(VM_INSTRUCTION_ERROR);
6141                 return 0;
6142         }
6143
6144         /*
6145          * Note:
6146          * Do not try to fix EXIT_REASON_EPT_MISCONFIG if it was caused by an
6147          * event delivery, since that indicates the guest is accessing MMIO.
6148          * The VM exit can be triggered again after returning to the guest,
6149          * which would cause an infinite loop.
6150          */
6151         if ((vectoring_info & VECTORING_INFO_VALID_MASK) &&
6152                         (exit_reason != EXIT_REASON_EXCEPTION_NMI &&
6153                         exit_reason != EXIT_REASON_EPT_VIOLATION &&
6154                         exit_reason != EXIT_REASON_TASK_SWITCH)) {
6155                 vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
6156                 vcpu->run->internal.suberror = KVM_INTERNAL_ERROR_DELIVERY_EV;
6157                 vcpu->run->internal.ndata = 2;
6158                 vcpu->run->internal.data[0] = vectoring_info;
6159                 vcpu->run->internal.data[1] = exit_reason;
6160                 return 0;
6161         }
6162
6163         if (unlikely(!cpu_has_virtual_nmis() && vmx->soft_vnmi_blocked &&
6164             !(is_guest_mode(vcpu) && nested_cpu_has_virtual_nmis(
6165                                         get_vmcs12(vcpu), vcpu)))) {
6166                 if (vmx_interrupt_allowed(vcpu)) {
6167                         vmx->soft_vnmi_blocked = 0;
6168                 } else if (vmx->vnmi_blocked_time > 1000000000LL &&
6169                            vcpu->arch.nmi_pending) {
6170                         /*
6171                          * This CPU gives us no help in finding the end of an
6172                          * NMI-blocked window if the guest runs with IRQs
6173                          * disabled. So we pull the trigger after 1 s of
6174                          * futile waiting, but inform the user about this.
6175                          */
6176                         printk(KERN_WARNING "%s: Breaking out of NMI-blocked "
6177                                "state on VCPU %d after 1 s timeout\n",
6178                                __func__, vcpu->vcpu_id);
6179                         vmx->soft_vnmi_blocked = 0;
6180                 }
6181         }
6182
6183         if (exit_reason < kvm_vmx_max_exit_handlers
6184             && kvm_vmx_exit_handlers[exit_reason])
6185                 return kvm_vmx_exit_handlers[exit_reason](vcpu);
6186         else {
6187                 vcpu->run->exit_reason = KVM_EXIT_UNKNOWN;
6188                 vcpu->run->hw.hardware_exit_reason = exit_reason;
6189         }
6190         return 0;
6191 }
6192
6193 static void update_cr8_intercept(struct kvm_vcpu *vcpu, int tpr, int irr)
6194 {
6195         if (irr == -1 || tpr < irr) {
6196                 vmcs_write32(TPR_THRESHOLD, 0);
6197                 return;
6198         }
6199
6200         vmcs_write32(TPR_THRESHOLD, irr);
6201 }
6202
6203 static void vmx_set_virtual_x2apic_mode(struct kvm_vcpu *vcpu, bool set)
6204 {
6205         u32 sec_exec_control;
6206
6207         /*
6208          * There is no point in enabling virtualize-x2APIC mode without
6209          * also enabling APIC register virtualization (apicv).
6210          */
6211         if (!cpu_has_vmx_virtualize_x2apic_mode() || !enable_apicv_reg)
6212                 return;
6213
6214         if (!vm_need_tpr_shadow(vcpu->kvm))
6215                 return;
6216
6217         sec_exec_control = vmcs_read32(SECONDARY_VM_EXEC_CONTROL);
6218
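	/*
	 * Note: the SDM treats "virtualize x2APIC mode" and "virtualize APIC
	 * accesses" as mutually exclusive secondary controls, which is why
	 * whichever one is being enabled below, the other is cleared in the
	 * same write.
	 */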
6219         if (set) {
6220                 sec_exec_control &= ~SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES;
6221                 sec_exec_control |= SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE;
6222         } else {
6223                 sec_exec_control &= ~SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE;
6224                 sec_exec_control |= SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES;
6225         }
6226         vmcs_write32(SECONDARY_VM_EXEC_CONTROL, sec_exec_control);
6227
6228         vmx_set_msr_bitmap(vcpu);
6229 }
6230
6231 static void vmx_complete_atomic_exit(struct vcpu_vmx *vmx)
6232 {
6233         u32 exit_intr_info;
6234
6235         if (!(vmx->exit_reason == EXIT_REASON_MCE_DURING_VMENTRY
6236               || vmx->exit_reason == EXIT_REASON_EXCEPTION_NMI))
6237                 return;
6238
6239         vmx->exit_intr_info = vmcs_read32(VM_EXIT_INTR_INFO);
6240         exit_intr_info = vmx->exit_intr_info;
6241
6242         /* Handle machine checks before interrupts are enabled */
6243         if (is_machine_check(exit_intr_info))
6244                 kvm_machine_check();
6245
6246         /* We need to handle NMIs before interrupts are enabled */
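	/*
	 * The NMI was consumed by the VM exit, so it is re-delivered to the
	 * host in software: "int $2" invokes vector 2, the host's NMI
	 * handler, directly through the IDT.
	 */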
6247         if ((exit_intr_info & INTR_INFO_INTR_TYPE_MASK) == INTR_TYPE_NMI_INTR &&
6248             (exit_intr_info & INTR_INFO_VALID_MASK)) {
6249                 kvm_before_handle_nmi(&vmx->vcpu);
6250                 asm("int $2");
6251                 kvm_after_handle_nmi(&vmx->vcpu);
6252         }
6253 }
6254
6255 static void vmx_recover_nmi_blocking(struct vcpu_vmx *vmx)
6256 {
6257         u32 exit_intr_info;
6258         bool unblock_nmi;
6259         u8 vector;
6260         bool idtv_info_valid;
6261
6262         idtv_info_valid = vmx->idt_vectoring_info & VECTORING_INFO_VALID_MASK;
6263
6264         if (cpu_has_virtual_nmis()) {
6265                 if (vmx->nmi_known_unmasked)
6266                         return;
6267                 /*
6268                  * Can't use vmx->exit_intr_info since we're not sure what
6269                  * the exit reason is.
6270                  */
6271                 exit_intr_info = vmcs_read32(VM_EXIT_INTR_INFO);
6272                 unblock_nmi = (exit_intr_info & INTR_INFO_UNBLOCK_NMI) != 0;
6273                 vector = exit_intr_info & INTR_INFO_VECTOR_MASK;
6274                 /*
6275                  * SDM 3: 27.7.1.2 (September 2008)
6276                  * Re-set bit "block by NMI" before VM entry if the vmexit was
6277                  * caused by a guest IRET fault.
6278                  * SDM 3: 23.2.2 (September 2008)
6279                  * Bit 12 is undefined in any of the following cases:
6280                  *  If the VM exit sets the valid bit in the IDT-vectoring
6281                  *   information field.
6282                  *  If the VM exit is due to a double fault.
6283                  */
6284                 if ((exit_intr_info & INTR_INFO_VALID_MASK) && unblock_nmi &&
6285                     vector != DF_VECTOR && !idtv_info_valid)
6286                         vmcs_set_bits(GUEST_INTERRUPTIBILITY_INFO,
6287                                       GUEST_INTR_STATE_NMI);
6288                 else
6289                         vmx->nmi_known_unmasked =
6290                                 !(vmcs_read32(GUEST_INTERRUPTIBILITY_INFO)
6291                                   & GUEST_INTR_STATE_NMI);
6292         } else if (unlikely(vmx->soft_vnmi_blocked))
6293                 vmx->vnmi_blocked_time +=
6294                         ktime_to_ns(ktime_sub(ktime_get(), vmx->entry_time));
6295 }
6296
6297 static void __vmx_complete_interrupts(struct vcpu_vmx *vmx,
6298                                       u32 idt_vectoring_info,
6299                                       int instr_len_field,
6300                                       int error_code_field)
6301 {
6302         u8 vector;
6303         int type;
6304         bool idtv_info_valid;
6305
6306         idtv_info_valid = idt_vectoring_info & VECTORING_INFO_VALID_MASK;
6307
6308         vmx->vcpu.arch.nmi_injected = false;
6309         kvm_clear_exception_queue(&vmx->vcpu);
6310         kvm_clear_interrupt_queue(&vmx->vcpu);
6311
6312         if (!idtv_info_valid)
6313                 return;
6314
6315         kvm_make_request(KVM_REQ_EVENT, &vmx->vcpu);
6316
6317         vector = idt_vectoring_info & VECTORING_INFO_VECTOR_MASK;
6318         type = idt_vectoring_info & VECTORING_INFO_TYPE_MASK;
6319
6320         switch (type) {
6321         case INTR_TYPE_NMI_INTR:
6322                 vmx->vcpu.arch.nmi_injected = true;
6323                 /*
6324                  * SDM 3: 27.7.1.2 (September 2008)
6325                  * Clear bit "block by NMI" before VM entry if an NMI
6326                  * delivery faulted.
6327                  */
6328                 vmx_set_nmi_mask(&vmx->vcpu, false);
6329                 break;
6330         case INTR_TYPE_SOFT_EXCEPTION:
6331                 vmx->vcpu.arch.event_exit_inst_len =
6332                         vmcs_read32(instr_len_field);
6333                 /* fall through */
6334         case INTR_TYPE_HARD_EXCEPTION:
6335                 if (idt_vectoring_info & VECTORING_INFO_DELIVER_CODE_MASK) {
6336                         u32 err = vmcs_read32(error_code_field);
6337                         kvm_queue_exception_e(&vmx->vcpu, vector, err);
6338                 } else
6339                         kvm_queue_exception(&vmx->vcpu, vector);
6340                 break;
6341         case INTR_TYPE_SOFT_INTR:
6342                 vmx->vcpu.arch.event_exit_inst_len =
6343                         vmcs_read32(instr_len_field);
6344                 /* fall through */
6345         case INTR_TYPE_EXT_INTR:
6346                 kvm_queue_interrupt(&vmx->vcpu, vector,
6347                         type == INTR_TYPE_SOFT_INTR);
6348                 break;
6349         default:
6350                 break;
6351         }
6352 }
6353
6354 static void vmx_complete_interrupts(struct vcpu_vmx *vmx)
6355 {
6356         if (is_guest_mode(&vmx->vcpu))
6357                 return;
6358         __vmx_complete_interrupts(vmx, vmx->idt_vectoring_info,
6359                                   VM_EXIT_INSTRUCTION_LEN,
6360                                   IDT_VECTORING_ERROR_CODE);