KVM: Replace reads of vcpu->arch.cr3 by an accessor
[linux-3.10.git] arch/x86/kvm/kvm_cache_regs.h
#ifndef ASM_KVM_CACHE_REGS_H
#define ASM_KVM_CACHE_REGS_H

#define KVM_POSSIBLE_CR0_GUEST_BITS X86_CR0_TS
#define KVM_POSSIBLE_CR4_GUEST_BITS                               \
        (X86_CR4_PVI | X86_CR4_DE | X86_CR4_PCE | X86_CR4_OSFXSR  \
         | X86_CR4_OSXMMEXCPT | X86_CR4_PGE)

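/*
 * General-purpose registers are cached in vcpu->arch.regs.  The
 * regs_avail bitmap tracks which cached values are current; on a miss,
 * the vendor module (VMX/SVM) refills the cache from hardware state
 * via kvm_x86_ops->cache_reg().
 */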
static inline unsigned long kvm_register_read(struct kvm_vcpu *vcpu,
                                              enum kvm_reg reg)
{
        if (!test_bit(reg, (unsigned long *)&vcpu->arch.regs_avail))
                kvm_x86_ops->cache_reg(vcpu, reg);

        return vcpu->arch.regs[reg];
}

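/*
 * Writes go to the cache only.  Setting regs_dirty tells the vendor
 * module to flush the value to hardware state before the next VM
 * entry; setting regs_avail keeps later reads from refetching a stale
 * hardware value over the new one.
 */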
static inline void kvm_register_write(struct kvm_vcpu *vcpu,
                                      enum kvm_reg reg,
                                      unsigned long val)
{
        vcpu->arch.regs[reg] = val;
        __set_bit(reg, (unsigned long *)&vcpu->arch.regs_dirty);
        __set_bit(reg, (unsigned long *)&vcpu->arch.regs_avail);
}

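/*
 * RIP wrappers for the common pattern of advancing the instruction
 * pointer, e.g. when skipping over an emulated instruction of
 * (hypothetical) decoded length insn_len:
 *
 *        kvm_rip_write(vcpu, kvm_rip_read(vcpu) + insn_len);
 */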
static inline unsigned long kvm_rip_read(struct kvm_vcpu *vcpu)
{
        return kvm_register_read(vcpu, VCPU_REGS_RIP);
}

static inline void kvm_rip_write(struct kvm_vcpu *vcpu, unsigned long val)
{
        kvm_register_write(vcpu, VCPU_REGS_RIP, val);
}

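/*
 * PDPTRs (the four PAE page-directory-pointer-table entries) are also
 * cached lazily.  Refilling them on SVM may have to read guest memory,
 * hence the might_sleep().
 */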
static inline u64 kvm_pdptr_read(struct kvm_vcpu *vcpu, int index)
{
        might_sleep();  /* on svm */

        if (!test_bit(VCPU_EXREG_PDPTR,
                      (unsigned long *)&vcpu->arch.regs_avail))
                kvm_x86_ops->cache_reg(vcpu, VCPU_EXREG_PDPTR);

        return vcpu->arch.walk_mmu->pdptrs[index];
}

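/*
 * Read a PDPTR through an explicit MMU context (e.g. a nested MMU)
 * instead of vcpu->arch.walk_mmu, reloading it from that context's CR3.
 */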
static inline u64 kvm_pdptr_read_mmu(struct kvm_vcpu *vcpu,
                                     struct kvm_mmu *mmu, int index)
{
        load_pdptrs(vcpu, mmu, mmu->get_cr3(vcpu));

        return mmu->pdptrs[index];
}

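/*
 * CR0/CR4 bits listed in KVM_POSSIBLE_CR*_GUEST_BITS may be guest-
 * owned, i.e. changed by the guest without a VM exit.  Before reading
 * such a bit, the cached value must be decached (refreshed) from
 * hardware; bits that always trap are read from the cache directly.
 */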
static inline ulong kvm_read_cr0_bits(struct kvm_vcpu *vcpu, ulong mask)
{
        ulong tmask = mask & KVM_POSSIBLE_CR0_GUEST_BITS;
        if (tmask & vcpu->arch.cr0_guest_owned_bits)
                kvm_x86_ops->decache_cr0_guest_bits(vcpu);
        return vcpu->arch.cr0 & mask;
}

static inline ulong kvm_read_cr0(struct kvm_vcpu *vcpu)
{
        return kvm_read_cr0_bits(vcpu, ~0UL);
}

static inline ulong kvm_read_cr4_bits(struct kvm_vcpu *vcpu, ulong mask)
{
        ulong tmask = mask & KVM_POSSIBLE_CR4_GUEST_BITS;
        if (tmask & vcpu->arch.cr4_guest_owned_bits)
                kvm_x86_ops->decache_cr4_guest_bits(vcpu);
        return vcpu->arch.cr4 & mask;
}

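/*
 * The accessor introduced by this commit: all CR3 reads now funnel
 * through one place, so the cached value can later be refilled lazily
 * like the registers above.  For now vcpu->arch.cr3 is always current.
 */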
static inline ulong kvm_read_cr3(struct kvm_vcpu *vcpu)
{
        return vcpu->arch.cr3;
}

static inline ulong kvm_read_cr4(struct kvm_vcpu *vcpu)
{
        return kvm_read_cr4_bits(vcpu, ~0UL);
}

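/*
 * Assemble the 64-bit EDX:EAX operand pair used by instructions such
 * as WRMSR; "& -1u" truncates each 64-bit GPR to its low 32 bits.
 */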
static inline u64 kvm_read_edx_eax(struct kvm_vcpu *vcpu)
{
        return (kvm_register_read(vcpu, VCPU_REGS_RAX) & -1u)
                | ((u64)(kvm_register_read(vcpu, VCPU_REGS_RDX) & -1u) << 32);
}

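/*
 * HF_GUEST_MASK in hflags tracks whether the vCPU is running a nested
 * guest (L2): set on nested VM entry, cleared on nested VM exit.
 */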
static inline void enter_guest_mode(struct kvm_vcpu *vcpu)
{
        vcpu->arch.hflags |= HF_GUEST_MASK;
}

static inline void leave_guest_mode(struct kvm_vcpu *vcpu)
{
        vcpu->arch.hflags &= ~HF_GUEST_MASK;
}

static inline bool is_guest_mode(struct kvm_vcpu *vcpu)
{
        return vcpu->arch.hflags & HF_GUEST_MASK;
}

#endif