blob: 2bb1f2f66efabcf50854e69b3c456a7130a35acc [file] [log] [blame]
Avi Kivity6aa8b732006-12-10 02:21:36 -08001/*
2 * Kernel-based Virtual Machine driver for Linux
3 *
4 * This module enables machines with Intel VT-x extensions to run virtual
5 * machines without emulation or binary translation.
6 *
7 * Copyright (C) 2006 Qumranet, Inc.
8 *
9 * Authors:
10 * Avi Kivity <avi@qumranet.com>
11 * Yaniv Kamay <yaniv@qumranet.com>
12 *
13 * This work is licensed under the terms of the GNU GPL, version 2. See
14 * the COPYING file in the top-level directory.
15 *
16 */
17
18#include "kvm.h"
Avi Kivitye4956062007-06-28 14:15:57 -040019#include "x86_emulate.h"
20#include "segment_descriptor.h"
Eddie Dong85f455f2007-07-06 12:20:49 +030021#include "irq.h"
Avi Kivity6aa8b732006-12-10 02:21:36 -080022
23#include <linux/kvm.h>
24#include <linux/module.h>
25#include <linux/errno.h>
Avi Kivity6aa8b732006-12-10 02:21:36 -080026#include <linux/percpu.h>
27#include <linux/gfp.h>
Avi Kivity6aa8b732006-12-10 02:21:36 -080028#include <linux/mm.h>
29#include <linux/miscdevice.h>
30#include <linux/vmalloc.h>
Avi Kivity6aa8b732006-12-10 02:21:36 -080031#include <linux/reboot.h>
Avi Kivity6aa8b732006-12-10 02:21:36 -080032#include <linux/debugfs.h>
33#include <linux/highmem.h>
34#include <linux/file.h>
Avi Kivity59ae6c62007-02-12 00:54:48 -080035#include <linux/sysdev.h>
Avi Kivity774c47f2007-02-12 00:54:47 -080036#include <linux/cpu.h>
Alexey Dobriyane8edc6e2007-05-21 01:22:52 +040037#include <linux/sched.h>
Avi Kivityd9e368d2007-06-07 19:18:30 +030038#include <linux/cpumask.h>
39#include <linux/smp.h>
Avi Kivityd6d28162007-06-28 08:38:16 -040040#include <linux/anon_inodes.h>
Avi Kivity04d2cc72007-09-10 18:10:54 +030041#include <linux/profile.h>
Anthony Liguori7aa81cc2007-09-17 14:57:50 -050042#include <linux/kvm_para.h>
Avi Kivity6aa8b732006-12-10 02:21:36 -080043
Avi Kivitye4956062007-06-28 14:15:57 -040044#include <asm/processor.h>
45#include <asm/msr.h>
46#include <asm/io.h>
47#include <asm/uaccess.h>
48#include <asm/desc.h>
Avi Kivity6aa8b732006-12-10 02:21:36 -080049
50MODULE_AUTHOR("Qumranet");
51MODULE_LICENSE("GPL");
52
Avi Kivity133de902007-02-12 00:54:44 -080053static DEFINE_SPINLOCK(kvm_lock);
54static LIST_HEAD(vm_list);
55
Avi Kivity1b6c0162007-05-24 13:03:52 +030056static cpumask_t cpus_hardware_enabled;
57
Christian Ehrhardtcbdd1be2007-09-09 15:41:59 +030058struct kvm_x86_ops *kvm_x86_ops;
Rusty Russellc16f8622007-07-30 21:12:19 +100059struct kmem_cache *kvm_vcpu_cache;
60EXPORT_SYMBOL_GPL(kvm_vcpu_cache);
Avi Kivity1165f5f2007-04-19 17:27:43 +030061
Avi Kivity15ad7142007-07-11 18:17:21 +030062static __read_mostly struct preempt_ops kvm_preempt_ops;
63
Avi Kivity1165f5f2007-04-19 17:27:43 +030064#define STAT_OFFSET(x) offsetof(struct kvm_vcpu, stat.x)
Avi Kivity6aa8b732006-12-10 02:21:36 -080065
66static struct kvm_stats_debugfs_item {
67 const char *name;
Avi Kivity1165f5f2007-04-19 17:27:43 +030068 int offset;
Avi Kivity6aa8b732006-12-10 02:21:36 -080069 struct dentry *dentry;
70} debugfs_entries[] = {
Avi Kivity1165f5f2007-04-19 17:27:43 +030071 { "pf_fixed", STAT_OFFSET(pf_fixed) },
72 { "pf_guest", STAT_OFFSET(pf_guest) },
73 { "tlb_flush", STAT_OFFSET(tlb_flush) },
74 { "invlpg", STAT_OFFSET(invlpg) },
75 { "exits", STAT_OFFSET(exits) },
76 { "io_exits", STAT_OFFSET(io_exits) },
77 { "mmio_exits", STAT_OFFSET(mmio_exits) },
78 { "signal_exits", STAT_OFFSET(signal_exits) },
79 { "irq_window", STAT_OFFSET(irq_window_exits) },
80 { "halt_exits", STAT_OFFSET(halt_exits) },
Eddie Dongb6958ce2007-07-18 12:15:21 +030081 { "halt_wakeup", STAT_OFFSET(halt_wakeup) },
Avi Kivity1165f5f2007-04-19 17:27:43 +030082 { "request_irq", STAT_OFFSET(request_irq_exits) },
83 { "irq_exits", STAT_OFFSET(irq_exits) },
Avi Kivitye6adf282007-04-30 16:07:54 +030084 { "light_exits", STAT_OFFSET(light_exits) },
Eddie Dong2cc51562007-05-21 07:28:09 +030085 { "efer_reload", STAT_OFFSET(efer_reload) },
Avi Kivity1165f5f2007-04-19 17:27:43 +030086 { NULL }
Avi Kivity6aa8b732006-12-10 02:21:36 -080087};
88
89static struct dentry *debugfs_dir;
90
91#define MAX_IO_MSRS 256
92
Rusty Russell707d92f2007-07-17 23:19:08 +100093#define CR0_RESERVED_BITS \
94 (~(unsigned long)(X86_CR0_PE | X86_CR0_MP | X86_CR0_EM | X86_CR0_TS \
95 | X86_CR0_ET | X86_CR0_NE | X86_CR0_WP | X86_CR0_AM \
96 | X86_CR0_NW | X86_CR0_CD | X86_CR0_PG))
Rusty Russell66aee912007-07-17 23:34:16 +100097#define CR4_RESERVED_BITS \
98 (~(unsigned long)(X86_CR4_VME | X86_CR4_PVI | X86_CR4_TSD | X86_CR4_DE\
99 | X86_CR4_PSE | X86_CR4_PAE | X86_CR4_MCE \
100 | X86_CR4_PGE | X86_CR4_PCE | X86_CR4_OSFXSR \
101 | X86_CR4_OSXMMEXCPT | X86_CR4_VMXE))
102
Rusty Russell7075bc82007-07-17 23:37:17 +1000103#define CR8_RESERVED_BITS (~(unsigned long)X86_CR8_TPR)
Avi Kivity6aa8b732006-12-10 02:21:36 -0800104#define EFER_RESERVED_BITS 0xfffffffffffff2fe
105
Avi Kivity05b3e0c2006-12-13 00:33:45 -0800106#ifdef CONFIG_X86_64
Avi Kivity6aa8b732006-12-10 02:21:36 -0800107// LDT or TSS descriptor in the GDT. 16 bytes.
108struct segment_descriptor_64 {
109 struct segment_descriptor s;
110 u32 base_higher;
111 u32 pad_zero;
112};
113
114#endif
115
Avi Kivitybccf2152007-02-21 18:04:26 +0200116static long kvm_vcpu_ioctl(struct file *file, unsigned int ioctl,
117 unsigned long arg);
118
/*
 * Compute the linear base address of the segment named by @selector by
 * reading its descriptor from the host GDT (or, when the table-indicator
 * bit is set, the LDT).  A null selector yields base 0.
 */
unsigned long segment_base(u16 selector)
{
	struct descriptor_table gdt;
	struct segment_descriptor *d;
	unsigned long table_base;
	typedef unsigned long ul;
	unsigned long v;

	if (selector == 0)
		return 0;

	asm ("sgdt %0" : "=m"(gdt));
	table_base = gdt.base;

	if (selector & 4) {          /* from ldt */
		u16 ldt_selector;

		asm ("sldt %0" : "=g"(ldt_selector));
		/* The LDT descriptor itself lives in the GDT; recurse once. */
		table_base = segment_base(ldt_selector);
	}
	d = (struct segment_descriptor *)(table_base + (selector & ~7));
	v = d->base_low | ((ul)d->base_mid << 16) | ((ul)d->base_high << 24);
#ifdef CONFIG_X86_64
	/* System descriptors of type LDT (2) or TSS (9, 11) are 16 bytes
	 * on x86-64 and carry base bits 63:32 in the second half. */
	if (d->system == 0
	    && (d->type == 2 || d->type == 9 || d->type == 11))
		v |= ((ul)((struct segment_descriptor_64 *)d)->base_higher) << 32;
#endif
	return v;
}
EXPORT_SYMBOL_GPL(segment_base);
149
James Morris5aacf0c2006-12-22 01:04:55 -0800150static inline int valid_vcpu(int n)
151{
152 return likely(n >= 0 && n < KVM_MAX_VCPUS);
153}
154
Avi Kivity7702fd12007-06-14 16:27:40 +0300155void kvm_load_guest_fpu(struct kvm_vcpu *vcpu)
156{
157 if (!vcpu->fpu_active || vcpu->guest_fpu_loaded)
158 return;
159
160 vcpu->guest_fpu_loaded = 1;
Rusty Russellb114b082007-07-30 21:13:43 +1000161 fx_save(&vcpu->host_fx_image);
162 fx_restore(&vcpu->guest_fx_image);
Avi Kivity7702fd12007-06-14 16:27:40 +0300163}
164EXPORT_SYMBOL_GPL(kvm_load_guest_fpu);
165
166void kvm_put_guest_fpu(struct kvm_vcpu *vcpu)
167{
168 if (!vcpu->guest_fpu_loaded)
169 return;
170
171 vcpu->guest_fpu_loaded = 0;
Rusty Russellb114b082007-07-30 21:13:43 +1000172 fx_save(&vcpu->guest_fx_image);
173 fx_restore(&vcpu->host_fx_image);
Avi Kivity7702fd12007-06-14 16:27:40 +0300174}
175EXPORT_SYMBOL_GPL(kvm_put_guest_fpu);
176
Avi Kivity6aa8b732006-12-10 02:21:36 -0800177/*
178 * Switches to specified vcpu, until a matching vcpu_put()
179 */
/* Acquire the vcpu mutex and load its arch state onto the current cpu;
 * paired with vcpu_put(). */
static void vcpu_load(struct kvm_vcpu *vcpu)
{
	int cpu;

	mutex_lock(&vcpu->mutex);
	cpu = get_cpu();	/* disables preemption */
	preempt_notifier_register(&vcpu->preempt_notifier);
	kvm_x86_ops->vcpu_load(vcpu, cpu);
	put_cpu();
}
190
/* Undo vcpu_load(): save arch state with preemption disabled, unregister
 * the preempt notifier, and release the vcpu mutex. */
static void vcpu_put(struct kvm_vcpu *vcpu)
{
	preempt_disable();
	kvm_x86_ops->vcpu_put(vcpu);
	preempt_notifier_unregister(&vcpu->preempt_notifier);
	preempt_enable();
	mutex_unlock(&vcpu->mutex);
}
199
/* IPI callback for kvm_flush_remote_tlbs(); receiving the interrupt is
 * itself the acknowledgment, so there is nothing to do here. */
static void ack_flush(void *_completed)
{
}
203
/*
 * Ask every vcpu of @kvm to flush its TLB before next guest entry: set
 * KVM_TLB_FLUSH on each vcpu, then IPI (and wait for) the cpus that are
 * currently running one.
 */
void kvm_flush_remote_tlbs(struct kvm *kvm)
{
	int i, cpu;
	cpumask_t cpus;
	struct kvm_vcpu *vcpu;

	cpus_clear(cpus);
	for (i = 0; i < KVM_MAX_VCPUS; ++i) {
		vcpu = kvm->vcpus[i];
		if (!vcpu)
			continue;
		/* A flush was already pending; no need to IPI again. */
		if (test_and_set_bit(KVM_TLB_FLUSH, &vcpu->requests))
			continue;
		cpu = vcpu->cpu;
		/* cpu == -1 means the vcpu isn't loaded on any cpu. */
		if (cpu != -1 && cpu != raw_smp_processor_id())
			cpu_set(cpu, cpus);
	}
	/* Final argument 1 = wait for all callbacks to complete. */
	smp_call_function_mask(cpus, ack_flush, NULL, 1);
}
223
Rusty Russellfb3f0f52007-07-27 17:16:56 +1000224int kvm_vcpu_init(struct kvm_vcpu *vcpu, struct kvm *kvm, unsigned id)
225{
226 struct page *page;
227 int r;
228
229 mutex_init(&vcpu->mutex);
230 vcpu->cpu = -1;
231 vcpu->mmu.root_hpa = INVALID_PAGE;
232 vcpu->kvm = kvm;
233 vcpu->vcpu_id = id;
He, Qingc5ec1532007-09-03 17:07:41 +0300234 if (!irqchip_in_kernel(kvm) || id == 0)
235 vcpu->mp_state = VCPU_MP_STATE_RUNNABLE;
236 else
237 vcpu->mp_state = VCPU_MP_STATE_UNINITIALIZED;
Eddie Dongb6958ce2007-07-18 12:15:21 +0300238 init_waitqueue_head(&vcpu->wq);
Rusty Russellfb3f0f52007-07-27 17:16:56 +1000239
240 page = alloc_page(GFP_KERNEL | __GFP_ZERO);
241 if (!page) {
242 r = -ENOMEM;
243 goto fail;
244 }
245 vcpu->run = page_address(page);
246
247 page = alloc_page(GFP_KERNEL | __GFP_ZERO);
248 if (!page) {
249 r = -ENOMEM;
250 goto fail_free_run;
251 }
252 vcpu->pio_data = page_address(page);
253
Rusty Russellfb3f0f52007-07-27 17:16:56 +1000254 r = kvm_mmu_create(vcpu);
255 if (r < 0)
256 goto fail_free_pio_data;
257
258 return 0;
259
260fail_free_pio_data:
261 free_page((unsigned long)vcpu->pio_data);
262fail_free_run:
263 free_page((unsigned long)vcpu->run);
264fail:
265 return -ENOMEM;
266}
267EXPORT_SYMBOL_GPL(kvm_vcpu_init);
268
/*
 * Reverse of kvm_vcpu_init(): tear down the mmu, stop and free the
 * in-kernel local APIC (if any), and release the run/pio pages.
 */
void kvm_vcpu_uninit(struct kvm_vcpu *vcpu)
{
	kvm_mmu_destroy(vcpu);
	/* Cancel the APIC timer before freeing the APIC it fires into. */
	if (vcpu->apic)
		hrtimer_cancel(&vcpu->apic->timer.dev);
	kvm_free_apic(vcpu->apic);
	free_page((unsigned long)vcpu->pio_data);
	free_page((unsigned long)vcpu->run);
}
EXPORT_SYMBOL_GPL(kvm_vcpu_uninit);
279
Avi Kivityf17abe92007-02-21 19:28:04 +0200280static struct kvm *kvm_create_vm(void)
Avi Kivity6aa8b732006-12-10 02:21:36 -0800281{
282 struct kvm *kvm = kzalloc(sizeof(struct kvm), GFP_KERNEL);
Avi Kivity6aa8b732006-12-10 02:21:36 -0800283
284 if (!kvm)
Avi Kivityf17abe92007-02-21 19:28:04 +0200285 return ERR_PTR(-ENOMEM);
Avi Kivity6aa8b732006-12-10 02:21:36 -0800286
Eddie Dong74906342007-06-19 18:05:03 +0300287 kvm_io_bus_init(&kvm->pio_bus);
Shaohua Li11ec2802007-07-23 14:51:37 +0800288 mutex_init(&kvm->lock);
Avi Kivity6aa8b732006-12-10 02:21:36 -0800289 INIT_LIST_HEAD(&kvm->active_mmu_pages);
Gregory Haskins2eeb2e92007-05-31 14:08:53 -0400290 kvm_io_bus_init(&kvm->mmio_bus);
Rusty Russell5e58cfe2007-07-23 17:08:21 +1000291 spin_lock(&kvm_lock);
292 list_add(&kvm->vm_list, &vm_list);
293 spin_unlock(&kvm_lock);
Avi Kivityf17abe92007-02-21 19:28:04 +0200294 return kvm;
295}
296
Avi Kivity6aa8b732006-12-10 02:21:36 -0800297/*
298 * Free any memory in @free but not in @dont.
299 */
300static void kvm_free_physmem_slot(struct kvm_memory_slot *free,
301 struct kvm_memory_slot *dont)
302{
303 int i;
304
305 if (!dont || free->phys_mem != dont->phys_mem)
306 if (free->phys_mem) {
307 for (i = 0; i < free->npages; ++i)
Avi Kivity55a54f72006-12-29 16:49:58 -0800308 if (free->phys_mem[i])
309 __free_page(free->phys_mem[i]);
Avi Kivity6aa8b732006-12-10 02:21:36 -0800310 vfree(free->phys_mem);
311 }
Izik Eidus290fc382007-09-27 14:11:22 +0200312 if (!dont || free->rmap != dont->rmap)
313 vfree(free->rmap);
Avi Kivity6aa8b732006-12-10 02:21:36 -0800314
315 if (!dont || free->dirty_bitmap != dont->dirty_bitmap)
316 vfree(free->dirty_bitmap);
317
Al Viro8b6d44c2007-02-09 16:38:40 +0000318 free->phys_mem = NULL;
Avi Kivity6aa8b732006-12-10 02:21:36 -0800319 free->npages = 0;
Al Viro8b6d44c2007-02-09 16:38:40 +0000320 free->dirty_bitmap = NULL;
Avi Kivity6aa8b732006-12-10 02:21:36 -0800321}
322
323static void kvm_free_physmem(struct kvm *kvm)
324{
325 int i;
326
327 for (i = 0; i < kvm->nmemslots; ++i)
Al Viro8b6d44c2007-02-09 16:38:40 +0000328 kvm_free_physmem_slot(&kvm->memslots[i], NULL);
Avi Kivity6aa8b732006-12-10 02:21:36 -0800329}
330
Avi Kivity039576c2007-03-20 12:46:50 +0200331static void free_pio_guest_pages(struct kvm_vcpu *vcpu)
332{
333 int i;
334
Rusty Russell3077c4512007-07-30 16:41:57 +1000335 for (i = 0; i < ARRAY_SIZE(vcpu->pio.guest_pages); ++i)
Avi Kivity039576c2007-03-20 12:46:50 +0200336 if (vcpu->pio.guest_pages[i]) {
337 __free_page(vcpu->pio.guest_pages[i]);
338 vcpu->pio.guest_pages[i] = NULL;
339 }
340}
341
/* Unload a vcpu's mmu while holding the vcpu (vcpu_load/vcpu_put). */
static void kvm_unload_vcpu_mmu(struct kvm_vcpu *vcpu)
{
	vcpu_load(vcpu);
	kvm_mmu_unload(vcpu);
	vcpu_put(vcpu);
}
348
Avi Kivity6aa8b732006-12-10 02:21:36 -0800349static void kvm_free_vcpus(struct kvm *kvm)
350{
351 unsigned int i;
352
Avi Kivity7b53aa52007-06-05 12:17:03 +0300353 /*
354 * Unpin any mmu pages first.
355 */
356 for (i = 0; i < KVM_MAX_VCPUS; ++i)
Rusty Russellfb3f0f52007-07-27 17:16:56 +1000357 if (kvm->vcpus[i])
358 kvm_unload_vcpu_mmu(kvm->vcpus[i]);
359 for (i = 0; i < KVM_MAX_VCPUS; ++i) {
360 if (kvm->vcpus[i]) {
Christian Ehrhardtcbdd1be2007-09-09 15:41:59 +0300361 kvm_x86_ops->vcpu_free(kvm->vcpus[i]);
Rusty Russellfb3f0f52007-07-27 17:16:56 +1000362 kvm->vcpus[i] = NULL;
363 }
364 }
365
Avi Kivity6aa8b732006-12-10 02:21:36 -0800366}
367
/*
 * Final teardown of a VM: unlink it from the global vm_list, then free
 * the io buses, the interrupt controllers, the vcpus, guest memory, and
 * finally the kvm structure itself.
 */
static void kvm_destroy_vm(struct kvm *kvm)
{
	spin_lock(&kvm_lock);
	list_del(&kvm->vm_list);
	spin_unlock(&kvm_lock);
	kvm_io_bus_destroy(&kvm->pio_bus);
	kvm_io_bus_destroy(&kvm->mmio_bus);
	kfree(kvm->vpic);
	kfree(kvm->vioapic);
	kvm_free_vcpus(kvm);
	kvm_free_physmem(kvm);
	kfree(kvm);
}
381
382static int kvm_vm_release(struct inode *inode, struct file *filp)
383{
384 struct kvm *kvm = filp->private_data;
385
386 kvm_destroy_vm(kvm);
Avi Kivity6aa8b732006-12-10 02:21:36 -0800387 return 0;
388}
389
/* Queue a #GP(0) fault for the vcpu via the vendor-specific hook. */
static void inject_gp(struct kvm_vcpu *vcpu)
{
	kvm_x86_ops->inject_gp(vcpu, 0);
}
394
/*
 * Load the pae pdptrs.  Return true if they are all valid.
 */
static int load_pdptrs(struct kvm_vcpu *vcpu, unsigned long cr3)
{
	gfn_t pdpt_gfn = cr3 >> PAGE_SHIFT;
	/* Offset of the pdpte table within the page, in u64 entries
	 * (derived from cr3 bits 5..11). */
	unsigned offset = ((cr3 & (PAGE_SIZE-1)) >> 5) << 2;
	int i;
	int ret;
	u64 pdpte[ARRAY_SIZE(vcpu->pdptrs)];

	mutex_lock(&vcpu->kvm->lock);
	ret = kvm_read_guest_page(vcpu->kvm, pdpt_gfn, pdpte,
				  offset * sizeof(u64), sizeof(pdpte));
	if (ret < 0) {
		ret = 0;	/* unreadable pdpt -> treat as invalid */
		goto out;
	}
	/* A pdpte is rejected if it is present (bit 0) and has any
	 * reserved bits set. */
	for (i = 0; i < ARRAY_SIZE(pdpte); ++i) {
		if ((pdpte[i] & 1) && (pdpte[i] & 0xfffffff0000001e6ull)) {
			ret = 0;
			goto out;
		}
	}
	ret = 1;

	/* Only commit the pdptrs to the vcpu once all four validated. */
	memcpy(vcpu->pdptrs, pdpte, sizeof(vcpu->pdptrs));
out:
	mutex_unlock(&vcpu->kvm->lock);

	return ret;
}
427
/*
 * Emulate a guest write to cr0, performing the architectural #GP checks
 * (reserved bits, CD/NW and PG/PE consistency, long-mode activation
 * rules) before handing the value to the vendor code and rebuilding the
 * mmu context.
 */
void set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
{
	if (cr0 & CR0_RESERVED_BITS) {
		printk(KERN_DEBUG "set_cr0: 0x%lx #GP, reserved bits 0x%lx\n",
		       cr0, vcpu->cr0);
		inject_gp(vcpu);
		return;
	}

	if ((cr0 & X86_CR0_NW) && !(cr0 & X86_CR0_CD)) {
		printk(KERN_DEBUG "set_cr0: #GP, CD == 0 && NW == 1\n");
		inject_gp(vcpu);
		return;
	}

	if ((cr0 & X86_CR0_PG) && !(cr0 & X86_CR0_PE)) {
		printk(KERN_DEBUG "set_cr0: #GP, set PG flag "
		       "and a clear PE flag\n");
		inject_gp(vcpu);
		return;
	}

	/* The guest is turning paging on. */
	if (!is_paging(vcpu) && (cr0 & X86_CR0_PG)) {
#ifdef CONFIG_X86_64
		if ((vcpu->shadow_efer & EFER_LME)) {
			/* Entering long mode: PAE must be on and the
			 * current code segment must not be 64-bit. */
			int cs_db, cs_l;

			if (!is_pae(vcpu)) {
				printk(KERN_DEBUG "set_cr0: #GP, start paging "
				       "in long mode while PAE is disabled\n");
				inject_gp(vcpu);
				return;
			}
			kvm_x86_ops->get_cs_db_l_bits(vcpu, &cs_db, &cs_l);
			if (cs_l) {
				printk(KERN_DEBUG "set_cr0: #GP, start paging "
				       "in long mode while CS.L == 1\n");
				inject_gp(vcpu);
				return;

			}
		} else
#endif
		/* PAE paging: load and validate the pdptes now. */
		if (is_pae(vcpu) && !load_pdptrs(vcpu, vcpu->cr3)) {
			printk(KERN_DEBUG "set_cr0: #GP, pdptrs "
			       "reserved bits\n");
			inject_gp(vcpu);
			return;
		}

	}

	kvm_x86_ops->set_cr0(vcpu, cr0);
	vcpu->cr0 = cr0;

	/* Paging mode may have changed; rebuild the mmu context. */
	mutex_lock(&vcpu->kvm->lock);
	kvm_mmu_reset_context(vcpu);
	mutex_unlock(&vcpu->kvm->lock);
	return;
}
EXPORT_SYMBOL_GPL(set_cr0);
489
490void lmsw(struct kvm_vcpu *vcpu, unsigned long msw)
491{
492 set_cr0(vcpu, (vcpu->cr0 & ~0x0ful) | (msw & 0x0f));
493}
494EXPORT_SYMBOL_GPL(lmsw);
495
/*
 * Emulate a guest write to cr4: #GP on reserved bits, on clearing PAE
 * while in long mode, on invalid pdptes when enabling PAE under paging,
 * and on any attempt to set VMXE.  On success the mmu context is rebuilt.
 */
void set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
{
	if (cr4 & CR4_RESERVED_BITS) {
		printk(KERN_DEBUG "set_cr4: #GP, reserved bits\n");
		inject_gp(vcpu);
		return;
	}

	if (is_long_mode(vcpu)) {
		if (!(cr4 & X86_CR4_PAE)) {
			printk(KERN_DEBUG "set_cr4: #GP, clearing PAE while "
			       "in long mode\n");
			inject_gp(vcpu);
			return;
		}
	} else if (is_paging(vcpu) && !is_pae(vcpu) && (cr4 & X86_CR4_PAE)
		   && !load_pdptrs(vcpu, vcpu->cr3)) {
		printk(KERN_DEBUG "set_cr4: #GP, pdptrs reserved bits\n");
		inject_gp(vcpu);
		return;
	}

	/* Setting VMXE is always rejected. */
	if (cr4 & X86_CR4_VMXE) {
		printk(KERN_DEBUG "set_cr4: #GP, setting VMXE\n");
		inject_gp(vcpu);
		return;
	}
	kvm_x86_ops->set_cr4(vcpu, cr4);
	vcpu->cr4 = cr4;
	mutex_lock(&vcpu->kvm->lock);
	kvm_mmu_reset_context(vcpu);
	mutex_unlock(&vcpu->kvm->lock);
}
EXPORT_SYMBOL_GPL(set_cr4);
530
/*
 * Emulate a guest write to cr3.  Mode-dependent reserved-bit and pdpte
 * checks can raise #GP; a cr3 that does not map to guest memory also
 * raises #GP (see the comment below on how this differs from hardware).
 */
void set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3)
{
	if (is_long_mode(vcpu)) {
		if (cr3 & CR3_L_MODE_RESERVED_BITS) {
			printk(KERN_DEBUG "set_cr3: #GP, reserved bits\n");
			inject_gp(vcpu);
			return;
		}
	} else {
		if (is_pae(vcpu)) {
			if (cr3 & CR3_PAE_RESERVED_BITS) {
				printk(KERN_DEBUG
				       "set_cr3: #GP, reserved bits\n");
				inject_gp(vcpu);
				return;
			}
			if (is_paging(vcpu) && !load_pdptrs(vcpu, cr3)) {
				printk(KERN_DEBUG "set_cr3: #GP, pdptrs "
				       "reserved bits\n");
				inject_gp(vcpu);
				return;
			}
		}
		/*
		 * We don't check reserved bits in nonpae mode, because
		 * this isn't enforced, and VMware depends on this.
		 */
	}

	mutex_lock(&vcpu->kvm->lock);
	/*
	 * Does the new cr3 value map to physical memory? (Note, we
	 * catch an invalid cr3 even in real-mode, because it would
	 * cause trouble later on when we turn on paging anyway.)
	 *
	 * A real CPU would silently accept an invalid cr3 and would
	 * attempt to use it - with largely undefined (and often hard
	 * to debug) behavior on the guest side.
	 */
	if (unlikely(!gfn_to_memslot(vcpu->kvm, cr3 >> PAGE_SHIFT)))
		inject_gp(vcpu);
	else {
		vcpu->cr3 = cr3;
		vcpu->mmu.new_cr3(vcpu);
	}
	mutex_unlock(&vcpu->kvm->lock);
}
EXPORT_SYMBOL_GPL(set_cr3);
579
580void set_cr8(struct kvm_vcpu *vcpu, unsigned long cr8)
581{
Rusty Russell7075bc82007-07-17 23:37:17 +1000582 if (cr8 & CR8_RESERVED_BITS) {
Avi Kivity6aa8b732006-12-10 02:21:36 -0800583 printk(KERN_DEBUG "set_cr8: #GP, reserved bits 0x%lx\n", cr8);
584 inject_gp(vcpu);
585 return;
586 }
Eddie Dong97222cc2007-09-12 10:58:04 +0300587 if (irqchip_in_kernel(vcpu->kvm))
588 kvm_lapic_set_tpr(vcpu, cr8);
589 else
590 vcpu->cr8 = cr8;
Avi Kivity6aa8b732006-12-10 02:21:36 -0800591}
592EXPORT_SYMBOL_GPL(set_cr8);
593
Eddie Dong7017fc32007-07-18 11:34:57 +0300594unsigned long get_cr8(struct kvm_vcpu *vcpu)
595{
Eddie Dong97222cc2007-09-12 10:58:04 +0300596 if (irqchip_in_kernel(vcpu->kvm))
597 return kvm_lapic_get_cr8(vcpu);
598 else
599 return vcpu->cr8;
Eddie Dong7017fc32007-07-18 11:34:57 +0300600}
601EXPORT_SYMBOL_GPL(get_cr8);
602
603u64 kvm_get_apic_base(struct kvm_vcpu *vcpu)
604{
Eddie Dong97222cc2007-09-12 10:58:04 +0300605 if (irqchip_in_kernel(vcpu->kvm))
606 return vcpu->apic_base;
607 else
608 return vcpu->apic_base;
Eddie Dong7017fc32007-07-18 11:34:57 +0300609}
610EXPORT_SYMBOL_GPL(kvm_get_apic_base);
611
/*
 * Write the APIC base MSR.  With an in-kernel irqchip the lapic code
 * applies the update; otherwise the value is just cached on the vcpu.
 */
void kvm_set_apic_base(struct kvm_vcpu *vcpu, u64 data)
{
	/* TODO: reserve bits check */
	if (irqchip_in_kernel(vcpu->kvm))
		kvm_lapic_set_base(vcpu, data);
	else
		vcpu->apic_base = data;
}
EXPORT_SYMBOL_GPL(kvm_set_apic_base);
621
/*
 * Initialize the vcpu's FPU images: the guest image is seeded from a
 * freshly reset host FPU while the host image preserves the current
 * state.  Also sets CR0.ET and the default MXCSR in the guest image.
 */
void fx_init(struct kvm_vcpu *vcpu)
{
	unsigned after_mxcsr_mask;

	/* Initialize guest FPU by resetting ours and saving into guest's */
	preempt_disable();
	fx_save(&vcpu->host_fx_image);
	fpu_init();
	fx_save(&vcpu->guest_fx_image);
	fx_restore(&vcpu->host_fx_image);
	preempt_enable();

	vcpu->cr0 |= X86_CR0_ET;
	/* Clear everything from st_space onward; the header up to and
	 * including mxcsr_mask (with mxcsr = 0x1f80, the x86 reset
	 * default) is kept. */
	after_mxcsr_mask = offsetof(struct i387_fxsave_struct, st_space);
	vcpu->guest_fx_image.mxcsr = 0x1f80;
	memset((void *)&vcpu->guest_fx_image + after_mxcsr_mask,
	       0, sizeof(struct i387_fxsave_struct) - after_mxcsr_mask);
}
EXPORT_SYMBOL_GPL(fx_init);
641
642/*
Avi Kivity6aa8b732006-12-10 02:21:36 -0800643 * Allocate some memory and give it an address in the guest physical address
644 * space.
645 *
646 * Discontiguous memory is allowed, mostly for framebuffers.
647 */
static int kvm_vm_ioctl_set_memory_region(struct kvm *kvm,
					  struct kvm_memory_region *mem)
{
	int r;
	gfn_t base_gfn;
	unsigned long npages;
	unsigned long i;
	struct kvm_memory_slot *memslot;
	struct kvm_memory_slot old, new;

	r = -EINVAL;
	/* General sanity checks */
	if (mem->memory_size & (PAGE_SIZE - 1))
		goto out;
	if (mem->guest_phys_addr & (PAGE_SIZE - 1))
		goto out;
	if (mem->slot >= KVM_MEMORY_SLOTS)
		goto out;
	/* Reject guest_phys_addr + memory_size wrap-around. */
	if (mem->guest_phys_addr + mem->memory_size < mem->guest_phys_addr)
		goto out;

	memslot = &kvm->memslots[mem->slot];
	base_gfn = mem->guest_phys_addr >> PAGE_SHIFT;
	npages = mem->memory_size >> PAGE_SHIFT;

	/* npages == 0 means the slot is being deleted. */
	if (!npages)
		mem->flags &= ~KVM_MEM_LOG_DIRTY_PAGES;

	mutex_lock(&kvm->lock);

	/* Work on a copy; the live slot is only replaced on success. */
	new = old = *memslot;

	new.base_gfn = base_gfn;
	new.npages = npages;
	new.flags = mem->flags;

	/* Disallow changing a memory slot's size. */
	r = -EINVAL;
	if (npages && old.npages && npages != old.npages)
		goto out_unlock;

	/* Check for overlaps */
	r = -EEXIST;
	for (i = 0; i < KVM_MEMORY_SLOTS; ++i) {
		struct kvm_memory_slot *s = &kvm->memslots[i];

		if (s == memslot)
			continue;
		if (!((base_gfn + npages <= s->base_gfn) ||
		      (base_gfn >= s->base_gfn + s->npages)))
			goto out_unlock;
	}

	/* Deallocate if slot is being removed */
	if (!npages)
		new.phys_mem = NULL;

	/* Free page dirty bitmap if unneeded */
	if (!(new.flags & KVM_MEM_LOG_DIRTY_PAGES))
		new.dirty_bitmap = NULL;

	r = -ENOMEM;

	/* Allocate if a slot is being created */
	if (npages && !new.phys_mem) {
		new.phys_mem = vmalloc(npages * sizeof(struct page *));

		if (!new.phys_mem)
			goto out_unlock;

		new.rmap = vmalloc(npages * sizeof(struct page*));

		if (!new.rmap)
			goto out_unlock;

		memset(new.phys_mem, 0, npages * sizeof(struct page *));
		memset(new.rmap, 0, npages * sizeof(*new.rmap));
		for (i = 0; i < npages; ++i) {
			new.phys_mem[i] = alloc_page(GFP_HIGHUSER
						     | __GFP_ZERO);
			/* Partially filled arrays are released via
			 * kvm_free_physmem_slot() at out_unlock. */
			if (!new.phys_mem[i])
				goto out_unlock;
		}
	}

	/* Allocate page dirty bitmap if needed */
	if ((new.flags & KVM_MEM_LOG_DIRTY_PAGES) && !new.dirty_bitmap) {
		unsigned dirty_bytes = ALIGN(npages, BITS_PER_LONG) / 8;

		new.dirty_bitmap = vmalloc(dirty_bytes);
		if (!new.dirty_bitmap)
			goto out_unlock;
		memset(new.dirty_bitmap, 0, dirty_bytes);
	}

	if (mem->slot >= kvm->nmemslots)
		kvm->nmemslots = mem->slot + 1;

	/* Unless userspace pinned the pool via KVM_SET_NR_MMU_PAGES,
	 * scale the shadow-page budget with the amount of guest memory. */
	if (!kvm->n_requested_mmu_pages) {
		unsigned int n_pages;

		if (npages) {
			n_pages = npages * KVM_PERMILLE_MMU_PAGES / 1000;
			kvm_mmu_change_mmu_pages(kvm, kvm->n_alloc_mmu_pages +
						 n_pages);
		} else {
			unsigned int nr_mmu_pages;

			n_pages = old.npages * KVM_PERMILLE_MMU_PAGES / 1000;
			nr_mmu_pages = kvm->n_alloc_mmu_pages - n_pages;
			nr_mmu_pages = max(nr_mmu_pages,
					   (unsigned int) KVM_MIN_ALLOC_MMU_PAGES);
			kvm_mmu_change_mmu_pages(kvm, nr_mmu_pages);
		}
	}

	*memslot = new;

	/* Drop write access on the slot's mappings and flush all TLBs. */
	kvm_mmu_slot_remove_write_access(kvm, mem->slot);
	kvm_flush_remote_tlbs(kvm);

	mutex_unlock(&kvm->lock);

	/* Free whatever the old slot owned that the new one doesn't. */
	kvm_free_physmem_slot(&old, &new);
	return 0;

out_unlock:
	mutex_unlock(&kvm->lock);
	kvm_free_physmem_slot(&new, &old);
out:
	return r;
}
780
Izik Eidus82ce2c92007-10-02 18:52:55 +0200781static int kvm_vm_ioctl_set_nr_mmu_pages(struct kvm *kvm,
782 u32 kvm_nr_mmu_pages)
783{
784 if (kvm_nr_mmu_pages < KVM_MIN_ALLOC_MMU_PAGES)
785 return -EINVAL;
786
787 mutex_lock(&kvm->lock);
788
789 kvm_mmu_change_mmu_pages(kvm, kvm_nr_mmu_pages);
790 kvm->n_requested_mmu_pages = kvm_nr_mmu_pages;
791
792 mutex_unlock(&kvm->lock);
793 return 0;
794}
795
/* KVM_GET_NR_MMU_PAGES: report the current shadow-page allocation. */
static int kvm_vm_ioctl_get_nr_mmu_pages(struct kvm *kvm)
{
	return kvm->n_alloc_mmu_pages;
}
800
Avi Kivity6aa8b732006-12-10 02:21:36 -0800801/*
802 * Get (and clear) the dirty memory log for a memory slot.
803 */
static int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
				      struct kvm_dirty_log *log)
{
	struct kvm_memory_slot *memslot;
	int r, i;
	int n;
	unsigned long any = 0;

	mutex_lock(&kvm->lock);

	r = -EINVAL;
	if (log->slot >= KVM_MEMORY_SLOTS)
		goto out;

	memslot = &kvm->memslots[log->slot];
	r = -ENOENT;
	if (!memslot->dirty_bitmap)
		goto out;

	/* Bitmap size in bytes, rounded up to whole longs. */
	n = ALIGN(memslot->npages, BITS_PER_LONG) / 8;

	/* Scan long-by-long; stop at the first nonzero word. */
	for (i = 0; !any && i < n/sizeof(long); ++i)
		any = memslot->dirty_bitmap[i];

	r = -EFAULT;
	if (copy_to_user(log->dirty_bitmap, memslot->dirty_bitmap, n))
		goto out;

	/* If nothing is dirty, don't bother messing with page tables. */
	if (any) {
		kvm_mmu_slot_remove_write_access(kvm, log->slot);
		kvm_flush_remote_tlbs(kvm);
		memset(memslot->dirty_bitmap, 0, n);
	}

	r = 0;

out:
	mutex_unlock(&kvm->lock);
	return r;
}
845
Avi Kivitye8207542007-03-30 16:54:30 +0300846/*
847 * Set a new alias region. Aliases map a portion of physical memory into
848 * another portion. This is useful for memory windows, for example the PC
849 * VGA region.
850 */
851static int kvm_vm_ioctl_set_memory_alias(struct kvm *kvm,
852 struct kvm_memory_alias *alias)
853{
854 int r, n;
855 struct kvm_mem_alias *p;
856
857 r = -EINVAL;
858 /* General sanity checks */
859 if (alias->memory_size & (PAGE_SIZE - 1))
860 goto out;
861 if (alias->guest_phys_addr & (PAGE_SIZE - 1))
862 goto out;
863 if (alias->slot >= KVM_ALIAS_SLOTS)
864 goto out;
865 if (alias->guest_phys_addr + alias->memory_size
866 < alias->guest_phys_addr)
867 goto out;
868 if (alias->target_phys_addr + alias->memory_size
869 < alias->target_phys_addr)
870 goto out;
871
Shaohua Li11ec2802007-07-23 14:51:37 +0800872 mutex_lock(&kvm->lock);
Avi Kivitye8207542007-03-30 16:54:30 +0300873
874 p = &kvm->aliases[alias->slot];
875 p->base_gfn = alias->guest_phys_addr >> PAGE_SHIFT;
876 p->npages = alias->memory_size >> PAGE_SHIFT;
877 p->target_gfn = alias->target_phys_addr >> PAGE_SHIFT;
878
879 for (n = KVM_ALIAS_SLOTS; n > 0; --n)
880 if (kvm->aliases[n - 1].npages)
881 break;
882 kvm->naliases = n;
883
Avi Kivity90cb0522007-07-17 13:04:56 +0300884 kvm_mmu_zap_all(kvm);
Avi Kivitye8207542007-03-30 16:54:30 +0300885
Shaohua Li11ec2802007-07-23 14:51:37 +0800886 mutex_unlock(&kvm->lock);
Avi Kivitye8207542007-03-30 16:54:30 +0300887
888 return 0;
889
890out:
891 return r;
892}
893
He, Qing6ceb9d72007-07-26 11:05:18 +0300894static int kvm_vm_ioctl_get_irqchip(struct kvm *kvm, struct kvm_irqchip *chip)
895{
896 int r;
897
898 r = 0;
899 switch (chip->chip_id) {
900 case KVM_IRQCHIP_PIC_MASTER:
901 memcpy (&chip->chip.pic,
902 &pic_irqchip(kvm)->pics[0],
903 sizeof(struct kvm_pic_state));
904 break;
905 case KVM_IRQCHIP_PIC_SLAVE:
906 memcpy (&chip->chip.pic,
907 &pic_irqchip(kvm)->pics[1],
908 sizeof(struct kvm_pic_state));
909 break;
He, Qing6bf9e962007-08-05 10:49:16 +0300910 case KVM_IRQCHIP_IOAPIC:
911 memcpy (&chip->chip.ioapic,
912 ioapic_irqchip(kvm),
913 sizeof(struct kvm_ioapic_state));
914 break;
He, Qing6ceb9d72007-07-26 11:05:18 +0300915 default:
916 r = -EINVAL;
917 break;
918 }
919 return r;
920}
921
922static int kvm_vm_ioctl_set_irqchip(struct kvm *kvm, struct kvm_irqchip *chip)
923{
924 int r;
925
926 r = 0;
927 switch (chip->chip_id) {
928 case KVM_IRQCHIP_PIC_MASTER:
929 memcpy (&pic_irqchip(kvm)->pics[0],
930 &chip->chip.pic,
931 sizeof(struct kvm_pic_state));
932 break;
933 case KVM_IRQCHIP_PIC_SLAVE:
934 memcpy (&pic_irqchip(kvm)->pics[1],
935 &chip->chip.pic,
936 sizeof(struct kvm_pic_state));
937 break;
He, Qing6bf9e962007-08-05 10:49:16 +0300938 case KVM_IRQCHIP_IOAPIC:
939 memcpy (ioapic_irqchip(kvm),
940 &chip->chip.ioapic,
941 sizeof(struct kvm_ioapic_state));
942 break;
He, Qing6ceb9d72007-07-26 11:05:18 +0300943 default:
944 r = -EINVAL;
945 break;
946 }
947 kvm_pic_update_irq(pic_irqchip(kvm));
948 return r;
949}
950
Izik Eidus290fc382007-09-27 14:11:22 +0200951gfn_t unalias_gfn(struct kvm *kvm, gfn_t gfn)
Avi Kivitye8207542007-03-30 16:54:30 +0300952{
953 int i;
954 struct kvm_mem_alias *alias;
955
956 for (i = 0; i < kvm->naliases; ++i) {
957 alias = &kvm->aliases[i];
958 if (gfn >= alias->base_gfn
959 && gfn < alias->base_gfn + alias->npages)
960 return alias->target_gfn + gfn - alias->base_gfn;
961 }
962 return gfn;
963}
964
965static struct kvm_memory_slot *__gfn_to_memslot(struct kvm *kvm, gfn_t gfn)
Avi Kivity6aa8b732006-12-10 02:21:36 -0800966{
967 int i;
968
969 for (i = 0; i < kvm->nmemslots; ++i) {
970 struct kvm_memory_slot *memslot = &kvm->memslots[i];
971
972 if (gfn >= memslot->base_gfn
973 && gfn < memslot->base_gfn + memslot->npages)
974 return memslot;
975 }
Al Viro8b6d44c2007-02-09 16:38:40 +0000976 return NULL;
Avi Kivity6aa8b732006-12-10 02:21:36 -0800977}
Avi Kivitye8207542007-03-30 16:54:30 +0300978
979struct kvm_memory_slot *gfn_to_memslot(struct kvm *kvm, gfn_t gfn)
980{
981 gfn = unalias_gfn(kvm, gfn);
982 return __gfn_to_memslot(kvm, gfn);
983}
Avi Kivity6aa8b732006-12-10 02:21:36 -0800984
Avi Kivity954bbbc2007-03-30 14:02:32 +0300985struct page *gfn_to_page(struct kvm *kvm, gfn_t gfn)
986{
987 struct kvm_memory_slot *slot;
988
Avi Kivitye8207542007-03-30 16:54:30 +0300989 gfn = unalias_gfn(kvm, gfn);
990 slot = __gfn_to_memslot(kvm, gfn);
Avi Kivity954bbbc2007-03-30 14:02:32 +0300991 if (!slot)
992 return NULL;
993 return slot->phys_mem[gfn - slot->base_gfn];
994}
995EXPORT_SYMBOL_GPL(gfn_to_page);
996
Izik Eidus195aefd2007-10-01 22:14:18 +0200997static int next_segment(unsigned long len, int offset)
998{
999 if (len > PAGE_SIZE - offset)
1000 return PAGE_SIZE - offset;
1001 else
1002 return len;
1003}
1004
1005int kvm_read_guest_page(struct kvm *kvm, gfn_t gfn, void *data, int offset,
1006 int len)
1007{
1008 void *page_virt;
1009 struct page *page;
1010
1011 page = gfn_to_page(kvm, gfn);
1012 if (!page)
1013 return -EFAULT;
1014 page_virt = kmap_atomic(page, KM_USER0);
1015
1016 memcpy(data, page_virt + offset, len);
1017
1018 kunmap_atomic(page_virt, KM_USER0);
1019 return 0;
1020}
1021EXPORT_SYMBOL_GPL(kvm_read_guest_page);
1022
1023int kvm_read_guest(struct kvm *kvm, gpa_t gpa, void *data, unsigned long len)
1024{
1025 gfn_t gfn = gpa >> PAGE_SHIFT;
1026 int seg;
1027 int offset = offset_in_page(gpa);
1028 int ret;
1029
1030 while ((seg = next_segment(len, offset)) != 0) {
1031 ret = kvm_read_guest_page(kvm, gfn, data, offset, seg);
1032 if (ret < 0)
1033 return ret;
1034 offset = 0;
1035 len -= seg;
1036 data += seg;
1037 ++gfn;
1038 }
1039 return 0;
1040}
1041EXPORT_SYMBOL_GPL(kvm_read_guest);
1042
1043int kvm_write_guest_page(struct kvm *kvm, gfn_t gfn, const void *data,
1044 int offset, int len)
1045{
1046 void *page_virt;
1047 struct page *page;
1048
1049 page = gfn_to_page(kvm, gfn);
1050 if (!page)
1051 return -EFAULT;
1052 page_virt = kmap_atomic(page, KM_USER0);
1053
1054 memcpy(page_virt + offset, data, len);
1055
1056 kunmap_atomic(page_virt, KM_USER0);
1057 mark_page_dirty(kvm, gfn);
1058 return 0;
1059}
1060EXPORT_SYMBOL_GPL(kvm_write_guest_page);
1061
1062int kvm_write_guest(struct kvm *kvm, gpa_t gpa, const void *data,
1063 unsigned long len)
1064{
1065 gfn_t gfn = gpa >> PAGE_SHIFT;
1066 int seg;
1067 int offset = offset_in_page(gpa);
1068 int ret;
1069
1070 while ((seg = next_segment(len, offset)) != 0) {
1071 ret = kvm_write_guest_page(kvm, gfn, data, offset, seg);
1072 if (ret < 0)
1073 return ret;
1074 offset = 0;
1075 len -= seg;
1076 data += seg;
1077 ++gfn;
1078 }
1079 return 0;
1080}
1081
1082int kvm_clear_guest_page(struct kvm *kvm, gfn_t gfn, int offset, int len)
1083{
1084 void *page_virt;
1085 struct page *page;
1086
1087 page = gfn_to_page(kvm, gfn);
1088 if (!page)
1089 return -EFAULT;
1090 page_virt = kmap_atomic(page, KM_USER0);
1091
1092 memset(page_virt + offset, 0, len);
1093
1094 kunmap_atomic(page_virt, KM_USER0);
1095 return 0;
1096}
1097EXPORT_SYMBOL_GPL(kvm_clear_guest_page);
1098
1099int kvm_clear_guest(struct kvm *kvm, gpa_t gpa, unsigned long len)
1100{
1101 gfn_t gfn = gpa >> PAGE_SHIFT;
1102 int seg;
1103 int offset = offset_in_page(gpa);
1104 int ret;
1105
1106 while ((seg = next_segment(len, offset)) != 0) {
1107 ret = kvm_clear_guest_page(kvm, gfn, offset, seg);
1108 if (ret < 0)
1109 return ret;
1110 offset = 0;
1111 len -= seg;
1112 ++gfn;
1113 }
1114 return 0;
1115}
1116EXPORT_SYMBOL_GPL(kvm_clear_guest);
1117
Rusty Russell7e9d6192007-07-31 20:41:14 +10001118/* WARNING: Does not work on aliased pages. */
Avi Kivity6aa8b732006-12-10 02:21:36 -08001119void mark_page_dirty(struct kvm *kvm, gfn_t gfn)
1120{
Nguyen Anh Quynh31389942007-06-05 10:35:19 +03001121 struct kvm_memory_slot *memslot;
Avi Kivity6aa8b732006-12-10 02:21:36 -08001122
Rusty Russell7e9d6192007-07-31 20:41:14 +10001123 memslot = __gfn_to_memslot(kvm, gfn);
1124 if (memslot && memslot->dirty_bitmap) {
1125 unsigned long rel_gfn = gfn - memslot->base_gfn;
Avi Kivity6aa8b732006-12-10 02:21:36 -08001126
Rusty Russell7e9d6192007-07-31 20:41:14 +10001127 /* avoid RMW */
1128 if (!test_bit(rel_gfn, memslot->dirty_bitmap))
1129 set_bit(rel_gfn, memslot->dirty_bitmap);
Avi Kivity6aa8b732006-12-10 02:21:36 -08001130 }
1131}
1132
Laurent Viviere7d5d762007-07-30 13:41:19 +03001133int emulator_read_std(unsigned long addr,
Avi Kivity4c690a12007-04-22 15:28:19 +03001134 void *val,
Avi Kivity6aa8b732006-12-10 02:21:36 -08001135 unsigned int bytes,
Laurent Viviercebff022007-07-30 13:35:24 +03001136 struct kvm_vcpu *vcpu)
Avi Kivity6aa8b732006-12-10 02:21:36 -08001137{
Avi Kivity6aa8b732006-12-10 02:21:36 -08001138 void *data = val;
1139
1140 while (bytes) {
1141 gpa_t gpa = vcpu->mmu.gva_to_gpa(vcpu, addr);
1142 unsigned offset = addr & (PAGE_SIZE-1);
1143 unsigned tocopy = min(bytes, (unsigned)PAGE_SIZE - offset);
Izik Eidus195aefd2007-10-01 22:14:18 +02001144 int ret;
Avi Kivity6aa8b732006-12-10 02:21:36 -08001145
1146 if (gpa == UNMAPPED_GVA)
1147 return X86EMUL_PROPAGATE_FAULT;
Izik Eidus195aefd2007-10-01 22:14:18 +02001148 ret = kvm_read_guest(vcpu->kvm, gpa, data, tocopy);
1149 if (ret < 0)
Avi Kivity6aa8b732006-12-10 02:21:36 -08001150 return X86EMUL_UNHANDLEABLE;
Avi Kivity6aa8b732006-12-10 02:21:36 -08001151
1152 bytes -= tocopy;
1153 data += tocopy;
1154 addr += tocopy;
1155 }
1156
1157 return X86EMUL_CONTINUE;
1158}
Laurent Viviere7d5d762007-07-30 13:41:19 +03001159EXPORT_SYMBOL_GPL(emulator_read_std);
Avi Kivity6aa8b732006-12-10 02:21:36 -08001160
/*
 * Emulator callback for page-table writes: not implemented — log once
 * per occurrence and report the access as unhandleable.
 */
static int emulator_write_std(unsigned long addr,
			      const void *val,
			      unsigned int bytes,
			      struct kvm_vcpu *vcpu)
{
	pr_unimpl(vcpu, "emulator_write_std: addr %lx n %d\n", addr, bytes);
	return X86EMUL_UNHANDLEABLE;
}
1169
Eddie Dong97222cc2007-09-12 10:58:04 +03001170/*
1171 * Only apic need an MMIO device hook, so shortcut now..
1172 */
1173static struct kvm_io_device *vcpu_find_pervcpu_dev(struct kvm_vcpu *vcpu,
1174 gpa_t addr)
1175{
1176 struct kvm_io_device *dev;
1177
1178 if (vcpu->apic) {
1179 dev = &vcpu->apic->dev;
1180 if (dev->in_range(dev, addr))
1181 return dev;
1182 }
1183 return NULL;
1184}
1185
Gregory Haskins2eeb2e92007-05-31 14:08:53 -04001186static struct kvm_io_device *vcpu_find_mmio_dev(struct kvm_vcpu *vcpu,
1187 gpa_t addr)
1188{
Eddie Dong97222cc2007-09-12 10:58:04 +03001189 struct kvm_io_device *dev;
1190
1191 dev = vcpu_find_pervcpu_dev(vcpu, addr);
1192 if (dev == NULL)
1193 dev = kvm_io_bus_find_dev(&vcpu->kvm->mmio_bus, addr);
1194 return dev;
Gregory Haskins2eeb2e92007-05-31 14:08:53 -04001195}
1196
/* Port I/O has no per-vcpu devices; search the VM-wide pio bus only. */
static struct kvm_io_device *vcpu_find_pio_dev(struct kvm_vcpu *vcpu,
					       gpa_t addr)
{
	return kvm_io_bus_find_dev(&vcpu->kvm->pio_bus, addr);
}
1202
/*
 * Emulator read callback.  Tries, in order: a read completed earlier by
 * userspace MMIO, a plain page-table read, an in-kernel MMIO device,
 * and finally defers to userspace by filling in the vcpu mmio_* fields
 * and returning X86EMUL_UNHANDLEABLE.
 */
static int emulator_read_emulated(unsigned long addr,
				  void *val,
				  unsigned int bytes,
				  struct kvm_vcpu *vcpu)
{
	struct kvm_io_device *mmio_dev;
	gpa_t gpa;

	/* Userspace already performed this MMIO read; consume the result. */
	if (vcpu->mmio_read_completed) {
		memcpy(val, vcpu->mmio_data, bytes);
		vcpu->mmio_read_completed = 0;
		return X86EMUL_CONTINUE;
	} else if (emulator_read_std(addr, val, bytes, vcpu)
		   == X86EMUL_CONTINUE)
		return X86EMUL_CONTINUE;

	gpa = vcpu->mmu.gva_to_gpa(vcpu, addr);
	if (gpa == UNMAPPED_GVA)
		return X86EMUL_PROPAGATE_FAULT;

	/*
	 * Is this MMIO handled locally?
	 */
	mmio_dev = vcpu_find_mmio_dev(vcpu, gpa);
	if (mmio_dev) {
		kvm_iodevice_read(mmio_dev, gpa, bytes, val);
		return X86EMUL_CONTINUE;
	}

	/* Hand the access to userspace; exit parameters live in the vcpu. */
	vcpu->mmio_needed = 1;
	vcpu->mmio_phys_addr = gpa;
	vcpu->mmio_size = bytes;
	vcpu->mmio_is_write = 0;

	return X86EMUL_UNHANDLEABLE;
}
1239
Avi Kivityda4a00f2007-01-05 16:36:44 -08001240static int emulator_write_phys(struct kvm_vcpu *vcpu, gpa_t gpa,
Avi Kivity4c690a12007-04-22 15:28:19 +03001241 const void *val, int bytes)
Avi Kivityda4a00f2007-01-05 16:36:44 -08001242{
Izik Eidus195aefd2007-10-01 22:14:18 +02001243 int ret;
Avi Kivityda4a00f2007-01-05 16:36:44 -08001244
Izik Eidus195aefd2007-10-01 22:14:18 +02001245 ret = kvm_write_guest(vcpu->kvm, gpa, val, bytes);
1246 if (ret < 0)
Avi Kivityda4a00f2007-01-05 16:36:44 -08001247 return 0;
Shaohua Life551882007-07-23 14:51:39 +08001248 kvm_mmu_pte_write(vcpu, gpa, val, bytes);
Avi Kivityda4a00f2007-01-05 16:36:44 -08001249 return 1;
1250}
1251
/*
 * Write a chunk that is known not to cross a page boundary.  Order of
 * attempts: fault on an unmapped gva, plain RAM write, in-kernel MMIO
 * device, and finally deferral to userspace MMIO.
 */
static int emulator_write_emulated_onepage(unsigned long addr,
					   const void *val,
					   unsigned int bytes,
					   struct kvm_vcpu *vcpu)
{
	struct kvm_io_device *mmio_dev;
	gpa_t gpa = vcpu->mmu.gva_to_gpa(vcpu, addr);

	if (gpa == UNMAPPED_GVA) {
		/* error code 2: a write access — TODO confirm bit layout */
		kvm_x86_ops->inject_page_fault(vcpu, addr, 2);
		return X86EMUL_PROPAGATE_FAULT;
	}

	if (emulator_write_phys(vcpu, gpa, val, bytes))
		return X86EMUL_CONTINUE;

	/*
	 * Is this MMIO handled locally?
	 */
	mmio_dev = vcpu_find_mmio_dev(vcpu, gpa);
	if (mmio_dev) {
		kvm_iodevice_write(mmio_dev, gpa, bytes, val);
		return X86EMUL_CONTINUE;
	}

	/* Defer to userspace; unlike reads, the data is carried along too. */
	vcpu->mmio_needed = 1;
	vcpu->mmio_phys_addr = gpa;
	vcpu->mmio_size = bytes;
	vcpu->mmio_is_write = 1;
	memcpy(vcpu->mmio_data, val, bytes);

	return X86EMUL_CONTINUE;
}
1285
/*
 * Emulator write callback.  Splits a write that crosses a page boundary
 * into two single-page writes, since translation and device routing are
 * only valid within one page.
 */
int emulator_write_emulated(unsigned long addr,
			    const void *val,
			    unsigned int bytes,
			    struct kvm_vcpu *vcpu)
{
	/* Crossing a page boundary? */
	if (((addr + bytes - 1) ^ addr) & PAGE_MASK) {
		int rc, now;

		/* 'now' = bytes remaining in the first page */
		now = -addr & ~PAGE_MASK;
		rc = emulator_write_emulated_onepage(addr, val, now, vcpu);
		if (rc != X86EMUL_CONTINUE)
			return rc;
		addr += now;
		val += now;
		bytes -= now;
	}
	return emulator_write_emulated_onepage(addr, val, bytes, vcpu);
}
EXPORT_SYMBOL_GPL(emulator_write_emulated);
Avi Kivityb0fcd902007-07-22 18:48:54 +03001306
/*
 * Emulator cmpxchg callback.  Deliberately emulated as a plain,
 * non-atomic, unconditional write: 'old' is never compared.  A warning
 * is printed once so the shortcut is visible in the logs.
 */
static int emulator_cmpxchg_emulated(unsigned long addr,
				     const void *old,
				     const void *new,
				     unsigned int bytes,
				     struct kvm_vcpu *vcpu)
{
	static int reported;

	if (!reported) {
		reported = 1;
		printk(KERN_WARNING "kvm: emulating exchange as write\n");
	}
	return emulator_write_emulated(addr, new, bytes, vcpu);
}
1321
/* Fetch a segment register's base address from the arch backend. */
static unsigned long get_segment_base(struct kvm_vcpu *vcpu, int seg)
{
	return kvm_x86_ops->get_segment_base(vcpu, seg);
}
1326
int emulate_invlpg(struct kvm_vcpu *vcpu, gva_t address)
{
	/*
	 * No-op: simply reports success to the emulator.
	 * NOTE(review): presumably shadow-TLB invalidation is handled
	 * elsewhere — confirm before relying on this.
	 */
	return X86EMUL_CONTINUE;
}
1331
int emulate_clts(struct kvm_vcpu *vcpu)
{
	/* CLTS: clear the Task Switched bit (CR0.TS) via the backend. */
	kvm_x86_ops->set_cr0(vcpu, vcpu->cr0 & ~X86_CR0_TS);
	return X86EMUL_CONTINUE;
}
1337
1338int emulator_get_dr(struct x86_emulate_ctxt* ctxt, int dr, unsigned long *dest)
1339{
1340 struct kvm_vcpu *vcpu = ctxt->vcpu;
1341
1342 switch (dr) {
1343 case 0 ... 3:
Christian Ehrhardtcbdd1be2007-09-09 15:41:59 +03001344 *dest = kvm_x86_ops->get_dr(vcpu, dr);
Avi Kivity6aa8b732006-12-10 02:21:36 -08001345 return X86EMUL_CONTINUE;
1346 default:
Rusty Russellf0242472007-08-01 10:48:02 +10001347 pr_unimpl(vcpu, "%s: unexpected dr %u\n", __FUNCTION__, dr);
Avi Kivity6aa8b732006-12-10 02:21:36 -08001348 return X86EMUL_UNHANDLEABLE;
1349 }
1350}
1351
/* Emulator callback: write a debug register through the arch backend. */
int emulator_set_dr(struct x86_emulate_ctxt *ctxt, int dr, unsigned long value)
{
	/* Outside 64-bit mode only the low 32 bits are significant. */
	unsigned long mask = (ctxt->mode == X86EMUL_MODE_PROT64) ? ~0ULL : ~0U;
	int exception;

	/* NOTE(review): assumes set_dr always writes 'exception' — confirm. */
	kvm_x86_ops->set_dr(ctxt->vcpu, dr, value & mask, &exception);
	if (exception) {
		/* FIXME: better handling */
		return X86EMUL_UNHANDLEABLE;
	}
	return X86EMUL_CONTINUE;
}
1364
Avi Kivity054b1362007-09-12 13:21:09 +03001365void kvm_report_emulation_failure(struct kvm_vcpu *vcpu, const char *context)
Avi Kivity6aa8b732006-12-10 02:21:36 -08001366{
1367 static int reported;
1368 u8 opcodes[4];
Avi Kivity054b1362007-09-12 13:21:09 +03001369 unsigned long rip = vcpu->rip;
Avi Kivity6aa8b732006-12-10 02:21:36 -08001370 unsigned long rip_linear;
1371
Avi Kivity054b1362007-09-12 13:21:09 +03001372 rip_linear = rip + get_segment_base(vcpu, VCPU_SREG_CS);
Avi Kivity6aa8b732006-12-10 02:21:36 -08001373
1374 if (reported)
1375 return;
1376
Avi Kivity054b1362007-09-12 13:21:09 +03001377 emulator_read_std(rip_linear, (void *)opcodes, 4, vcpu);
Avi Kivity6aa8b732006-12-10 02:21:36 -08001378
Avi Kivity054b1362007-09-12 13:21:09 +03001379 printk(KERN_ERR "emulation failed (%s) rip %lx %02x %02x %02x %02x\n",
1380 context, rip, opcodes[0], opcodes[1], opcodes[2], opcodes[3]);
Avi Kivity6aa8b732006-12-10 02:21:36 -08001381 reported = 1;
1382}
Avi Kivity054b1362007-09-12 13:21:09 +03001383EXPORT_SYMBOL_GPL(kvm_report_emulation_failure);
Avi Kivity6aa8b732006-12-10 02:21:36 -08001384
/* Memory-access callbacks handed to the x86 instruction emulator. */
struct x86_emulate_ops emulate_ops = {
	.read_std = emulator_read_std,
	.write_std = emulator_write_std,
	.read_emulated = emulator_read_emulated,
	.write_emulated = emulator_write_emulated,
	.cmpxchg_emulated = emulator_cmpxchg_emulated,
};
1392
/*
 * Emulate the instruction at the current guest rip.
 *
 * When 'no_decode' is clear, an emulation context (mode, segment bases)
 * is built from vcpu state and the instruction is decoded first; with
 * 'no_decode' set, a previously decoded context is re-executed.
 * Returns EMULATE_DONE on success, EMULATE_DO_MMIO when userspace must
 * complete an MMIO or string-PIO access (exit info filled into 'run'
 * when non-NULL), or EMULATE_FAIL when the instruction can't be handled.
 */
int emulate_instruction(struct kvm_vcpu *vcpu,
			struct kvm_run *run,
			unsigned long cr2,
			u16 error_code,
			int no_decode)
{
	int r;

	vcpu->mmio_fault_cr2 = cr2;
	/* Pull guest registers out of hardware before the emulator reads them. */
	kvm_x86_ops->cache_regs(vcpu);

	vcpu->mmio_is_write = 0;
	vcpu->pio.string = 0;

	if (!no_decode) {
		int cs_db, cs_l;
		kvm_x86_ops->get_cs_db_l_bits(vcpu, &cs_db, &cs_l);

		vcpu->emulate_ctxt.vcpu = vcpu;
		vcpu->emulate_ctxt.eflags = kvm_x86_ops->get_rflags(vcpu);
		vcpu->emulate_ctxt.cr2 = cr2;
		/* Execution mode: VM86 beats everything, then CS.L, then CS.DB. */
		vcpu->emulate_ctxt.mode =
			(vcpu->emulate_ctxt.eflags & X86_EFLAGS_VM)
			? X86EMUL_MODE_REAL : cs_l
			? X86EMUL_MODE_PROT64 :	cs_db
			? X86EMUL_MODE_PROT32 : X86EMUL_MODE_PROT16;

		/* Segment bases are architecturally zero in 64-bit mode. */
		if (vcpu->emulate_ctxt.mode == X86EMUL_MODE_PROT64) {
			vcpu->emulate_ctxt.cs_base = 0;
			vcpu->emulate_ctxt.ds_base = 0;
			vcpu->emulate_ctxt.es_base = 0;
			vcpu->emulate_ctxt.ss_base = 0;
		} else {
			vcpu->emulate_ctxt.cs_base =
					get_segment_base(vcpu, VCPU_SREG_CS);
			vcpu->emulate_ctxt.ds_base =
					get_segment_base(vcpu, VCPU_SREG_DS);
			vcpu->emulate_ctxt.es_base =
					get_segment_base(vcpu, VCPU_SREG_ES);
			vcpu->emulate_ctxt.ss_base =
					get_segment_base(vcpu, VCPU_SREG_SS);
		}

		/* FS/GS keep their bases even in 64-bit mode. */
		vcpu->emulate_ctxt.gs_base =
			get_segment_base(vcpu, VCPU_SREG_GS);
		vcpu->emulate_ctxt.fs_base =
			get_segment_base(vcpu, VCPU_SREG_FS);

		r = x86_decode_insn(&vcpu->emulate_ctxt, &emulate_ops);
		if (r)  {
			/* Decode failure may just mean a write-protected
			 * shadowed page table; unprotect and retry. */
			if (kvm_mmu_unprotect_page_virt(vcpu, cr2))
				return EMULATE_DONE;
			return EMULATE_FAIL;
		}
	}

	r = x86_emulate_insn(&vcpu->emulate_ctxt, &emulate_ops);

	/* String PIO is completed by userspace via the pio fields. */
	if (vcpu->pio.string)
		return EMULATE_DO_MMIO;

	/* Export MMIO exit details to userspace when a run struct exists. */
	if ((r || vcpu->mmio_is_write) && run) {
		run->exit_reason = KVM_EXIT_MMIO;
		run->mmio.phys_addr = vcpu->mmio_phys_addr;
		memcpy(run->mmio.data, vcpu->mmio_data, 8);
		run->mmio.len = vcpu->mmio_size;
		run->mmio.is_write = vcpu->mmio_is_write;
	}

	if (r) {
		if (kvm_mmu_unprotect_page_virt(vcpu, cr2))
			return EMULATE_DONE;
		if (!vcpu->mmio_needed) {
			kvm_report_emulation_failure(vcpu, "mmio");
			return EMULATE_FAIL;
		}
		return EMULATE_DO_MMIO;
	}

	/* Push the emulator's register/flags state back into hardware. */
	kvm_x86_ops->decache_regs(vcpu);
	kvm_x86_ops->set_rflags(vcpu, vcpu->emulate_ctxt.eflags);

	if (vcpu->mmio_is_write) {
		vcpu->mmio_needed = 0;
		return EMULATE_DO_MMIO;
	}

	return EMULATE_DONE;
}
EXPORT_SYMBOL_GPL(emulate_instruction);
1483
/*
 * The vCPU has executed a HLT instruction with in-kernel mode enabled.
 * Sleep on the vcpu's wait queue until an interrupt is pending, a signal
 * arrives, or the vcpu becomes runnable (RUNNABLE / SIPI_RECEIVED).
 */
static void kvm_vcpu_block(struct kvm_vcpu *vcpu)
{
	DECLARE_WAITQUEUE(wait, current);

	add_wait_queue(&vcpu->wq, &wait);

	/*
	 * We will block until either an interrupt or a signal wakes us up
	 */
	while (!kvm_cpu_has_interrupt(vcpu)
	       && !signal_pending(current)
	       && vcpu->mp_state != VCPU_MP_STATE_RUNNABLE
	       && vcpu->mp_state != VCPU_MP_STATE_SIPI_RECEIVED) {
		set_current_state(TASK_INTERRUPTIBLE);
		/* Drop the per-cpu vcpu context while we sleep. */
		vcpu_put(vcpu);
		schedule();
		vcpu_load(vcpu);
	}

	__set_current_state(TASK_RUNNING);
	remove_wait_queue(&vcpu->wq, &wait);
}
1509
Avi Kivityd3bef152007-06-05 15:53:05 +03001510int kvm_emulate_halt(struct kvm_vcpu *vcpu)
1511{
Avi Kivityd3bef152007-06-05 15:53:05 +03001512 ++vcpu->stat.halt_exits;
Eddie Dongb6958ce2007-07-18 12:15:21 +03001513 if (irqchip_in_kernel(vcpu->kvm)) {
He, Qingc5ec1532007-09-03 17:07:41 +03001514 vcpu->mp_state = VCPU_MP_STATE_HALTED;
1515 kvm_vcpu_block(vcpu);
1516 if (vcpu->mp_state != VCPU_MP_STATE_RUNNABLE)
1517 return -EINTR;
Eddie Dongb6958ce2007-07-18 12:15:21 +03001518 return 1;
1519 } else {
1520 vcpu->run->exit_reason = KVM_EXIT_HLT;
1521 return 0;
1522 }
Avi Kivityd3bef152007-06-05 15:53:05 +03001523}
1524EXPORT_SYMBOL_GPL(kvm_emulate_halt);
1525
/*
 * Dispatch a guest hypercall.  ABI: number in RAX, arguments in
 * RBX/RCX/RDX/RSI, result returned in RAX.  No hypercalls are
 * implemented yet, so everything yields -KVM_ENOSYS.
 */
int kvm_emulate_hypercall(struct kvm_vcpu *vcpu)
{
	unsigned long nr, a0, a1, a2, a3, ret;

	kvm_x86_ops->cache_regs(vcpu);

	nr = vcpu->regs[VCPU_REGS_RAX];
	a0 = vcpu->regs[VCPU_REGS_RBX];
	a1 = vcpu->regs[VCPU_REGS_RCX];
	a2 = vcpu->regs[VCPU_REGS_RDX];
	a3 = vcpu->regs[VCPU_REGS_RSI];

	/* A 32-bit guest only passes 32 significant bits per register. */
	if (!is_long_mode(vcpu)) {
		nr &= 0xFFFFFFFF;
		a0 &= 0xFFFFFFFF;
		a1 &= 0xFFFFFFFF;
		a2 &= 0xFFFFFFFF;
		a3 &= 0xFFFFFFFF;
	}

	switch (nr) {
	default:
		ret = -KVM_ENOSYS;
		break;
	}
	/* Hand the result back to the guest in RAX. */
	vcpu->regs[VCPU_REGS_RAX] = ret;
	kvm_x86_ops->decache_regs(vcpu);
	return 0;
}
EXPORT_SYMBOL_GPL(kvm_emulate_hypercall);
1556
/*
 * Patch the vendor-correct hypercall instruction (3 bytes) over the one
 * the guest executed at rip.  Returns 0 on success, -EFAULT if the
 * patched bytes could not be written back to guest memory.
 */
int kvm_fix_hypercall(struct kvm_vcpu *vcpu)
{
	char instruction[3];
	int ret = 0;

	mutex_lock(&vcpu->kvm->lock);

	/*
	 * Blow out the MMU to ensure that no other VCPU has an active mapping
	 * to ensure that the updated hypercall appears atomically across all
	 * VCPUs.
	 */
	kvm_mmu_zap_all(vcpu->kvm);

	kvm_x86_ops->cache_regs(vcpu);
	kvm_x86_ops->patch_hypercall(vcpu, instruction);
	if (emulator_write_emulated(vcpu->rip, instruction, 3, vcpu)
	    != X86EMUL_CONTINUE)
		ret = -EFAULT;

	mutex_unlock(&vcpu->kvm->lock);

	return ret;
}
Avi Kivity270fd9b2007-02-19 14:37:47 +02001581
Avi Kivity6aa8b732006-12-10 02:21:36 -08001582static u64 mk_cr_64(u64 curr_cr, u32 new_val)
1583{
1584 return (curr_cr & ~((1ULL << 32) - 1)) | new_val;
1585}
1586
/* Apply an LGDT executed under real-mode emulation to the vcpu. */
void realmode_lgdt(struct kvm_vcpu *vcpu, u16 limit, unsigned long base)
{
	struct descriptor_table dt = { limit, base };

	kvm_x86_ops->set_gdt(vcpu, &dt);
}
1593
/* Apply an LIDT executed under real-mode emulation to the vcpu. */
void realmode_lidt(struct kvm_vcpu *vcpu, u16 limit, unsigned long base)
{
	struct descriptor_table dt = { limit, base };

	kvm_x86_ops->set_idt(vcpu, &dt);
}
1600
/* Apply an LMSW and report the resulting rflags back to the emulator. */
void realmode_lmsw(struct kvm_vcpu *vcpu, unsigned long msw,
		   unsigned long *rflags)
{
	lmsw(vcpu, msw);
	*rflags = kvm_x86_ops->get_rflags(vcpu);
}
1607
/*
 * Read a control register on behalf of the real-mode emulator.
 * Unknown cr numbers are logged and read as 0.
 */
unsigned long realmode_get_cr(struct kvm_vcpu *vcpu, int cr)
{
	/* presumably refreshes the vcpu->cr4 shadow first — TODO confirm */
	kvm_x86_ops->decache_cr4_guest_bits(vcpu);
	switch (cr) {
	case 0:
		return vcpu->cr0;
	case 2:
		return vcpu->cr2;
	case 3:
		return vcpu->cr3;
	case 4:
		return vcpu->cr4;
	default:
		vcpu_printf(vcpu, "%s: unexpected cr %u\n", __FUNCTION__, cr);
		return 0;
	}
}
1625
/*
 * Write a control register on behalf of the real-mode emulator.
 * CR0/CR4 writes merge the 32-bit value into the 64-bit shadow;
 * unknown cr numbers are logged and ignored.
 */
void realmode_set_cr(struct kvm_vcpu *vcpu, int cr, unsigned long val,
		     unsigned long *rflags)
{
	switch (cr) {
	case 0:
		set_cr0(vcpu, mk_cr_64(vcpu->cr0, val));
		/* report the post-write rflags back to the emulator */
		*rflags = kvm_x86_ops->get_rflags(vcpu);
		break;
	case 2:
		vcpu->cr2 = val;
		break;
	case 3:
		set_cr3(vcpu, val);
		break;
	case 4:
		set_cr4(vcpu, mk_cr_64(vcpu->cr4, val));
		break;
	default:
		vcpu_printf(vcpu, "%s: unexpected cr %u\n", __FUNCTION__, cr);
	}
}
1647
/*
 * RDMSR handling shared by all arch backends.  Many MSRs the guest may
 * probe are simply read as 0; genuinely unhandled MSRs are logged and
 * reported with a non-zero return so the caller can inject #GP.
 * Returns 0 on success with the value in *pdata.
 */
int kvm_get_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata)
{
	u64 data;

	switch (msr) {
	case 0xc0010010: /* SYSCFG */
	case 0xc0010015: /* HWCR */
	case MSR_IA32_PLATFORM_ID:
	case MSR_IA32_P5_MC_ADDR:
	case MSR_IA32_P5_MC_TYPE:
	case MSR_IA32_MC0_CTL:
	case MSR_IA32_MCG_STATUS:
	case MSR_IA32_MCG_CAP:
	case MSR_IA32_MC0_MISC:
	case MSR_IA32_MC0_MISC+4:
	case MSR_IA32_MC0_MISC+8:
	case MSR_IA32_MC0_MISC+12:
	case MSR_IA32_MC0_MISC+16:
	case MSR_IA32_UCODE_REV:
	case MSR_IA32_PERF_STATUS:
	case MSR_IA32_EBL_CR_POWERON:
	/* MTRR registers */
	case 0xfe:
	case 0x200 ... 0x2ff:
		data = 0;
		break;
	case 0xcd: /* fsb frequency */
		data = 3;
		break;
	case MSR_IA32_APICBASE:
		data = kvm_get_apic_base(vcpu);
		break;
	case MSR_IA32_MISC_ENABLE:
		data = vcpu->ia32_misc_enable_msr;
		break;
#ifdef CONFIG_X86_64
	case MSR_EFER:
		data = vcpu->shadow_efer;
		break;
#endif
	default:
		pr_unimpl(vcpu, "unhandled rdmsr: 0x%x\n", msr);
		return 1;
	}
	*pdata = data;
	return 0;
}
EXPORT_SYMBOL_GPL(kvm_get_msr_common);
1696
Avi Kivity6aa8b732006-12-10 02:21:36 -08001697/*
1698 * Reads an msr value (of 'msr_index') into 'pdata'.
1699 * Returns 0 on success, non-0 otherwise.
1700 * Assumes vcpu_load() was already called.
1701 */
Avi Kivity35f3f282007-07-17 14:20:30 +03001702int kvm_get_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 *pdata)
Avi Kivity6aa8b732006-12-10 02:21:36 -08001703{
Christian Ehrhardtcbdd1be2007-09-09 15:41:59 +03001704 return kvm_x86_ops->get_msr(vcpu, msr_index, pdata);
Avi Kivity6aa8b732006-12-10 02:21:36 -08001705}
1706
Avi Kivity05b3e0c2006-12-13 00:33:45 -08001707#ifdef CONFIG_X86_64
Avi Kivity6aa8b732006-12-10 02:21:36 -08001708
Avi Kivity3bab1f52006-12-29 16:49:48 -08001709static void set_efer(struct kvm_vcpu *vcpu, u64 efer)
Avi Kivity6aa8b732006-12-10 02:21:36 -08001710{
Avi Kivity6aa8b732006-12-10 02:21:36 -08001711 if (efer & EFER_RESERVED_BITS) {
1712 printk(KERN_DEBUG "set_efer: 0x%llx #GP, reserved bits\n",
1713 efer);
1714 inject_gp(vcpu);
1715 return;
1716 }
1717
1718 if (is_paging(vcpu)
1719 && (vcpu->shadow_efer & EFER_LME) != (efer & EFER_LME)) {
1720 printk(KERN_DEBUG "set_efer: #GP, change LME while paging\n");
1721 inject_gp(vcpu);
1722 return;
1723 }
1724
Christian Ehrhardtcbdd1be2007-09-09 15:41:59 +03001725 kvm_x86_ops->set_efer(vcpu, efer);
Avi Kivity7725f0b2006-12-13 00:34:01 -08001726
Avi Kivity6aa8b732006-12-10 02:21:36 -08001727 efer &= ~EFER_LMA;
1728 efer |= vcpu->shadow_efer & EFER_LMA;
1729
1730 vcpu->shadow_efer = efer;
Avi Kivity6aa8b732006-12-10 02:21:36 -08001731}
Avi Kivity6aa8b732006-12-10 02:21:36 -08001732
1733#endif
1734
Avi Kivity3bab1f52006-12-29 16:49:48 -08001735int kvm_set_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 data)
1736{
1737 switch (msr) {
1738#ifdef CONFIG_X86_64
1739 case MSR_EFER:
1740 set_efer(vcpu, data);
1741 break;
1742#endif
1743 case MSR_IA32_MC0_STATUS:
Rusty Russellf0242472007-08-01 10:48:02 +10001744 pr_unimpl(vcpu, "%s: MSR_IA32_MC0_STATUS 0x%llx, nop\n",
Avi Kivity3bab1f52006-12-29 16:49:48 -08001745 __FUNCTION__, data);
1746 break;
Sergey Kiselev0e5bf0d2007-03-22 14:06:18 +02001747 case MSR_IA32_MCG_STATUS:
Rusty Russellf0242472007-08-01 10:48:02 +10001748 pr_unimpl(vcpu, "%s: MSR_IA32_MCG_STATUS 0x%llx, nop\n",
Sergey Kiselev0e5bf0d2007-03-22 14:06:18 +02001749 __FUNCTION__, data);
1750 break;
Avi Kivity3bab1f52006-12-29 16:49:48 -08001751 case MSR_IA32_UCODE_REV:
1752 case MSR_IA32_UCODE_WRITE:
1753 case 0x200 ... 0x2ff: /* MTRRs */
1754 break;
1755 case MSR_IA32_APICBASE:
Eddie Dong7017fc32007-07-18 11:34:57 +03001756 kvm_set_apic_base(vcpu, data);
Avi Kivity3bab1f52006-12-29 16:49:48 -08001757 break;
Avi Kivity6f00e682007-01-26 00:56:40 -08001758 case MSR_IA32_MISC_ENABLE:
1759 vcpu->ia32_misc_enable_msr = data;
1760 break;
Avi Kivity3bab1f52006-12-29 16:49:48 -08001761 default:
Rusty Russellf0242472007-08-01 10:48:02 +10001762 pr_unimpl(vcpu, "unhandled wrmsr: 0x%x\n", msr);
Avi Kivity3bab1f52006-12-29 16:49:48 -08001763 return 1;
1764 }
1765 return 0;
1766}
1767EXPORT_SYMBOL_GPL(kvm_set_msr_common);
1768
Avi Kivity6aa8b732006-12-10 02:21:36 -08001769/*
1770 * Writes msr value into into the appropriate "register".
1771 * Returns 0 on success, non-0 otherwise.
1772 * Assumes vcpu_load() was already called.
1773 */
Avi Kivity35f3f282007-07-17 14:20:30 +03001774int kvm_set_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 data)
Avi Kivity6aa8b732006-12-10 02:21:36 -08001775{
Christian Ehrhardtcbdd1be2007-09-09 15:41:59 +03001776 return kvm_x86_ops->set_msr(vcpu, msr_index, data);
Avi Kivity6aa8b732006-12-10 02:21:36 -08001777}
1778
/*
 * Voluntarily yield the cpu, but only when a reschedule is actually
 * pending, so the common case stays cheap.
 */
void kvm_resched(struct kvm_vcpu *vcpu)
{
	if (need_resched())
		cond_resched();
}
EXPORT_SYMBOL_GPL(kvm_resched);
1786
Avi Kivity06465c52007-02-28 20:46:53 +02001787void kvm_emulate_cpuid(struct kvm_vcpu *vcpu)
1788{
1789 int i;
1790 u32 function;
1791 struct kvm_cpuid_entry *e, *best;
1792
Christian Ehrhardtcbdd1be2007-09-09 15:41:59 +03001793 kvm_x86_ops->cache_regs(vcpu);
Avi Kivity06465c52007-02-28 20:46:53 +02001794 function = vcpu->regs[VCPU_REGS_RAX];
1795 vcpu->regs[VCPU_REGS_RAX] = 0;
1796 vcpu->regs[VCPU_REGS_RBX] = 0;
1797 vcpu->regs[VCPU_REGS_RCX] = 0;
1798 vcpu->regs[VCPU_REGS_RDX] = 0;
1799 best = NULL;
1800 for (i = 0; i < vcpu->cpuid_nent; ++i) {
1801 e = &vcpu->cpuid_entries[i];
1802 if (e->function == function) {
1803 best = e;
1804 break;
1805 }
1806 /*
1807 * Both basic or both extended?
1808 */
1809 if (((e->function ^ function) & 0x80000000) == 0)
1810 if (!best || e->function > best->function)
1811 best = e;
1812 }
1813 if (best) {
1814 vcpu->regs[VCPU_REGS_RAX] = best->eax;
1815 vcpu->regs[VCPU_REGS_RBX] = best->ebx;
1816 vcpu->regs[VCPU_REGS_RCX] = best->ecx;
1817 vcpu->regs[VCPU_REGS_RDX] = best->edx;
1818 }
Christian Ehrhardtcbdd1be2007-09-09 15:41:59 +03001819 kvm_x86_ops->decache_regs(vcpu);
1820 kvm_x86_ops->skip_emulated_instruction(vcpu);
Avi Kivity06465c52007-02-28 20:46:53 +02001821}
1822EXPORT_SYMBOL_GPL(kvm_emulate_cpuid);
1823
Avi Kivity039576c2007-03-20 12:46:50 +02001824static int pio_copy_data(struct kvm_vcpu *vcpu)
Avi Kivity46fc1472007-02-22 19:39:30 +02001825{
Avi Kivity039576c2007-03-20 12:46:50 +02001826 void *p = vcpu->pio_data;
1827 void *q;
1828 unsigned bytes;
1829 int nr_pages = vcpu->pio.guest_pages[1] ? 2 : 1;
1830
Avi Kivity039576c2007-03-20 12:46:50 +02001831 q = vmap(vcpu->pio.guest_pages, nr_pages, VM_READ|VM_WRITE,
1832 PAGE_KERNEL);
1833 if (!q) {
Avi Kivity039576c2007-03-20 12:46:50 +02001834 free_pio_guest_pages(vcpu);
1835 return -ENOMEM;
1836 }
1837 q += vcpu->pio.guest_page_offset;
1838 bytes = vcpu->pio.size * vcpu->pio.cur_count;
1839 if (vcpu->pio.in)
1840 memcpy(q, p, bytes);
1841 else
1842 memcpy(p, q, bytes);
1843 q -= vcpu->pio.guest_page_offset;
1844 vunmap(q);
Avi Kivity039576c2007-03-20 12:46:50 +02001845 free_pio_guest_pages(vcpu);
1846 return 0;
1847}
1848
1849static int complete_pio(struct kvm_vcpu *vcpu)
1850{
1851 struct kvm_pio_request *io = &vcpu->pio;
Avi Kivity46fc1472007-02-22 19:39:30 +02001852 long delta;
Avi Kivity039576c2007-03-20 12:46:50 +02001853 int r;
Avi Kivity46fc1472007-02-22 19:39:30 +02001854
Christian Ehrhardtcbdd1be2007-09-09 15:41:59 +03001855 kvm_x86_ops->cache_regs(vcpu);
Avi Kivity46fc1472007-02-22 19:39:30 +02001856
1857 if (!io->string) {
Avi Kivity039576c2007-03-20 12:46:50 +02001858 if (io->in)
1859 memcpy(&vcpu->regs[VCPU_REGS_RAX], vcpu->pio_data,
Avi Kivity46fc1472007-02-22 19:39:30 +02001860 io->size);
1861 } else {
Avi Kivity039576c2007-03-20 12:46:50 +02001862 if (io->in) {
1863 r = pio_copy_data(vcpu);
1864 if (r) {
Christian Ehrhardtcbdd1be2007-09-09 15:41:59 +03001865 kvm_x86_ops->cache_regs(vcpu);
Avi Kivity039576c2007-03-20 12:46:50 +02001866 return r;
1867 }
1868 }
1869
Avi Kivity46fc1472007-02-22 19:39:30 +02001870 delta = 1;
1871 if (io->rep) {
Avi Kivity039576c2007-03-20 12:46:50 +02001872 delta *= io->cur_count;
Avi Kivity46fc1472007-02-22 19:39:30 +02001873 /*
1874 * The size of the register should really depend on
1875 * current address size.
1876 */
1877 vcpu->regs[VCPU_REGS_RCX] -= delta;
1878 }
Avi Kivity039576c2007-03-20 12:46:50 +02001879 if (io->down)
Avi Kivity46fc1472007-02-22 19:39:30 +02001880 delta = -delta;
1881 delta *= io->size;
Avi Kivity039576c2007-03-20 12:46:50 +02001882 if (io->in)
Avi Kivity46fc1472007-02-22 19:39:30 +02001883 vcpu->regs[VCPU_REGS_RDI] += delta;
1884 else
1885 vcpu->regs[VCPU_REGS_RSI] += delta;
1886 }
1887
Christian Ehrhardtcbdd1be2007-09-09 15:41:59 +03001888 kvm_x86_ops->decache_regs(vcpu);
Avi Kivity46fc1472007-02-22 19:39:30 +02001889
Avi Kivity039576c2007-03-20 12:46:50 +02001890 io->count -= io->cur_count;
1891 io->cur_count = 0;
1892
Avi Kivity039576c2007-03-20 12:46:50 +02001893 return 0;
Avi Kivity46fc1472007-02-22 19:39:30 +02001894}
1895
Eddie Dong65619eb2007-07-17 11:52:33 +03001896static void kernel_pio(struct kvm_io_device *pio_dev,
1897 struct kvm_vcpu *vcpu,
1898 void *pd)
Eddie Dong74906342007-06-19 18:05:03 +03001899{
1900 /* TODO: String I/O for in kernel device */
1901
Eddie Dong9cf98822007-07-22 10:36:31 +03001902 mutex_lock(&vcpu->kvm->lock);
Eddie Dong74906342007-06-19 18:05:03 +03001903 if (vcpu->pio.in)
1904 kvm_iodevice_read(pio_dev, vcpu->pio.port,
1905 vcpu->pio.size,
Eddie Dong65619eb2007-07-17 11:52:33 +03001906 pd);
Eddie Dong74906342007-06-19 18:05:03 +03001907 else
1908 kvm_iodevice_write(pio_dev, vcpu->pio.port,
1909 vcpu->pio.size,
Eddie Dong65619eb2007-07-17 11:52:33 +03001910 pd);
Eddie Dong9cf98822007-07-22 10:36:31 +03001911 mutex_unlock(&vcpu->kvm->lock);
Eddie Dong65619eb2007-07-17 11:52:33 +03001912}
1913
1914static void pio_string_write(struct kvm_io_device *pio_dev,
1915 struct kvm_vcpu *vcpu)
1916{
1917 struct kvm_pio_request *io = &vcpu->pio;
1918 void *pd = vcpu->pio_data;
1919 int i;
1920
Eddie Dong9cf98822007-07-22 10:36:31 +03001921 mutex_lock(&vcpu->kvm->lock);
Eddie Dong65619eb2007-07-17 11:52:33 +03001922 for (i = 0; i < io->cur_count; i++) {
1923 kvm_iodevice_write(pio_dev, io->port,
1924 io->size,
1925 pd);
1926 pd += io->size;
1927 }
Eddie Dong9cf98822007-07-22 10:36:31 +03001928 mutex_unlock(&vcpu->kvm->lock);
Eddie Dong74906342007-06-19 18:05:03 +03001929}
1930
Laurent Vivier3090dd72007-08-05 10:43:32 +03001931int kvm_emulate_pio (struct kvm_vcpu *vcpu, struct kvm_run *run, int in,
1932 int size, unsigned port)
1933{
1934 struct kvm_io_device *pio_dev;
1935
1936 vcpu->run->exit_reason = KVM_EXIT_IO;
1937 vcpu->run->io.direction = in ? KVM_EXIT_IO_IN : KVM_EXIT_IO_OUT;
1938 vcpu->run->io.size = vcpu->pio.size = size;
1939 vcpu->run->io.data_offset = KVM_PIO_PAGE_OFFSET * PAGE_SIZE;
1940 vcpu->run->io.count = vcpu->pio.count = vcpu->pio.cur_count = 1;
1941 vcpu->run->io.port = vcpu->pio.port = port;
1942 vcpu->pio.in = in;
1943 vcpu->pio.string = 0;
1944 vcpu->pio.down = 0;
1945 vcpu->pio.guest_page_offset = 0;
1946 vcpu->pio.rep = 0;
1947
Christian Ehrhardtcbdd1be2007-09-09 15:41:59 +03001948 kvm_x86_ops->cache_regs(vcpu);
Laurent Vivier3090dd72007-08-05 10:43:32 +03001949 memcpy(vcpu->pio_data, &vcpu->regs[VCPU_REGS_RAX], 4);
Christian Ehrhardtcbdd1be2007-09-09 15:41:59 +03001950 kvm_x86_ops->decache_regs(vcpu);
Laurent Vivier3090dd72007-08-05 10:43:32 +03001951
Avi Kivity0967b7b2007-09-15 17:34:36 +03001952 kvm_x86_ops->skip_emulated_instruction(vcpu);
1953
Laurent Vivier3090dd72007-08-05 10:43:32 +03001954 pio_dev = vcpu_find_pio_dev(vcpu, port);
1955 if (pio_dev) {
1956 kernel_pio(pio_dev, vcpu, vcpu->pio_data);
1957 complete_pio(vcpu);
1958 return 1;
1959 }
1960 return 0;
1961}
1962EXPORT_SYMBOL_GPL(kvm_emulate_pio);
1963
/*
 * Emulate a string in/out (ins/outs) instruction, possibly with a rep
 * prefix.
 *
 * Pins the guest page(s) backing the transfer, clamps the element count
 * so one transaction never crosses more than a page boundary, and either
 * completes the write against an in-kernel device (return 1) or leaves a
 * KVM_EXIT_IO for userspace (return 0).  Faulting conditions inject #GP
 * into the guest and return 1.
 */
int kvm_emulate_pio_string(struct kvm_vcpu *vcpu, struct kvm_run *run, int in,
		  int size, unsigned long count, int down,
		  gva_t address, int rep, unsigned port)
{
	unsigned now, in_page;
	int i, ret = 0;
	int nr_pages = 1;
	struct page *page;
	struct kvm_io_device *pio_dev;

	vcpu->run->exit_reason = KVM_EXIT_IO;
	vcpu->run->io.direction = in ? KVM_EXIT_IO_IN : KVM_EXIT_IO_OUT;
	vcpu->run->io.size = vcpu->pio.size = size;
	vcpu->run->io.data_offset = KVM_PIO_PAGE_OFFSET * PAGE_SIZE;
	vcpu->run->io.count = vcpu->pio.count = vcpu->pio.cur_count = count;
	vcpu->run->io.port = vcpu->pio.port = port;
	vcpu->pio.in = in;
	vcpu->pio.string = 1;
	vcpu->pio.down = down;
	vcpu->pio.guest_page_offset = offset_in_page(address);
	vcpu->pio.rep = rep;

	/* rep with rcx == 0: nothing to transfer */
	if (!count) {
		kvm_x86_ops->skip_emulated_instruction(vcpu);
		return 1;
	}

	/* bytes available before the next page boundary in copy direction */
	if (!down)
		in_page = PAGE_SIZE - offset_in_page(address);
	else
		in_page = offset_in_page(address) + size;
	now = min(count, (unsigned long)in_page / size);
	if (!now) {
		/*
		 * String I/O straddles page boundary. Pin two guest pages
		 * so that we satisfy atomicity constraints. Do just one
		 * transaction to avoid complexity.
		 */
		nr_pages = 2;
		now = 1;
	}
	if (down) {
		/*
		 * String I/O in reverse. Yuck. Kill the guest, fix later.
		 */
		pr_unimpl(vcpu, "guest string pio down\n");
		inject_gp(vcpu);
		return 1;
	}
	vcpu->run->io.count = now;
	vcpu->pio.cur_count = now;

	/* skip the instruction only if this transaction finishes it */
	if (vcpu->pio.cur_count == vcpu->pio.count)
		kvm_x86_ops->skip_emulated_instruction(vcpu);

	/* pin the guest page(s) backing the buffer */
	for (i = 0; i < nr_pages; ++i) {
		mutex_lock(&vcpu->kvm->lock);
		page = gva_to_page(vcpu, address + i * PAGE_SIZE);
		if (page)
			get_page(page);
		vcpu->pio.guest_pages[i] = page;
		mutex_unlock(&vcpu->kvm->lock);
		if (!page) {
			/* unmapped guest address: fault the guest */
			inject_gp(vcpu);
			free_pio_guest_pages(vcpu);
			return 1;
		}
	}

	pio_dev = vcpu_find_pio_dev(vcpu, port);
	if (!vcpu->pio.in) {
		/* string PIO write */
		ret = pio_copy_data(vcpu);
		if (ret >= 0 && pio_dev) {
			pio_string_write(pio_dev, vcpu);
			complete_pio(vcpu);
			if (vcpu->pio.count == 0)
				ret = 1;
		}
	} else if (pio_dev)
		pr_unimpl(vcpu, "no string pio read support yet, "
			   "port %x size %d count %ld\n",
			  port, size, count);

	return ret;
}
EXPORT_SYMBOL_GPL(kvm_emulate_pio_string);
Avi Kivity039576c2007-03-20 12:46:50 +02002051
Avi Kivity04d2cc72007-09-10 18:10:54 +03002052/*
2053 * Check if userspace requested an interrupt window, and that the
2054 * interrupt window is open.
2055 *
2056 * No need to exit to userspace if we already have an interrupt queued.
2057 */
2058static int dm_request_for_irq_injection(struct kvm_vcpu *vcpu,
2059 struct kvm_run *kvm_run)
2060{
2061 return (!vcpu->irq_summary &&
2062 kvm_run->request_interrupt_window &&
2063 vcpu->interrupt_window_open &&
2064 (kvm_x86_ops->get_rflags(vcpu) & X86_EFLAGS_IF));
2065}
2066
2067static void post_kvm_run_save(struct kvm_vcpu *vcpu,
2068 struct kvm_run *kvm_run)
2069{
2070 kvm_run->if_flag = (kvm_x86_ops->get_rflags(vcpu) & X86_EFLAGS_IF) != 0;
2071 kvm_run->cr8 = get_cr8(vcpu);
2072 kvm_run->apic_base = kvm_get_apic_base(vcpu);
2073 if (irqchip_in_kernel(vcpu->kvm))
2074 kvm_run->ready_for_interrupt_injection = 1;
2075 else
2076 kvm_run->ready_for_interrupt_injection =
2077 (vcpu->interrupt_window_open &&
2078 vcpu->irq_summary == 0);
2079}
2080
/*
 * Inner vcpu run loop: repeatedly enter the guest until an exit needs
 * servicing by userspace, a signal is pending, or an error occurs.
 *
 * Returns > 0 never (loops instead), 0 or negative to hand control back
 * to kvm_vcpu_ioctl_run().  The preempt_disable/local_irq_disable
 * ordering around guest entry is load-bearing; do not reorder.
 */
static int __vcpu_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
{
	int r;

	/* a SIPI moves a halted AP into the runnable state */
	if (unlikely(vcpu->mp_state == VCPU_MP_STATE_SIPI_RECEIVED)) {
		/* NOTE(review): printk lacks a KERN_* level here */
		printk("vcpu %d received sipi with vector # %x\n",
		       vcpu->vcpu_id, vcpu->sipi_vector);
		kvm_lapic_reset(vcpu);
		kvm_x86_ops->vcpu_reset(vcpu);
		vcpu->mp_state = VCPU_MP_STATE_RUNNABLE;
	}

preempted:
	if (vcpu->guest_debug.enabled)
		kvm_x86_ops->guest_debug_pre(vcpu);

again:
	r = kvm_mmu_reload(vcpu);
	if (unlikely(r))
		goto out;

	preempt_disable();

	kvm_x86_ops->prepare_guest_switch(vcpu);
	kvm_load_guest_fpu(vcpu);

	local_irq_disable();

	/* bail out to deliver the signal rather than entering the guest */
	if (signal_pending(current)) {
		local_irq_enable();
		preempt_enable();
		r = -EINTR;
		kvm_run->exit_reason = KVM_EXIT_INTR;
		++vcpu->stat.signal_exits;
		goto out;
	}

	if (irqchip_in_kernel(vcpu->kvm))
		kvm_x86_ops->inject_pending_irq(vcpu);
	else if (!vcpu->mmio_read_completed)
		kvm_x86_ops->inject_pending_vectors(vcpu, kvm_run);

	vcpu->guest_mode = 1;
	kvm_guest_enter();

	/* service deferred per-vcpu requests before entry */
	if (vcpu->requests)
		if (test_and_clear_bit(KVM_TLB_FLUSH, &vcpu->requests))
			kvm_x86_ops->tlb_flush(vcpu);

	kvm_x86_ops->run(vcpu, kvm_run);

	vcpu->guest_mode = 0;
	local_irq_enable();

	++vcpu->stat.exits;

	/*
	 * We must have an instruction between local_irq_enable() and
	 * kvm_guest_exit(), so the timer interrupt isn't delayed by
	 * the interrupt shadow. The stat.exits increment will do nicely.
	 * But we need to prevent reordering, hence this barrier():
	 */
	barrier();

	kvm_guest_exit();

	preempt_enable();

	/*
	 * Profile KVM exit RIPs:
	 */
	if (unlikely(prof_on == KVM_PROFILING)) {
		kvm_x86_ops->cache_regs(vcpu);
		profile_hit(KVM_PROFILING, (void *)vcpu->rip);
	}

	r = kvm_x86_ops->handle_exit(kvm_run, vcpu);

	if (r > 0) {
		/* userspace asked to regain control when irqs are injectable */
		if (dm_request_for_irq_injection(vcpu, kvm_run)) {
			r = -EINTR;
			kvm_run->exit_reason = KVM_EXIT_INTR;
			++vcpu->stat.request_irq_exits;
			goto out;
		}
		/* fast path: re-enter the guest without rescheduling */
		if (!need_resched()) {
			++vcpu->stat.light_exits;
			goto again;
		}
	}

out:
	/* r > 0 here means "keep running" but a resched was needed */
	if (r > 0) {
		kvm_resched(vcpu);
		goto preempted;
	}

	post_kvm_run_save(vcpu, kvm_run);

	return r;
}
2182
2183
Avi Kivitybccf2152007-02-21 18:04:26 +02002184static int kvm_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
Avi Kivity6aa8b732006-12-10 02:21:36 -08002185{
Avi Kivity6aa8b732006-12-10 02:21:36 -08002186 int r;
Avi Kivity1961d272007-03-05 19:46:05 +02002187 sigset_t sigsaved;
Avi Kivity6aa8b732006-12-10 02:21:36 -08002188
Avi Kivitybccf2152007-02-21 18:04:26 +02002189 vcpu_load(vcpu);
Avi Kivity6aa8b732006-12-10 02:21:36 -08002190
He, Qingc5ec1532007-09-03 17:07:41 +03002191 if (unlikely(vcpu->mp_state == VCPU_MP_STATE_UNINITIALIZED)) {
2192 kvm_vcpu_block(vcpu);
2193 vcpu_put(vcpu);
2194 return -EAGAIN;
2195 }
2196
Avi Kivity1961d272007-03-05 19:46:05 +02002197 if (vcpu->sigset_active)
2198 sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved);
2199
Dor Laor54810342007-02-12 00:54:39 -08002200 /* re-sync apic's tpr */
He, Qing5cd4f6f2007-08-30 17:04:26 +08002201 if (!irqchip_in_kernel(vcpu->kvm))
2202 set_cr8(vcpu, kvm_run->cr8);
Dor Laor54810342007-02-12 00:54:39 -08002203
Avi Kivity02c83202007-04-29 15:02:17 +03002204 if (vcpu->pio.cur_count) {
2205 r = complete_pio(vcpu);
2206 if (r)
2207 goto out;
2208 }
2209
2210 if (vcpu->mmio_needed) {
2211 memcpy(vcpu->mmio_data, kvm_run->mmio.data, 8);
2212 vcpu->mmio_read_completed = 1;
2213 vcpu->mmio_needed = 0;
2214 r = emulate_instruction(vcpu, kvm_run,
Laurent Vivier34273182007-09-18 11:27:37 +02002215 vcpu->mmio_fault_cr2, 0, 1);
Avi Kivity02c83202007-04-29 15:02:17 +03002216 if (r == EMULATE_DO_MMIO) {
2217 /*
2218 * Read-modify-write. Back to userspace.
2219 */
Avi Kivity02c83202007-04-29 15:02:17 +03002220 r = 0;
2221 goto out;
Avi Kivity46fc1472007-02-22 19:39:30 +02002222 }
Avi Kivity6aa8b732006-12-10 02:21:36 -08002223 }
2224
Avi Kivity8eb7d332007-03-04 14:17:08 +02002225 if (kvm_run->exit_reason == KVM_EXIT_HYPERCALL) {
Christian Ehrhardtcbdd1be2007-09-09 15:41:59 +03002226 kvm_x86_ops->cache_regs(vcpu);
Avi Kivityb4e63f52007-03-04 13:59:30 +02002227 vcpu->regs[VCPU_REGS_RAX] = kvm_run->hypercall.ret;
Christian Ehrhardtcbdd1be2007-09-09 15:41:59 +03002228 kvm_x86_ops->decache_regs(vcpu);
Avi Kivityb4e63f52007-03-04 13:59:30 +02002229 }
2230
Avi Kivity04d2cc72007-09-10 18:10:54 +03002231 r = __vcpu_run(vcpu, kvm_run);
Avi Kivity6aa8b732006-12-10 02:21:36 -08002232
Avi Kivity039576c2007-03-20 12:46:50 +02002233out:
Avi Kivity1961d272007-03-05 19:46:05 +02002234 if (vcpu->sigset_active)
2235 sigprocmask(SIG_SETMASK, &sigsaved, NULL);
2236
Avi Kivity6aa8b732006-12-10 02:21:36 -08002237 vcpu_put(vcpu);
2238 return r;
2239}
2240
Avi Kivitybccf2152007-02-21 18:04:26 +02002241static int kvm_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu,
2242 struct kvm_regs *regs)
Avi Kivity6aa8b732006-12-10 02:21:36 -08002243{
Avi Kivitybccf2152007-02-21 18:04:26 +02002244 vcpu_load(vcpu);
Avi Kivity6aa8b732006-12-10 02:21:36 -08002245
Christian Ehrhardtcbdd1be2007-09-09 15:41:59 +03002246 kvm_x86_ops->cache_regs(vcpu);
Avi Kivity6aa8b732006-12-10 02:21:36 -08002247
2248 regs->rax = vcpu->regs[VCPU_REGS_RAX];
2249 regs->rbx = vcpu->regs[VCPU_REGS_RBX];
2250 regs->rcx = vcpu->regs[VCPU_REGS_RCX];
2251 regs->rdx = vcpu->regs[VCPU_REGS_RDX];
2252 regs->rsi = vcpu->regs[VCPU_REGS_RSI];
2253 regs->rdi = vcpu->regs[VCPU_REGS_RDI];
2254 regs->rsp = vcpu->regs[VCPU_REGS_RSP];
2255 regs->rbp = vcpu->regs[VCPU_REGS_RBP];
Avi Kivity05b3e0c2006-12-13 00:33:45 -08002256#ifdef CONFIG_X86_64
Avi Kivity6aa8b732006-12-10 02:21:36 -08002257 regs->r8 = vcpu->regs[VCPU_REGS_R8];
2258 regs->r9 = vcpu->regs[VCPU_REGS_R9];
2259 regs->r10 = vcpu->regs[VCPU_REGS_R10];
2260 regs->r11 = vcpu->regs[VCPU_REGS_R11];
2261 regs->r12 = vcpu->regs[VCPU_REGS_R12];
2262 regs->r13 = vcpu->regs[VCPU_REGS_R13];
2263 regs->r14 = vcpu->regs[VCPU_REGS_R14];
2264 regs->r15 = vcpu->regs[VCPU_REGS_R15];
2265#endif
2266
2267 regs->rip = vcpu->rip;
Christian Ehrhardtcbdd1be2007-09-09 15:41:59 +03002268 regs->rflags = kvm_x86_ops->get_rflags(vcpu);
Avi Kivity6aa8b732006-12-10 02:21:36 -08002269
2270 /*
2271 * Don't leak debug flags in case they were set for guest debugging
2272 */
2273 if (vcpu->guest_debug.enabled && vcpu->guest_debug.singlestep)
2274 regs->rflags &= ~(X86_EFLAGS_TF | X86_EFLAGS_RF);
2275
2276 vcpu_put(vcpu);
2277
2278 return 0;
2279}
2280
Avi Kivitybccf2152007-02-21 18:04:26 +02002281static int kvm_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu,
2282 struct kvm_regs *regs)
Avi Kivity6aa8b732006-12-10 02:21:36 -08002283{
Avi Kivitybccf2152007-02-21 18:04:26 +02002284 vcpu_load(vcpu);
Avi Kivity6aa8b732006-12-10 02:21:36 -08002285
2286 vcpu->regs[VCPU_REGS_RAX] = regs->rax;
2287 vcpu->regs[VCPU_REGS_RBX] = regs->rbx;
2288 vcpu->regs[VCPU_REGS_RCX] = regs->rcx;
2289 vcpu->regs[VCPU_REGS_RDX] = regs->rdx;
2290 vcpu->regs[VCPU_REGS_RSI] = regs->rsi;
2291 vcpu->regs[VCPU_REGS_RDI] = regs->rdi;
2292 vcpu->regs[VCPU_REGS_RSP] = regs->rsp;
2293 vcpu->regs[VCPU_REGS_RBP] = regs->rbp;
Avi Kivity05b3e0c2006-12-13 00:33:45 -08002294#ifdef CONFIG_X86_64
Avi Kivity6aa8b732006-12-10 02:21:36 -08002295 vcpu->regs[VCPU_REGS_R8] = regs->r8;
2296 vcpu->regs[VCPU_REGS_R9] = regs->r9;
2297 vcpu->regs[VCPU_REGS_R10] = regs->r10;
2298 vcpu->regs[VCPU_REGS_R11] = regs->r11;
2299 vcpu->regs[VCPU_REGS_R12] = regs->r12;
2300 vcpu->regs[VCPU_REGS_R13] = regs->r13;
2301 vcpu->regs[VCPU_REGS_R14] = regs->r14;
2302 vcpu->regs[VCPU_REGS_R15] = regs->r15;
2303#endif
2304
2305 vcpu->rip = regs->rip;
Christian Ehrhardtcbdd1be2007-09-09 15:41:59 +03002306 kvm_x86_ops->set_rflags(vcpu, regs->rflags);
Avi Kivity6aa8b732006-12-10 02:21:36 -08002307
Christian Ehrhardtcbdd1be2007-09-09 15:41:59 +03002308 kvm_x86_ops->decache_regs(vcpu);
Avi Kivity6aa8b732006-12-10 02:21:36 -08002309
2310 vcpu_put(vcpu);
2311
2312 return 0;
2313}
2314
2315static void get_segment(struct kvm_vcpu *vcpu,
2316 struct kvm_segment *var, int seg)
2317{
Christian Ehrhardtcbdd1be2007-09-09 15:41:59 +03002318 return kvm_x86_ops->get_segment(vcpu, var, seg);
Avi Kivity6aa8b732006-12-10 02:21:36 -08002319}
2320
Avi Kivitybccf2152007-02-21 18:04:26 +02002321static int kvm_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
2322 struct kvm_sregs *sregs)
Avi Kivity6aa8b732006-12-10 02:21:36 -08002323{
Avi Kivity6aa8b732006-12-10 02:21:36 -08002324 struct descriptor_table dt;
Eddie Dong2a8067f2007-08-06 16:29:07 +03002325 int pending_vec;
Avi Kivity6aa8b732006-12-10 02:21:36 -08002326
Avi Kivitybccf2152007-02-21 18:04:26 +02002327 vcpu_load(vcpu);
Avi Kivity6aa8b732006-12-10 02:21:36 -08002328
2329 get_segment(vcpu, &sregs->cs, VCPU_SREG_CS);
2330 get_segment(vcpu, &sregs->ds, VCPU_SREG_DS);
2331 get_segment(vcpu, &sregs->es, VCPU_SREG_ES);
2332 get_segment(vcpu, &sregs->fs, VCPU_SREG_FS);
2333 get_segment(vcpu, &sregs->gs, VCPU_SREG_GS);
2334 get_segment(vcpu, &sregs->ss, VCPU_SREG_SS);
2335
2336 get_segment(vcpu, &sregs->tr, VCPU_SREG_TR);
2337 get_segment(vcpu, &sregs->ldt, VCPU_SREG_LDTR);
2338
Christian Ehrhardtcbdd1be2007-09-09 15:41:59 +03002339 kvm_x86_ops->get_idt(vcpu, &dt);
Avi Kivity6aa8b732006-12-10 02:21:36 -08002340 sregs->idt.limit = dt.limit;
2341 sregs->idt.base = dt.base;
Christian Ehrhardtcbdd1be2007-09-09 15:41:59 +03002342 kvm_x86_ops->get_gdt(vcpu, &dt);
Avi Kivity6aa8b732006-12-10 02:21:36 -08002343 sregs->gdt.limit = dt.limit;
2344 sregs->gdt.base = dt.base;
2345
Christian Ehrhardtcbdd1be2007-09-09 15:41:59 +03002346 kvm_x86_ops->decache_cr4_guest_bits(vcpu);
Avi Kivity6aa8b732006-12-10 02:21:36 -08002347 sregs->cr0 = vcpu->cr0;
2348 sregs->cr2 = vcpu->cr2;
2349 sregs->cr3 = vcpu->cr3;
2350 sregs->cr4 = vcpu->cr4;
Eddie Dong7017fc32007-07-18 11:34:57 +03002351 sregs->cr8 = get_cr8(vcpu);
Avi Kivity6aa8b732006-12-10 02:21:36 -08002352 sregs->efer = vcpu->shadow_efer;
Eddie Dong7017fc32007-07-18 11:34:57 +03002353 sregs->apic_base = kvm_get_apic_base(vcpu);
Avi Kivity6aa8b732006-12-10 02:21:36 -08002354
Eddie Dong2a8067f2007-08-06 16:29:07 +03002355 if (irqchip_in_kernel(vcpu->kvm)) {
He, Qingc52fb352007-08-02 14:03:07 +03002356 memset(sregs->interrupt_bitmap, 0,
2357 sizeof sregs->interrupt_bitmap);
Christian Ehrhardtcbdd1be2007-09-09 15:41:59 +03002358 pending_vec = kvm_x86_ops->get_irq(vcpu);
Eddie Dong2a8067f2007-08-06 16:29:07 +03002359 if (pending_vec >= 0)
2360 set_bit(pending_vec, (unsigned long *)sregs->interrupt_bitmap);
2361 } else
He, Qingc52fb352007-08-02 14:03:07 +03002362 memcpy(sregs->interrupt_bitmap, vcpu->irq_pending,
2363 sizeof sregs->interrupt_bitmap);
Avi Kivity6aa8b732006-12-10 02:21:36 -08002364
2365 vcpu_put(vcpu);
2366
2367 return 0;
2368}
2369
2370static void set_segment(struct kvm_vcpu *vcpu,
2371 struct kvm_segment *var, int seg)
2372{
Christian Ehrhardtcbdd1be2007-09-09 15:41:59 +03002373 return kvm_x86_ops->set_segment(vcpu, var, seg);
Avi Kivity6aa8b732006-12-10 02:21:36 -08002374}
2375
Avi Kivitybccf2152007-02-21 18:04:26 +02002376static int kvm_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
2377 struct kvm_sregs *sregs)
Avi Kivity6aa8b732006-12-10 02:21:36 -08002378{
Avi Kivity6aa8b732006-12-10 02:21:36 -08002379 int mmu_reset_needed = 0;
Eddie Dong2a8067f2007-08-06 16:29:07 +03002380 int i, pending_vec, max_bits;
Avi Kivity6aa8b732006-12-10 02:21:36 -08002381 struct descriptor_table dt;
2382
Avi Kivitybccf2152007-02-21 18:04:26 +02002383 vcpu_load(vcpu);
Avi Kivity6aa8b732006-12-10 02:21:36 -08002384
Avi Kivity6aa8b732006-12-10 02:21:36 -08002385 dt.limit = sregs->idt.limit;
2386 dt.base = sregs->idt.base;
Christian Ehrhardtcbdd1be2007-09-09 15:41:59 +03002387 kvm_x86_ops->set_idt(vcpu, &dt);
Avi Kivity6aa8b732006-12-10 02:21:36 -08002388 dt.limit = sregs->gdt.limit;
2389 dt.base = sregs->gdt.base;
Christian Ehrhardtcbdd1be2007-09-09 15:41:59 +03002390 kvm_x86_ops->set_gdt(vcpu, &dt);
Avi Kivity6aa8b732006-12-10 02:21:36 -08002391
2392 vcpu->cr2 = sregs->cr2;
2393 mmu_reset_needed |= vcpu->cr3 != sregs->cr3;
2394 vcpu->cr3 = sregs->cr3;
2395
Eddie Dong7017fc32007-07-18 11:34:57 +03002396 set_cr8(vcpu, sregs->cr8);
Avi Kivity6aa8b732006-12-10 02:21:36 -08002397
2398 mmu_reset_needed |= vcpu->shadow_efer != sregs->efer;
Avi Kivity05b3e0c2006-12-13 00:33:45 -08002399#ifdef CONFIG_X86_64
Christian Ehrhardtcbdd1be2007-09-09 15:41:59 +03002400 kvm_x86_ops->set_efer(vcpu, sregs->efer);
Avi Kivity6aa8b732006-12-10 02:21:36 -08002401#endif
Eddie Dong7017fc32007-07-18 11:34:57 +03002402 kvm_set_apic_base(vcpu, sregs->apic_base);
Avi Kivity6aa8b732006-12-10 02:21:36 -08002403
Christian Ehrhardtcbdd1be2007-09-09 15:41:59 +03002404 kvm_x86_ops->decache_cr4_guest_bits(vcpu);
Avi Kivity399badf2007-01-05 16:36:38 -08002405
Avi Kivity6aa8b732006-12-10 02:21:36 -08002406 mmu_reset_needed |= vcpu->cr0 != sregs->cr0;
Rusty Russell81f50e32007-09-06 01:20:38 +10002407 vcpu->cr0 = sregs->cr0;
Christian Ehrhardtcbdd1be2007-09-09 15:41:59 +03002408 kvm_x86_ops->set_cr0(vcpu, sregs->cr0);
Avi Kivity6aa8b732006-12-10 02:21:36 -08002409
2410 mmu_reset_needed |= vcpu->cr4 != sregs->cr4;
Christian Ehrhardtcbdd1be2007-09-09 15:41:59 +03002411 kvm_x86_ops->set_cr4(vcpu, sregs->cr4);
Avi Kivity1b0973b2007-01-05 16:36:41 -08002412 if (!is_long_mode(vcpu) && is_pae(vcpu))
2413 load_pdptrs(vcpu, vcpu->cr3);
Avi Kivity6aa8b732006-12-10 02:21:36 -08002414
2415 if (mmu_reset_needed)
2416 kvm_mmu_reset_context(vcpu);
2417
He, Qingc52fb352007-08-02 14:03:07 +03002418 if (!irqchip_in_kernel(vcpu->kvm)) {
2419 memcpy(vcpu->irq_pending, sregs->interrupt_bitmap,
2420 sizeof vcpu->irq_pending);
2421 vcpu->irq_summary = 0;
2422 for (i = 0; i < ARRAY_SIZE(vcpu->irq_pending); ++i)
2423 if (vcpu->irq_pending[i])
2424 __set_bit(i, &vcpu->irq_summary);
Eddie Dong2a8067f2007-08-06 16:29:07 +03002425 } else {
2426 max_bits = (sizeof sregs->interrupt_bitmap) << 3;
2427 pending_vec = find_first_bit(
2428 (const unsigned long *)sregs->interrupt_bitmap,
2429 max_bits);
2430 /* Only pending external irq is handled here */
2431 if (pending_vec < max_bits) {
Christian Ehrhardtcbdd1be2007-09-09 15:41:59 +03002432 kvm_x86_ops->set_irq(vcpu, pending_vec);
Eddie Dong2a8067f2007-08-06 16:29:07 +03002433 printk("Set back pending irq %d\n", pending_vec);
2434 }
He, Qingc52fb352007-08-02 14:03:07 +03002435 }
Avi Kivity6aa8b732006-12-10 02:21:36 -08002436
Avi Kivity024aa1c2007-03-21 13:44:58 +02002437 set_segment(vcpu, &sregs->cs, VCPU_SREG_CS);
2438 set_segment(vcpu, &sregs->ds, VCPU_SREG_DS);
2439 set_segment(vcpu, &sregs->es, VCPU_SREG_ES);
2440 set_segment(vcpu, &sregs->fs, VCPU_SREG_FS);
2441 set_segment(vcpu, &sregs->gs, VCPU_SREG_GS);
2442 set_segment(vcpu, &sregs->ss, VCPU_SREG_SS);
2443
2444 set_segment(vcpu, &sregs->tr, VCPU_SREG_TR);
2445 set_segment(vcpu, &sregs->ldt, VCPU_SREG_LDTR);
2446
Avi Kivity6aa8b732006-12-10 02:21:36 -08002447 vcpu_put(vcpu);
2448
2449 return 0;
2450}
2451
Rusty Russell1747fb72007-09-06 01:21:32 +10002452void kvm_get_cs_db_l_bits(struct kvm_vcpu *vcpu, int *db, int *l)
2453{
2454 struct kvm_segment cs;
2455
2456 get_segment(vcpu, &cs, VCPU_SREG_CS);
2457 *db = cs.db;
2458 *l = cs.l;
2459}
2460EXPORT_SYMBOL_GPL(kvm_get_cs_db_l_bits);
2461
Avi Kivity6aa8b732006-12-10 02:21:36 -08002462/*
2463 * List of msr numbers which we expose to userspace through KVM_GET_MSRS
2464 * and KVM_SET_MSRS, and KVM_GET_MSR_INDEX_LIST.
Michael Riepebf591b22006-12-22 01:05:36 -08002465 *
2466 * This list is modified at module load time to reflect the
2467 * capabilities of the host cpu.
Avi Kivity6aa8b732006-12-10 02:21:36 -08002468 */
2469static u32 msrs_to_save[] = {
2470 MSR_IA32_SYSENTER_CS, MSR_IA32_SYSENTER_ESP, MSR_IA32_SYSENTER_EIP,
2471 MSR_K6_STAR,
Avi Kivity05b3e0c2006-12-13 00:33:45 -08002472#ifdef CONFIG_X86_64
Avi Kivity6aa8b732006-12-10 02:21:36 -08002473 MSR_CSTAR, MSR_KERNEL_GS_BASE, MSR_SYSCALL_MASK, MSR_LSTAR,
2474#endif
2475 MSR_IA32_TIME_STAMP_COUNTER,
2476};
2477
Michael Riepebf591b22006-12-22 01:05:36 -08002478static unsigned num_msrs_to_save;
2479
Avi Kivity6f00e682007-01-26 00:56:40 -08002480static u32 emulated_msrs[] = {
2481 MSR_IA32_MISC_ENABLE,
2482};
2483
Michael Riepebf591b22006-12-22 01:05:36 -08002484static __init void kvm_init_msr_list(void)
2485{
2486 u32 dummy[2];
2487 unsigned i, j;
2488
2489 for (i = j = 0; i < ARRAY_SIZE(msrs_to_save); i++) {
2490 if (rdmsr_safe(msrs_to_save[i], &dummy[0], &dummy[1]) < 0)
2491 continue;
2492 if (j < i)
2493 msrs_to_save[j] = msrs_to_save[i];
2494 j++;
2495 }
2496 num_msrs_to_save = j;
2497}
Avi Kivity6aa8b732006-12-10 02:21:36 -08002498
2499/*
2500 * Adapt set_msr() to msr_io()'s calling convention
2501 */
2502static int do_set_msr(struct kvm_vcpu *vcpu, unsigned index, u64 *data)
2503{
Avi Kivity35f3f282007-07-17 14:20:30 +03002504 return kvm_set_msr(vcpu, index, *data);
Avi Kivity6aa8b732006-12-10 02:21:36 -08002505}
2506
2507/*
2508 * Read or write a bunch of msrs. All parameters are kernel addresses.
2509 *
2510 * @return number of msrs set successfully.
2511 */
Avi Kivitybccf2152007-02-21 18:04:26 +02002512static int __msr_io(struct kvm_vcpu *vcpu, struct kvm_msrs *msrs,
Avi Kivity6aa8b732006-12-10 02:21:36 -08002513 struct kvm_msr_entry *entries,
2514 int (*do_msr)(struct kvm_vcpu *vcpu,
2515 unsigned index, u64 *data))
2516{
Avi Kivity6aa8b732006-12-10 02:21:36 -08002517 int i;
2518
Avi Kivitybccf2152007-02-21 18:04:26 +02002519 vcpu_load(vcpu);
Avi Kivity6aa8b732006-12-10 02:21:36 -08002520
2521 for (i = 0; i < msrs->nmsrs; ++i)
2522 if (do_msr(vcpu, entries[i].index, &entries[i].data))
2523 break;
2524
2525 vcpu_put(vcpu);
2526
2527 return i;
2528}
2529
2530/*
2531 * Read or write a bunch of msrs. Parameters are user addresses.
2532 *
2533 * @return number of msrs set successfully.
2534 */
Avi Kivitybccf2152007-02-21 18:04:26 +02002535static int msr_io(struct kvm_vcpu *vcpu, struct kvm_msrs __user *user_msrs,
Avi Kivity6aa8b732006-12-10 02:21:36 -08002536 int (*do_msr)(struct kvm_vcpu *vcpu,
2537 unsigned index, u64 *data),
2538 int writeback)
2539{
2540 struct kvm_msrs msrs;
2541 struct kvm_msr_entry *entries;
2542 int r, n;
2543 unsigned size;
2544
2545 r = -EFAULT;
2546 if (copy_from_user(&msrs, user_msrs, sizeof msrs))
2547 goto out;
2548
2549 r = -E2BIG;
2550 if (msrs.nmsrs >= MAX_IO_MSRS)
2551 goto out;
2552
2553 r = -ENOMEM;
2554 size = sizeof(struct kvm_msr_entry) * msrs.nmsrs;
2555 entries = vmalloc(size);
2556 if (!entries)
2557 goto out;
2558
2559 r = -EFAULT;
2560 if (copy_from_user(entries, user_msrs->entries, size))
2561 goto out_free;
2562
Avi Kivitybccf2152007-02-21 18:04:26 +02002563 r = n = __msr_io(vcpu, &msrs, entries, do_msr);
Avi Kivity6aa8b732006-12-10 02:21:36 -08002564 if (r < 0)
2565 goto out_free;
2566
2567 r = -EFAULT;
2568 if (writeback && copy_to_user(user_msrs->entries, entries, size))
2569 goto out_free;
2570
2571 r = n;
2572
2573out_free:
2574 vfree(entries);
2575out:
2576 return r;
2577}
2578
2579/*
2580 * Translate a guest virtual address to a guest physical address.
2581 */
Avi Kivitybccf2152007-02-21 18:04:26 +02002582static int kvm_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
2583 struct kvm_translation *tr)
Avi Kivity6aa8b732006-12-10 02:21:36 -08002584{
2585 unsigned long vaddr = tr->linear_address;
Avi Kivity6aa8b732006-12-10 02:21:36 -08002586 gpa_t gpa;
2587
Avi Kivitybccf2152007-02-21 18:04:26 +02002588 vcpu_load(vcpu);
Shaohua Li11ec2802007-07-23 14:51:37 +08002589 mutex_lock(&vcpu->kvm->lock);
Avi Kivity6aa8b732006-12-10 02:21:36 -08002590 gpa = vcpu->mmu.gva_to_gpa(vcpu, vaddr);
2591 tr->physical_address = gpa;
2592 tr->valid = gpa != UNMAPPED_GVA;
2593 tr->writeable = 1;
2594 tr->usermode = 0;
Shaohua Li11ec2802007-07-23 14:51:37 +08002595 mutex_unlock(&vcpu->kvm->lock);
Avi Kivity6aa8b732006-12-10 02:21:36 -08002596 vcpu_put(vcpu);
2597
2598 return 0;
2599}
2600
Avi Kivitybccf2152007-02-21 18:04:26 +02002601static int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu,
2602 struct kvm_interrupt *irq)
Avi Kivity6aa8b732006-12-10 02:21:36 -08002603{
Avi Kivity6aa8b732006-12-10 02:21:36 -08002604 if (irq->irq < 0 || irq->irq >= 256)
2605 return -EINVAL;
Eddie Dong97222cc2007-09-12 10:58:04 +03002606 if (irqchip_in_kernel(vcpu->kvm))
2607 return -ENXIO;
Avi Kivitybccf2152007-02-21 18:04:26 +02002608 vcpu_load(vcpu);
Avi Kivity6aa8b732006-12-10 02:21:36 -08002609
2610 set_bit(irq->irq, vcpu->irq_pending);
2611 set_bit(irq->irq / BITS_PER_LONG, &vcpu->irq_summary);
2612
2613 vcpu_put(vcpu);
2614
2615 return 0;
2616}
2617
Avi Kivitybccf2152007-02-21 18:04:26 +02002618static int kvm_vcpu_ioctl_debug_guest(struct kvm_vcpu *vcpu,
2619 struct kvm_debug_guest *dbg)
Avi Kivity6aa8b732006-12-10 02:21:36 -08002620{
Avi Kivity6aa8b732006-12-10 02:21:36 -08002621 int r;
2622
Avi Kivitybccf2152007-02-21 18:04:26 +02002623 vcpu_load(vcpu);
Avi Kivity6aa8b732006-12-10 02:21:36 -08002624
Christian Ehrhardtcbdd1be2007-09-09 15:41:59 +03002625 r = kvm_x86_ops->set_guest_debug(vcpu, dbg);
Avi Kivity6aa8b732006-12-10 02:21:36 -08002626
2627 vcpu_put(vcpu);
2628
2629 return r;
2630}
2631
Avi Kivity9a2bb7f2007-02-22 12:58:31 +02002632static struct page *kvm_vcpu_nopage(struct vm_area_struct *vma,
2633 unsigned long address,
2634 int *type)
2635{
2636 struct kvm_vcpu *vcpu = vma->vm_file->private_data;
2637 unsigned long pgoff;
2638 struct page *page;
2639
Avi Kivity9a2bb7f2007-02-22 12:58:31 +02002640 pgoff = ((address - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff;
Avi Kivity039576c2007-03-20 12:46:50 +02002641 if (pgoff == 0)
2642 page = virt_to_page(vcpu->run);
2643 else if (pgoff == KVM_PIO_PAGE_OFFSET)
2644 page = virt_to_page(vcpu->pio_data);
2645 else
Avi Kivity9a2bb7f2007-02-22 12:58:31 +02002646 return NOPAGE_SIGBUS;
Avi Kivity9a2bb7f2007-02-22 12:58:31 +02002647 get_page(page);
Nguyen Anh Quynhcd0d9132007-07-11 14:30:54 +03002648 if (type != NULL)
2649 *type = VM_FAULT_MINOR;
2650
Avi Kivity9a2bb7f2007-02-22 12:58:31 +02002651 return page;
2652}
2653
/* VM operations for vcpu mmaps; faults are served by kvm_vcpu_nopage(). */
static struct vm_operations_struct kvm_vcpu_vm_ops = {
	.nopage = kvm_vcpu_nopage,
};
2657
/* mmap handler for vcpu fds: just install the nopage-based vm_ops. */
static int kvm_vcpu_mmap(struct file *file, struct vm_area_struct *vma)
{
	vma->vm_ops = &kvm_vcpu_vm_ops;
	return 0;
}
2663
Avi Kivitybccf2152007-02-21 18:04:26 +02002664static int kvm_vcpu_release(struct inode *inode, struct file *filp)
2665{
2666 struct kvm_vcpu *vcpu = filp->private_data;
2667
2668 fput(vcpu->kvm->filp);
2669 return 0;
2670}
2671
/* File operations backing a vcpu file descriptor. */
static struct file_operations kvm_vcpu_fops = {
	.release = kvm_vcpu_release,
	.unlocked_ioctl = kvm_vcpu_ioctl,
	.compat_ioctl = kvm_vcpu_ioctl,
	.mmap = kvm_vcpu_mmap,
};
2678
2679/*
2680 * Allocates an inode for the vcpu.
2681 */
2682static int create_vcpu_fd(struct kvm_vcpu *vcpu)
2683{
2684 int fd, r;
2685 struct inode *inode;
2686 struct file *file;
2687
Avi Kivityd6d28162007-06-28 08:38:16 -04002688 r = anon_inode_getfd(&fd, &inode, &file,
2689 "kvm-vcpu", &kvm_vcpu_fops, vcpu);
2690 if (r)
2691 return r;
Avi Kivitybccf2152007-02-21 18:04:26 +02002692 atomic_inc(&vcpu->kvm->filp->f_count);
Avi Kivitybccf2152007-02-21 18:04:26 +02002693 return fd;
Avi Kivitybccf2152007-02-21 18:04:26 +02002694}
2695
Avi Kivityc5ea7662007-02-20 18:41:05 +02002696/*
2697 * Creates some virtual cpus. Good luck creating more than one.
2698 */
2699static int kvm_vm_ioctl_create_vcpu(struct kvm *kvm, int n)
2700{
2701 int r;
2702 struct kvm_vcpu *vcpu;
2703
Avi Kivityc5ea7662007-02-20 18:41:05 +02002704 if (!valid_vcpu(n))
Rusty Russellfb3f0f52007-07-27 17:16:56 +10002705 return -EINVAL;
Avi Kivityc5ea7662007-02-20 18:41:05 +02002706
Christian Ehrhardtcbdd1be2007-09-09 15:41:59 +03002707 vcpu = kvm_x86_ops->vcpu_create(kvm, n);
Rusty Russellfb3f0f52007-07-27 17:16:56 +10002708 if (IS_ERR(vcpu))
2709 return PTR_ERR(vcpu);
Avi Kivityc5ea7662007-02-20 18:41:05 +02002710
Avi Kivity15ad7142007-07-11 18:17:21 +03002711 preempt_notifier_init(&vcpu->preempt_notifier, &kvm_preempt_ops);
2712
Rusty Russellb114b082007-07-30 21:13:43 +10002713 /* We do fxsave: this must be aligned. */
2714 BUG_ON((unsigned long)&vcpu->host_fx_image & 0xF);
2715
Rusty Russellfb3f0f52007-07-27 17:16:56 +10002716 vcpu_load(vcpu);
Avi Kivityc5ea7662007-02-20 18:41:05 +02002717 r = kvm_mmu_setup(vcpu);
Avi Kivityc5ea7662007-02-20 18:41:05 +02002718 vcpu_put(vcpu);
Avi Kivityc5ea7662007-02-20 18:41:05 +02002719 if (r < 0)
Rusty Russellfb3f0f52007-07-27 17:16:56 +10002720 goto free_vcpu;
Avi Kivityc5ea7662007-02-20 18:41:05 +02002721
Shaohua Li11ec2802007-07-23 14:51:37 +08002722 mutex_lock(&kvm->lock);
Rusty Russellfb3f0f52007-07-27 17:16:56 +10002723 if (kvm->vcpus[n]) {
2724 r = -EEXIST;
Shaohua Li11ec2802007-07-23 14:51:37 +08002725 mutex_unlock(&kvm->lock);
Rusty Russellfb3f0f52007-07-27 17:16:56 +10002726 goto mmu_unload;
2727 }
2728 kvm->vcpus[n] = vcpu;
Shaohua Li11ec2802007-07-23 14:51:37 +08002729 mutex_unlock(&kvm->lock);
Rusty Russellfb3f0f52007-07-27 17:16:56 +10002730
2731 /* Now it's all set up, let userspace reach it */
Avi Kivitybccf2152007-02-21 18:04:26 +02002732 r = create_vcpu_fd(vcpu);
2733 if (r < 0)
Rusty Russellfb3f0f52007-07-27 17:16:56 +10002734 goto unlink;
Avi Kivitybccf2152007-02-21 18:04:26 +02002735 return r;
Avi Kivityc5ea7662007-02-20 18:41:05 +02002736
Rusty Russellfb3f0f52007-07-27 17:16:56 +10002737unlink:
Shaohua Li11ec2802007-07-23 14:51:37 +08002738 mutex_lock(&kvm->lock);
Rusty Russellfb3f0f52007-07-27 17:16:56 +10002739 kvm->vcpus[n] = NULL;
Shaohua Li11ec2802007-07-23 14:51:37 +08002740 mutex_unlock(&kvm->lock);
Rusty Russellfb3f0f52007-07-27 17:16:56 +10002741
2742mmu_unload:
2743 vcpu_load(vcpu);
2744 kvm_mmu_unload(vcpu);
2745 vcpu_put(vcpu);
2746
2747free_vcpu:
Christian Ehrhardtcbdd1be2007-09-09 15:41:59 +03002748 kvm_x86_ops->vcpu_free(vcpu);
Avi Kivityc5ea7662007-02-20 18:41:05 +02002749 return r;
2750}
2751
Eddie Dong2cc51562007-05-21 07:28:09 +03002752static void cpuid_fix_nx_cap(struct kvm_vcpu *vcpu)
2753{
2754 u64 efer;
2755 int i;
2756 struct kvm_cpuid_entry *e, *entry;
2757
2758 rdmsrl(MSR_EFER, efer);
2759 entry = NULL;
2760 for (i = 0; i < vcpu->cpuid_nent; ++i) {
2761 e = &vcpu->cpuid_entries[i];
2762 if (e->function == 0x80000001) {
2763 entry = e;
2764 break;
2765 }
2766 }
Avi Kivity4c981b42007-07-25 09:22:12 +03002767 if (entry && (entry->edx & (1 << 20)) && !(efer & EFER_NX)) {
Eddie Dong2cc51562007-05-21 07:28:09 +03002768 entry->edx &= ~(1 << 20);
Avi Kivity4c981b42007-07-25 09:22:12 +03002769 printk(KERN_INFO "kvm: guest NX capability removed\n");
Eddie Dong2cc51562007-05-21 07:28:09 +03002770 }
2771}
2772
Avi Kivity06465c52007-02-28 20:46:53 +02002773static int kvm_vcpu_ioctl_set_cpuid(struct kvm_vcpu *vcpu,
2774 struct kvm_cpuid *cpuid,
2775 struct kvm_cpuid_entry __user *entries)
2776{
2777 int r;
2778
2779 r = -E2BIG;
2780 if (cpuid->nent > KVM_MAX_CPUID_ENTRIES)
2781 goto out;
2782 r = -EFAULT;
2783 if (copy_from_user(&vcpu->cpuid_entries, entries,
2784 cpuid->nent * sizeof(struct kvm_cpuid_entry)))
2785 goto out;
2786 vcpu->cpuid_nent = cpuid->nent;
Eddie Dong2cc51562007-05-21 07:28:09 +03002787 cpuid_fix_nx_cap(vcpu);
Avi Kivity06465c52007-02-28 20:46:53 +02002788 return 0;
2789
2790out:
2791 return r;
2792}
2793
Avi Kivity1961d272007-03-05 19:46:05 +02002794static int kvm_vcpu_ioctl_set_sigmask(struct kvm_vcpu *vcpu, sigset_t *sigset)
2795{
2796 if (sigset) {
2797 sigdelsetmask(sigset, sigmask(SIGKILL)|sigmask(SIGSTOP));
2798 vcpu->sigset_active = 1;
2799 vcpu->sigset = *sigset;
2800 } else
2801 vcpu->sigset_active = 0;
2802 return 0;
2803}
2804
Avi Kivityb8836732007-04-01 16:34:31 +03002805/*
2806 * fxsave fpu state. Taken from x86_64/processor.h. To be killed when
2807 * we have asm/x86/processor.h
2808 */
2809struct fxsave {
2810 u16 cwd;
2811 u16 swd;
2812 u16 twd;
2813 u16 fop;
2814 u64 rip;
2815 u64 rdp;
2816 u32 mxcsr;
2817 u32 mxcsr_mask;
2818 u32 st_space[32]; /* 8*16 bytes for each FP-reg = 128 bytes */
2819#ifdef CONFIG_X86_64
2820 u32 xmm_space[64]; /* 16*16 bytes for each XMM-reg = 256 bytes */
2821#else
2822 u32 xmm_space[32]; /* 8*16 bytes for each XMM-reg = 128 bytes */
2823#endif
2824};
2825
2826static int kvm_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
2827{
Rusty Russellb114b082007-07-30 21:13:43 +10002828 struct fxsave *fxsave = (struct fxsave *)&vcpu->guest_fx_image;
Avi Kivityb8836732007-04-01 16:34:31 +03002829
2830 vcpu_load(vcpu);
2831
2832 memcpy(fpu->fpr, fxsave->st_space, 128);
2833 fpu->fcw = fxsave->cwd;
2834 fpu->fsw = fxsave->swd;
2835 fpu->ftwx = fxsave->twd;
2836 fpu->last_opcode = fxsave->fop;
2837 fpu->last_ip = fxsave->rip;
2838 fpu->last_dp = fxsave->rdp;
2839 memcpy(fpu->xmm, fxsave->xmm_space, sizeof fxsave->xmm_space);
2840
2841 vcpu_put(vcpu);
2842
2843 return 0;
2844}
2845
2846static int kvm_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
2847{
Rusty Russellb114b082007-07-30 21:13:43 +10002848 struct fxsave *fxsave = (struct fxsave *)&vcpu->guest_fx_image;
Avi Kivityb8836732007-04-01 16:34:31 +03002849
2850 vcpu_load(vcpu);
2851
2852 memcpy(fxsave->st_space, fpu->fpr, 128);
2853 fxsave->cwd = fpu->fcw;
2854 fxsave->swd = fpu->fsw;
2855 fxsave->twd = fpu->ftwx;
2856 fxsave->fop = fpu->last_opcode;
2857 fxsave->rip = fpu->last_ip;
2858 fxsave->rdp = fpu->last_dp;
2859 memcpy(fxsave->xmm_space, fpu->xmm, sizeof fxsave->xmm_space);
2860
2861 vcpu_put(vcpu);
2862
2863 return 0;
2864}
2865
Eddie Dong96ad2cc2007-09-06 12:22:56 +03002866static int kvm_vcpu_ioctl_get_lapic(struct kvm_vcpu *vcpu,
2867 struct kvm_lapic_state *s)
2868{
2869 vcpu_load(vcpu);
2870 memcpy(s->regs, vcpu->apic->regs, sizeof *s);
2871 vcpu_put(vcpu);
2872
2873 return 0;
2874}
2875
/*
 * Overwrite the in-kernel local APIC registers with userspace-supplied
 * state, then let the APIC code recompute its derived state.
 */
static int kvm_vcpu_ioctl_set_lapic(struct kvm_vcpu *vcpu,
				    struct kvm_lapic_state *s)
{
	vcpu_load(vcpu);
	memcpy(vcpu->apic->regs, s->regs, sizeof *s);
	kvm_apic_post_state_restore(vcpu);
	vcpu_put(vcpu);

	return 0;
}
2886
Avi Kivitybccf2152007-02-21 18:04:26 +02002887static long kvm_vcpu_ioctl(struct file *filp,
2888 unsigned int ioctl, unsigned long arg)
Avi Kivity6aa8b732006-12-10 02:21:36 -08002889{
Avi Kivitybccf2152007-02-21 18:04:26 +02002890 struct kvm_vcpu *vcpu = filp->private_data;
Al Viro2f366982007-02-09 16:38:35 +00002891 void __user *argp = (void __user *)arg;
Avi Kivity6aa8b732006-12-10 02:21:36 -08002892 int r = -EINVAL;
2893
2894 switch (ioctl) {
Avi Kivity9a2bb7f2007-02-22 12:58:31 +02002895 case KVM_RUN:
Avi Kivityf0fe5102007-03-07 13:11:17 +02002896 r = -EINVAL;
2897 if (arg)
2898 goto out;
Avi Kivity9a2bb7f2007-02-22 12:58:31 +02002899 r = kvm_vcpu_ioctl_run(vcpu, vcpu->run);
Avi Kivity6aa8b732006-12-10 02:21:36 -08002900 break;
Avi Kivity6aa8b732006-12-10 02:21:36 -08002901 case KVM_GET_REGS: {
2902 struct kvm_regs kvm_regs;
2903
Avi Kivitybccf2152007-02-21 18:04:26 +02002904 memset(&kvm_regs, 0, sizeof kvm_regs);
2905 r = kvm_vcpu_ioctl_get_regs(vcpu, &kvm_regs);
Avi Kivity6aa8b732006-12-10 02:21:36 -08002906 if (r)
2907 goto out;
2908 r = -EFAULT;
Al Viro2f366982007-02-09 16:38:35 +00002909 if (copy_to_user(argp, &kvm_regs, sizeof kvm_regs))
Avi Kivity6aa8b732006-12-10 02:21:36 -08002910 goto out;
2911 r = 0;
2912 break;
2913 }
2914 case KVM_SET_REGS: {
2915 struct kvm_regs kvm_regs;
2916
2917 r = -EFAULT;
Al Viro2f366982007-02-09 16:38:35 +00002918 if (copy_from_user(&kvm_regs, argp, sizeof kvm_regs))
Avi Kivity6aa8b732006-12-10 02:21:36 -08002919 goto out;
Avi Kivitybccf2152007-02-21 18:04:26 +02002920 r = kvm_vcpu_ioctl_set_regs(vcpu, &kvm_regs);
Avi Kivity6aa8b732006-12-10 02:21:36 -08002921 if (r)
2922 goto out;
2923 r = 0;
2924 break;
2925 }
2926 case KVM_GET_SREGS: {
2927 struct kvm_sregs kvm_sregs;
2928
Avi Kivitybccf2152007-02-21 18:04:26 +02002929 memset(&kvm_sregs, 0, sizeof kvm_sregs);
2930 r = kvm_vcpu_ioctl_get_sregs(vcpu, &kvm_sregs);
Avi Kivity6aa8b732006-12-10 02:21:36 -08002931 if (r)
2932 goto out;
2933 r = -EFAULT;
Al Viro2f366982007-02-09 16:38:35 +00002934 if (copy_to_user(argp, &kvm_sregs, sizeof kvm_sregs))
Avi Kivity6aa8b732006-12-10 02:21:36 -08002935 goto out;
2936 r = 0;
2937 break;
2938 }
2939 case KVM_SET_SREGS: {
2940 struct kvm_sregs kvm_sregs;
2941
2942 r = -EFAULT;
Al Viro2f366982007-02-09 16:38:35 +00002943 if (copy_from_user(&kvm_sregs, argp, sizeof kvm_sregs))
Avi Kivity6aa8b732006-12-10 02:21:36 -08002944 goto out;
Avi Kivitybccf2152007-02-21 18:04:26 +02002945 r = kvm_vcpu_ioctl_set_sregs(vcpu, &kvm_sregs);
Avi Kivity6aa8b732006-12-10 02:21:36 -08002946 if (r)
2947 goto out;
2948 r = 0;
2949 break;
2950 }
2951 case KVM_TRANSLATE: {
2952 struct kvm_translation tr;
2953
2954 r = -EFAULT;
Al Viro2f366982007-02-09 16:38:35 +00002955 if (copy_from_user(&tr, argp, sizeof tr))
Avi Kivity6aa8b732006-12-10 02:21:36 -08002956 goto out;
Avi Kivitybccf2152007-02-21 18:04:26 +02002957 r = kvm_vcpu_ioctl_translate(vcpu, &tr);
Avi Kivity6aa8b732006-12-10 02:21:36 -08002958 if (r)
2959 goto out;
2960 r = -EFAULT;
Al Viro2f366982007-02-09 16:38:35 +00002961 if (copy_to_user(argp, &tr, sizeof tr))
Avi Kivity6aa8b732006-12-10 02:21:36 -08002962 goto out;
2963 r = 0;
2964 break;
2965 }
2966 case KVM_INTERRUPT: {
2967 struct kvm_interrupt irq;
2968
2969 r = -EFAULT;
Al Viro2f366982007-02-09 16:38:35 +00002970 if (copy_from_user(&irq, argp, sizeof irq))
Avi Kivity6aa8b732006-12-10 02:21:36 -08002971 goto out;
Avi Kivitybccf2152007-02-21 18:04:26 +02002972 r = kvm_vcpu_ioctl_interrupt(vcpu, &irq);
Avi Kivity6aa8b732006-12-10 02:21:36 -08002973 if (r)
2974 goto out;
2975 r = 0;
2976 break;
2977 }
2978 case KVM_DEBUG_GUEST: {
2979 struct kvm_debug_guest dbg;
2980
2981 r = -EFAULT;
Al Viro2f366982007-02-09 16:38:35 +00002982 if (copy_from_user(&dbg, argp, sizeof dbg))
Avi Kivity6aa8b732006-12-10 02:21:36 -08002983 goto out;
Avi Kivitybccf2152007-02-21 18:04:26 +02002984 r = kvm_vcpu_ioctl_debug_guest(vcpu, &dbg);
Avi Kivity6aa8b732006-12-10 02:21:36 -08002985 if (r)
2986 goto out;
2987 r = 0;
2988 break;
2989 }
Avi Kivitybccf2152007-02-21 18:04:26 +02002990 case KVM_GET_MSRS:
Avi Kivity35f3f282007-07-17 14:20:30 +03002991 r = msr_io(vcpu, argp, kvm_get_msr, 1);
Avi Kivitybccf2152007-02-21 18:04:26 +02002992 break;
2993 case KVM_SET_MSRS:
2994 r = msr_io(vcpu, argp, do_set_msr, 0);
2995 break;
Avi Kivity06465c52007-02-28 20:46:53 +02002996 case KVM_SET_CPUID: {
2997 struct kvm_cpuid __user *cpuid_arg = argp;
2998 struct kvm_cpuid cpuid;
2999
3000 r = -EFAULT;
3001 if (copy_from_user(&cpuid, cpuid_arg, sizeof cpuid))
3002 goto out;
3003 r = kvm_vcpu_ioctl_set_cpuid(vcpu, &cpuid, cpuid_arg->entries);
3004 if (r)
3005 goto out;
3006 break;
3007 }
Avi Kivity1961d272007-03-05 19:46:05 +02003008 case KVM_SET_SIGNAL_MASK: {
3009 struct kvm_signal_mask __user *sigmask_arg = argp;
3010 struct kvm_signal_mask kvm_sigmask;
3011 sigset_t sigset, *p;
3012
3013 p = NULL;
3014 if (argp) {
3015 r = -EFAULT;
3016 if (copy_from_user(&kvm_sigmask, argp,
3017 sizeof kvm_sigmask))
3018 goto out;
3019 r = -EINVAL;
3020 if (kvm_sigmask.len != sizeof sigset)
3021 goto out;
3022 r = -EFAULT;
3023 if (copy_from_user(&sigset, sigmask_arg->sigset,
3024 sizeof sigset))
3025 goto out;
3026 p = &sigset;
3027 }
3028 r = kvm_vcpu_ioctl_set_sigmask(vcpu, &sigset);
3029 break;
3030 }
Avi Kivityb8836732007-04-01 16:34:31 +03003031 case KVM_GET_FPU: {
3032 struct kvm_fpu fpu;
3033
3034 memset(&fpu, 0, sizeof fpu);
3035 r = kvm_vcpu_ioctl_get_fpu(vcpu, &fpu);
3036 if (r)
3037 goto out;
3038 r = -EFAULT;
3039 if (copy_to_user(argp, &fpu, sizeof fpu))
3040 goto out;
3041 r = 0;
3042 break;
3043 }
3044 case KVM_SET_FPU: {
3045 struct kvm_fpu fpu;
3046
3047 r = -EFAULT;
3048 if (copy_from_user(&fpu, argp, sizeof fpu))
3049 goto out;
3050 r = kvm_vcpu_ioctl_set_fpu(vcpu, &fpu);
3051 if (r)
3052 goto out;
3053 r = 0;
3054 break;
3055 }
Eddie Dong96ad2cc2007-09-06 12:22:56 +03003056 case KVM_GET_LAPIC: {
3057 struct kvm_lapic_state lapic;
3058
3059 memset(&lapic, 0, sizeof lapic);
3060 r = kvm_vcpu_ioctl_get_lapic(vcpu, &lapic);
3061 if (r)
3062 goto out;
3063 r = -EFAULT;
3064 if (copy_to_user(argp, &lapic, sizeof lapic))
3065 goto out;
3066 r = 0;
3067 break;
3068 }
3069 case KVM_SET_LAPIC: {
3070 struct kvm_lapic_state lapic;
3071
3072 r = -EFAULT;
3073 if (copy_from_user(&lapic, argp, sizeof lapic))
3074 goto out;
3075 r = kvm_vcpu_ioctl_set_lapic(vcpu, &lapic);;
3076 if (r)
3077 goto out;
3078 r = 0;
3079 break;
3080 }
Avi Kivitybccf2152007-02-21 18:04:26 +02003081 default:
3082 ;
3083 }
3084out:
3085 return r;
3086}
3087
/*
 * ioctl dispatcher for VM file descriptors: vcpu creation, memory
 * region/alias setup, dirty logging, shadow-MMU sizing, and in-kernel
 * irqchip creation and state access.
 */
static long kvm_vm_ioctl(struct file *filp,
			 unsigned int ioctl, unsigned long arg)
{
	struct kvm *kvm = filp->private_data;
	void __user *argp = (void __user *)arg;
	int r = -EINVAL;

	switch (ioctl) {
	case KVM_CREATE_VCPU:
		r = kvm_vm_ioctl_create_vcpu(kvm, arg);
		if (r < 0)
			goto out;
		break;
	case KVM_SET_MEMORY_REGION: {
		struct kvm_memory_region kvm_mem;

		r = -EFAULT;
		if (copy_from_user(&kvm_mem, argp, sizeof kvm_mem))
			goto out;
		r = kvm_vm_ioctl_set_memory_region(kvm, &kvm_mem);
		if (r)
			goto out;
		break;
	}
	case KVM_SET_NR_MMU_PAGES:
		r = kvm_vm_ioctl_set_nr_mmu_pages(kvm, arg);
		if (r)
			goto out;
		break;
	case KVM_GET_NR_MMU_PAGES:
		r = kvm_vm_ioctl_get_nr_mmu_pages(kvm);
		break;
	case KVM_GET_DIRTY_LOG: {
		struct kvm_dirty_log log;

		r = -EFAULT;
		if (copy_from_user(&log, argp, sizeof log))
			goto out;
		r = kvm_vm_ioctl_get_dirty_log(kvm, &log);
		if (r)
			goto out;
		break;
	}
	case KVM_SET_MEMORY_ALIAS: {
		struct kvm_memory_alias alias;

		r = -EFAULT;
		if (copy_from_user(&alias, argp, sizeof alias))
			goto out;
		r = kvm_vm_ioctl_set_memory_alias(kvm, &alias);
		if (r)
			goto out;
		break;
	}
	case KVM_CREATE_IRQCHIP:
		/* Create PIC first; on ioapic failure the PIC is freed
		 * again so the VM never has a half-built irqchip. */
		r = -ENOMEM;
		kvm->vpic = kvm_create_pic(kvm);
		if (kvm->vpic) {
			r = kvm_ioapic_init(kvm);
			if (r) {
				kfree(kvm->vpic);
				kvm->vpic = NULL;
				goto out;
			}
		}
		else
			goto out;
		break;
	case KVM_IRQ_LINE: {
		struct kvm_irq_level irq_event;

		r = -EFAULT;
		if (copy_from_user(&irq_event, argp, sizeof irq_event))
			goto out;
		/* Only meaningful with an in-kernel irqchip; otherwise
		 * r stays -EINVAL. */
		if (irqchip_in_kernel(kvm)) {
			mutex_lock(&kvm->lock);
			/* Lines 0-15 also feed the PIC; all lines feed
			 * the IOAPIC. */
			if (irq_event.irq < 16)
				kvm_pic_set_irq(pic_irqchip(kvm),
					irq_event.irq,
					irq_event.level);
			kvm_ioapic_set_irq(kvm->vioapic,
					irq_event.irq,
					irq_event.level);
			mutex_unlock(&kvm->lock);
			r = 0;
		}
		break;
	}
	case KVM_GET_IRQCHIP: {
		/* 0: PIC master, 1: PIC slave, 2: IOAPIC */
		struct kvm_irqchip chip;

		r = -EFAULT;
		if (copy_from_user(&chip, argp, sizeof chip))
			goto out;
		r = -ENXIO;
		if (!irqchip_in_kernel(kvm))
			goto out;
		r = kvm_vm_ioctl_get_irqchip(kvm, &chip);
		if (r)
			goto out;
		r = -EFAULT;
		if (copy_to_user(argp, &chip, sizeof chip))
			goto out;
		r = 0;
		break;
	}
	case KVM_SET_IRQCHIP: {
		/* 0: PIC master, 1: PIC slave, 2: IOAPIC */
		struct kvm_irqchip chip;

		r = -EFAULT;
		if (copy_from_user(&chip, argp, sizeof chip))
			goto out;
		r = -ENXIO;
		if (!irqchip_in_kernel(kvm))
			goto out;
		r = kvm_vm_ioctl_set_irqchip(kvm, &chip);
		if (r)
			goto out;
		r = 0;
		break;
	}
	default:
		;	/* unknown ioctl: fall through with r == -EINVAL */
	}
out:
	return r;
}
3217
3218static struct page *kvm_vm_nopage(struct vm_area_struct *vma,
3219 unsigned long address,
3220 int *type)
3221{
3222 struct kvm *kvm = vma->vm_file->private_data;
3223 unsigned long pgoff;
Avi Kivityf17abe92007-02-21 19:28:04 +02003224 struct page *page;
3225
Avi Kivityf17abe92007-02-21 19:28:04 +02003226 pgoff = ((address - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff;
Avi Kivity954bbbc2007-03-30 14:02:32 +03003227 page = gfn_to_page(kvm, pgoff);
Avi Kivityf17abe92007-02-21 19:28:04 +02003228 if (!page)
3229 return NOPAGE_SIGBUS;
3230 get_page(page);
Nguyen Anh Quynhcd0d9132007-07-11 14:30:54 +03003231 if (type != NULL)
3232 *type = VM_FAULT_MINOR;
3233
Avi Kivityf17abe92007-02-21 19:28:04 +02003234 return page;
3235}
3236
/* VM operations for VM-fd mmaps; faults are served by kvm_vm_nopage(). */
static struct vm_operations_struct kvm_vm_vm_ops = {
	.nopage = kvm_vm_nopage,
};
3240
/* mmap handler for VM fds: just install the nopage-based vm_ops. */
static int kvm_vm_mmap(struct file *file, struct vm_area_struct *vma)
{
	vma->vm_ops = &kvm_vm_vm_ops;
	return 0;
}
3246
/* File operations backing a VM file descriptor. */
static struct file_operations kvm_vm_fops = {
	.release = kvm_vm_release,
	.unlocked_ioctl = kvm_vm_ioctl,
	.compat_ioctl = kvm_vm_ioctl,
	.mmap = kvm_vm_mmap,
};
3253
3254static int kvm_dev_ioctl_create_vm(void)
3255{
3256 int fd, r;
3257 struct inode *inode;
3258 struct file *file;
3259 struct kvm *kvm;
3260
Avi Kivityf17abe92007-02-21 19:28:04 +02003261 kvm = kvm_create_vm();
Avi Kivityd6d28162007-06-28 08:38:16 -04003262 if (IS_ERR(kvm))
3263 return PTR_ERR(kvm);
3264 r = anon_inode_getfd(&fd, &inode, &file, "kvm-vm", &kvm_vm_fops, kvm);
3265 if (r) {
3266 kvm_destroy_vm(kvm);
3267 return r;
Avi Kivityf17abe92007-02-21 19:28:04 +02003268 }
3269
Avi Kivitybccf2152007-02-21 18:04:26 +02003270 kvm->filp = file;
Avi Kivityf17abe92007-02-21 19:28:04 +02003271
Avi Kivityf17abe92007-02-21 19:28:04 +02003272 return fd;
Avi Kivityf17abe92007-02-21 19:28:04 +02003273}
3274
3275static long kvm_dev_ioctl(struct file *filp,
3276 unsigned int ioctl, unsigned long arg)
3277{
3278 void __user *argp = (void __user *)arg;
Avi Kivity07c45a32007-03-07 13:05:38 +02003279 long r = -EINVAL;
Avi Kivityf17abe92007-02-21 19:28:04 +02003280
3281 switch (ioctl) {
3282 case KVM_GET_API_VERSION:
Avi Kivityf0fe5102007-03-07 13:11:17 +02003283 r = -EINVAL;
3284 if (arg)
3285 goto out;
Avi Kivityf17abe92007-02-21 19:28:04 +02003286 r = KVM_API_VERSION;
3287 break;
3288 case KVM_CREATE_VM:
Avi Kivityf0fe5102007-03-07 13:11:17 +02003289 r = -EINVAL;
3290 if (arg)
3291 goto out;
Avi Kivityf17abe92007-02-21 19:28:04 +02003292 r = kvm_dev_ioctl_create_vm();
3293 break;
Avi Kivity6aa8b732006-12-10 02:21:36 -08003294 case KVM_GET_MSR_INDEX_LIST: {
Al Viro2f366982007-02-09 16:38:35 +00003295 struct kvm_msr_list __user *user_msr_list = argp;
Avi Kivity6aa8b732006-12-10 02:21:36 -08003296 struct kvm_msr_list msr_list;
3297 unsigned n;
3298
3299 r = -EFAULT;
3300 if (copy_from_user(&msr_list, user_msr_list, sizeof msr_list))
3301 goto out;
3302 n = msr_list.nmsrs;
Avi Kivity6f00e682007-01-26 00:56:40 -08003303 msr_list.nmsrs = num_msrs_to_save + ARRAY_SIZE(emulated_msrs);
Avi Kivity6aa8b732006-12-10 02:21:36 -08003304 if (copy_to_user(user_msr_list, &msr_list, sizeof msr_list))
3305 goto out;
3306 r = -E2BIG;
Michael Riepebf591b22006-12-22 01:05:36 -08003307 if (n < num_msrs_to_save)
Avi Kivity6aa8b732006-12-10 02:21:36 -08003308 goto out;
3309 r = -EFAULT;
3310 if (copy_to_user(user_msr_list->indices, &msrs_to_save,
Michael Riepebf591b22006-12-22 01:05:36 -08003311 num_msrs_to_save * sizeof(u32)))
Avi Kivity6aa8b732006-12-10 02:21:36 -08003312 goto out;
Avi Kivity6f00e682007-01-26 00:56:40 -08003313 if (copy_to_user(user_msr_list->indices
3314 + num_msrs_to_save * sizeof(u32),
3315 &emulated_msrs,
3316 ARRAY_SIZE(emulated_msrs) * sizeof(u32)))
3317 goto out;
Avi Kivity6aa8b732006-12-10 02:21:36 -08003318 r = 0;
Avi Kivitycc1d8952007-01-05 16:36:58 -08003319 break;
Avi Kivity6aa8b732006-12-10 02:21:36 -08003320 }
Eddie Dong85f455f2007-07-06 12:20:49 +03003321 case KVM_CHECK_EXTENSION: {
3322 int ext = (long)argp;
3323
3324 switch (ext) {
3325 case KVM_CAP_IRQCHIP:
Eddie Dongb6958ce2007-07-18 12:15:21 +03003326 case KVM_CAP_HLT:
Izik Eidus82ce2c92007-10-02 18:52:55 +02003327 case KVM_CAP_MMU_SHADOW_CACHE_CONTROL:
Eddie Dong85f455f2007-07-06 12:20:49 +03003328 r = 1;
3329 break;
3330 default:
3331 r = 0;
3332 break;
3333 }
Avi Kivity5d308f42007-03-01 17:56:20 +02003334 break;
Eddie Dong85f455f2007-07-06 12:20:49 +03003335 }
Avi Kivity07c45a32007-03-07 13:05:38 +02003336 case KVM_GET_VCPU_MMAP_SIZE:
3337 r = -EINVAL;
3338 if (arg)
3339 goto out;
Avi Kivity039576c2007-03-20 12:46:50 +02003340 r = 2 * PAGE_SIZE;
Avi Kivity07c45a32007-03-07 13:05:38 +02003341 break;
Avi Kivity6aa8b732006-12-10 02:21:36 -08003342 default:
3343 ;
3344 }
3345out:
3346 return r;
3347}
3348
Avi Kivity6aa8b732006-12-10 02:21:36 -08003349static struct file_operations kvm_chardev_ops = {
Avi Kivity6aa8b732006-12-10 02:21:36 -08003350 .unlocked_ioctl = kvm_dev_ioctl,
3351 .compat_ioctl = kvm_dev_ioctl,
Avi Kivity6aa8b732006-12-10 02:21:36 -08003352};
3353
3354static struct miscdevice kvm_dev = {
Avi Kivitybbe44322007-03-04 13:27:36 +02003355 KVM_MINOR,
Avi Kivity6aa8b732006-12-10 02:21:36 -08003356 "kvm",
3357 &kvm_chardev_ops,
3358};
3359
Avi Kivity774c47f2007-02-12 00:54:47 -08003360/*
3361 * Make sure that a cpu that is being hot-unplugged does not have any vcpus
3362 * cached on it.
3363 */
3364static void decache_vcpus_on_cpu(int cpu)
3365{
3366 struct kvm *vm;
3367 struct kvm_vcpu *vcpu;
3368 int i;
3369
3370 spin_lock(&kvm_lock);
Shaohua Li11ec2802007-07-23 14:51:37 +08003371 list_for_each_entry(vm, &vm_list, vm_list)
Avi Kivity774c47f2007-02-12 00:54:47 -08003372 for (i = 0; i < KVM_MAX_VCPUS; ++i) {
Rusty Russellfb3f0f52007-07-27 17:16:56 +10003373 vcpu = vm->vcpus[i];
3374 if (!vcpu)
3375 continue;
Avi Kivity774c47f2007-02-12 00:54:47 -08003376 /*
3377 * If the vcpu is locked, then it is running on some
3378 * other cpu and therefore it is not cached on the
3379 * cpu in question.
3380 *
3381 * If it's not locked, check the last cpu it executed
3382 * on.
3383 */
3384 if (mutex_trylock(&vcpu->mutex)) {
3385 if (vcpu->cpu == cpu) {
Christian Ehrhardtcbdd1be2007-09-09 15:41:59 +03003386 kvm_x86_ops->vcpu_decache(vcpu);
Avi Kivity774c47f2007-02-12 00:54:47 -08003387 vcpu->cpu = -1;
3388 }
3389 mutex_unlock(&vcpu->mutex);
3390 }
3391 }
3392 spin_unlock(&kvm_lock);
3393}
3394
Avi Kivity1b6c0162007-05-24 13:03:52 +03003395static void hardware_enable(void *junk)
3396{
3397 int cpu = raw_smp_processor_id();
3398
3399 if (cpu_isset(cpu, cpus_hardware_enabled))
3400 return;
3401 cpu_set(cpu, cpus_hardware_enabled);
Christian Ehrhardtcbdd1be2007-09-09 15:41:59 +03003402 kvm_x86_ops->hardware_enable(NULL);
Avi Kivity1b6c0162007-05-24 13:03:52 +03003403}
3404
3405static void hardware_disable(void *junk)
3406{
3407 int cpu = raw_smp_processor_id();
3408
3409 if (!cpu_isset(cpu, cpus_hardware_enabled))
3410 return;
3411 cpu_clear(cpu, cpus_hardware_enabled);
3412 decache_vcpus_on_cpu(cpu);
Christian Ehrhardtcbdd1be2007-09-09 15:41:59 +03003413 kvm_x86_ops->hardware_disable(NULL);
Avi Kivity1b6c0162007-05-24 13:03:52 +03003414}
3415
Avi Kivity774c47f2007-02-12 00:54:47 -08003416static int kvm_cpu_hotplug(struct notifier_block *notifier, unsigned long val,
3417 void *v)
3418{
3419 int cpu = (long)v;
3420
3421 switch (val) {
Avi Kivitycec9ad22007-05-24 13:11:41 +03003422 case CPU_DYING:
3423 case CPU_DYING_FROZEN:
Avi Kivity6ec8a852007-08-19 15:57:26 +03003424 printk(KERN_INFO "kvm: disabling virtualization on CPU%d\n",
3425 cpu);
3426 hardware_disable(NULL);
3427 break;
Avi Kivity774c47f2007-02-12 00:54:47 -08003428 case CPU_UP_CANCELED:
Rafael J. Wysocki8bb78442007-05-09 02:35:10 -07003429 case CPU_UP_CANCELED_FROZEN:
Jeremy Katz43934a32007-02-19 14:37:46 +02003430 printk(KERN_INFO "kvm: disabling virtualization on CPU%d\n",
3431 cpu);
Avi Kivity1b6c0162007-05-24 13:03:52 +03003432 smp_call_function_single(cpu, hardware_disable, NULL, 0, 1);
Avi Kivity774c47f2007-02-12 00:54:47 -08003433 break;
Jeremy Katz43934a32007-02-19 14:37:46 +02003434 case CPU_ONLINE:
Rafael J. Wysocki8bb78442007-05-09 02:35:10 -07003435 case CPU_ONLINE_FROZEN:
Jeremy Katz43934a32007-02-19 14:37:46 +02003436 printk(KERN_INFO "kvm: enabling virtualization on CPU%d\n",
3437 cpu);
Avi Kivity1b6c0162007-05-24 13:03:52 +03003438 smp_call_function_single(cpu, hardware_enable, NULL, 0, 1);
Avi Kivity774c47f2007-02-12 00:54:47 -08003439 break;
3440 }
3441 return NOTIFY_OK;
3442}
3443
Rusty Russell9a2b85c2007-07-17 23:17:55 +10003444static int kvm_reboot(struct notifier_block *notifier, unsigned long val,
3445 void *v)
3446{
3447 if (val == SYS_RESTART) {
3448 /*
3449 * Some (well, at least mine) BIOSes hang on reboot if
3450 * in vmx root mode.
3451 */
3452 printk(KERN_INFO "kvm: exiting hardware virtualization\n");
3453 on_each_cpu(hardware_disable, NULL, 0, 1);
3454 }
3455 return NOTIFY_OK;
3456}
3457
/* Invokes kvm_reboot() on system restart; default (0) priority. */
static struct notifier_block kvm_reboot_notifier = {
	.notifier_call = kvm_reboot,
	.priority = 0,
};
3462
Gregory Haskins2eeb2e92007-05-31 14:08:53 -04003463void kvm_io_bus_init(struct kvm_io_bus *bus)
3464{
3465 memset(bus, 0, sizeof(*bus));
3466}
3467
3468void kvm_io_bus_destroy(struct kvm_io_bus *bus)
3469{
3470 int i;
3471
3472 for (i = 0; i < bus->dev_count; i++) {
3473 struct kvm_io_device *pos = bus->devs[i];
3474
3475 kvm_iodevice_destructor(pos);
3476 }
3477}
3478
3479struct kvm_io_device *kvm_io_bus_find_dev(struct kvm_io_bus *bus, gpa_t addr)
3480{
3481 int i;
3482
3483 for (i = 0; i < bus->dev_count; i++) {
3484 struct kvm_io_device *pos = bus->devs[i];
3485
3486 if (pos->in_range(pos, addr))
3487 return pos;
3488 }
3489
3490 return NULL;
3491}
3492
3493void kvm_io_bus_register_dev(struct kvm_io_bus *bus, struct kvm_io_device *dev)
3494{
3495 BUG_ON(bus->dev_count > (NR_IOBUS_DEVS-1));
3496
3497 bus->devs[bus->dev_count++] = dev;
3498}
3499
/* Routes cpu hotplug events to kvm_cpu_hotplug(). */
static struct notifier_block kvm_cpu_notifier = {
	.notifier_call = kvm_cpu_hotplug,
	.priority = 20, /* must be > scheduler priority */
};
3504
Avi Kivity1165f5f2007-04-19 17:27:43 +03003505static u64 stat_get(void *_offset)
3506{
3507 unsigned offset = (long)_offset;
3508 u64 total = 0;
3509 struct kvm *kvm;
3510 struct kvm_vcpu *vcpu;
3511 int i;
3512
3513 spin_lock(&kvm_lock);
3514 list_for_each_entry(kvm, &vm_list, vm_list)
3515 for (i = 0; i < KVM_MAX_VCPUS; ++i) {
Rusty Russellfb3f0f52007-07-27 17:16:56 +10003516 vcpu = kvm->vcpus[i];
3517 if (vcpu)
3518 total += *(u32 *)((void *)vcpu + offset);
Avi Kivity1165f5f2007-04-19 17:27:43 +03003519 }
3520 spin_unlock(&kvm_lock);
3521 return total;
3522}
3523
/* Read-only debugfs attribute: stat_get() rendered with "%llu\n". */
DEFINE_SIMPLE_ATTRIBUTE(stat_fops, stat_get, NULL, "%llu\n");
Avi Kivity1165f5f2007-04-19 17:27:43 +03003525
Avi Kivity6aa8b732006-12-10 02:21:36 -08003526static __init void kvm_init_debug(void)
3527{
3528 struct kvm_stats_debugfs_item *p;
3529
Al Viro8b6d44c2007-02-09 16:38:40 +00003530 debugfs_dir = debugfs_create_dir("kvm", NULL);
Avi Kivity6aa8b732006-12-10 02:21:36 -08003531 for (p = debugfs_entries; p->name; ++p)
Avi Kivity1165f5f2007-04-19 17:27:43 +03003532 p->dentry = debugfs_create_file(p->name, 0444, debugfs_dir,
3533 (void *)(long)p->offset,
3534 &stat_fops);
Avi Kivity6aa8b732006-12-10 02:21:36 -08003535}
3536
3537static void kvm_exit_debug(void)
3538{
3539 struct kvm_stats_debugfs_item *p;
3540
3541 for (p = debugfs_entries; p->name; ++p)
3542 debugfs_remove(p->dentry);
3543 debugfs_remove(debugfs_dir);
3544}
3545
/* sysdev suspend hook: turn off virtualization before the cpu sleeps. */
static int kvm_suspend(struct sys_device *dev, pm_message_t state)
{
	hardware_disable(NULL);
	return 0;
}
3551
/* sysdev resume hook: re-enable virtualization after suspend. */
static int kvm_resume(struct sys_device *dev)
{
	hardware_enable(NULL);
	return 0;
}
3557
/* Hooks kvm_suspend()/kvm_resume() into the system suspend path. */
static struct sysdev_class kvm_sysdev_class = {
	.name = "kvm",
	.suspend = kvm_suspend,
	.resume = kvm_resume,
};
3563
/* The single device instance registered under kvm_sysdev_class. */
static struct sys_device kvm_sysdev = {
	.id = 0,
	.cls = &kvm_sysdev_class,
};
3568
/*
 * Physical address of a zeroed page allocated in kvm_init() and freed
 * in kvm_exit(); presumably used to back invalid guest mappings —
 * the consumers are outside this chunk, confirm against callers.
 */
hpa_t bad_page_address;
3570
/* The preempt notifier is embedded in struct kvm_vcpu; recover the vcpu. */
static inline
struct kvm_vcpu *preempt_notifier_to_vcpu(struct preempt_notifier *pn)
{
	return container_of(pn, struct kvm_vcpu, preempt_notifier);
}
3576
3577static void kvm_sched_in(struct preempt_notifier *pn, int cpu)
3578{
3579 struct kvm_vcpu *vcpu = preempt_notifier_to_vcpu(pn);
3580
Christian Ehrhardtcbdd1be2007-09-09 15:41:59 +03003581 kvm_x86_ops->vcpu_load(vcpu, cpu);
Avi Kivity15ad7142007-07-11 18:17:21 +03003582}
3583
3584static void kvm_sched_out(struct preempt_notifier *pn,
3585 struct task_struct *next)
3586{
3587 struct kvm_vcpu *vcpu = preempt_notifier_to_vcpu(pn);
3588
Christian Ehrhardtcbdd1be2007-09-09 15:41:59 +03003589 kvm_x86_ops->vcpu_put(vcpu);
Avi Kivity15ad7142007-07-11 18:17:21 +03003590}
3591
/*
 * Register an architecture backend (@ops) and bring KVM fully up:
 * hardware setup, per-cpu compatibility check, VT enablement on all
 * online cpus, hotplug/reboot/sysdev notifiers, the vcpu slab cache
 * and finally the /dev/kvm character device.
 *
 * @ops:       backend callback table; stored globally in kvm_x86_ops.
 * @vcpu_size: size of the backend's vcpu structure, used to size the
 *             slab cache.
 * @module:    owner recorded on the chardev so the backend module is
 *             pinned while /dev/kvm is open.
 *
 * Returns 0 on success or a negative errno; on failure everything
 * acquired so far is rolled back in reverse order via the goto ladder
 * and kvm_x86_ops is reset to NULL.
 */
int kvm_init_x86(struct kvm_x86_ops *ops, unsigned int vcpu_size,
		  struct module *module)
{
	int r;
	int cpu;

	/* Only one backend (vmx or svm) may be registered at a time. */
	if (kvm_x86_ops) {
		printk(KERN_ERR "kvm: already loaded the other module\n");
		return -EEXIST;
	}

	if (!ops->cpu_has_kvm_support()) {
		printk(KERN_ERR "kvm: no hardware support\n");
		return -EOPNOTSUPP;
	}
	if (ops->disabled_by_bios()) {
		printk(KERN_ERR "kvm: disabled by bios\n");
		return -EOPNOTSUPP;
	}

	kvm_x86_ops = ops;

	r = kvm_x86_ops->hardware_setup();
	if (r < 0)
		goto out;

	/*
	 * Run the backend's compatibility check on every online cpu;
	 * the result is written into r through the pointer argument.
	 */
	for_each_online_cpu(cpu) {
		smp_call_function_single(cpu,
				kvm_x86_ops->check_processor_compatibility,
				&r, 0, 1);
		if (r < 0)
			goto out_free_0;
	}

	on_each_cpu(hardware_enable, NULL, 0, 1);
	r = register_cpu_notifier(&kvm_cpu_notifier);
	if (r)
		goto out_free_1;
	register_reboot_notifier(&kvm_reboot_notifier);

	r = sysdev_class_register(&kvm_sysdev_class);
	if (r)
		goto out_free_2;

	r = sysdev_register(&kvm_sysdev);
	if (r)
		goto out_free_3;

	/* A kmem cache lets us meet the alignment requirements of fx_save. */
	kvm_vcpu_cache = kmem_cache_create("kvm_vcpu", vcpu_size,
					   __alignof__(struct kvm_vcpu), 0, 0);
	if (!kvm_vcpu_cache) {
		r = -ENOMEM;
		goto out_free_4;
	}

	kvm_chardev_ops.owner = module;

	r = misc_register(&kvm_dev);
	if (r) {
		printk (KERN_ERR "kvm: misc device register failed\n");
		goto out_free;
	}

	kvm_preempt_ops.sched_in = kvm_sched_in;
	kvm_preempt_ops.sched_out = kvm_sched_out;

	kvm_mmu_set_nonpresent_ptes(0ull, 0ull);

	return 0;

	/* Unwind in exact reverse order of the acquisitions above. */
out_free:
	kmem_cache_destroy(kvm_vcpu_cache);
out_free_4:
	sysdev_unregister(&kvm_sysdev);
out_free_3:
	sysdev_class_unregister(&kvm_sysdev_class);
out_free_2:
	unregister_reboot_notifier(&kvm_reboot_notifier);
	unregister_cpu_notifier(&kvm_cpu_notifier);
out_free_1:
	on_each_cpu(hardware_disable, NULL, 0, 1);
out_free_0:
	kvm_x86_ops->hardware_unsetup();
out:
	kvm_x86_ops = NULL;
	return r;
}
3680
/*
 * Mirror of kvm_init_x86(): tear everything down in reverse order of
 * registration, then drop the reference to the backend ops so another
 * backend may register.
 */
void kvm_exit_x86(void)
{
	misc_deregister(&kvm_dev);
	kmem_cache_destroy(kvm_vcpu_cache);
	sysdev_unregister(&kvm_sysdev);
	sysdev_class_unregister(&kvm_sysdev_class);
	unregister_reboot_notifier(&kvm_reboot_notifier);
	unregister_cpu_notifier(&kvm_cpu_notifier);
	on_each_cpu(hardware_disable, NULL, 0, 1);
	kvm_x86_ops->hardware_unsetup();
	kvm_x86_ops = NULL;
}
3693
/*
 * Module init: set up the MMU module, debugfs statistics and the MSR
 * list, then allocate the zeroed "bad page" published through
 * bad_page_address.  Backend-specific setup happens later, when a
 * backend module calls kvm_init_x86().
 */
static __init int kvm_init(void)
{
	static struct page *bad_page;
	int r;

	r = kvm_mmu_module_init();
	if (r)
		goto out4;

	kvm_init_debug();

	kvm_init_msr_list();

	if ((bad_page = alloc_page(GFP_KERNEL)) == NULL) {
		r = -ENOMEM;
		goto out;
	}

	/* Publish the page's physical address and make sure it is zeroed. */
	bad_page_address = page_to_pfn(bad_page) << PAGE_SHIFT;
	memset(__va(bad_page_address), 0, PAGE_SIZE);

	return 0;

out:
	kvm_exit_debug();
	kvm_mmu_module_exit();
out4:
	return r;
}
3723
/*
 * Module exit: undo kvm_init() — remove debugfs entries, free the
 * bad page (recovered from its physical address) and shut the MMU
 * module down.
 */
static __exit void kvm_exit(void)
{
	kvm_exit_debug();
	__free_page(pfn_to_page(bad_page_address >> PAGE_SHIFT));
	kvm_mmu_module_exit();
}
3730
module_init(kvm_init)
module_exit(kvm_exit)

/* Exported for the backend modules that register their kvm_x86_ops here. */
EXPORT_SYMBOL_GPL(kvm_init_x86);
EXPORT_SYMBOL_GPL(kvm_exit_x86);