/*
 * s390host.c -- hosting zSeries kernel virtual machines
 *
 * Copyright IBM Corp. 2008,2009
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License (version 2 only)
 * as published by the Free Software Foundation.
 *
 *    Author(s): Carsten Otte <cotte@de.ibm.com>
 *               Christian Borntraeger <borntraeger@de.ibm.com>
 *               Heiko Carstens <heiko.carstens@de.ibm.com>
 *               Christian Ehrhardt <ehrhardt@de.ibm.com>
 */

#include <linux/compiler.h>
#include <linux/err.h>
#include <linux/fs.h>
#include <linux/hrtimer.h>
#include <linux/init.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/timer.h>
#include <asm/asm-offsets.h>
#include <asm/lowcore.h>
#include <asm/pgtable.h>
#include <asm/nmi.h>
#include <asm/system.h>
#include "kvm-s390.h"
#include "gaccess.h"

#define VCPU_STAT(x) offsetof(struct kvm_vcpu, stat.x), KVM_STAT_VCPU
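
/*
 * One debugfs counter per exit and instruction type below; VCPU_STAT
 * resolves the counter's offset inside struct kvm_vcpu so the generic
 * KVM code can export it per virtual cpu.
 */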
struct kvm_stats_debugfs_item debugfs_entries[] = {
        { "userspace_handled", VCPU_STAT(exit_userspace) },
        { "exit_null", VCPU_STAT(exit_null) },
        { "exit_validity", VCPU_STAT(exit_validity) },
        { "exit_stop_request", VCPU_STAT(exit_stop_request) },
        { "exit_external_request", VCPU_STAT(exit_external_request) },
        { "exit_external_interrupt", VCPU_STAT(exit_external_interrupt) },
        { "exit_instruction", VCPU_STAT(exit_instruction) },
        { "exit_program_interruption", VCPU_STAT(exit_program_interruption) },
        { "exit_instr_and_program_int", VCPU_STAT(exit_instr_and_program) },
        { "instruction_lctlg", VCPU_STAT(instruction_lctlg) },
        { "instruction_lctl", VCPU_STAT(instruction_lctl) },
        { "deliver_emergency_signal", VCPU_STAT(deliver_emergency_signal) },
        { "deliver_external_call", VCPU_STAT(deliver_external_call) },
        { "deliver_service_signal", VCPU_STAT(deliver_service_signal) },
        { "deliver_virtio_interrupt", VCPU_STAT(deliver_virtio_interrupt) },
        { "deliver_stop_signal", VCPU_STAT(deliver_stop_signal) },
        { "deliver_prefix_signal", VCPU_STAT(deliver_prefix_signal) },
        { "deliver_restart_signal", VCPU_STAT(deliver_restart_signal) },
        { "deliver_program_interruption", VCPU_STAT(deliver_program_int) },
        { "exit_wait_state", VCPU_STAT(exit_wait_state) },
        { "instruction_stidp", VCPU_STAT(instruction_stidp) },
        { "instruction_spx", VCPU_STAT(instruction_spx) },
        { "instruction_stpx", VCPU_STAT(instruction_stpx) },
        { "instruction_stap", VCPU_STAT(instruction_stap) },
        { "instruction_storage_key", VCPU_STAT(instruction_storage_key) },
        { "instruction_stsch", VCPU_STAT(instruction_stsch) },
        { "instruction_chsc", VCPU_STAT(instruction_chsc) },
        { "instruction_stsi", VCPU_STAT(instruction_stsi) },
        { "instruction_stfl", VCPU_STAT(instruction_stfl) },
        { "instruction_tprot", VCPU_STAT(instruction_tprot) },
        { "instruction_sigp_sense", VCPU_STAT(instruction_sigp_sense) },
        { "instruction_sigp_sense_running", VCPU_STAT(instruction_sigp_sense_running) },
        { "instruction_sigp_external_call", VCPU_STAT(instruction_sigp_external_call) },
        { "instruction_sigp_emergency", VCPU_STAT(instruction_sigp_emergency) },
        { "instruction_sigp_stop", VCPU_STAT(instruction_sigp_stop) },
        { "instruction_sigp_set_arch", VCPU_STAT(instruction_sigp_arch) },
        { "instruction_sigp_set_prefix", VCPU_STAT(instruction_sigp_prefix) },
        { "instruction_sigp_restart", VCPU_STAT(instruction_sigp_restart) },
        { "diagnose_10", VCPU_STAT(diagnose_10) },
        { "diagnose_44", VCPU_STAT(diagnose_44) },
        { NULL }
};
static unsigned long long *facilities;

/* Section: not file related */
int kvm_arch_hardware_enable(void *garbage)
{
        /* every s390 is virtualization enabled ;-) */
        return 0;
}

void kvm_arch_hardware_disable(void *garbage)
{
}

int kvm_arch_hardware_setup(void)
{
        return 0;
}

void kvm_arch_hardware_unsetup(void)
{
}

void kvm_arch_check_processor_compat(void *rtn)
{
}

int kvm_arch_init(void *opaque)
{
        return 0;
}

void kvm_arch_exit(void)
{
}

/* Section: device related */
long kvm_arch_dev_ioctl(struct file *filp,
                        unsigned int ioctl, unsigned long arg)
{
        if (ioctl == KVM_S390_ENABLE_SIE)
                return s390_enable_sie();
        return -EINVAL;
}

int kvm_dev_ioctl_check_extension(long ext)
{
        int r;

        switch (ext) {
        case KVM_CAP_S390_PSW:
        case KVM_CAP_S390_GMAP:
        case KVM_CAP_SYNC_MMU:
                r = 1;
                break;
        default:
                r = 0;
        }
        return r;
}

/* Section: vm related */
/*
 * Get (and clear) the dirty memory log for a memory slot.
 */
int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
                               struct kvm_dirty_log *log)
{
        return 0;
}

long kvm_arch_vm_ioctl(struct file *filp,
                       unsigned int ioctl, unsigned long arg)
{
        struct kvm *kvm = filp->private_data;
        void __user *argp = (void __user *)arg;
        int r;

        switch (ioctl) {
        case KVM_S390_INTERRUPT: {
                struct kvm_s390_interrupt s390int;

                r = -EFAULT;
                if (copy_from_user(&s390int, argp, sizeof(s390int)))
                        break;
                r = kvm_s390_inject_vm(kvm, &s390int);
                break;
        }
        default:
                r = -ENOTTY;
        }

        return r;
}

int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
{
        int rc;
        char debug_name[16];

        rc = -EINVAL;
#ifdef CONFIG_KVM_S390_UCONTROL
        if (type & ~KVM_VM_S390_UCONTROL)
                goto out_err;
        if ((type & KVM_VM_S390_UCONTROL) && (!capable(CAP_SYS_ADMIN)))
                goto out_err;
#else
        if (type)
                goto out_err;
#endif

        rc = s390_enable_sie();
        if (rc)
                goto out_err;

        rc = -ENOMEM;

        kvm->arch.sca = (struct sca_block *) get_zeroed_page(GFP_KERNEL);
        if (!kvm->arch.sca)
                goto out_err;

        sprintf(debug_name, "kvm-%u", current->pid);

        kvm->arch.dbf = debug_register(debug_name, 8, 2, 8 * sizeof(long));
        if (!kvm->arch.dbf)
                goto out_nodbf;

        spin_lock_init(&kvm->arch.float_int.lock);
        INIT_LIST_HEAD(&kvm->arch.float_int.list);

        debug_register_view(kvm->arch.dbf, &debug_sprintf_view);
        VM_EVENT(kvm, 3, "%s", "vm created");

        if (type & KVM_VM_S390_UCONTROL) {
                kvm->arch.gmap = NULL;
        } else {
                kvm->arch.gmap = gmap_alloc(current->mm);
                if (!kvm->arch.gmap)
                        goto out_nogmap;
        }
        return 0;
out_nogmap:
        debug_unregister(kvm->arch.dbf);
out_nodbf:
        free_page((unsigned long)(kvm->arch.sca));
out_err:
        return rc;
}
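
/*
 * Tearing down a vcpu: drop its SIE block address from the SCA entry
 * and clear its bit in the mcn mask (s390 numbers bits from the MSB,
 * hence the "63 - id") before the page holding the block is freed.
 */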
void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
{
        VCPU_EVENT(vcpu, 3, "%s", "free cpu");
        clear_bit(63 - vcpu->vcpu_id, (unsigned long *) &vcpu->kvm->arch.sca->mcn);
        if (vcpu->kvm->arch.sca->cpu[vcpu->vcpu_id].sda ==
            (__u64) vcpu->arch.sie_block)
                vcpu->kvm->arch.sca->cpu[vcpu->vcpu_id].sda = 0;
        smp_mb();

        if (kvm_is_ucontrol(vcpu->kvm))
                gmap_free(vcpu->arch.gmap);

        free_page((unsigned long)(vcpu->arch.sie_block));
        kvm_vcpu_uninit(vcpu);
        kfree(vcpu);
}

static void kvm_free_vcpus(struct kvm *kvm)
{
        unsigned int i;
        struct kvm_vcpu *vcpu;

        kvm_for_each_vcpu(i, vcpu, kvm)
                kvm_arch_vcpu_destroy(vcpu);

        mutex_lock(&kvm->lock);
        for (i = 0; i < atomic_read(&kvm->online_vcpus); i++)
                kvm->vcpus[i] = NULL;

        atomic_set(&kvm->online_vcpus, 0);
        mutex_unlock(&kvm->lock);
}

void kvm_arch_sync_events(struct kvm *kvm)
{
}

void kvm_arch_destroy_vm(struct kvm *kvm)
{
        kvm_free_vcpus(kvm);
        free_page((unsigned long)(kvm->arch.sca));
        debug_unregister(kvm->arch.dbf);
        if (!kvm_is_ucontrol(kvm))
                gmap_free(kvm->arch.gmap);
}

/* Section: vcpu related */
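/*
 * User-controlled (ucontrol) VMs get one private guest mapping (gmap)
 * per vcpu, created here and freed in kvm_arch_vcpu_destroy(); regular
 * VMs share the single gmap set up in kvm_arch_init_vm().
 */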
int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
{
        if (kvm_is_ucontrol(vcpu->kvm)) {
                vcpu->arch.gmap = gmap_alloc(current->mm);
                if (!vcpu->arch.gmap)
                        return -ENOMEM;
                return 0;
        }

        vcpu->arch.gmap = vcpu->kvm->arch.gmap;
        return 0;
}

void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu)
{
        /* Nothing to do */
}

void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
        save_fp_regs(&vcpu->arch.host_fpregs);
        save_access_regs(vcpu->arch.host_acrs);
        vcpu->arch.guest_fpregs.fpc &= FPC_VALID_MASK;
        restore_fp_regs(&vcpu->arch.guest_fpregs);
        restore_access_regs(vcpu->arch.guest_acrs);
        gmap_enable(vcpu->arch.gmap);
        atomic_set_mask(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags);
}

void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
{
        atomic_clear_mask(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags);
        gmap_disable(vcpu->arch.gmap);
        save_fp_regs(&vcpu->arch.guest_fpregs);
        save_access_regs(vcpu->arch.guest_acrs);
        restore_fp_regs(&vcpu->arch.host_fpregs);
        restore_access_regs(vcpu->arch.host_acrs);
}

static void kvm_s390_vcpu_initial_reset(struct kvm_vcpu *vcpu)
{
        /* this equals initial cpu reset in pop, but we don't switch to ESA */
        vcpu->arch.sie_block->gpsw.mask = 0UL;
        vcpu->arch.sie_block->gpsw.addr = 0UL;
        vcpu->arch.sie_block->prefix    = 0UL;
        vcpu->arch.sie_block->ihcpu     = 0xffff;
        vcpu->arch.sie_block->cputm     = 0UL;
        vcpu->arch.sie_block->ckc       = 0UL;
        vcpu->arch.sie_block->todpr     = 0;
        memset(vcpu->arch.sie_block->gcr, 0, 16 * sizeof(__u64));
        /* architected reset values for control registers 0 and 14 */
        vcpu->arch.sie_block->gcr[0]  = 0xE0UL;
        vcpu->arch.sie_block->gcr[14] = 0xC2000000UL;
        vcpu->arch.guest_fpregs.fpc = 0;
        asm volatile("lfpc %0" : : "Q" (vcpu->arch.guest_fpregs.fpc));
        vcpu->arch.sie_block->gbea = 1;
}

int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
{
        atomic_set(&vcpu->arch.sie_block->cpuflags, CPUSTAT_ZARCH |
                                                    CPUSTAT_SM |
                                                    CPUSTAT_STOPPED);
        vcpu->arch.sie_block->ecb   = 6;
        vcpu->arch.sie_block->eca   = 0xC1002001U;
        vcpu->arch.sie_block->fac   = (int) (long) facilities;
        hrtimer_init(&vcpu->arch.ckc_timer, CLOCK_REALTIME, HRTIMER_MODE_ABS);
        tasklet_init(&vcpu->arch.tasklet, kvm_s390_tasklet,
                     (unsigned long) vcpu);
        vcpu->arch.ckc_timer.function = kvm_s390_idle_wakeup;
        get_cpu_id(&vcpu->arch.cpu_id);
        vcpu->arch.cpu_id.version = 0xff;
        return 0;
}
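
/*
 * Each vcpu's SIE control block sits on a zeroed page of its own; the
 * block's address is entered into the vcpu's SCA entry (sda) and the
 * matching bit in the mcn mask is set.
 */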
struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm,
                                      unsigned int id)
{
        struct kvm_vcpu *vcpu;
        int rc = -EINVAL;

        if (id >= KVM_MAX_VCPUS)
                goto out;

        rc = -ENOMEM;

        vcpu = kzalloc(sizeof(struct kvm_vcpu), GFP_KERNEL);
        if (!vcpu)
                goto out;

        vcpu->arch.sie_block = (struct kvm_s390_sie_block *)
                                        get_zeroed_page(GFP_KERNEL);

        if (!vcpu->arch.sie_block)
                goto out_free_cpu;

        vcpu->arch.sie_block->icpua = id;
        BUG_ON(!kvm->arch.sca);
        if (!kvm->arch.sca->cpu[id].sda)
                kvm->arch.sca->cpu[id].sda = (__u64) vcpu->arch.sie_block;
        vcpu->arch.sie_block->scaoh = (__u32)(((__u64)kvm->arch.sca) >> 32);
        vcpu->arch.sie_block->scaol = (__u32)(__u64)kvm->arch.sca;
        set_bit(63 - id, (unsigned long *) &kvm->arch.sca->mcn);

        spin_lock_init(&vcpu->arch.local_int.lock);
        INIT_LIST_HEAD(&vcpu->arch.local_int.list);
        vcpu->arch.local_int.float_int = &kvm->arch.float_int;
        spin_lock(&kvm->arch.float_int.lock);
        kvm->arch.float_int.local_int[id] = &vcpu->arch.local_int;
        init_waitqueue_head(&vcpu->arch.local_int.wq);
        vcpu->arch.local_int.cpuflags = &vcpu->arch.sie_block->cpuflags;
        spin_unlock(&kvm->arch.float_int.lock);

        rc = kvm_vcpu_init(vcpu, kvm, id);
        if (rc)
                goto out_free_sie_block;
        VM_EVENT(kvm, 3, "create cpu %d at %p, sie block at %p", id, vcpu,
                 vcpu->arch.sie_block);

        return vcpu;
out_free_sie_block:
        free_page((unsigned long)(vcpu->arch.sie_block));
out_free_cpu:
        kfree(vcpu);
out:
        return ERR_PTR(rc);
}

int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
{
        /* kvm common code refers to this, but never calls it */
        BUG();
        return 0;
}

static int kvm_arch_vcpu_ioctl_initial_reset(struct kvm_vcpu *vcpu)
{
        kvm_s390_vcpu_initial_reset(vcpu);
        return 0;
}

int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
        memcpy(&vcpu->arch.guest_gprs, &regs->gprs, sizeof(regs->gprs));
        return 0;
}

int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
        memcpy(&regs->gprs, &vcpu->arch.guest_gprs, sizeof(regs->gprs));
        return 0;
}

int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
                                  struct kvm_sregs *sregs)
{
        memcpy(&vcpu->arch.guest_acrs, &sregs->acrs, sizeof(sregs->acrs));
        memcpy(&vcpu->arch.sie_block->gcr, &sregs->crs, sizeof(sregs->crs));
        restore_access_regs(vcpu->arch.guest_acrs);
        return 0;
}

int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
                                  struct kvm_sregs *sregs)
{
        memcpy(&sregs->acrs, &vcpu->arch.guest_acrs, sizeof(sregs->acrs));
        memcpy(&sregs->crs, &vcpu->arch.sie_block->gcr, sizeof(sregs->crs));
        return 0;
}

int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
        memcpy(&vcpu->arch.guest_fpregs.fprs, &fpu->fprs, sizeof(fpu->fprs));
        vcpu->arch.guest_fpregs.fpc = fpu->fpc;
        restore_fp_regs(&vcpu->arch.guest_fpregs);
        return 0;
}

int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
        memcpy(&fpu->fprs, &vcpu->arch.guest_fpregs.fprs, sizeof(fpu->fprs));
        fpu->fpc = vcpu->arch.guest_fpregs.fpc;
        return 0;
}

static int kvm_arch_vcpu_ioctl_set_initial_psw(struct kvm_vcpu *vcpu, psw_t psw)
{
        int rc = 0;

        if (!(atomic_read(&vcpu->arch.sie_block->cpuflags) & CPUSTAT_STOPPED))
                rc = -EBUSY;
        else {
                vcpu->run->psw_mask = psw.mask;
                vcpu->run->psw_addr = psw.addr;
        }
        return rc;
}

int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
                                  struct kvm_translation *tr)
{
        return -EINVAL; /* not implemented yet */
}

int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
                                        struct kvm_guest_debug *dbg)
{
        return -EINVAL; /* not implemented yet */
}

int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
                                    struct kvm_mp_state *mp_state)
{
        return -EINVAL; /* not implemented yet */
}

int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
                                    struct kvm_mp_state *mp_state)
{
        return -EINVAL; /* not implemented yet */
}
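
/*
 * Enter SIE for one round of guest execution: guest registers 14 and 15
 * travel through the gg14/gg15 fields of the SIE block, the remaining
 * guest registers are handed to sie64a() directly. A nonzero return
 * from sie64a() means the host took a fault while the guest was running.
 */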
static int __vcpu_run(struct kvm_vcpu *vcpu)
{
        int rc;

        memcpy(&vcpu->arch.sie_block->gg14, &vcpu->arch.guest_gprs[14], 16);

        if (need_resched())
                schedule();

        if (test_thread_flag(TIF_MCCK_PENDING))
                s390_handle_mcck();

        kvm_s390_deliver_pending_interrupts(vcpu);

        vcpu->arch.sie_block->icptcode = 0;
        local_irq_disable();
        kvm_guest_enter();
        local_irq_enable();
        VCPU_EVENT(vcpu, 6, "entering sie flags %x",
                   atomic_read(&vcpu->arch.sie_block->cpuflags));
        rc = sie64a(vcpu->arch.sie_block, vcpu->arch.guest_gprs);
        if (rc) {
                if (kvm_is_ucontrol(vcpu->kvm)) {
                        rc = SIE_INTERCEPT_UCONTROL;
                } else {
                        VCPU_EVENT(vcpu, 3, "%s", "fault in sie instruction");
                        kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
                        rc = 0;
                }
        }
        VCPU_EVENT(vcpu, 6, "exit sie icptcode %d",
                   vcpu->arch.sie_block->icptcode);
        local_irq_disable();
        kvm_guest_exit();
        local_irq_enable();

        memcpy(&vcpu->arch.guest_gprs[14], &vcpu->arch.sie_block->gg14, 16);
        return rc;
}
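
/*
 * The main run loop: keep re-entering SIE until a signal arrives or an
 * intercept has to be passed to userspace. SIE_INTERCEPT_RERUNVCPU
 * restarts the whole loop; -EOPNOTSUPP and -EREMOTE hand control back
 * to userspace with a prepared kvm_run block.
 */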
int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
{
        int rc;
        sigset_t sigsaved;

rerun_vcpu:
        if (vcpu->sigset_active)
                sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved);

        atomic_clear_mask(CPUSTAT_STOPPED, &vcpu->arch.sie_block->cpuflags);

        BUG_ON(vcpu->kvm->arch.float_int.local_int[vcpu->vcpu_id] == NULL);

        switch (kvm_run->exit_reason) {
        case KVM_EXIT_S390_SIEIC:
        case KVM_EXIT_UNKNOWN:
        case KVM_EXIT_INTR:
        case KVM_EXIT_S390_RESET:
        case KVM_EXIT_S390_UCONTROL:
                break;
        default:
                BUG();
        }

        vcpu->arch.sie_block->gpsw.mask = kvm_run->psw_mask;
        vcpu->arch.sie_block->gpsw.addr = kvm_run->psw_addr;

        might_fault();

        do {
                rc = __vcpu_run(vcpu);
                if (rc)
                        break;
                rc = kvm_handle_sie_intercept(vcpu);
        } while (!signal_pending(current) && !rc);

        if (rc == SIE_INTERCEPT_RERUNVCPU)
                goto rerun_vcpu;

        if (signal_pending(current) && !rc) {
                kvm_run->exit_reason = KVM_EXIT_INTR;
                rc = -EINTR;
        }

#ifdef CONFIG_KVM_S390_UCONTROL
        if (rc == SIE_INTERCEPT_UCONTROL) {
                kvm_run->exit_reason = KVM_EXIT_S390_UCONTROL;
                kvm_run->s390_ucontrol.trans_exc_code =
                        current->thread.gmap_addr;
                kvm_run->s390_ucontrol.pgm_code = 0x10;
                rc = 0;
        }
#endif

        if (rc == -EOPNOTSUPP) {
                /* intercept cannot be handled in-kernel, prepare kvm-run */
                kvm_run->exit_reason         = KVM_EXIT_S390_SIEIC;
                kvm_run->s390_sieic.icptcode = vcpu->arch.sie_block->icptcode;
                kvm_run->s390_sieic.ipa      = vcpu->arch.sie_block->ipa;
                kvm_run->s390_sieic.ipb      = vcpu->arch.sie_block->ipb;
                rc = 0;
        }

        if (rc == -EREMOTE) {
                /* intercept was handled, but userspace support is needed
                 * kvm_run has been prepared by the handler */
                rc = 0;
        }

        kvm_run->psw_mask = vcpu->arch.sie_block->gpsw.mask;
        kvm_run->psw_addr = vcpu->arch.sie_block->gpsw.addr;

        if (vcpu->sigset_active)
                sigprocmask(SIG_SETMASK, &sigsaved, NULL);

        vcpu->stat.exit_userspace++;
        return rc;
}
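
/*
 * Copy data into the guest: with prefix set the destination is run
 * through lowcore prefixing (copy_to_guest), otherwise it is taken as
 * a guest absolute address (copy_to_guest_absolute).
 */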
static int __guestcopy(struct kvm_vcpu *vcpu, u64 guestdest, void *from,
                       unsigned long n, int prefix)
{
        if (prefix)
                return copy_to_guest(vcpu, guestdest, from, n);
        else
                return copy_to_guest_absolute(vcpu, guestdest, from, n);
}

/*
 * store status at address
 * we have two special cases:
 * KVM_S390_STORE_STATUS_NOADDR: -> 0x1200 on 64 bit
 * KVM_S390_STORE_STATUS_PREFIXED: -> prefix
 */
int kvm_s390_vcpu_store_status(struct kvm_vcpu *vcpu, unsigned long addr)
{
        unsigned char archmode = 1;
        int prefix;

        if (addr == KVM_S390_STORE_STATUS_NOADDR) {
                if (copy_to_guest_absolute(vcpu, 163ul, &archmode, 1))
                        return -EFAULT;
                addr = SAVE_AREA_BASE;
                prefix = 0;
        } else if (addr == KVM_S390_STORE_STATUS_PREFIXED) {
                if (copy_to_guest(vcpu, 163ul, &archmode, 1))
                        return -EFAULT;
                addr = SAVE_AREA_BASE;
                prefix = 1;
        } else
                prefix = 0;

        if (__guestcopy(vcpu, addr + offsetof(struct save_area, fp_regs),
                        vcpu->arch.guest_fpregs.fprs, 128, prefix))
                return -EFAULT;

        if (__guestcopy(vcpu, addr + offsetof(struct save_area, gp_regs),
                        vcpu->arch.guest_gprs, 128, prefix))
                return -EFAULT;

        if (__guestcopy(vcpu, addr + offsetof(struct save_area, psw),
                        &vcpu->arch.sie_block->gpsw, 16, prefix))
                return -EFAULT;

        if (__guestcopy(vcpu, addr + offsetof(struct save_area, pref_reg),
                        &vcpu->arch.sie_block->prefix, 4, prefix))
                return -EFAULT;

        if (__guestcopy(vcpu,
                        addr + offsetof(struct save_area, fp_ctrl_reg),
                        &vcpu->arch.guest_fpregs.fpc, 4, prefix))
                return -EFAULT;

        if (__guestcopy(vcpu, addr + offsetof(struct save_area, tod_reg),
                        &vcpu->arch.sie_block->todpr, 4, prefix))
                return -EFAULT;

        if (__guestcopy(vcpu, addr + offsetof(struct save_area, timer),
                        &vcpu->arch.sie_block->cputm, 8, prefix))
                return -EFAULT;

        if (__guestcopy(vcpu, addr + offsetof(struct save_area, clk_cmp),
                        &vcpu->arch.sie_block->ckc, 8, prefix))
                return -EFAULT;

        if (__guestcopy(vcpu, addr + offsetof(struct save_area, acc_regs),
                        &vcpu->arch.guest_acrs, 64, prefix))
                return -EFAULT;

        if (__guestcopy(vcpu,
                        addr + offsetof(struct save_area, ctrl_regs),
                        &vcpu->arch.sie_block->gcr, 128, prefix))
                return -EFAULT;
        return 0;
}

long kvm_arch_vcpu_ioctl(struct file *filp,
                         unsigned int ioctl, unsigned long arg)
{
        struct kvm_vcpu *vcpu = filp->private_data;
        void __user *argp = (void __user *)arg;
        long r;

        switch (ioctl) {
        case KVM_S390_INTERRUPT: {
                struct kvm_s390_interrupt s390int;

                r = -EFAULT;
                if (copy_from_user(&s390int, argp, sizeof(s390int)))
                        break;
                r = kvm_s390_inject_vcpu(vcpu, &s390int);
                break;
        }
        case KVM_S390_STORE_STATUS:
                r = kvm_s390_vcpu_store_status(vcpu, arg);
                break;
        case KVM_S390_SET_INITIAL_PSW: {
                psw_t psw;

                r = -EFAULT;
                if (copy_from_user(&psw, argp, sizeof(psw)))
                        break;
                r = kvm_arch_vcpu_ioctl_set_initial_psw(vcpu, psw);
                break;
        }
        case KVM_S390_INITIAL_RESET:
                r = kvm_arch_vcpu_ioctl_initial_reset(vcpu);
                break;
#ifdef CONFIG_KVM_S390_UCONTROL
        case KVM_S390_UCAS_MAP: {
                struct kvm_s390_ucas_mapping ucasmap;

                if (copy_from_user(&ucasmap, argp, sizeof(ucasmap))) {
                        r = -EFAULT;
                        break;
                }

                if (!kvm_is_ucontrol(vcpu->kvm)) {
                        r = -EINVAL;
                        break;
                }

                r = gmap_map_segment(vcpu->arch.gmap, ucasmap.user_addr,
                                     ucasmap.vcpu_addr, ucasmap.length);
                break;
        }
        case KVM_S390_UCAS_UNMAP: {
                struct kvm_s390_ucas_mapping ucasmap;

                if (copy_from_user(&ucasmap, argp, sizeof(ucasmap))) {
                        r = -EFAULT;
                        break;
                }

                if (!kvm_is_ucontrol(vcpu->kvm)) {
                        r = -EINVAL;
                        break;
                }

                r = gmap_unmap_segment(vcpu->arch.gmap, ucasmap.vcpu_addr,
                        ucasmap.length);
                break;
        }
#endif
        default:
                r = -ENOTTY;
        }
        return r;
}

int kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf)
{
#ifdef CONFIG_KVM_S390_UCONTROL
        if ((vmf->pgoff == KVM_S390_SIE_PAGE_OFFSET)
                 && (kvm_is_ucontrol(vcpu->kvm))) {
                vmf->page = virt_to_page(vcpu->arch.sie_block);
                get_page(vmf->page);
                return 0;
        }
#endif
        return VM_FAULT_SIGBUS;
}

/* Section: memory related */
int kvm_arch_prepare_memory_region(struct kvm *kvm,
                                   struct kvm_memory_slot *memslot,
                                   struct kvm_memory_slot old,
                                   struct kvm_userspace_memory_region *mem,
                                   int user_alloc)
{
        /* A few sanity checks. We can have exactly one memory slot which has
           to start at guest virtual zero and which has to be located at a
           page boundary in userland and which has to end at a page boundary.
           The memory in userland is ok to be fragmented into various different
           vmas. It is okay to mmap() and munmap() stuff in this slot after
           doing this call at any time */

        if (mem->slot)
                return -EINVAL;

        if (mem->guest_phys_addr)
                return -EINVAL;

        if (mem->userspace_addr & 0xffffful)
                return -EINVAL;

        if (mem->memory_size & 0xffffful)
                return -EINVAL;

        if (!user_alloc)
                return -EINVAL;

        return 0;
}

void kvm_arch_commit_memory_region(struct kvm *kvm,
                                struct kvm_userspace_memory_region *mem,
                                struct kvm_memory_slot old,
                                int user_alloc)
{
        int rc;

        rc = gmap_map_segment(kvm->arch.gmap, mem->userspace_addr,
                mem->guest_phys_addr, mem->memory_size);
        if (rc)
                printk(KERN_WARNING "kvm-s390: failed to commit memory region\n");
        return;
}

void kvm_arch_flush_shadow(struct kvm *kvm)
{
}

static int __init kvm_s390_init(void)
{
        int ret;

        ret = kvm_init(NULL, sizeof(struct kvm_vcpu), 0, THIS_MODULE);
        if (ret)
                return ret;

        /*
         * guests can ask for up to 255+1 double words, we need a full page
         * to hold the maximum amount of facilities. On the other hand, we
         * only set facilities that are known to work in KVM.
         */
        facilities = (unsigned long long *) get_zeroed_page(GFP_KERNEL|GFP_DMA);
        if (!facilities) {
                kvm_exit();
                return -ENOMEM;
        }
        memcpy(facilities, S390_lowcore.stfle_fac_list, 16);
        facilities[0] &= 0xff00fff3f47c0000ULL;
        facilities[1] &= 0x201c000000000000ULL;
        return 0;
}

static void __exit kvm_s390_exit(void)
{
        free_page((unsigned long) facilities);
        kvm_exit();
}

module_init(kvm_s390_init);
module_exit(kvm_s390_exit);