KVM: s390: Fix incorrect return value
/*
 * s390host.c --  hosting zSeries kernel virtual machines
 *
 * Copyright IBM Corp. 2008
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License (version 2 only)
 * as published by the Free Software Foundation.
 *
 *    Author(s): Carsten Otte <cotte@de.ibm.com>
 *               Christian Borntraeger <borntraeger@de.ibm.com>
 *               Heiko Carstens <heiko.carstens@de.ibm.com>
 */

#include <linux/compiler.h>
#include <linux/err.h>
#include <linux/fs.h>
#include <linux/init.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/timer.h>
#include <asm/lowcore.h>
#include <asm/pgtable.h>

#include "kvm-s390.h"
#include "gaccess.h"

#define VCPU_STAT(x) offsetof(struct kvm_vcpu, stat.x), KVM_STAT_VCPU

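/*
 * Statistics exported through debugfs; VCPU_STAT yields the offset of
 * the named counter within struct kvm_vcpu.
 */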
struct kvm_stats_debugfs_item debugfs_entries[] = {
        { "userspace_handled", VCPU_STAT(exit_userspace) },
        { "exit_validity", VCPU_STAT(exit_validity) },
        { "exit_stop_request", VCPU_STAT(exit_stop_request) },
        { "exit_external_request", VCPU_STAT(exit_external_request) },
        { "exit_external_interrupt", VCPU_STAT(exit_external_interrupt) },
        { "exit_instruction", VCPU_STAT(exit_instruction) },
        { "exit_program_interruption", VCPU_STAT(exit_program_interruption) },
        { "exit_instr_and_program_int", VCPU_STAT(exit_instr_and_program) },
        { "instruction_lctg", VCPU_STAT(instruction_lctg) },
        { "instruction_lctl", VCPU_STAT(instruction_lctl) },
        { "deliver_emergency_signal", VCPU_STAT(deliver_emergency_signal) },
        { "deliver_service_signal", VCPU_STAT(deliver_service_signal) },
        { "deliver_virtio_interrupt", VCPU_STAT(deliver_virtio_interrupt) },
        { "deliver_stop_signal", VCPU_STAT(deliver_stop_signal) },
        { "deliver_prefix_signal", VCPU_STAT(deliver_prefix_signal) },
        { "deliver_restart_signal", VCPU_STAT(deliver_restart_signal) },
        { "deliver_program_interruption", VCPU_STAT(deliver_program_int) },
        { "exit_wait_state", VCPU_STAT(exit_wait_state) },
        { "instruction_stidp", VCPU_STAT(instruction_stidp) },
        { "instruction_spx", VCPU_STAT(instruction_spx) },
        { "instruction_stpx", VCPU_STAT(instruction_stpx) },
        { "instruction_stap", VCPU_STAT(instruction_stap) },
        { "instruction_storage_key", VCPU_STAT(instruction_storage_key) },
        { "instruction_stsch", VCPU_STAT(instruction_stsch) },
        { "instruction_chsc", VCPU_STAT(instruction_chsc) },
        { "instruction_stsi", VCPU_STAT(instruction_stsi) },
        { "instruction_stfl", VCPU_STAT(instruction_stfl) },
        { "instruction_sigp_sense", VCPU_STAT(instruction_sigp_sense) },
        { "instruction_sigp_emergency", VCPU_STAT(instruction_sigp_emergency) },
        { "instruction_sigp_stop", VCPU_STAT(instruction_sigp_stop) },
        { "instruction_sigp_set_arch", VCPU_STAT(instruction_sigp_arch) },
        { "instruction_sigp_set_prefix", VCPU_STAT(instruction_sigp_prefix) },
        { "instruction_sigp_restart", VCPU_STAT(instruction_sigp_restart) },
        { "diagnose_44", VCPU_STAT(diagnose_44) },
        { NULL }
};


/* Section: not file related */
void kvm_arch_hardware_enable(void *garbage)
{
        /* every s390 is virtualization enabled ;-) */
}

void kvm_arch_hardware_disable(void *garbage)
{
}

void decache_vcpus_on_cpu(int cpu)
{
}

int kvm_arch_hardware_setup(void)
{
        return 0;
}

void kvm_arch_hardware_unsetup(void)
{
}

void kvm_arch_check_processor_compat(void *rtn)
{
}

int kvm_arch_init(void *opaque)
{
        return 0;
}

void kvm_arch_exit(void)
{
}

/* Section: device related */
long kvm_arch_dev_ioctl(struct file *filp,
                        unsigned int ioctl, unsigned long arg)
{
        if (ioctl == KVM_S390_ENABLE_SIE)
                return s390_enable_sie();
        return -EINVAL;
}

int kvm_dev_ioctl_check_extension(long ext)
{
        return 0;
}

/* Section: vm related */
/*
 * Get (and clear) the dirty memory log for a memory slot.
 */
int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
                               struct kvm_dirty_log *log)
{
        return 0;
}

long kvm_arch_vm_ioctl(struct file *filp,
                       unsigned int ioctl, unsigned long arg)
{
        struct kvm *kvm = filp->private_data;
        void __user *argp = (void __user *)arg;
        int r;

        switch (ioctl) {
        case KVM_S390_INTERRUPT: {
                struct kvm_s390_interrupt s390int;

                r = -EFAULT;
                if (copy_from_user(&s390int, argp, sizeof(s390int)))
                        break;
                r = kvm_s390_inject_vm(kvm, &s390int);
                break;
        }
        default:
                r = -EINVAL;
        }

        return r;
}

struct kvm *kvm_arch_create_vm(void)
{
        struct kvm *kvm;
        int rc;
        char debug_name[16];

        rc = s390_enable_sie();
        if (rc)
                goto out_nokvm;

        rc = -ENOMEM;
        kvm = kzalloc(sizeof(struct kvm), GFP_KERNEL);
        if (!kvm)
                goto out_nokvm;

        kvm->arch.sca = (struct sca_block *) get_zeroed_page(GFP_KERNEL);
        if (!kvm->arch.sca)
                goto out_nosca;

        sprintf(debug_name, "kvm-%u", current->pid);

        kvm->arch.dbf = debug_register(debug_name, 8, 2, 8 * sizeof(long));
        if (!kvm->arch.dbf)
                goto out_nodbf;

        spin_lock_init(&kvm->arch.float_int.lock);
        INIT_LIST_HEAD(&kvm->arch.float_int.list);

        debug_register_view(kvm->arch.dbf, &debug_sprintf_view);
        VM_EVENT(kvm, 3, "%s", "vm created");

        try_module_get(THIS_MODULE);

        return kvm;
out_nodbf:
        free_page((unsigned long)(kvm->arch.sca));
out_nosca:
        kfree(kvm);
out_nokvm:
        return ERR_PTR(rc);
}

void kvm_arch_destroy_vm(struct kvm *kvm)
{
        debug_unregister(kvm->arch.dbf);
        free_page((unsigned long)(kvm->arch.sca));
        kfree(kvm);
        module_put(THIS_MODULE);
}

/* Section: vcpu related */
int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
{
        return 0;
}

void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu)
{
        /* kvm common code refers to this, but doesn't call it */
        BUG();
}

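/*
 * On sched-in: save the host's floating point and access registers and
 * load the guest's; request a SIE stop intercept if a signal is already
 * pending so the vcpu drops back to the host promptly.
 */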
void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
        save_fp_regs(&vcpu->arch.host_fpregs);
        save_access_regs(vcpu->arch.host_acrs);
        vcpu->arch.guest_fpregs.fpc &= FPC_VALID_MASK;
        restore_fp_regs(&vcpu->arch.guest_fpregs);
        restore_access_regs(vcpu->arch.guest_acrs);

        if (signal_pending(current))
                atomic_set_mask(CPUSTAT_STOP_INT,
                        &vcpu->arch.sie_block->cpuflags);
}

void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
{
        save_fp_regs(&vcpu->arch.guest_fpregs);
        save_access_regs(vcpu->arch.guest_acrs);
        restore_fp_regs(&vcpu->arch.host_fpregs);
        restore_access_regs(vcpu->arch.host_acrs);
}

static void kvm_s390_vcpu_initial_reset(struct kvm_vcpu *vcpu)
{
        /* this equals initial cpu reset in pop, but we don't switch to ESA */
        vcpu->arch.sie_block->gpsw.mask = 0UL;
        vcpu->arch.sie_block->gpsw.addr = 0UL;
        vcpu->arch.sie_block->prefix    = 0UL;
        vcpu->arch.sie_block->ihcpu     = 0xffff;
        vcpu->arch.sie_block->cputm     = 0UL;
        vcpu->arch.sie_block->ckc       = 0UL;
        vcpu->arch.sie_block->todpr     = 0;
        memset(vcpu->arch.sie_block->gcr, 0, 16 * sizeof(__u64));
        vcpu->arch.sie_block->gcr[0]  = 0xE0UL;
        vcpu->arch.sie_block->gcr[14] = 0xC2000000UL;
        vcpu->arch.guest_fpregs.fpc = 0;
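        /* load the cleared floating point control register into hardware */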
        asm volatile("lfpc %0" : : "Q" (vcpu->arch.guest_fpregs.fpc));
        vcpu->arch.sie_block->gbea = 1;
}

int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
{
        atomic_set(&vcpu->arch.sie_block->cpuflags, CPUSTAT_ZARCH);
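        /* gmsor/gmslm: guest memory segment origin and limit for SIE */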
        vcpu->arch.sie_block->gmslm = 0xffffffffffUL;
        vcpu->arch.sie_block->gmsor = 0x000000000000;
        vcpu->arch.sie_block->ecb   = 2;
        vcpu->arch.sie_block->eca   = 0xC1002001U;
        setup_timer(&vcpu->arch.ckc_timer, kvm_s390_idle_wakeup,
                 (unsigned long) vcpu);
        get_cpu_id(&vcpu->arch.cpu_id);
        vcpu->arch.cpu_id.version = 0xfe;
        return 0;
}

struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm,
                                      unsigned int id)
{
        struct kvm_vcpu *vcpu = kzalloc(sizeof(struct kvm_vcpu), GFP_KERNEL);
        int rc = -ENOMEM;

        if (!vcpu)
                goto out_nomem;

        vcpu->arch.sie_block = (struct sie_block *) get_zeroed_page(GFP_KERNEL);

        if (!vcpu->arch.sie_block)
                goto out_free_cpu;

        vcpu->arch.sie_block->icpua = id;
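        /* hook this vcpu's SIE block into the VM's system control area */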
        BUG_ON(!kvm->arch.sca);
        BUG_ON(kvm->arch.sca->cpu[id].sda);
        kvm->arch.sca->cpu[id].sda = (__u64) vcpu->arch.sie_block;
        vcpu->arch.sie_block->scaoh = (__u32)(((__u64)kvm->arch.sca) >> 32);
        vcpu->arch.sie_block->scaol = (__u32)(__u64)kvm->arch.sca;

        spin_lock_init(&vcpu->arch.local_int.lock);
        INIT_LIST_HEAD(&vcpu->arch.local_int.list);
        vcpu->arch.local_int.float_int = &kvm->arch.float_int;
        spin_lock_bh(&kvm->arch.float_int.lock);
        kvm->arch.float_int.local_int[id] = &vcpu->arch.local_int;
        init_waitqueue_head(&vcpu->arch.local_int.wq);
        vcpu->arch.local_int.cpuflags = &vcpu->arch.sie_block->cpuflags;
        spin_unlock_bh(&kvm->arch.float_int.lock);

        rc = kvm_vcpu_init(vcpu, kvm, id);
        if (rc)
                goto out_free_cpu;
        VM_EVENT(kvm, 3, "create cpu %d at %p, sie block at %p", id, vcpu,
                 vcpu->arch.sie_block);

        try_module_get(THIS_MODULE);

        return vcpu;
out_free_cpu:
        kfree(vcpu);
out_nomem:
        return ERR_PTR(rc);
}

void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
{
        VCPU_EVENT(vcpu, 3, "%s", "destroy cpu");
        free_page((unsigned long)(vcpu->arch.sie_block));
        kfree(vcpu);
        module_put(THIS_MODULE);
}

int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
{
        /* kvm common code refers to this, but never calls it */
        BUG();
        return 0;
}

static int kvm_arch_vcpu_ioctl_initial_reset(struct kvm_vcpu *vcpu)
{
        vcpu_load(vcpu);
        kvm_s390_vcpu_initial_reset(vcpu);
        vcpu_put(vcpu);
        return 0;
}

int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
        vcpu_load(vcpu);
        memcpy(&vcpu->arch.guest_gprs, &regs->gprs, sizeof(regs->gprs));
        vcpu_put(vcpu);
        return 0;
}

int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
        vcpu_load(vcpu);
        memcpy(&regs->gprs, &vcpu->arch.guest_gprs, sizeof(regs->gprs));
        vcpu_put(vcpu);
        return 0;
}

int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
                                  struct kvm_sregs *sregs)
{
        vcpu_load(vcpu);
        memcpy(&vcpu->arch.guest_acrs, &sregs->acrs, sizeof(sregs->acrs));
        memcpy(&vcpu->arch.sie_block->gcr, &sregs->crs, sizeof(sregs->crs));
        vcpu_put(vcpu);
        return 0;
}

int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
                                  struct kvm_sregs *sregs)
{
        vcpu_load(vcpu);
        memcpy(&sregs->acrs, &vcpu->arch.guest_acrs, sizeof(sregs->acrs));
        memcpy(&sregs->crs, &vcpu->arch.sie_block->gcr, sizeof(sregs->crs));
        vcpu_put(vcpu);
        return 0;
}

int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
        vcpu_load(vcpu);
        memcpy(&vcpu->arch.guest_fpregs.fprs, &fpu->fprs, sizeof(fpu->fprs));
        vcpu->arch.guest_fpregs.fpc = fpu->fpc;
        vcpu_put(vcpu);
        return 0;
}

int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
        vcpu_load(vcpu);
        memcpy(&fpu->fprs, &vcpu->arch.guest_fpregs.fprs, sizeof(fpu->fprs));
        fpu->fpc = vcpu->arch.guest_fpregs.fpc;
        vcpu_put(vcpu);
        return 0;
}

static int kvm_arch_vcpu_ioctl_set_initial_psw(struct kvm_vcpu *vcpu, psw_t psw)
{
        int rc = 0;

        vcpu_load(vcpu);
        if (atomic_read(&vcpu->arch.sie_block->cpuflags) & CPUSTAT_RUNNING)
                rc = -EBUSY;
        else
                vcpu->arch.sie_block->gpsw = psw;
        vcpu_put(vcpu);
        return rc;
}

int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
                                  struct kvm_translation *tr)
{
        return -EINVAL; /* not implemented yet */
}

int kvm_arch_vcpu_ioctl_debug_guest(struct kvm_vcpu *vcpu,
                                    struct kvm_debug_guest *dbg)
{
        return -EINVAL; /* not implemented yet */
}

static void __vcpu_run(struct kvm_vcpu *vcpu)
{
        memcpy(&vcpu->arch.sie_block->gg14, &vcpu->arch.guest_gprs[14], 16);

        if (need_resched())
                schedule();

        vcpu->arch.sie_block->icptcode = 0;
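        /*
         * kvm_guest_enter/exit flag guest context for CPU time
         * accounting and must be called with interrupts disabled.
         */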
        local_irq_disable();
        kvm_guest_enter();
        local_irq_enable();
        VCPU_EVENT(vcpu, 6, "entering sie flags %x",
                   atomic_read(&vcpu->arch.sie_block->cpuflags));
        sie64a(vcpu->arch.sie_block, vcpu->arch.guest_gprs);
        VCPU_EVENT(vcpu, 6, "exit sie icptcode %d",
                   vcpu->arch.sie_block->icptcode);
        local_irq_disable();
        kvm_guest_exit();
        local_irq_enable();

        memcpy(&vcpu->arch.guest_gprs[14], &vcpu->arch.sie_block->gg14, 16);
}

int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
{
        int rc;
        sigset_t sigsaved;

        vcpu_load(vcpu);

        if (vcpu->sigset_active)
                sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved);

        atomic_set_mask(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags);

        BUG_ON(vcpu->kvm->arch.float_int.local_int[vcpu->vcpu_id] == NULL);

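        /* on reentry, pick up a guest PSW that userspace may have
         * modified while handling the previous exit */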
        switch (kvm_run->exit_reason) {
        case KVM_EXIT_S390_SIEIC:
                vcpu->arch.sie_block->gpsw.mask = kvm_run->s390_sieic.mask;
                vcpu->arch.sie_block->gpsw.addr = kvm_run->s390_sieic.addr;
                break;
        case KVM_EXIT_UNKNOWN:
        case KVM_EXIT_S390_RESET:
                break;
        default:
                BUG();
        }

        might_sleep();

        do {
                kvm_s390_deliver_pending_interrupts(vcpu);
                __vcpu_run(vcpu);
                rc = kvm_handle_sie_intercept(vcpu);
        } while (!signal_pending(current) && !rc);

        if (signal_pending(current) && !rc)
                rc = -EINTR;

        if (rc == -ENOTSUPP) {
                /* intercept cannot be handled in-kernel, prepare kvm-run */
                kvm_run->exit_reason         = KVM_EXIT_S390_SIEIC;
                kvm_run->s390_sieic.icptcode = vcpu->arch.sie_block->icptcode;
                kvm_run->s390_sieic.mask     = vcpu->arch.sie_block->gpsw.mask;
                kvm_run->s390_sieic.addr     = vcpu->arch.sie_block->gpsw.addr;
                kvm_run->s390_sieic.ipa      = vcpu->arch.sie_block->ipa;
                kvm_run->s390_sieic.ipb      = vcpu->arch.sie_block->ipb;
                rc = 0;
        }

        if (rc == -EREMOTE) {
                /* intercept was handled, but userspace support is needed
                 * kvm_run has been prepared by the handler */
                rc = 0;
        }

        if (vcpu->sigset_active)
                sigprocmask(SIG_SETMASK, &sigsaved, NULL);

        vcpu_put(vcpu);

        vcpu->stat.exit_userspace++;
        return rc;
}

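/*
 * Write state into guest storage: prefix selects a copy via guest
 * logical (prefixed) addressing versus the absolute guest address.
 */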
static int __guestcopy(struct kvm_vcpu *vcpu, u64 guestdest, const void *from,
                       unsigned long n, int prefix)
{
        if (prefix)
                return copy_to_guest(vcpu, guestdest, from, n);
        else
                return copy_to_guest_absolute(vcpu, guestdest, from, n);
}

/*
 * store status at address
 * we have two special cases:
 * KVM_S390_STORE_STATUS_NOADDR: -> 0x1200 on 64 bit
 * KVM_S390_STORE_STATUS_PREFIXED: -> prefix
 */
int __kvm_s390_vcpu_store_status(struct kvm_vcpu *vcpu, unsigned long addr)
{
        const unsigned char archmode = 1;
        int prefix;

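        /* byte 163 of the save area receives the architecture mode id
         * (1 = z/Architecture) */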
        if (addr == KVM_S390_STORE_STATUS_NOADDR) {
                if (copy_to_guest_absolute(vcpu, 163ul, &archmode, 1))
                        return -EFAULT;
                addr = SAVE_AREA_BASE;
                prefix = 0;
        } else if (addr == KVM_S390_STORE_STATUS_PREFIXED) {
                if (copy_to_guest(vcpu, 163ul, &archmode, 1))
                        return -EFAULT;
                addr = SAVE_AREA_BASE;
                prefix = 1;
        } else
                prefix = 0;

        if (__guestcopy(vcpu, addr + offsetof(struct save_area_s390x, fp_regs),
                        vcpu->arch.guest_fpregs.fprs, 128, prefix))
                return -EFAULT;

        if (__guestcopy(vcpu, addr + offsetof(struct save_area_s390x, gp_regs),
                        vcpu->arch.guest_gprs, 128, prefix))
                return -EFAULT;

        if (__guestcopy(vcpu, addr + offsetof(struct save_area_s390x, psw),
                        &vcpu->arch.sie_block->gpsw, 16, prefix))
                return -EFAULT;

        if (__guestcopy(vcpu, addr + offsetof(struct save_area_s390x, pref_reg),
                        &vcpu->arch.sie_block->prefix, 4, prefix))
                return -EFAULT;

        if (__guestcopy(vcpu,
                        addr + offsetof(struct save_area_s390x, fp_ctrl_reg),
                        &vcpu->arch.guest_fpregs.fpc, 4, prefix))
                return -EFAULT;

        if (__guestcopy(vcpu, addr + offsetof(struct save_area_s390x, tod_reg),
                        &vcpu->arch.sie_block->todpr, 4, prefix))
                return -EFAULT;

        if (__guestcopy(vcpu, addr + offsetof(struct save_area_s390x, timer),
                        &vcpu->arch.sie_block->cputm, 8, prefix))
                return -EFAULT;

        if (__guestcopy(vcpu, addr + offsetof(struct save_area_s390x, clk_cmp),
                        &vcpu->arch.sie_block->ckc, 8, prefix))
                return -EFAULT;

        if (__guestcopy(vcpu, addr + offsetof(struct save_area_s390x, acc_regs),
                        &vcpu->arch.guest_acrs, 64, prefix))
                return -EFAULT;

        if (__guestcopy(vcpu,
                        addr + offsetof(struct save_area_s390x, ctrl_regs),
                        &vcpu->arch.sie_block->gcr, 128, prefix))
                return -EFAULT;
        return 0;
}

static int kvm_s390_vcpu_store_status(struct kvm_vcpu *vcpu, unsigned long addr)
{
        int rc;

        vcpu_load(vcpu);
        rc = __kvm_s390_vcpu_store_status(vcpu, addr);
        vcpu_put(vcpu);
        return rc;
}

long kvm_arch_vcpu_ioctl(struct file *filp,
                         unsigned int ioctl, unsigned long arg)
{
        struct kvm_vcpu *vcpu = filp->private_data;
        void __user *argp = (void __user *)arg;

        switch (ioctl) {
        case KVM_S390_INTERRUPT: {
                struct kvm_s390_interrupt s390int;

                if (copy_from_user(&s390int, argp, sizeof(s390int)))
                        return -EFAULT;
                return kvm_s390_inject_vcpu(vcpu, &s390int);
        }
        case KVM_S390_STORE_STATUS:
                return kvm_s390_vcpu_store_status(vcpu, arg);
        case KVM_S390_SET_INITIAL_PSW: {
                psw_t psw;

                if (copy_from_user(&psw, argp, sizeof(psw)))
                        return -EFAULT;
                return kvm_arch_vcpu_ioctl_set_initial_psw(vcpu, psw);
        }
        case KVM_S390_INITIAL_RESET:
                return kvm_arch_vcpu_ioctl_initial_reset(vcpu);
        default:
                ;
        }
        return -EINVAL;
}

/* Section: memory related */
int kvm_arch_set_memory_region(struct kvm *kvm,
                                struct kvm_userspace_memory_region *mem,
                                struct kvm_memory_slot old,
                                int user_alloc)
{
        /* A few sanity checks. We can have exactly one memory slot which has
           to start at guest virtual zero and which has to be located at a
           page boundary in userland and which has to end at a page boundary.
           The memory in userland is ok to be fragmented into various different
           vmas. It is okay to mmap() and munmap() stuff in this slot after
           doing this call at any time */

        if (mem->slot)
                return -EINVAL;

        if (mem->guest_phys_addr)
                return -EINVAL;

        if (mem->userspace_addr & (PAGE_SIZE - 1))
                return -EINVAL;

        if (mem->memory_size & (PAGE_SIZE - 1))
                return -EINVAL;

        kvm->arch.guest_origin = mem->userspace_addr;
        kvm->arch.guest_memsize = mem->memory_size;

        /* FIXME: we do want to interrupt running CPUs and update their memory
           configuration now to avoid race conditions. But hey, changing the
           memory layout while virtual CPUs are running is usually bad
           programming practice. */

        return 0;
}

gfn_t unalias_gfn(struct kvm *kvm, gfn_t gfn)
{
        return gfn;
}

static int __init kvm_s390_init(void)
{
        return kvm_init(NULL, sizeof(struct kvm_vcpu), THIS_MODULE);
}

static void __exit kvm_s390_exit(void)
{
        kvm_exit();
}

module_init(kvm_s390_init);
module_exit(kvm_s390_exit);