/*
 * kvm-s390.c -- hosting zSeries kernel virtual machines
 *
 * Copyright IBM Corp. 2008
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License (version 2 only)
 * as published by the Free Software Foundation.
 *
 *    Author(s): Carsten Otte <cotte@de.ibm.com>
 *               Christian Borntraeger <borntraeger@de.ibm.com>
 *               Heiko Carstens <heiko.carstens@de.ibm.com>
 */

#include <linux/compiler.h>
#include <linux/err.h>
#include <linux/fs.h>
#include <linux/init.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/timer.h>
#include <asm/lowcore.h>
#include <asm/pgtable.h>

#include "kvm-s390.h"
#include "gaccess.h"

#define VCPU_STAT(x) offsetof(struct kvm_vcpu, stat.x), KVM_STAT_VCPU

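/*
 * One debugfs file per counter in struct kvm_vcpu_stat; VCPU_STAT()
 * records each counter's offset so the generic KVM debugfs code can
 * read it out of any vcpu.
 */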
struct kvm_stats_debugfs_item debugfs_entries[] = {
	{ "userspace_handled", VCPU_STAT(exit_userspace) },
	{ "exit_null", VCPU_STAT(exit_null) },
	{ "exit_validity", VCPU_STAT(exit_validity) },
	{ "exit_stop_request", VCPU_STAT(exit_stop_request) },
	{ "exit_external_request", VCPU_STAT(exit_external_request) },
	{ "exit_external_interrupt", VCPU_STAT(exit_external_interrupt) },
	{ "exit_instruction", VCPU_STAT(exit_instruction) },
	{ "exit_program_interruption", VCPU_STAT(exit_program_interruption) },
	{ "exit_instr_and_program_int", VCPU_STAT(exit_instr_and_program) },
	{ "instruction_lctg", VCPU_STAT(instruction_lctg) },
	{ "instruction_lctl", VCPU_STAT(instruction_lctl) },
	{ "deliver_emergency_signal", VCPU_STAT(deliver_emergency_signal) },
	{ "deliver_service_signal", VCPU_STAT(deliver_service_signal) },
	{ "deliver_virtio_interrupt", VCPU_STAT(deliver_virtio_interrupt) },
	{ "deliver_stop_signal", VCPU_STAT(deliver_stop_signal) },
	{ "deliver_prefix_signal", VCPU_STAT(deliver_prefix_signal) },
	{ "deliver_restart_signal", VCPU_STAT(deliver_restart_signal) },
	{ "deliver_program_interruption", VCPU_STAT(deliver_program_int) },
	{ "exit_wait_state", VCPU_STAT(exit_wait_state) },
	{ "instruction_stidp", VCPU_STAT(instruction_stidp) },
	{ "instruction_spx", VCPU_STAT(instruction_spx) },
	{ "instruction_stpx", VCPU_STAT(instruction_stpx) },
	{ "instruction_stap", VCPU_STAT(instruction_stap) },
	{ "instruction_storage_key", VCPU_STAT(instruction_storage_key) },
	{ "instruction_stsch", VCPU_STAT(instruction_stsch) },
	{ "instruction_chsc", VCPU_STAT(instruction_chsc) },
	{ "instruction_stsi", VCPU_STAT(instruction_stsi) },
	{ "instruction_stfl", VCPU_STAT(instruction_stfl) },
	{ "instruction_sigp_sense", VCPU_STAT(instruction_sigp_sense) },
	{ "instruction_sigp_emergency", VCPU_STAT(instruction_sigp_emergency) },
	{ "instruction_sigp_stop", VCPU_STAT(instruction_sigp_stop) },
	{ "instruction_sigp_set_arch", VCPU_STAT(instruction_sigp_arch) },
	{ "instruction_sigp_set_prefix", VCPU_STAT(instruction_sigp_prefix) },
	{ "instruction_sigp_restart", VCPU_STAT(instruction_sigp_restart) },
	{ "diagnose_44", VCPU_STAT(diagnose_44) },
	{ NULL }
};


/* Section: not file related */
void kvm_arch_hardware_enable(void *garbage)
{
	/* every s390 is virtualization enabled ;-) */
}

void kvm_arch_hardware_disable(void *garbage)
{
}

int kvm_arch_hardware_setup(void)
{
	return 0;
}

void kvm_arch_hardware_unsetup(void)
{
}

void kvm_arch_check_processor_compat(void *rtn)
{
}

int kvm_arch_init(void *opaque)
{
	return 0;
}

void kvm_arch_exit(void)
{
}

/* Section: device related */
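/*
 * KVM_S390_ENABLE_SIE is the only device ioctl handled here: it asks
 * s390_enable_sie() to switch the calling process's address space into
 * a layout the SIE instruction can use.  It is expected to be issued
 * early, typically while the process is still single-threaded.
 */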
long kvm_arch_dev_ioctl(struct file *filp,
			unsigned int ioctl, unsigned long arg)
{
	if (ioctl == KVM_S390_ENABLE_SIE)
		return s390_enable_sie();
	return -EINVAL;
}

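/* No capabilities beyond the base set are advertised yet. */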
int kvm_dev_ioctl_check_extension(long ext)
{
	return 0;
}

/* Section: vm related */
/*
 * Get (and clear) the dirty memory log for a memory slot.
 */
int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
			       struct kvm_dirty_log *log)
{
	return 0;
}

long kvm_arch_vm_ioctl(struct file *filp,
		       unsigned int ioctl, unsigned long arg)
{
	struct kvm *kvm = filp->private_data;
	void __user *argp = (void __user *)arg;
	int r;

	switch (ioctl) {
	case KVM_S390_INTERRUPT: {
		struct kvm_s390_interrupt s390int;

		r = -EFAULT;
		if (copy_from_user(&s390int, argp, sizeof(s390int)))
			break;
		r = kvm_s390_inject_vm(kvm, &s390int);
		break;
	}
	default:
		r = -EINVAL;
	}

	return r;
}

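/*
 * VM creation: enable SIE for the current address space, then allocate
 * the kvm structure, the system control area (SCA, one entry per vcpu)
 * and the s390 debug feature used for the VM_EVENT traces.  The error
 * path unwinds in reverse allocation order.
 */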
struct kvm *kvm_arch_create_vm(void)
{
	struct kvm *kvm;
	int rc;
	char debug_name[16];

	rc = s390_enable_sie();
	if (rc)
		goto out_nokvm;

	rc = -ENOMEM;
	kvm = kzalloc(sizeof(struct kvm), GFP_KERNEL);
	if (!kvm)
		goto out_nokvm;

	kvm->arch.sca = (struct sca_block *) get_zeroed_page(GFP_KERNEL);
	if (!kvm->arch.sca)
		goto out_nosca;

	sprintf(debug_name, "kvm-%u", current->pid);

	kvm->arch.dbf = debug_register(debug_name, 8, 2, 8 * sizeof(long));
	if (!kvm->arch.dbf)
		goto out_nodbf;

	spin_lock_init(&kvm->arch.float_int.lock);
	INIT_LIST_HEAD(&kvm->arch.float_int.list);

	debug_register_view(kvm->arch.dbf, &debug_sprintf_view);
	VM_EVENT(kvm, 3, "%s", "vm created");

	try_module_get(THIS_MODULE);

	return kvm;
out_nodbf:
	free_page((unsigned long)(kvm->arch.sca));
out_nosca:
	kfree(kvm);
out_nokvm:
	return ERR_PTR(rc);
}

void kvm_arch_destroy_vm(struct kvm *kvm)
{
	debug_unregister(kvm->arch.dbf);
	kvm_free_physmem(kvm);
	free_page((unsigned long)(kvm->arch.sca));
	kfree(kvm);
	module_put(THIS_MODULE);
}

/* Section: vcpu related */
int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
{
	return 0;
}

void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu)
{
	/* kvm common code refers to this, but doesn't call it */
	BUG();
}

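/*
 * load/put swap the floating point and access registers between host
 * and guest context; the general purpose registers travel through
 * vcpu->arch.guest_gprs instead and are handled in __vcpu_run().
 */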
void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
	save_fp_regs(&vcpu->arch.host_fpregs);
	save_access_regs(vcpu->arch.host_acrs);
	vcpu->arch.guest_fpregs.fpc &= FPC_VALID_MASK;
	restore_fp_regs(&vcpu->arch.guest_fpregs);
	restore_access_regs(vcpu->arch.guest_acrs);
}

void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
{
	save_fp_regs(&vcpu->arch.guest_fpregs);
	save_access_regs(vcpu->arch.guest_acrs);
	restore_fp_regs(&vcpu->arch.host_fpregs);
	restore_access_regs(vcpu->arch.host_acrs);
}

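/*
 * Architected initial CPU reset: clear the PSW, prefix, CPU timer and
 * clock comparator, and set CR0/CR14 to their documented reset values
 * (a minimal set of enabled external and machine check subclasses).
 */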
static void kvm_s390_vcpu_initial_reset(struct kvm_vcpu *vcpu)
{
	/* this equals initial cpu reset in POP (Principles of Operation),
	 * but we don't switch to ESA */
	vcpu->arch.sie_block->gpsw.mask = 0UL;
	vcpu->arch.sie_block->gpsw.addr = 0UL;
	vcpu->arch.sie_block->prefix    = 0UL;
	vcpu->arch.sie_block->ihcpu     = 0xffff;
	vcpu->arch.sie_block->cputm     = 0UL;
	vcpu->arch.sie_block->ckc       = 0UL;
	vcpu->arch.sie_block->todpr     = 0;
	memset(vcpu->arch.sie_block->gcr, 0, 16 * sizeof(__u64));
	vcpu->arch.sie_block->gcr[0]  = 0xE0UL;
	vcpu->arch.sie_block->gcr[14] = 0xC2000000UL;
	vcpu->arch.guest_fpregs.fpc = 0;
	asm volatile("lfpc %0" : : "Q" (vcpu->arch.guest_fpregs.fpc));
	vcpu->arch.sie_block->gbea = 1;
}

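/*
 * Set up the SIE control block: run the guest in z/Architecture mode,
 * with guest memory starting at absolute zero and a generous limit;
 * ecb/eca select the interception and control facilities this
 * implementation relies on.  The ckc timer wakes a vcpu sleeping in
 * wait state when its clock comparator fires.
 */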
int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
{
	atomic_set(&vcpu->arch.sie_block->cpuflags, CPUSTAT_ZARCH);
	vcpu->arch.sie_block->gmslm = 0xffffffffffUL;
	vcpu->arch.sie_block->gmsor = 0x000000000000;
	vcpu->arch.sie_block->ecb   = 2;
	vcpu->arch.sie_block->eca   = 0xC1002001U;
	setup_timer(&vcpu->arch.ckc_timer, kvm_s390_idle_wakeup,
		 (unsigned long) vcpu);
	get_cpu_id(&vcpu->arch.cpu_id);
	vcpu->arch.cpu_id.version = 0xfe;
	return 0;
}

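/*
 * A vcpu consists of the kvm_vcpu structure plus one page holding the
 * SIE control block.  The block is entered into the SCA slot for this
 * cpu id, and the vcpu's local interrupt state is linked into the
 * per-VM floating interrupt table under the float_int lock.
 */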
struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm,
				      unsigned int id)
{
	struct kvm_vcpu *vcpu = kzalloc(sizeof(struct kvm_vcpu), GFP_KERNEL);
	int rc = -ENOMEM;

	if (!vcpu)
		goto out_nomem;

	vcpu->arch.sie_block = (struct sie_block *) get_zeroed_page(GFP_KERNEL);

	if (!vcpu->arch.sie_block)
		goto out_free_cpu;

	vcpu->arch.sie_block->icpua = id;
	BUG_ON(!kvm->arch.sca);
	BUG_ON(kvm->arch.sca->cpu[id].sda);
	kvm->arch.sca->cpu[id].sda = (__u64) vcpu->arch.sie_block;
	vcpu->arch.sie_block->scaoh = (__u32)(((__u64)kvm->arch.sca) >> 32);
	vcpu->arch.sie_block->scaol = (__u32)(__u64)kvm->arch.sca;

	spin_lock_init(&vcpu->arch.local_int.lock);
	INIT_LIST_HEAD(&vcpu->arch.local_int.list);
	vcpu->arch.local_int.float_int = &kvm->arch.float_int;
	spin_lock_bh(&kvm->arch.float_int.lock);
	kvm->arch.float_int.local_int[id] = &vcpu->arch.local_int;
	init_waitqueue_head(&vcpu->arch.local_int.wq);
	vcpu->arch.local_int.cpuflags = &vcpu->arch.sie_block->cpuflags;
	spin_unlock_bh(&kvm->arch.float_int.lock);

	rc = kvm_vcpu_init(vcpu, kvm, id);
	if (rc)
		goto out_free_sie_block;	/* don't leak the sie block page */
	VM_EVENT(kvm, 3, "create cpu %d at %p, sie block at %p", id, vcpu,
		 vcpu->arch.sie_block);

	try_module_get(THIS_MODULE);

	return vcpu;
out_free_sie_block:
	free_page((unsigned long)(vcpu->arch.sie_block));
out_free_cpu:
	kfree(vcpu);
out_nomem:
	return ERR_PTR(rc);
}

void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
{
	VCPU_EVENT(vcpu, 3, "%s", "destroy cpu");
	free_page((unsigned long)(vcpu->arch.sie_block));
	kfree(vcpu);
	module_put(THIS_MODULE);
}

int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
{
	/* kvm common code refers to this, but never calls it */
	BUG();
	return 0;
}

static int kvm_arch_vcpu_ioctl_initial_reset(struct kvm_vcpu *vcpu)
{
	vcpu_load(vcpu);
	kvm_s390_vcpu_initial_reset(vcpu);
	vcpu_put(vcpu);
	return 0;
}

int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
	vcpu_load(vcpu);
	memcpy(&vcpu->arch.guest_gprs, &regs->gprs, sizeof(regs->gprs));
	vcpu_put(vcpu);
	return 0;
}

int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
	vcpu_load(vcpu);
	memcpy(&regs->gprs, &vcpu->arch.guest_gprs, sizeof(regs->gprs));
	vcpu_put(vcpu);
	return 0;
}

int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
				  struct kvm_sregs *sregs)
{
	vcpu_load(vcpu);
	memcpy(&vcpu->arch.guest_acrs, &sregs->acrs, sizeof(sregs->acrs));
	memcpy(&vcpu->arch.sie_block->gcr, &sregs->crs, sizeof(sregs->crs));
	vcpu_put(vcpu);
	return 0;
}

int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
				  struct kvm_sregs *sregs)
{
	vcpu_load(vcpu);
	memcpy(&sregs->acrs, &vcpu->arch.guest_acrs, sizeof(sregs->acrs));
	memcpy(&sregs->crs, &vcpu->arch.sie_block->gcr, sizeof(sregs->crs));
	vcpu_put(vcpu);
	return 0;
}

int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
	vcpu_load(vcpu);
	memcpy(&vcpu->arch.guest_fpregs.fprs, &fpu->fprs, sizeof(fpu->fprs));
	vcpu->arch.guest_fpregs.fpc = fpu->fpc;
	vcpu_put(vcpu);
	return 0;
}

int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
	vcpu_load(vcpu);
	memcpy(&fpu->fprs, &vcpu->arch.guest_fpregs.fprs, sizeof(fpu->fprs));
	fpu->fpc = vcpu->arch.guest_fpregs.fpc;
	vcpu_put(vcpu);
	return 0;
}

static int kvm_arch_vcpu_ioctl_set_initial_psw(struct kvm_vcpu *vcpu, psw_t psw)
{
	int rc = 0;

	vcpu_load(vcpu);
	if (atomic_read(&vcpu->arch.sie_block->cpuflags) & CPUSTAT_RUNNING)
		rc = -EBUSY;
	else
		vcpu->arch.sie_block->gpsw = psw;
	vcpu_put(vcpu);
	return rc;
}

int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
				  struct kvm_translation *tr)
{
	return -EINVAL; /* not implemented yet */
}

int kvm_arch_vcpu_ioctl_debug_guest(struct kvm_vcpu *vcpu,
				    struct kvm_debug_guest *dbg)
{
	return -EINVAL; /* not implemented yet */
}

int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
				    struct kvm_mp_state *mp_state)
{
	return -EINVAL; /* not implemented yet */
}

int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
				    struct kvm_mp_state *mp_state)
{
	return -EINVAL; /* not implemented yet */
}

extern void s390_handle_mcck(void);

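/*
 * One round trip through SIE: guest registers 14/15 live in the SIE
 * block (gg14/gg15) while the guest runs, pending machine checks and
 * guest interrupts are handled first, and a host fault on the sie64a
 * call itself is reflected to the guest as an addressing exception.
 */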
static void __vcpu_run(struct kvm_vcpu *vcpu)
{
	memcpy(&vcpu->arch.sie_block->gg14, &vcpu->arch.guest_gprs[14], 16);

	if (need_resched())
		schedule();

	if (test_thread_flag(TIF_MCCK_PENDING))
		s390_handle_mcck();

	kvm_s390_deliver_pending_interrupts(vcpu);

	vcpu->arch.sie_block->icptcode = 0;
	local_irq_disable();
	kvm_guest_enter();
	local_irq_enable();
	VCPU_EVENT(vcpu, 6, "entering sie flags %x",
		   atomic_read(&vcpu->arch.sie_block->cpuflags));
	if (sie64a(vcpu->arch.sie_block, vcpu->arch.guest_gprs)) {
		VCPU_EVENT(vcpu, 3, "%s", "fault in sie instruction");
		kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
	}
	VCPU_EVENT(vcpu, 6, "exit sie icptcode %d",
		   vcpu->arch.sie_block->icptcode);
	local_irq_disable();
	kvm_guest_exit();
	local_irq_enable();

	memcpy(&vcpu->arch.guest_gprs[14], &vcpu->arch.sie_block->gg14, 16);
}

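/*
 * The run loop: re-enter SIE until an intercept needs userspace
 * (-ENOTSUPP from the intercept handlers) or a signal is pending.
 * On entry, a previous SIEIC exit lets userspace hand back a possibly
 * modified PSW.
 */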
int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
{
	int rc;
	sigset_t sigsaved;

	vcpu_load(vcpu);

	if (vcpu->sigset_active)
		sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved);

	atomic_set_mask(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags);

	BUG_ON(vcpu->kvm->arch.float_int.local_int[vcpu->vcpu_id] == NULL);

	switch (kvm_run->exit_reason) {
	case KVM_EXIT_S390_SIEIC:
		vcpu->arch.sie_block->gpsw.mask = kvm_run->s390_sieic.mask;
		vcpu->arch.sie_block->gpsw.addr = kvm_run->s390_sieic.addr;
		break;
	case KVM_EXIT_UNKNOWN:
	case KVM_EXIT_S390_RESET:
		break;
	default:
		BUG();
	}

	might_sleep();

	do {
		__vcpu_run(vcpu);
		rc = kvm_handle_sie_intercept(vcpu);
	} while (!signal_pending(current) && !rc);

	if (signal_pending(current) && !rc)
		rc = -EINTR;

	if (rc == -ENOTSUPP) {
		/* intercept cannot be handled in-kernel, prepare kvm-run */
		kvm_run->exit_reason         = KVM_EXIT_S390_SIEIC;
		kvm_run->s390_sieic.icptcode = vcpu->arch.sie_block->icptcode;
		kvm_run->s390_sieic.mask     = vcpu->arch.sie_block->gpsw.mask;
		kvm_run->s390_sieic.addr     = vcpu->arch.sie_block->gpsw.addr;
		kvm_run->s390_sieic.ipa      = vcpu->arch.sie_block->ipa;
		kvm_run->s390_sieic.ipb      = vcpu->arch.sie_block->ipb;
		rc = 0;
	}

	if (rc == -EREMOTE) {
		/* intercept was handled, but userspace support is needed;
		 * kvm_run has been prepared by the handler */
		rc = 0;
	}

	if (vcpu->sigset_active)
		sigprocmask(SIG_SETMASK, &sigsaved, NULL);

	vcpu_put(vcpu);

	vcpu->stat.exit_userspace++;
	return rc;
}

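/*
 * Copy into guest storage either through the logical path (which
 * applies prefixing) or to absolute storage, depending on how the
 * store status address was specified.
 */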
static int __guestcopy(struct kvm_vcpu *vcpu, u64 guestdest, const void *from,
		       unsigned long n, int prefix)
{
	if (prefix)
		return copy_to_guest(vcpu, guestdest, from, n);
	else
		return copy_to_guest_absolute(vcpu, guestdest, from, n);
}

/*
 * store status at address
 * we have two special cases:
 * KVM_S390_STORE_STATUS_NOADDR: -> 0x1200 on 64 bit
 * KVM_S390_STORE_STATUS_PREFIXED: -> prefix
 */
int __kvm_s390_vcpu_store_status(struct kvm_vcpu *vcpu, unsigned long addr)
{
	const unsigned char archmode = 1;
	int prefix;

	if (addr == KVM_S390_STORE_STATUS_NOADDR) {
		if (copy_to_guest_absolute(vcpu, 163ul, &archmode, 1))
			return -EFAULT;
		addr = SAVE_AREA_BASE;
		prefix = 0;
	} else if (addr == KVM_S390_STORE_STATUS_PREFIXED) {
		if (copy_to_guest(vcpu, 163ul, &archmode, 1))
			return -EFAULT;
		addr = SAVE_AREA_BASE;
		prefix = 1;
	} else
		prefix = 0;

	if (__guestcopy(vcpu, addr + offsetof(struct save_area_s390x, fp_regs),
			vcpu->arch.guest_fpregs.fprs, 128, prefix))
		return -EFAULT;

	if (__guestcopy(vcpu, addr + offsetof(struct save_area_s390x, gp_regs),
			vcpu->arch.guest_gprs, 128, prefix))
		return -EFAULT;

	if (__guestcopy(vcpu, addr + offsetof(struct save_area_s390x, psw),
			&vcpu->arch.sie_block->gpsw, 16, prefix))
		return -EFAULT;

	if (__guestcopy(vcpu, addr + offsetof(struct save_area_s390x, pref_reg),
			&vcpu->arch.sie_block->prefix, 4, prefix))
		return -EFAULT;

	if (__guestcopy(vcpu,
			addr + offsetof(struct save_area_s390x, fp_ctrl_reg),
			&vcpu->arch.guest_fpregs.fpc, 4, prefix))
		return -EFAULT;

	if (__guestcopy(vcpu, addr + offsetof(struct save_area_s390x, tod_reg),
			&vcpu->arch.sie_block->todpr, 4, prefix))
		return -EFAULT;

	if (__guestcopy(vcpu, addr + offsetof(struct save_area_s390x, timer),
			&vcpu->arch.sie_block->cputm, 8, prefix))
		return -EFAULT;

	if (__guestcopy(vcpu, addr + offsetof(struct save_area_s390x, clk_cmp),
			&vcpu->arch.sie_block->ckc, 8, prefix))
		return -EFAULT;

	if (__guestcopy(vcpu, addr + offsetof(struct save_area_s390x, acc_regs),
			&vcpu->arch.guest_acrs, 64, prefix))
		return -EFAULT;

	if (__guestcopy(vcpu,
			addr + offsetof(struct save_area_s390x, ctrl_regs),
			&vcpu->arch.sie_block->gcr, 128, prefix))
		return -EFAULT;
	return 0;
}

static int kvm_s390_vcpu_store_status(struct kvm_vcpu *vcpu, unsigned long addr)
{
	int rc;

	vcpu_load(vcpu);
	rc = __kvm_s390_vcpu_store_status(vcpu, addr);
	vcpu_put(vcpu);
	return rc;
}

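/*
 * These vcpu ioctls are reached through ioctl() on a vcpu fd.  A
 * minimal userspace sketch (hypothetical fd and parameter values,
 * using the structures from <linux/kvm.h>):
 *
 *	struct kvm_s390_interrupt s390int = {
 *		.type = KVM_S390_INT_VIRTIO,
 *		.parm64 = cookie,
 *	};
 *	if (ioctl(vcpu_fd, KVM_S390_INTERRUPT, &s390int) < 0)
 *		perror("KVM_S390_INTERRUPT");
 *
 * Note that KVM_S390_STORE_STATUS passes the target address directly
 * in 'arg' rather than through a pointer.
 */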
long kvm_arch_vcpu_ioctl(struct file *filp,
			 unsigned int ioctl, unsigned long arg)
{
	struct kvm_vcpu *vcpu = filp->private_data;
	void __user *argp = (void __user *)arg;

	switch (ioctl) {
	case KVM_S390_INTERRUPT: {
		struct kvm_s390_interrupt s390int;

		if (copy_from_user(&s390int, argp, sizeof(s390int)))
			return -EFAULT;
		return kvm_s390_inject_vcpu(vcpu, &s390int);
	}
	case KVM_S390_STORE_STATUS:
		return kvm_s390_vcpu_store_status(vcpu, arg);
	case KVM_S390_SET_INITIAL_PSW: {
		psw_t psw;

		if (copy_from_user(&psw, argp, sizeof(psw)))
			return -EFAULT;
		return kvm_arch_vcpu_ioctl_set_initial_psw(vcpu, psw);
	}
	case KVM_S390_INITIAL_RESET:
		return kvm_arch_vcpu_ioctl_initial_reset(vcpu);
	default:
		;
	}
	return -EINVAL;
}


/* Section: memory related */
int kvm_arch_set_memory_region(struct kvm *kvm,
				struct kvm_userspace_memory_region *mem,
				struct kvm_memory_slot old,
				int user_alloc)
{
	/* A few sanity checks: we support exactly one memory slot, it must
	   start at guest physical zero, and it must begin and end on a page
	   boundary in userland.  The userland memory may be fragmented into
	   several different vmas, and it is fine to mmap() and munmap()
	   within this slot at any time after this call. */

	if (mem->slot)
		return -EINVAL;

	if (mem->guest_phys_addr)
		return -EINVAL;

	if (mem->userspace_addr & (PAGE_SIZE - 1))
		return -EINVAL;

	if (mem->memory_size & (PAGE_SIZE - 1))
		return -EINVAL;

	kvm->arch.guest_origin = mem->userspace_addr;
	kvm->arch.guest_memsize = mem->memory_size;

	/* FIXME: we do want to interrupt running CPUs and update their memory
	   configuration now to avoid race conditions.  But hey, changing the
	   memory layout while virtual CPUs are running is usually bad
	   programming practice. */

	return 0;
}

gfn_t unalias_gfn(struct kvm *kvm, gfn_t gfn)
{
	return gfn;
}

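/*
 * s390 needs no hardware enablement hooks; module init just registers
 * with the generic KVM code, passing the size of our vcpu structure.
 */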
static int __init kvm_s390_init(void)
{
	return kvm_init(NULL, sizeof(struct kvm_vcpu), THIS_MODULE);
}

static void __exit kvm_s390_exit(void)
{
	kvm_exit();
}

module_init(kvm_s390_init);
module_exit(kvm_s390_exit);