/*
 * kvm-s390.c -- hosting zSeries kernel virtual machines
 *
 * Copyright IBM Corp. 2008,2009
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License (version 2 only)
 * as published by the Free Software Foundation.
 *
 *    Author(s): Carsten Otte <cotte@de.ibm.com>
 *               Christian Borntraeger <borntraeger@de.ibm.com>
 *               Heiko Carstens <heiko.carstens@de.ibm.com>
 *               Christian Ehrhardt <ehrhardt@de.ibm.com>
 */

#include <linux/compiler.h>
#include <linux/err.h>
#include <linux/fs.h>
#include <linux/hrtimer.h>
#include <linux/init.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/timer.h>
#include <asm/asm-offsets.h>
#include <asm/lowcore.h>
#include <asm/pgtable.h>
#include <asm/nmi.h>
#include <asm/system.h>
#include "kvm-s390.h"
#include "gaccess.h"

#define VCPU_STAT(x) offsetof(struct kvm_vcpu, stat.x), KVM_STAT_VCPU

struct kvm_stats_debugfs_item debugfs_entries[] = {
        { "userspace_handled", VCPU_STAT(exit_userspace) },
        { "exit_null", VCPU_STAT(exit_null) },
        { "exit_validity", VCPU_STAT(exit_validity) },
        { "exit_stop_request", VCPU_STAT(exit_stop_request) },
        { "exit_external_request", VCPU_STAT(exit_external_request) },
        { "exit_external_interrupt", VCPU_STAT(exit_external_interrupt) },
        { "exit_instruction", VCPU_STAT(exit_instruction) },
        { "exit_program_interruption", VCPU_STAT(exit_program_interruption) },
        { "exit_instr_and_program_int", VCPU_STAT(exit_instr_and_program) },
        { "instruction_lctlg", VCPU_STAT(instruction_lctlg) },
        { "instruction_lctl", VCPU_STAT(instruction_lctl) },
        { "deliver_emergency_signal", VCPU_STAT(deliver_emergency_signal) },
        { "deliver_service_signal", VCPU_STAT(deliver_service_signal) },
        { "deliver_virtio_interrupt", VCPU_STAT(deliver_virtio_interrupt) },
        { "deliver_stop_signal", VCPU_STAT(deliver_stop_signal) },
        { "deliver_prefix_signal", VCPU_STAT(deliver_prefix_signal) },
        { "deliver_restart_signal", VCPU_STAT(deliver_restart_signal) },
        { "deliver_program_interruption", VCPU_STAT(deliver_program_int) },
        { "exit_wait_state", VCPU_STAT(exit_wait_state) },
        { "instruction_stidp", VCPU_STAT(instruction_stidp) },
        { "instruction_spx", VCPU_STAT(instruction_spx) },
        { "instruction_stpx", VCPU_STAT(instruction_stpx) },
        { "instruction_stap", VCPU_STAT(instruction_stap) },
        { "instruction_storage_key", VCPU_STAT(instruction_storage_key) },
        { "instruction_stsch", VCPU_STAT(instruction_stsch) },
        { "instruction_chsc", VCPU_STAT(instruction_chsc) },
        { "instruction_stsi", VCPU_STAT(instruction_stsi) },
        { "instruction_stfl", VCPU_STAT(instruction_stfl) },
        { "instruction_sigp_sense", VCPU_STAT(instruction_sigp_sense) },
        { "instruction_sigp_emergency", VCPU_STAT(instruction_sigp_emergency) },
        { "instruction_sigp_stop", VCPU_STAT(instruction_sigp_stop) },
        { "instruction_sigp_set_arch", VCPU_STAT(instruction_sigp_arch) },
        { "instruction_sigp_set_prefix", VCPU_STAT(instruction_sigp_prefix) },
        { "instruction_sigp_restart", VCPU_STAT(instruction_sigp_restart) },
        { "diagnose_44", VCPU_STAT(diagnose_44) },
        { NULL }
};

static unsigned long long *facilities;

/* Section: not file related */
int kvm_arch_hardware_enable(void *garbage)
{
        /* every s390 is virtualization enabled ;-) */
        return 0;
}

void kvm_arch_hardware_disable(void *garbage)
{
}

int kvm_arch_hardware_setup(void)
{
        return 0;
}

void kvm_arch_hardware_unsetup(void)
{
}

void kvm_arch_check_processor_compat(void *rtn)
{
}

int kvm_arch_init(void *opaque)
{
        return 0;
}

void kvm_arch_exit(void)
{
}

/* Section: device related */
long kvm_arch_dev_ioctl(struct file *filp,
                        unsigned int ioctl, unsigned long arg)
{
        if (ioctl == KVM_S390_ENABLE_SIE)
                return s390_enable_sie();
        return -EINVAL;
}

int kvm_dev_ioctl_check_extension(long ext)
{
        int r;

        switch (ext) {
        case KVM_CAP_S390_PSW:
                r = 1;
                break;
        default:
                r = 0;
        }
        return r;
}

/* Section: vm related */
/*
 * Get (and clear) the dirty memory log for a memory slot.
 */
int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
                               struct kvm_dirty_log *log)
{
        return 0;
}

long kvm_arch_vm_ioctl(struct file *filp,
                       unsigned int ioctl, unsigned long arg)
{
        struct kvm *kvm = filp->private_data;
        void __user *argp = (void __user *)arg;
        int r;

        switch (ioctl) {
        case KVM_S390_INTERRUPT: {
                struct kvm_s390_interrupt s390int;

                r = -EFAULT;
                if (copy_from_user(&s390int, argp, sizeof(s390int)))
                        break;
                r = kvm_s390_inject_vm(kvm, &s390int);
                break;
        }
        default:
                r = -ENOTTY;
        }

        return r;
}

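/*
 * Create a new virtual machine: enable SIE for the calling process,
 * allocate the kvm structure and the system control area (SCA), and
 * register a per-VM s390 debug feature for event tracing.
 */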
struct kvm *kvm_arch_create_vm(void)
{
        struct kvm *kvm;
        int rc;
        char debug_name[16];

        rc = s390_enable_sie();
        if (rc)
                goto out_nokvm;

        rc = -ENOMEM;
        kvm = kzalloc(sizeof(struct kvm), GFP_KERNEL);
        if (!kvm)
                goto out_nokvm;

        kvm->arch.sca = (struct sca_block *) get_zeroed_page(GFP_KERNEL);
        if (!kvm->arch.sca)
                goto out_nosca;

        sprintf(debug_name, "kvm-%u", current->pid);

        kvm->arch.dbf = debug_register(debug_name, 8, 2, 8 * sizeof(long));
        if (!kvm->arch.dbf)
                goto out_nodbf;

        spin_lock_init(&kvm->arch.float_int.lock);
        INIT_LIST_HEAD(&kvm->arch.float_int.list);

        debug_register_view(kvm->arch.dbf, &debug_sprintf_view);
        VM_EVENT(kvm, 3, "%s", "vm created");

        return kvm;
out_nodbf:
        free_page((unsigned long)(kvm->arch.sca));
out_nosca:
        kfree(kvm);
out_nokvm:
        return ERR_PTR(rc);
}

void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
{
        VCPU_EVENT(vcpu, 3, "%s", "free cpu");
        if (vcpu->kvm->arch.sca->cpu[vcpu->vcpu_id].sda ==
                (__u64) vcpu->arch.sie_block)
                vcpu->kvm->arch.sca->cpu[vcpu->vcpu_id].sda = 0;
        smp_mb();
        free_page((unsigned long)(vcpu->arch.sie_block));
        kvm_vcpu_uninit(vcpu);
        kfree(vcpu);
}

static void kvm_free_vcpus(struct kvm *kvm)
{
        unsigned int i;
        struct kvm_vcpu *vcpu;

        kvm_for_each_vcpu(i, vcpu, kvm)
                kvm_arch_vcpu_destroy(vcpu);

        mutex_lock(&kvm->lock);
        for (i = 0; i < atomic_read(&kvm->online_vcpus); i++)
                kvm->vcpus[i] = NULL;

        atomic_set(&kvm->online_vcpus, 0);
        mutex_unlock(&kvm->lock);
}

void kvm_arch_sync_events(struct kvm *kvm)
{
}

void kvm_arch_destroy_vm(struct kvm *kvm)
{
        kvm_free_vcpus(kvm);
        kvm_free_physmem(kvm);
        free_page((unsigned long)(kvm->arch.sca));
        debug_unregister(kvm->arch.dbf);
        kfree(kvm);
}

/* Section: vcpu related */
int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
{
        return 0;
}

void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu)
{
        /* Nothing to do */
}

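/*
 * Scheduled in: save the host floating point and access registers and
 * install the guest copies; kvm_arch_vcpu_put() below does the reverse
 * when the vcpu is scheduled out.
 */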
void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
        save_fp_regs(&vcpu->arch.host_fpregs);
        save_access_regs(vcpu->arch.host_acrs);
        vcpu->arch.guest_fpregs.fpc &= FPC_VALID_MASK;
        restore_fp_regs(&vcpu->arch.guest_fpregs);
        restore_access_regs(vcpu->arch.guest_acrs);
}

void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
{
        save_fp_regs(&vcpu->arch.guest_fpregs);
        save_access_regs(vcpu->arch.guest_acrs);
        restore_fp_regs(&vcpu->arch.host_fpregs);
        restore_access_regs(vcpu->arch.host_acrs);
}

static void kvm_s390_vcpu_initial_reset(struct kvm_vcpu *vcpu)
{
        /* this equals the initial cpu reset in the POP (Principles of
         * Operation), except that we don't switch back to ESA mode */
        vcpu->arch.sie_block->gpsw.mask = 0UL;
        vcpu->arch.sie_block->gpsw.addr = 0UL;
        vcpu->arch.sie_block->prefix    = 0UL;
        vcpu->arch.sie_block->ihcpu     = 0xffff;
        vcpu->arch.sie_block->cputm     = 0UL;
        vcpu->arch.sie_block->ckc       = 0UL;
        vcpu->arch.sie_block->todpr     = 0;
        memset(vcpu->arch.sie_block->gcr, 0, 16 * sizeof(__u64));
        vcpu->arch.sie_block->gcr[0]  = 0xE0UL;
        vcpu->arch.sie_block->gcr[14] = 0xC2000000UL;
        vcpu->arch.guest_fpregs.fpc = 0;
        asm volatile("lfpc %0" : : "Q" (vcpu->arch.guest_fpregs.fpc));
        vcpu->arch.sie_block->gbea = 1;
}

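/*
 * Initialize the SIE control block of a new vcpu: z/Architecture mode,
 * execution and interception controls, the host facility list, and the
 * clock comparator wakeup timer with its tasklet.
 */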
int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
{
        atomic_set(&vcpu->arch.sie_block->cpuflags, CPUSTAT_ZARCH);
        set_bit(KVM_REQ_MMU_RELOAD, &vcpu->requests);
        vcpu->arch.sie_block->ecb   = 2;
        vcpu->arch.sie_block->eca   = 0xC1002001U;
        vcpu->arch.sie_block->fac   = (int) (long) facilities;
        hrtimer_init(&vcpu->arch.ckc_timer, CLOCK_REALTIME, HRTIMER_MODE_ABS);
        tasklet_init(&vcpu->arch.tasklet, kvm_s390_tasklet,
                     (unsigned long) vcpu);
        vcpu->arch.ckc_timer.function = kvm_s390_idle_wakeup;
        get_cpu_id(&vcpu->arch.cpu_id);
        vcpu->arch.cpu_id.version = 0xff;
        return 0;
}

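/*
 * Allocate a vcpu and its SIE control block and hook it into the VM's
 * system control area (SCA) and floating interrupt bookkeeping.
 */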
struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm,
                                      unsigned int id)
{
        struct kvm_vcpu *vcpu = kzalloc(sizeof(struct kvm_vcpu), GFP_KERNEL);
        int rc = -ENOMEM;

        if (!vcpu)
                goto out_nomem;

        vcpu->arch.sie_block = (struct kvm_s390_sie_block *)
                                        get_zeroed_page(GFP_KERNEL);

        if (!vcpu->arch.sie_block)
                goto out_free_cpu;

        vcpu->arch.sie_block->icpua = id;
        BUG_ON(!kvm->arch.sca);
        if (!kvm->arch.sca->cpu[id].sda)
                kvm->arch.sca->cpu[id].sda = (__u64) vcpu->arch.sie_block;
        vcpu->arch.sie_block->scaoh = (__u32)(((__u64)kvm->arch.sca) >> 32);
        vcpu->arch.sie_block->scaol = (__u32)(__u64)kvm->arch.sca;

        spin_lock_init(&vcpu->arch.local_int.lock);
        INIT_LIST_HEAD(&vcpu->arch.local_int.list);
        vcpu->arch.local_int.float_int = &kvm->arch.float_int;
        spin_lock(&kvm->arch.float_int.lock);
        kvm->arch.float_int.local_int[id] = &vcpu->arch.local_int;
        init_waitqueue_head(&vcpu->arch.local_int.wq);
        vcpu->arch.local_int.cpuflags = &vcpu->arch.sie_block->cpuflags;
        spin_unlock(&kvm->arch.float_int.lock);

        rc = kvm_vcpu_init(vcpu, kvm, id);
        if (rc)
                goto out_free_cpu;
        VM_EVENT(kvm, 3, "create cpu %d at %p, sie block at %p", id, vcpu,
                 vcpu->arch.sie_block);

        return vcpu;
out_free_cpu:
        kfree(vcpu);
out_nomem:
        return ERR_PTR(rc);
}

int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
{
        /* kvm common code refers to this, but never calls it */
        BUG();
        return 0;
}

static int kvm_arch_vcpu_ioctl_initial_reset(struct kvm_vcpu *vcpu)
{
        vcpu_load(vcpu);
        kvm_s390_vcpu_initial_reset(vcpu);
        vcpu_put(vcpu);
        return 0;
}

int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
        vcpu_load(vcpu);
        memcpy(&vcpu->arch.guest_gprs, &regs->gprs, sizeof(regs->gprs));
        vcpu_put(vcpu);
        return 0;
}

int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
        vcpu_load(vcpu);
        memcpy(&regs->gprs, &vcpu->arch.guest_gprs, sizeof(regs->gprs));
        vcpu_put(vcpu);
        return 0;
}

int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
                                  struct kvm_sregs *sregs)
{
        vcpu_load(vcpu);
        memcpy(&vcpu->arch.guest_acrs, &sregs->acrs, sizeof(sregs->acrs));
        memcpy(&vcpu->arch.sie_block->gcr, &sregs->crs, sizeof(sregs->crs));
        vcpu_put(vcpu);
        return 0;
}

int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
                                  struct kvm_sregs *sregs)
{
        vcpu_load(vcpu);
        memcpy(&sregs->acrs, &vcpu->arch.guest_acrs, sizeof(sregs->acrs));
        memcpy(&sregs->crs, &vcpu->arch.sie_block->gcr, sizeof(sregs->crs));
        vcpu_put(vcpu);
        return 0;
}

int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
        vcpu_load(vcpu);
        memcpy(&vcpu->arch.guest_fpregs.fprs, &fpu->fprs, sizeof(fpu->fprs));
        vcpu->arch.guest_fpregs.fpc = fpu->fpc;
        vcpu_put(vcpu);
        return 0;
}

int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
        vcpu_load(vcpu);
        memcpy(&fpu->fprs, &vcpu->arch.guest_fpregs.fprs, sizeof(fpu->fprs));
        fpu->fpc = vcpu->arch.guest_fpregs.fpc;
        vcpu_put(vcpu);
        return 0;
}

static int kvm_arch_vcpu_ioctl_set_initial_psw(struct kvm_vcpu *vcpu, psw_t psw)
{
        int rc = 0;

        vcpu_load(vcpu);
        if (atomic_read(&vcpu->arch.sie_block->cpuflags) & CPUSTAT_RUNNING)
                rc = -EBUSY;
        else {
                vcpu->run->psw_mask = psw.mask;
                vcpu->run->psw_addr = psw.addr;
        }
        vcpu_put(vcpu);
        return rc;
}

int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
                                  struct kvm_translation *tr)
{
        return -EINVAL; /* not implemented yet */
}

int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
                                        struct kvm_guest_debug *dbg)
{
        return -EINVAL; /* not implemented yet */
}

int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
                                    struct kvm_mp_state *mp_state)
{
        return -EINVAL; /* not implemented yet */
}

int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
                                    struct kvm_mp_state *mp_state)
{
        return -EINVAL; /* not implemented yet */
}

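/*
 * Run the vcpu in SIE until the next intercept: deliver pending
 * interrupts, enter guest context and call the sie64a() entry code,
 * then report the intercept code on exit.
 */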
static void __vcpu_run(struct kvm_vcpu *vcpu)
{
        memcpy(&vcpu->arch.sie_block->gg14, &vcpu->arch.guest_gprs[14], 16);

        if (need_resched())
                schedule();

        if (test_thread_flag(TIF_MCCK_PENDING))
                s390_handle_mcck();

        kvm_s390_deliver_pending_interrupts(vcpu);

        vcpu->arch.sie_block->icptcode = 0;
        local_irq_disable();
        kvm_guest_enter();
        local_irq_enable();
        VCPU_EVENT(vcpu, 6, "entering sie flags %x",
                   atomic_read(&vcpu->arch.sie_block->cpuflags));
        if (sie64a(vcpu->arch.sie_block, vcpu->arch.guest_gprs)) {
                VCPU_EVENT(vcpu, 3, "%s", "fault in sie instruction");
                kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
        }
        VCPU_EVENT(vcpu, 6, "exit sie icptcode %d",
                   vcpu->arch.sie_block->icptcode);
        local_irq_disable();
        kvm_guest_exit();
        local_irq_enable();

        memcpy(&vcpu->arch.guest_gprs[14], &vcpu->arch.sie_block->gg14, 16);
}

int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
{
        int rc;
        sigset_t sigsaved;

        vcpu_load(vcpu);

rerun_vcpu:
        if (vcpu->requests)
                if (test_and_clear_bit(KVM_REQ_MMU_RELOAD, &vcpu->requests))
                        kvm_s390_vcpu_set_mem(vcpu);

        /* verify that memory has been registered */
        if (!vcpu->arch.sie_block->gmslm) {
                vcpu_put(vcpu);
                VCPU_EVENT(vcpu, 3, "%s", "no memory registered to run vcpu");
                return -EINVAL;
        }

        if (vcpu->sigset_active)
                sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved);

        atomic_set_mask(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags);

        BUG_ON(vcpu->kvm->arch.float_int.local_int[vcpu->vcpu_id] == NULL);

        switch (kvm_run->exit_reason) {
        case KVM_EXIT_S390_SIEIC:
        case KVM_EXIT_UNKNOWN:
        case KVM_EXIT_INTR:
        case KVM_EXIT_S390_RESET:
                break;
        default:
                BUG();
        }

        vcpu->arch.sie_block->gpsw.mask = kvm_run->psw_mask;
        vcpu->arch.sie_block->gpsw.addr = kvm_run->psw_addr;

        might_fault();

        do {
                __vcpu_run(vcpu);
                rc = kvm_handle_sie_intercept(vcpu);
        } while (!signal_pending(current) && !rc);

        if (rc == SIE_INTERCEPT_RERUNVCPU)
                goto rerun_vcpu;

        if (signal_pending(current) && !rc) {
                kvm_run->exit_reason = KVM_EXIT_INTR;
                rc = -EINTR;
        }

        if (rc == -EOPNOTSUPP) {
                /* intercept cannot be handled in-kernel, prepare kvm-run */
                kvm_run->exit_reason         = KVM_EXIT_S390_SIEIC;
                kvm_run->s390_sieic.icptcode = vcpu->arch.sie_block->icptcode;
                kvm_run->s390_sieic.ipa      = vcpu->arch.sie_block->ipa;
                kvm_run->s390_sieic.ipb      = vcpu->arch.sie_block->ipb;
                rc = 0;
        }

        if (rc == -EREMOTE) {
                /* intercept was handled, but userspace support is needed;
                 * kvm_run has been prepared by the handler */
                rc = 0;
        }

        kvm_run->psw_mask     = vcpu->arch.sie_block->gpsw.mask;
        kvm_run->psw_addr     = vcpu->arch.sie_block->gpsw.addr;

        if (vcpu->sigset_active)
                sigprocmask(SIG_SETMASK, &sigsaved, NULL);

        vcpu_put(vcpu);

        vcpu->stat.exit_userspace++;
        return rc;
}

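/*
 * Helper for store status: copy data to guest memory either with the
 * guest prefix applied (copy_to_guest) or to absolute guest storage
 * (copy_to_guest_absolute).
 */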
static int __guestcopy(struct kvm_vcpu *vcpu, u64 guestdest, const void *from,
                       unsigned long n, int prefix)
{
        if (prefix)
                return copy_to_guest(vcpu, guestdest, from, n);
        else
                return copy_to_guest_absolute(vcpu, guestdest, from, n);
}

/*
 * store status at address
 * we have two special cases:
 * KVM_S390_STORE_STATUS_NOADDR: -> 0x1200 on 64 bit
 * KVM_S390_STORE_STATUS_PREFIXED: -> prefix
 */
int __kvm_s390_vcpu_store_status(struct kvm_vcpu *vcpu, unsigned long addr)
{
        const unsigned char archmode = 1;
        int prefix;

        if (addr == KVM_S390_STORE_STATUS_NOADDR) {
                if (copy_to_guest_absolute(vcpu, 163ul, &archmode, 1))
                        return -EFAULT;
                addr = SAVE_AREA_BASE;
                prefix = 0;
        } else if (addr == KVM_S390_STORE_STATUS_PREFIXED) {
                if (copy_to_guest(vcpu, 163ul, &archmode, 1))
                        return -EFAULT;
                addr = SAVE_AREA_BASE;
                prefix = 1;
        } else
                prefix = 0;

        if (__guestcopy(vcpu, addr + offsetof(struct save_area, fp_regs),
                        vcpu->arch.guest_fpregs.fprs, 128, prefix))
                return -EFAULT;

        if (__guestcopy(vcpu, addr + offsetof(struct save_area, gp_regs),
                        vcpu->arch.guest_gprs, 128, prefix))
                return -EFAULT;

        if (__guestcopy(vcpu, addr + offsetof(struct save_area, psw),
                        &vcpu->arch.sie_block->gpsw, 16, prefix))
                return -EFAULT;

        if (__guestcopy(vcpu, addr + offsetof(struct save_area, pref_reg),
                        &vcpu->arch.sie_block->prefix, 4, prefix))
                return -EFAULT;

        if (__guestcopy(vcpu,
                        addr + offsetof(struct save_area, fp_ctrl_reg),
                        &vcpu->arch.guest_fpregs.fpc, 4, prefix))
                return -EFAULT;

        if (__guestcopy(vcpu, addr + offsetof(struct save_area, tod_reg),
                        &vcpu->arch.sie_block->todpr, 4, prefix))
                return -EFAULT;

        if (__guestcopy(vcpu, addr + offsetof(struct save_area, timer),
                        &vcpu->arch.sie_block->cputm, 8, prefix))
                return -EFAULT;

        if (__guestcopy(vcpu, addr + offsetof(struct save_area, clk_cmp),
                        &vcpu->arch.sie_block->ckc, 8, prefix))
                return -EFAULT;

        if (__guestcopy(vcpu, addr + offsetof(struct save_area, acc_regs),
                        &vcpu->arch.guest_acrs, 64, prefix))
                return -EFAULT;

        if (__guestcopy(vcpu,
                        addr + offsetof(struct save_area, ctrl_regs),
                        &vcpu->arch.sie_block->gcr, 128, prefix))
                return -EFAULT;
        return 0;
}

static int kvm_s390_vcpu_store_status(struct kvm_vcpu *vcpu, unsigned long addr)
{
        int rc;

        vcpu_load(vcpu);
        rc = __kvm_s390_vcpu_store_status(vcpu, addr);
        vcpu_put(vcpu);
        return rc;
}

long kvm_arch_vcpu_ioctl(struct file *filp,
                         unsigned int ioctl, unsigned long arg)
{
        struct kvm_vcpu *vcpu = filp->private_data;
        void __user *argp = (void __user *)arg;

        switch (ioctl) {
        case KVM_S390_INTERRUPT: {
                struct kvm_s390_interrupt s390int;

                if (copy_from_user(&s390int, argp, sizeof(s390int)))
                        return -EFAULT;
                return kvm_s390_inject_vcpu(vcpu, &s390int);
        }
        case KVM_S390_STORE_STATUS:
                return kvm_s390_vcpu_store_status(vcpu, arg);
        case KVM_S390_SET_INITIAL_PSW: {
                psw_t psw;

                if (copy_from_user(&psw, argp, sizeof(psw)))
                        return -EFAULT;
                return kvm_arch_vcpu_ioctl_set_initial_psw(vcpu, psw);
        }
        case KVM_S390_INITIAL_RESET:
                return kvm_arch_vcpu_ioctl_initial_reset(vcpu);
        default:
                ;
        }
        return -EINVAL;
}

/* Section: memory related */
int kvm_arch_prepare_memory_region(struct kvm *kvm,
                                   struct kvm_memory_slot *memslot,
                                   struct kvm_memory_slot old,
                                   struct kvm_userspace_memory_region *mem,
                                   int user_alloc)
{
        /* A few sanity checks. We can have exactly one memory slot, which
           has to start at guest virtual zero and has to begin and end at a
           page boundary in userland. The userland memory may be fragmented
           into several different vmas, and it is fine to mmap() and munmap()
           ranges within this slot at any time after this call. */

        if (mem->slot)
                return -EINVAL;

        if (mem->guest_phys_addr)
                return -EINVAL;

        if (mem->userspace_addr & (PAGE_SIZE - 1))
                return -EINVAL;

        if (mem->memory_size & (PAGE_SIZE - 1))
                return -EINVAL;

        if (!user_alloc)
                return -EINVAL;

        return 0;
}

void kvm_arch_commit_memory_region(struct kvm *kvm,
                                struct kvm_userspace_memory_region *mem,
                                struct kvm_memory_slot old,
                                int user_alloc)
{
        int i;
        struct kvm_vcpu *vcpu;

        /* request update of sie control block for all available vcpus */
        kvm_for_each_vcpu(i, vcpu, kvm) {
                if (test_and_set_bit(KVM_REQ_MMU_RELOAD, &vcpu->requests))
                        continue;
                kvm_s390_inject_sigp_stop(vcpu, ACTION_RELOADVCPU_ON_STOP);
        }
}

void kvm_arch_flush_shadow(struct kvm *kvm)
{
}

gfn_t unalias_gfn(struct kvm *kvm, gfn_t gfn)
{
        return gfn;
}

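/* Module init: register with common KVM code and build the facility list. */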
static int __init kvm_s390_init(void)
{
        int ret;
        ret = kvm_init(NULL, sizeof(struct kvm_vcpu), THIS_MODULE);
        if (ret)
                return ret;

        /*
         * guests can ask for up to 255+1 double words, so we need a full
         * page to hold the maximum number of facilities. On the other hand,
         * we only set facilities that are known to work in KVM.
         */
        facilities = (unsigned long long *) get_zeroed_page(GFP_DMA);
        if (!facilities) {
                kvm_exit();
                return -ENOMEM;
        }
        stfle(facilities, 1);
        facilities[0] &= 0xff00fff3f0700000ULL;
        return 0;
}

static void __exit kvm_s390_exit(void)
{
        free_page((unsigned long) facilities);
        kvm_exit();
}

module_init(kvm_s390_init);
module_exit(kvm_s390_exit);