KVM: s390: ucontrol: export SIE control block to user
[linux-2.6.git] / arch / s390 / kvm / kvm-s390.c
1 /*
2  * s390host.c --  hosting zSeries kernel virtual machines
3  *
4  * Copyright IBM Corp. 2008,2009
5  *
6  * This program is free software; you can redistribute it and/or modify
7  * it under the terms of the GNU General Public License (version 2 only)
8  * as published by the Free Software Foundation.
9  *
10  *    Author(s): Carsten Otte <cotte@de.ibm.com>
11  *               Christian Borntraeger <borntraeger@de.ibm.com>
12  *               Heiko Carstens <heiko.carstens@de.ibm.com>
13  *               Christian Ehrhardt <ehrhardt@de.ibm.com>
14  */
15
16 #include <linux/compiler.h>
17 #include <linux/err.h>
18 #include <linux/fs.h>
19 #include <linux/hrtimer.h>
20 #include <linux/init.h>
21 #include <linux/kvm.h>
22 #include <linux/kvm_host.h>
23 #include <linux/module.h>
24 #include <linux/slab.h>
25 #include <linux/timer.h>
26 #include <asm/asm-offsets.h>
27 #include <asm/lowcore.h>
28 #include <asm/pgtable.h>
29 #include <asm/nmi.h>
30 #include <asm/system.h>
31 #include "kvm-s390.h"
32 #include "gaccess.h"
33
/*
 * VCPU_STAT(x) expands to the two initializers a kvm_stats_debugfs_item
 * needs: the offset of stat.x inside struct kvm_vcpu and the
 * KVM_STAT_VCPU type tag.
 */
34 #define VCPU_STAT(x) offsetof(struct kvm_vcpu, stat.x), KVM_STAT_VCPU
35
/*
 * Per-vcpu event counters exported by KVM common code via debugfs.
 * The table is terminated by the { NULL } sentinel entry.
 */
36 struct kvm_stats_debugfs_item debugfs_entries[] = {
37         { "userspace_handled", VCPU_STAT(exit_userspace) },
38         { "exit_null", VCPU_STAT(exit_null) },
39         { "exit_validity", VCPU_STAT(exit_validity) },
40         { "exit_stop_request", VCPU_STAT(exit_stop_request) },
41         { "exit_external_request", VCPU_STAT(exit_external_request) },
42         { "exit_external_interrupt", VCPU_STAT(exit_external_interrupt) },
43         { "exit_instruction", VCPU_STAT(exit_instruction) },
44         { "exit_program_interruption", VCPU_STAT(exit_program_interruption) },
45         { "exit_instr_and_program_int", VCPU_STAT(exit_instr_and_program) },
46         { "instruction_lctlg", VCPU_STAT(instruction_lctlg) },
47         { "instruction_lctl", VCPU_STAT(instruction_lctl) },
48         { "deliver_emergency_signal", VCPU_STAT(deliver_emergency_signal) },
49         { "deliver_external_call", VCPU_STAT(deliver_external_call) },
50         { "deliver_service_signal", VCPU_STAT(deliver_service_signal) },
51         { "deliver_virtio_interrupt", VCPU_STAT(deliver_virtio_interrupt) },
52         { "deliver_stop_signal", VCPU_STAT(deliver_stop_signal) },
53         { "deliver_prefix_signal", VCPU_STAT(deliver_prefix_signal) },
54         { "deliver_restart_signal", VCPU_STAT(deliver_restart_signal) },
55         { "deliver_program_interruption", VCPU_STAT(deliver_program_int) },
56         { "exit_wait_state", VCPU_STAT(exit_wait_state) },
57         { "instruction_stidp", VCPU_STAT(instruction_stidp) },
58         { "instruction_spx", VCPU_STAT(instruction_spx) },
59         { "instruction_stpx", VCPU_STAT(instruction_stpx) },
60         { "instruction_stap", VCPU_STAT(instruction_stap) },
61         { "instruction_storage_key", VCPU_STAT(instruction_storage_key) },
62         { "instruction_stsch", VCPU_STAT(instruction_stsch) },
63         { "instruction_chsc", VCPU_STAT(instruction_chsc) },
64         { "instruction_stsi", VCPU_STAT(instruction_stsi) },
65         { "instruction_stfl", VCPU_STAT(instruction_stfl) },
66         { "instruction_tprot", VCPU_STAT(instruction_tprot) },
67         { "instruction_sigp_sense", VCPU_STAT(instruction_sigp_sense) },
68         { "instruction_sigp_sense_running", VCPU_STAT(instruction_sigp_sense_running) },
69         { "instruction_sigp_external_call", VCPU_STAT(instruction_sigp_external_call) },
70         { "instruction_sigp_emergency", VCPU_STAT(instruction_sigp_emergency) },
71         { "instruction_sigp_stop", VCPU_STAT(instruction_sigp_stop) },
72         { "instruction_sigp_set_arch", VCPU_STAT(instruction_sigp_arch) },
73         { "instruction_sigp_set_prefix", VCPU_STAT(instruction_sigp_prefix) },
74         { "instruction_sigp_restart", VCPU_STAT(instruction_sigp_restart) },
75         { "diagnose_10", VCPU_STAT(diagnose_10) },
76         { "diagnose_44", VCPU_STAT(diagnose_44) },
77         { NULL }
78 };
79
/*
 * DMA page holding the host facility list (STFLE data), masked in
 * kvm_s390_init() to the facility bits presented to guests.
 */
80 static unsigned long long *facilities;
81
82 /* Section: not file related */
/*
 * SIE needs no explicit enablement on s390, so the hardware
 * setup/teardown and compatibility hooks required by KVM common code
 * are all no-ops here.
 */
83 int kvm_arch_hardware_enable(void *garbage)
84 {
85         /* every s390 is virtualization enabled ;-) */
86         return 0;
87 }
88
89 void kvm_arch_hardware_disable(void *garbage)
90 {
91 }
92
93 int kvm_arch_hardware_setup(void)
94 {
95         return 0;
96 }
97
98 void kvm_arch_hardware_unsetup(void)
99 {
100 }
101
102 void kvm_arch_check_processor_compat(void *rtn)
103 {
104 }
105
106 int kvm_arch_init(void *opaque)
107 {
108         return 0;
109 }
110
111 void kvm_arch_exit(void)
112 {
113 }
114
115 /* Section: device related */
116 long kvm_arch_dev_ioctl(struct file *filp,
117                         unsigned int ioctl, unsigned long arg)
118 {
119         if (ioctl == KVM_S390_ENABLE_SIE)
120                 return s390_enable_sie();
121         return -EINVAL;
122 }
123
124 int kvm_dev_ioctl_check_extension(long ext)
125 {
126         int r;
127
128         switch (ext) {
129         case KVM_CAP_S390_PSW:
130         case KVM_CAP_S390_GMAP:
131         case KVM_CAP_SYNC_MMU:
132                 r = 1;
133                 break;
134         default:
135                 r = 0;
136         }
137         return r;
138 }
139
140 /* Section: vm related */
141 /*
142  * Get (and clear) the dirty memory log for a memory slot.
143  */
/* Dirty page logging is not implemented on s390 here: report success
 * without filling in the log. */
144 int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
145                                struct kvm_dirty_log *log)
146 {
147         return 0;
148 }
149
150 long kvm_arch_vm_ioctl(struct file *filp,
151                        unsigned int ioctl, unsigned long arg)
152 {
153         struct kvm *kvm = filp->private_data;
154         void __user *argp = (void __user *)arg;
155         int r;
156
157         switch (ioctl) {
158         case KVM_S390_INTERRUPT: {
159                 struct kvm_s390_interrupt s390int;
160
161                 r = -EFAULT;
162                 if (copy_from_user(&s390int, argp, sizeof(s390int)))
163                         break;
164                 r = kvm_s390_inject_vm(kvm, &s390int);
165                 break;
166         }
167         default:
168                 r = -ENOTTY;
169         }
170
171         return r;
172 }
173
174 int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
175 {
176         int rc;
177         char debug_name[16];
178
179         rc = -EINVAL;
180 #ifdef CONFIG_KVM_S390_UCONTROL
181         if (type & ~KVM_VM_S390_UCONTROL)
182                 goto out_err;
183         if ((type & KVM_VM_S390_UCONTROL) && (!capable(CAP_SYS_ADMIN)))
184                 goto out_err;
185 #else
186         if (type)
187                 goto out_err;
188 #endif
189
190         rc = s390_enable_sie();
191         if (rc)
192                 goto out_err;
193
194         rc = -ENOMEM;
195
196         kvm->arch.sca = (struct sca_block *) get_zeroed_page(GFP_KERNEL);
197         if (!kvm->arch.sca)
198                 goto out_err;
199
200         sprintf(debug_name, "kvm-%u", current->pid);
201
202         kvm->arch.dbf = debug_register(debug_name, 8, 2, 8 * sizeof(long));
203         if (!kvm->arch.dbf)
204                 goto out_nodbf;
205
206         spin_lock_init(&kvm->arch.float_int.lock);
207         INIT_LIST_HEAD(&kvm->arch.float_int.list);
208
209         debug_register_view(kvm->arch.dbf, &debug_sprintf_view);
210         VM_EVENT(kvm, 3, "%s", "vm created");
211
212         if (type & KVM_VM_S390_UCONTROL) {
213                 kvm->arch.gmap = NULL;
214         } else {
215                 kvm->arch.gmap = gmap_alloc(current->mm);
216                 if (!kvm->arch.gmap)
217                         goto out_nogmap;
218         }
219         return 0;
220 out_nogmap:
221         debug_unregister(kvm->arch.dbf);
222 out_nodbf:
223         free_page((unsigned long)(kvm->arch.sca));
224 out_err:
225         return rc;
226 }
227
/*
 * Free one vcpu: withdraw it from the system control area, release its
 * (ucontrol-private) gmap, its SIE control block and the vcpu itself.
 */
228 void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
229 {
230         VCPU_EVENT(vcpu, 3, "%s", "free cpu");
        /* clear this cpu's MCN bit and, if its sda slot still points at
         * our SIE block, zero that entry as well */
231         clear_bit(63 - vcpu->vcpu_id, (unsigned long *) &vcpu->kvm->arch.sca->mcn);
232         if (vcpu->kvm->arch.sca->cpu[vcpu->vcpu_id].sda ==
233                 (__u64) vcpu->arch.sie_block)
234                 vcpu->kvm->arch.sca->cpu[vcpu->vcpu_id].sda = 0;
        /* make the SCA updates globally visible before freeing the block */
235         smp_mb();
236
        /* ucontrol vcpus own a private gmap (see kvm_arch_vcpu_init) */
237         if (kvm_is_ucontrol(vcpu->kvm))
238                 gmap_free(vcpu->arch.gmap);
239
240         free_page((unsigned long)(vcpu->arch.sie_block));
241         kvm_vcpu_uninit(vcpu);
242         kfree(vcpu);
243 }
244
/* Destroy all vcpus of a VM during VM teardown. */
245 static void kvm_free_vcpus(struct kvm *kvm)
246 {
247         unsigned int i;
248         struct kvm_vcpu *vcpu;
249
250         kvm_for_each_vcpu(i, vcpu, kvm)
251                 kvm_arch_vcpu_destroy(vcpu);
252
        /* clear the vcpu table and the online count under kvm->lock so
         * concurrent lookups cannot see destroyed vcpus */
253         mutex_lock(&kvm->lock);
254         for (i = 0; i < atomic_read(&kvm->online_vcpus); i++)
255                 kvm->vcpus[i] = NULL;
256
257         atomic_set(&kvm->online_vcpus, 0);
258         mutex_unlock(&kvm->lock);
259 }
260
/* No arch-specific event syncing is required on s390. */
261 void kvm_arch_sync_events(struct kvm *kvm)
262 {
263 }
264
/*
 * Tear down the arch part of a VM: vcpus first (they reference the
 * SCA), then the SCA page and the debug feature.  User controlled VMs
 * have no VM-wide gmap (arch.gmap is NULL there), so skip gmap_free.
 */
265 void kvm_arch_destroy_vm(struct kvm *kvm)
266 {
267         kvm_free_vcpus(kvm);
268         free_page((unsigned long)(kvm->arch.sca));
269         debug_unregister(kvm->arch.dbf);
270         if (!kvm_is_ucontrol(kvm))
271                 gmap_free(kvm->arch.gmap);
272 }
273
274 /* Section: vcpu related */
275 int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
276 {
277         if (kvm_is_ucontrol(vcpu->kvm)) {
278                 vcpu->arch.gmap = gmap_alloc(current->mm);
279                 if (!vcpu->arch.gmap)
280                         return -ENOMEM;
281                 return 0;
282         }
283
284         vcpu->arch.gmap = vcpu->kvm->arch.gmap;
285         return 0;
286 }
287
288 void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu)
289 {
290         /* Nothing todo */
291 }
292
/*
 * Scheduled in on a host cpu: save host FP/access registers, install
 * the guest's, activate the guest address space and flag the vcpu as
 * running in its SIE control block.
 */
293 void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
294 {
295         save_fp_regs(&vcpu->arch.host_fpregs);
296         save_access_regs(vcpu->arch.host_acrs);
        /* strip invalid FPC bits before loading guest FP state */
297         vcpu->arch.guest_fpregs.fpc &= FPC_VALID_MASK;
298         restore_fp_regs(&vcpu->arch.guest_fpregs);
299         restore_access_regs(vcpu->arch.guest_acrs);
300         gmap_enable(vcpu->arch.gmap);
301         atomic_set_mask(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags);
302 }
303
/*
 * Inverse of kvm_arch_vcpu_load(): stash the guest register state and
 * restore the host's when the vcpu is scheduled out.
 */
304 void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
305 {
306         atomic_clear_mask(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags);
307         gmap_disable(vcpu->arch.gmap);
308         save_fp_regs(&vcpu->arch.guest_fpregs);
309         save_access_regs(vcpu->arch.guest_acrs);
310         restore_fp_regs(&vcpu->arch.host_fpregs);
311         restore_access_regs(vcpu->arch.host_acrs);
312 }
313
/*
 * Reset vcpu state to the architected initial cpu reset values, except
 * that the cpu stays in z/Architecture mode (no switch to ESA).
 */
314 static void kvm_s390_vcpu_initial_reset(struct kvm_vcpu *vcpu)
315 {
316         /* this equals initial cpu reset in pop, but we don't switch to ESA */
317         vcpu->arch.sie_block->gpsw.mask = 0UL;
318         vcpu->arch.sie_block->gpsw.addr = 0UL;
319         vcpu->arch.sie_block->prefix    = 0UL;
320         vcpu->arch.sie_block->ihcpu     = 0xffff;
321         vcpu->arch.sie_block->cputm     = 0UL;
322         vcpu->arch.sie_block->ckc       = 0UL;
323         vcpu->arch.sie_block->todpr     = 0;
324         memset(vcpu->arch.sie_block->gcr, 0, 16 * sizeof(__u64));
        /* architected reset values for CR0 and CR14 -- per POP initial
         * cpu reset definitions */
325         vcpu->arch.sie_block->gcr[0]  = 0xE0UL;
326         vcpu->arch.sie_block->gcr[14] = 0xC2000000UL;
327         vcpu->arch.guest_fpregs.fpc = 0;
        /* load the cleared FPC into the hardware fp control register */
328         asm volatile("lfpc %0" : : "Q" (vcpu->arch.guest_fpregs.fpc));
329         vcpu->arch.sie_block->gbea = 1;
330 }
331
/*
 * One-time setup of the SIE control block and timers after vcpu
 * allocation.  Always returns 0.
 */
332 int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
333 {
334         atomic_set(&vcpu->arch.sie_block->cpuflags, CPUSTAT_ZARCH |
335                                                     CPUSTAT_SM |
336                                                     CPUSTAT_STOPPED);
        /* SIE execution control bits -- NOTE(review): magic values,
         * confirm exact bit meanings against the SIE block layout */
337         vcpu->arch.sie_block->ecb   = 6;
338         vcpu->arch.sie_block->eca   = 0xC1002001U;
        /* point SIE at the DMA facility list built in kvm_s390_init() */
339         vcpu->arch.sie_block->fac   = (int) (long) facilities;
        /* hrtimer + tasklet implement the guest clock comparator wakeup */
340         hrtimer_init(&vcpu->arch.ckc_timer, CLOCK_REALTIME, HRTIMER_MODE_ABS);
341         tasklet_init(&vcpu->arch.tasklet, kvm_s390_tasklet,
342                      (unsigned long) vcpu);
343         vcpu->arch.ckc_timer.function = kvm_s390_idle_wakeup;
344         get_cpu_id(&vcpu->arch.cpu_id);
        /* advertise a constant cpu version to the guest */
345         vcpu->arch.cpu_id.version = 0xff;
346         return 0;
347 }
348
/*
 * Allocate and wire up a new vcpu: SIE control block, entry in the
 * system control area and local interrupt state.
 * Returns the vcpu or an ERR_PTR on failure.
 */
349 struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm,
350                                       unsigned int id)
351 {
352         struct kvm_vcpu *vcpu;
353         int rc = -EINVAL;
354
355         if (id >= KVM_MAX_VCPUS)
356                 goto out;
357
358         rc = -ENOMEM;
359
360         vcpu = kzalloc(sizeof(struct kvm_vcpu), GFP_KERNEL);
361         if (!vcpu)
362                 goto out;
363
        /* the SIE control block gets a zeroed page of its own */
364         vcpu->arch.sie_block = (struct kvm_s390_sie_block *)
365                                         get_zeroed_page(GFP_KERNEL);
366
367         if (!vcpu->arch.sie_block)
368                 goto out_free_cpu;
369
370         vcpu->arch.sie_block->icpua = id;
371         BUG_ON(!kvm->arch.sca);
        /* register the SIE block in the SCA: sda slot (only if still
         * free), SCA origin high/low words, and this cpu's MCN bit */
372         if (!kvm->arch.sca->cpu[id].sda)
373                 kvm->arch.sca->cpu[id].sda = (__u64) vcpu->arch.sie_block;
374         vcpu->arch.sie_block->scaoh = (__u32)(((__u64)kvm->arch.sca) >> 32);
375         vcpu->arch.sie_block->scaol = (__u32)(__u64)kvm->arch.sca;
376         set_bit(63 - id, (unsigned long *) &kvm->arch.sca->mcn);
377
378         spin_lock_init(&vcpu->arch.local_int.lock);
379         INIT_LIST_HEAD(&vcpu->arch.local_int.list);
380         vcpu->arch.local_int.float_int = &kvm->arch.float_int;
        /* publish the local interrupt struct under the float_int lock */
381         spin_lock(&kvm->arch.float_int.lock);
382         kvm->arch.float_int.local_int[id] = &vcpu->arch.local_int;
383         init_waitqueue_head(&vcpu->arch.local_int.wq);
384         vcpu->arch.local_int.cpuflags = &vcpu->arch.sie_block->cpuflags;
385         spin_unlock(&kvm->arch.float_int.lock);
386
387         rc = kvm_vcpu_init(vcpu, kvm, id);
388         if (rc)
389                 goto out_free_sie_block;
390         VM_EVENT(kvm, 3, "create cpu %d at %p, sie block at %p", id, vcpu,
391                  vcpu->arch.sie_block);
392
393         return vcpu;
394 out_free_sie_block:
395         free_page((unsigned long)(vcpu->arch.sie_block));
396 out_free_cpu:
397         kfree(vcpu);
398 out:
399         return ERR_PTR(rc);
400 }
401
/* KVM common code declares this hook but never invokes it on s390. */
402 int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
403 {
404         /* kvm common code refers to this, but never calls it */
405         BUG();
406         return 0;
407 }
408
/* Handler for the KVM_S390_INITIAL_RESET vcpu ioctl. */
409 static int kvm_arch_vcpu_ioctl_initial_reset(struct kvm_vcpu *vcpu)
410 {
411         kvm_s390_vcpu_initial_reset(vcpu);
412         return 0;
413 }
414
/* Copy the general purpose register file from userspace into the vcpu. */
415 int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
416 {
417         memcpy(&vcpu->arch.guest_gprs, &regs->gprs, sizeof(regs->gprs));
418         return 0;
419 }
420
/* Copy the general purpose register file from the vcpu to userspace. */
421 int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
422 {
423         memcpy(&regs->gprs, &vcpu->arch.guest_gprs, sizeof(regs->gprs));
424         return 0;
425 }
426
/*
 * Set access and control registers.  The access registers are loaded
 * into the hardware immediately so they are live for this context.
 */
427 int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
428                                   struct kvm_sregs *sregs)
429 {
430         memcpy(&vcpu->arch.guest_acrs, &sregs->acrs, sizeof(sregs->acrs));
431         memcpy(&vcpu->arch.sie_block->gcr, &sregs->crs, sizeof(sregs->crs));
432         restore_access_regs(vcpu->arch.guest_acrs);
433         return 0;
434 }
435
/* Read back access and control registers. */
436 int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
437                                   struct kvm_sregs *sregs)
438 {
439         memcpy(&sregs->acrs, &vcpu->arch.guest_acrs, sizeof(sregs->acrs));
440         memcpy(&sregs->crs, &vcpu->arch.sie_block->gcr, sizeof(sregs->crs));
441         return 0;
442 }
443
/*
 * Set floating point registers and control; the FP state is restored
 * into the hardware registers immediately.
 */
444 int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
445 {
446         memcpy(&vcpu->arch.guest_fpregs.fprs, &fpu->fprs, sizeof(fpu->fprs));
447         vcpu->arch.guest_fpregs.fpc = fpu->fpc;
448         restore_fp_regs(&vcpu->arch.guest_fpregs);
449         return 0;
450 }
451
/* Read back floating point registers and control. */
452 int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
453 {
454         memcpy(&fpu->fprs, &vcpu->arch.guest_fpregs.fprs, sizeof(fpu->fprs));
455         fpu->fpc = vcpu->arch.guest_fpregs.fpc;
456         return 0;
457 }
458
459 static int kvm_arch_vcpu_ioctl_set_initial_psw(struct kvm_vcpu *vcpu, psw_t psw)
460 {
461         int rc = 0;
462
463         if (!(atomic_read(&vcpu->arch.sie_block->cpuflags) & CPUSTAT_STOPPED))
464                 rc = -EBUSY;
465         else {
466                 vcpu->run->psw_mask = psw.mask;
467                 vcpu->run->psw_addr = psw.addr;
468         }
469         return rc;
470 }
471
/* The following vcpu ioctl hooks are not implemented on s390. */
472 int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
473                                   struct kvm_translation *tr)
474 {
475         return -EINVAL; /* not implemented yet */
476 }
477
478 int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
479                                         struct kvm_guest_debug *dbg)
480 {
481         return -EINVAL; /* not implemented yet */
482 }
483
484 int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
485                                     struct kvm_mp_state *mp_state)
486 {
487         return -EINVAL; /* not implemented yet */
488 }
489
490 int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
491                                     struct kvm_mp_state *mp_state)
492 {
493         return -EINVAL; /* not implemented yet */
494 }
495
/*
 * Enter SIE once for this vcpu: deliver pending work first, run the
 * guest, and translate a sie64a fault into either a ucontrol intercept
 * code or an addressing program interrupt for the guest.
 */
496 static int __vcpu_run(struct kvm_vcpu *vcpu)
497 {
498         int rc;
499
        /* gprs 14 and 15 are kept in the SIE block (gg14 area) while in SIE */
500         memcpy(&vcpu->arch.sie_block->gg14, &vcpu->arch.guest_gprs[14], 16);
501
502         if (need_resched())
503                 schedule();
504
        /* handle a pending host machine check before entering the guest */
505         if (test_thread_flag(TIF_MCCK_PENDING))
506                 s390_handle_mcck();
507
508         kvm_s390_deliver_pending_interrupts(vcpu);
509
510         vcpu->arch.sie_block->icptcode = 0;
        /* kvm_guest_enter() must run with interrupts disabled */
511         local_irq_disable();
512         kvm_guest_enter();
513         local_irq_enable();
514         VCPU_EVENT(vcpu, 6, "entering sie flags %x",
515                    atomic_read(&vcpu->arch.sie_block->cpuflags));
516         rc = sie64a(vcpu->arch.sie_block, vcpu->arch.guest_gprs);
517         if (rc) {
                /* sie64a faulted: user controlled VMs hand the fault to
                 * userspace; otherwise reflect an addressing exception */
518                 if (kvm_is_ucontrol(vcpu->kvm)) {
519                         rc = SIE_INTERCEPT_UCONTROL;
520                 } else {
521                         VCPU_EVENT(vcpu, 3, "%s", "fault in sie instruction");
522                         kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
523                         rc = 0;
524                 }
525         }
526         VCPU_EVENT(vcpu, 6, "exit sie icptcode %d",
527                    vcpu->arch.sie_block->icptcode);
528         local_irq_disable();
529         kvm_guest_exit();
530         local_irq_enable();
531
        /* fetch gprs 14/15 back out of the SIE block */
532         memcpy(&vcpu->arch.guest_gprs[14], &vcpu->arch.sie_block->gg14, 16);
533         return rc;
534 }
535
/*
 * The KVM_RUN ioctl: loop between SIE execution and in-kernel intercept
 * handling until userspace intervention is needed or a signal arrives.
 * On return, kvm_run carries the exit reason and current PSW.
 */
536 int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
537 {
538         int rc;
539         sigset_t sigsaved;
540
541 rerun_vcpu:
542         if (vcpu->sigset_active)
543                 sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved);
544
545         atomic_clear_mask(CPUSTAT_STOPPED, &vcpu->arch.sie_block->cpuflags);
546
547         BUG_ON(vcpu->kvm->arch.float_int.local_int[vcpu->vcpu_id] == NULL);
548
        /* only re-entry from exit reasons we produced ourselves is sane */
549         switch (kvm_run->exit_reason) {
550         case KVM_EXIT_S390_SIEIC:
551         case KVM_EXIT_UNKNOWN:
552         case KVM_EXIT_INTR:
553         case KVM_EXIT_S390_RESET:
554         case KVM_EXIT_S390_UCONTROL:
555                 break;
556         default:
557                 BUG();
558         }
559
        /* userspace owns the PSW between exits; reload it into SIE */
560         vcpu->arch.sie_block->gpsw.mask = kvm_run->psw_mask;
561         vcpu->arch.sie_block->gpsw.addr = kvm_run->psw_addr;
562
563         might_fault();
564
565         do {
566                 rc = __vcpu_run(vcpu);
567                 if (rc)
568                         break;
569                 rc = kvm_handle_sie_intercept(vcpu);
570         } while (!signal_pending(current) && !rc);
571
572         if (rc == SIE_INTERCEPT_RERUNVCPU)
573                 goto rerun_vcpu;
574
575         if (signal_pending(current) && !rc) {
576                 kvm_run->exit_reason = KVM_EXIT_INTR;
577                 rc = -EINTR;
578         }
579
580 #ifdef CONFIG_KVM_S390_UCONTROL
        /* hand SIE faults of user controlled VMs over to userspace */
581         if (rc == SIE_INTERCEPT_UCONTROL) {
582                 kvm_run->exit_reason = KVM_EXIT_S390_UCONTROL;
583                 kvm_run->s390_ucontrol.trans_exc_code =
584                         current->thread.gmap_addr;
585                 kvm_run->s390_ucontrol.pgm_code = 0x10;
586                 rc = 0;
587         }
588 #endif
589
590         if (rc == -EOPNOTSUPP) {
591                 /* intercept cannot be handled in-kernel, prepare kvm-run */
592                 kvm_run->exit_reason         = KVM_EXIT_S390_SIEIC;
593                 kvm_run->s390_sieic.icptcode = vcpu->arch.sie_block->icptcode;
594                 kvm_run->s390_sieic.ipa      = vcpu->arch.sie_block->ipa;
595                 kvm_run->s390_sieic.ipb      = vcpu->arch.sie_block->ipb;
596                 rc = 0;
597         }
598
599         if (rc == -EREMOTE) {
600                 /* intercept was handled, but userspace support is needed
601                  * kvm_run has been prepared by the handler */
602                 rc = 0;
603         }
604
        /* publish the current PSW back to userspace */
605         kvm_run->psw_mask     = vcpu->arch.sie_block->gpsw.mask;
606         kvm_run->psw_addr     = vcpu->arch.sie_block->gpsw.addr;
607
608         if (vcpu->sigset_active)
609                 sigprocmask(SIG_SETMASK, &sigsaved, NULL);
610
611         vcpu->stat.exit_userspace++;
612         return rc;
613 }
614
615 static int __guestcopy(struct kvm_vcpu *vcpu, u64 guestdest, void *from,
616                        unsigned long n, int prefix)
617 {
618         if (prefix)
619                 return copy_to_guest(vcpu, guestdest, from, n);
620         else
621                 return copy_to_guest_absolute(vcpu, guestdest, from, n);
622 }
623
624 /*
625  * store status at address
626  * we have two special cases:
627  * KVM_S390_STORE_STATUS_NOADDR: -> 0x1200 on 64 bit
628  * KVM_S390_STORE_STATUS_PREFIXED: -> prefix
629  */
/*
 * Write the store-status save area for @vcpu at guest address @addr.
 * The special address values select the default save area (absolute)
 * or the prefix area (logical); in those cases byte 163 additionally
 * receives the architecture mode indicator.  Returns 0 or -EFAULT.
 */
630 int kvm_s390_vcpu_store_status(struct kvm_vcpu *vcpu, unsigned long addr)
631 {
632         unsigned char archmode = 1;
633         int prefix;
634
635         if (addr == KVM_S390_STORE_STATUS_NOADDR) {
636                 if (copy_to_guest_absolute(vcpu, 163ul, &archmode, 1))
637                         return -EFAULT;
638                 addr = SAVE_AREA_BASE;
639                 prefix = 0;
640         } else if (addr == KVM_S390_STORE_STATUS_PREFIXED) {
641                 if (copy_to_guest(vcpu, 163ul, &archmode, 1))
642                         return -EFAULT;
643                 addr = SAVE_AREA_BASE;
644                 prefix = 1;
645         } else
646                 prefix = 0;
647
        /* store each register group at its architected save_area offset */
648         if (__guestcopy(vcpu, addr + offsetof(struct save_area, fp_regs),
649                         vcpu->arch.guest_fpregs.fprs, 128, prefix))
650                 return -EFAULT;
651
652         if (__guestcopy(vcpu, addr + offsetof(struct save_area, gp_regs),
653                         vcpu->arch.guest_gprs, 128, prefix))
654                 return -EFAULT;
655
656         if (__guestcopy(vcpu, addr + offsetof(struct save_area, psw),
657                         &vcpu->arch.sie_block->gpsw, 16, prefix))
658                 return -EFAULT;
659
660         if (__guestcopy(vcpu, addr + offsetof(struct save_area, pref_reg),
661                         &vcpu->arch.sie_block->prefix, 4, prefix))
662                 return -EFAULT;
663
664         if (__guestcopy(vcpu,
665                         addr + offsetof(struct save_area, fp_ctrl_reg),
666                         &vcpu->arch.guest_fpregs.fpc, 4, prefix))
667                 return -EFAULT;
668
669         if (__guestcopy(vcpu, addr + offsetof(struct save_area, tod_reg),
670                         &vcpu->arch.sie_block->todpr, 4, prefix))
671                 return -EFAULT;
672
673         if (__guestcopy(vcpu, addr + offsetof(struct save_area, timer),
674                         &vcpu->arch.sie_block->cputm, 8, prefix))
675                 return -EFAULT;
676
677         if (__guestcopy(vcpu, addr + offsetof(struct save_area, clk_cmp),
678                         &vcpu->arch.sie_block->ckc, 8, prefix))
679                 return -EFAULT;
680
681         if (__guestcopy(vcpu, addr + offsetof(struct save_area, acc_regs),
682                         &vcpu->arch.guest_acrs, 64, prefix))
683                 return -EFAULT;
684
685         if (__guestcopy(vcpu,
686                         addr + offsetof(struct save_area, ctrl_regs),
687                         &vcpu->arch.sie_block->gcr, 128, prefix))
688                 return -EFAULT;
689         return 0;
690 }
691
/*
 * Dispatch vcpu-scoped ioctls: interrupt injection, store status,
 * initial PSW/reset and -- for user controlled VMs -- ucas map/unmap.
 */
692 long kvm_arch_vcpu_ioctl(struct file *filp,
693                          unsigned int ioctl, unsigned long arg)
694 {
695         struct kvm_vcpu *vcpu = filp->private_data;
696         void __user *argp = (void __user *)arg;
697         long r;
698
699         switch (ioctl) {
700         case KVM_S390_INTERRUPT: {
701                 struct kvm_s390_interrupt s390int;
702
703                 r = -EFAULT;
704                 if (copy_from_user(&s390int, argp, sizeof(s390int)))
705                         break;
706                 r = kvm_s390_inject_vcpu(vcpu, &s390int);
707                 break;
708         }
709         case KVM_S390_STORE_STATUS:
                /* arg is the guest address directly, not a user pointer */
710                 r = kvm_s390_vcpu_store_status(vcpu, arg);
711                 break;
712         case KVM_S390_SET_INITIAL_PSW: {
713                 psw_t psw;
714
715                 r = -EFAULT;
716                 if (copy_from_user(&psw, argp, sizeof(psw)))
717                         break;
718                 r = kvm_arch_vcpu_ioctl_set_initial_psw(vcpu, psw);
719                 break;
720         }
721         case KVM_S390_INITIAL_RESET:
722                 r = kvm_arch_vcpu_ioctl_initial_reset(vcpu);
723                 break;
724 #ifdef CONFIG_KVM_S390_UCONTROL
        /* map a range of user memory into this vcpu's guest address space */
725         case KVM_S390_UCAS_MAP: {
726                 struct kvm_s390_ucas_mapping ucasmap;
727
728                 if (copy_from_user(&ucasmap, argp, sizeof(ucasmap))) {
729                         r = -EFAULT;
730                         break;
731                 }
732
733                 if (!kvm_is_ucontrol(vcpu->kvm)) {
734                         r = -EINVAL;
735                         break;
736                 }
737
738                 r = gmap_map_segment(vcpu->arch.gmap, ucasmap.user_addr,
739                                      ucasmap.vcpu_addr, ucasmap.length);
740                 break;
741         }
        /* remove such a mapping again */
742         case KVM_S390_UCAS_UNMAP: {
743                 struct kvm_s390_ucas_mapping ucasmap;
744
745                 if (copy_from_user(&ucasmap, argp, sizeof(ucasmap))) {
746                         r = -EFAULT;
747                         break;
748                 }
749
750                 if (!kvm_is_ucontrol(vcpu->kvm)) {
751                         r = -EINVAL;
752                         break;
753                 }
754
755                 r = gmap_unmap_segment(vcpu->arch.gmap, ucasmap.vcpu_addr,
756                         ucasmap.length);
757                 break;
758         }
759 #endif
760         default:
761                 r = -EINVAL;
762         }
763         return r;
764 }
765
766 int kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf)
767 {
768 #ifdef CONFIG_KVM_S390_UCONTROL
769         if ((vmf->pgoff == KVM_S390_SIE_PAGE_OFFSET)
770                  && (kvm_is_ucontrol(vcpu->kvm))) {
771                 vmf->page = virt_to_page(vcpu->arch.sie_block);
772                 get_page(vmf->page);
773                 return 0;
774         }
775 #endif
776         return VM_FAULT_SIGBUS;
777 }
778
779 /* Section: memory related */
780 int kvm_arch_prepare_memory_region(struct kvm *kvm,
781                                    struct kvm_memory_slot *memslot,
782                                    struct kvm_memory_slot old,
783                                    struct kvm_userspace_memory_region *mem,
784                                    int user_alloc)
785 {
786         /* A few sanity checks. We can have exactly one memory slot which has
787            to start at guest virtual zero and which has to be located at a
788            page boundary in userland and which has to end at a page boundary.
789            The memory in userland is ok to be fragmented into various different
790            vmas. It is okay to mmap() and munmap() stuff in this slot after
791            doing this call at any time */
792
793         if (mem->slot)
794                 return -EINVAL;
795
796         if (mem->guest_phys_addr)
797                 return -EINVAL;
798
799         if (mem->userspace_addr & 0xffffful)
800                 return -EINVAL;
801
802         if (mem->memory_size & 0xffffful)
803                 return -EINVAL;
804
805         if (!user_alloc)
806                 return -EINVAL;
807
808         return 0;
809 }
810
/*
 * Install an accepted memory slot into the guest address space.
 * NOTE(review): for user controlled VMs kvm->arch.gmap is NULL;
 * presumably no memory slots are registered for those -- confirm
 * against the callers before relying on it.
 */
811 void kvm_arch_commit_memory_region(struct kvm *kvm,
812                                 struct kvm_userspace_memory_region *mem,
813                                 struct kvm_memory_slot old,
814                                 int user_alloc)
815 {
816         int rc;
817
818
819         rc = gmap_map_segment(kvm->arch.gmap, mem->userspace_addr,
820                 mem->guest_phys_addr, mem->memory_size);
821         if (rc)
822                 printk(KERN_WARNING "kvm-s390: failed to commit memory region\n");
823         return;
824 }
825
/* No shadow page tables to flush on s390. */
826 void kvm_arch_flush_shadow(struct kvm *kvm)
827 {
828 }
829
/*
 * Module init: register with KVM common code, then build the guest
 * facility list from the host's STFLE data in the lowcore.
 */
830 static int __init kvm_s390_init(void)
831 {
832         int ret;
833         ret = kvm_init(NULL, sizeof(struct kvm_vcpu), 0, THIS_MODULE);
834         if (ret)
835                 return ret;
836
837         /*
838          * guests can ask for up to 255+1 double words, we need a full page
839          * to hold the maximum amount of facilities. On the other hand, we
840          * only set facilities that are known to work in KVM.
841          */
842         facilities = (unsigned long long *) get_zeroed_page(GFP_KERNEL|GFP_DMA);
843         if (!facilities) {
844                 kvm_exit();
845                 return -ENOMEM;
846         }
847         memcpy(facilities, S390_lowcore.stfle_fac_list, 16);
        /* mask down to the facility bits KVM can safely offer to guests */
848         facilities[0] &= 0xff00fff3f47c0000ULL;
849         facilities[1] &= 0x201c000000000000ULL;
850         return 0;
851 }
852
/* Module exit: free the facility page and unregister from KVM. */
853 static void __exit kvm_s390_exit(void)
854 {
855         free_page((unsigned long) facilities);
856         kvm_exit();
857 }
858
859 module_init(kvm_s390_init);
860 module_exit(kvm_s390_exit);