KVM: Introduce kvm_memory_slot::arch and move lpage_info into it
arch/s390/kvm/kvm-s390.c (linux-3.10.git)
/*
 * s390host.c --  hosting zSeries kernel virtual machines
 *
 * Copyright IBM Corp. 2008,2009
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License (version 2 only)
 * as published by the Free Software Foundation.
 *
 *    Author(s): Carsten Otte <cotte@de.ibm.com>
 *               Christian Borntraeger <borntraeger@de.ibm.com>
 *               Heiko Carstens <heiko.carstens@de.ibm.com>
 *               Christian Ehrhardt <ehrhardt@de.ibm.com>
 */

#include <linux/compiler.h>
#include <linux/err.h>
#include <linux/fs.h>
#include <linux/hrtimer.h>
#include <linux/init.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/timer.h>
#include <asm/asm-offsets.h>
#include <asm/lowcore.h>
#include <asm/pgtable.h>
#include <asm/nmi.h>
#include <asm/system.h>
#include "kvm-s390.h"
#include "gaccess.h"

#define VCPU_STAT(x) offsetof(struct kvm_vcpu, stat.x), KVM_STAT_VCPU

struct kvm_stats_debugfs_item debugfs_entries[] = {
        { "userspace_handled", VCPU_STAT(exit_userspace) },
        { "exit_null", VCPU_STAT(exit_null) },
        { "exit_validity", VCPU_STAT(exit_validity) },
        { "exit_stop_request", VCPU_STAT(exit_stop_request) },
        { "exit_external_request", VCPU_STAT(exit_external_request) },
        { "exit_external_interrupt", VCPU_STAT(exit_external_interrupt) },
        { "exit_instruction", VCPU_STAT(exit_instruction) },
        { "exit_program_interruption", VCPU_STAT(exit_program_interruption) },
        { "exit_instr_and_program_int", VCPU_STAT(exit_instr_and_program) },
        { "instruction_lctlg", VCPU_STAT(instruction_lctlg) },
        { "instruction_lctl", VCPU_STAT(instruction_lctl) },
        { "deliver_emergency_signal", VCPU_STAT(deliver_emergency_signal) },
        { "deliver_external_call", VCPU_STAT(deliver_external_call) },
        { "deliver_service_signal", VCPU_STAT(deliver_service_signal) },
        { "deliver_virtio_interrupt", VCPU_STAT(deliver_virtio_interrupt) },
        { "deliver_stop_signal", VCPU_STAT(deliver_stop_signal) },
        { "deliver_prefix_signal", VCPU_STAT(deliver_prefix_signal) },
        { "deliver_restart_signal", VCPU_STAT(deliver_restart_signal) },
        { "deliver_program_interruption", VCPU_STAT(deliver_program_int) },
        { "exit_wait_state", VCPU_STAT(exit_wait_state) },
        { "instruction_stidp", VCPU_STAT(instruction_stidp) },
        { "instruction_spx", VCPU_STAT(instruction_spx) },
        { "instruction_stpx", VCPU_STAT(instruction_stpx) },
        { "instruction_stap", VCPU_STAT(instruction_stap) },
        { "instruction_storage_key", VCPU_STAT(instruction_storage_key) },
        { "instruction_stsch", VCPU_STAT(instruction_stsch) },
        { "instruction_chsc", VCPU_STAT(instruction_chsc) },
        { "instruction_stsi", VCPU_STAT(instruction_stsi) },
        { "instruction_stfl", VCPU_STAT(instruction_stfl) },
        { "instruction_tprot", VCPU_STAT(instruction_tprot) },
        { "instruction_sigp_sense", VCPU_STAT(instruction_sigp_sense) },
        { "instruction_sigp_sense_running", VCPU_STAT(instruction_sigp_sense_running) },
        { "instruction_sigp_external_call", VCPU_STAT(instruction_sigp_external_call) },
        { "instruction_sigp_emergency", VCPU_STAT(instruction_sigp_emergency) },
        { "instruction_sigp_stop", VCPU_STAT(instruction_sigp_stop) },
        { "instruction_sigp_set_arch", VCPU_STAT(instruction_sigp_arch) },
        { "instruction_sigp_set_prefix", VCPU_STAT(instruction_sigp_prefix) },
        { "instruction_sigp_restart", VCPU_STAT(instruction_sigp_restart) },
        { "diagnose_10", VCPU_STAT(diagnose_10) },
        { "diagnose_44", VCPU_STAT(diagnose_44) },
        { NULL }
};

static unsigned long long *facilities;

/* Section: not file related */
int kvm_arch_hardware_enable(void *garbage)
{
        /* every s390 is virtualization enabled ;-) */
        return 0;
}

void kvm_arch_hardware_disable(void *garbage)
{
}

int kvm_arch_hardware_setup(void)
{
        return 0;
}

void kvm_arch_hardware_unsetup(void)
{
}

void kvm_arch_check_processor_compat(void *rtn)
{
}

int kvm_arch_init(void *opaque)
{
        return 0;
}

void kvm_arch_exit(void)
{
}

/* Section: device related */
long kvm_arch_dev_ioctl(struct file *filp,
                        unsigned int ioctl, unsigned long arg)
{
        if (ioctl == KVM_S390_ENABLE_SIE)
                return s390_enable_sie();
        return -EINVAL;
}

int kvm_dev_ioctl_check_extension(long ext)
{
        int r;

        switch (ext) {
        case KVM_CAP_S390_PSW:
        case KVM_CAP_S390_GMAP:
        case KVM_CAP_SYNC_MMU:
#ifdef CONFIG_KVM_S390_UCONTROL
        case KVM_CAP_S390_UCONTROL:
#endif
        case KVM_CAP_SYNC_REGS:
                r = 1;
                break;
        default:
                r = 0;
        }
        return r;
}

/* Section: vm related */
/*
 * Get (and clear) the dirty memory log for a memory slot.
 */
int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
                               struct kvm_dirty_log *log)
{
        return 0;
}

long kvm_arch_vm_ioctl(struct file *filp,
                       unsigned int ioctl, unsigned long arg)
{
        struct kvm *kvm = filp->private_data;
        void __user *argp = (void __user *)arg;
        int r;

        switch (ioctl) {
        case KVM_S390_INTERRUPT: {
                struct kvm_s390_interrupt s390int;

                r = -EFAULT;
                if (copy_from_user(&s390int, argp, sizeof(s390int)))
                        break;
                r = kvm_s390_inject_vm(kvm, &s390int);
                break;
        }
        default:
                r = -ENOTTY;
        }

        return r;
}

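/*
 * VM-wide setup: enable SIE for this host process, allocate the system
 * control area (SCA) that links the vcpus' SIE blocks, register an s390
 * debug feature for tracing, initialize the floating interrupt list and,
 * unless this is a user-controlled (ucontrol) VM, allocate the guest
 * address space (gmap).
 */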
int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
{
        int rc;
        char debug_name[16];

        rc = -EINVAL;
#ifdef CONFIG_KVM_S390_UCONTROL
        if (type & ~KVM_VM_S390_UCONTROL)
                goto out_err;
        if ((type & KVM_VM_S390_UCONTROL) && (!capable(CAP_SYS_ADMIN)))
                goto out_err;
#else
        if (type)
                goto out_err;
#endif

        rc = s390_enable_sie();
        if (rc)
                goto out_err;

        rc = -ENOMEM;

        kvm->arch.sca = (struct sca_block *) get_zeroed_page(GFP_KERNEL);
        if (!kvm->arch.sca)
                goto out_err;

        sprintf(debug_name, "kvm-%u", current->pid);

        kvm->arch.dbf = debug_register(debug_name, 8, 2, 8 * sizeof(long));
        if (!kvm->arch.dbf)
                goto out_nodbf;

        spin_lock_init(&kvm->arch.float_int.lock);
        INIT_LIST_HEAD(&kvm->arch.float_int.list);

        debug_register_view(kvm->arch.dbf, &debug_sprintf_view);
        VM_EVENT(kvm, 3, "%s", "vm created");

        if (type & KVM_VM_S390_UCONTROL) {
                kvm->arch.gmap = NULL;
        } else {
                kvm->arch.gmap = gmap_alloc(current->mm);
                if (!kvm->arch.gmap)
                        goto out_nogmap;
        }
        return 0;
out_nogmap:
        debug_unregister(kvm->arch.dbf);
out_nodbf:
        free_page((unsigned long)(kvm->arch.sca));
out_err:
        return rc;
}

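/*
 * Tear down one vcpu: detach its SIE block from the SCA, free the per-vcpu
 * gmap used by ucontrol guests, and release the SIE block and vcpu itself.
 */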
void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
{
        VCPU_EVENT(vcpu, 3, "%s", "free cpu");
        if (!kvm_is_ucontrol(vcpu->kvm)) {
                clear_bit(63 - vcpu->vcpu_id,
                          (unsigned long *) &vcpu->kvm->arch.sca->mcn);
                if (vcpu->kvm->arch.sca->cpu[vcpu->vcpu_id].sda ==
                    (__u64) vcpu->arch.sie_block)
                        vcpu->kvm->arch.sca->cpu[vcpu->vcpu_id].sda = 0;
        }
        smp_mb();

        if (kvm_is_ucontrol(vcpu->kvm))
                gmap_free(vcpu->arch.gmap);

        free_page((unsigned long)(vcpu->arch.sie_block));
        kvm_vcpu_uninit(vcpu);
        kfree(vcpu);
}

static void kvm_free_vcpus(struct kvm *kvm)
{
        unsigned int i;
        struct kvm_vcpu *vcpu;

        kvm_for_each_vcpu(i, vcpu, kvm)
                kvm_arch_vcpu_destroy(vcpu);

        mutex_lock(&kvm->lock);
        for (i = 0; i < atomic_read(&kvm->online_vcpus); i++)
                kvm->vcpus[i] = NULL;

        atomic_set(&kvm->online_vcpus, 0);
        mutex_unlock(&kvm->lock);
}

void kvm_arch_sync_events(struct kvm *kvm)
{
}

void kvm_arch_destroy_vm(struct kvm *kvm)
{
        kvm_free_vcpus(kvm);
        free_page((unsigned long)(kvm->arch.sca));
        debug_unregister(kvm->arch.dbf);
        if (!kvm_is_ucontrol(kvm))
                gmap_free(kvm->arch.gmap);
}

/* Section: vcpu related */
int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
{
        if (kvm_is_ucontrol(vcpu->kvm)) {
                vcpu->arch.gmap = gmap_alloc(current->mm);
                if (!vcpu->arch.gmap)
                        return -ENOMEM;
                return 0;
        }

        vcpu->arch.gmap = vcpu->kvm->arch.gmap;
        vcpu->run->kvm_valid_regs = KVM_SYNC_PREFIX |
                                    KVM_SYNC_GPRS |
                                    KVM_SYNC_ACRS |
                                    KVM_SYNC_CRS;
        return 0;
}

void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu)
{
        /* Nothing to do */
}

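/*
 * Scheduled in: save the host FPU and access registers, install the guest
 * ones, attach the guest address space (gmap) and flag the vcpu as running.
 * kvm_arch_vcpu_put() reverses the sequence when the vcpu is scheduled out.
 */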
void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
        save_fp_regs(&vcpu->arch.host_fpregs);
        save_access_regs(vcpu->arch.host_acrs);
        vcpu->arch.guest_fpregs.fpc &= FPC_VALID_MASK;
        restore_fp_regs(&vcpu->arch.guest_fpregs);
        restore_access_regs(vcpu->run->s.regs.acrs);
        gmap_enable(vcpu->arch.gmap);
        atomic_set_mask(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags);
}

void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
{
        atomic_clear_mask(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags);
        gmap_disable(vcpu->arch.gmap);
        save_fp_regs(&vcpu->arch.guest_fpregs);
        save_access_regs(vcpu->run->s.regs.acrs);
        restore_fp_regs(&vcpu->arch.host_fpregs);
        restore_access_regs(vcpu->arch.host_acrs);
}

static void kvm_s390_vcpu_initial_reset(struct kvm_vcpu *vcpu)
{
        /* this equals initial cpu reset in pop, but we don't switch to ESA */
        vcpu->arch.sie_block->gpsw.mask = 0UL;
        vcpu->arch.sie_block->gpsw.addr = 0UL;
        kvm_s390_set_prefix(vcpu, 0);
        vcpu->arch.sie_block->cputm     = 0UL;
        vcpu->arch.sie_block->ckc       = 0UL;
        vcpu->arch.sie_block->todpr     = 0;
        memset(vcpu->arch.sie_block->gcr, 0, 16 * sizeof(__u64));
        vcpu->arch.sie_block->gcr[0]  = 0xE0UL;
        vcpu->arch.sie_block->gcr[14] = 0xC2000000UL;
        vcpu->arch.guest_fpregs.fpc = 0;
        asm volatile("lfpc %0" : : "Q" (vcpu->arch.guest_fpregs.fpc));
        vcpu->arch.sie_block->gbea = 1;
}

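/*
 * One-time SIE control block setup: initial CPU state flags, intercept
 * controls (ecb/eca), the facility list shown to the guest, and the clock
 * comparator timer with its wakeup tasklet.
 */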
int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
{
        atomic_set(&vcpu->arch.sie_block->cpuflags, CPUSTAT_ZARCH |
                                                    CPUSTAT_SM |
                                                    CPUSTAT_STOPPED);
        vcpu->arch.sie_block->ecb   = 6;
        vcpu->arch.sie_block->eca   = 0xC1002001U;
        vcpu->arch.sie_block->fac   = (int) (long) facilities;
        hrtimer_init(&vcpu->arch.ckc_timer, CLOCK_REALTIME, HRTIMER_MODE_ABS);
        tasklet_init(&vcpu->arch.tasklet, kvm_s390_tasklet,
                     (unsigned long) vcpu);
        vcpu->arch.ckc_timer.function = kvm_s390_idle_wakeup;
        get_cpu_id(&vcpu->arch.cpu_id);
        vcpu->arch.cpu_id.version = 0xff;
        return 0;
}

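/*
 * Allocate a vcpu and its SIE control block, hook the block into the SCA
 * (origin and CPU mask) for non-ucontrol guests, and wire the vcpu's local
 * interrupt state into the VM's floating interrupt structure.
 */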
struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm,
                                      unsigned int id)
{
        struct kvm_vcpu *vcpu;
        int rc = -EINVAL;

        if (id >= KVM_MAX_VCPUS)
                goto out;

        rc = -ENOMEM;

        vcpu = kzalloc(sizeof(struct kvm_vcpu), GFP_KERNEL);
        if (!vcpu)
                goto out;

        vcpu->arch.sie_block = (struct kvm_s390_sie_block *)
                                        get_zeroed_page(GFP_KERNEL);

        if (!vcpu->arch.sie_block)
                goto out_free_cpu;

        vcpu->arch.sie_block->icpua = id;
        if (!kvm_is_ucontrol(kvm)) {
                if (!kvm->arch.sca) {
                        WARN_ON_ONCE(1);
                        goto out_free_cpu;
                }
                if (!kvm->arch.sca->cpu[id].sda)
                        kvm->arch.sca->cpu[id].sda =
                                (__u64) vcpu->arch.sie_block;
                vcpu->arch.sie_block->scaoh =
                        (__u32)(((__u64)kvm->arch.sca) >> 32);
                vcpu->arch.sie_block->scaol = (__u32)(__u64)kvm->arch.sca;
                set_bit(63 - id, (unsigned long *) &kvm->arch.sca->mcn);
        }

        spin_lock_init(&vcpu->arch.local_int.lock);
        INIT_LIST_HEAD(&vcpu->arch.local_int.list);
        vcpu->arch.local_int.float_int = &kvm->arch.float_int;
        spin_lock(&kvm->arch.float_int.lock);
        kvm->arch.float_int.local_int[id] = &vcpu->arch.local_int;
        init_waitqueue_head(&vcpu->arch.local_int.wq);
        vcpu->arch.local_int.cpuflags = &vcpu->arch.sie_block->cpuflags;
        spin_unlock(&kvm->arch.float_int.lock);

        rc = kvm_vcpu_init(vcpu, kvm, id);
        if (rc)
                goto out_free_sie_block;
        VM_EVENT(kvm, 3, "create cpu %d at %p, sie block at %p", id, vcpu,
                 vcpu->arch.sie_block);

        return vcpu;
out_free_sie_block:
        free_page((unsigned long)(vcpu->arch.sie_block));
out_free_cpu:
        kfree(vcpu);
out:
        return ERR_PTR(rc);
}

int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
{
        /* kvm common code refers to this, but never calls it */
        BUG();
        return 0;
}

static int kvm_arch_vcpu_ioctl_initial_reset(struct kvm_vcpu *vcpu)
{
        kvm_s390_vcpu_initial_reset(vcpu);
        return 0;
}

int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
        memcpy(&vcpu->run->s.regs.gprs, &regs->gprs, sizeof(regs->gprs));
        return 0;
}

int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
        memcpy(&regs->gprs, &vcpu->run->s.regs.gprs, sizeof(regs->gprs));
        return 0;
}

int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
                                  struct kvm_sregs *sregs)
{
        memcpy(&vcpu->run->s.regs.acrs, &sregs->acrs, sizeof(sregs->acrs));
        memcpy(&vcpu->arch.sie_block->gcr, &sregs->crs, sizeof(sregs->crs));
        restore_access_regs(vcpu->run->s.regs.acrs);
        return 0;
}

int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
                                  struct kvm_sregs *sregs)
{
        memcpy(&sregs->acrs, &vcpu->run->s.regs.acrs, sizeof(sregs->acrs));
        memcpy(&sregs->crs, &vcpu->arch.sie_block->gcr, sizeof(sregs->crs));
        return 0;
}

int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
        memcpy(&vcpu->arch.guest_fpregs.fprs, &fpu->fprs, sizeof(fpu->fprs));
        vcpu->arch.guest_fpregs.fpc = fpu->fpc & FPC_VALID_MASK;
        restore_fp_regs(&vcpu->arch.guest_fpregs);
        return 0;
}

int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
        memcpy(&fpu->fprs, &vcpu->arch.guest_fpregs.fprs, sizeof(fpu->fprs));
        fpu->fpc = vcpu->arch.guest_fpregs.fpc;
        return 0;
}

static int kvm_arch_vcpu_ioctl_set_initial_psw(struct kvm_vcpu *vcpu, psw_t psw)
{
        int rc = 0;

        if (!(atomic_read(&vcpu->arch.sie_block->cpuflags) & CPUSTAT_STOPPED))
                rc = -EBUSY;
        else {
                vcpu->run->psw_mask = psw.mask;
                vcpu->run->psw_addr = psw.addr;
        }
        return rc;
}

int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
                                  struct kvm_translation *tr)
{
        return -EINVAL; /* not implemented yet */
}

int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
                                        struct kvm_guest_debug *dbg)
{
        return -EINVAL; /* not implemented yet */
}

int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
                                    struct kvm_mp_state *mp_state)
{
        return -EINVAL; /* not implemented yet */
}

int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
                                    struct kvm_mp_state *mp_state)
{
        return -EINVAL; /* not implemented yet */
}

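/*
 * One pass through SIE: handle rescheduling and pending machine checks,
 * deliver pending interrupts, enter the guest via sie64a() and, if the SIE
 * instruction itself faulted, either report a ucontrol intercept or inject
 * an addressing exception into the guest.
 */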
static int __vcpu_run(struct kvm_vcpu *vcpu)
{
        int rc;

        memcpy(&vcpu->arch.sie_block->gg14, &vcpu->run->s.regs.gprs[14], 16);

        if (need_resched())
                schedule();

        if (test_thread_flag(TIF_MCCK_PENDING))
                s390_handle_mcck();

        if (!kvm_is_ucontrol(vcpu->kvm))
                kvm_s390_deliver_pending_interrupts(vcpu);

        vcpu->arch.sie_block->icptcode = 0;
        local_irq_disable();
        kvm_guest_enter();
        local_irq_enable();
        VCPU_EVENT(vcpu, 6, "entering sie flags %x",
                   atomic_read(&vcpu->arch.sie_block->cpuflags));
        rc = sie64a(vcpu->arch.sie_block, vcpu->run->s.regs.gprs);
        if (rc) {
                if (kvm_is_ucontrol(vcpu->kvm)) {
                        rc = SIE_INTERCEPT_UCONTROL;
                } else {
                        VCPU_EVENT(vcpu, 3, "%s", "fault in sie instruction");
                        kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
                        rc = 0;
                }
        }
        VCPU_EVENT(vcpu, 6, "exit sie icptcode %d",
                   vcpu->arch.sie_block->icptcode);
        local_irq_disable();
        kvm_guest_exit();
        local_irq_enable();

        memcpy(&vcpu->run->s.regs.gprs[14], &vcpu->arch.sie_block->gg14, 16);
        return rc;
}

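/*
 * KVM_RUN: sync the PSW and dirty register state from the kvm_run area,
 * loop between SIE entries and in-kernel intercept handling until a signal
 * is pending or an exit needs userspace, then write the state back for the
 * caller.
 */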
int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
{
        int rc;
        sigset_t sigsaved;

rerun_vcpu:
        if (vcpu->sigset_active)
                sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved);

        atomic_clear_mask(CPUSTAT_STOPPED, &vcpu->arch.sie_block->cpuflags);

        BUG_ON(vcpu->kvm->arch.float_int.local_int[vcpu->vcpu_id] == NULL);

        switch (kvm_run->exit_reason) {
        case KVM_EXIT_S390_SIEIC:
        case KVM_EXIT_UNKNOWN:
        case KVM_EXIT_INTR:
        case KVM_EXIT_S390_RESET:
        case KVM_EXIT_S390_UCONTROL:
                break;
        default:
                BUG();
        }

        vcpu->arch.sie_block->gpsw.mask = kvm_run->psw_mask;
        vcpu->arch.sie_block->gpsw.addr = kvm_run->psw_addr;
        if (kvm_run->kvm_dirty_regs & KVM_SYNC_PREFIX) {
                kvm_run->kvm_dirty_regs &= ~KVM_SYNC_PREFIX;
                kvm_s390_set_prefix(vcpu, kvm_run->s.regs.prefix);
        }
        if (kvm_run->kvm_dirty_regs & KVM_SYNC_CRS) {
                kvm_run->kvm_dirty_regs &= ~KVM_SYNC_CRS;
                memcpy(&vcpu->arch.sie_block->gcr, &kvm_run->s.regs.crs, 128);
                kvm_s390_set_prefix(vcpu, kvm_run->s.regs.prefix);
        }

        might_fault();

        do {
                rc = __vcpu_run(vcpu);
                if (rc)
                        break;
                if (kvm_is_ucontrol(vcpu->kvm))
                        rc = -EOPNOTSUPP;
                else
                        rc = kvm_handle_sie_intercept(vcpu);
        } while (!signal_pending(current) && !rc);

        if (rc == SIE_INTERCEPT_RERUNVCPU)
                goto rerun_vcpu;

        if (signal_pending(current) && !rc) {
                kvm_run->exit_reason = KVM_EXIT_INTR;
                rc = -EINTR;
        }

#ifdef CONFIG_KVM_S390_UCONTROL
        if (rc == SIE_INTERCEPT_UCONTROL) {
                kvm_run->exit_reason = KVM_EXIT_S390_UCONTROL;
                kvm_run->s390_ucontrol.trans_exc_code =
                        current->thread.gmap_addr;
                kvm_run->s390_ucontrol.pgm_code = 0x10;
                rc = 0;
        }
#endif

        if (rc == -EOPNOTSUPP) {
                /* intercept cannot be handled in-kernel, prepare kvm-run */
                kvm_run->exit_reason         = KVM_EXIT_S390_SIEIC;
                kvm_run->s390_sieic.icptcode = vcpu->arch.sie_block->icptcode;
                kvm_run->s390_sieic.ipa      = vcpu->arch.sie_block->ipa;
                kvm_run->s390_sieic.ipb      = vcpu->arch.sie_block->ipb;
                rc = 0;
        }

        if (rc == -EREMOTE) {
                /* intercept was handled, but userspace support is needed
                 * kvm_run has been prepared by the handler */
                rc = 0;
        }

        kvm_run->psw_mask     = vcpu->arch.sie_block->gpsw.mask;
        kvm_run->psw_addr     = vcpu->arch.sie_block->gpsw.addr;
        kvm_run->s.regs.prefix = vcpu->arch.sie_block->prefix;
        memcpy(&kvm_run->s.regs.crs, &vcpu->arch.sie_block->gcr, 128);

        if (vcpu->sigset_active)
                sigprocmask(SIG_SETMASK, &sigsaved, NULL);

        vcpu->stat.exit_userspace++;
        return rc;
}

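/*
 * Store a block into guest memory, going through the prefixed (real)
 * address space when 'prefix' is set or the absolute address otherwise.
 */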
static int __guestcopy(struct kvm_vcpu *vcpu, u64 guestdest, void *from,
                       unsigned long n, int prefix)
{
        if (prefix)
                return copy_to_guest(vcpu, guestdest, from, n);
        else
                return copy_to_guest_absolute(vcpu, guestdest, from, n);
}

/*
 * store status at address
 * we have two special cases:
 * KVM_S390_STORE_STATUS_NOADDR: -> 0x1200 on 64 bit
 * KVM_S390_STORE_STATUS_PREFIXED: -> prefix
 */
int kvm_s390_vcpu_store_status(struct kvm_vcpu *vcpu, unsigned long addr)
{
        unsigned char archmode = 1;
        int prefix;

        if (addr == KVM_S390_STORE_STATUS_NOADDR) {
                if (copy_to_guest_absolute(vcpu, 163ul, &archmode, 1))
                        return -EFAULT;
                addr = SAVE_AREA_BASE;
                prefix = 0;
        } else if (addr == KVM_S390_STORE_STATUS_PREFIXED) {
                if (copy_to_guest(vcpu, 163ul, &archmode, 1))
                        return -EFAULT;
                addr = SAVE_AREA_BASE;
                prefix = 1;
        } else
                prefix = 0;

        if (__guestcopy(vcpu, addr + offsetof(struct save_area, fp_regs),
                        vcpu->arch.guest_fpregs.fprs, 128, prefix))
                return -EFAULT;

        if (__guestcopy(vcpu, addr + offsetof(struct save_area, gp_regs),
                        vcpu->run->s.regs.gprs, 128, prefix))
                return -EFAULT;

        if (__guestcopy(vcpu, addr + offsetof(struct save_area, psw),
                        &vcpu->arch.sie_block->gpsw, 16, prefix))
                return -EFAULT;

        if (__guestcopy(vcpu, addr + offsetof(struct save_area, pref_reg),
                        &vcpu->arch.sie_block->prefix, 4, prefix))
                return -EFAULT;

        if (__guestcopy(vcpu,
                        addr + offsetof(struct save_area, fp_ctrl_reg),
                        &vcpu->arch.guest_fpregs.fpc, 4, prefix))
                return -EFAULT;

        if (__guestcopy(vcpu, addr + offsetof(struct save_area, tod_reg),
                        &vcpu->arch.sie_block->todpr, 4, prefix))
                return -EFAULT;

        if (__guestcopy(vcpu, addr + offsetof(struct save_area, timer),
                        &vcpu->arch.sie_block->cputm, 8, prefix))
                return -EFAULT;

        if (__guestcopy(vcpu, addr + offsetof(struct save_area, clk_cmp),
                        &vcpu->arch.sie_block->ckc, 8, prefix))
                return -EFAULT;

        if (__guestcopy(vcpu, addr + offsetof(struct save_area, acc_regs),
                        &vcpu->run->s.regs.acrs, 64, prefix))
                return -EFAULT;

        if (__guestcopy(vcpu,
                        addr + offsetof(struct save_area, ctrl_regs),
                        &vcpu->arch.sie_block->gcr, 128, prefix))
                return -EFAULT;
        return 0;
}

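/*
 * Per-vcpu ioctls issued on the vcpu file descriptor (e.g. from userspace:
 * ioctl(vcpu_fd, KVM_S390_INTERRUPT, &s390int)): interrupt injection, store
 * status, initial PSW and CPU reset, the ucontrol address space map/unmap
 * operations and on-demand faulting of guest pages.
 */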
long kvm_arch_vcpu_ioctl(struct file *filp,
                         unsigned int ioctl, unsigned long arg)
{
        struct kvm_vcpu *vcpu = filp->private_data;
        void __user *argp = (void __user *)arg;
        long r;

        switch (ioctl) {
        case KVM_S390_INTERRUPT: {
                struct kvm_s390_interrupt s390int;

                r = -EFAULT;
                if (copy_from_user(&s390int, argp, sizeof(s390int)))
                        break;
                r = kvm_s390_inject_vcpu(vcpu, &s390int);
                break;
        }
        case KVM_S390_STORE_STATUS:
                r = kvm_s390_vcpu_store_status(vcpu, arg);
                break;
        case KVM_S390_SET_INITIAL_PSW: {
                psw_t psw;

                r = -EFAULT;
                if (copy_from_user(&psw, argp, sizeof(psw)))
                        break;
                r = kvm_arch_vcpu_ioctl_set_initial_psw(vcpu, psw);
                break;
        }
        case KVM_S390_INITIAL_RESET:
                r = kvm_arch_vcpu_ioctl_initial_reset(vcpu);
                break;
#ifdef CONFIG_KVM_S390_UCONTROL
        case KVM_S390_UCAS_MAP: {
                struct kvm_s390_ucas_mapping ucasmap;

                if (copy_from_user(&ucasmap, argp, sizeof(ucasmap))) {
                        r = -EFAULT;
                        break;
                }

                if (!kvm_is_ucontrol(vcpu->kvm)) {
                        r = -EINVAL;
                        break;
                }

                r = gmap_map_segment(vcpu->arch.gmap, ucasmap.user_addr,
                                     ucasmap.vcpu_addr, ucasmap.length);
                break;
        }
        case KVM_S390_UCAS_UNMAP: {
                struct kvm_s390_ucas_mapping ucasmap;

                if (copy_from_user(&ucasmap, argp, sizeof(ucasmap))) {
                        r = -EFAULT;
                        break;
                }

                if (!kvm_is_ucontrol(vcpu->kvm)) {
                        r = -EINVAL;
                        break;
                }

                r = gmap_unmap_segment(vcpu->arch.gmap, ucasmap.vcpu_addr,
                        ucasmap.length);
                break;
        }
#endif
        case KVM_S390_VCPU_FAULT: {
                r = gmap_fault(arg, vcpu->arch.gmap);
                if (!IS_ERR_VALUE(r))
                        r = 0;
                break;
        }
        default:
                r = -ENOTTY;
        }
        return r;
}

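/*
 * mmap() fault handler for the vcpu file descriptor: user-controlled
 * guests may map the SIE control block at KVM_S390_SIE_PAGE_OFFSET;
 * everything else gets SIGBUS.
 */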
int kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf)
{
#ifdef CONFIG_KVM_S390_UCONTROL
        if ((vmf->pgoff == KVM_S390_SIE_PAGE_OFFSET)
                 && (kvm_is_ucontrol(vcpu->kvm))) {
                vmf->page = virt_to_page(vcpu->arch.sie_block);
                get_page(vmf->page);
                return 0;
        }
#endif
        return VM_FAULT_SIGBUS;
}

void kvm_arch_free_memslot(struct kvm_memory_slot *free,
                           struct kvm_memory_slot *dont)
{
}

int kvm_arch_create_memslot(struct kvm_memory_slot *slot, unsigned long npages)
{
        return 0;
}

/* Section: memory related */
int kvm_arch_prepare_memory_region(struct kvm *kvm,
                                   struct kvm_memory_slot *memslot,
                                   struct kvm_memory_slot old,
                                   struct kvm_userspace_memory_region *mem,
                                   int user_alloc)
{
        /* A few sanity checks: we support exactly one memory slot; it has to
           start at guest virtual zero, be located at a page boundary in
           userland and end at a page boundary. The memory in userland may be
           fragmented into various different vmas. It is okay to mmap() and
           munmap() memory in this slot at any time after this call. */

        if (mem->slot)
                return -EINVAL;

        if (mem->guest_phys_addr)
                return -EINVAL;

        if (mem->userspace_addr & 0xffffful)
                return -EINVAL;

        if (mem->memory_size & 0xffffful)
                return -EINVAL;

        if (!user_alloc)
                return -EINVAL;

        return 0;
}

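/*
 * After the generic code has accepted the (single) memory slot, map its
 * backing userspace range into the guest address space via the gmap.
 */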
void kvm_arch_commit_memory_region(struct kvm *kvm,
                                struct kvm_userspace_memory_region *mem,
                                struct kvm_memory_slot old,
                                int user_alloc)
{
        int rc;


        rc = gmap_map_segment(kvm->arch.gmap, mem->userspace_addr,
                mem->guest_phys_addr, mem->memory_size);
        if (rc)
                printk(KERN_WARNING "kvm-s390: failed to commit memory region\n");
        return;
}

void kvm_arch_flush_shadow(struct kvm *kvm)
{
}

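/*
 * Module init: register with the KVM core and build the facility list
 * advertised to guests, a masked copy of the host's STFLE data limited to
 * facilities known to work under KVM.
 */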
static int __init kvm_s390_init(void)
{
        int ret;
        ret = kvm_init(NULL, sizeof(struct kvm_vcpu), 0, THIS_MODULE);
        if (ret)
                return ret;

        /*
         * guests can ask for up to 255+1 double words, we need a full page
         * to hold the maximum amount of facilities. On the other hand, we
         * only set facilities that are known to work in KVM.
         */
        facilities = (unsigned long long *) get_zeroed_page(GFP_KERNEL|GFP_DMA);
        if (!facilities) {
                kvm_exit();
                return -ENOMEM;
        }
        memcpy(facilities, S390_lowcore.stfle_fac_list, 16);
        facilities[0] &= 0xff00fff3f47c0000ULL;
        facilities[1] &= 0x201c000000000000ULL;
        return 0;
}

static void __exit kvm_s390_exit(void)
{
        free_page((unsigned long) facilities);
        kvm_exit();
}

module_init(kvm_s390_init);
module_exit(kvm_s390_exit);