[S390] appldata/extmem/kvm: add missing GFP_KERNEL flag
[linux-2.6.git] / arch / s390 / kvm / kvm-s390.c
1 /*
 * kvm-s390.c -- hosting zSeries kernel virtual machines
3  *
4  * Copyright IBM Corp. 2008,2009
5  *
6  * This program is free software; you can redistribute it and/or modify
7  * it under the terms of the GNU General Public License (version 2 only)
8  * as published by the Free Software Foundation.
9  *
10  *    Author(s): Carsten Otte <cotte@de.ibm.com>
11  *               Christian Borntraeger <borntraeger@de.ibm.com>
12  *               Heiko Carstens <heiko.carstens@de.ibm.com>
13  *               Christian Ehrhardt <ehrhardt@de.ibm.com>
14  */
15
16 #include <linux/compiler.h>
17 #include <linux/err.h>
18 #include <linux/fs.h>
19 #include <linux/hrtimer.h>
20 #include <linux/init.h>
21 #include <linux/kvm.h>
22 #include <linux/kvm_host.h>
23 #include <linux/module.h>
24 #include <linux/slab.h>
25 #include <linux/timer.h>
26 #include <asm/asm-offsets.h>
27 #include <asm/lowcore.h>
28 #include <asm/pgtable.h>
29 #include <asm/nmi.h>
30 #include <asm/system.h>
31 #include "kvm-s390.h"
32 #include "gaccess.h"
33
/* Expand to the offset/type pair expected by struct kvm_stats_debugfs_item. */
#define VCPU_STAT(x) offsetof(struct kvm_vcpu, stat.x), KVM_STAT_VCPU

/*
 * Per-vcpu statistics counters exported through debugfs; each entry maps
 * a debugfs file name to a counter in struct kvm_vcpu.stat.
 */
struct kvm_stats_debugfs_item debugfs_entries[] = {
	{ "userspace_handled", VCPU_STAT(exit_userspace) },
	{ "exit_null", VCPU_STAT(exit_null) },
	{ "exit_validity", VCPU_STAT(exit_validity) },
	{ "exit_stop_request", VCPU_STAT(exit_stop_request) },
	{ "exit_external_request", VCPU_STAT(exit_external_request) },
	{ "exit_external_interrupt", VCPU_STAT(exit_external_interrupt) },
	{ "exit_instruction", VCPU_STAT(exit_instruction) },
	{ "exit_program_interruption", VCPU_STAT(exit_program_interruption) },
	{ "exit_instr_and_program_int", VCPU_STAT(exit_instr_and_program) },
	{ "instruction_lctlg", VCPU_STAT(instruction_lctlg) },
	{ "instruction_lctl", VCPU_STAT(instruction_lctl) },
	{ "deliver_emergency_signal", VCPU_STAT(deliver_emergency_signal) },
	{ "deliver_service_signal", VCPU_STAT(deliver_service_signal) },
	{ "deliver_virtio_interrupt", VCPU_STAT(deliver_virtio_interrupt) },
	{ "deliver_stop_signal", VCPU_STAT(deliver_stop_signal) },
	{ "deliver_prefix_signal", VCPU_STAT(deliver_prefix_signal) },
	{ "deliver_restart_signal", VCPU_STAT(deliver_restart_signal) },
	{ "deliver_program_interruption", VCPU_STAT(deliver_program_int) },
	{ "exit_wait_state", VCPU_STAT(exit_wait_state) },
	{ "instruction_stidp", VCPU_STAT(instruction_stidp) },
	{ "instruction_spx", VCPU_STAT(instruction_spx) },
	{ "instruction_stpx", VCPU_STAT(instruction_stpx) },
	{ "instruction_stap", VCPU_STAT(instruction_stap) },
	{ "instruction_storage_key", VCPU_STAT(instruction_storage_key) },
	{ "instruction_stsch", VCPU_STAT(instruction_stsch) },
	{ "instruction_chsc", VCPU_STAT(instruction_chsc) },
	{ "instruction_stsi", VCPU_STAT(instruction_stsi) },
	{ "instruction_stfl", VCPU_STAT(instruction_stfl) },
	{ "instruction_sigp_sense", VCPU_STAT(instruction_sigp_sense) },
	{ "instruction_sigp_emergency", VCPU_STAT(instruction_sigp_emergency) },
	{ "instruction_sigp_stop", VCPU_STAT(instruction_sigp_stop) },
	{ "instruction_sigp_set_arch", VCPU_STAT(instruction_sigp_arch) },
	{ "instruction_sigp_set_prefix", VCPU_STAT(instruction_sigp_prefix) },
	{ "instruction_sigp_restart", VCPU_STAT(instruction_sigp_restart) },
	{ "diagnose_44", VCPU_STAT(diagnose_44) },
	{ NULL }
};
74
75 static unsigned long long *facilities;
76
77 /* Section: not file related */
/* No per-cpu enablement needed: SIE is always available on s390. */
int kvm_arch_hardware_enable(void *garbage)
{
	/* every s390 is virtualization enabled ;-) */
	return 0;
}
83
/* Nothing to disable; counterpart of kvm_arch_hardware_enable(). */
void kvm_arch_hardware_disable(void *garbage)
{
}
87
/* No global hardware setup required on s390. */
int kvm_arch_hardware_setup(void)
{
	return 0;
}
92
/* Counterpart of kvm_arch_hardware_setup(); nothing to undo. */
void kvm_arch_hardware_unsetup(void)
{
}
96
/* All s390 processors that reach this code are compatible; no check done. */
void kvm_arch_check_processor_compat(void *rtn)
{
}
100
/* Architecture hook called from kvm_init(); no s390-specific init needed. */
int kvm_arch_init(void *opaque)
{
	return 0;
}
105
/* Counterpart of kvm_arch_init(); nothing to tear down. */
void kvm_arch_exit(void)
{
}
109
110 /* Section: device related */
111 long kvm_arch_dev_ioctl(struct file *filp,
112                         unsigned int ioctl, unsigned long arg)
113 {
114         if (ioctl == KVM_S390_ENABLE_SIE)
115                 return s390_enable_sie();
116         return -EINVAL;
117 }
118
119 int kvm_dev_ioctl_check_extension(long ext)
120 {
121         int r;
122
123         switch (ext) {
124         case KVM_CAP_S390_PSW:
125                 r = 1;
126                 break;
127         default:
128                 r = 0;
129         }
130         return r;
131 }
132
133 /* Section: vm related */
/*
 * Get (and clear) the dirty memory log for a memory slot.
 * Dirty page logging is not implemented on s390; report success
 * without filling in the log.
 */
int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
			       struct kvm_dirty_log *log)
{
	return 0;
}
142
143 long kvm_arch_vm_ioctl(struct file *filp,
144                        unsigned int ioctl, unsigned long arg)
145 {
146         struct kvm *kvm = filp->private_data;
147         void __user *argp = (void __user *)arg;
148         int r;
149
150         switch (ioctl) {
151         case KVM_S390_INTERRUPT: {
152                 struct kvm_s390_interrupt s390int;
153
154                 r = -EFAULT;
155                 if (copy_from_user(&s390int, argp, sizeof(s390int)))
156                         break;
157                 r = kvm_s390_inject_vm(kvm, &s390int);
158                 break;
159         }
160         default:
161                 r = -ENOTTY;
162         }
163
164         return r;
165 }
166
167 struct kvm *kvm_arch_create_vm(void)
168 {
169         struct kvm *kvm;
170         int rc;
171         char debug_name[16];
172
173         rc = s390_enable_sie();
174         if (rc)
175                 goto out_nokvm;
176
177         rc = -ENOMEM;
178         kvm = kzalloc(sizeof(struct kvm), GFP_KERNEL);
179         if (!kvm)
180                 goto out_nokvm;
181
182         kvm->arch.sca = (struct sca_block *) get_zeroed_page(GFP_KERNEL);
183         if (!kvm->arch.sca)
184                 goto out_nosca;
185
186         sprintf(debug_name, "kvm-%u", current->pid);
187
188         kvm->arch.dbf = debug_register(debug_name, 8, 2, 8 * sizeof(long));
189         if (!kvm->arch.dbf)
190                 goto out_nodbf;
191
192         spin_lock_init(&kvm->arch.float_int.lock);
193         INIT_LIST_HEAD(&kvm->arch.float_int.list);
194
195         debug_register_view(kvm->arch.dbf, &debug_sprintf_view);
196         VM_EVENT(kvm, 3, "%s", "vm created");
197
198         return kvm;
199 out_nodbf:
200         free_page((unsigned long)(kvm->arch.sca));
201 out_nosca:
202         kfree(kvm);
203 out_nokvm:
204         return ERR_PTR(rc);
205 }
206
/*
 * Free a vcpu: detach its SIE block from the SCA, then release the SIE
 * block page and the vcpu structure itself.
 */
void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
{
	VCPU_EVENT(vcpu, 3, "%s", "free cpu");
	/* Clear the SCA entry only if it still points at this vcpu's block. */
	if (vcpu->kvm->arch.sca->cpu[vcpu->vcpu_id].sda ==
		(__u64) vcpu->arch.sie_block)
		vcpu->kvm->arch.sca->cpu[vcpu->vcpu_id].sda = 0;
	/* Make the cleared sda visible before the page is freed. */
	smp_mb();
	free_page((unsigned long)(vcpu->arch.sie_block));
	kvm_vcpu_uninit(vcpu);
	kfree(vcpu);
}
218
/*
 * Destroy every vcpu of a VM, then clear the vcpu array and the online
 * count under kvm->lock.
 */
static void kvm_free_vcpus(struct kvm *kvm)
{
	unsigned int i;
	struct kvm_vcpu *vcpu;

	kvm_for_each_vcpu(i, vcpu, kvm)
		kvm_arch_vcpu_destroy(vcpu);

	mutex_lock(&kvm->lock);
	for (i = 0; i < atomic_read(&kvm->online_vcpus); i++)
		kvm->vcpus[i] = NULL;

	atomic_set(&kvm->online_vcpus, 0);
	mutex_unlock(&kvm->lock);
}
234
/* No asynchronous per-arch events to flush before VM destruction on s390. */
void kvm_arch_sync_events(struct kvm *kvm)
{
}
238
/*
 * Tear down a VM: vcpus first (they reference the SCA), then guest
 * memory, the SCA page, the debug feature area and the kvm itself.
 */
void kvm_arch_destroy_vm(struct kvm *kvm)
{
	kvm_free_vcpus(kvm);
	kvm_free_physmem(kvm);
	free_page((unsigned long)(kvm->arch.sca));
	debug_unregister(kvm->arch.dbf);
	cleanup_srcu_struct(&kvm->srcu);
	kfree(kvm);
}
248
249 /* Section: vcpu related */
/* Generic vcpu-init hook; all s390 setup happens in kvm_arch_vcpu_create(). */
int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
{
	return 0;
}
254
/* Counterpart of kvm_arch_vcpu_init(). */
void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu)
{
	/* Nothing todo */
}
259
/*
 * Called when the vcpu is scheduled in: stash the host's FP and access
 * registers, then load the guest's.
 */
void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
	save_fp_regs(&vcpu->arch.host_fpregs);
	save_access_regs(vcpu->arch.host_acrs);
	/* Drop invalid FPC bits before loading guest FP state. */
	vcpu->arch.guest_fpregs.fpc &= FPC_VALID_MASK;
	restore_fp_regs(&vcpu->arch.guest_fpregs);
	restore_access_regs(vcpu->arch.guest_acrs);
}
268
/*
 * Called when the vcpu is scheduled out: save the guest's FP and access
 * registers and restore the host's (mirror of kvm_arch_vcpu_load()).
 */
void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
{
	save_fp_regs(&vcpu->arch.guest_fpregs);
	save_access_regs(vcpu->arch.guest_acrs);
	restore_fp_regs(&vcpu->arch.host_fpregs);
	restore_access_regs(vcpu->arch.host_acrs);
}
276
/*
 * Bring the vcpu's SIE block into initial-CPU-reset state: clear PSW,
 * prefix, timers and control registers, then set the architected reset
 * values for cr0/cr14.
 */
static void kvm_s390_vcpu_initial_reset(struct kvm_vcpu *vcpu)
{
	/* this equals initial cpu reset in pop, but we don't switch to ESA */
	vcpu->arch.sie_block->gpsw.mask = 0UL;
	vcpu->arch.sie_block->gpsw.addr = 0UL;
	vcpu->arch.sie_block->prefix    = 0UL;
	vcpu->arch.sie_block->ihcpu     = 0xffff;
	vcpu->arch.sie_block->cputm     = 0UL;
	vcpu->arch.sie_block->ckc       = 0UL;
	vcpu->arch.sie_block->todpr     = 0;
	memset(vcpu->arch.sie_block->gcr, 0, 16 * sizeof(__u64));
	vcpu->arch.sie_block->gcr[0]  = 0xE0UL;
	vcpu->arch.sie_block->gcr[14] = 0xC2000000UL;
	vcpu->arch.guest_fpregs.fpc = 0;
	/* Also reset the hardware FP control register to the cleared value. */
	asm volatile("lfpc %0" : : "Q" (vcpu->arch.guest_fpregs.fpc));
	vcpu->arch.sie_block->gbea = 1;
}
294
/*
 * One-time SIE block setup for a freshly created vcpu: z/Architecture
 * mode, interception controls, facility list pointer and the clock
 * comparator wakeup timer/tasklet.
 */
int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
{
	atomic_set(&vcpu->arch.sie_block->cpuflags, CPUSTAT_ZARCH);
	/* Force a memory-layout reload before the first SIE entry. */
	set_bit(KVM_REQ_MMU_RELOAD, &vcpu->requests);
	vcpu->arch.sie_block->ecb   = 2;
	vcpu->arch.sie_block->eca   = 0xC1002001U;
	vcpu->arch.sie_block->fac   = (int) (long) facilities;
	hrtimer_init(&vcpu->arch.ckc_timer, CLOCK_REALTIME, HRTIMER_MODE_ABS);
	tasklet_init(&vcpu->arch.tasklet, kvm_s390_tasklet,
		     (unsigned long) vcpu);
	vcpu->arch.ckc_timer.function = kvm_s390_idle_wakeup;
	get_cpu_id(&vcpu->arch.cpu_id);
	/* Advertise a distinct version so guests can tell they run under KVM. */
	vcpu->arch.cpu_id.version = 0xff;
	return 0;
}
310
/*
 * Allocate a vcpu and its SIE control block, wire the block into the
 * VM's SCA, and register the vcpu's local interrupt structure with the
 * VM-wide floating interrupt state.
 *
 * Returns the vcpu on success or ERR_PTR(-errno); partial allocations
 * are unwound on the error paths.
 */
struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm,
				      unsigned int id)
{
	struct kvm_vcpu *vcpu = kzalloc(sizeof(struct kvm_vcpu), GFP_KERNEL);
	int rc = -ENOMEM;

	if (!vcpu)
		goto out_nomem;

	/* The SIE block must live on its own zeroed page. */
	vcpu->arch.sie_block = (struct kvm_s390_sie_block *)
					get_zeroed_page(GFP_KERNEL);

	if (!vcpu->arch.sie_block)
		goto out_free_cpu;

	vcpu->arch.sie_block->icpua = id;
	BUG_ON(!kvm->arch.sca);
	/* Claim the SCA slot for this id unless one is already installed. */
	if (!kvm->arch.sca->cpu[id].sda)
		kvm->arch.sca->cpu[id].sda = (__u64) vcpu->arch.sie_block;
	vcpu->arch.sie_block->scaoh = (__u32)(((__u64)kvm->arch.sca) >> 32);
	vcpu->arch.sie_block->scaol = (__u32)(__u64)kvm->arch.sca;

	spin_lock_init(&vcpu->arch.local_int.lock);
	INIT_LIST_HEAD(&vcpu->arch.local_int.list);
	vcpu->arch.local_int.float_int = &kvm->arch.float_int;
	/* Publish the local interrupt state under the floating-int lock. */
	spin_lock(&kvm->arch.float_int.lock);
	kvm->arch.float_int.local_int[id] = &vcpu->arch.local_int;
	init_waitqueue_head(&vcpu->arch.local_int.wq);
	vcpu->arch.local_int.cpuflags = &vcpu->arch.sie_block->cpuflags;
	spin_unlock(&kvm->arch.float_int.lock);

	rc = kvm_vcpu_init(vcpu, kvm, id);
	if (rc)
		goto out_free_sie_block;
	VM_EVENT(kvm, 3, "create cpu %d at %p, sie block at %p", id, vcpu,
		 vcpu->arch.sie_block);

	return vcpu;
out_free_sie_block:
	free_page((unsigned long)(vcpu->arch.sie_block));
out_free_cpu:
	kfree(vcpu);
out_nomem:
	return ERR_PTR(rc);
}
356
/* Not used on s390; trip a BUG if generic code ever calls it. */
int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
{
	/* kvm common code refers to this, but never calls it */
	BUG();
	return 0;
}
363
/* KVM_S390_INITIAL_RESET handler: reset the vcpu with its state loaded. */
static int kvm_arch_vcpu_ioctl_initial_reset(struct kvm_vcpu *vcpu)
{
	vcpu_load(vcpu);
	kvm_s390_vcpu_initial_reset(vcpu);
	vcpu_put(vcpu);
	return 0;
}
371
/* Copy all general purpose registers from userspace into the vcpu. */
int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
	vcpu_load(vcpu);
	memcpy(&vcpu->arch.guest_gprs, &regs->gprs, sizeof(regs->gprs));
	vcpu_put(vcpu);
	return 0;
}
379
/* Copy all general purpose registers from the vcpu out to userspace. */
int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
	vcpu_load(vcpu);
	memcpy(&regs->gprs, &vcpu->arch.guest_gprs, sizeof(regs->gprs));
	vcpu_put(vcpu);
	return 0;
}
387
/* Set the vcpu's access registers and control registers from userspace. */
int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
				  struct kvm_sregs *sregs)
{
	vcpu_load(vcpu);
	memcpy(&vcpu->arch.guest_acrs, &sregs->acrs, sizeof(sregs->acrs));
	memcpy(&vcpu->arch.sie_block->gcr, &sregs->crs, sizeof(sregs->crs));
	vcpu_put(vcpu);
	return 0;
}
397
/* Read the vcpu's access registers and control registers for userspace. */
int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
				  struct kvm_sregs *sregs)
{
	vcpu_load(vcpu);
	memcpy(&sregs->acrs, &vcpu->arch.guest_acrs, sizeof(sregs->acrs));
	memcpy(&sregs->crs, &vcpu->arch.sie_block->gcr, sizeof(sregs->crs));
	vcpu_put(vcpu);
	return 0;
}
407
/* Set the vcpu's floating point registers and FP control from userspace. */
int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
	vcpu_load(vcpu);
	memcpy(&vcpu->arch.guest_fpregs.fprs, &fpu->fprs, sizeof(fpu->fprs));
	vcpu->arch.guest_fpregs.fpc = fpu->fpc;
	vcpu_put(vcpu);
	return 0;
}
416
/* Read the vcpu's floating point registers and FP control for userspace. */
int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
	vcpu_load(vcpu);
	memcpy(&fpu->fprs, &vcpu->arch.guest_fpregs.fprs, sizeof(fpu->fprs));
	fpu->fpc = vcpu->arch.guest_fpregs.fpc;
	vcpu_put(vcpu);
	return 0;
}
425
426 static int kvm_arch_vcpu_ioctl_set_initial_psw(struct kvm_vcpu *vcpu, psw_t psw)
427 {
428         int rc = 0;
429
430         vcpu_load(vcpu);
431         if (atomic_read(&vcpu->arch.sie_block->cpuflags) & CPUSTAT_RUNNING)
432                 rc = -EBUSY;
433         else {
434                 vcpu->run->psw_mask = psw.mask;
435                 vcpu->run->psw_addr = psw.addr;
436         }
437         vcpu_put(vcpu);
438         return rc;
439 }
440
/* Address translation ioctl: unimplemented on s390. */
int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
				  struct kvm_translation *tr)
{
	return -EINVAL; /* not implemented yet */
}
446
/* Guest debugging ioctl: unimplemented on s390. */
int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
					struct kvm_guest_debug *dbg)
{
	return -EINVAL; /* not implemented yet */
}
452
/* Query multiprocessing state: unimplemented on s390. */
int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
				    struct kvm_mp_state *mp_state)
{
	return -EINVAL; /* not implemented yet */
}
458
/* Set multiprocessing state: unimplemented on s390. */
int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
				    struct kvm_mp_state *mp_state)
{
	return -EINVAL; /* not implemented yet */
}
464
/*
 * Perform one SIE entry/exit cycle for the vcpu: deliver pending
 * interrupts, enter the guest via sie64a(), and account guest time.
 */
static void __vcpu_run(struct kvm_vcpu *vcpu)
{
	/* gg14 in the SIE block shadows gprs 14/15 (16 bytes). */
	memcpy(&vcpu->arch.sie_block->gg14, &vcpu->arch.guest_gprs[14], 16);

	if (need_resched())
		schedule();

	/* Handle pending machine checks before entering the guest. */
	if (test_thread_flag(TIF_MCCK_PENDING))
		s390_handle_mcck();

	kvm_s390_deliver_pending_interrupts(vcpu);

	vcpu->arch.sie_block->icptcode = 0;
	/* kvm_guest_enter/exit must run with interrupts disabled. */
	local_irq_disable();
	kvm_guest_enter();
	local_irq_enable();
	VCPU_EVENT(vcpu, 6, "entering sie flags %x",
		   atomic_read(&vcpu->arch.sie_block->cpuflags));
	if (sie64a(vcpu->arch.sie_block, vcpu->arch.guest_gprs)) {
		/* SIE itself faulted: reflect an addressing exception. */
		VCPU_EVENT(vcpu, 3, "%s", "fault in sie instruction");
		kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
	}
	VCPU_EVENT(vcpu, 6, "exit sie icptcode %d",
		   vcpu->arch.sie_block->icptcode);
	local_irq_disable();
	kvm_guest_exit();
	local_irq_enable();

	/* Copy the shadowed gprs 14/15 back from the SIE block. */
	memcpy(&vcpu->arch.guest_gprs[14], &vcpu->arch.sie_block->gg14, 16);
}
495
/*
 * Main vcpu run loop (KVM_RUN): repeatedly enter SIE and handle
 * intercepts in-kernel until an intercept needs userspace, an error
 * occurs or a signal is pending. Translates internal return codes into
 * the kvm_run exit_reason contract.
 */
int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
{
	int rc;
	sigset_t sigsaved;

	vcpu_load(vcpu);

rerun_vcpu:
	/* Reload the guest memory layout into the SIE block if requested. */
	if (vcpu->requests)
		if (test_and_clear_bit(KVM_REQ_MMU_RELOAD, &vcpu->requests))
			kvm_s390_vcpu_set_mem(vcpu);

	/* verify, that memory has been registered */
	if (!vcpu->arch.sie_block->gmslm) {
		vcpu_put(vcpu);
		VCPU_EVENT(vcpu, 3, "%s", "no memory registered to run vcpu");
		return -EINVAL;
	}

	/* Temporarily install the vcpu's signal mask for the run. */
	if (vcpu->sigset_active)
		sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved);

	atomic_set_mask(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags);

	BUG_ON(vcpu->kvm->arch.float_int.local_int[vcpu->vcpu_id] == NULL);

	/* Only these exit reasons are legal when re-entering the run loop. */
	switch (kvm_run->exit_reason) {
	case KVM_EXIT_S390_SIEIC:
	case KVM_EXIT_UNKNOWN:
	case KVM_EXIT_INTR:
	case KVM_EXIT_S390_RESET:
		break;
	default:
		BUG();
	}

	/* Userspace may have modified the PSW; propagate into the SIE block. */
	vcpu->arch.sie_block->gpsw.mask = kvm_run->psw_mask;
	vcpu->arch.sie_block->gpsw.addr = kvm_run->psw_addr;

	might_fault();

	do {
		__vcpu_run(vcpu);
		rc = kvm_handle_sie_intercept(vcpu);
	} while (!signal_pending(current) && !rc);

	if (rc == SIE_INTERCEPT_RERUNVCPU)
		goto rerun_vcpu;

	if (signal_pending(current) && !rc) {
		kvm_run->exit_reason = KVM_EXIT_INTR;
		rc = -EINTR;
	}

	if (rc == -EOPNOTSUPP) {
		/* intercept cannot be handled in-kernel, prepare kvm-run */
		kvm_run->exit_reason         = KVM_EXIT_S390_SIEIC;
		kvm_run->s390_sieic.icptcode = vcpu->arch.sie_block->icptcode;
		kvm_run->s390_sieic.ipa      = vcpu->arch.sie_block->ipa;
		kvm_run->s390_sieic.ipb      = vcpu->arch.sie_block->ipb;
		rc = 0;
	}

	if (rc == -EREMOTE) {
		/* intercept was handled, but userspace support is needed
		 * kvm_run has been prepared by the handler */
		rc = 0;
	}

	/* Reflect the final guest PSW back to userspace. */
	kvm_run->psw_mask     = vcpu->arch.sie_block->gpsw.mask;
	kvm_run->psw_addr     = vcpu->arch.sie_block->gpsw.addr;

	if (vcpu->sigset_active)
		sigprocmask(SIG_SETMASK, &sigsaved, NULL);

	vcpu_put(vcpu);

	vcpu->stat.exit_userspace++;
	return rc;
}
576
577 static int __guestcopy(struct kvm_vcpu *vcpu, u64 guestdest, const void *from,
578                        unsigned long n, int prefix)
579 {
580         if (prefix)
581                 return copy_to_guest(vcpu, guestdest, from, n);
582         else
583                 return copy_to_guest_absolute(vcpu, guestdest, from, n);
584 }
585
586 /*
587  * store status at address
 * we have two special cases:
589  * KVM_S390_STORE_STATUS_NOADDR: -> 0x1200 on 64 bit
590  * KVM_S390_STORE_STATUS_PREFIXED: -> prefix
591  */
/*
 * Store the vcpu's architected status (registers, PSW, timers) into the
 * guest's save area at 'addr'. The two special address values select the
 * default save area with absolute (NOADDR) or prefixed (PREFIXED)
 * addressing; 163 is the architected byte for the archmode indicator.
 * Returns 0 on success, -EFAULT if any guest copy fails.
 * Field sizes: 128 = 16 FP/GP/control regs * 8, 64 = 16 acrs * 4.
 */
int __kvm_s390_vcpu_store_status(struct kvm_vcpu *vcpu, unsigned long addr)
{
	const unsigned char archmode = 1;
	int prefix;

	if (addr == KVM_S390_STORE_STATUS_NOADDR) {
		if (copy_to_guest_absolute(vcpu, 163ul, &archmode, 1))
			return -EFAULT;
		addr = SAVE_AREA_BASE;
		prefix = 0;
	} else if (addr == KVM_S390_STORE_STATUS_PREFIXED) {
		if (copy_to_guest(vcpu, 163ul, &archmode, 1))
			return -EFAULT;
		addr = SAVE_AREA_BASE;
		prefix = 1;
	} else
		prefix = 0;

	if (__guestcopy(vcpu, addr + offsetof(struct save_area, fp_regs),
			vcpu->arch.guest_fpregs.fprs, 128, prefix))
		return -EFAULT;

	if (__guestcopy(vcpu, addr + offsetof(struct save_area, gp_regs),
			vcpu->arch.guest_gprs, 128, prefix))
		return -EFAULT;

	if (__guestcopy(vcpu, addr + offsetof(struct save_area, psw),
			&vcpu->arch.sie_block->gpsw, 16, prefix))
		return -EFAULT;

	if (__guestcopy(vcpu, addr + offsetof(struct save_area, pref_reg),
			&vcpu->arch.sie_block->prefix, 4, prefix))
		return -EFAULT;

	if (__guestcopy(vcpu,
			addr + offsetof(struct save_area, fp_ctrl_reg),
			&vcpu->arch.guest_fpregs.fpc, 4, prefix))
		return -EFAULT;

	if (__guestcopy(vcpu, addr + offsetof(struct save_area, tod_reg),
			&vcpu->arch.sie_block->todpr, 4, prefix))
		return -EFAULT;

	if (__guestcopy(vcpu, addr + offsetof(struct save_area, timer),
			&vcpu->arch.sie_block->cputm, 8, prefix))
		return -EFAULT;

	if (__guestcopy(vcpu, addr + offsetof(struct save_area, clk_cmp),
			&vcpu->arch.sie_block->ckc, 8, prefix))
		return -EFAULT;

	if (__guestcopy(vcpu, addr + offsetof(struct save_area, acc_regs),
			&vcpu->arch.guest_acrs, 64, prefix))
		return -EFAULT;

	if (__guestcopy(vcpu,
			addr + offsetof(struct save_area, ctrl_regs),
			&vcpu->arch.sie_block->gcr, 128, prefix))
		return -EFAULT;
	return 0;
}
653
/* Locked wrapper: store vcpu status with the vcpu state loaded. */
static int kvm_s390_vcpu_store_status(struct kvm_vcpu *vcpu, unsigned long addr)
{
	int rc;

	vcpu_load(vcpu);
	rc = __kvm_s390_vcpu_store_status(vcpu, addr);
	vcpu_put(vcpu);
	return rc;
}
663
664 long kvm_arch_vcpu_ioctl(struct file *filp,
665                          unsigned int ioctl, unsigned long arg)
666 {
667         struct kvm_vcpu *vcpu = filp->private_data;
668         void __user *argp = (void __user *)arg;
669
670         switch (ioctl) {
671         case KVM_S390_INTERRUPT: {
672                 struct kvm_s390_interrupt s390int;
673
674                 if (copy_from_user(&s390int, argp, sizeof(s390int)))
675                         return -EFAULT;
676                 return kvm_s390_inject_vcpu(vcpu, &s390int);
677         }
678         case KVM_S390_STORE_STATUS:
679                 return kvm_s390_vcpu_store_status(vcpu, arg);
680         case KVM_S390_SET_INITIAL_PSW: {
681                 psw_t psw;
682
683                 if (copy_from_user(&psw, argp, sizeof(psw)))
684                         return -EFAULT;
685                 return kvm_arch_vcpu_ioctl_set_initial_psw(vcpu, psw);
686         }
687         case KVM_S390_INITIAL_RESET:
688                 return kvm_arch_vcpu_ioctl_initial_reset(vcpu);
689         default:
690                 ;
691         }
692         return -EINVAL;
693 }
694
695 /* Section: memory related */
/*
 * Validate a userspace memory region before it is installed. s390
 * supports exactly one slot; it must start at guest physical zero and
 * be page-aligned in both address and size.
 * Returns 0 if acceptable, -EINVAL otherwise.
 */
int kvm_arch_prepare_memory_region(struct kvm *kvm,
				   struct kvm_memory_slot *memslot,
				   struct kvm_memory_slot old,
				   struct kvm_userspace_memory_region *mem,
				   int user_alloc)
{
	/* A few sanity checks. We can have exactly one memory slot which has
	   to start at guest virtual zero and which has to be located at a
	   page boundary in userland and which has to end at a page boundary.
	   The memory in userland is ok to be fragmented into various different
	   vmas. It is okay to mmap() and munmap() stuff in this slot after
	   doing this call at any time */

	if (mem->slot)
		return -EINVAL;

	if (mem->guest_phys_addr)
		return -EINVAL;

	if (mem->userspace_addr & (PAGE_SIZE - 1))
		return -EINVAL;

	if (mem->memory_size & (PAGE_SIZE - 1))
		return -EINVAL;

	if (!user_alloc)
		return -EINVAL;

	return 0;
}
726
/*
 * A memory region was (re)installed: ask every vcpu to reload its SIE
 * memory layout, stopping vcpus that do not already have a reload
 * pending so they pick up the change.
 */
void kvm_arch_commit_memory_region(struct kvm *kvm,
				struct kvm_userspace_memory_region *mem,
				struct kvm_memory_slot old,
				int user_alloc)
{
	int i;
	struct kvm_vcpu *vcpu;

	/* request update of sie control block for all available vcpus */
	kvm_for_each_vcpu(i, vcpu, kvm) {
		/* Skip vcpus that already have a reload request queued. */
		if (test_and_set_bit(KVM_REQ_MMU_RELOAD, &vcpu->requests))
			continue;
		kvm_s390_inject_sigp_stop(vcpu, ACTION_RELOADVCPU_ON_STOP);
	}
}
742
/* No shadow page tables on s390; nothing to flush. */
void kvm_arch_flush_shadow(struct kvm *kvm)
{
}
746
/* No gfn aliasing on s390; the identity mapping is returned. */
gfn_t unalias_gfn(struct kvm *kvm, gfn_t gfn)
{
	return gfn;
}
751
/*
 * Module init: register with generic KVM, then build the facility list
 * page that is exposed to guests via each vcpu's SIE block.
 * Returns 0 on success or a negative errno (undoing kvm_init on
 * allocation failure).
 */
static int __init kvm_s390_init(void)
{
	int ret;
	ret = kvm_init(NULL, sizeof(struct kvm_vcpu), 0, THIS_MODULE);
	if (ret)
		return ret;

	/*
	 * guests can ask for up to 255+1 double words, we need a full page
	 * to hold the maximum amount of facilities. On the other hand, we
	 * only set facilities that are known to work in KVM.
	 */
	facilities = (unsigned long long *) get_zeroed_page(GFP_KERNEL|GFP_DMA);
	if (!facilities) {
		kvm_exit();
		return -ENOMEM;
	}
	stfle(facilities, 1);
	/* Keep only the facility bits that KVM is known to handle. */
	facilities[0] &= 0xff00fff3f0700000ULL;
	return 0;
}
773
/* Module exit: release the facility page and unregister from KVM. */
static void __exit kvm_s390_exit(void)
{
	free_page((unsigned long) facilities);
	kvm_exit();
}
779
/* Register module entry/exit points. */
module_init(kvm_s390_init);
module_exit(kvm_s390_exit);