/*
 * Copyright 2011 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com>
 * Copyright (C) 2009. SUSE Linux Products GmbH. All rights reserved.
 * Paul Mackerras <paulus@au1.ibm.com>
 * Alexander Graf <agraf@suse.de>
 * Kevin Wolf <mail@kevin-wolf.de>
 * Description: KVM functions specific to running on Book 3S
 * processors in hypervisor mode (specifically POWER7 and later).
 * This file is derived from arch/powerpc/kvm/book3s.c,
 * by Alexander Graf <agraf@suse.de>.
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 */
#include <linux/kvm_host.h>
#include <linux/err.h>
#include <linux/slab.h>
#include <linux/preempt.h>
#include <linux/sched.h>
#include <linux/delay.h>
#include <linux/export.h>
#include <linux/anon_inodes.h>
#include <linux/cpumask.h>
#include <linux/spinlock.h>
#include <linux/page-flags.h>

#include <asm/cputable.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
#include <asm/uaccess.h>
#include <asm/kvm_ppc.h>
#include <asm/kvm_book3s.h>
#include <asm/mmu_context.h>
#include <asm/lppaca.h>
#include <asm/processor.h>
#include <asm/cputhreads.h>
#include <asm/hvcall.h>
#include <linux/gfp.h>
#include <linux/vmalloc.h>
#include <linux/highmem.h>
#include <linux/hugetlb.h>
/* #define EXIT_DEBUG */
/* #define EXIT_DEBUG_SIMPLE */
/* #define EXIT_DEBUG_INT */

static void kvmppc_end_cede(struct kvm_vcpu *vcpu);
static int kvmppc_hv_setup_rma(struct kvm_vcpu *vcpu);
void kvmppc_core_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
	local_paca->kvm_hstate.kvm_vcpu = vcpu;
	local_paca->kvm_hstate.kvm_vcore = vcpu->arch.vcore;

void kvmppc_core_vcpu_put(struct kvm_vcpu *vcpu)

void kvmppc_set_msr(struct kvm_vcpu *vcpu, u64 msr)
	vcpu->arch.shregs.msr = msr;
	kvmppc_end_cede(vcpu);

void kvmppc_set_pvr(struct kvm_vcpu *vcpu, u32 pvr)
void kvmppc_dump_regs(struct kvm_vcpu *vcpu)
	pr_err("vcpu %p (%d):\n", vcpu, vcpu->vcpu_id);
	pr_err("pc = %.16lx msr = %.16llx trap = %x\n",
	       vcpu->arch.pc, vcpu->arch.shregs.msr, vcpu->arch.trap);
	for (r = 0; r < 16; ++r)
		pr_err("r%2d = %.16lx r%d = %.16lx\n",
		       r, kvmppc_get_gpr(vcpu, r),
		       r+16, kvmppc_get_gpr(vcpu, r+16));
	pr_err("ctr = %.16lx lr = %.16lx\n",
	       vcpu->arch.ctr, vcpu->arch.lr);
	pr_err("srr0 = %.16llx srr1 = %.16llx\n",
	       vcpu->arch.shregs.srr0, vcpu->arch.shregs.srr1);
	pr_err("sprg0 = %.16llx sprg1 = %.16llx\n",
	       vcpu->arch.shregs.sprg0, vcpu->arch.shregs.sprg1);
	pr_err("sprg2 = %.16llx sprg3 = %.16llx\n",
	       vcpu->arch.shregs.sprg2, vcpu->arch.shregs.sprg3);
	pr_err("cr = %.8x xer = %.16lx dsisr = %.8x\n",
	       vcpu->arch.cr, vcpu->arch.xer, vcpu->arch.shregs.dsisr);
	pr_err("dar = %.16llx\n", vcpu->arch.shregs.dar);
	pr_err("fault dar = %.16lx dsisr = %.8x\n",
	       vcpu->arch.fault_dar, vcpu->arch.fault_dsisr);
	pr_err("SLB (%d entries):\n", vcpu->arch.slb_max);
	for (r = 0; r < vcpu->arch.slb_max; ++r)
		pr_err(" ESID = %.16llx VSID = %.16llx\n",
		       vcpu->arch.slb[r].orige, vcpu->arch.slb[r].origv);
	pr_err("lpcr = %.16lx sdr1 = %.16lx last_inst = %.8x\n",
	       vcpu->kvm->arch.lpcr, vcpu->kvm->arch.sdr1,
	       vcpu->arch.last_inst);
struct kvm_vcpu *kvmppc_find_vcpu(struct kvm *kvm, int id)
	struct kvm_vcpu *v, *ret = NULL;

	mutex_lock(&kvm->lock);
	kvm_for_each_vcpu(r, v, kvm) {
		if (v->vcpu_id == id) {
	mutex_unlock(&kvm->lock);

static void init_vpa(struct kvm_vcpu *vcpu, struct lppaca *vpa)
	vpa->shared_proc = 1;
	vpa->yield_count = 1;
static unsigned long do_h_register_vpa(struct kvm_vcpu *vcpu,
				       unsigned long vcpuid, unsigned long vpa)
	struct kvm *kvm = vcpu->kvm;
	unsigned long len, nb;
	struct kvm_vcpu *tvcpu;
	int err = H_PARAMETER;

	tvcpu = kvmppc_find_vcpu(kvm, vcpuid);
	if (flags == 0 || flags == 4)
	if (flags >= 2 && !tvcpu->arch.vpa)
	/* registering new area; convert logical addr to real */
	va = kvmppc_pin_guest_page(kvm, vpa, &nb);
		len = *(unsigned short *)(va + 4);
		len = *(unsigned int *)(va + 4);
	case 1: /* register VPA */
		kvmppc_unpin_guest_page(kvm, tvcpu->arch.vpa);
		tvcpu->arch.vpa = va;
	case 2: /* register DTL */
		kvmppc_unpin_guest_page(kvm, tvcpu->arch.dtl);
		tvcpu->arch.dtl = va;
		tvcpu->arch.dtl_end = va + len;
	case 3: /* register SLB shadow buffer */
		if (tvcpu->arch.slb_shadow)
			kvmppc_unpin_guest_page(kvm, tvcpu->arch.slb_shadow);
		tvcpu->arch.slb_shadow = va;
	case 5: /* unregister VPA */
		if (tvcpu->arch.slb_shadow || tvcpu->arch.dtl)
		if (!tvcpu->arch.vpa)
		kvmppc_unpin_guest_page(kvm, tvcpu->arch.vpa);
		tvcpu->arch.vpa = NULL;
	case 6: /* unregister DTL */
		if (!tvcpu->arch.dtl)
		kvmppc_unpin_guest_page(kvm, tvcpu->arch.dtl);
		tvcpu->arch.dtl = NULL;
	case 7: /* unregister SLB shadow buffer */
		if (!tvcpu->arch.slb_shadow)
		kvmppc_unpin_guest_page(kvm, tvcpu->arch.slb_shadow);
		tvcpu->arch.slb_shadow = NULL;
	kvmppc_unpin_guest_page(kvm, va);
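/*
 * Editorial note (not part of the original source): the subfunction values
 * handled above follow the H_REGISTER_VPA convention used by this code:
 * 1/2/3 register the VPA, dispatch trace log (DTL) and SLB shadow buffer
 * respectively, and 5/6/7 unregister them. The ordering constraints are
 * visible in the checks above: the DTL and SLB shadow can only be
 * registered while a VPA is already registered, and the VPA can only be
 * unregistered after the DTL and SLB shadow have been unregistered.
 */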
int kvmppc_pseries_do_hcall(struct kvm_vcpu *vcpu)
	unsigned long req = kvmppc_get_gpr(vcpu, 3);
	unsigned long target, ret = H_SUCCESS;
	struct kvm_vcpu *tvcpu;

	ret = kvmppc_virtmode_h_enter(vcpu, kvmppc_get_gpr(vcpu, 4),
				      kvmppc_get_gpr(vcpu, 5),
				      kvmppc_get_gpr(vcpu, 6),
				      kvmppc_get_gpr(vcpu, 7));
	target = kvmppc_get_gpr(vcpu, 4);
	tvcpu = kvmppc_find_vcpu(vcpu->kvm, target);
	tvcpu->arch.prodded = 1;
	if (vcpu->arch.ceded) {
		if (waitqueue_active(&vcpu->wq)) {
			wake_up_interruptible(&vcpu->wq);
			vcpu->stat.halt_wakeup++;
	ret = do_h_register_vpa(vcpu, kvmppc_get_gpr(vcpu, 4),
				kvmppc_get_gpr(vcpu, 5),
				kvmppc_get_gpr(vcpu, 6));
	kvmppc_set_gpr(vcpu, 3, ret);
	vcpu->arch.hcall_needed = 0;
static int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
			      struct task_struct *tsk)
	vcpu->stat.sum_exits++;

	run->exit_reason = KVM_EXIT_UNKNOWN;
	run->ready_for_interrupt_injection = 1;
	switch (vcpu->arch.trap) {
	/* We're good on these - the host merely wanted to get our attention */
	case BOOK3S_INTERRUPT_HV_DECREMENTER:
		vcpu->stat.dec_exits++;
	case BOOK3S_INTERRUPT_EXTERNAL:
		vcpu->stat.ext_intr_exits++;
	case BOOK3S_INTERRUPT_PERFMON:
	case BOOK3S_INTERRUPT_PROGRAM:
		/*
		 * Normally program interrupts are delivered directly
		 * to the guest by the hardware, but we can get here
		 * as a result of a hypervisor emulation interrupt
		 * (e40) getting turned into a 700 by BML RTAS.
		 */
		flags = vcpu->arch.shregs.msr & 0x1f0000ull;
		kvmppc_core_queue_program(vcpu, flags);
	case BOOK3S_INTERRUPT_SYSCALL:
		/* hcall - punt to userspace */
		if (vcpu->arch.shregs.msr & MSR_PR) {
			/* sc 1 from userspace - reflect to guest syscall */
			kvmppc_book3s_queue_irqprio(vcpu, BOOK3S_INTERRUPT_SYSCALL);
		run->papr_hcall.nr = kvmppc_get_gpr(vcpu, 3);
		for (i = 0; i < 9; ++i)
			run->papr_hcall.args[i] = kvmppc_get_gpr(vcpu, 4 + i);
		run->exit_reason = KVM_EXIT_PAPR_HCALL;
		vcpu->arch.hcall_needed = 1;
	/*
	 * We get these next two if the guest accesses a page which it thinks
	 * it has mapped but which is not actually present, either because
	 * it is for an emulated I/O device or because the corresponding
	 * host page has been paged out. Any other HDSI/HISI interrupts
	 * have been handled already.
	 */
	case BOOK3S_INTERRUPT_H_DATA_STORAGE:
		r = kvmppc_book3s_hv_page_fault(run, vcpu,
				vcpu->arch.fault_dar, vcpu->arch.fault_dsisr);
	case BOOK3S_INTERRUPT_H_INST_STORAGE:
		r = kvmppc_book3s_hv_page_fault(run, vcpu,
				kvmppc_get_pc(vcpu), 0);
	/*
	 * This occurs if the guest executes an illegal instruction.
	 * We just generate a program interrupt to the guest, since
	 * we don't emulate any guest instructions at this stage.
	 */
	case BOOK3S_INTERRUPT_H_EMUL_ASSIST:
		kvmppc_core_queue_program(vcpu, 0x80000);
		kvmppc_dump_regs(vcpu);
		printk(KERN_EMERG "trap=0x%x | pc=0x%lx | msr=0x%llx\n",
			vcpu->arch.trap, kvmppc_get_pc(vcpu),
			vcpu->arch.shregs.msr);
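/*
 * Illustrative sketch (not from the original source): how a userspace VMM
 * might consume the KVM_EXIT_PAPR_HCALL exit produced above. The hcall
 * number and arguments are read from kvm_run, the status is placed in
 * run->papr_hcall.ret, and KVM_RUN is re-entered; KVM then copies .ret back
 * into the guest's r3 (see vcpu->arch.hcall_needed above). The helper
 * handle_one_hcall() is an assumption, not part of this file. Userspace-only
 * code, kept out of the kernel build:
 */
#if 0
#include <linux/kvm.h>
#include <sys/ioctl.h>

/* Hypothetical userspace hcall emulator, not defined here. */
extern __u64 handle_one_hcall(__u64 nr, __u64 *args);

static void example_run_loop(int vcpu_fd, struct kvm_run *run)
{
	for (;;) {
		if (ioctl(vcpu_fd, KVM_RUN, 0) < 0)
			break;
		if (run->exit_reason == KVM_EXIT_PAPR_HCALL) {
			run->papr_hcall.ret = handle_one_hcall(run->papr_hcall.nr,
							       run->papr_hcall.args);
			continue;	/* resume the guest with the hcall result */
		}
		/* handling of other exit reasons elided */
		break;
	}
}
#endif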
int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
				  struct kvm_sregs *sregs)
	memset(sregs, 0, sizeof(struct kvm_sregs));

	sregs->pvr = vcpu->arch.pvr;
	for (i = 0; i < vcpu->arch.slb_max; i++) {
		sregs->u.s.ppc64.slb[i].slbe = vcpu->arch.slb[i].orige;
		sregs->u.s.ppc64.slb[i].slbv = vcpu->arch.slb[i].origv;

int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
				  struct kvm_sregs *sregs)
	kvmppc_set_pvr(vcpu, sregs->pvr);

	for (i = 0; i < vcpu->arch.slb_nr; i++) {
		if (sregs->u.s.ppc64.slb[i].slbe & SLB_ESID_V) {
			vcpu->arch.slb[j].orige = sregs->u.s.ppc64.slb[i].slbe;
			vcpu->arch.slb[j].origv = sregs->u.s.ppc64.slb[i].slbv;
	vcpu->arch.slb_max = j;
int kvm_vcpu_ioctl_get_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg)
	case KVM_REG_PPC_HIOR:
		r = put_user(0, (u64 __user *)reg->addr);

int kvm_vcpu_ioctl_set_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg)
	case KVM_REG_PPC_HIOR:
		/* Only allow this to be set to zero */
		r = get_user(hior, (u64 __user *)reg->addr);
		if (!r && (hior != 0))
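/*
 * Illustrative sketch (not from the original source): userspace setting
 * KVM_REG_PPC_HIOR through the one_reg interface. As the handler above
 * enforces, only the value 0 is accepted by this HV implementation.
 * Userspace-only code, kept out of the kernel build:
 */
#if 0
#include <linux/kvm.h>
#include <sys/ioctl.h>

static int example_clear_hior(int vcpu_fd)
{
	__u64 hior = 0;
	struct kvm_one_reg reg = {
		.id   = KVM_REG_PPC_HIOR,
		.addr = (__u64)(unsigned long)&hior,
	};

	return ioctl(vcpu_fd, KVM_SET_ONE_REG, &reg);
}
#endif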
int kvmppc_core_check_processor_compat(void)
	if (cpu_has_feature(CPU_FTR_HVMODE))

struct kvm_vcpu *kvmppc_core_vcpu_create(struct kvm *kvm, unsigned int id)
	struct kvm_vcpu *vcpu;
	struct kvmppc_vcore *vcore;

	core = id / threads_per_core;
	if (core >= KVM_MAX_VCORES)
	vcpu = kmem_cache_zalloc(kvm_vcpu_cache, GFP_KERNEL);
	err = kvm_vcpu_init(vcpu, kvm, id);
	vcpu->arch.shared = &vcpu->arch.shregs;
	vcpu->arch.last_cpu = -1;
	vcpu->arch.mmcr[0] = MMCR0_FC;
	vcpu->arch.ctrl = CTRL_RUNLATCH;
	/* default to host PVR, since we can't spoof it */
	vcpu->arch.pvr = mfspr(SPRN_PVR);
	kvmppc_set_pvr(vcpu, vcpu->arch.pvr);

	kvmppc_mmu_book3s_hv_init(vcpu);

	/*
	 * We consider the vcpu stopped until we see the first run ioctl for it.
	 */
	vcpu->arch.state = KVMPPC_VCPU_STOPPED;

	init_waitqueue_head(&vcpu->arch.cpu_run);

	mutex_lock(&kvm->lock);
	vcore = kvm->arch.vcores[core];
	vcore = kzalloc(sizeof(struct kvmppc_vcore), GFP_KERNEL);
	INIT_LIST_HEAD(&vcore->runnable_threads);
	spin_lock_init(&vcore->lock);
	init_waitqueue_head(&vcore->wq);
	kvm->arch.vcores[core] = vcore;
	mutex_unlock(&kvm->lock);

	spin_lock(&vcore->lock);
	++vcore->num_threads;
	spin_unlock(&vcore->lock);
	vcpu->arch.vcore = vcore;

	vcpu->arch.cpu_type = KVM_CPU_3S_64;
	kvmppc_sanity_check(vcpu);

	kmem_cache_free(kvm_vcpu_cache, vcpu);
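/*
 * Editorial worked example (not from the original source): with
 * threads_per_core = 4, vcpu ids 0-3 share virtual core 0, ids 4-7 share
 * virtual core 1, and so on; each vcore (with its lock, runnable list and
 * wait queue) is allocated lazily when the first vcpu of that core is
 * created, and subsequent vcpus of the same core just join it.
 */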
void kvmppc_core_vcpu_free(struct kvm_vcpu *vcpu)
	kvmppc_unpin_guest_page(vcpu->kvm, vcpu->arch.dtl);
	if (vcpu->arch.slb_shadow)
		kvmppc_unpin_guest_page(vcpu->kvm, vcpu->arch.slb_shadow);
	kvmppc_unpin_guest_page(vcpu->kvm, vcpu->arch.vpa);
	kvm_vcpu_uninit(vcpu);
	kmem_cache_free(kvm_vcpu_cache, vcpu);
static void kvmppc_set_timer(struct kvm_vcpu *vcpu)
	unsigned long dec_nsec, now;

	if (now > vcpu->arch.dec_expires) {
		/* decrementer has already gone negative */
		kvmppc_core_queue_dec(vcpu);
		kvmppc_core_prepare_to_enter(vcpu);
	dec_nsec = (vcpu->arch.dec_expires - now) * NSEC_PER_SEC
	hrtimer_start(&vcpu->arch.dec_timer, ktime_set(0, dec_nsec),
	vcpu->arch.timer_running = 1;
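/*
 * Editorial note (not from the original source): dec_expires and now are in
 * timebase ticks, so the remaining count is scaled by NSEC_PER_SEC over the
 * timebase frequency to get nanoseconds for the hrtimer. As a worked
 * example, with a 512 MHz timebase, 512 remaining ticks correspond to
 * roughly 1 microsecond of wall-clock time.
 */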
static void kvmppc_end_cede(struct kvm_vcpu *vcpu)
	vcpu->arch.ceded = 0;
	if (vcpu->arch.timer_running) {
		hrtimer_try_to_cancel(&vcpu->arch.dec_timer);
		vcpu->arch.timer_running = 0;

extern int __kvmppc_vcore_entry(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu);
extern void xics_wake_cpu(int cpu);

static void kvmppc_remove_runnable(struct kvmppc_vcore *vc,
				   struct kvm_vcpu *vcpu)
	if (vcpu->arch.state != KVMPPC_VCPU_RUNNABLE)
	vcpu->arch.state = KVMPPC_VCPU_BUSY_IN_HOST;
	/* decrement the physical thread id of each following vcpu */
	list_for_each_entry_continue(v, &vc->runnable_threads, arch.run_list)
	list_del(&vcpu->arch.run_list);
static void kvmppc_start_thread(struct kvm_vcpu *vcpu)
	struct paca_struct *tpaca;
	struct kvmppc_vcore *vc = vcpu->arch.vcore;

	if (vcpu->arch.timer_running) {
		hrtimer_try_to_cancel(&vcpu->arch.dec_timer);
		vcpu->arch.timer_running = 0;
	cpu = vc->pcpu + vcpu->arch.ptid;
	tpaca->kvm_hstate.kvm_vcpu = vcpu;
	tpaca->kvm_hstate.kvm_vcore = vc;
	tpaca->kvm_hstate.napping = 0;
	vcpu->cpu = vc->pcpu;
#if defined(CONFIG_PPC_ICP_NATIVE) && defined(CONFIG_SMP)
	if (vcpu->arch.ptid) {
		tpaca->cpu_start = 0x80;
static void kvmppc_wait_for_nap(struct kvmppc_vcore *vc)
	while (vc->nap_count < vc->n_woken) {
		if (++i >= 1000000) {
			pr_err("kvmppc_wait_for_nap timeout %d %d\n",
			       vc->nap_count, vc->n_woken);

/*
 * Check that we are on thread 0 and that any other threads in
 * this core are off-line.
 */
static int on_primary_thread(void)
	int cpu = smp_processor_id();
	int thr = cpu_thread_in_core(cpu);

	while (++thr < threads_per_core)
		if (cpu_online(cpu + thr))
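/*
 * Editorial worked example (not from the original source): with
 * threads_per_core = 4, CPU 6 is thread 2 of its core, so on_primary_thread()
 * fails there; on CPU 4 (thread 0) it succeeds only if CPUs 5-7 are offline,
 * since kvmppc_run_core() takes over every hardware thread of the core for
 * the guest.
 */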
/*
 * Run a set of guest threads on a physical core.
 * Called with vc->lock held.
 */
static int kvmppc_run_core(struct kvmppc_vcore *vc)
	struct kvm_vcpu *vcpu, *vcpu0, *vnext;

	/* don't start if any threads have a signal pending */
	list_for_each_entry(vcpu, &vc->runnable_threads, arch.run_list)
		if (signal_pending(vcpu->arch.run_task))

	/*
	 * Make sure we are running on thread 0, and that
	 * secondary threads are offline.
	 * XXX we should also block attempts to bring any
	 * secondary threads online.
	 */
	if (threads_per_core > 1 && !on_primary_thread()) {
		list_for_each_entry(vcpu, &vc->runnable_threads, arch.run_list)
			vcpu->arch.ret = -EBUSY;

	/*
	 * Assign physical thread IDs, first to non-ceded vcpus
	 * and then to ceded ones.
	 */
	list_for_each_entry(vcpu, &vc->runnable_threads, arch.run_list) {
		if (!vcpu->arch.ceded) {
			vcpu->arch.ptid = ptid++;
	return 0; /* nothing to run */
	list_for_each_entry(vcpu, &vc->runnable_threads, arch.run_list)
		if (vcpu->arch.ceded)
			vcpu->arch.ptid = ptid++;

	vc->entry_exit_count = 0;
	vc->vcore_state = VCORE_RUNNING;
	vc->pcpu = smp_processor_id();
	vc->napping_threads = 0;
	list_for_each_entry(vcpu, &vc->runnable_threads, arch.run_list)
		kvmppc_start_thread(vcpu);

	spin_unlock(&vc->lock);
	__kvmppc_vcore_entry(NULL, vcpu0);

	spin_lock(&vc->lock);
	/* disable sending of IPIs on virtual external irqs */
	list_for_each_entry(vcpu, &vc->runnable_threads, arch.run_list)
	/* wait for secondary threads to finish writing their state to memory */
	if (vc->nap_count < vc->n_woken)
		kvmppc_wait_for_nap(vc);
	/* prevent other vcpu threads from doing kvmppc_start_thread() now */
	vc->vcore_state = VCORE_EXITING;
	spin_unlock(&vc->lock);

	/* make sure updates to secondary vcpu structs are visible now */
	list_for_each_entry(vcpu, &vc->runnable_threads, arch.run_list) {
		/* cancel pending dec exception if dec is positive */
		if (now < vcpu->arch.dec_expires &&
		    kvmppc_core_pending_dec(vcpu))
			kvmppc_core_dequeue_dec(vcpu);
		ret = kvmppc_handle_exit(vcpu->arch.kvm_run, vcpu,
					 vcpu->arch.run_task);
		vcpu->arch.ret = ret;
		if (vcpu->arch.ceded) {
			if (ret != RESUME_GUEST)
				kvmppc_end_cede(vcpu);
			kvmppc_set_timer(vcpu);

	spin_lock(&vc->lock);
	vc->vcore_state = VCORE_INACTIVE;
	list_for_each_entry_safe(vcpu, vnext, &vc->runnable_threads,
		if (vcpu->arch.ret != RESUME_GUEST) {
			kvmppc_remove_runnable(vc, vcpu);
			wake_up(&vcpu->arch.cpu_run);
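/*
 * Editorial note (not from the original source): the vcore_state values seen
 * in this file form a small state machine. A vcore is VCORE_INACTIVE until a
 * vcpu task runs it, VCORE_RUNNING while the physical core is in the guest,
 * VCORE_EXITING while exit state is being collected from the hardware
 * threads, and VCORE_SLEEPING (see kvmppc_vcore_blocked() below) while every
 * runnable vcpu in it has ceded and the core is waiting for an interrupt.
 */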
/*
 * Wait for some other vcpu thread to execute us, and
 * wake us up when we need to handle something in the host.
 */
static void kvmppc_wait_for_exec(struct kvm_vcpu *vcpu, int wait_state)
	prepare_to_wait(&vcpu->arch.cpu_run, &wait, wait_state);
	if (vcpu->arch.state == KVMPPC_VCPU_RUNNABLE)
	finish_wait(&vcpu->arch.cpu_run, &wait);

/*
 * All the vcpus in this vcore are idle, so wait for a decrementer
 * or external interrupt to one of the vcpus. vc->lock is held.
 */
static void kvmppc_vcore_blocked(struct kvmppc_vcore *vc)
	prepare_to_wait(&vc->wq, &wait, TASK_INTERRUPTIBLE);
	vc->vcore_state = VCORE_SLEEPING;
	spin_unlock(&vc->lock);
	list_for_each_entry(v, &vc->runnable_threads, arch.run_list) {
		if (!v->arch.ceded || v->arch.pending_exceptions) {
	finish_wait(&vc->wq, &wait);
	spin_lock(&vc->lock);
	vc->vcore_state = VCORE_INACTIVE;
static int kvmppc_run_vcpu(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
	struct kvmppc_vcore *vc;
	struct kvm_vcpu *v, *vn;

	kvm_run->exit_reason = 0;
	vcpu->arch.ret = RESUME_GUEST;

	/*
	 * Synchronize with other threads in this virtual core
	 */
	vc = vcpu->arch.vcore;
	spin_lock(&vc->lock);
	vcpu->arch.ceded = 0;
	vcpu->arch.run_task = current;
	vcpu->arch.kvm_run = kvm_run;
	prev_state = vcpu->arch.state;
	vcpu->arch.state = KVMPPC_VCPU_RUNNABLE;
	list_add_tail(&vcpu->arch.run_list, &vc->runnable_threads);

	/*
	 * This happens the first time this is called for a vcpu.
	 * If the vcore is already running, we may be able to start
	 * this thread straight away and have it join in.
	 */
	if (prev_state == KVMPPC_VCPU_STOPPED) {
		if (vc->vcore_state == VCORE_RUNNING &&
		    VCORE_EXIT_COUNT(vc) == 0) {
			vcpu->arch.ptid = vc->n_runnable - 1;
			kvmppc_start_thread(vcpu);
	} else if (prev_state == KVMPPC_VCPU_BUSY_IN_HOST)

	while (vcpu->arch.state == KVMPPC_VCPU_RUNNABLE &&
	       !signal_pending(current)) {
		if (vc->n_busy || vc->vcore_state != VCORE_INACTIVE) {
			spin_unlock(&vc->lock);
			kvmppc_wait_for_exec(vcpu, TASK_INTERRUPTIBLE);
			spin_lock(&vc->lock);
		list_for_each_entry(v, &vc->runnable_threads, arch.run_list)
			n_ceded += v->arch.ceded;
		if (n_ceded == vc->n_runnable)
			kvmppc_vcore_blocked(vc);
		list_for_each_entry_safe(v, vn, &vc->runnable_threads,
			kvmppc_core_prepare_to_enter(v);
			if (signal_pending(v->arch.run_task)) {
				kvmppc_remove_runnable(vc, v);
				v->stat.signal_exits++;
				v->arch.kvm_run->exit_reason = KVM_EXIT_INTR;
				v->arch.ret = -EINTR;
				wake_up(&v->arch.cpu_run);

	if (signal_pending(current)) {
		if (vc->vcore_state == VCORE_RUNNING ||
		    vc->vcore_state == VCORE_EXITING) {
			spin_unlock(&vc->lock);
			kvmppc_wait_for_exec(vcpu, TASK_UNINTERRUPTIBLE);
			spin_lock(&vc->lock);
		if (vcpu->arch.state == KVMPPC_VCPU_RUNNABLE) {
			kvmppc_remove_runnable(vc, vcpu);
			vcpu->stat.signal_exits++;
			kvm_run->exit_reason = KVM_EXIT_INTR;
			vcpu->arch.ret = -EINTR;

	spin_unlock(&vc->lock);
	return vcpu->arch.ret;
int kvmppc_vcpu_run(struct kvm_run *run, struct kvm_vcpu *vcpu)
	if (!vcpu->arch.sane) {
		run->exit_reason = KVM_EXIT_INTERNAL_ERROR;

	kvmppc_core_prepare_to_enter(vcpu);

	/* No need to go into the guest when all we'll do is come back out */
	if (signal_pending(current)) {
		run->exit_reason = KVM_EXIT_INTR;

	/* On the first time here, set up VRMA or RMA */
	if (!vcpu->kvm->arch.rma_setup_done) {
		r = kvmppc_hv_setup_rma(vcpu);

	flush_fp_to_thread(current);
	flush_altivec_to_thread(current);
	flush_vsx_to_thread(current);
	vcpu->arch.wqp = &vcpu->arch.vcore->wq;
	vcpu->arch.pgdir = current->mm->pgd;

		r = kvmppc_run_vcpu(run, vcpu);

		if (run->exit_reason == KVM_EXIT_PAPR_HCALL &&
		    !(vcpu->arch.shregs.msr & MSR_PR)) {
			r = kvmppc_pseries_do_hcall(vcpu);
			kvmppc_core_prepare_to_enter(vcpu);
	} while (r == RESUME_GUEST);
static long kvmppc_stt_npages(unsigned long window_size)
	return ALIGN((window_size >> SPAPR_TCE_SHIFT)
		     * sizeof(u64), PAGE_SIZE) / PAGE_SIZE;
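/*
 * Editorial worked example (not from the original source, assuming the usual
 * 4 KB TCE page size, i.e. SPAPR_TCE_SHIFT == 12, and 4 KB host pages): a
 * 256 MB DMA window needs 65536 TCEs of 8 bytes each, i.e. 512 KB of table,
 * so kvmppc_stt_npages() returns 128.
 */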
static void release_spapr_tce_table(struct kvmppc_spapr_tce_table *stt)
	struct kvm *kvm = stt->kvm;

	mutex_lock(&kvm->lock);
	list_del(&stt->list);
	for (i = 0; i < kvmppc_stt_npages(stt->window_size); i++)
		__free_page(stt->pages[i]);
	mutex_unlock(&kvm->lock);

static int kvm_spapr_tce_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
	struct kvmppc_spapr_tce_table *stt = vma->vm_file->private_data;

	if (vmf->pgoff >= kvmppc_stt_npages(stt->window_size))
		return VM_FAULT_SIGBUS;
	page = stt->pages[vmf->pgoff];

static const struct vm_operations_struct kvm_spapr_tce_vm_ops = {
	.fault = kvm_spapr_tce_fault,

static int kvm_spapr_tce_mmap(struct file *file, struct vm_area_struct *vma)
	vma->vm_ops = &kvm_spapr_tce_vm_ops;

static int kvm_spapr_tce_release(struct inode *inode, struct file *filp)
	struct kvmppc_spapr_tce_table *stt = filp->private_data;

	release_spapr_tce_table(stt);

static struct file_operations kvm_spapr_tce_fops = {
	.mmap = kvm_spapr_tce_mmap,
	.release = kvm_spapr_tce_release,
long kvm_vm_ioctl_create_spapr_tce(struct kvm *kvm,
				   struct kvm_create_spapr_tce *args)
	struct kvmppc_spapr_tce_table *stt = NULL;

	/* Check this LIOBN hasn't been previously allocated */
	list_for_each_entry(stt, &kvm->arch.spapr_tce_tables, list) {
		if (stt->liobn == args->liobn)

	npages = kvmppc_stt_npages(args->window_size);

	stt = kzalloc(sizeof(*stt) + npages * sizeof(struct page *),
	stt->liobn = args->liobn;
	stt->window_size = args->window_size;

	for (i = 0; i < npages; i++) {
		stt->pages[i] = alloc_page(GFP_KERNEL | __GFP_ZERO);

	mutex_lock(&kvm->lock);
	list_add(&stt->list, &kvm->arch.spapr_tce_tables);
	mutex_unlock(&kvm->lock);

	return anon_inode_getfd("kvm-spapr-tce", &kvm_spapr_tce_fops,

	for (i = 0; i < npages; i++)
		__free_page(stt->pages[i]);
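/*
 * Illustrative sketch (not from the original source): how a userspace VMM
 * might use the ioctl above. KVM_CREATE_SPAPR_TCE returns a file descriptor
 * whose pages (served by kvm_spapr_tce_fault()) can be mmap'ed to give
 * userspace a direct view of the guest's TCE table; the LIOBN and window
 * size values here are illustrative only. Userspace-only code, kept out of
 * the kernel build:
 */
#if 0
#include <linux/kvm.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <stdint.h>

static uint64_t *example_map_tce_table(int vm_fd, size_t *len_out)
{
	struct kvm_create_spapr_tce args = {
		.liobn       = 0x80000000,	/* example LIOBN */
		.window_size = 256 << 20,	/* 256 MB DMA window */
	};
	int tce_fd;
	size_t len;

	tce_fd = ioctl(vm_fd, KVM_CREATE_SPAPR_TCE, &args);
	if (tce_fd < 0)
		return NULL;

	/* One 8-byte TCE per 4 KB of DMA window (assumed TCE page size). */
	len = ((size_t)args.window_size >> 12) * sizeof(uint64_t);
	*len_out = len;
	return mmap(NULL, len, PROT_READ | PROT_WRITE, MAP_SHARED, tce_fd, 0);
}
#endif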
/* Work out RMLS (real mode limit selector) field value for a given RMA size.
   Assumes POWER7 or PPC970. */
static inline int lpcr_rmls(unsigned long rma_size)
	case 32ul << 20: /* 32 MB */
		if (cpu_has_feature(CPU_FTR_ARCH_206))
			return 8; /* only supported on POWER7 */
	case 64ul << 20: /* 64 MB */
	case 128ul << 20: /* 128 MB */
	case 256ul << 20: /* 256 MB */
	case 1ul << 30: /* 1 GB */
	case 16ul << 30: /* 16 GB */
	case 256ul << 30: /* 256 GB */
static int kvm_rma_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
	struct kvmppc_linear_info *ri = vma->vm_file->private_data;

	if (vmf->pgoff >= ri->npages)
		return VM_FAULT_SIGBUS;
	page = pfn_to_page(ri->base_pfn + vmf->pgoff);

static const struct vm_operations_struct kvm_rma_vm_ops = {
	.fault = kvm_rma_fault,

static int kvm_rma_mmap(struct file *file, struct vm_area_struct *vma)
	vma->vm_flags |= VM_RESERVED;
	vma->vm_ops = &kvm_rma_vm_ops;

static int kvm_rma_release(struct inode *inode, struct file *filp)
	struct kvmppc_linear_info *ri = filp->private_data;

	kvm_release_rma(ri);

static struct file_operations kvm_rma_fops = {
	.mmap = kvm_rma_mmap,
	.release = kvm_rma_release,
long kvm_vm_ioctl_allocate_rma(struct kvm *kvm, struct kvm_allocate_rma *ret)
	struct kvmppc_linear_info *ri;

	ri = kvm_alloc_rma();
	fd = anon_inode_getfd("kvm-rma", &kvm_rma_fops, ri, O_RDWR);
		kvm_release_rma(ri);
	ret->rma_size = ri->npages << PAGE_SHIFT;
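/*
 * Illustrative sketch (not from the original source): userspace use of the
 * ioctl above on a host that needs a real mode area (e.g. PPC970). The RMA
 * fd is mmap'ed and then registered as the memory backing guest physical
 * address 0, which is what kvmppc_hv_setup_rma() below looks for. The slot
 * number and minimal error handling are illustrative. Userspace-only code,
 * kept out of the kernel build:
 */
#if 0
#include <linux/kvm.h>
#include <sys/ioctl.h>
#include <sys/mman.h>

static void *example_setup_rma(int vm_fd)
{
	struct kvm_allocate_rma rma;
	struct kvm_userspace_memory_region mem;
	void *host_addr;
	int rma_fd;

	rma_fd = ioctl(vm_fd, KVM_ALLOCATE_RMA, &rma);
	if (rma_fd < 0)
		return NULL;	/* no preallocated RMAs, or none needed on this CPU */

	host_addr = mmap(NULL, rma.rma_size, PROT_READ | PROT_WRITE,
			 MAP_SHARED, rma_fd, 0);
	if (host_addr == MAP_FAILED)
		return NULL;

	mem.slot            = 0;
	mem.flags           = 0;
	mem.guest_phys_addr = 0;	/* the RMA must sit at guest real address 0 */
	mem.memory_size     = rma.rma_size;
	mem.userspace_addr  = (unsigned long)host_addr;
	ioctl(vm_fd, KVM_SET_USER_MEMORY_REGION, &mem);

	return host_addr;
}
#endif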
/*
 * Get (and clear) the dirty memory log for a memory slot.
 */
int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm, struct kvm_dirty_log *log)
	struct kvm_memory_slot *memslot;

	mutex_lock(&kvm->slots_lock);
	if (log->slot >= KVM_MEMORY_SLOTS)
	memslot = id_to_memslot(kvm->memslots, log->slot);
	if (!memslot->dirty_bitmap)

	n = kvm_dirty_bitmap_bytes(memslot);
	memset(memslot->dirty_bitmap, 0, n);

	r = kvmppc_hv_get_dirty_log(kvm, memslot);

	if (copy_to_user(log->dirty_bitmap, memslot->dirty_bitmap, n))

	mutex_unlock(&kvm->slots_lock);
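/*
 * Illustrative sketch (not from the original source): fetching the dirty
 * bitmap from userspace. The bitmap has one bit per page of the memory slot;
 * the buffer size calculation mirrors kvm_dirty_bitmap_bytes() on a 64-bit
 * host. Slot number and page count are supplied by the caller and are
 * illustrative. Userspace-only code, kept out of the kernel build:
 */
#if 0
#include <linux/kvm.h>
#include <sys/ioctl.h>
#include <stdlib.h>

static unsigned long *example_get_dirty_log(int vm_fd, int slot,
					    unsigned long slot_pages)
{
	struct kvm_dirty_log log = { .slot = slot };
	size_t n = ((slot_pages + 63) / 64) * sizeof(unsigned long);
	unsigned long *bitmap = calloc(1, n);

	log.dirty_bitmap = bitmap;
	if (ioctl(vm_fd, KVM_GET_DIRTY_LOG, &log) < 0) {
		free(bitmap);
		return NULL;
	}
	return bitmap;	/* bit i set => page i of the slot was dirtied */
}
#endif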
static unsigned long slb_pgsize_encoding(unsigned long psize)
	unsigned long senc = 0;

	if (psize > 0x1000) {
		if (psize == 0x10000)
			senc |= SLB_VSID_LP_01;

int kvmppc_core_prepare_memory_region(struct kvm *kvm,
				      struct kvm_userspace_memory_region *mem)
	unsigned long npages;
	unsigned long *phys;

	/* Allocate a slot_phys array */
	phys = kvm->arch.slot_phys[mem->slot];
	if (!kvm->arch.using_mmu_notifiers && !phys) {
		npages = mem->memory_size >> PAGE_SHIFT;
		phys = vzalloc(npages * sizeof(unsigned long));
		kvm->arch.slot_phys[mem->slot] = phys;
		kvm->arch.slot_npages[mem->slot] = npages;
static void unpin_slot(struct kvm *kvm, int slot_id)
	unsigned long *physp;
	unsigned long j, npages, pfn;

	physp = kvm->arch.slot_phys[slot_id];
	npages = kvm->arch.slot_npages[slot_id];
	spin_lock(&kvm->arch.slot_phys_lock);
	for (j = 0; j < npages; j++) {
		if (!(physp[j] & KVMPPC_GOT_PAGE))
		pfn = physp[j] >> PAGE_SHIFT;
		page = pfn_to_page(pfn);
			page = compound_head(page);
	kvm->arch.slot_phys[slot_id] = NULL;
	spin_unlock(&kvm->arch.slot_phys_lock);

void kvmppc_core_commit_memory_region(struct kvm *kvm,
				      struct kvm_userspace_memory_region *mem)
static int kvmppc_hv_setup_rma(struct kvm_vcpu *vcpu)
	struct kvm *kvm = vcpu->kvm;
	struct kvmppc_linear_info *ri = NULL;
	struct kvm_memory_slot *memslot;
	struct vm_area_struct *vma;
	unsigned long lpcr, senc;
	unsigned long psize, porder;
	unsigned long rma_size;
	unsigned long *physp;
	unsigned long i, npages;

	mutex_lock(&kvm->lock);
	if (kvm->arch.rma_setup_done)
		goto out; /* another vcpu beat us to it */

	/* Look up the memslot for guest physical address 0 */
	memslot = gfn_to_memslot(kvm, 0);

	/* We must have some memory at 0 by now */
	if (!memslot || (memslot->flags & KVM_MEMSLOT_INVALID))

	/* Look up the VMA for the start of this memory slot */
	hva = memslot->userspace_addr;
	down_read(&current->mm->mmap_sem);
	vma = find_vma(current->mm, hva);
	if (!vma || vma->vm_start > hva || (vma->vm_flags & VM_IO))

	psize = vma_kernel_pagesize(vma);
	porder = __ilog2(psize);

	/* Is this one of our preallocated RMAs? */
	if (vma->vm_file && vma->vm_file->f_op == &kvm_rma_fops &&
	    hva == vma->vm_start)
		ri = vma->vm_file->private_data;

	up_read(&current->mm->mmap_sem);
	/* On POWER7, use VRMA; on PPC970, give up */
	if (cpu_has_feature(CPU_FTR_ARCH_201)) {
		pr_err("KVM: CPU requires an RMO\n");

	/* We can handle 4k, 64k or 16M pages in the VRMA */
	if (!(psize == 0x1000 || psize == 0x10000 ||
	      psize == 0x1000000))

	/* Update VRMASD field in the LPCR */
	senc = slb_pgsize_encoding(psize);
	kvm->arch.vrma_slb_v = senc | SLB_VSID_B_1T |
		(VRMA_VSID << SLB_VSID_SHIFT_1T);
	lpcr = kvm->arch.lpcr & ~LPCR_VRMASD;
	lpcr |= senc << (LPCR_VRMASD_SH - 4);
	kvm->arch.lpcr = lpcr;

	/* Create HPTEs in the hash page table for the VRMA */
	kvmppc_map_vrma(vcpu, memslot, porder);

	/* Set up to use an RMO region */
	rma_size = ri->npages;
	if (rma_size > memslot->npages)
		rma_size = memslot->npages;
	rma_size <<= PAGE_SHIFT;
	rmls = lpcr_rmls(rma_size);
		pr_err("KVM: Can't use RMA of 0x%lx bytes\n", rma_size);
	atomic_inc(&ri->use_count);

	/* Update LPCR and RMOR */
	lpcr = kvm->arch.lpcr;
	if (cpu_has_feature(CPU_FTR_ARCH_201)) {
		/* PPC970; insert RMLS value (split field) in HID4 */
		lpcr &= ~((1ul << HID4_RMLS0_SH) |
			  (3ul << HID4_RMLS2_SH));
		lpcr |= ((rmls >> 2) << HID4_RMLS0_SH) |
			((rmls & 3) << HID4_RMLS2_SH);
		/* RMOR is also in HID4 */
		lpcr |= ((ri->base_pfn >> (26 - PAGE_SHIFT)) & 0xffff)
		lpcr &= ~(LPCR_VPM0 | LPCR_VRMA_L);
		lpcr |= rmls << LPCR_RMLS_SH;
		kvm->arch.rmor = kvm->arch.rma->base_pfn << PAGE_SHIFT;
	kvm->arch.lpcr = lpcr;
	pr_info("KVM: Using RMO at %lx size %lx (LPCR = %lx)\n",
		ri->base_pfn << PAGE_SHIFT, rma_size, lpcr);

	/* Initialize phys addrs of pages in RMO */
	npages = ri->npages;
	porder = __ilog2(npages);
	physp = kvm->arch.slot_phys[memslot->id];
	spin_lock(&kvm->arch.slot_phys_lock);
	for (i = 0; i < npages; ++i)
		physp[i] = ((ri->base_pfn + i) << PAGE_SHIFT) + porder;
	spin_unlock(&kvm->arch.slot_phys_lock);

	/* Order updates to kvm->arch.lpcr etc. vs. rma_setup_done */
	kvm->arch.rma_setup_done = 1;

	mutex_unlock(&kvm->lock);

	up_read(&current->mm->mmap_sem);
int kvmppc_core_init_vm(struct kvm *kvm)
	/* Allocate hashed page table */
	r = kvmppc_alloc_hpt(kvm);

	INIT_LIST_HEAD(&kvm->arch.spapr_tce_tables);

	kvm->arch.rma = NULL;

	kvm->arch.host_sdr1 = mfspr(SPRN_SDR1);

	if (cpu_has_feature(CPU_FTR_ARCH_201)) {
		/* PPC970; HID4 is effectively the LPCR */
		unsigned long lpid = kvm->arch.lpid;
		kvm->arch.host_lpid = 0;
		kvm->arch.host_lpcr = lpcr = mfspr(SPRN_HID4);
		lpcr &= ~((3 << HID4_LPID1_SH) | (0xful << HID4_LPID5_SH));
		lpcr |= ((lpid >> 4) << HID4_LPID1_SH) |
			((lpid & 0xf) << HID4_LPID5_SH);
		/* POWER7; init LPCR for virtual RMA mode */
		kvm->arch.host_lpid = mfspr(SPRN_LPID);
		kvm->arch.host_lpcr = lpcr = mfspr(SPRN_LPCR);
		lpcr &= LPCR_PECE | LPCR_LPES;
		lpcr |= (4UL << LPCR_DPFD_SH) | LPCR_HDICE |
			LPCR_VPM0 | LPCR_VPM1;
		kvm->arch.vrma_slb_v = SLB_VSID_B_1T |
			(VRMA_VSID << SLB_VSID_SHIFT_1T);
	kvm->arch.lpcr = lpcr;

	kvm->arch.using_mmu_notifiers = !!cpu_has_feature(CPU_FTR_ARCH_206);
	spin_lock_init(&kvm->arch.slot_phys_lock);

void kvmppc_core_destroy_vm(struct kvm *kvm)
	if (!kvm->arch.using_mmu_notifiers)
		for (i = 0; i < KVM_MEM_SLOTS_NUM; i++)

	if (kvm->arch.rma) {
		kvm_release_rma(kvm->arch.rma);
		kvm->arch.rma = NULL;

	kvmppc_free_hpt(kvm);
	WARN_ON(!list_empty(&kvm->arch.spapr_tce_tables));
/* These are stubs for now */
void kvmppc_mmu_pte_pflush(struct kvm_vcpu *vcpu, ulong pa_start, ulong pa_end)

/* We don't need to emulate any privileged instructions or dcbz */
int kvmppc_core_emulate_op(struct kvm_run *run, struct kvm_vcpu *vcpu,
			   unsigned int inst, int *advance)
	return EMULATE_FAIL;

int kvmppc_core_emulate_mtspr(struct kvm_vcpu *vcpu, int sprn, int rs)
	return EMULATE_FAIL;

int kvmppc_core_emulate_mfspr(struct kvm_vcpu *vcpu, int sprn, int rt)
	return EMULATE_FAIL;

static int kvmppc_book3s_hv_init(void)
	r = kvm_init(NULL, sizeof(struct kvm_vcpu), 0, THIS_MODULE);
	r = kvmppc_mmu_hv_init();

static void kvmppc_book3s_hv_exit(void)

module_init(kvmppc_book3s_hv_init);
module_exit(kvmppc_book3s_hv_exit);