KVM: PPC: Call SLB patching code in interrupt safe manner
[linux-2.6.git] / arch/powerpc/kvm/book3s.c
1 /*
2  * Copyright (C) 2009. SUSE Linux Products GmbH. All rights reserved.
3  *
4  * Authors:
5  *    Alexander Graf <agraf@suse.de>
6  *    Kevin Wolf <mail@kevin-wolf.de>
7  *
8  * Description:
9  * This file is derived from arch/powerpc/kvm/44x.c,
10  * by Hollis Blanchard <hollisb@us.ibm.com>.
11  *
12  * This program is free software; you can redistribute it and/or modify
13  * it under the terms of the GNU General Public License, version 2, as
14  * published by the Free Software Foundation.
15  */
16
17 #include <linux/kvm_host.h>
18 #include <linux/err.h>
19
20 #include <asm/reg.h>
21 #include <asm/cputable.h>
22 #include <asm/cacheflush.h>
23 #include <asm/tlbflush.h>
24 #include <asm/uaccess.h>
25 #include <asm/io.h>
26 #include <asm/kvm_ppc.h>
27 #include <asm/kvm_book3s.h>
28 #include <asm/mmu_context.h>
29 #include <linux/sched.h>
30 #include <linux/vmalloc.h>
31
32 #define VCPU_STAT(x) offsetof(struct kvm_vcpu, stat.x), KVM_STAT_VCPU
33
34 /* #define EXIT_DEBUG */
35 /* #define EXIT_DEBUG_SIMPLE */
36
37 struct kvm_stats_debugfs_item debugfs_entries[] = {
38         { "exits",       VCPU_STAT(sum_exits) },
39         { "mmio",        VCPU_STAT(mmio_exits) },
40         { "sig",         VCPU_STAT(signal_exits) },
41         { "sysc",        VCPU_STAT(syscall_exits) },
42         { "inst_emu",    VCPU_STAT(emulated_inst_exits) },
43         { "dec",         VCPU_STAT(dec_exits) },
44         { "ext_intr",    VCPU_STAT(ext_intr_exits) },
45         { "queue_intr",  VCPU_STAT(queue_intr) },
46         { "halt_wakeup", VCPU_STAT(halt_wakeup) },
47         { "pf_storage",  VCPU_STAT(pf_storage) },
48         { "sp_storage",  VCPU_STAT(sp_storage) },
49         { "pf_instruc",  VCPU_STAT(pf_instruc) },
50         { "sp_instruc",  VCPU_STAT(sp_instruc) },
51         { "ld",          VCPU_STAT(ld) },
52         { "ld_slow",     VCPU_STAT(ld_slow) },
53         { "st",          VCPU_STAT(st) },
54         { "st_slow",     VCPU_STAT(st_slow) },
55         { NULL }
56 };
57
58 void kvmppc_core_load_host_debugstate(struct kvm_vcpu *vcpu)
59 {
60 }
61
62 void kvmppc_core_load_guest_debugstate(struct kvm_vcpu *vcpu)
63 {
64 }
65
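/*
 * vcpu_load/vcpu_put shuttle the shadow SLB and the shadow vcpu state between
 * the vcpu struct and this CPU's PACA. A sketch of the intent, inferred from
 * the fields touched below: the real-mode entry/exit code works out of the
 * PACA, so the guest state has to be staged there while the vcpu is loaded.
 */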
66 void kvmppc_core_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
67 {
68         memcpy(get_paca()->kvm_slb, to_book3s(vcpu)->slb_shadow, sizeof(get_paca()->kvm_slb));
69         memcpy(&get_paca()->shadow_vcpu, &to_book3s(vcpu)->shadow_vcpu,
70                sizeof(get_paca()->shadow_vcpu));
71         get_paca()->kvm_slb_max = to_book3s(vcpu)->slb_shadow_max;
72 }
73
74 void kvmppc_core_vcpu_put(struct kvm_vcpu *vcpu)
75 {
76         memcpy(to_book3s(vcpu)->slb_shadow, get_paca()->kvm_slb, sizeof(get_paca()->kvm_slb));
77         memcpy(&to_book3s(vcpu)->shadow_vcpu, &get_paca()->shadow_vcpu,
78                sizeof(get_paca()->shadow_vcpu));
79         to_book3s(vcpu)->slb_shadow_max = get_paca()->kvm_slb_max;
80 }
81
82 #if defined(EXIT_DEBUG)
83 static u32 kvmppc_get_dec(struct kvm_vcpu *vcpu)
84 {
85         u64 jd = mftb() - vcpu->arch.dec_jiffies;
86         return vcpu->arch.dec - jd;
87 }
88 #endif
89
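/*
 * Reading the code below: the guest-visible MSR is filtered through the
 * per-vcpu msr_mask, while shadow_msr (what the hardware actually runs with)
 * keeps only the user bits plus FP/VMX/VSX and debug flags. Blocking the vcpu
 * on MSR_WE/MSR_POW and flushing segments on IR/DR/PR changes mirror what an
 * MSR write does on real hardware.
 */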
90 void kvmppc_set_msr(struct kvm_vcpu *vcpu, u64 msr)
91 {
92         ulong old_msr = vcpu->arch.msr;
93
94 #ifdef EXIT_DEBUG
95         printk(KERN_INFO "KVM: Set MSR to 0x%llx\n", msr);
96 #endif
97         msr &= to_book3s(vcpu)->msr_mask;
98         vcpu->arch.msr = msr;
99         vcpu->arch.shadow_msr = msr | MSR_USER32;
100         vcpu->arch.shadow_msr &= ( MSR_VEC | MSR_VSX | MSR_FP | MSR_FE0 |
101                                    MSR_USER64 | MSR_SE | MSR_BE | MSR_DE |
102                                    MSR_FE1);
103
104         if (msr & (MSR_WE|MSR_POW)) {
105                 if (!vcpu->arch.pending_exceptions) {
106                         kvm_vcpu_block(vcpu);
107                         vcpu->stat.halt_wakeup++;
108                 }
109         }
110
111         if (((vcpu->arch.msr & (MSR_IR|MSR_DR)) != (old_msr & (MSR_IR|MSR_DR))) ||
112             (vcpu->arch.msr & MSR_PR) != (old_msr & MSR_PR)) {
113                 kvmppc_mmu_flush_segments(vcpu);
114                 kvmppc_mmu_map_segment(vcpu, vcpu->arch.pc);
115         }
116 }
117
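/*
 * Emulate interrupt delivery the way the architecture describes it: save
 * pc/msr into srr0/srr1, branch to the vector offset from the (virtual) HIOR,
 * and let the MMU backend's reset_msr() hook compute the new MSR value.
 */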
118 void kvmppc_inject_interrupt(struct kvm_vcpu *vcpu, int vec, u64 flags)
119 {
120         vcpu->arch.srr0 = vcpu->arch.pc;
121         vcpu->arch.srr1 = vcpu->arch.msr | flags;
122         vcpu->arch.pc = to_book3s(vcpu)->hior + vec;
123         vcpu->arch.mmu.reset_msr(vcpu);
124 }
125
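/*
 * Map an interrupt vector to its delivery priority bit in
 * vcpu->arch.pending_exceptions. Unknown vectors map to BOOK3S_IRQPRIO_MAX,
 * which falls through to the default (undelivered) case in
 * kvmppc_book3s_irqprio_deliver().
 */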
126 static int kvmppc_book3s_vec2irqprio(unsigned int vec)
127 {
128         unsigned int prio;
129
130         switch (vec) {
131         case 0x100: prio = BOOK3S_IRQPRIO_SYSTEM_RESET;         break;
132         case 0x200: prio = BOOK3S_IRQPRIO_MACHINE_CHECK;        break;
133         case 0x300: prio = BOOK3S_IRQPRIO_DATA_STORAGE;         break;
134         case 0x380: prio = BOOK3S_IRQPRIO_DATA_SEGMENT;         break;
135         case 0x400: prio = BOOK3S_IRQPRIO_INST_STORAGE;         break;
136         case 0x480: prio = BOOK3S_IRQPRIO_INST_SEGMENT;         break;
137         case 0x500: prio = BOOK3S_IRQPRIO_EXTERNAL;             break;
138         case 0x600: prio = BOOK3S_IRQPRIO_ALIGNMENT;            break;
139         case 0x700: prio = BOOK3S_IRQPRIO_PROGRAM;              break;
140         case 0x800: prio = BOOK3S_IRQPRIO_FP_UNAVAIL;           break;
141         case 0x900: prio = BOOK3S_IRQPRIO_DECREMENTER;          break;
142         case 0xc00: prio = BOOK3S_IRQPRIO_SYSCALL;              break;
143         case 0xd00: prio = BOOK3S_IRQPRIO_DEBUG;                break;
144         case 0xf20: prio = BOOK3S_IRQPRIO_ALTIVEC;              break;
145         case 0xf40: prio = BOOK3S_IRQPRIO_VSX;                  break;
146         default:    prio = BOOK3S_IRQPRIO_MAX;                  break;
147         }
148
149         return prio;
150 }
151
152 static void kvmppc_book3s_dequeue_irqprio(struct kvm_vcpu *vcpu,
153                                           unsigned int vec)
154 {
155         clear_bit(kvmppc_book3s_vec2irqprio(vec),
156                   &vcpu->arch.pending_exceptions);
157 }
158
159 void kvmppc_book3s_queue_irqprio(struct kvm_vcpu *vcpu, unsigned int vec)
160 {
161         vcpu->stat.queue_intr++;
162
163         set_bit(kvmppc_book3s_vec2irqprio(vec),
164                 &vcpu->arch.pending_exceptions);
165 #ifdef EXIT_DEBUG
166         printk(KERN_INFO "Queueing interrupt %x\n", vec);
167 #endif
168 }
169
170
171 void kvmppc_core_queue_program(struct kvm_vcpu *vcpu)
172 {
173         kvmppc_book3s_queue_irqprio(vcpu, BOOK3S_INTERRUPT_PROGRAM);
174 }
175
176 void kvmppc_core_queue_dec(struct kvm_vcpu *vcpu)
177 {
178         kvmppc_book3s_queue_irqprio(vcpu, BOOK3S_INTERRUPT_DECREMENTER);
179 }
180
181 int kvmppc_core_pending_dec(struct kvm_vcpu *vcpu)
182 {
183         return test_bit(kvmppc_book3s_vec2irqprio(BOOK3S_INTERRUPT_DECREMENTER),
                            &vcpu->arch.pending_exceptions);
184 }
185
186 void kvmppc_core_dequeue_dec(struct kvm_vcpu *vcpu)
187 {
188         kvmppc_book3s_dequeue_irqprio(vcpu, BOOK3S_INTERRUPT_DECREMENTER);
189 }
190
191 void kvmppc_core_queue_external(struct kvm_vcpu *vcpu,
192                                 struct kvm_interrupt *irq)
193 {
194         kvmppc_book3s_queue_irqprio(vcpu, BOOK3S_INTERRUPT_EXTERNAL);
195 }
196
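/*
 * Turn a pending priority back into a vector and inject it. External and
 * decrementer interrupts are only deliverable while the guest has MSR_EE set;
 * everything else is delivered unconditionally. Returns nonzero if the
 * interrupt was actually injected.
 */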
197 int kvmppc_book3s_irqprio_deliver(struct kvm_vcpu *vcpu, unsigned int priority)
198 {
199         int deliver = 1;
200         int vec = 0;
201
202         switch (priority) {
203         case BOOK3S_IRQPRIO_DECREMENTER:
204                 deliver = vcpu->arch.msr & MSR_EE;
205                 vec = BOOK3S_INTERRUPT_DECREMENTER;
206                 break;
207         case BOOK3S_IRQPRIO_EXTERNAL:
208                 deliver = vcpu->arch.msr & MSR_EE;
209                 vec = BOOK3S_INTERRUPT_EXTERNAL;
210                 break;
211         case BOOK3S_IRQPRIO_SYSTEM_RESET:
212                 vec = BOOK3S_INTERRUPT_SYSTEM_RESET;
213                 break;
214         case BOOK3S_IRQPRIO_MACHINE_CHECK:
215                 vec = BOOK3S_INTERRUPT_MACHINE_CHECK;
216                 break;
217         case BOOK3S_IRQPRIO_DATA_STORAGE:
218                 vec = BOOK3S_INTERRUPT_DATA_STORAGE;
219                 break;
220         case BOOK3S_IRQPRIO_INST_STORAGE:
221                 vec = BOOK3S_INTERRUPT_INST_STORAGE;
222                 break;
223         case BOOK3S_IRQPRIO_DATA_SEGMENT:
224                 vec = BOOK3S_INTERRUPT_DATA_SEGMENT;
225                 break;
226         case BOOK3S_IRQPRIO_INST_SEGMENT:
227                 vec = BOOK3S_INTERRUPT_INST_SEGMENT;
228                 break;
229         case BOOK3S_IRQPRIO_ALIGNMENT:
230                 vec = BOOK3S_INTERRUPT_ALIGNMENT;
231                 break;
232         case BOOK3S_IRQPRIO_PROGRAM:
233                 vec = BOOK3S_INTERRUPT_PROGRAM;
234                 break;
235         case BOOK3S_IRQPRIO_VSX:
236                 vec = BOOK3S_INTERRUPT_VSX;
237                 break;
238         case BOOK3S_IRQPRIO_ALTIVEC:
239                 vec = BOOK3S_INTERRUPT_ALTIVEC;
240                 break;
241         case BOOK3S_IRQPRIO_FP_UNAVAIL:
242                 vec = BOOK3S_INTERRUPT_FP_UNAVAIL;
243                 break;
244         case BOOK3S_IRQPRIO_SYSCALL:
245                 vec = BOOK3S_INTERRUPT_SYSCALL;
246                 break;
247         case BOOK3S_IRQPRIO_DEBUG:
248                 vec = BOOK3S_INTERRUPT_TRACE;
249                 break;
250         case BOOK3S_IRQPRIO_PERFORMANCE_MONITOR:
251                 vec = BOOK3S_INTERRUPT_PERFMON;
252                 break;
253         default:
254                 deliver = 0;
255                 printk(KERN_ERR "KVM: Unknown interrupt: 0x%x\n", priority);
256                 break;
257         }
258
259 #if 0
260         printk(KERN_INFO "Deliver interrupt 0x%x? %x\n", vec, deliver);
261 #endif
262
263         if (deliver)
264                 kvmppc_inject_interrupt(vcpu, vec, 0ULL);
265
266         return deliver;
267 }
268
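/*
 * Walk the pending exception bits in priority order and hand them to
 * kvmppc_book3s_irqprio_deliver(). A delivered interrupt has its bit cleared,
 * except for the decrementer, which (as the comment below notes) is cleared
 * by the mtdec emulation instead.
 */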
269 void kvmppc_core_deliver_interrupts(struct kvm_vcpu *vcpu)
270 {
271         unsigned long *pending = &vcpu->arch.pending_exceptions;
272         unsigned int priority;
273
274 #ifdef EXIT_DEBUG
275         if (vcpu->arch.pending_exceptions)
276                 printk(KERN_EMERG "KVM: Check pending: %lx\n", vcpu->arch.pending_exceptions);
277 #endif
278         priority = __ffs(*pending);
279         while (priority < (sizeof(*pending) * 8)) {
280                 if (kvmppc_book3s_irqprio_deliver(vcpu, priority) &&
281                     (priority != BOOK3S_IRQPRIO_DECREMENTER)) {
282                         /* DEC interrupts get cleared by mtdec */
283                         clear_bit(priority, &vcpu->arch.pending_exceptions);
284                         break;
285                 }
286
287                 priority = find_next_bit(pending,
288                                          BITS_PER_BYTE * sizeof(*pending),
289                                          priority + 1);
290         }
291 }
292
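/*
 * Select the guest MMU model by PVR: values in the range checked below (the
 * 64-bit CPUs) get the Book3s_64 MMU, a high exception prefix (HIOR) and the
 * full 64-bit MSR mask; everything else gets the Book3s_32 MMU. The DCBZ32
 * flag is only set when we ourselves run in hypervisor mode on a 970, where
 * the CPU can be told to treat dcbz as a 32-byte store (see the comment
 * further down).
 */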
293 void kvmppc_set_pvr(struct kvm_vcpu *vcpu, u32 pvr)
294 {
295         vcpu->arch.hflags &= ~BOOK3S_HFLAG_SLB;
296         vcpu->arch.pvr = pvr;
297         if ((pvr >= 0x330000) && (pvr < 0x70330000)) {
298                 kvmppc_mmu_book3s_64_init(vcpu);
299                 to_book3s(vcpu)->hior = 0xfff00000;
300                 to_book3s(vcpu)->msr_mask = 0xffffffffffffffffULL;
301         } else {
302                 kvmppc_mmu_book3s_32_init(vcpu);
303                 to_book3s(vcpu)->hior = 0;
304                 to_book3s(vcpu)->msr_mask = 0xffffffffULL;
305         }
306
307         /* If we are in hypervisor mode on a 970, we can tell the CPU to
308          * treat DCBZ as a 32-byte store */
309         vcpu->arch.hflags &= ~BOOK3S_HFLAG_DCBZ32;
310         if (vcpu->arch.mmu.is_dcbz32(vcpu) && (mfmsr() & MSR_HV) &&
311             !strcmp(cur_cpu_spec->platform, "ppc970"))
312                 vcpu->arch.hflags |= BOOK3S_HFLAG_DCBZ32;
313
314 }
315
316 /* Book3s_32 CPUs always have a 32-byte cache line size, which Linux assumes. To
317  * make Book3s_32 Linux work on Book3s_64, we have to make sure we trap dcbz to
318  * emulate a 32-byte dcbz length.
319  *
320  * The Book3s_64 inventors also realized this case and implemented a special bit
321  * in the HID5 register, which is a hypervisor resource. Thus we can't use it.
322  *
323  * My approach here is to patch the dcbz instructions on executing pages.
324  */
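/*
 * How the patching below works (as read from the masks used): 0xff0007ff
 * keeps the opcode fields of an instruction and drops the register operands,
 * so any "dcbz rA,rB" matches INS_DCBZ. Clearing bit 0x8 then turns it into a
 * reserved encoding; executing it raises a program interrupt, and the 0x700
 * path in kvmppc_handle_exit() lets such patched instructions go to the
 * instruction emulator instead of reflecting the fault into the guest.
 */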
325 static void kvmppc_patch_dcbz(struct kvm_vcpu *vcpu, struct kvmppc_pte *pte)
326 {
327         bool touched = false;
328         hva_t hpage;
329         u32 *page;
330         int i;
331
332         hpage = gfn_to_hva(vcpu->kvm, pte->raddr >> PAGE_SHIFT);
333         if (kvm_is_error_hva(hpage))
334                 return;
335
336         hpage |= pte->raddr & ~PAGE_MASK;
337         hpage &= ~0xFFFULL;
338
339         page = vmalloc(HW_PAGE_SIZE);
            if (!page)
                    return;
340
341         if (copy_from_user(page, (void __user *)hpage, HW_PAGE_SIZE))
342                 goto out;
343
344         for (i = 0; i < HW_PAGE_SIZE / 4; i++)
345                 if ((page[i] & 0xff0007ff) == INS_DCBZ) {
346                         page[i] &= 0xfffffff7; /* reserved instruction, so we trap */
347                         touched = true;
348                 }
349
350         if (touched)
351                 copy_to_user((void __user *)hpage, page, HW_PAGE_SIZE);
352
353 out:
354         vfree(page);
355 }
356
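/*
 * Translate an effective address either through the guest MMU (when the
 * relevant relocation bit is on) or as a 1:1 real-mode mapping. The VSID_REAL*
 * markers folded into the virtual page number keep real-mode, IR-only and
 * DR-only translations from aliasing each other in the shadow MMU - a sketch
 * of the intent, inferred from the distinct marker values.
 */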
357 static int kvmppc_xlate(struct kvm_vcpu *vcpu, ulong eaddr, bool data,
358                          struct kvmppc_pte *pte)
359 {
360         int relocated = (vcpu->arch.msr & (data ? MSR_DR : MSR_IR));
361         int r;
362
363         if (relocated) {
364                 r = vcpu->arch.mmu.xlate(vcpu, eaddr, pte, data);
365         } else {
366                 pte->eaddr = eaddr;
367                 pte->raddr = eaddr & 0xffffffff;
368                 pte->vpage = eaddr >> 12;
369                 switch (vcpu->arch.msr & (MSR_DR|MSR_IR)) {
370                 case 0:
371                         pte->vpage |= VSID_REAL;
                            break;
372                 case MSR_DR:
373                         pte->vpage |= VSID_REAL_DR;
                            break;
374                 case MSR_IR:
375                         pte->vpage |= VSID_REAL_IR;
                            break;
376                 }
377                 pte->may_read = true;
378                 pte->may_write = true;
379                 pte->may_execute = true;
380                 r = 0;
381         }
382
383         return r;
384 }
385
386 static hva_t kvmppc_bad_hva(void)
387 {
388         return PAGE_OFFSET;
389 }
390
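/*
 * Convert a translated guest PTE into a host virtual address, honoring the
 * may_read/may_write bits. kvmppc_bad_hva() returns PAGE_OFFSET, which
 * kvm_is_error_hva() treats as the error value, so callers can use the usual
 * check.
 */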
391 static hva_t kvmppc_pte_to_hva(struct kvm_vcpu *vcpu, struct kvmppc_pte *pte,
392                                bool read)
393 {
394         hva_t hpage;
395
396         if (read && !pte->may_read)
397                 goto err;
398
399         if (!read && !pte->may_write)
400                 goto err;
401
402         hpage = gfn_to_hva(vcpu->kvm, pte->raddr >> PAGE_SHIFT);
403         if (kvm_is_error_hva(hpage))
404                 goto err;
405
406         return hpage | (pte->raddr & ~PAGE_MASK);
407 err:
408         return kvmppc_bad_hva();
409 }
410
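/*
 * kvmppc_st/kvmppc_ld store to and load from a guest effective address by
 * translating it and then copying through the host user mapping. Both return
 * -ENOENT when the translation, the permission check or the copy fails.
 */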
411 int kvmppc_st(struct kvm_vcpu *vcpu, ulong eaddr, int size, void *ptr)
412 {
413         struct kvmppc_pte pte;
414         hva_t hva = eaddr;
415
416         vcpu->stat.st++;
417
418         if (kvmppc_xlate(vcpu, eaddr, false, &pte))
419                 goto err;
420
421         hva = kvmppc_pte_to_hva(vcpu, &pte, false);
422         if (kvm_is_error_hva(hva))
423                 goto err;
424
425         if (copy_to_user((void __user *)hva, ptr, size)) {
426                 printk(KERN_INFO "kvmppc_st at 0x%lx failed\n", hva);
427                 goto err;
428         }
429
430         return 0;
431
432 err:
433         return -ENOENT;
434 }
435
436 int kvmppc_ld(struct kvm_vcpu *vcpu, ulong eaddr, int size, void *ptr,
437                       bool data)
438 {
439         struct kvmppc_pte pte;
440         hva_t hva = eaddr;
441
442         vcpu->stat.ld++;
443
444         if (kvmppc_xlate(vcpu, eaddr, data, &pte))
445                 goto err;
446
447         hva = kvmppc_pte_to_hva(vcpu, &pte, true);
448         if (kvm_is_error_hva(hva))
449                 goto err;
450
451         if (copy_from_user(ptr, (void __user *)hva, size)) {
452                 printk(KERN_INFO "kvmppc_ld at 0x%lx failed\n", hva);
453                 goto err;
454         }
455
456         return 0;
457
458 err:
459         return -ENOENT;
460 }
461
462 static int kvmppc_visible_gfn(struct kvm_vcpu *vcpu, gfn_t gfn)
463 {
464         return kvm_is_visible_gfn(vcpu->kvm, gfn);
465 }
466
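/*
 * Common page fault path for data and instruction storage interrupts. The
 * fault is either reflected back into the guest (missing guest PTE, protection
 * fault, or missing SLB entry -> vec + 0x80), mapped into the host shadow MMU
 * if the gfn is backed by a memslot, or treated as MMIO and handed to the
 * emulator.
 */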
467 int kvmppc_handle_pagefault(struct kvm_run *run, struct kvm_vcpu *vcpu,
468                             ulong eaddr, int vec)
469 {
470         bool data = (vec == BOOK3S_INTERRUPT_DATA_STORAGE);
471         int r = RESUME_GUEST;
472         int relocated;
473         int page_found = 0;
474         struct kvmppc_pte pte;
475         bool is_mmio = false;
476
477         if ( vec == BOOK3S_INTERRUPT_DATA_STORAGE ) {
478                 relocated = (vcpu->arch.msr & MSR_DR);
479         } else {
480                 relocated = (vcpu->arch.msr & MSR_IR);
481         }
482
483         /* Resolve real address if translation turned on */
484         if (relocated) {
485                 page_found = vcpu->arch.mmu.xlate(vcpu, eaddr, &pte, data);
486         } else {
487                 pte.may_execute = true;
488                 pte.may_read = true;
489                 pte.may_write = true;
490                 pte.raddr = eaddr & 0xffffffff;
491                 pte.eaddr = eaddr;
492                 pte.vpage = eaddr >> 12;
493                 switch (vcpu->arch.msr & (MSR_DR|MSR_IR)) {
494                 case 0:
495                         pte.vpage |= VSID_REAL;
                            break;
496                 case MSR_DR:
497                         pte.vpage |= VSID_REAL_DR;
                            break;
498                 case MSR_IR:
499                         pte.vpage |= VSID_REAL_IR;
                            break;
500                 }
501         }
502
503         if (vcpu->arch.mmu.is_dcbz32(vcpu) &&
504            (!(vcpu->arch.hflags & BOOK3S_HFLAG_DCBZ32))) {
505                 /*
506                  * If we do the dcbz hack, we have to NX on every execution,
507                  * so we can patch the executing code. This renders our guest
508                  * NX-less.
509                  */
510                 pte.may_execute = !data;
511         }
512
513         if (page_found == -ENOENT) {
514                 /* Page not found in guest PTE entries */
515                 vcpu->arch.dear = vcpu->arch.fault_dear;
516                 to_book3s(vcpu)->dsisr = vcpu->arch.fault_dsisr;
517                 vcpu->arch.msr |= (vcpu->arch.shadow_msr & 0x00000000f8000000ULL);
518                 kvmppc_book3s_queue_irqprio(vcpu, vec);
519         } else if (page_found == -EPERM) {
520                 /* Storage protection */
521                 vcpu->arch.dear = vcpu->arch.fault_dear;
522                 to_book3s(vcpu)->dsisr = vcpu->arch.fault_dsisr & ~DSISR_NOHPTE;
523                 to_book3s(vcpu)->dsisr |= DSISR_PROTFAULT;
524                 vcpu->arch.msr |= (vcpu->arch.shadow_msr & 0x00000000f8000000ULL);
525                 kvmppc_book3s_queue_irqprio(vcpu, vec);
526         } else if (page_found == -EINVAL) {
527                 /* Page not found in guest SLB */
528                 vcpu->arch.dear = vcpu->arch.fault_dear;
529                 kvmppc_book3s_queue_irqprio(vcpu, vec + 0x80);
530         } else if (!is_mmio &&
531                    kvmppc_visible_gfn(vcpu, pte.raddr >> PAGE_SHIFT)) {
532                 /* The guest's PTE is not mapped yet. Map on the host */
533                 kvmppc_mmu_map_page(vcpu, &pte);
534                 if (data)
535                         vcpu->stat.sp_storage++;
536                 else if (vcpu->arch.mmu.is_dcbz32(vcpu) &&
537                         (!(vcpu->arch.hflags & BOOK3S_HFLAG_DCBZ32)))
538                         kvmppc_patch_dcbz(vcpu, &pte);
539         } else {
540                 /* MMIO */
541                 vcpu->stat.mmio_exits++;
542                 vcpu->arch.paddr_accessed = pte.raddr;
543                 r = kvmppc_emulate_mmio(run, vcpu);
544                 if ( r == RESUME_HOST_NV )
545                         r = RESUME_HOST;
546         }
547
548         return r;
549 }
550
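/*
 * Top-level exit handler: update statistics, dispatch on the interrupt vector
 * we exited with, and decide whether to resume the guest or return to
 * userspace. Before resuming, pending signals turn into KVM_EXIT_INTR and any
 * newly pending guest interrupts get delivered.
 */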
551 int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
552                        unsigned int exit_nr)
553 {
554         int r = RESUME_HOST;
555
556         vcpu->stat.sum_exits++;
557
558         run->exit_reason = KVM_EXIT_UNKNOWN;
559         run->ready_for_interrupt_injection = 1;
560 #ifdef EXIT_DEBUG
561         printk(KERN_EMERG "exit_nr=0x%x | pc=0x%lx | dar=0x%lx | dec=0x%x | msr=0x%lx\n",
562                 exit_nr, vcpu->arch.pc, vcpu->arch.fault_dear,
563                 kvmppc_get_dec(vcpu), vcpu->arch.msr);
564 #elif defined (EXIT_DEBUG_SIMPLE)
565         if ((exit_nr != 0x900) && (exit_nr != 0x500))
566                 printk(KERN_EMERG "exit_nr=0x%x | pc=0x%lx | dar=0x%lx | msr=0x%lx\n",
567                         exit_nr, vcpu->arch.pc, vcpu->arch.fault_dear,
568                         vcpu->arch.msr);
569 #endif
570         kvm_resched(vcpu);
571         switch (exit_nr) {
572         case BOOK3S_INTERRUPT_INST_STORAGE:
573                 vcpu->stat.pf_instruc++;
574                 /* only care about PTEG not found errors, but leave NX alone */
575                 if (vcpu->arch.shadow_msr & 0x40000000) {
576                         r = kvmppc_handle_pagefault(run, vcpu, vcpu->arch.pc, exit_nr);
577                         vcpu->stat.sp_instruc++;
578                 } else if (vcpu->arch.mmu.is_dcbz32(vcpu) &&
579                           (!(vcpu->arch.hflags & BOOK3S_HFLAG_DCBZ32))) {
580                         /*
581                          * XXX If we do the dcbz hack we use the NX bit to flush & patch the page,
582                          *     so we can't use the NX bit inside the guest. Let's cross our fingers
583                          *     that no guest that needs the dcbz hack also uses NX.
584                          */
585                         kvmppc_mmu_pte_flush(vcpu, vcpu->arch.pc, ~0xFFFULL);
586                 } else {
587                         vcpu->arch.msr |= (vcpu->arch.shadow_msr & 0x58000000);
588                         kvmppc_book3s_queue_irqprio(vcpu, exit_nr);
589                         kvmppc_mmu_pte_flush(vcpu, vcpu->arch.pc, ~0xFFFULL);
590                         r = RESUME_GUEST;
591                 }
592                 break;
593         case BOOK3S_INTERRUPT_DATA_STORAGE:
594                 vcpu->stat.pf_storage++;
595                 /* The only case we need to handle is missing shadow PTEs */
596                 if (vcpu->arch.fault_dsisr & DSISR_NOHPTE) {
597                         r = kvmppc_handle_pagefault(run, vcpu, vcpu->arch.fault_dear, exit_nr);
598                 } else {
599                         vcpu->arch.dear = vcpu->arch.fault_dear;
600                         to_book3s(vcpu)->dsisr = vcpu->arch.fault_dsisr;
601                         kvmppc_book3s_queue_irqprio(vcpu, exit_nr);
602                         kvmppc_mmu_pte_flush(vcpu, vcpu->arch.dear, ~0xFFFULL);
603                         r = RESUME_GUEST;
604                 }
605                 break;
606         case BOOK3S_INTERRUPT_DATA_SEGMENT:
607                 if (kvmppc_mmu_map_segment(vcpu, vcpu->arch.fault_dear) < 0) {
608                         vcpu->arch.dear = vcpu->arch.fault_dear;
609                         kvmppc_book3s_queue_irqprio(vcpu,
610                                 BOOK3S_INTERRUPT_DATA_SEGMENT);
611                 }
612                 r = RESUME_GUEST;
613                 break;
614         case BOOK3S_INTERRUPT_INST_SEGMENT:
615                 if (kvmppc_mmu_map_segment(vcpu, vcpu->arch.pc) < 0) {
616                         kvmppc_book3s_queue_irqprio(vcpu,
617                                 BOOK3S_INTERRUPT_INST_SEGMENT);
618                 }
619                 r = RESUME_GUEST;
620                 break;
621         /* We're good on these - the host merely wanted to get our attention */
622         case BOOK3S_INTERRUPT_DECREMENTER:
623                 vcpu->stat.dec_exits++;
624                 r = RESUME_GUEST;
625                 break;
626         case BOOK3S_INTERRUPT_EXTERNAL:
627                 vcpu->stat.ext_intr_exits++;
628                 r = RESUME_GUEST;
629                 break;
630         case BOOK3S_INTERRUPT_PROGRAM:
631         {
632                 enum emulation_result er;
633
634                 if (vcpu->arch.msr & MSR_PR) {
635 #ifdef EXIT_DEBUG
636                         printk(KERN_INFO "Userspace triggered 0x700 exception at 0x%lx (0x%x)\n", vcpu->arch.pc, vcpu->arch.last_inst);
637 #endif
638                         if ((vcpu->arch.last_inst & 0xff0007ff) !=
639                             (INS_DCBZ & 0xfffffff7)) {
640                                 kvmppc_book3s_queue_irqprio(vcpu, exit_nr);
641                                 r = RESUME_GUEST;
642                                 break;
643                         }
644                 }
645
646                 vcpu->stat.emulated_inst_exits++;
647                 er = kvmppc_emulate_instruction(run, vcpu);
648                 switch (er) {
649                 case EMULATE_DONE:
650                         r = RESUME_GUEST_NV;
651                         break;
652                 case EMULATE_FAIL:
653                         printk(KERN_CRIT "%s: emulation at %lx failed (%08x)\n",
654                                __func__, vcpu->arch.pc, vcpu->arch.last_inst);
655                         kvmppc_book3s_queue_irqprio(vcpu, exit_nr);
656                         r = RESUME_GUEST;
657                         break;
658                 default:
659                         BUG();
660                 }
661                 break;
662         }
663         case BOOK3S_INTERRUPT_SYSCALL:
664 #ifdef EXIT_DEBUG
665                 printk(KERN_INFO "Syscall Nr %d\n", (int)kvmppc_get_gpr(vcpu, 0));
666 #endif
667                 vcpu->stat.syscall_exits++;
668                 kvmppc_book3s_queue_irqprio(vcpu, exit_nr);
669                 r = RESUME_GUEST;
670                 break;
671         case BOOK3S_INTERRUPT_MACHINE_CHECK:
672         case BOOK3S_INTERRUPT_FP_UNAVAIL:
673         case BOOK3S_INTERRUPT_TRACE:
674         case BOOK3S_INTERRUPT_ALTIVEC:
675         case BOOK3S_INTERRUPT_VSX:
676                 kvmppc_book3s_queue_irqprio(vcpu, exit_nr);
677                 r = RESUME_GUEST;
678                 break;
679         default:
680                 /* Ugh - bork here! What did we get? */
681                 printk(KERN_EMERG "exit_nr=0x%x | pc=0x%lx | msr=0x%lx\n", exit_nr, vcpu->arch.pc, vcpu->arch.shadow_msr);
682                 r = RESUME_HOST;
683                 BUG();
684                 break;
685         }
686
687
688         if (!(r & RESUME_HOST)) {
689                 /* To avoid clobbering exit_reason, only check for signals if
690                  * we aren't already exiting to userspace for some other
691                  * reason. */
692                 if (signal_pending(current)) {
693 #ifdef EXIT_DEBUG
694                         printk(KERN_EMERG "KVM: Going back to host\n");
695 #endif
696                         vcpu->stat.signal_exits++;
697                         run->exit_reason = KVM_EXIT_INTR;
698                         r = -EINTR;
699                 } else {
700                         /* In case an interrupt came in that was triggered
701                          * from userspace (like DEC), we need to check what
702                          * to inject now! */
703                         kvmppc_core_deliver_interrupts(vcpu);
704                 }
705         }
706
707 #ifdef EXIT_DEBUG
708         printk(KERN_EMERG "KVM exit: vcpu=0x%p pc=0x%lx r=0x%x\n", vcpu, vcpu->arch.pc, r);
709 #endif
710
711         return r;
712 }
713
714 int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
715 {
716         return 0;
717 }
718
719 int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
720 {
721         int i;
722
723         regs->pc = vcpu->arch.pc;
724         regs->cr = kvmppc_get_cr(vcpu);
725         regs->ctr = vcpu->arch.ctr;
726         regs->lr = vcpu->arch.lr;
727         regs->xer = kvmppc_get_xer(vcpu);
728         regs->msr = vcpu->arch.msr;
729         regs->srr0 = vcpu->arch.srr0;
730         regs->srr1 = vcpu->arch.srr1;
731         regs->pid = vcpu->arch.pid;
732         regs->sprg0 = vcpu->arch.sprg0;
733         regs->sprg1 = vcpu->arch.sprg1;
734         regs->sprg2 = vcpu->arch.sprg2;
735         regs->sprg3 = vcpu->arch.sprg3;
736         regs->sprg4 = vcpu->arch.sprg4;
737         regs->sprg5 = vcpu->arch.sprg5;
738         regs->sprg6 = vcpu->arch.sprg6;
            regs->sprg7 = vcpu->arch.sprg7;
739
740         for (i = 0; i < ARRAY_SIZE(regs->gpr); i++)
741                 regs->gpr[i] = kvmppc_get_gpr(vcpu, i);
742
743         return 0;
744 }
745
746 int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
747 {
748         int i;
749
750         vcpu->arch.pc = regs->pc;
751         kvmppc_set_cr(vcpu, regs->cr);
752         vcpu->arch.ctr = regs->ctr;
753         vcpu->arch.lr = regs->lr;
754         kvmppc_set_xer(vcpu, regs->xer);
755         kvmppc_set_msr(vcpu, regs->msr);
756         vcpu->arch.srr0 = regs->srr0;
757         vcpu->arch.srr1 = regs->srr1;
758         vcpu->arch.sprg0 = regs->sprg0;
759         vcpu->arch.sprg1 = regs->sprg1;
760         vcpu->arch.sprg2 = regs->sprg2;
761         vcpu->arch.sprg3 = regs->sprg3;
762         vcpu->arch.sprg4 = regs->sprg4;
763         vcpu->arch.sprg5 = regs->sprg5;
764         vcpu->arch.sprg6 = regs->sprg6;
            vcpu->arch.sprg7 = regs->sprg7;
765
766         for (i = 0; i < ARRAY_SIZE(regs->gpr); i++)
767                 kvmppc_set_gpr(vcpu, i, regs->gpr[i]);
768
769         return 0;
770 }
771
772 int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
773                                   struct kvm_sregs *sregs)
774 {
775         struct kvmppc_vcpu_book3s *vcpu3s = to_book3s(vcpu);
776         int i;
777
778         sregs->pvr = vcpu->arch.pvr;
779
780         sregs->u.s.sdr1 = to_book3s(vcpu)->sdr1;
781         if (vcpu->arch.hflags & BOOK3S_HFLAG_SLB) {
782                 for (i = 0; i < 64; i++) {
783                         sregs->u.s.ppc64.slb[i].slbe = vcpu3s->slb[i].orige | i;
784                         sregs->u.s.ppc64.slb[i].slbv = vcpu3s->slb[i].origv;
785                 }
786         } else {
787                 for (i = 0; i < 16; i++) {
788                         sregs->u.s.ppc32.sr[i] = vcpu3s->sr[i].raw;
790                 }
791                 for (i = 0; i < 8; i++) {
792                         sregs->u.s.ppc32.ibat[i] = vcpu3s->ibat[i].raw;
793                         sregs->u.s.ppc32.dbat[i] = vcpu3s->dbat[i].raw;
794                 }
795         }
796         return 0;
797 }
798
799 int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
800                                   struct kvm_sregs *sregs)
801 {
802         struct kvmppc_vcpu_book3s *vcpu3s = to_book3s(vcpu);
803         int i;
804
805         kvmppc_set_pvr(vcpu, sregs->pvr);
806
807         vcpu3s->sdr1 = sregs->u.s.sdr1;
808         if (vcpu->arch.hflags & BOOK3S_HFLAG_SLB) {
809                 for (i = 0; i < 64; i++) {
810                         vcpu->arch.mmu.slbmte(vcpu, sregs->u.s.ppc64.slb[i].slbv,
811                                                     sregs->u.s.ppc64.slb[i].slbe);
812                 }
813         } else {
814                 for (i = 0; i < 16; i++) {
815                         vcpu->arch.mmu.mtsrin(vcpu, i, sregs->u.s.ppc32.sr[i]);
816                 }
817                 for (i = 0; i < 8; i++) {
818                         kvmppc_set_bat(vcpu, &(vcpu3s->ibat[i]), false,
819                                        (u32)sregs->u.s.ppc32.ibat[i]);
820                         kvmppc_set_bat(vcpu, &(vcpu3s->ibat[i]), true,
821                                        (u32)(sregs->u.s.ppc32.ibat[i] >> 32));
822                         kvmppc_set_bat(vcpu, &(vcpu3s->dbat[i]), false,
823                                        (u32)sregs->u.s.ppc32.dbat[i]);
824                         kvmppc_set_bat(vcpu, &(vcpu3s->dbat[i]), true,
825                                        (u32)(sregs->u.s.ppc32.dbat[i] >> 32));
826                 }
827         }
828
829         /* Flush the MMU after messing with the segments */
830         kvmppc_mmu_pte_flush(vcpu, 0, 0);
831         return 0;
832 }
833
834 int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
835 {
836         return -ENOTSUPP;
837 }
838
839 int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
840 {
841         return -ENOTSUPP;
842 }
843
844 int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
845                                   struct kvm_translation *tr)
846 {
847         return 0;
848 }
849
850 /*
851  * Get (and clear) the dirty memory log for a memory slot.
852  */
853 int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
854                                       struct kvm_dirty_log *log)
855 {
856         struct kvm_memory_slot *memslot;
857         struct kvm_vcpu *vcpu;
858         ulong ga, ga_end;
859         int is_dirty = 0;
860         int r, n;
861
862         mutex_lock(&kvm->slots_lock);
863
864         r = kvm_get_dirty_log(kvm, log, &is_dirty);
865         if (r)
866                 goto out;
867
868         /* If nothing is dirty, don't bother messing with page tables. */
869         if (is_dirty) {
870                 memslot = &kvm->memslots->memslots[log->slot];
871
872                 ga = memslot->base_gfn << PAGE_SHIFT;
873                 ga_end = ga + (memslot->npages << PAGE_SHIFT);
874
875                 kvm_for_each_vcpu(n, vcpu, kvm)
876                         kvmppc_mmu_pte_pflush(vcpu, ga, ga_end);
877
878                 n = ALIGN(memslot->npages, BITS_PER_LONG) / 8;
879                 memset(memslot->dirty_bitmap, 0, n);
880         }
881
882         r = 0;
883 out:
884         mutex_unlock(&kvm->slots_lock);
885         return r;
886 }
887
888 int kvmppc_core_check_processor_compat(void)
889 {
890         return 0;
891 }
892
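/*
 * Allocate and set up a vcpu. The defaults below assume a 970FX-like guest
 * (PVR 0x3C0301, 64 SLB entries); userspace can change that later through
 * KVM_SET_SREGS, which ends up in kvmppc_set_pvr(). A host MMU context is
 * reserved and its VSID range remembered (vsid_first/vsid_max) to back the
 * guest's shadow translations.
 */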
893 struct kvm_vcpu *kvmppc_core_vcpu_create(struct kvm *kvm, unsigned int id)
894 {
895         struct kvmppc_vcpu_book3s *vcpu_book3s;
896         struct kvm_vcpu *vcpu;
897         int err;
898
899         vcpu_book3s = (struct kvmppc_vcpu_book3s *)__get_free_pages( GFP_KERNEL | __GFP_ZERO,
900                         get_order(sizeof(struct kvmppc_vcpu_book3s)));
901         if (!vcpu_book3s) {
902                 err = -ENOMEM;
903                 goto out;
904         }
905
906         vcpu = &vcpu_book3s->vcpu;
907         err = kvm_vcpu_init(vcpu, kvm, id);
908         if (err)
909                 goto free_vcpu;
910
911         vcpu->arch.host_retip = kvm_return_point;
912         vcpu->arch.host_msr = mfmsr();
913         /* default to book3s_64 (970fx) */
914         vcpu->arch.pvr = 0x3C0301;
915         kvmppc_set_pvr(vcpu, vcpu->arch.pvr);
916         vcpu_book3s->slb_nr = 64;
917
918         /* remember where some real-mode handlers are */
919         vcpu->arch.trampoline_lowmem = kvmppc_trampoline_lowmem;
920         vcpu->arch.trampoline_enter = kvmppc_trampoline_enter;
921         vcpu->arch.highmem_handler = (ulong)kvmppc_handler_highmem;
922         vcpu->arch.rmcall = *(ulong*)kvmppc_rmcall;
923
924         vcpu->arch.shadow_msr = MSR_USER64;
925
926         err = __init_new_context();
927         if (err < 0)
928                 goto free_vcpu;
929         vcpu_book3s->context_id = err;
930
931         vcpu_book3s->vsid_max = ((vcpu_book3s->context_id + 1) << USER_ESID_BITS) - 1;
932         vcpu_book3s->vsid_first = vcpu_book3s->context_id << USER_ESID_BITS;
933         vcpu_book3s->vsid_next = vcpu_book3s->vsid_first;
934
935         return vcpu;
936
937 free_vcpu:
938         free_pages((long)vcpu_book3s, get_order(sizeof(struct kvmppc_vcpu_book3s)));
939 out:
940         return ERR_PTR(err);
941 }
942
943 void kvmppc_core_vcpu_free(struct kvm_vcpu *vcpu)
944 {
945         struct kvmppc_vcpu_book3s *vcpu_book3s = to_book3s(vcpu);
946
947         __destroy_context(vcpu_book3s->context_id);
948         kvm_vcpu_uninit(vcpu);
949         free_pages((long)vcpu_book3s, get_order(sizeof(struct kvmppc_vcpu_book3s)));
950 }
951
952 extern int __kvmppc_vcpu_entry(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu);
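/*
 * Bail out early if a signal is already pending; otherwise interrupts are
 * re-enabled only around the low-level __kvmppc_vcpu_entry() call. The XXX
 * below notes that being entered with interrupts disabled is a known wart of
 * the current calling convention.
 */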
953 int __kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
954 {
955         int ret;
956
957         /* No need to go into the guest when all we would do is go right back out */
958         if (signal_pending(current)) {
959                 kvm_run->exit_reason = KVM_EXIT_INTR;
960                 return -EINTR;
961         }
962
963         /* XXX we get called with irq disabled - change that! */
964         local_irq_enable();
965
966         ret = __kvmppc_vcpu_entry(kvm_run, vcpu);
967
968         local_irq_disable();
969
970         return ret;
971 }
972
973 static int kvmppc_book3s_init(void)
974 {
975         return kvm_init(NULL, sizeof(struct kvmppc_vcpu_book3s), THIS_MODULE);
976 }
977
978 static void kvmppc_book3s_exit(void)
979 {
980         kvm_exit();
981 }
982
983 module_init(kvmppc_book3s_init);
984 module_exit(kvmppc_book3s_exit);