KVM: PPC: Move fields between struct kvm_vcpu_arch and kvmppc_vcpu_book3s
arch/powerpc/kvm/book3s_64_mmu.c
/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
 *
 * Copyright SUSE Linux Products GmbH 2009
 *
 * Authors: Alexander Graf <agraf@suse.de>
 */

#include <linux/types.h>
#include <linux/string.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <linux/highmem.h>

#include <asm/tlbflush.h>
#include <asm/kvm_ppc.h>
#include <asm/kvm_book3s.h>

/* #define DEBUG_MMU */

#ifdef DEBUG_MMU
#define dprintk(X...) printk(KERN_INFO X)
#else
#define dprintk(X...) do { } while (0)
#endif

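/* Reset the guest MSR; only MSR_SF (64-bit mode) is kept set. */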
static void kvmppc_mmu_book3s_64_reset_msr(struct kvm_vcpu *vcpu)
{
	kvmppc_set_msr(vcpu, MSR_SF);
}

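/*
 * Search the guest SLB for a valid entry whose ESID matches the given
 * effective address; 1T entries are compared against the 1T ESID.
 * Returns NULL (and dumps the SLB when DEBUG_MMU is set) if nothing matches.
 */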
static struct kvmppc_slb *kvmppc_mmu_book3s_64_find_slbe(
				struct kvm_vcpu *vcpu,
				gva_t eaddr)
{
	int i;
	u64 esid = GET_ESID(eaddr);
	u64 esid_1t = GET_ESID_1T(eaddr);

	for (i = 0; i < vcpu->arch.slb_nr; i++) {
		u64 cmp_esid = esid;

		if (!vcpu->arch.slb[i].valid)
			continue;

		if (vcpu->arch.slb[i].tb)
			cmp_esid = esid_1t;

		if (vcpu->arch.slb[i].esid == cmp_esid)
			return &vcpu->arch.slb[i];
	}

	dprintk("KVM: No SLB entry found for 0x%lx [%llx | %llx]\n",
		eaddr, esid, esid_1t);
	for (i = 0; i < vcpu->arch.slb_nr; i++) {
		if (vcpu->arch.slb[i].vsid)
			dprintk("  %d: %c%c%c %llx %llx\n", i,
				vcpu->arch.slb[i].valid ? 'v' : ' ',
				vcpu->arch.slb[i].large ? 'l' : ' ',
				vcpu->arch.slb[i].tb    ? 't' : ' ',
				vcpu->arch.slb[i].esid,
				vcpu->arch.slb[i].vsid);
	}

	return NULL;
}

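/*
 * Effective address -> virtual page number: combine the SLB entry's VSID
 * with the page index (28 page-index bits for 1T segments, 16 for 256MB
 * segments). Returns 0 if no SLB entry covers the address.
 */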
static u64 kvmppc_mmu_book3s_64_ea_to_vp(struct kvm_vcpu *vcpu, gva_t eaddr,
					 bool data)
{
	struct kvmppc_slb *slb;

	slb = kvmppc_mmu_book3s_64_find_slbe(vcpu, eaddr);
	if (!slb)
		return 0;

	if (slb->tb)
		return (((u64)eaddr >> 12) & 0xfffffff) |
		       (((u64)slb->vsid) << 28);

	return (((u64)eaddr >> 12) & 0xffff) | (((u64)slb->vsid) << 16);
}

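/* Page shift for this segment: 24 (16MB pages) when L is set, 12 (4k) otherwise. */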
static int kvmppc_mmu_book3s_64_get_pagesize(struct kvmppc_slb *slbe)
{
	return slbe->large ? 24 : 12;
}

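/* Page index within the segment (low 28 bits of the EA, shifted by the page size). */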
static u32 kvmppc_mmu_book3s_64_get_page(struct kvmppc_slb *slbe, gva_t eaddr)
{
	int p = kvmppc_mmu_book3s_64_get_pagesize(slbe);
	return ((eaddr & 0xfffffff) >> p);
}

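/*
 * Hash the VSID and page index, apply the HTAB size mask derived from the
 * guest's SDR1 and return the host virtual address of the guest PTEG
 * (secondary hash if 'second' is set). May return an error HVA if the
 * PTEG is not backed by a memslot.
 */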
static hva_t kvmppc_mmu_book3s_64_get_pteg(
				struct kvmppc_vcpu_book3s *vcpu_book3s,
				struct kvmppc_slb *slbe, gva_t eaddr,
				bool second)
{
	u64 hash, pteg, htabsize;
	u32 page;
	hva_t r;

	page = kvmppc_mmu_book3s_64_get_page(slbe, eaddr);
	htabsize = ((1 << ((vcpu_book3s->sdr1 & 0x1f) + 11)) - 1);

	hash = slbe->vsid ^ page;
	if (second)
		hash = ~hash;
	hash &= ((1ULL << 39ULL) - 1ULL);
	hash &= htabsize;
	hash <<= 7ULL;

	pteg = vcpu_book3s->sdr1 & 0xfffffffffffc0000ULL;
	pteg |= hash;

	dprintk("MMU: page=0x%x sdr1=0x%llx pteg=0x%llx vsid=0x%llx\n",
		page, vcpu_book3s->sdr1, pteg, slbe->vsid);

	r = gfn_to_hva(vcpu_book3s->vcpu.kvm, pteg >> PAGE_SHIFT);
	if (kvm_is_error_hva(r))
		return r;
	return r | (pteg & ~PAGE_MASK);
}

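/*
 * Build the abbreviated virtual page number (VSID plus page index) in the
 * format used for comparing against the AVPN field of guest HPTEs.
 */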
static u64 kvmppc_mmu_book3s_64_get_avpn(struct kvmppc_slb *slbe, gva_t eaddr)
{
	int p = kvmppc_mmu_book3s_64_get_pagesize(slbe);
	u64 avpn;

	avpn = kvmppc_mmu_book3s_64_get_page(slbe, eaddr);
	avpn |= slbe->vsid << (28 - p);

	if (p < 24)
		avpn >>= ((80 - p) - 56) - 8;
	else
		avpn <<= 8;

	return avpn;
}

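/*
 * Translate a guest effective address by walking the guest hash table:
 * find the SLB entry, read the primary (then secondary) PTEG from guest
 * memory, match the AVPN and derive access permissions from PP and Ks/Kp.
 * The magic (paravirt) page is handled up front when the guest runs in
 * supervisor mode.
 */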
static int kvmppc_mmu_book3s_64_xlate(struct kvm_vcpu *vcpu, gva_t eaddr,
				struct kvmppc_pte *gpte, bool data)
{
	struct kvmppc_vcpu_book3s *vcpu_book3s = to_book3s(vcpu);
	struct kvmppc_slb *slbe;
	hva_t ptegp;
	u64 pteg[16];
	u64 avpn = 0;
	int i;
	u8 key = 0;
	bool found = false;
	bool perm_err = false;
	int second = 0;
	ulong mp_ea = vcpu->arch.magic_page_ea;

	/* Magic page override */
	if (unlikely(mp_ea) &&
	    unlikely((eaddr & ~0xfffULL) == (mp_ea & ~0xfffULL)) &&
	    !(vcpu->arch.shared->msr & MSR_PR)) {
		gpte->eaddr = eaddr;
		gpte->vpage = kvmppc_mmu_book3s_64_ea_to_vp(vcpu, eaddr, data);
		gpte->raddr = vcpu->arch.magic_page_pa | (gpte->raddr & 0xfff);
		gpte->raddr &= KVM_PAM;
		gpte->may_execute = true;
		gpte->may_read = true;
		gpte->may_write = true;

		return 0;
	}

	slbe = kvmppc_mmu_book3s_64_find_slbe(vcpu, eaddr);
	if (!slbe)
		goto no_seg_found;

do_second:
	ptegp = kvmppc_mmu_book3s_64_get_pteg(vcpu_book3s, slbe, eaddr, second);
	if (kvm_is_error_hva(ptegp))
		goto no_page_found;

	avpn = kvmppc_mmu_book3s_64_get_avpn(slbe, eaddr);

	if (copy_from_user(pteg, (void __user *)ptegp, sizeof(pteg))) {
		printk(KERN_ERR "KVM can't copy data from 0x%lx!\n", ptegp);
		goto no_page_found;
	}

	if ((vcpu->arch.shared->msr & MSR_PR) && slbe->Kp)
		key = 4;
	else if (!(vcpu->arch.shared->msr & MSR_PR) && slbe->Ks)
		key = 4;

	for (i = 0; i < 16; i += 2) {
		u64 v = pteg[i];
		u64 r = pteg[i+1];

		/* Valid check */
		if (!(v & HPTE_V_VALID))
			continue;
		/* Hash check */
		if ((v & HPTE_V_SECONDARY) != second)
			continue;

		/* AVPN compare */
		if (HPTE_V_AVPN_VAL(avpn) == HPTE_V_AVPN_VAL(v)) {
			u8 pp = (r & HPTE_R_PP) | key;
			int eaddr_mask = 0xFFF;

			gpte->eaddr = eaddr;
			gpte->vpage = kvmppc_mmu_book3s_64_ea_to_vp(vcpu,
								    eaddr,
								    data);
			if (slbe->large)
				eaddr_mask = 0xFFFFFF;
			gpte->raddr = (r & HPTE_R_RPN) | (eaddr & eaddr_mask);
			gpte->may_execute = ((r & HPTE_R_N) ? false : true);
			gpte->may_read = false;
			gpte->may_write = false;

			switch (pp) {
			case 0:
			case 1:
			case 2:
			case 6:
				gpte->may_write = true;
				/* fall through */
			case 3:
			case 5:
			case 7:
				gpte->may_read = true;
				break;
			}

			if (!gpte->may_read) {
				perm_err = true;
				continue;
			}

			dprintk("KVM MMU: Translated 0x%lx [0x%llx] -> 0x%llx "
				"-> 0x%lx\n",
				eaddr, avpn, gpte->vpage, gpte->raddr);
			found = true;
			break;
		}
	}

	/* Update PTE R and C bits, so the guest's swapper knows we used the
	 * page */
	if (found) {
		u64 oldr = pteg[i+1];

		if (gpte->may_read) {
			/* Set the accessed flag */
			pteg[i+1] |= HPTE_R_R;
		}
		if (gpte->may_write) {
			/* Set the dirty flag */
			pteg[i+1] |= HPTE_R_C;
		} else {
			dprintk("KVM: Mapping read-only page!\n");
		}

		/* Write back into the PTEG */
		if (pteg[i+1] != oldr)
			copy_to_user((void __user *)ptegp, pteg, sizeof(pteg));

		return 0;
	} else {
		dprintk("KVM MMU: No PTE found (ea=0x%lx sdr1=0x%llx "
			"ptegp=0x%lx)\n",
			eaddr, to_book3s(vcpu)->sdr1, ptegp);
		for (i = 0; i < 16; i += 2)
			dprintk("   %02d: 0x%llx - 0x%llx (0x%llx)\n",
				i, pteg[i], pteg[i+1], avpn);

		if (!second) {
			second = HPTE_V_SECONDARY;
			goto do_second;
		}
	}

no_page_found:
	if (perm_err)
		return -EPERM;

	return -ENOENT;

no_seg_found:
	dprintk("KVM MMU: Trigger segment fault\n");
	return -EINVAL;
}

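/*
 * Emulate slbmte: decode RB (ESID, valid bit, index) and RS (VSID and
 * flags) into the guest SLB entry and map the new segment on the host.
 */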
static void kvmppc_mmu_book3s_64_slbmte(struct kvm_vcpu *vcpu, u64 rs, u64 rb)
{
	struct kvmppc_vcpu_book3s *vcpu_book3s;
	u64 esid, esid_1t;
	int slb_nr;
	struct kvmppc_slb *slbe;

	dprintk("KVM MMU: slbmte(0x%llx, 0x%llx)\n", rs, rb);

	vcpu_book3s = to_book3s(vcpu);

	esid = GET_ESID(rb);
	esid_1t = GET_ESID_1T(rb);
	slb_nr = rb & 0xfff;

	if (slb_nr >= vcpu->arch.slb_nr)
		return;

	slbe = &vcpu->arch.slb[slb_nr];

	slbe->large = (rs & SLB_VSID_L) ? 1 : 0;
	slbe->tb    = (rs & SLB_VSID_B_1T) ? 1 : 0;
	slbe->esid  = slbe->tb ? esid_1t : esid;
	slbe->vsid  = rs >> 12;
	slbe->valid = (rb & SLB_ESID_V) ? 1 : 0;
	slbe->Ks    = (rs & SLB_VSID_KS) ? 1 : 0;
	slbe->Kp    = (rs & SLB_VSID_KP) ? 1 : 0;
	slbe->nx    = (rs & SLB_VSID_N) ? 1 : 0;
	slbe->class = (rs & SLB_VSID_C) ? 1 : 0;

	slbe->orige = rb & (ESID_MASK | SLB_ESID_V);
	slbe->origv = rs;

	/* Map the new segment */
	kvmppc_mmu_map_segment(vcpu, esid << SID_SHIFT);
}

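/* Emulate slbmfee: return the ESID word (original RB image) of an SLB entry. */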
static u64 kvmppc_mmu_book3s_64_slbmfee(struct kvm_vcpu *vcpu, u64 slb_nr)
{
	struct kvmppc_slb *slbe;

	if (slb_nr >= vcpu->arch.slb_nr)
		return 0;

	slbe = &vcpu->arch.slb[slb_nr];

	return slbe->orige;
}

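/* Emulate slbmfev: return the VSID word (original RS image) of an SLB entry. */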
static u64 kvmppc_mmu_book3s_64_slbmfev(struct kvm_vcpu *vcpu, u64 slb_nr)
{
	struct kvmppc_slb *slbe;

	if (slb_nr >= vcpu->arch.slb_nr)
		return 0;

	slbe = &vcpu->arch.slb[slb_nr];

	return slbe->origv;
}

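/*
 * Emulate slbie: mark the guest SLB entry covering the EA invalid and
 * update the host segment mapping for that address.
 */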
static void kvmppc_mmu_book3s_64_slbie(struct kvm_vcpu *vcpu, u64 ea)
{
	struct kvmppc_slb *slbe;

	dprintk("KVM MMU: slbie(0x%llx)\n", ea);

	slbe = kvmppc_mmu_book3s_64_find_slbe(vcpu, ea);

	if (!slbe)
		return;

	dprintk("KVM MMU: slbie(0x%llx, 0x%llx)\n", ea, slbe->esid);

	slbe->valid = false;

	kvmppc_mmu_map_segment(vcpu, ea);
}

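/*
 * Emulate slbia: invalidate all guest SLB entries except entry 0, then
 * flush the host segments and remap the one the guest is executing from
 * if instruction relocation is on.
 */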
static void kvmppc_mmu_book3s_64_slbia(struct kvm_vcpu *vcpu)
{
	int i;

	dprintk("KVM MMU: slbia()\n");

	for (i = 1; i < vcpu->arch.slb_nr; i++)
		vcpu->arch.slb[i].valid = false;

	if (vcpu->arch.shared->msr & MSR_IR) {
		kvmppc_mmu_flush_segments(vcpu);
		kvmppc_mmu_map_segment(vcpu, kvmppc_get_pc(vcpu));
	}
}

static void kvmppc_mmu_book3s_64_mtsrin(struct kvm_vcpu *vcpu, u32 srnum,
					ulong value)
{
	u64 rb = 0, rs = 0;

	/*
	 * According to Book3 2.01 mtsrin is implemented as:
	 *
	 * The SLB entry specified by (RB)32:35 is loaded from register
	 * RS, as follows.
	 *
	 * SLBE Bit	Source			SLB Field
	 *
	 * 0:31		0x0000_0000		ESID-0:31
	 * 32:35	(RB)32:35		ESID-32:35
	 * 36		0b1			V
	 * 37:61	0x00_0000 || 0b0	VSID-0:24
	 * 62:88	(RS)37:63		VSID-25:51
	 * 89:91	(RS)33:35		Ks Kp N
	 * 92		(RS)36			L ((RS)36 must be 0b0)
	 * 93		0b0			C
	 */

	dprintk("KVM MMU: mtsrin(0x%x, 0x%lx)\n", srnum, value);

	/* ESID = srnum */
	rb |= (srnum & 0xf) << 28;
	/* Set the valid bit */
	rb |= 1 << 27;
	/* Index = ESID */
	rb |= srnum;

	/* VSID = VSID */
	rs |= (value & 0xfffffff) << 12;
	/* flags = flags */
	rs |= ((value >> 28) & 0x7) << 9;

	kvmppc_mmu_book3s_64_slbmte(vcpu, rs, rb);
}

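/*
 * Emulate tlbie: flush shadow PTEs matching the virtual page, using a
 * coarser mask for large pages so the whole large page is dropped.
 */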
static void kvmppc_mmu_book3s_64_tlbie(struct kvm_vcpu *vcpu, ulong va,
				       bool large)
{
	u64 mask = 0xFFFFFFFFFULL;

	dprintk("KVM MMU: tlbie(0x%lx)\n", va);

	if (large)
		mask = 0xFFFFFF000ULL;
	kvmppc_mmu_pte_vflush(vcpu, va >> 12, mask);
}

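/*
 * Map a guest ESID to the VSID used for shadow mappings: take the VSID
 * from the guest SLB when translation is on, otherwise tag the ESID with
 * one of the VSID_REAL* ranges so real-mode and split-mode accesses get
 * distinct shadow segments. VSID_PR is added in problem state.
 */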
static int kvmppc_mmu_book3s_64_esid_to_vsid(struct kvm_vcpu *vcpu, ulong esid,
					     u64 *vsid)
{
	ulong ea = esid << SID_SHIFT;
	struct kvmppc_slb *slb;
	u64 gvsid = esid;
	ulong mp_ea = vcpu->arch.magic_page_ea;

	if (vcpu->arch.shared->msr & (MSR_DR|MSR_IR)) {
		slb = kvmppc_mmu_book3s_64_find_slbe(vcpu, ea);
		if (slb)
			gvsid = slb->vsid;
	}

	switch (vcpu->arch.shared->msr & (MSR_DR|MSR_IR)) {
	case 0:
		*vsid = VSID_REAL | esid;
		break;
	case MSR_IR:
		*vsid = VSID_REAL_IR | gvsid;
		break;
	case MSR_DR:
		*vsid = VSID_REAL_DR | gvsid;
		break;
	case MSR_DR|MSR_IR:
		if (!slb)
			goto no_slb;

		*vsid = gvsid;
		break;
	default:
		BUG();
		break;
	}

	if (vcpu->arch.shared->msr & MSR_PR)
		*vsid |= VSID_PR;

	return 0;

no_slb:
	/* Catch magic page case */
	if (unlikely(mp_ea) &&
	    unlikely(esid == (mp_ea >> SID_SHIFT)) &&
	    !(vcpu->arch.shared->msr & MSR_PR)) {
		*vsid = VSID_REAL | esid;
		return 0;
	}

	return -EINVAL;
}

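/* Report whether the guest has requested 32-byte dcbz behaviour via its HID5 image. */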
static bool kvmppc_mmu_book3s_64_is_dcbz32(struct kvm_vcpu *vcpu)
{
	return (to_book3s(vcpu)->hid[5] & 0x80);
}

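/*
 * Hook up the 64-bit Book3S MMU emulation for this vcpu; mfsrin is left
 * NULL since segment registers are handled through the SLB here.
 */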
void kvmppc_mmu_book3s_64_init(struct kvm_vcpu *vcpu)
{
	struct kvmppc_mmu *mmu = &vcpu->arch.mmu;

	mmu->mfsrin = NULL;
	mmu->mtsrin = kvmppc_mmu_book3s_64_mtsrin;
	mmu->slbmte = kvmppc_mmu_book3s_64_slbmte;
	mmu->slbmfee = kvmppc_mmu_book3s_64_slbmfee;
	mmu->slbmfev = kvmppc_mmu_book3s_64_slbmfev;
	mmu->slbie = kvmppc_mmu_book3s_64_slbie;
	mmu->slbia = kvmppc_mmu_book3s_64_slbia;
	mmu->xlate = kvmppc_mmu_book3s_64_xlate;
	mmu->reset_msr = kvmppc_mmu_book3s_64_reset_msr;
	mmu->tlbie = kvmppc_mmu_book3s_64_tlbie;
	mmu->esid_to_vsid = kvmppc_mmu_book3s_64_esid_to_vsid;
	mmu->ea_to_vp = kvmppc_mmu_book3s_64_ea_to_vp;
	mmu->is_dcbz32 = kvmppc_mmu_book3s_64_is_dcbz32;

	vcpu->arch.hflags |= BOOK3S_HFLAG_SLB;
}