uml: header untangling
arch/um/kernel/tlb.c
/*
 * Copyright (C) 2000 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
 * Licensed under the GPL
 */

#include <linux/mm.h>
#include <linux/sched.h>
#include <asm/pgtable.h>
#include <asm/tlbflush.h>
#include "as-layout.h"
#include "mem_user.h"
#include "os.h"
#include "skas.h"
#include "tlb.h"

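/*
 * A batch of address space changes (mmap, munmap, mprotect) waiting to
 * be passed to the host.  Operations accumulate in "ops" while the page
 * tables are walked, so that adjacent changes can be merged before
 * being issued.  "id" names the host address space being updated,
 * "data" carries state between do_ops() calls, and "force" requests
 * that mappings be redone even where the page tables are already
 * marked up to date.
 */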
struct host_vm_change {
        struct host_vm_op {
                enum { NONE, MMAP, MUNMAP, MPROTECT } type;
                union {
                        struct {
                                unsigned long addr;
                                unsigned long len;
                                unsigned int prot;
                                int fd;
                                __u64 offset;
                        } mmap;
                        struct {
                                unsigned long addr;
                                unsigned long len;
                        } munmap;
                        struct {
                                unsigned long addr;
                                unsigned long len;
                                unsigned int prot;
                        } mprotect;
                } u;
        } ops[1];
        int index;
        struct mm_id *id;
        void *data;
        int force;
};

#define INIT_HVC(mm, force) \
        ((struct host_vm_change) \
         { .ops         = { { .type = NONE } }, \
           .id          = &mm->context.id, \
           .data        = NULL, \
           .index       = 0, \
           .force       = force })

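/*
 * Flush the first "end" buffered operations to the host.  "finished"
 * tells the lower layers that this is the last batch of the flush;
 * the loop stops at the first operation that fails.
 */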
static int do_ops(struct host_vm_change *hvc, int end,
                  int finished)
{
        struct host_vm_op *op;
        int i, ret = 0;

        for (i = 0; i < end && !ret; i++) {
                op = &hvc->ops[i];
                switch (op->type) {
                case MMAP:
                        ret = map(hvc->id, op->u.mmap.addr, op->u.mmap.len,
                                  op->u.mmap.prot, op->u.mmap.fd,
                                  op->u.mmap.offset, finished, &hvc->data);
                        break;
                case MUNMAP:
                        ret = unmap(hvc->id, op->u.munmap.addr,
                                    op->u.munmap.len, finished, &hvc->data);
                        break;
                case MPROTECT:
                        ret = protect(hvc->id, op->u.mprotect.addr,
                                      op->u.mprotect.len, op->u.mprotect.prot,
                                      finished, &hvc->data);
                        break;
                default:
                        printk(KERN_ERR "Unknown op type %d in do_ops\n",
                               op->type);
                        break;
                }
        }

        return ret;
}

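/*
 * Queue a mapping of the physical range starting at "phys" to the
 * virtual address "virt".  If the new mapping simply extends the
 * previous operation (same file and protection, contiguous addresses
 * and file offsets), the two are merged; otherwise, a full buffer is
 * drained with do_ops() before the new operation is queued.
 */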
static int add_mmap(unsigned long virt, unsigned long phys, unsigned long len,
                    unsigned int prot, struct host_vm_change *hvc)
{
        __u64 offset;
        struct host_vm_op *last;
        int fd, ret = 0;

        fd = phys_mapping(phys, &offset);
        if (hvc->index != 0) {
                last = &hvc->ops[hvc->index - 1];
                if ((last->type == MMAP) &&
                   (last->u.mmap.addr + last->u.mmap.len == virt) &&
                   (last->u.mmap.prot == prot) && (last->u.mmap.fd == fd) &&
                   (last->u.mmap.offset + last->u.mmap.len == offset)) {
                        last->u.mmap.len += len;
                        return 0;
                }
        }

        if (hvc->index == ARRAY_SIZE(hvc->ops)) {
                ret = do_ops(hvc, ARRAY_SIZE(hvc->ops), 0);
                hvc->index = 0;
        }

        hvc->ops[hvc->index++] = ((struct host_vm_op)
                                  { .type       = MMAP,
                                    .u = { .mmap = { .addr      = virt,
                                                     .len       = len,
                                                     .prot      = prot,
                                                     .fd        = fd,
                                                     .offset    = offset }
                                         } });
        return ret;
}

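/*
 * Queue an unmapping of [addr, addr + len), extending the previous
 * operation instead when that was an adjacent munmap.
 */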
static int add_munmap(unsigned long addr, unsigned long len,
                      struct host_vm_change *hvc)
{
        struct host_vm_op *last;
        int ret = 0;

        if (hvc->index != 0) {
                last = &hvc->ops[hvc->index - 1];
                if ((last->type == MUNMAP) &&
                   (last->u.munmap.addr + last->u.munmap.len == addr)) {
                        last->u.munmap.len += len;
                        return 0;
                }
        }

        if (hvc->index == ARRAY_SIZE(hvc->ops)) {
                ret = do_ops(hvc, ARRAY_SIZE(hvc->ops), 0);
                hvc->index = 0;
        }

        hvc->ops[hvc->index++] = ((struct host_vm_op)
                                  { .type       = MUNMAP,
                                    .u = { .munmap = { .addr    = addr,
                                                       .len     = len } } });
        return ret;
}

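/*
 * Queue a protection change on [addr, addr + len), extending the
 * previous operation instead when that was an adjacent mprotect with
 * the same protection.
 */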
static int add_mprotect(unsigned long addr, unsigned long len,
                        unsigned int prot, struct host_vm_change *hvc)
{
        struct host_vm_op *last;
        int ret = 0;

        if (hvc->index != 0) {
                last = &hvc->ops[hvc->index - 1];
                if ((last->type == MPROTECT) &&
                   (last->u.mprotect.addr + last->u.mprotect.len == addr) &&
                   (last->u.mprotect.prot == prot)) {
                        last->u.mprotect.len += len;
                        return 0;
                }
        }

        if (hvc->index == ARRAY_SIZE(hvc->ops)) {
                ret = do_ops(hvc, ARRAY_SIZE(hvc->ops), 0);
                hvc->index = 0;
        }

        hvc->ops[hvc->index++] = ((struct host_vm_op)
                                  { .type       = MPROTECT,
                                    .u = { .mprotect = { .addr  = addr,
                                                         .len   = len,
                                                         .prot  = prot } } });
        return ret;
}

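/* Advance "n" to the next "inc"-aligned boundary; "inc" must be a power of two. */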
#define ADD_ROUND(n, inc) (((n) + (inc)) & ~((inc) - 1))

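/*
 * Walk the PTEs covering [addr, end) and queue a host operation for
 * each page that needs one.  A page that has not been marked accessed
 * is mapped with no access, and a clean page is mapped read-only, so
 * that the resulting faults can be used to maintain the accessed and
 * dirty bits.
 */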
static inline int update_pte_range(pmd_t *pmd, unsigned long addr,
                                   unsigned long end,
                                   struct host_vm_change *hvc)
{
        pte_t *pte;
        int r, w, x, prot, ret = 0;

        pte = pte_offset_kernel(pmd, addr);
        do {
                r = pte_read(*pte);
                w = pte_write(*pte);
                x = pte_exec(*pte);
                if (!pte_young(*pte)) {
                        /* Not accessed - deny everything so the first
                         * fault can set the accessed bit. */
                        r = 0;
                        w = 0;
                } else if (!pte_dirty(*pte)) {
                        /* Clean - deny writes so the first write can
                         * set the dirty bit. */
                        w = 0;
                }
                prot = ((r ? UM_PROT_READ : 0) | (w ? UM_PROT_WRITE : 0) |
                        (x ? UM_PROT_EXEC : 0));
                if (hvc->force || pte_newpage(*pte)) {
                        if (pte_present(*pte))
                                ret = add_mmap(addr, pte_val(*pte) & PAGE_MASK,
                                               PAGE_SIZE, prot, hvc);
                        else ret = add_munmap(addr, PAGE_SIZE, hvc);
                }
                else if (pte_newprot(*pte))
                        ret = add_mprotect(addr, PAGE_SIZE, prot, hvc);
                *pte = pte_mkuptodate(*pte);
        } while (pte++, addr += PAGE_SIZE, ((addr != end) && !ret));
        return ret;
}

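/*
 * Walk the PMD entries covering [addr, end), unmapping any range whose
 * page table has gone away and descending into the PTEs otherwise.
 */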
static inline int update_pmd_range(pud_t *pud, unsigned long addr,
                                   unsigned long end,
                                   struct host_vm_change *hvc)
{
        pmd_t *pmd;
        unsigned long next;
        int ret = 0;

        pmd = pmd_offset(pud, addr);
        do {
                next = pmd_addr_end(addr, end);
                if (!pmd_present(*pmd)) {
                        if (hvc->force || pmd_newpage(*pmd)) {
                                ret = add_munmap(addr, next - addr, hvc);
                                pmd_mkuptodate(*pmd);
                        }
                }
                else ret = update_pte_range(pmd, addr, next, hvc);
        } while (pmd++, addr = next, ((addr != end) && !ret));
        return ret;
}

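/*
 * The same walk one level up: unmap ranges whose PUD entry is absent,
 * descend into the PMDs otherwise.
 */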
static inline int update_pud_range(pgd_t *pgd, unsigned long addr,
                                   unsigned long end,
                                   struct host_vm_change *hvc)
{
        pud_t *pud;
        unsigned long next;
        int ret = 0;

        pud = pud_offset(pgd, addr);
        do {
                next = pud_addr_end(addr, end);
                if (!pud_present(*pud)) {
                        if (hvc->force || pud_newpage(*pud)) {
                                ret = add_munmap(addr, next - addr, hvc);
                                pud_mkuptodate(*pud);
                        }
                }
                else ret = update_pmd_range(pud, addr, next, hvc);
        } while (pud++, addr = next, ((addr != end) && !ret));
        return ret;
}

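/*
 * Bring the host address space for "mm" into line with its page tables
 * over [start_addr, end_addr).  If any of the queued operations fail,
 * the current process is killed, since its host mappings are now in an
 * unknown state.
 */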
void fix_range_common(struct mm_struct *mm, unsigned long start_addr,
                      unsigned long end_addr, int force)
{
        pgd_t *pgd;
        struct host_vm_change hvc;
        unsigned long addr = start_addr, next;
        int ret = 0;

        hvc = INIT_HVC(mm, force);
        pgd = pgd_offset(mm, addr);
        do {
                next = pgd_addr_end(addr, end_addr);
                if (!pgd_present(*pgd)) {
                        if (force || pgd_newpage(*pgd)) {
                                ret = add_munmap(addr, next - addr, &hvc);
                                pgd_mkuptodate(*pgd);
                        }
                }
                else ret = update_pud_range(pgd, addr, next, &hvc);
        } while (pgd++, addr = next, ((addr != end_addr) && !ret));

        if (!ret)
                ret = do_ops(&hvc, hvc.index, 1);

        /* This is not an else because ret is modified above */
        if (ret) {
                printk(KERN_ERR "fix_range_common: failed, killing current "
                       "process\n");
                force_sig(SIGKILL, current);
        }
}

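/*
 * Walk init_mm's page tables over [start, end) and apply any pending
 * unmap, remap, or protection change directly with the os_* calls,
 * skipping whole entries at each level where possible.  Returns
 * nonzero if anything was changed.
 */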
int flush_tlb_kernel_range_common(unsigned long start, unsigned long end)
{
        struct mm_struct *mm;
        pgd_t *pgd;
        pud_t *pud;
        pmd_t *pmd;
        pte_t *pte;
        unsigned long addr, last;
        int updated = 0, err;

        mm = &init_mm;
        for (addr = start; addr < end;) {
                pgd = pgd_offset(mm, addr);
                if (!pgd_present(*pgd)) {
                        last = ADD_ROUND(addr, PGDIR_SIZE);
                        if (last > end)
                                last = end;
                        if (pgd_newpage(*pgd)) {
                                updated = 1;
                                err = os_unmap_memory((void *) addr,
                                                      last - addr);
                                if (err < 0)
                                        panic("munmap failed, errno = %d\n",
                                              -err);
                        }
                        addr = last;
                        continue;
                }

                pud = pud_offset(pgd, addr);
                if (!pud_present(*pud)) {
                        last = ADD_ROUND(addr, PUD_SIZE);
                        if (last > end)
                                last = end;
                        if (pud_newpage(*pud)) {
                                updated = 1;
                                err = os_unmap_memory((void *) addr,
                                                      last - addr);
                                if (err < 0)
                                        panic("munmap failed, errno = %d\n",
                                              -err);
                        }
                        addr = last;
                        continue;
                }

                pmd = pmd_offset(pud, addr);
                if (!pmd_present(*pmd)) {
                        last = ADD_ROUND(addr, PMD_SIZE);
                        if (last > end)
                                last = end;
                        if (pmd_newpage(*pmd)) {
                                updated = 1;
                                err = os_unmap_memory((void *) addr,
                                                      last - addr);
                                if (err < 0)
                                        panic("munmap failed, errno = %d\n",
                                              -err);
                        }
                        addr = last;
                        continue;
                }

                pte = pte_offset_kernel(pmd, addr);
                if (!pte_present(*pte) || pte_newpage(*pte)) {
                        updated = 1;
                        err = os_unmap_memory((void *) addr,
                                              PAGE_SIZE);
                        if (err < 0)
                                panic("munmap failed, errno = %d\n",
                                      -err);
                        if (pte_present(*pte))
                                map_memory(addr,
                                           pte_val(*pte) & PAGE_MASK,
                                           PAGE_SIZE, 1, 1, 1);
                }
                else if (pte_newprot(*pte)) {
                        updated = 1;
                        os_protect_memory((void *) addr, PAGE_SIZE, 1, 1, 1);
                }
                addr += PAGE_SIZE;
        }
        return updated;
}

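/*
 * Single-page flush of a userspace address: look up the PTE and apply
 * whatever map, unmap, or protection change it calls for immediately.
 * Failure at any point kills the process, as in fix_range_common().
 */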
void flush_tlb_page(struct vm_area_struct *vma, unsigned long address)
{
        pgd_t *pgd;
        pud_t *pud;
        pmd_t *pmd;
        pte_t *pte;
        struct mm_struct *mm = vma->vm_mm;
        void *flush = NULL;
        int r, w, x, prot, err = 0;
        struct mm_id *mm_id;

        address &= PAGE_MASK;
        pgd = pgd_offset(mm, address);
        if (!pgd_present(*pgd))
                goto kill;

        pud = pud_offset(pgd, address);
        if (!pud_present(*pud))
                goto kill;

        pmd = pmd_offset(pud, address);
        if (!pmd_present(*pmd))
                goto kill;

        pte = pte_offset_kernel(pmd, address);

        r = pte_read(*pte);
        w = pte_write(*pte);
        x = pte_exec(*pte);
        if (!pte_young(*pte)) {
                r = 0;
                w = 0;
        } else if (!pte_dirty(*pte)) {
                w = 0;
        }

        mm_id = &mm->context.id;
        prot = ((r ? UM_PROT_READ : 0) | (w ? UM_PROT_WRITE : 0) |
                (x ? UM_PROT_EXEC : 0));
        if (pte_newpage(*pte)) {
                if (pte_present(*pte)) {
                        unsigned long long offset;
                        int fd;

                        fd = phys_mapping(pte_val(*pte) & PAGE_MASK, &offset);
                        err = map(mm_id, address, PAGE_SIZE, prot, fd, offset,
                                  1, &flush);
                }
                else err = unmap(mm_id, address, PAGE_SIZE, 1, &flush);
        }
        else if (pte_newprot(*pte))
                err = protect(mm_id, address, PAGE_SIZE, prot, 1, &flush);

        if (err)
                goto kill;

        *pte = pte_mkuptodate(*pte);

        return;

kill:
        printk(KERN_ERR "Failed to flush page for address 0x%lx\n", address);
        force_sig(SIGKILL, current);
}

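/*
 * Out-of-line versions of the page table lookup operations, for use
 * by code outside this file.
 */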
pgd_t *pgd_offset_proc(struct mm_struct *mm, unsigned long address)
{
        return pgd_offset(mm, address);
}

pud_t *pud_offset_proc(pgd_t *pgd, unsigned long address)
{
        return pud_offset(pgd, address);
}

pmd_t *pmd_offset_proc(pud_t *pud, unsigned long address)
{
        return pmd_offset(pud, address);
}

pte_t *pte_offset_proc(pmd_t *pmd, unsigned long address)
{
        return pte_offset_kernel(pmd, address);
}

pte_t *addr_pte(struct task_struct *task, unsigned long addr)
{
        pgd_t *pgd = pgd_offset(task->mm, addr);
        pud_t *pud = pud_offset(pgd, addr);
        pmd_t *pmd = pmd_offset(pud, addr);

        return pte_offset_map(pmd, addr);
}

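/*
 * The generic TLB flush entry points.  On UML a TLB flush means
 * resynchronizing the host address space with the page tables rather
 * than touching any hardware TLB.
 */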
void flush_tlb_all(void)
{
        flush_tlb_mm(current->mm);
}

void flush_tlb_kernel_range(unsigned long start, unsigned long end)
{
        flush_tlb_kernel_range_common(start, end);
}

void flush_tlb_kernel_vm(void)
{
        flush_tlb_kernel_range_common(start_vm, end_vm);
}

void __flush_tlb_one(unsigned long addr)
{
        flush_tlb_kernel_range_common(addr, addr + PAGE_SIZE);
}

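/*
 * Clamp userspace flushes below STUB_START when proc_mm is unavailable,
 * so the stub pages are left alone, then defer to fix_range_common().
 */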
static void fix_range(struct mm_struct *mm, unsigned long start_addr,
                      unsigned long end_addr, int force)
{
        if (!proc_mm && (end_addr > STUB_START))
                end_addr = STUB_START;

        fix_range_common(mm, start_addr, end_addr, force);
}

void flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
                     unsigned long end)
{
        if (vma->vm_mm == NULL)
                flush_tlb_kernel_range_common(start, end);
        else fix_range(vma->vm_mm, start, end, 0);
}

void flush_tlb_mm(struct mm_struct *mm)
{
        unsigned long end;

        /*
         * Don't bother flushing if this address space is about to be
         * destroyed.
         */
        if (atomic_read(&mm->mm_users) == 0)
                return;

        end = proc_mm ? task_size : STUB_START;
        fix_range(mm, 0, end, 0);
}

void force_flush_all(void)
{
        struct mm_struct *mm = current->mm;
        struct vm_area_struct *vma = mm->mmap;

        while (vma != NULL) {
                fix_range(mm, vma->vm_start, vma->vm_end, 1);
                vma = vma->vm_next;
        }
}