/*
 * Copyright (C) 2000 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
 * Licensed under the GPL
 */

#include "linux/mm.h"
#include "asm/pgtable.h"
#include "asm/tlbflush.h"
#include "as-layout.h"
#include "mem_user.h"
#include "os.h"
#include "skas.h"
#include "tlb.h"

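/*
 * Queue a host mmap in the "ops" array.  If the new mapping simply extends
 * the previous MMAP entry (same fd and protection, contiguous virtual
 * addresses and file offsets), the two are merged; otherwise a new slot is
 * used, flushing the array through do_ops() first if it is already full.
 */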
static int add_mmap(unsigned long virt, unsigned long phys, unsigned long len,
		    unsigned int prot, struct host_vm_op *ops, int *index,
		    int last_filled, union mm_context *mmu, void **flush,
		    int (*do_ops)(union mm_context *, struct host_vm_op *,
				  int, int, void **))
{
	__u64 offset;
	struct host_vm_op *last;
	int fd, ret = 0;

	fd = phys_mapping(phys, &offset);
	if (*index != -1) {
		last = &ops[*index];
		if ((last->type == MMAP) &&
		    (last->u.mmap.addr + last->u.mmap.len == virt) &&
		    (last->u.mmap.prot == prot) && (last->u.mmap.fd == fd) &&
		    (last->u.mmap.offset + last->u.mmap.len == offset)) {
			last->u.mmap.len += len;
			return 0;
		}
	}

	if (*index == last_filled) {
		ret = (*do_ops)(mmu, ops, last_filled, 0, flush);
		*index = -1;
	}

	ops[++*index] = ((struct host_vm_op) { .type = MMAP,
					       .u = { .mmap = {
						       .addr	= virt,
						       .len	= len,
						       .prot	= prot,
						       .fd	= fd,
						       .offset	= offset }
					       } });
	return ret;
}

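/*
 * Queue a host munmap, coalescing it with the previous MUNMAP entry when
 * the two ranges are contiguous.
 */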
static int add_munmap(unsigned long addr, unsigned long len,
		      struct host_vm_op *ops, int *index, int last_filled,
		      union mm_context *mmu, void **flush,
		      int (*do_ops)(union mm_context *, struct host_vm_op *,
				    int, int, void **))
{
	struct host_vm_op *last;
	int ret = 0;

	if (*index != -1) {
		last = &ops[*index];
		if ((last->type == MUNMAP) &&
		    (last->u.munmap.addr + last->u.munmap.len == addr)) {
			last->u.munmap.len += len;
			return 0;
		}
	}

	if (*index == last_filled) {
		ret = (*do_ops)(mmu, ops, last_filled, 0, flush);
		*index = -1;
	}

	ops[++*index] = ((struct host_vm_op) { .type = MUNMAP,
					       .u = { .munmap = {
						       .addr	= addr,
						       .len	= len } } });
	return ret;
}

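/*
 * Queue a host mprotect, extending the previous MPROTECT entry when the
 * range is contiguous and the protection is unchanged.
 */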
static int add_mprotect(unsigned long addr, unsigned long len,
			unsigned int prot, struct host_vm_op *ops, int *index,
			int last_filled, union mm_context *mmu, void **flush,
			int (*do_ops)(union mm_context *, struct host_vm_op *,
				      int, int, void **))
{
	struct host_vm_op *last;
	int ret = 0;

	if (*index != -1) {
		last = &ops[*index];
		if ((last->type == MPROTECT) &&
		    (last->u.mprotect.addr + last->u.mprotect.len == addr) &&
		    (last->u.mprotect.prot == prot)) {
			last->u.mprotect.len += len;
			return 0;
		}
	}

	if (*index == last_filled) {
		ret = (*do_ops)(mmu, ops, last_filled, 0, flush);
		*index = -1;
	}

	ops[++*index] = ((struct host_vm_op) { .type = MPROTECT,
					       .u = { .mprotect = {
						       .addr	= addr,
						       .len	= len,
						       .prot	= prot } } });
	return ret;
}

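/* Advance n to the next inc-aligned boundary above it; inc must be a power of two. */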
#define ADD_ROUND(n, inc) (((n) + (inc)) & ~((inc) - 1))

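/*
 * Walk the PTEs covering [addr, end) and queue the host operations needed
 * to bring the host mappings into line with the page tables: a newly
 * installed page becomes an mmap, a newly cleared one a munmap, and a
 * protection change an mprotect.  The accessed and dirty bits are folded
 * into the host protection - a page that is not young gets no access at
 * all, one that is not dirty is not writable.
 */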
static inline int update_pte_range(pmd_t *pmd, unsigned long addr,
				   unsigned long end, struct host_vm_op *ops,
				   int last_op, int *op_index, int force,
				   union mm_context *mmu, void **flush,
				   int (*do_ops)(union mm_context *,
						 struct host_vm_op *, int, int,
						 void **))
{
	pte_t *pte;
	int r, w, x, prot, ret = 0;

	pte = pte_offset_kernel(pmd, addr);
	do {
		r = pte_read(*pte);
		w = pte_write(*pte);
		x = pte_exec(*pte);
		if (!pte_young(*pte)) {
			r = 0;
			w = 0;
		} else if (!pte_dirty(*pte)) {
			w = 0;
		}
		prot = ((r ? UM_PROT_READ : 0) | (w ? UM_PROT_WRITE : 0) |
			(x ? UM_PROT_EXEC : 0));
		if (force || pte_newpage(*pte)) {
			if (pte_present(*pte))
				ret = add_mmap(addr, pte_val(*pte) & PAGE_MASK,
					       PAGE_SIZE, prot, ops, op_index,
					       last_op, mmu, flush, do_ops);
			else ret = add_munmap(addr, PAGE_SIZE, ops, op_index,
					      last_op, mmu, flush, do_ops);
		}
		else if (pte_newprot(*pte))
			ret = add_mprotect(addr, PAGE_SIZE, prot, ops, op_index,
					   last_op, mmu, flush, do_ops);
		*pte = pte_mkuptodate(*pte);
	} while (pte++, addr += PAGE_SIZE, ((addr != end) && !ret));
	return ret;
}

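/*
 * Walk the PMDs under one PUD: a PMD that is not present has its range
 * unmapped (when forced or newly cleared), a present one is handed down
 * to update_pte_range().
 */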
static inline int update_pmd_range(pud_t *pud, unsigned long addr,
				   unsigned long end, struct host_vm_op *ops,
				   int last_op, int *op_index, int force,
				   union mm_context *mmu, void **flush,
				   int (*do_ops)(union mm_context *,
						 struct host_vm_op *, int, int,
						 void **))
{
	pmd_t *pmd;
	unsigned long next;
	int ret = 0;

	pmd = pmd_offset(pud, addr);
	do {
		next = pmd_addr_end(addr, end);
		if (!pmd_present(*pmd)) {
			if (force || pmd_newpage(*pmd)) {
				ret = add_munmap(addr, next - addr, ops,
						 op_index, last_op, mmu,
						 flush, do_ops);
				pmd_mkuptodate(*pmd);
			}
		}
		else ret = update_pte_range(pmd, addr, next, ops, last_op,
					    op_index, force, mmu, flush,
					    do_ops);
	} while (pmd++, addr = next, ((addr != end) && !ret));
	return ret;
}

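/* Same as update_pmd_range(), one level up in the page table hierarchy. */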
static inline int update_pud_range(pgd_t *pgd, unsigned long addr,
				   unsigned long end, struct host_vm_op *ops,
				   int last_op, int *op_index, int force,
				   union mm_context *mmu, void **flush,
				   int (*do_ops)(union mm_context *,
						 struct host_vm_op *, int, int,
						 void **))
{
	pud_t *pud;
	unsigned long next;
	int ret = 0;

	pud = pud_offset(pgd, addr);
	do {
		next = pud_addr_end(addr, end);
		if (!pud_present(*pud)) {
			if (force || pud_newpage(*pud)) {
				ret = add_munmap(addr, next - addr, ops,
						 op_index, last_op, mmu,
						 flush, do_ops);
				pud_mkuptodate(*pud);
			}
		}
		else ret = update_pmd_range(pud, addr, next, ops, last_op,
					    op_index, force, mmu, flush,
					    do_ops);
	} while (pud++, addr = next, ((addr != end) && !ret));
	return ret;
}

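/*
 * Walk a process address range, batching the host mmap/munmap/mprotect
 * calls needed to make the host mappings match the page tables, then issue
 * whatever is left in the ops array.  A failure at any point kills the
 * current process, since its address space is no longer consistent.
 */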
void fix_range_common(struct mm_struct *mm, unsigned long start_addr,
		      unsigned long end_addr, int force,
		      int (*do_ops)(union mm_context *, struct host_vm_op *,
				    int, int, void **))
{
	pgd_t *pgd;
	union mm_context *mmu = &mm->context;
	struct host_vm_op ops[1];
	unsigned long addr = start_addr, next;
	int ret = 0, last_op = ARRAY_SIZE(ops) - 1, op_index = -1;
	void *flush = NULL;

	ops[0].type = NONE;
	pgd = pgd_offset(mm, addr);
	do {
		next = pgd_addr_end(addr, end_addr);
		if (!pgd_present(*pgd)) {
			if (force || pgd_newpage(*pgd)) {
				ret = add_munmap(addr, next - addr, ops,
						 &op_index, last_op, mmu,
						 &flush, do_ops);
				pgd_mkuptodate(*pgd);
			}
		}
		else ret = update_pud_range(pgd, addr, next, ops, last_op,
					    &op_index, force, mmu, &flush,
					    do_ops);
	} while (pgd++, addr = next, ((addr != end_addr) && !ret));

	if (!ret)
		ret = (*do_ops)(mmu, ops, op_index, 1, &flush);

	/* This is not an else because ret is modified above */
	if (ret) {
		printk(KERN_ERR "fix_range_common: failed, killing current "
		       "process\n");
		force_sig(SIGKILL, current);
	}
}

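/*
 * Flush a kernel address range by talking to the host directly: newly
 * cleared page table entries are munmapped, new pages are remapped
 * read/write/execute, and protection changes are applied with
 * os_protect_memory().  Returns nonzero if anything was changed.
 */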
int flush_tlb_kernel_range_common(unsigned long start, unsigned long end)
{
	struct mm_struct *mm;
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;
	unsigned long addr, last;
	int updated = 0, err;

	mm = &init_mm;
	for (addr = start; addr < end;) {
		pgd = pgd_offset(mm, addr);
		if (!pgd_present(*pgd)) {
			last = ADD_ROUND(addr, PGDIR_SIZE);
			if (last > end)
				last = end;
			if (pgd_newpage(*pgd)) {
				updated = 1;
				err = os_unmap_memory((void *) addr,
						      last - addr);
				if (err < 0)
					panic("munmap failed, errno = %d\n",
					      -err);
			}
			addr = last;
			continue;
		}

		pud = pud_offset(pgd, addr);
		if (!pud_present(*pud)) {
			last = ADD_ROUND(addr, PUD_SIZE);
			if (last > end)
				last = end;
			if (pud_newpage(*pud)) {
				updated = 1;
				err = os_unmap_memory((void *) addr,
						      last - addr);
				if (err < 0)
					panic("munmap failed, errno = %d\n",
					      -err);
			}
			addr = last;
			continue;
		}

		pmd = pmd_offset(pud, addr);
		if (!pmd_present(*pmd)) {
			last = ADD_ROUND(addr, PMD_SIZE);
			if (last > end)
				last = end;
			if (pmd_newpage(*pmd)) {
				updated = 1;
				err = os_unmap_memory((void *) addr,
						      last - addr);
				if (err < 0)
					panic("munmap failed, errno = %d\n",
					      -err);
			}
			addr = last;
			continue;
		}

		pte = pte_offset_kernel(pmd, addr);
		if (!pte_present(*pte) || pte_newpage(*pte)) {
			updated = 1;
			err = os_unmap_memory((void *) addr,
					      PAGE_SIZE);
			if (err < 0)
				panic("munmap failed, errno = %d\n",
				      -err);
			if (pte_present(*pte))
				map_memory(addr,
					   pte_val(*pte) & PAGE_MASK,
					   PAGE_SIZE, 1, 1, 1);
		}
		else if (pte_newprot(*pte)) {
			updated = 1;
			os_protect_memory((void *) addr, PAGE_SIZE, 1, 1, 1);
		}
		addr += PAGE_SIZE;
	}
	return updated;
}

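/*
 * Flush a single user page - update the host mapping for one address so
 * that it matches the PTE.  The page table walk must succeed and the host
 * call must not fail; otherwise the process is killed with SIGKILL.
 */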
void flush_tlb_page(struct vm_area_struct *vma, unsigned long address)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;
	struct mm_struct *mm = vma->vm_mm;
	void *flush = NULL;
	int r, w, x, prot, err = 0;
	struct mm_id *mm_id;

	address &= PAGE_MASK;
	pgd = pgd_offset(mm, address);
	if (!pgd_present(*pgd))
		goto kill;

	pud = pud_offset(pgd, address);
	if (!pud_present(*pud))
		goto kill;

	pmd = pmd_offset(pud, address);
	if (!pmd_present(*pmd))
		goto kill;

	pte = pte_offset_kernel(pmd, address);

	r = pte_read(*pte);
	w = pte_write(*pte);
	x = pte_exec(*pte);
	if (!pte_young(*pte)) {
		r = 0;
		w = 0;
	} else if (!pte_dirty(*pte)) {
		w = 0;
	}

	mm_id = &mm->context.skas.id;
	prot = ((r ? UM_PROT_READ : 0) | (w ? UM_PROT_WRITE : 0) |
		(x ? UM_PROT_EXEC : 0));
	if (pte_newpage(*pte)) {
		if (pte_present(*pte)) {
			unsigned long long offset;
			int fd;

			fd = phys_mapping(pte_val(*pte) & PAGE_MASK, &offset);
			err = map(mm_id, address, PAGE_SIZE, prot, fd, offset,
				  1, &flush);
		}
		else err = unmap(mm_id, address, PAGE_SIZE, 1, &flush);
	}
	else if (pte_newprot(*pte))
		err = protect(mm_id, address, PAGE_SIZE, prot, 1, &flush);

	if (err)
		goto kill;

	*pte = pte_mkuptodate(*pte);

	return;

kill:
	printk(KERN_ERR "Failed to flush page for address 0x%lx\n", address);
	force_sig(SIGKILL, current);
}

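/* Plain wrappers around the generic page table accessors. */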
pgd_t *pgd_offset_proc(struct mm_struct *mm, unsigned long address)
{
	return pgd_offset(mm, address);
}

pud_t *pud_offset_proc(pgd_t *pgd, unsigned long address)
{
	return pud_offset(pgd, address);
}

pmd_t *pmd_offset_proc(pud_t *pud, unsigned long address)
{
	return pmd_offset(pud, address);
}

pte_t *pte_offset_proc(pmd_t *pmd, unsigned long address)
{
	return pte_offset_kernel(pmd, address);
}

pte_t *addr_pte(struct task_struct *task, unsigned long addr)
{
	pgd_t *pgd = pgd_offset(task->mm, addr);
	pud_t *pud = pud_offset(pgd, addr);
	pmd_t *pmd = pmd_offset(pud, addr);

	return pte_offset_map(pmd, addr);
}

void flush_tlb_all(void)
{
	flush_tlb_mm(current->mm);
}

void flush_tlb_kernel_range(unsigned long start, unsigned long end)
{
	flush_tlb_kernel_range_common(start, end);
}

void flush_tlb_kernel_vm(void)
{
	flush_tlb_kernel_range_common(start_vm, end_vm);
}

void __flush_tlb_one(unsigned long addr)
{
	flush_tlb_kernel_range_common(addr, addr + PAGE_SIZE);
}

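/*
 * Execute a batch of queued host_vm_ops against the host address space,
 * stopping at the first failure.
 */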
static int do_ops(union mm_context *mmu, struct host_vm_op *ops, int last,
		  int finished, void **flush)
{
	struct host_vm_op *op;
	int i, ret = 0;

	for (i = 0; i <= last && !ret; i++) {
		op = &ops[i];
		switch(op->type) {
		case MMAP:
			ret = map(&mmu->skas.id, op->u.mmap.addr,
				  op->u.mmap.len, op->u.mmap.prot,
				  op->u.mmap.fd, op->u.mmap.offset, finished,
				  flush);
			break;
		case MUNMAP:
			ret = unmap(&mmu->skas.id, op->u.munmap.addr,
				    op->u.munmap.len, finished, flush);
			break;
		case MPROTECT:
			ret = protect(&mmu->skas.id, op->u.mprotect.addr,
				      op->u.mprotect.len, op->u.mprotect.prot,
				      finished, flush);
			break;
		default:
			printk(KERN_ERR "Unknown op type %d in do_ops\n",
			       op->type);
			break;
		}
	}

	return ret;
}

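/*
 * Without /proc/mm on the host, the stub pages sit above CONFIG_STUB_START
 * and must stay mapped, so the range is clamped before the common fixup
 * runs.
 */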
static void fix_range(struct mm_struct *mm, unsigned long start_addr,
		      unsigned long end_addr, int force)
{
	if (!proc_mm && (end_addr > CONFIG_STUB_START))
		end_addr = CONFIG_STUB_START;

	fix_range_common(mm, start_addr, end_addr, force, do_ops);
}

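/* A NULL vm_mm marks a kernel range; user ranges go through fix_range(). */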
void flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
		     unsigned long end)
{
	if (vma->vm_mm == NULL)
		flush_tlb_kernel_range_common(start, end);
	else fix_range(vma->vm_mm, start, end, 0);
}

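/*
 * Flush a whole address space - up to task_size when the host has /proc/mm,
 * otherwise only up to CONFIG_STUB_START.
 */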
void flush_tlb_mm(struct mm_struct *mm)
{
	unsigned long end;

	/*
	 * Don't bother flushing if this address space is about to be
	 * destroyed.
	 */
	if (atomic_read(&mm->mm_users) == 0)
		return;

	end = proc_mm ? task_size : CONFIG_STUB_START;
	fix_range(mm, 0, end, 0);
}

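/*
 * Remap every VMA of the current process on the host, forcing the
 * operations even where the page tables look up to date.
 */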
void force_flush_all(void)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma = mm->mmap;

	while (vma != NULL) {
		fix_range(mm, vma->vm_start, vma->vm_end, 1);
		vma = vma->vm_next;
	}
}