/*
 * Copyright (C) 2000, 2001, 2002 Jeff Dike (jdike@karaya.com)
 * Licensed under the GPL
 */

#include "linux/mm.h"
#include "asm/page.h"
#include "asm/pgalloc.h"
#include "asm/tlbflush.h"
#include "choose-mode.h"
#include "mode_kern.h"
#include "user_util.h"
#include "tlb.h"
#include "mem.h"
#include "mem_user.h"
#include "os.h"

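/*
 * Queue a host mmap operation.  If the new mapping directly continues the
 * previous op in the buffer (same protections and fd, contiguous in both
 * virtual address and file offset), the previous op is extended instead.
 * A full buffer is flushed through do_ops before the new op is stored.
 */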
static int add_mmap(unsigned long virt, unsigned long phys, unsigned long len,
		    int r, int w, int x, struct host_vm_op *ops, int index,
		    int last_filled, union mm_context *mmu, void **flush,
		    void *(*do_ops)(union mm_context *, struct host_vm_op *,
				    int, int, void *))
{
	__u64 offset;
	struct host_vm_op *last;
	int fd;

	fd = phys_mapping(phys, &offset);
	if(index != -1){
		last = &ops[index];
		if((last->type == MMAP) &&
		   (last->u.mmap.addr + last->u.mmap.len == virt) &&
		   (last->u.mmap.r == r) && (last->u.mmap.w == w) &&
		   (last->u.mmap.x == x) && (last->u.mmap.fd == fd) &&
		   (last->u.mmap.offset + last->u.mmap.len == offset)){
			last->u.mmap.len += len;
			return index;
		}
	}

	if(index == last_filled){
		*flush = (*do_ops)(mmu, ops, last_filled, 0, *flush);
		index = -1;
	}

	ops[++index] = ((struct host_vm_op) { .type = MMAP,
					      .u = { .mmap = {
						      .addr	= virt,
						      .len	= len,
						      .r	= r,
						      .w	= w,
						      .x	= x,
						      .fd	= fd,
						      .offset	= offset }
					      } });
	return index;
}

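/*
 * Queue a host munmap operation, merging it into the previous op when the
 * two unmapped ranges are adjacent, and flushing the buffer when full.
 */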
static int add_munmap(unsigned long addr, unsigned long len,
		      struct host_vm_op *ops, int index, int last_filled,
		      union mm_context *mmu, void **flush,
		      void *(*do_ops)(union mm_context *, struct host_vm_op *,
				      int, int, void *))
{
	struct host_vm_op *last;

	if(index != -1){
		last = &ops[index];
		if((last->type == MUNMAP) &&
		   (last->u.munmap.addr + last->u.munmap.len == addr)){
			last->u.munmap.len += len;
			return index;
		}
	}

	if(index == last_filled){
		*flush = (*do_ops)(mmu, ops, last_filled, 0, *flush);
		index = -1;
	}

	ops[++index] = ((struct host_vm_op) { .type = MUNMAP,
					      .u = { .munmap = {
						      .addr	= addr,
						      .len	= len } } });
	return index;
}

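/*
 * Queue a host mprotect operation, merging adjacent ranges that share the
 * same protections, and flushing the buffer when full.
 */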
static int add_mprotect(unsigned long addr, unsigned long len, int r, int w,
			int x, struct host_vm_op *ops, int index,
			int last_filled, union mm_context *mmu, void **flush,
			void *(*do_ops)(union mm_context *,
					struct host_vm_op *, int, int, void *))
{
	struct host_vm_op *last;

	if(index != -1){
		last = &ops[index];
		if((last->type == MPROTECT) &&
		   (last->u.mprotect.addr + last->u.mprotect.len == addr) &&
		   (last->u.mprotect.r == r) && (last->u.mprotect.w == w) &&
		   (last->u.mprotect.x == x)){
			last->u.mprotect.len += len;
			return index;
		}
	}

	if(index == last_filled){
		*flush = (*do_ops)(mmu, ops, last_filled, 0, *flush);
		index = -1;
	}

	ops[++index] = ((struct host_vm_op) { .type = MPROTECT,
					      .u = { .mprotect = {
						      .addr	= addr,
						      .len	= len,
						      .r	= r,
						      .w	= w,
						      .x	= x } } });
	return index;
}

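/*
 * Advance n to the next inc boundary (inc must be a power of two); an
 * already-aligned n moves to the following boundary.
 */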
#define ADD_ROUND(n, inc) (((n) + (inc)) & ~((inc) - 1))

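/*
 * Walk the page tables for [start_addr, end_addr) and turn any out-of-date
 * entries into a batch of host mmap/munmap/mprotect ops, applied through
 * do_ops.  Absent or new upper-level entries cause the whole covered range
 * to be unmapped; "force" unmaps even entries not marked as new.
 */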
void fix_range_common(struct mm_struct *mm, unsigned long start_addr,
		      unsigned long end_addr, int force,
		      void *(*do_ops)(union mm_context *, struct host_vm_op *,
				      int, int, void *))
{
	pgd_t *npgd;
	pud_t *npud;
	pmd_t *npmd;
	pte_t *npte;
	union mm_context *mmu = &mm->context;
	unsigned long addr, end;
	int r, w, x;
	struct host_vm_op ops[1];
	void *flush = NULL;
	int op_index = -1, last_op = sizeof(ops) / sizeof(ops[0]) - 1;

	if(mm == NULL) return;

	ops[0].type = NONE;
	for(addr = start_addr; addr < end_addr;){
		npgd = pgd_offset(mm, addr);
		if(!pgd_present(*npgd)){
			end = ADD_ROUND(addr, PGDIR_SIZE);
			if(end > end_addr)
				end = end_addr;
			if(force || pgd_newpage(*npgd)){
				op_index = add_munmap(addr, end - addr, ops,
						      op_index, last_op, mmu,
						      &flush, do_ops);
				pgd_mkuptodate(*npgd);
			}
			addr = end;
			continue;
		}

		npud = pud_offset(npgd, addr);
		if(!pud_present(*npud)){
			end = ADD_ROUND(addr, PUD_SIZE);
			if(end > end_addr)
				end = end_addr;
			if(force || pud_newpage(*npud)){
				op_index = add_munmap(addr, end - addr, ops,
						      op_index, last_op, mmu,
						      &flush, do_ops);
				pud_mkuptodate(*npud);
			}
			addr = end;
			continue;
		}

		npmd = pmd_offset(npud, addr);
		if(!pmd_present(*npmd)){
			end = ADD_ROUND(addr, PMD_SIZE);
			if(end > end_addr)
				end = end_addr;
			if(force || pmd_newpage(*npmd)){
				op_index = add_munmap(addr, end - addr, ops,
						      op_index, last_op, mmu,
						      &flush, do_ops);
				pmd_mkuptodate(*npmd);
			}
			addr = end;
			continue;
		}

		npte = pte_offset_kernel(npmd, addr);
		r = pte_read(*npte);
		w = pte_write(*npte);
		x = pte_exec(*npte);
		if(!pte_dirty(*npte))
			w = 0;
		if(!pte_young(*npte)){
			r = 0;
			w = 0;
		}
		if(force || pte_newpage(*npte)){
			if(pte_present(*npte))
				op_index = add_mmap(addr,
						    pte_val(*npte) & PAGE_MASK,
						    PAGE_SIZE, r, w, x, ops,
						    op_index, last_op, mmu,
						    &flush, do_ops);
			else op_index = add_munmap(addr, PAGE_SIZE, ops,
						   op_index, last_op, mmu,
						   &flush, do_ops);
		}
		else if(pte_newprot(*npte))
			op_index = add_mprotect(addr, PAGE_SIZE, r, w, x, ops,
						op_index, last_op, mmu,
						&flush, do_ops);

		*npte = pte_mkuptodate(*npte);
		addr += PAGE_SIZE;
	}
	flush = (*do_ops)(mmu, ops, op_index, 1, flush);
}

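/*
 * Flush a kernel address range by applying init_mm's page tables to the
 * host directly: stale ranges are munmapped and remapped (or reprotected)
 * immediately rather than batched.  Returns nonzero if anything changed.
 */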
int flush_tlb_kernel_range_common(unsigned long start, unsigned long end)
{
	struct mm_struct *mm;
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;
	unsigned long addr, last;
	int updated = 0, err;

	mm = &init_mm;
	for(addr = start; addr < end;){
		pgd = pgd_offset(mm, addr);
		if(!pgd_present(*pgd)){
			last = ADD_ROUND(addr, PGDIR_SIZE);
			if(last > end)
				last = end;
			if(pgd_newpage(*pgd)){
				updated = 1;
				err = os_unmap_memory((void *) addr,
						      last - addr);
				if(err < 0)
					panic("munmap failed, errno = %d\n",
					      -err);
			}
			addr = last;
			continue;
		}

		pud = pud_offset(pgd, addr);
		if(!pud_present(*pud)){
			last = ADD_ROUND(addr, PUD_SIZE);
			if(last > end)
				last = end;
			if(pud_newpage(*pud)){
				updated = 1;
				err = os_unmap_memory((void *) addr,
						      last - addr);
				if(err < 0)
					panic("munmap failed, errno = %d\n",
					      -err);
			}
			addr = last;
			continue;
		}

		pmd = pmd_offset(pud, addr);
		if(!pmd_present(*pmd)){
			last = ADD_ROUND(addr, PMD_SIZE);
			if(last > end)
				last = end;
			if(pmd_newpage(*pmd)){
				updated = 1;
				err = os_unmap_memory((void *) addr,
						      last - addr);
				if(err < 0)
					panic("munmap failed, errno = %d\n",
					      -err);
			}
			addr = last;
			continue;
		}

		pte = pte_offset_kernel(pmd, addr);
		if(!pte_present(*pte) || pte_newpage(*pte)){
			updated = 1;
			err = os_unmap_memory((void *) addr,
					      PAGE_SIZE);
			if(err < 0)
				panic("munmap failed, errno = %d\n",
				      -err);
			if(pte_present(*pte))
				map_memory(addr,
					   pte_val(*pte) & PAGE_MASK,
					   PAGE_SIZE, 1, 1, 1);
		}
		else if(pte_newprot(*pte)){
			updated = 1;
			protect_memory(addr, PAGE_SIZE, 1, 1, 1, 1);
		}
		addr += PAGE_SIZE;
	}
	return(updated);
}

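/*
 * Out-of-line function versions of the page table walking macros, for
 * callers that can't use the inline forms directly.
 */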
pgd_t *pgd_offset_proc(struct mm_struct *mm, unsigned long address)
{
	return(pgd_offset(mm, address));
}

pud_t *pud_offset_proc(pgd_t *pgd, unsigned long address)
{
	return(pud_offset(pgd, address));
}

pmd_t *pmd_offset_proc(pud_t *pud, unsigned long address)
{
	return(pmd_offset(pud, address));
}

pte_t *pte_offset_proc(pmd_t *pmd, unsigned long address)
{
	return(pte_offset_kernel(pmd, address));
}

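/* Return the pte mapping addr in task's address space. */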
pte_t *addr_pte(struct task_struct *task, unsigned long addr)
{
	pgd_t *pgd = pgd_offset(task->mm, addr);
	pud_t *pud = pud_offset(pgd, addr);
	pmd_t *pmd = pmd_offset(pud, addr);

	return(pte_offset_map(pmd, addr));
}

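/*
 * The flush entry points below dispatch to the tt-mode or skas-mode
 * implementation through CHOOSE_MODE, either directly or by calling one
 * of the other flush routines.
 */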
void flush_tlb_page(struct vm_area_struct *vma, unsigned long address)
{
	address &= PAGE_MASK;
	flush_tlb_range(vma, address, address + PAGE_SIZE);
}

void flush_tlb_all(void)
{
	flush_tlb_mm(current->mm);
}

void flush_tlb_kernel_range(unsigned long start, unsigned long end)
{
	CHOOSE_MODE_PROC(flush_tlb_kernel_range_tt,
			 flush_tlb_kernel_range_common, start, end);
}

void flush_tlb_kernel_vm(void)
{
	CHOOSE_MODE(flush_tlb_kernel_vm_tt(),
		    flush_tlb_kernel_range_common(start_vm, end_vm));
}

void __flush_tlb_one(unsigned long addr)
{
	CHOOSE_MODE_PROC(__flush_tlb_one_tt, __flush_tlb_one_skas, addr);
}

void flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
		     unsigned long end)
{
	CHOOSE_MODE_PROC(flush_tlb_range_tt, flush_tlb_range_skas, vma, start,
			 end);
}

void flush_tlb_mm(struct mm_struct *mm)
{
	CHOOSE_MODE_PROC(flush_tlb_mm_tt, flush_tlb_mm_skas, mm);
}

void force_flush_all(void)
{
	CHOOSE_MODE(force_flush_all_tt(), force_flush_all_skas());
}