memcg: avoid accounting special pages
1 /*
2  *  linux/mm/memory.c
3  *
4  *  Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
5  */
6
7 /*
8  * demand-loading started 01.12.91 - seems it is high on the list of
9  * things wanted, and it should be easy to implement. - Linus
10  */
11
12 /*
13  * Ok, demand-loading was easy, shared pages a little bit trickier. Shared
14  * pages started 02.12.91, seems to work. - Linus.
15  *
16  * Tested sharing by executing about 30 /bin/sh: under the old kernel it
17  * would have taken more than the 6M I have free, but it worked well as
18  * far as I could see.
19  *
20  * Also corrected some "invalidate()"s - I wasn't doing enough of them.
21  */
22
23 /*
24  * Real VM (paging to/from disk) started 18.12.91. Much more work and
25  * thought has to go into this. Oh, well..
26  * 19.12.91  -  works, somewhat. Sometimes I get faults, don't know why.
27  *              Found it. Everything seems to work now.
28  * 20.12.91  -  Ok, making the swap-device changeable like the root.
29  */
30
31 /*
32  * 05.04.94  -  Multi-page memory management added for v1.1.
33  *              Idea by Alex Bligh (alex@cconcepts.co.uk)
34  *
35  * 16.07.99  -  Support of BIGMEM added by Gerhard Wichert, Siemens AG
36  *              (Gerhard.Wichert@pdb.siemens.de)
37  *
38  * Aug/Sep 2004 Changed to four level page tables (Andi Kleen)
39  */
40
41 #include <linux/kernel_stat.h>
42 #include <linux/mm.h>
43 #include <linux/hugetlb.h>
44 #include <linux/mman.h>
45 #include <linux/swap.h>
46 #include <linux/highmem.h>
47 #include <linux/pagemap.h>
48 #include <linux/rmap.h>
49 #include <linux/module.h>
50 #include <linux/delayacct.h>
51 #include <linux/init.h>
52 #include <linux/writeback.h>
53 #include <linux/memcontrol.h>
54 #include <linux/mmu_notifier.h>
55
56 #include <asm/pgalloc.h>
57 #include <asm/uaccess.h>
58 #include <asm/tlb.h>
59 #include <asm/tlbflush.h>
60 #include <asm/pgtable.h>
61
62 #include <linux/swapops.h>
63 #include <linux/elf.h>
64
65 #include "internal.h"
66
69 #ifndef CONFIG_NEED_MULTIPLE_NODES
70 /* use the per-pgdat data instead for discontigmem - mbligh */
71 unsigned long max_mapnr;
72 struct page *mem_map;
73
74 EXPORT_SYMBOL(max_mapnr);
75 EXPORT_SYMBOL(mem_map);
76 #endif
77
78 unsigned long num_physpages;
79 /*
80  * A number of key systems in x86 including ioremap() rely on the assumption
81  * that high_memory defines the upper bound on direct map memory, the end
82  * of ZONE_NORMAL.  Under CONFIG_DISCONTIG this means that max_low_pfn and
83  * highstart_pfn must be the same; there must be no gap between ZONE_NORMAL
84  * and ZONE_HIGHMEM.
85  */
86 void * high_memory;
87
88 EXPORT_SYMBOL(num_physpages);
89 EXPORT_SYMBOL(high_memory);
90
91 /*
92  * Randomize the address space (stacks, mmaps, brk, etc.).
93  *
94  * ( When CONFIG_COMPAT_BRK=y we exclude brk from randomization,
95  *   as ancient (libc5 based) binaries can segfault. )
96  */
97 int randomize_va_space __read_mostly =
98 #ifdef CONFIG_COMPAT_BRK
99                                         1;
100 #else
101                                         2;
102 #endif
103
104 static int __init disable_randmaps(char *s)
105 {
106         randomize_va_space = 0;
107         return 1;
108 }
109 __setup("norandmaps", disable_randmaps);
110
111
112 /*
113  * If a p?d_bad entry is found while walking page tables, report
114  * the error, before resetting entry to p?d_none.  Usually (but
115  * very seldom) called out from the p?d_none_or_clear_bad macros.
116  */
117
118 void pgd_clear_bad(pgd_t *pgd)
119 {
120         pgd_ERROR(*pgd);
121         pgd_clear(pgd);
122 }
123
124 void pud_clear_bad(pud_t *pud)
125 {
126         pud_ERROR(*pud);
127         pud_clear(pud);
128 }
129
130 void pmd_clear_bad(pmd_t *pmd)
131 {
132         pmd_ERROR(*pmd);
133         pmd_clear(pmd);
134 }
135
136 /*
137  * Note: this doesn't free the actual pages themselves. That
138  * has been handled earlier when unmapping all the memory regions.
139  */
140 static void free_pte_range(struct mmu_gather *tlb, pmd_t *pmd)
141 {
142         pgtable_t token = pmd_pgtable(*pmd);
143         pmd_clear(pmd);
144         pte_free_tlb(tlb, token);
145         tlb->mm->nr_ptes--;
146 }
147
148 static inline void free_pmd_range(struct mmu_gather *tlb, pud_t *pud,
149                                 unsigned long addr, unsigned long end,
150                                 unsigned long floor, unsigned long ceiling)
151 {
152         pmd_t *pmd;
153         unsigned long next;
154         unsigned long start;
155
156         start = addr;
157         pmd = pmd_offset(pud, addr);
158         do {
159                 next = pmd_addr_end(addr, end);
160                 if (pmd_none_or_clear_bad(pmd))
161                         continue;
162                 free_pte_range(tlb, pmd);
163         } while (pmd++, addr = next, addr != end);
164
165         start &= PUD_MASK;
166         if (start < floor)
167                 return;
168         if (ceiling) {
169                 ceiling &= PUD_MASK;
170                 if (!ceiling)
171                         return;
172         }
173         if (end - 1 > ceiling - 1)
174                 return;
175
176         pmd = pmd_offset(pud, start);
177         pud_clear(pud);
178         pmd_free_tlb(tlb, pmd);
179 }
180
181 static inline void free_pud_range(struct mmu_gather *tlb, pgd_t *pgd,
182                                 unsigned long addr, unsigned long end,
183                                 unsigned long floor, unsigned long ceiling)
184 {
185         pud_t *pud;
186         unsigned long next;
187         unsigned long start;
188
189         start = addr;
190         pud = pud_offset(pgd, addr);
191         do {
192                 next = pud_addr_end(addr, end);
193                 if (pud_none_or_clear_bad(pud))
194                         continue;
195                 free_pmd_range(tlb, pud, addr, next, floor, ceiling);
196         } while (pud++, addr = next, addr != end);
197
198         start &= PGDIR_MASK;
199         if (start < floor)
200                 return;
201         if (ceiling) {
202                 ceiling &= PGDIR_MASK;
203                 if (!ceiling)
204                         return;
205         }
206         if (end - 1 > ceiling - 1)
207                 return;
208
209         pud = pud_offset(pgd, start);
210         pgd_clear(pgd);
211         pud_free_tlb(tlb, pud);
212 }
213
214 /*
215  * This function frees user-level page tables of a process.
216  *
217  * Must be called with pagetable lock held.
218  */
219 void free_pgd_range(struct mmu_gather *tlb,
220                         unsigned long addr, unsigned long end,
221                         unsigned long floor, unsigned long ceiling)
222 {
223         pgd_t *pgd;
224         unsigned long next;
225         unsigned long start;
226
227         /*
228          * The next few lines have given us lots of grief...
229          *
230          * Why are we testing PMD* at this top level?  Because often
231          * there will be no work to do at all, and we'd prefer not to
232          * go all the way down to the bottom just to discover that.
233          *
234          * Why all these "- 1"s?  Because 0 represents both the bottom
235          * of the address space and the top of it (using -1 for the
236          * top wouldn't help much: the masks would do the wrong thing).
237          * The rule is that addr 0 and floor 0 refer to the bottom of
238          * the address space, but end 0 and ceiling 0 refer to the top
239          * Comparisons need to use "end - 1" and "ceiling - 1" (though
240          * that end 0 case should be mythical).
241          *
242          * Wherever addr is brought up or ceiling brought down, we must
243          * be careful to reject "the opposite 0" before it confuses the
244          * subsequent tests.  But what about where end is brought down
245          * by PMD_SIZE below? no, end can't go down to 0 there.
246          *
247          * Whereas we round start (addr) and ceiling down, by different
248          * masks at different levels, in order to test whether a table
249          * now has no other vmas using it, so can be freed, we don't
250          * bother to round floor or end up - the tests don't need that.
251          */
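        /*
         * Concrete example of the rules above: with ceiling == 0 (the top),
         * "ceiling - 1" wraps to ~0UL, so "end - 1 > ceiling - 1" is never
         * true and end is not clipped.  And when addr rounded down to a
         * PMD boundary falls below floor, the pte page for that span also
         * maps addresses below floor and must be preserved, hence addr is
         * first bumped up by PMD_SIZE.
         */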
252
253         addr &= PMD_MASK;
254         if (addr < floor) {
255                 addr += PMD_SIZE;
256                 if (!addr)
257                         return;
258         }
259         if (ceiling) {
260                 ceiling &= PMD_MASK;
261                 if (!ceiling)
262                         return;
263         }
264         if (end - 1 > ceiling - 1)
265                 end -= PMD_SIZE;
266         if (addr > end - 1)
267                 return;
268
269         start = addr;
270         pgd = pgd_offset(tlb->mm, addr);
271         do {
272                 next = pgd_addr_end(addr, end);
273                 if (pgd_none_or_clear_bad(pgd))
274                         continue;
275                 free_pud_range(tlb, pgd, addr, next, floor, ceiling);
276         } while (pgd++, addr = next, addr != end);
277 }
278
279 void free_pgtables(struct mmu_gather *tlb, struct vm_area_struct *vma,
280                 unsigned long floor, unsigned long ceiling)
281 {
282         while (vma) {
283                 struct vm_area_struct *next = vma->vm_next;
284                 unsigned long addr = vma->vm_start;
285
286                 /*
287                  * Hide vma from rmap and vmtruncate before freeing pgtables
288                  */
289                 anon_vma_unlink(vma);
290                 unlink_file_vma(vma);
291
292                 if (is_vm_hugetlb_page(vma)) {
293                         hugetlb_free_pgd_range(tlb, addr, vma->vm_end,
294                                 floor, next? next->vm_start: ceiling);
295                 } else {
296                         /*
297                          * Optimization: gather nearby vmas into one call down
298                          */
299                         while (next && next->vm_start <= vma->vm_end + PMD_SIZE
300                                && !is_vm_hugetlb_page(next)) {
301                                 vma = next;
302                                 next = vma->vm_next;
303                                 anon_vma_unlink(vma);
304                                 unlink_file_vma(vma);
305                         }
306                         free_pgd_range(tlb, addr, vma->vm_end,
307                                 floor, next? next->vm_start: ceiling);
308                 }
309                 vma = next;
310         }
311 }
312
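/*
 * Allocate a new pte page for pmd and publish it under page_table_lock.
 * If another thread populated the pmd first, the freshly allocated page is
 * simply freed again; the smp_wmb() in the body orders the pte page setup
 * before it becomes visible to lockless page-table walkers.
 */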
313 int __pte_alloc(struct mm_struct *mm, pmd_t *pmd, unsigned long address)
314 {
315         pgtable_t new = pte_alloc_one(mm, address);
316         if (!new)
317                 return -ENOMEM;
318
319         /*
320          * Ensure all pte setup (eg. pte page lock and page clearing) are
321          * visible before the pte is made visible to other CPUs by being
322          * put into page tables.
323          *
324          * The other side of the story is the pointer chasing in the page
325          * table walking code (when walking the page table without locking;
326          * ie. most of the time). Fortunately, these data accesses consist
327          * of a chain of data-dependent loads, meaning most CPUs (alpha
328          * being the notable exception) will already guarantee loads are
329          * seen in-order. See the alpha page table accessors for the
330          * smp_read_barrier_depends() barriers in page table walking code.
331          */
332         smp_wmb(); /* Could be smp_wmb__xxx(before|after)_spin_lock */
333
334         spin_lock(&mm->page_table_lock);
335         if (!pmd_present(*pmd)) {       /* Has another populated it ? */
336                 mm->nr_ptes++;
337                 pmd_populate(mm, pmd, new);
338                 new = NULL;
339         }
340         spin_unlock(&mm->page_table_lock);
341         if (new)
342                 pte_free(mm, new);
343         return 0;
344 }
345
346 int __pte_alloc_kernel(pmd_t *pmd, unsigned long address)
347 {
348         pte_t *new = pte_alloc_one_kernel(&init_mm, address);
349         if (!new)
350                 return -ENOMEM;
351
352         smp_wmb(); /* See comment in __pte_alloc */
353
354         spin_lock(&init_mm.page_table_lock);
355         if (!pmd_present(*pmd)) {       /* Has another populated it ? */
356                 pmd_populate_kernel(&init_mm, pmd, new);
357                 new = NULL;
358         }
359         spin_unlock(&init_mm.page_table_lock);
360         if (new)
361                 pte_free_kernel(&init_mm, new);
362         return 0;
363 }
364
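/*
 * Fold the file/anon rss deltas accumulated by a caller into the mm
 * counters in one go, skipping the update entirely when a delta is zero.
 */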
365 static inline void add_mm_rss(struct mm_struct *mm, int file_rss, int anon_rss)
366 {
367         if (file_rss)
368                 add_mm_counter(mm, file_rss, file_rss);
369         if (anon_rss)
370                 add_mm_counter(mm, anon_rss, anon_rss);
371 }
372
373 /*
374  * This function is called to print an error when a bad pte
375  * is found. For example, we might have a PFN-mapped pte in
376  * a region that doesn't allow it.
377  *
378  * The calling function must still handle the error.
379  */
380 static void print_bad_pte(struct vm_area_struct *vma, pte_t pte,
381                           unsigned long vaddr)
382 {
383         printk(KERN_ERR "Bad pte = %08llx, process = %s, "
384                         "vm_flags = %lx, vaddr = %lx\n",
385                 (long long)pte_val(pte),
386                 (vma->vm_mm == current->mm ? current->comm : "???"),
387                 vma->vm_flags, vaddr);
388         dump_stack();
389 }
390
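/*
 * A private, potentially writable mapping is a COW mapping: VM_MAYWRITE
 * set but VM_SHARED clear.  Writes to such a mapping get a private copy
 * of the page rather than modifying the backing object.
 */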
391 static inline int is_cow_mapping(unsigned int flags)
392 {
393         return (flags & (VM_SHARED | VM_MAYWRITE)) == VM_MAYWRITE;
394 }
395
396 /*
397  * vm_normal_page -- This function gets the "struct page" associated with a pte.
398  *
399  * "Special" mappings do not wish to be associated with a "struct page" (either
400  * it doesn't exist, or it exists but they don't want to touch it). In this
401  * case, NULL is returned here. "Normal" mappings do have a struct page.
402  *
403  * There are 2 broad cases. Firstly, an architecture may define a pte_special()
404  * pte bit, in which case this function is trivial. Secondly, an architecture
405  * may not have a spare pte bit, which requires a more complicated scheme,
406  * described below.
407  *
408  * A raw VM_PFNMAP mapping (ie. one that is not COWed) is always considered a
409  * special mapping (even if there are underlying and valid "struct pages").
410  * COWed pages of a VM_PFNMAP are always normal.
411  *
412  * The way we recognize COWed pages within VM_PFNMAP mappings is through the
413  * rules set up by "remap_pfn_range()": the vma will have the VM_PFNMAP bit
414  * set, and the vm_pgoff will point to the first PFN mapped: thus every special
415  * mapping will always honor the rule
416  *
417  *      pfn_of_page == vma->vm_pgoff + ((addr - vma->vm_start) >> PAGE_SHIFT)
418  *
419  * And for normal mappings this is false.
420  *
421  * This restricts such mappings to be a linear translation from virtual address
422  * to pfn. To get around this restriction, we allow arbitrary mappings so long
423  * as the vma is not a COW mapping; in that case, we know that all ptes are
424  * special (because none can have been COWed).
425  *
426  *
427  * In order to support COW of arbitrary special mappings, we have VM_MIXEDMAP.
428  *
429  * VM_MIXEDMAP mappings can likewise contain memory with or without "struct
430  * page" backing, however the difference is that _all_ pages with a struct
431  * page (that is, those where pfn_valid is true) are refcounted and considered
432  * normal pages by the VM. The disadvantage is that pages are refcounted
433  * (which can be slower and simply not an option for some PFNMAP users). The
434  * advantage is that we don't have to follow the strict linearity rule of
435  * PFNMAP mappings in order to support COWable mappings.
436  *
437  */
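/*
 * Example of the remap_pfn_range() rule above: a vma with
 * vm_start == 0x40000000 and vm_pgoff == 0x80000 maps address 0x40003000
 * to pfn 0x80003 while the pte is still the original PFN mapping; a COWed
 * copy installed at that address normally lands on some other pfn, so it
 * fails the linearity test and is treated as a normal page.
 */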
438 #ifdef __HAVE_ARCH_PTE_SPECIAL
439 # define HAVE_PTE_SPECIAL 1
440 #else
441 # define HAVE_PTE_SPECIAL 0
442 #endif
443 struct page *vm_normal_page(struct vm_area_struct *vma, unsigned long addr,
444                                 pte_t pte)
445 {
446         unsigned long pfn;
447
448         if (HAVE_PTE_SPECIAL) {
449                 if (likely(!pte_special(pte))) {
450                         VM_BUG_ON(!pfn_valid(pte_pfn(pte)));
451                         return pte_page(pte);
452                 }
453                 VM_BUG_ON(!(vma->vm_flags & (VM_PFNMAP | VM_MIXEDMAP)));
454                 return NULL;
455         }
456
457         /* !HAVE_PTE_SPECIAL case follows: */
458
459         pfn = pte_pfn(pte);
460
461         if (unlikely(vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP))) {
462                 if (vma->vm_flags & VM_MIXEDMAP) {
463                         if (!pfn_valid(pfn))
464                                 return NULL;
465                         goto out;
466                 } else {
467                         unsigned long off;
468                         off = (addr - vma->vm_start) >> PAGE_SHIFT;
469                         if (pfn == vma->vm_pgoff + off)
470                                 return NULL;
471                         if (!is_cow_mapping(vma->vm_flags))
472                                 return NULL;
473                 }
474         }
475
476         VM_BUG_ON(!pfn_valid(pfn));
477
478         /*
479          * NOTE! We still have PageReserved() pages in the page tables.
480          *
481          * eg. VDSO mappings can cause them to exist.
482          */
483 out:
484         return pfn_to_page(pfn);
485 }
486
487 /*
488  * copy one vm_area from one task to the other. Assumes the page tables
489  * already present in the new task to be cleared in the whole range
490  * covered by this vma.
491  */
492
493 static inline void
494 copy_one_pte(struct mm_struct *dst_mm, struct mm_struct *src_mm,
495                 pte_t *dst_pte, pte_t *src_pte, struct vm_area_struct *vma,
496                 unsigned long addr, int *rss)
497 {
498         unsigned long vm_flags = vma->vm_flags;
499         pte_t pte = *src_pte;
500         struct page *page;
501
502         /* pte contains position in swap or file, so copy. */
503         if (unlikely(!pte_present(pte))) {
504                 if (!pte_file(pte)) {
505                         swp_entry_t entry = pte_to_swp_entry(pte);
506
507                         swap_duplicate(entry);
508                         /* make sure dst_mm is on swapoff's mmlist. */
509                         if (unlikely(list_empty(&dst_mm->mmlist))) {
510                                 spin_lock(&mmlist_lock);
511                                 if (list_empty(&dst_mm->mmlist))
512                                         list_add(&dst_mm->mmlist,
513                                                  &src_mm->mmlist);
514                                 spin_unlock(&mmlist_lock);
515                         }
516                         if (is_write_migration_entry(entry) &&
517                                         is_cow_mapping(vm_flags)) {
518                                 /*
519                                  * COW mappings require pages in both parent
520                                  * and child to be set to read.
521                                  */
522                                 make_migration_entry_read(&entry);
523                                 pte = swp_entry_to_pte(entry);
524                                 set_pte_at(src_mm, addr, src_pte, pte);
525                         }
526                 }
527                 goto out_set_pte;
528         }
529
530         /*
531          * If it's a COW mapping, write protect it both
532          * in the parent and the child
533          */
534         if (is_cow_mapping(vm_flags)) {
535                 ptep_set_wrprotect(src_mm, addr, src_pte);
536                 pte = pte_wrprotect(pte);
537         }
538
539         /*
540          * If it's a shared mapping, mark it clean in
541          * the child
542          */
543         if (vm_flags & VM_SHARED)
544                 pte = pte_mkclean(pte);
545         pte = pte_mkold(pte);
546
547         page = vm_normal_page(vma, addr, pte);
548         if (page) {
549                 get_page(page);
550                 page_dup_rmap(page, vma, addr);
551                 rss[!!PageAnon(page)]++;
552         }
553
554 out_set_pte:
555         set_pte_at(dst_mm, addr, dst_pte, pte);
556 }
557
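/*
 * Copy one pte page worth of entries from the parent to the child at fork
 * time.  rss[0]/rss[1] accumulate the file/anon rss added to the child,
 * and "progress" lets us drop both pte locks periodically so other CPUs
 * waiting on them (or on the scheduler) are not starved.
 */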
558 static int copy_pte_range(struct mm_struct *dst_mm, struct mm_struct *src_mm,
559                 pmd_t *dst_pmd, pmd_t *src_pmd, struct vm_area_struct *vma,
560                 unsigned long addr, unsigned long end)
561 {
562         pte_t *src_pte, *dst_pte;
563         spinlock_t *src_ptl, *dst_ptl;
564         int progress = 0;
565         int rss[2];
566
567 again:
568         rss[1] = rss[0] = 0;
569         dst_pte = pte_alloc_map_lock(dst_mm, dst_pmd, addr, &dst_ptl);
570         if (!dst_pte)
571                 return -ENOMEM;
572         src_pte = pte_offset_map_nested(src_pmd, addr);
573         src_ptl = pte_lockptr(src_mm, src_pmd);
574         spin_lock_nested(src_ptl, SINGLE_DEPTH_NESTING);
575         arch_enter_lazy_mmu_mode();
576
577         do {
578                 /*
579                  * We are holding two locks at this point - either of them
580                  * could generate latencies in another task on another CPU.
581                  */
582                 if (progress >= 32) {
583                         progress = 0;
584                         if (need_resched() ||
585                             spin_needbreak(src_ptl) || spin_needbreak(dst_ptl))
586                                 break;
587                 }
588                 if (pte_none(*src_pte)) {
589                         progress++;
590                         continue;
591                 }
592                 copy_one_pte(dst_mm, src_mm, dst_pte, src_pte, vma, addr, rss);
593                 progress += 8;
594         } while (dst_pte++, src_pte++, addr += PAGE_SIZE, addr != end);
595
596         arch_leave_lazy_mmu_mode();
597         spin_unlock(src_ptl);
598         pte_unmap_nested(src_pte - 1);
599         add_mm_rss(dst_mm, rss[0], rss[1]);
600         pte_unmap_unlock(dst_pte - 1, dst_ptl);
601         cond_resched();
602         if (addr != end)
603                 goto again;
604         return 0;
605 }
606
607 static inline int copy_pmd_range(struct mm_struct *dst_mm, struct mm_struct *src_mm,
608                 pud_t *dst_pud, pud_t *src_pud, struct vm_area_struct *vma,
609                 unsigned long addr, unsigned long end)
610 {
611         pmd_t *src_pmd, *dst_pmd;
612         unsigned long next;
613
614         dst_pmd = pmd_alloc(dst_mm, dst_pud, addr);
615         if (!dst_pmd)
616                 return -ENOMEM;
617         src_pmd = pmd_offset(src_pud, addr);
618         do {
619                 next = pmd_addr_end(addr, end);
620                 if (pmd_none_or_clear_bad(src_pmd))
621                         continue;
622                 if (copy_pte_range(dst_mm, src_mm, dst_pmd, src_pmd,
623                                                 vma, addr, next))
624                         return -ENOMEM;
625         } while (dst_pmd++, src_pmd++, addr = next, addr != end);
626         return 0;
627 }
628
629 static inline int copy_pud_range(struct mm_struct *dst_mm, struct mm_struct *src_mm,
630                 pgd_t *dst_pgd, pgd_t *src_pgd, struct vm_area_struct *vma,
631                 unsigned long addr, unsigned long end)
632 {
633         pud_t *src_pud, *dst_pud;
634         unsigned long next;
635
636         dst_pud = pud_alloc(dst_mm, dst_pgd, addr);
637         if (!dst_pud)
638                 return -ENOMEM;
639         src_pud = pud_offset(src_pgd, addr);
640         do {
641                 next = pud_addr_end(addr, end);
642                 if (pud_none_or_clear_bad(src_pud))
643                         continue;
644                 if (copy_pmd_range(dst_mm, src_mm, dst_pud, src_pud,
645                                                 vma, addr, next))
646                         return -ENOMEM;
647         } while (dst_pud++, src_pud++, addr = next, addr != end);
648         return 0;
649 }
650
651 int copy_page_range(struct mm_struct *dst_mm, struct mm_struct *src_mm,
652                 struct vm_area_struct *vma)
653 {
654         pgd_t *src_pgd, *dst_pgd;
655         unsigned long next;
656         unsigned long addr = vma->vm_start;
657         unsigned long end = vma->vm_end;
658         int ret;
659
660         /*
661          * Don't copy ptes where a page fault will fill them correctly.
662          * Fork becomes much lighter when there are big shared or private
663          * readonly mappings. The tradeoff is that copy_page_range is more
664          * efficient than faulting.
665          */
666         if (!(vma->vm_flags & (VM_HUGETLB|VM_NONLINEAR|VM_PFNMAP|VM_INSERTPAGE))) {
667                 if (!vma->anon_vma)
668                         return 0;
669         }
670
671         if (is_vm_hugetlb_page(vma))
672                 return copy_hugetlb_page_range(dst_mm, src_mm, vma);
673
674         /*
675          * We need to invalidate the secondary MMU mappings only when
676          * there could be a permission downgrade on the ptes of the
677          * parent mm. And a permission downgrade will only happen if
678          * is_cow_mapping() returns true.
679          */
680         if (is_cow_mapping(vma->vm_flags))
681                 mmu_notifier_invalidate_range_start(src_mm, addr, end);
682
683         ret = 0;
684         dst_pgd = pgd_offset(dst_mm, addr);
685         src_pgd = pgd_offset(src_mm, addr);
686         do {
687                 next = pgd_addr_end(addr, end);
688                 if (pgd_none_or_clear_bad(src_pgd))
689                         continue;
690                 if (unlikely(copy_pud_range(dst_mm, src_mm, dst_pgd, src_pgd,
691                                             vma, addr, next))) {
692                         ret = -ENOMEM;
693                         break;
694                 }
695         } while (dst_pgd++, src_pgd++, addr = next, addr != end);
696
697         if (is_cow_mapping(vma->vm_flags))
698                 mmu_notifier_invalidate_range_end(src_mm,
699                                                   vma->vm_start, end);
700         return ret;
701 }
702
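/*
 * Tear down the ptes covered by one pte page: present entries have their
 * page's rmap and rss accounting dropped and the page handed to the
 * mmu_gather for freeing; non-present entries are cleared and any swap
 * reference released, unless a "details" filter asks us to leave them in
 * place.  *zap_work throttles how much work is done per call.
 */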
703 static unsigned long zap_pte_range(struct mmu_gather *tlb,
704                                 struct vm_area_struct *vma, pmd_t *pmd,
705                                 unsigned long addr, unsigned long end,
706                                 long *zap_work, struct zap_details *details)
707 {
708         struct mm_struct *mm = tlb->mm;
709         pte_t *pte;
710         spinlock_t *ptl;
711         int file_rss = 0;
712         int anon_rss = 0;
713
714         pte = pte_offset_map_lock(mm, pmd, addr, &ptl);
715         arch_enter_lazy_mmu_mode();
716         do {
717                 pte_t ptent = *pte;
718                 if (pte_none(ptent)) {
719                         (*zap_work)--;
720                         continue;
721                 }
722
723                 (*zap_work) -= PAGE_SIZE;
724
725                 if (pte_present(ptent)) {
726                         struct page *page;
727
728                         page = vm_normal_page(vma, addr, ptent);
729                         if (unlikely(details) && page) {
730                                 /*
731                                  * unmap_shared_mapping_pages() wants to
732                                  * invalidate cache without truncating:
733                                  * unmap shared but keep private pages.
734                                  */
735                                 if (details->check_mapping &&
736                                     details->check_mapping != page->mapping)
737                                         continue;
738                                 /*
739                                  * Each page->index must be checked when
740                                  * invalidating or truncating nonlinear.
741                                  */
742                                 if (details->nonlinear_vma &&
743                                     (page->index < details->first_index ||
744                                      page->index > details->last_index))
745                                         continue;
746                         }
747                         ptent = ptep_get_and_clear_full(mm, addr, pte,
748                                                         tlb->fullmm);
749                         tlb_remove_tlb_entry(tlb, pte, addr);
750                         if (unlikely(!page))
751                                 continue;
752                         if (unlikely(details) && details->nonlinear_vma
753                             && linear_page_index(details->nonlinear_vma,
754                                                 addr) != page->index)
755                                 set_pte_at(mm, addr, pte,
756                                            pgoff_to_pte(page->index));
757                         if (PageAnon(page))
758                                 anon_rss--;
759                         else {
760                                 if (pte_dirty(ptent))
761                                         set_page_dirty(page);
762                                 if (pte_young(ptent))
763                                         SetPageReferenced(page);
764                                 file_rss--;
765                         }
766                         page_remove_rmap(page, vma);
767                         tlb_remove_page(tlb, page);
768                         continue;
769                 }
770                 /*
771                  * If details->check_mapping, we leave swap entries;
772                  * if details->nonlinear_vma, we leave file entries.
773                  */
774                 if (unlikely(details))
775                         continue;
776                 if (!pte_file(ptent))
777                         free_swap_and_cache(pte_to_swp_entry(ptent));
778                 pte_clear_not_present_full(mm, addr, pte, tlb->fullmm);
779         } while (pte++, addr += PAGE_SIZE, (addr != end && *zap_work > 0));
780
781         add_mm_rss(mm, file_rss, anon_rss);
782         arch_leave_lazy_mmu_mode();
783         pte_unmap_unlock(pte - 1, ptl);
784
785         return addr;
786 }
787
788 static inline unsigned long zap_pmd_range(struct mmu_gather *tlb,
789                                 struct vm_area_struct *vma, pud_t *pud,
790                                 unsigned long addr, unsigned long end,
791                                 long *zap_work, struct zap_details *details)
792 {
793         pmd_t *pmd;
794         unsigned long next;
795
796         pmd = pmd_offset(pud, addr);
797         do {
798                 next = pmd_addr_end(addr, end);
799                 if (pmd_none_or_clear_bad(pmd)) {
800                         (*zap_work)--;
801                         continue;
802                 }
803                 next = zap_pte_range(tlb, vma, pmd, addr, next,
804                                                 zap_work, details);
805         } while (pmd++, addr = next, (addr != end && *zap_work > 0));
806
807         return addr;
808 }
809
810 static inline unsigned long zap_pud_range(struct mmu_gather *tlb,
811                                 struct vm_area_struct *vma, pgd_t *pgd,
812                                 unsigned long addr, unsigned long end,
813                                 long *zap_work, struct zap_details *details)
814 {
815         pud_t *pud;
816         unsigned long next;
817
818         pud = pud_offset(pgd, addr);
819         do {
820                 next = pud_addr_end(addr, end);
821                 if (pud_none_or_clear_bad(pud)) {
822                         (*zap_work)--;
823                         continue;
824                 }
825                 next = zap_pmd_range(tlb, vma, pud, addr, next,
826                                                 zap_work, details);
827         } while (pud++, addr = next, (addr != end && *zap_work > 0));
828
829         return addr;
830 }
831
832 static unsigned long unmap_page_range(struct mmu_gather *tlb,
833                                 struct vm_area_struct *vma,
834                                 unsigned long addr, unsigned long end,
835                                 long *zap_work, struct zap_details *details)
836 {
837         pgd_t *pgd;
838         unsigned long next;
839
840         if (details && !details->check_mapping && !details->nonlinear_vma)
841                 details = NULL;
842
843         BUG_ON(addr >= end);
844         tlb_start_vma(tlb, vma);
845         pgd = pgd_offset(vma->vm_mm, addr);
846         do {
847                 next = pgd_addr_end(addr, end);
848                 if (pgd_none_or_clear_bad(pgd)) {
849                         (*zap_work)--;
850                         continue;
851                 }
852                 next = zap_pud_range(tlb, vma, pgd, addr, next,
853                                                 zap_work, details);
854         } while (pgd++, addr = next, (addr != end && *zap_work > 0));
855         tlb_end_vma(tlb, vma);
856
857         return addr;
858 }
859
860 #ifdef CONFIG_PREEMPT
861 # define ZAP_BLOCK_SIZE (8 * PAGE_SIZE)
862 #else
863 /* No preempt: go for improved straight-line efficiency */
864 # define ZAP_BLOCK_SIZE (1024 * PAGE_SIZE)
865 #endif
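/*
 * zap_work is decremented by PAGE_SIZE for every pte actually zapped and
 * by one for every empty entry skipped, so ZAP_BLOCK_SIZE roughly bounds
 * the work done between lock drops: 8 pages with preemption, 1024 without.
 */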
866
867 /**
868  * unmap_vmas - unmap a range of memory covered by a list of vma's
869  * @tlbp: address of the caller's struct mmu_gather
870  * @vma: the starting vma
871  * @start_addr: virtual address at which to start unmapping
872  * @end_addr: virtual address at which to end unmapping
873  * @nr_accounted: Place number of unmapped pages in vm-accountable vma's here
874  * @details: details of nonlinear truncation or shared cache invalidation
875  *
876  * Returns the end address of the unmapping (restart addr if interrupted).
877  *
878  * Unmap all pages in the vma list.
879  *
880  * We aim to not hold locks for too long (for scheduling latency reasons).
881  * So zap pages in ZAP_BLOCK_SIZE bytecounts.  This means we need to
882  * return the ending mmu_gather to the caller.
883  *
884  * Only addresses between `start' and `end' will be unmapped.
885  *
886  * The VMA list must be sorted in ascending virtual address order.
887  *
888  * unmap_vmas() assumes that the caller will flush the whole unmapped address
889  * range after unmap_vmas() returns.  So the only responsibility here is to
890  * ensure that any thus-far unmapped pages are flushed before unmap_vmas()
891  * drops the lock and schedules.
892  */
893 unsigned long unmap_vmas(struct mmu_gather **tlbp,
894                 struct vm_area_struct *vma, unsigned long start_addr,
895                 unsigned long end_addr, unsigned long *nr_accounted,
896                 struct zap_details *details)
897 {
898         long zap_work = ZAP_BLOCK_SIZE;
899         unsigned long tlb_start = 0;    /* For tlb_finish_mmu */
900         int tlb_start_valid = 0;
901         unsigned long start = start_addr;
902         spinlock_t *i_mmap_lock = details? details->i_mmap_lock: NULL;
903         int fullmm = (*tlbp)->fullmm;
904         struct mm_struct *mm = vma->vm_mm;
905
906         mmu_notifier_invalidate_range_start(mm, start_addr, end_addr);
907         for ( ; vma && vma->vm_start < end_addr; vma = vma->vm_next) {
908                 unsigned long end;
909
910                 start = max(vma->vm_start, start_addr);
911                 if (start >= vma->vm_end)
912                         continue;
913                 end = min(vma->vm_end, end_addr);
914                 if (end <= vma->vm_start)
915                         continue;
916
917                 if (vma->vm_flags & VM_ACCOUNT)
918                         *nr_accounted += (end - start) >> PAGE_SHIFT;
919
920                 while (start != end) {
921                         if (!tlb_start_valid) {
922                                 tlb_start = start;
923                                 tlb_start_valid = 1;
924                         }
925
926                         if (unlikely(is_vm_hugetlb_page(vma))) {
927                                 /*
928                                  * It is undesirable to test vma->vm_file as it
929                                  * should be non-null for valid hugetlb area.
930                                  * However, vm_file will be NULL in the error
931                                  * cleanup path of do_mmap_pgoff. When
932                                  * hugetlbfs ->mmap method fails,
933                                  * do_mmap_pgoff() nullifies vma->vm_file
934                                  * before calling this function to clean up.
935                                  * Since no pte has actually been setup, it is
936                                  * safe to do nothing in this case.
937                                  */
938                                 if (vma->vm_file) {
939                                         unmap_hugepage_range(vma, start, end, NULL);
940                                         zap_work -= (end - start) /
941                                         pages_per_huge_page(hstate_vma(vma));
942                                 }
943
944                                 start = end;
945                         } else
946                                 start = unmap_page_range(*tlbp, vma,
947                                                 start, end, &zap_work, details);
948
949                         if (zap_work > 0) {
950                                 BUG_ON(start != end);
951                                 break;
952                         }
953
954                         tlb_finish_mmu(*tlbp, tlb_start, start);
955
956                         if (need_resched() ||
957                                 (i_mmap_lock && spin_needbreak(i_mmap_lock))) {
958                                 if (i_mmap_lock) {
959                                         *tlbp = NULL;
960                                         goto out;
961                                 }
962                                 cond_resched();
963                         }
964
965                         *tlbp = tlb_gather_mmu(vma->vm_mm, fullmm);
966                         tlb_start_valid = 0;
967                         zap_work = ZAP_BLOCK_SIZE;
968                 }
969         }
970 out:
971         mmu_notifier_invalidate_range_end(mm, start_addr, end_addr);
972         return start;   /* which is now the end (or restart) address */
973 }
974
975 /**
976  * zap_page_range - remove user pages in a given range
977  * @vma: vm_area_struct holding the applicable pages
978  * @address: starting address of pages to zap
979  * @size: number of bytes to zap
980  * @details: details of nonlinear truncation or shared cache invalidation
981  */
982 unsigned long zap_page_range(struct vm_area_struct *vma, unsigned long address,
983                 unsigned long size, struct zap_details *details)
984 {
985         struct mm_struct *mm = vma->vm_mm;
986         struct mmu_gather *tlb;
987         unsigned long end = address + size;
988         unsigned long nr_accounted = 0;
989
990         lru_add_drain();
991         tlb = tlb_gather_mmu(mm, 0);
992         update_hiwater_rss(mm);
993         end = unmap_vmas(&tlb, vma, address, end, &nr_accounted, details);
994         if (tlb)
995                 tlb_finish_mmu(tlb, address, end);
996         return end;
997 }
998
999 /**
1000  * zap_vma_ptes - remove ptes mapping the vma
1001  * @vma: vm_area_struct holding ptes to be zapped
1002  * @address: starting address of pages to zap
1003  * @size: number of bytes to zap
1004  *
1005  * This function only unmaps ptes assigned to VM_PFNMAP vmas.
1006  *
1007  * The entire address range must be fully contained within the vma.
1008  *
1009  * Returns 0 if successful.
1010  */
1011 int zap_vma_ptes(struct vm_area_struct *vma, unsigned long address,
1012                 unsigned long size)
1013 {
1014         if (address < vma->vm_start || address + size > vma->vm_end ||
1015                         !(vma->vm_flags & VM_PFNMAP))
1016                 return -1;
1017         zap_page_range(vma, address, size, NULL);
1018         return 0;
1019 }
1020 EXPORT_SYMBOL_GPL(zap_vma_ptes);
1021
1022 /*
1023  * Do a quick page-table lookup for a single page.
1024  */
1025 struct page *follow_page(struct vm_area_struct *vma, unsigned long address,
1026                         unsigned int flags)
1027 {
1028         pgd_t *pgd;
1029         pud_t *pud;
1030         pmd_t *pmd;
1031         pte_t *ptep, pte;
1032         spinlock_t *ptl;
1033         struct page *page;
1034         struct mm_struct *mm = vma->vm_mm;
1035
1036         page = follow_huge_addr(mm, address, flags & FOLL_WRITE);
1037         if (!IS_ERR(page)) {
1038                 BUG_ON(flags & FOLL_GET);
1039                 goto out;
1040         }
1041
1042         page = NULL;
1043         pgd = pgd_offset(mm, address);
1044         if (pgd_none(*pgd) || unlikely(pgd_bad(*pgd)))
1045                 goto no_page_table;
1046
1047         pud = pud_offset(pgd, address);
1048         if (pud_none(*pud))
1049                 goto no_page_table;
1050         if (pud_huge(*pud)) {
1051                 BUG_ON(flags & FOLL_GET);
1052                 page = follow_huge_pud(mm, address, pud, flags & FOLL_WRITE);
1053                 goto out;
1054         }
1055         if (unlikely(pud_bad(*pud)))
1056                 goto no_page_table;
1057
1058         pmd = pmd_offset(pud, address);
1059         if (pmd_none(*pmd))
1060                 goto no_page_table;
1061         if (pmd_huge(*pmd)) {
1062                 BUG_ON(flags & FOLL_GET);
1063                 page = follow_huge_pmd(mm, address, pmd, flags & FOLL_WRITE);
1064                 goto out;
1065         }
1066         if (unlikely(pmd_bad(*pmd)))
1067                 goto no_page_table;
1068
1069         ptep = pte_offset_map_lock(mm, pmd, address, &ptl);
1070
1071         pte = *ptep;
1072         if (!pte_present(pte))
1073                 goto no_page;
1074         if ((flags & FOLL_WRITE) && !pte_write(pte))
1075                 goto unlock;
1076         page = vm_normal_page(vma, address, pte);
1077         if (unlikely(!page))
1078                 goto bad_page;
1079
1080         if (flags & FOLL_GET)
1081                 get_page(page);
1082         if (flags & FOLL_TOUCH) {
1083                 if ((flags & FOLL_WRITE) &&
1084                     !pte_dirty(pte) && !PageDirty(page))
1085                         set_page_dirty(page);
1086                 mark_page_accessed(page);
1087         }
1088 unlock:
1089         pte_unmap_unlock(ptep, ptl);
1090 out:
1091         return page;
1092
1093 bad_page:
1094         pte_unmap_unlock(ptep, ptl);
1095         return ERR_PTR(-EFAULT);
1096
1097 no_page:
1098         pte_unmap_unlock(ptep, ptl);
1099         if (!pte_none(pte))
1100                 return page;
1101         /* Fall through to ZERO_PAGE handling */
1102 no_page_table:
1103         /*
1104          * When core dumping an enormous anonymous area that nobody
1105          * has touched so far, we don't want to allocate page tables.
1106          */
1107         if (flags & FOLL_ANON) {
1108                 page = ZERO_PAGE(0);
1109                 if (flags & FOLL_GET)
1110                         get_page(page);
1111                 BUG_ON(flags & FOLL_WRITE);
1112         }
1113         return page;
1114 }
1115
1116 /* Can we do the FOLL_ANON optimization? */
1117 static inline int use_zero_page(struct vm_area_struct *vma)
1118 {
1119         /*
1120          * We don't want to optimize FOLL_ANON for make_pages_present()
1121          * when it tries to page in a VM_LOCKED region. As to VM_SHARED,
1122          * we want to get the page from the page tables to make sure
1123          * that we serialize and update with any other user of that
1124          * mapping.
1125          */
1126         if (vma->vm_flags & (VM_LOCKED | VM_SHARED))
1127                 return 0;
1128         /*
1129          * And if we have a fault routine, it's not an anonymous region.
1130          */
1131         return !vma->vm_ops || !vma->vm_ops->fault;
1132 }
1133
1134
1135
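/*
 * Core of get_user_pages(): walk the user address range of len pages
 * starting at start (including the gate area, which is handled specially),
 * faulting pages in via handle_mm_fault() as needed, and optionally fill
 * pages[] (with a reference held on each page) and vmas[].  Returns the
 * number of pages pinned, or a -errno if no page could be pinned at all.
 */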
1136 int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
1137                      unsigned long start, int len, int flags,
1138                 struct page **pages, struct vm_area_struct **vmas)
1139 {
1140         int i;
1141         unsigned int vm_flags = 0;
1142         int write = !!(flags & GUP_FLAGS_WRITE);
1143         int force = !!(flags & GUP_FLAGS_FORCE);
1144         int ignore = !!(flags & GUP_FLAGS_IGNORE_VMA_PERMISSIONS);
1145
1146         if (len <= 0)
1147                 return 0;
1148         /* 
1149          * Require read or write permissions.
1150          * If 'force' is set, we only require the "MAY" flags.
1151          */
1152         vm_flags  = write ? (VM_WRITE | VM_MAYWRITE) : (VM_READ | VM_MAYREAD);
1153         vm_flags &= force ? (VM_MAYREAD | VM_MAYWRITE) : (VM_READ | VM_WRITE);
1154         i = 0;
1155
1156         do {
1157                 struct vm_area_struct *vma;
1158                 unsigned int foll_flags;
1159
1160                 vma = find_extend_vma(mm, start);
1161                 if (!vma && in_gate_area(tsk, start)) {
1162                         unsigned long pg = start & PAGE_MASK;
1163                         struct vm_area_struct *gate_vma = get_gate_vma(tsk);
1164                         pgd_t *pgd;
1165                         pud_t *pud;
1166                         pmd_t *pmd;
1167                         pte_t *pte;
1168
1169                         /* user gate pages are read-only */
1170                         if (!ignore && write)
1171                                 return i ? : -EFAULT;
1172                         if (pg > TASK_SIZE)
1173                                 pgd = pgd_offset_k(pg);
1174                         else
1175                                 pgd = pgd_offset_gate(mm, pg);
1176                         BUG_ON(pgd_none(*pgd));
1177                         pud = pud_offset(pgd, pg);
1178                         BUG_ON(pud_none(*pud));
1179                         pmd = pmd_offset(pud, pg);
1180                         if (pmd_none(*pmd))
1181                                 return i ? : -EFAULT;
1182                         pte = pte_offset_map(pmd, pg);
1183                         if (pte_none(*pte)) {
1184                                 pte_unmap(pte);
1185                                 return i ? : -EFAULT;
1186                         }
1187                         if (pages) {
1188                                 struct page *page = vm_normal_page(gate_vma, start, *pte);
1189                                 pages[i] = page;
1190                                 if (page)
1191                                         get_page(page);
1192                         }
1193                         pte_unmap(pte);
1194                         if (vmas)
1195                                 vmas[i] = gate_vma;
1196                         i++;
1197                         start += PAGE_SIZE;
1198                         len--;
1199                         continue;
1200                 }
1201
1202                 if (!vma ||
1203                     (vma->vm_flags & (VM_IO | VM_PFNMAP)) ||
1204                     (!ignore && !(vm_flags & vma->vm_flags)))
1205                         return i ? : -EFAULT;
1206
1207                 if (is_vm_hugetlb_page(vma)) {
1208                         i = follow_hugetlb_page(mm, vma, pages, vmas,
1209                                                 &start, &len, i, write);
1210                         continue;
1211                 }
1212
1213                 foll_flags = FOLL_TOUCH;
1214                 if (pages)
1215                         foll_flags |= FOLL_GET;
1216                 if (!write && use_zero_page(vma))
1217                         foll_flags |= FOLL_ANON;
1218
1219                 do {
1220                         struct page *page;
1221
1222                         /*
1223                          * If tsk is ooming, cut off its access to large memory
1224                          * allocations. It has a pending SIGKILL, but it can't
1225                          * be processed until returning to user space.
1226                          */
1227                         if (unlikely(test_tsk_thread_flag(tsk, TIF_MEMDIE)))
1228                                 return i ? i : -ENOMEM;
1229
1230                         if (write)
1231                                 foll_flags |= FOLL_WRITE;
1232
1233                         cond_resched();
1234                         while (!(page = follow_page(vma, start, foll_flags))) {
1235                                 int ret;
1236                                 ret = handle_mm_fault(mm, vma, start,
1237                                                 foll_flags & FOLL_WRITE);
1238                                 if (ret & VM_FAULT_ERROR) {
1239                                         if (ret & VM_FAULT_OOM)
1240                                                 return i ? i : -ENOMEM;
1241                                         else if (ret & VM_FAULT_SIGBUS)
1242                                                 return i ? i : -EFAULT;
1243                                         BUG();
1244                                 }
1245                                 if (ret & VM_FAULT_MAJOR)
1246                                         tsk->maj_flt++;
1247                                 else
1248                                         tsk->min_flt++;
1249
1250                                 /*
1251                                  * The VM_FAULT_WRITE bit tells us that
1252                                  * do_wp_page has broken COW when necessary,
1253                                  * even if maybe_mkwrite decided not to set
1254                                  * pte_write. We can thus safely do subsequent
1255                                  * page lookups as if they were reads.
1256                                  */
1257                                 if (ret & VM_FAULT_WRITE)
1258                                         foll_flags &= ~FOLL_WRITE;
1259
1260                                 cond_resched();
1261                         }
1262                         if (IS_ERR(page))
1263                                 return i ? i : PTR_ERR(page);
1264                         if (pages) {
1265                                 pages[i] = page;
1266
1267                                 flush_anon_page(vma, page, start);
1268                                 flush_dcache_page(page);
1269                         }
1270                         if (vmas)
1271                                 vmas[i] = vma;
1272                         i++;
1273                         start += PAGE_SIZE;
1274                         len--;
1275                 } while (len && start < vma->vm_end);
1276         } while (len);
1277         return i;
1278 }
1279
1280 int get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
1281                 unsigned long start, int len, int write, int force,
1282                 struct page **pages, struct vm_area_struct **vmas)
1283 {
1284         int flags = 0;
1285
1286         if (write)
1287                 flags |= GUP_FLAGS_WRITE;
1288         if (force)
1289                 flags |= GUP_FLAGS_FORCE;
1290
1291         return __get_user_pages(tsk, mm,
1292                                 start, len, flags,
1293                                 pages, vmas);
1294 }
1295
1296 EXPORT_SYMBOL(get_user_pages);
1297
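/*
 * Walk (and allocate, if necessary) the page-table levels down to the pte
 * for addr, returning the pte mapped with its lock held in *ptl, or NULL
 * if a page-table allocation failed.
 */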
1298 pte_t *get_locked_pte(struct mm_struct *mm, unsigned long addr,
1299                         spinlock_t **ptl)
1300 {
1301         pgd_t * pgd = pgd_offset(mm, addr);
1302         pud_t * pud = pud_alloc(mm, pgd, addr);
1303         if (pud) {
1304                 pmd_t * pmd = pmd_alloc(mm, pud, addr);
1305                 if (pmd)
1306                         return pte_alloc_map_lock(mm, pmd, addr, ptl);
1307         }
1308         return NULL;
1309 }
1310
1311 /*
1312  * This is the old fallback for page remapping.
1313  *
1314  * For historical reasons, it only allows reserved pages. Only
1315  * old drivers should use this, and they needed to mark their
1316  * pages reserved for the old functions anyway.
1317  */
1318 static int insert_page(struct vm_area_struct *vma, unsigned long addr,
1319                         struct page *page, pgprot_t prot)
1320 {
1321         struct mm_struct *mm = vma->vm_mm;
1322         int retval;
1323         pte_t *pte;
1324         spinlock_t *ptl;
1325
1326         retval = -EINVAL;
1327         if (PageAnon(page))
1328                 goto out;
1329         retval = -ENOMEM;
1330         flush_dcache_page(page);
1331         pte = get_locked_pte(mm, addr, &ptl);
1332         if (!pte)
1333                 goto out;
1334         retval = -EBUSY;
1335         if (!pte_none(*pte))
1336                 goto out_unlock;
1337
1338         /* Ok, finally just insert the thing.. */
1339         get_page(page);
1340         inc_mm_counter(mm, file_rss);
1341         page_add_file_rmap(page);
1342         set_pte_at(mm, addr, pte, mk_pte(page, prot));
1343
1344         retval = 0;
1345         pte_unmap_unlock(pte, ptl);
1346         return retval;
1347 out_unlock:
1348         pte_unmap_unlock(pte, ptl);
1349 out:
1350         return retval;
1351 }
1352
1353 /**
1354  * vm_insert_page - insert single page into user vma
1355  * @vma: user vma to map to
1356  * @addr: target user address of this page
1357  * @page: source kernel page
1358  *
1359  * This allows drivers to insert individual pages they've allocated
1360  * into a user vma.
1361  *
1362  * The page has to be a nice clean _individual_ kernel allocation.
1363  * If you allocate a compound page, you need to have marked it as
1364  * such (__GFP_COMP), or manually just split the page up yourself
1365  * (see split_page()).
1366  *
1367  * NOTE! Traditionally this was done with "remap_pfn_range()" which
1368  * took an arbitrary page protection parameter. This doesn't allow
1369  * that. Your vma protection will have to be set up correctly, which
1370  * means that if you want a shared writable mapping, you'd better
1371  * ask for a shared writable mapping!
1372  *
1373  * The page does not need to be reserved.
1374  */
1375 int vm_insert_page(struct vm_area_struct *vma, unsigned long addr,
1376                         struct page *page)
1377 {
1378         if (addr < vma->vm_start || addr >= vma->vm_end)
1379                 return -EFAULT;
1380         if (!page_count(page))
1381                 return -EINVAL;
1382         vma->vm_flags |= VM_INSERTPAGE;
1383         return insert_page(vma, addr, page, vma->vm_page_prot);
1384 }
1385 EXPORT_SYMBOL(vm_insert_page);
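/*
 * Typical use (sketch, not from this file): a driver's ->mmap() method
 * loops over its preallocated pages and calls
 *
 *	err = vm_insert_page(vma, vma->vm_start + i * PAGE_SIZE, pages[i]);
 *
 * relying on vma->vm_page_prot having already been set up appropriately.
 */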
1386
1387 static int insert_pfn(struct vm_area_struct *vma, unsigned long addr,
1388                         unsigned long pfn, pgprot_t prot)
1389 {
1390         struct mm_struct *mm = vma->vm_mm;
1391         int retval;
1392         pte_t *pte, entry;
1393         spinlock_t *ptl;
1394
1395         retval = -ENOMEM;
1396         pte = get_locked_pte(mm, addr, &ptl);
1397         if (!pte)
1398                 goto out;
1399         retval = -EBUSY;
1400         if (!pte_none(*pte))
1401                 goto out_unlock;
1402
1403         /* Ok, finally just insert the thing.. */
1404         entry = pte_mkspecial(pfn_pte(pfn, prot));
1405         set_pte_at(mm, addr, pte, entry);
1406         update_mmu_cache(vma, addr, entry); /* XXX: why not for insert_page? */
1407
1408         retval = 0;
1409 out_unlock:
1410         pte_unmap_unlock(pte, ptl);
1411 out:
1412         return retval;
1413 }
1414
1415 /**
1416  * vm_insert_pfn - insert single pfn into user vma
1417  * @vma: user vma to map to
1418  * @addr: target user address of this page
1419  * @pfn: source kernel pfn
1420  *
1421  * Similar to vm_insert_page, this allows drivers to insert individual pages
1422  * they've allocated into a user vma. Same comments apply.
1423  *
1424  * This function should only be called from a vm_ops->fault handler, and
1425  * in that case the handler should return NULL.
1426  *
1427  * vma cannot be a COW mapping.
1428  *
1429  * As this is called only for pages that do not currently exist, we
1430  * do not need to flush old virtual caches or the TLB.
1431  */
1432 int vm_insert_pfn(struct vm_area_struct *vma, unsigned long addr,
1433                         unsigned long pfn)
1434 {
1435         /*
1436          * Technically, architectures with pte_special can avoid all these
1437          * restrictions (same for remap_pfn_range).  However we would like
1438          * consistency in testing and feature parity among all, so we should
1439          * try to keep these invariants in place for everybody.
1440          */
1441         BUG_ON(!(vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP)));
1442         BUG_ON((vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP)) ==
1443                                                 (VM_PFNMAP|VM_MIXEDMAP));
1444         BUG_ON((vma->vm_flags & VM_PFNMAP) && is_cow_mapping(vma->vm_flags));
1445         BUG_ON((vma->vm_flags & VM_MIXEDMAP) && pfn_valid(pfn));
1446
1447         if (addr < vma->vm_start || addr >= vma->vm_end)
1448                 return -EFAULT;
1449         return insert_pfn(vma, addr, pfn, vma->vm_page_prot);
1450 }
1451 EXPORT_SYMBOL(vm_insert_pfn);
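/*
 * Illustrative sketch, not part of this file: a minimal ->fault handler
 * for a pure VM_PFNMAP mapping.  my_pfn_fault and my_dev_base_pfn are
 * hypothetical names; the driver's mmap method is assumed to have set
 * VM_PFNMAP (and typically VM_IO) on the vma before any faults occur.
 *
 *	static int my_pfn_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
 *	{
 *		unsigned long addr = (unsigned long)vmf->virtual_address;
 *		int err;
 *
 *		err = vm_insert_pfn(vma, addr, my_dev_base_pfn + vmf->pgoff);
 *		if (err == -ENOMEM)
 *			return VM_FAULT_OOM;
 *		if (err && err != -EBUSY)
 *			return VM_FAULT_SIGBUS;
 *		return VM_FAULT_NOPAGE;
 *	}
 *
 * -EBUSY is treated as success here on the assumption that another
 * thread raced us and already installed the pte for this address.
 */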
1452
1453 int vm_insert_mixed(struct vm_area_struct *vma, unsigned long addr,
1454                         unsigned long pfn)
1455 {
1456         BUG_ON(!(vma->vm_flags & VM_MIXEDMAP));
1457
1458         if (addr < vma->vm_start || addr >= vma->vm_end)
1459                 return -EFAULT;
1460
1461         /*
1462          * If we don't have pte special, then we have to use the pfn_valid()
1463          * based VM_MIXEDMAP scheme (see vm_normal_page), and thus we *must*
1464          * refcount the page if pfn_valid is true (hence insert_page rather
1465          * than insert_pfn).
1466          */
1467         if (!HAVE_PTE_SPECIAL && pfn_valid(pfn)) {
1468                 struct page *page;
1469
1470                 page = pfn_to_page(pfn);
1471                 return insert_page(vma, addr, page, vma->vm_page_prot);
1472         }
1473         return insert_pfn(vma, addr, pfn, vma->vm_page_prot);
1474 }
1475 EXPORT_SYMBOL(vm_insert_mixed);
1476
1477 /*
1478  * maps a range of physical memory into the requested pages. the old
1479  * mappings are removed. any references to nonexistent pages result
1480  * in null mappings (currently treated as "copy-on-access")
1481  */
1482 static int remap_pte_range(struct mm_struct *mm, pmd_t *pmd,
1483                         unsigned long addr, unsigned long end,
1484                         unsigned long pfn, pgprot_t prot)
1485 {
1486         pte_t *pte;
1487         spinlock_t *ptl;
1488
1489         pte = pte_alloc_map_lock(mm, pmd, addr, &ptl);
1490         if (!pte)
1491                 return -ENOMEM;
1492         arch_enter_lazy_mmu_mode();
1493         do {
1494                 BUG_ON(!pte_none(*pte));
1495                 set_pte_at(mm, addr, pte, pte_mkspecial(pfn_pte(pfn, prot)));
1496                 pfn++;
1497         } while (pte++, addr += PAGE_SIZE, addr != end);
1498         arch_leave_lazy_mmu_mode();
1499         pte_unmap_unlock(pte - 1, ptl);
1500         return 0;
1501 }
1502
1503 static inline int remap_pmd_range(struct mm_struct *mm, pud_t *pud,
1504                         unsigned long addr, unsigned long end,
1505                         unsigned long pfn, pgprot_t prot)
1506 {
1507         pmd_t *pmd;
1508         unsigned long next;
1509
1510         pfn -= addr >> PAGE_SHIFT;
1511         pmd = pmd_alloc(mm, pud, addr);
1512         if (!pmd)
1513                 return -ENOMEM;
1514         do {
1515                 next = pmd_addr_end(addr, end);
1516                 if (remap_pte_range(mm, pmd, addr, next,
1517                                 pfn + (addr >> PAGE_SHIFT), prot))
1518                         return -ENOMEM;
1519         } while (pmd++, addr = next, addr != end);
1520         return 0;
1521 }
1522
1523 static inline int remap_pud_range(struct mm_struct *mm, pgd_t *pgd,
1524                         unsigned long addr, unsigned long end,
1525                         unsigned long pfn, pgprot_t prot)
1526 {
1527         pud_t *pud;
1528         unsigned long next;
1529
1530         pfn -= addr >> PAGE_SHIFT;
1531         pud = pud_alloc(mm, pgd, addr);
1532         if (!pud)
1533                 return -ENOMEM;
1534         do {
1535                 next = pud_addr_end(addr, end);
1536                 if (remap_pmd_range(mm, pud, addr, next,
1537                                 pfn + (addr >> PAGE_SHIFT), prot))
1538                         return -ENOMEM;
1539         } while (pud++, addr = next, addr != end);
1540         return 0;
1541 }
1542
1543 /**
1544  * remap_pfn_range - remap kernel memory to userspace
1545  * @vma: user vma to map to
1546  * @addr: target user address to start at
1547  * @pfn: physical address of kernel memory
1548  * @size: size of map area
1549  * @prot: page protection flags for this mapping
1550  *
1551  *  Note: this is only safe if the mm semaphore is held when called.
1552  */
1553 int remap_pfn_range(struct vm_area_struct *vma, unsigned long addr,
1554                     unsigned long pfn, unsigned long size, pgprot_t prot)
1555 {
1556         pgd_t *pgd;
1557         unsigned long next;
1558         unsigned long end = addr + PAGE_ALIGN(size);
1559         struct mm_struct *mm = vma->vm_mm;
1560         int err;
1561
1562         /*
1563          * Physically remapped pages are special. Tell the
1564          * rest of the world about it:
1565          *   VM_IO tells people not to look at these pages
1566          *      (accesses can have side effects).
1567          *   VM_RESERVED is specified all over the place, because
1568          *      in 2.4 it kept swapout's vma scan off this vma; but
1569          *      in 2.6 the LRU scan won't even find its pages, so this
1570          *      flag means no more than count its pages in reserved_vm,
1571  *      and omit it from core dump, even when VM_IO is turned off.
1572          *   VM_PFNMAP tells the core MM that the base pages are just
1573          *      raw PFN mappings, and do not have a "struct page" associated
1574          *      with them.
1575          *
1576          * There's a horrible special case to handle copy-on-write
1577          * behaviour that some programs depend on. We mark the "original"
1578          * un-COW'ed pages by matching them up with "vma->vm_pgoff".
1579          */
1580         if (is_cow_mapping(vma->vm_flags)) {
1581                 if (addr != vma->vm_start || end != vma->vm_end)
1582                         return -EINVAL;
1583                 vma->vm_pgoff = pfn;
1584         }
1585
1586         vma->vm_flags |= VM_IO | VM_RESERVED | VM_PFNMAP;
1587
1588         BUG_ON(addr >= end);
1589         pfn -= addr >> PAGE_SHIFT;
1590         pgd = pgd_offset(mm, addr);
1591         flush_cache_range(vma, addr, end);
1592         do {
1593                 next = pgd_addr_end(addr, end);
1594                 err = remap_pud_range(mm, pgd, addr, next,
1595                                 pfn + (addr >> PAGE_SHIFT), prot);
1596                 if (err)
1597                         break;
1598         } while (pgd++, addr = next, addr != end);
1599         return err;
1600 }
1601 EXPORT_SYMBOL(remap_pfn_range);
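/*
 * Illustrative sketch, not part of this file: the classic use of
 * remap_pfn_range() from a driver's mmap method to map device MMIO.
 * MY_MMIO_PHYS and MY_MMIO_LEN are hypothetical device constants.
 *
 *	static int my_mmio_mmap(struct file *file, struct vm_area_struct *vma)
 *	{
 *		unsigned long size = vma->vm_end - vma->vm_start;
 *
 *		if (size > MY_MMIO_LEN)
 *			return -EINVAL;
 *		vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
 *		return remap_pfn_range(vma, vma->vm_start,
 *				       MY_MMIO_PHYS >> PAGE_SHIFT,
 *				       size, vma->vm_page_prot);
 *	}
 *
 * remap_pfn_range() itself marks the vma VM_IO | VM_RESERVED | VM_PFNMAP,
 * as seen above, so the driver does not have to set those flags first.
 */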
1602
1603 static int apply_to_pte_range(struct mm_struct *mm, pmd_t *pmd,
1604                                      unsigned long addr, unsigned long end,
1605                                      pte_fn_t fn, void *data)
1606 {
1607         pte_t *pte;
1608         int err;
1609         pgtable_t token;
1610         spinlock_t *uninitialized_var(ptl);
1611
1612         pte = (mm == &init_mm) ?
1613                 pte_alloc_kernel(pmd, addr) :
1614                 pte_alloc_map_lock(mm, pmd, addr, &ptl);
1615         if (!pte)
1616                 return -ENOMEM;
1617
1618         BUG_ON(pmd_huge(*pmd));
1619
1620         token = pmd_pgtable(*pmd);
1621
1622         do {
1623                 err = fn(pte, token, addr, data);
1624                 if (err)
1625                         break;
1626         } while (pte++, addr += PAGE_SIZE, addr != end);
1627
1628         if (mm != &init_mm)
1629                 pte_unmap_unlock(pte-1, ptl);
1630         return err;
1631 }
1632
1633 static int apply_to_pmd_range(struct mm_struct *mm, pud_t *pud,
1634                                      unsigned long addr, unsigned long end,
1635                                      pte_fn_t fn, void *data)
1636 {
1637         pmd_t *pmd;
1638         unsigned long next;
1639         int err;
1640
1641         BUG_ON(pud_huge(*pud));
1642
1643         pmd = pmd_alloc(mm, pud, addr);
1644         if (!pmd)
1645                 return -ENOMEM;
1646         do {
1647                 next = pmd_addr_end(addr, end);
1648                 err = apply_to_pte_range(mm, pmd, addr, next, fn, data);
1649                 if (err)
1650                         break;
1651         } while (pmd++, addr = next, addr != end);
1652         return err;
1653 }
1654
1655 static int apply_to_pud_range(struct mm_struct *mm, pgd_t *pgd,
1656                                      unsigned long addr, unsigned long end,
1657                                      pte_fn_t fn, void *data)
1658 {
1659         pud_t *pud;
1660         unsigned long next;
1661         int err;
1662
1663         pud = pud_alloc(mm, pgd, addr);
1664         if (!pud)
1665                 return -ENOMEM;
1666         do {
1667                 next = pud_addr_end(addr, end);
1668                 err = apply_to_pmd_range(mm, pud, addr, next, fn, data);
1669                 if (err)
1670                         break;
1671         } while (pud++, addr = next, addr != end);
1672         return err;
1673 }
1674
1675 /*
1676  * Scan a region of virtual memory, filling in page tables as necessary
1677  * and calling a provided function on each leaf page table.
1678  */
1679 int apply_to_page_range(struct mm_struct *mm, unsigned long addr,
1680                         unsigned long size, pte_fn_t fn, void *data)
1681 {
1682         pgd_t *pgd;
1683         unsigned long next;
1684         unsigned long start = addr, end = addr + size;
1685         int err;
1686
1687         BUG_ON(addr >= end);
1688         mmu_notifier_invalidate_range_start(mm, start, end);
1689         pgd = pgd_offset(mm, addr);
1690         do {
1691                 next = pgd_addr_end(addr, end);
1692                 err = apply_to_pud_range(mm, pgd, addr, next, fn, data);
1693                 if (err)
1694                         break;
1695         } while (pgd++, addr = next, addr != end);
1696         mmu_notifier_invalidate_range_end(mm, start, end);
1697         return err;
1698 }
1699 EXPORT_SYMBOL_GPL(apply_to_page_range);
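/*
 * Illustrative sketch, not part of this file: a callback matching the
 * pte_fn_t signature expected by apply_to_page_range().  The callback is
 * invoked once per pte in the range (with the pte lock held for user
 * mms), and a non-zero return value stops the walk and is propagated to
 * the caller.  my_count_present is a hypothetical name; note that the
 * walk allocates intermediate page tables as needed, so it is meant for
 * ranges the caller intends to populate.
 *
 *	static int my_count_present(pte_t *pte, pgtable_t token,
 *				    unsigned long addr, void *data)
 *	{
 *		unsigned long *count = data;
 *
 *		if (pte_present(*pte))
 *			(*count)++;
 *		return 0;
 *	}
 *
 *	unsigned long present = 0;
 *	apply_to_page_range(&init_mm, start, len, my_count_present, &present);
 */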
1700
1701 /*
1702  * handle_pte_fault chooses page fault handler according to an entry
1703  * which was read non-atomically.  Before making any commitment, on
1704  * those architectures or configurations (e.g. i386 with PAE) which
1705  * might give a mix of unmatched parts, do_swap_page and do_nonlinear_fault
1706  * must check under lock before unmapping the pte and proceeding
1707  * (but do_wp_page is only called after already making such a check;
1708  * and do_anonymous_page and __do_fault can safely check later on).
1709  */
1710 static inline int pte_unmap_same(struct mm_struct *mm, pmd_t *pmd,
1711                                 pte_t *page_table, pte_t orig_pte)
1712 {
1713         int same = 1;
1714 #if defined(CONFIG_SMP) || defined(CONFIG_PREEMPT)
1715         if (sizeof(pte_t) > sizeof(unsigned long)) {
1716                 spinlock_t *ptl = pte_lockptr(mm, pmd);
1717                 spin_lock(ptl);
1718                 same = pte_same(*page_table, orig_pte);
1719                 spin_unlock(ptl);
1720         }
1721 #endif
1722         pte_unmap(page_table);
1723         return same;
1724 }
1725
1726 /*
1727  * Do pte_mkwrite, but only if the vma says VM_WRITE.  We do this when
1728  * servicing faults for write access.  In the normal case, we do always want
1729  * pte_mkwrite.  But get_user_pages can cause write faults for mappings
1730  * that do not have writing enabled, when used by access_process_vm.
1731  */
1732 static inline pte_t maybe_mkwrite(pte_t pte, struct vm_area_struct *vma)
1733 {
1734         if (likely(vma->vm_flags & VM_WRITE))
1735                 pte = pte_mkwrite(pte);
1736         return pte;
1737 }
1738
1739 static inline void cow_user_page(struct page *dst, struct page *src, unsigned long va, struct vm_area_struct *vma)
1740 {
1741         /*
1742          * If the source page was a PFN mapping, we don't have
1743          * a "struct page" for it. We do a best-effort copy by
1744          * just copying from the original user address. If that
1745          * fails, we just zero-fill it. Live with it.
1746          */
1747         if (unlikely(!src)) {
1748                 void *kaddr = kmap_atomic(dst, KM_USER0);
1749                 void __user *uaddr = (void __user *)(va & PAGE_MASK);
1750
1751                 /*
1752                  * This really shouldn't fail, because the page is there
1753                  * in the page tables. But it might just be unreadable,
1754                  * in which case we just give up and fill the result with
1755                  * zeroes.
1756                  */
1757                 if (__copy_from_user_inatomic(kaddr, uaddr, PAGE_SIZE))
1758                         memset(kaddr, 0, PAGE_SIZE);
1759                 kunmap_atomic(kaddr, KM_USER0);
1760                 flush_dcache_page(dst);
1761         } else
1762                 copy_user_highpage(dst, src, va, vma);
1763 }
1764
1765 /*
1766  * This routine handles present pages, when users try to write
1767  * to a shared page. It is done by copying the page to a new address
1768  * and decrementing the shared-page counter for the old page.
1769  *
1770  * Note that this routine assumes that the protection checks have been
1771  * done by the caller (the low-level page fault routine in most cases).
1772  * Thus we can safely just mark it writable once we've done any necessary
1773  * COW.
1774  *
1775  * We also mark the page dirty at this point even though the page will
1776  * change only once the write actually happens. This avoids a few races,
1777  * and potentially makes it more efficient.
1778  *
1779  * We enter with non-exclusive mmap_sem (to exclude vma changes,
1780  * but allow concurrent faults), with pte both mapped and locked.
1781  * We return with mmap_sem still held, but pte unmapped and unlocked.
1782  */
1783 static int do_wp_page(struct mm_struct *mm, struct vm_area_struct *vma,
1784                 unsigned long address, pte_t *page_table, pmd_t *pmd,
1785                 spinlock_t *ptl, pte_t orig_pte)
1786 {
1787         struct page *old_page, *new_page;
1788         pte_t entry;
1789         int reuse = 0, ret = 0;
1790         int page_mkwrite = 0;
1791         struct page *dirty_page = NULL;
1792
1793         old_page = vm_normal_page(vma, address, orig_pte);
1794         if (!old_page) {
1795                 /*
1796                  * VM_MIXEDMAP !pfn_valid() case
1797                  *
1798                  * We should not cow pages in a shared writeable mapping.
1799                  * Just mark the pages writable as we can't do any dirty
1800                  * accounting on raw pfn maps.
1801                  */
1802                 if ((vma->vm_flags & (VM_WRITE|VM_SHARED)) ==
1803                                      (VM_WRITE|VM_SHARED))
1804                         goto reuse;
1805                 goto gotten;
1806         }
1807
1808         /*
1809          * Take out anonymous pages first, anonymous shared vmas are
1810          * not dirty accountable.
1811          */
1812         if (PageAnon(old_page)) {
1813                 if (trylock_page(old_page)) {
1814                         reuse = can_share_swap_page(old_page);
1815                         unlock_page(old_page);
1816                 }
1817         } else if (unlikely((vma->vm_flags & (VM_WRITE|VM_SHARED)) ==
1818                                         (VM_WRITE|VM_SHARED))) {
1819                 /*
1820                  * Only catch write-faults on shared writable pages,
1821                  * read-only shared pages can get COWed by
1822                  * get_user_pages(.write=1, .force=1).
1823                  */
1824                 if (vma->vm_ops && vma->vm_ops->page_mkwrite) {
1825                         /*
1826                          * Notify the address space that the page is about to
1827                          * become writable so that it can prohibit this or wait
1828                          * for the page to get into an appropriate state.
1829                          *
1830                          * We do this without the lock held, so that it can
1831                          * sleep if it needs to.
1832                          */
1833                         page_cache_get(old_page);
1834                         pte_unmap_unlock(page_table, ptl);
1835
1836                         if (vma->vm_ops->page_mkwrite(vma, old_page) < 0)
1837                                 goto unwritable_page;
1838
1839                         /*
1840                          * Since we dropped the lock we need to revalidate
1841                          * the PTE as someone else may have changed it.  If
1842                          * they did, we just return, as we can count on the
1843                          * MMU to tell us if they didn't also make it writable.
1844                          */
1845                         page_table = pte_offset_map_lock(mm, pmd, address,
1846                                                          &ptl);
1847                         page_cache_release(old_page);
1848                         if (!pte_same(*page_table, orig_pte))
1849                                 goto unlock;
1850
1851                         page_mkwrite = 1;
1852                 }
1853                 dirty_page = old_page;
1854                 get_page(dirty_page);
1855                 reuse = 1;
1856         }
1857
1858         if (reuse) {
1859 reuse:
1860                 flush_cache_page(vma, address, pte_pfn(orig_pte));
1861                 entry = pte_mkyoung(orig_pte);
1862                 entry = maybe_mkwrite(pte_mkdirty(entry), vma);
1863                 if (ptep_set_access_flags(vma, address, page_table, entry, 1))
1864                         update_mmu_cache(vma, address, entry);
1865                 ret |= VM_FAULT_WRITE;
1866                 goto unlock;
1867         }
1868
1869         /*
1870          * Ok, we need to copy. Oh, well..
1871          */
1872         page_cache_get(old_page);
1873 gotten:
1874         pte_unmap_unlock(page_table, ptl);
1875
1876         if (unlikely(anon_vma_prepare(vma)))
1877                 goto oom;
1878         VM_BUG_ON(old_page == ZERO_PAGE(0));
1879         new_page = alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma, address);
1880         if (!new_page)
1881                 goto oom;
1882         /*
1883          * Don't let another task, with possibly unlocked vma,
1884          * keep the mlocked page.
1885          */
1886         if (vma->vm_flags & VM_LOCKED) {
1887                 lock_page(old_page);    /* for LRU manipulation */
1888                 clear_page_mlock(old_page);
1889                 unlock_page(old_page);
1890         }
1891         cow_user_page(new_page, old_page, address, vma);
1892         __SetPageUptodate(new_page);
1893
1894         if (mem_cgroup_charge(new_page, mm, GFP_KERNEL))
1895                 goto oom_free_new;
1896
1897         /*
1898          * Re-check the pte - we dropped the lock
1899          */
1900         page_table = pte_offset_map_lock(mm, pmd, address, &ptl);
1901         if (likely(pte_same(*page_table, orig_pte))) {
1902                 if (old_page) {
1903                         if (!PageAnon(old_page)) {
1904                                 dec_mm_counter(mm, file_rss);
1905                                 inc_mm_counter(mm, anon_rss);
1906                         }
1907                 } else
1908                         inc_mm_counter(mm, anon_rss);
1909                 flush_cache_page(vma, address, pte_pfn(orig_pte));
1910                 entry = mk_pte(new_page, vma->vm_page_prot);
1911                 entry = maybe_mkwrite(pte_mkdirty(entry), vma);
1912                 /*
1913                  * Clear the pte entry and flush it first, before updating the
1914                  * pte with the new entry. This will avoid a race condition
1915                  * seen in the presence of one thread doing SMC and another
1916                  * thread doing COW.
1917                  */
1918                 ptep_clear_flush_notify(vma, address, page_table);
1919                 SetPageSwapBacked(new_page);
1920                 lru_cache_add_active_or_unevictable(new_page, vma);
1921                 page_add_new_anon_rmap(new_page, vma, address);
1922
1923                 /* TODO: is this safe?  do_anonymous_page() does it this way. */
1924                 set_pte_at(mm, address, page_table, entry);
1925                 update_mmu_cache(vma, address, entry);
1926                 if (old_page) {
1927                         /*
1928                          * Only after switching the pte to the new page may
1929                          * we remove the mapcount here. Otherwise another
1930                          * process may come and find the rmap count decremented
1931                          * before the pte is switched to the new page, and
1932                          * "reuse" the old page writing into it while our pte
1933                          * here still points into it and can be read by other
1934                          * threads.
1935                          *
1936                          * The critical issue is to order this
1937  * page_remove_rmap with the ptep_clear_flush above.
1938                          * Those stores are ordered by (if nothing else,)
1939                          * the barrier present in the atomic_add_negative
1940                          * in page_remove_rmap.
1941                          *
1942                          * Then the TLB flush in ptep_clear_flush ensures that
1943                          * no process can access the old page before the
1944                          * decremented mapcount is visible. And the old page
1945                          * cannot be reused until after the decremented
1946                          * mapcount is visible. So transitively, TLBs to
1947                          * old page will be flushed before it can be reused.
1948                          */
1949                         page_remove_rmap(old_page, vma);
1950                 }
1951
1952                 /* Free the old page.. */
1953                 new_page = old_page;
1954                 ret |= VM_FAULT_WRITE;
1955         } else
1956                 mem_cgroup_uncharge_page(new_page);
1957
1958         if (new_page)
1959                 page_cache_release(new_page);
1960         if (old_page)
1961                 page_cache_release(old_page);
1962 unlock:
1963         pte_unmap_unlock(page_table, ptl);
1964         if (dirty_page) {
1965                 if (vma->vm_file)
1966                         file_update_time(vma->vm_file);
1967
1968                 /*
1969                  * Yes, Virginia, this is actually required to prevent a race
1970                  * with clear_page_dirty_for_io() from clearing the page dirty
1971  * bit after it clears all dirty ptes, but before a racing
1972                  * do_wp_page installs a dirty pte.
1973                  *
1974                  * do_no_page is protected similarly.
1975                  */
1976                 wait_on_page_locked(dirty_page);
1977                 set_page_dirty_balance(dirty_page, page_mkwrite);
1978                 put_page(dirty_page);
1979         }
1980         return ret;
1981 oom_free_new:
1982         page_cache_release(new_page);
1983 oom:
1984         if (old_page)
1985                 page_cache_release(old_page);
1986         return VM_FAULT_OOM;
1987
1988 unwritable_page:
1989         page_cache_release(old_page);
1990         return VM_FAULT_SIGBUS;
1991 }
1992
1993 /*
1994  * Helper functions for unmap_mapping_range().
1995  *
1996  * __ Notes on dropping i_mmap_lock to reduce latency while unmapping __
1997  *
1998  * We have to restart searching the prio_tree whenever we drop the lock,
1999  * since the iterator is only valid while the lock is held, and anyway
2000  * a later vma might be split and reinserted earlier while lock dropped.
2001  *
2002  * The list of nonlinear vmas could be handled more efficiently, using
2003  * a placeholder, but handle it in the same way until a need is shown.
2004  * It is important to search the prio_tree before nonlinear list: a vma
2005  * may become nonlinear and be shifted from prio_tree to nonlinear list
2006  * while the lock is dropped; but never shifted from list to prio_tree.
2007  *
2008  * In order to make forward progress despite restarting the search,
2009  * vm_truncate_count is used to mark a vma as now dealt with, so we can
2010  * quickly skip it next time around.  Since the prio_tree search only
2011  * shows us those vmas affected by unmapping the range in question, we
2012  * can't efficiently keep all vmas in step with mapping->truncate_count:
2013  * so instead reset them all whenever it wraps back to 0 (then go to 1).
2014  * mapping->truncate_count and vma->vm_truncate_count are protected by
2015  * i_mmap_lock.
2016  *
2017  * In order to make forward progress despite repeatedly restarting some
2018  * large vma, note the restart_addr from unmap_vmas when it breaks out:
2019  * and restart from that address when we reach that vma again.  It might
2020  * have been split or merged, shrunk or extended, but never shifted: so
2021  * restart_addr remains valid so long as it remains in the vma's range.
2022  * unmap_mapping_range forces truncate_count to leap over page-aligned
2023  * values so we can save vma's restart_addr in its truncate_count field.
2024  */
2025 #define is_restart_addr(truncate_count) (!((truncate_count) & ~PAGE_MASK))
2026
2027 static void reset_vma_truncate_counts(struct address_space *mapping)
2028 {
2029         struct vm_area_struct *vma;
2030         struct prio_tree_iter iter;
2031
2032         vma_prio_tree_foreach(vma, &iter, &mapping->i_mmap, 0, ULONG_MAX)
2033                 vma->vm_truncate_count = 0;
2034         list_for_each_entry(vma, &mapping->i_mmap_nonlinear, shared.vm_set.list)
2035                 vma->vm_truncate_count = 0;
2036 }
2037
2038 static int unmap_mapping_range_vma(struct vm_area_struct *vma,
2039                 unsigned long start_addr, unsigned long end_addr,
2040                 struct zap_details *details)
2041 {
2042         unsigned long restart_addr;
2043         int need_break;
2044
2045         /*
2046          * files that support invalidating or truncating portions of the
2047          * file from under mmaped areas must have their ->fault function
2048          * return a locked page (and set VM_FAULT_LOCKED in the return).
2049          * This provides synchronisation against concurrent unmapping here.
2050          */
2051
2052 again:
2053         restart_addr = vma->vm_truncate_count;
2054         if (is_restart_addr(restart_addr) && start_addr < restart_addr) {
2055                 start_addr = restart_addr;
2056                 if (start_addr >= end_addr) {
2057                         /* Top of vma has been split off since last time */
2058                         vma->vm_truncate_count = details->truncate_count;
2059                         return 0;
2060                 }
2061         }
2062
2063         restart_addr = zap_page_range(vma, start_addr,
2064                                         end_addr - start_addr, details);
2065         need_break = need_resched() || spin_needbreak(details->i_mmap_lock);
2066
2067         if (restart_addr >= end_addr) {
2068                 /* We have now completed this vma: mark it so */
2069                 vma->vm_truncate_count = details->truncate_count;
2070                 if (!need_break)
2071                         return 0;
2072         } else {
2073                 /* Note restart_addr in vma's truncate_count field */
2074                 vma->vm_truncate_count = restart_addr;
2075                 if (!need_break)
2076                         goto again;
2077         }
2078
2079         spin_unlock(details->i_mmap_lock);
2080         cond_resched();
2081         spin_lock(details->i_mmap_lock);
2082         return -EINTR;
2083 }
2084
2085 static inline void unmap_mapping_range_tree(struct prio_tree_root *root,
2086                                             struct zap_details *details)
2087 {
2088         struct vm_area_struct *vma;
2089         struct prio_tree_iter iter;
2090         pgoff_t vba, vea, zba, zea;
2091
2092 restart:
2093         vma_prio_tree_foreach(vma, &iter, root,
2094                         details->first_index, details->last_index) {
2095                 /* Skip quickly over those we have already dealt with */
2096                 if (vma->vm_truncate_count == details->truncate_count)
2097                         continue;
2098
2099                 vba = vma->vm_pgoff;
2100                 vea = vba + ((vma->vm_end - vma->vm_start) >> PAGE_SHIFT) - 1;
2101                 /* Assume for now that PAGE_CACHE_SHIFT == PAGE_SHIFT */
2102                 zba = details->first_index;
2103                 if (zba < vba)
2104                         zba = vba;
2105                 zea = details->last_index;
2106                 if (zea > vea)
2107                         zea = vea;
2108
2109                 if (unmap_mapping_range_vma(vma,
2110                         ((zba - vba) << PAGE_SHIFT) + vma->vm_start,
2111                         ((zea - vba + 1) << PAGE_SHIFT) + vma->vm_start,
2112                                 details) < 0)
2113                         goto restart;
2114         }
2115 }
2116
2117 static inline void unmap_mapping_range_list(struct list_head *head,
2118                                             struct zap_details *details)
2119 {
2120         struct vm_area_struct *vma;
2121
2122         /*
2123          * In nonlinear VMAs there is no correspondence between virtual address
2124          * offset and file offset.  So we must perform an exhaustive search
2125          * across *all* the pages in each nonlinear VMA, not just the pages
2126          * whose virtual address lies outside the file truncation point.
2127          */
2128 restart:
2129         list_for_each_entry(vma, head, shared.vm_set.list) {
2130                 /* Skip quickly over those we have already dealt with */
2131                 if (vma->vm_truncate_count == details->truncate_count)
2132                         continue;
2133                 details->nonlinear_vma = vma;
2134                 if (unmap_mapping_range_vma(vma, vma->vm_start,
2135                                         vma->vm_end, details) < 0)
2136                         goto restart;
2137         }
2138 }
2139
2140 /**
2141  * unmap_mapping_range - unmap the portion of all mmaps in the specified address_space corresponding to the specified page range in the underlying file.
2142  * @mapping: the address space containing mmaps to be unmapped.
2143  * @holebegin: byte in first page to unmap, relative to the start of
2144  * the underlying file.  This will be rounded down to a PAGE_SIZE
2145  * boundary.  Note that this is different from vmtruncate(), which
2146  * must keep the partial page.  In contrast, we must get rid of
2147  * partial pages.
2148  * @holelen: size of prospective hole in bytes.  This will be rounded
2149  * up to a PAGE_SIZE boundary.  A holelen of zero truncates to the
2150  * end of the file.
2151  * @even_cows: 1 when truncating a file, unmap even private COWed pages;
2152  * but 0 when invalidating pagecache, don't throw away private data.
2153  */
2154 void unmap_mapping_range(struct address_space *mapping,
2155                 loff_t const holebegin, loff_t const holelen, int even_cows)
2156 {
2157         struct zap_details details;
2158         pgoff_t hba = holebegin >> PAGE_SHIFT;
2159         pgoff_t hlen = (holelen + PAGE_SIZE - 1) >> PAGE_SHIFT;
2160
2161         /* Check for overflow. */
2162         if (sizeof(holelen) > sizeof(hlen)) {
2163                 long long holeend =
2164                         (holebegin + holelen + PAGE_SIZE - 1) >> PAGE_SHIFT;
2165                 if (holeend & ~(long long)ULONG_MAX)
2166                         hlen = ULONG_MAX - hba + 1;
2167         }
2168
2169         details.check_mapping = even_cows? NULL: mapping;
2170         details.nonlinear_vma = NULL;
2171         details.first_index = hba;
2172         details.last_index = hba + hlen - 1;
2173         if (details.last_index < details.first_index)
2174                 details.last_index = ULONG_MAX;
2175         details.i_mmap_lock = &mapping->i_mmap_lock;
2176
2177         spin_lock(&mapping->i_mmap_lock);
2178
2179         /* Protect against endless unmapping loops */
2180         mapping->truncate_count++;
2181         if (unlikely(is_restart_addr(mapping->truncate_count))) {
2182                 if (mapping->truncate_count == 0)
2183                         reset_vma_truncate_counts(mapping);
2184                 mapping->truncate_count++;
2185         }
2186         details.truncate_count = mapping->truncate_count;
2187
2188         if (unlikely(!prio_tree_empty(&mapping->i_mmap)))
2189                 unmap_mapping_range_tree(&mapping->i_mmap, &details);
2190         if (unlikely(!list_empty(&mapping->i_mmap_nonlinear)))
2191                 unmap_mapping_range_list(&mapping->i_mmap_nonlinear, &details);
2192         spin_unlock(&mapping->i_mmap_lock);
2193 }
2194 EXPORT_SYMBOL(unmap_mapping_range);
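/*
 * Illustrative sketch, not part of this file: how a filesystem's
 * hole-punching path might use unmap_mapping_range(), mirroring the
 * unmap/truncate/unmap pattern of vmtruncate_range() below.
 * my_punch_hole is a hypothetical name and start/len are assumed to be
 * page-aligned.
 *
 *	static void my_punch_hole(struct inode *inode, loff_t start, loff_t len)
 *	{
 *		struct address_space *mapping = inode->i_mapping;
 *
 *		unmap_mapping_range(mapping, start, len, 1);
 *		truncate_inode_pages_range(mapping, start, start + len - 1);
 *		unmap_mapping_range(mapping, start, len, 1);
 *	}
 *
 * The second unmap_mapping_range() call catches pages that were COWed
 * between the first call and the pagecache truncation, for the same
 * reason described in the comment in vmtruncate() below.
 */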
2195
2196 /**
2197  * vmtruncate - unmap mappings "freed" by truncate() syscall
2198  * @inode: inode of the file used
2199  * @offset: file offset to start truncating
2200  *
2201  * NOTE! We have to be ready to update the memory sharing
2202  * between the file and the memory map for a potential last
2203  * incomplete page.  Ugly, but necessary.
2204  */
2205 int vmtruncate(struct inode * inode, loff_t offset)
2206 {
2207         if (inode->i_size < offset) {
2208                 unsigned long limit;
2209
2210                 limit = current->signal->rlim[RLIMIT_FSIZE].rlim_cur;
2211                 if (limit != RLIM_INFINITY && offset > limit)
2212                         goto out_sig;
2213                 if (offset > inode->i_sb->s_maxbytes)
2214                         goto out_big;
2215                 i_size_write(inode, offset);
2216         } else {
2217                 struct address_space *mapping = inode->i_mapping;
2218
2219                 /*
2220                  * truncation of in-use swapfiles is disallowed - it would
2221                  * cause subsequent swapout to scribble on the now-freed
2222                  * blocks.
2223                  */
2224                 if (IS_SWAPFILE(inode))
2225                         return -ETXTBSY;
2226                 i_size_write(inode, offset);
2227
2228                 /*
2229                  * unmap_mapping_range is called twice, first simply for
2230                  * efficiency so that truncate_inode_pages does fewer
2231                  * single-page unmaps.  However after this first call, and
2232                  * before truncate_inode_pages finishes, it is possible for
2233                  * private pages to be COWed, which remain after
2234                  * truncate_inode_pages finishes, hence the second
2235                  * unmap_mapping_range call must be made for correctness.
2236                  */
2237                 unmap_mapping_range(mapping, offset + PAGE_SIZE - 1, 0, 1);
2238                 truncate_inode_pages(mapping, offset);
2239                 unmap_mapping_range(mapping, offset + PAGE_SIZE - 1, 0, 1);
2240         }
2241
2242         if (inode->i_op && inode->i_op->truncate)
2243                 inode->i_op->truncate(inode);
2244         return 0;
2245
2246 out_sig:
2247         send_sig(SIGXFSZ, current, 0);
2248 out_big:
2249         return -EFBIG;
2250 }
2251 EXPORT_SYMBOL(vmtruncate);
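/*
 * Illustrative sketch, not part of this file: a filesystem ->setattr
 * method handling ATTR_SIZE via vmtruncate().  my_fs_setattr is a
 * hypothetical name; many filesystems get the same behaviour for free by
 * calling the generic inode_setattr() helper, which itself calls
 * vmtruncate() when the size changes.
 *
 *	static int my_fs_setattr(struct dentry *dentry, struct iattr *attr)
 *	{
 *		struct inode *inode = dentry->d_inode;
 *		int err;
 *
 *		err = inode_change_ok(inode, attr);
 *		if (err)
 *			return err;
 *		if (attr->ia_valid & ATTR_SIZE) {
 *			err = vmtruncate(inode, attr->ia_size);
 *			if (err)
 *				return err;
 *		}
 *		return inode_setattr(inode, attr);
 *	}
 */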
2252
2253 int vmtruncate_range(struct inode *inode, loff_t offset, loff_t end)
2254 {
2255         struct address_space *mapping = inode->i_mapping;
2256
2257         /*
2258          * If the underlying filesystem is not going to provide
2259          * a way to truncate a range of blocks (punch a hole) -
2260          * we should return failure right now.
2261          */
2262         if (!inode->i_op || !inode->i_op->truncate_range)
2263                 return -ENOSYS;
2264
2265         mutex_lock(&inode->i_mutex);
2266         down_write(&inode->i_alloc_sem);
2267         unmap_mapping_range(mapping, offset, (end - offset), 1);
2268         truncate_inode_pages_range(mapping, offset, end);
2269         unmap_mapping_range(mapping, offset, (end - offset), 1);
2270         inode->i_op->truncate_range(inode, offset, end);
2271         up_write(&inode->i_alloc_sem);
2272         mutex_unlock(&inode->i_mutex);
2273
2274         return 0;
2275 }
2276
2277 /*
2278  * We enter with non-exclusive mmap_sem (to exclude vma changes,
2279  * but allow concurrent faults), and pte mapped but not yet locked.
2280  * We return with mmap_sem still held, but pte unmapped and unlocked.
2281  */
2282 static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma,
2283                 unsigned long address, pte_t *page_table, pmd_t *pmd,
2284                 int write_access, pte_t orig_pte)
2285 {
2286         spinlock_t *ptl;
2287         struct page *page;
2288         swp_entry_t entry;
2289         pte_t pte;
2290         int ret = 0;
2291
2292         if (!pte_unmap_same(mm, pmd, page_table, orig_pte))
2293                 goto out;
2294
2295         entry = pte_to_swp_entry(orig_pte);
2296         if (is_migration_entry(entry)) {
2297                 migration_entry_wait(mm, pmd, address);
2298                 goto out;
2299         }
2300         delayacct_set_flag(DELAYACCT_PF_SWAPIN);
2301         page = lookup_swap_cache(entry);
2302         if (!page) {
2303                 grab_swap_token(); /* Contend for token _before_ read-in */
2304                 page = swapin_readahead(entry,
2305                                         GFP_HIGHUSER_MOVABLE, vma, address);
2306                 if (!page) {
2307                         /*
2308                          * Back out if somebody else faulted in this pte
2309                          * while we released the pte lock.
2310                          */
2311                         page_table = pte_offset_map_lock(mm, pmd, address, &ptl);
2312                         if (likely(pte_same(*page_table, orig_pte)))
2313                                 ret = VM_FAULT_OOM;
2314                         delayacct_clear_flag(DELAYACCT_PF_SWAPIN);
2315                         goto unlock;
2316                 }
2317
2318                 /* Had to read the page from swap area: Major fault */
2319                 ret = VM_FAULT_MAJOR;
2320                 count_vm_event(PGMAJFAULT);
2321         }
2322
2323         mark_page_accessed(page);
2324
2325         lock_page(page);
2326         delayacct_clear_flag(DELAYACCT_PF_SWAPIN);
2327
2328         if (mem_cgroup_charge(page, mm, GFP_KERNEL)) {
2329                 ret = VM_FAULT_OOM;
2330                 unlock_page(page);
2331                 goto out;
2332         }
2333
2334         /*
2335          * Back out if somebody else already faulted in this pte.
2336          */
2337         page_table = pte_offset_map_lock(mm, pmd, address, &ptl);
2338         if (unlikely(!pte_same(*page_table, orig_pte)))
2339                 goto out_nomap;
2340
2341         if (unlikely(!PageUptodate(page))) {
2342                 ret = VM_FAULT_SIGBUS;
2343                 goto out_nomap;
2344         }
2345
2346         /* The page isn't present yet, go ahead with the fault. */
2347
2348         inc_mm_counter(mm, anon_rss);
2349         pte = mk_pte(page, vma->vm_page_prot);
2350         if (write_access && can_share_swap_page(page)) {
2351                 pte = maybe_mkwrite(pte_mkdirty(pte), vma);
2352                 write_access = 0;
2353         }
2354
2355         flush_icache_page(vma, page);
2356         set_pte_at(mm, address, page_table, pte);
2357         page_add_anon_rmap(page, vma, address);
2358
2359         swap_free(entry);
2360         if (vm_swap_full() || (vma->vm_flags & VM_LOCKED) || PageMlocked(page))
2361                 remove_exclusive_swap_page(page);
2362         unlock_page(page);
2363
2364         if (write_access) {
2365                 ret |= do_wp_page(mm, vma, address, page_table, pmd, ptl, pte);
2366                 if (ret & VM_FAULT_ERROR)
2367                         ret &= VM_FAULT_ERROR;
2368                 goto out;
2369         }
2370
2371         /* No need to invalidate - it was non-present before */
2372         update_mmu_cache(vma, address, pte);
2373 unlock:
2374         pte_unmap_unlock(page_table, ptl);
2375 out:
2376         return ret;
2377 out_nomap:
2378         mem_cgroup_uncharge_page(page);
2379         pte_unmap_unlock(page_table, ptl);
2380         unlock_page(page);
2381         page_cache_release(page);
2382         return ret;
2383 }
2384
2385 /*
2386  * We enter with non-exclusive mmap_sem (to exclude vma changes,
2387  * but allow concurrent faults), and pte mapped but not yet locked.
2388  * We return with mmap_sem still held, but pte unmapped and unlocked.
2389  */
2390 static int do_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
2391                 unsigned long address, pte_t *page_table, pmd_t *pmd,
2392                 int write_access)
2393 {
2394         struct page *page;
2395         spinlock_t *ptl;
2396         pte_t entry;
2397
2398         /* Allocate our own private page. */
2399         pte_unmap(page_table);
2400
2401         if (unlikely(anon_vma_prepare(vma)))
2402                 goto oom;
2403         page = alloc_zeroed_user_highpage_movable(vma, address);
2404         if (!page)
2405                 goto oom;
2406         __SetPageUptodate(page);
2407
2408         if (mem_cgroup_charge(page, mm, GFP_KERNEL))
2409                 goto oom_free_page;
2410
2411         entry = mk_pte(page, vma->vm_page_prot);
2412         entry = maybe_mkwrite(pte_mkdirty(entry), vma);
2413
2414         page_table = pte_offset_map_lock(mm, pmd, address, &ptl);
2415         if (!pte_none(*page_table))
2416                 goto release;
2417         inc_mm_counter(mm, anon_rss);
2418         SetPageSwapBacked(page);
2419         lru_cache_add_active_or_unevictable(page, vma);
2420         page_add_new_anon_rmap(page, vma, address);
2421         set_pte_at(mm, address, page_table, entry);
2422
2423         /* No need to invalidate - it was non-present before */
2424         update_mmu_cache(vma, address, entry);
2425 unlock:
2426         pte_unmap_unlock(page_table, ptl);
2427         return 0;
2428 release:
2429         mem_cgroup_uncharge_page(page);
2430         page_cache_release(page);
2431         goto unlock;
2432 oom_free_page:
2433         page_cache_release(page);
2434 oom:
2435         return VM_FAULT_OOM;
2436 }
2437
2438 /*
2439  * __do_fault() tries to create a new page mapping. It aggressively
2440  * tries to share with existing pages, but makes a separate copy if
2441  * the FAULT_FLAG_WRITE is set in the flags parameter in order to avoid
2442  * the next page fault.
2443  *
2444  * As this is called only for pages that do not currently exist, we
2445  * do not need to flush old virtual caches or the TLB.
2446  *
2447  * We enter with non-exclusive mmap_sem (to exclude vma changes,
2448  * but allow concurrent faults), and pte neither mapped nor locked.
2449  * We return with mmap_sem still held, but pte unmapped and unlocked.
2450  */
2451 static int __do_fault(struct mm_struct *mm, struct vm_area_struct *vma,
2452                 unsigned long address, pmd_t *pmd,
2453                 pgoff_t pgoff, unsigned int flags, pte_t orig_pte)
2454 {
2455         pte_t *page_table;
2456         spinlock_t *ptl;
2457         struct page *page;
2458         pte_t entry;
2459         int anon = 0;
2460         int charged = 0;
2461         struct page *dirty_page = NULL;
2462         struct vm_fault vmf;
2463         int ret;
2464         int page_mkwrite = 0;
2465
2466         vmf.virtual_address = (void __user *)(address & PAGE_MASK);
2467         vmf.pgoff = pgoff;
2468         vmf.flags = flags;
2469         vmf.page = NULL;
2470
2471         ret = vma->vm_ops->fault(vma, &vmf);
2472         if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE)))
2473                 return ret;
2474
2475         /*
2476          * For consistency in subsequent calls, make the faulted page always
2477          * locked.
2478          */
2479         if (unlikely(!(ret & VM_FAULT_LOCKED)))
2480                 lock_page(vmf.page);
2481         else
2482                 VM_BUG_ON(!PageLocked(vmf.page));
2483
2484         /*
2485          * Should we do an early C-O-W break?
2486          */
2487         page = vmf.page;
2488         if (flags & FAULT_FLAG_WRITE) {
2489                 if (!(vma->vm_flags & VM_SHARED)) {
2490                         anon = 1;
2491                         if (unlikely(anon_vma_prepare(vma))) {
2492                                 ret = VM_FAULT_OOM;
2493                                 goto out;
2494                         }
2495                         page = alloc_page_vma(GFP_HIGHUSER_MOVABLE,
2496                                                 vma, address);
2497                         if (!page) {
2498                                 ret = VM_FAULT_OOM;
2499                                 goto out;
2500                         }
2501                         if (mem_cgroup_charge(page, mm, GFP_KERNEL)) {
2502                                 ret = VM_FAULT_OOM;
2503                                 page_cache_release(page);
2504                                 goto out;
2505                         }
2506                         charged = 1;
2507                         /*
2508                          * Don't let another task, with possibly unlocked vma,
2509                          * keep the mlocked page.
2510                          */
2511                         if (vma->vm_flags & VM_LOCKED)
2512                                 clear_page_mlock(vmf.page);
2513                         copy_user_highpage(page, vmf.page, address, vma);
2514                         __SetPageUptodate(page);
2515                 } else {
2516                         /*
2517                          * If the page will be shareable, see if the backing
2518                          * address space wants to know that the page is about
2519                          * to become writable
2520                          */
2521                         if (vma->vm_ops->page_mkwrite) {
2522                                 unlock_page(page);
2523                                 if (vma->vm_ops->page_mkwrite(vma, page) < 0) {
2524                                         ret = VM_FAULT_SIGBUS;
2525                                         anon = 1; /* no anon but release vmf.page */
2526                                         goto out_unlocked;
2527                                 }
2528                                 lock_page(page);
2529                                 /*
2530                                  * XXX: this is not quite right (racy vs
2531                                  * invalidate) to unlock and relock the page
2532                                  * like this, however a better fix requires
2533                                  * reworking page_mkwrite locking API, which
2534                                  * is better done later.
2535                                  */
2536                                 if (!page->mapping) {
2537                                         ret = 0;
2538                                         anon = 1; /* no anon but release vmf.page */
2539                                         goto out;
2540                                 }
2541                                 page_mkwrite = 1;
2542                         }
2543                 }
2544
2545         }
2546
2547         page_table = pte_offset_map_lock(mm, pmd, address, &ptl);
2548
2549         /*
2550          * This silly early PAGE_DIRTY setting removes a race
2551          * due to the bad i386 page protection. But it's valid
2552          * for other architectures too.
2553          *
2554          * Note that if write_access is true, we either now have
2555          * an exclusive copy of the page, or this is a shared mapping,
2556          * so we can make it writable and dirty to avoid having to
2557          * handle that later.
2558          */
2559         /* Only go through if we didn't race with anybody else... */
2560         if (likely(pte_same(*page_table, orig_pte))) {
2561                 flush_icache_page(vma, page);
2562                 entry = mk_pte(page, vma->vm_page_prot);
2563                 if (flags & FAULT_FLAG_WRITE)
2564                         entry = maybe_mkwrite(pte_mkdirty(entry), vma);
2565                 if (anon) {
2566                         inc_mm_counter(mm, anon_rss);
2567                         SetPageSwapBacked(page);
2568                         lru_cache_add_active_or_unevictable(page, vma);
2569                         page_add_new_anon_rmap(page, vma, address);
2570                 } else {
2571                         inc_mm_counter(mm, file_rss);
2572                         page_add_file_rmap(page);
2573                         if (flags & FAULT_FLAG_WRITE) {
2574                                 dirty_page = page;
2575                                 get_page(dirty_page);
2576                         }
2577                 }
2578                 /* TODO: is this safe?  do_anonymous_page() does it this way. */
2579                 set_pte_at(mm, address, page_table, entry);
2580
2581                 /* no need to invalidate: a not-present page won't be cached */
2582                 update_mmu_cache(vma, address, entry);
2583         } else {
2584                 if (charged)
2585                         mem_cgroup_uncharge_page(page);
2586                 if (anon)
2587                         page_cache_release(page);
2588                 else
2589                         anon = 1; /* no anon but release faulted_page */
2590         }
2591
2592         pte_unmap_unlock(page_table, ptl);
2593
2594 out:
2595         unlock_page(vmf.page);
2596 out_unlocked:
2597         if (anon)
2598                 page_cache_release(vmf.page);
2599         else if (dirty_page) {
2600                 if (vma->vm_file)
2601                         file_update_time(vma->vm_file);
2602
2603                 set_page_dirty_balance(dirty_page, page_mkwrite);
2604                 put_page(dirty_page);
2605         }
2606
2607         return ret;
2608 }
2609
2610 static int do_linear_fault(struct mm_struct *mm, struct vm_area_struct *vma,
2611                 unsigned long address, pte_t *page_table, pmd_t *pmd,
2612                 int write_access, pte_t orig_pte)
2613 {
2614         pgoff_t pgoff = (((address & PAGE_MASK)
2615                         - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff;
2616         unsigned int flags = (write_access ? FAULT_FLAG_WRITE : 0);
2617
2618         pte_unmap(page_table);
2619         return __do_fault(mm, vma, address, pmd, pgoff, flags, orig_pte);
2620 }
2621
2622 /*
2623  * Fault of a previously existing named mapping. Repopulate the pte
2624  * from the encoded file_pte if possible. This enables swappable
2625  * nonlinear vmas.
2626  *
2627  * We enter with non-exclusive mmap_sem (to exclude vma changes,
2628  * but allow concurrent faults), and pte mapped but not yet locked.
2629  * We return with mmap_sem still held, but pte unmapped and unlocked.
2630  */
2631 static int do_nonlinear_fault(struct mm_struct *mm, struct vm_area_struct *vma,
2632                 unsigned long address, pte_t *page_table, pmd_t *pmd,
2633                 int write_access, pte_t orig_pte)
2634 {
2635         unsigned int flags = FAULT_FLAG_NONLINEAR |
2636                                 (write_access ? FAULT_FLAG_WRITE : 0);
2637         pgoff_t pgoff;
2638
2639         if (!pte_unmap_same(mm, pmd, page_table, orig_pte))
2640                 return 0;
2641
2642         if (unlikely(!(vma->vm_flags & VM_NONLINEAR) ||
2643                         !(vma->vm_flags & VM_CAN_NONLINEAR))) {
2644                 /*
2645                  * Page table corrupted: show pte and kill process.
2646                  */
2647                 print_bad_pte(vma, orig_pte, address);
2648                 return VM_FAULT_OOM;
2649         }
2650
2651         pgoff = pte_to_pgoff(orig_pte);
2652         return __do_fault(mm, vma, address, pmd, pgoff, flags, orig_pte);
2653 }
2654
2655 /*
2656  * These routines also need to handle stuff like marking pages dirty
2657  * and/or accessed for architectures that don't do it in hardware (most
2658  * RISC architectures).  The early dirtying is also good on the i386.
2659  *
2660  * There is also a hook called "update_mmu_cache()" that architectures
2661  * with external mmu caches can use to update those (ie the Sparc or
2662  * PowerPC hashed page tables that act as extended TLBs).
2663  *
2664  * We enter with non-exclusive mmap_sem (to exclude vma changes,
2665  * but allow concurrent faults), and pte mapped but not yet locked.
2666  * We return with mmap_sem still held, but pte unmapped and unlocked.
2667  */
2668 static inline int handle_pte_fault(struct mm_struct *mm,
2669                 struct vm_area_struct *vma, unsigned long address,
2670                 pte_t *pte, pmd_t *pmd, int write_access)
2671 {
2672         pte_t entry;
2673         spinlock_t *ptl;
2674
2675         entry = *pte;
2676         if (!pte_present(entry)) {
2677                 if (pte_none(entry)) {
2678                         if (vma->vm_ops) {
2679                                 if (likely(vma->vm_ops->fault))
2680                                         return do_linear_fault(mm, vma, address,
2681                                                 pte, pmd, write_access, entry);
2682                         }
2683                         return do_anonymous_page(mm, vma, address,
2684                                                  pte, pmd, write_access);
2685                 }
2686                 if (pte_file(entry))
2687                         return do_nonlinear_fault(mm, vma, address,
2688                                         pte, pmd, write_access, entry);
2689                 return do_swap_page(mm, vma, address,
2690                                         pte, pmd, write_access, entry);
2691         }
2692
2693         ptl = pte_lockptr(mm, pmd);
2694         spin_lock(ptl);
2695         if (unlikely(!pte_same(*pte, entry)))
2696                 goto unlock;
2697         if (write_access) {
2698                 if (!pte_write(entry))
2699                         return do_wp_page(mm, vma, address,
2700                                         pte, pmd, ptl, entry);
2701                 entry = pte_mkdirty(entry);
2702         }
2703         entry = pte_mkyoung(entry);
2704         if (ptep_set_access_flags(vma, address, pte, entry, write_access)) {
2705                 update_mmu_cache(vma, address, entry);
2706         } else {
2707                 /*
2708                  * This is needed only for protection faults but the arch code
2709                  * is not yet telling us if this is a protection fault or not.
2710                  * This still avoids useless tlb flushes for .text page faults
2711                  * with threads.
2712                  */
2713                 if (write_access)
2714                         flush_tlb_page(vma, address);
2715         }
2716 unlock:
2717         pte_unmap_unlock(pte, ptl);
2718         return 0;
2719 }
2720
2721 /*
2722  * By the time we get here, we already hold the mm semaphore
2723  */
2724 int handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma,
2725                 unsigned long address, int write_access)
2726 {
2727         pgd_t *pgd;
2728         pud_t *pud;
2729         pmd_t *pmd;
2730         pte_t *pte;
2731
2732         __set_current_state(TASK_RUNNING);
2733
2734         count_vm_event(PGFAULT);
2735
2736         if (unlikely(is_vm_hugetlb_page(vma)))
2737                 return hugetlb_fault(mm, vma, address, write_access);
2738
2739         pgd = pgd_offset(mm, address);
2740         pud = pud_alloc(mm, pgd, address);
2741         if (!pud)
2742                 return VM_FAULT_OOM;
2743         pmd = pmd_alloc(mm, pud, address);
2744         if (!pmd)
2745                 return VM_FAULT_OOM;
2746         pte = pte_alloc_map(mm, pmd, address);
2747         if (!pte)
2748                 return VM_FAULT_OOM;
2749
2750         return handle_pte_fault(mm, vma, address, pte, pmd, write_access);
2751 }
2752
2753 #ifndef __PAGETABLE_PUD_FOLDED
2754 /*
2755  * Allocate page upper directory.
2756  * We've already handled the fast-path in-line.
2757  */
2758 int __pud_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long address)
2759 {
2760         pud_t *new = pud_alloc_one(mm, address);
2761         if (!new)
2762                 return -ENOMEM;
2763
2764         smp_wmb(); /* See comment in __pte_alloc */
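        /*
         * (Editorial note.)  As in __pte_alloc(), the barrier makes sure
         * the freshly allocated (and zeroed) pud page is visible to other
         * CPUs before the pgd entry that publishes it, so a lockless
         * page-table walker can never follow the new entry into
         * uninitialized memory.
         */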
2765
2766         spin_lock(&mm->page_table_lock);
2767         if (pgd_present(*pgd))          /* Another has populated it */
2768                 pud_free(mm, new);
2769         else
2770                 pgd_populate(mm, pgd, new);
2771         spin_unlock(&mm->page_table_lock);
2772         return 0;
2773 }
2774 #endif /* __PAGETABLE_PUD_FOLDED */
2775
2776 #ifndef __PAGETABLE_PMD_FOLDED
2777 /*
2778  * Allocate page middle directory.
2779  * We've already handled the fast-path in-line.
2780  */
2781 int __pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address)
2782 {
2783         pmd_t *new = pmd_alloc_one(mm, address);
2784         if (!new)
2785                 return -ENOMEM;
2786
2787         smp_wmb(); /* See comment in __pte_alloc */
2788
2789         spin_lock(&mm->page_table_lock);
2790 #ifndef __ARCH_HAS_4LEVEL_HACK
2791         if (pud_present(*pud))          /* Another has populated it */
2792                 pmd_free(mm, new);
2793         else
2794                 pud_populate(mm, pud, new);
2795 #else
2796         if (pgd_present(*pud))          /* Another has populated it */
2797                 pmd_free(mm, new);
2798         else
2799                 pgd_populate(mm, pud, new);
2800 #endif /* __ARCH_HAS_4LEVEL_HACK */
2801         spin_unlock(&mm->page_table_lock);
2802         return 0;
2803 }
2804 #endif /* __PAGETABLE_PMD_FOLDED */
2805
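/*
 * (Editorial note.)  Fault in -- and, for writable VMAs, write-fault --
 * every page in [addr, end), which must lie entirely within one VMA.
 * Callers such as mlock() use this to populate a range up front; it
 * goes through get_user_pages() rather than touching the page tables
 * directly.
 */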
2806 int make_pages_present(unsigned long addr, unsigned long end)
2807 {
2808         int ret, len, write;
2809         struct vm_area_struct * vma;
2810
2811         vma = find_vma(current->mm, addr);
2812         if (!vma)
2813                 return -ENOMEM;
2814         write = (vma->vm_flags & VM_WRITE) != 0;
2815         BUG_ON(addr >= end);
2816         BUG_ON(end > vma->vm_end);
2817         len = DIV_ROUND_UP(end, PAGE_SIZE) - addr/PAGE_SIZE;
2818         ret = get_user_pages(current, current->mm, addr,
2819                         len, write, 0, NULL, NULL);
2820         if (ret < 0)
2821                 return ret;
2822         return ret == len ? 0 : -EFAULT;
2823 }
2824
2825 #if !defined(__HAVE_ARCH_GATE_AREA)
2826
2827 #if defined(AT_SYSINFO_EHDR)
2828 static struct vm_area_struct gate_vma;
2829
2830 static int __init gate_vma_init(void)
2831 {
2832         gate_vma.vm_mm = NULL;
2833         gate_vma.vm_start = FIXADDR_USER_START;
2834         gate_vma.vm_end = FIXADDR_USER_END;
2835         gate_vma.vm_flags = VM_READ | VM_MAYREAD | VM_EXEC | VM_MAYEXEC;
2836         gate_vma.vm_page_prot = __P101;
2837         /*
2838          * Make sure the vDSO gets into every core dump.
2840  * Dumping its contents makes the post-mortem fully interpretable
2841  * later without having to match up the same kernel and hardware
2842  * config to see what the PC values meant.
2842          */
2843         gate_vma.vm_flags |= VM_ALWAYSDUMP;
2844         return 0;
2845 }
2846 __initcall(gate_vma_init);
2847 #endif
2848
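/*
 * (Editorial note.)  The "gate area" is the kernel-provided page (the
 * vDSO-style mapping described by gate_vma above, when AT_SYSINFO_EHDR
 * is defined) that shows up in every process even though the mm has no
 * real VMA for it; core-dump and get_user_pages() paths use these
 * helpers to treat it like one.
 */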
2849 struct vm_area_struct *get_gate_vma(struct task_struct *tsk)
2850 {
2851 #ifdef AT_SYSINFO_EHDR
2852         return &gate_vma;
2853 #else
2854         return NULL;
2855 #endif
2856 }
2857
2858 int in_gate_area_no_task(unsigned long addr)
2859 {
2860 #ifdef AT_SYSINFO_EHDR
2861         if ((addr >= FIXADDR_USER_START) && (addr < FIXADDR_USER_END))
2862                 return 1;
2863 #endif
2864         return 0;
2865 }
2866
2867 #endif  /* __HAVE_ARCH_GATE_AREA */
2868
2869 #ifdef CONFIG_HAVE_IOREMAP_PROT
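/*
 * (Editorial note.)  Walk the page tables of a VM_IO/VM_PFNMAP mapping
 * and return the physical address backing @address, or 0 if nothing is
 * mapped there; the page protection bits are returned through *prot.
 * Called with mmap_sem held, for mappings that have no struct page.
 */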
2870 static resource_size_t follow_phys(struct vm_area_struct *vma,
2871                         unsigned long address, unsigned int flags,
2872                         unsigned long *prot)
2873 {
2874         pgd_t *pgd;
2875         pud_t *pud;
2876         pmd_t *pmd;
2877         pte_t *ptep, pte;
2878         spinlock_t *ptl;
2879         resource_size_t phys_addr = 0;
2880         struct mm_struct *mm = vma->vm_mm;
2881
2882         VM_BUG_ON(!(vma->vm_flags & (VM_IO | VM_PFNMAP)));
2883
2884         pgd = pgd_offset(mm, address);
2885         if (pgd_none(*pgd) || unlikely(pgd_bad(*pgd)))
2886                 goto no_page_table;
2887
2888         pud = pud_offset(pgd, address);
2889         if (pud_none(*pud) || unlikely(pud_bad(*pud)))
2890                 goto no_page_table;
2891
2892         pmd = pmd_offset(pud, address);
2893         if (pmd_none(*pmd) || unlikely(pmd_bad(*pmd)))
2894                 goto no_page_table;
2895
2896         /* We cannot handle huge page PFN maps. Luckily they don't exist. */
2897         if (pmd_huge(*pmd))
2898                 goto no_page_table;
2899
2900         ptep = pte_offset_map_lock(mm, pmd, address, &ptl);
2901         if (!ptep)
2902                 goto out;
2903
2904         pte = *ptep;
2905         if (!pte_present(pte))
2906                 goto unlock;
2907         if ((flags & FOLL_WRITE) && !pte_write(pte))
2908                 goto unlock;
2909         phys_addr = pte_pfn(pte);
2910         phys_addr <<= PAGE_SHIFT; /* Shift here to avoid overflow on PAE */
2911
2912         *prot = pgprot_val(pte_pgprot(pte));
2913
2914 unlock:
2915         pte_unmap_unlock(ptep, ptl);
2916 out:
2917         return phys_addr;
2918 no_page_table:
2919         return 0;
2920 }
2921
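/*
 * (Editorial note.)  Generic ->access() implementation for
 * vm_operations_struct: drivers that map raw device memory with
 * VM_IO/VM_PFNMAP (such as /dev/mem) can point .access at this so that
 * access_process_vm() -- and therefore ptrace and /proc/<pid>/mem --
 * can still read and write the mapping.
 */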
2922 int generic_access_phys(struct vm_area_struct *vma, unsigned long addr,
2923                         void *buf, int len, int write)
2924 {
2925         resource_size_t phys_addr;
2926         unsigned long prot = 0;
2927         void *maddr;
2928         int offset = addr & (PAGE_SIZE-1);
2929
2930         if (!(vma->vm_flags & (VM_IO | VM_PFNMAP)))
2931                 return -EINVAL;
2932
2933         phys_addr = follow_phys(vma, addr, write ? FOLL_WRITE : 0, &prot);
2934
2935         if (!phys_addr)
2936                 return -EINVAL;
2937
2938         maddr = ioremap_prot(phys_addr, PAGE_SIZE, prot);
             if (!maddr)
                     return -ENOMEM;
2939         if (write)
2940                 memcpy_toio(maddr + offset, buf, len);
2941         else
2942                 memcpy_fromio(buf, maddr + offset, len);
2943         iounmap(maddr);
2944
2945         return len;
2946 }
2947 #endif
2948
2949 /*
2950  * Access another process' address space.
2951  * The source/target buffer must be in kernel space.
2952  * Do not walk the page tables directly; use get_user_pages().
2953  */
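/*
 * (Editorial note.)  This is the workhorse behind ptrace peek/poke and
 * /proc/<pid>/mem: it pins one page at a time with get_user_pages(),
 * copies through a temporary kernel mapping, and falls back to the
 * VMA's ->access() method for VM_IO/VM_PFNMAP areas that have no
 * struct page to pin.
 */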
2954 int access_process_vm(struct task_struct *tsk, unsigned long addr, void *buf, int len, int write)
2955 {
2956         struct mm_struct *mm;
2957         struct vm_area_struct *vma;
2958         void *old_buf = buf;
2959
2960         mm = get_task_mm(tsk);
2961         if (!mm)
2962                 return 0;
2963
2964         down_read(&mm->mmap_sem);
2965         /* ignore errors, just check how much was successfully transferred */
2966         while (len) {
2967                 int bytes, ret, offset;
2968                 void *maddr;
2969                 struct page *page = NULL;
2970
2971                 ret = get_user_pages(tsk, mm, addr, 1,
2972                                 write, 1, &page, &vma);
2973                 if (ret <= 0) {
2974                         /*
2975                          * Check if this is a VM_IO | VM_PFNMAP VMA, which
2976                          * we can access using slightly different code.
2977                          */
2978 #ifdef CONFIG_HAVE_IOREMAP_PROT
2979                         vma = find_vma(mm, addr);
2980                         if (!vma)
2981                                 break;
2982                         if (vma->vm_ops && vma->vm_ops->access)
2983                                 ret = vma->vm_ops->access(vma, addr, buf,
2984                                                           len, write);
2985                         if (ret <= 0)
2986 #endif
2987                                 break;
2988                         bytes = ret;
2989                 } else {
2990                         bytes = len;
2991                         offset = addr & (PAGE_SIZE-1);
2992                         if (bytes > PAGE_SIZE-offset)
2993                                 bytes = PAGE_SIZE-offset;
2994
2995                         maddr = kmap(page);
2996                         if (write) {
2997                                 copy_to_user_page(vma, page, addr,
2998                                                   maddr + offset, buf, bytes);
2999                                 set_page_dirty_lock(page);
3000                         } else {
3001                                 copy_from_user_page(vma, page, addr,
3002                                                     buf, maddr + offset, bytes);
3003                         }
3004                         kunmap(page);
3005                         page_cache_release(page);
3006                 }
3007                 len -= bytes;
3008                 buf += bytes;
3009                 addr += bytes;
3010         }
3011         up_read(&mm->mmap_sem);
3012         mmput(mm);
3013
3014         return buf - old_buf;
3015 }
3016
3017 /*
3018  * Print the name of a VMA.
3019  */
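/*
 * (Editorial note.)  Used by the architecture fault code when printing
 * "segfault at ..." style messages, so the report names the mapping
 * (for example the shared library) that the faulting instruction
 * pointer fell into.
 */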
3020 void print_vma_addr(char *prefix, unsigned long ip)
3021 {
3022         struct mm_struct *mm = current->mm;
3023         struct vm_area_struct *vma;
3024
3025         /*
3026          * Do not print if we are in an atomic
3027          * context (in exception stacks, etc.):
3028          */
3029         if (preempt_count())
3030                 return;
3031
3032         down_read(&mm->mmap_sem);
3033         vma = find_vma(mm, ip);
3034         if (vma && vma->vm_file) {
3035                 struct file *f = vma->vm_file;
3036                 char *buf = (char *)__get_free_page(GFP_KERNEL);
3037                 if (buf) {
3038                         char *p, *s;
3039
3040                         p = d_path(&f->f_path, buf, PAGE_SIZE);
3041                         if (IS_ERR(p))
3042                                 p = "?";
3043                         s = strrchr(p, '/');
3044                         if (s)
3045                                 p = s+1;
3046                         printk("%s%s[%lx+%lx]", prefix, p,
3047                                         vma->vm_start,
3048                                         vma->vm_end - vma->vm_start);
3049                         free_page((unsigned long)buf);
3050                 }
3051         }
3052         up_read(&mm->mmap_sem);
3053 }