/*
 *  mm/pgtable-generic.c
 *
 *  Generic pgtable methods declared in asm-generic/pgtable.h
 *
 *  Copyright (C) 2010  Linus Torvalds
 */

#include <linux/pagemap.h>
#include <asm/tlb.h>
#include <asm-generic/pgtable.h>

#ifndef __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
/*
 * Only sets the access flags (dirty, accessed), as well as write
 * permission. Furthermore, we know it always gets set to a "more
 * permissive" setting, which allows most architectures to optimize
 * this. We return whether the PTE actually changed, which in turn
 * instructs the caller to do things like update_mmu_cache.  This
 * used to be done in the caller, but sparc needs minor faults to
 * force that call on sun4c, so we changed this macro slightly.
 */
int ptep_set_access_flags(struct vm_area_struct *vma,
                          unsigned long address, pte_t *ptep,
                          pte_t entry, int dirty)
{
        int changed = !pte_same(*ptep, entry);
        if (changed) {
                set_pte_at(vma->vm_mm, address, ptep, entry);
                flush_tlb_fix_spurious_fault(vma, address);
        }
        return changed;
}
#endif
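
/*
 * Usage sketch (illustrative, not part of this file): a fault handler
 * only needs to touch the MMU cache when the PTE actually changed.
 * Roughly what handle_pte_fault() does for a present PTE:
 *
 *      entry = pte_mkyoung(entry);
 *      if (ptep_set_access_flags(vma, address, pte, entry,
 *                                flags & FAULT_FLAG_WRITE))
 *              update_mmu_cache(vma, address, pte);
 */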

#ifndef __HAVE_ARCH_PMDP_SET_ACCESS_FLAGS
int pmdp_set_access_flags(struct vm_area_struct *vma,
                          unsigned long address, pmd_t *pmdp,
                          pmd_t entry, int dirty)
{
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
        int changed = !pmd_same(*pmdp, entry);
        VM_BUG_ON(address & ~HPAGE_PMD_MASK);
        if (changed) {
                set_pmd_at(vma->vm_mm, address, pmdp, entry);
                flush_tlb_range(vma, address, address + HPAGE_PMD_SIZE);
        }
        return changed;
#else /* CONFIG_TRANSPARENT_HUGEPAGE */
        BUG();
        return 0;
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
}
#endif
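
/*
 * Hypothetical caller sketch: the transparent-hugepage fault path is
 * the PMD analogue of the PTE case above; compare huge_pmd_set_accessed():
 *
 *      entry = pmd_mkyoung(orig_pmd);
 *      if (pmdp_set_access_flags(vma, haddr, pmd, entry, dirty))
 *              update_mmu_cache_pmd(vma, address, pmd);
 */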

#ifndef __HAVE_ARCH_PTEP_CLEAR_YOUNG_FLUSH
int ptep_clear_flush_young(struct vm_area_struct *vma,
                           unsigned long address, pte_t *ptep)
{
        int young;
        young = ptep_test_and_clear_young(vma, address, ptep);
        if (young)
                flush_tlb_page(vma, address);
        return young;
}
#endif
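
/*
 * Typical consumer (simplified from rmap's page_referenced_one()):
 * page reclaim ages a mapping by clearing "young" and counting the
 * page as referenced if the bit was set:
 *
 *      if (ptep_clear_flush_young_notify(vma, address, pte))
 *              referenced++;
 */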

#ifndef __HAVE_ARCH_PMDP_CLEAR_YOUNG_FLUSH
int pmdp_clear_flush_young(struct vm_area_struct *vma,
                           unsigned long address, pmd_t *pmdp)
{
        int young;
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
        VM_BUG_ON(address & ~HPAGE_PMD_MASK);
#else
        BUG();
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
        young = pmdp_test_and_clear_young(vma, address, pmdp);
        if (young)
                flush_tlb_range(vma, address, address + HPAGE_PMD_SIZE);
        return young;
}
#endif
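
/*
 * The THP analogue of the sketch above: for a huge page,
 * page_referenced_one() ages the mapping through the pmd instead:
 *
 *      if (pmdp_clear_flush_young_notify(vma, address, pmd))
 *              referenced++;
 */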

#ifndef __HAVE_ARCH_PTEP_CLEAR_FLUSH
pte_t ptep_clear_flush(struct vm_area_struct *vma, unsigned long address,
                       pte_t *ptep)
{
        struct mm_struct *mm = vma->vm_mm;
        pte_t pte;
        pte = ptep_get_and_clear(mm, address, ptep);
        if (pte_accessible(mm, pte))
                flush_tlb_page(vma, address);
        return pte;
}
#endif
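
/*
 * Typical use (simplified from try_to_unmap_one()): tear the mapping
 * down, then propagate hardware bits from the returned PTE:
 *
 *      pteval = ptep_clear_flush(vma, address, pte);
 *      if (pte_dirty(pteval))
 *              set_page_dirty(page);
 */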

#ifndef __HAVE_ARCH_PMDP_CLEAR_FLUSH
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
pmd_t pmdp_clear_flush(struct vm_area_struct *vma, unsigned long address,
                       pmd_t *pmdp)
{
        pmd_t pmd;
        VM_BUG_ON(address & ~HPAGE_PMD_MASK);
        pmd = pmdp_get_and_clear(vma->vm_mm, address, pmdp);
        flush_tlb_range(vma, address, address + HPAGE_PMD_SIZE);
        return pmd;
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
#endif
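
/*
 * Illustrative caller: khugepaged's collapse_huge_page() clears the
 * pmd with a full flush so that no huge TLB entry survives and
 * gup_fast() can no longer walk through this pmd:
 *
 *      _pmd = pmdp_clear_flush(vma, address, pmd);
 */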

#ifndef __HAVE_ARCH_PMDP_SPLITTING_FLUSH
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
void pmdp_splitting_flush(struct vm_area_struct *vma, unsigned long address,
                          pmd_t *pmdp)
{
        pmd_t pmd = pmd_mksplitting(*pmdp);
        VM_BUG_ON(address & ~HPAGE_PMD_MASK);
        set_pmd_at(vma->vm_mm, address, pmdp, pmd);
        /* tlb flush only to serialize against gup-fast */
        flush_tlb_range(vma, address, address + HPAGE_PMD_SIZE);
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
#endif
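
/*
 * Why a TLB flush serializes against gup-fast: gup_fast() walks the
 * page tables with interrupts disabled, and on architectures where a
 * remote TLB flush is delivered by IPI the flush above cannot return
 * until every such critical section has finished.  Any later gup-fast
 * walk then sees the splitting bit and backs off to the slow path,
 * along these lines:
 *
 *      pmd = *pmdp;
 *      if (pmd_none(pmd) || pmd_trans_splitting(pmd))
 *              return 0;       // caller falls back to get_user_pages()
 */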

#ifndef __HAVE_ARCH_PGTABLE_DEPOSIT
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
void pgtable_trans_huge_deposit(struct mm_struct *mm, pgtable_t pgtable)
{
        assert_spin_locked(&mm->page_table_lock);

        /* FIFO */
        if (!mm->pmd_huge_pte)
                INIT_LIST_HEAD(&pgtable->lru);
        else
                list_add(&pgtable->lru, &mm->pmd_huge_pte->lru);
        mm->pmd_huge_pte = pgtable;
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
#endif
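
/*
 * Sketch of the usual pairing (simplified from
 * __do_huge_pmd_anonymous_page()): a pte page table is preallocated
 * and deposited when the huge pmd is installed, so that a later split
 * cannot fail for lack of memory:
 *
 *      pgtable = pte_alloc_one(mm, haddr);
 *      ...
 *      spin_lock(&mm->page_table_lock);
 *      set_pmd_at(mm, haddr, pmd, entry);
 *      pgtable_trans_huge_deposit(mm, pgtable);
 *      spin_unlock(&mm->page_table_lock);
 */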

#ifndef __HAVE_ARCH_PGTABLE_WITHDRAW
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
/* no "address" argument, so this destroys the page coloring of some architectures */
pgtable_t pgtable_trans_huge_withdraw(struct mm_struct *mm)
{
        pgtable_t pgtable;

        assert_spin_locked(&mm->page_table_lock);

        /* FIFO */
        pgtable = mm->pmd_huge_pte;
        if (list_empty(&pgtable->lru))
                mm->pmd_huge_pte = NULL;
        else {
                mm->pmd_huge_pte = list_entry(pgtable->lru.next,
                                              struct page, lru);
                list_del(&pgtable->lru);
        }
        return pgtable;
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
#endif
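
/*
 * The withdraw side of the pair (roughly what zap_huge_pmd() does):
 * when the huge pmd is torn down, the deposited page table is taken
 * back and freed:
 *
 *      pgtable = pgtable_trans_huge_withdraw(tlb->mm);
 *      pte_free(tlb->mm, pgtable);
 */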

#ifndef __HAVE_ARCH_PMDP_INVALIDATE
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
void pmdp_invalidate(struct vm_area_struct *vma, unsigned long address,
                     pmd_t *pmdp)
{
        pmd_t entry = *pmdp;

        /*
         * Clear the NUMA hinting bit first, so the value written back
         * is a plain not-present pmd rather than one that could still
         * be mistaken for a NUMA hinting entry.
         */
        if (pmd_numa(entry))
                entry = pmd_mknonnuma(entry);
        set_pmd_at(vma->vm_mm, address, pmdp, pmd_mknotpresent(entry));
        flush_tlb_range(vma, address, address + HPAGE_PMD_SIZE);
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
#endif
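
/*
 * Typical use (simplified from __split_huge_page_map()): make the huge
 * pmd non-present so no CPU can keep relying on a stale huge TLB
 * entry, then repopulate it with the regular page table built for the
 * split:
 *
 *      pmdp_invalidate(vma, address, pmd);
 *      pmd_populate(mm, pmd, pgtable);
 */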