mm: Account for a THP NUMA hinting update as one PTE update
/*
 *  mm/pgtable-generic.c
 *
 *  Generic pgtable methods declared in asm-generic/pgtable.h
 *
 *  Copyright (C) 2010  Linus Torvalds
 */

#include <linux/pagemap.h>
#include <asm/tlb.h>
#include <asm-generic/pgtable.h>

#ifndef __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
/*
 * Only sets the access flags (dirty, accessed), as well as write
 * permission. Furthermore, we know it always gets set to a "more
 * permissive" setting, which allows most architectures to optimize
 * this. We return whether the PTE actually changed, which in turn
 * instructs the caller to do things like update_mmu_cache.  This
 * used to be done in the caller, but sparc needs minor faults to
 * force that call on sun4c so we changed this macro slightly.
 */
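/*
 * Typical use (a sketch of the generic page fault path, not code from
 * this file): after servicing a fault the caller marks the PTE young
 * and only refreshes the MMU cache when the entry really changed:
 *
 *	entry = pte_mkyoung(entry);
 *	if (ptep_set_access_flags(vma, address, ptep, entry, write))
 *		update_mmu_cache(vma, address, ptep);
 */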
int ptep_set_access_flags(struct vm_area_struct *vma,
			  unsigned long address, pte_t *ptep,
			  pte_t entry, int dirty)
{
	int changed = !pte_same(*ptep, entry);
	if (changed) {
		set_pte_at(vma->vm_mm, address, ptep, entry);
		flush_tlb_fix_spurious_fault(vma, address);
	}
	return changed;
}
#endif

#ifndef __HAVE_ARCH_PMDP_SET_ACCESS_FLAGS
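/*
 * PMD-level counterpart of ptep_set_access_flags(), used by the
 * transparent hugepage fault paths.  Because a single pmd maps
 * HPAGE_PMD_SIZE worth of address space, a whole-range flush is
 * needed rather than a single-page one.
 */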
int pmdp_set_access_flags(struct vm_area_struct *vma,
			  unsigned long address, pmd_t *pmdp,
			  pmd_t entry, int dirty)
{
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
	int changed = !pmd_same(*pmdp, entry);
	VM_BUG_ON(address & ~HPAGE_PMD_MASK);
	if (changed) {
		set_pmd_at(vma->vm_mm, address, pmdp, entry);
		flush_tlb_range(vma, address, address + HPAGE_PMD_SIZE);
	}
	return changed;
#else /* CONFIG_TRANSPARENT_HUGEPAGE */
	BUG();
	return 0;
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
}
#endif

#ifndef __HAVE_ARCH_PTEP_CLEAR_YOUNG_FLUSH
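/*
 * Clear the "accessed" bit and flush the TLB entry if it was set.
 * Page aging (e.g. page_referenced() in the rmap code) relies on this
 * so that the next access to the page sets the young bit again.
 */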
int ptep_clear_flush_young(struct vm_area_struct *vma,
			   unsigned long address, pte_t *ptep)
{
	int young;
	young = ptep_test_and_clear_young(vma, address, ptep);
	if (young)
		flush_tlb_page(vma, address);
	return young;
}
#endif

#ifndef __HAVE_ARCH_PMDP_CLEAR_YOUNG_FLUSH
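/*
 * PMD-level counterpart of ptep_clear_flush_young() for transparent
 * hugepages.  It is only legitimately reachable with THP enabled,
 * hence the BUG() on the !CONFIG_TRANSPARENT_HUGEPAGE branch below.
 */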
int pmdp_clear_flush_young(struct vm_area_struct *vma,
			   unsigned long address, pmd_t *pmdp)
{
	int young;
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
	VM_BUG_ON(address & ~HPAGE_PMD_MASK);
#else
	BUG();
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
	young = pmdp_test_and_clear_young(vma, address, pmdp);
	if (young)
		flush_tlb_range(vma, address, address + HPAGE_PMD_SIZE);
	return young;
}
#endif

#ifndef __HAVE_ARCH_PTEP_CLEAR_FLUSH
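/*
 * Atomically clear a PTE and flush its TLB entry, returning the old
 * value; used for example when unmapping pages in the rmap code.  The
 * pte_accessible() check lets architectures skip the flush for entries
 * that could never have been cached in the TLB.
 */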
pte_t ptep_clear_flush(struct vm_area_struct *vma, unsigned long address,
		       pte_t *ptep)
{
	pte_t pte;
	pte = ptep_get_and_clear(vma->vm_mm, address, ptep);
	if (pte_accessible(pte))
		flush_tlb_page(vma, address);
	return pte;
}
#endif

#ifndef __HAVE_ARCH_PMDP_CLEAR_FLUSH
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
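/*
 * PMD-level counterpart of ptep_clear_flush(): atomically clear the
 * pmd and flush the whole huge-page range, returning the old value.
 * Used by THP paths that must tear down a huge mapping (e.g. write
 * faults that copy a huge page).
 */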
pmd_t pmdp_clear_flush(struct vm_area_struct *vma, unsigned long address,
		       pmd_t *pmdp)
{
	pmd_t pmd;
	VM_BUG_ON(address & ~HPAGE_PMD_MASK);
	pmd = pmdp_get_and_clear(vma->vm_mm, address, pmdp);
	flush_tlb_range(vma, address, address + HPAGE_PMD_SIZE);
	return pmd;
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
#endif

#ifndef __HAVE_ARCH_PMDP_SPLITTING_FLUSH
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
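/*
 * Mark a huge pmd as being split.  gup_fast() runs with interrupts
 * disabled and backs off when it sees the splitting bit; on
 * architectures where flush_tlb_range() sends IPIs, the flush below
 * therefore also waits out any in-flight gup_fast() before the split
 * proceeds (see the comment in the function body).
 */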
void pmdp_splitting_flush(struct vm_area_struct *vma, unsigned long address,
			  pmd_t *pmdp)
{
	pmd_t pmd = pmd_mksplitting(*pmdp);
	VM_BUG_ON(address & ~HPAGE_PMD_MASK);
	set_pmd_at(vma->vm_mm, address, pmdp, pmd);
	/* tlb flush only to serialize against gup-fast */
	flush_tlb_range(vma, address, address + HPAGE_PMD_SIZE);
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
#endif

#ifndef __HAVE_ARCH_PGTABLE_DEPOSIT
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
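/*
 * Stash a preallocated page table for later use under a huge pmd.  The
 * THP fault and collapse paths deposit a pte page here so that a later
 * split is guaranteed to find one without having to allocate; the
 * withdraw side below pops them back off in FIFO order.
 */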
void pgtable_trans_huge_deposit(struct mm_struct *mm, pgtable_t pgtable)
{
	assert_spin_locked(&mm->page_table_lock);

	/* FIFO */
	if (!mm->pmd_huge_pte)
		INIT_LIST_HEAD(&pgtable->lru);
	else
		list_add(&pgtable->lru, &mm->pmd_huge_pte->lru);
	mm->pmd_huge_pte = pgtable;
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
#endif

#ifndef __HAVE_ARCH_PGTABLE_WITHDRAW
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
/* no "address" argument, so this destroys the page coloring of some archs */
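/*
 * Pop a previously deposited page table off the per-mm list.  The
 * caller must hold page_table_lock (asserted below) and must balance
 * this against an earlier pgtable_trans_huge_deposit().
 */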
pgtable_t pgtable_trans_huge_withdraw(struct mm_struct *mm)
{
	pgtable_t pgtable;

	assert_spin_locked(&mm->page_table_lock);

	/* FIFO */
	pgtable = mm->pmd_huge_pte;
	if (list_empty(&pgtable->lru))
		mm->pmd_huge_pte = NULL;
	else {
		mm->pmd_huge_pte = list_entry(pgtable->lru.next,
					      struct page, lru);
		list_del(&pgtable->lru);
	}
	return pgtable;
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
#endif

#ifndef __HAVE_ARCH_PMDP_INVALIDATE
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
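/*
 * Transiently mark a huge pmd not-present and flush the TLB, e.g. while
 * the THP split code rewrites it, so that no CPU keeps using a stale
 * huge translation.  Unlike pmdp_clear_flush() this leaves the (now
 * non-present) entry in place rather than clearing it.
 */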
void pmdp_invalidate(struct vm_area_struct *vma, unsigned long address,
		     pmd_t *pmdp)
{
	set_pmd_at(vma->vm_mm, address, pmdp, pmd_mknotpresent(*pmdp));
	flush_tlb_range(vma, address, address + HPAGE_PMD_SIZE);
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
#endif