/*
 * arch/x86/mm/hugetlbpage.c
 *
 * IA-32 Huge TLB Page Support for Kernel.
 *
 * Copyright (C) 2002, Rohit Seth <rohit.seth@intel.com>
 */
#include <linux/init.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/pagemap.h>
#include <linux/slab.h>
#include <linux/err.h>
#include <linux/sysctl.h>
#include <asm/mman.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>
#include <asm/pgalloc.h>

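/*
 * Compute the address in svma that maps the same file offset (idx) as addr
 * does in vma.  Sharing a pmd page is only possible when the pmd slot, the
 * vm_flags and the PUD-sized alignment all match and the whole PUD range
 * lies inside svma; otherwise return 0.
 */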
static unsigned long page_table_shareable(struct vm_area_struct *svma,
                                struct vm_area_struct *vma,
                                unsigned long addr, pgoff_t idx)
{
        unsigned long saddr = ((idx - svma->vm_pgoff) << PAGE_SHIFT) +
                                svma->vm_start;
        unsigned long sbase = saddr & PUD_MASK;
        unsigned long s_end = sbase + PUD_SIZE;

        /*
         * Match the virtual addresses, permissions and the alignment of the
         * page table page.
         */
        if (pmd_index(addr) != pmd_index(saddr) ||
            vma->vm_flags != svma->vm_flags ||
            sbase < svma->vm_start || svma->vm_end < s_end)
                return 0;

        return saddr;
}

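/*
 * A vma may share pmd pages only if it is a VM_MAYSHARE mapping that covers
 * the entire PUD-aligned range containing addr.
 */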
static int vma_shareable(struct vm_area_struct *vma, unsigned long addr)
{
        unsigned long base = addr & PUD_MASK;
        unsigned long end = base + PUD_SIZE;

        /*
         * check on proper vm_flags and page table alignment
         */
        if (vma->vm_flags & VM_MAYSHARE &&
            vma->vm_start <= base && end <= vma->vm_end)
                return 1;
        return 0;
}

/*
 * Search for a shareable pmd page table page for this hugetlb mapping.
 * Walk the other vmas mapping the same file region; if one of them already
 * has a pmd page covering the same PUD-sized range, take a reference on it
 * and install it in this mm's pud.  The reference is dropped again in
 * huge_pmd_unshare().
 */
static void huge_pmd_share(struct mm_struct *mm, unsigned long addr, pud_t *pud)
{
        struct vm_area_struct *vma = find_vma(mm, addr);
        struct address_space *mapping = vma->vm_file->f_mapping;
        pgoff_t idx = ((addr - vma->vm_start) >> PAGE_SHIFT) +
                        vma->vm_pgoff;
        struct prio_tree_iter iter;
        struct vm_area_struct *svma;
        unsigned long saddr;
        pte_t *spte = NULL;

        if (!vma_shareable(vma, addr))
                return;

        spin_lock(&mapping->i_mmap_lock);
        vma_prio_tree_foreach(svma, &iter, &mapping->i_mmap, idx, idx) {
                if (svma == vma)
                        continue;

                saddr = page_table_shareable(svma, vma, addr, idx);
                if (saddr) {
                        spte = huge_pte_offset(svma->vm_mm, saddr);
                        if (spte) {
                                get_page(virt_to_page(spte));
                                break;
                        }
                }
        }

        if (!spte)
                goto out;

        spin_lock(&mm->page_table_lock);
        if (pud_none(*pud))
                pud_populate(mm, pud, (pmd_t *)((unsigned long)spte & PAGE_MASK));
        else
                put_page(virt_to_page(spte));
        spin_unlock(&mm->page_table_lock);
out:
        spin_unlock(&mapping->i_mmap_lock);
}

/*
 * Unmap a huge page backed by a shared pte (pmd) page.
 *
 * The hugetlb pte page is refcounted at the time of mapping.  If the pte
 * page is shared (indicated by page_count > 1), unmapping is done by
 * clearing the pud and decrementing the refcount.  If count == 1, the pte
 * page is not shared.
 *
 * Called with vma->vm_mm->page_table_lock held.
 *
 * Returns: 1 successfully unmapped a shared pte page
 *          0 the underlying pte page is not shared, or it is the last user
 */
int huge_pmd_unshare(struct mm_struct *mm, unsigned long *addr, pte_t *ptep)
{
        pgd_t *pgd = pgd_offset(mm, *addr);
        pud_t *pud = pud_offset(pgd, *addr);

        BUG_ON(page_count(virt_to_page(ptep)) == 0);
        if (page_count(virt_to_page(ptep)) == 1)
                return 0;

        pud_clear(pud);
        put_page(virt_to_page(ptep));
        *addr = ALIGN(*addr, HPAGE_SIZE * PTRS_PER_PTE) - HPAGE_SIZE;
        return 1;
}

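/*
 * Find or allocate the pmd entry that acts as the huge pte for addr.  If the
 * pud is still empty, first try to share an existing pmd page with another
 * mapping of the same file; otherwise fall back to pmd_alloc().
 */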
pte_t *huge_pte_alloc(struct mm_struct *mm, unsigned long addr)
{
        pgd_t *pgd;
        pud_t *pud;
        pte_t *pte = NULL;

        pgd = pgd_offset(mm, addr);
        pud = pud_alloc(mm, pgd, addr);
        if (pud) {
                if (pud_none(*pud))
                        huge_pmd_share(mm, addr, pud);
                pte = (pte_t *) pmd_alloc(mm, pud, addr);
        }
        BUG_ON(pte && !pte_none(*pte) && !pte_huge(*pte));

        return pte;
}

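/*
 * Look up the pmd entry acting as the huge pte for addr without allocating
 * anything; returns NULL if no pgd/pud entry is present.
 */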
pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr)
{
        pgd_t *pgd;
        pud_t *pud;
        pmd_t *pmd = NULL;

        pgd = pgd_offset(mm, addr);
        if (pgd_present(*pgd)) {
                pud = pud_offset(pgd, addr);
                if (pud_present(*pud))
                        pmd = pmd_offset(pud, addr);
        }
        return (pte_t *) pmd;
}

#if 0   /* This is just for testing */
struct page *
follow_huge_addr(struct mm_struct *mm, unsigned long address, int write)
{
        unsigned long vpfn = address >> PAGE_SHIFT;
        pte_t *pte;
        struct page *page;
        struct vm_area_struct *vma;

        vma = find_vma(mm, address);
        if (!vma || !is_vm_hugetlb_page(vma))
                return ERR_PTR(-EINVAL);

        pte = huge_pte_offset(mm, address);

        /* hugetlb should be locked, and hence, prefaulted */
        WARN_ON(!pte || pte_none(*pte));

        /* index of the 4k subpage within the huge page */
        page = &pte_page(*pte)[vpfn % (HPAGE_SIZE/PAGE_SIZE)];

        WARN_ON(!PageHead(page));

        return page;
}

int pmd_huge(pmd_t pmd)
{
        return 0;
}

struct page *
follow_huge_pmd(struct mm_struct *mm, unsigned long address,
                pmd_t *pmd, int write)
{
        return NULL;
}

#else

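/*
 * Not used on x86: returning -EINVAL makes the generic follow_page() fall
 * through to the normal page table walk, where huge mappings are handled
 * via pmd_huge() and follow_huge_pmd().
 */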
struct page *
follow_huge_addr(struct mm_struct *mm, unsigned long address, int write)
{
        return ERR_PTR(-EINVAL);
}

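/* A pmd maps a huge page when the PSE (page size extension) bit is set. */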
int pmd_huge(pmd_t pmd)
{
        return !!(pmd_val(pmd) & _PAGE_PSE);
}

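/*
 * Given a pmd that maps a huge page, return the struct page of the 4k
 * subpage that contains address.
 */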
struct page *
follow_huge_pmd(struct mm_struct *mm, unsigned long address,
                pmd_t *pmd, int write)
{
        struct page *page;

        page = pte_page(*(pte_t *)pmd);
        if (page)
                page += ((address & ~HPAGE_MASK) >> PAGE_SHIFT);
        return page;
}
#endif

/* x86_64 also uses this file */

#ifdef HAVE_ARCH_HUGETLB_UNMAPPED_AREA
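/*
 * Bottom-up search: scan upwards from free_area_cache (or TASK_UNMAPPED_BASE)
 * for the first HPAGE_SIZE-aligned hole that is large enough, using the
 * cached_hole_size hint to skip over holes that are known to be too small.
 */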
static unsigned long hugetlb_get_unmapped_area_bottomup(struct file *file,
                unsigned long addr, unsigned long len,
                unsigned long pgoff, unsigned long flags)
{
        struct mm_struct *mm = current->mm;
        struct vm_area_struct *vma;
        unsigned long start_addr;

        if (len > mm->cached_hole_size) {
                start_addr = mm->free_area_cache;
        } else {
                start_addr = TASK_UNMAPPED_BASE;
                mm->cached_hole_size = 0;
        }

full_search:
        addr = ALIGN(start_addr, HPAGE_SIZE);

        for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
                /* At this point:  (!vma || addr < vma->vm_end). */
                if (TASK_SIZE - len < addr) {
                        /*
                         * Start a new search - just in case we missed
                         * some holes.
                         */
                        if (start_addr != TASK_UNMAPPED_BASE) {
                                start_addr = TASK_UNMAPPED_BASE;
                                mm->cached_hole_size = 0;
                                goto full_search;
                        }
                        return -ENOMEM;
                }
                if (!vma || addr + len <= vma->vm_start) {
                        mm->free_area_cache = addr + len;
                        return addr;
                }
                if (addr + mm->cached_hole_size < vma->vm_start)
                        mm->cached_hole_size = vma->vm_start - addr;
                addr = ALIGN(vma->vm_end, HPAGE_SIZE);
        }
}

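/*
 * Top-down search: start just below mmap_base and walk downwards looking for
 * an HPAGE_SIZE-aligned hole between existing vmas.  If the cached hint
 * leaves no room, retry from the base; if that also fails, fall back to the
 * bottom-up allocator and then restore the top-down cache state.
 */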
static unsigned long hugetlb_get_unmapped_area_topdown(struct file *file,
                unsigned long addr0, unsigned long len,
                unsigned long pgoff, unsigned long flags)
{
        struct mm_struct *mm = current->mm;
        struct vm_area_struct *vma, *prev_vma;
        unsigned long base = mm->mmap_base, addr = addr0;
        unsigned long largest_hole = mm->cached_hole_size;
        int first_time = 1;

        /* don't allow allocations above current base */
        if (mm->free_area_cache > base)
                mm->free_area_cache = base;

        if (len <= largest_hole) {
                largest_hole = 0;
                mm->free_area_cache = base;
        }
try_again:
        /* make sure it can fit in the remaining address space */
        if (mm->free_area_cache < len)
                goto fail;

        /* either no address requested or it can't fit in the requested hole */
        addr = (mm->free_area_cache - len) & HPAGE_MASK;
        do {
                /*
                 * Lookup failure means no vma is above this address,
                 * i.e. return with success:
                 */
                if (!(vma = find_vma_prev(mm, addr, &prev_vma)))
                        return addr;

                /*
                 * new region fits between prev_vma->vm_end and
                 * vma->vm_start, use it:
                 */
                if (addr + len <= vma->vm_start &&
                            (!prev_vma || (addr >= prev_vma->vm_end))) {
                        /* remember the address as a hint for next time */
                        mm->cached_hole_size = largest_hole;
                        return (mm->free_area_cache = addr);
                } else {
                        /* pull free_area_cache down to the first hole */
                        if (mm->free_area_cache == vma->vm_end) {
                                mm->free_area_cache = vma->vm_start;
                                mm->cached_hole_size = largest_hole;
                        }
                }

                /* remember the largest hole we saw so far */
                if (addr + largest_hole < vma->vm_start)
                        largest_hole = vma->vm_start - addr;

                /* try just below the current vma->vm_start */
                addr = (vma->vm_start - len) & HPAGE_MASK;
        } while (len <= vma->vm_start);

fail:
        /*
         * if the hint left us with no space for the requested
         * mapping then try again:
         */
        if (first_time) {
                mm->free_area_cache = base;
                largest_hole = 0;
                first_time = 0;
                goto try_again;
        }
        /*
         * A failed mmap() very likely causes application failure,
         * so fall back to the bottom-up function here.  This scenario
         * can happen with large stack limits and large mmap()
         * allocations.
         */
        mm->free_area_cache = TASK_UNMAPPED_BASE;
        mm->cached_hole_size = ~0UL;
        addr = hugetlb_get_unmapped_area_bottomup(file, addr0,
                        len, pgoff, flags);

        /*
         * Restore the topdown base:
         */
        mm->free_area_cache = base;
        mm->cached_hole_size = ~0UL;

        return addr;
}

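/*
 * get_unmapped_area hook used by hugetlbfs files: validate the length,
 * honour MAP_FIXED and explicit address hints, then dispatch to the
 * bottom-up or top-down helper so huge mappings follow the mm's normal
 * mmap layout.
 */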
unsigned long
hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
                unsigned long len, unsigned long pgoff, unsigned long flags)
{
        struct mm_struct *mm = current->mm;
        struct vm_area_struct *vma;

        if (len & ~HPAGE_MASK)
                return -EINVAL;
        if (len > TASK_SIZE)
                return -ENOMEM;

        if (flags & MAP_FIXED) {
                if (prepare_hugepage_range(addr, len))
                        return -EINVAL;
                return addr;
        }

        if (addr) {
                addr = ALIGN(addr, HPAGE_SIZE);
                vma = find_vma(mm, addr);
                if (TASK_SIZE - len >= addr &&
                    (!vma || addr + len <= vma->vm_start))
                        return addr;
        }
        if (mm->get_unmapped_area == arch_get_unmapped_area)
                return hugetlb_get_unmapped_area_bottomup(file, addr, len,
                                pgoff, flags);
        else
                return hugetlb_get_unmapped_area_topdown(file, addr, len,
                                pgoff, flags);
}

#endif /* HAVE_ARCH_HUGETLB_UNMAPPED_AREA */