arch/i386/mm/hugetlbpage.c
/*
 * IA-32 Huge TLB Page Support for Kernel.
 *
 * Copyright (C) 2002, Rohit Seth <rohit.seth@intel.com>
 */

#include <linux/config.h>
#include <linux/init.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/pagemap.h>
#include <linux/smp_lock.h>
#include <linux/slab.h>
#include <linux/err.h>
#include <linux/sysctl.h>
#include <asm/mman.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>

pte_t *huge_pte_alloc(struct mm_struct *mm, unsigned long addr)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd = NULL;

	pgd = pgd_offset(mm, addr);
	pud = pud_alloc(mm, pgd, addr);
	if (pud)	/* pud_alloc() can fail; don't hand NULL to pmd_alloc() */
		pmd = pmd_alloc(mm, pud, addr);
	return (pte_t *) pmd;
}

pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd = NULL;

	pgd = pgd_offset(mm, addr);
	pud = pud_offset(pgd, addr);
	pmd = pmd_offset(pud, addr);
	return (pte_t *) pmd;
}
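
/*
 * Illustrative sketch (not part of the build): how the walk above lands
 * on a huge mapping.  On non-PAE i386 the pud and pmd levels fold into
 * the pgd, so a 4 MB huge page is a single entry in the 1024-entry page
 * directory, and the "pmd" returned really is the top-level slot for
 * the address.  The constants and the example address below are
 * assumptions for the non-PAE case (with PAE, HPAGE_SHIFT is 21 and
 * the index math changes accordingly).
 */
#if 0	/* example only, never compiled */
#include <stdio.h>

#define EX_PGDIR_SHIFT	22	/* each non-PAE pgd entry maps 4 MB */
#define EX_PTRS_PER_PGD	1024

int main(void)
{
	unsigned long addr = 0x40000000UL;	/* hypothetical address */
	unsigned long slot = (addr >> EX_PGDIR_SHIFT) & (EX_PTRS_PER_PGD - 1);

	/* A 4 MB PSE mapping occupies exactly this one directory slot. */
	printf("addr %#lx -> pgd slot %lu\n", addr, slot);
	return 0;
}
#endif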

/*
 * This function checks for proper alignment of input addr and len parameters.
 */
int is_aligned_hugepage_range(unsigned long addr, unsigned long len)
{
	if (len & ~HPAGE_MASK)
		return -EINVAL;
	if (addr & ~HPAGE_MASK)
		return -EINVAL;
	return 0;
}
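
/*
 * A minimal sketch of the mask arithmetic used above, assuming the
 * non-PAE values HPAGE_SHIFT = 22 and HPAGE_SIZE = 4 MB: any address
 * or length with bits set below HPAGE_SHIFT fails the check.
 */
#if 0	/* example only, never compiled */
#include <stdio.h>

#define EX_HPAGE_SIZE	(1UL << 22)
#define EX_HPAGE_MASK	(~(EX_HPAGE_SIZE - 1))

static int ex_is_aligned(unsigned long addr, unsigned long len)
{
	return !(len & ~EX_HPAGE_MASK) && !(addr & ~EX_HPAGE_MASK);
}

int main(void)
{
	printf("%d\n", ex_is_aligned(0x00800000UL, 0x00400000UL));	/* 1 */
	printf("%d\n", ex_is_aligned(0x00801000UL, 0x00400000UL));	/* 0 */
	return 0;
}
#endif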

#if 0	/* This is just for testing */
struct page *
follow_huge_addr(struct mm_struct *mm, unsigned long address, int write)
{
	unsigned long vpfn = address >> PAGE_SHIFT;
	struct page *page;
	struct vm_area_struct *vma;
	pte_t *pte;

	vma = find_vma(mm, address);
	if (!vma || !is_vm_hugetlb_page(vma))
		return ERR_PTR(-EINVAL);

	pte = huge_pte_offset(mm, address);

	/* hugetlb should be locked, and hence, prefaulted */
	WARN_ON(!pte || pte_none(*pte));

	page = &pte_page(*pte)[vpfn % (HPAGE_SIZE/PAGE_SIZE)];

	WARN_ON(!PageCompound(page));

	return page;
}

int pmd_huge(pmd_t pmd)
{
	return 0;
}

struct page *
follow_huge_pmd(struct mm_struct *mm, unsigned long address,
		pmd_t *pmd, int write)
{
	return NULL;
}

#else

struct page *
follow_huge_addr(struct mm_struct *mm, unsigned long address, int write)
{
	return ERR_PTR(-EINVAL);
}

int pmd_huge(pmd_t pmd)
{
	return !!(pmd_val(pmd) & _PAGE_PSE);
}

struct page *
follow_huge_pmd(struct mm_struct *mm, unsigned long address,
		pmd_t *pmd, int write)
{
	struct page *page;

	page = pte_page(*(pte_t *)pmd);
	if (page)
		page += ((address & ~HPAGE_MASK) >> PAGE_SHIFT);
	return page;
}
#endif
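
/*
 * Sketch of what pmd_huge() tests above: _PAGE_PSE is bit 7 (0x080) of
 * an x86 directory entry, and when it is set the entry maps a large
 * page directly instead of pointing at a page table.  Simplified model
 * treating the entry as a raw unsigned long; the EX_ names are
 * invented for the example.
 */
#if 0	/* example only, never compiled */
#include <stdio.h>

#define EX_PAGE_PRESENT	0x001UL
#define EX_PAGE_PSE	0x080UL

static int ex_pmd_huge(unsigned long pmdval)
{
	return !!(pmdval & EX_PAGE_PSE);
}

int main(void)
{
	printf("%d\n", ex_pmd_huge(EX_PAGE_PRESENT | EX_PAGE_PSE));	/* 1 */
	printf("%d\n", ex_pmd_huge(EX_PAGE_PRESENT));			/* 0 */
	return 0;
}
#endif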

void hugetlb_clean_stale_pgtable(pte_t *pte)
{
	pmd_t *pmd = (pmd_t *) pte;
	struct page *page;

	page = pmd_page(*pmd);
	pmd_clear(pmd);
	dec_page_state(nr_page_table_pages);
	page_cache_release(page);
}

/* x86_64 also uses this file */

#ifdef HAVE_ARCH_HUGETLB_UNMAPPED_AREA
static unsigned long hugetlb_get_unmapped_area_bottomup(struct file *file,
		unsigned long addr, unsigned long len,
		unsigned long pgoff, unsigned long flags)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	unsigned long start_addr;

	if (len > mm->cached_hole_size) {
		start_addr = mm->free_area_cache;
	} else {
		start_addr = TASK_UNMAPPED_BASE;
		mm->cached_hole_size = 0;
	}

full_search:
	addr = ALIGN(start_addr, HPAGE_SIZE);

	for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
		/* At this point:  (!vma || addr < vma->vm_end). */
		if (TASK_SIZE - len < addr) {
			/*
			 * Start a new search - just in case we missed
			 * some holes.
			 */
			if (start_addr != TASK_UNMAPPED_BASE) {
				start_addr = TASK_UNMAPPED_BASE;
				mm->cached_hole_size = 0;
				goto full_search;
			}
			return -ENOMEM;
		}
		if (!vma || addr + len <= vma->vm_start) {
			mm->free_area_cache = addr + len;
			return addr;
		}
		if (addr + mm->cached_hole_size < vma->vm_start)
			mm->cached_hole_size = vma->vm_start - addr;
		addr = ALIGN(vma->vm_end, HPAGE_SIZE);
	}
}
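
/*
 * The bottom-up walk depends on ALIGN() rounding each candidate up to
 * the next huge-page boundary.  A minimal sketch using the kernel's
 * ALIGN() arithmetic and the assumed non-PAE 4 MB HPAGE_SIZE:
 */
#if 0	/* example only, never compiled */
#include <stdio.h>

#define EX_ALIGN(x, a)	(((x) + (a) - 1) & ~((a) - 1))
#define EX_HPAGE_SIZE	(1UL << 22)

int main(void)
{
	/* An address just past a boundary rounds up to the next one. */
	printf("%#lx\n", EX_ALIGN(0x00400001UL, EX_HPAGE_SIZE)); /* 0x800000 */
	/* An already-aligned address is left unchanged. */
	printf("%#lx\n", EX_ALIGN(0x00800000UL, EX_HPAGE_SIZE)); /* 0x800000 */
	return 0;
}
#endif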

static unsigned long hugetlb_get_unmapped_area_topdown(struct file *file,
		unsigned long addr0, unsigned long len,
		unsigned long pgoff, unsigned long flags)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma, *prev_vma;
	unsigned long base = mm->mmap_base, addr = addr0;
	unsigned long largest_hole = mm->cached_hole_size;
	int first_time = 1;

	/* don't allow allocations above current base */
	if (mm->free_area_cache > base)
		mm->free_area_cache = base;

	if (len <= largest_hole) {
		largest_hole = 0;
		mm->free_area_cache = base;
	}
try_again:
	/* make sure it can fit in the remaining address space */
	if (mm->free_area_cache < len)
		goto fail;

	/* either no address requested or can't fit in requested address hole */
	addr = (mm->free_area_cache - len) & HPAGE_MASK;
	do {
		/*
		 * Lookup failure means no vma is above this address,
		 * i.e. return with success:
		 */
		if (!(vma = find_vma_prev(mm, addr, &prev_vma)))
			return addr;

		/*
		 * new region fits between prev_vma->vm_end and
		 * vma->vm_start, use it:
		 */
		if (addr + len <= vma->vm_start &&
		    (!prev_vma || (addr >= prev_vma->vm_end))) {
			/* remember the address as a hint for next time */
			mm->cached_hole_size = largest_hole;
			return (mm->free_area_cache = addr);
		} else {
			/* pull free_area_cache down to the first hole */
			if (mm->free_area_cache == vma->vm_end) {
				mm->free_area_cache = vma->vm_start;
				mm->cached_hole_size = largest_hole;
			}
		}

		/* remember the largest hole we saw so far */
		if (addr + largest_hole < vma->vm_start)
			largest_hole = vma->vm_start - addr;

		/* try just below the current vma->vm_start */
		addr = (vma->vm_start - len) & HPAGE_MASK;
	} while (len <= vma->vm_start);

fail:
	/*
	 * if hint left us with no space for the requested
	 * mapping then try again:
	 */
	if (first_time) {
		mm->free_area_cache = base;
		largest_hole = 0;
		first_time = 0;
		goto try_again;
	}
	/*
	 * A failed mmap() very likely causes application failure,
	 * so fall back to the bottom-up function here. This scenario
	 * can happen with large stack limits and large mmap()
	 * allocations.
	 */
	mm->free_area_cache = TASK_UNMAPPED_BASE;
	mm->cached_hole_size = ~0UL;
	addr = hugetlb_get_unmapped_area_bottomup(file, addr0,
			len, pgoff, flags);

	/*
	 * Restore the topdown base:
	 */
	mm->free_area_cache = base;
	mm->cached_hole_size = ~0UL;

	return addr;
}
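
/*
 * A simplified userspace model of the top-down search above, with a
 * sorted array standing in for the vma list: start just below the mmap
 * base and slide downward past each mapping until a huge-page-aligned
 * gap of at least len opens up.  The structure and names are invented
 * for the example; the real code additionally maintains the
 * free_area_cache and largest-hole hints.
 */
#if 0	/* example only, never compiled */
#include <stdio.h>

struct ex_vma { unsigned long start, end; };	/* sorted ascending */

static unsigned long ex_topdown(const struct ex_vma *v, int n,
				unsigned long base, unsigned long len,
				unsigned long hpage_mask)
{
	unsigned long addr = (base - len) & hpage_mask;
	int i;

	/*
	 * Invariant: [addr, addr + len) already fits below everything
	 * above index i, so the first vma whose end we clear wins.
	 */
	for (i = n - 1; i >= 0; i--) {
		if (addr >= v[i].end)
			return addr;		/* gap above v[i] is big enough */
		addr = (v[i].start - len) & hpage_mask;	/* retry just below it */
	}
	return addr;	/* below all vmas; caller must still range-check */
}

int main(void)
{
	const struct ex_vma v[] = {
		{ 0x08000000UL, 0x08400000UL },
		{ 0xbf000000UL, 0xbff00000UL },	/* blocks the topmost gap */
	};
	unsigned long mask = ~((1UL << 22) - 1);	/* 4 MB huge pages */

	/* Slides below the top vma: expect 0xbec00000. */
	printf("%#lx\n", ex_topdown(v, 2, 0xc0000000UL, 1UL << 22, mask));
	return 0;
}
#endif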

unsigned long
hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
		unsigned long len, unsigned long pgoff, unsigned long flags)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;

	if (len & ~HPAGE_MASK)
		return -EINVAL;
	if (len > TASK_SIZE)
		return -ENOMEM;

	if (addr) {
		addr = ALIGN(addr, HPAGE_SIZE);
		vma = find_vma(mm, addr);
		if (TASK_SIZE - len >= addr &&
		    (!vma || addr + len <= vma->vm_start))
			return addr;
	}
	if (mm->get_unmapped_area == arch_get_unmapped_area)
		return hugetlb_get_unmapped_area_bottomup(file, addr, len,
				pgoff, flags);
	else
		return hugetlb_get_unmapped_area_topdown(file, addr, len,
				pgoff, flags);
}

#endif /*HAVE_ARCH_HUGETLB_UNMAPPED_AREA*/
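
/*
 * How this code is reached from userspace in this era of the kernel:
 * mmap() on a file that lives on a mounted hugetlbfs ends up in
 * hugetlb_get_unmapped_area() above.  A minimal sketch, assuming a
 * hypothetical mount at /mnt/huge ("mount -t hugetlbfs none /mnt/huge")
 * and the non-PAE 4 MB huge page size:
 */
#if 0	/* example only, never compiled */
#include <fcntl.h>
#include <unistd.h>
#include <sys/mman.h>

#define EX_HPAGE_SIZE	(4UL * 1024 * 1024)

int main(void)
{
	int fd = open("/mnt/huge/example", O_CREAT | O_RDWR, 0600);
	char *p;

	if (fd < 0)
		return 1;
	/* Offset and length must be multiples of the huge page size. */
	p = mmap(NULL, EX_HPAGE_SIZE, PROT_READ | PROT_WRITE,
		 MAP_SHARED, fd, 0);
	if (p == MAP_FAILED)
		return 1;
	p[0] = 1;		/* touch, faulting in one huge page */
	munmap(p, EX_HPAGE_SIZE);
	close(fd);
	return 0;
}
#endif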