arch/ia64/mm/hugetlbpage.c
/*
 * IA-64 Huge TLB Page Support for Kernel.
 *
 * Copyright (C) 2002-2004 Rohit Seth <rohit.seth@intel.com>
 * Copyright (C) 2003-2004 Ken Chen <kenneth.w.chen@intel.com>
 *
 * Sep, 2003: add numa support
 * Feb, 2004: dynamic hugetlb page size via boot parameter
 */

#include <linux/config.h>
#include <linux/init.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/pagemap.h>
#include <linux/smp_lock.h>
#include <linux/slab.h>
#include <linux/sysctl.h>
#include <asm/mman.h>
#include <asm/pgalloc.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>

unsigned int hpage_shift = HPAGE_SHIFT_DEFAULT;

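/*
 * Walk (and allocate as needed) the page-table levels for a huge page at
 * addr.  The address is first scaled down with htlbpage_to_page() so that
 * each huge page is covered by a single PAGE_SIZE-style PTE.
 */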
static pte_t *
huge_pte_alloc (struct mm_struct *mm, unsigned long addr)
{
	unsigned long taddr = htlbpage_to_page(addr);
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte = NULL;

	pgd = pgd_offset(mm, taddr);
	pud = pud_alloc(mm, pgd, taddr);
	if (pud) {
		pmd = pmd_alloc(mm, pud, taddr);
		if (pmd)
			pte = pte_alloc_map(mm, pmd, taddr);
	}
	return pte;
}

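/*
 * Look up the PTE for a huge page at addr without allocating; returns NULL
 * if any intermediate page-table level is not present.
 */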
static pte_t *
huge_pte_offset (struct mm_struct *mm, unsigned long addr)
{
	unsigned long taddr = htlbpage_to_page(addr);
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte = NULL;

	pgd = pgd_offset(mm, taddr);
	if (pgd_present(*pgd)) {
		pud = pud_offset(pgd, taddr);
		if (pud_present(*pud)) {
			pmd = pmd_offset(pud, taddr);
			if (pmd_present(*pmd))
				pte = pte_offset_map(pmd, taddr);
		}
	}

	return pte;
}

#define mk_pte_huge(entry) { pte_val(entry) |= _PAGE_P; }

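/*
 * Install a huge-page PTE for the given page: writable and dirty for write
 * mappings, write-protected otherwise, always marked young and present.
 * The backing PAGE_SIZE pages are accounted in the mm's rss counter.
 */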
static void
set_huge_pte (struct mm_struct *mm, struct vm_area_struct *vma,
	      struct page *page, pte_t * page_table, int write_access)
{
	pte_t entry;

	add_mm_counter(mm, rss, HPAGE_SIZE / PAGE_SIZE);
	if (write_access) {
		entry =
		    pte_mkwrite(pte_mkdirty(mk_pte(page, vma->vm_page_prot)));
	} else
		entry = pte_wrprotect(mk_pte(page, vma->vm_page_prot));
	entry = pte_mkyoung(entry);
	mk_pte_huge(entry);
	set_pte(page_table, entry);
	return;
}
/*
 * This function checks for proper alignment of input addr and len parameters.
 */
int is_aligned_hugepage_range(unsigned long addr, unsigned long len)
{
	if (len & ~HPAGE_MASK)
		return -EINVAL;
	if (addr & ~HPAGE_MASK)
		return -EINVAL;
	if (REGION_NUMBER(addr) != REGION_HPAGE)
		return -EINVAL;

	return 0;
}

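/*
 * Duplicate the huge-page mappings of a VMA from the parent mm (src) into
 * the child mm (dst), taking a reference on each mapped page and updating
 * the child's rss accordingly.
 */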
int copy_hugetlb_page_range(struct mm_struct *dst, struct mm_struct *src,
			struct vm_area_struct *vma)
{
	pte_t *src_pte, *dst_pte, entry;
	struct page *ptepage;
	unsigned long addr = vma->vm_start;
	unsigned long end = vma->vm_end;

	while (addr < end) {
		dst_pte = huge_pte_alloc(dst, addr);
		if (!dst_pte)
			goto nomem;
		src_pte = huge_pte_offset(src, addr);
		entry = *src_pte;
		ptepage = pte_page(entry);
		get_page(ptepage);
		set_pte(dst_pte, entry);
		add_mm_counter(dst, rss, HPAGE_SIZE / PAGE_SIZE);
		addr += HPAGE_SIZE;
	}
	return 0;
nomem:
	return -ENOMEM;
}

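/*
 * get_user_pages() back end for huge pages: fill in the pages[] and vmas[]
 * arrays one PAGE_SIZE subpage at a time, reusing the same huge PTE for as
 * long as the walk stays within the same huge page.
 */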
int
follow_hugetlb_page(struct mm_struct *mm, struct vm_area_struct *vma,
		    struct page **pages, struct vm_area_struct **vmas,
		    unsigned long *st, int *length, int i)
{
	pte_t *ptep, pte;
	unsigned long start = *st;
	unsigned long pstart;
	int len = *length;
	struct page *page;

	do {
		pstart = start & HPAGE_MASK;
		ptep = huge_pte_offset(mm, start);
		pte = *ptep;

back1:
		page = pte_page(pte);
		if (pages) {
			page += ((start & ~HPAGE_MASK) >> PAGE_SHIFT);
			get_page(page);
			pages[i] = page;
		}
		if (vmas)
			vmas[i] = vma;
		i++;
		len--;
		start += PAGE_SIZE;
		if (((start & HPAGE_MASK) == pstart) && len &&
				(start < vma->vm_end))
			goto back1;
	} while (len && start < vma->vm_end);
	*length = len;
	*st = start;
	return i;
}

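/*
 * Translate a user address in the huge-page region to its struct page,
 * adjusted to the PAGE_SIZE subpage containing addr.  Returns NULL when
 * nothing is mapped there, and -EINVAL for addresses outside the region.
 */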
struct page *follow_huge_addr(struct mm_struct *mm, unsigned long addr, int write)
{
	struct page *page;
	pte_t *ptep;

	if (REGION_NUMBER(addr) != REGION_HPAGE)
		return ERR_PTR(-EINVAL);

	ptep = huge_pte_offset(mm, addr);
	if (!ptep || pte_none(*ptep))
		return NULL;
	page = pte_page(*ptep);
	page += ((addr & ~HPAGE_MASK) >> PAGE_SHIFT);
	return page;
}

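/*
 * Huge pages on ia64 are mapped through their own region rather than via
 * huge PMD entries, so the generic pmd_huge()/follow_huge_pmd() hooks are
 * stubs here.
 */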
int pmd_huge(pmd_t pmd)
{
	return 0;
}

struct page *
follow_huge_pmd(struct mm_struct *mm, unsigned long address, pmd_t *pmd, int write)
{
	return NULL;
}

void hugetlb_free_pgd_range(struct mmu_gather **tlb,
			unsigned long addr, unsigned long end,
			unsigned long floor, unsigned long ceiling)
{
	/*
	 * This is called only when is_hugepage_only_range(addr) is true,
	 * and it follows that is_hugepage_only_range(end) is true as well.
	 *
	 * The offset of these addresses from the base of the hugetlb
	 * region must be scaled down by HPAGE_SIZE/PAGE_SIZE so that
	 * the standard free_pgd_range will free the right page tables.
	 *
	 * If floor and ceiling are also in the hugetlb region, they
	 * must likewise be scaled down; but if outside, left unchanged.
	 */

	addr = htlbpage_to_page(addr);
	end  = htlbpage_to_page(end);
	if (is_hugepage_only_range(tlb->mm, floor, HPAGE_SIZE))
		floor = htlbpage_to_page(floor);
	if (is_hugepage_only_range(tlb->mm, ceiling, HPAGE_SIZE))
		ceiling = htlbpage_to_page(ceiling);

	free_pgd_range(tlb, addr, end, floor, ceiling);
}

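/*
 * Tear down the huge-page mappings in [start, end): drop the page
 * references, clear the PTEs, fix up rss and flush the TLB range.
 */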
void unmap_hugepage_range(struct vm_area_struct *vma, unsigned long start, unsigned long end)
{
	struct mm_struct *mm = vma->vm_mm;
	unsigned long address;
	pte_t *pte;
	struct page *page;

	BUG_ON(start & (HPAGE_SIZE - 1));
	BUG_ON(end & (HPAGE_SIZE - 1));

	for (address = start; address < end; address += HPAGE_SIZE) {
		pte = huge_pte_offset(mm, address);
		if (pte_none(*pte))
			continue;
		page = pte_page(*pte);
		put_page(page);
		pte_clear(mm, address, pte);
	}
	add_mm_counter(mm, rss, -((end - start) >> PAGE_SHIFT));
	flush_tlb_range(vma, start, end);
}

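/*
 * Populate every huge page of the VMA up front: look each page up in the
 * page cache, allocating a fresh huge page (with the fs quota charged)
 * when it is not yet present, then install the huge PTE.
 */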
int hugetlb_prefault(struct address_space *mapping, struct vm_area_struct *vma)
{
	struct mm_struct *mm = current->mm;
	unsigned long addr;
	int ret = 0;

	BUG_ON(vma->vm_start & ~HPAGE_MASK);
	BUG_ON(vma->vm_end & ~HPAGE_MASK);

	spin_lock(&mm->page_table_lock);
	for (addr = vma->vm_start; addr < vma->vm_end; addr += HPAGE_SIZE) {
		unsigned long idx;
		pte_t *pte = huge_pte_alloc(mm, addr);
		struct page *page;

		if (!pte) {
			ret = -ENOMEM;
			goto out;
		}
		if (!pte_none(*pte))
			continue;

		idx = ((addr - vma->vm_start) >> HPAGE_SHIFT)
			+ (vma->vm_pgoff >> (HPAGE_SHIFT - PAGE_SHIFT));
		page = find_get_page(mapping, idx);
		if (!page) {
			/* charge the fs quota first */
			if (hugetlb_get_quota(mapping)) {
				ret = -ENOMEM;
				goto out;
			}
			page = alloc_huge_page();
			if (!page) {
				hugetlb_put_quota(mapping);
				ret = -ENOMEM;
				goto out;
			}
			ret = add_to_page_cache(page, mapping, idx, GFP_ATOMIC);
			if (!ret) {
				unlock_page(page);
			} else {
				hugetlb_put_quota(mapping);
				page_cache_release(page);
				goto out;
			}
		}
		set_huge_pte(mm, vma, page, pte, vma->vm_flags & VM_WRITE);
	}
out:
	spin_unlock(&mm->page_table_lock);
	return ret;
}

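/*
 * Find a free, HPAGE_SIZE-aligned range in the huge-page region by a
 * simple linear walk of the VMAs above the requested address.
 */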
unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr, unsigned long len,
		unsigned long pgoff, unsigned long flags)
{
	struct vm_area_struct *vmm;

	if (len > RGN_MAP_LIMIT)
		return -ENOMEM;
	if (len & ~HPAGE_MASK)
		return -EINVAL;
	/* This code assumes that REGION_HPAGE != 0. */
	if ((REGION_NUMBER(addr) != REGION_HPAGE) || (addr & (HPAGE_SIZE - 1)))
		addr = HPAGE_REGION_BASE;
	else
		addr = ALIGN(addr, HPAGE_SIZE);
	for (vmm = find_vma(current->mm, addr); ; vmm = vmm->vm_next) {
		/* At this point:  (!vmm || addr < vmm->vm_end). */
		if (REGION_OFFSET(addr) + len > RGN_MAP_LIMIT)
			return -ENOMEM;
		if (!vmm || (addr + len) <= vmm->vm_start)
			return addr;
		addr = ALIGN(vmm->vm_end, HPAGE_SIZE);
	}
}

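/*
 * Parse the "hugepagesz=" boot parameter: the size must be a power of two
 * supported by the CPU (per the PAL page-size mask), larger than PAGE_SIZE
 * and below the buddy allocator's MAX_ORDER limit.
 */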
static int __init hugetlb_setup_sz(char *str)
{
	u64 tr_pages;
	unsigned long long size;

	if (ia64_pal_vm_page_size(&tr_pages, NULL) != 0)
		/*
		 * shouldn't happen, but just in case.
		 */
		tr_pages = 0x15557000UL;

	size = memparse(str, &str);
	if (*str || (size & (size - 1)) || !(tr_pages & size) ||
		size <= PAGE_SIZE ||
		size >= (1UL << PAGE_SHIFT << MAX_ORDER)) {
		printk(KERN_WARNING "Invalid huge page size specified\n");
		return 1;
	}

	hpage_shift = __ffs(size);
	/*
	 * The boot CPU has already executed ia64_mmu_init() with
	 * HPAGE_SHIFT_DEFAULT; override the huge-page region register
	 * here with the new page shift.
	 */
	ia64_set_rr(HPAGE_REGION_BASE, hpage_shift << 2);
	return 1;
}
__setup("hugepagesz=", hugetlb_setup_sz);