include cleanup: Update gfp.h and slab.h includes to prepare for breaking implicit slab.h inclusion from percpu.h
[linux-2.6.git] / arch / arm / mm / pgd.c
1 /*
2  *  linux/arch/arm/mm/pgd.c
3  *
4  *  Copyright (C) 1998-2005 Russell King
5  *
6  * This program is free software; you can redistribute it and/or modify
7  * it under the terms of the GNU General Public License version 2 as
8  * published by the Free Software Foundation.
9  */
10 #include <linux/mm.h>
11 #include <linux/gfp.h>
12 #include <linux/highmem.h>
13
14 #include <asm/pgalloc.h>
15 #include <asm/page.h>
16 #include <asm/tlbflush.h>
17
18 #include "mm.h"
19
20 #define FIRST_KERNEL_PGD_NR     (FIRST_USER_PGD_NR + USER_PTRS_PER_PGD)
21
22 /*
23  * need to get a 16k page for level 1
24  */
/*
 * Allocate and initialise a new first-level (pgd) page table for @mm.
 *
 * The ARM first-level table is 16KB (PTRS_PER_PGD entries), hence the
 * order-2 page allocation.  User-space entries are zeroed; the kernel
 * and I/O entries are copied from the init_mm (swapper) tables.
 *
 * Returns the new pgd, or NULL if any allocation fails.  Partially
 * allocated state is unwound via the goto chain at the bottom.
 */
pgd_t *get_pgd_slow(struct mm_struct *mm)
{
	pgd_t *new_pgd, *init_pgd;
	pmd_t *new_pmd, *init_pmd;
	pte_t *new_pte, *init_pte;

	/* order 2 == four contiguous pages == the 16KB level-1 table */
	new_pgd = (pgd_t *)__get_free_pages(GFP_KERNEL, 2);
	if (!new_pgd)
		goto no_pgd;

	/* Clear the user-space portion of the new table */
	memset(new_pgd, 0, FIRST_KERNEL_PGD_NR * sizeof(pgd_t));

	/*
	 * Copy over the kernel and IO PGD entries
	 */
	init_pgd = pgd_offset_k(0);
	memcpy(new_pgd + FIRST_KERNEL_PGD_NR, init_pgd + FIRST_KERNEL_PGD_NR,
		       (PTRS_PER_PGD - FIRST_KERNEL_PGD_NR) * sizeof(pgd_t));

	/*
	 * Push the table out of the data cache so the (possibly
	 * non-coherent) hardware table walker sees the new entries.
	 */
	clean_dcache_area(new_pgd, PTRS_PER_PGD * sizeof(pgd_t));

	if (!vectors_high()) {
		/*
		 * On ARM, first page must always be allocated since it
		 * contains the machine vectors.
		 */
		new_pmd = pmd_alloc(mm, new_pgd, 0);
		if (!new_pmd)
			goto no_pmd;

		new_pte = pte_alloc_map(mm, new_pmd, 0);
		if (!new_pte)
			goto no_pte;

		/*
		 * Copy the vector-page pte from init_mm into the new
		 * table.  The _nested map/unmap pairing matters: new_pte
		 * is already kmap'd, so the init pte takes the nested slot.
		 */
		init_pmd = pmd_offset(init_pgd, 0);
		init_pte = pte_offset_map_nested(init_pmd, 0);
		set_pte_ext(new_pte, *init_pte, 0);
		pte_unmap_nested(init_pte);
		pte_unmap(new_pte);
	}

	return new_pgd;

/* Error unwind: release resources in reverse order of acquisition. */
no_pte:
	pmd_free(mm, new_pmd);
no_pmd:
	free_pages((unsigned long)new_pgd, 2);
no_pgd:
	return NULL;
}
75
/*
 * Free a first-level page table previously allocated by get_pgd_slow(),
 * tearing down the vector-page pmd/pte (entry 0) if one was installed.
 *
 * Only entry 0 needs inspection: get_pgd_slow() populates at most that
 * one user entry (the machine-vectors page when !vectors_high()).
 * Accepts a NULL @pgd as a no-op.
 */
void free_pgd_slow(struct mm_struct *mm, pgd_t *pgd)
{
	pmd_t *pmd;
	pgtable_t pte;

	if (!pgd)
		return;

	/* pgd is always present and good */
	pmd = pmd_off(pgd, 0);
	if (pmd_none(*pmd))
		goto free;		/* no vector-page tables to tear down */
	if (pmd_bad(*pmd)) {
		/* Corrupt entry: report it, clear it, and still free the pgd. */
		pmd_ERROR(*pmd);
		pmd_clear(pmd);
		goto free;
	}

	/* Grab the pte page before clearing the pmd, then free both. */
	pte = pmd_pgtable(*pmd);
	pmd_clear(pmd);
	pte_free(mm, pte);
	pmd_free(mm, pmd);
free:
	/* order-2 allocation: the 16KB level-1 table */
	free_pages((unsigned long) pgd, 2);
}