[SPARC64]: Stop putting -finline-limit=XXX into CFLAGS
[linux-2.6.git] / arch/sparc64/mm/generic.c
/* $Id: generic.c,v 1.18 2001/12/21 04:56:15 davem Exp $
 * generic.c: Generic Sparc mm routines that are not dependent upon
 *            MMU type but are Sparc specific.
 *
 * Copyright (C) 1996 David S. Miller (davem@caip.rutgers.edu)
 */

#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/pagemap.h>

#include <asm/pgalloc.h>
#include <asm/pgtable.h>
#include <asm/page.h>
#include <asm/tlbflush.h>

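/* Build a PTE for an I/O mapping: the physical page address is combined
 * with the protection bits, the side-effect bit (_PAGE_E) is set, the
 * cacheable bit is cleared, and the I/O space number is placed in the
 * upper 32 bits of the PTE.
 */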
static inline pte_t mk_pte_io(unsigned long page, pgprot_t prot, int space)
{
        pte_t pte;
        pte_val(pte) = (((page) | pgprot_val(prot) | _PAGE_E) &
                        ~(unsigned long)_PAGE_CACHE);
        pte_val(pte) |= (((unsigned long)space) << 32);
        return pte;
}

/* Remap IO memory, the same way as remap_pfn_range(), but use
 * the obio memory space.
 *
 * These mappings use a pgprot that sets the side-effect bit and do not
 * consult the mem_map table, as I/O space is independent of normal memory.
 */
static inline void io_remap_pte_range(struct mm_struct *mm, pte_t * pte,
                                      unsigned long address,
                                      unsigned long size,
                                      unsigned long offset, pgprot_t prot,
                                      int space)
{
        unsigned long end;

        /* clear hack bit that was used as a write_combine side-effect flag */
        offset &= ~0x1UL;
        address &= ~PMD_MASK;
        end = address + size;
        if (end > PMD_SIZE)
                end = PMD_SIZE;
        do {
                pte_t entry;
                unsigned long curend = address + PAGE_SIZE;

                entry = mk_pte_io(offset, prot, space);
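                /* If the virtual address is 64K aligned, try to cover the
                 * range with fewer TLB entries: pick a 4MB, 512K or 64K
                 * page size when address, physical offset and remaining
                 * length all permit it.  The chosen size bits are set in
                 * every constituent PTE written below.
                 */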
                if (!(address & 0xffff)) {
                        if (!(address & 0x3fffff) && !(offset & 0x3ffffe) && end >= address + 0x400000) {
                                entry = mk_pte_io(offset,
                                                  __pgprot(pgprot_val(prot) | _PAGE_SZ4MB),
                                                  space);
                                curend = address + 0x400000;
                                offset += 0x400000;
                        } else if (!(address & 0x7ffff) && !(offset & 0x7fffe) && end >= address + 0x80000) {
                                entry = mk_pte_io(offset,
                                                  __pgprot(pgprot_val(prot) | _PAGE_SZ512K),
                                                  space);
                                curend = address + 0x80000;
                                offset += 0x80000;
                        } else if (!(offset & 0xfffe) && end >= address + 0x10000) {
                                entry = mk_pte_io(offset,
                                                  __pgprot(pgprot_val(prot) | _PAGE_SZ64K),
                                                  space);
                                curend = address + 0x10000;
                                offset += 0x10000;
                        } else
                                offset += PAGE_SIZE;
                } else
                        offset += PAGE_SIZE;

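                /* Write one PTE per base page up to curend, advancing the
                 * physical address held in the PTE by PAGE_SIZE each step.
                 */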
                do {
                        BUG_ON(!pte_none(*pte));
                        set_pte_at(mm, address, pte, entry);
                        address += PAGE_SIZE;
                        pte_val(entry) += PAGE_SIZE;
                        pte++;
                } while (address < curend);
        } while (address < end);
}

static inline int io_remap_pmd_range(struct mm_struct *mm, pmd_t * pmd, unsigned long address, unsigned long size,
        unsigned long offset, pgprot_t prot, int space)
{
        unsigned long end;

        address &= ~PGDIR_MASK;
        end = address + size;
        if (end > PGDIR_SIZE)
                end = PGDIR_SIZE;
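        /* Bias offset by -address so that "address + offset" at each step
         * below yields the physical offset corresponding to the current
         * virtual address.  The same trick is used at each level.
         */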
        offset -= address;
        do {
                pte_t * pte = pte_alloc_map(mm, pmd, address);
                if (!pte)
                        return -ENOMEM;
                io_remap_pte_range(mm, pte, address, end - address, address + offset, prot, space);
                pte_unmap(pte);
                address = (address + PMD_SIZE) & PMD_MASK;
                pmd++;
        } while (address < end);
        return 0;
}

static inline int io_remap_pud_range(struct mm_struct *mm, pud_t * pud, unsigned long address, unsigned long size,
        unsigned long offset, pgprot_t prot, int space)
{
        unsigned long end;

        address &= ~PUD_MASK;
        end = address + size;
        if (end > PUD_SIZE)
                end = PUD_SIZE;
        offset -= address;
        do {
                pmd_t *pmd = pmd_alloc(mm, pud, address);
                int err;

                if (!pmd)
                        return -ENOMEM;
                err = io_remap_pmd_range(mm, pmd, address, end - address, address + offset, prot, space);
                if (err)
                        return err;
                address = (address + PUD_SIZE) & PUD_MASK;
                pud++;
        } while (address < end);
        return 0;
}

int io_remap_pfn_range(struct vm_area_struct *vma, unsigned long from,
                unsigned long pfn, unsigned long size, pgprot_t prot)
{
        int error = 0;
        pgd_t * dir;
        unsigned long beg = from;
        unsigned long end = from + size;
        struct mm_struct *mm = vma->vm_mm;
        int space = GET_IOSPACE(pfn);
        unsigned long offset = GET_PFN(pfn) << PAGE_SHIFT;
        unsigned long phys_base;

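        /* The pfn argument encodes both the I/O space number (upper bits,
         * extracted by GET_IOSPACE) and the page frame within that space
         * (GET_PFN); reconstruct the full physical base so that vm_pgoff
         * records the mapping (see the remap_pfn_range comment referenced
         * below).
         */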
        phys_base = offset | (((unsigned long) space) << 32UL);

        /* See comment in mm/memory.c remap_pfn_range */
        vma->vm_flags |= VM_IO | VM_RESERVED | VM_PFNMAP;
        vma->vm_pgoff = phys_base >> PAGE_SHIFT;

        prot = __pgprot(pg_iobits);
        offset -= from;
        dir = pgd_offset(mm, from);
        flush_cache_range(vma, beg, end);

        while (from < end) {
                pud_t *pud = pud_alloc(mm, dir, from);
                error = -ENOMEM;
                if (!pud)
                        break;
                error = io_remap_pud_range(mm, pud, from, end - from, offset + from, prot, space);
                if (error)
                        break;
                from = (from + PGDIR_SIZE) & PGDIR_MASK;
                dir++;
        }

        flush_tlb_range(vma, beg, end);
        return error;
}
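
/* Usage sketch, not part of the file above: a driver's mmap() handler
 * would typically call io_remap_pfn_range() along these lines.
 * MYDEV_PHYS_BASE and MYDEV_IOSPACE are hypothetical placeholders, the
 * pfn composition assumes GET_IOSPACE()/GET_PFN() keep the I/O space
 * number in the top four bits of the pfn, and <linux/fs.h> is assumed
 * for struct file.
 */
#define MYDEV_PHYS_BASE 0x1fe00000000UL /* hypothetical device base */
#define MYDEV_IOSPACE   0x0UL           /* hypothetical I/O space number */

static int mydev_mmap(struct file *file, struct vm_area_struct *vma)
{
        /* Compose the pfn the same way io_remap_pfn_range() decomposes
         * it: I/O space number in the upper bits, page frame below.
         */
        unsigned long pfn = (MYDEV_IOSPACE << (BITS_PER_LONG - 4)) |
                            (MYDEV_PHYS_BASE >> PAGE_SHIFT);

        /* prot is ignored here: io_remap_pfn_range() substitutes pg_iobits. */
        return io_remap_pfn_range(vma, vma->vm_start, pfn,
                                  vma->vm_end - vma->vm_start,
                                  vma->vm_page_prot);
}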