x86: ioremap of 64-bit resource on 32-bit kernel fix
arch/x86/mm/ioremap.c
/*
 * Re-map IO memory to kernel address space so that we can access it.
 * This is needed for high PCI addresses that aren't mapped in the
 * 640k-1MB IO memory area on PC's
 *
 * (C) Copyright 1995 1996 Linus Torvalds
 */

#include <linux/bootmem.h>
#include <linux/init.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>

#include <asm/cacheflush.h>
#include <asm/e820.h>
#include <asm/fixmap.h>
#include <asm/pgtable.h>
#include <asm/tlbflush.h>
#include <asm/pgalloc.h>
#include <asm/pat.h>

#ifdef CONFIG_X86_64

unsigned long __phys_addr(unsigned long x)
{
        if (x >= __START_KERNEL_map)
                return x - __START_KERNEL_map + phys_base;
        return x - PAGE_OFFSET;
}
EXPORT_SYMBOL(__phys_addr);

static inline int phys_addr_valid(unsigned long addr)
{
        return addr < (1UL << boot_cpu_data.x86_phys_bits);
}

#else

static inline int phys_addr_valid(unsigned long addr)
{
        return 1;
}

#endif

int page_is_ram(unsigned long pagenr)
{
        resource_size_t addr, end;
        int i;

        /*
         * A special case is the first 4KB of memory: this is a BIOS-owned
         * area, not kernel RAM, but it is generally not listed as such in
         * the E820 table.
         */
        if (pagenr == 0)
                return 0;

        /*
         * Second special case: some BIOSes report the PC BIOS
         * area (640K->1MB) as RAM even though it is not.
         */
        if (pagenr >= (BIOS_BEGIN >> PAGE_SHIFT) &&
                    pagenr < (BIOS_END >> PAGE_SHIFT))
                return 0;

        for (i = 0; i < e820.nr_map; i++) {
                /*
                 * Not usable memory:
                 */
                if (e820.map[i].type != E820_RAM)
                        continue;
                addr = (e820.map[i].addr + PAGE_SIZE-1) >> PAGE_SHIFT;
                end = (e820.map[i].addr + e820.map[i].size) >> PAGE_SHIFT;

                if ((pagenr >= addr) && (pagenr < end))
                        return 1;
        }
        return 0;
}
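
/*
 * For illustration only: a minimal sketch (the helper name is hypothetical)
 * of how a caller can use page_is_ram() to refuse a physical range that
 * overlaps usable RAM. __ioremap() below performs the real version of this
 * walk, including the pfn_valid()/PageReserved() refinement.
 */
#if 0
static int example_range_is_ram(resource_size_t start, resource_size_t last)
{
        unsigned long pfn;

        for (pfn = start >> PAGE_SHIFT; (pfn << PAGE_SHIFT) < last; pfn++)
                if (page_is_ram(pfn))
                        return 1;       /* overlaps an E820_RAM entry */
        return 0;
}
#endif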

/*
 * Fix up the linear direct mapping of the kernel to avoid cache attribute
 * conflicts.
 */
int ioremap_change_attr(unsigned long vaddr, unsigned long size,
                               unsigned long prot_val)
{
        unsigned long nrpages = size >> PAGE_SHIFT;
        int err;

        switch (prot_val) {
        case _PAGE_CACHE_UC:
        default:
                err = _set_memory_uc(vaddr, nrpages);
                break;
        case _PAGE_CACHE_WC:
                err = _set_memory_wc(vaddr, nrpages);
                break;
        case _PAGE_CACHE_WB:
                err = _set_memory_wb(vaddr, nrpages);
                break;
        }

        return err;
}

/*
 * Remap an arbitrary physical address space into the kernel virtual
 * address space. Needed when the kernel wants to access high addresses
 * directly.
 *
 * NOTE! We need to allow non-page-aligned mappings too: we will obviously
 * have to convert them into an offset in a page-aligned mapping, but the
 * caller shouldn't need to know that small detail.
 */
static void __iomem *__ioremap(resource_size_t phys_addr, unsigned long size,
                               unsigned long prot_val)
{
        unsigned long pfn, offset, vaddr;
        resource_size_t last_addr;
        struct vm_struct *area;
        unsigned long new_prot_val;
        pgprot_t prot;
        int retval;

        /* Don't allow wraparound or zero size */
        last_addr = phys_addr + size - 1;
        if (!size || last_addr < phys_addr)
                return NULL;

        if (!phys_addr_valid(phys_addr)) {
                printk(KERN_WARNING "ioremap: invalid physical address %llx\n",
                       (unsigned long long)phys_addr);
                WARN_ON_ONCE(1);
                return NULL;
        }

        /*
         * Don't remap the low PCI/ISA area, it's always mapped..
         */
        if (phys_addr >= ISA_START_ADDRESS && last_addr < ISA_END_ADDRESS)
                return (__force void __iomem *)phys_to_virt(phys_addr);

        /*
         * Don't allow anybody to remap normal RAM that we're using..
         */
        for (pfn = phys_addr >> PAGE_SHIFT;
                                (pfn << PAGE_SHIFT) < last_addr; pfn++) {

                int is_ram = page_is_ram(pfn);

                if (is_ram && pfn_valid(pfn) && !PageReserved(pfn_to_page(pfn)))
                        return NULL;
                WARN_ON_ONCE(is_ram);
        }

        /*
         * Mappings have to be page-aligned
         */
        offset = phys_addr & ~PAGE_MASK;
        phys_addr &= PAGE_MASK;
        size = PAGE_ALIGN(last_addr+1) - phys_addr;

        retval = reserve_memtype(phys_addr, phys_addr + size,
                                                prot_val, &new_prot_val);
        if (retval) {
                pr_debug("Warning: reserve_memtype returned %d\n", retval);
                return NULL;
        }

        if (prot_val != new_prot_val) {
                /*
                 * Do not fall back to certain memory types with certain
                 * requested type:
                 * - request is uncached, return cannot be write-back
                 * - request is uncached, return cannot be write-combine
                 * - request is write-combine, return cannot be write-back
                 */
                if ((prot_val == _PAGE_CACHE_UC &&
                     (new_prot_val == _PAGE_CACHE_WB ||
                      new_prot_val == _PAGE_CACHE_WC)) ||
                    (prot_val == _PAGE_CACHE_WC &&
                     new_prot_val == _PAGE_CACHE_WB)) {
                        pr_debug(
                "ioremap error for 0x%llx-0x%llx, requested 0x%lx, got 0x%lx\n",
                                (unsigned long long)phys_addr,
                                (unsigned long long)(phys_addr + size),
                                prot_val, new_prot_val);
                        free_memtype(phys_addr, phys_addr + size);
                        return NULL;
                }
                prot_val = new_prot_val;
        }

        switch (prot_val) {
        case _PAGE_CACHE_UC:
        default:
                prot = PAGE_KERNEL_NOCACHE;
                break;
        case _PAGE_CACHE_WC:
                prot = PAGE_KERNEL_WC;
                break;
        case _PAGE_CACHE_WB:
                prot = PAGE_KERNEL;
                break;
        }

        /*
         * Ok, go for it..
         */
        area = get_vm_area(size, VM_IOREMAP);
        if (!area)
                return NULL;
        area->phys_addr = phys_addr;
        vaddr = (unsigned long) area->addr;
        if (ioremap_page_range(vaddr, vaddr + size, phys_addr, prot)) {
                free_memtype(phys_addr, phys_addr + size);
                free_vm_area(area);
                return NULL;
        }

        if (ioremap_change_attr(vaddr, size, prot_val) < 0) {
                free_memtype(phys_addr, phys_addr + size);
                vunmap(area->addr);
                return NULL;
        }

        return (void __iomem *) (vaddr + offset);
}
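
/*
 * Worked example of the alignment handling above (the numbers are only
 * illustrative): a request for 0x10 bytes at physical 0xfee00040 gives
 *
 *      last_addr = 0xfee0004f
 *      offset    = 0xfee00040 & ~PAGE_MASK                = 0x040
 *      phys_addr = 0xfee00040 &  PAGE_MASK                = 0xfee00000
 *      size      = PAGE_ALIGN(last_addr + 1) - phys_addr  = 0x1000
 *
 * so the whole page is mapped and the caller gets vaddr + 0x040, which is
 * why non-page-aligned requests "just work".
 */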

/**
 * ioremap_nocache - map bus memory into CPU space
 * @phys_addr: bus address of the memory
 * @size:      size of the resource to map
 *
 * ioremap_nocache performs a platform specific sequence of operations to
 * make bus memory CPU accessible via the readb/readw/readl/writeb/
 * writew/writel functions and the other mmio helpers. The returned
 * address is not guaranteed to be usable directly as a virtual
 * address.
 *
 * This version of ioremap ensures that the memory is marked uncachable
 * on the CPU as well as honouring existing caching rules from things like
 * the PCI bus. Note that there are other caches and buffers on many
 * busses. In particular, driver authors should read up on PCI writes.
 *
 * It's useful if some control registers are in such an area and
 * write combining or read caching is not desirable.
 *
 * Must be freed with iounmap.
 */
void __iomem *ioremap_nocache(resource_size_t phys_addr, unsigned long size)
{
        return __ioremap(phys_addr, size, _PAGE_CACHE_UC);
}
EXPORT_SYMBOL(ioremap_nocache);
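
/*
 * A minimal usage sketch (the device, BAR address, and register offsets are
 * hypothetical): map a device's MMIO registers uncached, access them with
 * the mmio helpers, then release the mapping with iounmap().
 */
#if 0
static int example_mmio_probe(void)
{
        void __iomem *regs;
        u32 status;

        regs = ioremap_nocache(0xfebf0000, 0x1000);     /* assumed BAR */
        if (!regs)
                return -ENOMEM;

        writel(0x1, regs + 0x00);               /* hypothetical enable bit */
        status = readl(regs + 0x04);            /* hypothetical status reg */

        iounmap(regs);
        return status ? 0 : -EIO;
}
#endif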

/**
 * ioremap_wc - map memory into CPU space write combined
 * @phys_addr: bus address of the memory
 * @size:      size of the resource to map
 *
 * This version of ioremap ensures that the memory is marked write combining.
 * Write combining allows faster writes to some hardware devices.
 *
 * Must be freed with iounmap.
 */
void __iomem *ioremap_wc(unsigned long phys_addr, unsigned long size)
{
        if (pat_wc_enabled)
                return __ioremap(phys_addr, size, _PAGE_CACHE_WC);
        else
                return ioremap_nocache(phys_addr, size);
}
EXPORT_SYMBOL(ioremap_wc);
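
/*
 * Usage sketch for ioremap_wc() (the framebuffer base and size are
 * hypothetical): write-combining is the usual choice for framebuffer-style
 * memory, and the helper quietly falls back to an uncached mapping when
 * write-combining/PAT support is not enabled.
 */
#if 0
static void __iomem *example_map_framebuffer(void)
{
        return ioremap_wc(0xd0000000UL, 8 * 1024 * 1024);       /* assumed LFB */
}
#endif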

void __iomem *ioremap_cache(resource_size_t phys_addr, unsigned long size)
{
        return __ioremap(phys_addr, size, _PAGE_CACHE_WB);
}
EXPORT_SYMBOL(ioremap_cache);

/**
 * iounmap - Free an IO remapping
 * @addr: virtual address from ioremap_*
 *
 * Caller must ensure there is only one unmapping for the same pointer.
 */
void iounmap(volatile void __iomem *addr)
{
        struct vm_struct *p, *o;

        if ((void __force *)addr <= high_memory)
                return;

        /*
         * __ioremap special-cases the PCI/ISA range by not instantiating a
         * vm_area and by simply returning an address into the kernel mapping
         * of ISA space. So handle that here.
         */
        if (addr >= phys_to_virt(ISA_START_ADDRESS) &&
            addr < phys_to_virt(ISA_END_ADDRESS))
                return;

        addr = (volatile void __iomem *)
                (PAGE_MASK & (unsigned long __force)addr);

        /* Use the vm area unlocked, assuming the caller
           ensures there isn't another iounmap for the same address
           in parallel. Reuse of the virtual address is prevented by
           leaving it in the global lists until we're done with it.
           cpa takes care of the direct mappings. */
        read_lock(&vmlist_lock);
        for (p = vmlist; p; p = p->next) {
                if (p->addr == addr)
                        break;
        }
        read_unlock(&vmlist_lock);

        if (!p) {
                printk(KERN_ERR "iounmap: bad address %p\n", addr);
                dump_stack();
                return;
        }

        free_memtype(p->phys_addr, p->phys_addr + get_vm_area_size(p));

        /* Finally remove it */
        o = remove_vm_area((void *)addr);
        BUG_ON(p != o || o == NULL);
        kfree(p);
}
EXPORT_SYMBOL(iounmap);

#ifdef CONFIG_X86_32

int __initdata early_ioremap_debug;

static int __init early_ioremap_debug_setup(char *str)
{
        early_ioremap_debug = 1;

        return 0;
}
early_param("early_ioremap_debug", early_ioremap_debug_setup);

static __initdata int after_paging_init;
static pte_t bm_pte[PAGE_SIZE/sizeof(pte_t)]
                __section(.bss.page_aligned);

static inline pmd_t * __init early_ioremap_pmd(unsigned long addr)
{
        /* Don't assume we're using swapper_pg_dir at this point */
        pgd_t *base = __va(read_cr3());
        pgd_t *pgd = &base[pgd_index(addr)];
        pud_t *pud = pud_offset(pgd, addr);
        pmd_t *pmd = pmd_offset(pud, addr);

        return pmd;
}

static inline pte_t * __init early_ioremap_pte(unsigned long addr)
{
        return &bm_pte[pte_index(addr)];
}

void __init early_ioremap_init(void)
{
        pmd_t *pmd;

        if (early_ioremap_debug)
                printk(KERN_INFO "early_ioremap_init()\n");

        pmd = early_ioremap_pmd(fix_to_virt(FIX_BTMAP_BEGIN));
        memset(bm_pte, 0, sizeof(bm_pte));
        pmd_populate_kernel(&init_mm, pmd, bm_pte);

        /*
         * The boot-ioremap range spans multiple pmds, for which
         * we are not prepared:
         */
        if (pmd != early_ioremap_pmd(fix_to_virt(FIX_BTMAP_END))) {
                WARN_ON(1);
                printk(KERN_WARNING "pmd %p != %p\n",
                       pmd, early_ioremap_pmd(fix_to_virt(FIX_BTMAP_END)));
                printk(KERN_WARNING "fix_to_virt(FIX_BTMAP_BEGIN): %08lx\n",
                        fix_to_virt(FIX_BTMAP_BEGIN));
                printk(KERN_WARNING "fix_to_virt(FIX_BTMAP_END):   %08lx\n",
                        fix_to_virt(FIX_BTMAP_END));

                printk(KERN_WARNING "FIX_BTMAP_END:       %d\n", FIX_BTMAP_END);
                printk(KERN_WARNING "FIX_BTMAP_BEGIN:     %d\n",
                       FIX_BTMAP_BEGIN);
        }
}

void __init early_ioremap_clear(void)
{
        pmd_t *pmd;

        if (early_ioremap_debug)
                printk(KERN_INFO "early_ioremap_clear()\n");

        pmd = early_ioremap_pmd(fix_to_virt(FIX_BTMAP_BEGIN));
        pmd_clear(pmd);
        paravirt_release_pt(__pa(bm_pte) >> PAGE_SHIFT);
        __flush_tlb_all();
}

void __init early_ioremap_reset(void)
{
        enum fixed_addresses idx;
        unsigned long addr, phys;
        pte_t *pte;

        after_paging_init = 1;
        for (idx = FIX_BTMAP_BEGIN; idx >= FIX_BTMAP_END; idx--) {
                addr = fix_to_virt(idx);
                pte = early_ioremap_pte(addr);
                if (pte_present(*pte)) {
                        phys = pte_val(*pte) & PAGE_MASK;
                        set_fixmap(idx, phys);
                }
        }
}

static void __init __early_set_fixmap(enum fixed_addresses idx,
                                   unsigned long phys, pgprot_t flags)
{
        unsigned long addr = __fix_to_virt(idx);
        pte_t *pte;

        if (idx >= __end_of_fixed_addresses) {
                BUG();
                return;
        }
        pte = early_ioremap_pte(addr);
        if (pgprot_val(flags))
                set_pte(pte, pfn_pte(phys >> PAGE_SHIFT, flags));
        else
                pte_clear(NULL, addr, pte);
        __flush_tlb_one(addr);
}

static inline void __init early_set_fixmap(enum fixed_addresses idx,
                                        unsigned long phys)
{
        if (after_paging_init)
                set_fixmap(idx, phys);
        else
                __early_set_fixmap(idx, phys, PAGE_KERNEL);
}

static inline void __init early_clear_fixmap(enum fixed_addresses idx)
{
        if (after_paging_init)
                clear_fixmap(idx);
        else
                __early_set_fixmap(idx, 0, __pgprot(0));
}

int __initdata early_ioremap_nested;

static int __init check_early_ioremap_leak(void)
{
        if (!early_ioremap_nested)
                return 0;

        printk(KERN_WARNING
               "Debug warning: early ioremap leak of %d areas detected.\n",
               early_ioremap_nested);
        printk(KERN_WARNING
               "please boot with early_ioremap_debug and report the dmesg.\n");
        WARN_ON(1);

        return 1;
}
late_initcall(check_early_ioremap_leak);

void __init *early_ioremap(unsigned long phys_addr, unsigned long size)
{
        unsigned long offset, last_addr;
        unsigned int nrpages, nesting;
        enum fixed_addresses idx0, idx;

        WARN_ON(system_state != SYSTEM_BOOTING);

        nesting = early_ioremap_nested;
        if (early_ioremap_debug) {
                printk(KERN_INFO "early_ioremap(%08lx, %08lx) [%d] => ",
                       phys_addr, size, nesting);
                dump_stack();
        }

        /* Don't allow wraparound or zero size */
        last_addr = phys_addr + size - 1;
        if (!size || last_addr < phys_addr) {
                WARN_ON(1);
                return NULL;
        }

        if (nesting >= FIX_BTMAPS_NESTING) {
                WARN_ON(1);
                return NULL;
        }
        early_ioremap_nested++;
        /*
         * Mappings have to be page-aligned
         */
        offset = phys_addr & ~PAGE_MASK;
        phys_addr &= PAGE_MASK;
        size = PAGE_ALIGN(last_addr + 1) - phys_addr;

        /*
         * Mappings have to fit in the FIX_BTMAP area.
         */
        nrpages = size >> PAGE_SHIFT;
        if (nrpages > NR_FIX_BTMAPS) {
                WARN_ON(1);
                return NULL;
        }

        /*
         * Ok, go for it..
         */
        idx0 = FIX_BTMAP_BEGIN - NR_FIX_BTMAPS*nesting;
        idx = idx0;
        while (nrpages > 0) {
                early_set_fixmap(idx, phys_addr);
                phys_addr += PAGE_SIZE;
                --idx;
                --nrpages;
        }
        if (early_ioremap_debug)
                printk(KERN_CONT "%08lx + %08lx\n", offset, fix_to_virt(idx0));

        return (void *) (offset + fix_to_virt(idx0));
}
543
544 void __init early_iounmap(void *addr, unsigned long size)
545 {
546         unsigned long virt_addr;
547         unsigned long offset;
548         unsigned int nrpages;
549         enum fixed_addresses idx;
550         unsigned int nesting;
551
552         nesting = --early_ioremap_nested;
553         WARN_ON(nesting < 0);
554
555         if (early_ioremap_debug) {
556                 printk(KERN_INFO "early_iounmap(%p, %08lx) [%d]\n", addr,
557                        size, nesting);
558                 dump_stack();
559         }
560
561         virt_addr = (unsigned long)addr;
562         if (virt_addr < fix_to_virt(FIX_BTMAP_BEGIN)) {
563                 WARN_ON(1);
564                 return;
565         }
566         offset = virt_addr & ~PAGE_MASK;
        nrpages = PAGE_ALIGN(offset + size) >> PAGE_SHIFT;

        idx = FIX_BTMAP_BEGIN - NR_FIX_BTMAPS*nesting;
        while (nrpages > 0) {
                early_clear_fixmap(idx);
                --idx;
                --nrpages;
        }
}
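
/*
 * A boot-time usage sketch (the physical address is hypothetical):
 * early_ioremap() hands out a temporary fixmap-backed mapping before the
 * normal ioremap() machinery is up, and every mapping must be released
 * again with early_iounmap().
 */
#if 0
static void __init example_peek_firmware_table(unsigned long table_phys)
{
        void *table;

        table = early_ioremap(table_phys, PAGE_SIZE);
        if (!table)
                return;

        /* ... inspect the first page of the table here ... */

        early_iounmap(table, PAGE_SIZE);
}
#endif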

void __this_fixmap_does_not_exist(void)
{
        WARN_ON(1);
}

#endif /* CONFIG_X86_32 */