vmallocinfo: add caller information
arch/x86/mm/ioremap.c
/*
 * Re-map IO memory to kernel address space so that we can access it.
 * This is needed for high PCI addresses that aren't mapped in the
 * 640K-1MB IO memory area on PCs.
 *
 * (C) Copyright 1995 1996 Linus Torvalds
 */

#include <linux/bootmem.h>
#include <linux/init.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>

#include <asm/cacheflush.h>
#include <asm/e820.h>
#include <asm/fixmap.h>
#include <asm/pgtable.h>
#include <asm/tlbflush.h>
#include <asm/pgalloc.h>
#include <asm/pat.h>

#ifdef CONFIG_X86_64

unsigned long __phys_addr(unsigned long x)
{
        if (x >= __START_KERNEL_map)
                return x - __START_KERNEL_map + phys_base;
        return x - PAGE_OFFSET;
}
EXPORT_SYMBOL(__phys_addr);
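
/*
 * Example (illustrative sketch): __phys_addr() inverts both 64-bit kernel
 * virtual mappings, so an address in the kernel text mapping and the
 * direct-map alias of the same page resolve to the same physical address:
 *
 *      unsigned long phys = __phys_addr((unsigned long)_text);
 *      BUG_ON(phys != __phys_addr((unsigned long)__va(phys)));
 */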

static inline int phys_addr_valid(unsigned long addr)
{
        return addr < (1UL << boot_cpu_data.x86_phys_bits);
}

#else

static inline int phys_addr_valid(unsigned long addr)
{
        return 1;
}

#endif

int page_is_ram(unsigned long pagenr)
{
        resource_size_t addr, end;
        int i;

        /*
         * A special case is the first 4KB of memory;
         * this is a BIOS-owned area, not kernel RAM, but generally
         * not listed as such in the E820 table.
         */
        if (pagenr == 0)
                return 0;

        /*
         * Second special case: some BIOSen report the PC BIOS
         * area (640K->1MB) as RAM even though it is not.
         */
        if (pagenr >= (BIOS_BEGIN >> PAGE_SHIFT) &&
                    pagenr < (BIOS_END >> PAGE_SHIFT))
                return 0;

        for (i = 0; i < e820.nr_map; i++) {
                /*
                 * Not usable memory:
                 */
                if (e820.map[i].type != E820_RAM)
                        continue;
                addr = (e820.map[i].addr + PAGE_SIZE-1) >> PAGE_SHIFT;
                end = (e820.map[i].addr + e820.map[i].size) >> PAGE_SHIFT;

                if ((pagenr >= addr) && (pagenr < end))
                        return 1;
        }
        return 0;
}
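
/*
 * Example (hypothetical caller): code that must refuse to map a physical
 * range overlapping usable RAM can scan it page by page, much like
 * __ioremap_caller() does below:
 *
 *      for (pfn = start >> PAGE_SHIFT; pfn <= (end >> PAGE_SHIFT); pfn++)
 *              if (page_is_ram(pfn))
 *                      return -EBUSY;
 *
 * where start/end are made-up variables bounding the range.
 */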

/*
 * Fix up the linear direct mapping of the kernel to avoid cache attribute
 * conflicts.
 */
int ioremap_change_attr(unsigned long vaddr, unsigned long size,
                               unsigned long prot_val)
{
        unsigned long nrpages = size >> PAGE_SHIFT;
        int err;

        switch (prot_val) {
        case _PAGE_CACHE_UC:
        default:
                err = _set_memory_uc(vaddr, nrpages);
                break;
        case _PAGE_CACHE_WC:
                err = _set_memory_wc(vaddr, nrpages);
                break;
        case _PAGE_CACHE_WB:
                err = _set_memory_wb(vaddr, nrpages);
                break;
        }

        return err;
}
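
/*
 * Example (sketch): on x86, two mappings of the same physical page with
 * conflicting cache attributes are undefined, so after establishing an
 * uncached mapping of RAM-backed space the direct-map alias has to be
 * switched to match; something like:
 *
 *      err = ioremap_change_attr((unsigned long)__va(phys_addr), size,
 *                                _PAGE_CACHE_UC);
 *
 * (phys_addr and size here are placeholders for the range just mapped.)
 */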

/*
 * Remap an arbitrary physical address space into the kernel virtual
 * address space. Needed when the kernel wants to access high addresses
 * directly.
 *
 * NOTE! We need to allow non-page-aligned mappings too: we will obviously
 * have to convert them into an offset in a page-aligned mapping, but the
 * caller shouldn't need to know that small detail.
 */
static void __iomem *__ioremap_caller(resource_size_t phys_addr,
                unsigned long size, unsigned long prot_val, void *caller)
{
        unsigned long pfn, offset, vaddr;
        resource_size_t last_addr;
        struct vm_struct *area;
        unsigned long new_prot_val;
        pgprot_t prot;
        int retval;

        /* Don't allow wraparound or zero size */
        last_addr = phys_addr + size - 1;
        if (!size || last_addr < phys_addr)
                return NULL;

        if (!phys_addr_valid(phys_addr)) {
                printk(KERN_WARNING "ioremap: invalid physical address %llx\n",
                       (unsigned long long)phys_addr);
                WARN_ON_ONCE(1);
                return NULL;
        }

        /*
         * Don't remap the low PCI/ISA area, it's always mapped..
         */
        if (phys_addr >= ISA_START_ADDRESS && last_addr < ISA_END_ADDRESS)
                return (__force void __iomem *)phys_to_virt(phys_addr);

        /*
         * Don't allow anybody to remap normal RAM that we're using..
         */
        for (pfn = phys_addr >> PAGE_SHIFT;
                                (pfn << PAGE_SHIFT) < last_addr; pfn++) {

                int is_ram = page_is_ram(pfn);

                if (is_ram && pfn_valid(pfn) && !PageReserved(pfn_to_page(pfn)))
                        return NULL;
                WARN_ON_ONCE(is_ram);
        }

        /*
         * Mappings have to be page-aligned
         */
        offset = phys_addr & ~PAGE_MASK;
        phys_addr &= PAGE_MASK;
        size = PAGE_ALIGN(last_addr+1) - phys_addr;

        retval = reserve_memtype(phys_addr, phys_addr + size,
                                                prot_val, &new_prot_val);
        if (retval) {
                pr_debug("Warning: reserve_memtype returned %d\n", retval);
                return NULL;
        }

        if (prot_val != new_prot_val) {
                /*
                 * Do not fall back to certain memory types with certain
                 * requested type:
                 * - request is uncached, return cannot be write-back
                 * - request is uncached, return cannot be write-combine
                 * - request is write-combine, return cannot be write-back
                 */
                if ((prot_val == _PAGE_CACHE_UC &&
                     (new_prot_val == _PAGE_CACHE_WB ||
                      new_prot_val == _PAGE_CACHE_WC)) ||
                    (prot_val == _PAGE_CACHE_WC &&
                     new_prot_val == _PAGE_CACHE_WB)) {
                        pr_debug(
                "ioremap error for 0x%llx-0x%llx, requested 0x%lx, got 0x%lx\n",
                                (unsigned long long)phys_addr,
                                (unsigned long long)(phys_addr + size),
                                prot_val, new_prot_val);
                        free_memtype(phys_addr, phys_addr + size);
                        return NULL;
                }
                prot_val = new_prot_val;
        }

        switch (prot_val) {
        case _PAGE_CACHE_UC:
        default:
                prot = PAGE_KERNEL_NOCACHE;
                break;
        case _PAGE_CACHE_WC:
                prot = PAGE_KERNEL_WC;
                break;
        case _PAGE_CACHE_WB:
                prot = PAGE_KERNEL;
                break;
        }

        /*
         * Ok, go for it..
         */
        area = get_vm_area_caller(size, VM_IOREMAP, caller);
        if (!area)
                return NULL;
        area->phys_addr = phys_addr;
        vaddr = (unsigned long) area->addr;
        if (ioremap_page_range(vaddr, vaddr + size, phys_addr, prot)) {
                free_memtype(phys_addr, phys_addr + size);
                free_vm_area(area);
                return NULL;
        }

        if (ioremap_change_attr(vaddr, size, prot_val) < 0) {
                free_memtype(phys_addr, phys_addr + size);
                vunmap(area->addr);
                return NULL;
        }

        return (void __iomem *) (vaddr + offset);
}

/**
 * ioremap_nocache - map bus memory into CPU space
 * @phys_addr: bus address of the memory
 * @size: size of the resource to map
 *
 * ioremap_nocache performs a platform specific sequence of operations to
 * make bus memory CPU accessible via the readb/readw/readl/writeb/
 * writew/writel functions and the other mmio helpers. The returned
 * address is not guaranteed to be usable directly as a virtual
 * address.
 *
 * This version of ioremap ensures that the memory is marked uncacheable
 * on the CPU as well as honouring existing caching rules from things like
 * the PCI bus. Note that there are other caches and buffers on many
 * busses. In particular driver authors should read up on PCI writes.
 *
 * It's useful if some control registers are in such an area and
 * write combining or read caching is not desirable.
 *
 * Must be freed with iounmap.
 */
void __iomem *ioremap_nocache(resource_size_t phys_addr, unsigned long size)
{
        return __ioremap_caller(phys_addr, size, _PAGE_CACHE_UC,
                                __builtin_return_address(0));
}
EXPORT_SYMBOL(ioremap_nocache);
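
/*
 * Example usage (hypothetical PCI driver; CTRL_REG is a made-up register
 * offset):
 *
 *      void __iomem *regs;
 *
 *      regs = ioremap_nocache(pci_resource_start(pdev, 0),
 *                             pci_resource_len(pdev, 0));
 *      if (!regs)
 *              return -ENOMEM;
 *      writel(1, regs + CTRL_REG);
 *      ...
 *      iounmap(regs);
 */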

/**
 * ioremap_wc - map memory into CPU space write combined
 * @phys_addr: bus address of the memory
 * @size: size of the resource to map
 *
 * This version of ioremap ensures that the memory is marked write combining.
 * Write combining allows faster writes to some hardware devices.
 *
 * Must be freed with iounmap.
 */
void __iomem *ioremap_wc(unsigned long phys_addr, unsigned long size)
{
        if (pat_wc_enabled)
                return __ioremap_caller(phys_addr, size, _PAGE_CACHE_WC,
                                        __builtin_return_address(0));
        else
                return ioremap_nocache(phys_addr, size);
}
EXPORT_SYMBOL(ioremap_wc);
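
/*
 * Example (sketch): a framebuffer driver might map its video aperture
 * write-combined; when PAT is not enabled this transparently falls back
 * to an uncached mapping:
 *
 *      info->screen_base = ioremap_wc(info->fix.smem_start,
 *                                     info->fix.smem_len);
 */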

void __iomem *ioremap_cache(resource_size_t phys_addr, unsigned long size)
{
        return __ioremap_caller(phys_addr, size, _PAGE_CACHE_WB,
                                __builtin_return_address(0));
}
EXPORT_SYMBOL(ioremap_cache);

/**
 * iounmap - Free an IO remapping
 * @addr: virtual address from ioremap_*
 *
 * Caller must ensure there is only one unmapping for the same pointer.
 */
void iounmap(volatile void __iomem *addr)
{
        struct vm_struct *p, *o;

        if ((void __force *)addr <= high_memory)
                return;

        /*
         * __ioremap special-cases the PCI/ISA range by not instantiating a
         * vm_area and by simply returning an address into the kernel mapping
         * of ISA space. So handle that here.
         */
        if (addr >= phys_to_virt(ISA_START_ADDRESS) &&
            addr < phys_to_virt(ISA_END_ADDRESS))
                return;

        addr = (volatile void __iomem *)
                (PAGE_MASK & (unsigned long __force)addr);

        /* Use the vm area unlocked, assuming the caller
           ensures there isn't another iounmap for the same address
           in parallel. Reuse of the virtual address is prevented by
           leaving it in the global lists until we're done with it.
           cpa takes care of the direct mappings. */
        read_lock(&vmlist_lock);
        for (p = vmlist; p; p = p->next) {
                if (p->addr == (void __force *)addr)
                        break;
        }
        read_unlock(&vmlist_lock);

        if (!p) {
                printk(KERN_ERR "iounmap: bad address %p\n", addr);
                dump_stack();
                return;
        }

        free_memtype(p->phys_addr, p->phys_addr + get_vm_area_size(p));

        /* Finally remove it */
        o = remove_vm_area((void *)addr);
        BUG_ON(p != o || o == NULL);
        kfree(p);
}
EXPORT_SYMBOL(iounmap);

/*
 * Convert a physical pointer to a virtual kernel pointer for /dev/mem
 * access
 */
void *xlate_dev_mem_ptr(unsigned long phys)
{
        void *addr;
        unsigned long start = phys & PAGE_MASK;

        /* If page is RAM, we can use __va. Otherwise ioremap and unmap. */
        if (page_is_ram(start >> PAGE_SHIFT))
                return __va(phys);

        addr = (void *)ioremap(start, PAGE_SIZE);
        if (addr)
                addr = (void *)((unsigned long)addr | (phys & ~PAGE_MASK));

        return addr;
}

void unxlate_dev_mem_ptr(unsigned long phys, void *addr)
{
        if (page_is_ram(phys >> PAGE_SHIFT))
                return;

        iounmap((void __iomem *)((unsigned long)addr & PAGE_MASK));
}
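
/*
 * Example (sketch of the /dev/mem read path): accesses are bracketed by
 * the xlate/unxlate pair, so non-RAM pages get a transient ioremap that
 * is torn down right after the copy:
 *
 *      ptr = xlate_dev_mem_ptr(p);
 *      if (copy_to_user(buf, ptr, sz))
 *              err = -EFAULT;
 *      unxlate_dev_mem_ptr(p, ptr);
 */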

#ifdef CONFIG_X86_32

int __initdata early_ioremap_debug;

static int __init early_ioremap_debug_setup(char *str)
{
        early_ioremap_debug = 1;

        return 0;
}
early_param("early_ioremap_debug", early_ioremap_debug_setup);

static __initdata int after_paging_init;
static pte_t bm_pte[PAGE_SIZE/sizeof(pte_t)]
                __section(.bss.page_aligned);

static inline pmd_t * __init early_ioremap_pmd(unsigned long addr)
{
        /* Don't assume we're using swapper_pg_dir at this point */
        pgd_t *base = __va(read_cr3());
        pgd_t *pgd = &base[pgd_index(addr)];
        pud_t *pud = pud_offset(pgd, addr);
        pmd_t *pmd = pmd_offset(pud, addr);

        return pmd;
}

static inline pte_t * __init early_ioremap_pte(unsigned long addr)
{
        return &bm_pte[pte_index(addr)];
}

void __init early_ioremap_init(void)
{
        pmd_t *pmd;

        if (early_ioremap_debug)
                printk(KERN_INFO "early_ioremap_init()\n");

        pmd = early_ioremap_pmd(fix_to_virt(FIX_BTMAP_BEGIN));
        memset(bm_pte, 0, sizeof(bm_pte));
        pmd_populate_kernel(&init_mm, pmd, bm_pte);

        /*
         * The boot-ioremap range spans multiple pmds, for which
         * we are not prepared:
         */
        if (pmd != early_ioremap_pmd(fix_to_virt(FIX_BTMAP_END))) {
                WARN_ON(1);
                printk(KERN_WARNING "pmd %p != %p\n",
                       pmd, early_ioremap_pmd(fix_to_virt(FIX_BTMAP_END)));
                printk(KERN_WARNING "fix_to_virt(FIX_BTMAP_BEGIN): %08lx\n",
                        fix_to_virt(FIX_BTMAP_BEGIN));
                printk(KERN_WARNING "fix_to_virt(FIX_BTMAP_END):   %08lx\n",
                        fix_to_virt(FIX_BTMAP_END));

                printk(KERN_WARNING "FIX_BTMAP_END:       %d\n", FIX_BTMAP_END);
                printk(KERN_WARNING "FIX_BTMAP_BEGIN:     %d\n",
                       FIX_BTMAP_BEGIN);
        }
}

void __init early_ioremap_clear(void)
{
        pmd_t *pmd;

        if (early_ioremap_debug)
                printk(KERN_INFO "early_ioremap_clear()\n");

        pmd = early_ioremap_pmd(fix_to_virt(FIX_BTMAP_BEGIN));
        pmd_clear(pmd);
        paravirt_release_pte(__pa(bm_pte) >> PAGE_SHIFT);
        __flush_tlb_all();
}

void __init early_ioremap_reset(void)
{
        enum fixed_addresses idx;
        unsigned long addr, phys;
        pte_t *pte;

        after_paging_init = 1;
        for (idx = FIX_BTMAP_BEGIN; idx >= FIX_BTMAP_END; idx--) {
                addr = fix_to_virt(idx);
                pte = early_ioremap_pte(addr);
                if (pte_present(*pte)) {
                        phys = pte_val(*pte) & PAGE_MASK;
                        set_fixmap(idx, phys);
                }
        }
}

static void __init __early_set_fixmap(enum fixed_addresses idx,
                                   unsigned long phys, pgprot_t flags)
{
        unsigned long addr = __fix_to_virt(idx);
        pte_t *pte;

        if (idx >= __end_of_fixed_addresses) {
                BUG();
                return;
        }
        pte = early_ioremap_pte(addr);
        if (pgprot_val(flags))
                set_pte(pte, pfn_pte(phys >> PAGE_SHIFT, flags));
        else
                pte_clear(NULL, addr, pte);
        __flush_tlb_one(addr);
}

static inline void __init early_set_fixmap(enum fixed_addresses idx,
                                        unsigned long phys)
{
        if (after_paging_init)
                set_fixmap(idx, phys);
        else
                __early_set_fixmap(idx, phys, PAGE_KERNEL);
}

static inline void __init early_clear_fixmap(enum fixed_addresses idx)
{
        if (after_paging_init)
                clear_fixmap(idx);
        else
                __early_set_fixmap(idx, 0, __pgprot(0));
}

int __initdata early_ioremap_nested;

static int __init check_early_ioremap_leak(void)
{
        if (!early_ioremap_nested)
                return 0;

        printk(KERN_WARNING
               "Debug warning: early ioremap leak of %d areas detected.\n",
               early_ioremap_nested);
        printk(KERN_WARNING
               "please boot with early_ioremap_debug and report the dmesg.\n");
        WARN_ON(1);

        return 1;
}
late_initcall(check_early_ioremap_leak);

void __init *early_ioremap(unsigned long phys_addr, unsigned long size)
{
        unsigned long offset, last_addr;
        unsigned int nrpages, nesting;
        enum fixed_addresses idx0, idx;

        WARN_ON(system_state != SYSTEM_BOOTING);

        nesting = early_ioremap_nested;
        if (early_ioremap_debug) {
                printk(KERN_INFO "early_ioremap(%08lx, %08lx) [%d] => ",
                       phys_addr, size, nesting);
                dump_stack();
        }

        /* Don't allow wraparound or zero size */
        last_addr = phys_addr + size - 1;
        if (!size || last_addr < phys_addr) {
                WARN_ON(1);
                return NULL;
        }

        if (nesting >= FIX_BTMAPS_NESTING) {
                WARN_ON(1);
                return NULL;
        }
        early_ioremap_nested++;
        /*
         * Mappings have to be page-aligned
         */
        offset = phys_addr & ~PAGE_MASK;
        phys_addr &= PAGE_MASK;
        size = PAGE_ALIGN(last_addr + 1) - phys_addr;

        /*
         * Mappings have to fit in the FIX_BTMAP area.
         */
        nrpages = size >> PAGE_SHIFT;
        if (nrpages > NR_FIX_BTMAPS) {
                WARN_ON(1);
                return NULL;
        }

        /*
         * Ok, go for it..
         */
        idx0 = FIX_BTMAP_BEGIN - NR_FIX_BTMAPS*nesting;
        idx = idx0;
        while (nrpages > 0) {
                early_set_fixmap(idx, phys_addr);
                phys_addr += PAGE_SIZE;
                --idx;
                --nrpages;
        }
        if (early_ioremap_debug)
                printk(KERN_CONT "%08lx + %08lx\n", offset, fix_to_virt(idx0));

        return (void *) (offset + fix_to_virt(idx0));
}
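
/*
 * Example (sketch): boot code that needs firmware data before the normal
 * ioremap() machinery is up can use this fixmap-backed variant, e.g. to
 * peek at an ACPI table header at a known physical address table_phys
 * (a placeholder here):
 *
 *      hdr = early_ioremap(table_phys, sizeof(*hdr));
 *      ... inspect hdr ...
 *      early_iounmap(hdr, sizeof(*hdr));
 */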

void __init early_iounmap(void *addr, unsigned long size)
{
        unsigned long virt_addr;
        unsigned long offset;
        unsigned int nrpages;
        enum fixed_addresses idx;
        int nesting;

        nesting = --early_ioremap_nested;
        WARN_ON(nesting < 0);

        if (early_ioremap_debug) {
                printk(KERN_INFO "early_iounmap(%p, %08lx) [%d]\n", addr,
                       size, nesting);
                dump_stack();
        }

        virt_addr = (unsigned long)addr;
        if (virt_addr < fix_to_virt(FIX_BTMAP_BEGIN)) {
                WARN_ON(1);
                return;
        }
        offset = virt_addr & ~PAGE_MASK;
        nrpages = PAGE_ALIGN(offset + size) >> PAGE_SHIFT;

        idx = FIX_BTMAP_BEGIN - NR_FIX_BTMAPS*nesting;
        while (nrpages > 0) {
                early_clear_fixmap(idx);
                --idx;
                --nrpages;
        }
}

void __this_fixmap_does_not_exist(void)
{
        WARN_ON(1);
}

#endif /* CONFIG_X86_32 */