Merge branch 'linus' into tracing/mmiotrace
diff --git a/arch/x86/mm/ioremap.c b/arch/x86/mm/ioremap.c
index 3f7f05e..e92aa46 100644
--- a/arch/x86/mm/ioremap.c
+++ b/arch/x86/mm/ioremap.c
@@ -12,6 +12,7 @@
 #include <linux/module.h>
 #include <linux/slab.h>
 #include <linux/vmalloc.h>
+#include <linux/mmiotrace.h>
 
 #include <asm/cacheflush.h>
 #include <asm/e820.h>
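
Note on the new include: the mmiotrace hooks called later in this diff
(mmiotrace_ioremap()/mmiotrace_iounmap()) come from this header. A minimal
sketch of the usual compile-out pattern for such hooks, assuming
CONFIG_MMIOTRACE gates the tracer and that the prototypes match the call
sites below (an assumption, not copied from the real header):

#ifdef CONFIG_MMIOTRACE
/* Tracer built in: record each mapping so MMIO accesses can be decoded. */
extern void mmiotrace_ioremap(resource_size_t offset, unsigned long size,
			      void __iomem *addr);
extern void mmiotrace_iounmap(volatile void __iomem *addr);
#else
/* Tracer compiled out: the calls below collapse to empty inlines. */
static inline void mmiotrace_ioremap(resource_size_t offset,
				     unsigned long size, void __iomem *addr)
{
}
static inline void mmiotrace_iounmap(volatile void __iomem *addr)
{
}
#endif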
@@ -47,7 +48,7 @@ static inline int phys_addr_valid(unsigned long addr)
 
 int page_is_ram(unsigned long pagenr)
 {
-       unsigned long addr, end;
+       resource_size_t addr, end;
        int i;
 
        /*
@@ -117,14 +118,18 @@ int ioremap_change_attr(unsigned long vaddr, unsigned long size,
  * have to convert them into an offset in a page-aligned mapping, but the
  * caller shouldn't need to know that small detail.
  */
-static void __iomem *__ioremap(resource_size_t phys_addr, unsigned long size,
-                              unsigned long prot_val)
+static void __iomem *__ioremap_caller(resource_size_t phys_addr,
+               unsigned long size, unsigned long prot_val, void *caller)
 {
-       unsigned long pfn, offset, last_addr, vaddr;
+       unsigned long pfn, offset, vaddr;
+       resource_size_t last_addr;
+       const resource_size_t unaligned_phys_addr = phys_addr;
+       const unsigned long unaligned_size = size;
        struct vm_struct *area;
        unsigned long new_prot_val;
        pgprot_t prot;
        int retval;
+       void __iomem *ret_addr;
 
        /* Don't allow wraparound or zero size */
        last_addr = phys_addr + size - 1;
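
The resource_size_t changes here and in page_is_ram() above are the
substance of this hunk: on x86-32 with 64-bit resources (PAE), a physical
address can exceed what unsigned long holds. A worked example with a
hypothetical device address:

/* Hypothetical BAR at 4 GiB on a 32-bit PAE kernel:
 *   unsigned long   phys = 0x100000000ULL;   -> truncates to 0x0
 *   resource_size_t phys = 0x100000000ULL;   -> preserved (64-bit)
 * With size = 0x1000, last_addr = 0x100000fff, so the wraparound test
 * (!size || last_addr < phys_addr) operates on the full 64-bit value
 * instead of a truncated one. */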
@@ -133,7 +138,7 @@ static void __iomem *__ioremap(resource_size_t phys_addr, unsigned long size,
 
        if (!phys_addr_valid(phys_addr)) {
                printk(KERN_WARNING "ioremap: invalid physical address %llx\n",
-                      phys_addr);
+                      (unsigned long long)phys_addr);
                WARN_ON_ONCE(1);
                return NULL;
        }
@@ -148,7 +153,8 @@ static void __iomem *__ioremap(resource_size_t phys_addr, unsigned long size,
         * Don't allow anybody to remap normal RAM that we're using..
         */
        for (pfn = phys_addr >> PAGE_SHIFT;
-                               (pfn << PAGE_SHIFT) < last_addr; pfn++) {
+                               (pfn << PAGE_SHIFT) < (last_addr & PAGE_MASK);
+                               pfn++) {
 
                int is_ram = page_is_ram(pfn);
 
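Worked arithmetic for the changed loop bound (hypothetical values):

/* Hypothetical request: phys_addr = 0x9000, size = 0x2800 (4 KiB pages).
 *   last_addr             = 0x9000 + 0x2800 - 1 = 0xb7ff
 *   old bound: pfn addresses < 0xb7ff -> checks 0x9000, 0xa000, 0xb000
 *   new bound: pfn addresses < 0xb000 -> checks 0x9000, 0xa000
 * The page holding only the tail of the request is no longer tested. */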
@@ -167,7 +173,7 @@ static void __iomem *__ioremap(resource_size_t phys_addr, unsigned long size,
        retval = reserve_memtype(phys_addr, phys_addr + size,
                                                prot_val, &new_prot_val);
        if (retval) {
-               printk("reserve_memtype returned %d\n", retval);
+               pr_debug("Warning: reserve_memtype returned %d\n", retval);
                return NULL;
        }
 
@@ -175,18 +181,19 @@ static void __iomem *__ioremap(resource_size_t phys_addr, unsigned long size,
                /*
                 * Do not fallback to certain memory types with certain
                 * requested type:
-                * - request is uncached, return cannot be write-back
-                * - request is uncached, return cannot be write-combine
+                * - request is uc-, return cannot be write-back
+                * - request is uc-, return cannot be write-combine
                 * - request is write-combine, return cannot be write-back
                 */
-               if ((prot_val == _PAGE_CACHE_UC &&
+               if ((prot_val == _PAGE_CACHE_UC_MINUS &&
                     (new_prot_val == _PAGE_CACHE_WB ||
                      new_prot_val == _PAGE_CACHE_WC)) ||
                    (prot_val == _PAGE_CACHE_WC &&
                     new_prot_val == _PAGE_CACHE_WB)) {
-                       printk(
+                       pr_debug(
                "ioremap error for 0x%llx-0x%llx, requested 0x%lx, got 0x%lx\n",
-                               phys_addr, phys_addr + size,
+                               (unsigned long long)phys_addr,
+                               (unsigned long long)(phys_addr + size),
                                prot_val, new_prot_val);
                        free_memtype(phys_addr, phys_addr + size);
                        return NULL;
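
Restating the fallback checks above as a table (derived directly from the
conditions in this hunk, no new behavior):

/* request (prot_val)      acceptable new_prot_val from reserve_memtype
 * _PAGE_CACHE_UC_MINUS    UC or UC-       (WB and WC are rejected)
 * _PAGE_CACHE_WC          UC, UC- or WC   (WB is rejected)
 * _PAGE_CACHE_WB          anything        (no check)
 * i.e. an existing mapping may be stricter than the request, never
 * weaker. */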
@@ -199,6 +206,9 @@ static void __iomem *__ioremap(resource_size_t phys_addr, unsigned long size,
        default:
                prot = PAGE_KERNEL_NOCACHE;
                break;
+       case _PAGE_CACHE_UC_MINUS:
+               prot = PAGE_KERNEL_UC_MINUS;
+               break;
        case _PAGE_CACHE_WC:
                prot = PAGE_KERNEL_WC;
                break;
@@ -210,7 +220,7 @@ static void __iomem *__ioremap(resource_size_t phys_addr, unsigned long size,
        /*
         * Ok, go for it..
         */
-       area = get_vm_area(size, VM_IOREMAP);
+       area = get_vm_area_caller(size, VM_IOREMAP, caller);
        if (!area)
                return NULL;
        area->phys_addr = phys_addr;
@@ -227,7 +237,10 @@ static void __iomem *__ioremap(resource_size_t phys_addr, unsigned long size,
                return NULL;
        }
 
-       return (void __iomem *) (vaddr + offset);
+       ret_addr = (void __iomem *) (vaddr + offset);
+       mmiotrace_ioremap(unaligned_phys_addr, unaligned_size, ret_addr);
+
+       return ret_addr;
 }
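
A worked example of what the function hands back versus what mmiotrace
records, assuming the usual page-alignment step elided between the hunks
(offset = phys_addr & ~PAGE_MASK, then phys_addr and size are page
aligned); all values are hypothetical:

/* Hypothetical: __ioremap_caller(0x12345678, 0x100, ...).
 *   unaligned_phys_addr = 0x12345678, unaligned_size = 0x100 (saved first)
 *   offset = 0x678, mapped base = 0x12345000, mapped size = 0x1000
 *   ret_addr = vaddr + 0x678 -> points at the byte the caller asked for
 *   mmiotrace_ioremap() logs the original (0x12345678, 0x100) pair, so
 *   the trace reflects the driver's request, not the page-rounded map. */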
 
 /**
@@ -253,7 +266,17 @@ static void __iomem *__ioremap(resource_size_t phys_addr, unsigned long size,
  */
 void __iomem *ioremap_nocache(resource_size_t phys_addr, unsigned long size)
 {
-       return __ioremap(phys_addr, size, _PAGE_CACHE_UC);
+       /*
+        * Ideally, this should be:
+        *      pat_wc_enabled ? _PAGE_CACHE_UC : _PAGE_CACHE_UC_MINUS;
+        *
+        * Till we fix all X drivers to use ioremap_wc(), we will use
+        * UC MINUS.
+        */
+       unsigned long val = _PAGE_CACHE_UC_MINUS;
+
+       return __ioremap_caller(phys_addr, size, val,
+                               __builtin_return_address(0));
 }
 EXPORT_SYMBOL(ioremap_nocache);
 
@@ -270,7 +293,8 @@ EXPORT_SYMBOL(ioremap_nocache);
 void __iomem *ioremap_wc(unsigned long phys_addr, unsigned long size)
 {
        if (pat_wc_enabled)
-               return __ioremap(phys_addr, size, _PAGE_CACHE_WC);
+               return __ioremap_caller(phys_addr, size, _PAGE_CACHE_WC,
+                                       __builtin_return_address(0));
        else
                return ioremap_nocache(phys_addr, size);
 }
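
Hedged usage sketch (fb_phys and fb_len are hypothetical driver variables,
not from this diff): a driver that wants write-combining asks for it
explicitly and degrades to UC- automatically when PAT is off, which is
exactly the fallback coded above:

void __iomem *fb;

fb = ioremap_wc(fb_phys, fb_len);	/* WC with PAT, else UC- */
if (!fb)
	return -ENOMEM;
iowrite32(0, fb);			/* stores may be combined */
iounmap(fb);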
@@ -278,7 +302,8 @@ EXPORT_SYMBOL(ioremap_wc);
 
 void __iomem *ioremap_cache(resource_size_t phys_addr, unsigned long size)
 {
-       return __ioremap(phys_addr, size, _PAGE_CACHE_WB);
+       return __ioremap_caller(phys_addr, size, _PAGE_CACHE_WB,
+                               __builtin_return_address(0));
 }
 EXPORT_SYMBOL(ioremap_cache);
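
Note on the repeated __builtin_return_address(0): get_vm_area_caller()
stores it in the vm_struct, so /proc/vmallocinfo can attribute each
mapping to the driver that created it rather than to these wrappers. Any
new variant would follow the same idiom (hypothetical wrapper, for
illustration only):

static void __iomem *my_ioremap_uc(resource_size_t phys_addr,
				   unsigned long size)
{
	return __ioremap_caller(phys_addr, size, _PAGE_CACHE_UC,
				__builtin_return_address(0));
}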
 
@@ -307,6 +332,8 @@ void iounmap(volatile void __iomem *addr)
        addr = (volatile void __iomem *)
                (PAGE_MASK & (unsigned long __force)addr);
 
+       mmiotrace_iounmap(addr);
+
        /* Use the vm area unlocked, assuming the caller
           ensures there isn't another iounmap for the same address
           in parallel. Reuse of the virtual address is prevented by
@@ -334,6 +361,35 @@ void iounmap(volatile void __iomem *addr)
 }
 EXPORT_SYMBOL(iounmap);
 
+/*
+ * Convert a physical pointer to a virtual kernel pointer for /dev/mem
+ * access
+ */
+void *xlate_dev_mem_ptr(unsigned long phys)
+{
+       void *addr;
+       unsigned long start = phys & PAGE_MASK;
+
+       /* If page is RAM, we can use __va. Otherwise ioremap and unmap. */
+       if (page_is_ram(start >> PAGE_SHIFT))
+               return __va(phys);
+
+       addr = (void *)ioremap(start, PAGE_SIZE);
+       if (addr)
+               addr = (void *)((unsigned long)addr | (phys & ~PAGE_MASK));
+
+       return addr;
+}
+
+void unxlate_dev_mem_ptr(unsigned long phys, void *addr)
+{
+       if (page_is_ram(phys >> PAGE_SHIFT))
+               return;
+
+       iounmap((void __iomem *)((unsigned long)addr & PAGE_MASK));
+       return;
+}
+
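
Hedged sketch of the intended consumer (the real user is the /dev/mem
read path in drivers/char/mem.c; this is a simplified illustration, and
p, buf and count are hypothetical locals):

void *ptr;

ptr = xlate_dev_mem_ptr(p);		/* p: physical address */
if (!ptr)
	return -EFAULT;			/* ioremap of non-RAM failed */
if (copy_to_user(buf, ptr, count)) {
	unxlate_dev_mem_ptr(p, ptr);
	return -EFAULT;
}
unxlate_dev_mem_ptr(p, ptr);		/* iounmap only if not RAM */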
 #ifdef CONFIG_X86_32
 
 int __initdata early_ioremap_debug;
@@ -405,7 +461,7 @@ void __init early_ioremap_clear(void)
 
        pmd = early_ioremap_pmd(fix_to_virt(FIX_BTMAP_BEGIN));
        pmd_clear(pmd);
-       paravirt_release_pt(__pa(bm_pte) >> PAGE_SHIFT);
+       paravirt_release_pte(__pa(bm_pte) >> PAGE_SHIFT);
        __flush_tlb_all();
 }
 
@@ -546,10 +602,11 @@ void __init early_iounmap(void *addr, unsigned long size)
        unsigned long offset;
        unsigned int nrpages;
        enum fixed_addresses idx;
-       unsigned int nesting;
+       int nesting;
 
        nesting = --early_ioremap_nested;
-       WARN_ON(nesting < 0);
+       if (WARN_ON(nesting < 0))
+               return;
 
        if (early_ioremap_debug) {
                printk(KERN_INFO "early_iounmap(%p, %08lx) [%d]\n", addr,
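
Why the local went from unsigned int to int: with an unsigned type the
underflow check was dead code. Standalone illustration of the pitfall:

/* unsigned int nesting = 0;
 * nesting = nesting - 1;    -> wraps to UINT_MAX, not -1
 * WARN_ON(nesting < 0);     -> always false for an unsigned type
 * With a plain int the decrement really goes negative, the WARN fires
 * on an unbalanced early_iounmap(), and the new early return skips
 * tearing down fixmap slots that were never set up. */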