Merge remote-tracking branch 'origin/dev/sumit-linux-3.10.96' into TOT-merge
diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c
index 665e5c1..ff01976 100644
@@ -414,6 +414,9 @@ void __init dma_contiguous_early_fixup(phys_addr_t base, unsigned long size)
        dma_mmu_remap_num++;
 }
 
+__init void iotable_init_va(struct map_desc *io_desc, int nr);
+__init void iotable_init_mapping(struct map_desc *io_desc, int nr);
+
 void __init dma_contiguous_remap(void)
 {
        int i;
@@ -428,40 +431,33 @@ void __init dma_contiguous_remap(void)
                if (start >= end)
                        continue;
 
-               map.pfn = __phys_to_pfn(start);
-               map.virtual = __phys_to_virt(start);
-               map.length = end - start;
-               map.type = MT_MEMORY_DMA_READY;
+               map.type = MT_MEMORY;
 
                /*
-                * Clear previous low-memory mapping
+                * Clear previous low-memory mapping to ensure that the
+                * TLB does not see any conflicting entries, then flush
+                * the TLB of the old entries before creating new mappings.
+                *
+                * This ensures that any speculatively loaded TLB entries
+                * (even though they may be rare) cannot cause any problems,
+                * and that this code is architecturally compliant.
                 */
                for (addr = __phys_to_virt(start); addr < __phys_to_virt(end);
                     addr += PMD_SIZE)
                        pmd_clear(pmd_off_k(addr));
 
-               iotable_init(&map, 1);
-       }
-}
-
-static int __dma_update_pte(pte_t *pte, pgtable_t token, unsigned long addr,
-                           void *data)
-{
-       struct page *page = virt_to_page(addr);
-       pgprot_t prot = *(pgprot_t *)data;
-
-       set_pte_ext(pte, mk_pte(page, prot), 0);
-       return 0;
-}
-
-static void __dma_remap(struct page *page, size_t size, pgprot_t prot)
-{
-       unsigned long start = (unsigned long) page_address(page);
-       unsigned end = start + size;
+               for (addr = start; addr < end; addr += PAGE_SIZE) {
+                       map.pfn = __phys_to_pfn(addr);
+                       map.virtual = __phys_to_virt(addr);
+                       map.length = PAGE_SIZE;
+                       iotable_init_mapping(&map, 1);
+               }
 
-       apply_to_page_range(&init_mm, start, size, __dma_update_pte, &prot);
-       dsb();
-       flush_tlb_kernel_range(start, end);
+               map.pfn = __phys_to_pfn(start);
+               map.virtual = __phys_to_virt(start);
+               map.length = end - start;
+               iotable_init_va(&map, 1);
+       }
 }
 
 static void *__alloc_remap_buffer(struct device *dev, size_t size, gfp_t gfp,
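For reference, a minimal sketch of the map_desc that describes a single page
before it is handed to iotable_init_mapping() above (field layout as in
arch/arm/include/asm/mach/map.h; the addresses are illustrative and assume
PHYS_OFFSET 0x80000000 and PAGE_OFFSET 0xc0000000):

    #include <asm/mach/map.h>

    /* Illustrative only: one 4 KiB page of a CMA area at phys 0x80000000. */
    static struct map_desc cma_page_map __initdata = {
            .pfn     = 0x80000,     /* __phys_to_pfn(0x80000000) */
            .virtual = 0xc0000000,  /* __phys_to_virt(0x80000000) */
            .length  = PAGE_SIZE,
            .type    = MT_MEMORY,
    };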
@@ -573,8 +569,6 @@ static void *__alloc_from_contiguous(struct device *dev, size_t size,
        if (!page)
                return NULL;
 
-       __dma_clear_buffer(page, size);
-
        if (PageHighMem(page)) {
                ptr = __dma_alloc_remap(page, size, GFP_KERNEL, prot, caller);
                if (!ptr) {
@@ -582,7 +576,6 @@ static void *__alloc_from_contiguous(struct device *dev, size_t size,
                        return NULL;
                }
        } else {
-               __dma_remap(page, size, prot);
                ptr = page_address(page);
        }
        *ret_page = page;
@@ -594,8 +587,6 @@ static void __free_from_contiguous(struct device *dev, struct page *page,
 {
        if (PageHighMem(page))
                __dma_free_remap(cpu_addr, size);
-       else
-               __dma_remap(page, size, pgprot_kernel);
        dma_release_from_contiguous(dev, page, size >> PAGE_SHIFT);
 }
 
@@ -647,7 +638,7 @@ static void *__dma_alloc(struct device *dev, size_t size, dma_addr_t *handle,
 #ifdef CONFIG_DMA_API_DEBUG
        u64 limit = (mask + 1) & ~mask;
        if (limit && size >= limit) {
-               dev_warn(dev, "coherent allocation too big (requested %#x mask %#llx)\n",
+               dev_warn(dev, "coherent allocation too big (requested %#zx mask %#llx)\n",
                        size, mask);
                return NULL;
        }
@@ -696,7 +687,8 @@ void *arm_dma_alloc(struct device *dev, size_t size, dma_addr_t *handle,
        pgprot_t prot = __get_dma_pgprot(attrs, pgprot_kernel);
        void *memory;
 
-       if (dma_alloc_from_coherent(dev, size, handle, &memory))
+       if (dma_alloc_from_coherent_attr(dev, size, handle,
+                       &memory, attrs))
                return memory;
 
        return __dma_alloc(dev, size, handle, gfp, prot, false,
@@ -709,8 +701,9 @@ static void *arm_coherent_dma_alloc(struct device *dev, size_t size,
        pgprot_t prot = __get_dma_pgprot(attrs, pgprot_kernel);
        void *memory;
 
-       if (dma_alloc_from_coherent(dev, size, handle, &memory))
-               return memory;
+       if (dma_alloc_from_coherent_attr(dev, size, handle,
+                       &memory, attrs))
+               return memory;
 
        return __dma_alloc(dev, size, handle, gfp, prot, true,
                           __builtin_return_address(0));
@@ -755,7 +748,7 @@ static void __arm_dma_free(struct device *dev, size_t size, void *cpu_addr,
 {
        struct page *page = pfn_to_page(dma_to_pfn(dev, handle));
 
-       if (dma_release_from_coherent(dev, get_order(size), cpu_addr))
+       if (dma_release_from_coherent_attr(dev, size, cpu_addr, attrs))
                return;
 
        size = PAGE_ALIGN(size);
@@ -1033,19 +1026,21 @@ static void seq_print_dma_areas(struct seq_file *s, void *bitmap,
        size_t pos = find_first_bit(bitmap, bits), end;
 
        for (; pos < bits; pos = find_next_bit(bitmap, bits, end + 1)) {
+               dma_addr_t start_addr, end_addr;
+
                end = find_next_zero_bit(bitmap, bits, pos);
-               seq_printf(s, "    0x%08x-0x%08x pages=%d\n",
-                          bit_to_addr(pos, base, order),
-                          bit_to_addr(end, base, order) - 1,
-                          (end - pos) << order);
+               start_addr = bit_to_addr(pos, base, order);
+               end_addr = bit_to_addr(end, base, order) - 1;
+               seq_printf(s, "    %pa-%pa pages=%zu\n",
+                          &start_addr, &end_addr, (end - pos) << order);
        }
 }
 
 static void seq_print_mapping(struct seq_file *s,
                              struct dma_iommu_mapping *mapping)
 {
-       seq_printf(s, "  memory map: base=0x%x size=%d order=%d domain=%p\n",
-                  mapping->base, mapping->end - mapping->base,
+       seq_printf(s, "  memory map: base=%pa size=%lld order=%d domain=%p\n",
+                  &mapping->base, (u64)(mapping->end - mapping->base),
                   mapping->order, mapping->domain);
 
        seq_print_dma_areas(s, mapping->bitmap, mapping->base, mapping->bits,
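Both printf fixes lean on the %pa format specifier, which dereferences a
pointer to the address and prints it at full phys_addr_t width, so values
above 4 GiB survive on LPAE builds. A minimal sketch (passing a dma_addr_t
through %pa, as this file now does):

    dma_addr_t start = 0x17f000000ULL;   /* > 32 bits, needs LPAE */
    pr_info("window at %pa\n", &start);  /* note: pointer, not value */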
@@ -1176,13 +1171,37 @@ static void iommu_mapping_list_del(struct dma_iommu_mapping *mapping)
        spin_unlock_irqrestore(&iommu_mapping_list_lock, flags);
 }
 
-static int pg_iommu_map(struct iommu_domain *domain, unsigned long iova,
+static inline int iommu_get_num_pf_pages(struct dma_iommu_mapping *mapping,
+                                        struct dma_attrs *attrs)
+{
+       /* XXX: give priority to DMA_ATTR_SKIP_IOVA_GAP */
+       if (dma_get_attr(DMA_ATTR_SKIP_IOVA_GAP, attrs))
+               return 0;
+
+       /* XXX: currently we support only 1 prefetch page */
+       WARN_ON(mapping->num_pf_page > prefetch_page_count);
+
+       return mapping->num_pf_page;
+}
+
+static inline int iommu_gap_pg_count(struct dma_iommu_mapping *mapping,
+                                    struct dma_attrs *attrs)
+{
+       if (dma_get_attr(DMA_ATTR_SKIP_IOVA_GAP, attrs))
+               return 0;
+
+       return mapping->gap_page ? gap_page_count : 0;
+}
+
+static int pg_iommu_map(struct dma_iommu_mapping *mapping, unsigned long iova,
                        phys_addr_t phys, size_t len, int prot)
 {
        int err;
+       struct iommu_domain *domain = mapping->domain;
        struct dma_attrs *attrs = (struct dma_attrs *)prot;
+       bool need_prefetch_page = !!iommu_get_num_pf_pages(mapping, attrs);
 
-       if (!dma_get_attr(DMA_ATTR_SKIP_IOVA_GAP, attrs)) {
+       if (need_prefetch_page) {
                err = iommu_map(domain, iova + len, iova_gap_phys,
                                PF_PAGES_SIZE, prot);
                if (err)
@@ -1190,18 +1209,20 @@ static int pg_iommu_map(struct iommu_domain *domain, unsigned long iova,
        }
 
        err = iommu_map(domain, iova, phys, len, prot);
-       if (err && !dma_get_attr(DMA_ATTR_SKIP_IOVA_GAP, attrs))
+       if (err && need_prefetch_page)
                iommu_unmap(domain, iova + len, PF_PAGES_SIZE);
 
        return err;
 }
 
-static size_t pg_iommu_unmap(struct iommu_domain *domain,
+static size_t pg_iommu_unmap(struct dma_iommu_mapping *mapping,
                             unsigned long iova, size_t len, int prot)
 {
        struct dma_attrs *attrs = (struct dma_attrs *)prot;
+       struct iommu_domain *domain = mapping->domain;
+       bool need_prefetch_page = !!iommu_get_num_pf_pages(mapping, attrs);
 
-       if (!dma_get_attr(DMA_ATTR_SKIP_IOVA_GAP, attrs)) {
+       if (need_prefetch_page) {
                phys_addr_t phys_addr;
 
                phys_addr = iommu_iova_to_phys(domain, iova + len);
@@ -1212,33 +1233,16 @@ static size_t pg_iommu_unmap(struct iommu_domain *domain,
        return iommu_unmap(domain, iova, len);
 }
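The IOVA layout these helpers maintain, as a sketch (assuming one prefetch
page and PF_PAGES_SIZE == PAGE_SIZE):

    /*
     *  iova                    iova + len           iova + len + PF_PAGES_SIZE
     *   |<------- mapping ------->|<--- prefetch page --->|
     *
     * The trailing page maps the dummy iova_gap_phys frame, so a device
     * engine that prefetches past the end of the buffer reads harmless
     * data instead of raising an IOMMU fault; the unmap side first looks
     * up iova + len to decide whether a prefetch page needs tearing down.
     */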
 
-static int pg_iommu_map_pages(struct iommu_domain *domain, unsigned long iova,
-                   struct page **pages, size_t count, int prot)
-{
-       int err;
-       struct dma_attrs *attrs = (struct dma_attrs *)prot;
-
-       if (!dma_get_attr(DMA_ATTR_SKIP_IOVA_GAP, attrs)) {
-               err = iommu_map(domain, iova + (count << PAGE_SHIFT),
-                               iova_gap_phys, PF_PAGES_SIZE, prot);
-               if (err)
-                       return err;
-       }
-
-       err = iommu_map_pages(domain, iova, pages, count, prot);
-       if (err && !dma_get_attr(DMA_ATTR_SKIP_IOVA_GAP, attrs))
-               iommu_unmap(domain, iova + (count << PAGE_SHIFT), PF_PAGES_SIZE);
-
-       return err;
-}
-
-static int pg_iommu_map_sg(struct iommu_domain *domain, unsigned long iova,
-                struct scatterlist *sgl, int nents, int prot)
+static int pg_iommu_map_sg(struct dma_iommu_mapping *mapping,
+                          unsigned long iova, struct scatterlist *sgl,
+                          int nents, int prot)
 {
        int err;
        struct dma_attrs *attrs = (struct dma_attrs *)prot;
+       struct iommu_domain *domain = mapping->domain;
+       bool need_prefetch_page = !!iommu_get_num_pf_pages(mapping, attrs);
 
-       if (!dma_get_attr(DMA_ATTR_SKIP_IOVA_GAP, attrs)) {
+       if (need_prefetch_page) {
                err = iommu_map(domain, iova + (nents << PAGE_SHIFT),
                                iova_gap_phys, PF_PAGES_SIZE, prot);
                if (err)
@@ -1246,7 +1250,7 @@ static int pg_iommu_map_sg(struct iommu_domain *domain, unsigned long iova,
        }
 
        err = iommu_map_sg(domain, iova, sgl, nents, prot);
-       if (err && !dma_get_attr(DMA_ATTR_SKIP_IOVA_GAP, attrs))
+       if (err && need_prefetch_page)
                iommu_unmap(domain, iova + (nents << PAGE_SHIFT), PF_PAGES_SIZE);
 
        return err;
@@ -1314,11 +1318,14 @@ static inline dma_addr_t __alloc_iova(struct dma_iommu_mapping *mapping,
        if (order > CONFIG_ARM_DMA_IOMMU_ALIGNMENT)
                order = CONFIG_ARM_DMA_IOMMU_ALIGNMENT;
 
+       if (mapping->alignment && order > get_order(mapping->alignment))
+               order = get_order(mapping->alignment);
+
        count = ((PAGE_ALIGN(size) >> PAGE_SHIFT) +
                 (1 << mapping->order) - 1) >> mapping->order;
 
-       if (!dma_get_attr(DMA_ATTR_SKIP_IOVA_GAP, attrs))
-               count += PG_PAGES;
+       count += iommu_get_num_pf_pages(mapping, attrs);
+       count += iommu_gap_pg_count(mapping, attrs);
 
        if (order > mapping->order)
                align = (1 << (order - mapping->order)) - 1;
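A worked example of the new alignment cap (sketch, 4 KiB pages):

    /*
     * mapping->alignment == SZ_1M  =>  get_order(SZ_1M) == 8.
     * An 8 MiB request has order 11, so it is capped to order 8 and the
     * IOVA is aligned to 1 MiB instead of the natural 8 MiB boundary
     * (CONFIG_ARM_DMA_IOMMU_ALIGNMENT permitting).
     */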
@@ -1348,8 +1355,8 @@ static dma_addr_t __alloc_iova_at(struct dma_iommu_mapping *mapping,
        count = ((PAGE_ALIGN(size) >> PAGE_SHIFT) +
                 (1 << mapping->order) - 1) >> mapping->order;
 
-       if (!dma_get_attr(DMA_ATTR_SKIP_IOVA_GAP, attrs))
-               count += PG_PAGES;
+       count += iommu_get_num_pf_pages(mapping, attrs);
+       count += iommu_gap_pg_count(mapping, attrs);
 
        bytes = count << (mapping->order + PAGE_SHIFT);
 
@@ -1405,8 +1412,8 @@ static inline void __free_iova(struct dma_iommu_mapping *mapping,
                              (1 << mapping->order) - 1) >> mapping->order;
        unsigned long flags;
 
-       if (!dma_get_attr(DMA_ATTR_SKIP_IOVA_GAP, attrs))
-               count += PG_PAGES;
+       count += iommu_get_num_pf_pages(mapping, attrs);
+       count += iommu_gap_pg_count(mapping, attrs);
 
        spin_lock_irqsave(&mapping->lock, flags);
        bitmap_clear(mapping->bitmap, start, count);
@@ -1430,7 +1437,8 @@ static struct page **__iommu_alloc_buffer(struct device *dev, size_t size,
        int i = 0;
 
        if (array_size <= PAGE_SIZE)
-               pages = kzalloc(array_size, gfp);
+               pages = kzalloc(array_size,
+                               gfp & ~(__GFP_HIGHMEM | __GFP_DMA32));
        else
                pages = vzalloc(array_size);
        if (!pages)
@@ -1455,9 +1463,12 @@ static struct page **__iommu_alloc_buffer(struct device *dev, size_t size,
 
        /*
         * IOMMU can map any pages, so highmem can also be used here
+        * unless some DMA'able area is explicitly required.
         */
-       gfp |= __GFP_NOWARN | __GFP_HIGHMEM;
+       if (!(gfp & GFP_DMA) && !(gfp & GFP_DMA32))
+               gfp |= __GFP_HIGHMEM;
 
+       gfp |= __GFP_NOWARN;
        while (count) {
                int j, order = __fls(count);
 
@@ -1578,16 +1589,23 @@ ____iommu_create_mapping(struct device *dev, dma_addr_t *req,
                                break;
 
                len = (j - i) << PAGE_SHIFT;
-               ret = pg_iommu_map(mapping->domain, iova, phys, len,
-                                  (int)attrs);
+               ret = iommu_map(mapping->domain, iova, phys, len, (int)attrs);
                if (ret < 0)
                        goto fail;
                iova += len;
                i = j;
        }
+
+       if (iommu_get_num_pf_pages(mapping, attrs)) {
+               int err = iommu_map(mapping->domain, iova, iova_gap_phys,
+                                   PF_PAGES_SIZE, (int)attrs);
+               if (err)
+                       goto fail;
+       }
+
        return dma_addr;
 fail:
-       pg_iommu_unmap(mapping->domain, dma_addr, iova-dma_addr, (int)attrs);
+       iommu_unmap(mapping->domain, dma_addr, iova - dma_addr);
        __free_iova(mapping, dma_addr, size, attrs);
        return DMA_ERROR_CODE;
 }
@@ -1611,7 +1629,7 @@ static int __iommu_remove_mapping(struct device *dev, dma_addr_t iova,
        size = PAGE_ALIGN((iova & ~PAGE_MASK) + size);
        iova &= PAGE_MASK;
 
-       pg_iommu_unmap(mapping->domain, iova, size, (int)attrs);
+       pg_iommu_unmap(mapping, iova, size, (int)attrs);
        __free_iova(mapping, iova, size, attrs);
        return 0;
 }
@@ -1655,7 +1673,7 @@ static void *__iommu_alloc_atomic(struct device *dev, size_t size,
        if (*handle == DMA_ERROR_CODE)
                goto err_mapping;
 
-       dev_dbg(dev, "%s() %08x(%x)\n", __func__, *handle, size);
+       dev_dbg(dev, "%s() %pa(%zx)\n", __func__, handle, size);
        return addr;
 
 err_mapping:
@@ -1669,7 +1687,7 @@ static void __iommu_free_atomic(struct device *dev, void *cpu_addr,
 {
        __iommu_remove_mapping(dev, handle, size, attrs);
        __free_from_pool(cpu_addr, size);
-       dev_dbg(dev, "%s() %08x(%x)\n", __func__, handle, size);
+       dev_dbg(dev, "%s() %pa(%zx)\n", __func__, &handle, size);
 }
 
 static void *arm_iommu_alloc_attrs(struct device *dev, size_t size,
@@ -1688,9 +1706,11 @@ static void *arm_iommu_alloc_attrs(struct device *dev, size_t size,
 
        size = PAGE_ALIGN(size);
 
-       if (gfp & GFP_ATOMIC)
-
-               return __iommu_alloc_atomic(dev, size, handle, attrs);
+       if (!(gfp & __GFP_WAIT)) {
+               addr = __iommu_alloc_atomic(dev, size, handle, attrs);
+               trace_dmadebug_alloc_attrs(dev, *handle, size, NULL, addr);
+               return addr;
+       }
 
        pages = __iommu_alloc_buffer(dev, size, gfp, attrs);
        if (!pages)
@@ -1713,6 +1733,7 @@ static void *arm_iommu_alloc_attrs(struct device *dev, size_t size,
        if (!addr)
                goto err_mapping;
 
+       trace_dmadebug_alloc_attrs(dev, *handle, size, pages, addr);
        return addr;
 
 err_mapping:
@@ -1729,12 +1750,19 @@ static int arm_iommu_mmap_attrs(struct device *dev, struct vm_area_struct *vma,
        unsigned long uaddr = vma->vm_start;
        unsigned long usize = vma->vm_end - vma->vm_start;
        struct page **pages = __iommu_get_pages(cpu_addr, attrs);
+       unsigned long nr_pages = PAGE_ALIGN(size) >> PAGE_SHIFT;
+       unsigned long off = vma->vm_pgoff;
 
        vma->vm_page_prot = __get_dma_pgprot(attrs, vma->vm_page_prot);
 
        if (!pages)
                return -ENXIO;
 
+       if (off >= nr_pages || (usize >> PAGE_SHIFT) > nr_pages - off)
+               return -ENXIO;
+
+       pages += off;
+
        do {
                int ret = vm_insert_page(vma, uaddr, *pages++);
                if (ret) {
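A worked example of the new bounds check (sketch):

    /*
     * Buffer of 16 KiB  =>  nr_pages == 4.
     *   vm_pgoff == 2, usize ==  8 KiB (2 pages) -> ok      (2 <= 4 - 2)
     *   vm_pgoff == 2, usize == 12 KiB (3 pages) -> -ENXIO  (3 >  4 - 2)
     *   vm_pgoff == 4, any size                  -> -ENXIO  (off >= nr_pages)
     */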
@@ -1764,6 +1792,7 @@ void arm_iommu_free_attrs(struct device *dev, size_t size, void *cpu_addr,
        }
 
        if (__in_atomic_pool(cpu_addr, size)) {
+               trace_dmadebug_free_attrs(dev, handle, size, NULL, cpu_addr);
                __iommu_free_atomic(dev, cpu_addr, handle, size, attrs);
                return;
        }
@@ -1773,6 +1802,7 @@ void arm_iommu_free_attrs(struct device *dev, size_t size, void *cpu_addr,
                vunmap(cpu_addr);
        }
 
+       trace_dmadebug_free_attrs(dev, handle, size, pages, cpu_addr);
        __iommu_remove_mapping(dev, handle, size, attrs);
        __iommu_free_buffer(dev, pages, size, attrs);
 }
@@ -1826,8 +1856,7 @@ static int __map_sg_chunk(struct device *dev, struct scatterlist *sg,
 
 skip_cmaint:
        count = size >> PAGE_SHIFT;
-       ret = pg_iommu_map_sg(mapping->domain, iova_base, sg, count,
-                             (int)attrs);
+       ret = pg_iommu_map_sg(mapping, iova_base, sg, count, (int)attrs);
        if (WARN_ON(ret < 0))
                goto fail;
 
@@ -1835,8 +1864,6 @@ skip_cmaint:
 
        return 0;
 fail:
-       pg_iommu_unmap(mapping->domain, iova_base, count * PAGE_SIZE,
-                      (int)attrs);
        __free_iova(mapping, iova_base, size, attrs);
        return ret;
 }
@@ -2037,12 +2064,12 @@ static dma_addr_t arm_coherent_iommu_map_page(struct device *dev, struct page *p
        if (dma_addr == DMA_ERROR_CODE)
                return dma_addr;
 
-       ret = pg_iommu_map(mapping->domain, dma_addr,
+       ret = pg_iommu_map(mapping, dma_addr,
                           page_to_phys(page), len, (int)attrs);
        if (ret < 0)
                goto fail;
 
-       trace_dmadebug_map_page(dev, dma_addr, len, page);
+       trace_dmadebug_map_page(dev, dma_addr + offset, size, page);
        return dma_addr + offset;
 fail:
        __free_iova(mapping, dma_addr, len, attrs);
@@ -2079,7 +2106,7 @@ static dma_addr_t arm_iommu_map_page_at(struct device *dev, struct page *page,
        if (!dma_get_attr(DMA_ATTR_SKIP_CPU_SYNC, attrs))
                __dma_page_cpu_to_dev(page, offset, size, dir);
 
-       ret = pg_iommu_map(mapping->domain, dma_addr,
+       ret = pg_iommu_map(mapping, dma_addr,
                           page_to_phys(page), len, (int)attrs);
        if (ret < 0)
                return DMA_ERROR_CODE;
@@ -2088,31 +2115,6 @@ static dma_addr_t arm_iommu_map_page_at(struct device *dev, struct page *page,
        return dma_addr + offset;
 }
 
-static dma_addr_t arm_iommu_map_pages(struct device *dev, struct page **pages,
-                                 dma_addr_t dma_handle, size_t count,
-                                 enum dma_data_direction dir,
-                                 struct dma_attrs *attrs)
-{
-       struct dma_iommu_mapping *mapping = dev->archdata.mapping;
-       int ret;
-
-       if (!dma_get_attr(DMA_ATTR_SKIP_CPU_SYNC, attrs)) {
-               int i;
-
-               for (i = 0; i < count; i++)
-                       __dma_page_cpu_to_dev(pages[i], 0, PAGE_SIZE, dir);
-       }
-
-       ret = pg_iommu_map_pages(mapping->domain, dma_handle, pages, count,
-                                (int)attrs);
-       if (ret < 0)
-               return DMA_ERROR_CODE;
-
-       trace_dmadebug_map_page(dev, dma_handle, count * PAGE_SIZE, *pages);
-       return dma_handle;
-}
-
-
 /**
  * arm_coherent_iommu_unmap_page
  * @dev: valid struct device pointer
@@ -2134,12 +2136,11 @@ static void arm_coherent_iommu_unmap_page(struct device *dev, dma_addr_t handle,
        if (!iova)
                return;
 
-       pg_iommu_unmap(mapping->domain, iova, len, (int)attrs);
-       if (!dma_get_attr(DMA_ATTR_SKIP_FREE_IOVA, attrs))
-               __free_iova(mapping, iova, len, attrs);
-
        trace_dmadebug_unmap_page(dev, handle, size,
                  phys_to_page(iommu_iova_to_phys(mapping->domain, handle)));
+       pg_iommu_unmap(mapping, iova, len, (int)attrs);
+       if (!dma_get_attr(DMA_ATTR_SKIP_FREE_IOVA, attrs))
+               __free_iova(mapping, iova, len, attrs);
 }
 
 /**
@@ -2167,7 +2168,9 @@ static void arm_iommu_unmap_page(struct device *dev, dma_addr_t handle,
        if (!dma_get_attr(DMA_ATTR_SKIP_CPU_SYNC, attrs))
                __dma_page_dev_to_cpu(page, offset, size, dir);
 
-       pg_iommu_unmap(mapping->domain, iova, len, (int)attrs);
+       trace_dmadebug_unmap_page(dev, handle, size,
+                 phys_to_page(iommu_iova_to_phys(mapping->domain, handle)));
+       pg_iommu_unmap(mapping, iova, len, (int)attrs);
        if (!dma_get_attr(DMA_ATTR_SKIP_FREE_IOVA, attrs))
                __free_iova(mapping, iova, len, attrs);
 }
@@ -2206,6 +2209,13 @@ static void arm_iommu_sync_single_for_device(struct device *dev,
        __dma_page_cpu_to_dev(page, offset, size, dir);
 }
 
+static phys_addr_t arm_iommu_iova_to_phys(struct device *dev, dma_addr_t iova)
+{
+       struct dma_iommu_mapping *mapping = dev->archdata.mapping;
+
+       return iommu_iova_to_phys(mapping->domain, iova);
+}
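A hypothetical consumer of the new hook (the helper name is illustrative and
not part of this patch); it dispatches through the device's dma_map_ops like
the other downstream iova_* extensions:

    /* Hypothetical debug helper; not in this patch. */
    static phys_addr_t debug_iova_to_phys(struct device *dev, dma_addr_t iova)
    {
            struct dma_map_ops *ops = get_dma_ops(dev);

            return ops->iova_to_phys ? ops->iova_to_phys(dev, iova) : 0;
    }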
+
 struct dma_map_ops iommu_ops = {
        .alloc          = arm_iommu_alloc_attrs,
        .free           = arm_iommu_free_attrs,
@@ -2213,7 +2223,6 @@ struct dma_map_ops iommu_ops = {
        .get_sgtable    = arm_iommu_get_sgtable,
 
        .map_page               = arm_iommu_map_page,
-       .map_pages              = arm_iommu_map_pages,
        .map_page_at            = arm_iommu_map_page_at,
        .unmap_page             = arm_iommu_unmap_page,
        .sync_single_for_cpu    = arm_iommu_sync_single_for_cpu,
@@ -2231,6 +2240,8 @@ struct dma_map_ops iommu_ops = {
        .iova_free              = arm_iommu_iova_free,
        .iova_get_free_total    = arm_iommu_iova_get_free_total,
        .iova_get_free_max      = arm_iommu_iova_get_free_max,
+
+       .iova_to_phys           = arm_iommu_iova_to_phys,
 };
 
 struct dma_map_ops iommu_coherent_ops = {
@@ -2248,6 +2259,131 @@ struct dma_map_ops iommu_coherent_ops = {
        .set_dma_mask   = arm_dma_set_mask,
 };
 
+bool device_is_iommuable(struct device *dev)
+{
+       return (dev->archdata.dma_ops == &iommu_ops) ||
+               (dev->archdata.dma_ops == &iommu_coherent_ops);
+}
+
+static inline void __dummy_common(void)
+{ WARN(1, "DMA API should be called after ->probe() is done.\n"); }
+
+static void *__dummy_alloc_attrs(struct device *dev, size_t size,
+                                dma_addr_t *dma_handle, gfp_t gfp,
+                                struct dma_attrs *attrs)
+{ __dummy_common(); return NULL; }
+
+static void __dummy_free_attrs(struct device *dev, size_t size,
+                              void *vaddr, dma_addr_t dma_handle,
+                              struct dma_attrs *attrs)
+{ __dummy_common(); }
+
+static int __dummy_mmap_attrs(struct device *dev, struct vm_area_struct *vma,
+                             void *cpu_addr, dma_addr_t dma_addr, size_t size,
+                             struct dma_attrs *attrs)
+{ __dummy_common(); return -ENXIO; }
+
+static int __dummy_get_sgtable(struct device *dev, struct sg_table *sgt,
+                              void *cpu_addr, dma_addr_t dma_addr,
+                              size_t size, struct dma_attrs *attrs)
+{ __dummy_common(); return -ENXIO; }
+
+static dma_addr_t __dummy_map_page(struct device *dev, struct page *page,
+                                  unsigned long offset, size_t size,
+                                  enum dma_data_direction dir,
+                                  struct dma_attrs *attrs)
+{ __dummy_common(); return DMA_ERROR_CODE; }
+
+static dma_addr_t __dummy_map_page_at(struct device *dev, struct page *page,
+                                     dma_addr_t dma_handle,
+                                     unsigned long offset, size_t size,
+                                     enum dma_data_direction dir,
+                                     struct dma_attrs *attrs)
+{ __dummy_common(); return DMA_ERROR_CODE; }
+
+static void __dummy_unmap_page(struct device *dev, dma_addr_t dma_handle,
+                              size_t size, enum dma_data_direction dir,
+                              struct dma_attrs *attrs)
+{ __dummy_common(); }
+
+static int __dummy_map_sg(struct device *dev, struct scatterlist *sg,
+                         int nents, enum dma_data_direction dir,
+                         struct dma_attrs *attrs)
+{ __dummy_common(); return 0; }
+
+static void __dummy_unmap_sg(struct device *dev,
+                            struct scatterlist *sg, int nents,
+                            enum dma_data_direction dir,
+                            struct dma_attrs *attrs)
+{ __dummy_common(); }
+
+static void __dummy_sync_single_for_cpu(struct device *dev,
+                                       dma_addr_t dma_handle, size_t size,
+                                       enum dma_data_direction dir)
+{ __dummy_common(); }
+
+static void __dummy_sync_single_for_device(struct device *dev,
+                                          dma_addr_t dma_handle, size_t size,
+                                          enum dma_data_direction dir)
+{ __dummy_common(); }
+
+static void __dummy_sync_sg_for_cpu(struct device *dev,
+                                   struct scatterlist *sg, int nents,
+                                   enum dma_data_direction dir)
+{ __dummy_common(); }
+
+static void __dummy_sync_sg_for_device(struct device *dev,
+                                      struct scatterlist *sg, int nents,
+                                      enum dma_data_direction dir)
+{ __dummy_common(); }
+
+static dma_addr_t __dummy_iova_alloc(struct device *dev, size_t size,
+                                    struct dma_attrs *attrs)
+{ __dummy_common(); return DMA_ERROR_CODE; }
+
+static dma_addr_t __dummy_iova_alloc_at(struct device *dev, dma_addr_t *dma_addr,
+                                       size_t size, struct dma_attrs *attrs)
+{ __dummy_common(); return DMA_ERROR_CODE; }
+
+static void __dummy_iova_free(struct device *dev, dma_addr_t addr, size_t size,
+                             struct dma_attrs *attrs)
+{ __dummy_common(); }
+
+static size_t __dummy_iova_get_free_total(struct device *dev)
+{ __dummy_common(); return 0; }
+
+static size_t __dummy_iova_get_free_max(struct device *dev)
+{ __dummy_common(); return 0; }
+
+static struct dma_map_ops __dummy_ops = {
+       .alloc          = __dummy_alloc_attrs,
+       .free           = __dummy_free_attrs,
+       .mmap           = __dummy_mmap_attrs,
+       .get_sgtable    = __dummy_get_sgtable,
+
+       .map_page               = __dummy_map_page,
+       .map_page_at            = __dummy_map_page_at,
+       .unmap_page             = __dummy_unmap_page,
+       .sync_single_for_cpu    = __dummy_sync_single_for_cpu,
+       .sync_single_for_device = __dummy_sync_single_for_device,
+
+       .map_sg                 = __dummy_map_sg,
+       .unmap_sg               = __dummy_unmap_sg,
+       .sync_sg_for_cpu        = __dummy_sync_sg_for_cpu,
+       .sync_sg_for_device     = __dummy_sync_sg_for_device,
+
+       .iova_alloc             = __dummy_iova_alloc,
+       .iova_alloc_at          = __dummy_iova_alloc_at,
+       .iova_free              = __dummy_iova_free,
+       .iova_get_free_total    = __dummy_iova_get_free_total,
+       .iova_get_free_max      = __dummy_iova_get_free_max,
+};
+
+void set_dummy_dma_ops(struct device *dev)
+{
+       set_dma_ops(dev, &__dummy_ops);
+}
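A hypothetical installation point (sketch; the notifier below is illustrative,
only set_dummy_dma_ops() itself comes from this patch): bus code can install
the guard ops when a device appears, so any DMA API call issued before
->probe() completes trips the WARN in __dummy_common():

    /* Hypothetical sketch; not part of this patch. */
    static int foo_bus_notifier(struct notifier_block *nb,
                                unsigned long action, void *data)
    {
            struct device *dev = data;

            if (action == BUS_NOTIFY_ADD_DEVICE)
                    set_dummy_dma_ops(dev);
            return NOTIFY_OK;
    }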
+
 /**
  * arm_iommu_create_mapping
  * @bus: pointer to the bus holding the client device (for IOMMU calls)