dma_mmu_remap_num++;
}
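+/*
+ * Helpers assumed to be provided next to iotable_init() in mmu.c:
+ * iotable_init_va() is expected to only reserve the virtual area for a
+ * mapping, while iotable_init_mapping() populates the page tables for it.
+ */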
+void __init iotable_init_va(struct map_desc *io_desc, int nr);
+void __init iotable_init_mapping(struct map_desc *io_desc, int nr);
+
void __init dma_contiguous_remap(void)
{
int i;
if (start >= end)
continue;
- map.pfn = __phys_to_pfn(start);
- map.virtual = __phys_to_virt(start);
- map.length = end - start;
- map.type = MT_MEMORY_DMA_READY;
+ map.type = MT_MEMORY;
/*
- * Clear previous low-memory mapping
+ * Clear previous low-memory mapping to ensure that the
+ * TLB does not see any conflicting entries, then flush
+ * the TLB of the old entries before creating new mappings.
+ *
+ * This ensures that any speculatively loaded TLB entries
+ * (even though they may be rare) can not cause any problems,
+ * and ensures that this code is architecturally compliant.
*/
for (addr = __phys_to_virt(start); addr < __phys_to_virt(end);
addr += PMD_SIZE)
pmd_clear(pmd_off_k(addr));
- iotable_init(&map, 1);
- }
-}
-
-static int __dma_update_pte(pte_t *pte, pgtable_t token, unsigned long addr,
- void *data)
-{
- struct page *page = virt_to_page(addr);
- pgprot_t prot = *(pgprot_t *)data;
-
- set_pte_ext(pte, mk_pte(page, prot), 0);
- return 0;
-}
-
-static void __dma_remap(struct page *page, size_t size, pgprot_t prot)
-{
- unsigned long start = (unsigned long) page_address(page);
- unsigned end = start + size;
+ for (addr = start; addr < end; addr += PAGE_SIZE) {
+ map.pfn = __phys_to_pfn(addr);
+ map.virtual = __phys_to_virt(addr);
+ map.length = PAGE_SIZE;
+ iotable_init_mapping(&map, 1);
+ }
- apply_to_page_range(&init_mm, start, size, __dma_update_pte, &prot);
- dsb();
- flush_tlb_kernel_range(start, end);
+ map.pfn = __phys_to_pfn(start);
+ map.virtual = __phys_to_virt(start);
+ map.length = end - start;
+ iotable_init_va(&map, 1);
+ }
}
static void *__alloc_remap_buffer(struct device *dev, size_t size, gfp_t gfp,
if (!page)
return NULL;
- __dma_clear_buffer(page, size);
-
if (PageHighMem(page)) {
ptr = __dma_alloc_remap(page, size, GFP_KERNEL, prot, caller);
if (!ptr) {
return NULL;
}
} else {
- __dma_remap(page, size, prot);
ptr = page_address(page);
}
*ret_page = page;
{
if (PageHighMem(page))
__dma_free_remap(cpu_addr, size);
- else
- __dma_remap(page, size, pgprot_kernel);
dma_release_from_contiguous(dev, page, size >> PAGE_SHIFT);
}
#ifdef CONFIG_DMA_API_DEBUG
u64 limit = (mask + 1) & ~mask;
if (limit && size >= limit) {
- dev_warn(dev, "coherent allocation too big (requested %#x mask %#llx)\n",
+ dev_warn(dev, "coherent allocation too big (requested %#zx mask %#llx)\n",
size, mask);
return NULL;
}
end = find_next_zero_bit(bitmap, bits, pos);
start_addr = bit_to_addr(pos, base, order);
- end_addr = bit_to_addr(end, base, order);
- seq_printf(s, " %pa-%pa pages=%d\n",
+ end_addr = bit_to_addr(end, base, order) - 1;
+ seq_printf(s, " %pa-%pa pages=%zu\n",
&start_addr, &end_addr, (end - pos) << order);
}
}
spin_unlock_irqrestore(&iommu_mapping_list_lock, flags);
}
-static int pg_iommu_map(struct iommu_domain *domain, unsigned long iova,
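+/*
+ * Number of prefetch pages appended after a buffer mapping; zero when the
+ * caller passes DMA_ATTR_SKIP_IOVA_GAP or the mapping has none configured.
+ */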
+static inline int iommu_get_num_pf_pages(struct dma_iommu_mapping *mapping,
+ struct dma_attrs *attrs)
+{
+ /* XXX: give priority to DMA_ATTR_SKIP_IOVA_GAP */
+ if (dma_get_attr(DMA_ATTR_SKIP_IOVA_GAP, attrs))
+ return 0;
+
+ /* XXX: currently we support only 1 prefetch page */
+ WARN_ON(mapping->num_pf_page > prefetch_page_count);
+
+ return mapping->num_pf_page;
+}
+
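+/*
+ * Extra gap page accounted for behind the buffer when the mapping was
+ * created with gap_page set, again unless DMA_ATTR_SKIP_IOVA_GAP is used.
+ */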
+static inline int iommu_gap_pg_count(struct dma_iommu_mapping *mapping,
+ struct dma_attrs *attrs)
+{
+ if (dma_get_attr(DMA_ATTR_SKIP_IOVA_GAP, attrs))
+ return 0;
+
+ return mapping->gap_page ? gap_page_count : 0;
+}
+
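+/*
+ * Map [iova, iova + len) and, when prefetch pages are enabled for this
+ * mapping, a trailing PF_PAGES_SIZE window backed by iova_gap_phys.
+ */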
+static int pg_iommu_map(struct dma_iommu_mapping *mapping, unsigned long iova,
phys_addr_t phys, size_t len, int prot)
{
int err;
+ struct iommu_domain *domain = mapping->domain;
struct dma_attrs *attrs = (struct dma_attrs *)prot;
+ bool need_prefetch_page = !!iommu_get_num_pf_pages(mapping, attrs);
- if (!dma_get_attr(DMA_ATTR_SKIP_IOVA_GAP, attrs)) {
+ if (need_prefetch_page) {
err = iommu_map(domain, iova + len, iova_gap_phys,
PF_PAGES_SIZE, prot);
if (err)
}
err = iommu_map(domain, iova, phys, len, prot);
- if (err && !dma_get_attr(DMA_ATTR_SKIP_IOVA_GAP, attrs))
+ if (err && need_prefetch_page)
iommu_unmap(domain, iova + len, PF_PAGES_SIZE);
return err;
}
-static size_t pg_iommu_unmap(struct iommu_domain *domain,
+static size_t pg_iommu_unmap(struct dma_iommu_mapping *mapping,
unsigned long iova, size_t len, int prot)
{
struct dma_attrs *attrs = (struct dma_attrs *)prot;
+ struct iommu_domain *domain = mapping->domain;
+ bool need_prefetch_page = !!iommu_get_num_pf_pages(mapping, attrs);
- if (!dma_get_attr(DMA_ATTR_SKIP_IOVA_GAP, attrs)) {
+ if (need_prefetch_page) {
phys_addr_t phys_addr;
phys_addr = iommu_iova_to_phys(domain, iova + len);
return iommu_unmap(domain, iova, len);
}
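+/* Scatterlist variant of pg_iommu_map(): maps @sgl plus the optional trailing prefetch page. */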
-static int pg_iommu_map_pages(struct iommu_domain *domain, unsigned long iova,
- struct page **pages, size_t count, int prot)
+static int pg_iommu_map_sg(struct dma_iommu_mapping *mapping,
+ unsigned long iova, struct scatterlist *sgl,
+ int nents, int prot)
{
int err;
struct dma_attrs *attrs = (struct dma_attrs *)prot;
+ struct iommu_domain *domain = mapping->domain;
+ bool need_prefetch_page = !!iommu_get_num_pf_pages(mapping, attrs);
- if (!dma_get_attr(DMA_ATTR_SKIP_IOVA_GAP, attrs)) {
- err = iommu_map(domain, iova + (count << PAGE_SHIFT),
- iova_gap_phys, PF_PAGES_SIZE, prot);
- if (err)
- return err;
- }
-
- err = iommu_map_pages(domain, iova, pages, count, prot);
- if (err && !dma_get_attr(DMA_ATTR_SKIP_IOVA_GAP, attrs))
- iommu_unmap(domain, iova + (count << PAGE_SHIFT), PF_PAGES_SIZE);
-
- return err;
-}
-
-static int pg_iommu_map_sg(struct iommu_domain *domain, unsigned long iova,
- struct scatterlist *sgl, int nents, int prot)
-{
- int err;
- struct dma_attrs *attrs = (struct dma_attrs *)prot;
-
- if (!dma_get_attr(DMA_ATTR_SKIP_IOVA_GAP, attrs)) {
+ if (need_prefetch_page) {
err = iommu_map(domain, iova + (nents << PAGE_SHIFT),
iova_gap_phys, PF_PAGES_SIZE, prot);
if (err)
}
err = iommu_map_sg(domain, iova, sgl, nents, prot);
- if (err && !dma_get_attr(DMA_ATTR_SKIP_IOVA_GAP, attrs))
+ if (err && need_prefetch_page)
iommu_unmap(domain, iova + (nents << PAGE_SHIFT), PF_PAGES_SIZE);
return err;
if (order > CONFIG_ARM_DMA_IOMMU_ALIGNMENT)
order = CONFIG_ARM_DMA_IOMMU_ALIGNMENT;
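+ /* Never allocate with a higher order than the mapping's alignment allows. */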
+ if (mapping->alignment && order > get_order(mapping->alignment))
+ order = get_order(mapping->alignment);
+
count = ((PAGE_ALIGN(size) >> PAGE_SHIFT) +
(1 << mapping->order) - 1) >> mapping->order;
- if (!dma_get_attr(DMA_ATTR_SKIP_IOVA_GAP, attrs))
- count += PG_PAGES;
+ count += iommu_get_num_pf_pages(mapping, attrs);
+ count += iommu_gap_pg_count(mapping, attrs);
if (order > mapping->order)
align = (1 << (order - mapping->order)) - 1;
count = ((PAGE_ALIGN(size) >> PAGE_SHIFT) +
(1 << mapping->order) - 1) >> mapping->order;
- if (!dma_get_attr(DMA_ATTR_SKIP_IOVA_GAP, attrs))
- count += PG_PAGES;
+ count += iommu_get_num_pf_pages(mapping, attrs);
+ count += iommu_gap_pg_count(mapping, attrs);
bytes = count << (mapping->order + PAGE_SHIFT);
(1 << mapping->order) - 1) >> mapping->order;
unsigned long flags;
- if (!dma_get_attr(DMA_ATTR_SKIP_IOVA_GAP, attrs))
- count += PG_PAGES;
+ count += iommu_get_num_pf_pages(mapping, attrs);
+ count += iommu_gap_pg_count(mapping, attrs);
spin_lock_irqsave(&mapping->lock, flags);
bitmap_clear(mapping->bitmap, start, count);
break;
len = (j - i) << PAGE_SHIFT;
- ret = pg_iommu_map(mapping->domain, iova, phys, len,
- (int)attrs);
+ ret = iommu_map(mapping->domain, iova, phys, len, (int)attrs);
if (ret < 0)
goto fail;
iova += len;
i = j;
}
+
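+ /*
+ * The chunks above were mapped with iommu_map() directly, so place the
+ * single trailing prefetch page for the whole buffer here instead.
+ */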
+ if (iommu_get_num_pf_pages(mapping, attrs)) {
+ int err = iommu_map(mapping->domain, iova, iova_gap_phys,
+ PF_PAGES_SIZE, (int)attrs);
+ if (err)
+ goto fail;
+ }
+
return dma_addr;
fail:
- pg_iommu_unmap(mapping->domain, dma_addr, iova-dma_addr, (int)attrs);
+ iommu_unmap(mapping->domain, dma_addr, iova - dma_addr);
__free_iova(mapping, dma_addr, size, attrs);
return DMA_ERROR_CODE;
}
size = PAGE_ALIGN((iova & ~PAGE_MASK) + size);
iova &= PAGE_MASK;
- pg_iommu_unmap(mapping->domain, iova, size, (int)attrs);
+ pg_iommu_unmap(mapping, iova, size, (int)attrs);
__free_iova(mapping, iova, size, attrs);
return 0;
}
size = PAGE_ALIGN(size);
- if (!(gfp & __GFP_WAIT))
- return __iommu_alloc_atomic(dev, size, handle, attrs);
+ if (!(gfp & __GFP_WAIT)) {
+ addr = __iommu_alloc_atomic(dev, size, handle, attrs);
+ trace_dmadebug_alloc_attrs(dev, *handle, size, NULL, addr);
+ return addr;
+ }
pages = __iommu_alloc_buffer(dev, size, gfp, attrs);
if (!pages)
if (!addr)
goto err_mapping;
+ trace_dmadebug_alloc_attrs(dev, *handle, size, pages, addr);
return addr;
err_mapping:
unsigned long uaddr = vma->vm_start;
unsigned long usize = vma->vm_end - vma->vm_start;
struct page **pages = __iommu_get_pages(cpu_addr, attrs);
+ unsigned long nr_pages = PAGE_ALIGN(size) >> PAGE_SHIFT;
+ unsigned long off = vma->vm_pgoff;
vma->vm_page_prot = __get_dma_pgprot(attrs, vma->vm_page_prot);
if (!pages)
return -ENXIO;
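+ /* Reject partial mmaps that start beyond or run past the end of the buffer. */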
+ if (off >= nr_pages || (usize >> PAGE_SHIFT) > nr_pages - off)
+ return -ENXIO;
+
+ pages += off;
+
do {
int ret = vm_insert_page(vma, uaddr, *pages++);
if (ret) {
}
if (__in_atomic_pool(cpu_addr, size)) {
+ trace_dmadebug_free_attrs(dev, handle, size, NULL, cpu_addr);
__iommu_free_atomic(dev, cpu_addr, handle, size, attrs);
return;
}
vunmap(cpu_addr);
}
+ trace_dmadebug_free_attrs(dev, handle, size, pages, cpu_addr);
__iommu_remove_mapping(dev, handle, size, attrs);
__iommu_free_buffer(dev, pages, size, attrs);
}
skip_cmaint:
count = size >> PAGE_SHIFT;
- ret = pg_iommu_map_sg(mapping->domain, iova_base, sg, count,
- (int)attrs);
+ ret = pg_iommu_map_sg(mapping, iova_base, sg, count, (int)attrs);
if (WARN_ON(ret < 0))
goto fail;
return 0;
fail:
- pg_iommu_unmap(mapping->domain, iova_base, count * PAGE_SIZE,
- (int)attrs);
__free_iova(mapping, iova_base, size, attrs);
return ret;
}
if (dma_addr == DMA_ERROR_CODE)
return dma_addr;
- ret = pg_iommu_map(mapping->domain, dma_addr,
+ ret = pg_iommu_map(mapping, dma_addr,
page_to_phys(page), len, (int)attrs);
if (ret < 0)
goto fail;
- trace_dmadebug_map_page(dev, dma_addr, len, page);
+ trace_dmadebug_map_page(dev, dma_addr + offset, size, page);
return dma_addr + offset;
fail:
__free_iova(mapping, dma_addr, len, attrs);
if (!dma_get_attr(DMA_ATTR_SKIP_CPU_SYNC, attrs))
__dma_page_cpu_to_dev(page, offset, size, dir);
- ret = pg_iommu_map(mapping->domain, dma_addr,
+ ret = pg_iommu_map(mapping, dma_addr,
page_to_phys(page), len, (int)attrs);
if (ret < 0)
return DMA_ERROR_CODE;
return dma_addr + offset;
}
-static dma_addr_t arm_iommu_map_pages(struct device *dev, struct page **pages,
- dma_addr_t dma_handle, size_t count,
- enum dma_data_direction dir,
- struct dma_attrs *attrs)
-{
- struct dma_iommu_mapping *mapping = dev->archdata.mapping;
- int ret;
-
- if (!dma_get_attr(DMA_ATTR_SKIP_CPU_SYNC, attrs)) {
- int i;
-
- for (i = 0; i < count; i++)
- __dma_page_cpu_to_dev(pages[i], 0, PAGE_SIZE, dir);
- }
-
- ret = pg_iommu_map_pages(mapping->domain, dma_handle, pages, count,
- (int)attrs);
- if (ret < 0)
- return DMA_ERROR_CODE;
-
- trace_dmadebug_map_page(dev, dma_handle, count * PAGE_SIZE, *pages);
- return dma_handle;
-}
-
-
/**
* arm_coherent_iommu_unmap_page
* @dev: valid struct device pointer
if (!iova)
return;
- pg_iommu_unmap(mapping->domain, iova, len, (int)attrs);
- if (!dma_get_attr(DMA_ATTR_SKIP_FREE_IOVA, attrs))
- __free_iova(mapping, iova, len, attrs);
-
trace_dmadebug_unmap_page(dev, handle, size,
phys_to_page(iommu_iova_to_phys(mapping->domain, handle)));
+ pg_iommu_unmap(mapping, iova, len, (int)attrs);
+ if (!dma_get_attr(DMA_ATTR_SKIP_FREE_IOVA, attrs))
+ __free_iova(mapping, iova, len, attrs);
}
/**
if (!dma_get_attr(DMA_ATTR_SKIP_CPU_SYNC, attrs))
__dma_page_dev_to_cpu(page, offset, size, dir);
- pg_iommu_unmap(mapping->domain, iova, len, (int)attrs);
+ trace_dmadebug_unmap_page(dev, handle, size,
+ phys_to_page(iommu_iova_to_phys(mapping->domain, handle)));
+ pg_iommu_unmap(mapping, iova, len, (int)attrs);
if (!dma_get_attr(DMA_ATTR_SKIP_FREE_IOVA, attrs))
__free_iova(mapping, iova, len, attrs);
}
__dma_page_cpu_to_dev(page, offset, size, dir);
}
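+/* Translate a device IOVA back to a physical address via the device's IOMMU domain. */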
+static phys_addr_t arm_iommu_iova_to_phys(struct device *dev, dma_addr_t iova)
+{
+ struct dma_iommu_mapping *mapping = dev->archdata.mapping;
+
+ return iommu_iova_to_phys(mapping->domain, iova);
+}
+
struct dma_map_ops iommu_ops = {
.alloc = arm_iommu_alloc_attrs,
.free = arm_iommu_free_attrs,
.get_sgtable = arm_iommu_get_sgtable,
.map_page = arm_iommu_map_page,
- .map_pages = arm_iommu_map_pages,
.map_page_at = arm_iommu_map_page_at,
.unmap_page = arm_iommu_unmap_page,
.sync_single_for_cpu = arm_iommu_sync_single_for_cpu,
.iova_free = arm_iommu_iova_free,
.iova_get_free_total = arm_iommu_iova_get_free_total,
.iova_get_free_max = arm_iommu_iova_get_free_max,
+
+ .iova_to_phys = arm_iommu_iova_to_phys,
};
struct dma_map_ops iommu_coherent_ops = {
.set_dma_mask = arm_dma_set_mask,
};
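+/* True if @dev is currently using the ARM IOMMU dma_map_ops, coherent or not. */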
+bool device_is_iommuable(struct device *dev)
+{
+ return (dev->archdata.dma_ops == &iommu_ops) ||
+ (dev->archdata.dma_ops == &iommu_coherent_ops);
+}
+
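+/*
+ * Placeholder dma_map_ops: every callback only WARNs, so any DMA API use
+ * before the real ops are installed around ->probe() time is caught loudly.
+ */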
+static inline void __dummy_common(void)
+{ WARN(1, "DMA API should be called after ->probe() is done.\n"); }
+
+static void *__dummy_alloc_attrs(struct device *dev, size_t size,
+ dma_addr_t *dma_handle, gfp_t gfp,
+ struct dma_attrs *attrs)
+{ __dummy_common(); return NULL; }
+
+static void __dummy_free_attrs(struct device *dev, size_t size,
+ void *vaddr, dma_addr_t dma_handle,
+ struct dma_attrs *attrs)
+{ __dummy_common(); }
+
+static int __dummy_mmap_attrs(struct device *dev, struct vm_area_struct *vma,
+ void *cpu_addr, dma_addr_t dma_addr, size_t size,
+ struct dma_attrs *attrs)
+{ __dummy_common(); return -ENXIO; }
+
+static int __dummy_get_sgtable(struct device *dev, struct sg_table *sgt,
+ void *cpu_addr, dma_addr_t dma_addr,
+ size_t size, struct dma_attrs *attrs)
+{ __dummy_common(); return -ENXIO; }
+
+static dma_addr_t __dummy_map_page(struct device *dev, struct page *page,
+ unsigned long offset, size_t size,
+ enum dma_data_direction dir,
+ struct dma_attrs *attrs)
+{ __dummy_common(); return DMA_ERROR_CODE; }
+
+static dma_addr_t __dummy_map_page_at(struct device *dev, struct page *page,
+ dma_addr_t dma_handle,
+ unsigned long offset, size_t size,
+ enum dma_data_direction dir,
+ struct dma_attrs *attrs)
+{ __dummy_common(); return DMA_ERROR_CODE; }
+
+static void __dummy_unmap_page(struct device *dev, dma_addr_t dma_handle,
+ size_t size, enum dma_data_direction dir,
+ struct dma_attrs *attrs)
+{ __dummy_common(); }
+
+static int __dummy_map_sg(struct device *dev, struct scatterlist *sg,
+ int nents, enum dma_data_direction dir,
+ struct dma_attrs *attrs)
+{ __dummy_common(); return 0; }
+
+static void __dummy_unmap_sg(struct device *dev,
+ struct scatterlist *sg, int nents,
+ enum dma_data_direction dir,
+ struct dma_attrs *attrs)
+{ __dummy_common(); }
+
+static void __dummy_sync_single_for_cpu(struct device *dev,
+ dma_addr_t dma_handle, size_t size,
+ enum dma_data_direction dir)
+{ __dummy_common(); }
+
+static void __dummy_sync_single_for_device(struct device *dev,
+ dma_addr_t dma_handle, size_t size,
+ enum dma_data_direction dir)
+{ __dummy_common(); }
+
+static void __dummy_sync_sg_for_cpu(struct device *dev,
+ struct scatterlist *sg, int nents,
+ enum dma_data_direction dir)
+{ __dummy_common(); }
+
+static void __dummy_sync_sg_for_device(struct device *dev,
+ struct scatterlist *sg, int nents,
+ enum dma_data_direction dir)
+{ __dummy_common(); }
+
+static dma_addr_t __dummy_iova_alloc(struct device *dev, size_t size,
+ struct dma_attrs *attrs)
+{ __dummy_common(); return DMA_ERROR_CODE; }
+
+static dma_addr_t __dummy_iova_alloc_at(struct device *dev, dma_addr_t *dma_addr,
+ size_t size, struct dma_attrs *attrs)
+{ __dummy_common(); return DMA_ERROR_CODE; }
+
+static void __dummy_iova_free(struct device *dev, dma_addr_t addr, size_t size,
+ struct dma_attrs *attrs)
+{ __dummy_common(); }
+
+static size_t __dummy_iova_get_free_total(struct device *dev)
+{ __dummy_common(); return 0; }
+
+static size_t __dummy_iova_get_free_max(struct device *dev)
+{ __dummy_common(); return 0; }
+
+static struct dma_map_ops __dummy_ops = {
+ .alloc = __dummy_alloc_attrs,
+ .free = __dummy_free_attrs,
+ .mmap = __dummy_mmap_attrs,
+ .get_sgtable = __dummy_get_sgtable,
+
+ .map_page = __dummy_map_page,
+ .map_page_at = __dummy_map_page_at,
+ .unmap_page = __dummy_unmap_page,
+ .sync_single_for_cpu = __dummy_sync_single_for_cpu,
+ .sync_single_for_device = __dummy_sync_single_for_device,
+
+ .map_sg = __dummy_map_sg,
+ .unmap_sg = __dummy_unmap_sg,
+ .sync_sg_for_cpu = __dummy_sync_sg_for_cpu,
+ .sync_sg_for_device = __dummy_sync_sg_for_device,
+
+ .iova_alloc = __dummy_iova_alloc,
+ .iova_alloc_at = __dummy_iova_alloc_at,
+ .iova_free = __dummy_iova_free,
+ .iova_get_free_total = __dummy_iova_get_free_total,
+ .iova_get_free_max = __dummy_iova_get_free_max,
+};
+
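+/*
+ * Install the placeholder ops for @dev. Intended use (an assumption, not
+ * shown in this patch): a bus notifier sets these at device-add time and
+ * the real DMA ops replace them once the device has been probed.
+ */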
+void set_dummy_dma_ops(struct device *dev)
+{
+ set_dma_ops(dev, &__dummy_ops);
+}
+
/**
* arm_iommu_create_mapping
* @bus: pointer to the bus holding the client device (for IOMMU calls)