/*
 * Copyright 2004-2009 Analog Devices Inc.
 *
 * Licensed under the GPL-2 or later.
 */
#ifndef _BLACKFIN_DMA_MAPPING_H
#define _BLACKFIN_DMA_MAPPING_H

#include <asm/cacheflush.h>
struct scatterlist;

void *dma_alloc_coherent(struct device *dev, size_t size,
			 dma_addr_t *dma_handle, gfp_t gfp);
void dma_free_coherent(struct device *dev, size_t size, void *vaddr,
		       dma_addr_t dma_handle);
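/*
 * A minimal usage sketch (hypothetical driver code, not part of this
 * header): a coherent buffer returns both the CPU pointer and the bus
 * address in one call:
 *
 *	dma_addr_t handle;
 *	void *buf = dma_alloc_coherent(dev, PAGE_SIZE, &handle, GFP_KERNEL);
 *	if (buf) {
 *		...
 *		dma_free_coherent(dev, PAGE_SIZE, buf, handle);
 *	}
 */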
/*
 * Now for the API extensions over the pci_ one
 */
#define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f)
#define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h)
#define dma_supported(d, m)		(1)
#define dma_get_cache_alignment()	(32)
#define dma_is_consistent(d, h)		(1)
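/*
 * The constants above reflect Blackfin assumptions: every device is
 * expected to reach all of memory (dma_supported() is always true), and
 * the reported 32-byte cache alignment matches the L1 data cache line
 * size.
 */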
static inline int
dma_set_mask(struct device *dev, u64 dma_mask)
{
	if (!dev->dma_mask || !dma_supported(dev, dma_mask))
		return -EIO;

	*dev->dma_mask = dma_mask;

	return 0;
}

static inline int
dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
	return 0;
}
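/*
 * Cache maintenance for streaming DMA: __dma_sync() is the out-of-line
 * helper, while _dma_sync() below inlines the cache operation when the
 * direction is a compile-time constant.  DMA_TO_DEVICE only needs a
 * writeback, whereas DMA_FROM_DEVICE and DMA_BIDIRECTIONAL invalidate
 * (which on Blackfin also writes back dirty lines).
 */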
extern void
__dma_sync(dma_addr_t addr, size_t size, enum dma_data_direction dir);

static inline void
_dma_sync(dma_addr_t addr, size_t size, enum dma_data_direction dir)
{
	if (!__builtin_constant_p(dir)) {
		__dma_sync(addr, size, dir);
		return;
	}

	switch (dir) {
	case DMA_NONE:
		BUG();
	case DMA_TO_DEVICE:		/* writeback only */
		flush_dcache_range(addr, addr + size);
		break;
	case DMA_FROM_DEVICE:		/* invalidate only */
	case DMA_BIDIRECTIONAL:		/* flush and invalidate */
		/* Blackfin has no dedicated invalidate (it includes a flush) */
		invalidate_dcache_range(addr, addr + size);
		break;
	}
}
static inline dma_addr_t
dma_map_single(struct device *dev, void *ptr, size_t size,
	       enum dma_data_direction dir)
{
	_dma_sync((dma_addr_t)ptr, size, dir);
	return (dma_addr_t) ptr;
}
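/*
 * Note that the returned handle is simply the buffer address: Blackfin
 * has no MMU, so virtual, physical, and bus addresses are all the same.
 * A hypothetical caller would look like:
 *
 *	dma_addr_t h = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
 *	... start the transfer ...
 *	dma_unmap_single(dev, h, len, DMA_TO_DEVICE);
 */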
static inline dma_addr_t
dma_map_page(struct device *dev, struct page *page,
	     unsigned long offset, size_t size,
	     enum dma_data_direction dir)
{
	return dma_map_single(dev, page_address(page) + offset, size, dir);
}
static inline void
dma_unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size,
		 enum dma_data_direction dir)
{
	BUG_ON(!valid_dma_direction(dir));
}

static inline void
dma_unmap_page(struct device *dev, dma_addr_t dma_addr, size_t size,
	       enum dma_data_direction dir)
{
	dma_unmap_single(dev, dma_addr, size, dir);
}
extern int dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
		      enum dma_data_direction dir);

static inline void
dma_unmap_sg(struct device *dev, struct scatterlist *sg,
	     int nhwentries, enum dma_data_direction dir)
{
	BUG_ON(!valid_dma_direction(dir));
}
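/*
 * Streaming sync helpers: the *_for_device variants push the buffer out
 * of the data cache before the device touches it; the *_for_cpu
 * variants below only sanity-check the direction and are otherwise
 * no-ops in this implementation.
 */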
static inline void
dma_sync_single_range_for_cpu(struct device *dev, dma_addr_t handle,
			      unsigned long offset, size_t size,
			      enum dma_data_direction dir)
{
	BUG_ON(!valid_dma_direction(dir));
}

static inline void
dma_sync_single_range_for_device(struct device *dev, dma_addr_t handle,
				 unsigned long offset, size_t size,
				 enum dma_data_direction dir)
{
	_dma_sync(handle + offset, size, dir);
}
static inline void
dma_sync_single_for_cpu(struct device *dev, dma_addr_t handle, size_t size,
			enum dma_data_direction dir)
{
	dma_sync_single_range_for_cpu(dev, handle, 0, size, dir);
}

static inline void
dma_sync_single_for_device(struct device *dev, dma_addr_t handle, size_t size,
			   enum dma_data_direction dir)
{
	dma_sync_single_range_for_device(dev, handle, 0, size, dir);
}
static inline void
dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, int nents,
		    enum dma_data_direction dir)
{
	BUG_ON(!valid_dma_direction(dir));
}

extern void
dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
		       int nents, enum dma_data_direction dir);
static inline void
dma_cache_sync(struct device *dev, void *vaddr, size_t size,
	       enum dma_data_direction dir)
{
	_dma_sync((dma_addr_t)vaddr, size, dir);
}

#endif				/* _BLACKFIN_DMA_MAPPING_H */