#ifndef _ASM_X86_DMA_MAPPING_H
#define _ASM_X86_DMA_MAPPING_H

/*
 * IOMMU interface. See Documentation/DMA-mapping.txt and DMA-API.txt for
 * documentation.
 */

#include <linux/scatterlist.h>
#include <linux/dma-attrs.h>
#include <asm/io.h>
#include <asm/swiotlb.h>
#include <asm-generic/dma-coherent.h>

extern dma_addr_t bad_dma_address;
extern int iommu_merge;
extern struct device x86_dma_fallback_dev;
extern int panic_on_overflow;
struct dma_mapping_ops {
	int		(*mapping_error)(struct device *dev,
				dma_addr_t dma_addr);
	void		*(*alloc_coherent)(struct device *dev, size_t size,
				dma_addr_t *dma_handle, gfp_t gfp);
	void		(*free_coherent)(struct device *dev, size_t size,
				void *vaddr, dma_addr_t dma_handle);
	void		(*sync_single_for_cpu)(struct device *hwdev,
				dma_addr_t dma_handle, size_t size,
				int direction);
	void		(*sync_single_for_device)(struct device *hwdev,
				dma_addr_t dma_handle, size_t size,
				int direction);
	void		(*sync_single_range_for_cpu)(struct device *hwdev,
				dma_addr_t dma_handle, unsigned long offset,
				size_t size, int direction);
	void		(*sync_single_range_for_device)(struct device *hwdev,
				dma_addr_t dma_handle, unsigned long offset,
				size_t size, int direction);
	void		(*sync_sg_for_cpu)(struct device *hwdev,
				struct scatterlist *sg, int nelems,
				int direction);
	void		(*sync_sg_for_device)(struct device *hwdev,
				struct scatterlist *sg, int nelems,
				int direction);
	int		(*map_sg)(struct device *hwdev, struct scatterlist *sg,
				int nents, int direction);
	void		(*unmap_sg)(struct device *hwdev,
				struct scatterlist *sg, int nents,
				int direction);
	dma_addr_t	(*map_page)(struct device *dev, struct page *page,
				unsigned long offset, size_t size,
				enum dma_data_direction dir,
				struct dma_attrs *attrs);
	void		(*unmap_page)(struct device *dev, dma_addr_t dma_handle,
				size_t size, enum dma_data_direction dir,
				struct dma_attrs *attrs);
	int		(*dma_supported)(struct device *hwdev, u64 mask);
	int		is_phys;
};

extern struct dma_mapping_ops *dma_ops;
static inline struct dma_mapping_ops *get_dma_ops(struct device *dev)
{
#ifdef CONFIG_X86_32
	return dma_ops;
#else
	if (unlikely(!dev) || !dev->archdata.dma_ops)
		return dma_ops;
	else
		return dev->archdata.dma_ops;
#endif
}
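/*
 * Illustrative sketch, not part of this header: a DMA backend supplies a
 * struct dma_mapping_ops instance and points the global dma_ops (or a
 * device's archdata.dma_ops) at it. The names example_dma_ops, example_map_sg
 * and example_map_page below are hypothetical placeholders for a backend's
 * own functions.
 *
 *	static struct dma_mapping_ops example_dma_ops = {
 *		.map_sg		= example_map_sg,
 *		.map_page	= example_map_page,
 *	};
 *
 *	dma_ops = &example_dma_ops;
 */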
/* Make sure we keep the same behaviour */
static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
	struct dma_mapping_ops *ops = get_dma_ops(dev);

	if (ops->mapping_error)
		return ops->mapping_error(dev, dma_addr);

	return (dma_addr == bad_dma_address);
}
#define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f)
#define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h)
#define dma_is_consistent(d, h)	(1)

extern int dma_supported(struct device *hwdev, u64 mask);
extern int dma_set_mask(struct device *dev, u64 mask);

extern void *dma_generic_alloc_coherent(struct device *dev, size_t size,
					dma_addr_t *dma_addr, gfp_t flag);
static inline dma_addr_t
dma_map_single(struct device *hwdev, void *ptr, size_t size,
	       int direction)
{
	struct dma_mapping_ops *ops = get_dma_ops(hwdev);

	BUG_ON(!valid_dma_direction(direction));
	return ops->map_page(hwdev, virt_to_page(ptr),
			     (unsigned long)ptr & ~PAGE_MASK, size,
			     direction, NULL);
}

static inline void
dma_unmap_single(struct device *dev, dma_addr_t addr, size_t size,
		 int direction)
{
	struct dma_mapping_ops *ops = get_dma_ops(dev);

	BUG_ON(!valid_dma_direction(direction));
	if (ops->unmap_page)
		ops->unmap_page(dev, addr, size, direction, NULL);
}
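/*
 * Usage sketch, illustrative only: a driver maps a kernel-virtual buffer for
 * one DMA transfer, checks the result with dma_mapping_error(), and unmaps it
 * once the device is done. "dev", "buf" and "len" below are hypothetical.
 *
 *	dma_addr_t bus = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
 *
 *	if (dma_mapping_error(dev, bus))
 *		return -ENOMEM;
 *	... hand "bus" to the device and wait for completion ...
 *	dma_unmap_single(dev, bus, len, DMA_TO_DEVICE);
 */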
static inline int
dma_map_sg(struct device *hwdev, struct scatterlist *sg,
	   int nents, int direction)
{
	struct dma_mapping_ops *ops = get_dma_ops(hwdev);

	BUG_ON(!valid_dma_direction(direction));
	return ops->map_sg(hwdev, sg, nents, direction);
}

static inline void
dma_unmap_sg(struct device *hwdev, struct scatterlist *sg, int nents,
	     int direction)
{
	struct dma_mapping_ops *ops = get_dma_ops(hwdev);

	BUG_ON(!valid_dma_direction(direction));
	if (ops->unmap_sg)
		ops->unmap_sg(hwdev, sg, nents, direction);
}
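/*
 * Usage sketch, illustrative only: dma_map_sg() may return fewer entries than
 * it was given (for example when an IOMMU merges adjacent ones); the returned
 * count is what the driver walks, while the original nents is what it hands
 * back to dma_unmap_sg(). "dev", "sg" and "nents" are hypothetical, and
 * program_device() stands in for driver-specific code.
 *
 *	struct scatterlist *s;
 *	int i, count = dma_map_sg(dev, sg, nents, DMA_FROM_DEVICE);
 *
 *	for_each_sg(sg, s, count, i)
 *		program_device(sg_dma_address(s), sg_dma_len(s));
 *	...
 *	dma_unmap_sg(dev, sg, nents, DMA_FROM_DEVICE);
 */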
static inline void
dma_sync_single_for_cpu(struct device *hwdev, dma_addr_t dma_handle,
			size_t size, int direction)
{
	struct dma_mapping_ops *ops = get_dma_ops(hwdev);

	BUG_ON(!valid_dma_direction(direction));
	if (ops->sync_single_for_cpu)
		ops->sync_single_for_cpu(hwdev, dma_handle, size, direction);
	flush_write_buffers();
}

static inline void
dma_sync_single_for_device(struct device *hwdev, dma_addr_t dma_handle,
			   size_t size, int direction)
{
	struct dma_mapping_ops *ops = get_dma_ops(hwdev);

	BUG_ON(!valid_dma_direction(direction));
	if (ops->sync_single_for_device)
		ops->sync_single_for_device(hwdev, dma_handle, size, direction);
	flush_write_buffers();
}
static inline void
dma_sync_single_range_for_cpu(struct device *hwdev, dma_addr_t dma_handle,
			      unsigned long offset, size_t size, int direction)
{
	struct dma_mapping_ops *ops = get_dma_ops(hwdev);

	BUG_ON(!valid_dma_direction(direction));
	if (ops->sync_single_range_for_cpu)
		ops->sync_single_range_for_cpu(hwdev, dma_handle, offset,
					       size, direction);
	flush_write_buffers();
}

static inline void
dma_sync_single_range_for_device(struct device *hwdev, dma_addr_t dma_handle,
				 unsigned long offset, size_t size,
				 int direction)
{
	struct dma_mapping_ops *ops = get_dma_ops(hwdev);

	BUG_ON(!valid_dma_direction(direction));
	if (ops->sync_single_range_for_device)
		ops->sync_single_range_for_device(hwdev, dma_handle,
						  offset, size, direction);
	flush_write_buffers();
}
static inline void
dma_sync_sg_for_cpu(struct device *hwdev, struct scatterlist *sg,
		    int nelems, int direction)
{
	struct dma_mapping_ops *ops = get_dma_ops(hwdev);

	BUG_ON(!valid_dma_direction(direction));
	if (ops->sync_sg_for_cpu)
		ops->sync_sg_for_cpu(hwdev, sg, nelems, direction);
	flush_write_buffers();
}

static inline void
dma_sync_sg_for_device(struct device *hwdev, struct scatterlist *sg,
		       int nelems, int direction)
{
	struct dma_mapping_ops *ops = get_dma_ops(hwdev);

	BUG_ON(!valid_dma_direction(direction));
	if (ops->sync_sg_for_device)
		ops->sync_sg_for_device(hwdev, sg, nelems, direction);

	flush_write_buffers();
}
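/*
 * Usage sketch, illustrative only: a streaming mapping is owned by the device
 * between map and unmap. Before the CPU touches the data it must sync the
 * buffer back, and it must hand ownership to the device again afterwards.
 * "dev", "bus" and "len" are hypothetical.
 *
 *	dma_sync_single_for_cpu(dev, bus, len, DMA_FROM_DEVICE);
 *	... CPU inspects or updates the buffer ...
 *	dma_sync_single_for_device(dev, bus, len, DMA_FROM_DEVICE);
 */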
static inline dma_addr_t dma_map_page(struct device *dev, struct page *page,
				      size_t offset, size_t size,
				      int direction)
{
	struct dma_mapping_ops *ops = get_dma_ops(dev);

	BUG_ON(!valid_dma_direction(direction));
	return ops->map_page(dev, page, offset, size, direction, NULL);
}

static inline void dma_unmap_page(struct device *dev, dma_addr_t addr,
				  size_t size, int direction)
{
	dma_unmap_single(dev, addr, size, direction);
}

static inline void
dma_cache_sync(struct device *dev, void *vaddr, size_t size,
	       enum dma_data_direction dir)
{
	flush_write_buffers();
}
static inline int dma_get_cache_alignment(void)
{
	/*
	 * No easy way to get the cache size on all x86, so return the
	 * maximum possible to be safe.
	 */
	return boot_cpu_data.x86_clflush_size;
}
static inline unsigned long dma_alloc_coherent_mask(struct device *dev,
						    gfp_t gfp)
{
	unsigned long dma_mask = 0;

	dma_mask = dev->coherent_dma_mask;
	if (!dma_mask)
		dma_mask = (gfp & GFP_DMA) ? DMA_24BIT_MASK : DMA_32BIT_MASK;

	return dma_mask;
}

static inline gfp_t dma_alloc_coherent_gfp_flags(struct device *dev, gfp_t gfp)
{
	unsigned long dma_mask = dma_alloc_coherent_mask(dev, gfp);

	if (dma_mask <= DMA_24BIT_MASK)
		gfp |= GFP_DMA;
#ifdef CONFIG_X86_64
	if (dma_mask <= DMA_32BIT_MASK && !(gfp & GFP_DMA))
		gfp |= GFP_DMA32;
#endif
	return gfp;
}
static inline void *
dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle,
		   gfp_t gfp)
{
	struct dma_mapping_ops *ops = get_dma_ops(dev);
	void *memory;

	gfp &= ~(__GFP_DMA | __GFP_HIGHMEM | __GFP_DMA32);

	if (dma_alloc_from_coherent(dev, size, dma_handle, &memory))
		return memory;

	if (!dev) {
		dev = &x86_dma_fallback_dev;
		gfp |= GFP_DMA;
	}

	if (!is_device_dma_capable(dev))
		return NULL;

	if (!ops->alloc_coherent)
		return NULL;

	return ops->alloc_coherent(dev, size, dma_handle,
				   dma_alloc_coherent_gfp_flags(dev, gfp));
}
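/*
 * Usage sketch, illustrative only: coherent memory suits long-lived,
 * bidirectional structures such as a descriptor ring. "dev", "ring" and
 * RING_BYTES are hypothetical.
 *
 *	dma_addr_t ring_bus;
 *	void *ring = dma_alloc_coherent(dev, RING_BYTES, &ring_bus, GFP_KERNEL);
 *
 *	if (!ring)
 *		return -ENOMEM;
 *	... tell the device about ring_bus, use "ring" from the CPU ...
 *	dma_free_coherent(dev, RING_BYTES, ring, ring_bus);
 */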
static inline void dma_free_coherent(struct device *dev, size_t size,
				     void *vaddr, dma_addr_t bus)
{
	struct dma_mapping_ops *ops = get_dma_ops(dev);

	WARN_ON(irqs_disabled());	/* for portability */

	if (dma_release_from_coherent(dev, get_order(size), vaddr))
		return;

	if (ops->free_coherent)
		ops->free_coherent(dev, size, vaddr, bus);
}

#endif /* _ASM_X86_DMA_MAPPING_H */