x86, ia64: convert to use generic dma_map_ops struct
[linux-2.6.git] arch/x86/include/asm/dma-mapping.h
#ifndef _ASM_X86_DMA_MAPPING_H
#define _ASM_X86_DMA_MAPPING_H

/*
 * IOMMU interface. See Documentation/DMA-mapping.txt and DMA-API.txt for
 * documentation.
 */

#include <linux/scatterlist.h>
#include <linux/dma-attrs.h>
#include <asm/io.h>
#include <asm/swiotlb.h>
#include <asm-generic/dma-coherent.h>

extern dma_addr_t bad_dma_address;
extern int iommu_merge;
extern struct device x86_dma_fallback_dev;
extern int panic_on_overflow;

extern struct dma_map_ops *dma_ops;

static inline struct dma_map_ops *get_dma_ops(struct device *dev)
{
#ifdef CONFIG_X86_32
        return dma_ops;
#else
        if (unlikely(!dev) || !dev->archdata.dma_ops)
                return dma_ops;
        else
                return dev->archdata.dma_ops;
#endif
}
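/*
 * Illustrative note (not part of the original header): on 64-bit, an IOMMU
 * driver can give one device its own operations by pointing
 * dev->archdata.dma_ops at a private dma_map_ops table, e.g. hypothetically:
 *
 *	static struct dma_map_ops my_iommu_dma_ops = {
 *		.map_page	= my_iommu_map_page,
 *		.unmap_page	= my_iommu_unmap_page,
 *		.map_sg		= my_iommu_map_sg,
 *	};
 *	...
 *	dev->archdata.dma_ops = &my_iommu_dma_ops;
 *
 * The my_iommu_* names are made up for the example; devices without a
 * private table fall back to the global dma_ops above.
 */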

/* Make sure we keep the same behaviour */
static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
        struct dma_map_ops *ops = get_dma_ops(dev);
        if (ops->mapping_error)
                return ops->mapping_error(dev, dma_addr);

        return (dma_addr == bad_dma_address);
}
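/*
 * Illustrative sketch (assumption, not from this header): callers are
 * expected to check every streaming mapping for failure, e.g.
 *
 *	dma_addr_t handle = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
 *	if (dma_mapping_error(dev, handle))
 *		return -ENOMEM;
 *
 * On x86 a failed mapping is reported as bad_dma_address unless the active
 * dma_map_ops provides its own mapping_error hook.
 */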

#define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f)
#define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h)
#define dma_is_consistent(d, h) (1)

extern int dma_supported(struct device *hwdev, u64 mask);
extern int dma_set_mask(struct device *dev, u64 mask);

extern void *dma_generic_alloc_coherent(struct device *dev, size_t size,
                                        dma_addr_t *dma_addr, gfp_t flag);

static inline dma_addr_t
dma_map_single(struct device *hwdev, void *ptr, size_t size,
               enum dma_data_direction dir)
{
        struct dma_map_ops *ops = get_dma_ops(hwdev);

        BUG_ON(!valid_dma_direction(dir));
        return ops->map_page(hwdev, virt_to_page(ptr),
                             (unsigned long)ptr & ~PAGE_MASK, size,
                             dir, NULL);
}

static inline void
dma_unmap_single(struct device *dev, dma_addr_t addr, size_t size,
                 enum dma_data_direction dir)
{
        struct dma_map_ops *ops = get_dma_ops(dev);

        BUG_ON(!valid_dma_direction(dir));
        if (ops->unmap_page)
                ops->unmap_page(dev, addr, size, dir, NULL);
}
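/*
 * Illustrative sketch (assumption, not part of this header): a typical
 * streaming DMA transmit path maps a kmalloc'ed buffer, hands the bus
 * address to the hardware, and unmaps it once the transfer completes:
 *
 *	dma_addr_t handle;
 *
 *	handle = dma_map_single(&pdev->dev, buf, len, DMA_TO_DEVICE);
 *	if (dma_mapping_error(&pdev->dev, handle))
 *		goto err;
 *	(program 'handle' into the device's descriptor and start the DMA)
 *	...
 *	dma_unmap_single(&pdev->dev, handle, len, DMA_TO_DEVICE);
 *
 * buf, len and pdev stand in for whatever the driver actually uses.
 */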

static inline int
dma_map_sg(struct device *hwdev, struct scatterlist *sg,
           int nents, enum dma_data_direction dir)
{
        struct dma_map_ops *ops = get_dma_ops(hwdev);

        BUG_ON(!valid_dma_direction(dir));
        return ops->map_sg(hwdev, sg, nents, dir, NULL);
}

static inline void
dma_unmap_sg(struct device *hwdev, struct scatterlist *sg, int nents,
             enum dma_data_direction dir)
{
        struct dma_map_ops *ops = get_dma_ops(hwdev);

        BUG_ON(!valid_dma_direction(dir));
        if (ops->unmap_sg)
                ops->unmap_sg(hwdev, sg, nents, dir, NULL);
}
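/*
 * Illustrative sketch (assumption): dma_map_sg() may coalesce entries, so
 * drivers walk the returned count rather than the original nents:
 *
 *	int i, count;
 *	struct scatterlist *s;
 *
 *	count = dma_map_sg(dev, sglist, nents, DMA_FROM_DEVICE);
 *	if (!count)
 *		return -ENOMEM;
 *	for_each_sg(sglist, s, count, i)
 *		program_hw_segment(sg_dma_address(s), sg_dma_len(s));
 *	...
 *	dma_unmap_sg(dev, sglist, nents, DMA_FROM_DEVICE);
 *
 * program_hw_segment() is a hypothetical device-specific helper; note that
 * dma_unmap_sg() takes the original nents, not the mapped count.
 */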

static inline void
dma_sync_single_for_cpu(struct device *hwdev, dma_addr_t dma_handle,
                        size_t size, enum dma_data_direction dir)
{
        struct dma_map_ops *ops = get_dma_ops(hwdev);

        BUG_ON(!valid_dma_direction(dir));
        if (ops->sync_single_for_cpu)
                ops->sync_single_for_cpu(hwdev, dma_handle, size, dir);
        flush_write_buffers();
}

static inline void
dma_sync_single_for_device(struct device *hwdev, dma_addr_t dma_handle,
                           size_t size, enum dma_data_direction dir)
{
        struct dma_map_ops *ops = get_dma_ops(hwdev);

        BUG_ON(!valid_dma_direction(dir));
        if (ops->sync_single_for_device)
                ops->sync_single_for_device(hwdev, dma_handle, size, dir);
        flush_write_buffers();
}
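/*
 * Illustrative sketch (assumption): if the CPU needs to look at a buffer
 * that stays mapped across hardware transfers, ownership is handed back
 * and forth with the sync calls:
 *
 *	dma_sync_single_for_cpu(dev, handle, len, DMA_FROM_DEVICE);
 *	(examine or copy the received data on the CPU side)
 *	dma_sync_single_for_device(dev, handle, len, DMA_FROM_DEVICE);
 *
 * With a bounce-buffering implementation such as swiotlb, these calls are
 * what copy data between the bounce buffer and the driver's buffer.
 */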

static inline void
dma_sync_single_range_for_cpu(struct device *hwdev, dma_addr_t dma_handle,
                              unsigned long offset, size_t size,
                              enum dma_data_direction dir)
{
        struct dma_map_ops *ops = get_dma_ops(hwdev);

        BUG_ON(!valid_dma_direction(dir));
        if (ops->sync_single_range_for_cpu)
                ops->sync_single_range_for_cpu(hwdev, dma_handle, offset,
                                               size, dir);
        flush_write_buffers();
}

static inline void
dma_sync_single_range_for_device(struct device *hwdev, dma_addr_t dma_handle,
                                 unsigned long offset, size_t size,
                                 enum dma_data_direction dir)
{
        struct dma_map_ops *ops = get_dma_ops(hwdev);

        BUG_ON(!valid_dma_direction(dir));
        if (ops->sync_single_range_for_device)
                ops->sync_single_range_for_device(hwdev, dma_handle,
                                                  offset, size, dir);
        flush_write_buffers();
}

static inline void
dma_sync_sg_for_cpu(struct device *hwdev, struct scatterlist *sg,
                    int nelems, enum dma_data_direction dir)
{
        struct dma_map_ops *ops = get_dma_ops(hwdev);

        BUG_ON(!valid_dma_direction(dir));
        if (ops->sync_sg_for_cpu)
                ops->sync_sg_for_cpu(hwdev, sg, nelems, dir);
        flush_write_buffers();
}

static inline void
dma_sync_sg_for_device(struct device *hwdev, struct scatterlist *sg,
                       int nelems, enum dma_data_direction dir)
{
        struct dma_map_ops *ops = get_dma_ops(hwdev);

        BUG_ON(!valid_dma_direction(dir));
        if (ops->sync_sg_for_device)
                ops->sync_sg_for_device(hwdev, sg, nelems, dir);

        flush_write_buffers();
}

static inline dma_addr_t dma_map_page(struct device *dev, struct page *page,
                                      size_t offset, size_t size,
                                      enum dma_data_direction dir)
{
        struct dma_map_ops *ops = get_dma_ops(dev);

        BUG_ON(!valid_dma_direction(dir));
        return ops->map_page(dev, page, offset, size, dir, NULL);
}

static inline void dma_unmap_page(struct device *dev, dma_addr_t addr,
                                  size_t size, enum dma_data_direction dir)
{
        dma_unmap_single(dev, addr, size, dir);
}
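/*
 * Illustrative sketch (assumption): dma_map_page() is the page-based
 * variant used when the buffer is described by a struct page, for example
 * a highmem page with no permanent kernel virtual address:
 *
 *	dma_addr_t handle;
 *
 *	handle = dma_map_page(dev, page, 0, PAGE_SIZE, DMA_TO_DEVICE);
 *	if (dma_mapping_error(dev, handle))
 *		return -ENOMEM;
 *	...
 *	dma_unmap_page(dev, handle, PAGE_SIZE, DMA_TO_DEVICE);
 *
 * Note that dma_map_single() above is itself implemented in terms of
 * ->map_page() via virt_to_page().
 */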

static inline void
dma_cache_sync(struct device *dev, void *vaddr, size_t size,
        enum dma_data_direction dir)
{
        flush_write_buffers();
}

static inline int dma_get_cache_alignment(void)
{
        /* no easy way to get cache size on all x86, so return the
         * maximum possible, to be safe */
        return boot_cpu_data.x86_clflush_size;
}

static inline unsigned long dma_alloc_coherent_mask(struct device *dev,
                                                    gfp_t gfp)
{
        unsigned long dma_mask = 0;

        dma_mask = dev->coherent_dma_mask;
        if (!dma_mask)
                dma_mask = (gfp & GFP_DMA) ? DMA_24BIT_MASK : DMA_32BIT_MASK;

        return dma_mask;
}

static inline gfp_t dma_alloc_coherent_gfp_flags(struct device *dev, gfp_t gfp)
{
        unsigned long dma_mask = dma_alloc_coherent_mask(dev, gfp);

        if (dma_mask <= DMA_24BIT_MASK)
                gfp |= GFP_DMA;
#ifdef CONFIG_X86_64
        if (dma_mask <= DMA_32BIT_MASK && !(gfp & GFP_DMA))
                gfp |= GFP_DMA32;
#endif
        return gfp;
}
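/*
 * Illustrative worked example (assumption): the coherent mask selects the
 * allocation zone. A device with coherent_dma_mask = DMA_24BIT_MASK gets
 * GFP_DMA so the buffer comes from the low 16MB; on 64-bit, a 32-bit mask
 * adds GFP_DMA32 so the buffer stays below 4GB; a full 64-bit mask leaves
 * the gfp flags untouched, e.g.
 *
 *	dev->coherent_dma_mask = DMA_32BIT_MASK;
 *	gfp = dma_alloc_coherent_gfp_flags(dev, GFP_KERNEL);
 *	(on x86_64, gfp now includes GFP_DMA32)
 */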

static inline void *
dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle,
                gfp_t gfp)
{
        struct dma_map_ops *ops = get_dma_ops(dev);
        void *memory;

        gfp &= ~(__GFP_DMA | __GFP_HIGHMEM | __GFP_DMA32);

        if (dma_alloc_from_coherent(dev, size, dma_handle, &memory))
                return memory;

        if (!dev) {
                dev = &x86_dma_fallback_dev;
                gfp |= GFP_DMA;
        }

        if (!is_device_dma_capable(dev))
                return NULL;

        if (!ops->alloc_coherent)
                return NULL;

        return ops->alloc_coherent(dev, size, dma_handle,
                                   dma_alloc_coherent_gfp_flags(dev, gfp));
}

static inline void dma_free_coherent(struct device *dev, size_t size,
                                     void *vaddr, dma_addr_t bus)
{
        struct dma_map_ops *ops = get_dma_ops(dev);

        WARN_ON(irqs_disabled());       /* for portability */

        if (dma_release_from_coherent(dev, get_order(size), vaddr))
                return;

        if (ops->free_coherent)
                ops->free_coherent(dev, size, vaddr, bus);
}
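/*
 * Illustrative sketch (assumption): a driver allocates a long-lived,
 * consistent descriptor ring at probe time and frees it at remove time:
 *
 *	void *ring;
 *	dma_addr_t ring_dma;
 *
 *	ring = dma_alloc_coherent(&pdev->dev, RING_BYTES, &ring_dma,
 *				  GFP_KERNEL);
 *	if (!ring)
 *		return -ENOMEM;
 *	...
 *	dma_free_coherent(&pdev->dev, RING_BYTES, ring, ring_dma);
 *
 * RING_BYTES is a hypothetical driver constant; per the WARN_ON above,
 * dma_free_coherent() must not be called with interrupts disabled.
 */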

#endif