b81f82268a1691174a46f514e10b0ed612cfcb7e
[linux-2.6.git] / arch/x86/include/asm/dma-mapping.h
#ifndef _ASM_X86_DMA_MAPPING_H
#define _ASM_X86_DMA_MAPPING_H

/*
 * IOMMU interface. See Documentation/DMA-mapping.txt and DMA-API.txt for
 * documentation.
 */

#include <linux/scatterlist.h>
#include <linux/dma-attrs.h>
#include <asm/io.h>
#include <asm/swiotlb.h>
#include <asm-generic/dma-coherent.h>

extern dma_addr_t bad_dma_address;
extern int iommu_merge;
extern struct device x86_dma_fallback_dev;
extern int panic_on_overflow;

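/*
 * Operation table that hides the concrete DMA implementation behind the
 * DMA API.  Each backend (e.g. nommu, swiotlb or a hardware IOMMU driver)
 * provides its own instance; callbacks left NULL are skipped or given a
 * default by the inline wrappers below.
 */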
struct dma_mapping_ops {
        int             (*mapping_error)(struct device *dev,
                                         dma_addr_t dma_addr);
        void*           (*alloc_coherent)(struct device *dev, size_t size,
                                dma_addr_t *dma_handle, gfp_t gfp);
        void            (*free_coherent)(struct device *dev, size_t size,
                                void *vaddr, dma_addr_t dma_handle);
        void            (*sync_single_for_cpu)(struct device *hwdev,
                                dma_addr_t dma_handle, size_t size,
                                int direction);
        void            (*sync_single_for_device)(struct device *hwdev,
                                dma_addr_t dma_handle, size_t size,
                                int direction);
        void            (*sync_single_range_for_cpu)(struct device *hwdev,
                                dma_addr_t dma_handle, unsigned long offset,
                                size_t size, int direction);
        void            (*sync_single_range_for_device)(struct device *hwdev,
                                dma_addr_t dma_handle, unsigned long offset,
                                size_t size, int direction);
        void            (*sync_sg_for_cpu)(struct device *hwdev,
                                struct scatterlist *sg, int nelems,
                                int direction);
        void            (*sync_sg_for_device)(struct device *hwdev,
                                struct scatterlist *sg, int nelems,
                                int direction);
        int             (*map_sg)(struct device *hwdev, struct scatterlist *sg,
                                int nents, int direction);
        void            (*unmap_sg)(struct device *hwdev,
                                struct scatterlist *sg, int nents,
                                int direction);
        dma_addr_t      (*map_page)(struct device *dev, struct page *page,
                                    unsigned long offset, size_t size,
                                    enum dma_data_direction dir,
                                    struct dma_attrs *attrs);
        void            (*unmap_page)(struct device *dev, dma_addr_t dma_handle,
                                      size_t size, enum dma_data_direction dir,
                                      struct dma_attrs *attrs);
        int             (*dma_supported)(struct device *hwdev, u64 mask);
        int             is_phys;
};

extern struct dma_mapping_ops *dma_ops;

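/*
 * On 32-bit there is a single global ops table; on 64-bit a device may
 * carry its own table in dev->archdata.dma_ops (e.g. when it sits behind
 * a per-bus IOMMU), with the global dma_ops as the fallback.
 */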
static inline struct dma_mapping_ops *get_dma_ops(struct device *dev)
{
#ifdef CONFIG_X86_32
        return dma_ops;
#else
        if (unlikely(!dev) || !dev->archdata.dma_ops)
                return dma_ops;
        else
                return dev->archdata.dma_ops;
#endif
}

/* Make sure we keep the same behaviour */
static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
        struct dma_mapping_ops *ops = get_dma_ops(dev);
        if (ops->mapping_error)
                return ops->mapping_error(dev, dma_addr);

        return (dma_addr == bad_dma_address);
}

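/*
 * x86 DMA is cache-coherent, so the "noncoherent" variants simply reuse
 * the coherent allocator and dma_is_consistent() is always true.
 */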
#define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f)
#define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h)
#define dma_is_consistent(d, h) (1)

extern int dma_supported(struct device *hwdev, u64 mask);
extern int dma_set_mask(struct device *dev, u64 mask);

extern void *dma_generic_alloc_coherent(struct device *dev, size_t size,
                                        dma_addr_t *dma_addr, gfp_t flag);

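/*
 * Streaming mappings: dma_map_single() is built on the backend's
 * ->map_page() callback, using the page and in-page offset of the kernel
 * virtual address, so it must not be handed vmalloc/highmem addresses.
 *
 * A minimal usage sketch (illustrative only; "dev", "buf" and "len" are
 * assumptions, not defined by this header):
 *
 *      dma_addr_t bus = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
 *      if (dma_mapping_error(dev, bus))
 *              return -EIO;
 *      ... run the DMA ...
 *      dma_unmap_single(dev, bus, len, DMA_TO_DEVICE);
 */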
static inline dma_addr_t
dma_map_single(struct device *hwdev, void *ptr, size_t size,
               int direction)
{
        struct dma_mapping_ops *ops = get_dma_ops(hwdev);

        BUG_ON(!valid_dma_direction(direction));
        return ops->map_page(hwdev, virt_to_page(ptr),
                             (unsigned long)ptr & ~PAGE_MASK, size,
                             direction, NULL);
}

static inline void
dma_unmap_single(struct device *dev, dma_addr_t addr, size_t size,
                 int direction)
{
        struct dma_mapping_ops *ops = get_dma_ops(dev);

        BUG_ON(!valid_dma_direction(direction));
        if (ops->unmap_page)
                ops->unmap_page(dev, addr, size, direction, NULL);
}

static inline int
dma_map_sg(struct device *hwdev, struct scatterlist *sg,
           int nents, int direction)
{
        struct dma_mapping_ops *ops = get_dma_ops(hwdev);

        BUG_ON(!valid_dma_direction(direction));
        return ops->map_sg(hwdev, sg, nents, direction);
}

static inline void
dma_unmap_sg(struct device *hwdev, struct scatterlist *sg, int nents,
             int direction)
{
        struct dma_mapping_ops *ops = get_dma_ops(hwdev);

        BUG_ON(!valid_dma_direction(direction));
        if (ops->unmap_sg)
                ops->unmap_sg(hwdev, sg, nents, direction);
}

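/*
 * Ownership transfer for streaming mappings: sync_*_for_cpu hands a mapped
 * buffer back to the CPU before it reads the data, sync_*_for_device hands
 * it back to the device before the next transfer.  The backend hook may be
 * absent (NULL); flush_write_buffers() is always issued afterwards.
 */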
static inline void
dma_sync_single_for_cpu(struct device *hwdev, dma_addr_t dma_handle,
                        size_t size, int direction)
{
        struct dma_mapping_ops *ops = get_dma_ops(hwdev);

        BUG_ON(!valid_dma_direction(direction));
        if (ops->sync_single_for_cpu)
                ops->sync_single_for_cpu(hwdev, dma_handle, size, direction);
        flush_write_buffers();
}

static inline void
dma_sync_single_for_device(struct device *hwdev, dma_addr_t dma_handle,
                           size_t size, int direction)
{
        struct dma_mapping_ops *ops = get_dma_ops(hwdev);

        BUG_ON(!valid_dma_direction(direction));
        if (ops->sync_single_for_device)
                ops->sync_single_for_device(hwdev, dma_handle, size, direction);
        flush_write_buffers();
}

static inline void
dma_sync_single_range_for_cpu(struct device *hwdev, dma_addr_t dma_handle,
                              unsigned long offset, size_t size, int direction)
{
        struct dma_mapping_ops *ops = get_dma_ops(hwdev);

        BUG_ON(!valid_dma_direction(direction));
        if (ops->sync_single_range_for_cpu)
                ops->sync_single_range_for_cpu(hwdev, dma_handle, offset,
                                               size, direction);
        flush_write_buffers();
}

static inline void
dma_sync_single_range_for_device(struct device *hwdev, dma_addr_t dma_handle,
                                 unsigned long offset, size_t size,
                                 int direction)
{
        struct dma_mapping_ops *ops = get_dma_ops(hwdev);

        BUG_ON(!valid_dma_direction(direction));
        if (ops->sync_single_range_for_device)
                ops->sync_single_range_for_device(hwdev, dma_handle,
                                                  offset, size, direction);
        flush_write_buffers();
}

static inline void
dma_sync_sg_for_cpu(struct device *hwdev, struct scatterlist *sg,
                    int nelems, int direction)
{
        struct dma_mapping_ops *ops = get_dma_ops(hwdev);

        BUG_ON(!valid_dma_direction(direction));
        if (ops->sync_sg_for_cpu)
                ops->sync_sg_for_cpu(hwdev, sg, nelems, direction);
        flush_write_buffers();
}

static inline void
dma_sync_sg_for_device(struct device *hwdev, struct scatterlist *sg,
                       int nelems, int direction)
{
        struct dma_mapping_ops *ops = get_dma_ops(hwdev);

        BUG_ON(!valid_dma_direction(direction));
        if (ops->sync_sg_for_device)
                ops->sync_sg_for_device(hwdev, sg, nelems, direction);

        flush_write_buffers();
}

static inline dma_addr_t dma_map_page(struct device *dev, struct page *page,
                                      size_t offset, size_t size,
                                      int direction)
{
        struct dma_mapping_ops *ops = get_dma_ops(dev);

        BUG_ON(!valid_dma_direction(direction));
        return ops->map_page(dev, page, offset, size, direction, NULL);
}

static inline void dma_unmap_page(struct device *dev, dma_addr_t addr,
                                  size_t size, int direction)
{
        dma_unmap_single(dev, addr, size, direction);
}

static inline void
dma_cache_sync(struct device *dev, void *vaddr, size_t size,
        enum dma_data_direction dir)
{
        flush_write_buffers();
}

static inline int dma_get_cache_alignment(void)
{
        /* no easy way to get cache size on all x86, so return the
         * maximum possible, to be safe */
        return boot_cpu_data.x86_clflush_size;
}

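/*
 * Helpers for coherent allocations: pick the effective DMA mask (the
 * device's coherent_dma_mask, or a 24/32-bit default derived from the gfp
 * flags) and translate it into the GFP zone flags the allocation must
 * come from.
 */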
static inline unsigned long dma_alloc_coherent_mask(struct device *dev,
                                                    gfp_t gfp)
{
        unsigned long dma_mask = 0;

        dma_mask = dev->coherent_dma_mask;
        if (!dma_mask)
                dma_mask = (gfp & GFP_DMA) ? DMA_24BIT_MASK : DMA_32BIT_MASK;

        return dma_mask;
}

static inline gfp_t dma_alloc_coherent_gfp_flags(struct device *dev, gfp_t gfp)
{
        unsigned long dma_mask = dma_alloc_coherent_mask(dev, gfp);

        if (dma_mask <= DMA_24BIT_MASK)
                gfp |= GFP_DMA;
#ifdef CONFIG_X86_64
        if (dma_mask <= DMA_32BIT_MASK && !(gfp & GFP_DMA))
                gfp |= GFP_DMA32;
#endif
        return gfp;
}

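/*
 * Coherent allocation path: try the device's private coherent pool first
 * (dma_alloc_from_coherent), fall back to x86_dma_fallback_dev when no
 * device was supplied, then ask the backend's ->alloc_coherent() with GFP
 * zone flags matching the device's mask.
 */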
static inline void *
dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle,
                gfp_t gfp)
{
        struct dma_mapping_ops *ops = get_dma_ops(dev);
        void *memory;

        gfp &= ~(__GFP_DMA | __GFP_HIGHMEM | __GFP_DMA32);

        if (dma_alloc_from_coherent(dev, size, dma_handle, &memory))
                return memory;

        if (!dev) {
                dev = &x86_dma_fallback_dev;
                gfp |= GFP_DMA;
        }

        if (!is_device_dma_capable(dev))
                return NULL;

        if (!ops->alloc_coherent)
                return NULL;

        return ops->alloc_coherent(dev, size, dma_handle,
                                   dma_alloc_coherent_gfp_flags(dev, gfp));
}

static inline void dma_free_coherent(struct device *dev, size_t size,
                                     void *vaddr, dma_addr_t bus)
{
        struct dma_mapping_ops *ops = get_dma_ops(dev);

        WARN_ON(irqs_disabled());       /* for portability */

        if (dma_release_from_coherent(dev, get_order(size), vaddr))
                return;

        if (ops->free_coherent)
                ops->free_coherent(dev, size, vaddr, bus);
}

#endif