#ifndef __ASM_SH_DMA_MAPPING_H
#define __ASM_SH_DMA_MAPPING_H

#include <linux/mm.h>
#include <linux/scatterlist.h>
#include <linux/dma-debug.h>
#include <asm/cacheflush.h>
#include <asm/io.h>
#include <asm-generic/dma-coherent.h>

extern struct bus_type pci_bus_type;

#define dma_supported(dev, mask)	(1)

static inline int dma_set_mask(struct device *dev, u64 mask)
{
	if (!dev->dma_mask || !dma_supported(dev, mask))
		return -EIO;

	*dev->dma_mask = mask;

	return 0;
}
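
/*
 * Example: negotiating the DMA mask at probe time. A minimal sketch, not
 * taken from this file -- "foo_probe" and the 32-bit mask choice are
 * illustrative assumptions:
 *
 *	static int foo_probe(struct device *dev)
 *	{
 *		if (dma_set_mask(dev, DMA_BIT_MASK(32)))
 *			return -EIO;
 *		return 0;
 *	}
 *
 * dma_supported() is hardwired to 1 above, so this can only fail when the
 * bus code never set up dev->dma_mask.
 */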

void *dma_alloc_coherent(struct device *dev, size_t size,
			 dma_addr_t *dma_handle, gfp_t flag);

void dma_free_coherent(struct device *dev, size_t size,
		       void *vaddr, dma_addr_t dma_handle);

void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
		    enum dma_data_direction dir);

#define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f)
#define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h)
#define dma_is_consistent(d, h) (1)
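
/*
 * Example: a consistent buffer for a descriptor ring, held for the lifetime
 * of the device. A minimal sketch; "ring" and RING_BYTES are hypothetical
 * names, not part of this API:
 *
 *	void *ring;
 *	dma_addr_t ring_dma;
 *
 *	ring = dma_alloc_coherent(dev, RING_BYTES, &ring_dma, GFP_KERNEL);
 *	if (!ring)
 *		return -ENOMEM;
 *	...
 *	dma_free_coherent(dev, RING_BYTES, ring, ring_dma);
 *
 * The noncoherent variants above are plain aliases because
 * dma_is_consistent() is always true for this implementation.
 */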

static inline dma_addr_t dma_map_single(struct device *dev,
					void *ptr, size_t size,
					enum dma_data_direction dir)
{
	dma_addr_t addr = virt_to_phys(ptr);

	debug_dma_map_page(dev, virt_to_page(ptr),
			   (unsigned long)ptr & ~PAGE_MASK, size,
			   dir, addr, true);

#if defined(CONFIG_PCI) && !defined(CONFIG_SH_PCIDMA_NONCOHERENT)
	/*
	 * PCI is DMA-coherent in this configuration, so no cache
	 * maintenance is needed. The mapping is registered with dma-debug
	 * before this early return so that dma_unmap_single(), which
	 * unconditionally reports the unmap, stays balanced.
	 */
	if (dev->bus == &pci_bus_type)
		return addr;
#endif
	dma_cache_sync(dev, ptr, size, dir);

	return addr;
}

static inline void dma_unmap_single(struct device *dev, dma_addr_t addr,
				    size_t size, enum dma_data_direction dir)
{
	debug_dma_unmap_page(dev, addr, size, dir, true);
}
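
/*
 * Example: a streaming mapping around a single device transfer. A minimal
 * sketch; "buf", "len" and the completion wait are placeholders:
 *
 *	dma_addr_t busaddr;
 *
 *	busaddr = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
 *	... hand busaddr to the device and wait for completion ...
 *	dma_unmap_single(dev, busaddr, len, DMA_TO_DEVICE);
 *
 * All the real work happens at map time (the cache writeback/invalidate);
 * unmap only notifies the DMA debugging code.
 */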

static inline int dma_map_sg(struct device *dev, struct scatterlist *sg,
			     int nents, enum dma_data_direction dir)
{
	int i;

	for (i = 0; i < nents; i++) {
#if !defined(CONFIG_PCI) || defined(CONFIG_SH_PCIDMA_NONCOHERENT)
		dma_cache_sync(dev, sg_virt(&sg[i]), sg[i].length, dir);
#endif
		sg[i].dma_address = sg_phys(&sg[i]);
		sg[i].dma_length = sg[i].length;
	}

	debug_dma_map_sg(dev, sg, nents, i, dir);

	return nents;
}

static inline void dma_unmap_sg(struct device *dev, struct scatterlist *sg,
				int nents, enum dma_data_direction dir)
{
	debug_dma_unmap_sg(dev, sg, nents, dir);
}
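
/*
 * Example: driving hardware from a mapped scatterlist. A minimal sketch;
 * "program_hw_entry" is a hypothetical device-specific helper:
 *
 *	struct scatterlist *s;
 *	int i, count;
 *
 *	count = dma_map_sg(dev, sgl, nents, DMA_FROM_DEVICE);
 *	for_each_sg(sgl, s, count, i)
 *		program_hw_entry(sg_dma_address(s), sg_dma_len(s));
 *	...
 *	dma_unmap_sg(dev, sgl, nents, DMA_FROM_DEVICE);
 *
 * This implementation never coalesces entries, so count == nents.
 */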

static inline dma_addr_t dma_map_page(struct device *dev, struct page *page,
				      unsigned long offset, size_t size,
				      enum dma_data_direction dir)
{
	return dma_map_single(dev, page_address(page) + offset, size, dir);
}

static inline void dma_unmap_page(struct device *dev, dma_addr_t dma_address,
				  size_t size, enum dma_data_direction dir)
{
	dma_unmap_single(dev, dma_address, size, dir);
}
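
/*
 * Example: mapping a sub-range of a page (e.g. page-cache I/O). A minimal
 * sketch; "page", "offset" and "len" come from the caller:
 *
 *	dma_addr_t busaddr = dma_map_page(dev, page, offset, len,
 *					  DMA_FROM_DEVICE);
 *	...
 *	dma_unmap_page(dev, busaddr, len, DMA_FROM_DEVICE);
 *
 * This relies on the page having a kernel mapping: dma_map_page() just
 * resolves page_address() and defers to dma_map_single().
 */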

static inline void dma_sync_single(struct device *dev, dma_addr_t dma_handle,
				   size_t size, enum dma_data_direction dir)
{
#if defined(CONFIG_PCI) && !defined(CONFIG_SH_PCIDMA_NONCOHERENT)
	if (dev->bus == &pci_bus_type)
		return;
#endif
	dma_cache_sync(dev, phys_to_virt(dma_handle), size, dir);
}

static inline void dma_sync_single_range(struct device *dev,
					 dma_addr_t dma_handle,
					 unsigned long offset, size_t size,
					 enum dma_data_direction dir)
{
#if defined(CONFIG_PCI) && !defined(CONFIG_SH_PCIDMA_NONCOHERENT)
	if (dev->bus == &pci_bus_type)
		return;
#endif
	dma_cache_sync(dev, phys_to_virt(dma_handle) + offset, size, dir);
}

static inline void dma_sync_sg(struct device *dev, struct scatterlist *sg,
			       int nelems, enum dma_data_direction dir)
{
	int i;

	for (i = 0; i < nelems; i++) {
#if !defined(CONFIG_PCI) || defined(CONFIG_SH_PCIDMA_NONCOHERENT)
		dma_cache_sync(dev, sg_virt(&sg[i]), sg[i].length, dir);
#endif
		sg[i].dma_address = sg_phys(&sg[i]);
		sg[i].dma_length = sg[i].length;
	}
}

static inline void dma_sync_single_for_cpu(struct device *dev,
					   dma_addr_t dma_handle, size_t size,
					   enum dma_data_direction dir)
{
	dma_sync_single(dev, dma_handle, size, dir);
	debug_dma_sync_single_for_cpu(dev, dma_handle, size, dir);
}

static inline void dma_sync_single_for_device(struct device *dev,
					      dma_addr_t dma_handle,
					      size_t size,
					      enum dma_data_direction dir)
{
	dma_sync_single(dev, dma_handle, size, dir);
	debug_dma_sync_single_for_device(dev, dma_handle, size, dir);
}
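
/*
 * Example: letting the CPU peek at a live streaming buffer between device
 * transfers. A minimal sketch around a hypothetical receive buffer:
 *
 *	dma_sync_single_for_cpu(dev, busaddr, len, DMA_FROM_DEVICE);
 *	... CPU examines the received data ...
 *	dma_sync_single_for_device(dev, busaddr, len, DMA_FROM_DEVICE);
 *	... buffer is handed back to the device ...
 *
 * Both directions funnel into dma_sync_single() here; the coherent-PCI
 * case turns them into no-ops.
 */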

static inline void dma_sync_single_range_for_cpu(struct device *dev,
						 dma_addr_t dma_handle,
						 unsigned long offset,
						 size_t size,
						 enum dma_data_direction direction)
{
	dma_sync_single_for_cpu(dev, dma_handle + offset, size, direction);
	debug_dma_sync_single_range_for_cpu(dev, dma_handle,
					    offset, size, direction);
}

static inline void dma_sync_single_range_for_device(struct device *dev,
						    dma_addr_t dma_handle,
						    unsigned long offset,
						    size_t size,
						    enum dma_data_direction direction)
{
	dma_sync_single_for_device(dev, dma_handle + offset, size, direction);
	debug_dma_sync_single_range_for_device(dev, dma_handle,
					       offset, size, direction);
}

static inline void dma_sync_sg_for_cpu(struct device *dev,
					struct scatterlist *sg, int nelems,
					enum dma_data_direction dir)
{
	dma_sync_sg(dev, sg, nelems, dir);
	debug_dma_sync_sg_for_cpu(dev, sg, nelems, dir);
}

static inline void dma_sync_sg_for_device(struct device *dev,
					  struct scatterlist *sg, int nelems,
					  enum dma_data_direction dir)
{
	dma_sync_sg(dev, sg, nelems, dir);
	debug_dma_sync_sg_for_device(dev, sg, nelems, dir);
}

static inline int dma_get_cache_alignment(void)
{
	/*
	 * Each processor family will define its own L1_CACHE_SHIFT,
	 * L1_CACHE_BYTES wraps to this, so this is always safe.
	 */
	return L1_CACHE_BYTES;
}

static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
	return dma_addr == 0;
}
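
/*
 * Example: checking a streaming mapping before use. A minimal sketch; bus
 * address 0 is this implementation's failure cookie:
 *
 *	busaddr = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
 *	if (dma_mapping_error(dev, busaddr))
 *		return -ENOMEM;
 */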

#define ARCH_HAS_DMA_DECLARE_COHERENT_MEMORY

extern int
dma_declare_coherent_memory(struct device *dev, dma_addr_t bus_addr,
			    dma_addr_t device_addr, size_t size, int flags);

extern void
dma_release_declared_memory(struct device *dev);

extern void *
dma_mark_declared_memory_occupied(struct device *dev,
				  dma_addr_t device_addr, size_t size);
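
/*
 * Example: backing dma_alloc_coherent() with a device-local window (e.g.
 * on-board SRAM). A minimal sketch; the bus/device addresses and the size
 * are illustrative assumptions:
 *
 *	if (!dma_declare_coherent_memory(dev, 0xa0000000, 0x00000000,
 *					 0x10000, DMA_MEMORY_MAP))
 *		dev_warn(dev, "could not declare coherent memory\n");
 *	...
 *	dma_release_declared_memory(dev);
 *
 * On success, subsequent dma_alloc_coherent() calls for this device are
 * satisfied from the declared region (see asm-generic/dma-coherent.h).
 */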

#endif /* __ASM_SH_DMA_MAPPING_H */