/*
 * Copyright (c) 2006 QLogic, Corporation. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <rdma/ib_verbs.h>

#include "ipath_verbs.h"

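/*
 * The "DMA addresses" handed out by these routines are really kernel
 * virtual addresses, so a NULL pointer cast to u64 serves as the
 * "mapping failed" sentinel that ipath_mapping_error() tests for.
 */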
#define BAD_DMA_ADDRESS ((u64) 0)

/*
 * The following functions implement driver-specific replacements
 * for the ib_dma_*() functions.
 *
 * These functions return kernel virtual addresses instead of
 * device bus addresses since the driver uses the CPU to copy
 * data instead of using hardware DMA.
 */

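/*
 * A mapping produced by this file failed iff it came back as
 * BAD_DMA_ADDRESS, so a simple comparison is sufficient.
 */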
static int ipath_mapping_error(struct ib_device *dev, u64 dma_addr)
{
        return dma_addr == BAD_DMA_ADDRESS;
}

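/*
 * "Mapping" a single buffer is the identity transform: the kernel
 * virtual address is cast to u64 and handed back unchanged, and no
 * per-mapping state is kept.  This relies on the caller passing a
 * lowmem address the CPU can dereference directly.
 */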
static u64 ipath_dma_map_single(struct ib_device *dev,
                                void *cpu_addr, size_t size,
                                enum dma_data_direction direction)
{
        BUG_ON(!valid_dma_direction(direction));
        return (u64) cpu_addr;
}

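/* No mapping state was created above, so there is nothing to tear down. */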
static void ipath_dma_unmap_single(struct ib_device *dev,
                                   u64 addr, size_t size,
                                   enum dma_data_direction direction)
{
        BUG_ON(!valid_dma_direction(direction));
}

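/*
 * Map a single page: the "DMA address" is page_address() plus the
 * offset.  The mapping fails (returns BAD_DMA_ADDRESS) if the region
 * would cross the page boundary or if the page is in highmem and has
 * no permanent kernel mapping.
 */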
static u64 ipath_dma_map_page(struct ib_device *dev,
                              struct page *page,
                              unsigned long offset,
                              size_t size,
                              enum dma_data_direction direction)
{
        u64 addr;

        BUG_ON(!valid_dma_direction(direction));

        if (offset + size > PAGE_SIZE) {
                addr = BAD_DMA_ADDRESS;
                goto done;
        }

        addr = (u64) page_address(page);
        if (addr)
                addr += offset;
        /* TODO: handle highmem pages */

done:
        return addr;
}

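/* As with unmap_single, there is no per-page mapping state to release. */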
static void ipath_dma_unmap_page(struct ib_device *dev,
                                 u64 addr, size_t size,
                                 enum dma_data_direction direction)
{
        BUG_ON(!valid_dma_direction(direction));
}

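/*
 * "Map" a scatterlist by verifying that every entry has a usable
 * kernel virtual address.  Returns nents on success and 0 if any
 * entry is a highmem page without a permanent kernel mapping; the
 * addresses themselves are produced later by ipath_sg_dma_address().
 */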
static int ipath_map_sg(struct ib_device *dev, struct scatterlist *sg,
                        int nents, enum dma_data_direction direction)
{
        u64 addr;
        int i;
        int ret = nents;

        BUG_ON(!valid_dma_direction(direction));

        for (i = 0; i < nents; i++) {
                addr = (u64) page_address(sg[i].page);
                /* TODO: handle highmem pages */
                if (!addr) {
                        ret = 0;
                        break;
                }
        }
        return ret;
}

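/* Nothing to undo; ipath_map_sg() only validated the entries. */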
static void ipath_unmap_sg(struct ib_device *dev,
                           struct scatterlist *sg, int nents,
                           enum dma_data_direction direction)
{
        BUG_ON(!valid_dma_direction(direction));
}

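/*
 * Return the "DMA address" of a scatterlist entry: its kernel virtual
 * address plus the offset within the page, or 0 for an unmapped
 * highmem page.
 */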
static u64 ipath_sg_dma_address(struct ib_device *dev, struct scatterlist *sg)
{
        u64 addr = (u64) page_address(sg->page);

        if (addr)
                addr += sg->offset;
        return addr;
}

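/* The "DMA length" of an entry is simply its scatterlist length. */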
static unsigned int ipath_sg_dma_len(struct ib_device *dev,
                                     struct scatterlist *sg)
{
        return sg->length;
}

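/*
 * The two sync routines below are no-ops: the CPU does all the
 * copying, so there is no separate device view of memory to
 * synchronize with.
 */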
static void ipath_sync_single_for_cpu(struct ib_device *dev,
                                      u64 addr,
                                      size_t size,
                                      enum dma_data_direction dir)
{
}

static void ipath_sync_single_for_device(struct ib_device *dev,
                                         u64 addr,
                                         size_t size,
                                         enum dma_data_direction dir)
{
}

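/*
 * "Coherent" allocation is ordinary page allocation: get_order()
 * rounds the request up to a power-of-two number of pages, and the
 * returned "dma_handle" is just the kernel virtual address of the
 * first page (0, i.e. BAD_DMA_ADDRESS, on failure).
 */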
static void *ipath_dma_alloc_coherent(struct ib_device *dev, size_t size,
                                      u64 *dma_handle, gfp_t flag)
{
        struct page *p;
        void *addr = NULL;

        p = alloc_pages(flag, get_order(size));
        if (p)
                addr = page_address(p);
        if (dma_handle)
                *dma_handle = (u64) addr;
        return addr;
}

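/* Release pages obtained from ipath_dma_alloc_coherent(). */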
static void ipath_dma_free_coherent(struct ib_device *dev, size_t size,
                                    void *cpu_addr, u64 dma_handle)
{
        free_pages((unsigned long) cpu_addr, get_order(size));
}

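/*
 * Per-device DMA mapping operations, registered elsewhere in the
 * driver as the ib_device's dma_ops.  The initializers are positional,
 * so they must stay in the same order as the members of
 * struct ib_dma_mapping_ops in <rdma/ib_verbs.h>.
 *
 * For orientation only (a sketch, not copied from ib_verbs.h): the
 * ib_dma_*() wrappers dispatch through this table roughly as
 *
 *      if (dev->dma_ops)
 *              return dev->dma_ops->map_single(dev, cpu_addr, size, dir);
 *      return dma_map_single(dev->dma_device, cpu_addr, size, dir);
 *
 * so ULPs never call the functions in this file directly.
 */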
struct ib_dma_mapping_ops ipath_dma_mapping_ops = {
        ipath_mapping_error,
        ipath_dma_map_single,
        ipath_dma_unmap_single,
        ipath_dma_map_page,
        ipath_dma_unmap_page,
        ipath_map_sg,
        ipath_unmap_sg,
        ipath_sg_dma_address,
        ipath_sg_dma_len,
        ipath_sync_single_for_cpu,
        ipath_sync_single_for_device,
        ipath_dma_alloc_coherent,
        ipath_dma_free_coherent
};