2 * include/linux/nvmap.h
4 * structure declarations for nvmem and nvmap user-space ioctls
6 * Copyright (c) 2009-2016, NVIDIA CORPORATION. All rights reserved.
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published by
10 * the Free Software Foundation; either version 2 of the License, or
11 * (at your option) any later version.
13 * This program is distributed in the hope that it will be useful, but WITHOUT
14 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
15 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
18 * You should have received a copy of the GNU General Public License along
19 * with this program; if not, write to the Free Software Foundation, Inc.,
20 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
23 #include <linux/ioctl.h>
24 #include <linux/types.h>
25 #if defined(__KERNEL__)
26 #include <linux/rbtree.h>
27 #include <linux/file.h>
28 #include <linux/dma-buf.h>
29 #include <linux/device.h>
32 #ifndef _LINUX_NVMAP_H
33 #define _LINUX_NVMAP_H
/* Heap selection bits: the single IOVMM heap bit sits above all carveout
 * heap bits, so the carveout mask below covers every bit beneath it. */
#define NVMAP_HEAP_IOVMM	(1ul<<30)

/* common carveout heaps */
#define NVMAP_HEAP_CARVEOUT_IRAM	(1ul<<29)
#define NVMAP_HEAP_CARVEOUT_VPR		(1ul<<28)
#define NVMAP_HEAP_CARVEOUT_TSEC	(1ul<<27)
#define NVMAP_HEAP_CARVEOUT_IVM		(1ul<<1)
#define NVMAP_HEAP_CARVEOUT_GENERIC	(1ul<<0)

/* every bit below the IOVMM bit selects some carveout heap */
#define NVMAP_HEAP_CARVEOUT_MASK	(NVMAP_HEAP_IOVMM - 1)

/* allocation flags: bits [1:0] encode the cacheability mode, the remaining
 * bits are independent booleans */
#define NVMAP_HANDLE_UNCACHEABLE	(0x0ul << 0)
#define NVMAP_HANDLE_WRITE_COMBINE	(0x1ul << 0)
#define NVMAP_HANDLE_INNER_CACHEABLE	(0x2ul << 0)
#define NVMAP_HANDLE_CACHEABLE		(0x3ul << 0)
#define NVMAP_HANDLE_CACHE_FLAG		(0x3ul << 0)	/* mask of the cacheability field */

#define NVMAP_HANDLE_SECURE		(0x1ul << 2)
#define NVMAP_HANDLE_KIND_SPECIFIED	(0x1ul << 3)
#define NVMAP_HANDLE_COMPR_SPECIFIED	(0x1ul << 4)
#define NVMAP_HANDLE_ZEROED_PAGES	(0x1ul << 5)
#define NVMAP_HANDLE_PHYS_CONTIG	(0x1ul << 6)
#define NVMAP_HANDLE_CACHE_SYNC		(0x1ul << 7)
#define NVMAP_HANDLE_CACHE_SYNC_AT_RESERVE	(0x1ul << 8)
61 #if defined(__KERNEL__)
63 int nvmap_get_dmabuf_param(struct dma_buf *dmabuf, u32 param, u64 *result);
65 #ifdef CONFIG_NVMAP_PAGE_POOLS
66 ulong nvmap_page_pool_get_unused_pages(void);
68 static inline int nvmap_page_pool_get_unused_pages(void)
74 ulong nvmap_iovmm_get_used_pages(void);
/* Platform description of one nvmap carveout region (kernel-internal).
 * NOTE(review): this listing is elided — several members (and the closing
 * brace) are missing between the numbered lines below; do not treat this as
 * the complete struct definition. */
76 struct nvmap_platform_carveout {
78 unsigned int usage_mask; /* presumably a bitmask of NVMAP_HEAP_* usages this carveout serves — TODO confirm */
81 struct device *cma_dev; /* NOTE(review): looks like the CMA device backing the carveout — confirm */
83 struct device *dma_dev; /* NOTE(review): device used for DMA API operations, presumably — confirm */
85 struct dma_declare_info *dma_info;
90 bool enable_static_dma_map;
91 bool disable_dynamic_dma_map;
92 bool init_done; /* FIXME: remove once all carveouts use reserved-memory */
/* Platform data handed to the nvmap driver: an array of carveout
 * descriptions plus its length.  (Closing brace reconstructed from an
 * elided line.) */
struct nvmap_platform_data {
	const struct nvmap_platform_carveout *carveouts;	/* array of nr_carveouts entries */
	unsigned int nr_carveouts;				/* number of entries in carveouts */
};
100 #endif /* __KERNEL__ */
/**
 * DOC: NvMap Userspace API
 *
 * A client is created by opening /dev/nvmap; most operations are then
 * performed through the ioctls defined below.
 */
/* Attribute selectors for NVMAP_IOC_PARAM{,_32}: which property of a handle
 * to query.  (enum wrapper reconstructed from elided lines.) */
enum {
	NVMAP_HANDLE_PARAM_SIZE = 1,
	NVMAP_HANDLE_PARAM_ALIGNMENT,
	NVMAP_HANDLE_PARAM_BASE,
	NVMAP_HANDLE_PARAM_HEAP,
	NVMAP_HANDLE_PARAM_KIND,
	NVMAP_HANDLE_PARAM_COMPR, /* ignored, to be removed */
};
/* Cache maintenance operations for the "op" field of the cache ioctls.
 * NOTE(review): NVMAP_CACHE_OP_INV is reconstructed from an elided line; the
 * three-way wb/wb_inv/inv set is grounded in the op-field comments of
 * struct nvmap_cache_op below — confirm the ordering against the full
 * header. */
enum {
	NVMAP_CACHE_OP_WB = 0,		/* write back */
	NVMAP_CACHE_OP_INV,		/* invalidate */
	NVMAP_CACHE_OP_WB_INV,		/* write back + invalidate */
};
/* Operations for NVMAP_IOC_RESERVE.
 * NOTE(review): NVMAP_PAGES_RESERVE is reconstructed from an elided line
 * (its existence is implied by NVMAP_IOC_RESERVE and the UNRESERVE
 * counterpart) — confirm value order against the full header. */
enum {
	NVMAP_PAGES_UNRESERVE = 0,
	NVMAP_PAGES_RESERVE,
	NVMAP_INSERT_PAGES_ON_UNRESERVE,
	NVMAP_PAGES_ZAP_AND_CLEAN,
};
/* Argument for the handle-creation family of ioctls (CREATE, FROM_ID,
 * FROM_FD, GET_ID, GET_FD, SHARE, FROM_IVC_ID, GET_IVC_ID).  The union
 * carries the ioctl-specific input; "handle" returns the resulting handle.
 * NOTE(review): the anonymous union wrapper is reconstructed from elided
 * lines — the three fields are documented as alternatives for different
 * ioctls; confirm against the full header. */
struct nvmap_create_handle {
	union {
		__u32 id;	/* FromId */
		__u32 size;	/* CreateHandle */
		__s32 fd;	/* DmaBufFd or FromFd */
	};
	__u32 handle;	/* returns nvmap handle */
};
/* Argument for NVMAP_IOC_ALLOC: backs a previously created handle with
 * memory.  (Closing brace reconstructed from an elided line.) */
struct nvmap_alloc_handle {
	__u32 handle;		/* nvmap handle */
	__u32 heap_mask;	/* heaps to allocate from */
	__u32 flags;		/* wb/wc/uc/iwb etc. */
	__u32 align;		/* min alignment necessary */
};
/* Argument for NVMAP_IOC_ALLOC_IVM: like nvmap_alloc_handle, plus the peer
 * VM the allocation is to be shared with.  (Comment terminator and closing
 * brace reconstructed from elided lines.) */
struct nvmap_alloc_ivm_handle {
	__u32 handle;		/* nvmap handle */
	__u32 heap_mask;	/* heaps to allocate from */
	__u32 flags;		/* wb/wc/uc/iwb etc. */
	__u32 align;		/* min alignment necessary */
	__u32 peer;		/* peer with whom handle must be shared. Used
				 * only for NVMAP_HEAP_CARVEOUT_IVM
				 */
};
/* Argument for NVMAP_IOC_ALLOC_KIND.
 * NOTE(review): truncated listing — the remaining members (heap mask, flags,
 * kind, etc.) and the closing brace are elided below line 158; do not treat
 * this as the complete struct definition. */
157 struct nvmap_alloc_kind_handle {
158 __u32 handle; /* nvmap handle */
/* Argument for NVMAP_IOC_MMAP: maps a region of a handle at a user-supplied
 * virtual address.  (Closing brace reconstructed from an elided line.) */
struct nvmap_map_caller {
	__u32 handle;		/* nvmap handle */
	__u32 offset;		/* offset into hmem; should be page-aligned */
	__u32 length;		/* number of bytes to map */
	__u32 flags;		/* maps as wb/iwb etc. */
	unsigned long addr;	/* user pointer */
};
/* 32-bit compat layout of struct nvmap_map_caller: the user pointer is a
 * fixed-width __u32.  (Closing brace reconstructed from an elided line.) */
struct nvmap_map_caller_32 {
	__u32 handle;		/* nvmap handle */
	__u32 offset;		/* offset into hmem; should be page-aligned */
	__u32 length;		/* number of bytes to map */
	__u32 flags;		/* maps as wb/iwb etc. */
	__u32 addr;		/* user pointer*/
};
/* Argument for NVMAP_IOC_READ/WRITE: strided copy between a user buffer and
 * handle memory.  (Closing brace reconstructed from an elided line.) */
struct nvmap_rw_handle {
	unsigned long addr;	/* user pointer*/
	__u32 handle;		/* nvmap handle */
	__u32 offset;		/* offset into hmem */
	__u32 elem_size;	/* individual atom size */
	__u32 hmem_stride;	/* delta in bytes between atoms in hmem */
	__u32 user_stride;	/* delta in bytes between atoms in user */
	__u32 count;		/* number of atoms to copy */
};
/* 32-bit compat layout of struct nvmap_rw_handle.  (Closing brace
 * reconstructed from an elided line.) */
struct nvmap_rw_handle_32 {
	__u32 addr;		/* user pointer */
	__u32 handle;		/* nvmap handle */
	__u32 offset;		/* offset into hmem */
	__u32 elem_size;	/* individual atom size */
	__u32 hmem_stride;	/* delta in bytes between atoms in hmem */
	__u32 user_stride;	/* delta in bytes between atoms in user */
	__u32 count;		/* number of atoms to copy */
};
/* Argument for NVMAP_IOC_PIN_MULT/UNPIN_MULT: arrays of handles to (un)pin
 * and, on pin, the addresses returned for them.  (Closing brace
 * reconstructed from an elided line.) */
struct nvmap_pin_handle {
	__u32 *handles;		/* array of handles to pin/unpin */
	unsigned long *addr;	/* array of addresses to return */
	__u32 count;		/* number of entries in handles */
};
/* 32-bit compat layout of struct nvmap_pin_handle: the pointers are carried
 * as fixed-width __u32 values.  (Closing brace reconstructed from an elided
 * line.) */
struct nvmap_pin_handle_32 {
	__u32 handles;		/* array of handles to pin/unpin */
	__u32 addr;		/* array of addresses to return */
	__u32 count;		/* number of entries in handles */
};
/* Argument for NVMAP_IOC_PARAM: query one NVMAP_HANDLE_PARAM_* attribute of
 * a handle.  (Closing brace reconstructed from an elided line.) */
struct nvmap_handle_param {
	__u32 handle;		/* nvmap handle */
	__u32 param;		/* size/align/base/heap etc. */
	unsigned long result;	/* returns requested info*/
};
/* 32-bit compat layout of struct nvmap_handle_param.  (Closing brace
 * reconstructed from an elided line.) */
struct nvmap_handle_param_32 {
	__u32 handle;		/* nvmap handle */
	__u32 param;		/* size/align/base/heap etc. */
	__u32 result;		/* returns requested info*/
};
/* Argument for NVMAP_IOC_CACHE: cache maintenance over one mapped range.
 * (Closing brace reconstructed from an elided line.) */
struct nvmap_cache_op {
	unsigned long addr;	/* user pointer*/
	__u32 handle;		/* nvmap handle */
	__u32 len;		/* bytes to flush */
	__s32 op;		/* wb/wb_inv/inv */
};
/* 32-bit compat layout of struct nvmap_cache_op.  (Closing brace
 * reconstructed from an elided line.) */
struct nvmap_cache_op_32 {
	__u32 addr;		/* user pointer*/
	__u32 handle;		/* nvmap handle */
	__u32 len;		/* bytes to flush */
	__s32 op;		/* wb/wb_inv/inv */
};
/* Argument for NVMAP_IOC_CACHE_LIST / NVMAP_IOC_RESERVE: one cache/reserve
 * operation applied to nr handles; the three __u64 fields carry user
 * pointers to parallel u32 arrays.  (Comment continuation and closing brace
 * reconstructed from elided lines; "holindg" typo fixed.) */
struct nvmap_cache_op_list {
	__u64 handles;	/* Ptr to u32 type array, holding handles */
	__u64 offsets;	/* Ptr to u32 type array, holding offsets
			 * into the handles */
	__u64 sizes;	/* Ptr to u32 type array, holding sizes of memory
			 * regions within each handle */
	__u32 nr;	/* Number of handles */
	__s32 op;	/* wb/wb_inv/inv */
};
/* NOTE(review): truncated listing — the members and closing braces of both
 * debugfs structs are elided; only the opening lines are visible.  Do not
 * treat these as complete definitions. */
260 struct nvmap_debugfs_handles_header {
264 struct nvmap_debugfs_handles_entry {
#define NVMAP_IOC_MAGIC 'N'

/* Creates a new memory handle. On input, the argument is the size of the new
 * handle; on return, the argument is the name of the new handle
 */
#define NVMAP_IOC_CREATE  _IOWR(NVMAP_IOC_MAGIC, 0, struct nvmap_create_handle)
#define NVMAP_IOC_FROM_ID _IOWR(NVMAP_IOC_MAGIC, 2, struct nvmap_create_handle)

/* Actually allocates memory for the specified handle */
#define NVMAP_IOC_ALLOC    _IOW(NVMAP_IOC_MAGIC, 3, struct nvmap_alloc_handle)

/* Frees a memory handle, unpinning any pinned pages and unmapping any mappings
 */
#define NVMAP_IOC_FREE       _IO(NVMAP_IOC_MAGIC, 4)

/* Maps the region of the specified handle into a user-provided virtual address
 * that was previously created via an mmap syscall on this fd */
#define NVMAP_IOC_MMAP       _IOWR(NVMAP_IOC_MAGIC, 5, struct nvmap_map_caller)
#define NVMAP_IOC_MMAP_32    _IOWR(NVMAP_IOC_MAGIC, 5, struct nvmap_map_caller_32)

/* Reads/writes data (possibly strided) from a user-provided buffer into the
 * hmem at the specified offset */
#define NVMAP_IOC_WRITE      _IOW(NVMAP_IOC_MAGIC, 6, struct nvmap_rw_handle)
#define NVMAP_IOC_READ       _IOW(NVMAP_IOC_MAGIC, 7, struct nvmap_rw_handle)
#define NVMAP_IOC_WRITE_32   _IOW(NVMAP_IOC_MAGIC, 6, struct nvmap_rw_handle_32)
#define NVMAP_IOC_READ_32    _IOW(NVMAP_IOC_MAGIC, 7, struct nvmap_rw_handle_32)

/* Query a handle attribute (size/align/base/heap); see enum above */
#define NVMAP_IOC_PARAM      _IOWR(NVMAP_IOC_MAGIC, 8, struct nvmap_handle_param)
#define NVMAP_IOC_PARAM_32   _IOWR(NVMAP_IOC_MAGIC, 8, struct nvmap_handle_param_32)

/* Pins a list of memory handles into IO-addressable memory (either IOVMM
 * space or physical memory, depending on the allocation), and returns the
 * address. Handles may be pinned recursively. */
#define NVMAP_IOC_PIN_MULT      _IOWR(NVMAP_IOC_MAGIC, 10, struct nvmap_pin_handle)
#define NVMAP_IOC_UNPIN_MULT    _IOW(NVMAP_IOC_MAGIC, 11, struct nvmap_pin_handle)
#define NVMAP_IOC_PIN_MULT_32   _IOWR(NVMAP_IOC_MAGIC, 10, struct nvmap_pin_handle_32)
#define NVMAP_IOC_UNPIN_MULT_32 _IOW(NVMAP_IOC_MAGIC, 11, struct nvmap_pin_handle_32)

/* Perform cache maintenance on a single mapped range */
#define NVMAP_IOC_CACHE      _IOW(NVMAP_IOC_MAGIC, 12, struct nvmap_cache_op)
#define NVMAP_IOC_CACHE_32   _IOW(NVMAP_IOC_MAGIC, 12, struct nvmap_cache_op_32)

/* Returns a global ID usable to allow a remote process to create a handle
 * reference to the same handle */
#define NVMAP_IOC_GET_ID  _IOWR(NVMAP_IOC_MAGIC, 13, struct nvmap_create_handle)

/* Returns a dma-buf fd usable to allow a remote process to create a handle
 * reference to the same handle */
#define NVMAP_IOC_SHARE   _IOWR(NVMAP_IOC_MAGIC, 14, struct nvmap_create_handle)

/* Returns a file id that allows a remote process to create a handle
 * reference to the same handle */
#define NVMAP_IOC_GET_FD  _IOWR(NVMAP_IOC_MAGIC, 15, struct nvmap_create_handle)

/* Create a new memory handle from file id passed */
#define NVMAP_IOC_FROM_FD _IOWR(NVMAP_IOC_MAGIC, 16, struct nvmap_create_handle)

/* Perform cache maintenance on a list of handles. */
#define NVMAP_IOC_CACHE_LIST _IOW(NVMAP_IOC_MAGIC, 17, \
				  struct nvmap_cache_op_list)
/* Perform reserve operation on a list of handles. */
#define NVMAP_IOC_RESERVE _IOW(NVMAP_IOC_MAGIC, 18, \
				  struct nvmap_cache_op_list)

#define NVMAP_IOC_FROM_IVC_ID	_IOWR(NVMAP_IOC_MAGIC, 19, struct nvmap_create_handle)
#define NVMAP_IOC_GET_IVC_ID	_IOWR(NVMAP_IOC_MAGIC, 20, struct nvmap_create_handle)
#define NVMAP_IOC_GET_IVM_HEAPS	_IOR(NVMAP_IOC_MAGIC, 21, unsigned int)

/* START of T124 IOCTLS */
/* Actually allocates memory for the specified handle, with kind */
#define NVMAP_IOC_ALLOC_KIND _IOW(NVMAP_IOC_MAGIC, 100, struct nvmap_alloc_kind_handle)

/* Actually allocates memory from IVM heaps */
#define NVMAP_IOC_ALLOC_IVM _IOW(NVMAP_IOC_MAGIC, 101, struct nvmap_alloc_ivm_handle)

/* Allocate separate memory for VPR */
#define NVMAP_IOC_VPR_FLOOR_SIZE _IOW(NVMAP_IOC_MAGIC, 102, __u32)

/* Highest ioctl command number defined above (used for bounds checks) */
#define NVMAP_IOC_MAXNR (_IOC_NR(NVMAP_IOC_VPR_FLOOR_SIZE))
361 #endif /* _LINUX_NVMAP_H */